();
37 |
38 | // Set all the elements of the output tensor to 0
39 | const int N = input.size();
40 | for (int i = 0; i < N; i++) {
41 | output(i) = 0;
42 | }
43 |
44 | // Preserve the requested input value
45 | output(preserve_index_) = input(preserve_index_);
46 | }
47 |
48 | private:
49 | int preserve_index_;
50 | };
51 |
52 | REGISTER_KERNEL_BUILDER(Name("ZeroOut").Device(DEVICE_CPU), ZeroOutOp);
53 |
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/colorby_device.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/colorby_device.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/colorby_structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/colorby_structure.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/constant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/constant.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/control_edge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/control_edge.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/conv_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/conv_1.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/dataflow_edge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/dataflow_edge.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/graph_vis_animation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/graph_vis_animation.gif
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/horizontal_stack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/horizontal_stack.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/infocard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/infocard.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/infocard_op.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/infocard_op.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/namespace_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/namespace_node.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/op_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/op_node.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/pool1_collapsed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/pool1_collapsed.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/pool1_expanded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/pool1_expanded.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/reference_edge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/reference_edge.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/save.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/save.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/series.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/series.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/series_expanded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/series_expanded.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/summary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/summary.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/graph_viz/vertical_stack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/graph_viz/vertical_stack.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/overview.md:
--------------------------------------------------------------------------------
1 | # Overview
2 |
3 | ## Variables: Creation, Initialization, Saving, and Restoring
4 |
5 | TensorFlow Variables are in-memory buffers containing tensors. This section shows how to use them to create, save, and update model parameters during training.
6 |
7 | [See the tutorial](../how_tos/variables.md)
8 |
9 | ## TensorFlow Mechanics 101
10 |
11 | A step-by-step walkthrough, using MNIST handwritten digit recognition as a small example, of the details of training large-scale models with the TensorFlow infrastructure.
12 |
13 | [See the tutorial](../tutorials/mnist_tf.md)
14 |
15 | ## TensorBoard: Visualizing Learning
16 |
17 | TensorBoard is a useful visualization tool for training and evaluating models. This tutorial explains how to create and run TensorBoard, and how to use summary ops to automatically export data to the event files that TensorBoard reads.
18 |
19 | [See the tutorial](../how_tos/summaries_and_tensorboard.md)
20 |
21 | ## TensorBoard: Graph Visualization
22 |
23 | This tutorial describes how to use the graph visualizer in TensorBoard, which helps you understand and debug your computation graph.
24 |
25 | [See the tutorial](../how_tos/graph_viz.md)
26 |
27 | ## Reading Data
28 |
29 | This tutorial describes the three main methods of getting data into a TensorFlow program: feeding, reading, and preloading.
30 |
31 | [See the tutorial](../how_tos/reading_data.md)
32 |
33 | ## Threading and Queues
34 |
35 | This tutorial describes the various constructs TensorFlow provides to make asynchronous and concurrent training easier.
36 |
37 | [See the tutorial](../how_tos/threading_and_queues.md)
38 |
39 | ## Adding a New Op
40 |
41 | TensorFlow already provides a full set of node operations (ops) that you can freely use in your graph, but here are the details of how to add a custom op.
42 |
43 | [See the tutorial](../how_tos/adding_an_op.md)
44 |
45 | ## Custom Data Readers
46 |
47 | If you have a sizable collection of data in a custom format, you may want to extend TensorFlow's data readers so that your data can be read in natively.
48 |
49 | [See the tutorial](../how_tos/new_data_formats.md)
50 |
51 | ## Using GPUs
52 |
53 | This tutorial describes how to build and run models on multiple GPUs.
54 |
55 | [See the tutorial](../how_tos/using_gpu.md)
56 |
57 | ## Sharing Variables
58 |
59 | When deploying large models on multiple GPUs, or unrolling complex LSTMs or RNNs, it is often necessary to read and write the same Variables from different parts of the model-construction code. The variable scope mechanism was designed to make this easier; a minimal sketch follows the tutorial link below.
60 |
61 | [See the tutorial](../how_tos/variable_scope/index.md)
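
A minimal sketch of the idea, assuming the TensorFlow 0.x Python API used throughout these docs (the scope name `layer`, the variable name `w`, and the `linear` helper are hypothetical, purely for illustration):

```python
import tensorflow as tf

def linear(x):
    # get_variable() creates 'w' on the first call and returns the same
    # variable on later calls once the enclosing scope is marked for reuse.
    w = tf.get_variable('w', shape=[2, 2],
                        initializer=tf.constant_initializer(1.0))
    return tf.matmul(x, w)

with tf.variable_scope('layer') as scope:
    y1 = linear(tf.ones([1, 2]))
    scope.reuse_variables()   # share 'layer/w' instead of creating a second copy
    y2 = linear(tf.ones([1, 2]))
```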
62 |
63 | Original: [How-to](http://tensorflow.org/how_tos/index.html)
64 |
65 | Translation: [Terence Cooper](https://github.com/TerenceCooper)
66 |
67 | Proofreading: [lonlonago](https://github.com/lonlonago)
68 |
--------------------------------------------------------------------------------
/SOURCE/how_tos/reading_data/AnimatedFileQueues.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/reading_data/AnimatedFileQueues.gif
--------------------------------------------------------------------------------
/SOURCE/how_tos/reading_data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/reading_data/__init__.py
--------------------------------------------------------------------------------
/SOURCE/how_tos/reading_data/convert_to_records.py:
--------------------------------------------------------------------------------
1 | """Converts MNIST data to TFRecords file format with Example protos."""
2 | from __future__ import print_function
3 |
4 | import os
5 | import tensorflow.python.platform
6 |
7 | import numpy
8 | import tensorflow as tf
9 | from tensorflow.g3doc.tutorials.mnist import input_data
10 |
11 |
12 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' # MNIST filenames
13 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
14 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
15 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
16 |
17 |
18 | tf.app.flags.DEFINE_string('directory', 'data',
19 | 'Directory to download data files and write the '
20 | 'converted result')
21 | tf.app.flags.DEFINE_integer('validation_size', 5000,
22 | 'Number of examples to separate from the training '
23 | 'data for the validation set.')
24 | FLAGS = tf.app.flags.FLAGS
25 |
26 |
27 | def _int64_feature(value):
28 | return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
29 |
30 |
31 | def _bytes_feature(value):
32 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
33 |
34 |
35 | def convert_to(images, labels, name):
36 | num_examples = labels.shape[0]
37 | if images.shape[0] != num_examples:
38 | raise ValueError("Images size %d does not match label size %d." %
39 | (images.shape[0], num_examples))
40 | rows = images.shape[1]
41 | cols = images.shape[2]
42 | depth = images.shape[3]
43 |
44 | filename = os.path.join(FLAGS.directory, name + '.tfrecords')
45 | print('Writing', filename)
46 | writer = tf.python_io.TFRecordWriter(filename)
47 | for index in range(num_examples):
48 | image_raw = images[index].tostring()
49 | example = tf.train.Example(features=tf.train.Features(feature={
50 | 'height': _int64_feature(rows),
51 | 'width': _int64_feature(cols),
52 | 'depth': _int64_feature(depth),
53 | 'label': _int64_feature(int(labels[index])),
54 | 'image_raw': _bytes_feature(image_raw)}))
55 | writer.write(example.SerializeToString())
56 |   writer.close()
57 |
58 | def main(argv):
59 | # Get the data.
60 | train_images_filename = input_data.maybe_download(
61 | TRAIN_IMAGES, FLAGS.directory)
62 | train_labels_filename = input_data.maybe_download(
63 | TRAIN_LABELS, FLAGS.directory)
64 | test_images_filename = input_data.maybe_download(
65 | TEST_IMAGES, FLAGS.directory)
66 | test_labels_filename = input_data.maybe_download(
67 | TEST_LABELS, FLAGS.directory)
68 |
69 | # Extract it into numpy arrays.
70 | train_images = input_data.extract_images(train_images_filename)
71 | train_labels = input_data.extract_labels(train_labels_filename)
72 | test_images = input_data.extract_images(test_images_filename)
73 | test_labels = input_data.extract_labels(test_labels_filename)
74 |
75 | # Generate a validation set.
76 | validation_images = train_images[:FLAGS.validation_size, :, :, :]
77 | validation_labels = train_labels[:FLAGS.validation_size]
78 | train_images = train_images[FLAGS.validation_size:, :, :, :]
79 | train_labels = train_labels[FLAGS.validation_size:]
80 |
81 | # Convert to Examples and write the result to TFRecords.
82 | convert_to(train_images, train_labels, 'train')
83 | convert_to(validation_images, validation_labels, 'validation')
84 | convert_to(test_images, test_labels, 'test')
85 |
86 |
87 | if __name__ == '__main__':
88 | tf.app.run()
89 |
--------------------------------------------------------------------------------
/SOURCE/how_tos/reading_data/fully_connected_preloaded.py:
--------------------------------------------------------------------------------
1 | """Trains the MNIST network using preloaded data in a constant.
2 |
3 | Command to run this py_binary target:
4 |
5 | bazel run -c opt \
6 | <...>/tensorflow/g3doc/how_tos/reading_data:fully_connected_preloaded
7 | """
8 | from __future__ import print_function
9 | import os.path
10 | import time
11 |
12 | import tensorflow.python.platform
13 | import numpy
14 | import tensorflow as tf
15 |
16 | from tensorflow.g3doc.tutorials.mnist import input_data
17 | from tensorflow.g3doc.tutorials.mnist import mnist
18 |
19 |
20 | # Basic model parameters as external flags.
21 | flags = tf.app.flags
22 | FLAGS = flags.FLAGS
23 | flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
24 | flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
25 | flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
26 | flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
27 | flags.DEFINE_integer('batch_size', 100, 'Batch size. '
28 | 'Must divide evenly into the dataset sizes.')
29 | flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
30 | flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
31 | 'for unit testing.')
32 |
33 |
34 | def run_training():
35 | """Train MNIST for a number of epochs."""
36 | # Get the sets of images and labels for training, validation, and
37 | # test on MNIST.
38 | data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
39 |
40 | # Tell TensorFlow that the model will be built into the default Graph.
41 | with tf.Graph().as_default():
42 | with tf.name_scope('input'):
43 | # Input data
44 | input_images = tf.constant(data_sets.train.images)
45 | input_labels = tf.constant(data_sets.train.labels)
46 |
47 | image, label = tf.train.slice_input_producer(
48 | [input_images, input_labels], num_epochs=FLAGS.num_epochs)
49 | label = tf.cast(label, tf.int32)
50 | images, labels = tf.train.batch(
51 | [image, label], batch_size=FLAGS.batch_size)
52 |
53 | # Build a Graph that computes predictions from the inference model.
54 | logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
55 |
56 | # Add to the Graph the Ops for loss calculation.
57 | loss = mnist.loss(logits, labels)
58 |
59 | # Add to the Graph the Ops that calculate and apply gradients.
60 | train_op = mnist.training(loss, FLAGS.learning_rate)
61 |
62 | # Add the Op to compare the logits to the labels during evaluation.
63 | eval_correct = mnist.evaluation(logits, labels)
64 |
65 | # Build the summary operation based on the TF collection of Summaries.
66 | summary_op = tf.merge_all_summaries()
67 |
68 | # Create a saver for writing training checkpoints.
69 | saver = tf.train.Saver()
70 |
71 | # Create the op for initializing variables.
72 | init_op = tf.initialize_all_variables()
73 |
74 | # Create a session for running Ops on the Graph.
75 | sess = tf.Session()
76 |
77 | # Run the Op to initialize the variables.
78 | sess.run(init_op)
79 |
80 | # Instantiate a SummaryWriter to output summaries and the Graph.
81 | summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
82 | graph_def=sess.graph_def)
83 |
84 | # Start input enqueue threads.
85 | coord = tf.train.Coordinator()
86 | threads = tf.train.start_queue_runners(sess=sess, coord=coord)
87 |
88 | # And then after everything is built, start the training loop.
89 | try:
90 | step = 0
91 | while not coord.should_stop():
92 | start_time = time.time()
93 |
94 | # Run one step of the model.
95 | _, loss_value = sess.run([train_op, loss])
96 |
97 | duration = time.time() - start_time
98 |
99 | # Write the summaries and print an overview fairly often.
100 | if step % 100 == 0:
101 | # Print status to stdout.
102 | print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
103 | duration))
104 | # Update the events file.
105 | summary_str = sess.run(summary_op)
106 | summary_writer.add_summary(summary_str, step)
107 | step += 1
108 |
109 | # Save a checkpoint periodically.
110 | if (step + 1) % 1000 == 0:
111 | print('Saving')
112 | saver.save(sess, FLAGS.train_dir, global_step=step)
113 |
114 | step += 1
115 | except tf.errors.OutOfRangeError:
116 | print('Saving')
117 | saver.save(sess, FLAGS.train_dir, global_step=step)
118 | print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
119 | finally:
120 | # When done, ask the threads to stop.
121 | coord.request_stop()
122 |
123 | # Wait for threads to finish.
124 | coord.join(threads)
125 | sess.close()
126 |
127 |
128 | def main(_):
129 | run_training()
130 |
131 |
132 | if __name__ == '__main__':
133 | tf.app.run()
134 |
--------------------------------------------------------------------------------
/SOURCE/how_tos/summaries_and_tensorboard.md:
--------------------------------------------------------------------------------
1 | # TensorBoard: Visualizing Learning
2 |
3 | The computations you'll use TensorFlow for, such as training a massive deep neural network, can be complex and confusing.
4 |
5 | To make it easier to understand, debug, and optimize TensorFlow programs, we've included a suite of visualization tools called TensorBoard. You can use TensorBoard to visualize your TensorFlow graph, plot quantitative metrics about the execution of your graph, and show additional data.
6 |
7 | When TensorBoard is fully configured, it looks like this:
8 |
9 | 
10 |
11 | ## Serializing the data
12 |
13 | TensorBoard operates by reading TensorFlow event files, which contain the summary data that you generate when running TensorFlow. Here's the general lifecycle for summary data within TensorBoard.
14 |
15 | First, create the TensorFlow graph that you'd like to collect summary data from, and decide which nodes you would like to annotate with [summary operations](../api_docs/python/train.md#summary_options).
16 |
17 | For example, suppose you are training a convolutional neural network for recognizing MNIST digits. You'd like to record how the learning rate varies over time, and how the objective function is changing. Collect these by attaching [scalar_summary](../api_docs/python/train.md#scalar_summary) ops to the nodes that output the learning rate and the loss respectively. Then, give each scalar_summary a meaningful `tag`, such as `'learning rate'` and `'loss function'`.
18 |
19 | Perhaps you'd also like to visualize the distribution of activations coming off a particular layer, or the distribution of gradients or weights. Collect this data by attaching [histogram_summary](../api_docs/python/train.md#histogram_summary) ops to the weight variables and to the gradient outputs respectively.
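
For instance, a minimal sketch of attaching these ops with the 0.x-era API named above (the tensors `lr`, `cross_entropy`, and `weights` are hypothetical stand-ins for values already defined in your graph):

```python
import tensorflow as tf

# Hypothetical stand-ins for tensors your model already defines.
lr = tf.constant(0.01, name='learning_rate')
cross_entropy = tf.constant(2.3, name='loss')
weights = tf.Variable(tf.truncated_normal([784, 128]), name='hidden1_weights')

# Scalar summaries with meaningful tags, plus a histogram summary
# over the weight variable.
tf.scalar_summary('learning rate', lr)
tf.scalar_summary('loss function', cross_entropy)
tf.histogram_summary('hidden1/weights', weights)
```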
20 |
21 | For details on all of the available summary operations, check out the docs on [summary operations](../api_docs/python/train.md#summary_operation).
22 |
23 | Operations in TensorFlow don't do anything until you run them, or until another op depends on their output. The summary nodes we've just created are peripheral to your graph: none of the ops you are currently running depend on them. So, to generate summary data, we need to run all of these summary nodes. Managing them by hand would be tedious, so use [tf.merge_all_summaries](../api_docs/python/train.md#merge_all_summaries) to combine them into a single op.
24 |
25 | Then, you can just run the merged summary op, which will generate a serialized `Summary` protobuf object containing all of your summary data at a given step. Finally, to write this summary data to disk, pass the summary protobuf to a [tf.train.SummaryWriter](../api_docs/python/train.md#SummaryWriter).
26 |
27 | The `SummaryWriter` constructor takes a logdir argument. This logdir is quite important: it is the directory where all of the events will be written out. The `SummaryWriter` can also optionally take a `GraphDef` in its constructor. If it receives one, then TensorBoard will visualize your graph as well.
28 |
29 | Now that you've modified your graph and have a `SummaryWriter`, you're ready to run your network! If you want, you could run the merged summary op every single step and record a ton of training data, but that's likely more data than you need. Instead, consider running the merged summary op every hundred steps or so, as in the following code example.
30 |
31 | ```python
32 | merged_summary_op = tf.merge_all_summaries()
33 | summary_writer = tf.train.SummaryWriter('/tmp/mnist_logs', sess.graph)
34 | total_step = 0
35 | while training:
36 | total_step += 1
37 | session.run(training_op)
38 | if total_step % 100 == 0:
39 | summary_str = session.run(merged_summary_op)
40 | summary_writer.add_summary(summary_str, total_step)
41 | ```
42 |
43 | You're now all set to visualize this data using TensorBoard.
44 |
45 | ## Launching TensorBoard
46 |
47 | To start TensorBoard, run the following command
48 |
49 | ```
50 | python tensorflow/tensorboard/tensorboard.py --logdir=path/to/log-directory
51 | ```
52 |
53 | where `logdir` points to the directory where the `SummaryWriter` serialized its data. If this `logdir` directory contains subdirectories with serialized data from separate runs, TensorBoard will visualize the data from all of those runs. Once TensorBoard is running, navigate your web browser to `localhost:6006` to view it.
54 |
55 | If you have installed TensorBoard via pip, you can launch it with a simpler command
56 |
57 | ```
58 | tensorboard --logdir=/path/to/log-directory
59 | ```
60 |
61 | When you open TensorBoard, you will see navigation tabs in the top right corner. Each tab represents a set of serialized data that can be visualized. For any tab you are looking at, if the logs do not contain any data relevant to that tab, a message is displayed explaining how to serialize the applicable data.
62 |
63 | For in-depth information on how to use the graph tab to visualize your graph, see [TensorBoard: Graph Visualization](./graph_viz.md).
64 |
65 | Original: [TensorBoard: Visualizing Learning](http://tensorflow.org/how_tos/summaries_and_tensorboard/index.html#tensorboard-visualizing-learning) Translation: [thylaco1eo](https://github.com/thylaco1eo) Proofreading: [lucky521](https://github.com/lucky521)
66 |
--------------------------------------------------------------------------------
/SOURCE/how_tos/summaries_and_tensorboard/mnist_tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/summaries_and_tensorboard/mnist_tensorboard.png
--------------------------------------------------------------------------------
/SOURCE/how_tos/threading_and_queues/IncremeterFifoQueue.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/how_tos/threading_and_queues/IncremeterFifoQueue.gif
--------------------------------------------------------------------------------
/SOURCE/images/AnimatedFileQueues.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/AnimatedFileQueues.gif
--------------------------------------------------------------------------------
/SOURCE/images/IncremeterFifoQueue.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/IncremeterFifoQueue.gif
--------------------------------------------------------------------------------
/SOURCE/images/MNIST-Matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/MNIST-Matrix.png
--------------------------------------------------------------------------------
/SOURCE/images/MNIST.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/MNIST.png
--------------------------------------------------------------------------------
/SOURCE/images/Parallelism.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/Parallelism.png
--------------------------------------------------------------------------------
/SOURCE/images/TensorFlow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/TensorFlow.jpg
--------------------------------------------------------------------------------
/SOURCE/images/audio-image-text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/audio-image-text.png
--------------------------------------------------------------------------------
/SOURCE/images/baseball_network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/baseball_network.png
--------------------------------------------------------------------------------
/SOURCE/images/blue_pill.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/blue_pill.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_activations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_activations.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_graph.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_image_summary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_image_summary.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_loss.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_lr_decay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_lr_decay.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_samples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_samples.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_sparsity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_sparsity.png
--------------------------------------------------------------------------------
/SOURCE/images/cifar_var_histograms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/cifar_var_histograms.png
--------------------------------------------------------------------------------
/SOURCE/images/colorby_device.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/colorby_device.png
--------------------------------------------------------------------------------
/SOURCE/images/colorby_structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/colorby_structure.png
--------------------------------------------------------------------------------
/SOURCE/images/constant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/constant.png
--------------------------------------------------------------------------------
/SOURCE/images/control_edge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/control_edge.png
--------------------------------------------------------------------------------
/SOURCE/images/conv_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/conv_1.png
--------------------------------------------------------------------------------
/SOURCE/images/dataflow_edge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/dataflow_edge.png
--------------------------------------------------------------------------------
/SOURCE/images/getting_started.dot:
--------------------------------------------------------------------------------
1 | digraph Dependencies {
2 | node [shape = oval];
3 | "predictions: MatMul()" -> "data: Concat()"
4 | "data: Concat()" -> data_left
5 | "data: Concat()" -> data_right
6 | "predictions: MatMul()" -> "weight_matrix: Reshape()"
7 | "weight_matrix: Reshape()" -> "new_weights: Add()"
8 | "new_weights: Add()" -> weights
9 | "new_weights: Add()" -> deltas
10 | "update: Assign()" -> weights
11 | "update: Assign()" -> "new_weights: Add()"
12 | "InitializeAllVariables()" -> weights
13 | "InitializeAllVariables()" -> init_value
14 | }
--------------------------------------------------------------------------------
/SOURCE/images/getting_started.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/getting_started.png
--------------------------------------------------------------------------------
/SOURCE/images/graph_vis_animation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/graph_vis_animation.gif
--------------------------------------------------------------------------------
/SOURCE/images/horizontal_stack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/horizontal_stack.png
--------------------------------------------------------------------------------
/SOURCE/images/infocard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/infocard.png
--------------------------------------------------------------------------------
/SOURCE/images/infocard_op.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/infocard_op.png
--------------------------------------------------------------------------------
/SOURCE/images/jeff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/jeff.png
--------------------------------------------------------------------------------
/SOURCE/images/linear-relationships.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/linear-relationships.png
--------------------------------------------------------------------------------
/SOURCE/images/mandelbrot_output.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mandelbrot_output.jpg
--------------------------------------------------------------------------------
/SOURCE/images/mnist-train-xs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist-train-xs.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist-train-ys.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist-train-ys.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist1.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist10.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist2.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist3.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist4.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist5.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist6.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist7.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist8.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist9.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist_digits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist_digits.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist_subgraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist_subgraph.png
--------------------------------------------------------------------------------
/SOURCE/images/mnist_tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/mnist_tensorboard.png
--------------------------------------------------------------------------------
/SOURCE/images/namespace_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/namespace_node.png
--------------------------------------------------------------------------------
/SOURCE/images/nce-nplm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/nce-nplm.png
--------------------------------------------------------------------------------
/SOURCE/images/op_node.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/op_node.png
--------------------------------------------------------------------------------
/SOURCE/images/pde_output_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/pde_output_1.jpg
--------------------------------------------------------------------------------
/SOURCE/images/pde_output_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/pde_output_2.jpg
--------------------------------------------------------------------------------
/SOURCE/images/pool1_collapsed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/pool1_collapsed.png
--------------------------------------------------------------------------------
/SOURCE/images/pool1_expanded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/pool1_expanded.png
--------------------------------------------------------------------------------
/SOURCE/images/re.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/re.png
--------------------------------------------------------------------------------
/SOURCE/images/re1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/re1.png
--------------------------------------------------------------------------------
/SOURCE/images/red_pill.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/red_pill.png
--------------------------------------------------------------------------------
/SOURCE/images/reference_edge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/reference_edge.png
--------------------------------------------------------------------------------
/SOURCE/images/results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/results.png
--------------------------------------------------------------------------------
/SOURCE/images/rw4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/rw4.png
--------------------------------------------------------------------------------
/SOURCE/images/rw5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/rw5.png
--------------------------------------------------------------------------------
/SOURCE/images/rw6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/rw6.png
--------------------------------------------------------------------------------
/SOURCE/images/rw7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/rw7.png
--------------------------------------------------------------------------------
/SOURCE/images/save.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/save.png
--------------------------------------------------------------------------------
/SOURCE/images/scatterplot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/scatterplot.png
--------------------------------------------------------------------------------
/SOURCE/images/series.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/series.png
--------------------------------------------------------------------------------
/SOURCE/images/series_expanded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/series_expanded.png
--------------------------------------------------------------------------------
/SOURCE/images/softmax-nplm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/softmax-nplm.png
--------------------------------------------------------------------------------
/SOURCE/images/softmax-regression-scalarequation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/softmax-regression-scalarequation.png
--------------------------------------------------------------------------------
/SOURCE/images/softmax-regression-scalargraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/softmax-regression-scalargraph.png
--------------------------------------------------------------------------------
/SOURCE/images/softmax-regression-vectorequation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/softmax-regression-vectorequation.png
--------------------------------------------------------------------------------
/SOURCE/images/softmax-weights.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/softmax-weights.png
--------------------------------------------------------------------------------
/SOURCE/images/summary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/summary.png
--------------------------------------------------------------------------------
/SOURCE/images/tensorflow_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/tensorflow_logo.png
--------------------------------------------------------------------------------
/SOURCE/images/tensors_flowing.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/tensors_flowing.gif
--------------------------------------------------------------------------------
/SOURCE/images/tf_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/tf_logo.png
--------------------------------------------------------------------------------
/SOURCE/images/tf_logo_transp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/tf_logo_transp.png
--------------------------------------------------------------------------------
/SOURCE/images/theta.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/theta.png
--------------------------------------------------------------------------------
/SOURCE/images/tsne.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/tsne.png
--------------------------------------------------------------------------------
/SOURCE/images/vertical_stack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/vertical_stack.png
--------------------------------------------------------------------------------
/SOURCE/images/vr1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/vr1.png
--------------------------------------------------------------------------------
/SOURCE/images/vr2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/vr2.png
--------------------------------------------------------------------------------
/SOURCE/images/vr3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/vr3.png
--------------------------------------------------------------------------------
/SOURCE/images/vr4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/vr4.png
--------------------------------------------------------------------------------
/SOURCE/images/vr5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/vr5.png
--------------------------------------------------------------------------------
/SOURCE/images/weixin.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/weixin.jpg
--------------------------------------------------------------------------------
/SOURCE/images/weixin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/weixin.png
--------------------------------------------------------------------------------
/SOURCE/images/word2vec2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/images/word2vec2.png
--------------------------------------------------------------------------------
/SOURCE/index.md:
--------------------------------------------------------------------------------
1 | # TensorFlow
2 |
3 |
4 |
5 | ## Introduction
6 |
7 | TensorFlow™ is an open source software library for numerical computation
8 | using data flow graphs. Nodes in the graph represent mathematical operations,
9 | while the graph edges represent the multidimensional data arrays (tensors) that
10 | flow between them. This flexible architecture allows you to deploy computation
11 | to one or more CPUs or GPUs in a desktop, server, or mobile device without
12 | rewriting code. TensorFlow was originally developed by researchers and
13 | engineers working on the Google Brain team within Google's Machine Intelligence
14 | research organization for the purposes of conducting machine learning and deep
15 | neural networks research. The system is general enough to be applicable in a
16 | wide variety of other domains as well. The following documents show you how
17 | to set up and use the TensorFlow system.
18 |
19 | ## Table of Contents
20 |
21 |
22 |
--------------------------------------------------------------------------------
/SOURCE/personal.md:
--------------------------------------------------------------------------------
1 | ## TensorFlow Personal Study Notes
2 |
3 | Note: the links collected in this chapter are personal notes from learning TensorFlow, mostly blog posts, intended to help readers understand the official documentation from a more practical angle.
4 |
5 | Example:
6 |
7 | - [My TensorFlow study blog](http://wiki.jikexueyuan.com)
--------------------------------------------------------------------------------
/SOURCE/resource.md:
--------------------------------------------------------------------------------
1 | ## TensorFlow Related Resources
2 |
3 | - [Official Google blog post announcing the open-sourcing of TensorFlow](https://googleblog.blogspot.com/2015/11/tensorflow-smarter-machine-learning-for.html)
4 | - [TensorFlow white paper (PDF download)](http://download.tensorflow.org/paper/whitepaper2015.pdf)
5 | - [Jeff Dean introduces TensorFlow (video)](https://www.youtube.com/watch?v=90-S1M7Ny_o&t=21m2s)
6 | - [Scikit Flow, a simplified TensorFlow interface](https://github.com/google/skflow)
7 | - [TensorFlow usage examples](https://github.com/aymericdamien/TensorFlow-Examples)
8 | - [TensorFlow compared with mxnet and caffe, by @chenrudan](http://chenrudan.github.io/blog/2015/11/18/comparethreeopenlib.html)
9 | - [Memory usage and performance comparison of TensorFlow, torch7, caffe, and mxnet, by @mli](https://github.com/dmlc/mxnet/blob/master/doc/overview_chn.md)
--------------------------------------------------------------------------------
/SOURCE/resources/bib.md:
--------------------------------------------------------------------------------
1 | # BibTex 引用
2 |
3 | If you use TensorFlow in your research and would like to cite the TensorFlow system, we suggest you cite the following white paper.
4 |
5 | ```
6 | @misc{tensorflow2015-whitepaper,
7 | title={{TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
8 | url={http://tensorflow.org/},
9 | note={Software available from tensorflow.org},
10 | author={
11 | Mart\'{\i}n~Abadi and
12 | Ashish~Agarwal and
13 | Paul~Barham and
14 | Eugene~Brevdo and
15 | Zhifeng~Chen and
16 | Craig~Citro and
17 | Greg~S.~Corrado and
18 | Andy~Davis and
19 | Jeffrey~Dean and
20 | Matthieu~Devin and
21 | Sanjay~Ghemawat and
22 | Ian~Goodfellow and
23 | Andrew~Harp and
24 | Geoffrey~Irving and
25 | Michael~Isard and
26 | Yangqing Jia and
27 | Rafal~Jozefowicz and
28 | Lukasz~Kaiser and
29 | Manjunath~Kudlur and
30 | Josh~Levenberg and
31 | Dan~Man\'{e} and
32 | Rajat~Monga and
33 | Sherry~Moore and
34 | Derek~Murray and
35 | Chris~Olah and
36 | Mike~Schuster and
37 | Jonathon~Shlens and
38 | Benoit~Steiner and
39 | Ilya~Sutskever and
40 | Kunal~Talwar and
41 | Paul~Tucker and
42 | Vincent~Vanhoucke and
43 | Vijay~Vasudevan and
44 | Fernanda~Vi\'{e}gas and
45 | Oriol~Vinyals and
46 | Pete~Warden and
47 | Martin~Wattenberg and
48 | Martin~Wicke and
49 | Yuan~Yu and
50 | Xiaoqiang~Zheng},
51 | year={2015},
52 | }
53 | ```
54 |
55 | In textual form:
56 |
57 | ```
58 | Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo,
59 | Zhifeng Chen, Craig Citro, Greg S. Corrado, Andy Davis,
60 | Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow,
61 | Andrew Harp, Geoffrey Irving, Michael Isard, Rafal Jozefowicz, Yangqing Jia,
62 | Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dan Mané, Mike Schuster,
63 | Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Jonathon Shlens,
64 | Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker,
65 | Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas,
66 | Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke,
67 | Yuan Yu, and Xiaoqiang Zheng.
68 | TensorFlow: Large-scale machine learning on heterogeneous systems,
69 | 2015. Software available from tensorflow.org.
70 | ```
71 |
72 | > Original: http://tensorflow.org/resources/bib.md Translation: [Jim-Zenn](https://github.com/Jim-Zenn) Proofreading: [Wiki](https://github.com/jikexueyuanwiki)
73 |
--------------------------------------------------------------------------------
/SOURCE/resources/dims_types.md:
--------------------------------------------------------------------------------
1 | # 张量的阶、形状、数据类型
2 |
3 | TensorFlow用张量这种数据结构来表示所有的数据.你可以把一个张量想象成一个n维的数组或列表.一个张量有一个静态的类型和动态的维数.张量可以在图中的节点之间流通.
4 |
5 | ## 阶
6 |
7 | 在TensorFlow系统中,张量的维数被描述为*阶*.但是张量的阶和矩阵的阶并不是同一个概念.张量的阶(有时也称为*order*、*degree*或*n维*)是张量维数的一个数量描述.比如,下面的张量(使用Python中list定义的)就是2阶.
8 |
9 | ```
10 | t = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
11 | ```
12 |
13 | 你可以认为一个二阶张量就是我们平常所说的矩阵,一阶张量可以认为是一个向量.对于一个二阶张量你可以用语句`t[i, j]`来访问其中的任何元素.而对于三阶张量你可以用`t[i, j, k]`来访问其中的任何元素.
14 |
15 |
16 |
17 | 阶 |数学实例| Python 例子
18 | --- | --- | ---
19 | 0 | 纯量 (只有大小) | `s = 483`
20 | 1 | 向量(大小和方向) | `v = [1.1, 2.2, 3.3]`
21 | 2 | 矩阵(数据表) | `m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]`
22 | 3 | 3阶张量 (数据立体) | `t = [[[2], [4], [6]], [[8], [10], [12]], [[14], [16], [18]]]`
23 | n | n阶 (自己想想看) | `....`
24 |
25 | ## 形状
26 |
27 | TensorFlow文档中使用了三种记号来方便地描述张量的维度:阶,形状以及维数.下表展示了它们之间的关系:
28 |
29 | 阶 | 形状 | 维数 | 实例
30 | --- | --- | --- | ---
31 | 0 | [ ] | 0-D | 一个 0维张量. 一个纯量.
32 | 1 | [D0] | 1-D | 一个形状为 [5] 的1维张量.
33 | 2 | [D0, D1] | 2-D | 一个形状为 [3, 4] 的2维张量.
34 | 3 | [D0, D1, D2] | 3-D | 一个形状为 [1, 4, 3] 的3维张量.
35 | n | [D0, D1, ... Dn-1] | n-D | 一个形状为 [D0, D1, ... Dn-1] 的n维张量.
36 |
37 |
38 | 形状可以通过Python中的整数列表或元组(int list或tuples)来表示,也可以用[`TensorShape` class](../api_docs/python/framework.md#TensorShape)来表示.
39 |
40 | ## 数据类型
41 |
42 | 除了维度,Tensors有一个数据类型属性.你可以为一个张量指定下列数据类型中的任意一个类型:
43 |
44 |
45 | 数据类型 | Python 类型| 描述
46 | --- | --- | ---
47 | `DT_FLOAT` | `tf.float32` | 32 位浮点数.
48 | `DT_DOUBLE` | `tf.float64` | 64 位浮点数.
49 | `DT_INT64` | `tf.int64` | 64 位有符号整型.
50 | `DT_INT32` | `tf.int32` | 32 位有符号整型.
51 | `DT_INT16` | `tf.int16` | 16 位有符号整型.
52 | `DT_INT8` | `tf.int8` | 8 位有符号整型.
53 | `DT_UINT8` | `tf.uint8` | 8 位无符号整型.
54 | `DT_STRING` | `tf.string` | 可变长度的字节数组.每一个张量元素都是一个字节数组.
55 | `DT_BOOL` | `tf.bool` |布尔型.
56 | `DT_COMPLEX64` | `tf.complex64` | 由两个32位浮点数组成的复数:实部和虚部.
57 | `DT_QINT32` | `tf.qint32` | 用于量化Ops的32位有符号整型.
58 | `DT_QINT8` | `tf.qint8` | 用于量化Ops的8位有符号整型.
59 | `DT_QUINT8` | `tf.quint8` |用于量化Ops的8位无符号整型.
60 |
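下面是一个示意性的小例子(基于本文档所使用的早期 TensorFlow Python API),展示如何查看一个张量的阶、形状和数据类型:

```python
import tensorflow as tf

# 一个 2 阶张量(矩阵),形状为 (2, 3)
t = tf.constant([[1, 2, 3], [4, 5, 6]])

print(t.get_shape())  # (2, 3)
print(t.dtype)        # <dtype: 'int32'>
```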
61 |
62 |
63 |
64 | 原文:[Tensor Ranks, Shapes, and Types](http://www.tensorflow.org/resources/dims_types.md)
65 |
66 | 翻译:[nb312](https://github.com/nb312)
67 |
68 | 校对:[lonlonago]( https://github.com/lonlonago)
69 |
--------------------------------------------------------------------------------
/SOURCE/resources/glossary.md:
--------------------------------------------------------------------------------
1 | # 术语表
2 |
3 | ### 广播操作(Broadcasting operation)
4 |
5 | 一种用[numpy-style broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)来保证tensor参数的形态兼容的操作。
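下面是一个示意性的小例子(这里直接用 NumPy 演示同样的广播规则,并不特指某个 TensorFlow 接口):

```python
import numpy as np

a = np.array([[1.0], [2.0], [3.0]])  # 形状 (3, 1)
b = np.array([10.0, 20.0])           # 形状 (2,)

# 广播会把两个参数都"扩展"成形状 (3, 2),再逐元素相加
print(a + b)
# [[ 11.  21.]
#  [ 12.  22.]
#  [ 13.  23.]]
```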
6 |
7 | ### Devices
8 |
9 | 一块可以用来运算并且拥有自己的地址空间的硬件,比如GPU和CPU。
10 |
11 | ### eval
12 |
13 | Tensor 的一个方法,返回该 Tensor 的值,并会触发为计算出这个值所需要的任何图计算。只有当 Tensor 所在的图已经在某个会话(Session)中启动后,才能调用 eval( )。
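下面是一个示意性的最小例子(基于本文档所使用的早期 Python API):

```python
import tensorflow as tf

a = tf.constant(4.0)
b = tf.constant(2.5)
c = a * b

# eval() 只能在图已经于某个会话中启动后调用
with tf.Session():
    print(c.eval())  # 10.0
```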
14 |
15 | ### Feed
16 |
17 | TensorFlow 的一个概念:把一个 Tensor 值直接提供给图中的任意节点。feed 不是在构建图(graph)的时候创建,而是在触发图的执行操作时引入的。一个 feed 用给定的 Tensor 值临时替换图中某个节点的输出。把 feed 数据作为 run( ) 方法或 eval( ) 方法的参数传入,以此发起运算;方法运行结束后,feed 就会消失,而最初的节点定义仍然还在。可以通过 tf.placeholder( ) 把特定的节点指定为 feed 节点来创建它们(下文 Fetch 条目之后给出了一个结合 feed 与 fetch 的小示例)。详见[Basic Usage](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/get_started/basic_usage.md)。
18 |
19 | ### Fetch
20 |
21 | TensorFlow中的一个概念:指取回运算操作的输出结果。取回(fetch)发生在触发图的执行操作的时候,而不是发生在建立图的时候。如果要取回一个或多个节点(node)的 Tensor 值,可以在 Session 对象上调用 run( ) 方法,并把待取回节点(node)的列表作为参数来执行图(graph)。详见[Basic Usage](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/get_started/basic_usage.md)。
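下面是一个结合 feed 与 fetch 的示意性最小例子(基于本文档所使用的早期 Python API):用 tf.placeholder( ) 创建 feed 节点,并在 run( ) 调用中取回(fetch)结果。

```python
import tensorflow as tf

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
c = a * b

with tf.Session() as sess:
    # fetch 节点 c 的值,同时把数据 feed 给 a 和 b
    result = sess.run(c, feed_dict={a: 3.0, b: 7.0})
    print(result)  # 21.0
```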
22 |
23 | ### Graph(图)
24 |
25 | 把运算任务描述成一个有向无环图(DAG),图中的节点(node)代表必须要执行的操作,图中的边代表数据依赖或控制依赖。GraphDef 是向系统描述一个图所用的 proto,它由一组 NodeDef 组成。一个 GraphDef 可以被转化成一个更容易操作的图对象。
26 |
27 | ### IndexedSlices(索引化切片)
28 |
29 | 在 Python API 中,它用来表示只在第一维上稀疏的 Tensor。如果一个 Tensor 有 k 维,那么一个 IndexedSlices 实例在逻辑上代表沿着该 Tensor 第一维的一组 (k-1) 维切片。切片的索引被连续储存在一个一维向量中,而对应的切片则被拼接成一个 k 维 Tensor。如果稀疏性不限于第一维,请使用
30 | SparseTensor。
31 |
32 | ### Node(节点)
33 |
34 | 图中的一个元素。
35 | 节点描述了如何把一个特定的操作实例化为图中的一个节点,包括用来配置该操作的所有属性的取值。对于多态(polymorphic)的操作,这些属性包含了足以完全确定这个节点(Node)签名的信息。详见 graph.proto。
36 |
37 | ### 操作(Op/operation)
38 |
39 | 在 TensorFlow 的运行时中,它是一种类似 add 或 matmul 或 concat的运算。可以用[how to add an op](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/how_tos/adding_an_op/index.md)中的方法来向运行时添加新的操作。
40 |
41 | 在 Python 的API中,它是图中的一个节点。在[tf.Operation](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/python/framework.md#Operation)类中列举出了这些操作。一个操作(Operation)的 type 属性决定这个节点(node)的操作类型,比如add和matmul。
42 |
43 | ### Run
44 |
45 | 在一个已启动的图中执行操作的行为。要求该图已经在会话(Session)中启动。
46 |
47 | 在 Python 的 API 中,它是 Session 类的一个方法[tf.Session.run](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/python/client.md#Session)。可以在调用 run( ) 时传入要取回(fetch)或要喂入(feed)的 Tensor。
48 |
49 | 在C++的API中,它是[tensorflow::Session](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/cc/ClassSession.md)类
50 | 的一个方法。
51 |
52 | ### Session(会话)
53 |
54 | 启动图的第一步是创建一个 Session 对象。Session 提供在图中执行操作的一些方法。
55 |
56 | 在 Python API中,使用[tf.Session](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/python/client.md#Session)。
57 |
58 | 在 C++ 的API中,[tensorflow::Session](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/cc/ClassSession.md)是用来创建一个图并运行操作的类:
59 |
60 | ### Shape
61 |
62 | Tensor 的维度和它们的大小。
63 |
64 | 在一个已经启动的图中,它表示流动在节点(node)之间的 Tensor 的属性。一些操作对输入的 shape 有比较强的要求,如果形状不匹配则会报告错误。
65 |
66 | 在 Python API中,用创建图的 API 来说明 Tensor 的 Shape 属性。Tensor 的 Shape 属性要么只有部分已知,要么全部未知。详见[tf.TensorShape](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/python/framework.md#TensorShape)。
67 |
68 | 在C++中,Shape 类用来表示 Tensor 的维度。[tensorflow::TensorShape](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/cc/ClassTensorShape.md)。
69 |
70 | ### SparseTensor
71 |
72 | 在 Python API 中,它用来表示在任意位置上都可能稀疏的 Tensor。SparseTensor 以索引-值(index-value)的方式储存非空值:对于 m 个非空值,它包含一个长度为 m 的值向量和一个 m 行的索引(indices)矩阵。为了提升效率,SparseTensor 要求索引按照维度递增的顺序存储,例如行主序。如果稀疏性仅限于第一维,请使用 IndexedSlices。
73 |
74 | ### Tensor
75 |
76 | Tensor 是一种特定的多维数组。比如,一个四维的浮点数数组可以表示一小批(mini-batch)图片,其维度为 [batch, height, width, channels]。
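下面给出一个简单的示意(其中的具体尺寸只是假设的例子),对应上面提到的 [batch, height, width, channels] 四维图片张量:

```python
import tensorflow as tf

# 一个假想的小批量:8 张 28x28 的单通道灰度图片
images = tf.zeros([8, 28, 28, 1], dtype=tf.float32)
print(images.get_shape())  # (8, 28, 28, 1)
```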
77 |
78 | 在一个运行的图(graph)中,它是一种流动在节点(node)之间的数据。
79 | 在 Python 中,Tensor 类表示添加到图的操作中的输入和输出,见[tf.Tensor](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/python/framework.md#Tensor),这样的类不持有数据。
80 |
81 | 在C++中,Tensor是方法[Session::Run( )](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/cc/ClassSession.md)的返回值,见[tensorflow::Tensor](https://github.com/jikexueyuanwiki/tensorflow-zh/blob/master/SOURCE/api_docs/cc/ClassTensor.md),这样的 Tensor 持有数据。
82 |
83 | 原文:[Glossary](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/resources/glossary.md)
84 |
85 | 翻译:[leege100](https://github.com/leege100)
86 |
87 | 校对:[lonlonago](https://github.com/lonlonago)
--------------------------------------------------------------------------------
/SOURCE/resources/overview.md:
--------------------------------------------------------------------------------
1 | # 其他资源
2 |
3 | ## TensorFlow 白皮书
4 |
5 | 在这份白皮书里,你可以找到关于 TensorFlow 编程模型的更多详情和 TensorFlow 的实现原理。
6 |
7 | * [TensorFlow: Large-scale machine learning on heterogeneous systems](http://download.tensorflow.org/paper/whitepaper2015.pdf)
8 |
9 | ### 引用
10 |
11 | 如果你在你的研究中使用了 TensorFlow,并且希望在引用中注记 TensorFlow,我们建议你引用上面这篇论文。你可以使用这个 [BibTeX 条目](../resources/bib.md)。随着项目的继续发展,我们还会在这个建议引用列表中添加新的论文。
12 |
13 | ## 社区
14 |
15 | ### 讨论
16 |
17 | * [GitHub ](https://github.com/tensorflow/tensorflow)
18 | * [Stack Overflow](https://stackoverflow.com/questions/tagged/tensorflow)
19 | * [TensorFlow 讨论邮件列表](https://groups.google.com/a/tensorflow.org/d/forum/discuss)
20 |
21 | ### 报告 Issues
22 |
23 | * [TensorFlow issues](https://github.com/tensorflow/tensorflow/issues)
24 |
25 | ### 开发
26 |
27 | * 如果你有兴趣为 TensorFlow 贡献代码,请
28 | [阅读这篇代码贡献指南](https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md).
29 |
30 | > 原文:[http://tensorflow.org/resources/bib.md](http://tensorflow.org/resources/bib.md)
31 | >
32 | > 翻译:[Jim-Zenn](https://github.com/Jim-Zenn)
33 | >
34 | > 校对:[lonlonago]( https://github.com/lonlonago)
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/SOURCE/resources/uses.md:
--------------------------------------------------------------------------------
1 | # 应用实例
2 |
3 | 本页介绍了一些 TensorFlow 系统当前在实际中的应用。
4 |
5 | > 如果您在做研究、教育、或在某些产品中正在使用 TensorFlow,
6 | > 我们非常乐意在这里添加一些有关您的使用情况。
7 | > 请随时给我们发电子邮件简要说明您是如何使用TensorFlow的,
8 | > 或者给我们发 pull request来添加一个条目到本文件。
9 |
10 | 下面列出了一些 TensorFlow 的用途。
11 |
12 | * **RankBrain**
13 | * **组织**: Google
14 | * **领域**: Information Retrieval
15 | * **描述**: 在 www.google.com 的搜索排名中大规模部署的深度神经网络。
16 | * **更多信息**: ["Google Turning Over Its Lucrative Search to AI Machines"](http://www.bloomberg.com/news/articles/2015-10-26/google-turning-its-lucrative-web-search-over-to-ai-machines)
17 |
18 | * **Inception Image Classification Model**
19 | * **组织**: Google
20 | * **描述**: 针对高精度计算机视觉模型的研究,该模型赢得了2014年ImageNet图像分类挑战赛 (ILSVRC 2014)
21 | * **更多信息**: 关于 Baseline model 的描述 [Arxiv paper](http://arxiv.org/abs/1409.4842)
22 |
23 | * **SmartReply**
24 | * **组织**: Google
25 | * **描述**: 基于深度 LSTM 模型的自动生成电子邮件回复
26 | * **更多信息**: [Google research blog post](http://googleresearch.blogspot.com/2015/11/computer-respond-to-this-email.html)
27 |
28 | * **Massively Multitask Networks for Drug Discovery**
29 | * **组织**: Google and Stanford University
30 | * **领域**: Drug discovery
31 | * **描述**: 基于深度神经网络模型的药物探索
32 | * **更多信息**: [Arxiv paper](http://arxiv.org/abs/1502.02072)
33 |
34 | * **On-Device Computer Vision for OCR**
35 | * **组织**: Google
36 | * **描述**: 用设备内置的计算机视觉模型来做‘光学字符识别’(OCR)以实现实时翻译。
37 | * **更多信息**: [Google Research blog post](http://googleresearch.blogspot.com/2015/07/how-google-translate-squeezes-deep.html)
38 |
39 | 原文:[http://tensorflow.org/resources/uses.md](http://tensorflow.org/resources/uses.md)
40 |
41 | 翻 译:[andyiac](https://github.com/andyiac)
42 |
43 | 校 对:[lonlonago](https://github.com/lonlonago)
44 |
45 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/BUILD:
--------------------------------------------------------------------------------
1 | # Description:
2 | # Top-level tutorials files
3 |
4 | package(default_visibility = ["//tensorflow:internal"])
5 |
6 | licenses(["notice"]) # Apache 2.0
7 |
8 | exports_files(["LICENSE"])
9 |
10 | filegroup(
11 | name = "all_files",
12 | srcs = glob(
13 | ["**/*"],
14 | exclude = [
15 | "**/METADATA",
16 | "**/OWNERS",
17 | ],
18 | ),
19 | )
20 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/__init__.py
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/Parallelism.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/Parallelism.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_activations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_activations.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_graph.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_image_summary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_image_summary.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_loss.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_lr_decay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_lr_decay.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_samples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_samples.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_sparsity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_sparsity.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_tensorboard.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | TensorBoard Demo
5 |
6 |
7 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/deep_cnn/cifar_var_histograms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/deep_cnn/cifar_var_histograms.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mandelbrot.md:
--------------------------------------------------------------------------------
1 | # 曼德布洛特(Mandelbrot)集合
2 |
3 | 虽然可视化曼德布洛特(Mandelbrot)集合与机器学习没有任何关系,但它是一个将 TensorFlow 应用于一般数学计算的有趣例子。实际上,这只是一个相当朴素的可视化实现,但已经足以说明问题。(我们最终也许会提供一种更加精心设计的实现,来生成真正更加美丽的图像。)
4 |
5 | 说明:本教程最初是作为一个 IPython notebook 编写的。
6 |
7 | ## 基本步骤
8 |
9 | 首先,我们需要导入一些库。
10 |
11 | ```python
12 | # 导入仿真库
13 | import tensorflow as tf
14 | import numpy as np
15 |
16 | # 导入可视化库
17 | import PIL.Image
18 | from cStringIO import StringIO
19 | from IPython.display import clear_output, Image, display
20 | import scipy.ndimage as nd
21 | ```
22 |
23 | 现在我们将定义一个函数来显示迭代计算出的图像。
24 |
25 | ```python
26 | def DisplayFractal(a, fmt='jpeg'):
27 | """显示迭代计算出的彩色分形图像。"""
28 | a_cyclic = (6.28*a/20.0).reshape(list(a.shape)+[1])
29 | img = np.concatenate([10+20*np.cos(a_cyclic),
30 | 30+50*np.sin(a_cyclic),
31 | 155-80*np.cos(a_cyclic)], 2)
32 | img[a==a.max()] = 0
33 | a = img
34 | a = np.uint8(np.clip(a, 0, 255))
35 | f = StringIO()
36 | PIL.Image.fromarray(a).save(f, fmt)
37 | display(Image(data=f.getvalue()))
38 | ```
39 |
40 | ## 会话(session)和变量(variable)初始化
41 |
42 | 为了操作的方便,我们常常使用交互式会话(interactive session),但普通会话(regular session)也能正常使用。
43 |
44 | ```python
45 | sess = tf.InteractiveSession()
46 | ```
47 |
48 | 我们可以自由的混合使用NumPy和TensorFlow,这一点非常方便。
49 |
50 | ```python
51 | # 使用NumPy创建一个在[-2,2]x[-2,2]范围内的2维复数数组
52 |
53 | Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
54 | Z = X+1j*Y
55 | ```
56 |
57 | 现在我们定义并初始化一组TensorFlow的张量 (tensors)。
58 |
59 | ```python
60 | xs = tf.constant(Z.astype("complex64"))
61 | zs = tf.Variable(xs)
62 | ns = tf.Variable(tf.zeros_like(xs, "float32"))
63 | ```
64 |
65 | TensorFlow在使用之前需要你明确给定变量的初始值。
66 |
67 | ```python
68 | tf.initialize_all_variables().run()
69 | ```
70 |
71 | ## 定义并运行计算
72 |
73 | 现在我们指定更多的计算...
74 |
75 | ```python
76 | # 计算一个新值z: z^2 + x
77 | zs_ = zs*zs + xs
78 |
79 | # 这个新值会发散吗?
80 | not_diverged = tf.complex_abs(zs_) < 4
81 |
82 | # 更新zs并且迭代计算。
83 | #
84 | # 说明:在这些值发散之后,我们仍然在计算zs,这个计算消耗特别大!
85 | # 有更好的方法来处理,只是会稍微复杂一点。
86 | #
87 | step = tf.group(
88 | zs.assign(zs_),
89 | ns.assign_add(tf.cast(not_diverged, "float32"))
90 | )
91 | ```
92 |
93 | ...继续执行几百个步骤
94 |
95 | ```python
96 | for i in range(200): step.run()
97 | ```
98 |
99 | 让我们看看我们得到了什么。
100 |
101 | ```python
102 | DisplayFractal(ns.eval())
103 | ```
104 |
105 | 
106 |
107 | 结果不错!
108 |
109 | > 原文:[Mandelbrot Set](http://tensorflow.org/tutorials/mandelbrot/index.md) 翻译:[ericxk](https://github.com/ericxk) 校对:[tensorfly](https://github.com/tensorfly)
110 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/mandelbrot/index.md:
--------------------------------------------------------------------------------
1 | # Mandelbrot Set
2 |
3 | Visualizing the Mandelbrot set doesn't have anything to do with machine
4 | learning, but it makes for a fun example of how one can use TensorFlow for
5 | general mathematics. This is actually a pretty naive implementation of the
6 | visualization, but it makes the point. (We may end up providing a more
7 | elaborate implementation down the line to produce more truly beautiful images.)
8 |
9 | Note: This tutorial was originally prepared as an IPython notebook.
10 |
11 | ## Basic Setup
12 |
13 | We'll need a few imports to get started.
14 |
15 | ```python
16 | # Import libraries for simulation
17 | import tensorflow as tf
18 | import numpy as np
19 |
20 | # Imports for visualization
21 | import PIL.Image
22 | from cStringIO import StringIO
23 | from IPython.display import clear_output, Image, display
24 | import scipy.ndimage as nd
25 | ```
26 |
27 | Now we'll define a function to actually display the image once we have
28 | iteration counts.
29 |
30 | ```python
31 | def DisplayFractal(a, fmt='jpeg'):
32 | """Display an array of iteration counts as a
33 | colorful picture of a fractal."""
34 | a_cyclic = (6.28*a/20.0).reshape(list(a.shape)+[1])
35 | img = np.concatenate([10+20*np.cos(a_cyclic),
36 | 30+50*np.sin(a_cyclic),
37 | 155-80*np.cos(a_cyclic)], 2)
38 | img[a==a.max()] = 0
39 | a = img
40 | a = np.uint8(np.clip(a, 0, 255))
41 | f = StringIO()
42 | PIL.Image.fromarray(a).save(f, fmt)
43 | display(Image(data=f.getvalue()))
44 | ```
45 |
46 | ## Session and Variable Initialization
47 |
48 | For playing around like this, we often use an interactive session, but a regular
49 | session would work as well.
50 |
51 | ```python
52 | sess = tf.InteractiveSession()
53 | ```
54 |
55 | It's handy that we can freely mix NumPy and TensorFlow.
56 |
57 | ```python
58 | # Use NumPy to create a 2D array of complex numbers on [-2,2]x[-2,2]
59 |
60 | Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
61 | Z = X+1j*Y
62 | ```
63 |
64 | Now we define and initialize TensorFlow tensors.
65 |
66 | ```python
67 | xs = tf.constant(Z.astype("complex64"))
68 | zs = tf.Variable(xs)
69 | ns = tf.Variable(tf.zeros_like(xs, "float32"))
70 | ```
71 |
72 | TensorFlow requires that you explicitly initialize variables before using them.
73 |
74 | ```python
75 | tf.initialize_all_variables().run()
76 | ```
77 |
78 | ## Defining and Running the Computation
79 |
80 | Now we specify more of the computation...
81 |
82 | ```python
83 | # Compute the new values of z: z^2 + x
84 | zs_ = zs*zs + xs
85 |
86 | # Have we diverged with this new value?
87 | not_diverged = tf.complex_abs(zs_) < 4
88 |
89 | # Operation to update the zs and the iteration count.
90 | #
91 | # Note: We keep computing zs after they diverge! This
92 | # is very wasteful! There are better, if a little
93 | # less simple, ways to do this.
94 | #
95 | step = tf.group(
96 | zs.assign(zs_),
97 | ns.assign_add(tf.cast(not_diverged, "float32"))
98 | )
99 | ```
100 |
101 | ... and run it for a couple hundred steps
102 |
103 | ```python
104 | for i in range(200): step.run()
105 | ```
106 |
107 | Let's see what we've got.
108 |
109 | ```python
110 | DisplayFractal(ns.eval())
111 | ```
112 |
113 | 
114 |
115 | Not bad!
116 |
117 |
118 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/mandelbrot/mandelbrot_output.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mandelbrot/mandelbrot_output.jpg
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/__init__.py
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/MNIST-Matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/MNIST-Matrix.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/MNIST.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/MNIST.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/mnist-train-xs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/mnist-train-xs.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/mnist-train-ys.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/mnist-train-ys.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/softmax-regression-scalarequation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/softmax-regression-scalarequation.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/softmax-regression-scalargraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/softmax-regression-scalargraph.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/softmax-regression-vectorequation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/softmax-regression-vectorequation.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/beginners/img/softmax-weights.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/beginners/img/softmax-weights.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/download/index.md:
--------------------------------------------------------------------------------
1 | # MNIST Data Download
2 |
3 | Code: [tensorflow/g3doc/tutorials/mnist/](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/g3doc/tutorials/mnist/)
4 |
5 | The goal of this tutorial is to show how to download the dataset files required
6 | for handwritten digit classification using the (classic) MNIST data set.
7 |
8 | ## Tutorial Files
9 |
10 | This tutorial references the following files:
11 |
12 | File | Purpose
13 | --- | ---
14 | [`input_data.py`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/g3doc/tutorials/mnist/input_data.py) | The code to download the MNIST dataset for training and evaluation.
15 |
16 | ## Prepare the Data
17 |
18 | MNIST is a classic problem in machine learning. The problem is to look at
19 | greyscale 28x28 pixel images of handwritten digits and determine which digit
20 | the image represents, for all the digits from zero to nine.
21 |
22 | 
23 |
24 | For more information, refer to [Yann LeCun's MNIST page](http://yann.lecun.com/exdb/mnist/)
25 | or [Chris Olah's visualizations of MNIST](http://colah.github.io/posts/2014-10-Visualizing-MNIST/).
26 |
27 | ### Download
28 |
29 | [Yann LeCun's MNIST page](http://yann.lecun.com/exdb/mnist/)
30 | also hosts the training and test data for download.
31 |
32 | File | Purpose
33 | --- | ---
34 | [`train-images-idx3-ubyte.gz`](http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz) | training set images - 55000 training images, 5000 validation images
35 | [`train-labels-idx1-ubyte.gz`](http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz) | training set labels matching the images
36 | [`t10k-images-idx3-ubyte.gz`](http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz) | test set images - 10000 images
37 | [`t10k-labels-idx1-ubyte.gz`](http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz) | test set labels matching the images
38 |
39 | In the `input_data.py` file, the `maybe_download()` function will ensure these
40 | files are downloaded into a local data folder for training.
41 |
42 | The folder name is specified in a flag variable at the top of the
43 | `fully_connected_feed.py` file and may be changed to fit your needs.
44 |
45 | ### Unpack and Reshape
46 |
47 | The files themselves are not in any standard image format and are manually
48 | unpacked (following the instructions available at the website) by the
49 | `extract_images()` and `extract_labels()` functions in `input_data.py`.
50 |
51 | The image data is extracted into a 2d tensor of: `[image index, pixel index]`
52 | where each entry is the intensity value of a specific pixel in a specific
53 | image, rescaled from `[0, 255]` to `[-0.5, 0.5]`. The "image index" corresponds
54 | to an image in the dataset, counting up from zero to the size of the dataset.
55 | And the "pixel index" corresponds to a specific pixel in that image, ranging
56 | from zero to the number of pixels in the image.
57 |
58 | The 60000 examples in the `train-*` files are then split into 55000 examples
59 | for training and 5000 examples for validation. For all of the 28x28
60 | pixel greyscale images in the datasets the image size is 784 and so the output
61 | tensor for the training set images is of shape `[55000, 784]`.
62 |
63 | The label data is extracted into a 1d tensor of: `[image index]`
64 | with the class identifier for each example as the value. For the training set
65 | labels, this would then be of shape `[55000]`.
66 |
67 | ### DataSet Object
68 |
69 | The underlying code will download, unpack, and reshape images and labels for
70 | the following datasets:
71 |
72 | Dataset | Purpose
73 | --- | ---
74 | `data_sets.train` | 55000 images and labels, for primary training.
75 | `data_sets.validation` | 5000 images and labels, for iterative validation of training accuracy.
76 | `data_sets.test` | 10000 images and labels, for final testing of trained accuracy.
77 |
78 | The `read_data_sets()` function will return a dictionary with a `DataSet`
79 | instance for each of these three sets of data. The `DataSet.next_batch()`
80 | method can be used to fetch a tuple consisting of `batch_size` lists of images
81 | and labels to be fed into the running TensorFlow session.
82 |
83 | ```python
84 | images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
85 | ```
86 |
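As a brief, illustrative sketch (the placeholder names and the `train_op` below are assumptions for illustration, not part of `input_data.py`), a fetched batch is typically passed into the running session through a feed dictionary:

```python
import tensorflow as tf
import input_data

data_sets = input_data.read_data_sets("/tmp/data/")

# Hypothetical placeholders matching the shapes described above:
# images are [batch_size, 784] floats, labels are [batch_size] class ids.
images_placeholder = tf.placeholder(tf.float32, shape=[None, 784])
labels_placeholder = tf.placeholder(tf.int32, shape=[None])

images_feed, labels_feed = data_sets.train.next_batch(100)
feed_dict = {
    images_placeholder: images_feed,
    labels_placeholder: labels_feed,
}
# sess.run(train_op, feed_dict=feed_dict) would then run one training step.
```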
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/mnist_softmax.py:
--------------------------------------------------------------------------------
1 | """A very simple MNIST classifer.
2 |
3 | See extensive documentation at ??????? (insert public URL)
4 | """
5 | from __future__ import print_function
6 |
7 | # Import data
8 | import input_data
9 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
10 |
11 | import tensorflow as tf
12 | sess = tf.InteractiveSession()
13 |
14 | # Create the model
15 | x = tf.placeholder("float", [None, 784])
16 | W = tf.Variable(tf.zeros([784,10]))
17 | b = tf.Variable(tf.zeros([10]))
18 | #y = tf.nn.softmax(tf.matmul(x,W) + b) # this would lead to an error because of log(0)
19 | y = tf.nn.log_softmax(tf.matmul(x,W) + b)
20 |
21 | # Define loss and optimizer
22 | y_ = tf.placeholder("float", [None,10])
23 | #cross_entropy = -tf.reduce_sum(y_*tf.log(y))
24 | cross_entropy = -tf.reduce_sum(y_*y)
25 | train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
26 |
27 | # Train
28 | tf.initialize_all_variables().run()
29 | for i in range(1000):
30 | batch_xs, batch_ys = mnist.train.next_batch(100)
31 | train_step.run({x: batch_xs, y_: batch_ys})
32 |
33 | # Test trained model
34 | correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
35 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
36 | print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
37 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/tf/mnist_digits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/tf/mnist_digits.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/tf/mnist_subgraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/tf/mnist_subgraph.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist/tf/mnist_tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/mnist/tf/mnist_tensorboard.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/mnist_download.md:
--------------------------------------------------------------------------------
1 | # MNIST 数据下载
2 |
3 | 源码: [tensorflow/g3doc/tutorials/mnist/](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/tutorials/mnist)
4 |
5 | 本教程的目标是展示如何下载用于手写数字分类问题所要用到的(经典)MNIST数据集。
6 |
7 | ## 教程 文件
8 |
9 | 本教程需要使用以下文件:
10 |
11 | 文件 | 目的
12 | --- | ---
13 | [`input_data.py`](https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/mnist/input_data.py) | 下载用于训练和测试的MNIST数据集的源码
14 |
15 | ## 准备数据
16 |
17 | MNIST是在机器学习领域中的一个经典问题。该问题解决的是把28x28像素的灰度手写数字图片识别为相应的数字,其中数字的范围从0到9。
18 |
19 | 
20 |
21 | 更多详情, 请参考 [Yann LeCun's MNIST page](http://yann.lecun.com/exdb/mnist/)
22 | 或 [Chris Olah's visualizations of MNIST](http://colah.github.io/posts/2014-10-Visualizing-MNIST/).
23 |
24 | ### 下载
25 |
26 | [Yann LeCun's MNIST page](http://yann.lecun.com/exdb/mnist/)
27 | 也提供了训练集与测试集数据的下载。
28 |
29 | 文件 | 内容
30 | --- | ---
31 | [`train-images-idx3-ubyte.gz`](http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz) | 训练集图片 - 55000 张 训练图片, 5000 张 验证图片
32 | [`train-labels-idx1-ubyte.gz`](http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz) | 训练集图片对应的数字标签
33 | [`t10k-images-idx3-ubyte.gz`](http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz) | 测试集图片 - 10000 张 图片
34 | [`t10k-labels-idx1-ubyte.gz`](http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz) | 测试集图片对应的数字标签
35 |
36 | 在 `input_data.py` 文件中, `maybe_download()` 函数可以确保这些训练数据下载到本地文件夹中。
37 |
38 | 文件夹的名字在
39 | `fully_connected_feed.py` 文件的顶部由一个标记变量指定,你可以根据自己的需要进行修改。
40 | ### 解压 与 重构
41 |
42 | 这些文件本身并没有使用标准的图片格式储存,并且需要使用`input_data.py`文件中`extract_images()` 和`extract_labels()`函数来手动解压(页面中有相关说明)。
43 |
44 | 图片数据将被解压成2维的tensor:`[image index, pixel index]`
45 | 其中每一项表示某一图片中特定像素的强度值,该值已从 `[0, 255]` 重新缩放到 `[-0.5, 0.5]`。"image index" 代表数据集中图片的编号,从0到数据集的上限值;"pixel index" 代表该图片中像素点的编号,从0到图片的像素上限值。
46 |
47 | 以`train-*`开头的文件中包括60000个样本,其中分割出55000个样本作为训练集,其余的5000个样本作为验证集。因为数据集中所有28x28像素的灰度图片展开后的尺寸都是784,所以训练集图片输出的tensor形状为`[55000, 784]`。
48 |
49 | 数字标签数据被解压成1维的tensor: `[image index]`,其值为每个样本对应的类别标签。对于训练集的标签来说,这个数据的形状就是:`[55000]`。
50 |
51 | ### 数据集 对象
52 |
53 | 底层的源码将会执行下载、解压、重构图片和标签数据来组成以下的数据集对象:
54 |
55 | 数据集 | 目的
56 | --- | ---
57 | `data_sets.train` | 55000 组 图片和标签, 用于训练。
58 | `data_sets.validation` | 5000 组 图片和标签, 用于迭代验证训练的准确性。
59 | `data_sets.test` | 10000 组 图片和标签, 用于最终测试训练的准确性。
60 |
61 | 执行`read_data_sets()`函数将会返回一个包含上述三个数据集的对象,其中每个数据集对应一个`DataSet`实例。函数`DataSet.next_batch()`用于获取一个元组,元组中包含`batch_size`大小的一组图片和标签,可以将其喂入正在运行的 TensorFlow 会话中。
62 | ```python
63 | images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
64 | ```
65 | 原文地址:[MNIST Data Download](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/g3doc/tutorials/mnist/download/index.md) 翻译:[btpeter](https://github.com/btpeter) 校对:waiwaizheng
--------------------------------------------------------------------------------
/SOURCE/tutorials/overview.md:
--------------------------------------------------------------------------------
1 | # 综述
2 |
3 | ## 面向机器学习初学者的 MNIST 初级教程
4 |
5 | 如果你是机器学习领域的新手, 我们推荐你从本文开始阅读. 本文通过讲述一个经典的问题, 手写数字识别 (MNIST), 让你对多类分类 (multiclass classification) 问题有直观的了解.
6 |
7 | [阅读教程](../tutorials/mnist_beginners.md)
8 |
9 | ## 面向机器学习专家的 MNIST 高级教程
10 |
11 | 如果你已经对其它深度学习软件比较熟悉, 并且也对 MNIST 很熟悉, 这篇教程能够引导你对 TensorFlow 有初步了解.
12 |
13 | [阅读教程](../tutorials/mnist_pros.md)
14 |
15 | ## TensorFlow 使用指南
16 |
17 | 这是一篇技术教程, 详细介绍了如何使用 TensorFlow 架构训练大规模模型. 本文继续使用MNIST 作为例子.
18 |
19 | [阅读教程](../tutorials/mnist_tf.md)
20 |
21 | ## 卷积神经网络
22 |
23 | 这篇文章介绍了如何使用 TensorFlow 在 CIFAR-10 数据集上训练卷积神经网络. 卷积神经网络是为图像识别量身定做的一个模型. 相比其它模型, 该模型利用了平移不变性 (translation invariance), 从而能够更简洁有效地表示视觉内容.
24 |
25 | [阅读教程](../tutorials/deep_cnn.md)
26 |
27 | ## 单词的向量表示
28 |
29 | 本文让你了解为什么学会使用向量来表示单词, 即词嵌入 (word embedding), 是一件很有用的事情. 文章中介绍的 word2vec 模型, 是一种高效学习词嵌入的方法. 本文还涉及了噪声对比 (noise-contrastive) 训练方法的一些高级细节, 该训练方法是词嵌入训练领域最近最大的进展.
30 |
31 | [阅读教程](../tutorials/word2vec.md)
32 |
33 | ## 循环神经网络 (Recurrent Neural Network, 简称 RNN)
34 |
35 | 一篇 RNN 的介绍文章, 文章中训练了一个 LSTM 网络来预测一个英文句子的下一个单词(该任务有时候被称作语言建模).
36 |
37 | [阅读教程](../tutorials/recurrent.md)
38 |
39 | ## 序列到序列模型 (Sequence-to-Sequence Model)
40 |
41 | RNN 教程的后续, 该教程采用序列到序列模型进行机器翻译. 你将学会构建一个完全基于机器学习的、端到端的 `英语-法语` 翻译器.
42 |
43 | [阅读教程](../tutorials/seq2seq.md)
44 |
45 | ## Mandelbrot 集合
46 |
47 | TensorFlow 可以用于与机器学习完全无关的其它计算领域. 这里实现了一个原生的 Mandelbrot 集合的可视化程序.
48 |
49 | [阅读教程](../tutorials/mandelbrot.md)
50 |
51 | ## 偏微分方程
52 |
53 | 这是另外一个非机器学习计算的例子, 我们利用一个原生实现的偏微分方程, 对雨滴落在池塘上的过程进行仿真.
54 |
55 | [阅读教程](../tutorials/pdes.md)
56 |
57 | ## MNIST 数据下载
58 |
59 | 一篇关于下载 MNIST 手写识别数据集的详细教程.
60 |
61 | [阅读教程](../tutorials/mnist_download.md)
62 |
63 | ## 视觉物体识别 (Visual Object Recognition)
64 |
65 | 我们将毫无保留地发布已经训练好的, 目前最先进的 Inception 物体识别模型.
66 |
67 | 敬请期待...
68 |
69 | ## Deep Dream 视幻觉软件
70 |
71 | 我们将发布一个 TensorFlow 版本的 [Deep Dream](https://github.com/google/deepdream),这是一款基于 Inception 识别模型的神经网络视幻觉软件.
72 |
73 | 敬请期待...
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 | > 原文:[Overview](http://tensorflow.org/tutorials) 翻译:[@doc001](https://github.com/PFZheng) 校对:[@eric_xu](https://github.com/ericxk)
90 |
91 |
92 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/pdes.md:
--------------------------------------------------------------------------------
1 | # 偏微分方程
2 |
3 | ***TensorFlow*** 不仅仅可以用来做机器学习,还可以用来做模拟仿真。在这里,我们将通过模拟几滴雨点落入一块方形水池的例子,来介绍用 ***TensorFlow*** 对偏微分方程进行仿真的基本方法。
4 |
5 | >注:本教程最初是作为一个 **IPython** notebook 编写的。
6 | >>译者注:关于偏微分方程的相关知识,译者推荐读者查看 [**网易公开课**](http://open.163.com/) 上的[**《麻省理工学院公开课:多变量微积分》**](http://open.163.com/special/opencourse/multivariable.html)课程。
7 |
8 | ## 基本设置
9 |
10 | 首先,我们需要导入一些必要的引用。
11 |
12 | ```python
13 | #导入模拟仿真需要的库
14 | import tensorflow as tf
15 | import numpy as np
16 |
17 | #导入可视化需要的库
18 | import PIL.Image
19 | from cStringIO import StringIO
20 | from IPython.display import clear_output, Image, display
21 | ```
22 |
23 | 然后,我们还需要一个用于表示池塘表面状态的函数。
24 |
25 | ```python
26 | def DisplayArray(a, fmt='jpeg', rng=[0,1]):
27 | """Display an array as a picture."""
28 | a = (a - rng[0])/float(rng[1] - rng[0])*255
29 | a = np.uint8(np.clip(a, 0, 255))
30 | f = StringIO()
31 | PIL.Image.fromarray(a).save(f, fmt)
32 | display(Image(data=f.getvalue()))
33 | ```
34 |
35 | 最后,为了方便演示,这里我们需要打开一个 ***TensorFlow*** 的交互会话(interactive session)。当然为了以后能方便调用,我们可以把相关代码写到一个可以执行的***Python***文件中。
36 |
37 | ```python
38 | sess = tf.InteractiveSession()
39 | ```
40 |
41 | ## 定义计算函数
42 |
43 | ```python
44 | def make_kernel(a):
45 | """Transform a 2D array into a convolution kernel"""
46 | a = np.asarray(a)
47 | a = a.reshape(list(a.shape) + [1,1])
48 | return tf.constant(a, dtype=1)
49 |
50 | def simple_conv(x, k):
51 | """A simplified 2D convolution operation"""
52 | x = tf.expand_dims(tf.expand_dims(x, 0), -1)
53 | y = tf.nn.depthwise_conv2d(x, k, [1, 1, 1, 1], padding='SAME')
54 | return y[0, :, :, 0]
55 |
56 | def laplace(x):
57 | """Compute the 2D laplacian of an array"""
58 | laplace_k = make_kernel([[0.5, 1.0, 0.5],
59 | [1.0, -6., 1.0],
60 | [0.5, 1.0, 0.5]])
61 | return simple_conv(x, laplace_k)
62 | ```
63 |
64 | ## 定义偏微分方程
65 |
66 | 首先,我们需要创建一个完美的 500 × 500 的正方形池塘,就像是我们在现实中找到的一样。
67 |
68 | ```python
69 | N = 500
70 | ```
71 |
72 | 然后,我们需要创建了一个池塘和几滴将要坠入池塘的雨滴。
73 |
74 | ```python
75 | # Initial Conditions -- some rain drops hit a pond
76 |
77 | # Set everything to zero
78 | u_init = np.zeros([N, N], dtype="float32")
79 | ut_init = np.zeros([N, N], dtype="float32")
80 |
81 | # Some rain drops hit a pond at random points
82 | for n in range(40):
83 | a,b = np.random.randint(0, N, 2)
84 | u_init[a,b] = np.random.uniform()
85 |
86 | DisplayArray(u_init, rng=[-0.1, 0.1])
87 | ```
88 |
89 | 
90 |
91 | 现在,让我们来指定该微分方程的一些详细参数。
92 |
93 | ```python
94 | # Parameters:
95 | # eps -- time resolution
96 | # damping -- wave damping
97 | eps = tf.placeholder(tf.float32, shape=())
98 | damping = tf.placeholder(tf.float32, shape=())
99 |
100 | # Create variables for simulation state
101 | U = tf.Variable(u_init)
102 | Ut = tf.Variable(ut_init)
103 |
104 | # Discretized PDE update rules
105 | U_ = U + eps * Ut
106 | Ut_ = Ut + eps * (laplace(U) - damping * Ut)
107 |
108 | # Operation to update the state
109 | step = tf.group(
110 | U.assign(U_),
111 | Ut.assign(Ut_))
112 | ```
113 |
114 | ## 开始仿真
115 |
116 | 为了能看清仿真效果,我们可以用一个简单的 **for** 循环来运行我们的仿真程序。
117 |
118 | ```python
119 | # Initialize state to initial conditions
120 | tf.initialize_all_variables().run()
121 |
122 | # Run 1000 steps of PDE
123 | for i in range(1000):
124 | # Step simulation
125 | step.run({eps: 0.03, damping: 0.04})
126 | # Visualize every 50 steps
127 | if i % 50 == 0:
128 | clear_output()
129 | DisplayArray(U.eval(), rng=[-0.1, 0.1])
130 | ```
131 |
132 | 
133 |
134 | 看!! 雨点落在池塘中,和现实中一样的泛起了涟漪。
135 |
136 | > 原文链接:[http://tensorflow.org/tutorials/pdes/index.md](http://tensorflow.org/tutorials/pdes/index.md) 翻译:[@wangaicc](https://github.com/wangaicc) 校对:[@tensorfly](https://github.com/tensorfly)
137 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/pdes/index.md:
--------------------------------------------------------------------------------
1 | # 偏微分方程
2 |
3 | ***TensorFlow*** 不仅仅可以用来做机器学习,还可以用来做模拟仿真。在这里,我们将通过模拟几滴雨点落入一块方形水池的例子,来介绍用 ***TensorFlow*** 对偏微分方程进行仿真的基本方法。
4 |
5 | >注:本教程最初是准备做为一个 **IPython** 的手册。
6 | >>译者注:关于偏微分方程的相关知识,译者推荐读者查看 [**网易公开课**](http://open.163.com/) 上的[**《麻省理工学院公开课:多变量微积分》**](http://open.163.com/special/opencourse/multivariable.html)课程。
7 |
8 | ## 基本设置
9 |
10 | 首先,我们需要导入一些必要的引用。
11 |
12 | ```python
13 | #Import libraries for simulation
14 | import tensorflow as tf
15 | import numpy as np
16 |
17 | #Imports for visualization
18 | import PIL.Image
19 | from cStringIO import StringIO
20 | from IPython.display import clear_output, Image, display
21 | ```
22 |
23 | 然后,我们还需要一个用于表示池塘表面状态的函数。
24 |
25 | ```python
26 | def DisplayArray(a, fmt='jpeg', rng=[0,1]):
27 | """Display an array as a picture."""
28 | a = (a - rng[0])/float(rng[1] - rng[0])*255
29 | a = np.uint8(np.clip(a, 0, 255))
30 | f = StringIO()
31 | PIL.Image.fromarray(a).save(f, fmt)
32 | display(Image(data=f.getvalue()))
33 | ```
34 |
35 | 最后,为了方便演示,这里我们需要打开一个交互的 ***TensorFlow*** 会话。当然为了以后能方便调用,我们可以把相关代码写到一个可以执行的***Python***文件中。
36 |
37 | ```python
38 | sess = tf.InteractiveSession()
39 | ```
40 |
41 | ## 定义计算函数
42 |
43 |
44 | ```python
45 | def make_kernel(a):
46 | """Transform a 2D array into a convolution kernel"""
47 | a = np.asarray(a)
48 | a = a.reshape(list(a.shape) + [1,1])
49 | return tf.constant(a, dtype=1)
50 |
51 | def simple_conv(x, k):
52 | """A simplified 2D convolution operation"""
53 | x = tf.expand_dims(tf.expand_dims(x, 0), -1)
54 | y = tf.nn.depthwise_conv2d(x, k, [1, 1, 1, 1], padding='SAME')
55 | return y[0, :, :, 0]
56 |
57 | def laplace(x):
58 | """Compute the 2D laplacian of an array"""
59 | laplace_k = make_kernel([[0.5, 1.0, 0.5],
60 | [1.0, -6., 1.0],
61 | [0.5, 1.0, 0.5]])
62 | return simple_conv(x, laplace_k)
63 | ```
64 |
65 | ## 定义偏微分方程
66 |
67 | 首先,我们需要创建一个完美的 500 × 500 的正方形池塘,就像是我们在现实中找到的一样。
68 |
69 | ```python
70 | N = 500
71 | ```
72 |
73 | 然后,我们需要创建了一个池塘和几滴将要坠入池塘的雨滴。
74 |
75 | ```python
76 | # Initial Conditions -- some rain drops hit a pond
77 |
78 | # Set everything to zero
79 | u_init = np.zeros([N, N], dtype="float32")
80 | ut_init = np.zeros([N, N], dtype="float32")
81 |
82 | # Some rain drops hit a pond at random points
83 | for n in range(40):
84 | a,b = np.random.randint(0, N, 2)
85 | u_init[a,b] = np.random.uniform()
86 |
87 | DisplayArray(u_init, rng=[-0.1, 0.1])
88 | ```
89 |
90 | 
91 |
92 | 现在,让我们来指定该微分方程的一些详细参数。
93 |
94 | ```python
95 | # Parameters:
96 | # eps -- time resolution
97 | # damping -- wave damping
98 | eps = tf.placeholder(tf.float32, shape=())
99 | damping = tf.placeholder(tf.float32, shape=())
100 |
101 | # Create variables for simulation state
102 | U = tf.Variable(u_init)
103 | Ut = tf.Variable(ut_init)
104 |
105 | # Discretized PDE update rules
106 | U_ = U + eps * Ut
107 | Ut_ = Ut + eps * (laplace(U) - damping * Ut)
108 |
109 | # Operation to update the state
110 | step = tf.group(
111 | U.assign(U_),
112 | Ut.assign(Ut_))
113 | ```
114 |
115 | ## 开始仿真
116 |
117 | 为了能看清仿真效果,我们可以用一个简单的 **for** 循环来运行我们的仿真程序。
118 |
119 | ```python
120 | # Initialize state to initial conditions
121 | tf.initialize_all_variables().run()
122 |
123 | # Run 1000 steps of PDE
124 | for i in range(1000):
125 | # Step simulation
126 | step.run({eps: 0.03, damping: 0.04})
127 | # Visualize every 50 steps
128 | if i % 50 == 0:
129 | clear_output()
130 | DisplayArray(U.eval(), rng=[-0.1, 0.1])
131 | ```
132 |
133 | 
134 |
135 | 看!! 雨点落在池塘中,和现实中一样泛起了涟漪。
136 |
137 | > 原文链接:[http://tensorflow.org/tutorials/pdes/index.md](http://tensorflow.org/tutorials/pdes/index.md) 翻译:[@wangaicc](https://github.com/wangaicc) 校对:
138 |
--------------------------------------------------------------------------------
/SOURCE/tutorials/pdes/pde_output_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/pdes/pde_output_1.jpg
--------------------------------------------------------------------------------
/SOURCE/tutorials/pdes/pde_output_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/pdes/pde_output_2.jpg
--------------------------------------------------------------------------------
/SOURCE/tutorials/seq2seq/attention_seq2seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/seq2seq/attention_seq2seq.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/seq2seq/basic_seq2seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/seq2seq/basic_seq2seq.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/word2vec/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/word2vec/__init__.py
--------------------------------------------------------------------------------
/SOURCE/tutorials/word2vec/img/audio-image-text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/word2vec/img/audio-image-text.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/word2vec/img/linear-relationships.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/word2vec/img/linear-relationships.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/word2vec/img/nce-nplm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/word2vec/img/nce-nplm.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/word2vec/img/softmax-nplm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/word2vec/img/softmax-nplm.png
--------------------------------------------------------------------------------
/SOURCE/tutorials/word2vec/img/tsne.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/SOURCE/tutorials/word2vec/img/tsne.png
--------------------------------------------------------------------------------
/TOC.md:
--------------------------------------------------------------------------------
1 | - 起步
2 | - [介绍](SOURCE/get_started/introduction.md)
3 | - [下载及安装](SOURCE/get_started/os_setup.md)
4 | - [基本用法](SOURCE/get_started/basic_usage.md)
5 | - 教程
6 | - [总览](SOURCE/tutorials/overview.md)
7 | - [MNIST 机器学习入门](SOURCE/tutorials/mnist_beginners.md)
8 | - [深入 MNIST](SOURCE/tutorials/mnist_pros.md)
9 | - [TensorFlow 运作方式入门](SOURCE/tutorials/mnist_tf.md)
10 | - [卷积神经网络](SOURCE/tutorials/deep_cnn.md)
11 | - [字词的向量表示](SOURCE/tutorials/word2vec.md)
12 | - [递归神经网络](SOURCE/tutorials/recurrent.md)
13 | - [曼德布洛特(Mandelbrot)集合](SOURCE/tutorials/mandelbrot.md)
14 | - [偏微分方程](SOURCE/tutorials/pdes.md)
15 | - [MNIST数据下载](SOURCE/tutorials/mnist_download.md)
16 | - 运作方式
17 | - [总览](SOURCE/how_tos/overview.md)
18 | - [变量:创建、初始化、保存和加载](SOURCE/how_tos/variables.md)
19 | - [TensorBoard:可视化学习](SOURCE/how_tos/summaries_and_tensorboard.md)
20 | - [TensorBoard:图表可视化](SOURCE/how_tos/graph_viz.md)
21 | - [读取数据](SOURCE/how_tos/reading_data.md)
22 | - [线程和队列](SOURCE/how_tos/threading_and_queues.md)
23 | - [添加新的Op](SOURCE/how_tos/adding_an_op.md)
24 | - [自定义数据读取](SOURCE/how_tos/new_data_formats.md)
25 | - [使用gpu](SOURCE/how_tos/using_gpu.md)
26 | - [共享变量](SOURCE/how_tos/variable_scope.md)
27 | - 资源
28 | - [总览](SOURCE/resources/overview.md)
29 | - [BibTex 引用](SOURCE/resources/bib.md)
30 | - [示例使用](SOURCE/resources/uses.md)
31 | - [FAQ](SOURCE/resources/faq.md)
32 | - [术语表](SOURCE/resources/glossary.md)
33 | - [Tensor排名、形状和类型](SOURCE/resources/dims_types.md)
34 | - 其他
35 | - [常见问题汇总](SOURCE/faq.md)
36 | - [相关资源](SOURCE/resource.md)
37 | - [个人学习心得](SOURCE/personal.md)
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "TensorFlow 官方文档中文版",
3 | "introduction": "Google 人工智能系统 TensorFlow 官方文档中文版协同翻译。",
4 | "path": {
5 | "content": "SOURCE",
6 | "images": "SOURCE/images"
7 | }
8 |
9 | }
10 |
--------------------------------------------------------------------------------
/cover/background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/cover/background.jpg
--------------------------------------------------------------------------------
/cover/cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/cover/cover.jpg
--------------------------------------------------------------------------------
/cover/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/cover/logo.png
--------------------------------------------------------------------------------
/learn-github.md:
--------------------------------------------------------------------------------
1 |
2 | ## 注册 GitHub 账号
3 |
4 |
5 |
6 | ## 创建仓库
7 |
8 |
9 |
10 | ## 工具安装,克隆仓库到本地
11 |
12 | Windows 用户详见文档:
13 |
14 |
15 |
16 | Mac 用户详见文档:
17 |
18 |
19 |
20 | >注意:你克隆到本地的文件内至少应包含一个 README.md 文件。
21 |
22 | 至此,你已经拥有了一个 GitHub 账号,一个远程仓库,一个本地仓库。下一步,你将要花费 5 分钟的时间了解下 [Markdown 的简单语法](markdown.md)
23 |
--------------------------------------------------------------------------------
/markdown.md:
--------------------------------------------------------------------------------
1 | ## Markdown 工具下载
2 |
3 | - Windows 用户推荐使用编辑器 MarkdownPad
4 | 下载地址:
5 |
6 | - Mac 用户推荐使用编辑器 Mou
7 | 下载地址:
8 |
9 | ## 标题
10 |
11 | 在标题内容前添加 `# + 空格` 就可以实现标题样式的添加:
12 |
13 | - 一级标题就添加一个 `#` + 空格,例如:# 我是一级标题
14 | - 二级标题就添加两个 `#` + 空格,例如:## 我是二级标题。
15 | - 三级标题就添加三个 `#` + 空格,例如:### 我是三级标题。
16 |
17 | 依次类推...
18 |
19 | ## 特殊文字处理
20 |
21 | ### 换行
22 |
23 | 书写示例:
24 |
25 | ```
26 | 我是第一行(后面有两个空格)
27 | 我是第二行
28 | ```
29 |
30 | 效果:
31 |
32 | 我是第一行(后面有两个空格)
33 | 我是第二行
34 |
35 | ### 加粗
36 |
37 | 书写示例:
38 |
39 | ```
40 | **我是粗体**
41 | ```
42 |
43 | 效果:
44 | **我是粗体**
45 |
46 | ### 标亮
47 |
48 | 书写示例:
49 |
50 | ```
51 | `请把我标亮`
52 | ```
53 |
54 | 效果:
55 |
56 | `请把我标亮`
57 |
58 | ## 列表
59 |
60 | ### 一级列表(不带序号)
61 |
62 | 书写示例:
63 |
64 | ```
65 | - 列表 1(减号 + 空格)
66 | - 列表 2
67 | ```
68 |
69 | 效果:
70 |
71 | - 列表 1(减号 + 空格)
72 | - 列表 2
73 |
74 | ### 一级列表(带序号)
75 |
76 | 书写示例:
77 |
78 | ```
79 | 1. 列表 1(自然数 + 点 + 空格)
80 | 2. 列表 2
81 | ```
82 |
83 | 效果:
84 |
85 | 1. 列表 1(自然数 + 点 + 空格)
86 | 2. 列表 2
87 |
88 | ### 二级列表(不带序号)
89 |
90 | 书写示例:
91 |
92 | ```
93 | - 列表 1(一级列表:减号 + 空格)
94 | - 列表 1.1(二级列表:空格 + 减号 + 空格)
95 | - 列表 1.2
96 | - 列表 2
97 | ```
98 |
99 | 效果:
100 |
101 | - 列表 1(一级列表:减号 + 空格)
102 | - 列表 1.1(二级列表:空格 + 减号 + 空格)
103 | - 列表 1.2
104 | - 列表 2
105 |
106 | ## 添加图片
107 |
108 | 书写示例:
109 |
110 | ```
111 | 
112 | ```
113 |
114 | 效果:
115 |
116 | 
117 |
118 | ## 添加超链接
119 |
120 | 书写示例:
121 |
122 | ```
123 | [链接描述文字(可见)](链接地址)
124 | ```
125 |
126 | 效果:
127 |
128 | [点击跳转到极客学院](http://www.jikexueyuan.com)
129 |
130 | ## 引用
131 |
132 | 书写示例:
133 |
134 | ```
135 | >注意:
136 | 我是引用的内容(空一行就结束引用)。
137 | ```
138 |
139 | 效果:
140 |
141 | >注意:
142 | 我是引用的内容(空一行就结束引用)。
143 |
144 | ## 添加代码
145 |
146 | 书写示例:
147 |
148 |
149 | ```
150 | // 代码区域的上下分别用三个 ` 括起来
151 | public class Person {
152 | // 代码缩进请使用 四个空格,不要使用 Tab
153 | }
154 | ```
155 |
156 |
157 | 效果:
158 |
159 | ```
160 | // 代码区域的上下分别用三个 ` 括起来
161 | public class Person {
162 | // 代码缩进请使用 四个空格,不要使用 Tab
163 | }
164 | ```
165 |
166 | ## 锚点使用
167 |
168 | 书写示例:
169 |
170 | ```
171 | 极客学院是中国最大的 IT 职业在线教育平台。[[锚点名]](#footnode)
172 |
173 | [锚点名]我的示例。
174 |
175 | ```
176 |
177 | 效果:
178 |
179 | 极客学院是中国最大的 IT 职业在线教育平台。[[锚点名]](#footnode)
180 |
181 | [锚点名]我的示例。
182 |
183 | ## 特别注意
184 |
185 | 1. 英文字母与中文之间需要两边空格(英文与符号之间不空格)
186 | 2. 高亮英文之间需要两边空格
187 | 3. 代码符号\`\`\`上面需要空一行,结束```下面需要空一行
188 | 4. 中文之间和短英文之间使用中文标点
189 | 5. 一篇文章中只能出现一个#号标题
190 | 6. 标题#号之后需要空一格
191 |
192 |
193 | ## 表格
194 |
195 | 书写示例:
196 |
197 | ```
198 | | Prefix | Framework |
199 | | ------------- |:-------------:|
200 | | NS | Foundation (OS X and iOS) and Application Kit (OS X) |
201 | | UI | UIKit (iOS) |
202 | | AB | Address Book |
203 | | CA | Core Animation |
204 | | CI | Core Image |
205 | ```
206 |
207 | 效果:
208 |
209 | | Prefix | Framework |
210 | | ------------- |:-------------:|
211 | | NS | Foundation (OS X and iOS) and Application Kit (OS X) |
212 | | UI | UIKit (iOS) |
213 | | AB | Address Book |
214 | | CA | Core Animation |
215 | | CI | Core Image |
216 |
--------------------------------------------------------------------------------
/tex_pdf/api/c4s00.tex:
--------------------------------------------------------------------------------
1 |
2 |
3 | \section{Overview}\label{api_overview}
4 |
5 | TensorFlow has APIs available in several languages both for constructing
6 | and executing a TensorFlow graph. The Python API is at present the most
7 | complete and the easiest to use, but the C++ API may offer some
8 | performance advantages in graph execution, and supports deployment to
9 | small devices such as Android.
10 |
11 | Over time, we hope that the TensorFlow community will develop front ends
12 | for languages like Go, Java, JavaScript, Lua, R, and perhaps others. With
13 | \href{http://swig.org}{SWIG}, it's relatively easy to develop a
14 | TensorFlow interface for your favorite language.
15 |
16 | Note: Many practical aspects of usage are covered in the Mechanics tab,
17 | and some additional documentation not specific to any particular
18 | language API is available in the Resources tab.
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassRandomAccessFile.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::RandomAccessFile`
2 |
3 | A file abstraction for randomly reading the contents of a file.
4 |
5 |
6 |
7 | ##Member Summary
8 |
9 | * [`tensorflow::RandomAccessFile::RandomAccessFile()`](#tensorflow_RandomAccessFile_RandomAccessFile)
10 | * [`virtual tensorflow::RandomAccessFile::~RandomAccessFile()`](#virtual_tensorflow_RandomAccessFile_RandomAccessFile)
11 | * [`virtual Status tensorflow::RandomAccessFile::Read(uint64 offset, size_t n, StringPiece *result, char *scratch) const =0`](#virtual_Status_tensorflow_RandomAccessFile_Read)
12 | * Reads up to "n" bytes from the file starting at "offset".
13 |
14 | ##Member Details
15 |
16 | #### `tensorflow::RandomAccessFile::RandomAccessFile()`
17 |
18 |
19 |
20 |
21 |
22 | #### `virtual tensorflow::RandomAccessFile::~RandomAccessFile()`
23 |
24 |
25 |
26 |
27 |
28 | #### `virtual Status tensorflow::RandomAccessFile::Read(uint64 offset, size_t n, StringPiece *result, char *scratch) const =0`
29 |
30 | Reads up to "n" bytes from the file starting at "offset".
31 |
32 | "scratch[0..n-1]" may be written by this routine. Sets "*result" to the data that was read (including if fewer than "n" bytes were successfully read). May set "*result" to point at data in "scratch[0..n-1]", so "scratch[0..n-1]" must be live when "*result" is used.
33 |
34 | On OK returned status: "n" bytes have been stored in "*result". On non-OK returned status: [0..n] bytes have been stored in "*result".
35 |
36 | Returns `OUT_OF_RANGE` if fewer than n bytes were stored in "*result" because of EOF.
37 |
38 | Safe for concurrent use by multiple threads.
39 |
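As a rough illustration of the contract above (not taken from the TensorFlow sources), the caller owns the scratch buffer and must treat an `OUT_OF_RANGE` status as a possible partial read. Obtaining the `RandomAccessFile` itself (for example through an `Env`) is assumed here:

```c++
// Minimal sketch: read up to 256 bytes from the start of an already-opened file.
char scratch[256];
tensorflow::StringPiece data;
tensorflow::Status s = file->Read(0 /* offset */, sizeof(scratch), &data, scratch);
// OUT_OF_RANGE just means the file was shorter than requested; `data` still
// holds the bytes that were read, possibly pointing into `scratch`.
if (s.ok() || s.code() == tensorflow::error::OUT_OF_RANGE) {
  ConsumeBytes(data);  // hypothetical consumer; must run while `scratch` is alive
}
```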
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassStatus.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::Status`
2 |
3 |
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`tensorflow::Status::Status()`](#tensorflow_Status_Status)
10 | * Create a success status.
11 | * [`tensorflow::Status::~Status()`](#tensorflow_Status_Status)
12 | * [`tensorflow::Status::Status(tensorflow::error::Code code, tensorflow::StringPiece msg)`](#tensorflow_Status_Status)
13 | * Create a status with the specified error code and msg as a human-readable string containing more detailed information.
14 | * [`tensorflow::Status::Status(const Status &s)`](#tensorflow_Status_Status)
15 | * Copy the specified status.
16 | * [`void tensorflow::Status::operator=(const Status &s)`](#void_tensorflow_Status_operator_)
17 | * [`bool tensorflow::Status::ok() const`](#bool_tensorflow_Status_ok)
18 | * Returns true iff the status indicates success.
19 | * [`tensorflow::error::Code tensorflow::Status::code() const`](#tensorflow_error_Code_tensorflow_Status_code)
20 | * [`const string& tensorflow::Status::error_message() const`](#const_string_tensorflow_Status_error_message)
21 | * [`bool tensorflow::Status::operator==(const Status &x) const`](#bool_tensorflow_Status_operator_)
22 | * [`bool tensorflow::Status::operator!=(const Status &x) const`](#bool_tensorflow_Status_operator_)
23 | * [`void tensorflow::Status::Update(const Status &new_status)`](#void_tensorflow_Status_Update)
24 | * If ` ok() `, stores `new_status` into `*this`. If `!ok()`, preserves the current status, but may augment with additional information about `new_status`.
25 | * [`string tensorflow::Status::ToString() const`](#string_tensorflow_Status_ToString)
26 | * Return a string representation of this status suitable for printing. Returns the string `"OK"` for success.
27 | * [`static Status tensorflow::Status::OK()`](#static_Status_tensorflow_Status_OK)
28 |
29 | ## Member Details
30 |
31 | #### `tensorflow::Status::Status()`
32 |
33 | Create a success status.
34 |
35 |
36 |
37 | #### `tensorflow::Status::~Status()`
38 |
39 |
40 |
41 |
42 |
43 | #### `tensorflow::Status::Status(tensorflow::error::Code code, tensorflow::StringPiece msg)`
44 |
45 | Create a status with the specified error code and msg as a human-readable string containing more detailed information.
46 |
47 |
48 |
49 | #### `tensorflow::Status::Status(const Status &s)`
50 |
51 | Copy the specified status.
52 |
53 |
54 |
55 | #### `void tensorflow::Status::operator=(const Status &s)`
56 |
57 |
58 |
59 |
60 |
61 | #### `bool tensorflow::Status::ok() const`
62 |
63 | Returns true iff the status indicates success.
64 |
65 |
66 |
67 | #### `tensorflow::error::Code tensorflow::Status::code() const`
68 |
69 |
70 |
71 |
72 |
73 | #### `const string& tensorflow::Status::error_message() const`
74 |
75 |
76 |
77 |
78 |
79 | #### `bool tensorflow::Status::operator==(const Status &x) const`
80 |
81 |
82 |
83 |
84 |
85 | #### `bool tensorflow::Status::operator!=(const Status &x) const`
86 |
87 |
88 |
89 |
90 |
91 | #### `void tensorflow::Status::Update(const Status &new_status)`
92 |
93 | If ` ok() `, stores `new_status` into `*this`. If `!ok()`, preserves the current status, but may augment with additional information about `new_status`.
94 |
95 | Convenient way of keeping track of the first error encountered. Instead of: `if (overall_status.ok()) overall_status = new_status` Use: `overall_status.Update(new_status);`
96 |
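For instance, a loop of work can funnel every failure through `Update()` so that only the first non-OK status survives; the step functions below are hypothetical stand-ins:

```c++
// Minimal sketch of the "first error wins" pattern described above.
tensorflow::Status overall_status;        // default-constructed status is OK
overall_status.Update(LoadInput());       // hypothetical step
overall_status.Update(RunComputation());  // hypothetical step
overall_status.Update(WriteOutput());     // hypothetical step
if (!overall_status.ok()) {
  LOG(ERROR) << overall_status.ToString();
}
```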
97 | #### `string tensorflow::Status::ToString() const`
98 |
99 | Return a string representation of this status suitable for printing. Returns the string `"OK"` for success.
100 |
101 |
102 |
103 | #### `static Status tensorflow::Status::OK()`
104 |
105 |
106 |
107 |
108 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassTensorBuffer.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::TensorBuffer`
2 |
3 |
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`tensorflow::TensorBuffer::~TensorBuffer() override`](#tensorflow_TensorBuffer_TensorBuffer)
10 | * [`virtual void* tensorflow::TensorBuffer::data() const =0`](#virtual_void_tensorflow_TensorBuffer_data)
11 | * [`virtual size_t tensorflow::TensorBuffer::size() const =0`](#virtual_size_t_tensorflow_TensorBuffer_size)
12 | * [`virtual TensorBuffer* tensorflow::TensorBuffer::root_buffer()=0`](#virtual_TensorBuffer_tensorflow_TensorBuffer_root_buffer)
13 | * [`virtual void tensorflow::TensorBuffer::FillAllocationDescription(AllocationDescription *proto) const =0`](#virtual_void_tensorflow_TensorBuffer_FillAllocationDescription)
14 | * [`T* tensorflow::TensorBuffer::base() const`](#T_tensorflow_TensorBuffer_base)
15 |
16 | ## Member Details
17 |
18 | #### `tensorflow::TensorBuffer::~TensorBuffer() override`
19 |
20 |
21 |
22 |
23 |
24 | #### `virtual void* tensorflow::TensorBuffer::data() const =0`
25 |
26 |
27 |
28 |
29 |
30 | #### `virtual size_t tensorflow::TensorBuffer::size() const =0`
31 |
32 |
33 |
34 |
35 |
36 | #### `virtual TensorBuffer* tensorflow::TensorBuffer::root_buffer()=0`
37 |
38 |
39 |
40 |
41 |
42 | #### `virtual void tensorflow::TensorBuffer::FillAllocationDescription(AllocationDescription *proto) const =0`
43 |
44 |
45 |
46 |
47 |
48 | #### `T* tensorflow::TensorBuffer::base() const`
49 |
50 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassTensorShapeIter.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::TensorShapeIter`
2 |
3 |
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`tensorflow::TensorShapeIter::TensorShapeIter(const TensorShape *shape, int d)`](#tensorflow_TensorShapeIter_TensorShapeIter)
10 | * [`bool tensorflow::TensorShapeIter::operator==(const TensorShapeIter &rhs)`](#bool_tensorflow_TensorShapeIter_operator_)
11 | * [`bool tensorflow::TensorShapeIter::operator!=(const TensorShapeIter &rhs)`](#bool_tensorflow_TensorShapeIter_operator_)
12 | * [`void tensorflow::TensorShapeIter::operator++()`](#void_tensorflow_TensorShapeIter_operator_)
13 | * [`TensorShapeDim tensorflow::TensorShapeIter::operator*()`](#TensorShapeDim_tensorflow_TensorShapeIter_operator_)
14 |
15 | ## Member Details
16 |
17 | #### `tensorflow::TensorShapeIter::TensorShapeIter(const TensorShape *shape, int d)`
18 |
19 |
20 |
21 |
22 |
23 | #### `bool tensorflow::TensorShapeIter::operator==(const TensorShapeIter &rhs)`
24 |
25 |
26 |
27 |
28 |
29 | #### `bool tensorflow::TensorShapeIter::operator!=(const TensorShapeIter &rhs)`
30 |
31 |
32 |
33 |
34 |
35 | #### `void tensorflow::TensorShapeIter::operator++()`
36 |
37 |
38 |
39 |
40 |
41 | #### `TensorShapeDim tensorflow::TensorShapeIter::operator*()`
42 |
43 |
44 |
45 |
46 |
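In practice a `TensorShapeIter` usually comes from a `TensorShape` rather than being constructed directly; assuming `TensorShape` exposes `begin()`/`end()` returning this iterator, a shape can be walked like so:

```c++
// Sketch: print the size of every dimension of `shape` (a tensorflow::TensorShape).
for (const tensorflow::TensorShapeDim& d : shape) {
  std::cout << d.size << " ";
}
```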
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassTensorShapeUtils.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::TensorShapeUtils`
2 |
3 | Static helper routines for ` TensorShape `. Includes a few common predicates on a tensor shape.
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`static bool tensorflow::TensorShapeUtils::IsScalar(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsScalar)
10 | * [`static bool tensorflow::TensorShapeUtils::IsVector(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsVector)
11 | * [`static bool tensorflow::TensorShapeUtils::IsLegacyScalar(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsLegacyScalar)
12 | * [`static bool tensorflow::TensorShapeUtils::IsLegacyVector(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsLegacyVector)
13 | * [`static bool tensorflow::TensorShapeUtils::IsVectorOrHigher(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsVectorOrHigher)
14 | * [`static bool tensorflow::TensorShapeUtils::IsMatrix(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsMatrix)
15 | * [`static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher(const TensorShape &shape)`](#static_bool_tensorflow_TensorShapeUtils_IsMatrixOrHigher)
16 | * [`static TensorShape tensorflow::TensorShapeUtils::MakeShape(const T *dims, int n)`](#static_TensorShape_tensorflow_TensorShapeUtils_MakeShape)
17 | * Returns a ` TensorShape ` whose dimensions are `dims[0]`, `dims[1]`, ..., `dims[n-1]`.
18 | * [`static string tensorflow::TensorShapeUtils::ShapeListString(const gtl::ArraySlice< TensorShape > &shapes)`](#static_string_tensorflow_TensorShapeUtils_ShapeListString)
19 | * [`static bool tensorflow::TensorShapeUtils::StartsWith(const TensorShape &shape0, const TensorShape &shape1)`](#static_bool_tensorflow_TensorShapeUtils_StartsWith)
20 |
21 | ## Member Details
22 |
23 | #### `static bool tensorflow::TensorShapeUtils::IsScalar(const TensorShape &shape)`
24 |
25 |
26 |
27 |
28 |
29 | #### `static bool tensorflow::TensorShapeUtils::IsVector(const TensorShape &shape)`
30 |
31 |
32 |
33 |
34 |
35 | #### `static bool tensorflow::TensorShapeUtils::IsLegacyScalar(const TensorShape &shape)`
36 |
37 |
38 |
39 |
40 |
41 | #### `static bool tensorflow::TensorShapeUtils::IsLegacyVector(const TensorShape &shape)`
42 |
43 |
44 |
45 |
46 |
47 | #### `static bool tensorflow::TensorShapeUtils::IsVectorOrHigher(const TensorShape &shape)`
48 |
49 |
50 |
51 |
52 |
53 | #### `static bool tensorflow::TensorShapeUtils::IsMatrix(const TensorShape &shape)`
54 |
55 |
56 |
57 |
58 |
59 | #### `static bool tensorflow::TensorShapeUtils::IsMatrixOrHigher(const TensorShape &shape)`
60 |
61 |
62 |
63 |
64 |
65 | #### `static TensorShape tensorflow::TensorShapeUtils::MakeShape(const T *dims, int n)`
66 |
67 | Returns a ` TensorShape ` whose dimensions are `dims[0]`, `dims[1]`, ..., `dims[n-1]`.
68 |
69 |
70 |
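A small sketch of how `MakeShape` composes with the predicates above (the dimensions are arbitrary):

```c++
// Build a rank-3 shape from a plain array, then query it with a predicate.
tensorflow::int64 dims[] = {2, 3, 5};
tensorflow::TensorShape shape = tensorflow::TensorShapeUtils::MakeShape(dims, 3);
bool ok = tensorflow::TensorShapeUtils::IsMatrixOrHigher(shape);  // true: rank >= 2
```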
71 | #### `static string tensorflow::TensorShapeUtils::ShapeListString(const gtl::ArraySlice< TensorShape > &shapes)`
72 |
73 |
74 |
75 |
76 |
77 | #### `static bool tensorflow::TensorShapeUtils::StartsWith(const TensorShape &shape0, const TensorShape &shape1)`
78 |
79 |
80 |
81 |
82 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassThread.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::Thread`
2 |
3 |
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`tensorflow::Thread::Thread()`](#tensorflow_Thread_Thread)
10 | * [`virtual tensorflow::Thread::~Thread()`](#virtual_tensorflow_Thread_Thread)
11 | * Blocks until the thread of control stops running.
12 |
13 | ## Member Details
14 |
15 | #### `tensorflow::Thread::Thread()`
16 |
17 |
18 |
19 |
20 |
21 | #### `virtual tensorflow::Thread::~Thread()`
22 |
23 | Blocks until the thread of control stops running.
24 |
25 |
26 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/ClassWritableFile.md:
--------------------------------------------------------------------------------
1 | # Class `tensorflow::WritableFile`
2 |
3 | A file abstraction for sequential writing.
4 |
5 | The implementation must provide buffering since callers may append small fragments at a time to the file.
6 |
7 | ## Member Summary
8 |
9 | * [`tensorflow::WritableFile::WritableFile()`](#tensorflow_WritableFile_WritableFile)
10 | * [`virtual tensorflow::WritableFile::~WritableFile()`](#virtual_tensorflow_WritableFile_WritableFile)
11 | * [`virtual Status tensorflow::WritableFile::Append(const StringPiece &data)=0`](#virtual_Status_tensorflow_WritableFile_Append)
12 | * [`virtual Status tensorflow::WritableFile::Close()=0`](#virtual_Status_tensorflow_WritableFile_Close)
13 | * [`virtual Status tensorflow::WritableFile::Flush()=0`](#virtual_Status_tensorflow_WritableFile_Flush)
14 | * [`virtual Status tensorflow::WritableFile::Sync()=0`](#virtual_Status_tensorflow_WritableFile_Sync)
15 |
16 | ## Member Details
17 |
18 | #### `tensorflow::WritableFile::WritableFile()`
19 |
20 |
21 |
22 |
23 |
24 | #### `virtual tensorflow::WritableFile::~WritableFile()`
25 |
26 |
27 |
28 |
29 |
30 | #### `virtual Status tensorflow::WritableFile::Append(const StringPiece &data)=0`
31 |
32 |
33 |
34 |
35 |
36 | #### `virtual Status tensorflow::WritableFile::Close()=0`
37 |
38 |
39 |
40 |
41 |
42 | #### `virtual Status tensorflow::WritableFile::Flush()=0`
43 |
44 |
45 |
46 |
47 |
48 | #### `virtual Status tensorflow::WritableFile::Sync()=0`
49 |
50 |
51 |
52 |
53 |
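A sketch of the append-small-fragments pattern that the buffering note above anticipates; it assumes the `WritableFile` was opened elsewhere (for example via an `Env`) and that the surrounding function returns a `Status`:

```c++
// Append many short fragments, then flush and close; buffering keeps this cheap.
for (const std::string& line : lines) {
  TF_RETURN_IF_ERROR(file->Append(line));
  TF_RETURN_IF_ERROR(file->Append("\n"));
}
TF_RETURN_IF_ERROR(file->Flush());
TF_RETURN_IF_ERROR(file->Close());
```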
--------------------------------------------------------------------------------
/tex_pdf/api/cc/StructSessionOptions.md:
--------------------------------------------------------------------------------
1 | # Struct `tensorflow::SessionOptions`
2 |
3 | Configuration information for a Session.
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`Env* tensorflow::SessionOptions::env`](#Env_tensorflow_SessionOptions_env)
10 | * The environment to use.
11 | * [`string tensorflow::SessionOptions::target`](#string_tensorflow_SessionOptions_target)
12 | * The TensorFlow runtime to connect to.
13 | * [`ConfigProto tensorflow::SessionOptions::config`](#ConfigProto_tensorflow_SessionOptions_config)
14 | * Configuration options.
15 | * [`tensorflow::SessionOptions::SessionOptions()`](#tensorflow_SessionOptions_SessionOptions)
16 |
17 | ## Member Details
18 |
19 | #### `Env* tensorflow::SessionOptions::env`
20 |
21 | The environment to use.
22 |
23 |
24 |
25 | #### `string tensorflow::SessionOptions::target`
26 |
27 | The TensorFlow runtime to connect to.
28 |
29 | If 'target' is empty or unspecified, the local TensorFlow runtime implementation will be used. Otherwise, the TensorFlow engine defined by 'target' will be used to perform all computations.
30 |
31 | "target" can be either a single entry or a comma separated list of entries. Each entry is a resolvable address of the following format: local ip:port host:port ... other system-specific formats to identify tasks and jobs ...
32 |
33 | NOTE: at the moment 'local' maps to an in-process service-based runtime.
34 |
35 | Upon creation, a single session affines itself to one of the remote processes, with possible load balancing choices when the "target" resolves to a list of possible processes.
36 |
37 | If the session disconnects from the remote process during its lifetime, session calls may fail immediately.
38 |
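A minimal sketch of pointing a session at a particular runtime; the address is purely illustrative:

```c++
tensorflow::SessionOptions options;
options.target = "local";  // or e.g. "myhost:2222" for a remote runtime
tensorflow::Session* session = tensorflow::NewSession(options);
```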
39 | #### `ConfigProto tensorflow::SessionOptions::config`
40 |
41 | Configuration options.
42 |
43 |
44 |
45 | #### `tensorflow::SessionOptions::SessionOptions()`
46 |
47 |
48 |
49 |
50 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/StructState.md:
--------------------------------------------------------------------------------
1 | # Struct `tensorflow::Status::State`
2 |
3 |
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`tensorflow::error::Code tensorflow::Status::State::code`](#tensorflow_error_Code_tensorflow_Status_State_code)
10 | * [`string tensorflow::Status::State::msg`](#string_tensorflow_Status_State_msg)
11 |
12 | ## Member Details
13 |
14 | #### `tensorflow::error::Code tensorflow::Status::State::code`
15 |
16 |
17 |
18 |
19 |
20 | #### `string tensorflow::Status::State::msg`
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/StructTensorShapeDim.md:
--------------------------------------------------------------------------------
1 | # Struct `tensorflow::TensorShapeDim`
2 |
3 |
4 |
5 |
6 |
7 | ## Member Summary
8 |
9 | * [`int tensorflow::TensorShapeDim::size`](#int_tensorflow_TensorShapeDim_size)
10 | * [`tensorflow::TensorShapeDim::TensorShapeDim(int64 s)`](#tensorflow_TensorShapeDim_TensorShapeDim)
11 |
12 | ## Member Details
13 |
14 | #### `int tensorflow::TensorShapeDim::size`
15 |
16 |
17 |
18 |
19 |
20 | #### `tensorflow::TensorShapeDim::TensorShapeDim(int64 s)`
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/tex_pdf/api/cc/StructThreadOptions.md:
--------------------------------------------------------------------------------
1 | # Struct `tensorflow::ThreadOptions`
2 |
3 | Options to configure a Thread.
4 |
5 | Note that the options are all hints, and the underlying implementation may choose to ignore them.
6 |
7 | ## Member Summary
8 |
9 | * [`size_t tensorflow::ThreadOptions::stack_size`](#size_t_tensorflow_ThreadOptions_stack_size)
10 | * Thread stack size to use (in bytes).
11 | * [`size_t tensorflow::ThreadOptions::guard_size`](#size_t_tensorflow_ThreadOptions_guard_size)
12 | * Guard area size to use near thread stacks (in bytes).
13 |
14 | ## Member Details
15 |
16 | #### `size_t tensorflow::ThreadOptions::stack_size`
17 |
18 | Thread stack size to use (in bytes).
19 |
20 |
21 |
22 | #### `size_t tensorflow::ThreadOptions::guard_size`
23 |
24 | Guard area size to use near thread stacks (in bytes).
25 |
26 |
27 |
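A sketch of passing these hints when starting a thread through an `Env` (the `env` pointer and the thread body are assumed):

```c++
tensorflow::ThreadOptions opts;
opts.stack_size = 1 * 1024 * 1024;  // ask for a 1 MiB stack; this is only a hint
tensorflow::Thread* worker =
    env->StartThread(opts, "worker", [] { /* do work */ });
delete worker;  // per the Thread docs, the destructor blocks until the thread stops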
--------------------------------------------------------------------------------
/tex_pdf/api/cc/index.md:
--------------------------------------------------------------------------------
1 | # TensorFlow C++ Session API reference documentation
2 |
3 | TensorFlow's public C++ API includes only the API for executing graphs, as of
4 | version 0.5. To control the execution of a graph from C++:
5 |
6 | 1. Build the computation graph using the [Python API](../../api_docs/python/).
7 | 1. Use [tf.train.write_graph()](../../api_docs/python/train.md#write_graph) to
8 | write the graph to a file.
9 | 1. Load the graph using the C++ Session API. For example:
10 |
11 | ```c++
12 | // Reads a model graph definition from disk, and creates a session object you
13 | // can use to run it.
14 | Status LoadGraph(string graph_file_name, Session** session) {
15 | GraphDef graph_def;
16 | TF_RETURN_IF_ERROR(
17 | ReadBinaryProto(Env::Default(), graph_file_name, &graph_def));
18 | TF_RETURN_IF_ERROR(NewSession(SessionOptions(), session));
19 | TF_RETURN_IF_ERROR((*session)->Create(graph_def));
20 | return Status::OK();
21 | }
22 | ```
23 |
24 | 1. Run the graph with a call to `session->Run()`, as in the sketch below.
25 |
26 |
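As a minimal follow-up sketch to `LoadGraph()` above (the tensor and node names are placeholders for whatever your graph defines):

```c++
// Feed one input tensor, fetch one output tensor, run no extra target nodes.
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs = {
    {"input", input_tensor}};
std::vector<tensorflow::Tensor> outputs;
TF_CHECK_OK(session->Run(inputs, {"output"}, {}, &outputs));
```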
27 | ## Classes
28 |
29 | * [tensorflow::Env](../../api_docs/cc/ClassEnv.md)
30 | * [tensorflow::EnvWrapper](../../api_docs/cc/ClassEnvWrapper.md)
31 | * [tensorflow::RandomAccessFile](../../api_docs/cc/ClassRandomAccessFile.md)
32 | * [tensorflow::Session](../../api_docs/cc/ClassSession.md)
33 | * [tensorflow::Status](../../api_docs/cc/ClassStatus.md)
34 | * [tensorflow::Tensor](../../api_docs/cc/ClassTensor.md)
35 | * [tensorflow::TensorBuffer](../../api_docs/cc/ClassTensorBuffer.md)
36 | * [tensorflow::TensorShape](../../api_docs/cc/ClassTensorShape.md)
37 | * [tensorflow::TensorShapeIter](../../api_docs/cc/ClassTensorShapeIter.md)
38 | * [tensorflow::TensorShapeUtils](../../api_docs/cc/ClassTensorShapeUtils.md)
39 | * [tensorflow::Thread](../../api_docs/cc/ClassThread.md)
40 | * [tensorflow::WritableFile](../../api_docs/cc/ClassWritableFile.md)
41 |
42 | ## Structs
43 |
44 | * [tensorflow::SessionOptions](../../api_docs/cc/StructSessionOptions.md)
45 | * [tensorflow::Status::State](../../api_docs/cc/StructState.md)
46 | * [tensorflow::TensorShapeDim](../../api_docs/cc/StructTensorShapeDim.md)
47 | * [tensorflow::ThreadOptions](../../api_docs/cc/StructThreadOptions.md)
48 |
49 |
--------------------------------------------------------------------------------
/tex_pdf/api/images/DynamicPartition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/DynamicPartition.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/DynamicStitch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/DynamicStitch.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/Gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/Gather.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/ScatterAdd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/ScatterAdd.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/ScatterSub.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/ScatterSub.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/ScatterUpdate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/ScatterUpdate.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/SegmentMax.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/SegmentMax.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/SegmentMean.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/SegmentMean.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/SegmentMin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/SegmentMin.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/SegmentProd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/SegmentProd.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/SegmentSum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/SegmentSum.png
--------------------------------------------------------------------------------
/tex_pdf/api/images/UnsortedSegmentSum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/api/images/UnsortedSegmentSum.png
--------------------------------------------------------------------------------
/tex_pdf/api/python/c4s10_python_io.tex:
--------------------------------------------------------------------------------
1 |
2 |
3 | \section{Data IO (Python functions) }\label{data-io-python-functions}
4 |
5 | \subsection{Contents}\label{contents}
6 |
7 | \subsubsection{\texorpdfstring{\protect\hyperlink{AUTOGENERATED-data-io--python-functions-}{Data
8 | IO (Python
9 | functions)}}{Data IO (Python functions)}}\label{data-io-python-functions-1}
10 |
11 | \begin{itemize}
12 | \tightlist
13 | \item
14 | \protect\hyperlink{AUTOGENERATED-data-io--python-functions-}{Data IO
15 | (Python Functions)}
16 | \item
17 | \protect\hyperlink{TFRecordWriter}{\texttt{class\ tf.python\_io.TFRecordWriter}}
18 | \item
19 | \protect\hyperlink{tfux5frecordux5fiterator}{\texttt{tf.python\_io.tf\_record\_iterator(path)}}
20 | \item
21 | \protect\hyperlink{AUTOGENERATED-tfrecords-format-details}{TFRecords
22 | Format Details}
23 | \end{itemize}
24 |
25 | \subsection{Data IO (Python Functions)
26 | }\label{data-io-python-functions-2}
27 |
28 | A TFRecords file represents a sequence of (binary) strings. The format
29 | is not random access, so it is suitable for streaming large amounts of
30 | data but not suitable if fast sharding or other non-sequential access is
31 | desired.
32 |
33 | \begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
34 |
35 | \subsubsection{\texorpdfstring{\texttt{class\ tf.python\_io.TFRecordWriter}
36 | }{class tf.python\_io.TFRecordWriter }}\label{class-tf.pythonux5fio.tfrecordwriter}
37 |
38 | A class to write records to a TFRecords file.
39 |
40 | This class implements \texttt{\_\_enter\_\_} and \texttt{\_\_exit\_\_},
41 | and can be used in \texttt{with} blocks like a normal file.
42 |
43 | \begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
44 |
45 | \paragraph{\texorpdfstring{\texttt{tf.python\_io.TFRecordWriter.\_\_init\_\_(path)}
46 | }{tf.python\_io.TFRecordWriter.\_\_init\_\_(path) }}\label{tf.pythonux5fio.tfrecordwriter.ux5fux5finitux5fux5fpath}
47 |
48 | Opens file \texttt{path} and creates a \texttt{TFRecordWriter} writing
49 | to it.
50 |
51 | \subparagraph{Args: }\label{args}
52 |
53 | \begin{itemize}
54 | \tightlist
55 | \item
56 | \texttt{path}: The path to the TFRecords file.
57 | \end{itemize}
58 |
59 | \subparagraph{Raises: }\label{raises}
60 |
61 | \begin{itemize}
62 | \tightlist
63 | \item
64 | \texttt{IOError}: If \texttt{path} cannot be opened for writing.
65 | \end{itemize}
66 |
67 | \begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
68 |
69 | \paragraph{\texorpdfstring{\texttt{tf.python\_io.TFRecordWriter.write(record)}
70 | }{tf.python\_io.TFRecordWriter.write(record) }}\label{tf.pythonux5fio.tfrecordwriter.writerecord}
71 |
72 | Write a string record to the file.
73 |
74 | \subparagraph{Args: }\label{args-1}
75 |
76 | \begin{itemize}
77 | \tightlist
78 | \item
79 | \texttt{record}: str
80 | \end{itemize}
81 |
82 | \begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
83 |
84 | \paragraph{\texorpdfstring{\texttt{tf.python\_io.TFRecordWriter.close()}
85 | }{tf.python\_io.TFRecordWriter.close() }}\label{tf.pythonux5fio.tfrecordwriter.close}
86 |
87 | Close the file.
88 |
89 | \begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
90 |
91 | \subsubsection{\texorpdfstring{\texttt{tf.python\_io.tf\_record\_iterator(path)}
92 | }{tf.python\_io.tf\_record\_iterator(path) }}\label{tf.pythonux5fio.tfux5frecordux5fiteratorpath}
93 |
94 | An iterator that reads the records from a TFRecords file.
95 |
96 | \subparagraph{Args: }\label{args-2}
97 |
98 | \begin{itemize}
99 | \tightlist
100 | \item
101 | \texttt{path}: The path to the TFRecords file.
102 | \end{itemize}
103 |
104 | \subparagraph{Yields: }\label{yields}
105 |
106 | Strings.
107 |
108 | \subparagraph{Raises: }\label{raises-1}
109 |
110 | \begin{itemize}
111 | \tightlist
112 | \item
113 | \texttt{IOError}: If \texttt{path} cannot be opened for reading.
114 | \end{itemize}
115 |
116 | \begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
117 |
118 | \subsubsection{TFRecords Format Details
119 | }\label{tfrecords-format-details}
120 |
121 | A TFRecords file contains a sequence of strings with CRC hashes. Each
122 | record has the format
123 |
124 | \begin{verbatim}
125 | uint64 length
126 | uint32 masked_crc32_of_length
127 | byte data[length]
128 | uint32 masked_crc32_of_data
129 | \end{verbatim}
130 |
131 | and the records are concatenated together to produce the file. The
132 | CRC32s are
133 | \href{https://en.wikipedia.org/wiki/Cyclic_redundancy_check}{described
134 | here}, and the mask of a CRC is
135 |
136 | \begin{verbatim}
137 | masked_crc = ((crc >> 15) | (crc << 17)) + 0xa282ead8ul
138 | \end{verbatim}
139 |
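As a rough illustration (not taken from the TensorFlow sources), the masking step
above can be written as a small C++ helper:

\begin{verbatim}
// crc is the crc32c value of the length or data field being masked.
uint32_t MaskedCrc(uint32_t crc) {
  return ((crc >> 15) | (crc << 17)) + 0xa282ead8ul;
}
\end{verbatim}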
140 |
--------------------------------------------------------------------------------
/tex_pdf/get_started/c1s01_introduction.tex:
--------------------------------------------------------------------------------
1 | %!TEX program = xelatex
2 | % Encoding: UTF8
3 | % SEIKA 2016 | seika@live.ca
4 |
5 | % Chapter 1
6 | % Section 1.1 Introduction
7 |
8 | \section{Introduction || 简介}
9 |
10 | Ⓔ \textcolor{etc}{Let's get you up and running with TensorFlow!}
11 |
12 | Ⓒ 本章的目的是让你了解和运行 TensorFlow!
13 |
14 | Ⓔ \textcolor{etc}{But before we even get started, let's peek at what TensorFlow code looks like in the Python API, so you have a sense of where we're headed.}
15 |
16 | Ⓒ 在开始之前,让我们先看一段使用 Python API 撰写的 TensorFlow 示例代码,
17 | 让你对将要学习的内容有初步的印象.
18 |
19 | Ⓔ \textcolor{etc}{Here's a little Python program that makes up some data in two dimensions, and then fits a line to it.}
20 |
21 | Ⓒ 下面这段短小的Python程序将把一些数据放入二维空间,再用一条线来拟合这些数据.
22 |
23 | \begin{lstlisting}
24 | import tensorflow as tf
25 | import numpy as np
26 |
27 | # Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
28 | x_data = np.random.rand(100).astype("float32")
29 | y_data = x_data * 0.1 + 0.3
30 |
31 | # Try to find values for W and b that compute y_data = W * x_data + b
32 | # (We know that W should be 0.1 and b 0.3, but Tensorflow will
33 | # figure that out for us.)
34 | W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
35 | b = tf.Variable(tf.zeros([1]))
36 | y = W * x_data + b
37 |
38 | # Minimize the mean squared errors.
39 | loss = tf.reduce_mean(tf.square(y - y_data))
40 | optimizer = tf.train.GradientDescentOptimizer(0.5)
41 | train = optimizer.minimize(loss)
42 |
43 | # Before starting, initialize the variables. We will 'run' this first.
44 | init = tf.global_variables_initializer()
45 |
46 | # Launch the graph.
47 | sess = tf.Session()
48 | sess.run(init)
49 |
50 | # Fit the line.
51 | for step in xrange(201):
52 | sess.run(train)
53 | if step % 20 == 0:
54 | print(step, sess.run(W), sess.run(b))
55 |
56 | # Learns best fit is W: [0.1], b: [0.3]
57 | \end{lstlisting}
58 |
59 | Ⓔ \textcolor{etc}{The first part of this code builds the data flow graph. TensorFlow does not actually run any computation until the session is created and the run function is called.}
60 |
61 | Ⓒ 以上代码的第一部分构建了数据的流向图(flow graph).在一个session被建立并且\lstinline{run()}函数被运行前,TensorFlow不会进行任何实质的计算.
62 |
63 | Ⓔ \textcolor{etc}{To whet your appetite further, we suggest you check out what a classical machine learning problem looks like in TensorFlow. In the land of neural networks the most "classic" classical problem is the MNIST handwritten digit classification. We offer two introductions here, one for machine learning newbies, and one for pros. If you've already trained dozens of MNIST models in other software packages, please take the red pill. If you've never even heard of MNIST, definitely take the blue pill. If you're somewhere in between, we suggest skimming blue, then red.}
64 |
65 | Ⓒ 为了进一步激发你的学习欲望,我们想让你先看一下TensorFlow是如何解决一个经典的机器学习问题的.在神经网络领域,最为经典的问题莫过于MNIST手写数字分类.为此,我们准备了两篇不同的教程,分别面向初学者和专家.如果你已经使用其它软件训练过许多MNIST模型,请参阅\hyperref[MINIST_pros]{高级教程(红色药丸)}.如果你以前从未听说过 MNIST ,请先阅读\hyperref[MINIST_beginner]{初级教程(蓝色药丸)}.如果你的水平介于这两类人之间,我们建议你先快速浏览\hyperref[MINIST_beginner]{初级教程},然后再阅读\hyperref[MINIST_pros]{高级教程}.
66 |
67 | % Add pics and links here.
68 |
69 | Ⓔ \textcolor{etc}{If you're already sure you want to learn and install TensorFlow you can skip these and charge ahead. Don't worry, you'll still get to see MNIST -- we'll also use MNIST as an example in our technical tutorial where we elaborate on TensorFlow features.}
70 |
71 | \begin{center}
72 | \includegraphics[width=.45\textwidth]{../SOURCE/images/blue_pill.png}
73 | \includegraphics[width=.45\textwidth]{../SOURCE/images/red_pill.png}
74 | \end{center}
75 |
76 | Ⓒ 如果你已下定决心准备学习和安装 TensorFlow ,你可以略过这些文字,直接阅读
77 | 后面的章节\footnote{\textbf{推荐随后阅读内容}:\hyperref[download_install]{1 下载与安装}, \hyperref[basic_usage]{2 基本使用}, \hyperref[tf_mech101]{3 TensorFlow 101}.}.不用担心,你仍然会看到 MNIST --- 在阐述TensorFlow的特性时,
78 | 我们还会使用MNIST作为一个样例.
--------------------------------------------------------------------------------
/tex_pdf/how_tos/c3s00_overview.tex:
--------------------------------------------------------------------------------
1 |
2 |
3 | % \section{Overview}\label{ux7efcux8ff0-overview}
4 |
5 | Overview
6 |
7 | \subsection{Variables: Creation, Initialization, Saving, and
8 | Restoring}\label{variables-ux521bux5efaux521dux59cbux5316ux4fddux5b58ux548cux6062ux590d}
9 |
10 | TensorFlow Variables are in-memory buffers that hold tensors. This
11 | section shows how to use them to create, save, and update model
12 | parameters during training.
13 |
14 | \href{../how_tos/variables.md}{See the tutorial}
15 |
16 | \subsection{TensorFlow Mechanics 101}\label{tensorflow-ux673aux5236-101}
17 |
18 | Using MNIST handwritten digit recognition as a small example, this is a
19 | step-by-step walk-through of the details of training models at scale with the TensorFlow infrastructure.
20 |
21 | \href{../tutorials/mnist_tf.md}{See the tutorial}
22 |
23 | \subsection{TensorBoard: Visualizing
24 | Learning}\label{tensorboard-ux5b66ux4e60ux8fc7ux7a0bux7684ux53efux89c6ux5316}
25 |
26 | TensorBoard is a useful visualization tool when training and evaluating
27 | models. This tutorial explains how to create and run TensorBoard, and how
28 | to use summary operations (Summary ops), which automatically send data to
29 | the event files that TensorBoard reads.
30 |
31 | \href{../how_tos/summaries_and_tensorboard.md}{See the tutorial}
32 |
33 | \subsection{TensorBoard: Graph
34 | Visualization}\label{tensorboard-ux56feux7684ux53efux89c6ux5316}
35 |
36 | This tutorial shows how to use the visualization tools in TensorBoard,
37 | which help you understand and debug the dataflow graph.
38 |
39 | \href{../how_tos/graph_viz.md}{See the tutorial}
40 |
41 | \subsection{Reading Data}\label{ux6570ux636eux8bfbux5165}
42 |
43 | This tutorial covers the three main methods of getting data into a
44 | TensorFlow program: feeding, reading, and preloading.
45 |
46 | \href{../how_tos/reading_data.md}{See the tutorial}
47 |
48 | \subsection{Threading and Queues}\label{ux7ebfux7a0bux548cux961fux5217}
49 |
50 | This tutorial introduces the various constructs in TensorFlow that make
51 | asynchronous and concurrent training easier.
52 |
53 | \href{../how_tos/threading_and_queues.md}{See the tutorial}
54 |
55 | \subsection{Adding a New Op}\label{ux6dfbux52a0ux65b0ux7684-op}
56 |
57 | TensorFlow already ships with a whole suite of node operations that you
58 | can use freely in your graph, but here are the details of how to add a custom op.
59 |
60 | \href{../how_tos/adding_an_op.md}{See the tutorial}
61 |
62 | \subsection{Custom Data
63 | Readers}\label{ux81eaux5b9aux4e49ux6570ux636eux7684-readers}
64 |
65 | If you have a sizable custom dataset, you may want to extend TensorFlow's
66 | Data Readers so that your data can be read in its native format.
67 |
68 | \href{../how_tos/new_data_formats.md}{See the tutorial}
69 |
70 | \subsection{Using GPUs}\label{ux4f7fux7528-gpus}
71 |
72 | This tutorial describes how to build and run models on multiple GPUs.
73 |
74 | \href{../how_tos/using_gpu.md}{See the tutorial}
75 |
76 | \subsection{Sharing
77 | Variables}\label{ux5171ux4eabux53d8ux91cf-sharing-variables}
78 |
79 | When deploying large models on multiple GPUs, or unrolling complex LSTMs
80 | or RNNs, it is often necessary to read and write the same variables from
81 | different places in the model-construction code. The Variable Scope mechanism was designed to make this easier.
82 |
83 | \href{../how_tos/variable_scope/index.md}{See the tutorial}
84 |
85 | Original: \href{http://tensorflow.org/how_tos/index.html}{How-to}
86 |
87 | Translated by: \href{https://github.com/TerenceCooper}{Terence Cooper}
88 |
89 | Reviewed by: \href{https://github.com/lonlonago}{lonlonago}
--------------------------------------------------------------------------------
/tex_pdf/how_tos/c3s03_viz_learning.tex:
--------------------------------------------------------------------------------
1 |
2 |
3 | \section{TensorBoard: Visualizing Learning}\label{vis_learning}
4 |
5 | The computations you will use TensorFlow for, such as training a huge
6 | deep neural network, can be complex and hard to understand.
7 |
8 | To make TensorFlow programs easier to understand, debug, and optimize, we
9 | have released a suite of visualization tools called TensorBoard. You can use
10 | TensorBoard to display your TensorFlow graph, plot quantitative metrics about its execution, and show additional data that passes through it.
11 |
12 | When TensorBoard is fully configured, it looks like this:
13 |
14 | \begin{figure}[htbp]
15 | \centering
16 | \includegraphics[width=.95\textwidth]{../SOURCE/images/mnist_tensorboard.png}
17 | \caption{MNIST TensorBoard}
18 | \end{figure}
19 |
20 | \subsection{Serializing the Data}\label{ux6570ux636eux5e8fux5217ux5316}
21 |
22 | TensorBoard operates by reading TensorFlow event files. A TensorFlow
23 | event file contains the main summary data generated while TensorFlow runs. Here is
24 | the general lifecycle of the summary data inside TensorBoard.
25 |
26 | First, create the TensorFlow
27 | graph whose data you want to summarize, and decide which nodes you would like to annotate with \href{../api_docs/python/train.md\#summary_options}{summary operations}.
28 |
29 | For example, suppose you are training a convolutional neural network for recognizing MNIST
30 | digits. You might want to record how the learning
31 | rate varies over time and how the objective function is changing. Collect these by attaching \href{../api_docs/python/train.md\#scalary_summary}{scalar\_summary} ops to the nodes that output the learning rate and the loss, respectively. Then give each
32 | scalar\_summary a meaningful \texttt{tag}, such as
33 | \texttt{\textquotesingle{}learning\ rate\textquotesingle{}} and
34 | \texttt{\textquotesingle{}loss\ function\textquotesingle{}}.
35 |
36 | Perhaps you would also like to visualize the distribution of activations in a particular layer, or the distribution of gradients or weights. Collect these by attaching
37 | \href{../api_docs/python/train.md\#histogram_summary}{histogram\_summary}
38 | ops to the weight variables and the gradient outputs.
39 |
40 | For details on all of the available summary
41 | operations, check out the \href{../api_docs/python/train.md\#summary_operation}{summary operations} documentation.
42 |
43 | Operations in TensorFlow do not run until you execute them, or until another operation depends on their output. The summary
44 | nodes we have just created are peripheral to the graph: none of the ops you are currently running depend on them. So, to generate the summaries, we need to run all of those summary nodes. Managing them by hand would be tedious, so use \href{../api_docs/python/train.md\#scalary_summary}{tf.merge\_all\_summaries} to combine them into a single op.
45 |
46 | Then you can run the merged op, which will generate a serialized \texttt{Summary}
47 | protobuf object containing all of the summary data for a given step. Finally, to write this summary data to disk, pass the summary protobuf to a \href{../api_docs/python/train.md\#SummaryWriter}{tf.train.SummaryWriter}.
48 |
49 | The \texttt{SummaryWriter} constructor takes a logdir argument. This logdir
50 | is quite important: it is the directory where all of the events will be written. The \texttt{SummaryWriter}
51 | also optionally takes a \texttt{GraphDef} argument. If it receives one, then
52 | TensorBoard will visualize your graph as well.
53 |
54 | Now that you have modified your graph and have a
55 | \texttt{SummaryWriter}, you are ready to run your network! If you wanted, you could run the merged summary op every single step and record a huge amount of training data. That is likely more data than you need, so instead you can run the merged summary op every hundred steps or so, as the following code example does.
56 |
57 | \begin{Shaded}
58 | \begin{Highlighting}[]
59 | \NormalTok{merged_summary_op }\OperatorTok{=} \NormalTok{tf.merge_all_summaries()}
60 | \NormalTok{summary_writer }\OperatorTok{=} \NormalTok{tf.train.SummaryWriter(}\StringTok{'/tmp/mnist_logs'}\NormalTok{, sess.graph)}
61 | \NormalTok{total_step }\OperatorTok{=} \DecValTok{0}
62 | \ControlFlowTok{while} \NormalTok{training:}
63 | \NormalTok{total_step }\OperatorTok{+=} \DecValTok{1}
64 | \NormalTok{session.run(training_op)}
65 | \ControlFlowTok{if} \NormalTok{total_step }\OperatorTok{%} \DecValTok{100} \OperatorTok{==} \DecValTok{0}\NormalTok{:}
66 | \NormalTok{summary_str }\OperatorTok{=} \NormalTok{session.run(merged_summary_op)}
67 | \NormalTok{summary_writer.add_summary(summary_str, total_step)}
68 | \end{Highlighting}
69 | \end{Shaded}
70 |
71 | You are now all set to visualize this data with TensorBoard.
72 |
73 | \subsection{Launching TensorBoard}\label{ux542fux52a8tensorboard}
74 |
75 | Run the following command to start TensorBoard:
76 |
77 | \begin{verbatim}
78 | python tensorflow/tensorboard/tensorboard.py --logdir=path/to/log-directory
79 | \end{verbatim}
80 |
81 | Here \texttt{logdir} points to the directory where the \texttt{SummaryWriter}
82 | serialized its data. If the \texttt{logdir} directory contains subdirectories with serialized data from separate runs, then
83 | TensorBoard will visualize the data from all of those runs. Once TensorBoard
84 | is running, navigate your web browser to \texttt{localhost:6006} to view
85 | TensorBoard.
86 |
87 | If you have installed TensorBoard via pip, you can start it with an even simpler
88 | command:
89 |
90 | \begin{verbatim}
91 | tensorboard --logdir=/path/to/log-directory
92 | \end{verbatim}
93 |
94 | When looking at TensorBoard,
95 | you will see the navigation tabs in the top-right corner. Each tab represents a set of serialized data that can be
96 | visualized. For any tab you are looking at, if TensorBoard
97 | has no data for that kind of visualization, it will display a message explaining how to serialize the relevant data.
98 |
99 | For in-depth information on how to use the graph tab to visualize your graph, see
100 | \href{./graph_viz.md}{TensorBoard: Graph Visualization}.
101 |
102 | Original: \href{http://tensorflow.org/how_tos/summaries_and_tensorboard/index.html\#tensorboard-visualizing-learning}{TensorBoard: Visualizing
103 | Learning} Translated by: \href{https://github.com/thylaco1eo}{thylaco1eo}
104 | Reviewed by: \href{https://github.com/lucky521}{lucky521}
105 |
106 |
--------------------------------------------------------------------------------
/tex_pdf/tensorflow_manual_cn.idx:
--------------------------------------------------------------------------------
1 | \indexentry{graph|hyperpage}{27}
2 | \indexentry{session|hyperpage}{27}
3 | \indexentry{tensor|hyperpage}{27}
4 | \indexentry{variable|hyperpage}{27}
5 | \indexentry{MNIST 数据集|hyperpage}{39}
6 | \indexentry{Softmax regression|hyperpage}{42}
7 | \indexentry{梯度下降法|hyperpage}{49}
8 | \indexentry{Softmax regression|hyperpage}{54}
9 | \indexentry{卷积神经网络|hyperpage}{57}
10 |
--------------------------------------------------------------------------------
/tex_pdf/tensorflow_manual_cn.ilg:
--------------------------------------------------------------------------------
1 | This is makeindex, version 2.15 [TeX Live 2015/W32TeX] (kpathsea + Thai support).
2 | Scanning input file tensorflow_manual_cn.idx....done (9 entries accepted, 0 rejected).
3 | Sorting entries....done (33 comparisons).
4 | Generating output file tensorflow_manual_cn.ind....done (30 lines written, 0 warnings).
5 | Output written in tensorflow_manual_cn.ind.
6 | Transcript written in tensorflow_manual_cn.ilg.
7 |
--------------------------------------------------------------------------------
/tex_pdf/tensorflow_manual_cn.ind:
--------------------------------------------------------------------------------
1 | \begin{theindex}
2 |
3 | \item graph, \hyperpage{26}
4 |
5 | \indexspace
6 |
7 | \item MNIST 数据集, \hyperpage{37}
8 |
9 | \indexspace
10 |
11 | \item session, \hyperpage{26}
12 | \item Softmax regression, \hyperpage{40}, \hyperpage{52}
13 |
14 | \indexspace
15 |
16 | \item tensor, \hyperpage{26}
17 |
18 | \indexspace
19 |
20 | \item variable, \hyperpage{26}
21 |
22 | \indexspace
23 |
24 | \item 卷积神经网络, \hyperpage{55}
25 |
26 | \indexspace
27 |
28 | \item 梯度下降法, \hyperpage{47}
29 |
30 | \end{theindex}
31 |
--------------------------------------------------------------------------------
/tex_pdf/tensorflow_manual_cn.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jikexueyuanwiki/tensorflow-zh/28dd80bc65d60fe1fc8a19583ed9a669f16e0e23/tex_pdf/tensorflow_manual_cn.pdf
--------------------------------------------------------------------------------
/tex_pdf/tutorials/c2s00_overview.tex:
--------------------------------------------------------------------------------
1 | %!TEX program = xelatex
2 | % Encoding: UTF8
3 | % SEIKA 2015
4 |
5 |
6 | % Chapter 2 TutorialsHow to ...
7 | % Section 2.1
8 |
9 | % \section{综述}
10 |
11 | \textbf{综述}
12 |
13 | \hyperref[MINIST_beginner]{\textcolor{etc}{\textbf{MNIST For ML Beginners}} || \textbf{MNIST 机器学习入门}}
14 |
15 | \textcolor{etc}{If you're new to machine learning, we recommend starting here. You'll learn about a classic problem, handwritten digit classification (MNIST), and get a gentle introduction to multiclass classification.}
16 |
17 | 如果你是机器学习领域的新手, 我们推荐你从本文开始阅读. 本文通过讲述一个经典的问题, 手写数字识别 (MNIST), 让你对多类分类 (multiclass classification) 问题有直观的了解.
18 |
19 | \textcolor{etc}{\textbf{Deep MNIST for Experts}} || \textbf{深入MNIST}
20 |
21 | \textcolor{etc}{If you're already familiar with other deep learning software packages, and are already familiar with MNIST, this tutorial will give you a very brief primer on TensorFlow.}
22 |
23 | 如果你已经对其它深度学习软件比较熟悉, 并且也对 MNIST 很熟悉, 这篇教程能够引导你对 TensorFlow 有初步了解.
24 |
25 | \hyperref[MINIST_pros]{View Tutorial | 阅读该教程}
26 |
27 | \textcolor{etc}{\textbf{TensorFlow Mechanics 101}} || \textbf{TensorFlow 机制 101}
28 |
29 | \textcolor{etc}{This is a technical tutorial, where we walk you through the details of using TensorFlow infrastructure to train models at scale. We use again MNIST as the example.}
30 |
31 | 这是一篇技术教程, 详细介绍了如何使用 TensorFlow 架构训练大规模模型. 本文继续使用MNIST 作为例子.
32 |
33 | \hyperref[tf_mech101]{View Tutorial | 阅读该教程}
34 |
35 | \textbf{Convolutional Neural Networks}
36 |
37 | An introduction to convolutional neural networks using the CIFAR-10 data set. Convolutional neural nets are particularly tailored to images, since they exploit translation invariance to yield more compact and effective representations of visual content.
38 |
39 | 这篇文章介绍了如何使用 TensorFlow 在 CIFAR-10 数据集上训练卷积神经网络. 卷积神经网络是为图像识别量身定做的一个模型. 相比其它模型, 该模型利用了平移不变性(translation invariance), 从而能够更简洁有效地表示视觉内容.
40 |
41 | View Tutorial
42 |
43 | \textbf{Vector Representations of Words}
44 |
45 | This tutorial motivates why it is useful to learn to represent words as vectors (called word embeddings). It introduces the word2vec model as an efficient method for learning embeddings. It also covers the high-level details behind noise-contrastive training methods (the biggest recent advance in training embeddings).
46 |
47 | 本文让你了解为什么学会使用向量来表示单词, 即单词嵌套 (word embedding), 是一件很有用的事情. 文章中介绍的 word2vec 模型, 是一种高效学习嵌套的方法. 本文还涉及了对比噪声(noise-contrastive) 训练方法的一些高级细节, 该训练方法是训练嵌套领域最近最大的进展.
48 |
49 | View Tutorial
50 |
51 | \textbf{Recurrent Neural Networks}
52 |
53 | An introduction to RNNs, wherein we train an LSTM network to predict the next word in an English sentence. (A task sometimes called language modeling.)
54 |
55 | 一篇 RNN 的介绍文章, 文章中训练了一个 LSTM 网络来预测一个英文句子的下一个单词(该任务有时候被称作语言建模).
56 |
57 | View Tutorial
58 |
59 | \textbf{Sequence-to-Sequence Models}
60 |
61 | A follow on to the RNN tutorial, where we assemble a sequence-to-sequence model for machine translation. You will learn to build your own English-to-French translator, entirely machine learned, end-to-end.
62 |
63 | RNN 教程的后续, 该教程采用序列到序列模型进行机器翻译. 你将学会构建一个完全基于机器学习,端到端的\emph{英语-法语}翻译器.
64 |
65 | View Tutorial
66 |
67 | \textbf{Mandelbrot Set}
68 |
69 | TensorFlow can be used for computation that has nothing to do with machine learning. Here's a naive implementation of Mandelbrot set visualization.
70 |
71 | TensorFlow 可以用于与机器学习完全无关的其它计算领域. 这里实现了一个原生的 Mandelbrot 集合的可视化程序.
72 |
73 | View Tutorial
74 |
75 | \textbf{Partial Differential Equations}
76 |
77 | As another example of non-machine learning computation, we offer an example of a naive PDE simulation of raindrops landing on a pond.
78 |
79 | 这是另外一个非机器学习计算的例子, 我们利用一个原生实现的偏微分方程, 对雨滴落在池塘上的过程进行仿真.
80 |
81 | View Tutorial
82 |
83 | \textbf{MNIST Data Download}
84 |
85 | Details about downloading the MNIST handwritten digits data set. Exciting stuff.
86 |
87 | 一篇关于下载 MNIST 手写识别数据集的详细教程.
88 |
89 | View Tutorial
90 |
91 | \textbf{Image Recognition}
92 |
93 | How to run object recognition using a convolutional neural network trained on ImageNet Challenge data and label set.
94 |
95 | 如何利用在 ImageNet 挑战赛数据和标签集上训练过的卷积神经网络来运行物体识别。
96 |
97 | View Tutorial
98 |
99 | We will soon be releasing code for training a state-of-the-art Inception model.
100 |
101 | \textbf{Deep Dream Visual Hallucinations}
102 |
103 | Building on the Inception recognition model, we will release a TensorFlow version of the Deep Dream neural network visual hallucination software.
104 |
105 | 我们也将公布一个训练高级的 Inception 模型所用的代码。
106 |
107 | COMING SOON
--------------------------------------------------------------------------------