├── .gitignore ├── 01-TF2.0-Overview ├── .gitignore ├── README.md ├── conv_train.py ├── fc_train.py └── features.png ├── 02-AutoGraph ├── README.md ├── graph.gif └── main.py ├── 03-Play-with-MNIST ├── README.md ├── main.py └── mnist.gif ├── 04-Linear-Regression ├── README.md ├── linear_regression.png └── main.py ├── 05-FashionMNIST ├── .gitignore ├── README.md ├── fashion_mnist.png ├── mnist_Seqential_gradient.py ├── mnist_custommodel.py ├── mnist_fit.py └── mnist_matmul.py ├── 06-CIFAR-VGG ├── .gitignore ├── README.md ├── main.py ├── network.py └── vgg16.png ├── 07-Inception ├── .gitignore ├── README.md ├── inception.png └── main.py ├── 08-ResNet ├── .gitignore ├── README.md ├── main.py └── resnet.jpeg ├── 09-RNN-Sentiment-Analysis ├── .gitignore ├── README.md ├── imdb.png └── main.py ├── 10-ColorBot ├── .gitignore ├── README.md ├── blue.png ├── green.png ├── main.py ├── model.py ├── purple.png ├── red.png ├── shot.png └── utils.py ├── 11-AE ├── README.md ├── autoencoder.png ├── images │ ├── vae_reconstructed_epoch_1.png │ ├── vae_reconstructed_epoch_2.png │ ├── vae_reconstructed_epoch_3.png │ ├── vae_reconstructed_epoch_4.png │ └── vae_reconstructed_epoch_5.png └── main.py ├── 12-VAE ├── README.md ├── images │ ├── vae_reconstructed_epoch_1.png │ ├── vae_reconstructed_epoch_2.png │ ├── vae_reconstructed_epoch_3.png │ ├── vae_reconstructed_epoch_4.png │ ├── vae_reconstructed_epoch_5.png │ ├── vae_reconstructed_epoch_6.png │ ├── vae_reconstructed_epoch_7.png │ ├── vae_reconstructed_epoch_8.png │ ├── vae_reconstructed_epoch_9.png │ ├── vae_sampled_epoch_1.png │ ├── vae_sampled_epoch_2.png │ ├── vae_sampled_epoch_3.png │ ├── vae_sampled_epoch_4.png │ ├── vae_sampled_epoch_5.png │ ├── vae_sampled_epoch_6.png │ ├── vae_sampled_epoch_7.png │ ├── vae_sampled_epoch_8.png │ └── vae_sampled_epoch_9.png ├── main.py └── variational_autoencoder.png ├── 13-DCGAN ├── .gitignore ├── README.md ├── dcgan.png ├── gan.py └── main.py ├── 14-Pixel2Pixel ├── .gitignore ├── README.md ├── gd.py ├── main.py └── pix2pix.jpg ├── 15-CycleGAN ├── .gitignore ├── README.md ├── cyclegan.jpg ├── images │ ├── generated_0.png │ └── generated_40.png ├── main.py └── model.py ├── 16-fasterRCNN ├── .gitignore ├── LICENSE ├── README.md ├── detection │ ├── core │ │ ├── anchor │ │ │ ├── anchor_generator.py │ │ │ └── anchor_target.py │ │ ├── bbox │ │ │ ├── bbox_target.py │ │ │ ├── geometry.py │ │ │ └── transforms.py │ │ └── loss │ │ │ └── losses.py │ ├── datasets │ │ ├── coco.py │ │ ├── data_generator.py │ │ ├── transforms.py │ │ └── utils.py │ ├── models │ │ ├── backbones │ │ │ └── resnet.py │ │ ├── bbox_heads │ │ │ └── bbox_head.py │ │ ├── detectors │ │ │ ├── faster_rcnn.py │ │ │ └── test_mixins.py │ │ ├── necks │ │ │ └── fpn.py │ │ ├── roi_extractors │ │ │ └── roi_align.py │ │ └── rpn_heads │ │ │ └── rpn_head.py │ └── utils │ │ └── misc.py ├── image_demo_ckpt.png ├── image_demo_random.png ├── inspect_model.ipynb ├── inspect_model.py ├── roi_test.py ├── shot.png ├── tf.image.crop_and_resize_test.png ├── train_model.ipynb ├── train_model.py └── visualize.py ├── 17-A2C ├── README.md ├── a2c.py ├── cartpole.gif └── reward.png ├── 18-GPT ├── README.md ├── architecture.png └── model.py ├── 19-BERT ├── README.md ├── __init__.py ├── architecture.png ├── bert.py ├── embedding_similarity │ ├── __init__.py │ └── embeddings.py ├── layer_normalization │ ├── __init__.py │ └── layer_normalization.py ├── layers │ ├── __init__.py │ ├── conv.py │ ├── embedding.py │ ├── extract.py │ ├── inputs.py │ ├── masked.py │ └── pooling.py ├── loader.py ├── 
main.py ├── multi_head_attention │ ├── __init__.py │ ├── multi_head.py │ └── multi_head_attention.py ├── pointwise_feedforward │ ├── __init__.py │ └── feed_forward.py ├── position_embedding │ ├── __init__.py │ ├── pos_embd.py │ └── trig_pos_embd.py ├── self_attention │ ├── __init__.py │ ├── scaled_dot_attention.py │ ├── seq_self_attention.py │ └── seq_weighted_attention.py ├── tokenizer.py └── transformer │ ├── __init__.py │ ├── gelu.py │ └── transformer.py ├── 20-GCN ├── README.MD ├── config.py ├── inits.py ├── layers.py ├── metrics.py ├── models.py ├── nohup.out ├── res │ └── screen.png ├── train.py └── utils.py ├── 21-CN-EN-Translation-BERT ├── Readme.md ├── attention.py ├── attlayer.py ├── bert_train.py ├── bertmodel.py ├── test.py ├── tokenizer.py ├── transformer.py ├── transformer_train.py └── utils.py ├── README.md ├── res ├── TF_QR_163.png ├── ai101.jpg ├── cover.png ├── tensorflow-2.0.gif ├── tensorflow-2.0.jpg └── weibo.jpg └── 深度学习与TensorFlow入门实战-源码和PPT ├── 0-课程介绍 ├── cover.png ├── 深度学习与TF.png └── 版权声明.png ├── Readme.md ├── lesson01-初见TensorFlow2.0 ├── autograd.py ├── gpu_accelerate.py ├── 初见TensorFlow2.0.pdf ├── 版权声明.png ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson02-开发环境准备 ├── test.py ├── 开发环境准备.pdf ├── 版权声明.png ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson03-回归问题 ├── 回归问题.pdf └── 版权声明.png ├── lesson04-回归问题实战 ├── data.csv ├── linear_regression.py ├── 回归实战.pdf ├── 版权声明.png ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson05-手写数字问题 ├── 手写数字问题.pdf └── 版权声明.png ├── lesson06-手写数字识别初体验 ├── main.py ├── 手写数字问题体验.pdf └── 版权声明.png ├── lesson07-数据类型 ├── 代码量较少,同学们自己动手练习.txt ├── 数据类型.pdf └── 版权声明.png ├── lesson08-创建Tensor ├── 代码量较少,同学们自己动手练习.txt ├── 创建Tensor.pdf └── 版权声明.png ├── lesson09-索引与切片 ├── 代码量较少,同学们自己动手练习.txt ├── 索引与切片-1.pdf └── 索引与切片-2.pdf ├── lesson10-维度变换 └── 维度变换.pdf ├── lesson11-Broadcasting └── Broadcasting.pdf ├── lesson12-数学运算 └── 数学运算.pdf ├── lesson13-前向传播(张量)-实战 ├── forward.py ├── 前向传播.pdf ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson14-合并与分割 └── 合并与分割.pdf ├── lesson15-数据统计 └── 数据统计.pdf ├── lesson16-张量排序 ├── topk.py └── 张量排序.pdf ├── lesson17-填充与复制 └── 填充与复制.pdf ├── lesson18-数据限幅 ├── main.py └── 张量限幅.pdf ├── lesson19-高阶OP ├── meshgrid.py └── 高阶特性.pdf ├── lesson20-数据加载 └── 数据加载.pdf ├── lesson21-测试(张量)-实战 ├── forward.py ├── mnist_tensor.py └── 测试(张量)实战.pdf ├── lesson22-全连接层 ├── mlp.py └── 全接连层.pdf ├── lesson23-输出方式 └── 输出方式.pdf ├── lesson24-误差计算 ├── loss.py └── 误差计算.pdf ├── lesson25-梯度计算 ├── 0.梯度下降-简介.pdf ├── 2.常见函数的梯度.pdf ├── 2nd_derivative.py ├── 3.激活函数及其梯度.pdf ├── 4.损失函数及其梯度.pdf ├── 5.单输出感知机梯度.pdf ├── 6.多输出感知机梯度.pdf ├── 7.链式法则.pdf ├── 8.多层感知机梯度.pdf ├── chain_rule.py ├── crossentropy_loss.py ├── mse_grad.py ├── multi_output_perceptron.py ├── sigmoid_grad.py └── single_output_perceptron.py ├── lesson26-优化方法 ├── himmelblau.py └── 函数优化实战.pdf ├── lesson27-书写数字问题(层)-实战 ├── MNIST实战.pdf ├── fashionmnist_layer.py ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson28-可视化 ├── main.py └── 可视化.pdf ├── lesson30-Keras高层API ├── 1.Metrics.pdf ├── 2.Compile&Fit.pdf ├── 3.自定义层.pdf ├── compile_fit.py ├── layer_model.py └── metrics.py ├── lesson31-Keras模型保存与加载 ├── model.h5 ├── save_load_model.py ├── save_load_weight.py └── 模型加载与保存.pdf ├── lesson32-Keras实战 ├── Keras实战CIFAR10.pdf ├── keras_train.py ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson33-过拟合与欠拟合 └── 过拟合与欠拟合.pdf ├── lesson34-交叉验证 ├── compile_fit.py ├── train_evalute_test.py └── 交叉验证.pdf ├── lesson35-Regularization ├── Regularization.pdf └── regularization.py ├── lesson36-动量与学习率 └── 学习率与动量.pdf ├── 
lesson37-Early Stopping, Dropout ├── dropout.py └── misc.pdf ├── lesson38-卷积神经网络 ├── 什么是卷积.pdf └── 卷积神经网络.pdf ├── lesson39-池化与采样 └── 池化与采样.pdf ├── lesson40-CIFAR与VGG实战 ├── CIFAR与VGG实战.pdf ├── cifar100_train.py ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson41-经典卷积网络 └── 经典卷积网络.pdf ├── lesson42-BatchNorm ├── BatchNorm.pdf └── main.py ├── lesson43-ResNet ├── ResNet与DenseNet.pdf ├── ResNet实战.pdf ├── resnet.py └── resnet18_train.py ├── lesson44-循环神经网络 ├── 循环神经网络.pdf └── 时间序列表示.pdf ├── lesson45-RNN实战 ├── RNN Layer使用.pdf ├── sentiment_analysis_cell.py ├── sentiment_analysis_layer.py └── 情感分类实战.pdf ├── lesson46-LSTM ├── LSTM.pdf └── 梯度弥散与梯度爆炸.pdf ├── lesson47-LSTM实战 ├── LSTM实战.pdf ├── gru_sentiment_analysis_cell.py ├── gru_sentiment_analysis_layer.py ├── lstm_sentiment_analysis_cell.py └── lstm_sentiment_analysis_layer.py ├── lesson48-AutoEncoders └── AutoEncoders.pdf ├── lesson49-VAE实战 ├── AE实战.pdf ├── autoencoder.py ├── vae.py ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson50-GAN └── GAN.pdf ├── lesson51-WGAN实战 ├── GAN实战.pdf ├── dataset.py ├── gan.py ├── gan_train.py ├── wgan.py ├── wgan_train.py ├── 答疑群-926107229.png └── 请学员务必加群答疑!!!.txt ├── lesson52-自定义数据集和迁移学习 ├── pokemon.py ├── resnet.py ├── train_scratch.py ├── train_transfer.py └── 宝可梦数据集.pdf └── lesson选看-人工智能发展史 └── 人工智能发展史.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | .idea 3 | tmp 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | pip-wheel-metadata/ 28 | share/python-wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .nox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | .python-version 88 | 89 | # celery beat schedule file 90 | celerybeat-schedule 91 | 92 | # SageMath parsed files 93 | *.sage.py 94 | 95 | # Environments 96 | .env 97 | .venv 98 | env/ 99 | venv/ 100 | ENV/ 101 | env.bak/ 102 | venv.bak/ 103 | 104 | # Spyder project settings 105 | .spyderproject 106 | .spyproject 107 | 108 | # Rope project settings 109 | .ropeproject 110 | 111 | # mkdocs documentation 112 | /site 113 | 114 | # mypy 115 | .mypy_cache/ 116 | .dmypy.json 117 | dmypy.json 118 | 119 | # Pyre type checker 120 | .pyre/ 121 | -------------------------------------------------------------------------------- /01-TF2.0-Overview/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- /01-TF2.0-Overview/fc_train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | from tensorflow.keras import layers, optimizers, datasets 5 | 6 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'} 7 | 8 | def mnist_dataset(): 9 | (x, y), _ = datasets.mnist.load_data() 10 | ds = tf.data.Dataset.from_tensor_slices((x, y)) 11 | ds = ds.map(prepare_mnist_features_and_labels) 12 | ds = ds.take(20000).shuffle(20000).batch(100) 13 | return ds 14 | 15 | @tf.function 16 | def prepare_mnist_features_and_labels(x, y): 17 | x = tf.cast(x, tf.float32) / 255.0 18 | y = tf.cast(y, tf.int64) 19 | return x, y 20 | 21 | 22 | model = keras.Sequential([ 23 | layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)), 24 | layers.Dense(100, activation='relu'), 25 | layers.Dense(100, activation='relu'), 26 | layers.Dense(10)]) 27 | 28 | optimizer = optimizers.Adam() 29 | 30 | 31 | @tf.function 32 | def compute_loss(logits, labels): 33 | return tf.reduce_mean( 34 | tf.nn.sparse_softmax_cross_entropy_with_logits( 35 | logits=logits, labels=labels)) 36 | 37 | @tf.function 38 | def compute_accuracy(logits, labels): 39 | predictions = tf.argmax(logits, axis=1) 40 | return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32)) 41 | 42 | @tf.function 43 | def train_one_step(model, optimizer, x, y): 44 | 45 | with tf.GradientTape() as tape: 46 | logits = model(x) 47 | loss = compute_loss(logits, y) 48 | 49 | # compute gradient 50 | grads = tape.gradient(loss, model.trainable_variables) 51 | # update to weights 52 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 53 | 54 | accuracy = compute_accuracy(logits, y) 55 | 56 | # loss and accuracy is scalar tensor 57 | return loss, accuracy 58 | 59 | 60 | def train(epoch, model, optimizer): 61 | 62 | train_ds = mnist_dataset() 63 | loss = 0.0 64 | accuracy = 
0.0 65 | for step, (x, y) in enumerate(train_ds): 66 | loss, accuracy = train_one_step(model, optimizer, x, y) 67 | 68 | if step % 500 == 0: 69 | print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy()) 70 | 71 | return loss, accuracy 72 | 73 | 74 | for epoch in range(20): 75 | loss, accuracy = train(epoch, model, optimizer) 76 | 77 | print('Final epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy()) -------------------------------------------------------------------------------- /01-TF2.0-Overview/features.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/01-TF2.0-Overview/features.png -------------------------------------------------------------------------------- /02-AutoGraph/README.md: -------------------------------------------------------------------------------- 1 | # AutoGraph 2 | 3 | 4 | Compare static graph using @tf.function VS dynamic graph. 5 | 6 | AutoGraph helps you write complicated graph code using normal Python. Behind the scenes, AutoGraph automatically transforms your code into the equivalent TensorFlow graph code. 7 | 8 | Let's take a look at TensorFlow graphs and how they work. 9 | 10 | ```python 11 | ReLU_Layer = tf.keras.layers.Dense(100, input_shape=(784,), activation=tf.nn.relu) 12 | Logit_Layer = tf.keras.layers.Dense(10, input_shape=(100,)) 13 | 14 | # X and y are labels and inputs 15 | ``` 16 | 17 | 18 | 19 | **TensorFlow 1.0:** Operations are added as nodes to the computational graph and are not actually executed until we call session.run(), much like defining a function that doesn't run until it is called. 20 | 21 | ```python 22 | SGD_Trainer = tf.train.GradientDescentOptimizer(1e-2) 23 | 24 | inputs = tf.placeholder(tf.float32, shape=[None, 784]) 25 | labels = tf.placeholder(tf.int16, shape=[None, 10]) 26 | hidden = ReLU_Layer(inputs) 27 | logits = Logit_Layer(hidden) 28 | entropy = tf.nn.softmax_cross_entropy_with_logits( 29 | logits=logits, labels=labels) 30 | loss = tf.reduce_mean(entropy) 31 | train_step = SGD_Trainer.minimize(loss, 32 | var_list=ReLU_Layer.weights+Logit_Layer.weights) 33 | 34 | sess = tf.InteractiveSession() 35 | sess.run(tf.global_variables_initializer()) 36 | for step in range(1000): 37 | sess.run(train_step, feed_dict={inputs:X, labels:y}) 38 | ``` 39 | 40 | **TensorFlow 2.0:** Operations are executed directly and the computational graph is built on-the-fly. However, we can still write functions and pre-compile computational graphs from them like in TF 1.0 using the *@tf.function* decorator, allowing for faster execution. 41 | 42 | ```python 43 | SGD_Trainer = tf.optimizers.SGD(1e-2) 44 | 45 | @tf.function 46 | def loss_fn(inputs=X, labels=y): 47 | hidden = ReLU_Layer(inputs) 48 | logits = Logit_Layer(hidden) 49 | entropy = tf.nn.softmax_cross_entropy_with_logits( 50 | logits=logits, labels=labels) 51 | return tf.reduce_mean(entropy) 52 | 53 | for step in range(1000): 54 | SGD_Trainer.minimize(loss_fn, 55 | var_list=ReLU_Layer.weights+Logit_Layer.weights) 56 | ``` 57 | 58 | # HowTO 59 | 60 | ``` 61 | python main.py 62 | ``` 63 | 64 | and you will see some computation cost between static graph and dynamic graph. 
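Note that `main.py` calls both the plain `LSTMCell` and the `@tf.function`-wrapped version once before timing them. The first call to a `@tf.function` traces the Python function into a graph (and re-traces it for each new input signature), so that one-off tracing cost would otherwise be counted against the static-graph timing. A minimal sketch of this tracing behaviour, assuming TensorFlow 2.x, is:

```python
import tensorflow as tf

@tf.function
def square(x):
    # Plain Python side effects run only while the function is being traced.
    print('tracing for:', x)
    # tf.print becomes a graph op, so it runs on every call.
    tf.print('executing with:', x)
    return x * x

square(tf.constant(2.0))         # first call: traces, then executes the graph
square(tf.constant(3.0))         # same signature: reuses the cached graph
square(tf.constant([1.0, 2.0]))  # new shape: triggers a re-trace
```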
-------------------------------------------------------------------------------- /02-AutoGraph/graph.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/02-AutoGraph/graph.gif -------------------------------------------------------------------------------- /02-AutoGraph/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | import timeit 4 | 5 | 6 | 7 | 8 | 9 | cell = tf.keras.layers.LSTMCell(10) 10 | 11 | 12 | @tf.function 13 | def fn(input, state): 14 | """ 15 | use static graph to compute LSTM 16 | :param input: 17 | :param state: 18 | :return: 19 | """ 20 | 21 | return cell(input, state) 22 | 23 | 24 | 25 | input = tf.zeros([10, 10]) 26 | state = [tf.zeros([10, 10])] * 2 27 | 28 | # warmup 29 | cell(input, state) 30 | fn(input, state) 31 | 32 | 33 | dynamic_graph_time = timeit.timeit(lambda: cell(input, state), number=100) 34 | static_graph_time = timeit.timeit(lambda: fn(input, state), number=100) 35 | print('dynamic_graph_time:', dynamic_graph_time) 36 | print('static_graph_time:', static_graph_time) 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /03-Play-with-MNIST/README.md: -------------------------------------------------------------------------------- 1 | # Play with MNIST 2 | 3 | A detailed MNIST walk-through! 4 | 5 | Let's start by loading MNIST from **keras.datasets** and preprocessing to get rows of normalized 784-dimensional vectors. 6 | 7 | ```python 8 | import tensorflow as tf 9 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 10 | 11 | (xs, ys),_ = datasets.mnist.load_data() 12 | print('datasets:', xs.shape, ys.shape, xs.min(), xs.max()) 13 | 14 | xs = tf.convert_to_tensor(xs, dtype=tf.float32) / 255. 15 | db = tf.data.Dataset.from_tensor_slices((xs,ys)) 16 | db = db.batch(32).repeat(10) 17 | ``` 18 | 19 | 20 | 21 | Now let's build our network as a **keras.Sequential** model and instantiate a stochastic gradient descent optimizer from **keras.optimizers**. 22 | 23 | ```python 24 | network = Sequential([layers.Dense(256, activation='relu'), 25 | layers.Dense(256, activation='relu'), 26 | layers.Dense(256, activation='relu'), 27 | layers.Dense(10)]) 28 | network.build(input_shape=(None, 28*28)) 29 | network.summary() 30 | 31 | optimizer = optimizers.SGD(lr=0.01) 32 | acc_meter = metrics.Accuracy() 33 | ``` 34 | 35 | 36 | 37 | Finally, we can iterate through our dataset and train our model. 38 | In this example, we use **tf.GradientTape** to manually compute the gradients of the loss with respect to our network's trainable variables. GradientTape is just one of many ways to perform gradient steps in TensorFlow 2.0: 39 | 40 | - **Tf.GradientTape:** Manually computes loss gradients with respect to given variables by recording operations within its context manager. This is the most flexible way to perform optimizer steps, as we can work directly with gradients and don't need a pre-defined Keras model or loss function. 41 | - **Model.train():** Keras's built-in function for iterating through a dataset and fitting a Keras.Model on it. This is often the best choice for training a Keras model and comes with options for progress bar displays, validation splits, multiprocessing, and generator support. 
42 | - **Optimizer.minimize():** Computes and differentiates through a given loss function and performs a step to minimize it with gradient descent. This method is easy to implement, and can be conveniently slapped onto any existing computational graph to make a working optimization step. 43 | 44 | ```python 45 | for step, (x,y) in enumerate(db): 46 | 47 | with tf.GradientTape() as tape: 48 | # [b, 28, 28] => [b, 784] 49 | x = tf.reshape(x, (-1, 28*28)) 50 | # [b, 784] => [b, 10] 51 | out = network(x) 52 | # [b] => [b, 10] 53 | y_onehot = tf.one_hot(y, depth=10) 54 | # [b, 10] 55 | loss = tf.square(out-y_onehot) 56 | # [b] 57 | loss = tf.reduce_sum(loss) / 32 58 | 59 | 60 | acc_meter.update_state(tf.argmax(out, axis=1), y) 61 | 62 | grads = tape.gradient(loss, network.trainable_variables) 63 | optimizer.apply_gradients(zip(grads, network.trainable_variables)) 64 | 65 | 66 | if step % 200==0: 67 | 68 | print(step, 'loss:', float(loss), 'acc:', acc_meter.result().numpy()) 69 | acc_meter.reset_states() 70 | ``` 71 | 72 | # HowTO 73 | 74 | Try it for yourself! 75 | 76 | ``` 77 | python main.py 78 | ``` -------------------------------------------------------------------------------- /03-Play-with-MNIST/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 3 | 4 | 5 | 6 | 7 | (xs, ys),_ = datasets.mnist.load_data() 8 | print('datasets:', xs.shape, ys.shape, xs.min(), xs.max()) 9 | 10 | 11 | xs = tf.convert_to_tensor(xs, dtype=tf.float32) / 255. 12 | db = tf.data.Dataset.from_tensor_slices((xs,ys)) 13 | db = db.batch(32).repeat(10) 14 | 15 | 16 | network = Sequential([layers.Dense(256, activation='relu'), 17 | layers.Dense(256, activation='relu'), 18 | layers.Dense(256, activation='relu'), 19 | layers.Dense(10)]) 20 | network.build(input_shape=(None, 28*28)) 21 | network.summary() 22 | 23 | optimizer = optimizers.SGD(lr=0.01) 24 | acc_meter = metrics.Accuracy() 25 | 26 | for step, (x,y) in enumerate(db): 27 | 28 | with tf.GradientTape() as tape: 29 | # [b, 28, 28] => [b, 784] 30 | x = tf.reshape(x, (-1, 28*28)) 31 | # [b, 784] => [b, 10] 32 | out = network(x) 33 | # [b] => [b, 10] 34 | y_onehot = tf.one_hot(y, depth=10) 35 | # [b, 10] 36 | loss = tf.square(out-y_onehot) 37 | # [b] 38 | loss = tf.reduce_sum(loss) / 32 39 | 40 | 41 | acc_meter.update_state(tf.argmax(out, axis=1), y) 42 | 43 | grads = tape.gradient(loss, network.trainable_variables) 44 | optimizer.apply_gradients(zip(grads, network.trainable_variables)) 45 | 46 | 47 | if step % 200==0: 48 | 49 | print(step, 'loss:', float(loss), 'acc:', acc_meter.result().numpy()) 50 | acc_meter.reset_states() 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /03-Play-with-MNIST/mnist.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/03-Play-with-MNIST/mnist.gif -------------------------------------------------------------------------------- /04-Linear-Regression/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Linear Regression 3 | 4 | Linear regression for Boston housing price prediction. 
5 | 6 | Let's implement a linear regressor with tf.keras.layers.Layer 7 | 8 | ![](linear_regression.png) 9 | 10 | # HowTO 11 | 12 | ``` 13 | python main.py 14 | ``` -------------------------------------------------------------------------------- /04-Linear-Regression/linear_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/04-Linear-Regression/linear_regression.png -------------------------------------------------------------------------------- /04-Linear-Regression/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from tensorflow import keras 4 | import os 5 | 6 | 7 | class Regressor(keras.layers.Layer): 8 | 9 | def __init__(self): 10 | super(Regressor, self).__init__() 11 | 12 | # here must specify shape instead of tensor ! 13 | # name here is meanless ! 14 | # [dim_in, dim_out] 15 | self.w = self.add_variable('meanless-name', [13, 1]) 16 | # [dim_out] 17 | self.b = self.add_variable('meanless-name', [1]) 18 | 19 | print(self.w.shape, self.b.shape) 20 | print(type(self.w), tf.is_tensor(self.w), self.w.name) 21 | print(type(self.b), tf.is_tensor(self.b), self.b.name) 22 | 23 | 24 | def call(self, x): 25 | 26 | x = tf.matmul(x, self.w) + self.b 27 | 28 | return x 29 | 30 | def main(): 31 | 32 | tf.random.set_seed(22) 33 | np.random.seed(22) 34 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 35 | assert tf.__version__.startswith('2.') 36 | 37 | 38 | (x_train, y_train), (x_val, y_val) = keras.datasets.boston_housing.load_data() 39 | # 40 | x_train, x_val = x_train.astype(np.float32), x_val.astype(np.float32) 41 | # (404, 13) (404,) (102, 13) (102,) 42 | print(x_train.shape, y_train.shape, x_val.shape, y_val.shape) 43 | # Here has two mis-leading issues: 44 | # 1. (x_train, y_train) cant be written as [x_train, y_train] 45 | # 2. 
46 | db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(64) 47 | db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(102) 48 | 49 | 50 | model = Regressor() 51 | criteon = keras.losses.MeanSquaredError() 52 | optimizer = keras.optimizers.Adam(learning_rate=1e-2) 53 | 54 | for epoch in range(200): 55 | 56 | for step, (x, y) in enumerate(db_train): 57 | 58 | with tf.GradientTape() as tape: 59 | # [b, 1] 60 | logits = model(x) 61 | # [b] 62 | logits = tf.squeeze(logits, axis=1) 63 | # [b] vs [b] 64 | loss = criteon(y, logits) 65 | 66 | grads = tape.gradient(loss, model.trainable_variables) 67 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 68 | 69 | print(epoch, 'loss:', loss.numpy()) 70 | 71 | 72 | if epoch % 10 == 0: 73 | 74 | for x, y in db_val: 75 | # [b, 1] 76 | logits = model(x) 77 | # [b] 78 | logits = tf.squeeze(logits, axis=1) 79 | # [b] vs [b] 80 | loss = criteon(y, logits) 81 | 82 | print(epoch, 'val loss:', loss.numpy()) 83 | 84 | 85 | 86 | 87 | 88 | if __name__ == '__main__': 89 | main() -------------------------------------------------------------------------------- /05-FashionMNIST/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | 3 | -------------------------------------------------------------------------------- /05-FashionMNIST/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Fashion-MNIST 3 | 4 | Fashion-MNIST image classification with a fully-connected network subclassed from tf.keras.Model 5 | 6 | Fashion-MNIST is one of the datasets available in tf.keras.datasets: 7 | ```python 8 | from tensorflow.keras.datasets import fashion_mnist 9 | ``` 10 | 11 | ![](fashion_mnist.png) 12 | 13 | # HowTO 14 | 15 | ``` 16 | python mnist_fit.py 17 | ``` -------------------------------------------------------------------------------- /05-FashionMNIST/fashion_mnist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/05-FashionMNIST/fashion_mnist.png -------------------------------------------------------------------------------- /05-FashionMNIST/mnist_Seqential_gradient.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | from tensorflow.keras import layers, optimizers, datasets 5 | 6 | def prepare_mnist_features_and_labels(x, y): 7 | x = tf.cast(x, tf.float32) / 255.0 8 | y = tf.cast(y, tf.int64) 9 | return x, y 10 | 11 | def mnist_dataset(): 12 | (x, y), _ = datasets.fashion_mnist.load_data() 13 | ds = tf.data.Dataset.from_tensor_slices((x, y)) 14 | ds = ds.map(prepare_mnist_features_and_labels) 15 | ds = ds.take(20000).shuffle(20000).batch(100) 16 | return ds 17 | 18 | 19 | 20 | 21 | 22 | 23 | def compute_loss(logits, labels): 24 | return tf.reduce_mean( 25 | tf.nn.sparse_softmax_cross_entropy_with_logits( 26 | logits=logits, labels=labels)) 27 | 28 | 29 | def compute_accuracy(logits, labels): 30 | predictions = tf.argmax(logits, axis=1) 31 | return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32)) 32 | 33 | 34 | def train_one_step(model, optimizer, x, y): 35 | 36 | with tf.GradientTape() as tape: 37 | 38 | logits = model(x) 39 | loss = compute_loss(logits, y) 40 | 41 | # compute gradient 42 | grads = tape.gradient(loss, model.trainable_variables) 43 
| # update to weights 44 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 45 | 46 | accuracy = compute_accuracy(logits, y) 47 | 48 | # loss and accuracy is scalar tensor 49 | return loss, accuracy 50 | 51 | 52 | def train(epoch, model, optimizer): 53 | 54 | train_ds = mnist_dataset() 55 | loss = 0.0 56 | accuracy = 0.0 57 | for step, (x, y) in enumerate(train_ds): 58 | loss, accuracy = train_one_step(model, optimizer, x, y) 59 | if step%500==0: 60 | print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy()) 61 | return loss, accuracy 62 | 63 | 64 | 65 | def main(): 66 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # or any {'0', '1', '2'} 67 | 68 | train_dataset = mnist_dataset() 69 | 70 | model = keras.Sequential([ 71 | layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)), 72 | layers.Dense(200, activation='relu'), 73 | layers.Dense(200, activation='relu'), 74 | layers.Dense(10)]) 75 | 76 | 77 | optimizer = optimizers.Adam() 78 | 79 | for epoch in range(20): 80 | loss, accuracy = train(epoch, model, optimizer) 81 | print('Final epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy()) 82 | 83 | 84 | if __name__ == '__main__': 85 | main() -------------------------------------------------------------------------------- /05-FashionMNIST/mnist_custommodel.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | from tensorflow.keras import layers, optimizers, datasets 5 | 6 | def prepare_mnist_features_and_labels(x, y): 7 | x = tf.cast(x, tf.float32) / 255.0 8 | y = tf.cast(y, tf.int64) 9 | return x, y 10 | 11 | def mnist_dataset(): 12 | 13 | (x, y), (x_val, y_val) = datasets.fashion_mnist.load_data() 14 | print('x/y shape:', x.shape, y.shape) 15 | y = tf.one_hot(y, depth=10) 16 | y_val = tf.one_hot(y_val, depth=10) 17 | 18 | ds = tf.data.Dataset.from_tensor_slices((x, y)) 19 | ds = ds.map(prepare_mnist_features_and_labels) 20 | ds = ds.shuffle(60000).batch(100) 21 | 22 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 23 | ds_val = ds_val.map(prepare_mnist_features_and_labels) 24 | ds_val = ds_val.shuffle(10000).batch(100) 25 | 26 | sample = next(iter(ds)) 27 | print('sample:', sample[0].shape, sample[1].shape) 28 | 29 | return ds,ds_val 30 | 31 | 32 | 33 | 34 | 35 | 36 | class MyModel(keras.Model): 37 | 38 | def __init__(self): 39 | super(MyModel, self).__init__() 40 | 41 | # self.model = keras.Sequential([ 42 | # layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)), 43 | # layers.Dense(100, activation='relu'), 44 | # layers.Dense(100, activation='relu'), 45 | # layers.Dense(10)]) 46 | 47 | self.layer1 = layers.Dense(200, activation=tf.nn.relu) 48 | self.layer2 = layers.Dense(200, activation=tf.nn.relu) 49 | # self.layer3 = layers.Dense(200, activation=tf.nn.relu) 50 | self.layer4 = layers.Dense(10) 51 | 52 | def call(self, x, training=False): 53 | 54 | x = tf.reshape(x, [-1, 28*28]) 55 | x = self.layer1(x) 56 | x = self.layer2(x) 57 | # x = self.layer3(x) 58 | x = self.layer4(x) 59 | 60 | return x 61 | 62 | 63 | def main(): 64 | 65 | tf.random.set_seed(22) 66 | 67 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'} 68 | 69 | train_dataset, val_dataset = mnist_dataset() 70 | 71 | model = MyModel() 72 | model.compile(optimizer=optimizers.Adam(1e-3), 73 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 74 | metrics=['accuracy']) 75 | 76 | 77 | 78 | model.fit(train_dataset.repeat(), 
epochs=30, steps_per_epoch=500, verbose=1, 79 | validation_data=val_dataset.repeat(), 80 | validation_steps=2) 81 | 82 | 83 | if __name__ == '__main__': 84 | main() -------------------------------------------------------------------------------- /05-FashionMNIST/mnist_fit.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | from tensorflow.keras import layers, optimizers, datasets 5 | 6 | 7 | 8 | def prepare_mnist_features_and_labels(x, y): 9 | x = tf.cast(x, tf.float32) / 255.0 10 | y = tf.cast(y, tf.int64) 11 | 12 | return x, y 13 | 14 | def mnist_dataset(): 15 | (x, y), (x_val, y_val) = datasets.fashion_mnist.load_data() 16 | print('x/y shape:', x.shape, y.shape) 17 | y = tf.one_hot(y, depth=10) 18 | y_val = tf.one_hot(y_val, depth=10) 19 | ds = tf.data.Dataset.from_tensor_slices((x, y)) 20 | ds = ds.map(prepare_mnist_features_and_labels) 21 | ds = ds.shuffle(60000).batch(100) 22 | 23 | 24 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 25 | ds_val = ds_val.map(prepare_mnist_features_and_labels) 26 | ds_val = ds_val.shuffle(10000).batch(100) 27 | 28 | return ds,ds_val 29 | 30 | 31 | 32 | 33 | 34 | 35 | def main(): 36 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'} 37 | 38 | train_dataset, val_dataset = mnist_dataset() 39 | 40 | model = keras.Sequential([ 41 | layers.Reshape(target_shape=(28 * 28,), input_shape=(28, 28)), 42 | layers.Dense(200, activation='relu'), 43 | layers.Dense(200, activation='relu'), 44 | layers.Dense(200, activation='relu'), 45 | layers.Dense(10)]) 46 | # no need to use compile if you have no loss/optimizer/metrics involved here. 47 | model.compile(optimizer=optimizers.Adam(0.001), 48 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 49 | metrics=['accuracy']) 50 | 51 | model.fit(train_dataset.repeat(), epochs=30, steps_per_epoch=500, 52 | validation_data=val_dataset.repeat(), 53 | validation_steps=2 54 | ) 55 | 56 | 57 | if __name__ == '__main__': 58 | main() -------------------------------------------------------------------------------- /05-FashionMNIST/mnist_matmul.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | from tensorflow import keras 4 | from tensorflow.keras import layers, optimizers, datasets 5 | 6 | def prepare_mnist_features_and_labels(x, y): 7 | x = tf.cast(x, tf.float32) / 255.0 8 | y = tf.cast(y, tf.int64) 9 | return x, y 10 | 11 | 12 | 13 | def mnist_dataset(): 14 | (x, y), _ = datasets.fashion_mnist.load_data() 15 | 16 | print('x/y shape:', x.shape, y.shape) 17 | 18 | ds = tf.data.Dataset.from_tensor_slices((x, y)) 19 | ds = ds.map(prepare_mnist_features_and_labels) 20 | ds = ds.take(20000).shuffle(20000).batch(100) 21 | return ds 22 | 23 | 24 | 25 | 26 | 27 | 28 | def compute_loss(logits, labels): 29 | return tf.reduce_mean( 30 | tf.nn.sparse_softmax_cross_entropy_with_logits( 31 | logits=logits, labels=labels)) 32 | 33 | 34 | def compute_accuracy(logits, labels): 35 | predictions = tf.argmax(logits, axis=1) 36 | return tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32)) 37 | 38 | 39 | def train_one_step(model, optimizer, x, y): 40 | 41 | with tf.GradientTape() as tape: 42 | 43 | logits = model(x) 44 | loss = compute_loss(logits, y) 45 | 46 | # compute gradient 47 | grads = tape.gradient(loss, model.trainable_variables) 48 | # update to weights 49 | optimizer.apply_gradients(zip(grads, 
model.trainable_variables)) 50 | 51 | accuracy = compute_accuracy(logits, y) 52 | 53 | # loss and accuracy is scalar tensor 54 | return loss, accuracy 55 | 56 | 57 | def train(epoch, model, optimizer): 58 | 59 | train_ds = mnist_dataset() 60 | loss = 0.0 61 | accuracy = 0.0 62 | 63 | for step, (x, y) in enumerate(train_ds): 64 | 65 | loss, accuracy = train_one_step(model, optimizer, x, y) 66 | 67 | if step%500==0: 68 | print('epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy()) 69 | return loss, accuracy 70 | 71 | 72 | 73 | 74 | 75 | class MyLayer(layers.Layer): 76 | 77 | 78 | def __init__(self, units): 79 | """ 80 | 81 | :param units: [input_dim, h1_dim,...,hn_dim, output_dim] 82 | """ 83 | super(MyLayer, self).__init__() 84 | 85 | 86 | for i in range(1, len(units)): 87 | # w: [input_dim, output_dim] 88 | self.add_variable(name='kernel%d'%i, shape=[units[i-1], units[i]]) 89 | # b: [output_dim] 90 | self.add_variable(name='bias%d'%i,shape=[units[i]]) 91 | 92 | 93 | 94 | def call(self, x): 95 | """ 96 | 97 | :param x: [b, input_dim] 98 | :return: 99 | """ 100 | num = len(self.trainable_variables) 101 | 102 | x = tf.reshape(x, [-1, 28*28]) 103 | 104 | for i in range(0, num, 2): 105 | 106 | x = tf.matmul(x, self.trainable_variables[i]) + self.trainable_variables[i+1] 107 | 108 | return x 109 | 110 | 111 | 112 | def main(): 113 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'} 114 | 115 | train_dataset = mnist_dataset() 116 | 117 | model = MyLayer([28*28, 200, 200, 10]) 118 | for p in model.trainable_variables: 119 | print(p.name, p.shape) 120 | optimizer = optimizers.Adam() 121 | 122 | for epoch in range(20): 123 | loss, accuracy = train(epoch, model, optimizer) 124 | 125 | print('Final epoch', epoch, ': loss', loss.numpy(), '; accuracy', accuracy.numpy()) 126 | 127 | 128 | if __name__ == '__main__': 129 | main() -------------------------------------------------------------------------------- /06-CIFAR-VGG/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ 3 | -------------------------------------------------------------------------------- /06-CIFAR-VGG/README.md: -------------------------------------------------------------------------------- 1 | 2 | # CIFAR10-VGG16 3 | 4 | Fitting a VGG-16 network on CIFAR-10 for image classification! 5 | We use gradient clipping for faster convergence. 6 | 7 | A complete implementation of VGG-16 is available in network.py 8 | 9 | ![](vgg16.png) 10 | 11 | # HowTO 12 | CIFAR-10 will be downloaded automatically if it is not found. 13 | 14 | Simply run: 15 | ``` 16 | python main.py 17 | ``` 18 | -------------------------------------------------------------------------------- /06-CIFAR-VGG/vgg16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/06-CIFAR-VGG/vgg16.png -------------------------------------------------------------------------------- /07-Inception/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ 3 | .ipynb_checkpoints 4 | -------------------------------------------------------------------------------- /07-Inception/README.md: -------------------------------------------------------------------------------- 1 | # Inception Network 2 | 3 | Here we train a tf.keras implementation of InceptionV3 on MNIST. 
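The building block behind Inception-style networks is a module that runs several convolution branches with different receptive fields in parallel and concatenates their outputs along the channel axis. The sketch below illustrates that pattern with tf.keras; it is a simplified stand-in, not the network defined in `main.py`, and the filter counts are arbitrary:

```python
import tensorflow as tf
from tensorflow.keras import layers

def inception_block(x, filters):
    # Four parallel branches: 1x1, 1x1->3x3, 1x1->5x5, and 3x3 max-pool -> 1x1.
    b1 = layers.Conv2D(filters, 1, padding='same', activation='relu')(x)
    b2 = layers.Conv2D(filters, 1, padding='same', activation='relu')(x)
    b2 = layers.Conv2D(filters, 3, padding='same', activation='relu')(b2)
    b3 = layers.Conv2D(filters, 1, padding='same', activation='relu')(x)
    b3 = layers.Conv2D(filters, 5, padding='same', activation='relu')(b3)
    b4 = layers.MaxPool2D(3, strides=1, padding='same')(x)
    b4 = layers.Conv2D(filters, 1, padding='same', activation='relu')(b4)
    # Concatenate branch outputs along the channel dimension.
    return layers.Concatenate()([b1, b2, b3, b4])

inputs = tf.keras.Input(shape=(28, 28, 1))
x = inception_block(inputs, 16)
x = layers.GlobalAveragePooling2D()(x)
outputs = layers.Dense(10)(x)
model = tf.keras.Model(inputs, outputs)
model.summary()
```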
4 | 5 | Includes cell dividers for running with IPython! 6 | 7 | ![](inception.png) 8 | 9 | # HowTO 10 | 11 | ``` 12 | python main.py 13 | ``` -------------------------------------------------------------------------------- /07-Inception/inception.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/07-Inception/inception.png -------------------------------------------------------------------------------- /08-ResNet/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | -------------------------------------------------------------------------------- /08-ResNet/README.md: -------------------------------------------------------------------------------- 1 | # ResNet 2 | 3 | Here we train a tf.keras implementation of ResNet-18 on Fashion-MNIST. 4 | 5 | Includes cell dividers for running with IPython! 6 | 7 | ![](resnet.jpeg) 8 | 9 | # HowTO 10 | 11 | ``` 12 | python main.py 13 | ``` -------------------------------------------------------------------------------- /08-ResNet/resnet.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/08-ResNet/resnet.jpeg -------------------------------------------------------------------------------- /09-RNN-Sentiment-Analysis/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | nohup.out 3 | -------------------------------------------------------------------------------- /09-RNN-Sentiment-Analysis/README.md: -------------------------------------------------------------------------------- 1 | # Sentiment Analysis 2 | 3 | RNN for Sentiment Analysis! 4 | 5 | In this lesson, we implement a stacked Long-Short Term Memory (LSTM) recurrent neural network for Sentiment Analysis on the IMDB text dataset. 6 | 7 | ![](imdb.png) 8 | 9 | # HowTO 10 | IMDB text data will automatically be downloaded from Google Cloud, so make sure you're connected to the internet and able to access Google services. 
11 | 12 | ``` 13 | python main.py 14 | ``` -------------------------------------------------------------------------------- /09-RNN-Sentiment-Analysis/imdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/09-RNN-Sentiment-Analysis/imdb.png -------------------------------------------------------------------------------- /09-RNN-Sentiment-Analysis/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | import numpy as np 4 | from tensorflow import keras 5 | 6 | 7 | # In[16]: 8 | 9 | 10 | tf.random.set_seed(22) 11 | np.random.seed(22) 12 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 13 | assert tf.__version__.startswith('2.') 14 | 15 | 16 | 17 | # fix random seed for reproducibility 18 | np.random.seed(7) 19 | # load the dataset but only keep the top n words, zero the rest 20 | top_words = 10000 21 | # truncate and pad input sequences 22 | max_review_length = 80 23 | (X_train, y_train), (X_test, y_test) = keras.datasets.imdb.load_data(num_words=top_words) 24 | # X_train = tf.convert_to_tensor(X_train) 25 | # y_train = tf.one_hot(y_train, depth=2) 26 | print('Pad sequences (samples x time)') 27 | x_train = keras.preprocessing.sequence.pad_sequences(X_train, maxlen=max_review_length) 28 | x_test = keras.preprocessing.sequence.pad_sequences(X_test, maxlen=max_review_length) 29 | print('x_train shape:', x_train.shape) 30 | print('x_test shape:', x_test.shape) 31 | 32 | 33 | class RNN(keras.Model): 34 | 35 | def __init__(self, units, num_classes, num_layers): 36 | super(RNN, self).__init__() 37 | 38 | 39 | # self.cells = [keras.layers.LSTMCell(units) for _ in range(num_layers)] 40 | # 41 | # self.rnn = keras.layers.RNN(self.cells, unroll=True) 42 | self.rnn = keras.layers.LSTM(units, return_sequences=True) 43 | self.rnn2 = keras.layers.LSTM(units) 44 | 45 | # self.cells = (keras.layers.LSTMCell(units) for _ in range(num_layers)) 46 | # # 47 | # self.rnn = keras.layers.RNN(self.cells, return_sequences=True, return_state=True) 48 | # self.rnn = keras.layers.LSTM(units, unroll=True) 49 | # self.rnn = keras.layers.StackedRNNCells(self.cells) 50 | 51 | 52 | # have 1000 words totally, every word will be embedding into 100 length vector 53 | # the max sentence lenght is 80 words 54 | self.embedding = keras.layers.Embedding(top_words, 100, input_length=max_review_length) 55 | self.fc = keras.layers.Dense(1) 56 | 57 | def call(self, inputs, training=None, mask=None): 58 | 59 | # print('x', inputs.shape) 60 | # [b, sentence len] => [b, sentence len, word embedding] 61 | x = self.embedding(inputs) 62 | # print('embedding', x.shape) 63 | x = self.rnn(x) 64 | x = self.rnn2(x) 65 | # print('rnn', x.shape) 66 | 67 | x = self.fc(x) 68 | print(x.shape) 69 | 70 | return x 71 | 72 | 73 | def main(): 74 | 75 | units = 64 76 | num_classes = 2 77 | batch_size = 32 78 | epochs = 20 79 | 80 | model = RNN(units, num_classes, num_layers=2) 81 | 82 | 83 | model.compile(optimizer=keras.optimizers.Adam(0.001), 84 | loss=keras.losses.BinaryCrossentropy(from_logits=True), 85 | metrics=['accuracy']) 86 | 87 | # train 88 | model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, 89 | validation_data=(x_test, y_test), verbose=1) 90 | 91 | # evaluate on test set 92 | scores = model.evaluate(x_test, y_test, batch_size, verbose=1) 93 | print("Final test loss and accuracy :", scores) 94 | 95 | 
96 | 97 | 98 | if __name__ == '__main__': 99 | main() -------------------------------------------------------------------------------- /10-ColorBot/.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | __pycache__ 3 | -------------------------------------------------------------------------------- /10-ColorBot/README.md: -------------------------------------------------------------------------------- 1 | # Color Robot 2 | 3 | A simple Color Robot that can visualize colors when given color names. 4 | 5 | We implement a stacked LSTM that generates RGB colors from color name text data. 6 | 7 | # HowTO 8 | 9 | The code will download the ColorBot dataset automatically. 10 | 11 | - Step 1: train for 40 epoches. 12 | ``` 13 | python main.py 14 | ``` 15 | 16 | - Step 2: Type in a color name, like `red`, `blue`, or `purple`, and hit `Enter`! 17 | 18 | ![color](./shot.png) 19 | 20 | - Step 3: Press `Enter` to exit. -------------------------------------------------------------------------------- /10-ColorBot/blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/10-ColorBot/blue.png -------------------------------------------------------------------------------- /10-ColorBot/green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/10-ColorBot/green.png -------------------------------------------------------------------------------- /10-ColorBot/purple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/10-ColorBot/purple.png -------------------------------------------------------------------------------- /10-ColorBot/red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/10-ColorBot/red.png -------------------------------------------------------------------------------- /10-ColorBot/shot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/10-ColorBot/shot.png -------------------------------------------------------------------------------- /10-ColorBot/utils.py: -------------------------------------------------------------------------------- 1 | import os, six, time 2 | import tensorflow as tf 3 | import numpy as np 4 | from tensorflow import keras 5 | import urllib 6 | 7 | 8 | def parse(line): 9 | """ 10 | Parse a line from the colors dataset. 11 | """ 12 | 13 | # Each line of the dataset is comma-separated and formatted as 14 | # color_name, r, g, b 15 | # so `items` is a list [color_name, r, g, b]. 16 | items = tf.string_split([line], ",").values 17 | rgb = tf.strings.to_number(items[1:], out_type=tf.float32) / 255. 18 | # Represent the color name as a one-hot encoded character sequence. 19 | color_name = items[0] 20 | chars = tf.one_hot(tf.io.decode_raw(color_name, tf.uint8), depth=256) 21 | # The sequence length is needed by our RNN. 
22 | length = tf.cast(tf.shape(chars)[0], dtype=tf.int64) 23 | return rgb, chars, length 24 | 25 | 26 | def maybe_download(filename, work_directory, source_url): 27 | """ 28 | Download the data from source url, unless it's already here. 29 | Args: 30 | filename: string, name of the file in the directory. 31 | work_directory: string, path to working directory. 32 | source_url: url to download from if file doesn't exist. 33 | Returns: 34 | Path to resulting file. 35 | """ 36 | if not tf.io.gfile.exists(work_directory): 37 | tf.io.gfile.makedirs(work_directory) 38 | filepath = os.path.join(work_directory, filename) 39 | if not tf.io.gfile.exists(filepath): 40 | temp_file_name, _ = urllib.request.urlretrieve(source_url) 41 | tf.io.gfile.copy(temp_file_name, filepath) 42 | with tf.io.gfile.GFile(filepath) as f: 43 | size = f.size() 44 | print("Successfully downloaded", filename, size, "bytes.") 45 | return filepath 46 | 47 | 48 | def load_dataset(data_dir, url, batch_size): 49 | """Loads the colors data at path into a PaddedDataset.""" 50 | 51 | # Downloads data at url into data_dir/basename(url). The dataset has a header 52 | # row (color_name, r, g, b) followed by comma-separated lines. 53 | path = maybe_download(os.path.basename(url), data_dir, url) 54 | 55 | # This chain of commands loads our data by: 56 | # 1. skipping the header; (.skip(1)) 57 | # 2. parsing the subsequent lines; (.map(parse)) 58 | # 3. shuffling the data; (.shuffle(...)) 59 | # 3. grouping the data into padded batches (.padded_batch(...)). 60 | dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle( 61 | buffer_size=10000).padded_batch( 62 | batch_size, padded_shapes=([None], [None, None], [])) 63 | return dataset -------------------------------------------------------------------------------- /11-AE/README.md: -------------------------------------------------------------------------------- 1 | # Auto-Encoders 2 | 3 | In this lesson, we implement a simple dense autoencoder to reconstruct MNIST images. 
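An autoencoder pairs an encoder that compresses each 28x28 image into a low-dimensional code with a decoder that reconstructs the pixels from that code, trained end-to-end on a reconstruction loss. The snippet below is a minimal sketch of that structure; the layer sizes and `latent_dim` are illustrative assumptions, not the exact configuration in `main.py`:

```python
import tensorflow as tf
from tensorflow.keras import layers, Sequential

latent_dim = 20  # assumed latent size, for illustration only

# Encoder: 784 pixels -> latent code; decoder: latent code -> 784 pixels in [0, 1].
encoder = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(latent_dim)])
decoder = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dense(28 * 28, activation='sigmoid')])

x = tf.random.uniform([32, 28 * 28])  # stand-in for a batch of flattened MNIST images
x_hat = decoder(encoder(x))           # reconstruction
loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(x, x_hat))
```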
4 | 5 | ![](autoencoder.png) 6 | 7 | # HowTo 8 | 9 | ``` 10 | python main.py 11 | ``` -------------------------------------------------------------------------------- /11-AE/autoencoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/11-AE/autoencoder.png -------------------------------------------------------------------------------- /11-AE/images/vae_reconstructed_epoch_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/11-AE/images/vae_reconstructed_epoch_1.png -------------------------------------------------------------------------------- /11-AE/images/vae_reconstructed_epoch_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/11-AE/images/vae_reconstructed_epoch_2.png -------------------------------------------------------------------------------- /11-AE/images/vae_reconstructed_epoch_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/11-AE/images/vae_reconstructed_epoch_3.png -------------------------------------------------------------------------------- /11-AE/images/vae_reconstructed_epoch_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/11-AE/images/vae_reconstructed_epoch_4.png -------------------------------------------------------------------------------- /11-AE/images/vae_reconstructed_epoch_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/11-AE/images/vae_reconstructed_epoch_5.png -------------------------------------------------------------------------------- /12-VAE/README.md: -------------------------------------------------------------------------------- 1 | # Variational Auto-Encoders 2 | 3 | In this lesson, we implement a variational autoencoder to reconstruct and generate new MNIST images. 
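Compared with the plain autoencoder of lesson 11, a VAE adds two ingredients: the reparameterization trick, which keeps the latent sampling step differentiable, and a KL-divergence penalty that pulls the approximate posterior towards a standard-normal prior. A minimal sketch of both pieces (batch size and latent dimension here are arbitrary, and this is not the exact code in `main.py`):

```python
import tensorflow as tf

def reparameterize(mu, log_var):
    # z = mu + sigma * eps, with eps ~ N(0, I): gradients flow through mu and log_var.
    eps = tf.random.normal(tf.shape(mu))
    return mu + tf.exp(0.5 * log_var) * eps

def kl_divergence(mu, log_var):
    # KL(N(mu, sigma^2) || N(0, I)) summed over the latent dimensions.
    return -0.5 * tf.reduce_sum(1.0 + log_var - tf.square(mu) - tf.exp(log_var), axis=-1)

mu = tf.zeros([4, 10])             # stand-ins for the encoder outputs
log_var = tf.zeros([4, 10])
z = reparameterize(mu, log_var)    # differentiable latent sample
print(kl_divergence(mu, log_var))  # all zeros: the posterior equals the prior here
```

The total training loss is then the reconstruction term plus this KL term, averaged over the batch.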
4 | 5 | ![](variational_autoencoder.png) 6 | 7 | # HowTo 8 | 9 | ``` 10 | python main.py 11 | ``` -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_1.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_2.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_3.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_4.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_5.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_6.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_7.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_8.png -------------------------------------------------------------------------------- /12-VAE/images/vae_reconstructed_epoch_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_reconstructed_epoch_9.png -------------------------------------------------------------------------------- 
/12-VAE/images/vae_sampled_epoch_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_1.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_2.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_3.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_4.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_5.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_6.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_7.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_8.png -------------------------------------------------------------------------------- /12-VAE/images/vae_sampled_epoch_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/images/vae_sampled_epoch_9.png -------------------------------------------------------------------------------- /12-VAE/variational_autoencoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/12-VAE/variational_autoencoder.png 
-------------------------------------------------------------------------------- /13-DCGAN/.gitignore: -------------------------------------------------------------------------------- 1 | images 2 | __pycache__ 3 | -------------------------------------------------------------------------------- /13-DCGAN/README.md: -------------------------------------------------------------------------------- 1 | # DCGAN 2 | 3 | In this lesson, we implement the Deep Convolutional Generative Adversarial Network (DCGAN) for adversarial image generation. 4 | 5 | ![](dcgan.png) 6 | 7 | # HOWTO 8 | 9 | ``` 10 | python main.py 11 | ``` -------------------------------------------------------------------------------- /13-DCGAN/dcgan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/13-DCGAN/dcgan.png -------------------------------------------------------------------------------- /13-DCGAN/gan.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | 4 | 5 | class Generator(keras.Model): 6 | 7 | def __init__(self): 8 | super(Generator, self).__init__() 9 | 10 | self.n_f = 512 11 | self.n_k = 4 12 | 13 | # input z vector is [None, 100] 14 | self.dense1 = keras.layers.Dense(3 * 3 * self.n_f) 15 | self.conv2 = keras.layers.Conv2DTranspose(self.n_f // 2, 3, 2, 'valid') 16 | self.bn2 = keras.layers.BatchNormalization() 17 | self.conv3 = keras.layers.Conv2DTranspose(self.n_f // 4, self.n_k, 2, 'same') 18 | self.bn3 = keras.layers.BatchNormalization() 19 | self.conv4 = keras.layers.Conv2DTranspose(1, self.n_k, 2, 'same') 20 | return 21 | 22 | def call(self, inputs, training=None): 23 | # [b, 100] => [b, 3, 3, 512] 24 | x = tf.nn.leaky_relu(tf.reshape(self.dense1(inputs), shape=[-1, 3, 3, self.n_f])) 25 | x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training)) 26 | x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training)) 27 | x = tf.tanh(self.conv4(x)) 28 | return x 29 | 30 | 31 | class Discriminator(keras.Model): 32 | 33 | def __init__(self): 34 | super(Discriminator, self).__init__() 35 | 36 | self.n_f = 64 37 | self.n_k = 4 38 | 39 | # input image is [-1, 28, 28, 1] 40 | self.conv1 = keras.layers.Conv2D(self.n_f, self.n_k, 2, 'same') 41 | self.conv2 = keras.layers.Conv2D(self.n_f * 2, self.n_k, 2, 'same') 42 | self.bn2 = keras.layers.BatchNormalization() 43 | self.conv3 = keras.layers.Conv2D(self.n_f * 4, self.n_k, 2, 'same') 44 | self.bn3 = keras.layers.BatchNormalization() 45 | self.flatten4 = keras.layers.Flatten() 46 | self.dense4 = keras.layers.Dense(1) 47 | return 48 | 49 | def call(self, inputs, training=None): 50 | x = tf.nn.leaky_relu(self.conv1(inputs)) 51 | x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training)) 52 | x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training)) 53 | x = self.dense4(self.flatten4(x)) 54 | return x -------------------------------------------------------------------------------- /14-Pixel2Pixel/.gitignore: -------------------------------------------------------------------------------- 1 | images 2 | facade* 3 | __pycache__ 4 | -------------------------------------------------------------------------------- /14-Pixel2Pixel/README.md: -------------------------------------------------------------------------------- 1 | # Pixel2Pixel Model 2 | 3 | In this lesson, we implement a convolutional Pixel2Pixel 
model and train it on the Facades dataset. 4 | 5 | ![](pix2pix.jpg) 6 | 7 | # HowTo 8 | This model may require a lot of GPU memory. 9 | 10 | ``` 11 | cd the project dir 12 | mkdir images 13 | python main.py 14 | ``` 15 | -------------------------------------------------------------------------------- /14-Pixel2Pixel/pix2pix.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/14-Pixel2Pixel/pix2pix.jpg -------------------------------------------------------------------------------- /15-CycleGAN/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | horse2zebra.zip 3 | horse2zebra 4 | -------------------------------------------------------------------------------- /15-CycleGAN/README.md: -------------------------------------------------------------------------------- 1 | # Cycle-GAN 2 | 3 | In this lesson, we implement CycleGAN and train it on the horse2zebra dataset. 4 | 5 | ![](cyclegan.jpg) 6 | 7 | # HowTo 8 | 9 | The code will download the horse2zebra dataset automatically. 10 | 11 | ``` 12 | python main.py 13 | ``` -------------------------------------------------------------------------------- /15-CycleGAN/cyclegan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/15-CycleGAN/cyclegan.jpg -------------------------------------------------------------------------------- /15-CycleGAN/images/generated_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/15-CycleGAN/images/generated_0.png -------------------------------------------------------------------------------- /15-CycleGAN/images/generated_40.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/15-CycleGAN/images/generated_40.png -------------------------------------------------------------------------------- /16-fasterRCNN/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__ 3 | weights/faster_rcnn.h5 4 | -------------------------------------------------------------------------------- /16-fasterRCNN/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 vire 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /16-fasterRCNN/README.md: -------------------------------------------------------------------------------- 1 | # Faster RCNN 2 | 3 | Faster R-CNN R-101-FPN model was implemented with tensorflow 2.0. 4 | 5 | # Requirements 6 | 7 | - python 3.* 8 | - tensorflow 2.* 9 | - cv2 10 | - ... 11 | 12 | 13 | # HowTo 14 | 15 | - Step1. Download pretrained model from [Google Drive](https://drive.google.com/open?id=12gh8ei9w6MeAXXIxONNR-xdEdTJ8Zkko) or [百度网盘](https://pan.baidu.com/s/1I5PGkpvnDSduJnngoWuktQ) and put it in `weights/faster_rcnn.h5` directory. 16 | 17 | - Step2. `pip install required packages` according to your system prompts. 18 | 19 | - Step3. 20 | 21 | ``` 22 | python train_model.py 23 | ``` 24 | 25 | - Step4. You may need to check `train_model.ipynb` and `inspect_model.ipynb` for some insight Since Faster-RCNN is a really huge project. 26 | 27 | 28 | # Screenshot 29 | ![](shot.png) 30 | 31 | 32 | 33 | # Acknowledgement 34 | 35 | This work builds on many excellent works, which include: 36 | 37 | - Heavily based on [tf-eager-fasterrcnn](https://github.com/Viredery/tf-eager-fasterrcnn) 38 | - [matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN) 39 | - [open-mmlab/mmdetection](https://github.com/open-mmlab/mmdetection) 40 | -------------------------------------------------------------------------------- /16-fasterRCNN/detection/core/bbox/geometry.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | def compute_overlaps(boxes1, boxes2): 4 | '''Computes IoU overlaps between two sets of boxes. 5 | boxes1, boxes2: [N, (y1, x1, y2, x2)]. 6 | ''' 7 | # 1. Tile boxes2 and repeate boxes1. This allows us to compare 8 | # every boxes1 against every boxes2 without loops. 9 | # TF doesn't have an equivalent to np.repeate() so simulate it 10 | # using tf.tile() and tf.reshape. 11 | b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1), 12 | [1, 1, tf.shape(boxes2)[0]]), [-1, 4]) 13 | b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1]) 14 | # 2. Compute intersections 15 | b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1) 16 | b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1) 17 | y1 = tf.maximum(b1_y1, b2_y1) 18 | x1 = tf.maximum(b1_x1, b2_x1) 19 | y2 = tf.minimum(b1_y2, b2_y2) 20 | x2 = tf.minimum(b1_x2, b2_x2) 21 | intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0) 22 | # 3. Compute unions 23 | b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1) 24 | b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1) 25 | union = b1_area + b2_area - intersection 26 | # 4. 
Compute IoU and reshape to [boxes1, boxes2] 27 | iou = intersection / union 28 | overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]]) 29 | return overlaps 30 | -------------------------------------------------------------------------------- /16-fasterRCNN/detection/datasets/data_generator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | 5 | class DataGenerator: 6 | 7 | def __init__(self, dataset, shuffle=False): 8 | self.dataset = dataset 9 | self.shuffle = shuffle 10 | 11 | def __call__(self): 12 | indices = np.arange(len(self.dataset)) 13 | if self.shuffle: 14 | np.random.shuffle(indices) 15 | 16 | for img_idx in indices: 17 | img, img_meta, bbox, label = self.dataset[img_idx] 18 | yield img, img_meta, bbox, label 19 | -------------------------------------------------------------------------------- /16-fasterRCNN/detection/datasets/transforms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from detection.datasets.utils import * 4 | 5 | class ImageTransform(object): 6 | '''Preprocess the image. 7 | 8 | 1. rescale the image to expected size 9 | 2. normalize the image 10 | 3. flip the image (if needed) 11 | 4. pad the image (if needed) 12 | ''' 13 | def __init__(self, 14 | scale=(800, 1333), 15 | mean=(0, 0, 0), 16 | std=(1, 1, 1), 17 | pad_mode='fixed'): 18 | self.scale = scale 19 | self.mean = mean 20 | self.std = std 21 | self.pad_mode = pad_mode 22 | 23 | self.impad_size = max(scale) if pad_mode == 'fixed' else 64 24 | 25 | def __call__(self, img, flip=False): 26 | img, scale_factor = imrescale(img, self.scale) 27 | img_shape = img.shape 28 | img = imnormalize(img, self.mean, self.std) 29 | 30 | if flip: 31 | img = img_flip(img) 32 | if self.pad_mode == 'fixed': 33 | img = impad_to_square(img, self.impad_size) 34 | 35 | else: # 'non-fixed' 36 | img = impad_to_multiple(img, self.impad_size) 37 | 38 | return img, img_shape, scale_factor 39 | 40 | class BboxTransform(object): 41 | '''Preprocess ground truth bboxes. 42 | 43 | 1. rescale bboxes according to image size 44 | 2. flip bboxes (if needed) 45 | ''' 46 | def __init__(self): 47 | pass 48 | 49 | def __call__(self, bboxes, labels, 50 | img_shape, scale_factor, flip=False): 51 | 52 | bboxes = bboxes * scale_factor 53 | if flip: 54 | bboxes = bbox_flip(bboxes, img_shape) 55 | 56 | bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[0]) 57 | bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[1]) 58 | 59 | return bboxes, labels 60 | -------------------------------------------------------------------------------- /16-fasterRCNN/detection/models/detectors/test_mixins.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from detection.core.bbox import transforms 5 | from detection.utils.misc import * 6 | 7 | class RPNTestMixin: 8 | 9 | def simple_test_rpn(self, img, img_meta): 10 | ''' 11 | Args 12 | --- 13 | imgs: np.ndarray. [height, width, channel] 14 | img_metas: np.ndarray. 
[11] 15 | 16 | ''' 17 | imgs = tf.Variable(np.expand_dims(img, 0)) 18 | img_metas = tf.Variable(np.expand_dims(img_meta, 0)) 19 | 20 | x = self.backbone(imgs, training=False) 21 | x = self.neck(x, training=False) 22 | 23 | rpn_class_logits, rpn_probs, rpn_deltas = self.rpn_head(x, training=False) 24 | 25 | proposals_list = self.rpn_head.get_proposals( 26 | rpn_probs, rpn_deltas, img_metas, with_probs=False) 27 | 28 | return proposals_list[0] 29 | 30 | class BBoxTestMixin(object): 31 | 32 | def _unmold_detections(self, detections_list, img_metas): 33 | return [ 34 | self._unmold_single_detection(detections_list[i], img_metas[i]) 35 | for i in range(img_metas.shape[0]) 36 | ] 37 | 38 | def _unmold_single_detection(self, detections, img_meta): 39 | zero_ix = tf.where(tf.not_equal(detections[:, 4], 0)) 40 | detections = tf.gather_nd(detections, zero_ix) 41 | 42 | # Extract boxes, class_ids, scores, and class-specific masks 43 | boxes = detections[:, :4] 44 | class_ids = tf.cast(detections[:, 4], tf.int32) 45 | scores = detections[:, 5] 46 | 47 | boxes = transforms.bbox_mapping_back(boxes, img_meta) 48 | 49 | return {'rois': boxes.numpy(), 50 | 'class_ids': class_ids.numpy(), 51 | 'scores': scores.numpy()} 52 | 53 | def simple_test_bboxes(self, img, img_meta, proposals): 54 | ''' 55 | Args 56 | --- 57 | imgs: np.ndarray. [height, width, channel] 58 | img_meta: np.ndarray. [11] 59 | 60 | ''' 61 | imgs = tf.Variable(np.expand_dims(img, 0)) 62 | img_metas = tf.Variable(np.expand_dims(img_meta, 0)) 63 | rois_list = [tf.Variable(proposals)] 64 | 65 | x = self.backbone(imgs, training=False) 66 | P2, P3, P4, P5, _ = self.neck(x, training=False) 67 | 68 | rcnn_feature_maps = [P2, P3, P4, P5] 69 | 70 | 71 | pooled_regions_list = self.roi_align( 72 | (rois_list, rcnn_feature_maps, img_metas), training=False) 73 | 74 | rcnn_class_logits_list, rcnn_probs_list, rcnn_deltas_list = \ 75 | self.bbox_head(pooled_regions_list, training=False) 76 | 77 | detections_list = self.bbox_head.get_bboxes( 78 | rcnn_probs_list, rcnn_deltas_list, rois_list, img_metas) 79 | 80 | return self._unmold_detections(detections_list, img_metas)[0] -------------------------------------------------------------------------------- /16-fasterRCNN/detection/utils/misc.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | def trim_zeros(boxes, name=None): 4 | ''' 5 | Often boxes are represented with matrices of shape [N, 4] and 6 | are padded with zeros. This removes zero boxes. 7 | 8 | Args 9 | --- 10 | boxes: [N, 4] matrix of boxes. 11 | non_zeros: [N] a 1D boolean mask identifying the rows to keep 12 | ''' 13 | non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool) 14 | boxes = tf.boolean_mask(boxes, non_zeros, name=name) 15 | return boxes, non_zeros 16 | 17 | def parse_image_meta(meta): 18 | ''' 19 | Parses a tensor that contains image attributes to its components. 20 | 21 | Args 22 | --- 23 | meta: [..., 11] 24 | 25 | Returns 26 | --- 27 | a dict of the parsed tensors. 28 | ''' 29 | meta = meta.numpy() 30 | ori_shape = meta[..., 0:3] 31 | img_shape = meta[..., 3:6] 32 | pad_shape = meta[..., 6:9] 33 | scale = meta[..., 9] 34 | flip = meta[..., 10] 35 | return { 36 | 'ori_shape': ori_shape, 37 | 'img_shape': img_shape, 38 | 'pad_shape': pad_shape, 39 | 'scale': scale, 40 | 'flip': flip 41 | } 42 | 43 | def calc_batch_padded_shape(meta): 44 | ''' 45 | Args 46 | --- 47 | meta: [batch_size, 11] 48 | 49 | Returns 50 | --- 51 | nd.ndarray. 
Tuple of (height, width) 52 | ''' 53 | return tf.cast(tf.reduce_max(meta[:, 6:8], axis=0), tf.int32).numpy() 54 | 55 | def calc_img_shapes(meta): 56 | ''' 57 | Args 58 | --- 59 | meta: [..., 11] 60 | 61 | Returns 62 | --- 63 | nd.ndarray. [..., (height, width)] 64 | ''' 65 | return tf.cast(meta[..., 3:5], tf.int32).numpy() 66 | 67 | 68 | def calc_pad_shapes(meta): 69 | ''' 70 | Args 71 | --- 72 | meta: [..., 11] 73 | 74 | Returns 75 | --- 76 | nd.ndarray. [..., (height, width)] 77 | ''' 78 | return tf.cast(meta[..., 6:8], tf.int32).numpy() -------------------------------------------------------------------------------- /16-fasterRCNN/image_demo_ckpt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/16-fasterRCNN/image_demo_ckpt.png -------------------------------------------------------------------------------- /16-fasterRCNN/image_demo_random.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/16-fasterRCNN/image_demo_random.png -------------------------------------------------------------------------------- /16-fasterRCNN/roi_test.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import matplotlib.pyplot as plt 3 | 4 | img = plt.imread('/home/llong/Downloads/number.jpg') /255. 5 | img2 = plt.imread('/home/llong/Downloads/number2.jpg') /255. 6 | img = tf.convert_to_tensor(img, dtype=tf.float32) 7 | img = tf.expand_dims(img, axis=0) 8 | img = tf.image.resize(img, (1000,1000)) 9 | img2 = tf.convert_to_tensor(img2, dtype=tf.float32) 10 | img2 = tf.expand_dims(img2, axis=0) 11 | img2 = tf.image.resize(img2, (1000,1000)) 12 | 13 | img = tf.concat([img, img2], axis=0) 14 | print('img:', img.shape) 15 | 16 | a = tf.image.crop_and_resize(img, [[0.5, 0.5, 1.0, 1.0], [0.5, 0.5, 1.5, 1.5]], [0, 1], crop_size=(500, 500)) 17 | print('a:', a.shape) 18 | 19 | plt.subplot(2,2,1) 20 | plt.imshow(img[0]) 21 | plt.subplot(2,2,2) 22 | plt.imshow(img[1]) 23 | plt.subplot(2,2,3) 24 | plt.imshow(a[0]) 25 | plt.subplot(2,2,4) 26 | plt.imshow(a[1]) 27 | plt.show() 28 | -------------------------------------------------------------------------------- /16-fasterRCNN/shot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/16-fasterRCNN/shot.png -------------------------------------------------------------------------------- /16-fasterRCNN/tf.image.crop_and_resize_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/16-fasterRCNN/tf.image.crop_and_resize_test.png -------------------------------------------------------------------------------- /17-A2C/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Reinforcement Learning A2C Algorithm 3 | 4 | Here implement a synchronous A2C algorithm using gym `CartPole-v0` environment 5 | 6 | # Howto 7 | 8 | - Step1. `pip install gym` 9 | 10 | - Step2. 
`python a2c.py` 11 | 12 | 13 | # Screenshot 14 | 15 | ![](cartpole.gif) 16 | 17 | ![](reward.png) 18 | 19 | 20 | # Acknowledgement 21 | 22 | This implementation is taken from [Deep Reinforcement Learning with TensorFlow 2.0](http://inoryy.com/post/tensorflow2-deep-reinforcement-learning/). -------------------------------------------------------------------------------- /17-A2C/cartpole.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/17-A2C/cartpole.gif -------------------------------------------------------------------------------- /17-A2C/reward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/17-A2C/reward.png -------------------------------------------------------------------------------- /18-GPT/README.md: -------------------------------------------------------------------------------- 1 | # GPT 2 | 3 | TensorFlow 2.0 implementation of OpenAI's GPT (Generative Pre-Training) language model. 4 | 5 | ![](architecture.png) 6 | 7 | # Acknowledgement 8 | 9 | This implementation is based on [the Pytorch implementation](https://github.com/huggingface/pytorch-openai-transformer-lm) of OpenAI's paper, ["Improving Language Understanding by Generative Pre-Training"](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf). -------------------------------------------------------------------------------- /18-GPT/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/18-GPT/architecture.png -------------------------------------------------------------------------------- /19-BERT/README.md: -------------------------------------------------------------------------------- 1 | # BERT 2 | 3 | TensorFlow 2.0 implementation of Google's BERT (Bidirectional Transformer) language model. 4 | 5 | 6 | 7 | ![](architecture.png) 8 | 9 | # Acknowledgement 10 | 11 | Based on [CyberZHG's Keras BERT implementation](https://github.com/CyberZHG/keras-bert). 
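The BERT model above is pre-trained with a masked-language-model objective (plus next-sentence prediction), as exercised by `19-BERT/main.py` further down in this listing. Purely for intuition, the sketch below shows the standard BERT masking scheme: mask a fraction of tokens, and of those, replace 80% with `[MASK]`, 10% with a random token, and leave 10% unchanged. The function and its default rate follow the original BERT paper's convention and are an illustrative assumption here, not an excerpt from this repo's `gen_batch_inputs`.

```python
import random

def mask_tokens(tokens, vocab, mask_rate=0.15, mask_token='[MASK]'):
    # Returns the corrupted sequence and the per-position targets the model must recover.
    masked, targets = [], []
    for tok in tokens:
        if random.random() < mask_rate:
            r = random.random()
            if r < 0.8:
                masked.append(mask_token)            # 80%: replace with [MASK]
            elif r < 0.9:
                masked.append(random.choice(vocab))  # 10%: replace with a random token
            else:
                masked.append(tok)                   # 10%: keep the original token
            targets.append(tok)                      # loss is computed only at these positions
        else:
            masked.append(tok)
            targets.append(None)                     # position not scored
    return masked, targets

# Example:
# mask_tokens(['all', 'work', 'and', 'no', 'play'], vocab=['jack', 'dull', 'boy'])
```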
-------------------------------------------------------------------------------- /19-BERT/__init__.py: -------------------------------------------------------------------------------- 1 | from .bert import * 2 | from .loader import * 3 | from .tokenizer import Tokenizer 4 | -------------------------------------------------------------------------------- /19-BERT/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/19-BERT/architecture.png -------------------------------------------------------------------------------- /19-BERT/embedding_similarity/__init__.py: -------------------------------------------------------------------------------- 1 | from .embeddings import * 2 | -------------------------------------------------------------------------------- /19-BERT/embedding_similarity/embeddings.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow.keras.backend as K 3 | 4 | __all__ = ['EmbeddingRet', 'EmbeddingSim', 'get_custom_objects'] 5 | 6 | 7 | class EmbeddingRet(keras.layers.Embedding): 8 | """Embedding layer with weights returned.""" 9 | 10 | def compute_output_shape(self, input_shape): 11 | return [ 12 | super(EmbeddingRet, self).compute_output_shape(input_shape), 13 | (self.input_dim, self.output_dim), 14 | ] 15 | 16 | def compute_mask(self, inputs, mask=None): 17 | return [ 18 | super(EmbeddingRet, self).compute_mask(inputs, mask), 19 | None, 20 | ] 21 | 22 | def call(self, inputs): 23 | return [ 24 | super(EmbeddingRet, self).call(inputs), 25 | self.embeddings, 26 | ] 27 | 28 | 29 | class EmbeddingSim(keras.layers.Layer): 30 | """Calculate similarity between features and token embeddings with bias term.""" 31 | 32 | def __init__(self, 33 | use_bias=True, 34 | initializer='zeros', 35 | regularizer=None, 36 | constraint=None, 37 | **kwargs): 38 | """Initialize the layer. 39 | 40 | :param output_dim: Same as embedding output dimension. 41 | :param use_bias: Whether to use bias term. 42 | :param initializer: Initializer for bias. 43 | :param regularizer: Regularizer for bias. 44 | :param constraint: Constraint for bias. 45 | :param kwargs: Arguments for parent class. 
46 | """ 47 | super(EmbeddingSim, self).__init__(**kwargs) 48 | self.supports_masking = True 49 | self.use_bias = use_bias 50 | self.initializer = keras.initializers.get(initializer) 51 | self.regularizer = keras.regularizers.get(regularizer) 52 | self.constraint = keras.constraints.get(constraint) 53 | self.bias = None 54 | 55 | def get_config(self): 56 | config = { 57 | 'use_bias': self.use_bias, 58 | 'initializer': keras.initializers.serialize(self.initializer), 59 | 'regularizer': keras.regularizers.serialize(self.regularizer), 60 | 'constraint': keras.constraints.serialize(self.constraint), 61 | } 62 | base_config = super(EmbeddingSim, self).get_config() 63 | return dict(list(base_config.items()) + list(config.items())) 64 | 65 | def build(self, input_shape): 66 | if self.use_bias: 67 | embed_shape = input_shape[1] 68 | token_num = embed_shape[0] 69 | self.bias = self.add_weight( 70 | shape=(token_num,), 71 | initializer=self.initializer, 72 | regularizer=self.regularizer, 73 | constraint=self.constraint, 74 | name='bias', 75 | ) 76 | super(EmbeddingSim, self).build(input_shape) 77 | 78 | def compute_output_shape(self, input_shape): 79 | feature_shape, embed_shape = input_shape 80 | token_num = embed_shape[0] 81 | return feature_shape[:-1] + (token_num,) 82 | 83 | def compute_mask(self, inputs, mask=None): 84 | return mask[0] 85 | 86 | def call(self, inputs, mask=None, **kwargs): 87 | inputs, embeddings = inputs 88 | outputs = K.dot(inputs, K.transpose(embeddings)) 89 | if self.use_bias: 90 | outputs = K.bias_add(outputs, self.bias) 91 | return keras.activations.softmax(outputs) 92 | 93 | 94 | def get_custom_objects(): 95 | return { 96 | 'EmbeddingRet': EmbeddingRet, 97 | 'EmbeddingSim': EmbeddingSim, 98 | } 99 | -------------------------------------------------------------------------------- /19-BERT/layer_normalization/__init__.py: -------------------------------------------------------------------------------- 1 | from .layer_normalization import LayerNormalization 2 | -------------------------------------------------------------------------------- /19-BERT/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from .inputs import get_inputs 2 | from .embedding import get_embedding, TokenEmbedding, EmbeddingSimilarity 3 | from .masked import Masked 4 | from .extract import Extract 5 | from .pooling import MaskedGlobalMaxPool1D 6 | from .conv import MaskedConv1D 7 | -------------------------------------------------------------------------------- /19-BERT/layers/conv.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow.keras.backend as K 3 | 4 | 5 | class MaskedConv1D(keras.layers.Conv1D): 6 | 7 | def __init__(self, **kwargs): 8 | super(MaskedConv1D, self).__init__(**kwargs) 9 | self.supports_masking = True 10 | 11 | def compute_mask(self, inputs, mask=None): 12 | return mask 13 | 14 | def call(self, inputs, mask=None): 15 | if mask is not None: 16 | mask = K.cast(mask, K.floatx()) 17 | inputs *= K.expand_dims(mask, axis=-1) 18 | return super(MaskedConv1D, self).call(inputs) 19 | -------------------------------------------------------------------------------- /19-BERT/layers/extract.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | 3 | 4 | class Extract(keras.layers.Layer): 5 | """Extract from index. 
6 | 7 | See: https://arxiv.org/pdf/1810.04805.pdf 8 | """ 9 | 10 | def __init__(self, index, **kwargs): 11 | super(Extract, self).__init__(**kwargs) 12 | self.index = index 13 | self.supports_masking = True 14 | 15 | def get_config(self): 16 | config = { 17 | 'index': self.index, 18 | } 19 | base_config = super(Extract, self).get_config() 20 | return dict(list(base_config.items()) + list(config.items())) 21 | 22 | def compute_output_shape(self, input_shape): 23 | return input_shape[:1] + input_shape[2:] 24 | 25 | def compute_mask(self, inputs, mask=None): 26 | return None 27 | 28 | def call(self, x, mask=None): 29 | return x[:, self.index] 30 | -------------------------------------------------------------------------------- /19-BERT/layers/inputs.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | 3 | 4 | def get_inputs(seq_len): 5 | """Get input layers. 6 | 7 | See: https://arxiv.org/pdf/1810.04805.pdf 8 | 9 | :param seq_len: Length of the sequence or None. 10 | """ 11 | names = ['Token', 'Segment', 'Masked'] 12 | return [keras.layers.Input( 13 | shape=(seq_len,), 14 | name='Input-%s' % name, 15 | ) for name in names] 16 | -------------------------------------------------------------------------------- /19-BERT/layers/masked.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow.keras.backend as K 3 | 4 | 5 | class Masked(keras.layers.Layer): 6 | """Generate output mask based on the given mask. 7 | 8 | The inputs for the layer is the original input layer and the masked locations. 9 | 10 | See: https://arxiv.org/pdf/1810.04805.pdf 11 | """ 12 | 13 | def __init__(self, 14 | return_masked=False, 15 | **kwargs): 16 | """Initialize the layer. 17 | 18 | :param return_masked: Whether to return the merged mask. 19 | :param kwargs: Arguments for parent class. 
20 | """ 21 | super(Masked, self).__init__(**kwargs) 22 | self.supports_masking = True 23 | self.return_masked = return_masked 24 | 25 | def get_config(self): 26 | config = { 27 | 'return_masked': self.return_masked, 28 | } 29 | base_config = super(Masked, self).get_config() 30 | return dict(list(base_config.items()) + list(config.items())) 31 | 32 | def compute_output_shape(self, input_shape): 33 | if self.return_masked: 34 | return [input_shape[0], input_shape[0][:-1]] 35 | return input_shape[0] 36 | 37 | def compute_mask(self, inputs, mask=None): 38 | token_mask = K.not_equal(inputs[1], 0) 39 | return K.all(K.stack([token_mask, mask[0]], axis=0), axis=0) 40 | 41 | def call(self, inputs, mask=None, **kwargs): 42 | if self.return_masked: 43 | return [inputs[0], K.cast(self.compute_mask(inputs, mask), K.floatx())] 44 | return inputs[0] 45 | -------------------------------------------------------------------------------- /19-BERT/layers/pooling.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow.keras.backend as K 3 | 4 | 5 | class MaskedGlobalMaxPool1D(keras.layers.Layer): 6 | 7 | def __init__(self, **kwargs): 8 | super(MaskedGlobalMaxPool1D, self).__init__(**kwargs) 9 | self.supports_masking = True 10 | 11 | def compute_mask(self, inputs, mask=None): 12 | return None 13 | 14 | def compute_output_shape(self, input_shape): 15 | return input_shape[:-2] + (input_shape[-1],) 16 | 17 | def call(self, inputs, mask=None): 18 | if mask is not None: 19 | mask = K.cast(mask, K.floatx()) 20 | inputs -= K.expand_dims((1.0 - mask) * 1e6, axis=-1) 21 | return K.max(inputs, axis=-2) 22 | -------------------------------------------------------------------------------- /19-BERT/main.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | from bert import get_base_dict, get_model, gen_batch_inputs 3 | 4 | 5 | # A toy input example 6 | sentence_pairs = [ 7 | [['all', 'work', 'and', 'no', 'play'], ['makes', 'jack', 'a', 'dull', 'boy']], 8 | [['from', 'the', 'day', 'forth'], ['my', 'arm', 'changed']], 9 | [['and', 'a', 'voice', 'echoed'], ['power', 'give', 'me', 'more', 'power']], 10 | ] 11 | 12 | 13 | # Build token dictionary 14 | token_dict = get_base_dict() # A dict that contains some special tokens 15 | for pairs in sentence_pairs: 16 | for token in pairs[0] + pairs[1]: 17 | if token not in token_dict: 18 | token_dict[token] = len(token_dict) 19 | token_list = list(token_dict.keys()) # Used for selecting a random word 20 | 21 | 22 | # Build & train the model 23 | model = get_model( 24 | token_num=len(token_dict), 25 | head_num=5, 26 | transformer_num=12, 27 | embed_dim=25, 28 | feed_forward_dim=100, 29 | seq_len=20, 30 | pos_num=20, 31 | dropout_rate=0.05, 32 | ) 33 | model.summary() 34 | 35 | def _generator(): 36 | while True: 37 | yield gen_batch_inputs( 38 | sentence_pairs, 39 | token_dict, 40 | token_list, 41 | seq_len=20, 42 | mask_rate=0.3, 43 | swap_sentence_rate=1.0, 44 | ) 45 | 46 | model.fit_generator( 47 | generator=_generator(), 48 | steps_per_epoch=1000, 49 | epochs=100, 50 | validation_data=_generator(), 51 | validation_steps=100, 52 | callbacks=[ 53 | keras.callbacks.EarlyStopping(monitor='val_loss', patience=5) 54 | ], 55 | ) 56 | 57 | 58 | # Use the trained model 59 | inputs, output_layer = get_model( 60 | token_num=len(token_dict), 61 | head_num=5, 62 | transformer_num=12, 63 | embed_dim=25, 64 | feed_forward_dim=100, 65 | seq_len=20, 66 
| pos_num=20, 67 | dropout_rate=0.05, 68 | training=False, # The input layers and output layer will be returned if `training` is `False` 69 | trainable=False, # Whether the model is trainable. The default value is the same with `training` 70 | output_layer_num=4, # The number of layers whose outputs will be concatenated as a single output. 71 | # Only available when `training` is `False`. 72 | ) -------------------------------------------------------------------------------- /19-BERT/multi_head_attention/__init__.py: -------------------------------------------------------------------------------- 1 | from .multi_head import MultiHead 2 | from .multi_head_attention import MultiHeadAttention 3 | -------------------------------------------------------------------------------- /19-BERT/pointwise_feedforward/__init__.py: -------------------------------------------------------------------------------- 1 | from .feed_forward import FeedForward 2 | -------------------------------------------------------------------------------- /19-BERT/position_embedding/__init__.py: -------------------------------------------------------------------------------- 1 | from .pos_embd import PositionEmbedding 2 | from .trig_pos_embd import TrigPosEmbedding 3 | -------------------------------------------------------------------------------- /19-BERT/self_attention/__init__.py: -------------------------------------------------------------------------------- 1 | from .seq_self_attention import SeqSelfAttention 2 | from .seq_weighted_attention import SeqWeightedAttention 3 | from .scaled_dot_attention import ScaledDotProductAttention 4 | -------------------------------------------------------------------------------- /19-BERT/self_attention/scaled_dot_attention.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow.keras.backend as K 3 | 4 | 5 | class ScaledDotProductAttention(keras.layers.Layer): 6 | """The attention layer that takes three inputs representing queries, keys and values. 7 | 8 | \text{Attention}(Q, K, V) = \text{softmax}(\frac{Q K^T}{\sqrt{d_k}}) V 9 | 10 | See: https://arxiv.org/pdf/1706.03762.pdf 11 | """ 12 | 13 | def __init__(self, 14 | return_attention=False, 15 | history_only=False, 16 | **kwargs): 17 | """Initialize the layer. 18 | 19 | :param return_attention: Whether to return attention weights. 20 | :param history_only: Whether to only use history data. 21 | :param kwargs: Arguments for parent class. 
22 | """ 23 | self.supports_masking = True 24 | self.return_attention = return_attention 25 | self.history_only = history_only 26 | super(ScaledDotProductAttention, self).__init__(**kwargs) 27 | 28 | def get_config(self): 29 | config = { 30 | 'return_attention': self.return_attention, 31 | 'history_only': self.history_only, 32 | } 33 | base_config = super(ScaledDotProductAttention, self).get_config() 34 | return dict(list(base_config.items()) + list(config.items())) 35 | 36 | def compute_output_shape(self, input_shape): 37 | if isinstance(input_shape, list): 38 | query_shape, key_shape, value_shape = input_shape 39 | else: 40 | query_shape = key_shape = value_shape = input_shape 41 | output_shape = query_shape[:-1] + value_shape[-1:] 42 | if self.return_attention: 43 | attention_shape = query_shape[:2] + (key_shape[1],) 44 | return [output_shape, attention_shape] 45 | return output_shape 46 | 47 | def compute_mask(self, inputs, mask=None): 48 | if isinstance(mask, list): 49 | mask = mask[0] 50 | if self.return_attention: 51 | return [mask, None] 52 | return mask 53 | 54 | def call(self, inputs, mask=None, **kwargs): 55 | if isinstance(inputs, list): 56 | query, key, value = inputs 57 | else: 58 | query = key = value = inputs 59 | if isinstance(mask, list): 60 | mask = mask[1] 61 | feature_dim = K.shape(query)[-1] 62 | e = K.batch_dot(query, key, axes=2) / K.sqrt(K.cast(feature_dim, dtype=K.floatx())) 63 | e = K.exp(e - K.max(e, axis=-1, keepdims=True)) 64 | if self.history_only: 65 | query_len, key_len = K.shape(query)[1], K.shape(key)[1] 66 | indices = K.tile(K.expand_dims(K.arange(key_len), axis=0), [query_len, 1]) 67 | upper = K.expand_dims(K.arange(key_len), axis=-1) 68 | e *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0) 69 | if mask is not None: 70 | e *= K.cast(K.expand_dims(mask, axis=-2), K.floatx()) 71 | a = e / (K.sum(e, axis=-1, keepdims=True) + K.epsilon()) 72 | v = K.batch_dot(a, value) 73 | if self.return_attention: 74 | return [v, a] 75 | return v 76 | -------------------------------------------------------------------------------- /19-BERT/self_attention/seq_weighted_attention.py: -------------------------------------------------------------------------------- 1 | from tensorflow import keras 2 | import tensorflow.keras.backend as K 3 | 4 | 5 | class SeqWeightedAttention(keras.layers.Layer): 6 | """Y = \text{softmax}(XW + b) X 7 | 8 | See: https://arxiv.org/pdf/1708.00524.pdf 9 | """ 10 | 11 | def __init__(self, use_bias=True, return_attention=False, **kwargs): 12 | self.supports_masking = True 13 | self.use_bias = use_bias 14 | self.return_attention = return_attention 15 | self.W, self.b = None, None 16 | super(SeqWeightedAttention, self).__init__(** kwargs) 17 | 18 | def get_config(self): 19 | config = { 20 | 'use_bias': self.use_bias, 21 | 'return_attention': self.return_attention, 22 | } 23 | base_config = super(SeqWeightedAttention, self).get_config() 24 | return dict(list(base_config.items()) + list(config.items())) 25 | 26 | def build(self, input_shape): 27 | self.W = self.add_weight(shape=(input_shape[2], 1), 28 | name='{}_W'.format(self.name), 29 | initializer=keras.initializers.get('uniform')) 30 | if self.use_bias: 31 | self.b = self.add_weight(shape=(1,), 32 | name='{}_b'.format(self.name), 33 | initializer=keras.initializers.get('zeros')) 34 | super(SeqWeightedAttention, self).build(input_shape) 35 | 36 | def call(self, x, mask=None): 37 | logits = K.dot(x, self.W) 38 | if self.use_bias: 39 | logits += self.b 40 | x_shape = K.shape(x) 41 | logits 
= K.reshape(logits, (x_shape[0], x_shape[1])) 42 | ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True)) 43 | if mask is not None: 44 | mask = K.cast(mask, K.floatx()) 45 | ai = ai * mask 46 | att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon()) 47 | weighted_input = x * K.expand_dims(att_weights) 48 | result = K.sum(weighted_input, axis=1) 49 | if self.return_attention: 50 | return [result, att_weights] 51 | return result 52 | 53 | def compute_output_shape(self, input_shape): 54 | output_len = input_shape[2] 55 | if self.return_attention: 56 | return [(input_shape[0], output_len), (input_shape[0], input_shape[1])] 57 | return input_shape[0], output_len 58 | 59 | def compute_mask(self, _, input_mask=None): 60 | if self.return_attention: 61 | return [None, None] 62 | return None 63 | 64 | @staticmethod 65 | def get_custom_objects(): 66 | return {'SeqWeightedAttention': SeqWeightedAttention} 67 | -------------------------------------------------------------------------------- /19-BERT/transformer/__init__.py: -------------------------------------------------------------------------------- 1 | from .gelu import gelu 2 | from .transformer import * 3 | -------------------------------------------------------------------------------- /19-BERT/transformer/gelu.py: -------------------------------------------------------------------------------- 1 | import math 2 | import tensorflow.keras.backend as K 3 | 4 | 5 | def gelu(x): 6 | """An approximation of gelu. 7 | 8 | See: https://arxiv.org/pdf/1606.08415.pdf 9 | """ 10 | return 0.5 * x * (1.0 + K.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * K.pow(x, 3)))) 11 | -------------------------------------------------------------------------------- /20-GCN/README.MD: -------------------------------------------------------------------------------- 1 | # Graph Convolution Network for TF2 2 | 3 | GCN implementation for paper: [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/pdf/1609.02907.pdf) 4 | 5 | # Benchmark 6 | 7 | | dataset | Citeseea | Cora | Pubmed | NELL | 8 | |---------------|----------|------|--------|------| 9 | | GCN(official) | 70.3 | 81.5 | 79.0 | 66.0 | 10 | | This repo. 
| | 81.8 | 78.9 | | 11 | 12 | # HOWTO 13 | ``` 14 | python train.py 15 | ``` 16 | 17 | # Screenshot 18 | 19 | ![](res/screen.png) 20 | -------------------------------------------------------------------------------- /20-GCN/config.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | args = argparse.ArgumentParser() 4 | args.add_argument('--dataset', default='cora') 5 | args.add_argument('--model', default='gcn') 6 | args.add_argument('--learning_rate', default=0.01) 7 | args.add_argument('--epochs', default=200) 8 | args.add_argument('--hidden1', default=16) 9 | args.add_argument('--dropout', default=0.5) 10 | args.add_argument('--weight_decay', default=5e-4) 11 | args.add_argument('--early_stopping', default=10) 12 | args.add_argument('--max_degree', default=3) 13 | 14 | 15 | args = args.parse_args() 16 | print(args) -------------------------------------------------------------------------------- /20-GCN/inits.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | 5 | def uniform(shape, scale=0.05, name=None): 6 | """Uniform init.""" 7 | initial = tf.random.uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32) 8 | return tf.Variable(initial, name=name) 9 | 10 | 11 | def glorot(shape, name=None): 12 | """Glorot & Bengio (AISTATS 2010) init.""" 13 | init_range = np.sqrt(6.0/(shape[0]+shape[1])) 14 | initial = tf.random.uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32) 15 | return tf.Variable(initial, name=name) 16 | 17 | 18 | def zeros(shape, name=None): 19 | """All zeros.""" 20 | initial = tf.zeros(shape, dtype=tf.float32) 21 | return tf.Variable(initial, name=name) 22 | 23 | 24 | def ones(shape, name=None): 25 | """All ones.""" 26 | initial = tf.ones(shape, dtype=tf.float32) 27 | return tf.Variable(initial, name=name) -------------------------------------------------------------------------------- /20-GCN/metrics.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | def masked_softmax_cross_entropy(preds, labels, mask): 5 | """ 6 | Softmax cross-entropy loss with masking. 7 | """ 8 | loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) 9 | mask = tf.cast(mask, dtype=tf.float32) 10 | mask /= tf.reduce_mean(mask) 11 | loss *= mask 12 | return tf.reduce_mean(loss) 13 | 14 | 15 | def masked_accuracy(preds, labels, mask): 16 | """ 17 | Accuracy with masking. 
18 | """ 19 | correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)) 20 | accuracy_all = tf.cast(correct_prediction, tf.float32) 21 | mask = tf.cast(mask, dtype=tf.float32) 22 | mask /= tf.reduce_mean(mask) 23 | accuracy_all *= mask 24 | return tf.reduce_mean(accuracy_all) 25 | -------------------------------------------------------------------------------- /20-GCN/nohup.out: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/20-GCN/nohup.out -------------------------------------------------------------------------------- /20-GCN/res/screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/20-GCN/res/screen.png -------------------------------------------------------------------------------- /20-GCN/train.py: -------------------------------------------------------------------------------- 1 | import time 2 | import tensorflow as tf 3 | from tensorflow.keras import optimizers 4 | 5 | from utils import * 6 | from models import GCN, MLP 7 | from config import args 8 | 9 | import os 10 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 11 | print('tf version:', tf.__version__) 12 | assert tf.__version__.startswith('2.') 13 | 14 | 15 | 16 | # set random seed 17 | seed = 123 18 | np.random.seed(seed) 19 | tf.random.set_seed(seed) 20 | 21 | 22 | 23 | # load data 24 | adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(args.dataset) 25 | print('adj:', adj.shape) 26 | print('features:', features.shape) 27 | print('y:', y_train.shape, y_val.shape, y_test.shape) 28 | print('mask:', train_mask.shape, val_mask.shape, test_mask.shape) 29 | 30 | 31 | 32 | # D^-1@X 33 | features = preprocess_features(features) # [49216, 2], [49216], [2708, 1433] 34 | print('features coordinates::', features[0].shape) 35 | print('features data::', features[1].shape) 36 | print('features shape::', features[2]) 37 | 38 | if args.model == 'gcn': 39 | # D^-0.5 A D^-0.5 40 | support = [preprocess_adj(adj)] 41 | num_supports = 1 42 | model_func = GCN 43 | elif args.model == 'gcn_cheby': 44 | support = chebyshev_polynomials(adj, args.max_degree) 45 | num_supports = 1 + args.max_degree 46 | model_func = GCN 47 | elif args.model == 'dense': 48 | support = [preprocess_adj(adj)] # Not used 49 | num_supports = 1 50 | model_func = MLP 51 | else: 52 | raise ValueError('Invalid argument for model: ' + str(args.model)) 53 | 54 | 55 | 56 | # Create model 57 | model = GCN(input_dim=features[2][1], output_dim=y_train.shape[1], num_features_nonzero=features[1].shape) # [1433] 58 | 59 | 60 | 61 | 62 | train_label = tf.convert_to_tensor(y_train) 63 | train_mask = tf.convert_to_tensor(train_mask) 64 | val_label = tf.convert_to_tensor(y_val) 65 | val_mask = tf.convert_to_tensor(val_mask) 66 | test_label = tf.convert_to_tensor(y_test) 67 | test_mask = tf.convert_to_tensor(test_mask) 68 | features = tf.SparseTensor(*features) 69 | support = [tf.cast(tf.SparseTensor(*support[0]), dtype=tf.float32)] 70 | num_features_nonzero = features.values.shape 71 | dropout = args.dropout 72 | 73 | 74 | optimizer = optimizers.Adam(lr=1e-2) 75 | 76 | 77 | 78 | for epoch in range(args.epochs): 79 | 80 | with tf.GradientTape() as tape: 81 | loss, acc = model((features, train_label, train_mask,support)) 82 | grads = 
tape.gradient(loss, model.trainable_variables) 83 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 84 | 85 | _, val_acc = model((features, val_label, val_mask, support), training=False) 86 | 87 | 88 | if epoch % 20 == 0: 89 | 90 | print(epoch, float(loss), float(acc), '\tval:', float(val_acc)) 91 | 92 | 93 | 94 | test_loss, test_acc = model((features, test_label, test_mask, support), training=False) 95 | 96 | 97 | print('\ttest:', float(test_loss), float(test_acc)) -------------------------------------------------------------------------------- /21-CN-EN-Translation-BERT/Readme.md: -------------------------------------------------------------------------------- 1 | # Chinese-to-English Machine Translation 2 | 3 | 4 | # HowTo 5 | 6 | 1. install required packages. 7 | 8 | `pip install bert-for-tf2` 9 | 10 | 2. `python transformer_train.py` to run naive transformer model. 11 | 12 | 3. `python bert_train.py` to run BERT pretrained encoder model. 13 | 14 | 15 | # Screenshot 16 | 17 | 1. transformer 18 | 19 | ``` 20 | Chinese src: 周四和周五发生在五个省份的炸弹爆炸事件导致四人死亡,几十人受伤,遭袭地点包括旅游胜地普吉和华 欣。 21 | Translated : Four weeks and five-week bombings have killed four people and injured dozens of others, including tourism and the likelihood of land grabs. 22 | Real: The bombings in five provinces on Thursday and Friday, including in Phuket and Hua Hin, areas popular with tourists, killed four people and injured dozens. 23 | ``` 24 | -------------------------------------------------------------------------------- /21-CN-EN-Translation-BERT/attention.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | def scaled_dot_product_attention(q, k, v, mask): 4 | matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k) 5 | 6 | # scale matmul_qk 7 | dk = tf.cast(tf.shape(k)[-1], tf.float32) 8 | scaled_attention_logits = matmul_qk / tf.math.sqrt(dk) 9 | 10 | # add the mask to the scaled tensor. 11 | if mask is not None: 12 | scaled_attention_logits += (mask * -1e9) 13 | 14 | # softmax is normalized on the last axis (seq_len_k) so that the scores 15 | # add up to 1. 16 | attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k) 17 | 18 | output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v) 19 | 20 | return output, attention_weights 21 | 22 | 23 | # ## Multi-head attention 24 | 25 | # In[ ]: 26 | 27 | 28 | class MultiHeadAttention(tf.keras.layers.Layer): 29 | def __init__(self, d_model, num_heads): 30 | super(MultiHeadAttention, self).__init__() 31 | self.num_heads = num_heads 32 | self.d_model = d_model 33 | 34 | assert d_model % self.num_heads == 0 35 | 36 | self.depth = d_model // self.num_heads 37 | 38 | self.wq = tf.keras.layers.Dense(d_model) 39 | self.wk = tf.keras.layers.Dense(d_model) 40 | self.wv = tf.keras.layers.Dense(d_model) 41 | 42 | self.dense = tf.keras.layers.Dense(d_model) 43 | 44 | def split_heads(self, x, batch_size): 45 | """Split the last dimension into (num_heads, depth). 
46 | Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth) 47 | """ 48 | x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth)) 49 | return tf.transpose(x, perm=[0, 2, 1, 3]) 50 | 51 | def call(self, v, k, q, mask): 52 | batch_size = tf.shape(q)[0] 53 | 54 | q = self.wq(q) # (batch_size, seq_len, d_model) 55 | k = self.wk(k) # (batch_size, seq_len, d_model) 56 | v = self.wv(v) # (batch_size, seq_len, d_model) 57 | 58 | q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth) 59 | k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth) 60 | v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth) 61 | 62 | # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth) 63 | # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k) 64 | scaled_attention, attention_weights = scaled_dot_product_attention( 65 | q, k, v, mask) 66 | 67 | scaled_attention = tf.transpose(scaled_attention, 68 | perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth) 69 | 70 | concat_attention = tf.reshape(scaled_attention, 71 | (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model) 72 | 73 | output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model) 74 | 75 | return output, attention_weights 76 | 77 | def main(): 78 | temp_mha = MultiHeadAttention(d_model=512, num_heads=8) 79 | y = tf.random.uniform((1, 60, 768)) # (batch_size, encoder_sequence, d_model) 80 | q = tf.random.uniform((1, 60, 512)) # (batch_size, encoder_sequence, d_model) 81 | out, attn = temp_mha(y, k=y, q=q, mask=None) 82 | out.shape, attn.shape 83 | 84 | 85 | 86 | if __name__ == '__main__': 87 | main() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow 2.0 Tutorials 2 | Our repo. is the **Winner** of [⚡#PoweredByTF 2.0 Challenge!](https://devpost.com/software/tensorflow-2-0-tutorials). 3 | 4 | 5 |
6 | 7 |
8 | 9 | Timeline: 10 | - Oct. 1, 2019: TensorFlow 2.0 Stable! 11 | - Aug. 24, 2019: [TensorFlow 2.0 rc0](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf) 12 | - Jun. 8, 2019: [TensorFlow 2.0 Beta](https://twitter.com/fchollet/status/1134583289384120320) 13 | - Mar. 7, 2019: [Tensorflow 2.0 Alpha](https://www.tensorflow.org/alpha) 14 | - Jan. 11, 2019: [TensorFlow r2.0 preview](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf) 15 | - Aug. 14, 2018: [TensorFlow 2.0 is coming](https://groups.google.com/a/tensorflow.org/forum/#!topic/discuss/bgug1G6a89A) 16 | 17 | 18 | 19 | # Installation 20 | 21 | make sure you are using python 3.x. 22 | 23 | - CPU install 24 | ```python 25 | pip install tensorflow -U 26 | ``` 27 | 28 | - GPU install 29 | 30 | Install `CUDA 10.0`(or after) and `cudnn` by yourself. and set `LD_LIBRARY_PATH` up. 31 | 32 | ```python 33 | pip install tensorflow-gpu -U 34 | ``` 35 | 36 | Test installation: 37 | ```python 38 | In [2]: import tensorflow as tf 39 | 40 | In [3]: tf.__version__ 41 | Out[3]: '2.0.0' 42 | In [4]: tf.test.is_gpu_available() 43 | ... 44 | totalMemory: 3.95GiB freeMemory: 3.00GiB 45 | ... 46 | Out[4]: True 47 | 48 | ``` 49 | 50 | 51 | 52 | 53 | # 配套TF2视频教程 54 | 55 |
56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 |
64 | 65 | TensorFlow 2.0的视频教程链接:[深度学习与TensorFlow 2实战](https://study.163.com/course/courseMain.htm?share=2&shareId=480000001847407&courseId=1209092816&_trace_c_p_k2_=dca16f8fd11a4525bac8c89f779b2cfa) 66 | 67 | 68 | # Acknowledgement 69 | - 爱可可-爱生活 友情推荐 ![](res/weibo.jpg) 70 | 71 | 72 | # Includes 73 | 74 | - TensorFlow 2.0 Overview 75 | - TensorFlow 2.0 Basic Usage 76 | - Linear Regression 77 | - MNIST, FashionMNIST 78 | - CIFAR10 79 | - Fully Connected Layer 80 | - VGG16 81 | - Inception Network 82 | - ResNet18 83 | - Naive RNN 84 | - LSTM 85 | - ColorBot 86 | - Auto-Encoders 87 | - Variational Auto-Encoders 88 | - DCGAN 89 | - CycleGAN 90 | - WGAN 91 | - Pixel2Pixel 92 | - Faster RCNN 93 | - A2C 94 | - GPT 95 | - BERT 96 | - GCN 97 | 98 | Feel free to submit a **PR** request to make this repo. more complete! 99 | 100 | 101 | 102 | # Refered Repos. 103 | 104 | Our work is not built from scratch. Great appreciation to these open works! 105 | 106 | - https://github.com/madalinabuzau/tensorflow-eager-tutorials 107 | - https://github.com/herbiebradley/CycleGAN-Tensorflow 108 | - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/pix2pix/pix2pix_eager.ipynb 109 | - https://github.com/moono/tf-eager-on-GAN 110 | - https://github.com/Viredery/tf-eager-fasterrcnn 111 | - https://github.com/github/gitignore/blob/master/Python.gitignore 112 | 113 | 114 | -------------------------------------------------------------------------------- /res/TF_QR_163.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/res/TF_QR_163.png -------------------------------------------------------------------------------- /res/ai101.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/res/ai101.jpg -------------------------------------------------------------------------------- /res/cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/res/cover.png -------------------------------------------------------------------------------- /res/tensorflow-2.0.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/res/tensorflow-2.0.gif -------------------------------------------------------------------------------- /res/tensorflow-2.0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/res/tensorflow-2.0.jpg -------------------------------------------------------------------------------- /res/weibo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/res/weibo.jpg -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/0-课程介绍/cover.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/0-课程介绍/cover.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/0-课程介绍/深度学习与TF.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/0-课程介绍/深度学习与TF.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/0-课程介绍/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/0-课程介绍/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/Readme.md: -------------------------------------------------------------------------------- 1 | # 深度学习与TensorFlow2入门实战 (Hands-on Deep Learning with TensorFlow 2) 2 | 3 | ## Installation 4 | 5 | Please install Anaconda, CUDA 10.0, and cuDNN first. 6 | 7 | ```bash 8 | pip install -U tensorflow-gpu numpy matplotlib pillow pandas -i https://pypi.tuna.tsinghua.edu.cn/simple 9 | ``` 10 | 11 | ## Course Link 12 | 13 | Video tutorial link: [深度学习与TensorFlow2入门实战](https://study.163.com/course/courseMain.htm?share=2&shareId=480000001847407&courseId=1209092816&_trace_c_p_k2_=10515f5942ba46b5a4ec1c30a46b0269) 14 | 15 | ![](0-课程介绍/版权声明.png) 16 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/autograd.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | x = tf.constant(1.) 5 | a = tf.constant(2.) 6 | b = tf.constant(3.) 7 | c = tf.constant(4.)
8 | 9 | 10 | with tf.GradientTape() as tape: 11 | tape.watch([a, b, c]) 12 | y = a**2 * x + b * x + c 13 | 14 | 15 | [dy_da, dy_db, dy_dc] = tape.gradient(y, [a, b, c]) 16 | print(dy_da, dy_db, dy_dc) 17 | 18 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/gpu_accelerate.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import timeit 3 | 4 | 5 | with tf.device('/cpu:0'): 6 | cpu_a = tf.random.normal([10000, 1000]) 7 | cpu_b = tf.random.normal([1000, 2000]) 8 | print(cpu_a.device, cpu_b.device) 9 | 10 | with tf.device('/gpu:0'): 11 | gpu_a = tf.random.normal([10000, 1000]) 12 | gpu_b = tf.random.normal([1000, 2000]) 13 | print(gpu_a.device, gpu_b.device) 14 | 15 | def cpu_run(): 16 | with tf.device('/cpu:0'): 17 | c = tf.matmul(cpu_a, cpu_b) 18 | return c 19 | 20 | def gpu_run(): 21 | with tf.device('/gpu:0'): 22 | c = tf.matmul(gpu_a, gpu_b) 23 | return c 24 | 25 | 26 | # warm up 27 | cpu_time = timeit.timeit(cpu_run, number=10) 28 | gpu_time = timeit.timeit(gpu_run, number=10) 29 | print('warmup:', cpu_time, gpu_time) 30 | 31 | 32 | cpu_time = timeit.timeit(cpu_run, number=10) 33 | gpu_time = timeit.timeit(gpu_run, number=10) 34 | print('run time:', cpu_time, gpu_time) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/初见TensorFlow2.0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/初见TensorFlow2.0.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson01-初见TensorFlow2.0/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | 6 | 7 | a = tf.constant(1.) 8 | b = tf.constant(2.) 
9 | print(a+b) 10 | 11 | print('GPU:', tf.test.is_gpu_available()) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/开发环境准备.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/开发环境准备.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson02-开发环境准备/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson03-回归问题/回归问题.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson03-回归问题/回归问题.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson03-回归问题/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson03-回归问题/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/linear_regression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | 5 | 6 | # y = wx + b 7 | def compute_error_for_line_given_points(b, w, points): 8 | totalError = 0 9 | for i in range(0, len(points)): 10 | x = points[i, 0] 11 | y = points[i, 1] 12 | # computer mean-squared-error 13 | totalError += (y - (w * x + b)) ** 2 14 | # average loss for each point 15 | return totalError / float(len(points)) 16 | 17 | 18 | 19 | def step_gradient(b_current, w_current, points, learningRate): 20 | b_gradient = 0 21 | w_gradient = 0 22 | N = float(len(points)) 23 | for i in range(0, len(points)): 24 | x = points[i, 0] 25 | y = points[i, 1] 26 | # grad_b = 2(wx+b-y) 27 | b_gradient += (2/N) * ((w_current * x + b_current) - y) 28 | # grad_w = 2(wx+b-y)*x 29 | w_gradient += (2/N) * x * ((w_current * x + b_current) - y) 30 | # update w' 31 | new_b = b_current - (learningRate * b_gradient) 32 | new_w = w_current - (learningRate * 
w_gradient) 33 | return [new_b, new_w] 34 | 35 | def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations): 36 | b = starting_b 37 | w = starting_w 38 | # update for several times 39 | for i in range(num_iterations): 40 | b, w = step_gradient(b, w, np.array(points), learning_rate) 41 | return [b, w] 42 | 43 | 44 | def run(): 45 | 46 | points = np.genfromtxt("data.csv", delimiter=",") 47 | learning_rate = 0.0001 48 | initial_b = 0 # initial y-intercept guess 49 | initial_w = 0 # initial slope guess 50 | num_iterations = 1000 51 | print("Starting gradient descent at b = {0}, w = {1}, error = {2}" 52 | .format(initial_b, initial_w, 53 | compute_error_for_line_given_points(initial_b, initial_w, points)) 54 | ) 55 | print("Running...") 56 | [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations) 57 | print("After {0} iterations b = {1}, w = {2}, error = {3}". 58 | format(num_iterations, b, w, 59 | compute_error_for_line_given_points(b, w, points)) 60 | ) 61 | 62 | if __name__ == '__main__': 63 | run() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/回归实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/回归实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson04-回归问题实战/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson05-手写数字问题/手写数字问题.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson05-手写数字问题/手写数字问题.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson05-手写数字问题/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson05-手写数字问题/版权声明.png -------------------------------------------------------------------------------- 
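The comments in `lesson04-回归问题实战/linear_regression.py` above derive the gradients by hand: grad_b = 2(wx+b-y) and grad_w = 2(wx+b-y)·x. A minimal, hypothetical sanity check (not part of the lesson code; the values are illustrative only) that compares those formulas against `tf.GradientTape` on a single data point:

```python
import tensorflow as tf

# One data point and a current parameter guess (illustrative values only).
x, y = 3.0, 5.0
w = tf.Variable(0.5)
b = tf.Variable(0.1)

with tf.GradientTape() as tape:
    loss = (w * x + b - y) ** 2      # squared error for this single point

# Gradients computed by autodiff.
dloss_db, dloss_dw = tape.gradient(loss, [b, w])

# Gradients from the hand-derived formulas used in step_gradient().
manual_db = 2 * (w.numpy() * x + b.numpy() - y)
manual_dw = manual_db * x

print(float(dloss_db), manual_db)    # both should print -6.8
print(float(dloss_dw), manual_dw)    # both should print -20.4
```

The same check extends to the full dataset by averaging over points, which is exactly what `step_gradient` does with its 2/N factor.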
/深度学习与TensorFlow入门实战-源码和PPT/lesson06-手写数字识别初体验/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | from tensorflow import keras 6 | from tensorflow.keras import layers, optimizers, datasets 7 | 8 | 9 | 10 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 11 | x = tf.convert_to_tensor(x, dtype=tf.float32) / 255. 12 | y = tf.convert_to_tensor(y, dtype=tf.int32) 13 | y = tf.one_hot(y, depth=10) 14 | print(x.shape, y.shape) 15 | train_dataset = tf.data.Dataset.from_tensor_slices((x, y)) 16 | train_dataset = train_dataset.batch(200) 17 | 18 | 19 | 20 | 21 | model = keras.Sequential([ 22 | layers.Dense(512, activation='relu'), 23 | layers.Dense(256, activation='relu'), 24 | layers.Dense(10)]) 25 | 26 | optimizer = optimizers.SGD(learning_rate=0.001) 27 | 28 | 29 | def train_epoch(epoch): 30 | 31 | # Step4.loop 32 | for step, (x, y) in enumerate(train_dataset): 33 | 34 | 35 | with tf.GradientTape() as tape: 36 | # [b, 28, 28] => [b, 784] 37 | x = tf.reshape(x, (-1, 28*28)) 38 | # Step1. compute output 39 | # [b, 784] => [b, 10] 40 | out = model(x) 41 | # Step2. compute loss 42 | loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0] 43 | 44 | # Step3. optimize and update w1, w2, w3, b1, b2, b3 45 | grads = tape.gradient(loss, model.trainable_variables) 46 | # w' = w - lr * grad 47 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 48 | 49 | if step % 100 == 0: 50 | print(epoch, step, 'loss:', loss.numpy()) 51 | 52 | 53 | 54 | def train(): 55 | 56 | for epoch in range(30): 57 | 58 | train_epoch(epoch) 59 | 60 | 61 | 62 | 63 | 64 | 65 | if __name__ == '__main__': 66 | train() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson06-手写数字识别初体验/手写数字问题体验.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson06-手写数字识别初体验/手写数字问题体验.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson06-手写数字识别初体验/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson06-手写数字识别初体验/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson07-数据类型/代码量较少,同学们自己动手练习.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson07-数据类型/代码量较少,同学们自己动手练习.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson07-数据类型/数据类型.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson07-数据类型/数据类型.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson07-数据类型/版权声明.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson07-数据类型/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson08-创建Tensor/代码量较少,同学们自己动手练习.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson08-创建Tensor/代码量较少,同学们自己动手练习.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson08-创建Tensor/创建Tensor.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson08-创建Tensor/创建Tensor.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson08-创建Tensor/版权声明.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson08-创建Tensor/版权声明.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson09-索引与切片/代码量较少,同学们自己动手练习.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson09-索引与切片/代码量较少,同学们自己动手练习.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson09-索引与切片/索引与切片-1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson09-索引与切片/索引与切片-1.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson09-索引与切片/索引与切片-2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson09-索引与切片/索引与切片-2.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson10-维度变换/维度变换.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson10-维度变换/维度变换.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson11-Broadcasting/Broadcasting.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson11-Broadcasting/Broadcasting.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson12-数学运算/数学运算.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson12-数学运算/数学运算.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/forward.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | from tensorflow.keras import datasets 4 | import os 5 | 6 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 7 | 8 | # x: [60k, 28, 28], 9 | # y: [60k] 10 | (x, y), _ = datasets.mnist.load_data() 11 | # x: [0~255] => [0~1.] 12 | x = tf.convert_to_tensor(x, dtype=tf.float32) / 255. 13 | y = tf.convert_to_tensor(y, dtype=tf.int32) 14 | 15 | print(x.shape, y.shape, x.dtype, y.dtype) 16 | print(tf.reduce_min(x), tf.reduce_max(x)) 17 | print(tf.reduce_min(y), tf.reduce_max(y)) 18 | 19 | 20 | train_db = tf.data.Dataset.from_tensor_slices((x,y)).batch(128) 21 | train_iter = iter(train_db) 22 | sample = next(train_iter) 23 | print('batch:', sample[0].shape, sample[1].shape) 24 | 25 | 26 | # [b, 784] => [b, 256] => [b, 128] => [b, 10] 27 | # [dim_in, dim_out], [dim_out] 28 | w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1)) 29 | b1 = tf.Variable(tf.zeros([256])) 30 | w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1)) 31 | b2 = tf.Variable(tf.zeros([128])) 32 | w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1)) 33 | b3 = tf.Variable(tf.zeros([10])) 34 | 35 | lr = 1e-3 36 | 37 | for epoch in range(10): # iterate db for 10 38 | for step, (x, y) in enumerate(train_db): # for every batch 39 | # x:[128, 28, 28] 40 | # y: [128] 41 | 42 | # [b, 28, 28] => [b, 28*28] 43 | x = tf.reshape(x, [-1, 28*28]) 44 | 45 | with tf.GradientTape() as tape: # tf.Variable 46 | # x: [b, 28*28] 47 | # h1 = x@w1 + b1 48 | # [b, 784]@[784, 256] + [256] => [b, 256] + [256] => [b, 256] + [b, 256] 49 | h1 = x@w1 + tf.broadcast_to(b1, [x.shape[0], 256]) 50 | h1 = tf.nn.relu(h1) 51 | # [b, 256] => [b, 128] 52 | h2 = h1@w2 + b2 53 | h2 = tf.nn.relu(h2) 54 | # [b, 128] => [b, 10] 55 | out = h2@w3 + b3 56 | 57 | # compute loss 58 | # out: [b, 10] 59 | # y: [b] => [b, 10] 60 | y_onehot = tf.one_hot(y, depth=10) 61 | 62 | # mse = mean(sum(y-out)^2) 63 | # [b, 10] 64 | loss = tf.square(y_onehot - out) 65 | # mean: scalar 66 | loss = tf.reduce_mean(loss) 67 | 68 | # compute gradients 69 | grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3]) 70 | # print(grads) 71 | # w1 = w1 - lr * w1_grad 72 | w1.assign_sub(lr * grads[0]) 73 | b1.assign_sub(lr * grads[1]) 74 | w2.assign_sub(lr * grads[2]) 75 | b2.assign_sub(lr * grads[3]) 76 | w3.assign_sub(lr * grads[4]) 77 | b3.assign_sub(lr * grads[5]) 78 | 79 | 80 | if step % 100 == 0: 81 | print(epoch, step, 'loss:', float(loss)) 82 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/前向传播.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/前向传播.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/答疑群-926107229.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson13-前向传播(张量)-实战/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson14-合并与分割/合并与分割.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson14-合并与分割/合并与分割.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson15-数据统计/数据统计.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson15-数据统计/数据统计.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson16-张量排序/topk.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import os 3 | 4 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 5 | tf.random.set_seed(2467) 6 | 7 | def accuracy(output, target, topk=(1,)): 8 | maxk = max(topk) 9 | batch_size = target.shape[0] 10 | 11 | pred = tf.math.top_k(output, maxk).indices 12 | pred = tf.transpose(pred, perm=[1, 0]) 13 | target_ = tf.broadcast_to(target, pred.shape) 14 | # [10, b] 15 | correct = tf.equal(pred, target_) 16 | 17 | res = [] 18 | for k in topk: 19 | correct_k = tf.cast(tf.reshape(correct[:k], [-1]), dtype=tf.float32) 20 | correct_k = tf.reduce_sum(correct_k) 21 | acc = float(correct_k* (100.0 / batch_size) ) 22 | res.append(acc) 23 | 24 | return res 25 | 26 | 27 | 28 | output = tf.random.normal([10, 6]) 29 | output = tf.math.softmax(output, axis=1) 30 | target = tf.random.uniform([10], maxval=6, dtype=tf.int32) 31 | print('prob:', output.numpy()) 32 | pred = tf.argmax(output, axis=1) 33 | print('pred:', pred.numpy()) 34 | print('label:', target.numpy()) 35 | 36 | acc = accuracy(output, target, topk=(1,2,3,4,5,6)) 37 | print('top-1-6 acc:', acc) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson16-张量排序/张量排序.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson16-张量排序/张量排序.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson17-填充与复制/填充与复制.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson17-填充与复制/填充与复制.pdf 
-------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson18-数据限幅/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | from tensorflow.keras import datasets, layers, optimizers 4 | import os 5 | 6 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 7 | print(tf.__version__) 8 | 9 | (x, y), _ = datasets.mnist.load_data() 10 | x = tf.convert_to_tensor(x, dtype=tf.float32) / 50. 11 | y = tf.convert_to_tensor(y) 12 | y = tf.one_hot(y, depth=10) 13 | print('x:', x.shape, 'y:', y.shape) 14 | train_db = tf.data.Dataset.from_tensor_slices((x,y)).batch(128).repeat(30) 15 | x,y = next(iter(train_db)) 16 | print('sample:', x.shape, y.shape) 17 | # print(x[0], y[0]) 18 | 19 | 20 | 21 | def main(): 22 | 23 | # 784 => 512 24 | w1, b1 = tf.Variable(tf.random.truncated_normal([784, 512], stddev=0.1)), tf.Variable(tf.zeros([512])) 25 | # 512 => 256 26 | w2, b2 = tf.Variable(tf.random.truncated_normal([512, 256], stddev=0.1)), tf.Variable(tf.zeros([256])) 27 | # 256 => 10 28 | w3, b3 = tf.Variable(tf.random.truncated_normal([256, 10], stddev=0.1)), tf.Variable(tf.zeros([10])) 29 | 30 | 31 | 32 | optimizer = optimizers.SGD(lr=0.01) 33 | 34 | 35 | for step, (x,y) in enumerate(train_db): 36 | 37 | # [b, 28, 28] => [b, 784] 38 | x = tf.reshape(x, (-1, 784)) 39 | 40 | with tf.GradientTape() as tape: 41 | 42 | # layer1. 43 | h1 = x @ w1 + b1 44 | h1 = tf.nn.relu(h1) 45 | # layer2 46 | h2 = h1 @ w2 + b2 47 | h2 = tf.nn.relu(h2) 48 | # output 49 | out = h2 @ w3 + b3 50 | # out = tf.nn.relu(out) 51 | 52 | # compute loss 53 | # [b, 10] - [b, 10] 54 | loss = tf.square(y-out) 55 | # [b, 10] => [b] 56 | loss = tf.reduce_mean(loss, axis=1) 57 | # [b] => scalar 58 | loss = tf.reduce_mean(loss) 59 | 60 | 61 | 62 | # compute gradient 63 | grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3]) 64 | # print('==before==') 65 | # for g in grads: 66 | # print(tf.norm(g)) 67 | 68 | grads, _ = tf.clip_by_global_norm(grads, 15) 69 | 70 | # print('==after==') 71 | # for g in grads: 72 | # print(tf.norm(g)) 73 | # update w' = w - lr*grad 74 | optimizer.apply_gradients(zip(grads, [w1, b1, w2, b2, w3, b3])) 75 | 76 | 77 | 78 | if step % 100 == 0: 79 | print(step, 'loss:', float(loss)) 80 | 81 | 82 | 83 | 84 | if __name__ == '__main__': 85 | main() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson18-数据限幅/张量限幅.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson18-数据限幅/张量限幅.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson19-高阶OP/meshgrid.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | import matplotlib.pyplot as plt 4 | 5 | 6 | def func(x): 7 | """ 8 | 9 | :param x: [b, 2] 10 | :return: 11 | """ 12 | z = tf.math.sin(x[...,0]) + tf.math.sin(x[...,1]) 13 | 14 | return z 15 | 16 | 17 | x = tf.linspace(0., 2*3.14, 500) 18 | y = tf.linspace(0., 2*3.14, 500) 19 | # [50, 50] 20 | point_x, point_y = tf.meshgrid(x, y) 21 | # [50, 50, 2] 22 | points = tf.stack([point_x, point_y], axis=2) 23 | # points = tf.reshape(points, [-1, 2]) 24 | print('points:', points.shape) 25 | z = func(points) 26 | print('z:', z.shape) 
27 | 28 | plt.figure('plot 2d func value') 29 | plt.imshow(z, origin='lower', interpolation='none') 30 | plt.colorbar() 31 | 32 | plt.figure('plot 2d func contour') 33 | plt.contour(point_x, point_y, z) 34 | plt.colorbar() 35 | plt.show() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson19-高阶OP/高阶特性.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson19-高阶OP/高阶特性.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson20-数据加载/数据加载.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson20-数据加载/数据加载.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson21-测试(张量)-实战/测试(张量)实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson21-测试(张量)-实战/测试(张量)实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson22-全连接层/mlp.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | 4 | 5 | 6 | 7 | 8 | x = tf.random.normal([2, 3]) 9 | 10 | model = keras.Sequential([ 11 | keras.layers.Dense(2, activation='relu'), 12 | keras.layers.Dense(2, activation='relu'), 13 | keras.layers.Dense(2) 14 | ]) 15 | model.build(input_shape=[None, 3]) 16 | model.summary() 17 | 18 | for p in model.trainable_variables: 19 | print(p.name, p.shape) 20 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson22-全连接层/全接连层.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson22-全连接层/全接连层.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson23-输出方式/输出方式.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson23-输出方式/输出方式.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson24-误差计算/loss.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | 5 | 6 | y = tf.constant([1, 2, 3, 0, 2]) 7 | y = tf.one_hot(y, depth=4) 8 | y = tf.cast(y, dtype=tf.float32) 9 | 10 | out = tf.random.normal([5, 4]) 11 | 12 | 13 | loss1 = tf.reduce_mean(tf.square(y-out)) 14 | 15 | loss2 = tf.square(tf.norm(y-out))/(5*4) 16 | 17 | loss3 = tf.reduce_mean(tf.losses.MSE(y, out)) # VS MeanSquaredError is a class 18 | 19 | 20 | print(loss1) 21 | print(loss2) 22 | print(loss3) -------------------------------------------------------------------------------- 
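On the `loss3` line in `lesson24-误差计算/loss.py` above: `tf.losses.MSE` is a function that returns one error value per sample, while `tf.keras.losses.MeanSquaredError` is a class whose instances are called like a function and, with the default reduction, already return the batch-averaged scalar. A minimal sketch of the equivalence, assuming TensorFlow 2.x and the same shapes as in `loss.py`:

```python
import tensorflow as tf

# Same setup as loss.py: one-hot labels and random predictions, both [5, 4].
y = tf.cast(tf.one_hot(tf.constant([1, 2, 3, 0, 2]), depth=4), dtype=tf.float32)
out = tf.random.normal([5, 4])

# Function form: reduces over the last axis only -> one value per sample, shape [5].
per_sample = tf.losses.MSE(y, out)
loss_fn = tf.reduce_mean(per_sample)     # scalar, same as loss3 in loss.py

# Class form: instantiate once, then call; the default reduction averages over
# the batch, so it returns the scalar directly.
mse = tf.keras.losses.MeanSquaredError()
loss_cls = mse(y, out)

print(float(loss_fn), float(loss_cls))   # the two scalars match
```

Either form can be passed to `model.compile(loss=...)`; the class form is the one to reach for when you need to configure the reduction or other constructor arguments.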
/深度学习与TensorFlow入门实战-源码和PPT/lesson24-误差计算/误差计算.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson24-误差计算/误差计算.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/0.梯度下降-简介.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/0.梯度下降-简介.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/2.常见函数的梯度.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/2.常见函数的梯度.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/2nd_derivative.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | w = tf.Variable(1.0) 4 | b = tf.Variable(2.0) 5 | x = tf.Variable(3.0) 6 | 7 | with tf.GradientTape() as t1: 8 | with tf.GradientTape() as t2: 9 | y = x * w + b 10 | dy_dw, dy_db = t2.gradient(y, [w, b]) 11 | d2y_dw2 = t1.gradient(dy_dw, w) 12 | 13 | print(dy_dw) 14 | print(dy_db) 15 | print(d2y_dw2) 16 | 17 | assert dy_dw.numpy() == 3.0 18 | assert d2y_dw2 is None -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/3.激活函数及其梯度.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/3.激活函数及其梯度.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/4.损失函数及其梯度.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/4.损失函数及其梯度.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/5.单输出感知机梯度.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/5.单输出感知机梯度.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/6.多输出感知机梯度.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/6.多输出感知机梯度.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/7.链式法则.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/7.链式法则.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/8.多层感知机梯度.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/8.多层感知机梯度.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/chain_rule.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | x = tf.constant(1.) 4 | w1 = tf.constant(2.) 5 | b1 = tf.constant(1.) 6 | w2 = tf.constant(2.) 7 | b2 = tf.constant(1.) 8 | 9 | 10 | with tf.GradientTape(persistent=True) as tape: 11 | 12 | tape.watch([w1, b1, w2, b2]) 13 | 14 | y1 = x * w1 + b1 15 | y2 = y1 * w2 + b2 16 | 17 | dy2_dy1 = tape.gradient(y2, [y1])[0] 18 | dy1_dw1 = tape.gradient(y1, [w1])[0] 19 | dy2_dw1 = tape.gradient(y2, [w1])[0] 20 | 21 | 22 | print(dy2_dy1 * dy1_dw1) 23 | print(dy2_dw1) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/crossentropy_loss.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | tf.random.set_seed(4323) 5 | 6 | x=tf.random.normal([1,3]) 7 | 8 | w=tf.random.normal([3,2]) 9 | 10 | b=tf.random.normal([2]) 11 | 12 | y = tf.constant([0, 1]) 13 | 14 | 15 | with tf.GradientTape() as tape: 16 | 17 | tape.watch([w, b]) 18 | logits = (x@w+b) 19 | loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y, logits, from_logits=True)) 20 | 21 | grads = tape.gradient(loss, [w, b]) 22 | print('w grad:', grads[0]) 23 | 24 | print('b grad:', grads[1]) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/mse_grad.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | 5 | 6 | x=tf.random.normal([1,3]) 7 | 8 | w=tf.ones([3,2]) 9 | 10 | b=tf.ones([2]) 11 | 12 | y = tf.constant([0, 1]) 13 | 14 | 15 | with tf.GradientTape() as tape: 16 | 17 | tape.watch([w, b]) 18 | logits = tf.sigmoid(x@w+b) 19 | loss = tf.reduce_mean(tf.losses.MSE(y, logits)) 20 | 21 | grads = tape.gradient(loss, [w, b]) 22 | print('w grad:', grads[0]) 23 | 24 | print('b grad:', grads[1]) 25 | 26 | 27 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/multi_output_perceptron.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | 5 | 6 | x=tf.random.normal([1,3]) 7 | 8 | w=tf.ones([3,2]) 9 | 10 | b=tf.ones([2]) 11 | 12 | y = tf.constant([0, 1]) 13 | 14 | 15 | with tf.GradientTape() as tape: 16 | 17 | tape.watch([w, b]) 18 | logits = tf.sigmoid(x@w+b) 19 | loss = tf.reduce_mean(tf.losses.MSE(y, logits)) 20 | 21 | grads = tape.gradient(loss, [w, b]) 22 | print('w grad:', grads[0]) 23 | 24 | print('b grad:', grads[1]) 25 | 26 | 27 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/sigmoid_grad.py: 
-------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | a = tf.linspace(-10., 10., 10) 5 | 6 | with tf.GradientTape() as tape: 7 | tape.watch(a) 8 | y = tf.sigmoid(a) 9 | 10 | 11 | grads = tape.gradient(y, [a]) 12 | print('x:', a.numpy()) 13 | print('y:', y.numpy()) 14 | print('grad:', grads[0].numpy()) 15 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson25-梯度计算/single_output_perceptron.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | 5 | 6 | x=tf.random.normal([1,3]) 7 | 8 | w=tf.ones([3,1]) 9 | 10 | b=tf.ones([1]) 11 | 12 | y = tf.constant([1]) 13 | 14 | 15 | with tf.GradientTape() as tape: 16 | 17 | tape.watch([w, b]) 18 | logits = tf.sigmoid(x@w+b) 19 | loss = tf.reduce_mean(tf.losses.MSE(y, logits)) 20 | 21 | grads = tape.gradient(loss, [w, b]) 22 | print('w grad:', grads[0]) 23 | 24 | print('b grad:', grads[1]) 25 | 26 | 27 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson26-优化方法/himmelblau.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from mpl_toolkits.mplot3d import Axes3D 3 | from matplotlib import pyplot as plt 4 | import tensorflow as tf 5 | 6 | 7 | 8 | def himmelblau(x): 9 | return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2 10 | 11 | 12 | x = np.arange(-6, 6, 0.1) 13 | y = np.arange(-6, 6, 0.1) 14 | print('x,y range:', x.shape, y.shape) 15 | X, Y = np.meshgrid(x, y) 16 | print('X,Y maps:', X.shape, Y.shape) 17 | Z = himmelblau([X, Y]) 18 | 19 | fig = plt.figure('himmelblau') 20 | ax = fig.gca(projection='3d') 21 | ax.plot_surface(X, Y, Z) 22 | ax.view_init(60, -30) 23 | ax.set_xlabel('x') 24 | ax.set_ylabel('y') 25 | plt.show() 26 | 27 | 28 | # [1., 0.], [-4, 0.], [4, 0.] 
29 | x = tf.constant([4., 0.]) 30 | 31 | for step in range(200): 32 | 33 | with tf.GradientTape() as tape: 34 | tape.watch([x]) 35 | y = himmelblau(x) 36 | 37 | grads = tape.gradient(y, [x])[0] 38 | x -= 0.01*grads 39 | 40 | 41 | 42 | if step % 20 == 0: 43 | print ('step {}: x = {}, f(x) = {}' 44 | .format(step, x.numpy(), y.numpy())) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson26-优化方法/函数优化实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson26-优化方法/函数优化实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/MNIST实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/MNIST实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/fashionmnist_layer.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 3 | 4 | import tensorflow as tf 5 | from tensorflow import keras 6 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 7 | 8 | assert tf.__version__.startswith('2.') 9 | 10 | def preprocess(x, y): 11 | 12 | x = tf.cast(x, dtype=tf.float32) / 255. 13 | y = tf.cast(y, dtype=tf.int32) 14 | return x,y 15 | 16 | 17 | (x, y), (x_test, y_test) = datasets.fashion_mnist.load_data() 18 | print(x.shape, y.shape) 19 | 20 | 21 | batchsz = 128 22 | 23 | db = tf.data.Dataset.from_tensor_slices((x,y)) 24 | db = db.map(preprocess).shuffle(10000).batch(batchsz) 25 | 26 | db_test = tf.data.Dataset.from_tensor_slices((x_test,y_test)) 27 | db_test = db_test.map(preprocess).batch(batchsz) 28 | 29 | db_iter = iter(db) 30 | sample = next(db_iter) 31 | print('batch:', sample[0].shape, sample[1].shape) 32 | 33 | 34 | model = Sequential([ 35 | layers.Dense(256, activation=tf.nn.relu), # [b, 784] => [b, 256] 36 | layers.Dense(128, activation=tf.nn.relu), # [b, 256] => [b, 128] 37 | layers.Dense(64, activation=tf.nn.relu), # [b, 128] => [b, 64] 38 | layers.Dense(32, activation=tf.nn.relu), # [b, 64] => [b, 32] 39 | layers.Dense(10) # [b, 32] => [b, 10], 330 = 32*10 + 10 40 | ]) 41 | model.build(input_shape=[None, 28*28]) 42 | model.summary() 43 | # w = w - lr*grad 44 | optimizer = optimizers.Adam(lr=1e-3) 45 | 46 | def main(): 47 | 48 | 49 | for epoch in range(30): 50 | 51 | 52 | for step, (x,y) in enumerate(db): 53 | 54 | # x: [b, 28, 28] => [b, 784] 55 | # y: [b] 56 | x = tf.reshape(x, [-1, 28*28]) 57 | 58 | with tf.GradientTape() as tape: 59 | # [b, 784] => [b, 10] 60 | logits = model(x) 61 | y_onehot = tf.one_hot(y, depth=10) 62 | # [b] 63 | loss_mse = tf.reduce_mean(tf.losses.MSE(y_onehot, logits)) 64 | loss_ce = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True) 65 | loss_ce = tf.reduce_mean(loss_ce) 66 | 67 | grads = tape.gradient(loss_ce, model.trainable_variables) 68 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 69 | 70 | 71 | if step % 100 == 0: 72 | print(epoch, step, 'loss:', float(loss_ce), float(loss_mse)) 73 | 74 | 75 | # test 76 | 
total_correct = 0 77 | total_num = 0 78 | for x,y in db_test: 79 | 80 | # x: [b, 28, 28] => [b, 784] 81 | # y: [b] 82 | x = tf.reshape(x, [-1, 28*28]) 83 | # [b, 10] 84 | logits = model(x) 85 | # logits => prob, [b, 10] 86 | prob = tf.nn.softmax(logits, axis=1) 87 | # [b, 10] => [b], int64 88 | pred = tf.argmax(prob, axis=1) 89 | pred = tf.cast(pred, dtype=tf.int32) 90 | # pred:[b] 91 | # y: [b] 92 | # correct: [b], True: equal, False: not equal 93 | correct = tf.equal(pred, y) 94 | correct = tf.reduce_sum(tf.cast(correct, dtype=tf.int32)) 95 | 96 | total_correct += int(correct) 97 | total_num += x.shape[0] 98 | 99 | acc = total_correct / total_num 100 | print(epoch, 'test acc:', acc) 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | if __name__ == '__main__': 109 | main() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson27-书写数字问题(层)-实战/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson28-可视化/可视化.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson28-可视化/可视化.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/1.Metrics.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/1.Metrics.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/2.Compile&Fit.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/2.Compile&Fit.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/3.自定义层.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/3.自定义层.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/compile_fit.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, 
metrics 3 | 4 | 5 | def preprocess(x, y): 6 | """ 7 | x is a simple image, not a batch 8 | """ 9 | x = tf.cast(x, dtype=tf.float32) / 255. 10 | x = tf.reshape(x, [28*28]) 11 | y = tf.cast(y, dtype=tf.int32) 12 | y = tf.one_hot(y, depth=10) 13 | return x,y 14 | 15 | 16 | batchsz = 128 17 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 18 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 19 | 20 | 21 | 22 | db = tf.data.Dataset.from_tensor_slices((x,y)) 23 | db = db.map(preprocess).shuffle(60000).batch(batchsz) 24 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 25 | ds_val = ds_val.map(preprocess).batch(batchsz) 26 | 27 | sample = next(iter(db)) 28 | print(sample[0].shape, sample[1].shape) 29 | 30 | 31 | network = Sequential([layers.Dense(256, activation='relu'), 32 | layers.Dense(128, activation='relu'), 33 | layers.Dense(64, activation='relu'), 34 | layers.Dense(32, activation='relu'), 35 | layers.Dense(10)]) 36 | network.build(input_shape=(None, 28*28)) 37 | network.summary() 38 | 39 | 40 | 41 | 42 | network.compile(optimizer=optimizers.Adam(lr=0.01), 43 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 44 | metrics=['accuracy'] 45 | ) 46 | 47 | network.fit(db, epochs=5, validation_data=ds_val, validation_freq=2) 48 | 49 | network.evaluate(ds_val) 50 | 51 | sample = next(iter(ds_val)) 52 | x = sample[0] 53 | y = sample[1] # one-hot 54 | pred = network.predict(x) # [b, 10] 55 | # convert back to number 56 | y = tf.argmax(y, axis=1) 57 | pred = tf.argmax(pred, axis=1) 58 | 59 | print(pred) 60 | print(y) 61 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/layer_model.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 3 | from tensorflow import keras 4 | 5 | def preprocess(x, y): 6 | """ 7 | x is a simple image, not a batch 8 | """ 9 | x = tf.cast(x, dtype=tf.float32) / 255. 
10 | x = tf.reshape(x, [28*28]) 11 | y = tf.cast(y, dtype=tf.int32) 12 | y = tf.one_hot(y, depth=10) 13 | return x,y 14 | 15 | 16 | batchsz = 128 17 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 18 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 19 | 20 | 21 | 22 | db = tf.data.Dataset.from_tensor_slices((x,y)) 23 | db = db.map(preprocess).shuffle(60000).batch(batchsz) 24 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 25 | ds_val = ds_val.map(preprocess).batch(batchsz) 26 | 27 | sample = next(iter(db)) 28 | print(sample[0].shape, sample[1].shape) 29 | 30 | 31 | network = Sequential([layers.Dense(256, activation='relu'), 32 | layers.Dense(128, activation='relu'), 33 | layers.Dense(64, activation='relu'), 34 | layers.Dense(32, activation='relu'), 35 | layers.Dense(10)]) 36 | network.build(input_shape=(None, 28*28)) 37 | network.summary() 38 | 39 | 40 | class MyDense(layers.Layer): 41 | 42 | def __init__(self, inp_dim, outp_dim): 43 | super(MyDense, self).__init__() 44 | 45 | self.kernel = self.add_weight('w', [inp_dim, outp_dim]) 46 | self.bias = self.add_weight('b', [outp_dim]) 47 | 48 | def call(self, inputs, training=None): 49 | 50 | out = inputs @ self.kernel + self.bias 51 | 52 | return out 53 | 54 | class MyModel(keras.Model): 55 | 56 | def __init__(self): 57 | super(MyModel, self).__init__() 58 | 59 | self.fc1 = MyDense(28*28, 256) 60 | self.fc2 = MyDense(256, 128) 61 | self.fc3 = MyDense(128, 64) 62 | self.fc4 = MyDense(64, 32) 63 | self.fc5 = MyDense(32, 10) 64 | 65 | def call(self, inputs, training=None): 66 | 67 | x = self.fc1(inputs) 68 | x = tf.nn.relu(x) 69 | x = self.fc2(x) 70 | x = tf.nn.relu(x) 71 | x = self.fc3(x) 72 | x = tf.nn.relu(x) 73 | x = self.fc4(x) 74 | x = tf.nn.relu(x) 75 | x = self.fc5(x) 76 | 77 | return x 78 | 79 | 80 | network = MyModel() 81 | 82 | 83 | network.compile(optimizer=optimizers.Adam(lr=0.01), 84 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 85 | metrics=['accuracy'] 86 | ) 87 | 88 | network.fit(db, epochs=5, validation_data=ds_val, 89 | validation_freq=2) 90 | 91 | network.evaluate(ds_val) 92 | 93 | sample = next(iter(ds_val)) 94 | x = sample[0] 95 | y = sample[1] # one-hot 96 | pred = network.predict(x) # [b, 10] 97 | # convert back to number 98 | y = tf.argmax(y, axis=1) 99 | pred = tf.argmax(pred, axis=1) 100 | 101 | print(pred) 102 | print(y) 103 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson30-Keras高层API/metrics.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 3 | 4 | 5 | def preprocess(x, y): 6 | 7 | x = tf.cast(x, dtype=tf.float32) / 255. 
8 | y = tf.cast(y, dtype=tf.int32) 9 | 10 | return x,y 11 | 12 | 13 | batchsz = 128 14 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 15 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 16 | 17 | 18 | 19 | db = tf.data.Dataset.from_tensor_slices((x,y)) 20 | db = db.map(preprocess).shuffle(60000).batch(batchsz).repeat(10) 21 | 22 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 23 | ds_val = ds_val.map(preprocess).batch(batchsz) 24 | 25 | 26 | 27 | 28 | network = Sequential([layers.Dense(256, activation='relu'), 29 | layers.Dense(128, activation='relu'), 30 | layers.Dense(64, activation='relu'), 31 | layers.Dense(32, activation='relu'), 32 | layers.Dense(10)]) 33 | network.build(input_shape=(None, 28*28)) 34 | network.summary() 35 | 36 | optimizer = optimizers.Adam(lr=0.01) 37 | 38 | acc_meter = metrics.Accuracy() 39 | loss_meter = metrics.Mean() 40 | 41 | 42 | for step, (x,y) in enumerate(db): 43 | 44 | with tf.GradientTape() as tape: 45 | # [b, 28, 28] => [b, 784] 46 | x = tf.reshape(x, (-1, 28*28)) 47 | # [b, 784] => [b, 10] 48 | out = network(x) 49 | # [b] => [b, 10] 50 | y_onehot = tf.one_hot(y, depth=10) 51 | # [b] 52 | loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True)) 53 | 54 | loss_meter.update_state(loss) 55 | 56 | 57 | 58 | grads = tape.gradient(loss, network.trainable_variables) 59 | optimizer.apply_gradients(zip(grads, network.trainable_variables)) 60 | 61 | 62 | if step % 100 == 0: 63 | 64 | print(step, 'loss:', loss_meter.result().numpy()) 65 | loss_meter.reset_states() 66 | 67 | 68 | # evaluate 69 | if step % 500 == 0: 70 | total, total_correct = 0., 0 71 | acc_meter.reset_states() 72 | 73 | for step, (x, y) in enumerate(ds_val): 74 | # [b, 28, 28] => [b, 784] 75 | x = tf.reshape(x, (-1, 28*28)) 76 | # [b, 784] => [b, 10] 77 | out = network(x) 78 | 79 | 80 | # [b, 10] => [b] 81 | pred = tf.argmax(out, axis=1) 82 | pred = tf.cast(pred, dtype=tf.int32) 83 | # bool type 84 | correct = tf.equal(pred, y) 85 | # bool tensor => int tensor => numpy 86 | total_correct += tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy() 87 | total += x.shape[0] 88 | 89 | acc_meter.update_state(y, pred) 90 | 91 | 92 | print(step, 'Evaluate Acc:', total_correct/total, acc_meter.result().numpy()) 93 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/model.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/model.h5 -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/save_load_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 6 | 7 | 8 | def preprocess(x, y): 9 | """ 10 | x is a simple image, not a batch 11 | """ 12 | x = tf.cast(x, dtype=tf.float32) / 255. 
13 | x = tf.reshape(x, [28*28]) 14 | y = tf.cast(y, dtype=tf.int32) 15 | y = tf.one_hot(y, depth=10) 16 | return x,y 17 | 18 | 19 | batchsz = 128 20 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 21 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 22 | 23 | 24 | 25 | db = tf.data.Dataset.from_tensor_slices((x,y)) 26 | db = db.map(preprocess).shuffle(60000).batch(batchsz) 27 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 28 | ds_val = ds_val.map(preprocess).batch(batchsz) 29 | 30 | sample = next(iter(db)) 31 | print(sample[0].shape, sample[1].shape) 32 | 33 | 34 | network = Sequential([layers.Dense(256, activation='relu'), 35 | layers.Dense(128, activation='relu'), 36 | layers.Dense(64, activation='relu'), 37 | layers.Dense(32, activation='relu'), 38 | layers.Dense(10)]) 39 | network.build(input_shape=(None, 28*28)) 40 | network.summary() 41 | 42 | 43 | 44 | 45 | network.compile(optimizer=optimizers.Adam(lr=0.01), 46 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 47 | metrics=['accuracy'] 48 | ) 49 | 50 | network.fit(db, epochs=3, validation_data=ds_val, validation_freq=2) 51 | 52 | network.evaluate(ds_val) 53 | 54 | network.save('model.h5') 55 | print('saved total model.') 56 | del network 57 | 58 | print('loaded model from file.') 59 | network = tf.keras.models.load_model('model.h5', compile=False) 60 | network.compile(optimizer=optimizers.Adam(lr=0.01), 61 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 62 | metrics=['accuracy'] 63 | ) 64 | x_val = tf.cast(x_val, dtype=tf.float32) / 255. 65 | x_val = tf.reshape(x_val, [-1, 28*28]) 66 | y_val = tf.cast(y_val, dtype=tf.int32) 67 | y_val = tf.one_hot(y_val, depth=10) 68 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(128) 69 | network.evaluate(ds_val) 70 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/save_load_weight.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 6 | 7 | 8 | def preprocess(x, y): 9 | """ 10 | x is a simple image, not a batch 11 | """ 12 | x = tf.cast(x, dtype=tf.float32) / 255. 
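# In contrast to save_load_model.py, this script persists only the parameters via
# network.save_weights('weights.ckpt'); an identical Sequential architecture is
# rebuilt and compiled below before load_weights() restores the saved values.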
13 | x = tf.reshape(x, [28*28]) 14 | y = tf.cast(y, dtype=tf.int32) 15 | y = tf.one_hot(y, depth=10) 16 | return x,y 17 | 18 | 19 | batchsz = 128 20 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 21 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 22 | 23 | 24 | 25 | db = tf.data.Dataset.from_tensor_slices((x,y)) 26 | db = db.map(preprocess).shuffle(60000).batch(batchsz) 27 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 28 | ds_val = ds_val.map(preprocess).batch(batchsz) 29 | 30 | sample = next(iter(db)) 31 | print(sample[0].shape, sample[1].shape) 32 | 33 | 34 | network = Sequential([layers.Dense(256, activation='relu'), 35 | layers.Dense(128, activation='relu'), 36 | layers.Dense(64, activation='relu'), 37 | layers.Dense(32, activation='relu'), 38 | layers.Dense(10)]) 39 | network.build(input_shape=(None, 28*28)) 40 | network.summary() 41 | 42 | 43 | 44 | 45 | network.compile(optimizer=optimizers.Adam(lr=0.01), 46 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 47 | metrics=['accuracy'] 48 | ) 49 | 50 | network.fit(db, epochs=3, validation_data=ds_val, validation_freq=2) 51 | 52 | network.evaluate(ds_val) 53 | 54 | network.save_weights('weights.ckpt') 55 | print('saved weights.') 56 | del network 57 | 58 | network = Sequential([layers.Dense(256, activation='relu'), 59 | layers.Dense(128, activation='relu'), 60 | layers.Dense(64, activation='relu'), 61 | layers.Dense(32, activation='relu'), 62 | layers.Dense(10)]) 63 | network.compile(optimizer=optimizers.Adam(lr=0.01), 64 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 65 | metrics=['accuracy'] 66 | ) 67 | network.load_weights('weights.ckpt') 68 | print('loaded weights!') 69 | network.evaluate(ds_val) 70 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/模型加载与保存.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/模型加载与保存.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/Keras实战CIFAR10.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/Keras实战CIFAR10.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/keras_train.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 6 | from tensorflow import keras 7 | 8 | 9 | 10 | def preprocess(x, y): 11 | # [0~255] => [-1~1] 12 | x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1. 
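# The line above rescales CIFAR-10 pixels from [0, 255] to [-1, 1]. Note that the
# MyDense layer below calls self.add_variable(), which early TF 2.0 accepted; on
# newer releases the supported spelling is self.add_weight() with the same arguments.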
13 | y = tf.cast(y, dtype=tf.int32) 14 | return x,y 15 | 16 | 17 | batchsz = 128 18 | # [50k, 32, 32, 3], [10k, 1] 19 | (x, y), (x_val, y_val) = datasets.cifar10.load_data() 20 | y = tf.squeeze(y) 21 | y_val = tf.squeeze(y_val) 22 | y = tf.one_hot(y, depth=10) # [50k, 10] 23 | y_val = tf.one_hot(y_val, depth=10) # [10k, 10] 24 | print('datasets:', x.shape, y.shape, x_val.shape, y_val.shape, x.min(), x.max()) 25 | 26 | 27 | train_db = tf.data.Dataset.from_tensor_slices((x,y)) 28 | train_db = train_db.map(preprocess).shuffle(10000).batch(batchsz) 29 | test_db = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 30 | test_db = test_db.map(preprocess).batch(batchsz) 31 | 32 | 33 | sample = next(iter(train_db)) 34 | print('batch:', sample[0].shape, sample[1].shape) 35 | 36 | 37 | class MyDense(layers.Layer): 38 | # to replace standard layers.Dense() 39 | def __init__(self, inp_dim, outp_dim): 40 | super(MyDense, self).__init__() 41 | 42 | self.kernel = self.add_variable('w', [inp_dim, outp_dim]) 43 | # self.bias = self.add_variable('b', [outp_dim]) 44 | 45 | def call(self, inputs, training=None): 46 | 47 | x = inputs @ self.kernel 48 | return x 49 | 50 | class MyNetwork(keras.Model): 51 | 52 | def __init__(self): 53 | super(MyNetwork, self).__init__() 54 | 55 | self.fc1 = MyDense(32*32*3, 256) 56 | self.fc2 = MyDense(256, 128) 57 | self.fc3 = MyDense(128, 64) 58 | self.fc4 = MyDense(64, 32) 59 | self.fc5 = MyDense(32, 10) 60 | 61 | 62 | 63 | def call(self, inputs, training=None): 64 | """ 65 | 66 | :param inputs: [b, 32, 32, 3] 67 | :param training: 68 | :return: 69 | """ 70 | x = tf.reshape(inputs, [-1, 32*32*3]) 71 | # [b, 32*32*3] => [b, 256] 72 | x = self.fc1(x) 73 | x = tf.nn.relu(x) 74 | # [b, 256] => [b, 128] 75 | x = self.fc2(x) 76 | x = tf.nn.relu(x) 77 | # [b, 128] => [b, 64] 78 | x = self.fc3(x) 79 | x = tf.nn.relu(x) 80 | # [b, 64] => [b, 32] 81 | x = self.fc4(x) 82 | x = tf.nn.relu(x) 83 | # [b, 32] => [b, 10] 84 | x = self.fc5(x) 85 | 86 | return x 87 | 88 | 89 | network = MyNetwork() 90 | network.compile(optimizer=optimizers.Adam(lr=1e-3), 91 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 92 | metrics=['accuracy']) 93 | network.fit(train_db, epochs=15, validation_data=test_db, validation_freq=1) 94 | 95 | network.evaluate(test_db) 96 | network.save_weights('ckpt/weights.ckpt') 97 | del network 98 | print('saved to ckpt/weights.ckpt') 99 | 100 | 101 | network = MyNetwork() 102 | network.compile(optimizer=optimizers.Adam(lr=1e-3), 103 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 104 | metrics=['accuracy']) 105 | network.load_weights('ckpt/weights.ckpt') 106 | print('loaded weights from file.') 107 | network.evaluate(test_db) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson32-Keras实战/请学员务必加群答疑!!!.txt 
-------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson33-过拟合与欠拟合/过拟合与欠拟合.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson33-过拟合与欠拟合/过拟合与欠拟合.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson34-交叉验证/compile_fit.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 3 | 4 | 5 | def preprocess(x, y): 6 | """ 7 | x is a simple image, not a batch 8 | """ 9 | x = tf.cast(x, dtype=tf.float32) / 255. 10 | x = tf.reshape(x, [28*28]) 11 | y = tf.cast(y, dtype=tf.int32) 12 | y = tf.one_hot(y, depth=10) 13 | return x,y 14 | 15 | 16 | batchsz = 128 17 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 18 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 19 | 20 | 21 | 22 | db = tf.data.Dataset.from_tensor_slices((x,y)) 23 | db = db.map(preprocess).shuffle(60000).batch(batchsz) 24 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 25 | ds_val = ds_val.map(preprocess).batch(batchsz) 26 | 27 | sample = next(iter(db)) 28 | print(sample[0].shape, sample[1].shape) 29 | 30 | 31 | network = Sequential([layers.Dense(256, activation='relu'), 32 | layers.Dense(128, activation='relu'), 33 | layers.Dense(64, activation='relu'), 34 | layers.Dense(32, activation='relu'), 35 | layers.Dense(10)]) 36 | network.build(input_shape=(None, 28*28)) 37 | network.summary() 38 | 39 | 40 | 41 | 42 | network.compile(optimizer=optimizers.Adam(lr=0.01), 43 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 44 | metrics=['accuracy'] 45 | ) 46 | 47 | network.fit(db, epochs=5, validation_data=ds_val, 48 | validation_steps=2) 49 | 50 | network.evaluate(ds_val) 51 | 52 | sample = next(iter(ds_val)) 53 | x = sample[0] 54 | y = sample[1] # one-hot 55 | pred = network.predict(x) # [b, 10] 56 | # convert back to number 57 | y = tf.argmax(y, axis=1) 58 | pred = tf.argmax(pred, axis=1) 59 | 60 | print(pred) 61 | print(y) 62 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson34-交叉验证/train_evalute_test.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 3 | 4 | 5 | def preprocess(x, y): 6 | """ 7 | x is a simple image, not a batch 8 | """ 9 | x = tf.cast(x, dtype=tf.float32) / 255. 
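# This script builds a proper train/val/test split: it shuffles an index tensor over
# the 60k MNIST training images with tf.random.shuffle() and uses tf.gather() to take
# 50k samples for training and 10k for validation, keeping the official 10k test set
# only for the final evaluate() call.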
10 | x = tf.reshape(x, [28*28]) 11 | y = tf.cast(y, dtype=tf.int32) 12 | y = tf.one_hot(y, depth=10) 13 | return x,y 14 | 15 | 16 | batchsz = 128 17 | (x, y), (x_test, y_test) = datasets.mnist.load_data() 18 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 19 | 20 | 21 | 22 | idx = tf.range(60000) 23 | idx = tf.random.shuffle(idx) 24 | x_train, y_train = tf.gather(x, idx[:50000]), tf.gather(y, idx[:50000]) 25 | x_val, y_val = tf.gather(x, idx[-10000:]) , tf.gather(y, idx[-10000:]) 26 | print(x_train.shape, y_train.shape, x_val.shape, y_val.shape) 27 | db_train = tf.data.Dataset.from_tensor_slices((x_train,y_train)) 28 | db_train = db_train.map(preprocess).shuffle(50000).batch(batchsz) 29 | 30 | db_val = tf.data.Dataset.from_tensor_slices((x_val,y_val)) 31 | db_val = db_val.map(preprocess).shuffle(10000).batch(batchsz) 32 | 33 | 34 | 35 | db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)) 36 | db_test = db_test.map(preprocess).batch(batchsz) 37 | 38 | sample = next(iter(db_train)) 39 | print(sample[0].shape, sample[1].shape) 40 | 41 | 42 | network = Sequential([layers.Dense(256, activation='relu'), 43 | layers.Dense(128, activation='relu'), 44 | layers.Dense(64, activation='relu'), 45 | layers.Dense(32, activation='relu'), 46 | layers.Dense(10)]) 47 | network.build(input_shape=(None, 28*28)) 48 | network.summary() 49 | 50 | 51 | 52 | 53 | network.compile(optimizer=optimizers.Adam(lr=0.01), 54 | loss=tf.losses.CategoricalCrossentropy(from_logits=True), 55 | metrics=['accuracy'] 56 | ) 57 | 58 | network.fit(db_train, epochs=6, validation_data=db_val, validation_freq=2) 59 | 60 | print('Test performance:') 61 | network.evaluate(db_test) 62 | 63 | 64 | sample = next(iter(db_test)) 65 | x = sample[0] 66 | y = sample[1] # one-hot 67 | pred = network.predict(x) # [b, 10] 68 | # convert back to number 69 | y = tf.argmax(y, axis=1) 70 | pred = tf.argmax(pred, axis=1) 71 | 72 | print(pred) 73 | print(y) 74 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson34-交叉验证/交叉验证.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson34-交叉验证/交叉验证.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson35-Regularization/Regularization.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson35-Regularization/Regularization.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson35-Regularization/regularization.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics 3 | 4 | 5 | def preprocess(x, y): 6 | 7 | x = tf.cast(x, dtype=tf.float32) / 255. 
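# This script adds L2 weight decay by hand: it sums tf.nn.l2_loss() over every entry
# of network.trainable_variables (so biases are penalized as well) and adds
# 0.0001 * that sum to the cross-entropy loss, instead of attaching a
# kernel_regularizer to the individual Dense layers.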
8 | y = tf.cast(y, dtype=tf.int32) 9 | 10 | return x,y 11 | 12 | 13 | batchsz = 128 14 | (x, y), (x_val, y_val) = datasets.mnist.load_data() 15 | print('datasets:', x.shape, y.shape, x.min(), x.max()) 16 | 17 | 18 | 19 | db = tf.data.Dataset.from_tensor_slices((x,y)) 20 | db = db.map(preprocess).shuffle(60000).batch(batchsz).repeat(10) 21 | 22 | ds_val = tf.data.Dataset.from_tensor_slices((x_val, y_val)) 23 | ds_val = ds_val.map(preprocess).batch(batchsz) 24 | 25 | 26 | 27 | 28 | network = Sequential([layers.Dense(256, activation='relu'), 29 | layers.Dense(128, activation='relu'), 30 | layers.Dense(64, activation='relu'), 31 | layers.Dense(32, activation='relu'), 32 | layers.Dense(10)]) 33 | network.build(input_shape=(None, 28*28)) 34 | network.summary() 35 | 36 | optimizer = optimizers.Adam(lr=0.01) 37 | 38 | 39 | 40 | for step, (x,y) in enumerate(db): 41 | 42 | with tf.GradientTape() as tape: 43 | # [b, 28, 28] => [b, 784] 44 | x = tf.reshape(x, (-1, 28*28)) 45 | # [b, 784] => [b, 10] 46 | out = network(x) 47 | # [b] => [b, 10] 48 | y_onehot = tf.one_hot(y, depth=10) 49 | # [b] 50 | loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y_onehot, out, from_logits=True)) 51 | 52 | 53 | loss_regularization = [] 54 | for p in network.trainable_variables: 55 | loss_regularization.append(tf.nn.l2_loss(p)) 56 | loss_regularization = tf.reduce_sum(tf.stack(loss_regularization)) 57 | 58 | loss = loss + 0.0001 * loss_regularization 59 | 60 | 61 | grads = tape.gradient(loss, network.trainable_variables) 62 | optimizer.apply_gradients(zip(grads, network.trainable_variables)) 63 | 64 | 65 | if step % 100 == 0: 66 | 67 | print(step, 'loss:', float(loss), 'loss_regularization:', float(loss_regularization)) 68 | 69 | 70 | # evaluate 71 | if step % 500 == 0: 72 | total, total_correct = 0., 0 73 | 74 | for step, (x, y) in enumerate(ds_val): 75 | # [b, 28, 28] => [b, 784] 76 | x = tf.reshape(x, (-1, 28*28)) 77 | # [b, 784] => [b, 10] 78 | out = network(x) 79 | # [b, 10] => [b] 80 | pred = tf.argmax(out, axis=1) 81 | pred = tf.cast(pred, dtype=tf.int32) 82 | # bool type 83 | correct = tf.equal(pred, y) 84 | # bool tensor => int tensor => numpy 85 | total_correct += tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy() 86 | total += x.shape[0] 87 | 88 | print(step, 'Evaluate Acc:', total_correct/total) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson36-动量与学习率/学习率与动量.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson36-动量与学习率/学习率与动量.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson37-Early Stopping, Dropout/misc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson37-Early Stopping, Dropout/misc.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson38-卷积神经网络/什么是卷积.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson38-卷积神经网络/什么是卷积.pdf 
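lesson38 ships only slides and no script; as a minimal sketch (an editorial addition, not a file from this repository), a single Conv2D layer with padding='same' and stride 1 maps a [b, h, w, c] batch to [b, h, w, filters]:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal([4, 28, 28, 3])                               # batch of 4 fake RGB images
conv = layers.Conv2D(filters=16, kernel_size=3, strides=1, padding='same')
out = conv(x)                                                      # convolution keeps the spatial size here
print(out.shape)                                                   # (4, 28, 28, 16)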
-------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson38-卷积神经网络/卷积神经网络.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson38-卷积神经网络/卷积神经网络.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson39-池化与采样/池化与采样.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson39-池化与采样/池化与采样.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson40-CIFAR与VGG实战/CIFAR与VGG实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson40-CIFAR与VGG实战/CIFAR与VGG实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson40-CIFAR与VGG实战/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson40-CIFAR与VGG实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson40-CIFAR与VGG实战/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson40-CIFAR与VGG实战/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson41-经典卷积网络/经典卷积网络.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson41-经典卷积网络/经典卷积网络.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson42-BatchNorm/BatchNorm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson42-BatchNorm/BatchNorm.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson42-BatchNorm/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | from tensorflow import keras 4 | from tensorflow.keras import layers, optimizers 5 | 6 | 7 | # 2 images with 4x4 size, 3 channels 8 | # we explicitly enforce the mean and stddev to N(1, 0.5) 9 | x = tf.random.normal([2,4,4,3], mean=1.,stddev=0.5) 10 | 11 | net = layers.BatchNormalization(axis=-1, center=True, scale=True, 12 | trainable=True) 13 | 14 | out = net(x) 15 | print('forward in test mode:', net.variables) 16 | 17 | 18 | out = net(x, training=True) 19 | 
print('forward in train mode(1 step):', net.variables) 20 | 21 | for i in range(100): 22 | out = net(x, training=True) 23 | print('forward in train mode(100 steps):', net.variables) 24 | 25 | 26 | optimizer = optimizers.SGD(lr=1e-2) 27 | for i in range(10): 28 | with tf.GradientTape() as tape: 29 | out = net(x, training=True) 30 | loss = tf.reduce_mean(tf.pow(out,2)) - 1 31 | 32 | grads = tape.gradient(loss, net.trainable_variables) 33 | optimizer.apply_gradients(zip(grads, net.trainable_variables)) 34 | print('backward(10 steps):', net.variables) 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson43-ResNet/ResNet与DenseNet.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson43-ResNet/ResNet与DenseNet.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson43-ResNet/ResNet实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson43-ResNet/ResNet实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson43-ResNet/resnet.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | from tensorflow.keras import layers, Sequential 4 | 5 | 6 | 7 | class BasicBlock(layers.Layer): 8 | 9 | def __init__(self, filter_num, stride=1): 10 | super(BasicBlock, self).__init__() 11 | 12 | self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same') 13 | self.bn1 = layers.BatchNormalization() 14 | self.relu = layers.Activation('relu') 15 | 16 | self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same') 17 | self.bn2 = layers.BatchNormalization() 18 | 19 | if stride != 1: 20 | self.downsample = Sequential() 21 | self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride)) 22 | else: 23 | self.downsample = lambda x:x 24 | 25 | 26 | 27 | def call(self, inputs, training=None): 28 | 29 | # [b, h, w, c] 30 | out = self.conv1(inputs) 31 | out = self.bn1(out,training=training) 32 | out = self.relu(out) 33 | 34 | out = self.conv2(out) 35 | out = self.bn2(out,training=training) 36 | 37 | identity = self.downsample(inputs) 38 | 39 | output = layers.add([out, identity]) 40 | output = tf.nn.relu(output) 41 | 42 | return output 43 | 44 | 45 | class ResNet(keras.Model): 46 | 47 | 48 | def __init__(self, layer_dims, num_classes=100): # [2, 2, 2, 2] 49 | super(ResNet, self).__init__() 50 | 51 | self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)), 52 | layers.BatchNormalization(), 53 | layers.Activation('relu'), 54 | layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same') 55 | ]) 56 | 57 | self.layer1 = self.build_resblock(64, layer_dims[0]) 58 | self.layer2 = self.build_resblock(128, layer_dims[1], stride=2) 59 | self.layer3 = self.build_resblock(256, layer_dims[2], stride=2) 60 | self.layer4 = self.build_resblock(512, layer_dims[3], stride=2) 61 | 62 | # output: [b, 512, h, w], 63 | self.avgpool = layers.GlobalAveragePooling2D() 64 | self.fc = layers.Dense(num_classes) 65 | 66 | 67 | 68 | 69 | 
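# Forward pass (see call() below): a [b, 32, 32, 3] batch goes through the stem
# (3x3 conv + BN + ReLU + max-pool), then the four residual stages with 64/128/256/512
# filters (stages 2-4 halve the spatial size via stride 2), and finally
# GlobalAveragePooling2D followed by Dense(num_classes) to produce the logits.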
70 | def call(self, inputs, training=None): 71 | 72 | x = self.stem(inputs,training=training) 73 | 74 | x = self.layer1(x,training=training) 75 | x = self.layer2(x,training=training) 76 | x = self.layer3(x,training=training) 77 | x = self.layer4(x,training=training) 78 | 79 | # [b, c] 80 | x = self.avgpool(x) 81 | # [b, 100] 82 | x = self.fc(x) 83 | 84 | return x 85 | 86 | 87 | 88 | def build_resblock(self, filter_num, blocks, stride=1): 89 | 90 | res_blocks = Sequential() 91 | # may down sample 92 | res_blocks.add(BasicBlock(filter_num, stride)) 93 | 94 | for _ in range(1, blocks): 95 | res_blocks.add(BasicBlock(filter_num, stride=1)) 96 | 97 | return res_blocks 98 | 99 | 100 | def resnet18(): 101 | return ResNet([2, 2, 2, 2]) 102 | 103 | 104 | def resnet34(): 105 | return ResNet([3, 4, 6, 3]) 106 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson43-ResNet/resnet18_train.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | from tensorflow.keras import layers, optimizers, datasets, Sequential 6 | from resnet import resnet18 7 | 8 | tf.random.set_seed(2345) 9 | 10 | 11 | 12 | 13 | 14 | def preprocess(x, y): 15 | # [-1~1] 16 | x = tf.cast(x, dtype=tf.float32) / 255. - 0.5 17 | y = tf.cast(y, dtype=tf.int32) 18 | return x,y 19 | 20 | 21 | (x,y), (x_test, y_test) = datasets.cifar100.load_data() 22 | y = tf.squeeze(y, axis=1) 23 | y_test = tf.squeeze(y_test, axis=1) 24 | print(x.shape, y.shape, x_test.shape, y_test.shape) 25 | 26 | 27 | train_db = tf.data.Dataset.from_tensor_slices((x,y)) 28 | train_db = train_db.shuffle(1000).map(preprocess).batch(512) 29 | 30 | test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test)) 31 | test_db = test_db.map(preprocess).batch(512) 32 | 33 | sample = next(iter(train_db)) 34 | print('sample:', sample[0].shape, sample[1].shape, 35 | tf.reduce_min(sample[0]), tf.reduce_max(sample[0])) 36 | 37 | 38 | def main(): 39 | 40 | # [b, 32, 32, 3] => [b, 1, 1, 512] 41 | model = resnet18() 42 | model.build(input_shape=(None, 32, 32, 3)) 43 | model.summary() 44 | optimizer = optimizers.Adam(lr=1e-3) 45 | 46 | for epoch in range(500): 47 | 48 | for step, (x,y) in enumerate(train_db): 49 | 50 | with tf.GradientTape() as tape: 51 | # [b, 32, 32, 3] => [b, 100] 52 | logits = model(x,training=True) 53 | # [b] => [b, 100] 54 | y_onehot = tf.one_hot(y, depth=100) 55 | # compute loss 56 | loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True) 57 | loss = tf.reduce_mean(loss) 58 | 59 | grads = tape.gradient(loss, model.trainable_variables) 60 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 61 | 62 | if step %50 == 0: 63 | print(epoch, step, 'loss:', float(loss)) 64 | 65 | 66 | 67 | total_num = 0 68 | total_correct = 0 69 | for x,y in test_db: 70 | 71 | logits = model(x,training=False) 72 | prob = tf.nn.softmax(logits, axis=1) 73 | pred = tf.argmax(prob, axis=1) 74 | pred = tf.cast(pred, dtype=tf.int32) 75 | 76 | correct = tf.cast(tf.equal(pred, y), dtype=tf.int32) 77 | correct = tf.reduce_sum(correct) 78 | 79 | total_num += x.shape[0] 80 | total_correct += int(correct) 81 | 82 | acc = total_correct / total_num 83 | print(epoch, 'acc:', acc) 84 | 85 | 86 | 87 | if __name__ == '__main__': 88 | main() 89 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson44-循环神经网络/循环神经网络.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson44-循环神经网络/循环神经网络.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson44-循环神经网络/时间序列表示.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson44-循环神经网络/时间序列表示.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson45-RNN实战/RNN Layer使用.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson45-RNN实战/RNN Layer使用.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson45-RNN实战/sentiment_analysis_cell.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | import numpy as np 6 | from tensorflow import keras 7 | from tensorflow.keras import layers 8 | 9 | 10 | tf.random.set_seed(22) 11 | np.random.seed(22) 12 | assert tf.__version__.startswith('2.') 13 | 14 | batchsz = 128 15 | 16 | # the most frequest words 17 | total_words = 10000 18 | max_review_len = 80 19 | embedding_len = 100 20 | (x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words) 21 | # x_train:[b, 80] 22 | # x_test: [b, 80] 23 | x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len) 24 | x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len) 25 | 26 | db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)) 27 | db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True) 28 | db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)) 29 | db_test = db_test.batch(batchsz, drop_remainder=True) 30 | print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train)) 31 | print('x_test shape:', x_test.shape) 32 | 33 | 34 | 35 | class MyRNN(keras.Model): 36 | 37 | def __init__(self, units): 38 | super(MyRNN, self).__init__() 39 | 40 | # [b, 64] 41 | self.state0 = [tf.zeros([batchsz, units])] 42 | self.state1 = [tf.zeros([batchsz, units])] 43 | 44 | # transform text to embedding representation 45 | # [b, 80] => [b, 80, 100] 46 | self.embedding = layers.Embedding(total_words, embedding_len, 47 | input_length=max_review_len) 48 | 49 | # [b, 80, 100] , h_dim: 64 50 | # RNN: cell1 ,cell2, cell3 51 | # SimpleRNN 52 | self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5) 53 | self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5) 54 | 55 | 56 | # fc, [b, 80, 100] => [b, 64] => [b, 1] 57 | self.outlayer = layers.Dense(1) 58 | 59 | def call(self, inputs, training=None): 60 | """ 61 | net(x) net(x, training=True) :train mode 62 | net(x, training=False): test 63 | :param inputs: [b, 80] 64 | :param training: 65 | :return: 66 | """ 67 | # [b, 80] 68 | x = inputs 69 | # embedding: [b, 80] => [b, 80, 100] 70 | x = self.embedding(x) 71 | # rnn cell compute 72 | # [b, 80, 100] => [b, 64] 73 | state0 = 
self.state0 74 | state1 = self.state1 75 | for word in tf.unstack(x, axis=1): # word: [b, 100] 76 | # h1 = x*wxh+h0*whh 77 | # out0: [b, 64] 78 | out0, state0 = self.rnn_cell0(word, state0, training) 79 | # out1: [b, 64] 80 | out1, state1 = self.rnn_cell1(out0, state1, training) 81 | 82 | # out: [b, 64] => [b, 1] 83 | x = self.outlayer(out1) 84 | # p(y is pos|x) 85 | prob = tf.sigmoid(x) 86 | 87 | return prob 88 | 89 | def main(): 90 | units = 64 91 | epochs = 4 92 | 93 | model = MyRNN(units) 94 | model.compile(optimizer = keras.optimizers.Adam(0.001), 95 | loss = tf.losses.BinaryCrossentropy(), 96 | metrics=['accuracy'],experimental_run_tf_function=False) 97 | model.fit(db_train, epochs=epochs, validation_data=db_test) 98 | 99 | model.evaluate(db_test) 100 | 101 | 102 | if __name__ == '__main__': 103 | main() 104 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson45-RNN实战/sentiment_analysis_layer.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | import numpy as np 6 | from tensorflow import keras 7 | from tensorflow.keras import layers 8 | 9 | 10 | tf.random.set_seed(22) 11 | np.random.seed(22) 12 | assert tf.__version__.startswith('2.') 13 | 14 | batchsz = 128 15 | 16 | # the most frequest words 17 | total_words = 10000 18 | max_review_len = 80 19 | embedding_len = 100 20 | (x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words) 21 | # x_train:[b, 80] 22 | # x_test: [b, 80] 23 | x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len) 24 | x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len) 25 | 26 | db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)) 27 | db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True) 28 | db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)) 29 | db_test = db_test.batch(batchsz, drop_remainder=True) 30 | print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train)) 31 | print('x_test shape:', x_test.shape) 32 | 33 | 34 | 35 | class MyRNN(keras.Model): 36 | 37 | def __init__(self, units): 38 | super(MyRNN, self).__init__() 39 | 40 | 41 | # transform text to embedding representation 42 | # [b, 80] => [b, 80, 100] 43 | self.embedding = layers.Embedding(total_words, embedding_len, 44 | input_length=max_review_len) 45 | 46 | # [b, 80, 100] , h_dim: 64 47 | self.rnn = keras.Sequential([ 48 | layers.SimpleRNN(units, dropout=0.5, return_sequences=True, unroll=True), 49 | layers.SimpleRNN(units, dropout=0.5, unroll=True) 50 | ]) 51 | 52 | 53 | # fc, [b, 80, 100] => [b, 64] => [b, 1] 54 | self.outlayer = layers.Dense(1) 55 | 56 | def call(self, inputs, training=None): 57 | """ 58 | net(x) net(x, training=True) :train mode 59 | net(x, training=False): test 60 | :param inputs: [b, 80] 61 | :param training: 62 | :return: 63 | """ 64 | # [b, 80] 65 | x = inputs 66 | # embedding: [b, 80] => [b, 80, 100] 67 | x = self.embedding(x) 68 | # rnn cell compute 69 | # x: [b, 80, 100] => [b, 64] 70 | x = self.rnn(x,training=training) 71 | 72 | # out: [b, 64] => [b, 1] 73 | x = self.outlayer(x) 74 | # p(y is pos|x) 75 | prob = tf.sigmoid(x) 76 | 77 | return prob 78 | 79 | def main(): 80 | units = 64 81 | epochs = 4 82 | 83 | model = MyRNN(units) 84 | # model.build(input_shape=(4,80)) 85 | # model.summary() 86 | model.compile(optimizer = 
keras.optimizers.Adam(0.001), 87 | loss = tf.losses.BinaryCrossentropy(), 88 | metrics=['accuracy']) 89 | model.fit(db_train, epochs=epochs, validation_data=db_test) 90 | 91 | model.evaluate(db_test) 92 | 93 | 94 | if __name__ == '__main__': 95 | main() 96 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson45-RNN实战/情感分类实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson45-RNN实战/情感分类实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson46-LSTM/LSTM.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson46-LSTM/LSTM.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson46-LSTM/梯度弥散与梯度爆炸.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson46-LSTM/梯度弥散与梯度爆炸.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson47-LSTM实战/LSTM实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson47-LSTM实战/LSTM实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson47-LSTM实战/lstm_sentiment_analysis_layer.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | import numpy as np 6 | from tensorflow import keras 7 | from tensorflow.keras import layers 8 | 9 | 10 | tf.random.set_seed(22) 11 | np.random.seed(22) 12 | assert tf.__version__.startswith('2.') 13 | 14 | batchsz = 128 15 | 16 | # the most frequest words 17 | total_words = 10000 18 | max_review_len = 80 19 | embedding_len = 100 20 | (x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words) 21 | # x_train:[b, 80] 22 | # x_test: [b, 80] 23 | x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len) 24 | x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len) 25 | 26 | db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)) 27 | db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True) 28 | db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)) 29 | db_test = db_test.batch(batchsz, drop_remainder=True) 30 | print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train)) 31 | print('x_test shape:', x_test.shape) 32 | 33 | 34 | 35 | class MyRNN(keras.Model): 36 | 37 | def __init__(self, units): 38 | super(MyRNN, self).__init__() 39 | 40 | 41 | # transform text to embedding representation 42 | # [b, 80] => [b, 80, 100] 43 | self.embedding = layers.Embedding(total_words, embedding_len, 44 | input_length=max_review_len) 45 | 46 | # [b, 80, 100] 
, h_dim: 64 47 | self.rnn = keras.Sequential([ 48 | # layers.SimpleRNN(units, dropout=0.5, return_sequences=True, unroll=True), 49 | # layers.SimpleRNN(units, dropout=0.5, unroll=True) 50 | 51 | layers.LSTM(units, dropout=0.5, return_sequences=True, unroll=True), 52 | layers.LSTM(units, dropout=0.5, unroll=True) 53 | ]) 54 | 55 | 56 | # fc, [b, 80, 100] => [b, 64] => [b, 1] 57 | self.outlayer = layers.Dense(1) 58 | 59 | def call(self, inputs, training=None): 60 | """ 61 | net(x) net(x, training=True) :train mode 62 | net(x, training=False): test 63 | :param inputs: [b, 80] 64 | :param training: 65 | :return: 66 | """ 67 | # [b, 80] 68 | x = inputs 69 | # embedding: [b, 80] => [b, 80, 100] 70 | x = self.embedding(x) 71 | # rnn cell compute 72 | # x: [b, 80, 100] => [b, 64] 73 | x = self.rnn(x,training=training) 74 | 75 | # out: [b, 64] => [b, 1] 76 | x = self.outlayer(x) 77 | # p(y is pos|x) 78 | prob = tf.sigmoid(x) 79 | 80 | return prob 81 | 82 | def main(): 83 | units = 64 84 | epochs = 4 85 | 86 | import time 87 | 88 | t0 = time.time() 89 | 90 | model = MyRNN(units) 91 | model.compile(optimizer = keras.optimizers.Adam(0.001), 92 | loss = tf.losses.BinaryCrossentropy(), 93 | metrics=['accuracy']) 94 | model.fit(db_train, epochs=epochs, validation_data=db_test) 95 | 96 | model.evaluate(db_test) 97 | 98 | 99 | t1 = time.time() 100 | # 69.3 secnods, 83% 101 | print('total time cost:', t1-t0) 102 | 103 | 104 | if __name__ == '__main__': 105 | main() 106 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson48-AutoEncoders/AutoEncoders.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson48-AutoEncoders/AutoEncoders.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/AE实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/AE实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/autoencoder.py: -------------------------------------------------------------------------------- 1 | import os 2 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2' 3 | 4 | import tensorflow as tf 5 | import numpy as np 6 | from tensorflow import keras 7 | from tensorflow.keras import Sequential, layers 8 | from PIL import Image 9 | from matplotlib import pyplot as plt 10 | 11 | 12 | 13 | tf.random.set_seed(22) 14 | np.random.seed(22) 15 | assert tf.__version__.startswith('2.') 16 | 17 | 18 | def save_images(imgs, name): 19 | new_im = Image.new('L', (280, 280)) 20 | 21 | index = 0 22 | for i in range(0, 280, 28): 23 | for j in range(0, 280, 28): 24 | im = imgs[index] 25 | im = Image.fromarray(im, mode='L') 26 | new_im.paste(im, (i, j)) 27 | index += 1 28 | 29 | new_im.save(name) 30 | 31 | 32 | h_dim = 20 33 | batchsz = 512 34 | lr = 1e-3 35 | 36 | 37 | (x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data() 38 | x_train, x_test = x_train.astype(np.float32) / 255., x_test.astype(np.float32) / 255. 
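# The autoencoder below flattens each 28x28 Fashion-MNIST image to 784 values,
# compresses it through Dense layers 256 -> 128 -> h_dim (20), mirrors the decoder
# back through 128 -> 256 -> 784, and trains on binary cross-entropy between the
# input pixels and the reconstruction logits.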
39 | # we do not need label 40 | train_db = tf.data.Dataset.from_tensor_slices(x_train) 41 | train_db = train_db.shuffle(batchsz * 5).batch(batchsz) 42 | test_db = tf.data.Dataset.from_tensor_slices(x_test) 43 | test_db = test_db.batch(batchsz) 44 | 45 | print(x_train.shape, y_train.shape) 46 | print(x_test.shape, y_test.shape) 47 | 48 | 49 | 50 | class AE(keras.Model): 51 | 52 | def __init__(self): 53 | super(AE, self).__init__() 54 | 55 | # Encoders 56 | self.encoder = Sequential([ 57 | layers.Dense(256, activation=tf.nn.relu), 58 | layers.Dense(128, activation=tf.nn.relu), 59 | layers.Dense(h_dim) 60 | ]) 61 | 62 | # Decoders 63 | self.decoder = Sequential([ 64 | layers.Dense(128, activation=tf.nn.relu), 65 | layers.Dense(256, activation=tf.nn.relu), 66 | layers.Dense(784) 67 | ]) 68 | 69 | 70 | def call(self, inputs, training=None): 71 | # [b, 784] => [b, 10] 72 | h = self.encoder(inputs) 73 | # [b, 10] => [b, 784] 74 | x_hat = self.decoder(h) 75 | 76 | return x_hat 77 | 78 | 79 | 80 | model = AE() 81 | model.build(input_shape=(None, 784)) 82 | model.summary() 83 | 84 | optimizer = tf.optimizers.Adam(lr=lr) 85 | 86 | for epoch in range(100): 87 | 88 | for step, x in enumerate(train_db): 89 | 90 | #[b, 28, 28] => [b, 784] 91 | x = tf.reshape(x, [-1, 784]) 92 | 93 | with tf.GradientTape() as tape: 94 | x_rec_logits = model(x) 95 | 96 | rec_loss = tf.losses.binary_crossentropy(x, x_rec_logits, from_logits=True) 97 | rec_loss = tf.reduce_mean(rec_loss) 98 | 99 | grads = tape.gradient(rec_loss, model.trainable_variables) 100 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 101 | 102 | 103 | if step % 100 ==0: 104 | print(epoch, step, float(rec_loss)) 105 | 106 | 107 | # evaluation 108 | x = next(iter(test_db)) 109 | logits = model(tf.reshape(x, [-1, 784])) 110 | x_hat = tf.sigmoid(logits) 111 | # [b, 784] => [b, 28, 28] 112 | x_hat = tf.reshape(x_hat, [-1, 28, 28]) 113 | 114 | # [b, 28, 28] => [2b, 28, 28] 115 | x_concat = tf.concat([x, x_hat], axis=0) 116 | x_concat = x_hat 117 | x_concat = x_concat.numpy() * 255. 
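# Note: line 116 above ('x_concat = x_hat') overwrites the tf.concat result, so only
# the reconstructions are written to ae_images/; keep the concatenated tensor instead
# to also include the original images in the saved grid.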
118 | x_concat = x_concat.astype(np.uint8) 119 | save_images(x_concat, 'ae_images/rec_epoch_%d.png'%epoch) 120 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/请学员务必加群答疑!!!.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson49-VAE实战/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson50-GAN/GAN.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson50-GAN/GAN.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/GAN实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/GAN实战.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/gan.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | from tensorflow.keras import layers 4 | 5 | 6 | 7 | 8 | 9 | 10 | class Generator(keras.Model): 11 | 12 | def __init__(self): 13 | super(Generator, self).__init__() 14 | 15 | # z: [b, 100] => [b, 3*3*512] => [b, 3, 3, 512] => [b, 64, 64, 3] 16 | self.fc = layers.Dense(3*3*512) 17 | 18 | self.conv1 = layers.Conv2DTranspose(256, 3, 3, 'valid') 19 | self.bn1 = layers.BatchNormalization() 20 | 21 | self.conv2 = layers.Conv2DTranspose(128, 5, 2, 'valid') 22 | self.bn2 = layers.BatchNormalization() 23 | 24 | self.conv3 = layers.Conv2DTranspose(3, 4, 3, 'valid') 25 | 26 | def call(self, inputs, training=None): 27 | # [z, 100] => [z, 3*3*512] 28 | x = self.fc(inputs) 29 | x = tf.reshape(x, [-1, 3, 3, 512]) 30 | x = tf.nn.leaky_relu(x) 31 | 32 | # 33 | x = tf.nn.leaky_relu(self.bn1(self.conv1(x), training=training)) 34 | x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training)) 35 | x = self.conv3(x) 36 | x = tf.tanh(x) 37 | 38 | return x 39 | 40 | 41 | class Discriminator(keras.Model): 42 | 43 | def __init__(self): 44 | super(Discriminator, self).__init__() 45 | 46 | # [b, 64, 64, 3] => [b, 1] 47 | self.conv1 = layers.Conv2D(64, 5, 3, 'valid') 48 | 49 | self.conv2 = layers.Conv2D(128, 5, 3, 'valid') 50 | self.bn2 = layers.BatchNormalization() 51 | 52 | self.conv3 = layers.Conv2D(256, 5, 3, 'valid') 53 | self.bn3 = layers.BatchNormalization() 54 | 55 | # [b, h, w ,c] => [b, -1] 56 | self.flatten = layers.Flatten() 57 | self.fc = layers.Dense(1) 58 | 59 | 60 | def call(self, inputs, training=None): 61 | 62 | x = tf.nn.leaky_relu(self.conv1(inputs)) 63 | x = 
tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training)) 64 | x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training)) 65 | 66 | # [b, h, w, c] => [b, -1] 67 | x = self.flatten(x) 68 | # [b, -1] => [b, 1] 69 | logits = self.fc(x) 70 | 71 | return logits 72 | 73 | def main(): 74 | 75 | d = Discriminator() 76 | g = Generator() 77 | 78 | 79 | x = tf.random.normal([2, 64, 64, 3]) 80 | z = tf.random.normal([2, 100]) 81 | 82 | prob = d(x) 83 | print(prob) 84 | x_hat = g(z) 85 | print(x_hat.shape) 86 | 87 | 88 | 89 | 90 | if __name__ == '__main__': 91 | main() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/wgan.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow import keras 3 | from tensorflow.keras import layers 4 | 5 | 6 | 7 | 8 | 9 | 10 | class Generator(keras.Model): 11 | 12 | def __init__(self): 13 | super(Generator, self).__init__() 14 | 15 | # z: [b, 100] => [b, 3*3*512] => [b, 3, 3, 512] => [b, 64, 64, 3] 16 | self.fc = layers.Dense(3*3*512) 17 | 18 | self.conv1 = layers.Conv2DTranspose(256, 3, 3, 'valid') 19 | self.bn1 = layers.BatchNormalization() 20 | 21 | self.conv2 = layers.Conv2DTranspose(128, 5, 2, 'valid') 22 | self.bn2 = layers.BatchNormalization() 23 | 24 | self.conv3 = layers.Conv2DTranspose(3, 4, 3, 'valid') 25 | 26 | def call(self, inputs, training=None): 27 | # [z, 100] => [z, 3*3*512] 28 | x = self.fc(inputs) 29 | x = tf.reshape(x, [-1, 3, 3, 512]) 30 | x = tf.nn.leaky_relu(x) 31 | 32 | # 33 | x = tf.nn.leaky_relu(self.bn1(self.conv1(x), training=training)) 34 | x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training)) 35 | x = self.conv3(x) 36 | x = tf.tanh(x) 37 | 38 | return x 39 | 40 | 41 | class Discriminator(keras.Model): 42 | 43 | def __init__(self): 44 | super(Discriminator, self).__init__() 45 | 46 | # [b, 64, 64, 3] => [b, 1] 47 | self.conv1 = layers.Conv2D(64, 5, 3, 'valid') 48 | 49 | self.conv2 = layers.Conv2D(128, 5, 3, 'valid') 50 | self.bn2 = layers.BatchNormalization() 51 | 52 | self.conv3 = layers.Conv2D(256, 5, 3, 'valid') 53 | self.bn3 = layers.BatchNormalization() 54 | 55 | # [b, h, w ,c] => [b, -1] 56 | self.flatten = layers.Flatten() 57 | self.fc = layers.Dense(1) 58 | 59 | 60 | def call(self, inputs, training=None): 61 | 62 | x = tf.nn.leaky_relu(self.conv1(inputs)) 63 | x = tf.nn.leaky_relu(self.bn2(self.conv2(x), training=training)) 64 | x = tf.nn.leaky_relu(self.bn3(self.conv3(x), training=training)) 65 | 66 | # [b, h, w, c] => [b, -1] 67 | x = self.flatten(x) 68 | # [b, -1] => [b, 1] 69 | logits = self.fc(x) 70 | 71 | return logits 72 | 73 | def main(): 74 | 75 | d = Discriminator() 76 | g = Generator() 77 | 78 | 79 | x = tf.random.normal([2, 64, 64, 3]) 80 | z = tf.random.normal([2, 100]) 81 | 82 | prob = d(x) 83 | print(prob) 84 | x_hat = g(z) 85 | print(x_hat.shape) 86 | 87 | 88 | 89 | 90 | if __name__ == '__main__': 91 | main() -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/答疑群-926107229.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/答疑群-926107229.png -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/请学员务必加群答疑!!!.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson51-WGAN实战/请学员务必加群答疑!!!.txt -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson52-自定义数据集和迁移学习/train_scratch.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 4 | 5 | 6 | import tensorflow as tf 7 | import numpy as np 8 | from tensorflow import keras 9 | from tensorflow.keras import layers,optimizers,losses 10 | from tensorflow.keras.callbacks import EarlyStopping 11 | 12 | tf.random.set_seed(22) 13 | np.random.seed(22) 14 | assert tf.__version__.startswith('2.') 15 | # 设置GPU显存按需分配 16 | gpus = tf.config.experimental.list_physical_devices('GPU') 17 | if gpus: 18 | try: 19 | # Currently, memory growth needs to be the same across GPUs 20 | for gpu in gpus: 21 | tf.config.experimental.set_memory_growth(gpu, True) 22 | logical_gpus = tf.config.experimental.list_logical_devices('GPU') 23 | print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") 24 | except RuntimeError as e: 25 | # Memory growth must be set before GPUs have been initialized 26 | print(e) 27 | 28 | 29 | from pokemon import load_pokemon, normalize, denormalize 30 | from resnet import ResNet 31 | 32 | 33 | def preprocess(x,y): 34 | # x: 图片的路径,y:图片的数字编码 35 | x = tf.io.read_file(x) 36 | x = tf.image.decode_jpeg(x, channels=3) # RGBA 37 | # 图片缩放 38 | # x = tf.image.resize(x, [244, 244]) 39 | # 图片旋转 40 | # x = tf.image.rot90(x,2) 41 | # 随机水平翻转 42 | x = tf.image.random_flip_left_right(x) 43 | # 随机竖直翻转 44 | # x = tf.image.random_flip_up_down(x) 45 | 46 | # 图片先缩放到稍大尺寸 47 | x = tf.image.resize(x, [244, 244]) 48 | # 再随机裁剪到合适尺寸 49 | x = tf.image.random_crop(x, [224,224,3]) 50 | 51 | # x: [0,255]=> -1~1 52 | x = tf.cast(x, dtype=tf.float32) / 255. 
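# Summary of the Chinese comments in this file: the GPU block at the top turns on
# memory growth so TensorFlow allocates GPU memory on demand. In preprocess(), x is an
# image file path and y its integer label: the JPEG is decoded to 3 channels, given a
# random horizontal flip, resized to 244x244, randomly cropped to 224x224x3, scaled to
# [0, 1], then normalized, while y is converted to a depth-5 one-hot vector.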
53 | x = normalize(x) 54 | y = tf.convert_to_tensor(y) 55 | y = tf.one_hot(y, depth=5) 56 | 57 | return x, y 58 | 59 | 60 | batchsz = 256 61 | 62 | # creat train db 63 | images, labels, table = load_pokemon('pokemon',mode='train') 64 | db_train = tf.data.Dataset.from_tensor_slices((images, labels)) 65 | db_train = db_train.shuffle(1000).map(preprocess).batch(batchsz) 66 | # crate validation db 67 | images2, labels2, table = load_pokemon('pokemon',mode='val') 68 | db_val = tf.data.Dataset.from_tensor_slices((images2, labels2)) 69 | db_val = db_val.map(preprocess).batch(batchsz) 70 | # create test db 71 | images3, labels3, table = load_pokemon('pokemon',mode='test') 72 | db_test = tf.data.Dataset.from_tensor_slices((images3, labels3)) 73 | db_test = db_test.map(preprocess).batch(batchsz) 74 | 75 | 76 | resnet = keras.Sequential([ 77 | layers.Conv2D(16,5,3), 78 | layers.MaxPool2D(3,3), 79 | layers.ReLU(), 80 | layers.Conv2D(64,5,3), 81 | layers.MaxPool2D(2,2), 82 | layers.ReLU(), 83 | layers.Flatten(), 84 | layers.Dense(64), 85 | layers.ReLU(), 86 | layers.Dense(5) 87 | ]) 88 | 89 | 90 | resnet = ResNet(5) 91 | resnet.build(input_shape=(4, 224, 224, 3)) 92 | resnet.summary() 93 | 94 | early_stopping = EarlyStopping( 95 | monitor='val_accuracy', 96 | min_delta=0.001, 97 | patience=5 98 | ) 99 | 100 | resnet.compile(optimizer=optimizers.Adam(lr=1e-3), 101 | loss=losses.CategoricalCrossentropy(from_logits=True), 102 | metrics=['accuracy']) 103 | resnet.fit(db_train, validation_data=db_val, validation_freq=1, epochs=100, 104 | callbacks=[early_stopping]) 105 | resnet.evaluate(db_test) -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson52-自定义数据集和迁移学习/train_transfer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | import numpy as np 4 | from tensorflow import keras 5 | from tensorflow.keras import layers 6 | 7 | 8 | 9 | import os 10 | import tensorflow as tf 11 | import numpy as np 12 | from tensorflow import keras 13 | from tensorflow.keras import layers,optimizers,losses 14 | from tensorflow.keras.callbacks import EarlyStopping 15 | 16 | tf.random.set_seed(22) 17 | np.random.seed(22) 18 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 19 | assert tf.__version__.startswith('2.') 20 | 21 | 22 | from pokemon import load_pokemon,normalize 23 | 24 | 25 | 26 | def preprocess(x,y): 27 | # x: 图片的路径,y:图片的数字编码 28 | x = tf.io.read_file(x) 29 | x = tf.image.decode_jpeg(x, channels=3) # RGBA 30 | x = tf.image.resize(x, [244, 244]) 31 | 32 | # x = tf.image.random_flip_left_right(x) 33 | x = tf.image.random_flip_up_down(x) 34 | x = tf.image.random_crop(x, [224,224,3]) 35 | 36 | # x: [0,255]=> -1~1 37 | x = tf.cast(x, dtype=tf.float32) / 255. 
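# As in train_scratch.py, x is an image file path and y its integer label; the script
# below builds the train/val/test Datasets from the pokemon loader, then wraps a frozen
# ImageNet-pretrained VGG19 backbone (include_top=False, pooling='max',
# net.trainable = False) with a single new Dense(5) head, training only that head and
# stopping early once val_accuracy stops improving.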
38 | x = normalize(x) 39 | y = tf.convert_to_tensor(y) 40 | y = tf.one_hot(y, depth=5) 41 | 42 | return x, y 43 | 44 | 45 | batchsz = 128 46 | # 创建训练集Datset对象 47 | images, labels, table = load_pokemon('pokemon',mode='train') 48 | db_train = tf.data.Dataset.from_tensor_slices((images, labels)) 49 | db_train = db_train.shuffle(1000).map(preprocess).batch(batchsz) 50 | # 创建验证集Datset对象 51 | images2, labels2, table = load_pokemon('pokemon',mode='val') 52 | db_val = tf.data.Dataset.from_tensor_slices((images2, labels2)) 53 | db_val = db_val.map(preprocess).batch(batchsz) 54 | # 创建测试集Datset对象 55 | images3, labels3, table = load_pokemon('pokemon',mode='test') 56 | db_test = tf.data.Dataset.from_tensor_slices((images3, labels3)) 57 | db_test = db_test.map(preprocess).batch(batchsz) 58 | 59 | # 60 | net = keras.applications.VGG19(weights='imagenet', include_top=False, 61 | pooling='max') 62 | net.trainable = False 63 | newnet = keras.Sequential([ 64 | net, 65 | layers.Dense(5) 66 | ]) 67 | newnet.build(input_shape=(4,224,224,3)) 68 | newnet.summary() 69 | 70 | 71 | early_stopping = EarlyStopping( 72 | monitor='val_accuracy', 73 | min_delta=0.001, 74 | patience=5 75 | ) 76 | 77 | newnet.compile(optimizer=optimizers.Adam(lr=1e-3), 78 | loss=losses.CategoricalCrossentropy(from_logits=True), 79 | metrics=['accuracy']) 80 | newnet.fit(db_train, validation_data=db_val, validation_freq=1, epochs=100, 81 | callbacks=[early_stopping]) 82 | newnet.evaluate(db_test) 83 | -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson52-自定义数据集和迁移学习/宝可梦数据集.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson52-自定义数据集和迁移学习/宝可梦数据集.pdf -------------------------------------------------------------------------------- /深度学习与TensorFlow入门实战-源码和PPT/lesson选看-人工智能发展史/人工智能发展史.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiajunhua/dragen1860-TensorFlow-2.x-Tutorials/b1cf293ab7efc42050a7e973a621b182d567860e/深度学习与TensorFlow入门实战-源码和PPT/lesson选看-人工智能发展史/人工智能发展史.pdf --------------------------------------------------------------------------------
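Compatibility note (editorial addition, not a file from this repository): the lesson scripts above were written for early TF 2.x. On recent TensorFlow/Keras releases a few of the APIs they rely on have been renamed, so minor substitutions along these lines may be needed:

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)   # 'lr=' is the older spelling used throughout the lessons
# inside custom layers: self.add_weight('w', [inp_dim, outp_dim])   # replaces the older add_variable()
# for metrics such as metrics.Mean(): meter.reset_state()           # newer name for reset_states()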