├── .dockerignore
├── .gitignore
├── .gitmodules
├── .pre-commit-config.yaml
├── .pre-commit-hooks
├── convert_markdown_into_html.py
└── convert_markdown_into_ipynb.sh
├── .style.yapf
├── .tools
├── build_docker.sh
├── cache_dataset.py
├── convert-markdown-into-ipynb-and-test.sh
├── notedown.sh
├── templates
│ ├── index.cn.html.json
│ ├── index.cn.html.tmpl
│ ├── index.html.json
│ └── index.html.tmpl
└── theme
│ ├── PP_w.png
│ ├── github-markdown.css
│ └── marked.js
├── .travis.yml
├── .travis
├── deploy_docs.sh
└── precommit.sh
├── 0.coding_guideline
├── README.MD
└── appendix
│ ├── README.MD
│ ├── dir-1.png
│ ├── dir-2.png
│ ├── dir-3.png
│ ├── dir-4.png
│ ├── dir-5.png
│ └── readme.png
├── 01.fit_a_line
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── fit_a_line.tar
├── fluid
│ └── fit_a_line.fluid.tar
├── image
│ ├── formula_fit_a_line_1.png
│ ├── formula_fit_a_line_2.png
│ ├── formula_fit_a_line_3.png
│ ├── formula_fit_a_line_4.png
│ ├── prediction_gt.png
│ ├── predictions.png
│ ├── predictions_en.png
│ ├── ranges.png
│ ├── ranges_en.png
│ └── train_and_test.png
├── index.cn.html
├── index.html
└── train.py
├── 02.recognize_digits
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── client
│ └── client.py
├── image
│ ├── 01.gif
│ ├── 02.gif
│ ├── 03.gif
│ ├── 04.gif
│ ├── 05.gif
│ ├── cnn.png
│ ├── cnn_en.png
│ ├── cnn_train_log.png
│ ├── cnn_train_log_en.png
│ ├── conv_layer.png
│ ├── infer_3.png
│ ├── max_pooling.png
│ ├── max_pooling_en.png
│ ├── mlp.png
│ ├── mlp_en.png
│ ├── mlp_train_log.png
│ ├── mlp_train_log_en.png
│ ├── mnist_example_image.png
│ ├── softmax_regression.png
│ ├── softmax_regression_en.png
│ ├── softmax_train_log.png
│ ├── softmax_train_log_en.png
│ └── train_and_test.png
├── index.cn.html
├── index.html
└── train.py
├── 03.image_classification
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── image
│ ├── cifar.png
│ ├── dog.png
│ ├── dog_cat.png
│ ├── fea_conv0.png
│ ├── flowers.png
│ ├── googlenet.jpeg
│ ├── ilsvrc.png
│ ├── inception.png
│ ├── inception_en.png
│ ├── lenet.png
│ ├── lenet_en.png
│ ├── plot.png
│ ├── plot_en.png
│ ├── resnet.png
│ ├── resnet_block.jpg
│ ├── train_and_test.png
│ ├── variations.png
│ ├── variations_en.png
│ └── vgg16.png
├── index.cn.html
├── index.html
├── resnet.py
├── train.py
└── vgg.py
├── 04.word2vec
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── calculate_dis.py
├── format_convert.py
├── image
│ ├── 2d_similarity.png
│ ├── Eqn1.gif
│ ├── Eqn2.gif
│ ├── Eqn3.gif
│ ├── Eqn4.gif
│ ├── Eqn5.gif
│ ├── Eqn6.gif
│ ├── Eqn7.gif
│ ├── Eqn8.gif
│ ├── Eqn9.gif
│ ├── cbow.png
│ ├── cbow_en.png
│ ├── ngram.en.png
│ ├── ngram.png
│ ├── nnlm.png
│ ├── nnlm_en.png
│ ├── sentence_emb.png
│ ├── skipgram.png
│ └── skipgram_en.png
├── index.cn.html
├── index.html
└── train.py
├── 05.recommender_system
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── image
│ ├── Deep_candidate_generation_model_architecture.en.png
│ ├── Deep_candidate_generation_model_architecture.png
│ ├── YouTube_Overview.en.png
│ ├── YouTube_Overview.png
│ ├── formula1.png
│ ├── formula2.png
│ ├── formula3.png
│ ├── output_32_0.png
│ ├── rec_regression_network.png
│ ├── rec_regression_network_en.png
│ ├── text_cnn.png
│ └── text_cnn_en.png
├── index.cn.html
├── index.html
└── train.py
├── 06.understand_sentiment
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── image
│ ├── formula_lstm.png
│ ├── formula_lstm_more.png
│ ├── formula_recrurent.png
│ ├── formula_rnn.png
│ ├── lstm.png
│ ├── lstm_en.png
│ ├── rnn.png
│ ├── stacked_lstm.jpg
│ └── stacked_lstm_en.png
├── index.cn.html
├── index.html
├── train_conv.py
├── train_dyn_rnn.py
└── train_stacked_lstm.py
├── 07.label_semantic_roles
├── .gitignore
├── .run_ce.sh
├── README.cn.md
├── README.md
├── _ce.py
├── image
│ ├── Eqn1.png
│ ├── Eqn2.gif
│ ├── Eqn3.gif
│ ├── Eqn4.png
│ ├── bidirectional_stacked_lstm.png
│ ├── bidirectional_stacked_lstm_en.png
│ ├── bio_example.png
│ ├── bio_example_en.png
│ ├── db_lstm_network.png
│ ├── db_lstm_network_en.png
│ ├── dependency_parsing.png
│ ├── dependency_parsing_en.png
│ ├── linear_chain_crf.png
│ ├── stacked_lstm.png
│ └── stacked_lstm_en.png
├── index.cn.html
├── index.html
└── train.py
├── 08.machine_translation
├── .gitignore
├── README.cn.md
├── README.md
├── image
│ ├── attention_decoder_formula.png
│ ├── bi_rnn.png
│ ├── bi_rnn_en.png
│ ├── decoder_attention.png
│ ├── decoder_attention_en.png
│ ├── decoder_formula.png
│ ├── encoder_attention.png
│ ├── encoder_attention_en.png
│ ├── encoder_decoder.png
│ ├── encoder_decoder_en.png
│ ├── gru.png
│ ├── gru_en.png
│ ├── nmt.png
│ ├── nmt_en.png
│ ├── probability_formula.png
│ ├── sum_formula.png
│ └── weight_formula.png
├── index.cn.html
├── index.html
└── seq2seq.py
├── 09.gan
├── .run_ce.sh
├── README.cn.md
├── _ce.py
├── dc_gan.py
├── image
│ ├── 01.gif
│ ├── dcgan_demo.png
│ ├── dcgan_g.png
│ └── process.png
├── index.cn.html
├── network.py
└── utility.py
├── README.cn.md
├── README.md
├── index.cn.html
├── index.html
├── mnist-client
├── .eslintrc
├── .gitignore
├── Dockerfile
├── Procfile
├── README.md
├── app.json
├── gulpfile.js
├── main.py
├── package.json
├── requirements.txt
├── runtime.txt
├── src
│ └── js
│ │ └── main.js
├── static
│ ├── css
│ │ └── bootstrap.min.css
│ └── js
│ │ └── jquery.min.js
└── templates
│ └── index.html
├── paddle2.0_docs
├── Actor_Critic_Method
│ └── Actor_Critic_Method.ipynb
├── Advantage Actor-Critic(A2C)
│ └── Advantage Actor-Critic(A2C).ipynb
├── Autoencoder
│ └── AutoEncoder.ipynb
├── Collaborative_filtering
│ └── Collaborative_Filtering.ipynb
├── Deep_Deterministic_Policy_Gradient_(DDPG)
│ └── Deep_Deterministic_Policy_Gradient_(DDPG).ipynb
├── README.md
├── addition_rnn
│ └── addition_rnn.ipynb
├── convnet_image_classification
│ └── convnet_image_classification.ipynb
├── cyclegan
│ └── cyclegan.ipynb
├── dcgan_face
│ ├── dcgan_face.ipynb
│ └── images
│ │ ├── face_image1.jpeg
│ │ ├── face_image2.jpeg
│ │ ├── loss.png
│ │ └── models.png
├── dynamic_graph
│ └── dynamic_graph.ipynb
├── hello_paddle
│ └── hello_paddle.ipynb
├── high_level_api
│ └── high_level_api.ipynb
├── image_classification
│ └── image_classification.ipynb
├── image_ocr
│ ├── image_ocr.ipynb
│ ├── images
│ │ ├── image1.png
│ │ ├── image2.png
│ │ └── image3.png
│ └── sample_img
│ │ ├── 9450.jpg
│ │ ├── 9451.jpg
│ │ └── 9452.jpg
├── image_search
│ └── image_search.ipynb
├── image_segmentation
│ └── image_segmentation.ipynb
├── imdb_bow_classification
│ └── imdb_bow_classification.ipynb
├── landmark_detection
│ └── landmark_detection.ipynb
├── linear_regression
│ └── linear_regression.ipynb
├── n_gram_model
│ └── n_gram_model.ipynb
├── pointnet
│ └── pointnet.ipynb
├── pretrained_word_embeddings
│ └── pretrained_word_embeddings.ipynb
├── save_model
│ └── save_model.ipynb
├── seq2seq_with_attention
│ └── seq2seq_with_attention.ipynb
└── super_resolution_sub_pixel
│ └── super_resolution_sub_pixel.ipynb
├── pending
├── gan
│ ├── README.md
│ └── index.html
├── image_caption
│ ├── README.md
│ └── index.html
├── image_detection
│ ├── README.md
│ └── index.html
├── image_qa
│ ├── README.md
│ └── index.html
├── query_relationship
│ ├── README.md
│ └── index.html
├── skip_thought
│ ├── README.md
│ └── index.html
└── speech_recognition
│ ├── README.md
│ └── index.html
└── serve
├── .gitignore
├── Dockerfile
├── Dockerfile.gpu
├── README.md
├── main.py
└── requirements.txt
/.dockerignore:
--------------------------------------------------------------------------------
1 | Dockerfile
2 | .git/
3 | .gitignore
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | deprecated
2 | *~
3 | pandoc.template
4 | .DS_Store
5 | .idea
6 | py_env*
7 | *.ipynb
8 | build
9 | .vscode
10 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "paddle"]
2 | path = paddle
3 | url = https://github.com/PaddlePaddle/Paddle.git
4 | branch = develop
5 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | - repo: https://github.com/pre-commit/mirrors-yapf.git
2 | sha: v0.16.0
3 | hooks:
4 | - id: yapf
5 | files: \.py$
6 | - repo: https://github.com/pre-commit/pre-commit-hooks
7 | sha: a11d9314b22d8f8c7556443875b731ef05965464
8 | hooks:
9 | - id: check-merge-conflict
10 | - id: check-symlinks
11 | - id: detect-private-key
12 | files: (?!.*paddle)^.*$
13 | - id: end-of-file-fixer
14 | files: \.md$
15 | - id: trailing-whitespace
16 | files: \.md$
17 | - repo: https://github.com/Lucas-C/pre-commit-hooks
18 | sha: v1.0.1
19 | hooks:
20 | - id: forbid-crlf
21 | files: \.md$
22 | - id: remove-crlf
23 | files: \.md$
24 | - id: forbid-tabs
25 | files: \.md$
26 | - id: remove-tabs
27 | files: \.md$
28 | - repo: https://github.com/reyoung/pre-commit-hooks-jinja-compile.git
29 | sha: 4a369cc72a4a2b8d3813ab8cc17abb5f5b21ef6c
30 | hooks:
31 | - id: convert-jinja2-into-html
32 |         # The argument means replace filename from pattern `.*/([^/]*)\.tmpl` to `\1`
33 | args: ['--filename_pattern=.*/([^/]*)\.tmpl', '--filename_repl=\1']
34 | - repo: local
35 | hooks:
36 | - id: convert-markdown-into-html
37 | name: convert-markdown-into-html
38 | description: Convert README.md into index.html and README.cn.md into index.cn.html
39 | entry: python .pre-commit-hooks/convert_markdown_into_html.py
40 | language: system
41 | files: .+README(\.cn)?\.md$
42 |
43 |
--------------------------------------------------------------------------------
/.pre-commit-hooks/convert_markdown_into_html.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import re
3 | import sys
4 |
5 | HEAD = """
6 |
7 |
8 |
20 |
21 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 | """
48 |
49 | TAIL = """
50 |
51 |
52 |
53 |
70 |
71 | """
72 |
73 |
def convert_markdown_into_html(argv=None):
    """Wrap each Markdown file in the HTML HEAD/TAIL and write index*.html.

    For every given filename (expected README.md / README.cn.md), writes
    HEAD + the raw markdown + TAIL to the sibling index.html /
    index.cn.html file.

    Args:
        argv: optional argument list; defaults to sys.argv[1:].

    Returns:
        0 always (pre-commit hook convention: non-zero aborts the commit).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to fix')
    args = parser.parse_args(argv)

    retv = 0

    for filename in args.filenames:
        # README.md -> index.html, README.cn.md -> index.cn.html
        out_name = re.sub(r"README", "index",
                          re.sub(r"\.md$", ".html", filename))
        # Renamed `input` -> `source` to avoid shadowing the builtin;
        # one combined `with` instead of nested blocks.
        with open(out_name, "w") as output, open(filename) as source:
            output.write(HEAD)
            for line in source:
                output.write(line)
            output.write(TAIL)

    return retv
92 |
93 |
94 | if __name__ == '__main__':
95 | sys.exit(convert_markdown_into_html())
96 |
--------------------------------------------------------------------------------
/.pre-commit-hooks/convert_markdown_into_ipynb.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Pre-commit hook: convert each given Markdown file into a Jupyter
# notebook next to it (foo.md -> foo.ipynb) via markdown-to-ipynb.
# Quote "$@" and "$file" so paths containing spaces survive word-splitting.
for file in "$@" ; do
    if ! markdown-to-ipynb < "$file" > "${file%.*}.ipynb"; then
        echo >&2 "markdown-to-ipynb $file error"
        exit 1
    fi
done
9 |
10 |
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style = pep8
3 | column_limit = 80
4 |
--------------------------------------------------------------------------------
/.tools/build_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -xe
3 |
4 | cur_path="$(cd "$(dirname "$0")" && pwd -P)"
5 | cd "$cur_path"/../
6 |
7 | #paddle production image name
8 | if [ ! -n "$1" ]; then
9 | paddle_image=paddlepaddle/paddle
10 | else
11 | paddle_image=$1
12 | fi
13 |
14 | #paddle production image tag
15 | if [ ! -n "$2" ]; then
16 | paddle_tag=0.10.0
17 | else
18 | paddle_tag=$2
19 | fi
20 |
21 | #paddle book image name
22 | if [ ! -n "$3" ]; then
23 | book_image=paddlepaddle/book
24 | else
25 | book_image=$3
26 | fi
27 |
28 | #paddle book image tag
29 | if [ ! -n "$4" ]; then
30 | book_tag=latest
31 | else
32 | book_tag=$4
33 | fi
34 |
35 | #generate docker file
36 | if [ ${USE_UBUNTU_REPO_MIRROR} ]; then
37 | update_mirror_cmd="sed 's@http:\/\/archive.ubuntu.com\/ubuntu\/@mirror:\/\/mirrors.ubuntu.com\/mirrors.txt@' -i /etc/apt/sources.list && \\"
38 | else
39 | update_mirror_cmd="\\"
40 | fi
41 |
42 | #build docker image
43 | echo "paddle_tag:"$paddle_tag
44 | echo "book_tag:"$book_tag
45 |
46 | cat > Dockerfile <
49 |
50 | COPY . /book
51 | EOF
52 |
53 | if [ -n "${http_proxy}" ]; then
54 | cat >> Dockerfile <> Dockerfile </dev/null 2>&1
3 | if [ $? -ne 0 ]; then
4 | echo >&2 "Please install go https://golang.org/doc/install#install"
5 | exit 1
6 | fi
7 |
8 | export GOPATH=~/go; go get -u github.com/wangkuiyi/ipynb/markdown-to-ipynb
9 |
10 | cur_path="$(cd "$(dirname "$0")" && pwd -P)"
11 | cd $cur_path/../
12 |
13 | #convert md to ipynb
14 | for file in */{README,README\.cn}.md ; do
15 | ~/go/bin/markdown-to-ipynb < $file > ${file%.*}".ipynb"
16 | if [ $? -ne 0 ]; then
17 | echo >&2 "markdown-to-ipynb $file error"
18 | exit 1
19 | fi
20 | done
21 |
22 | if [[ -z $TEST_EMBEDDED_PYTHON_SCRIPTS ]]; then
23 | exit 0
24 | fi
25 |
26 | #exec ipynb's py file
27 | for file in */{README,README\.cn}.ipynb ; do
28 | pushd $PWD > /dev/null
29 | cd $(dirname $file) > /dev/null
30 |
31 | echo "begin test $file"
32 | jupyter nbconvert --to python $(basename $file) --stdout | python
33 |
34 | popd > /dev/null
35 | #break
36 | done
37 |
--------------------------------------------------------------------------------
/.tools/notedown.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -xe

cd /book

# Convert every chapter README (en + cn) from Markdown to a notebook.
# Expansions are quoted so unusual paths survive word-splitting/globbing.
for file in */{README,README\.cn}.md ; do
    notedown "$file" > "${file%.*}.ipynb"
done
10 |
--------------------------------------------------------------------------------
/.tools/templates/index.cn.html.json:
--------------------------------------------------------------------------------
1 | {
2 | "is_en": false,
3 | "chapters": [
4 | {
5 | "name": "线性回归",
6 | "link": "./01.fit_a_line/index.cn.html"
7 | },
8 | {
9 | "name": "识别数字",
10 | "link": "./02.recognize_digits/index.cn.html"
11 | },
12 | {
13 | "name": "图像分类",
14 | "link": "./03.image_classification/index.cn.html"
15 | },
16 | {
17 | "name": "词向量",
18 | "link": "./04.word2vec/index.cn.html"
19 | },
20 | {
21 | "name": "个性化推荐",
22 | "link": "./05.recommender_system/index.cn.html"
23 | },
24 | {
25 | "name": "情感分析",
26 | "link": "./06.understand_sentiment/index.cn.html"
27 | },
28 | {
29 | "name": "语义角色标注",
30 | "link": "./07.label_semantic_roles/index.cn.html"
31 | },
32 | {
33 | "name": "机器翻译",
34 | "link": "./08.machine_translation/index.cn.html"
35 | }
36 | ]
37 | }
38 |
--------------------------------------------------------------------------------
/.tools/templates/index.cn.html.tmpl:
--------------------------------------------------------------------------------
1 | index.html.tmpl
--------------------------------------------------------------------------------
/.tools/templates/index.html.json:
--------------------------------------------------------------------------------
1 | {
2 | "is_en": true,
3 | "chapters": [
4 | {
5 | "name": "Linear Regression",
6 | "link": "./01.fit_a_line/index.html"
7 | },
8 | {
9 | "name": "Recognize Digits",
10 | "link": "./02.recognize_digits/index.html"
11 | },
12 | {
13 | "name": "Image Classification",
14 | "link": "./03.image_classification/index.html"
15 | },
16 | {
17 | "name": "Word2Vec",
18 | "link": "./04.word2vec/index.html"
19 | },
20 | {
21 | "name": "Personalized Recommendation",
22 | "link": "./05.recommender_system/index.html"
23 | },
24 | {
25 | "name": "Sentiment Analysis",
26 | "link": "./06.understand_sentiment/index.html"
27 | },
28 | {
29 | "name": "Semantic Role Labeling",
30 | "link": "./07.label_semantic_roles/index.html"
31 | },
32 | {
33 | "name": "Machine Translation",
34 | "link": "./08.machine_translation/index.html"
35 | }
36 | ]
37 | }
38 |
--------------------------------------------------------------------------------
/.tools/templates/index.html.tmpl:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {% if not is_en %}深度学习入门{%else%}Deep Learning 101{% endif %}
6 |
7 |
8 |
9 |
10 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
94 |
95 |
96 |
97 |
121 |
122 |
124 |
125 |
126 |
127 |
144 |
145 |
146 |
--------------------------------------------------------------------------------
/.tools/theme/PP_w.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/.tools/theme/PP_w.png
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: cpp
2 | cache: ccache
3 | sudo: required
4 | dist: trusty
5 | os:
6 | - linux
7 | env:
8 | - JOB=PRE_COMMIT
9 |
10 | addons:
11 | apt:
12 | packages:
13 | - git
14 | - python
15 | - python-pip
16 | - python2.7-dev
17 | - golang
18 | ssh_known_hosts: 13.229.163.131
19 | before_install:
20 | - sudo pip install -U virtualenv pre-commit identify==1.5.9 -i https://pypi.tuna.tsinghua.edu.cn/simple
21 | - GOPATH=/tmp/go go get -u github.com/wangkuiyi/ipynb/markdown-to-ipynb
22 | script:
23 | - PATH=/tmp/go/bin:$PATH .travis/precommit.sh
24 | - |
25 | .travis/deploy_docs.sh
26 | notifications:
27 | email:
28 | on_success: change
29 | on_failure: always
30 |
31 |
--------------------------------------------------------------------------------
/.travis/deploy_docs.sh:
--------------------------------------------------------------------------------
1 | exit_code=0
2 |
3 | if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit $exit_code; fi;
4 |
 5 | # Deploy to the content server if it's a "develop" or "release/version" branch
6 | # The "develop_doc" branch is reserved to test full deploy process without impacting the real content.
7 | if [ "$TRAVIS_BRANCH" == "develop_doc" ]; then
8 | PPO_SCRIPT_BRANCH=develop
9 | elif [[ "$TRAVIS_BRANCH" == "develop" || "$TRAVIS_BRANCH" =~ ^v|release/[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then
10 | PPO_SCRIPT_BRANCH=master
11 | else
12 | # Early exit, this branch doesn't require documentation build
13 | exit $exit_code;
14 | fi
15 |
16 | export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/$PPO_SCRIPT_BRANCH/scripts/deploy/deploy_docs.sh
17 |
18 | docker run -it \
19 | -e CONTENT_DEC_PASSWD=$CONTENT_DEC_PASSWD \
20 | -e TRAVIS_BRANCH=$TRAVIS_BRANCH \
21 | -e DEPLOY_DOCS_SH=$DEPLOY_DOCS_SH \
22 | -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST \
23 | -e PPO_SCRIPT_BRANCH=$PPO_SCRIPT_BRANCH \
24 | -e PADDLE_ROOT=/book \
25 | -v "$PWD:/book" \
26 | -w /book \
27 | paddlepaddle/paddle:latest-dev \
28 | /bin/bash -c 'curl $DEPLOY_DOCS_SH | bash -s $CONTENT_DEC_PASSWD $TRAVIS_BRANCH /book /book/build/doc/ $PPO_SCRIPT_BRANCH' || exit_code=$(( exit_code | $? ))
29 |
30 |
--------------------------------------------------------------------------------
/.travis/precommit.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# CI pre-commit gate: run pre-commit on every file changed relative to
# upstream/$BRANCH and fail the build if any hook reports a violation.
function abort(){
    echo "Your commit not fit PaddlePaddle code style" 1>&2
    echo "Please use pre-commit scripts to auto-format your code" 1>&2
    exit 1
}

# Any early exit (via set -e) triggers abort; the trap is cleared at the end.
trap 'abort' 0
set -e
cd `dirname $0`
cd ..
export PATH=/usr/bin:$PATH
export BRANCH=develop
pre-commit install

# BUG FIX: commit_files must be initialized — otherwise, when every file
# passes, the later test expands to `[ == off ]`, a test-syntax error.
commit_files=on
for file_name in `git diff --numstat upstream/$BRANCH |awk '{print $NF}'`;do
    if ! pre-commit run --files "$file_name" ; then
        commit_files=off
    fi
done

if [ "$commit_files" == 'off' ];then
    ls -lh
    git diff 2>&1
    exit 1
fi

trap : 0
29 |
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/README.MD:
--------------------------------------------------------------------------------
1 | Stores some images here.
2 |
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/dir-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/0.coding_guideline/appendix/dir-1.png
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/dir-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/0.coding_guideline/appendix/dir-2.png
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/dir-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/0.coding_guideline/appendix/dir-3.png
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/dir-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/0.coding_guideline/appendix/dir-4.png
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/dir-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/0.coding_guideline/appendix/dir-5.png
--------------------------------------------------------------------------------
/0.coding_guideline/appendix/readme.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/0.coding_guideline/appendix/readme.png
--------------------------------------------------------------------------------
/01.fit_a_line/.gitignore:
--------------------------------------------------------------------------------
1 | data/housing*
2 | data/*.list
3 | *.pyc
4 | data/*.pyc
5 | output
6 |
--------------------------------------------------------------------------------
/01.fit_a_line/.run_ce.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #This file is only used for continuous evaluation.
3 | python train.py --enable_ce | python _ce.py
4 |
5 |
--------------------------------------------------------------------------------
/01.fit_a_line/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 |
10 | train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=True, desc='train cost')
11 | test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost')
12 | tracking_kpis = [train_cost_kpi, test_cost_kpi]
13 |
14 |
def parse_log(log):
    """Yield (kpi_name, kpi_value) pairs found in raw training output.

    A KPI line has exactly three tab-separated fields:
    ``kpis\\t<name>\\t<value>``; every other line is ignored.
    """
    for raw_line in log.split('\n'):
        fields = raw_line.strip().split('\t')
        print(fields)
        if len(fields) == 3 and fields[0] == 'kpis':
            print("-----%s" % fields)
            yield fields[1], float(fields[2])
24 |
25 |
def log_to_ce(log):
    """Parse KPI lines out of *log* and persist each value to its tracker."""
    trackers = {kpi.name: kpi for kpi in tracking_kpis}

    for name, value in parse_log(log):
        print(name, value)
        trackers[name].add_record(value)
        trackers[name].persist()
35 |
36 |
37 | if __name__ == '__main__':
38 | log = sys.stdin.read()
39 | log_to_ce(log)
40 |
--------------------------------------------------------------------------------
/01.fit_a_line/fit_a_line.tar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/fit_a_line.tar
--------------------------------------------------------------------------------
/01.fit_a_line/fluid/fit_a_line.fluid.tar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/fluid/fit_a_line.fluid.tar
--------------------------------------------------------------------------------
/01.fit_a_line/image/formula_fit_a_line_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/formula_fit_a_line_1.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/formula_fit_a_line_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/formula_fit_a_line_2.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/formula_fit_a_line_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/formula_fit_a_line_3.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/formula_fit_a_line_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/formula_fit_a_line_4.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/prediction_gt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/prediction_gt.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/predictions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/predictions.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/predictions_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/predictions_en.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/ranges.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/ranges.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/ranges_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/ranges_en.png
--------------------------------------------------------------------------------
/01.fit_a_line/image/train_and_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/01.fit_a_line/image/train_and_test.png
--------------------------------------------------------------------------------
/01.fit_a_line/train.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import print_function
16 |
17 | import sys
18 | import argparse
19 |
20 | import math
21 | import numpy
22 |
23 | import paddle
24 | import paddle.fluid as fluid
25 |
26 |
def _parse_bool(value):
    """Parse a command-line boolean string.

    argparse's ``type=bool`` is a classic bug: ``bool("False")`` is True,
    so ``--use_gpu False`` would silently enable the GPU.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got %r" % value)


def parse_args(argv=None):
    """Parse command-line arguments for the fit_a_line demo.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward-compatible — existing callers pass nothing).

    Returns:
        argparse.Namespace with enable_ce, use_gpu and num_epochs.
    """
    parser = argparse.ArgumentParser("fit_a_line")
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help="If set, run the task with continuous evaluation logs.")
    parser.add_argument(
        '--use_gpu',
        type=_parse_bool,
        default=False,
        help="Whether to use GPU or not.")
    parser.add_argument(
        '--num_epochs', type=int, default=100, help="number of epochs.")
    args = parser.parse_args(argv)
    return args
42 |
43 |
44 | # For training test cost
def train_test(executor, program, reader, feeder, fetch_list):
    """Run one evaluation pass over *reader* and return per-metric means.

    For each batch, fetches `fetch_list` via the executor and accumulates
    the first element of each fetched array; returns the batch-averaged
    values.  Raises ZeroDivisionError if the reader yields no batches.
    """
    totals = [0] * 1  # one accumulator per fetched metric
    batches = 0
    for batch in reader():
        results = executor.run(
            program=program, feed=feeder.feed(batch), fetch_list=fetch_list)
        totals = [total + out[0] for total, out in zip(totals, results)]
        batches += 1
    return [total / batches for total in totals]
54 |
55 |
def save_result(points1, points2):
    """Plot predictions (*points1*) against ground truth (*points2*) and
    save the figure to ./image/prediction_gt.png."""
    import matplotlib
    matplotlib.use('Agg')  # headless backend: no display required
    import matplotlib.pyplot as plt
    xs = list(range(len(points1)))
    plt.plot(xs, points1, 'r--', label='predictions')
    plt.plot(xs, points2, 'g--', label='GT')
    plt.plot(xs, points1, 'ro-', xs, points2, 'g+-')
    plt.title('predictions VS GT')
    plt.legend()
    plt.savefig('./image/prediction_gt.png')
69 |
70 |
def main():
    """Train linear regression on the UCI housing dataset, then infer.

    Builds a one-layer linear model, runs the training loop (periodically
    evaluating on the test set and saving an inference model), then reloads
    the saved model to predict prices for one test batch and plots the
    predictions against the ground truth. Reads CLI options from the
    module-level `args`.
    """
    batch_size = 20

    if args.enable_ce:
        # CE runs must be reproducible, so the readers are not shuffled.
        train_reader = paddle.batch(
            paddle.dataset.uci_housing.train(), batch_size=batch_size)
        test_reader = paddle.batch(
            paddle.dataset.uci_housing.test(), batch_size=batch_size)
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.train(), buf_size=500),
            batch_size=batch_size)
        test_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.uci_housing.test(), buf_size=500),
            batch_size=batch_size)

    # feature vector of length 13
    x = fluid.data(name='x', shape=[None, 13], dtype='float32')
    y = fluid.data(name='y', shape=[None, 1], dtype='float32')

    main_program = fluid.default_main_program()
    startup_program = fluid.default_startup_program()

    if args.enable_ce:
        # Fixed seeds keep CE metrics comparable across runs.
        main_program.random_seed = 90
        startup_program.random_seed = 90

    # Linear model: a single FC layer with no activation, trained on MSE.
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_loss = fluid.layers.mean(cost)

    # Clone for evaluation BEFORE the optimizer adds backward/update ops,
    # so the test program contains only the forward pass.
    test_program = main_program.clone(for_test=True)

    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_loss)

    # can use CPU or GPU
    use_cuda = args.use_gpu
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # Specify the directory to save the parameters
    params_dirname = "fit_a_line.inference.model"
    num_epochs = args.num_epochs

    # main train loop.
    feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
    exe.run(startup_program)

    train_prompt = "Train cost"
    test_prompt = "Test cost"
    step = 0

    exe_test = fluid.Executor(place)

    for pass_id in range(num_epochs):
        for data_train in train_reader():
            avg_loss_value, = exe.run(
                main_program,
                feed=feeder.feed(data_train),
                fetch_list=[avg_loss])
            if step % 10 == 0:  # record a train cost every 10 batches
                print("%s, Step %d, Cost %f" %
                      (train_prompt, step, avg_loss_value[0]))

            if step % 100 == 0:  # record a test cost every 100 batches
                test_metics = train_test(
                    executor=exe_test,
                    program=test_program,
                    reader=test_reader,
                    fetch_list=[avg_loss],
                    feeder=feeder)
                print("%s, Step %d, Cost %f" %
                      (test_prompt, step, test_metics[0]))
                # If the accuracy is good enough, we can stop the training.
                # NOTE(review): this break only exits the inner (batch) loop;
                # the next epoch still runs — confirm whether that's intended.
                if test_metics[0] < 10.0:
                    break

            step += 1

        if math.isnan(float(avg_loss_value[0])):
            sys.exit("got NaN loss, training failed.")
        if params_dirname is not None:
            # We can save the trained parameters for the inferences later
            fluid.io.save_inference_model(params_dirname, ['x'], [y_predict],
                                          exe)

        if args.enable_ce and pass_id == args.num_epochs - 1:
            # Tab-separated KPI lines consumed by _ce.py via .run_ce.sh.
            print("kpis\ttrain_cost\t%f" % avg_loss_value[0])
            print("kpis\ttest_cost\t%f" % test_metics[0])

    infer_exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()

    # infer
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names, fetch_targets
         ] = fluid.io.load_inference_model(params_dirname, infer_exe)
        batch_size = 10

        infer_reader = paddle.batch(
            paddle.dataset.uci_housing.test(), batch_size=batch_size)

        # Take a single batch of test samples for demonstration.
        infer_data = next(infer_reader())
        infer_feat = numpy.array(
            [data[0] for data in infer_data]).astype("float32")
        infer_label = numpy.array(
            [data[1] for data in infer_data]).astype("float32")

        assert feed_target_names[0] == 'x'
        results = infer_exe.run(
            inference_program,
            feed={feed_target_names[0]: numpy.array(infer_feat)},
            fetch_list=fetch_targets)

        print("infer results: (House Price)")
        for idx, val in enumerate(results[0]):
            print("%d: %.2f" % (idx, val))

        print("\nground truth:")
        for idx, val in enumerate(infer_label):
            print("%d: %.2f" % (idx, val))

        save_result(results[0], infer_label)
197 |
198 |
if __name__ == '__main__':
    # Parse CLI flags once at entry; `args` is read as a global inside main().
    args = parse_args()
    main()
202 |
--------------------------------------------------------------------------------
/02.recognize_digits/.gitignore:
--------------------------------------------------------------------------------
1 | data/raw_data
2 | data/train.list
3 | data/test.list
4 | *.log
5 | *.pyc
6 | plot.png
7 |
--------------------------------------------------------------------------------
/02.recognize_digits/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This file is only used for continuous evaluation.
# train.py emits "kpis\t<name>\t<value>" lines, which _ce.py records.
python train.py --enable_ce | python _ce.py
4 |
5 |
--------------------------------------------------------------------------------
/02.recognize_digits/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 | from kpi import AccKpi
10 |
# KPI trackers for continuous evaluation.
# NOTE(review): 0.02 appears to be the allowed relative diff and 0 the
# actived step — confirm against the kpi module's CostKpi/AccKpi signature.
train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=True, desc='train cost')
test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost')
test_acc_kpi = AccKpi('test_acc', 0.02, 0, actived=True, desc='test acc')
tracking_kpis = [train_cost_kpi, test_cost_kpi, test_acc_kpi]
15 |
16 |
def parse_log(log):
    """Yield (kpi_name, kpi_value) pairs parsed from a training log.

    Only lines with exactly three tab-separated fields whose first field is
    the literal marker 'kpis' are emitted. Every examined line is echoed so
    the CE job log shows what was parsed.
    """
    for raw in log.split('\n'):
        fields = raw.strip().split('\t')
        print(fields)
        if len(fields) != 3 or fields[0] != 'kpis':
            continue
        yield fields[1], float(fields[2])
25 |
26 |
def log_to_ce(log):
    """Record every KPI found in *log* and persist it via its tracker."""
    trackers = {kpi.name: kpi for kpi in tracking_kpis}
    for name, value in parse_log(log):
        print(name, value)
        trackers[name].add_record(value)
        trackers[name].persist()
35 |
36 |
if __name__ == '__main__':
    # The training log is piped in on stdin by .run_ce.sh.
    log = sys.stdin.read()
    log_to_ce(log)
40 |
--------------------------------------------------------------------------------
/02.recognize_digits/client/client.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from PIL import Image
3 | import numpy as np
4 | import os
5 |
6 | # this client is used by Paddle serve: https://github.com/PaddlePaddle/book/tree/develop/serve
7 | # please do not use it directly
8 |
9 |
def load_image(file):
    """Load an image as a flat, normalized grayscale vector.

    Args:
        file: path to the image file.

    Returns:
        numpy.ndarray of shape (784,), dtype float32, values in [0, 1].
    """
    im = Image.open(file).convert('L')  # 'L' = 8-bit grayscale
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # (ANTIALIAS had been an alias for it since Pillow 2.7).
    im = im.resize((28, 28), Image.LANCZOS)
    im = np.array(im).astype(np.float32).flatten()
    im = im / 255.0
    return im
16 |
17 |
# Resolve the sample image relative to this script so it works from any CWD.
cur_dir = os.path.dirname(os.path.realpath(__file__))
data = load_image(cur_dir + '/../image/infer_3.png')
data = data.tolist()  # JSON cannot carry numpy arrays; send a plain list

# POST the pixel vector to the local model server and print its reply.
r = requests.post("http://0.0.0.0:8000", json={'img': data})

print(r.text)
25 |
--------------------------------------------------------------------------------
/02.recognize_digits/image/01.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/01.gif
--------------------------------------------------------------------------------
/02.recognize_digits/image/02.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/02.gif
--------------------------------------------------------------------------------
/02.recognize_digits/image/03.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/03.gif
--------------------------------------------------------------------------------
/02.recognize_digits/image/04.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/04.gif
--------------------------------------------------------------------------------
/02.recognize_digits/image/05.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/05.gif
--------------------------------------------------------------------------------
/02.recognize_digits/image/cnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/cnn.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/cnn_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/cnn_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/cnn_train_log.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/cnn_train_log.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/cnn_train_log_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/cnn_train_log_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/conv_layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/conv_layer.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/infer_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/infer_3.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/max_pooling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/max_pooling.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/max_pooling_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/max_pooling_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/mlp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/mlp.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/mlp_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/mlp_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/mlp_train_log.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/mlp_train_log.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/mlp_train_log_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/mlp_train_log_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/mnist_example_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/mnist_example_image.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/softmax_regression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/softmax_regression.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/softmax_regression_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/softmax_regression_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/softmax_train_log.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/softmax_train_log.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/softmax_train_log_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/softmax_train_log_en.png
--------------------------------------------------------------------------------
/02.recognize_digits/image/train_and_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/02.recognize_digits/image/train_and_test.png
--------------------------------------------------------------------------------
/02.recognize_digits/train.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import print_function
16 |
17 | import os
18 | import argparse
19 | from PIL import Image
20 | import numpy
21 | import paddle
22 | import paddle.fluid as fluid
23 |
24 |
def _str_to_bool(value):
    """argparse-friendly bool parser.

    `type=bool` is broken with argparse: bool('False') is True, so any
    non-empty string would enable the flag. This maps the usual textual
    spellings to a real boolean instead.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', 't', 'yes', 'y', '1')


def parse_args():
    """Parse command-line arguments for the MNIST training script.

    Returns:
        argparse.Namespace with fields: enable_ce (bool), use_gpu (bool),
        num_epochs (int).
    """
    parser = argparse.ArgumentParser("mnist")
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help="If set, run the task with continuous evaluation logs.")
    parser.add_argument(
        '--use_gpu',
        type=_str_to_bool,
        default=False,
        help="Whether to use GPU or not.")
    parser.add_argument(
        '--num_epochs', type=int, default=5, help="number of epochs.")
    args = parser.parse_args()
    return args
40 |
41 |
def loss_net(hidden, label):
    """Append a 10-way softmax classifier head with its training metrics.

    Args:
        hidden: feature tensor feeding the final FC layer.
        label: ground-truth label tensor.

    Returns:
        (prediction, avg_loss, acc) fluid variables.
    """
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    avg_loss = fluid.layers.mean(
        fluid.layers.cross_entropy(input=prediction, label=label))
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc
48 |
49 |
def multilayer_perceptron(img, label):
    """Two tanh FC layers (200 units each) followed by the shared softmax head."""
    first = fluid.layers.fc(input=img, size=200, act='tanh')
    second = fluid.layers.fc(input=first, size=200, act='tanh')
    return loss_net(second, label)
54 |
55 |
def softmax_regression(img, label):
    """Softmax regression baseline: the shared classifier head applied
    directly to the raw image pixels (no hidden layers)."""
    return loss_net(img, label)
58 |
59 |
def convolutional_neural_network(img, label):
    """LeNet-style CNN: two conv+pool stages (batch norm after the first),
    followed by the shared softmax classifier head."""

    def _conv_pool(data, filters):
        # 5x5 convolution with ReLU, then 2x2 max-pool with stride 2.
        return fluid.nets.simple_img_conv_pool(
            input=data,
            filter_size=5,
            num_filters=filters,
            pool_size=2,
            pool_stride=2,
            act="relu")

    stage1 = fluid.layers.batch_norm(_conv_pool(img, 20))
    stage2 = _conv_pool(stage1, 50)
    return loss_net(stage2, label)
77 |
78 |
def train(nn_type,
          use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    """Train the selected MNIST network and save an inference model.

    Args:
        nn_type: 'softmax_regression', 'multilayer_perceptron', or any other
            value for the convolutional network.
        use_cuda: train on GPU when True; silently returns if paddle was not
            compiled with CUDA support.
        save_dirname: directory for fluid.io.save_inference_model; saving is
            skipped when None.
        model_filename: optional model filename forwarded to the saver.
        params_filename: optional params filename forwarded to the saver.

    Reads the module-level globals `args`, `BATCH_SIZE` and `PASS_NUM`.
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    startup_program = fluid.default_startup_program()
    main_program = fluid.default_main_program()

    if args.enable_ce:
        # CE runs: no shuffling and fixed seeds, for reproducible metrics.
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
        startup_program.random_seed = 90
        main_program.random_seed = 90
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)

    img = fluid.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')

    # Select the network-builder function by name.
    if nn_type == 'softmax_regression':
        net_conf = softmax_regression
    elif nn_type == 'multilayer_perceptron':
        net_conf = multilayer_perceptron
    else:
        net_conf = convolutional_neural_network

    prediction, avg_loss, acc = net_conf(img, label)

    # Clone for evaluation BEFORE the optimizer adds backward/update ops.
    test_program = main_program.clone(for_test=True)
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_loss)

    def train_test(train_test_program, train_test_feed, train_test_reader):
        # Evaluate the cloned test program over the whole test set and
        # return (mean loss, mean accuracy).
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(
                program=train_test_program,
                feed=train_test_feed.feed(test_data),
                fetch_list=[acc, avg_loss])
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # get test acc and loss
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        return avg_loss_val_mean, acc_val_mean

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    exe = fluid.Executor(place)

    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
    exe.run(startup_program)
    epochs = [epoch_id for epoch_id in range(PASS_NUM)]

    lists = []
    step = 0
    for epoch_id in epochs:
        for step_id, data in enumerate(train_reader()):
            metrics = exe.run(
                main_program,
                feed=feeder.feed(data),
                fetch_list=[avg_loss, acc])
            if step % 100 == 0:
                # NOTE(review): the format labels are swapped — %d after
                # "Pass" is actually the step and after "Epoch" the epoch id
                # is correct, but "Pass" here prints `step`.
                print("Pass %d, Epoch %d, Cost %f" % (step, epoch_id,
                                                      metrics[0]))
            step += 1
        # test for epoch
        avg_loss_val, acc_val = train_test(
            train_test_program=test_program,
            train_test_reader=test_reader,
            train_test_feed=feeder)

        print("Test with Epoch %d, avg_cost: %s, acc: %s" %
              (epoch_id, avg_loss_val, acc_val))
        lists.append((epoch_id, avg_loss_val, acc_val))
        if save_dirname is not None:
            fluid.io.save_inference_model(
                save_dirname, ["img"], [prediction],
                exe,
                model_filename=model_filename,
                params_filename=params_filename)

        if args.enable_ce:
            # Tab-separated KPI lines consumed by _ce.py via .run_ce.sh.
            print("kpis\ttrain_cost\t%f" % metrics[0])
            print("kpis\ttest_cost\t%s" % avg_loss_val)
            print("kpis\ttest_acc\t%s" % acc_val)

    # find the best pass
    # NOTE(review): the lambda parameter shadows the builtin `list`.
    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1]))
    print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))
180 |
181 |
def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    """Reload the saved inference model and classify image/infer_3.png.

    Args:
        use_cuda: run inference on GPU when True.
        save_dirname: directory the model was saved to; no-op when None.
        model_filename: optional model filename forwarded to the loader.
        params_filename: optional params filename forwarded to the loader.
    """
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    def load_image(file):
        # Grayscale, resize to the 28x28 input the network expects, then
        # scale pixels to [-1, 1].
        # NOTE(review): client/client.py scales to [0, 1] instead — confirm
        # which range the served model actually expects.
        im = Image.open(file).convert('L')
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
        # filter (ANTIALIAS had been an alias for it since Pillow 2.7).
        im = im.resize((28, 28), Image.LANCZOS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    tensor_img = load_image(cur_dir + '/image/infer_3.png')

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_img},
            fetch_list=fetch_targets)
        # argsort puts the most probable class index last.
        lab = numpy.argsort(results)
        print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1])
220 |
221 |
def main(use_cuda, nn_type):
    """Train the chosen MNIST network, then run inference on a sample image.

    Args:
        use_cuda: run on GPU when True (and CUDA support is compiled in).
        nn_type: network selector forwarded to train(); also used to name
            the saved inference-model directory.
    """
    save_dirname = "recognize_digits_" + nn_type + ".inference.model"
    model_filename = None
    params_filename = None

    # call train() with is_local argument to run distributed train
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
239 |
240 |
if __name__ == '__main__':
    args = parse_args()
    # Globals read inside train(): batch size and number of passes.
    BATCH_SIZE = 64
    PASS_NUM = args.num_epochs
    use_cuda = args.use_gpu
    # predict = 'softmax_regression' # uncomment for Softmax
    # predict = 'multilayer_perceptron' # uncomment for MLP
    predict = 'convolutional_neural_network' # uncomment for LeNet5
    main(use_cuda=use_cuda, nn_type=predict)
250 |
--------------------------------------------------------------------------------
/03.image_classification/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | train.log
3 | output
4 | data/cifar-10-batches-py/
5 | data/cifar-10-python.tar.gz
6 | data/*.txt
7 | data/*.list
8 | data/mean.meta
9 |
--------------------------------------------------------------------------------
/03.image_classification/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This file is only used for continuous evaluation.
# Deterministic cuDNN and a pinned GPU keep CE runs reproducible.
export FLAGS_cudnn_deterministic=true
export CUDA_VISIBLE_DEVICES=0
python train.py --num_epochs 1 --use_gpu 1 --enable_ce | python _ce.py
6 |
7 |
--------------------------------------------------------------------------------
/03.image_classification/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 | from kpi import AccKpi
10 |
# KPI trackers for continuous evaluation.
# NOTE(review): 0.02 appears to be the allowed relative diff and 0 the
# actived step — confirm against the kpi module's CostKpi/AccKpi signature.
train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=True, desc='train cost')
train_acc_kpi = AccKpi('train_acc', 0.02, 0, actived=True, desc='train acc')
test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost')
test_acc_kpi = AccKpi('test_acc', 0.02, 0, actived=True, desc='test acc')

tracking_kpis = [train_cost_kpi, train_acc_kpi, test_cost_kpi, test_acc_kpi]
17 |
18 |
def parse_log(log):
    """Scan a training log and yield a (name, value) pair per KPI line.

    A KPI line has exactly three tab-separated fields, the first being the
    literal marker 'kpis'. Each examined field list is printed so the CE
    job log shows what was parsed.
    """
    for line in log.split('\n'):
        parts = line.strip().split('\t')
        print(parts)
        if len(parts) == 3 and parts[0] == 'kpis':
            name, raw_value = parts[1], parts[2]
            yield name, float(raw_value)
27 |
28 |
def log_to_ce(log):
    """Feed every KPI record parsed from *log* into its tracker and persist."""
    by_name = {}
    for tracker in tracking_kpis:
        by_name[tracker.name] = tracker
    for kpi_name, kpi_value in parse_log(log):
        print(kpi_name, kpi_value)
        tracker = by_name[kpi_name]
        tracker.add_record(kpi_value)
        tracker.persist()
37 |
38 |
if __name__ == '__main__':
    # The training log is piped in on stdin by .run_ce.sh.
    log = sys.stdin.read()
    log_to_ce(log)
42 |
--------------------------------------------------------------------------------
/03.image_classification/image/cifar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/cifar.png
--------------------------------------------------------------------------------
/03.image_classification/image/dog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/dog.png
--------------------------------------------------------------------------------
/03.image_classification/image/dog_cat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/dog_cat.png
--------------------------------------------------------------------------------
/03.image_classification/image/fea_conv0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/fea_conv0.png
--------------------------------------------------------------------------------
/03.image_classification/image/flowers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/flowers.png
--------------------------------------------------------------------------------
/03.image_classification/image/googlenet.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/googlenet.jpeg
--------------------------------------------------------------------------------
/03.image_classification/image/ilsvrc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/ilsvrc.png
--------------------------------------------------------------------------------
/03.image_classification/image/inception.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/inception.png
--------------------------------------------------------------------------------
/03.image_classification/image/inception_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/inception_en.png
--------------------------------------------------------------------------------
/03.image_classification/image/lenet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/lenet.png
--------------------------------------------------------------------------------
/03.image_classification/image/lenet_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/lenet_en.png
--------------------------------------------------------------------------------
/03.image_classification/image/plot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/plot.png
--------------------------------------------------------------------------------
/03.image_classification/image/plot_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/plot_en.png
--------------------------------------------------------------------------------
/03.image_classification/image/resnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/resnet.png
--------------------------------------------------------------------------------
/03.image_classification/image/resnet_block.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/resnet_block.jpg
--------------------------------------------------------------------------------
/03.image_classification/image/train_and_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/train_and_test.png
--------------------------------------------------------------------------------
/03.image_classification/image/variations.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/variations.png
--------------------------------------------------------------------------------
/03.image_classification/image/variations_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/variations_en.png
--------------------------------------------------------------------------------
/03.image_classification/image/vgg16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/03.image_classification/image/vgg16.png
--------------------------------------------------------------------------------
/03.image_classification/resnet.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import print_function
16 |
17 | import paddle.fluid as fluid
18 |
19 | __all__ = ['resnet_cifar10']
20 |
21 |
def conv_bn_layer(input,
                  ch_out,
                  filter_size,
                  stride,
                  padding,
                  act='relu',
                  bias_attr=False):
    """Convolution followed by batch normalization.

    The convolution itself is linear (act=None); the nonlinearity `act`
    is applied by the batch-norm layer instead.
    """
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=ch_out,
        filter_size=filter_size,
        stride=stride,
        padding=padding,
        act=None,
        bias_attr=bias_attr)
    return fluid.layers.batch_norm(input=conv, act=act)
38 |
39 |
def shortcut(input, ch_in, ch_out, stride):
    """Identity shortcut, or a 1x1 projection when channel counts differ."""
    if ch_in == ch_out:
        return input
    return conv_bn_layer(input, ch_out, 1, stride, 0, None)
45 |
46 |
def basicblock(input, ch_in, ch_out, stride):
    """Two-layer residual block: conv-bn-relu, conv-bn, add shortcut, relu."""
    branch = conv_bn_layer(input, ch_out, 3, stride, 1)
    branch = conv_bn_layer(branch, ch_out, 3, 1, 1, act=None, bias_attr=True)
    residual = shortcut(input, ch_in, ch_out, stride)
    return fluid.layers.elementwise_add(x=branch, y=residual, act='relu')
52 |
53 |
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
    """Stack `count` residual blocks; only the first may change stride/width."""
    out = block_func(input, ch_in, ch_out, stride)
    for _ in range(count - 1):
        out = block_func(out, ch_out, ch_out, 1)
    return out
59 |
60 |
def resnet_cifar10(ipt, depth=32):
    """Build a CIFAR-10 ResNet of the given depth.

    Args:
        ipt: input image variable (3x32x32 RGB, NCHW).
        depth: total layer count; must satisfy depth = 6n + 2,
            i.e. one of 20, 32, 44, 56, 110, 1202.

    Returns:
        Softmax prediction over the 10 CIFAR-10 classes.
    """
    assert (depth - 2) % 6 == 0, "depth must be of the form 6n + 2"
    n = (depth - 2) // 6
    # Stage widths are 16, 32 and 64 channels.  (The original code
    # assigned an unused `nStages = {16, 64, 128}` set literal here;
    # it was dead code and has been removed.)
    conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1)
    res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
    res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
    res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
    # Global average pooling over the final 8x8 feature map.
    pool = fluid.layers.pool2d(
        input=res3, pool_size=8, pool_type='avg', pool_stride=1)
    predict = fluid.layers.fc(input=pool, size=10, act='softmax')
    return predict
74 |
--------------------------------------------------------------------------------
/03.image_classification/train.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
14 |
15 | from __future__ import print_function
16 |
17 | import os
18 | import argparse
19 | import paddle
20 | import paddle.fluid as fluid
21 | import numpy
22 | import sys
23 | from vgg import vgg_bn_drop
24 | from resnet import resnet_cifar10
25 |
26 |
def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward compatible with the zero-argument call).

    Returns:
        argparse.Namespace with enable_ce, use_gpu and num_epochs.
    """
    parser = argparse.ArgumentParser("image_classification")
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help='If set, run the task with continuous evaluation logs.')
    # BUG FIX: `type=bool` is an argparse pitfall -- bool('0') is True
    # because every non-empty string is truthy, so `--use_gpu 0` enabled
    # the GPU.  Parse the value as an int and convert explicitly.
    parser.add_argument(
        '--use_gpu',
        type=lambda v: bool(int(v)),
        default=0,
        help='whether to use gpu')
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='number of epoch')
    args = parser.parse_args(argv)
    return args
39 |
40 |
def inference_network():
    """Define the classifier over 3x32x32 RGB CIFAR images."""
    images = fluid.layers.data(
        name='pixel', shape=[3, 32, 32], dtype='float32')
    # Swap in vgg_bn_drop(images) here to train the VGG variant instead.
    predict = resnet_cifar10(images, 32)
    return predict
49 |
50 |
def train_network(predict):
    """Attach loss and metric to the prediction.

    Returns [mean cross-entropy cost, accuracy].
    """
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    xent = fluid.layers.cross_entropy(input=predict, label=label)
    mean_cost = fluid.layers.mean(xent)
    acc = fluid.layers.accuracy(input=predict, label=label)
    return [mean_cost, acc]
57 |
58 |
def optimizer_program():
    """Adam optimizer with a fixed 1e-3 learning rate."""
    return fluid.optimizer.Adam(learning_rate=0.001)
61 |
62 |
def train(use_cuda, params_dirname):
    """Train the classifier on CIFAR-10 and save the inference model.

    Args:
        use_cuda: run on GPU 0 when True, otherwise CPU.
        params_dirname: directory to save the inference model into,
            or None to skip saving.

    Relies on the module-level `args` (output of parse_args).
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    BATCH_SIZE = 128

    if args.enable_ce:
        # Deterministic data order for continuous evaluation.
        train_reader = paddle.batch(
            paddle.dataset.cifar.train10(), batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
    else:
        test_reader = paddle.batch(
            paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.cifar.train10(), buf_size=128 * 100),
            batch_size=BATCH_SIZE)

    feed_order = ['pixel', 'label']

    main_program = fluid.default_main_program()
    start_program = fluid.default_startup_program()

    if args.enable_ce:
        main_program.random_seed = 90
        start_program.random_seed = 90

    predict = inference_network()
    avg_cost, acc = train_network(predict)

    # Test program: must be cloned before optimizer ops are appended.
    test_program = main_program.clone(for_test=True)
    optimizer = optimizer_program()
    optimizer.minimize(avg_cost)

    exe = fluid.Executor(place)

    EPOCH_NUM = args.num_epochs

    # For training test cost
    def train_test(program, reader):
        # Average [cost, accuracy] over every batch produced by `reader`.
        count = 0
        feed_var_list = [
            program.global_block().var(var_name) for var_name in feed_order
        ]
        feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place)
        test_exe = fluid.Executor(place)
        accumulated = len([avg_cost, acc]) * [0]
        for tid, test_data in enumerate(reader()):
            avg_cost_np = test_exe.run(
                program=program,
                feed=feeder_test.feed(test_data),
                fetch_list=[avg_cost, acc])
            accumulated = [
                x[0] + x[1][0] for x in zip(accumulated, avg_cost_np)
            ]
            count += 1
        return [x / count for x in accumulated]

    # main train loop.
    def train_loop():
        feed_var_list_loop = [
            main_program.global_block().var(var_name) for var_name in feed_order
        ]
        feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
        exe.run(start_program)

        step = 0
        for pass_id in range(EPOCH_NUM):
            for step_id, data_train in enumerate(train_reader()):
                avg_loss_value = exe.run(
                    main_program,
                    feed=feeder.feed(data_train),
                    fetch_list=[avg_cost, acc])
                if step_id % 100 == 0:
                    # BUG FIX: pass_id and step_id were printed in swapped
                    # order ("Pass" showed the batch index and "Batch" the
                    # epoch index).
                    print("\nPass %d, Batch %d, Cost %f, Acc %f" % (
                        pass_id, step_id, avg_loss_value[0],
                        avg_loss_value[1]))
                else:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                step += 1

            avg_cost_test, accuracy_test = train_test(
                test_program, reader=test_reader)
            print('\nTest with Pass {0}, Loss {1:2.2}, Acc {2:2.2}'.format(
                pass_id, avg_cost_test, accuracy_test))

            if params_dirname is not None:
                fluid.io.save_inference_model(params_dirname, ["pixel"],
                                              [predict], exe)

            if args.enable_ce and pass_id == EPOCH_NUM - 1:
                # KPI lines consumed by _ce.py through the pipe in .run_ce.sh.
                print("kpis\ttrain_cost\t%f" % avg_loss_value[0])
                print("kpis\ttrain_acc\t%f" % avg_loss_value[1])
                print("kpis\ttest_cost\t%f" % avg_cost_test)
                print("kpis\ttest_acc\t%f" % accuracy_test)

    train_loop()
160 |
161 |
def infer(use_cuda, params_dirname=None):
    """Load the saved inference model and classify image/dog.png.

    Args:
        use_cuda: run on GPU 0 when True, otherwise CPU.
        params_dirname: directory the inference model was saved into.
    """
    from PIL import Image
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()

    def load_image(infer_file):
        # Return a [1, 3, 32, 32] float32 array with values scaled to [0, 1].
        im = Image.open(infer_file)
        im = im.resize((32, 32), Image.ANTIALIAS)

        im = numpy.array(im).astype(numpy.float32)
        # The storage order of the loaded image is W(width),
        # H(height), C(channel). PaddlePaddle requires
        # the CHW order, so transpose them.
        im = im.transpose((2, 0, 1))  # CHW
        im = im / 255.0

        # Add one dimension to mimic the list format.
        im = numpy.expand_dims(im, axis=0)
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    img = load_image(cur_dir + '/image/dog.png')

    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: img},
            fetch_list=fetch_targets)

        # infer label: CIFAR-10 class names indexed by label id.
        label_list = [
            "airplane", "automobile", "bird", "cat", "deer", "dog", "frog",
            "horse", "ship", "truck"
        ]

        print("infer results: %s" % label_list[numpy.argmax(results[0])])
208 |
209 |
def main(use_cuda):
    """Train, then run inference; skip GPU runs on CPU-only builds."""
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    save_path = "image_classification_resnet.inference.model"
    train(use_cuda=use_cuda, params_dirname=save_path)
    infer(use_cuda=use_cuda, params_dirname=save_path)
218 |
219 |
if __name__ == '__main__':
    # For demo purpose, the training runs on CPU
    # Please change accordingly.
    # NOTE: `args` is intentionally module-global; train() reads it.
    args = parse_args()
    use_cuda = args.use_gpu
    main(use_cuda)
226 |
--------------------------------------------------------------------------------
/03.image_classification/vgg.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import print_function
16 |
17 | import paddle.fluid as fluid
18 |
19 |
def vgg_bn_drop(input):
    """VGG-style CIFAR-10 network.

    Five conv groups (batch-norm and dropout inside each group), then two
    512-wide fully-connected layers and a 10-way softmax.
    """

    def conv_block(ipt, num_filter, groups, dropouts):
        # img_conv_group bundles `groups` conv+bn layers and a 2x2 max pool.
        return fluid.nets.img_conv_group(
            input=ipt,
            pool_size=2,
            pool_stride=2,
            conv_num_filter=[num_filter] * groups,
            conv_filter_size=3,
            conv_act='relu',
            conv_with_batchnorm=True,
            conv_batchnorm_drop_rate=dropouts,
            pool_type='max')

    # (filters, conv layers per group, per-layer batchnorm dropout rates)
    stack = input
    for num_filter, groups, dropouts in (
        (64, 2, [0.3, 0]),
        (128, 2, [0.4, 0]),
        (256, 3, [0.4, 0.4, 0]),
        (512, 3, [0.4, 0.4, 0]),
        (512, 3, [0.4, 0.4, 0]),
    ):
        stack = conv_block(stack, num_filter, groups, dropouts)

    drop = fluid.layers.dropout(x=stack, dropout_prob=0.5)
    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
    bn = fluid.layers.batch_norm(input=fc1, act='relu')
    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
    fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
    predict = fluid.layers.fc(input=fc2, size=10, act='softmax')
    return predict
46 |
--------------------------------------------------------------------------------
/04.word2vec/.gitignore:
--------------------------------------------------------------------------------
1 | data/train.list
2 | data/test.list
3 | data/simple-examples*
4 |
--------------------------------------------------------------------------------
/04.word2vec/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This file is only used for continuous evaluation:
# run training with CE logging and pipe the log into the KPI checker.
python train.py --enable_ce | python _ce.py
4 |
5 |
--------------------------------------------------------------------------------
/04.word2vec/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 |
# KPI tracked by the CE system: training cost with a 2% tolerance.
train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=True, desc='train cost')
tracking_kpis = [train_cost_kpi]
12 |
13 |
def parse_log(log):
    """Yield (kpi_name, kpi_value) pairs from lines shaped 'kpis\\tname\\tvalue'."""
    for raw in log.split('\n'):
        fields = raw.strip().split('\t')
        print(fields)
        if len(fields) == 3 and fields[0] == 'kpis':
            yield fields[1], float(fields[2])
22 |
23 |
def log_to_ce(log):
    """Record every KPI found in `log` and persist each tracker."""
    kpi_tracker = {kpi.name: kpi for kpi in tracking_kpis}
    for kpi_name, kpi_value in parse_log(log):
        print(kpi_name, kpi_value)
        tracker = kpi_tracker[kpi_name]
        tracker.add_record(kpi_value)
        tracker.persist()
32 |
33 |
if __name__ == '__main__':
    # Read the training log from stdin (piped in by .run_ce.sh).
    log = sys.stdin.read()
    log_to_ce(log)
37 |
--------------------------------------------------------------------------------
/04.word2vec/calculate_dis.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Example:
16 | python calculate_dis.py DICTIONARYTXT FEATURETXT
17 |
18 | Required arguments:
19 | DICTIONARYTXT the dictionary generated in dataprovider
20 | FEATURETXT the text format word feature, one line for one word
21 | """
22 |
23 | import numpy as np
24 | from argparse import ArgumentParser
25 |
26 |
def load_dict(fdict):
    """Build a word -> index mapping from a dictionary file.

    Args:
        fdict: open file object, one word per line.

    Returns:
        dict mapping each stripped word to its 0-based line number.
    """
    # BUG FIX: the original used the Python-2-only `xrange`; `enumerate`
    # is equivalent (for duplicates, the last occurrence wins either way)
    # and works on both Python 2 and 3.
    return {line.strip(): idx for idx, line in enumerate(fdict)}
31 |
32 |
def load_emb(femb):
    """Read a text embedding file, skipping its header line.

    Each remaining line is a comma-separated feature vector; every
    vector is L2-normalised before being appended to the result list.
    """
    bank = []
    for lineno, line in enumerate(femb):
        if lineno == 0:
            # First line is the file header produced by format_convert.py.
            continue
        vec = np.array([float(tok) for tok in line.strip().split(',')])
        bank.append(vec / np.linalg.norm(vec))
    return bank
44 |
45 |
def calcos(id1, id2, Fea):
    """Cosine similarity between two (already normalised) feature rows."""
    left, right = Fea[id1], Fea[id2]
    return np.dot(left.transpose(), right)
50 |
51 |
def get_wordidx(w, Dict):
    """Return the index of word `w` in `Dict`, or -1 (with a message) if absent."""
    if w not in Dict:
        # Parenthesised single-argument print behaves identically under
        # Python 2 and 3 (the rest of this script is Python-2-only).
        print('ERROR: %s not in the dictionary' % w)
        return -1
    return Dict[w]
57 |
58 |
if __name__ == '__main__':
    # NOTE: this block is Python 2 only (`raw_input`, `print` statement).
    parser = ArgumentParser()
    parser.add_argument('dict', help='dictionary file')
    parser.add_argument('fea', help='feature file')
    args = parser.parse_args()

    with open(args.dict) as fdict:
        word_dict = load_dict(fdict)

    with open(args.fea) as ffea:
        word_fea = load_emb(ffea)

    # Interactive loop: read two words per line and report the cosine
    # similarity of their embeddings; unknown words are skipped.
    while True:
        w1, w2 = raw_input("please input two words: ").split()
        w1_id = get_wordidx(w1, word_dict)
        w2_id = get_wordidx(w2, word_dict)
        if w1_id == -1 or w2_id == -1:
            continue
        print 'similarity: %s' % (calcos(w1_id, w2_id, word_fea))
78 |
--------------------------------------------------------------------------------
/04.word2vec/format_convert.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Example:
16 | python format_convert.py --b2t -i INPUT -o OUTPUT -d DIM
17 | python format_convert.py --t2b -i INPUT -o OUTPUT
18 |
19 | Options:
20 | -h, --help show this help message and exit
21 | --b2t convert parameter file of embedding model from binary to text
22 | --t2b convert parameter file of embedding model from text to binary
23 | -i INPUT input parameter file name
24 | -o OUTPUT output parameter file name
25 | -d DIM dimension of parameter
26 | """
27 | from optparse import OptionParser
28 | import struct
29 |
30 |
def binary2text(input, output, paraDim):
    """
    Convert a binary parameter file of embedding model to be a text file.
    input: the name of input binary parameter file, the format is:
        1) the first 16 bytes is filehead:
            version(4 bytes): version of paddle, default = 0
            floatSize(4 bytes): sizeof(float) = 4
            paraCount(8 bytes): total number of parameter
        2) the next (paraCount * 4) bytes is parameters, each has 4 bytes
    output: the name of output text parameter file, for example:
        0,4,32156096
        -0.7845433,1.1937413,-0.1704215,...
        0.0000909,0.0009465,-0.0008813,...
        ...
        the format is:
        1) the first line is filehead:
            version=0, floatSize=4, paraCount=32156096
        2) other lines print the paramters
            a) each line prints paraDim paramters splitted by ','
            b) there is paraCount/paraDim lines (embedding words)
    paraDim: dimension of parameters

    NOTE: Python 2 only (uses the `print >> file` statement).
    """
    fi = open(input, "rb")
    fo = open(output, "w")
    """
    """
    # Unpack the 16-byte header: version (i), float size (i), count (l).
    # NOTE(review): "iil" uses native sizes/alignment, so the header width
    # is platform-dependent -- assumed to be a 64-bit platform where
    # 'l' is 8 bytes; confirm before porting.
    version, floatSize, paraCount = struct.unpack("iil", fi.read(16))
    newHead = ','.join([str(version), str(floatSize), str(paraCount)])
    print >> fo, newHead

    # Read one embedding row (paraDim floats = 4*paraDim bytes) at a time.
    bytes = 4 * int(paraDim)
    format = "%df" % int(paraDim)
    context = fi.read(bytes)
    line = 0

    while context:
        numbers = struct.unpack(format, context)
        lst = []
        for i in numbers:
            lst.append('%8.7f' % i)
        print >> fo, ','.join(lst)
        context = fi.read(bytes)
        line += 1
    fi.close()
    fo.close()
    print "binary2text finish, total", line, "lines"
77 |
78 |
def get_para_count(input):
    """Compute the total number of embedding parameters in a text file.

    The dimension is taken from the first line (comma-separated values);
    the result is (number of lines) * dimension.
    """
    with open(input) as f:
        first = f.readline()
        dim = len(first.split(","))
        rows = 1 + sum(1 for _ in f)
    return rows * dim
92 |
93 |
def text2binary(input, output, paddle_head=True):
    """
    Convert a text parameter file of embedding model to be a binary file.
    input: the name of input text parameter file, for example:
        -0.7845433,1.1937413,-0.1704215,...
        0.0000909,0.0009465,-0.0008813,...
        ...
        the format is:
        1) it doesn't have filehead
        2) each line stores the same dimension of parameters,
            the separator is commas ','
    output: the name of output binary parameter file, the format is:
        1) the first 16 bytes is filehead:
            version(4 bytes), floatSize(4 bytes), paraCount(8 bytes)
        2) the next (paraCount * 4) bytes is parameters, each has 4 bytes

    NOTE: Python 2 only (trailing `print` statement).
    NOTE(review): the `paddle_head` flag is never read -- presumably it
    was meant to gate writing the 16-byte header; confirm before use.
    """
    fi = open(input, "r")
    fo = open(output, "wb")

    # Write the 16-byte header: version=0, floatSize=4, paraCount.
    newHead = struct.pack("iil", 0, 4, get_para_count(input))
    fo.write(newHead)

    # `count` tracks the number of text lines (rows) converted.
    count = 0
    for line in fi:
        line = line.strip().split(",")
        for i in range(0, len(line)):
            binary_data = struct.pack("f", float(line[i]))
            fo.write(binary_data)
        count += 1
    fi.close()
    fo.close()
    print "text2binary finish, total", count, "lines"
126 |
127 |
def main():
    """
    Main entry for running format_convert.py

    Exactly one of --b2t / --t2b is expected; -d is only consumed by --b2t.
    """
    usage = "usage: \n" \
            "python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \
            "python %prog --t2b -i INPUT -o OUTPUT"
    parser = OptionParser(usage)
    parser.add_option(
        "--b2t",
        action="store_true",
        help="convert parameter file of embedding model from binary to text")
    parser.add_option(
        "--t2b",
        action="store_true",
        help="convert parameter file of embedding model from text to binary")
    parser.add_option(
        "-i", action="store", dest="input", help="input parameter file name")
    parser.add_option(
        "-o", action="store", dest="output", help="output parameter file name")
    parser.add_option(
        "-d", action="store", dest="dim", help="dimension of parameter")
    (options, args) = parser.parse_args()
    if options.b2t:
        binary2text(options.input, options.output, options.dim)
    if options.t2b:
        text2binary(options.input, options.output)


if __name__ == '__main__':
    main()
159 |
--------------------------------------------------------------------------------
/04.word2vec/image/2d_similarity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/2d_similarity.png
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn1.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn2.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn3.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn4.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn4.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn5.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn5.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn6.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn6.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn7.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn7.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn8.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn8.gif
--------------------------------------------------------------------------------
/04.word2vec/image/Eqn9.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/Eqn9.gif
--------------------------------------------------------------------------------
/04.word2vec/image/cbow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/cbow.png
--------------------------------------------------------------------------------
/04.word2vec/image/cbow_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/cbow_en.png
--------------------------------------------------------------------------------
/04.word2vec/image/ngram.en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/ngram.en.png
--------------------------------------------------------------------------------
/04.word2vec/image/ngram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/ngram.png
--------------------------------------------------------------------------------
/04.word2vec/image/nnlm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/nnlm.png
--------------------------------------------------------------------------------
/04.word2vec/image/nnlm_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/nnlm_en.png
--------------------------------------------------------------------------------
/04.word2vec/image/sentence_emb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/sentence_emb.png
--------------------------------------------------------------------------------
/04.word2vec/image/skipgram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/skipgram.png
--------------------------------------------------------------------------------
/04.word2vec/image/skipgram_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/04.word2vec/image/skipgram_en.png
--------------------------------------------------------------------------------
/05.recommender_system/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .ipynb_checkpoints
3 |
--------------------------------------------------------------------------------
/05.recommender_system/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This file is only used for continuous evaluation:
# run training with CE logging and pipe the log into the KPI checker.
python train.py --enable_ce | python _ce.py
4 |
5 |
--------------------------------------------------------------------------------
/05.recommender_system/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 |
# KPI tracked by the CE system: test cost with a 2% tolerance.
test_cost_kpi = CostKpi('test_cost', 0.02, 0, actived=True, desc='test cost')
tracking_kpis = [test_cost_kpi]
12 |
13 |
def parse_log(log):
    """Extract (name, value) KPI pairs from lines shaped 'kpis\\tname\\tvalue'."""
    for entry in log.split('\n'):
        parts = entry.strip().split('\t')
        print(parts)
        if len(parts) == 3 and parts[0] == 'kpis':
            name, value = parts[1], float(parts[2])
            yield name, value
22 |
23 |
def log_to_ce(log):
    """Record every KPI found in `log` and persist each tracker."""
    trackers = {kpi.name: kpi for kpi in tracking_kpis}
    for name, value in parse_log(log):
        print(name, value)
        trackers[name].add_record(value)
        trackers[name].persist()
32 |
33 |
if __name__ == '__main__':
    # Read the training log from stdin (piped in by .run_ce.sh).
    log = sys.stdin.read()
    log_to_ce(log)
37 |
--------------------------------------------------------------------------------
/05.recommender_system/image/Deep_candidate_generation_model_architecture.en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/Deep_candidate_generation_model_architecture.en.png
--------------------------------------------------------------------------------
/05.recommender_system/image/Deep_candidate_generation_model_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/Deep_candidate_generation_model_architecture.png
--------------------------------------------------------------------------------
/05.recommender_system/image/YouTube_Overview.en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/YouTube_Overview.en.png
--------------------------------------------------------------------------------
/05.recommender_system/image/YouTube_Overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/YouTube_Overview.png
--------------------------------------------------------------------------------
/05.recommender_system/image/formula1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/formula1.png
--------------------------------------------------------------------------------
/05.recommender_system/image/formula2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/formula2.png
--------------------------------------------------------------------------------
/05.recommender_system/image/formula3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/formula3.png
--------------------------------------------------------------------------------
/05.recommender_system/image/output_32_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/output_32_0.png
--------------------------------------------------------------------------------
/05.recommender_system/image/rec_regression_network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/rec_regression_network.png
--------------------------------------------------------------------------------
/05.recommender_system/image/rec_regression_network_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/rec_regression_network_en.png
--------------------------------------------------------------------------------
/05.recommender_system/image/text_cnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/text_cnn.png
--------------------------------------------------------------------------------
/05.recommender_system/image/text_cnn_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/05.recommender_system/image/text_cnn_en.png
--------------------------------------------------------------------------------
/06.understand_sentiment/.gitignore:
--------------------------------------------------------------------------------
1 | data/aclImdb
2 | data/imdb
3 | data/pre-imdb
4 | data/mosesdecoder-master
5 | *.log
6 | model_output
7 | dataprovider_copy_1.py
8 | model.list
9 | *.pyc
10 | .DS_Store
11 |
--------------------------------------------------------------------------------
/06.understand_sentiment/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#This file is only used for continuous evaluation.
# Deterministic cuDNN kernels + a single pinned GPU keep CE runs reproducible.
export FLAGS_cudnn_deterministic=true
export CUDA_VISIBLE_DEVICES=0
# Run each sentiment-model variant for one epoch and pipe its
# "kpis\t<name>\t<value>" stdout lines into the _ce.py tracker.
python train_conv.py --use_gpu 1 --num_epochs=1 --enable_ce | python _ce.py
python train_dyn_rnn.py --use_gpu 1 --num_epochs=1 --enable_ce | python _ce.py
python train_stacked_lstm.py --use_gpu 1 --num_epochs=1 --enable_ce | python _ce.py



--------------------------------------------------------------------------------
/06.understand_sentiment/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 | from kpi import AccKpi
10 |
# One (train/test cost, train/test acc) KPI quartet per model variant
# (conv, dyn_rnn, stacked_lstm).  The 0.02 argument is presumably the
# allowed relative deviation and actived=True presumably enables the
# check -- confirm against the CE `kpi` module.
conv_train_cost_kpi = CostKpi(
    'conv_train_cost', 0.02, 0, actived=True, desc='train cost')
conv_train_acc_kpi = AccKpi(
    'conv_train_acc', 0.02, 0, actived=True, desc='train acc')
conv_test_cost_kpi = CostKpi(
    'conv_test_cost', 0.02, 0, actived=True, desc='test cost')
conv_test_acc_kpi = AccKpi(
    'conv_test_acc', 0.02, 0, actived=True, desc='test acc')

rnn_train_cost_kpi = CostKpi(
    'rnn_train_cost', 0.02, 0, actived=True, desc='train cost')
rnn_train_acc_kpi = AccKpi(
    'rnn_train_acc', 0.02, 0, actived=True, desc='train acc')
rnn_test_cost_kpi = CostKpi(
    'rnn_test_cost', 0.02, 0, actived=True, desc='test cost')
rnn_test_acc_kpi = AccKpi(
    'rnn_test_acc', 0.02, 0, actived=True, desc='test acc')

lstm_train_cost_kpi = CostKpi(
    'lstm_train_cost', 0.02, 0, actived=True, desc='train cost')
lstm_train_acc_kpi = AccKpi(
    'lstm_train_acc', 0.02, 0, actived=True, desc='train acc')
lstm_test_cost_kpi = CostKpi(
    'lstm_test_cost', 0.02, 0, actived=True, desc='test cost')
lstm_test_acc_kpi = AccKpi(
    'lstm_test_acc', 0.02, 0, actived=True, desc='test acc')

# Every KPI that log_to_ce() is able to record; keys are the KPI names
# that appear in the "kpis" log lines.
tracking_kpis = [
    conv_train_cost_kpi, conv_train_acc_kpi, conv_test_cost_kpi,
    conv_test_acc_kpi, rnn_train_cost_kpi, rnn_train_acc_kpi, rnn_test_cost_kpi,
    rnn_test_acc_kpi, lstm_train_cost_kpi, lstm_train_acc_kpi,
    lstm_test_cost_kpi, lstm_test_acc_kpi
]
44 |
45 |
def parse_log(log):
    """Yield (kpi_name, kpi_value) pairs found in a raw training log.

    A KPI record is a tab-separated line of the form
    'kpis\t<name>\t<value>'.  Every line's split fields are echoed to
    stdout for debugging, matching the original diagnostics.
    """
    for raw_line in log.split('\n'):
        fields = raw_line.strip().split('\t')
        print(fields)
        if len(fields) == 3 and fields[0] == 'kpis':
            yield fields[1], float(fields[2])
54 |
55 |
def log_to_ce(log):
    """Record every KPI value parsed from *log* and persist each tracker."""
    kpi_tracker = {kpi.name: kpi for kpi in tracking_kpis}
    for kpi_name, kpi_value in parse_log(log):
        print(kpi_name, kpi_value)
        tracker = kpi_tracker[kpi_name]
        tracker.add_record(kpi_value)
        tracker.persist()
64 |
65 |
if __name__ == '__main__':
    # The full training log is piped in on stdin by .run_ce.sh; parse it
    # and push the KPI records to the CE tracking system.
    log = sys.stdin.read()
    log_to_ce(log)
69 |
--------------------------------------------------------------------------------
/06.understand_sentiment/image/formula_lstm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/formula_lstm.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/formula_lstm_more.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/formula_lstm_more.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/formula_recrurent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/formula_recrurent.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/formula_rnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/formula_rnn.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/lstm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/lstm.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/lstm_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/lstm_en.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/rnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/rnn.png
--------------------------------------------------------------------------------
/06.understand_sentiment/image/stacked_lstm.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/stacked_lstm.jpg
--------------------------------------------------------------------------------
/06.understand_sentiment/image/stacked_lstm_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/06.understand_sentiment/image/stacked_lstm_en.png
--------------------------------------------------------------------------------
/06.understand_sentiment/train_dyn_rnn.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import print_function
16 |
17 | import paddle
18 | import paddle.fluid as fluid
19 | import numpy as np
20 | import sys
21 | import math
22 | import argparse
23 |
24 | CLASS_DIM = 2
25 | EMB_DIM = 128
26 | BATCH_SIZE = 128
27 | LSTM_SIZE = 128
28 |
29 |
def parse_args():
    """Parse the command-line flags for the dyn_rnn sentiment demo.

    Returns:
        argparse.Namespace with ``enable_ce`` (bool, store_true),
        ``use_gpu`` (int, default 0) and ``num_epochs`` (int, default 1).
    """
    arg_parser = argparse.ArgumentParser("dyn_rnn")
    arg_parser.add_argument(
        '--enable_ce',
        action='store_true',
        help="If set, run the task with continuous evaluation logs.")
    arg_parser.add_argument(
        '--use_gpu', type=int, default=0, help="Whether to use GPU or not.")
    arg_parser.add_argument(
        '--num_epochs', type=int, default=1, help="number of epochs.")
    return arg_parser.parse_args()
42 |
43 |
def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size):
    """Sequence classifier: embedding -> fc -> dynamic LSTM -> softmax fc.

    *data* is an int64 word-id sequence; the remaining arguments size the
    embedding table, the LSTM and the final softmax output.
    """
    embedded = fluid.embedding(
        input=data, size=[input_dim, emb_dim], is_sparse=True)
    # dynamic_lstm requires its input width to be 4x the hidden size.
    projected = fluid.layers.fc(
        input=embedded, size=lstm_size * 4, act='tanh')
    lstm_out, _ = fluid.layers.dynamic_lstm(projected, size=lstm_size * 4)
    final_step = fluid.layers.sequence_last_step(lstm_out)
    return fluid.layers.fc(input=final_step, size=class_dim, act="softmax")
53 |
54 |
def inference_program(word_dict):
    """Declare the 'words' input layer and return the model prediction."""
    words = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1)
    vocab_size = len(word_dict)
    return dynamic_rnn_lstm(words, vocab_size, CLASS_DIM, EMB_DIM, LSTM_SIZE)
60 |
61 |
def train_program(prediction):
    """Attach the label input and loss/accuracy metrics to *prediction*.

    Returns [avg_cost, accuracy]; avg_cost is what the optimizer minimizes.
    """
    label = fluid.data(name="label", shape=[None, 1], dtype="int64")
    xent = fluid.layers.cross_entropy(input=prediction, label=label)
    mean_cost = fluid.layers.mean(xent)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return [mean_cost, acc]
68 |
69 |
def optimizer_func():
    """Return the Adagrad optimizer used for training (lr = 0.002)."""
    return fluid.optimizer.Adagrad(learning_rate=0.002)
72 |
73 |
def train(use_cuda, params_dirname):
    """Train the dynamic-RNN sentiment model on IMDB and save it.

    Args:
        use_cuda: run on GPU 0 when True, otherwise CPU.
        params_dirname: directory for fluid.io.save_inference_model;
            skipped when None.

    Reads the module-level ``args`` (set in __main__) for enable_ce /
    num_epochs.  Logs per-step metrics and, under --enable_ce, emits
    'kpis\t...' lines consumed by _ce.py.
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    print("Loading IMDB word dict....")
    word_dict = paddle.dataset.imdb.word_dict()

    print("Reading training data....")
    # CE runs skip shuffling so results are reproducible.
    if args.enable_ce:
        train_reader = paddle.batch(
            paddle.dataset.imdb.train(word_dict), batch_size=BATCH_SIZE)
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.imdb.train(word_dict), buf_size=25000),
            batch_size=BATCH_SIZE)

    print("Reading testing data....")
    test_reader = paddle.batch(
        paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)

    feed_order = ['words', 'label']
    pass_num = args.num_epochs

    main_program = fluid.default_main_program()
    star_program = fluid.default_startup_program()

    # Fixed seeds under CE so the tracked KPIs are comparable run-to-run.
    if args.enable_ce:
        main_program.random_seed = 90
        star_program.random_seed = 90

    prediction = inference_program(word_dict)
    train_func_outputs = train_program(prediction)
    avg_cost = train_func_outputs[0]

    # Clone BEFORE minimize() so the test program has no backward ops.
    test_program = main_program.clone(for_test=True)

    sgd_optimizer = optimizer_func()
    sgd_optimizer.minimize(avg_cost)
    exe = fluid.Executor(place)

    def train_test(program, reader):
        # Average [cost, acc] over the whole test set with a fresh executor.
        count = 0
        feed_var_list = [
            program.global_block().var(var_name) for var_name in feed_order
        ]
        feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place)
        test_exe = fluid.Executor(place)
        accumulated = len(train_func_outputs) * [0]
        for test_data in reader():
            avg_cost_np = test_exe.run(
                program=program,
                feed=feeder_test.feed(test_data),
                fetch_list=train_func_outputs)
            accumulated = [
                x[0] + x[1][0] for x in zip(accumulated, avg_cost_np)
            ]
            count += 1
        return [x / count for x in accumulated]

    def train_loop():
        # Run the training epochs, evaluating on the test set every 10 steps
        # and saving the inference model after each step.

        feed_var_list_loop = [
            main_program.global_block().var(var_name) for var_name in feed_order
        ]
        feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
        exe.run(fluid.default_startup_program())

        for epoch_id in range(pass_num):
            for step_id, data in enumerate(train_reader()):
                metrics = exe.run(
                    main_program,
                    feed=feeder.feed(data),
                    fetch_list=[var.name for var in train_func_outputs])
                if (step_id + 1) % 10 == 0:

                    avg_cost_test, acc_test = train_test(test_program,
                                                         test_reader)
                    print('Step {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                        step_id, avg_cost_test, acc_test))

                print("Step {0}, Epoch {1} Metrics {2}".format(
                    step_id, epoch_id, list(map(np.array, metrics))))
                if math.isnan(float(metrics[0])):
                    sys.exit("got NaN loss, training failed.")
                if params_dirname is not None:
                    fluid.io.save_inference_model(params_dirname, ["words"],
                                                  prediction, exe)
            # NOTE(review): avg_cost_test/acc_test only exist if at least
            # 10 training steps ran this epoch -- a shorter epoch would
            # raise NameError here; confirm intended.
            if args.enable_ce and epoch_id == pass_num - 1:
                print("kpis\trnn_train_cost\t%f" % metrics[0])
                print("kpis\trnn_train_acc\t%f" % metrics[1])
                print("kpis\trnn_test_cost\t%f" % avg_cost_test)
                print("kpis\trnn_test_acc\t%f" % acc_test)

    train_loop()
167 |
168 |
def infer(use_cuda, params_dirname=None):
    """Load the saved inference model and score three hard-coded reviews.

    Prints the positive/negative probability for each review.  Requires
    that train() already wrote the model to *params_dirname*.
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()

    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inferencer, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        # Setup input by creating LoDTensor to represent sequence of words.
        # Here each word is the basic element of the LoDTensor and the shape of
        # each word (base_shape) should be [1] since it is simply an index to
        # look up for the corresponding word vector.
        # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
        # which has only one lod level. Then the created LoDTensor will have only
        # one higher level structure (sequence of words, or sentence) than the basic
        # element (word). Hence the LoDTensor will hold data for three sentences of
        # length 3, 4 and 2, respectively.
        # Note that lod info should be a list of lists.
        reviews_str = [
            'read the book forget the movie', 'this is a great movie',
            'this is very bad'
        ]
        reviews = [c.split() for c in reviews_str]

        # NOTE(review): the empty-string key looks like a stripped '<unk>'
        # token from the original source -- confirm against the upstream file.
        UNK = word_dict['']
        lod = []
        for c in reviews:
            # Map each word to its id, falling back to UNK for OOV words.
            lod.append([np.int64(word_dict.get(words, UNK)) for words in c])

        base_shape = [[len(c) for c in lod]]

        tensor_words = fluid.create_lod_tensor(lod, base_shape, place)
        assert feed_target_names[0] == "words"
        results = exe.run(
            inferencer,
            feed={feed_target_names[0]: tensor_words},
            fetch_list=fetch_targets,
            return_numpy=False)
        np_data = np.array(results[0])
        # One row per review: [P(positive), P(negative)].
        for i, r in enumerate(np_data):
            print("Predict probability of ", r[0], " to be positive and ", r[1],
                  " to be negative for review \'", reviews_str[i], "\'")
218 |
219 |
def main(use_cuda):
    """Train then run inference; no-op if CUDA was requested but this
    Paddle build lacks CUDA support."""
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    # NOTE(review): the directory name says "conv" although this script
    # trains the dynamic-RNN model -- presumably a copy-paste leftover;
    # confirm before renaming (the .run_ce.sh runs all three variants).
    params_dirname = "understand_sentiment_conv.inference.model"
    train(use_cuda, params_dirname)
    infer(use_cuda, params_dirname)
226 |
227 |
if __name__ == '__main__':
    # `args` is intentionally module-level: train() reads the global.
    args = parse_args()
    use_cuda = args.use_gpu  # set to True if training with GPU
    main(use_cuda)
232 |
--------------------------------------------------------------------------------
/07.label_semantic_roles/.gitignore:
--------------------------------------------------------------------------------
1 | data/train.list
2 | data/test.*
3 | data/conll05st-release.tar.gz
4 | data/conll05st-release
5 | data/predicate_dict
6 | data/label_dict
7 | data/word_dict
8 | data/emb
9 | data/feature
10 | output
11 | predict.res
12 | train.log
13 |
--------------------------------------------------------------------------------
/07.label_semantic_roles/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#This file is only used for continuous evaluation.
# Train with CE logging enabled; "kpis\t..." stdout lines are parsed by _ce.py.
python train.py --enable_ce | python _ce.py


--------------------------------------------------------------------------------
/07.label_semantic_roles/_ce.py:
--------------------------------------------------------------------------------
1 | ### This file is only used for continuous evaluation test!
2 | from __future__ import print_function
3 | from __future__ import division
4 | from __future__ import absolute_import
5 | import os
6 | import sys
7 | sys.path.append(os.environ['ceroot'])
8 | from kpi import CostKpi
9 |
10 | train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=True, desc='train cost')
11 | tracking_kpis = [train_cost_kpi]
12 |
13 |
def parse_log(log):
    """Extract (kpi_name, kpi_value) pairs from raw training-log text.

    Only tab-separated lines shaped 'kpis\t<name>\t<value>' are yielded;
    every line's split fields are printed, preserving the original
    debugging output.
    """
    for entry in log.split('\n'):
        parts = entry.strip().split('\t')
        print(parts)
        if len(parts) != 3 or parts[0] != 'kpis':
            continue
        yield parts[1], float(parts[2])
22 |
23 |
def log_to_ce(log):
    """Record each parsed KPI value in its tracker and persist it."""
    trackers = {}
    for kpi in tracking_kpis:
        trackers[kpi.name] = kpi

    for name, value in parse_log(log):
        print(name, value)
        target = trackers[name]
        target.add_record(value)
        target.persist()
33 |
34 |
if __name__ == '__main__':
    # Training log arrives on stdin (piped by .run_ce.sh); record its KPIs.
    log = sys.stdin.read()
    log_to_ce(log)
38 |
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/Eqn1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/Eqn1.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/Eqn2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/Eqn2.gif
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/Eqn3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/Eqn3.gif
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/Eqn4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/Eqn4.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/bidirectional_stacked_lstm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/bidirectional_stacked_lstm.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/bidirectional_stacked_lstm_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/bidirectional_stacked_lstm_en.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/bio_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/bio_example.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/bio_example_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/bio_example_en.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/db_lstm_network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/db_lstm_network.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/db_lstm_network_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/db_lstm_network_en.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/dependency_parsing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/dependency_parsing.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/dependency_parsing_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/dependency_parsing_en.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/linear_chain_crf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/linear_chain_crf.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/stacked_lstm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/stacked_lstm.png
--------------------------------------------------------------------------------
/07.label_semantic_roles/image/stacked_lstm_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/07.label_semantic_roles/image/stacked_lstm_en.png
--------------------------------------------------------------------------------
/08.machine_translation/.gitignore:
--------------------------------------------------------------------------------
1 | data/wmt14
2 | data/pre-wmt14
3 | pretrained/wmt14_model
4 | gen.log
5 | gen_result
6 | train.log
7 | dataprovider_copy_1.py
8 | *.pyc
9 | multi-bleu.perl
10 |
--------------------------------------------------------------------------------
/08.machine_translation/image/attention_decoder_formula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/attention_decoder_formula.png
--------------------------------------------------------------------------------
/08.machine_translation/image/bi_rnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/bi_rnn.png
--------------------------------------------------------------------------------
/08.machine_translation/image/bi_rnn_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/bi_rnn_en.png
--------------------------------------------------------------------------------
/08.machine_translation/image/decoder_attention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/decoder_attention.png
--------------------------------------------------------------------------------
/08.machine_translation/image/decoder_attention_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/decoder_attention_en.png
--------------------------------------------------------------------------------
/08.machine_translation/image/decoder_formula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/decoder_formula.png
--------------------------------------------------------------------------------
/08.machine_translation/image/encoder_attention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/encoder_attention.png
--------------------------------------------------------------------------------
/08.machine_translation/image/encoder_attention_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/encoder_attention_en.png
--------------------------------------------------------------------------------
/08.machine_translation/image/encoder_decoder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/encoder_decoder.png
--------------------------------------------------------------------------------
/08.machine_translation/image/encoder_decoder_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/encoder_decoder_en.png
--------------------------------------------------------------------------------
/08.machine_translation/image/gru.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/gru.png
--------------------------------------------------------------------------------
/08.machine_translation/image/gru_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/gru_en.png
--------------------------------------------------------------------------------
/08.machine_translation/image/nmt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/nmt.png
--------------------------------------------------------------------------------
/08.machine_translation/image/nmt_en.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/nmt_en.png
--------------------------------------------------------------------------------
/08.machine_translation/image/probability_formula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/probability_formula.png
--------------------------------------------------------------------------------
/08.machine_translation/image/sum_formula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/sum_formula.png
--------------------------------------------------------------------------------
/08.machine_translation/image/weight_formula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/08.machine_translation/image/weight_formula.png
--------------------------------------------------------------------------------
/09.gan/.run_ce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#This file is only used for continuous evaluation.
# cuDNN determinism + a pinned GPU keep the CE run reproducible.
export FLAGS_cudnn_deterministic=True
export CUDA_VISIBLE_DEVICES=0
# One-epoch DCGAN run; "kpis\t..." stdout lines are parsed by _ce.py.
python dc_gan.py --enable_ce true --epoch 1 --use_gpu True | python _ce.py


--------------------------------------------------------------------------------
/09.gan/_ce.py:
--------------------------------------------------------------------------------
### This file is only used for continuous evaluation test!
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import sys
# The CE harness exposes its kpi module via the 'ceroot' environment variable.
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi

# KPI trackers: (name, tolerance, baseline). One per tracked training cost.
dcgan_d_train_cost_kpi = CostKpi(
    'dcgan_d_train_cost',
    0.02,
    0,
    actived=True,
    desc='train cost of discriminator')
dcgan_g_train_cost_kpi = CostKpi(
    'dcgan_g_train_cost', 0.02, 0, actived=True, desc='train cost of generator')

# All KPIs that log_to_ce() will record and persist.
tracking_kpis = [dcgan_d_train_cost_kpi, dcgan_g_train_cost_kpi]
21 |
def parse_log(log):
    """Yield ``(kpi_name, kpi_value)`` pairs found in a training log.

    Recognizes tab-separated lines of the form ``kpis <name> <value>``;
    every split line is echoed to stdout for debugging.
    """
    for raw_line in log.split('\n'):
        fields = raw_line.strip().split('\t')
        print(fields)
        if len(fields) == 3 and fields[0] == 'kpis':
            yield fields[1], float(fields[2])
30 |
31 |
def log_to_ce(log):
    """Parse a raw training log and persist every recognized KPI record."""
    trackers = {}
    for kpi in tracking_kpis:
        trackers[kpi.name] = kpi
        print(kpi.name)
        print(kpi)
    for name, value in parse_log(log):
        print(name, value)
        tracker = trackers[name]
        tracker.add_record(value)
        tracker.persist()
42 |
43 |
if __name__ == '__main__':
    # Read the full training log from stdin (piped from the training run)
    # and record the KPI values it contains.
    log = sys.stdin.read()
    log_to_ce(log)
47 |
--------------------------------------------------------------------------------
/09.gan/dc_gan.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 | import sys
19 | import os
20 | import argparse
21 | import functools
22 | import matplotlib
23 | import six
24 | import numpy as np
25 | import paddle
26 | import time
27 | import paddle.fluid as fluid
28 | from utility import get_parent_function_name, plot, check, add_arguments, print_arguments
29 | from network import G, D
30 | matplotlib.use('agg')
31 | import matplotlib.pyplot as plt
32 | import matplotlib.gridspec as gridspec
33 |
# Dimensionality of the generator's input noise vector.
NOISE_SIZE = 100
# Adam learning rate shared by the discriminator and generator optimizers.
LEARNING_RATE = 2e-4

# Command-line interface; add_arg binds this parser once so each option
# below is a single call.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 128, "Minibatch size.")
add_arg('epoch', int, 20, "The number of epochs to be trained.")
add_arg('output', str, "./output_dcgan", "The directory the model and the test result to be saved to.")
add_arg('use_gpu', bool, True, "Whether to use GPU to train.")
add_arg('enable_ce', bool, False, "If set True, enable continuous evaluation job.")
# yapf: enable
46 |
47 |
def loss(x, label):
    """Mean sigmoid cross-entropy between logits ``x`` and targets ``label``."""
    per_sample = fluid.layers.sigmoid_cross_entropy_with_logits(x=x, label=label)
    return fluid.layers.mean(per_sample)
51 |
52 |
def train(args):
    """Train the DCGAN: alternately update discriminator D and generator G.

    Every 10 batches a grid of samples generated from a fixed noise batch is
    rendered and saved under ``args.output``. With ``--enable_ce`` the run is
    seeded and the mean D/G losses of the final epoch are printed as
    tab-separated ``kpis`` lines consumed by ``_ce.py``.

    :param args: parsed command-line arguments (batch_size, epoch, output,
        use_gpu, enable_ce).
    """
    if args.enable_ce:
        # Fix the random seeds so continuous-evaluation runs are reproducible.
        np.random.seed(10)
        fluid.default_startup_program().random_seed = 90

    # Two programs: d_program trains D on real/fake images; dg_program trains
    # G through D (only G's parameters are updated there).
    d_program = fluid.Program()
    dg_program = fluid.Program()

    with fluid.program_guard(d_program):
        img = fluid.data(name='img', shape=[None, 784], dtype='float32')
        label = fluid.data(name='label', shape=[None, 1], dtype='float32')
        d_logit = D(img)
        d_loss = loss(d_logit, label)

    with fluid.program_guard(dg_program):
        noise = fluid.data(
            name='noise', shape=[None, NOISE_SIZE], dtype='float32')
        g_img = G(x=noise)

        # Clone before D is attached: g_program only generates images, and
        # g_program_test is its inference-mode twin used for sampling.
        g_program = dg_program.clone()
        g_program_test = dg_program.clone(for_test=True)

        dg_logit = D(g_img)
        # G is trained to make D classify its output as real (target 1.0).
        dg_loss = loss(dg_logit,
                       fluid.layers.fill_constant_batch_size_like(
                           input=noise,
                           dtype='float32',
                           shape=[-1, 1],
                           value=1.0))

    opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE)

    opt.minimize(loss=d_loss)
    # Restrict the generator step to G's own parameters so D stays frozen
    # while dg_loss is minimized.
    parameters = [p.name for p in g_program.global_block().all_parameters()]

    opt.minimize(loss=dg_loss, parameter_list=parameters)

    exe = fluid.Executor(fluid.CPUPlace())
    if args.use_gpu:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    exe.run(fluid.default_startup_program())
    if args.enable_ce:
        # No shuffling in CE mode: keep the data order deterministic.
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=args.batch_size)
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=60000),
            batch_size=args.batch_size)

    # Train G twice per D step; keep one fixed noise batch so the periodically
    # saved sample grids are comparable across iterations.
    NUM_TRAIN_TIMES_OF_DG = 2
    const_n = np.random.uniform(
        low=-1.0, high=1.0,
        size=[args.batch_size, NOISE_SIZE]).astype('float32')

    t_time = 0
    losses = [[], []]  # losses[0]: discriminator, losses[1]: generator
    for pass_id in range(args.epoch):
        for batch_id, data in enumerate(train_reader()):
            # Drop the final partial batch: the graph assumes a full batch.
            if len(data) != args.batch_size:
                continue
            noise_data = np.random.uniform(
                low=-1.0, high=1.0,
                size=[args.batch_size, NOISE_SIZE]).astype('float32')
            real_image = np.array(list(map(lambda x: x[0], data))).reshape(
                -1, 784).astype('float32')
            real_labels = np.ones(
                shape=[real_image.shape[0], 1], dtype='float32')
            fake_labels = np.zeros(
                shape=[real_image.shape[0], 1], dtype='float32')
            s_time = time.time()
            generated_image = exe.run(
                g_program, feed={'noise': noise_data}, fetch_list=[g_img])[0]

            # Discriminator step: fake images should score 0, real images 1.
            d_loss_1 = exe.run(
                d_program,
                feed={
                    'img': generated_image,
                    'label': fake_labels,
                },
                fetch_list=[d_loss])[0][0]

            d_loss_2 = exe.run(
                d_program,
                feed={
                    'img': real_image,
                    'label': real_labels,
                },
                fetch_list=[d_loss])[0][0]

            d_loss_n = d_loss_1 + d_loss_2
            losses[0].append(d_loss_n)
            # Generator steps: fresh noise each time, trained through frozen D.
            for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG):
                noise_data = np.random.uniform(
                    low=-1.0, high=1.0,
                    size=[args.batch_size, NOISE_SIZE]).astype('float32')
                dg_loss_n = exe.run(
                    dg_program,
                    feed={'noise': noise_data},
                    fetch_list=[dg_loss])[0][0]
                losses[1].append(dg_loss_n)
            t_time += (time.time() - s_time)
            if batch_id % 10 == 0:
                if not os.path.exists(args.output):
                    os.makedirs(args.output)
                # Sample from the fixed noise and save a grid of real plus
                # generated digits for visual inspection.
                generated_images = exe.run(
                    g_program_test, feed={'noise': const_n},
                    fetch_list=[g_img])[0]
                total_images = np.concatenate([real_image, generated_images])
                fig = plot(total_images)
                msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n gen={4}".format(
                    pass_id, batch_id, d_loss_n, dg_loss_n,
                    check(generated_images))
                print(msg)
                plt.title(msg)
                plt.savefig(
                    '{}/{:04d}_{:04d}.png'.format(args.output, pass_id,
                                                  batch_id),
                    bbox_inches='tight')
                plt.close(fig)
        if args.enable_ce and pass_id == args.epoch - 1:
            print("kpis\tdcgan_d_train_cost\t%f" % np.mean(losses[0]))
            print("kpis\tdcgan_g_train_cost\t%f" % np.mean(losses[1]))
181 |
182 |
if __name__ == "__main__":
    # Parse CLI flags, echo the effective configuration, then train.
    args = parser.parse_args()
    print_arguments(args)
    train(args)
187 |
--------------------------------------------------------------------------------
/09.gan/image/01.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/09.gan/image/01.gif
--------------------------------------------------------------------------------
/09.gan/image/dcgan_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/09.gan/image/dcgan_demo.png
--------------------------------------------------------------------------------
/09.gan/image/dcgan_g.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/09.gan/image/dcgan_g.png
--------------------------------------------------------------------------------
/09.gan/image/process.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/09.gan/image/process.png
--------------------------------------------------------------------------------
/09.gan/network.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
from utility import get_parent_function_name
import os

# Network size hyper-parameters.
gf_dim = 64  # base number of generator filters
df_dim = 64  # base number of discriminator filters
gfc_dim = 1024 * 2  # generator fully-connected layer width
dfc_dim = 1024  # discriminator fully-connected layer width
img_dim = 28  # MNIST image side length

c_dim = 3  # sizes the first conv in D_cond (presumably color channels; MNIST is 1-channel - confirm intent)
y_dim = 1  # dimensionality of the condition vector y
output_height = 28  # generated image height
output_width = 28  # generated image width

# cuDNN kernels can be non-deterministic; disable them under continuous
# evaluation ('ce_mode') so results are reproducible.
use_cudnn = True
if 'ce_mode' in os.environ:
    use_cudnn = False
23 |
24 |
def bn(x, name=None, act='relu'):
    """Batch-norm layer whose parameter names are derived from the call site.

    When ``name`` is omitted, get_parent_function_name() builds a prefix from
    the calling function and line, keeping parameter names unique and stable.
    """
    prefix = name if name is not None else get_parent_function_name()
    return fluid.layers.batch_norm(
        x,
        param_attr=prefix + '1',
        bias_attr=prefix + '2',
        moving_mean_name=prefix + '3',
        moving_variance_name=prefix + '4',
        name=prefix,
        act=act)
37 |
38 |
def conv(x, num_filters, name=None, act=None):
    """5x5 convolution followed by 2x2 stride-2 pooling.

    Parameter names are derived from the call site when ``name`` is omitted.
    """
    if name is None:
        name = get_parent_function_name()
    return fluid.nets.simple_img_conv_pool(
        input=x,
        num_filters=num_filters,
        filter_size=5,
        pool_size=2,
        pool_stride=2,
        act=act,
        use_cudnn=use_cudnn,
        param_attr=name + 'w',
        bias_attr=name + 'b')
52 |
53 |
def fc(x, num_filters, name=None, act=None):
    """Fully-connected layer of width ``num_filters`` with named parameters."""
    if name is None:
        name = get_parent_function_name()
    return fluid.layers.fc(
        input=x,
        size=num_filters,
        param_attr=name + 'w',
        bias_attr=name + 'b',
        act=act)
63 |
64 |
def deconv(x,
           num_filters,
           name=None,
           filter_size=5,
           stride=2,
           dilation=1,
           padding=2,
           output_size=None,
           act=None):
    """Transposed (up-sampling) convolution with call-site-derived names."""
    if name is None:
        name = get_parent_function_name()
    return fluid.layers.conv2d_transpose(
        input=x,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        dilation=dilation,
        padding=padding,
        output_size=output_size,
        act=act,
        use_cudnn=use_cudnn,
        param_attr=name + 'w',
        bias_attr=name + 'b')
88 |
89 |
def conv_cond_concat(x, y):
    """Concatenate conditioning tensor ``y`` onto ``x`` along the channel axis.

    ``y`` is broadcast over the spatial dimensions of ``x`` by multiplying a
    constant tensor of ones shaped like the feature map.
    """
    broadcast_shape = [-1, y.shape[1], x.shape[2], x.shape[3]]
    ones = fluid.layers.fill_constant_batch_size_like(
        x, broadcast_shape, "float32", 1.0)
    y_tiled = ones * y
    return fluid.layers.concat([x, y_tiled], 1)
95 |
96 |
def D_cond(image, y):
    # Conditional discriminator: scores a flattened 28x28 image given the
    # condition tensor y, which is re-injected at every stage of the network.
    image = fluid.layers.reshape(x=image, shape=[-1, 1, 28, 28])
    yb = fluid.layers.reshape(y, [-1, y_dim, 1, 1])
    x = conv_cond_concat(image, yb)

    h0 = conv(x, c_dim + y_dim, act="leaky_relu")
    h0 = conv_cond_concat(h0, yb)
    h1 = bn(conv(h0, df_dim + y_dim), act="leaky_relu")
    h1 = fluid.layers.flatten(h1, axis=1)

    # Re-append the raw condition before each fully-connected stage.
    h1 = fluid.layers.concat([h1, y], 1)

    h2 = bn(fc(h1, dfc_dim), act='leaky_relu')
    h2 = fluid.layers.concat([h2, y], 1)

    # Sigmoid output: probability in [0, 1] that the input image is real.
    h3 = fc(h2, 1, act='sigmoid')
    return h3
114 |
115 |
def G_cond(z, y):
    # Conditional generator: maps noise z plus condition y to a flattened
    # 28x28 image with tanh outputs in [-1, 1].
    s_h, s_w = output_height, output_width
    # Intermediate upsampling sizes: quarter and half resolution.
    s_h2, s_h4 = int(s_h // 2), int(s_h // 4)
    s_w2, s_w4 = int(s_w // 2), int(s_w // 4)

    yb = fluid.layers.reshape(y, [-1, y_dim, 1, 1])  #NCHW

    # Condition every stage: concatenate y to FC inputs and yb to the
    # convolutional feature maps.
    z = fluid.layers.concat([z, y], 1)
    h0 = bn(fc(z, gfc_dim // 2), act='relu')
    h0 = fluid.layers.concat([h0, y], 1)

    h1 = bn(fc(h0, gf_dim * 2 * s_h4 * s_w4), act='relu')
    h1 = fluid.layers.reshape(h1, [-1, gf_dim * 2, s_h4, s_w4])

    # Two stride-2 deconvolutions upsample quarter -> half -> full size.
    h1 = conv_cond_concat(h1, yb)
    h2 = bn(deconv(h1, gf_dim * 2, output_size=[s_h2, s_w2]), act='relu')
    h2 = conv_cond_concat(h2, yb)
    h3 = deconv(h2, 1, output_size=[s_h, s_w], act='tanh')
    return fluid.layers.reshape(h3, shape=[-1, s_h * s_w])
135 |
136 |
def D(x):
    # Unconditional DCGAN discriminator: flattened 784-vector in,
    # sigmoid probability of "real" out.
    x = fluid.layers.reshape(x=x, shape=[-1, 1, 28, 28])
    x = conv(x, df_dim, act='leaky_relu')
    x = bn(conv(x, df_dim * 2), act='leaky_relu')
    x = bn(fc(x, dfc_dim), act='leaky_relu')
    x = fc(x, 1, act='sigmoid')
    return x
144 |
145 |
def G(x):
    # Unconditional DCGAN generator: noise vector in, flattened 28x28 image
    # out with tanh activations in [-1, 1].
    x = bn(fc(x, gfc_dim))
    x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4))
    x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4])
    # Two stride-2 deconvolutions upsample 7x7 -> 14x14 -> 28x28.
    x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14])
    x = deconv(x, 1, filter_size=5, padding=2, act='tanh', output_size=[28, 28])
    x = fluid.layers.reshape(x, shape=[-1, 28 * 28])
    return x
154 |
--------------------------------------------------------------------------------
/09.gan/utility.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import distutils.util
import numpy as np
import inspect
import matplotlib
import six
# Select a non-interactive backend before pyplot is imported so plotting
# also works on headless machines.
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

# MNIST image side length.
img_dim = 28
15 |
16 |
def get_parent_function_name():
    """Build a dotted name prefix from the call stack.

    Returns ``"<grandparent fn>.<parent fn>.<grandparent lineno>."`` for the
    two frames above this function; used by the network helpers to derive
    unique layer-parameter names from their call site.
    """
    frames = inspect.stack()
    parent = frames[1]
    grandparent = frames[2]
    return '{}.{}.{}.'.format(grandparent[3], parent[3], grandparent[2])
20 |
21 |
def plot(gen_data):
    """Tile a batch of flattened 28x28 images into one square grid figure.

    Each image gets a 1-pixel border on its top/left; the batch is padded
    with blank images up to the next perfect square so the grid is n x n.
    Returns the matplotlib figure (caller is responsible for closing it).
    """
    pad_dim = 1
    cell = pad_dim + img_dim
    images = gen_data.reshape(gen_data.shape[0], img_dim, img_dim)
    n = int(math.ceil(math.sqrt(images.shape[0])))
    # Pad the batch to n*n images plus the per-image border, then rearrange
    # (n, n, H, W) into a single (n*H, n*W) mosaic.
    padded = np.pad(
        images,
        [[0, n * n - images.shape[0]], [pad_dim, 0], [pad_dim, 0]],
        'constant')
    mosaic = padded.reshape((n, n, cell, cell)).transpose(
        (0, 2, 1, 3)).reshape((n * cell, n * cell))
    fig = plt.figure(figsize=(8, 8))
    plt.axis('off')
    plt.imshow(mosaic, cmap='Greys_r', vmin=-1, vmax=1)
    return fig
35 |
36 |
def check(a):
    """Return summary stats [mean, min, max, 25th pct, 75th pct] of ``a``."""
    values = np.sort(np.array(a).flatten())
    q1 = values[int(len(values) * 0.25)]
    q3 = values[int(len(values) * 0.75)]
    return [np.average(values), np.min(values), np.max(values), q1, q3]
43 |
44 |
def print_arguments(args):
    """Print argparse's arguments.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        parser.add_argument("name", default="Jonh", type=str, help="User name.")
        args = parser.parse_args()
        print_arguments(args)

    :param args: Input argparse.Namespace for printing.
    :type args: argparse.Namespace
    """
    print("----------- Configuration Arguments -----------")
    # vars(args).items() works on both Python 2 and 3 and avoids the six
    # compatibility shim; sorted() gives a deterministic, alphabetical listing.
    for arg, value in sorted(vars(args).items()):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")
64 |
65 |
def _strtobool(value):
    """Convert a truth-value string to 1 or 0.

    Mirrors ``distutils.util.strtobool`` (removed from the standard library
    in Python 3.12, PEP 632). Raises ValueError for unrecognized values.
    """
    value = value.lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError("invalid truth value %r" % (value,))


def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Add argparse's argument.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        add_argument("name", str, "Jonh", "User name.", parser)
        args = parser.parse_args()
    """
    # argparse calls type('True') on the raw string; plain bool() would be
    # truthy for ANY non-empty string, so parse truth words explicitly.
    type = _strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs)
84 |
--------------------------------------------------------------------------------
/README.cn.md:
--------------------------------------------------------------------------------
1 | # 深度学习入门
2 |
3 | [](https://travis-ci.org/PaddlePaddle/book)
4 | [](https://github.com/PaddlePaddle/book/blob/develop/README.md)
5 | [](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md)
6 |
7 | 1. [线性回归](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/fit_a_line/README.cn.html)
8 | 1. [识别数字](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/recognize_digits/README.cn.html)
9 | 1. [图像分类](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/image_classification/index.html)
10 | 1. [词向量](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/word2vec/index.html)
11 | 1. [个性化推荐](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/recommender_system/index.html)
12 | 1. [情感分析](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/understand_sentiment/index.html)
13 | 1. [语义角色标注](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/label_semantic_roles/index.html)
14 | 1. [机器翻译](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/machine_translation/index.html)
15 | 1. [生成对抗网络](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/beginners_guide/basics/gan/index.html)
16 |
17 | 更多学习内容请访问PaddlePaddle[视频课堂](http://bit.baidu.com/Course/datalist/column/117.html)。
18 |
19 | ## 运行这本书
20 |
21 | 您现在在看的这本书是一本“交互式”电子书 —— 每一章都可以运行在一个Jupyter Notebook里。
22 |
23 | 我们把Jupyter、PaddlePaddle、以及各种被依赖的软件都打包进一个Docker image了。所以您不需要自己来安装各种软件,只需要安装Docker即可。对于各种Linux发行版,请参考 https://www.docker.com 。如果您使用[Windows](https://www.docker.com/docker-windows)或者[Mac](https://www.docker.com/docker-mac),可以考虑[给Docker更多内存和CPU资源](http://stackoverflow.com/a/39720010/724872)。
24 |
25 | 只需要在命令行窗口里运行:
26 |
27 | ```bash
28 | docker run -d -p 8888:8888 paddlepaddle/book
29 | ```
30 |
31 | 会从DockerHub.com下载和运行本书的Docker image。阅读和在线编辑本书请在浏览器里访问 http://localhost:8888 。
32 |
33 | 如果您访问DockerHub.com很慢,可以试试我们的另一个镜像hub.baidubce.com:
34 |
35 | ```bash
36 | docker run -d -p 8888:8888 hub.baidubce.com/paddlepaddle/book
37 | ```
38 |
39 | ### 使用GPU训练
40 |
41 | 本书默认使用CPU训练,若是要使用GPU训练,使用步骤会稍有变化。为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用[nvidia-docker](https://github.com/NVIDIA/nvidia-docker)来运行镜像。请先安装nvidia-docker,之后请运行:
42 |
43 | ```bash
44 | nvidia-docker run -d -p 8888:8888 paddlepaddle/book:latest-gpu
45 | ```
46 |
47 | 或者使用国内的镜像请运行:
48 |
49 | ```bash
50 | nvidia-docker run -d -p 8888:8888 hub.baidubce.com/paddlepaddle/book:latest-gpu
51 | ```
52 |
53 | 还需要将以下代码
54 | ```python
55 | use_cuda = False
56 | ```
57 |
58 | 改成:
59 | ```python
60 | use_cuda = True
61 | ```
62 |
63 |
64 | ## 贡献内容
65 |
66 | 您要是能贡献新的章节那就太好了!请发Pull Requests把您写的章节加入到`/pending`下面的一个子目录里。当这一章稳定下来,我们一起把您的目录挪到根目录。
67 |
68 | 为了写作、运行、调试,您需要安装Python 2.x和Go >1.5, 并可以用[脚本程序](https://github.com/PaddlePaddle/book/blob/develop/.tools/convert-markdown-into-ipynb-and-test.sh)来生成新的Docker image。
69 |
70 | **Note:** We also provide [English Readme](https://github.com/PaddlePaddle/book/blob/develop/README.md) for PaddlePaddle book.
71 |
72 |
73 | 本教程 由 PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议 进行许可。
74 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep Learning with PaddlePaddle
2 |
3 | [](https://travis-ci.org/PaddlePaddle/book)
4 | [](https://github.com/PaddlePaddle/book/blob/develop/README.md)
5 | [](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md)
6 |
7 | 1. [Fit a Line](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/fit_a_line/README.html)
8 | 1. [Recognize Digits](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/recognize_digits/README.html)
9 | 1. [Image Classification](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/image_classification/index_en.html)
10 | 1. [Word to Vector](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/word2vec/index_en.html)
11 | 1. [Recommender System](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/recommender_system/index_en.html)
12 | 1. [Understand Sentiment](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/understand_sentiment/index_en.html)
13 | 1. [Label Semantic Roles](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/label_semantic_roles/index_en.html)
14 | 1. [Machine Translation](https://www.paddlepaddle.org.cn/documentation/docs/en/develop/beginners_guide/basics/machine_translation/index_en.html)
15 |
16 | ## Running the Book
17 |
18 | This book you are reading is interactive -- each chapter can run as a Jupyter Notebook.
19 |
20 | We packed this book, Jupyter, PaddlePaddle, and all dependencies into a Docker image. So you don't need to install anything except Docker. If you are using Windows, please follow [this installation guide](https://www.docker.com/docker-windows). If you are running Mac, please follow [this](https://www.docker.com/docker-mac). For various Linux distros, please refer to https://www.docker.com. If you are using Windows or Mac, you might want to give Docker [more memory and CPUs/cores](http://stackoverflow.com/a/39720010/724872).
21 |
22 | Just type
23 |
24 | ```bash
25 | docker run -d -p 8888:8888 paddlepaddle/book
26 |
27 | ```
28 |
29 | This command will download the pre-built Docker image from DockerHub.com and run it in a container. Please direct your Web browser to http://localhost:8888 to read the book.
30 |
31 | If you are living in somewhere slow to access DockerHub.com, you might try our mirror server hub.baidubce.com:
32 |
33 | ```bash
34 | docker run -d -p 8888:8888 hub.baidubce.com/paddlepaddle/book
35 |
36 | ```
37 |
38 | ### Training with GPU
39 |
40 | By default we are using CPU for training, if you want to train with GPU, the steps are a little different.
41 |
42 | To make sure GPU can be successfully used from inside container, please install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Then run:
43 |
44 | ```bash
45 | nvidia-docker run -d -p 8888:8888 paddlepaddle/book:latest-gpu
46 |
47 | ```
48 |
49 | Or you can use the image registry mirror in China:
50 |
51 | ```bash
52 | nvidia-docker run -d -p 8888:8888 hub.baidubce.com/paddlepaddle/book:latest-gpu
53 |
54 | ```
55 |
56 | Change the code in the chapter that you are reading from
57 | ```python
58 | use_cuda = False
59 | ```
60 |
61 | to:
62 | ```python
63 | use_cuda = True
64 | ```
65 |
66 |
67 | ## Contribute
68 |
69 | Your contribution is welcome! Please feel free to file Pull Requests to add your chapter as a directory under `/pending`. Once it is going stable, the community would like to move it to `/`.
70 |
71 | To write, run, and debug your chapters, you will need Python 2.x, Go >1.5. You can build the Docker image using [this script](https://github.com/PaddlePaddle/book/blob/develop/.tools/convert-markdown-into-ipynb-and-test.sh).
72 | This tutorial is contributed by PaddlePaddle, and licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
73 |
--------------------------------------------------------------------------------
/index.cn.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | 深度学习入门
6 |
7 |
8 |
9 |
10 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
94 |
95 |
96 |
97 |
149 |
150 |
152 |
153 |
154 |
155 |
172 |
173 |
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Deep Learning 101
6 |
7 |
8 |
9 |
10 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
90 |
91 |
92 |
93 |
145 |
146 |
148 |
149 |
150 |
151 |
168 |
169 |
--------------------------------------------------------------------------------
/mnist-client/.eslintrc:
--------------------------------------------------------------------------------
1 | {
2 | "rules": {
3 | "indent": [
4 | 2,
5 | 4
6 | ],
7 | "quotes": [
8 | 2,
9 | "single"
10 | ],
11 | "linebreak-style": [
12 | 2,
13 | "unix"
14 | ],
15 | "semi": [
16 | 2,
17 | "always"
18 | ]
19 | },
20 | "env": {
21 | "es6": true,
22 | "node": true,
23 | "browser": true
24 | },
25 | "extends": "eslint:recommended"
26 | }
--------------------------------------------------------------------------------
/mnist-client/.gitignore:
--------------------------------------------------------------------------------
1 | venv
2 | *.pyc
3 | node_modules
4 | static/js/main.js
5 | index.html
6 |
--------------------------------------------------------------------------------
/mnist-client/Dockerfile:
--------------------------------------------------------------------------------
FROM mhart/alpine-node:6.11.3

# Stage all sources in /workspace, install Python + JS dependencies, then
# lay out the templates/ and static/ directory structure Flask expects.
RUN mkdir /workspace
WORKDIR /workspace/
ADD * /workspace/
RUN apk add --no-cache python py-pip
RUN pip install -r /workspace/requirements.txt
RUN cd /workspace && npm install && mkdir templates && mv index.html templates && mkdir static && mv js static && mv css static
CMD ["python", "main.py"]
10 |
--------------------------------------------------------------------------------
/mnist-client/Procfile:
--------------------------------------------------------------------------------
1 | web: gunicorn main:app --log-file=-
2 |
--------------------------------------------------------------------------------
/mnist-client/README.md:
--------------------------------------------------------------------------------
1 | # MNIST classification by PaddlePaddle
2 |
3 | 
4 |
5 | ## Usage
6 |
7 | This MNIST classification demo consists of two parts: a PaddlePaddle
8 | inference server and a Javascript front end. We will start them
9 | separately.
10 |
11 | We will use Docker to run the demo, if you are not familiar with
12 | Docker, please checkout
13 | this
14 | [tutorial](https://github.com/PaddlePaddle/Paddle/wiki/TLDR-for-new-docker-user).
15 |
16 | ### Start the Inference Server
17 |
18 | The inference server can be used to inference any model trained by
19 | PaddlePaddle. Please see [here](../serve/README.md) for more details.
20 |
21 | 1. Download the MNIST inference model topology and parameters to the
22 | current working directory.
23 |
24 | ```bash
25 | wget https://s3.us-east-2.amazonaws.com/models.paddlepaddle/end-to-end-mnist/inference_topology.pkl
26 | wget https://s3.us-east-2.amazonaws.com/models.paddlepaddle/end-to-end-mnist/param.tar
27 | ```
28 |
29 | 1. Run following command to start the inference server:
30 |
31 | ```bash
32 | docker run --name paddle_serve -v `pwd`:/data -d -p 8000:80 -e WITH_GPU=0 paddlepaddle/book:serve
33 | ```
34 |
35 | The above command will mount the current working directory to the
36 | `/data` directory inside the docker container. The inference
37 | server will load the model topology and parameters that we just
38 | downloaded from there.
39 |
40 | After you are done with the demo, you can run `docker stop
41 | paddle_serve` to stop this docker container.
42 |
43 | ### Start the Front End
44 |
45 | 1. Run the following command
46 | ```bash
47 | docker run -it -p 5000:5000 -e BACKEND_URL=http://localhost:8000/ paddlepaddle/book:mnist
48 | ```
49 |
50 | `BACKEND_URL` in the above command specifies the inference server
51 | endpoint. If you started the inference server on another machine,
52 | or want to visit the front end remotely, you may want to change its
53 | value.
54 |
55 | 1. Visit http://localhost:5000 and you will see the PaddlePaddle MNIST demo.
56 |
57 |
58 | ## Build
59 |
60 | We have already prepared the pre-built docker image
61 | `paddlepaddle/book:mnist`, here is the command if you want to build
62 | the docker image again.
63 |
64 | ```bash
65 | docker build -t paddlepaddle/book:mnist .
66 | ```
67 |
68 |
69 | ## Acknowledgement
70 |
71 | Thanks to the great project https://github.com/sugyan/tensorflow-mnist
72 | . Most of the code in this project comes from there.
73 |
--------------------------------------------------------------------------------
/mnist-client/app.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "paddlepaddle-mnist",
3 | "buildpacks": [
4 | { "url": "https://github.com/heroku/heroku-buildpack-nodejs" },
5 | { "url": "https://github.com/heroku/heroku-buildpack-python" }
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/mnist-client/gulpfile.js:
--------------------------------------------------------------------------------
var gulp = require('gulp');
var babel = require('gulp-babel');
var sourcemaps = require('gulp-sourcemaps');
var uglify = require('gulp-uglify');

// Transpile ES2015 sources, minify them, and write the result (with inline
// source maps) to static/js.
gulp.task('build', function() {
    return gulp.src('src/js/*.js')
        .pipe(babel({ presets: ['es2015'] }))
        .pipe(sourcemaps.init({ loadMaps: true }))
        .pipe(uglify())
        .pipe(sourcemaps.write())
        .pipe(gulp.dest('static/js'));
});

// Rebuild whenever a source file changes.
gulp.task('watch', function() {
    gulp.watch('src/js/*.js', ['build']);
});

// `gulp` with no arguments runs a single build.
gulp.task('default', ['build']);
20 |
--------------------------------------------------------------------------------
/mnist-client/main.py:
--------------------------------------------------------------------------------
from flask import Flask, jsonify, render_template, request
import os

# webapp: Flask front end for the MNIST drawing demo
app = Flask(__name__)
6 |
7 |
@app.route('/')
def main():
    # Serve the drawing UI; the inference-server URL is injected into the
    # template so the JS front end knows where to send requests.
    backend_url = os.getenv('BACKEND_URL', 'http://localhost:8000/')
    return render_template('index.html', backend_url=backend_url)
12 |
13 |
if __name__ == '__main__':
    # Listen on all interfaces so the app is reachable from outside the
    # container; threaded mode serves concurrent requests.
    app.run(host='0.0.0.0', port=5000, threaded=True)
16 |
--------------------------------------------------------------------------------
/mnist-client/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "paddlepaddle-mnist",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1",
8 | "postinstall": "gulp"
9 | },
10 | "keywords": [],
11 | "author": "",
12 | "license": "ISC",
13 | "repository": {
14 | "type": "git",
15 | "url": "https://github.com/sugyan/tensorflow-mnist.git"
16 | },
17 | "engines": {
18 | "node": "6.x"
19 | },
20 | "dependencies": {
21 | "babel-preset-es2015": "^6.1.18",
22 | "bootstrap": "^3.3.5",
23 | "gulp": "^3.9.0",
24 | "gulp-babel": "^6.1.0",
25 | "gulp-sourcemaps": "^1.6.0",
26 | "gulp-uglify": "^1.5.1",
27 | "jquery": "^3.0.0"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/mnist-client/requirements.txt:
--------------------------------------------------------------------------------
1 | flask>=1.0.0
2 |
--------------------------------------------------------------------------------
/mnist-client/runtime.txt:
--------------------------------------------------------------------------------
1 | python-3.6.0
--------------------------------------------------------------------------------
/mnist-client/src/js/main.js:
--------------------------------------------------------------------------------
1 | /* global $ */
// Drawing pad for the MNIST demo: the user draws a digit on a large canvas,
// a downscaled 28x28 copy is shown as a preview, and the pixel data is POSTed
// to the inference backend (BACKEND_URL is provided globally by the page
// template), whose per-class confidences fill the `#output` table.
class Main {
    // Grab the two canvases, size the main one to 449x449 (16px per MNIST
    // cell, +1 for the border line), hook up mouse handlers, and reset.
    constructor() {
        this.canvas = document.getElementById('main');
        this.input = document.getElementById('input');
        this.canvas.width = 449; // 16 * 28 + 1
        this.canvas.height = 449; // 16 * 28 + 1
        this.ctx = this.canvas.getContext('2d');
        this.canvas.addEventListener('mousedown', this.onMouseDown.bind(this));
        this.canvas.addEventListener('mouseup', this.onMouseUp.bind(this));
        this.canvas.addEventListener('mousemove', this.onMouseMove.bind(this));
        this.initialize();
    }
    // Clear to white, draw the border and the faint 28x28 guide grid,
    // refresh the preview, and blank the confidence table.
    initialize() {
        this.ctx.fillStyle = '#FFFFFF';
        this.ctx.fillRect(0, 0, 449, 449);
        this.ctx.lineWidth = 1;
        this.ctx.strokeRect(0, 0, 449, 449);
        this.ctx.lineWidth = 0.05;
        // 27 interior grid lines in each direction (every 16px).
        for (var i = 0; i < 27; i++) {
            this.ctx.beginPath();
            this.ctx.moveTo((i + 1) * 16, 0);
            this.ctx.lineTo((i + 1) * 16, 449);
            this.ctx.closePath();
            this.ctx.stroke();

            this.ctx.beginPath();
            this.ctx.moveTo( 0, (i + 1) * 16);
            this.ctx.lineTo(449, (i + 1) * 16);
            this.ctx.closePath();
            this.ctx.stroke();
        }
        this.drawInput();
        $('#output td').text('').removeClass('success');
    }
    // Begin a stroke: remember the starting point in canvas coordinates.
    onMouseDown(e) {
        this.canvas.style.cursor = 'default';
        this.drawing = true;
        this.prev = this.getPosition(e.clientX, e.clientY);
    }
    // End the stroke and re-run preview + inference.
    onMouseUp() {
        this.drawing = false;
        this.drawInput();
    }
    // While the button is held, draw a thick round-capped segment from the
    // previous point to the current one.
    onMouseMove(e) {
        if (this.drawing) {
            var curr = this.getPosition(e.clientX, e.clientY);
            this.ctx.lineWidth = 16;
            this.ctx.lineCap = 'round';
            this.ctx.beginPath();
            this.ctx.moveTo(this.prev.x, this.prev.y);
            this.ctx.lineTo(curr.x, curr.y);
            this.ctx.stroke();
            this.ctx.closePath();
            this.prev = curr;
        }
    }
    // Convert viewport (client) coordinates to canvas-local coordinates.
    getPosition(clientX, clientY) {
        var rect = this.canvas.getBoundingClientRect();
        return {
            x: clientX - rect.left,
            y: clientY - rect.top
        };
    }
    // Downscale the drawing to 28x28, paint the preview, normalize pixels,
    // and POST them to the backend; on success, fill the confidence table
    // and highlight the argmax row.
    drawInput() {
        var ctx = this.input.getContext('2d');
        var img = new Image();
        img.onload = () => {
            var inputs = [];
            // Scratch canvas used only to resample the drawing to 28x28.
            var small = document.createElement('canvas').getContext('2d');
            small.drawImage(img, 0, 0, img.width, img.height, 0, 0, 28, 28);
            var data = small.getImageData(0, 0, 28, 28).data;
            for (var i = 0; i < 28; i++) {
                for (var j = 0; j < 28; j++) {
                    var n = 4 * (i * 28 + j); // RGBA stride is 4 bytes/pixel
                    // Grayscale = mean of R,G,B (alpha ignored).
                    inputs[i * 28 + j] = (data[n + 0] + data[n + 1] + data[n + 2]) / 3;
                    ctx.fillStyle = 'rgb(' + [data[n + 0], data[n + 1], data[n + 2]].join(',') + ')';
                    ctx.fillRect(j * 5, i * 5, 5, 5); // 5x5 blocks in the preview
                }
            }
            // All-white canvas means nothing was drawn: skip the request.
            if (Math.min(...inputs) === 255) {
                return;
            }
            // Binarize: pure-white background -> -1.0, any stroke -> 1.0.
            // NOTE(review): presumably matches the model's training
            // normalization — confirm against the training script.
            for (var i = 0; i < 784; i++) {
                if (inputs[i] == 255) {
                    // background
                    inputs[i] = -1.0
                } else {
                    inputs[i] = 1.0
                }
            }
            $.ajax({
                url: BACKEND_URL,
                method: 'POST',
                contentType: 'application/json',
                data: JSON.stringify({"img":inputs}),
                success: (data) => {
                    // Server envelope: {"code":..,"message":..,"data":[[10 floats]]}.
                    data = data["data"][0]
                    var max = 0;
                    var max_index = 0;
                    for (let j = 0; j < 10; j++) {
                        // Format each probability as "0.NNN" (or "1.000").
                        var value = Math.round(data[j] * 1000);
                        if (value > max) {
                            max = value;
                            max_index = j;
                        }
                        var digits = String(value).length;
                        for (var k = 0; k < 3 - digits; k++) {
                            value = '0' + value; // left-pad to 3 digits
                        }
                        var text = '0.' + value;
                        if (value > 999) {
                            text = '1.000';
                        }
                        $('#output tr').eq(j + 1).find('td').text(text);
                    }
                    // Highlight only the most confident class.
                    for (let j = 0; j < 10; j++) {
                        if (j === max_index) {
                            $('#output tr').eq(j + 1).find('td').addClass('success');
                        } else {
                            $('#output tr').eq(j + 1).find('td').removeClass('success');
                        }
                    }
                }
            });
        };
        // Round-trip through an Image so drawImage can resample the canvas.
        img.src = this.canvas.toDataURL();
    }
}
130 |
// On DOM ready, build the drawing app and wire the clear button to reset it.
$(function () {
    var app = new Main();
    $('#clear').click(function () {
        app.initialize();
    });
});
137 |
--------------------------------------------------------------------------------
/mnist-client/static/css/bootstrap.min.css:
--------------------------------------------------------------------------------
1 | ../../node_modules/bootstrap/dist/css/bootstrap.min.css
--------------------------------------------------------------------------------
/mnist-client/static/js/jquery.min.js:
--------------------------------------------------------------------------------
1 | ../../node_modules/jquery/dist/jquery.min.js
--------------------------------------------------------------------------------
/mnist-client/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | MNIST
5 |
6 |
7 |
10 |
11 |
12 |
13 |
14 |
15 |
MNIST
16 |
17 |
18 |
draw a digit here!
19 |
20 |
21 | clear
22 |
23 |
24 |
25 |
input:
26 |
27 |
28 |
output:
29 |
30 |
31 | class
32 | confidence
33 |
34 |
35 | 0
36 |
37 |
38 |
39 | 1
40 |
41 |
42 |
43 | 2
44 |
45 |
46 |
47 | 3
48 |
49 |
50 |
51 | 4
52 |
53 |
54 |
55 | 5
56 |
57 |
58 |
59 | 6
60 |
61 |
62 |
63 | 7
64 |
65 |
66 |
67 | 8
68 |
69 |
70 |
71 | 9
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------
/paddle2.0_docs/README.md:
--------------------------------------------------------------------------------
1 | # 基于Paddle2.0的示例教程
2 |
3 | 本目录存放进展中的Paddle2.0版本的文档、教程和示例。目前处于work in progress状态,待2.0正式版发布之后会做相应的调整,并最终完成。
4 |
5 | - 用中文来写。
6 | - 当中的示例代码基于paddle2.0-alpha,随着paddle2.0的研发的进展而调整。
7 | - 整体上用Notebook来完成,每一篇教程可以解决一个具体的示例任务,这样用户可以直接运行(未来考虑与AIStudio联动)
8 | - 只用来向用户说明飞桨框架的使用,不涵盖深度学习的理论知识。(但可以通过链接,提供其他的优质的深度学习理论资料)
9 |
10 | 可以用来参考和借鉴的资料
11 |
12 | - 本repo下的,现存的教程和示例。
13 | - 飞桨官网上的系统性的教程: https://www.paddlepaddle.org.cn/tutorials/projectdetail/590324
14 | - 其他的深度学习框架的教程资料。
15 |
--------------------------------------------------------------------------------
/paddle2.0_docs/dcgan_face/images/face_image1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/dcgan_face/images/face_image1.jpeg
--------------------------------------------------------------------------------
/paddle2.0_docs/dcgan_face/images/face_image2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/dcgan_face/images/face_image2.jpeg
--------------------------------------------------------------------------------
/paddle2.0_docs/dcgan_face/images/loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/dcgan_face/images/loss.png
--------------------------------------------------------------------------------
/paddle2.0_docs/dcgan_face/images/models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/dcgan_face/images/models.png
--------------------------------------------------------------------------------
/paddle2.0_docs/image_ocr/images/image1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/image_ocr/images/image1.png
--------------------------------------------------------------------------------
/paddle2.0_docs/image_ocr/images/image2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/image_ocr/images/image2.png
--------------------------------------------------------------------------------
/paddle2.0_docs/image_ocr/images/image3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/image_ocr/images/image3.png
--------------------------------------------------------------------------------
/paddle2.0_docs/image_ocr/sample_img/9450.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/image_ocr/sample_img/9450.jpg
--------------------------------------------------------------------------------
/paddle2.0_docs/image_ocr/sample_img/9451.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/image_ocr/sample_img/9451.jpg
--------------------------------------------------------------------------------
/paddle2.0_docs/image_ocr/sample_img/9452.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/paddle2.0_docs/image_ocr/sample_img/9452.jpg
--------------------------------------------------------------------------------
/pending/gan/README.md:
--------------------------------------------------------------------------------
1 | TODO: Write about https://github.com/PaddlePaddle/Paddle/tree/develop/demo/gan
2 |
--------------------------------------------------------------------------------
/pending/gan/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 | TODO: Write about https://github.com/PaddlePaddle/Paddle/tree/develop/demo/gan
44 |
45 |
46 |
47 |
48 |
65 |
66 |
--------------------------------------------------------------------------------
/pending/image_caption/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/pending/image_caption/README.md
--------------------------------------------------------------------------------
/pending/image_caption/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
64 |
65 |
--------------------------------------------------------------------------------
/pending/image_detection/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/pending/image_detection/README.md
--------------------------------------------------------------------------------
/pending/image_detection/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
64 |
65 |
--------------------------------------------------------------------------------
/pending/image_qa/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/pending/image_qa/README.md
--------------------------------------------------------------------------------
/pending/image_qa/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
64 |
65 |
--------------------------------------------------------------------------------
/pending/query_relationship/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/pending/query_relationship/README.md
--------------------------------------------------------------------------------
/pending/query_relationship/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
64 |
65 |
--------------------------------------------------------------------------------
/pending/skip_thought/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/pending/skip_thought/README.md
--------------------------------------------------------------------------------
/pending/skip_thought/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
64 |
65 |
--------------------------------------------------------------------------------
/pending/speech_recognition/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaddlePaddle/book/ffaa2d315f1009887ffce26df961f943ba1ffc3f/pending/speech_recognition/README.md
--------------------------------------------------------------------------------
/pending/speech_recognition/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
16 |
17 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
64 |
65 |
--------------------------------------------------------------------------------
/serve/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | .idea
3 | index.html
4 |
--------------------------------------------------------------------------------
/serve/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM paddlepaddle/paddle
2 |
3 | ENV PARAMETER_TAR_PATH=/data/param.tar \
4 | TOPOLOGY_FILE_PATH=/data/inference_topology.pkl
5 | ADD requirements.txt /root
6 | ADD main.py /root
7 | RUN pip install -r /root/requirements.txt
8 | CMD ["python", "/root/main.py"]
9 |
--------------------------------------------------------------------------------
/serve/Dockerfile.gpu:
--------------------------------------------------------------------------------
1 | FROM paddlepaddle/paddle:latest-gpu
2 |
3 | ENV PARAMETER_TAR_PATH=/data/param.tar \
4 | TOPOLOGY_FILE_PATH=/data/inference_topology.pkl
5 | ADD requirements.txt /root
6 | ADD main.py /root
7 | RUN pip install -r /root/requirements.txt
8 | CMD ["python", "/root/main.py"]
9 |
--------------------------------------------------------------------------------
/serve/README.md:
--------------------------------------------------------------------------------
1 | # Inference Server Example
2 |
3 | The inference server can be used to perform inference on any model trained on
4 | PaddlePaddle. It provides an HTTP endpoint.
5 |
6 | ## Run
7 |
8 | The inference server reads a trained model (a topology file and a
9 | parameter file) and serves HTTP request at port `8000`. Because models
10 | differ in the numbers and types of inputs, **the HTTP API will differ
11 | slightly for each model,** please see [HTTP API](#http-api) for the
12 | API spec,
13 | and
14 | [here](https://github.com/PaddlePaddle/book/wiki/Using-Pre-trained-Models) for
15 | the request examples of different models that illustrate the
16 | difference.
17 |
18 | We will first show how to obtain the PaddlePaddle model, and then how
19 | to start the server.
20 |
21 | We will use Docker to run the demo, if you are not familiar with
22 | Docker, please checkout
23 | this
24 | [TLDR](https://github.com/PaddlePaddle/Paddle/wiki/Docker-for-Beginners).
25 |
26 | ### Obtain the PaddlePaddle Model
27 |
28 | A neural network model in PaddlePaddle contains two parts: the
29 | **parameter** and the **topology**.
30 |
31 | A PaddlePaddle training script contains the neural network topology,
32 | which is represented by layers. For example,
33 |
34 | ```python
35 | img = paddle.layer.data(name="img", type=paddle.data_type.dense_vector(784))
36 | hidden = fc_layer(input=img, size=200)
37 | prediction = fc_layer(input=hidden, size=10, act=paddle.activation.Softmax())
38 | ```
39 |
40 | The parameter instance is created by the topology and updated by the
41 | `train` method.
42 |
43 | ```python
44 | ...
45 | params = paddle.parameters.create(cost)
46 | ...
47 | trainer = paddle.trainer.SGD(cost=cost, parameters=params)
48 | ...
49 | ```
50 |
51 | PaddlePaddle stores the topology and parameter separately.
52 |
53 | 1. To serialize a topology, we need to create a topology instance
54 | explicitly by the outputs of the neural network. Then, invoke
55 | `serialize_for_inference` method.
56 |
57 | ```python
58 | # Save the inference topology to protobuf.
59 | inference_topology = paddle.topology.Topology(layers=prediction)
60 | with open("inference_topology.pkl", 'wb') as f:
61 | inference_topology.serialize_for_inference(f)
62 | ```
63 |
64 | 2. To save a parameter, we need to invoke `save_parameter_to_tar` method of
65 | `trainer`.
66 |
67 | ```python
68 | with open('param.tar', 'w') as f:
69 | trainer.save_parameter_to_tar(f)
70 | ```
71 |
72 | After serializing the parameter and topology into two files, we could
73 | use them to set up an inference server.
74 |
75 | For a working example, please see [train.py](https://github.com/reyoung/paddle_mnist_v2_demo/blob/master/train.py).
76 |
77 |
78 | ### Start the Server
79 |
80 | Make sure the `inference_topology.pkl` and `param.tar` mentioned in
81 | the last section are in your current working directory, and run the
82 | command:
83 |
84 | ```bash
85 | docker run --name paddle_serve -v `pwd`:/data -d -p 8000:80 -e WITH_GPU=0 paddlepaddle/book:serve
86 | ```
87 |
88 | The above command will mount the current working directory to the
89 | `/data/` directory inside the docker container. The inference server
90 | will load the model topology and parameters that we just created from
91 | there.
92 |
93 | To run the inference server with GPU support, please make sure you have
94 | [nvidia-docker](https://github.com/NVIDIA/nvidia-docker)
95 | first, and run:
96 |
97 | ```bash
98 | nvidia-docker run --name paddle_serve -v `pwd`:/data -d -p 8000:80 -e WITH_GPU=1 paddlepaddle/book:serve-gpu
99 | ```
100 |
101 | this command will start a server on port `8000`.
102 |
103 | After you are done with the demo, you can run `docker stop
104 | paddle_serve` to stop this docker container.
105 |
106 | ## HTTP API
107 |
108 | The inference server will handle HTTP POST request on path `/`. The
109 | content type of the request and response is json. You need to manually
110 | add `Content-Type` request header as `Content-Type: application/json`.
111 |
112 | The request json object is a single json dictionary object, whose key
113 | is the layer name of input data. The type of the corresponding value
114 | is decided by the data type. For most cases the corresponding value
115 | will be a list of floats. For completeness, we will list all data types
116 | below:
117 |
118 | There are twelve data types supported by PaddlePaddle:
119 |
120 | | | plain | a sequence | a sequence of sequence |
121 | | --- | --- | --- | ---|
122 | | dense | [ f, f, f, f, ... ] | [ [f, f, f, ...], [f, f, f, ...]] | [[[f, f, ...], [f, f, ...]], [[f, f, ...], [f, f, ...]], ...] |
123 | | integer | i | [i, i, ...] | [[i, i, ...], [i, i, ...], ...] |
124 | | sparse | [i, i, ...] | [[i, i, ...], [i, i, ...], ...] | [[[i, i, ...], [i, i, ...], ...], [[i, i, ...], [i, i, ...], ...], ...] |
125 | | sparse | [[i, f], [i, f], ... ] | [[[i, f], [i, f], ... ], ...] | [[[[i, f], [i, f], ... ], ...], ...]
126 |
127 | In the table, `i` stands for a `int` value and `f` stands for a
128 | `float` value.
129 |
130 | What `data_type` should be used is decided by the training
131 | topology. For example,
132 |
133 | * For image data, they are usually a plain dense vector, we flatten
134 | the image into a vector. The pixel values of that image are usually
135 | normalized in `[-1.0, 1.0]` or `[0.0, 1.0]`(depends on each neural
136 | network).
137 |
138 | ```text
139 | +-------+
140 | |243 241|
141 | |139 211| +---->[0.95, 0.95, 0.54, 0.82]
142 | +-------+
143 | ```
144 |
145 | * For text data, each word of that text is represented by an
146 | integer. The association map between word and integer is decided by
147 | the training process. A sentence is represented by a list of
148 | integer.
149 |
150 | ```text
151 | I am good .
152 | +
153 | |
154 | v
155 | 23 942 402 19 +-----> [23, 942, 402, 19]
156 | ```
157 |
158 | A sample request data of a `4x4` image and a sentence could be
159 |
160 | ```json
161 | {
162 | "img": [
163 | 0.95,
164 | 0.95,
165 | 0.54,
166 | 0.82
167 | ],
168 | "sentence": [
169 | 23,
170 | 942,
171 | 402,
172 | 19
173 | ]
174 | }
175 | ```
176 |
177 | The response is a json object, too. The example of return data are:
178 |
179 | ```json
180 | {
181 | "code": 0,
182 | "data": [
183 | [
184 | 0.10060056298971176,
185 | 0.057179879397153854,
186 | 0.1453431099653244,
187 | 0.15825574100017548,
188 | 0.04464773088693619,
189 | 0.1566203236579895,
190 | 0.05657859891653061,
191 | 0.12077419459819794,
192 | 0.08073269575834274,
193 | 0.07926714420318604
194 | ]
195 | ],
196 | "message": "success"
197 | }
198 | ```
199 |
200 | Here, `code` and `message` represent the status of the request.
201 | `data` corresponds to the outputs of the neural network; they could be a
202 | probability of each class, could be the IDs of output sentence, and so
203 | on.
204 |
205 | ## MNIST Demo Client
206 |
207 | If you have trained a model with [train.py](https://github.com/reyoung/paddle_mnist_v2_demo/blob/master/train.py) and
208 | started an inference server, you can use this [client](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits/client/client.py) to test if it works right.
209 |
210 | ## Build
211 |
212 | We have already prepared the pre-built docker image
213 | `paddlepaddle/book:serve`, here is the command if you want to build
214 | the docker image again.
215 |
216 | ```bash
217 | docker build -t paddlepaddle/book:serve .
218 | docker build -t paddlepaddle/book:serve-gpu -f Dockerfile.gpu .
219 | ```
220 |
--------------------------------------------------------------------------------
/serve/main.py:
--------------------------------------------------------------------------------
import os
import traceback

import paddle as paddle
from flask import Flask, jsonify, request
from flask_cors import CORS
from Queue import Queue  # Python 2 stdlib module; this file targets Python 2
import threading

# Path of the trained parameter tar produced by trainer.save_parameter_to_tar;
# required (see serve/README.md).
tarfn = os.getenv('PARAMETER_TAR_PATH', None)

if tarfn is None:
    raise ValueError(
        "please specify parameter tar file path with environment variable PARAMETER_TAR_PATH"
    )

# Path of the pickled inference topology; required.
topology_filepath = os.getenv('TOPOLOGY_FILE_PATH', None)

if topology_filepath is None:
    raise ValueError(
        "please specify topology file path with environment variable TOPOLOGY_FILE_PATH"
    )

# All runtime knobs come from the environment so the Docker image needs no CLI args.
with_gpu = os.getenv('WITH_GPU', '0') != '0'  # any value other than '0' enables GPU
output_field = os.getenv('OUTPUT_FIELD', 'value')  # comma-separated field names for infer()
port = int(os.getenv('PORT', '80'))

app = Flask(__name__)
CORS(app)  # allow cross-origin requests, e.g. from the mnist-client page
30 |
31 |
def errorResp(msg):
    """Build the standard failure envelope: code -1 plus an error message."""
    body = {'code': -1, 'message': msg}
    return jsonify(**body)
34 |
35 |
def successResp(data):
    """Build the standard success envelope: code 0, message, and payload."""
    body = {'code': 0, 'message': 'success', 'data': data}
    return jsonify(**body)
38 |
39 |
40 | sendQ = Queue()
41 |
42 |
@app.route('/', methods=['POST'])
def infer():
    """Handle one inference POST: forward the JSON body to the worker
    thread over ``sendQ`` and block until its reply arrives on a
    per-request queue, then wrap it in the standard JSON envelope."""
    reply_queue = Queue()
    sendQ.put((request.json, reply_queue))
    ok, payload = reply_queue.get()
    if not ok:
        return errorResp(payload)
    return successResp(payload)
52 |
53 |
# PaddlePaddle v0.10.0 does not support inference from different
# threads, so we create a single worker thread.
def worker():
    """Single inference worker loop.

    Initializes PaddlePaddle once, loads the parameters and topology from
    the configured files, then serves forever: each item taken from
    ``sendQ`` is a ``(json_dict, reply_queue)`` pair; the reply queue
    receives ``(True, result)`` on success or ``(False, traceback_str)``
    on failure.
    """
    paddle.init(use_gpu=with_gpu)

    # Drop empty entries so e.g. OUTPUT_FIELD="value," still works.
    fields = filter(lambda x: len(x) != 0, output_field.split(","))

    # The parameter tar and the pickled topology are binary; open them in
    # binary mode so behavior is correct on non-POSIX platforms too.
    with open(tarfn, 'rb') as param_f, open(topology_filepath, 'rb') as topo_f:
        params = paddle.parameters.Parameters.from_tar(param_f)
        inferer = paddle.inference.Inference(parameters=params, fileobj=topo_f)

    while True:
        j, recv_queue = sendQ.get()
        try:
            # Build one data row: each input-layer name in the request maps
            # to its position in the row via `feeding`.
            feeding = {}
            d = []
            for i, key in enumerate(j):
                d.append(j[key])
                feeding[key] = i
            r = inferer.infer([d], feeding=feeding, field=fields)
        except Exception:
            # Report only real errors back to the waiting HTTP handler; a
            # bare `except:` here would also swallow SystemExit and
            # KeyboardInterrupt.
            trace = traceback.format_exc()
            recv_queue.put((False, trace))
            continue
        # `field` with several names yields a list of arrays; normalize
        # everything to plain (nested) Python lists for JSON serialization.
        if isinstance(r, list):
            recv_queue.put((True, [elem.tolist() for elem in r]))
        else:
            recv_queue.put((True, r.tolist()))
82 |
83 |
if __name__ == '__main__':
    # Run all inference on one daemon thread (PaddlePaddle v0.10.0 is not
    # thread-safe for inference); daemon=True lets the process exit cleanly.
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    # Python 2 print statement — consistent with the `Queue` import above.
    print 'serving on port', port
    # threaded=True: Flask handlers may block on recv_queue.get() while the
    # single worker serializes the actual inference.
    app.run(host='0.0.0.0', port=port, threaded=True)
90 |
--------------------------------------------------------------------------------
/serve/requirements.txt:
--------------------------------------------------------------------------------
1 | flask>=1.0.0
2 | Flask-CORS==3.0.3
3 |
--------------------------------------------------------------------------------