├── .circleci
│   ├── config.yml
│   └── scripts
│       ├── build_for_windows.sh
│       ├── install_conda.bat
│       └── windows_cuda_install.sh
├── .devcontainer
│   ├── Dockerfile
│   ├── devcontainer.json
│   └── requirements.txt
├── .gitignore
├── .gitmodules
├── .jenkins
│   ├── build.sh
│   ├── delete_html_file_with_runnable_code_removed.py
│   ├── remove_invisible_code_block_batch.sh
│   ├── remove_invisible_code_block_from_html.py
│   ├── remove_invisible_code_block_from_ipynb.py
│   ├── remove_invisible_code_block_from_py.py
│   ├── remove_invisible_code_block_from_rst_txt.py
│   ├── remove_runnable_code.py
│   └── replace_tutorial_html_content.py
├── LICENSE
├── Makefile
├── README.md
├── _static
│   ├── ajax-loader.gif
│   ├── basic.css
│   ├── broken_example.png
│   ├── comment-bright.png
│   ├── comment-close.png
│   ├── comment.png
│   ├── css
│   │   └── pytorch_theme.css
│   ├── doctools.js
│   ├── documentation_options.js
│   ├── down-pressed.png
│   ├── down.png
│   ├── file.png
│   ├── fonts
│   │   ├── FreightSans
│   │   │   ├── freight-sans-light.woff
│   │   │   ├── freight-sans-light.woff2
│   │   │   ├── freight-sans-regular.woff
│   │   │   └── freight-sans-regular.woff2
│   │   └── IBMPlexMono
│   │       ├── IBMPlexMono-Light.woff
│   │       ├── IBMPlexMono-Light.woff2
│   │       ├── IBMPlexMono-Medium.woff
│   │       ├── IBMPlexMono-Medium.woff2
│   │       ├── IBMPlexMono-Regular.woff
│   │       ├── IBMPlexMono-Regular.woff2
│   │       ├── IBMPlexMono-SemiBold.woff
│   │       └── IBMPlexMono-SemiBold.woff2
│   ├── gallery.css
│   ├── imagenet_class_index.json
│   ├── images
│   │   ├── arrow-down-orange.svg
│   │   ├── arrow-right-with-tail.svg
│   │   ├── chevron-down-grey.svg
│   │   ├── chevron-right-orange.svg
│   │   ├── chevron-right-white.svg
│   │   ├── home-footer-background.jpg
│   │   ├── icon-close.svg
│   │   ├── icon-menu-dots-dark.svg
│   │   ├── logo-dark.svg
│   │   ├── logo-facebook-dark.svg
│   │   ├── logo-icon.svg
│   │   ├── logo-twitter-dark.svg
│   │   ├── logo.svg
│   │   └── view-page-source-icon.svg
│   ├── img
│   │   ├── 8_workers.png
│   │   ├── ONNXLive.png
│   │   ├── SRResNet.png
│   │   ├── Variable.png
│   │   ├── audio_preprocessing_tutorial_waveform.png
│   │   ├── autodiff.png
│   │   ├── basics
│   │   │   ├── comp-graph.png
│   │   │   ├── fashion_mnist.png
│   │   │   ├── optimizationloops.png
│   │   │   └── typesdata.png
│   │   ├── bert.png
│   │   ├── bert_mrpc.png
│   │   ├── cartpole.gif
│   │   ├── cat.jpg
│   │   ├── cat_224x224.jpg
│   │   ├── cat_superres_with_ort.jpg
│   │   ├── channels_last_memory_format.png
│   │   ├── char_rnn_generation.png
│   │   ├── chat.png
│   │   ├── chatbot
│   │   │   ├── RNN-bidirectional.png
│   │   │   ├── attn1.png
│   │   │   ├── attn2.png
│   │   │   ├── bot.png
│   │   │   ├── diff.png
│   │   │   ├── global_attn.png
│   │   │   ├── grad_clip.png
│   │   │   ├── pytorch_workflow.png
│   │   │   ├── scores.png
│   │   │   ├── seq2seq_batches.png
│   │   │   └── seq2seq_ts.png
│   │   ├── cifar10.png
│   │   ├── classic_memory_format.png
│   │   ├── compare_output.png
│   │   ├── compare_stub.png
│   │   ├── cpp-frontend
│   │   │   └── digits.png
│   │   ├── cpp-pytorch.png
│   │   ├── cpp_logo.png
│   │   ├── dag_autograd.png
│   │   ├── data_parallel.png
│   │   ├── dcgan_generator.png
│   │   ├── deeplabv3_android.png
│   │   ├── deeplabv3_android2.png
│   │   ├── deeplabv3_ios.png
│   │   ├── deeplabv3_ios2.png
│   │   ├── distributed
│   │   │   ├── DistPyTorch.jpg
│   │   │   ├── all_gather.pdf
│   │   │   ├── all_gather.png
│   │   │   ├── all_reduce.pdf
│   │   │   ├── all_reduce.png
│   │   │   ├── broadcast.png
│   │   │   ├── gather.png
│   │   │   ├── reduce.png
│   │   │   ├── scatter.png
│   │   │   ├── send_recv.png
│   │   │   └── send_recv_big.png
│   │   ├── dynamic_graph.gif
│   │   ├── fgsm_panda_image.png
│   │   ├── flask.png
│   │   ├── hybrid_frontend
│   │   │   ├── 220px-KnnClassification.png
│   │   │   ├── iris_pic.jpg
│   │   │   └── pytorch_workflow_small.jpg
│   │   ├── landmarked_face2.png
│   │   ├── mario.gif
│   │   ├── mario_env.png
│   │   ├── memory_format_logo.png
│   │   ├── mnist.png
│   │   ├── model-parallel-images
│   │   │   ├── mp_vs_rn.png
│   │   │   ├── mp_vs_rn_vs_pp.png
│   │   │   └── split_size_tradeoff.png
│   │   ├── named_tensor.png
│   │   ├── neural-style
│   │   │   ├── dancing.jpg
│   │   │   ├── neuralstyle.png
│   │   │   ├── picasso.jpg
│   │   │   ├── sphx_glr_neural_style_tutorial_001.png
│   │   │   ├── sphx_glr_neural_style_tutorial_002.png
│   │   │   ├── sphx_glr_neural_style_tutorial_003.png
│   │   │   └── sphx_glr_neural_style_tutorial_004.png
│   │   ├── oneworker.png
│   │   ├── panda.png
│   │   ├── per_channel_quant.png
│   │   ├── per_tensor_quant.png
│   │   ├── pruning.png
│   │   ├── pytorch-logo-dark.png
│   │   ├── pytorch-logo-dark.svg
│   │   ├── qat.png
│   │   ├── quant_asym.png
│   │   ├── quant_embeddings.png
│   │   ├── quantized_transfer_learning.png
│   │   ├── ray-tune.png
│   │   ├── reinforcement_learning_diagram.jpg
│   │   ├── rnnclass.png
│   │   ├── rpc-images
│   │   │   └── batch.png
│   │   ├── rpc_trace_img.png
│   │   ├── sample_file.jpeg
│   │   ├── scipynumpy.png
│   │   ├── seq-seq-images
│   │   │   ├── attention-decoder-network.dot
│   │   │   ├── attention-decoder-network.png
│   │   │   ├── decoder-network.dot
│   │   │   ├── decoder-network.png
│   │   │   ├── decoder.png
│   │   │   ├── decoder@2x.png
│   │   │   ├── encoder-network.dot
│   │   │   ├── encoder-network.png
│   │   │   ├── seq2seq.png
│   │   │   ├── seq2seq@2x.png
│   │   │   ├── word-encoding.png
│   │   │   └── word-encoding@2x.png
│   │   ├── seq2seq_flat.png
│   │   ├── shadow.png
│   │   ├── steam-train-whistle-daniel_simon-converted-from-mp3.wav
│   │   ├── stn
│   │   │   ├── FSeq.png
│   │   │   ├── Five.gif
│   │   │   ├── stn-arch.png
│   │   │   └── tr.png
│   │   ├── tensor_illustration.png
│   │   ├── tensor_illustration_flat.png
│   │   ├── tensorboard_figure.png
│   │   ├── tensorboard_first_view.png
│   │   ├── tensorboard_images.png
│   │   ├── tensorboard_model_viz.png
│   │   ├── tensorboard_pr_curves.png
│   │   ├── tensorboard_projector.png
│   │   ├── tensorboard_scalar_runs.png
│   │   ├── text_sentiment_ngrams_model.png
│   │   ├── thumbnails
│   │   │   ├── 220px-KnnClassification.png
│   │   │   ├── babel.jpg
│   │   │   ├── captum_teaser.png
│   │   │   ├── cropped
│   │   │   │   ├── 60-min-blitz.png
│   │   │   │   ├── Adversarial-Example-Generation.png
│   │   │   │   ├── Autograd-in-Cpp-Frontend.png
│   │   │   │   ├── Combining-Distributed-DataParallel-with-Distributed-RPC-Framework.png
│   │   │   │   ├── Custom-Cpp-and-CUDA-Extensions.png
│   │   │   │   ├── DCGAN-Tutorial.png
│   │   │   │   ├── Deploying-PyTorch-in-Python-via-a-REST-API-with-Flask.png
│   │   │   │   ├── Distributed-Pipeline-Parallelism-Using-RPC.png
│   │   │   │   ├── Extending-TorchScript-with-Custom-Cpp-Classes.png
│   │   │   │   ├── Extending-TorchScript-with-Custom-Cpp-Operators.png
│   │   │   │   ├── Getting Started with Distributed-RPC-Framework.png
│   │   │   │   ├── Getting-Started-with Distributed RPC Framework.png
│   │   │   │   ├── Getting-Started-with-Distributed-Data-Parallel.png
│   │   │   │   ├── Getting-Started-with-Distributed-RPC-Framework.png
│   │   │   │   ├── Implementing-Batch-RPC-Processing-Using-Asynchronous-Executions.png
│   │   │   │   ├── Implementing-a-Parameter-Server-Using-Distributed-RPC-Framework.png
│   │   │   │   ├── Introduction-to-TorchScript.png
│   │   │   │   ├── Language-Translation-with-TorchText.png
│   │   │   │   ├── Loading-a-TorchScript-Model-in-Cpp.png
│   │   │   │   ├── Model-Parallel-Best-Practices.png
│   │   │   │   ├── NLP-From-Scratch-Classifying-Names-with-a-Character-Level-RNN.png
│   │   │   │   ├── NLP-From-Scratch-Generating-Names-with-a-Character-Level-RNN.png
│   │   │   │   ├── NLP-From-Scratch-Translation-with-a-Sequence-to-Sequence-Network-and-Attention.png
│   │   │   │   ├── Pruning-Tutorial.png
│   │   │   │   ├── PyTorch-Distributed-Overview.png
│   │   │   │   ├── Sequence-to-Sequence-Modeling-with-nnTransformer-andTorchText.png
│   │   │   │   ├── Text-Classification-with-TorchText.png
│   │   │   │   ├── TorchScript-Parallelism.jpg
│   │   │   │   ├── TorchVision-Object-Detection-Finetuning-Tutorial.png
│   │   │   │   ├── Training-Transformer-Models-using-Distributed-Data-Parallel-and-Pipeline-Parallelism.png
│   │   │   │   ├── Training-Transformer-models-using-Pipeline-Parallelism.png
│   │   │   │   ├── Transfer-Learning-for-Computer-Vision-Tutorial.png
│   │   │   │   ├── Tutorials_Card_Template.psd
│   │   │   │   ├── Using-the-PyTorch-Cpp-Frontend.png
│   │   │   │   ├── Writing-Distributed-Applications-with-PyTorch.png
│   │   │   │   ├── advanced-PyTorch-1point0-Distributed-Trainer-with-Amazon-AWS.png
│   │   │   │   ├── amp.png
│   │   │   │   ├── android.png
│   │   │   │   ├── custom-datasets-transforms-and-dataloaders.png
│   │   │   │   ├── defining-a-network.PNG
│   │   │   │   ├── experimental-Channels-Last-Memory-Format-in-PyTorch.png
│   │   │   │   ├── experimental-Dynamic-Quantization-on-BERT.png
│   │   │   │   ├── experimental-Dynamic-Quantization-on-an-LSTM-Word-Language-Model.png
│   │   │   │   ├── experimental-Introduction-to-Named-Tensors-in-PyTorch.png
│   │   │   │   ├── experimental-Quantized-Transfer-Learning-for-Computer-Vision-Tutorial.png
│   │   │   │   ├── experimental-Static-Quantization-with-Eager-Mode-in-PyTorch.png
│   │   │   │   ├── generic-pytorch-logo.png
│   │   │   │   ├── graph-mode-dynamic-bert.png
│   │   │   │   ├── ios.png
│   │   │   │   ├── learning-pytorch-with-examples.png
│   │   │   │   ├── loading-data-in-pytorch.png
│   │   │   │   ├── loading-data.PNG
│   │   │   │   ├── mobile.png
│   │   │   │   ├── model-interpretability-using-captum.png
│   │   │   │   ├── optional-Exporting-a-Model-from-PyTorch-to-ONNX-and-Running-it-using-ONNX-Runtime.png
│   │   │   │   ├── profile.png
│   │   │   │   ├── profiler.png
│   │   │   │   ├── saving-and-loading-general-checkpoint.PNG
│   │   │   │   ├── saving-and-loading-models-across-devices.PNG
│   │   │   │   ├── saving-and-loading-models-for-inference.PNG
│   │   │   │   ├── saving-multiple-models.PNG
│   │   │   │   ├── torch-nn.png
│   │   │   │   ├── torchaudio-Tutorial.png
│   │   │   │   ├── torchaudio-speech.png
│   │   │   │   ├── torchscript_overview.png
│   │   │   │   ├── using-dynamic-post-training-quantization.png
│   │   │   │   ├── using-flask-create-restful-api.png
│   │   │   │   ├── visualizing-with-tensorboard.png
│   │   │   │   ├── warmstarting-models.PNG
│   │   │   │   ├── what-is-a-state-dict.PNG
│   │   │   │   └── zeroing-out-gradients.PNG
│   │   │   ├── custom_dataset.png
│   │   │   ├── default.png
│   │   │   ├── defining_a_network.png
│   │   │   ├── examples.png
│   │   │   ├── eye.png
│   │   │   ├── floppy.png
│   │   │   ├── german_to_english_translation.png
│   │   │   ├── landmarked_face2.png
│   │   │   ├── pixelated-cat.png
│   │   │   ├── pytorch-logo-flat.png
│   │   │   ├── pytorch_tensorboard.png
│   │   │   ├── sphx_glr_transfer_learning_tutorial_001.png
│   │   │   ├── tensorboard_dev.png
│   │   │   ├── tensorboard_scalars.png
│   │   │   ├── torch-logo.png
│   │   │   ├── torchtext.png
│   │   │   └── tv-img.png
│   │   ├── torch-nn-vs-pytorch-nn.png
│   │   ├── torch.nn.png
│   │   ├── torchscript.png
│   │   ├── torchscript_to_cpp.png
│   │   ├── trace_img.png
│   │   ├── transformer_architecture.jpg
│   │   ├── transformer_input_target.png
│   │   └── tv_tutorial
│   │       ├── tv_image01.png
│   │       ├── tv_image02.png
│   │       ├── tv_image03.png
│   │       ├── tv_image04.png
│   │       ├── tv_image05.png
│   │       ├── tv_image06.png
│   │       └── tv_image07.png
│   ├── jquery-3.2.1.js
│   ├── jquery.js
│   ├── js
│   │   └── modernizr.min.js
│   ├── minus.png
│   ├── mnist.pkl.gz
│   ├── no_image.png
│   ├── plus.png
│   ├── pygments.css
│   ├── pytorch-logo-dark.svg
│   ├── searchtools.js
│   ├── torchvision_finetuning_instance_segmentation.ipynb
│   ├── tv-training-code.py
│   ├── underscore-1.3.1.js
│   ├── underscore.js
│   ├── up-pressed.png
│   ├── up.png
│   └── websupport.js
├── _templates
│   └── layout.html
├── advanced_source
│   ├── ONNXLive.rst
│   ├── README.txt
│   ├── cpp_autograd.rst
│   ├── cpp_export.rst
│   ├── cpp_extension.rst
│   ├── cpp_frontend.rst
│   ├── ddp_pipeline.py
│   ├── dispatcher.rst
│   ├── dispatcher
│   │   ├── CMakeLists.txt
│   │   ├── op.cpp
│   │   └── test.py
│   ├── dynamic_quantization_tutorial.py
│   ├── extend_dispatcher.rst
│   ├── neural_style_tutorial.py
│   ├── numpy_extensions_tutorial.py
│   ├── rpc_ddp_tutorial.rst
│   ├── rpc_ddp_tutorial
│   │   └── main.py
│   ├── static_quantization_tutorial.rst
│   ├── super_resolution_with_onnxruntime.py
│   ├── torch-script-parallelism.rst
│   ├── torch_script_custom_classes.rst
│   ├── torch_script_custom_classes
│   │   ├── CMakeLists.txt
│   │   ├── custom_class_project
│   │   │   ├── CMakeLists.txt
│   │   │   ├── class.cpp
│   │   │   ├── custom_test.py
│   │   │   ├── export_attr.py
│   │   │   └── save.py
│   │   ├── infer.cpp
│   │   ├── run.sh
│   │   └── run2.sh
│   ├── torch_script_custom_ops.rst
│   ├── torch_script_custom_ops
│   │   ├── CMakeLists.txt
│   │   ├── op.cpp
│   │   ├── smoke_test.py
│   │   └── test.py
│   └── transformer__timeseries_cpp_tutorial
│       ├── CMakeLists.txt
│       ├── scheduler.h
│       └── transformer_timeseries.cpp
├── beginner_source
│   ├── Intro_to_TorchScript_tutorial.py
│   ├── PyTorch Cheat.md
│   ├── README.txt
│   ├── audio_preprocessing_tutorial.py
│   ├── basics
│   │   ├── README.txt
│   │   ├── autogradqs_tutorial.py
│   │   ├── buildmodel_tutorial.py
│   │   ├── data_tutorial.py
│   │   ├── intro.py
│   │   ├── optimization_tutorial.py
│   │   ├── qs_toc.txt
│   │   ├── quickstart_tutorial.py
│   │   ├── saveloadrun_tutorial.py
│   │   ├── tensorqs_tutorial.py
│   │   └── transforms_tutorial.py
│   ├── blitz
│   │   ├── README.txt
│   │   ├── autograd_tutorial.py
│   │   ├── cifar10_tutorial.py
│   │   ├── data_parallel_tutorial.py
│   │   ├── neural_networks_tutorial.py
│   │   └── tensor_tutorial.py
│   ├── chatbot_tutorial.py
│   ├── colab.rst
│   ├── data_loading_tutorial.py
│   ├── dcgan_faces_tutorial.py
│   ├── deep_learning_60min_blitz.rst
│   ├── deep_learning_nlp_tutorial.rst
│   ├── deeplabv3_on_android.rst
│   ├── deeplabv3_on_ios.rst
│   ├── deploy_seq2seq_hybrid_frontend_tutorial.py
│   ├── dist_overview.rst
│   ├── examples_autograd
│   │   ├── README.txt
│   │   ├── polynomial_autograd.py
│   │   └── polynomial_custom_function.py
│   ├── examples_nn
│   │   ├── README.txt
│   │   ├── dynamic_net.py
│   │   ├── polynomial_module.py
│   │   ├── polynomial_nn.py
│   │   └── polynomial_optim.py
│   ├── examples_tensor
│   │   ├── README.txt
│   │   ├── polynomial_numpy.py
│   │   └── polynomial_tensor.py
│   ├── fgsm_tutorial.py
│   ├── former_torchies
│   │   ├── README.txt
│   │   ├── autograd_tutorial_old.py
│   │   ├── nnft_tutorial.py
│   │   ├── parallelism_tutorial.py
│   │   └── tensor_tutorial_old.py
│   ├── former_torchies_tutorial.rst
│   ├── hybrid_frontend
│   │   ├── README.txt
│   │   └── learning_hybrid_frontend_through_example_tutorial.py
│   ├── hybrid_frontend_tutorial.rst
│   ├── hyperparameter_tuning_tutorial.py
│   ├── nlp
│   │   ├── README.txt
│   │   ├── advanced_tutorial.py
│   │   ├── deep_learning_tutorial.py
│   │   ├── pytorch_tutorial.py
│   │   ├── sequence_models_tutorial.py
│   │   └── word_embeddings_tutorial.py
│   ├── nn_tutorial.py
│   ├── profiler.py
│   ├── ptcheat.rst
│   ├── pytorch_with_examples.rst
│   ├── saving_loading_models.py
│   ├── text_sentiment_ngrams_tutorial.py
│   ├── torchtext_translation.py
│   ├── transfer_learning_tutorial.py
│   ├── transformer_tutorial.py
│   └── vt_tutorial.py
├── build.sh
├── cleanup.sh
├── conf.py
├── custom_directives.py
├── index.rst
├── intermediate_source
│   ├── README.txt
│   ├── char_rnn_classification_tutorial.py
│   ├── char_rnn_generation_tutorial.py
│   ├── ddp_tutorial.rst
│   ├── dist_pipeline_parallel_tutorial.rst
│   ├── dist_tuto.rst
│   ├── dynamic_quantization_bert_tutorial.rst
│   ├── flask_rest_api_tutorial.py
│   ├── fx_conv_bn_fuser.py
│   ├── fx_profiling_tutorial.py
│   ├── mario_rl_tutorial.py
│   ├── memory_format_tutorial.py
│   ├── model_parallel_tutorial.py
│   ├── named_tensor_tutorial.py
│   ├── pipeline_tutorial.py
│   ├── pruning_tutorial.py
│   ├── quantized_transfer_learning_tutorial.rst
│   ├── reinforcement_q_learning.py
│   ├── rpc_async_execution.rst
│   ├── rpc_param_server_tutorial.rst
│   ├── rpc_tutorial.rst
│   ├── seq2seq_translation_tutorial.py
│   ├── spatial_transformer_tutorial.py
│   ├── speech_command_recognition_with_torchaudio.py
│   ├── tensorboard_tutorial.rst
│   └── torchvision_tutorial.rst
├── prototype_source
│   ├── README.md
│   ├── README.txt
│   ├── distributed_rpc_profiling.rst
│   ├── fx_graph_mode_ptq_dynamic.py
│   ├── fx_graph_mode_ptq_static.rst
│   ├── fx_graph_mode_quant_guide.py
│   ├── graph_mode_dynamic_bert_tutorial.rst
│   ├── ios_gpu_workflow.rst
│   ├── lite_interpreter.rst
│   ├── nnapi_mobilenetv2.rst
│   ├── numeric_suite_tutorial.py
│   ├── prototype_index.rst
│   ├── torchscript_freezing.py
│   ├── vmap_recipe.py
│   └── vulkan_workflow.rst
├── recipes_source
│   ├── README.txt
│   ├── android_native_app_with_custom_op.rst
│   ├── cuda_rpc.rst
│   ├── deployment_with_flask.rst
│   ├── distributed_rpc_profiling.rst
│   ├── fuse.rst
│   ├── mobile_perf.rst
│   ├── model_preparation_android.rst
│   ├── model_preparation_ios.rst
│   ├── ptmobile_recipes_summary.rst
│   ├── quantization.rst
│   ├── recipes
│   │   ├── Captum_Recipe.py
│   │   ├── README.txt
│   │   ├── amp_recipe.py
│   │   ├── benchmark.py
│   │   ├── defining_a_neural_network.py
│   │   ├── dynamic_quantization.py
│   │   ├── loading_data_recipe.py
│   │   ├── profiler_recipe.py
│   │   ├── save_load_across_devices.py
│   │   ├── saving_and_loading_a_general_checkpoint.py
│   │   ├── saving_and_loading_models_for_inference.py
│   │   ├── saving_multiple_models_in_one_file.py
│   │   ├── tensorboard_with_pytorch.py
│   │   ├── timer_quick_start.py
│   │   ├── tuning_guide.py
│   │   ├── warmstarting_model_using_parameters_from_a_different_model.py
│   │   ├── what_is_state_dict.py
│   │   └── zeroing_out_gradients.py
│   ├── recipes_index.rst
│   ├── script_optimized.rst
│   ├── torchscript_inference.rst
│   └── zero_redundancy_optimizer.rst
├── requirements.txt
└── runtime.txt
/.circleci/scripts/build_for_windows.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | retry () {
5 | $* || (sleep 1 && $*) || (sleep 2 && $*)
6 | }
7 |
8 | SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
9 | PROJECT_DIR="${SOURCE_DIR}/../.."
10 | pushd $SOURCE_DIR
11 |
12 | #install wget and make
13 | curl -k https://ymu.dl.osdn.jp/mingw/68260/mingw-get-0.6.3-mingw32-pre-20170905-1-bin.zip -o mingw32.zip
14 | unzip mingw32.zip -d mingw32
15 | mingw32/bin/mingw-get.exe install mingw32-make
16 | mingw32/bin/mingw-get.exe install msys-findutils
17 | mv mingw32/bin/mingw32-make.exe mingw32/bin/make.exe
18 | curl -k https://eternallybored.org/misc/wget/1.20.3/64/wget.exe -o mingw32/bin/wget.exe
19 | export PATH="${SOURCE_DIR}/mingw32/bin:${SOURCE_DIR}/mingw32/msys/1.0/bin:$PATH"
20 |
21 | #install anaconda3
22 | export CONDA_HOME="${SOURCE_DIR}/conda"
23 | export tmp_conda="${SOURCE_DIR}/conda"
24 | export miniconda_exe="${SOURCE_DIR}/miniconda.exe"
25 | rm -rf conda miniconda.exe
26 | curl -k https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -o miniconda.exe
27 | ./install_conda.bat
28 | export PATH="${tmp_conda}:${tmp_conda}/Library/usr/bin:${tmp_conda}/Library/bin:${tmp_conda}/Scripts:${tmp_conda}/bin:$PATH"
29 |
30 | eval "$(conda shell.bash hook)"
31 | conda create -qyn testenv python=3.7
32 | conda activate testenv
33 |
34 | REQUIREMENTS="$(grep -v '^ *#\|^torch\|^torchaudio\|^torchvision\|^torchtext' $PROJECT_DIR/requirements.txt | grep .)"
35 | echo $REQUIREMENTS > requirements.txt
36 | pip install -r requirements.txt
37 | pip install pySoundFile
38 | # Force uninstall torch & related packages, we'll install them using conda later.
39 | pip uninstall -y torch torchvision torchtext
40 | conda install -yq -c pytorch "cudatoolkit=10.1" pytorch torchvision torchtext torchaudio
41 | python -m spacy download de
42 | python -m spacy download en
43 | pushd ${PROJECT_DIR}
44 | DIR=.jenkins
45 | export NUM_WORKERS=4
46 |
47 | if [[ "${CIRCLE_JOB}" == *worker_* ]]; then
48 | python $DIR/remove_runnable_code.py intermediate_source/model_parallel_tutorial.py intermediate_source/model_parallel_tutorial.py || true
49 | python $DIR/remove_runnable_code.py advanced_source/static_quantization_tutorial.py advanced_source/static_quantization_tutorial.py || true
50 | python $DIR/remove_runnable_code.py beginner_source/hyperparameter_tuning_tutorial.py beginner_source/hyperparameter_tuning_tutorial.py || true
51 | python $DIR/remove_runnable_code.py beginner_source/audio_preprocessing_tutorial.py beginner_source/audio_preprocessing_tutorial.py || true
52 | # Temp remove for mnist download issue.
53 | python $DIR/remove_runnable_code.py beginner_source/fgsm_tutorial.py beginner_source/fgsm_tutorial.py || true
54 |
55 | export WORKER_ID=$(echo "${CIRCLE_JOB}" | tr -dc '0-9')
56 | count=0
57 | FILES_TO_RUN=()
58 | for work_directory in beginner_source intermediate_source advanced_source recipes_source prototype_source; do
59 | for filename in $(find $work_directory -name '*.py' -not -path '*/data/*'); do
60 | if [ $(($count % $NUM_WORKERS)) != $WORKER_ID ]; then
61 | echo "Removing runnable code from "$filename
62 | python $DIR/remove_runnable_code.py $filename $filename
63 | else
64 | echo "Keeping "$filename
65 | FILES_TO_RUN+=($(basename $filename .py))
66 | fi
67 | count=$((count+1))
68 | done
69 | done
70 | echo "FILES_TO_RUN: " ${FILES_TO_RUN[@]}
71 | fi
72 |
73 | if [[ ! -d advanced_source/data || ! -d beginner_source/data || ! -d intermediate_source/data || ! -d prototype_source/data ]];then
74 | make download
75 | fi
76 |
77 | make html
78 |
--------------------------------------------------------------------------------
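
A note on the worker loop above: it shards tutorial files across CircleCI workers round-robin. Files are enumerated in a fixed order, and worker `i` keeps only the files whose index is congruent to `i` modulo `NUM_WORKERS`; runnable code is stripped from all the others. A minimal Python sketch of the same partitioning (the file names here are invented for illustration):

    # Round-robin sharding: worker `worker_id` keeps every file whose
    # enumeration index modulo num_workers equals worker_id.
    def shard(files, num_workers, worker_id):
        return [f for i, f in enumerate(files) if i % num_workers == worker_id]

    files = ["a_tutorial.py", "b_tutorial.py", "c_tutorial.py", "d_tutorial.py", "e_tutorial.py"]
    for worker_id in range(4):
        print(worker_id, shard(files, 4, worker_id))
    # 0 ['a_tutorial.py', 'e_tutorial.py']
    # 1 ['b_tutorial.py']
    # 2 ['c_tutorial.py']
    # 3 ['d_tutorial.py']

Every worker must enumerate the files in the same order for the shards to be disjoint, which is why each worker derives its list from the same `find` loop over the same source directories.
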
/.circleci/scripts/install_conda.bat:
--------------------------------------------------------------------------------
1 | start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
2 |
--------------------------------------------------------------------------------
/.circleci/scripts/windows_cuda_install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eux -o pipefail
3 |
4 | curl --retry 3 -kLO https://ossci-windows.s3.amazonaws.com/cuda_10.1.243_426.00_win10.exe
5 | 7z x cuda_10.1.243_426.00_win10.exe -ocuda_10.1.243_426.00_win10
6 | cd cuda_10.1.243_426.00_win10
7 | mkdir cuda_install_logs
8 |
9 | set +e
10 |
11 | ./setup.exe -s nvcc_10.1 cuobjdump_10.1 nvprune_10.1 cupti_10.1 cublas_10.1 cublas_dev_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 curand_10.1 curand_dev_10.1 cusolver_10.1 cusolver_dev_10.1 cusparse_10.1 cusparse_dev_10.1 nvgraph_10.1 nvgraph_dev_10.1 npp_10.1 npp_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 nvml_dev_10.1 -loglevel:6 -log:"$(pwd -W)/cuda_install_logs"
12 |
13 | set -e
14 |
15 | curl --retry 3 -kLO https://ossci-windows.s3.amazonaws.com/NvToolsExt.7z
16 | 7z x NvToolsExt.7z -oNvToolsExt
17 | mkdir -p "C:/Program Files/NVIDIA Corporation/NvToolsExt"
18 | cp -r NvToolsExt/* "C:/Program Files/NVIDIA Corporation/NvToolsExt/"
19 | export NVTOOLSEXT_PATH="C:\\Program Files\\NVIDIA Corporation\\NvToolsExt\\"
20 |
21 | if ! ls "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin/nvcc.exe"
22 | then
23 | echo "CUDA installation failed"
24 | mkdir -p /c/w/build-results
25 | 7z a "c:\\w\\build-results\\cuda_install_logs.7z" cuda_install_logs
26 | exit 1
27 | fi
28 |
29 | cd ..
30 | rm -rf ./cuda_10.1.243_426.00_win10
31 | rm -f ./cuda_10.1.243_426.00_win10.exe
32 |
--------------------------------------------------------------------------------
/.devcontainer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/vscode/devcontainers/python:3.8
2 |
3 | COPY requirements.txt /tmp/pip-tmp/
4 |
5 | RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
6 | && apt-get install git gcc unzip make -y \
7 | && pip3 install --disable-pip-version-check --no-cache-dir -r /tmp/pip-tmp/requirements.txt \
8 | && rm -rf /tmp/pip-tmp
9 |
--------------------------------------------------------------------------------
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "PyTorch Tutorials",
3 | "build": {
4 | "context": "..",
5 | "dockerfile": "Dockerfile",
6 | "args": {}
7 | },
8 | "settings": {
9 | "terminal.integrated.shell.linux": "/bin/bash",
10 | "workbench.startupEditor": "none",
11 | "files.autoSave": "afterDelay",
12 | "python.dataScience.enabled": true,
13 | "python.dataScience.alwaysTrustNotebooks": true,
14 | "python.insidersChannel": "weekly",
15 | "python.showStartPage": false
16 | },
17 | "extensions": ["ms-python.python", "lextudio.restructuredtext"]
18 | }
19 |
--------------------------------------------------------------------------------
/.devcontainer/requirements.txt:
--------------------------------------------------------------------------------
1 | # Refer to ./.jenkins/build.sh for tutorial build instructions
2 |
3 | sphinx==1.8.2
4 | sphinx-gallery==0.3.1
5 | tqdm
6 | numpy
7 | matplotlib
8 | torch
9 | torchvision
10 | torchtext
11 | torchaudio
12 | PyHamcrest
13 | bs4
14 | awscli==1.16.35
15 | flask
16 | spacy
17 | ray[tune]
18 |
19 | # PyTorch Theme
20 | -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
21 |
22 | ipython
23 |
24 | # to run examples
25 | pandas
26 | scikit-image
27 | pillow==8.1.1
28 | wget
29 |
30 | # for codespaces env
31 | pylint
32 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # sphinx things
2 | beginner
3 | intermediate
4 | advanced
5 | pytorch_basics
6 | recipes
7 | prototype
8 |
9 | #data things
10 | _data/
11 | advanced_source/images/
12 | advanced_source/data/
13 | beginner_source/.data/
14 | beginner_source/data/
15 | beginner_source/blitz/data/
16 | beginner_source/faces/
17 | beginner_source/hybrid_frontend/data/
18 | beginner_source/hymenoptera_data/
19 | intermediate_source/data/
20 | *.zip
21 | MNIST/
22 |
23 | #builds
24 | _build/
25 | _static/thumbs/
26 |
27 | # Byte-compiled / optimized / DLL files
28 | __pycache__/
29 | *.py[cod]
30 | *$py.class
31 |
32 | # C extensions
33 | *.so
34 |
35 | # Distribution / packaging
36 | src/
37 | .Python
38 | env/
39 | build/
40 | develop-eggs/
41 | dist/
42 | downloads/
43 | eggs/
44 | .eggs/
45 | lib/
46 | lib64/
47 | parts/
48 | sdist/
49 | var/
50 | *.egg-info/
51 | .installed.cfg
52 | *.egg
53 |
54 | # PyInstaller
55 | # Usually these files are written by a python script from a template
56 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
57 | *.manifest
58 | *.spec
59 |
60 | # Installer logs
61 | pip-log.txt
62 | pip-delete-this-directory.txt
63 |
64 | # Unit test / coverage reports
65 | htmlcov/
66 | .tox/
67 | .coverage
68 | .coverage.*
69 | .cache
70 | nosetests.xml
71 | coverage.xml
72 | *,cover
73 | .hypothesis/
74 |
75 | # Translations
76 | *.mo
77 | *.pot
78 |
79 | # Django stuff:
80 | *.log
81 | local_settings.py
82 |
83 | # Flask stuff:
84 | instance/
85 | .webassets-cache
86 |
87 | # Scrapy stuff:
88 | .scrapy
89 |
90 | # Sphinx documentation
91 | docs/_build/
92 |
93 | # PyBuilder
94 | target/
95 |
96 | # IPython Notebook
97 | .ipynb_checkpoints
98 |
99 | # pyenv
100 | .python-version
101 |
102 | # celery beat schedule file
103 | celerybeat-schedule
104 | # dotenv
105 | .env
106 |
107 | # virtualenv
108 | venv/
109 | ENV/
110 |
111 | # Spyder project settings
112 | .spyderproject
113 |
114 | # Rope project settings
115 | .ropeproject
116 |
117 | # Mac things
118 | .DS_Store
119 | cleanup.sh
120 | *.swp
121 |
122 | # PyTorch things
123 | *.pt
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "src/pytorch-sphinx-theme"]
2 | path = src/pytorch-sphinx-theme
3 | url = https://github.com/pytorch/pytorch_sphinx_theme
4 |
--------------------------------------------------------------------------------
/.jenkins/delete_html_file_with_runnable_code_removed.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | html_file_path = sys.argv[1]
5 |
6 | with open(html_file_path, 'r', encoding='utf-8') as html_file:
7 | html = html_file.read()
8 |
9 | if "%%%%%%RUNNABLE_CODE_REMOVED%%%%%%" in html:
10 | print("Removing " + html_file_path)
11 | os.remove(html_file_path)
12 |
--------------------------------------------------------------------------------
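
This script takes a single HTML file as its argument and deletes it if the sentinel left behind by `remove_runnable_code.py` is present. A hedged sketch of how it could be driven over a whole build tree (the `docs` directory name is an assumption, not taken from the script):

    # Hypothetical driver: apply the same sentinel check to every HTML file
    # under a build directory, deleting pages whose runnable code was removed.
    import os

    MARKER = "%%%%%%RUNNABLE_CODE_REMOVED%%%%%%"
    build_dir = "docs"  # assumed build output directory

    for root, _dirs, files in os.walk(build_dir):
        for name in files:
            if name.endswith(".html"):
                path = os.path.join(root, name)
                with open(path, "r", encoding="utf-8") as f:
                    if MARKER in f.read():
                        print("Removing " + path)
                        os.remove(path)
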
/.jenkins/remove_invisible_code_block_batch.sh:
--------------------------------------------------------------------------------
1 | BUILDDIR=$1
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
4 |
5 | # Remove INVISIBLE_CODE_BLOCK from .html/.rst/.rst.txt/.ipynb/.py files
6 | for filename in $(find $BUILDDIR/beginner $BUILDDIR/intermediate $BUILDDIR/advanced -name '*.html'); do
7 | echo "Removing INVISIBLE_CODE_BLOCK from " $filename
8 | python $DIR/remove_invisible_code_block_from_html.py $filename $filename
9 | done
10 | for filename in $(find $BUILDDIR/_sources/beginner $BUILDDIR/_sources/intermediate $BUILDDIR/_sources/advanced -name '*.rst.txt'); do
11 | echo "Removing INVISIBLE_CODE_BLOCK from " $filename
12 | python $DIR/remove_invisible_code_block_from_rst_txt.py $filename $filename
13 | done
14 | for filename in $(find $BUILDDIR/_downloads -name '*.ipynb'); do
15 | echo "Removing INVISIBLE_CODE_BLOCK from " $filename
16 | python $DIR/remove_invisible_code_block_from_ipynb.py $filename $filename
17 | done
18 | for filename in $(find $BUILDDIR/_downloads -name '*.py'); do
19 | echo "Removing INVISIBLE_CODE_BLOCK from " $filename
20 | python $DIR/remove_invisible_code_block_from_py.py $filename $filename
21 | done
22 |
--------------------------------------------------------------------------------
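
The four loops in this script differ only in the search root, the file suffix, and the remover they call. The same dispatch expressed as one table in Python (a sketch; the suffix-to-script mapping mirrors the loops above, but the `pathlib`-based traversal is an assumption, not how the repo does it):

    # One-table dispatch: route each build artifact to its marker-removal script.
    import pathlib
    import subprocess
    import sys

    REMOVERS = {
        ".html": "remove_invisible_code_block_from_html.py",
        ".rst.txt": "remove_invisible_code_block_from_rst_txt.py",
        ".ipynb": "remove_invisible_code_block_from_ipynb.py",
        ".py": "remove_invisible_code_block_from_py.py",
    }

    builddir = pathlib.Path(sys.argv[1])
    for path in builddir.rglob("*"):
        if not path.is_file():
            continue
        script = REMOVERS.get("".join(path.suffixes))  # ".rst.txt" has two suffixes
        if script is not None:
            subprocess.run([sys.executable, script, str(path), str(path)], check=True)

Note that this sketch scans the whole build tree, whereas the shell script deliberately restricts each suffix to particular subdirectories.
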
/.jenkins/remove_invisible_code_block_from_html.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from bs4 import BeautifulSoup
3 |
4 | html_file_path = sys.argv[1]
5 | output_file_path = sys.argv[2]
6 |
7 | with open(html_file_path, 'r', encoding='utf-8') as html_file:
8 | html = html_file.read()
9 | html_soup = BeautifulSoup(html, 'html.parser')
10 |
11 | elems = html_soup.find_all("div", {"class": "highlight-default"})
12 | for elem in elems:
13 | if "%%%%%%INVISIBLE_CODE_BLOCK%%%%%%" in str(elem):
14 | elem.decompose()
15 |
16 | with open(output_file_path, "w", encoding='utf-8') as output_file:
17 | output_file.write(str(html_soup))
18 |
--------------------------------------------------------------------------------
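
The `decompose()` call removes a tag and its contents from the parse tree in place. The pattern is easy to verify on a toy document; a self-contained demonstration (the markup below is invented for illustration):

    from bs4 import BeautifulSoup

    html = ('<div class="highlight-default">keep me</div>'
            '<div class="highlight-default"># %%%%%%INVISIBLE_CODE_BLOCK%%%%%%</div>')
    soup = BeautifulSoup(html, "html.parser")
    for div in soup.find_all("div", {"class": "highlight-default"}):
        if "%%%%%%INVISIBLE_CODE_BLOCK%%%%%%" in str(div):
            div.decompose()  # drop the marked block from the tree
    print(soup)  # -> <div class="highlight-default">keep me</div>
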
/.jenkins/remove_invisible_code_block_from_ipynb.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from bs4 import BeautifulSoup
3 |
4 | ipynb_file_path = sys.argv[1]
5 | output_file_path = sys.argv[2]
6 |
7 | with open(ipynb_file_path, 'r', encoding='utf-8') as ipynb_file:
8 | ipynb_lines = ipynb_file.readlines()
9 |
10 | ipynb_out_lines = []
11 |
12 | for line in ipynb_lines:
13 |     if '%%%%%%INVISIBLE_CODE_BLOCK%%%%%%' not in line:
14 | ipynb_out_lines.append(line)
15 |
16 | with open(output_file_path, "w", encoding='utf-8') as output_file:
17 | for line in ipynb_out_lines:
18 | output_file.write(line)
19 |
--------------------------------------------------------------------------------
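
This remover treats the notebook as plain text and drops any raw JSON line containing the marker, which relies on each marker occupying its own line in the serialized cell sources. A more structure-aware variant (an alternative sketch, not what the repo uses) would filter at the cell level with the `json` module, so the output is guaranteed to stay valid JSON:

    # Alternative sketch: drop marked lines from each cell's "source" array.
    import json
    import sys

    MARKER = "%%%%%%INVISIBLE_CODE_BLOCK%%%%%%"

    with open(sys.argv[1], "r", encoding="utf-8") as f:
        notebook = json.load(f)

    for cell in notebook.get("cells", []):
        cell["source"] = [line for line in cell["source"] if MARKER not in line]

    with open(sys.argv[2], "w", encoding="utf-8") as f:
        json.dump(notebook, f, indent=1)
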
/.jenkins/remove_invisible_code_block_from_py.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from bs4 import BeautifulSoup
3 |
4 | py_file_path = sys.argv[1]
5 | output_file_path = sys.argv[2]
6 |
7 | with open(py_file_path, 'r', encoding='utf-8') as py_file:
8 | py_lines = py_file.readlines()
9 |
10 | py_out_lines = []
11 |
12 | in_invisible_block = False
13 | for line in py_lines:
14 | if not in_invisible_block:
15 | if '%%%%%%INVISIBLE_CODE_BLOCK%%%%%%' in line:
16 | in_invisible_block = True
17 | else:
18 | py_out_lines.append(line)
19 | else:
20 | if '%%%%%%INVISIBLE_CODE_BLOCK%%%%%%' in line:
21 | in_invisible_block = False
22 |
23 | with open(output_file_path, "w", encoding='utf-8') as output_file:
24 | for line in py_out_lines:
25 | output_file.write(line)
26 |
--------------------------------------------------------------------------------
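
The `in_invisible_block` flag drops everything between a pair of marker lines, the marker lines included. A quick synthetic check of that behavior (the input lines are invented for illustration):

    MARKER = "%%%%%%INVISIBLE_CODE_BLOCK%%%%%%"
    lines = ["a\n", "# " + MARKER + "\n", "hidden\n", "# " + MARKER + "\n", "b\n"]

    out, hidden = [], False
    for line in lines:
        if MARKER in line:
            hidden = not hidden  # entering or leaving an invisible block
        elif not hidden:
            out.append(line)
    print(out)  # -> ['a\n', 'b\n']

The toggle form is equivalent to the two-branch state machine above for well-formed marker pairs; in both versions an unpaired marker hides the rest of the file.
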
/.jenkins/remove_invisible_code_block_from_rst_txt.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from bs4 import BeautifulSoup
3 |
4 | rst_txt_file_path = sys.argv[1]
5 | output_file_path = sys.argv[2]
6 |
7 | with open(rst_txt_file_path, 'r', encoding='utf-8') as rst_txt_file:
8 | rst_txt = rst_txt_file.read()
9 |
10 | splits = rst_txt.split('.. code-block:: default\n\n\n # %%%%%%INVISIBLE_CODE_BLOCK%%%%%%\n')
11 | if len(splits) == 2:
12 | code_before_invisible_block = splits[0]
13 | code_after_invisible_block = splits[1].split(' # %%%%%%INVISIBLE_CODE_BLOCK%%%%%%\n')[1]
14 | rst_txt_out = code_before_invisible_block + code_after_invisible_block
15 | else:
16 | rst_txt_out = rst_txt
17 |
18 | with open(output_file_path, "w", encoding='utf-8') as output_file:
19 | output_file.write(rst_txt_out)
20 |
--------------------------------------------------------------------------------
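
One caveat worth noting: the `len(splits) == 2` guard means the rewrite only fires when the file contains exactly one invisible block; with zero blocks, or with two or more, the text passes through unchanged. A hedged sketch of a variant that strips every marker-delimited span (the regex is an assumption about the generated layout, and it ignores the surrounding `.. code-block:: default` directive that the original also strips):

    # Alternative sketch: remove every span delimited by a pair of marker lines.
    import re

    MARKER = re.escape("# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%")
    PATTERN = re.compile(MARKER + ".*?" + MARKER + r"\n", flags=re.DOTALL)

    def strip_invisible_blocks(rst_txt):
        return PATTERN.sub("", rst_txt)
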
/.jenkins/remove_runnable_code.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE = "STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE"
4 | STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE = "STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE"
5 | STATE_NORMAL = "STATE_NORMAL"
6 |
7 | python_file_path = sys.argv[1]
8 | output_file_path = sys.argv[2]
9 |
10 | with open(python_file_path, 'r', encoding='utf-8') as file:
11 | lines = file.readlines()
12 | ret_lines = []
13 | state = STATE_NORMAL
14 | for line in lines:
15 | if state == STATE_NORMAL:
16 | if line.startswith('#'):
17 | ret_lines.append(line)
18 | state = STATE_NORMAL
19 | elif ((line.startswith('"""') or line.startswith('r"""')) and
20 | line.endswith('"""')):
21 | ret_lines.append(line)
22 | state = STATE_NORMAL
23 | elif line.startswith('"""') or line.startswith('r"""'):
24 | ret_lines.append(line)
25 | state = STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE
26 | elif ((line.startswith("'''") or line.startswith("r'''")) and
27 | line.endswith("'''")):
28 | ret_lines.append(line)
29 | state = STATE_NORMAL
30 | elif line.startswith("'''") or line.startswith("r'''"):
31 | ret_lines.append(line)
32 | state = STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE
33 | else:
34 | ret_lines.append("\n")
35 | state = STATE_NORMAL
36 | elif state == STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE:
37 | if line.startswith('"""'):
38 | ret_lines.append(line)
39 | state = STATE_NORMAL
40 | else:
41 | ret_lines.append(line)
42 | state = STATE_IN_MULTILINE_COMMENT_BLOCK_DOUBLE_QUOTE
43 | elif state == STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE:
44 | if line.startswith("'''"):
45 | ret_lines.append(line)
46 | state = STATE_NORMAL
47 | else:
48 | ret_lines.append(line)
49 | state = STATE_IN_MULTILINE_COMMENT_BLOCK_SINGLE_QUOTE
50 |
51 | ret_lines.append("\n# %%%%%%RUNNABLE_CODE_REMOVED%%%%%%")
52 |
53 | with open(output_file_path, 'w', encoding='utf-8') as file:
54 | for line in ret_lines:
55 | file.write(line)
56 |
--------------------------------------------------------------------------------
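
The net effect of this state machine is to keep module docstrings and `#` comment lines (the narrative text that sphinx-gallery renders) while replacing every executable line with a blank line, then append the sentinel that `delete_html_file_with_runnable_code_removed.py` looks for later in the pipeline. For a typical sphinx-gallery tutorial, an input beginning

    """
    My Tutorial
    ===========
    """
    # A narrative comment survives.
    import torch
    x = torch.rand(3)

would come out with the docstring and comment intact, the two code lines blanked, and `# %%%%%%RUNNABLE_CODE_REMOVED%%%%%%` appended at the end. Note that lines from `readlines()` keep their trailing newline, so the `line.endswith('"""')` single-line checks rarely match in practice; a one-line docstring is instead treated as opening a multiline block.
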
/.jenkins/replace_tutorial_html_content.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | noplot_html_file_path = sys.argv[1]
4 | hasplot_html_file_path = sys.argv[2]
5 | output_html_file_path = sys.argv[3]
6 |
7 | from bs4 import BeautifulSoup
8 | with open(noplot_html_file_path, 'r', encoding='utf-8') as noplot_html_file:
9 | noplot_html = noplot_html_file.read()
10 | with open(hasplot_html_file_path, 'r', encoding='utf-8') as hasplot_html_file:
11 | hasplot_html = hasplot_html_file.read()
12 |
13 | noplot_html_soup = BeautifulSoup(noplot_html, 'html.parser')
14 | elems = noplot_html_soup.find_all("div", {"class": "sphx-glr-example-title"})
15 | if len(elems) == 0:
16 | print("No match found, not replacing HTML content in "+noplot_html_file_path)
17 | elif len(elems) == 1:
18 | print("Match found in "+noplot_html_file_path+". Replacing its content.")
19 | elem = elems[0]
20 | elem.replace_with(BeautifulSoup(hasplot_html, 'html.parser').find_all("div", {"class": "sphx-glr-example-title"})[0])
21 | with open(output_html_file_path, "w", encoding='utf-8') as output_html_file:
22 | output_html_file.write(str(noplot_html_soup))
23 | else:
24 | raise Exception("Found more than one match in "+noplot_html_file_path+". Aborting.")
25 |
--------------------------------------------------------------------------------
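
The replacement swaps the whole `sphx-glr-example-title` div in the no-plot render for the corresponding div from the with-plot render. The same `replace_with` pattern in isolation, on toy markup (invented for illustration):

    from bs4 import BeautifulSoup

    noplot = BeautifulSoup('<div class="sphx-glr-example-title">no plots</div>', "html.parser")
    hasplot = BeautifulSoup('<div class="sphx-glr-example-title">with plots</div>', "html.parser")

    target = noplot.find_all("div", {"class": "sphx-glr-example-title"})[0]
    target.replace_with(hasplot.find_all("div", {"class": "sphx-glr-example-title"})[0])
    print(noplot)  # -> <div class="sphx-glr-example-title">with plots</div>
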
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, Pytorch contributors
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyTorch Tutorials
2 |
3 |
4 | All the tutorials are now presented as sphinx style documentation at:
5 |
6 | ## [https://pytorch.org/tutorials](https://pytorch.org/tutorials)
7 |
8 |
9 |
10 | # Contributing
11 |
12 | We use sphinx-gallery's [notebook styled examples](https://sphinx-gallery.github.io/stable/tutorials/index.html) to create the tutorials. The syntax is simple: in essence, you write a lightly formatted Python file and it shows up as an HTML documentation page.
13 |
14 | Here's how to create a new tutorial or recipe:
15 | 1. Create a notebook-styled Python file. If you want it executed while inserted into documentation, save the file with the suffix `tutorial` so that the file name is `your_tutorial.py`.
16 | 2. Put it in one of the `beginner_source`, `intermediate_source`, or `advanced_source` directories based on the level of difficulty. If it is a recipe, add it to `recipes_source`.
17 | 3. For tutorials (except prototype features), include it in the TOC tree at `index.rst`.
18 | 4. For tutorials (except prototype features), create a thumbnail in the [index.rst file](https://github.com/pytorch/tutorials/blob/master/index.rst) using a command like `.. customcarditem:: beginner/your_tutorial.html`. For recipes, create a thumbnail in [recipes_index.rst](https://github.com/pytorch/tutorials/blob/master/recipes_source/recipes_index.rst).
19 |
20 | If you prefer to write your tutorial in Jupyter, you can use [this script](https://gist.github.com/chsasank/7218ca16f8d022e02a9c0deb94a310fe) to convert the notebook to a Python file. After conversion and addition to the project, please make sure the section headings and similar structures are in logical order.
21 |
22 | ## Building
23 |
24 | - Start by installing torch, torchvision, and your GPU's latest drivers. Install the other requirements using `pip install -r requirements.txt`.
25 |
26 | > If you want to use `virtualenv`, make your environment in a `venv` directory like: `virtualenv ./venv`, then `source ./venv/bin/activate`.
27 |
28 | - Then you can build using `make docs`. This will download the data, execute the tutorials, and build the documentation into the `docs/` directory. It takes about 60-120 minutes on a system with GPUs; if you do not have a GPU, see the next step.
29 | - You can skip the computationally intensive graph generation by running `make html-noplot`, which builds basic HTML documentation in `_build/html`. This way, you can quickly preview your tutorial.
30 |
31 | > If you get **ModuleNotFoundError: No module named 'pytorch_sphinx_theme' make: *** [html-noplot] Error 2**, run `python setup.py install` from `/tutorials/src/pytorch-sphinx-theme`.
32 |
33 |
34 | ## About contributing to PyTorch Documentation and Tutorials
35 | * You can find information about contributing to PyTorch documentation in the
36 | PyTorch Repo [README.md](https://github.com/pytorch/pytorch/blob/master/README.md) file.
37 | * Additional information can be found in [PyTorch CONTRIBUTING.md](https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md).
38 |
--------------------------------------------------------------------------------
/_static/ajax-loader.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/ajax-loader.gif
--------------------------------------------------------------------------------
/_static/broken_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/broken_example.png
--------------------------------------------------------------------------------
/_static/comment-bright.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/comment-bright.png
--------------------------------------------------------------------------------
/_static/comment-close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/comment-close.png
--------------------------------------------------------------------------------
/_static/comment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/comment.png
--------------------------------------------------------------------------------
/_static/css/pytorch_theme.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;
3 | }
4 |
5 | /* Default header fonts are ugly */
6 | h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend, p.caption {
7 | font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;
8 | }
9 |
10 | /* Use white for docs background */
11 | .wy-side-nav-search {
12 | background-color: #fff;
13 | }
14 |
15 | .wy-nav-content-wrap, .wy-menu li.current > a {
16 | background-color: #fff;
17 | }
18 |
19 | @media screen and (min-width: 1400px) {
20 | .wy-nav-content-wrap {
21 | background-color: rgba(0, 0, 0, 0.0470588);
22 | }
23 |
24 | .wy-nav-content {
25 | background-color: #fff;
26 | }
27 | }
28 |
29 | /* Fixes for mobile */
30 | .wy-nav-top {
31 | background-color: #fff;
32 | background-image: url('../img/pytorch-logo-dark.svg');
33 | background-repeat: no-repeat;
34 | background-position: center;
35 | padding: 0;
36 | margin: 0.4045em 0.809em;
37 | color: #333;
38 | }
39 |
40 | .wy-nav-top > a {
41 | display: none;
42 | }
43 |
44 | @media screen and (max-width: 768px) {
45 | .wy-side-nav-search>a img.logo {
46 | height: 60px;
47 | }
48 | }
49 |
50 | /* This is needed to ensure that logo above search scales properly */
51 | .wy-side-nav-search a {
52 | display: block;
53 | }
54 |
55 | /* This ensures that multiple constructors will remain in separate lines. */
56 | .rst-content dl:not(.docutils) dt {
57 | display: table;
58 | }
59 |
60 | /* Use our red for literals (it's very similar to the original color) */
61 | .rst-content tt.literal, .rst-content code.literal {
62 | color: #F05732;
63 | }
64 |
65 | .rst-content tt.xref, .rst-content code.xref,
66 | a .rst-content tt, a .rst-content code {
67 | color: #404040;
68 | }
69 |
70 | /* Change link colors (except for the menu) */
71 |
72 | a {
73 | color: #F05732;
74 | }
75 |
76 | a:hover {
77 | color: #F05732;
78 | }
79 |
80 |
81 | a:visited {
82 | color: #D44D2C;
83 | }
84 |
85 | .wy-menu a {
86 | color: #b3b3b3;
87 | }
88 |
89 | .wy-menu a:hover {
90 | color: #b3b3b3;
91 | }
92 |
93 | a.icon.icon-home {
94 | color: #D44D2C;
95 | }
96 |
97 | .version{
98 | color: #D44D2C !important;
99 | }
100 |
101 | /* Default footer text is quite big */
102 | footer {
103 | font-size: 80%;
104 | }
105 |
106 | footer .rst-footer-buttons {
107 | font-size: 125%; /* revert footer settings - 1/80% = 125% */
108 | }
109 |
110 | footer p {
111 | font-size: 100%;
112 | }
113 |
114 | /* For hidden headers that appear in TOC tree */
115 | /* see https://stackoverflow.com/a/32363545/3343043 */
116 | .rst-content .hidden-section {
117 | display: none;
118 | }
119 |
120 | nav .hidden-section {
121 | display: inherit;
122 | }
123 |
124 | /* Make code blocks have a background */
125 | .codeblock,pre.literal-block,.rst-content .literal-block,.rst-content pre.literal-block,div[class^='highlight'] {
126 | background: rgba(0, 0, 0, 0.0470588);
127 | }
128 |
--------------------------------------------------------------------------------
/_static/documentation_options.js:
--------------------------------------------------------------------------------
1 | var DOCUMENTATION_OPTIONS = {
2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
3 | VERSION: '0.5.0a0+a24163a',
4 | LANGUAGE: 'None',
5 | COLLAPSE_INDEX: false,
6 | FILE_SUFFIX: '.html',
7 | HAS_SOURCE: true,
8 | SOURCELINK_SUFFIX: '.txt'
9 | };
--------------------------------------------------------------------------------
/_static/down-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/down-pressed.png
--------------------------------------------------------------------------------
/_static/down.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/down.png
--------------------------------------------------------------------------------
/_static/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/file.png
--------------------------------------------------------------------------------
/_static/fonts/FreightSans/freight-sans-light.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/FreightSans/freight-sans-light.woff
--------------------------------------------------------------------------------
/_static/fonts/FreightSans/freight-sans-light.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/FreightSans/freight-sans-light.woff2
--------------------------------------------------------------------------------
/_static/fonts/FreightSans/freight-sans-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/FreightSans/freight-sans-regular.woff
--------------------------------------------------------------------------------
/_static/fonts/FreightSans/freight-sans-regular.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/FreightSans/freight-sans-regular.woff2
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-Light.woff2
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff
--------------------------------------------------------------------------------
/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2
--------------------------------------------------------------------------------
/_static/images/arrow-down-orange.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/arrow-right-with-tail.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/chevron-down-grey.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/chevron-right-orange.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/chevron-right-white.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/home-footer-background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/images/home-footer-background.jpg
--------------------------------------------------------------------------------
/_static/images/icon-close.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/icon-menu-dots-dark.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/logo-dark.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/logo-facebook-dark.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/logo-icon.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/logo-twitter-dark.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/logo.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/images/view-page-source-icon.svg:
--------------------------------------------------------------------------------
(SVG markup stripped in this dump)
--------------------------------------------------------------------------------
/_static/img/8_workers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/8_workers.png
--------------------------------------------------------------------------------
/_static/img/ONNXLive.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/ONNXLive.png
--------------------------------------------------------------------------------
/_static/img/SRResNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/SRResNet.png
--------------------------------------------------------------------------------
/_static/img/Variable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/Variable.png
--------------------------------------------------------------------------------
/_static/img/audio_preprocessing_tutorial_waveform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/audio_preprocessing_tutorial_waveform.png
--------------------------------------------------------------------------------
/_static/img/autodiff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/autodiff.png
--------------------------------------------------------------------------------
/_static/img/basics/comp-graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/basics/comp-graph.png
--------------------------------------------------------------------------------
/_static/img/basics/fashion_mnist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/basics/fashion_mnist.png
--------------------------------------------------------------------------------
/_static/img/basics/optimizationloops.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/basics/optimizationloops.png
--------------------------------------------------------------------------------
/_static/img/basics/typesdata.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/basics/typesdata.png
--------------------------------------------------------------------------------
/_static/img/bert.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/bert.png
--------------------------------------------------------------------------------
/_static/img/bert_mrpc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/bert_mrpc.png
--------------------------------------------------------------------------------
/_static/img/cartpole.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cartpole.gif
--------------------------------------------------------------------------------
/_static/img/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cat.jpg
--------------------------------------------------------------------------------
/_static/img/cat_224x224.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cat_224x224.jpg
--------------------------------------------------------------------------------
/_static/img/cat_superres_with_ort.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cat_superres_with_ort.jpg
--------------------------------------------------------------------------------
/_static/img/channels_last_memory_format.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/channels_last_memory_format.png
--------------------------------------------------------------------------------
/_static/img/char_rnn_generation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/char_rnn_generation.png
--------------------------------------------------------------------------------
/_static/img/chat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chat.png
--------------------------------------------------------------------------------
/_static/img/chatbot/RNN-bidirectional.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/RNN-bidirectional.png
--------------------------------------------------------------------------------
/_static/img/chatbot/attn1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/attn1.png
--------------------------------------------------------------------------------
/_static/img/chatbot/attn2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/attn2.png
--------------------------------------------------------------------------------
/_static/img/chatbot/bot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/bot.png
--------------------------------------------------------------------------------
/_static/img/chatbot/diff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/diff.png
--------------------------------------------------------------------------------
/_static/img/chatbot/global_attn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/global_attn.png
--------------------------------------------------------------------------------
/_static/img/chatbot/grad_clip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/grad_clip.png
--------------------------------------------------------------------------------
/_static/img/chatbot/pytorch_workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/pytorch_workflow.png
--------------------------------------------------------------------------------
/_static/img/chatbot/scores.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/scores.png
--------------------------------------------------------------------------------
/_static/img/chatbot/seq2seq_batches.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/seq2seq_batches.png
--------------------------------------------------------------------------------
/_static/img/chatbot/seq2seq_ts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/chatbot/seq2seq_ts.png
--------------------------------------------------------------------------------
/_static/img/cifar10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cifar10.png
--------------------------------------------------------------------------------
/_static/img/classic_memory_format.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/classic_memory_format.png
--------------------------------------------------------------------------------
/_static/img/compare_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/compare_output.png
--------------------------------------------------------------------------------
/_static/img/compare_stub.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/compare_stub.png
--------------------------------------------------------------------------------
/_static/img/cpp-frontend/digits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cpp-frontend/digits.png
--------------------------------------------------------------------------------
/_static/img/cpp-pytorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cpp-pytorch.png
--------------------------------------------------------------------------------
/_static/img/cpp_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/cpp_logo.png
--------------------------------------------------------------------------------
/_static/img/dag_autograd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/dag_autograd.png
--------------------------------------------------------------------------------
/_static/img/data_parallel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/data_parallel.png
--------------------------------------------------------------------------------
/_static/img/dcgan_generator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/dcgan_generator.png
--------------------------------------------------------------------------------
/_static/img/deeplabv3_android.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/deeplabv3_android.png
--------------------------------------------------------------------------------
/_static/img/deeplabv3_android2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/deeplabv3_android2.png
--------------------------------------------------------------------------------
/_static/img/deeplabv3_ios.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/deeplabv3_ios.png
--------------------------------------------------------------------------------
/_static/img/deeplabv3_ios2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/deeplabv3_ios2.png
--------------------------------------------------------------------------------
/_static/img/distributed/DistPyTorch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/DistPyTorch.jpg
--------------------------------------------------------------------------------
/_static/img/distributed/all_gather.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/all_gather.pdf
--------------------------------------------------------------------------------
/_static/img/distributed/all_gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/all_gather.png
--------------------------------------------------------------------------------
/_static/img/distributed/all_reduce.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/all_reduce.pdf
--------------------------------------------------------------------------------
/_static/img/distributed/all_reduce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/all_reduce.png
--------------------------------------------------------------------------------
/_static/img/distributed/broadcast.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/broadcast.png
--------------------------------------------------------------------------------
/_static/img/distributed/gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/gather.png
--------------------------------------------------------------------------------
/_static/img/distributed/reduce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/reduce.png
--------------------------------------------------------------------------------
/_static/img/distributed/scatter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/scatter.png
--------------------------------------------------------------------------------
/_static/img/distributed/send_recv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/send_recv.png
--------------------------------------------------------------------------------
/_static/img/distributed/send_recv_big.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/distributed/send_recv_big.png
--------------------------------------------------------------------------------
/_static/img/dynamic_graph.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/dynamic_graph.gif
--------------------------------------------------------------------------------
/_static/img/fgsm_panda_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/fgsm_panda_image.png
--------------------------------------------------------------------------------
/_static/img/flask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/flask.png
--------------------------------------------------------------------------------
/_static/img/hybrid_frontend/220px-KnnClassification.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/hybrid_frontend/220px-KnnClassification.png
--------------------------------------------------------------------------------
/_static/img/hybrid_frontend/iris_pic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/hybrid_frontend/iris_pic.jpg
--------------------------------------------------------------------------------
/_static/img/hybrid_frontend/pytorch_workflow_small.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/hybrid_frontend/pytorch_workflow_small.jpg
--------------------------------------------------------------------------------
/_static/img/landmarked_face2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/landmarked_face2.png
--------------------------------------------------------------------------------
/_static/img/mario.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/mario.gif
--------------------------------------------------------------------------------
/_static/img/mario_env.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/mario_env.png
--------------------------------------------------------------------------------
/_static/img/memory_format_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/memory_format_logo.png
--------------------------------------------------------------------------------
/_static/img/mnist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/mnist.png
--------------------------------------------------------------------------------
/_static/img/model-parallel-images/mp_vs_rn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/model-parallel-images/mp_vs_rn.png
--------------------------------------------------------------------------------
/_static/img/model-parallel-images/mp_vs_rn_vs_pp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/model-parallel-images/mp_vs_rn_vs_pp.png
--------------------------------------------------------------------------------
/_static/img/model-parallel-images/split_size_tradeoff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/model-parallel-images/split_size_tradeoff.png
--------------------------------------------------------------------------------
/_static/img/named_tensor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/named_tensor.png
--------------------------------------------------------------------------------
/_static/img/neural-style/dancing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/dancing.jpg
--------------------------------------------------------------------------------
/_static/img/neural-style/neuralstyle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/neuralstyle.png
--------------------------------------------------------------------------------
/_static/img/neural-style/picasso.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/picasso.jpg
--------------------------------------------------------------------------------
/_static/img/neural-style/sphx_glr_neural_style_tutorial_001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/sphx_glr_neural_style_tutorial_001.png
--------------------------------------------------------------------------------
/_static/img/neural-style/sphx_glr_neural_style_tutorial_002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/sphx_glr_neural_style_tutorial_002.png
--------------------------------------------------------------------------------
/_static/img/neural-style/sphx_glr_neural_style_tutorial_003.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/sphx_glr_neural_style_tutorial_003.png
--------------------------------------------------------------------------------
/_static/img/neural-style/sphx_glr_neural_style_tutorial_004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/neural-style/sphx_glr_neural_style_tutorial_004.png
--------------------------------------------------------------------------------
/_static/img/oneworker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/oneworker.png
--------------------------------------------------------------------------------
/_static/img/panda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/panda.png
--------------------------------------------------------------------------------
/_static/img/per_channel_quant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/per_channel_quant.png
--------------------------------------------------------------------------------
/_static/img/per_tensor_quant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/per_tensor_quant.png
--------------------------------------------------------------------------------
/_static/img/pruning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/pruning.png
--------------------------------------------------------------------------------
/_static/img/pytorch-logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/pytorch-logo-dark.png
--------------------------------------------------------------------------------
/_static/img/pytorch-logo-dark.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/pytorch-logo-dark.svg
--------------------------------------------------------------------------------
/_static/img/qat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/qat.png
--------------------------------------------------------------------------------
/_static/img/quant_asym.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/quant_asym.png
--------------------------------------------------------------------------------
/_static/img/quant_embeddings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/quant_embeddings.png
--------------------------------------------------------------------------------
/_static/img/quantized_transfer_learning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/quantized_transfer_learning.png
--------------------------------------------------------------------------------
/_static/img/ray-tune.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/ray-tune.png
--------------------------------------------------------------------------------
/_static/img/reinforcement_learning_diagram.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/reinforcement_learning_diagram.jpg
--------------------------------------------------------------------------------
/_static/img/rnnclass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/rnnclass.png
--------------------------------------------------------------------------------
/_static/img/rpc-images/batch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/rpc-images/batch.png
--------------------------------------------------------------------------------
/_static/img/rpc_trace_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/rpc_trace_img.png
--------------------------------------------------------------------------------
/_static/img/sample_file.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/sample_file.jpeg
--------------------------------------------------------------------------------
/_static/img/scipynumpy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/scipynumpy.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/attention-decoder-network.dot:
--------------------------------------------------------------------------------
digraph G {

// Main styles
nodesep=0.3; ranksep=0.15;

node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
edge [color=gray, arrowsize=0.5];

// Layout
{rank=same;input;prev_hidden;encoder_outputs}

input -> embedding;
embedding -> dropout;
dropout -> embedded;

embedded -> attn;
prev_hidden -> attn;
attn -> attn_softmax;
attn_softmax -> attn_weights;
attn_weights -> bmm;
encoder_outputs -> bmm;
bmm -> attn_applied;
attn_applied -> attn_combine;
embedded -> attn_combine;

attn_combine -> relu -> gru;
prev_hidden -> gru;
gru -> out;
gru -> hidden;

out -> softmax;
softmax -> output;

{rank=same;output;hidden}

// Layer nodes
embedding [fillcolor=dodgerblue, fontcolor=white];
attn [fillcolor=dodgerblue, fontcolor=white];
attn_combine [fillcolor=dodgerblue, fontcolor=white];
bmm [fillcolor=dodgerblue, fontcolor=white];
gru [fillcolor=dodgerblue, fontcolor=white];
out [fillcolor=dodgerblue, fontcolor=white];

// Function nodes
dropout [fillcolor=palegreen];
relu [fillcolor=palegreen];
softmax [fillcolor=palegreen];
attn_softmax [fillcolor=palegreen];

}
--------------------------------------------------------------------------------
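The .dot source above is the attention-decoder diagram for the seq2seq translation tutorial. As a rough illustration only, here is a minimal PyTorch sketch of a module wired the same way (embedding -> dropout, attention weights computed from the embedded input and previous hidden state, a bmm against the encoder outputs, then attn_combine -> relu -> gru -> out -> softmax). The class name AttnDecoderRNN, the constructor arguments, and the one-token-at-a-time tensor shapes are assumptions, not code taken from this repository.

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=10):
        super().__init__()
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.dropout = nn.Dropout(dropout_p)
        self.attn = nn.Linear(hidden_size * 2, max_length)           # "attn" node
        self.attn_combine = nn.Linear(hidden_size * 2, hidden_size)  # "attn_combine" node
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, input, prev_hidden, encoder_outputs):
        # Assumed shapes: input (1, 1) token index; prev_hidden (1, 1, hidden);
        # encoder_outputs (max_length, hidden).
        embedded = self.dropout(self.embedding(input).view(1, 1, -1))
        # attn -> attn_softmax -> attn_weights
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], prev_hidden[0]), 1)), dim=1)
        # attn_weights x encoder_outputs via bmm -> attn_applied
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))
        # attn_applied + embedded -> attn_combine -> relu -> gru
        combined = torch.cat((embedded[0], attn_applied[0]), 1)
        gru_input = F.relu(self.attn_combine(combined)).unsqueeze(0)
        gru_output, hidden = self.gru(gru_input, prev_hidden)
        # out -> softmax -> output (log variant, the usual pairing with NLLLoss)
        output = F.log_softmax(self.out(gru_output[0]), dim=1)
        return output, hidden, attn_weights
--------------------------------------------------------------------------------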
/_static/img/seq-seq-images/attention-decoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/attention-decoder-network.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/decoder-network.dot:
--------------------------------------------------------------------------------
digraph G {

// Main styles
nodesep=0.3; ranksep=0.15;

node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
edge [color=gray, arrowsize=0.5];

// Layout
{rank=same;input;prev_hidden}

input -> embedding;
embedding -> relu;
relu -> gru;

prev_hidden -> gru;
gru -> out;
gru -> hidden;

out -> softmax;
softmax -> output;

{rank=same;output;hidden}

// Layer nodes
embedding [fillcolor=dodgerblue, fontcolor=white];
gru [fillcolor=dodgerblue, fontcolor=white];
out [fillcolor=dodgerblue, fontcolor=white];

// Function nodes
relu [fillcolor=palegreen];
softmax [fillcolor=palegreen];

}
--------------------------------------------------------------------------------
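The plain (no-attention) decoder diagram above maps onto a module like the following. Again a hedged sketch under assumed names and shapes (DecoderRNN, one token per step), not code from this repository.

import torch
import torch.nn as nn
import torch.nn.functional as F

class DecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size):
        super().__init__()
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, input, prev_hidden):
        # input -> embedding -> relu -> gru (with prev_hidden)
        embedded = F.relu(self.embedding(input).view(1, 1, -1))
        gru_output, hidden = self.gru(embedded, prev_hidden)
        # out -> softmax -> output (log variant for NLLLoss)
        output = F.log_softmax(self.out(gru_output[0]), dim=1)
        return output, hidden
--------------------------------------------------------------------------------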
/_static/img/seq-seq-images/decoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/decoder-network.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/decoder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/decoder.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/decoder@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/decoder@2x.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/encoder-network.dot:
--------------------------------------------------------------------------------
digraph G {

// Main styles
nodesep=0.3; ranksep=0.15;

node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
edge [color=gray, arrowsize=0.5];

// Layout
{rank=same;input;prev_hidden}

input -> embedding;
embedding -> embedded;
embedded -> gru;
prev_hidden -> gru;
gru -> output;
gru -> hidden;

// Layer nodes
embedding [fillcolor=dodgerblue, fontcolor=white];
gru [fillcolor=dodgerblue, fontcolor=white];

}
--------------------------------------------------------------------------------
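For completeness, a matching sketch of the encoder diagram above; the class name EncoderRNN and the (1, 1) per-token input shape are assumptions. Feeding this encoder's final hidden state in as the decoder's first prev_hidden is what links the two halves of the seq2seq model.

import torch
import torch.nn as nn

class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, prev_hidden):
        # input -> embedding -> embedded -> gru (with prev_hidden)
        embedded = self.embedding(input).view(1, 1, -1)
        output, hidden = self.gru(embedded, prev_hidden)
        return output, hidden
--------------------------------------------------------------------------------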
/_static/img/seq-seq-images/encoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/encoder-network.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/seq2seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/seq2seq.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/seq2seq@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/seq2seq@2x.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/word-encoding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/word-encoding.png
--------------------------------------------------------------------------------
/_static/img/seq-seq-images/word-encoding@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq-seq-images/word-encoding@2x.png
--------------------------------------------------------------------------------
/_static/img/seq2seq_flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/seq2seq_flat.png
--------------------------------------------------------------------------------
/_static/img/shadow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/shadow.png
--------------------------------------------------------------------------------
/_static/img/steam-train-whistle-daniel_simon-converted-from-mp3.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/steam-train-whistle-daniel_simon-converted-from-mp3.wav
--------------------------------------------------------------------------------
/_static/img/stn/FSeq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/stn/FSeq.png
--------------------------------------------------------------------------------
/_static/img/stn/Five.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/stn/Five.gif
--------------------------------------------------------------------------------
/_static/img/stn/stn-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/stn/stn-arch.png
--------------------------------------------------------------------------------
/_static/img/stn/tr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/stn/tr.png
--------------------------------------------------------------------------------
/_static/img/tensor_illustration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensor_illustration.png
--------------------------------------------------------------------------------
/_static/img/tensor_illustration_flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensor_illustration_flat.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_figure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_figure.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_first_view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_first_view.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_images.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_model_viz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_model_viz.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_pr_curves.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_pr_curves.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_projector.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_projector.png
--------------------------------------------------------------------------------
/_static/img/tensorboard_scalar_runs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tensorboard_scalar_runs.png
--------------------------------------------------------------------------------
/_static/img/text_sentiment_ngrams_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/text_sentiment_ngrams_model.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/220px-KnnClassification.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/220px-KnnClassification.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/babel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/babel.jpg
--------------------------------------------------------------------------------
/_static/img/thumbnails/captum_teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/captum_teaser.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/60-min-blitz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/60-min-blitz.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Adversarial-Example-Generation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Adversarial-Example-Generation.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Autograd-in-Cpp-Frontend.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Autograd-in-Cpp-Frontend.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Combining-Distributed-DataParallel-with-Distributed-RPC-Framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Combining-Distributed-DataParallel-with-Distributed-RPC-Framework.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Custom-Cpp-and-CUDA-Extensions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Custom-Cpp-and-CUDA-Extensions.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/DCGAN-Tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/DCGAN-Tutorial.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Deploying-PyTorch-in-Python-via-a-REST-API-with-Flask.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Deploying-PyTorch-in-Python-via-a-REST-API-with-Flask.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Distributed-Pipeline-Parallelism-Using-RPC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Distributed-Pipeline-Parallelism-Using-RPC.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Classes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Classes.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Operators.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Extending-TorchScript-with-Custom-Cpp-Operators.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Getting Started with Distributed-RPC-Framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Getting Started with Distributed-RPC-Framework.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Getting-Started-with Distributed RPC Framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Getting-Started-with Distributed RPC Framework.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-Data-Parallel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-Data-Parallel.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-RPC-Framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Getting-Started-with-Distributed-RPC-Framework.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Implementing-Batch-RPC-Processing-Using-Asynchronous-Executions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Implementing-Batch-RPC-Processing-Using-Asynchronous-Executions.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Implementing-a-Parameter-Server-Using-Distributed-RPC-Framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Implementing-a-Parameter-Server-Using-Distributed-RPC-Framework.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Introduction-to-TorchScript.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Introduction-to-TorchScript.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Language-Translation-with-TorchText.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Language-Translation-with-TorchText.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Loading-a-TorchScript-Model-in-Cpp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Loading-a-TorchScript-Model-in-Cpp.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Model-Parallel-Best-Practices.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Model-Parallel-Best-Practices.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/NLP-From-Scratch-Classifying-Names-with-a-Character-Level-RNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/NLP-From-Scratch-Classifying-Names-with-a-Character-Level-RNN.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/NLP-From-Scratch-Generating-Names-with-a-Character-Level-RNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/NLP-From-Scratch-Generating-Names-with-a-Character-Level-RNN.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/NLP-From-Scratch-Translation-with-a-Sequence-to-Sequence-Network-and-Attention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/NLP-From-Scratch-Translation-with-a-Sequence-to-Sequence-Network-and-Attention.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Pruning-Tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Pruning-Tutorial.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/PyTorch-Distributed-Overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/PyTorch-Distributed-Overview.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Sequence-to-Sequence-Modeling-with-nnTransformer-andTorchText.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Sequence-to-Sequence-Modeling-with-nnTransformer-andTorchText.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Text-Classification-with-TorchText.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Text-Classification-with-TorchText.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/TorchScript-Parallelism.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/TorchScript-Parallelism.jpg
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/TorchVision-Object-Detection-Finetuning-Tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/TorchVision-Object-Detection-Finetuning-Tutorial.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Training-Transformer-Models-using-Distributed-Data-Parallel-and-Pipeline-Parallelism.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Training-Transformer-Models-using-Distributed-Data-Parallel-and-Pipeline-Parallelism.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Training-Transformer-models-using-Pipeline-Parallelism.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Training-Transformer-models-using-Pipeline-Parallelism.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Transfer-Learning-for-Computer-Vision-Tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Transfer-Learning-for-Computer-Vision-Tutorial.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Tutorials_Card_Template.psd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Tutorials_Card_Template.psd
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Using-the-PyTorch-Cpp-Frontend.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Using-the-PyTorch-Cpp-Frontend.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/Writing-Distributed-Applications-with-PyTorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/Writing-Distributed-Applications-with-PyTorch.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/advanced-PyTorch-1point0-Distributed-Trainer-with-Amazon-AWS.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/advanced-PyTorch-1point0-Distributed-Trainer-with-Amazon-AWS.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/amp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/amp.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/android.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/android.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/custom-datasets-transforms-and-dataloaders.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/custom-datasets-transforms-and-dataloaders.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/defining-a-network.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/defining-a-network.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/experimental-Channels-Last-Memory-Format-in-PyTorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/experimental-Channels-Last-Memory-Format-in-PyTorch.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/experimental-Dynamic-Quantization-on-BERT.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/experimental-Dynamic-Quantization-on-BERT.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/experimental-Dynamic-Quantization-on-an-LSTM-Word-Language-Model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/experimental-Dynamic-Quantization-on-an-LSTM-Word-Language-Model.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/experimental-Introduction-to-Named-Tensors-in-PyTorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/experimental-Introduction-to-Named-Tensors-in-PyTorch.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/experimental-Quantized-Transfer-Learning-for-Computer-Vision-Tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/experimental-Quantized-Transfer-Learning-for-Computer-Vision-Tutorial.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/experimental-Static-Quantization-with-Eager-Mode-in-PyTorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/experimental-Static-Quantization-with-Eager-Mode-in-PyTorch.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/generic-pytorch-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/generic-pytorch-logo.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/graph-mode-dynamic-bert.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/graph-mode-dynamic-bert.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/ios.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/ios.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/learning-pytorch-with-examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/learning-pytorch-with-examples.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/loading-data-in-pytorch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/loading-data-in-pytorch.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/loading-data.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/loading-data.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/mobile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/mobile.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/model-interpretability-using-captum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/model-interpretability-using-captum.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/optional-Exporting-a-Model-from-PyTorch-to-ONNX-and-Running-it-using-ONNX-Runtime.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/optional-Exporting-a-Model-from-PyTorch-to-ONNX-and-Running-it-using-ONNX-Runtime.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/profile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/profile.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/profiler.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/profiler.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/saving-and-loading-general-checkpoint.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/saving-and-loading-general-checkpoint.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/saving-and-loading-models-across-devices.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/saving-and-loading-models-across-devices.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/saving-and-loading-models-for-inference.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/saving-and-loading-models-for-inference.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/saving-multiple-models.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/saving-multiple-models.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/torch-nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/torch-nn.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/torchaudio-Tutorial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/torchaudio-Tutorial.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/torchaudio-speech.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/torchaudio-speech.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/torchscript_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/torchscript_overview.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/using-dynamic-post-training-quantization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/using-dynamic-post-training-quantization.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/using-flask-create-restful-api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/using-flask-create-restful-api.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/visualizing-with-tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/visualizing-with-tensorboard.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/warmstarting-models.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/warmstarting-models.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/what-is-a-state-dict.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/what-is-a-state-dict.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/cropped/zeroing-out-gradients.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/cropped/zeroing-out-gradients.PNG
--------------------------------------------------------------------------------
/_static/img/thumbnails/custom_dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/custom_dataset.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/default.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/defining_a_network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/defining_a_network.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/examples.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/eye.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/eye.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/floppy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/floppy.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/german_to_english_translation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/german_to_english_translation.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/landmarked_face2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/landmarked_face2.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/pixelated-cat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/pixelated-cat.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/pytorch-logo-flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/pytorch-logo-flat.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/pytorch_tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/pytorch_tensorboard.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/sphx_glr_transfer_learning_tutorial_001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/sphx_glr_transfer_learning_tutorial_001.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/tensorboard_dev.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/tensorboard_dev.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/tensorboard_scalars.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/tensorboard_scalars.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/torch-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/torch-logo.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/torchtext.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/torchtext.png
--------------------------------------------------------------------------------
/_static/img/thumbnails/tv-img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/thumbnails/tv-img.png
--------------------------------------------------------------------------------
/_static/img/torch-nn-vs-pytorch-nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/torch-nn-vs-pytorch-nn.png
--------------------------------------------------------------------------------
/_static/img/torch.nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/torch.nn.png
--------------------------------------------------------------------------------
/_static/img/torchscript.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/torchscript.png
--------------------------------------------------------------------------------
/_static/img/torchscript_to_cpp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/torchscript_to_cpp.png
--------------------------------------------------------------------------------
/_static/img/trace_img.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/trace_img.png
--------------------------------------------------------------------------------
/_static/img/transformer_architecture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/transformer_architecture.jpg
--------------------------------------------------------------------------------
/_static/img/transformer_input_target.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/transformer_input_target.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image01.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image02.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image03.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image04.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image05.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image06.png
--------------------------------------------------------------------------------
/_static/img/tv_tutorial/tv_image07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/img/tv_tutorial/tv_image07.png
--------------------------------------------------------------------------------
/_static/minus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/minus.png
--------------------------------------------------------------------------------
/_static/mnist.pkl.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/mnist.pkl.gz
--------------------------------------------------------------------------------
/_static/no_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/no_image.png
--------------------------------------------------------------------------------
/_static/plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/plus.png
--------------------------------------------------------------------------------
/_static/pytorch-logo-dark.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/pytorch-logo-dark.svg
--------------------------------------------------------------------------------
/_static/up-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/up-pressed.png
--------------------------------------------------------------------------------
/_static/up.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/_static/up.png
--------------------------------------------------------------------------------
/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
... (lines 6-81: inline <script> and <link> markup omitted)
82 | {% endblock %}
83 |
--------------------------------------------------------------------------------
/advanced_source/README.txt:
--------------------------------------------------------------------------------
1 | Advanced Tutorials
2 | ------------------
3 |
4 | 1. neural_style_tutorial.py
5 | Neural Transfer with PyTorch
6 | https://pytorch.org/tutorials/advanced/neural_style_tutorial.html
7 |
8 | 2. numpy_extensions_tutorial.py
9 | Creating Extensions Using numpy and scipy
10 | https://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html
11 |
12 | 3. c_extension.rst
13 | Custom C Extensions for PyTorch
14 | https://pytorch.org/tutorials/advanced/c_extension.html
15 |
16 | 4. super_resolution_with_onnxruntime.py
17 | Exporting a Model from PyTorch to ONNX and Running it using ONNXRuntime
18 | https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
19 |
--------------------------------------------------------------------------------
/advanced_source/dispatcher/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
2 | project(dispatcher)
3 |
4 | find_package(Torch REQUIRED)
5 |
6 | add_library(dispatcher SHARED op.cpp)
7 | target_compile_features(dispatcher PRIVATE cxx_std_14)
8 | target_link_libraries(dispatcher "${TORCH_LIBRARIES}")
9 |
--------------------------------------------------------------------------------
/advanced_source/dispatcher/op.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/torch.h>
2 | #include <torch/script.h>
3 |
4 | #include <ATen/NamedTensorUtils.h>
5 |
6 | using torch::Tensor;
7 | using torch::DeviceType;
8 | using torch::autograd::tensor_list;
9 | using torch::autograd::AutogradContext;
10 |
11 | // BEGIN myadd
12 | Tensor myadd(const Tensor& self, const Tensor& other) {
13 | static auto op = torch::Dispatcher::singleton()
14 | .findSchemaOrThrow("myops::myadd", "")
15 | .typed<decltype(myadd)>();
16 | return op.call(self, other);
17 | }
18 | // END myadd
19 |
20 | // BEGIN TORCH_LIBRARY
21 | TORCH_LIBRARY(myops, m) {
22 | m.def("myadd(Tensor self, Tensor other) -> Tensor");
23 | }
24 | // END TORCH_LIBRARY
25 |
26 | // BEGIN myadd_cpu
27 | Tensor myadd_cpu(const Tensor& self_, const Tensor& other_) {
28 | TORCH_CHECK(self_.sizes() == other_.sizes());
29 | TORCH_INTERNAL_ASSERT(self_.device().type() == DeviceType::CPU);
30 | TORCH_INTERNAL_ASSERT(other_.device().type() == DeviceType::CPU);
31 | Tensor self = self_.contiguous();
32 | Tensor other = other_.contiguous();
33 | Tensor result = torch::empty(self.sizes(), self.options());
34 | const float* self_ptr = self.data_ptr<float>();
35 | const float* other_ptr = other.data_ptr<float>();
36 | float* result_ptr = result.data_ptr<float>();
37 | for (int64_t i = 0; i < result.numel(); i++) {
38 | result_ptr[i] = self_ptr[i] + other_ptr[i];
39 | }
40 | return result;
41 | }
42 | // END myadd_cpu
43 |
44 | // BEGIN TORCH_LIBRARY_IMPL CPU
45 | TORCH_LIBRARY_IMPL(myops, CPU, m) {
46 | m.impl("myadd", myadd_cpu);
47 | }
48 | // END TORCH_LIBRARY_IMPL CPU
49 |
50 | Tensor myadd_cuda(const Tensor& self, const Tensor& other) {
51 | // Insert your CUDA implementation here
52 | TORCH_CHECK(0, "CUDA not yet implemented");
53 | }
54 |
55 | // BEGIN TORCH_LIBRARY_IMPL CUDA
56 | TORCH_LIBRARY_IMPL(myops, CUDA, m) {
57 | m.impl("myadd", myadd_cuda);
58 | }
59 | // END TORCH_LIBRARY_IMPL CUDA
60 |
61 | // BEGIN myadd_autograd
62 | class MyAddFunction : public torch::autograd::Function<MyAddFunction> {
63 | public:
64 | static Tensor forward(
65 | AutogradContext *ctx, torch::Tensor self, torch::Tensor other) {
66 | at::AutoNonVariableTypeMode g;
67 | return myadd(self, other);
68 | }
69 |
70 | static tensor_list backward(AutogradContext *ctx, tensor_list grad_outputs) {
71 | auto grad_output = grad_outputs[0];
72 | return {grad_output, grad_output};
73 | }
74 | };
75 |
76 | Tensor myadd_autograd(const Tensor& self, const Tensor& other) {
77 | return MyAddFunction::apply(self, other)[0];
78 | }
79 | // END myadd_autograd
80 |
81 | // BEGIN TORCH_LIBRARY_IMPL Autograd
82 | TORCH_LIBRARY_IMPL(myops, Autograd, m) {
83 | m.impl("myadd", myadd_autograd);
84 | }
85 | // END TORCH_LIBRARY_IMPL Autograd
86 |
87 | #if 0
88 | // BEGIN TORCH_LIBRARY_IMPL Named
89 | Tensor myadd_named(const Tensor& self, const Tensor& other) {
90 | // TODO: shouldn't need to do size check here
91 | TORCH_CHECK(self.sizes() == other.sizes());
92 | auto maybe_outnames = at::unify_from_right(self.names(), other.names());
93 | auto result = ([&]() {
94 | at::NoNamesGuard guard;
95 | return myadd(self, other);
96 | })();
97 | at::namedinference::propagate_names_if_nonempty(result, maybe_outnames);
98 | return result;
99 | }
100 |
101 | TORCH_LIBRARY_IMPL(myops, Named, m) {
102 | m.impl("myadd", myadd_named);
103 | }
104 | // END TORCH_LIBRARY_IMPL Named
105 | #endif
106 |
--------------------------------------------------------------------------------
/advanced_source/dispatcher/test.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | torch.ops.load_library("build/libdispatcher.so")
4 | print(torch.ops.myops.myadd(torch.randn(32, 32), torch.rand(32, 32)))
5 | """
6 | # Doesn't currently work, because Python frontend on torch.ops doesn't
7 | # support names (for not a good reason?)
8 | x = torch.randn(32, 32, names=('A', 'B'))
9 | y = torch.rand(32, 32, names=('A', 'B'))
10 | print(torch.ops.myops.myadd(x, y))
11 | """
12 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
2 | project(infer)
3 |
4 | find_package(Torch REQUIRED)
5 |
6 | add_subdirectory(custom_class_project)
7 |
8 | # Define our library target
9 | add_executable(infer infer.cpp)
10 | set(CMAKE_CXX_STANDARD 14)
11 | # Link against LibTorch
12 | target_link_libraries(infer "${TORCH_LIBRARIES}")
13 | # This is where we link in our libcustom_class code, making our
14 | # custom class available in our binary.
15 | target_link_libraries(infer -Wl,--no-as-needed custom_class)
16 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/custom_class_project/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
2 | project(custom_class)
3 |
4 | find_package(Torch REQUIRED)
5 |
6 | # Define our library target
7 | add_library(custom_class SHARED class.cpp)
8 | set(CMAKE_CXX_STANDARD 14)
9 | # Link against LibTorch
10 | target_link_libraries(custom_class "${TORCH_LIBRARIES}")
11 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/custom_class_project/custom_test.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | # `torch.classes.load_library()` allows you to pass the path to your .so file
4 | # to load it in and make the custom C++ classes available to both Python and
5 | # TorchScript
6 | torch.classes.load_library("build/libcustom_class.so")
7 | # You can query the loaded libraries like this:
8 | print(torch.classes.loaded_libraries)
9 | # prints {'/custom_class_project/build/libcustom_class.so'}
10 |
11 | # We can find and instantiate our custom C++ class in python by using the
12 | # `torch.classes` namespace:
13 | #
14 | # This instantiation will invoke the MyStackClass(std::vector<std::string> init)
15 | # constructor we registered earlier
16 | s = torch.classes.my_classes.MyStackClass(["foo", "bar"])
17 |
18 | # We can call methods in Python
19 | s.push("pushed")
20 | assert s.pop() == "pushed"
21 |
22 | # Returning and passing instances of custom classes works as you'd expect
23 | s2 = s.clone()
24 | s.merge(s2)
25 | for expected in ["bar", "foo", "bar", "foo"]:
26 | assert s.pop() == expected
27 |
28 | # We can also use the class in TorchScript
29 | # For now, we need to assign the class's type to a local in order to
30 | # annotate the type on the TorchScript function. This may change
31 | # in the future.
32 | MyStackClass = torch.classes.my_classes.MyStackClass
33 |
34 |
35 | @torch.jit.script
36 | def do_stacks(s: MyStackClass): # We can pass a custom class instance
37 | # We can instantiate the class
38 | s2 = torch.classes.my_classes.MyStackClass(["hi", "mom"])
39 | s2.merge(s) # We can call a method on the class
40 | # We can also return instances of the class
41 | # from TorchScript function/methods
42 | return s2.clone(), s2.top()
43 |
44 |
45 | stack, top = do_stacks(torch.classes.my_classes.MyStackClass(["wow"]))
46 | assert top == "wow"
47 | for expected in ["wow", "mom", "hi"]:
48 | assert stack.pop() == expected
49 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/custom_class_project/export_attr.py:
--------------------------------------------------------------------------------
1 | # export_attr.py
2 | import torch
3 |
4 | torch.classes.load_library('build/libcustom_class.so')
5 |
6 |
7 | class Foo(torch.nn.Module):
8 | def __init__(self):
9 | super().__init__()
10 | self.stack = torch.classes.my_classes.MyStackClass(["just", "testing"])
11 |
12 | def forward(self, s: str) -> str:
13 | return self.stack.pop() + s
14 |
15 |
16 | scripted_foo = torch.jit.script(Foo())
17 |
18 | scripted_foo.save('foo.pt')
19 | loaded = torch.jit.load('foo.pt')
20 |
21 | print(loaded.stack.pop())
22 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/custom_class_project/save.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | torch.classes.load_library('build/libcustom_class.so')
4 |
5 |
6 | class Foo(torch.nn.Module):
7 | def __init__(self):
8 | super().__init__()
9 |
10 | def forward(self, s: str) -> str:
11 | stack = torch.classes.my_classes.MyStackClass(["hi", "mom"])
12 | return stack.pop() + s
13 |
14 |
15 | scripted_foo = torch.jit.script(Foo())
16 | print(scripted_foo.graph)
17 |
18 | scripted_foo.save('foo.pt')
19 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/infer.cpp:
--------------------------------------------------------------------------------
1 | #include <torch/script.h>
2 |
3 | #include <iostream>
4 | #include <memory>
5 |
6 | int main(int argc, const char* argv[]) {
7 | torch::jit::Module module;
8 | try {
9 | // Deserialize the ScriptModule from a file using torch::jit::load().
10 | module = torch::jit::load("foo.pt");
11 | }
12 | catch (const c10::Error& e) {
13 | std::cerr << "error loading the model\n";
14 | return -1;
15 | }
16 |
17 | std::vector<c10::IValue> inputs = {"foobarbaz"};
18 | auto output = module.forward(inputs).toString();
19 | std::cout << output->string() << std::endl;
20 | }
21 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | rm -rf build
6 | rm -rf custom_class_project/build
7 |
8 | pushd custom_class_project
9 | mkdir build
10 | (cd build && cmake CXXFLAGS="-DNO_PICKLE" -DCMAKE_PREFIX_PATH="$(python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)')" ..)
11 | (cd build && make)
12 | python custom_test.py
13 | python save.py
14 | ! python export_attr.py
15 | popd
16 |
17 | mkdir build
18 | (cd build && cmake -DCMAKE_PREFIX_PATH="$(python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)')" ..)
19 | (cd build && make)
20 | mv custom_class_project/foo.pt build/foo.pt
21 | (cd build && ./infer)
22 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_classes/run2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | rm -rf build
6 | rm -rf custom_class_project/build
7 |
8 | pushd custom_class_project
9 | mkdir build
10 | (cd build && cmake -DCMAKE_PREFIX_PATH="$(python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)')" ..)
11 | (cd build && make)
12 | python export_attr.py
13 | popd
14 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_ops/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
2 | project(warp_perspective)
3 |
4 | find_package(Torch REQUIRED)
5 | find_package(OpenCV REQUIRED)
6 |
7 | # Define our library target
8 | add_library(warp_perspective SHARED op.cpp)
9 | # Enable C++14
10 | target_compile_features(warp_perspective PRIVATE cxx_std_14)
11 | # Link against LibTorch
12 | target_link_libraries(warp_perspective "${TORCH_LIBRARIES}")
13 | # Link against OpenCV
14 | target_link_libraries(warp_perspective opencv_core opencv_imgproc)
15 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_ops/op.cpp:
--------------------------------------------------------------------------------
1 | #include <opencv2/opencv.hpp>
2 | #include <torch/script.h>
3 |
4 | // BEGIN warp_perspective
5 | torch::Tensor warp_perspective(torch::Tensor image, torch::Tensor warp) {
6 | // BEGIN image_mat
7 | cv::Mat image_mat(/*rows=*/image.size(0),
8 | /*cols=*/image.size(1),
9 | /*type=*/CV_32FC1,
10 | /*data=*/image.data_ptr<float>());
11 | // END image_mat
12 |
13 | // BEGIN warp_mat
14 | cv::Mat warp_mat(/*rows=*/warp.size(0),
15 | /*cols=*/warp.size(1),
16 | /*type=*/CV_32FC1,
17 | /*data=*/warp.data_ptr<float>());
18 | // END warp_mat
19 |
20 | // BEGIN output_mat
21 | cv::Mat output_mat;
22 | cv::warpPerspective(image_mat, output_mat, warp_mat, /*dsize=*/{8, 8});
23 | // END output_mat
24 |
25 | // BEGIN output_tensor
26 | torch::Tensor output = torch::from_blob(output_mat.ptr<float>(), /*sizes=*/{8, 8});
27 | return output.clone();
28 | // END output_tensor
29 | }
30 | // END warp_perspective
31 |
32 | // BEGIN registry
33 | TORCH_LIBRARY(my_ops, m) {
34 | m.def("warp_perspective", warp_perspective);
35 | }
36 | // END registry
37 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_ops/smoke_test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | torch.ops.load_library("build/libwarp_perspective.so")
3 | print(torch.ops.my_ops.warp_perspective)
4 |
--------------------------------------------------------------------------------
/advanced_source/torch_script_custom_ops/test.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | print("BEGIN preamble")
5 | torch.ops.load_library("build/libwarp_perspective.so")
6 | print(torch.ops.my_ops.warp_perspective(torch.randn(32, 32), torch.rand(3, 3)))
7 | print("END preamble")
8 |
9 |
10 | # BEGIN compute
11 | def compute(x, y, z):
12 | return x.matmul(y) + torch.relu(z)
13 | # END compute
14 |
15 |
16 | print("BEGIN trace")
17 | inputs = [torch.randn(4, 8), torch.randn(8, 5), torch.randn(4, 5)]
18 | trace = torch.jit.trace(compute, inputs)
19 | print(trace.graph)
20 | print("END trace")
21 |
22 |
23 | # BEGIN compute2
24 | def compute(x, y, z):
25 | x = torch.ops.my_ops.warp_perspective(x, torch.eye(3))
26 | return x.matmul(y) + torch.relu(z)
27 | # END compute2
28 |
29 |
30 | print("BEGIN trace2")
31 | inputs = [torch.randn(4, 8), torch.randn(8, 5), torch.randn(8, 5)]
32 | trace = torch.jit.trace(compute, inputs)
33 | print(trace.graph)
34 | print("END trace2")
35 |
--------------------------------------------------------------------------------
/advanced_source/transformer__timeseries_cpp_tutorial/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
2 | project(custom_ops)
3 |
4 | find_package(Torch REQUIRED)
5 |
6 | add_executable(transformer_ts transformer_timeseries.cpp)
7 | target_link_libraries(transformer_ts "${TORCH_LIBRARIES}")
8 | set_property(TARGET transformer_ts PROPERTY CXX_STANDARD 14)
9 |
--------------------------------------------------------------------------------
/advanced_source/transformer__timeseries_cpp_tutorial/scheduler.h:
--------------------------------------------------------------------------------
1 | // Copyright 2020-present pytorch-cpp Authors
2 | #pragma once
3 |
4 | #include <torch/torch.h>
5 | #include <vector>
6 | #include <algorithm>
7 |
8 | namespace scheduler {
9 | template<typename TOptimizer>
10 | struct OptimizerOptionsMap {
11 | };
12 |
13 | template<>
14 | struct OptimizerOptionsMap<torch::optim::Adam> {
15 | using type = torch::optim::AdamOptions;
16 | };
17 |
18 | template<>
19 | struct OptimizerOptionsMap<torch::optim::Adagrad> {
20 | using type = torch::optim::AdagradOptions;
21 | };
22 |
23 | template<>
24 | struct OptimizerOptionsMap<torch::optim::LBFGS> {
25 | using type = torch::optim::LBFGSOptions;
26 | };
27 |
28 | template<>
29 | struct OptimizerOptionsMap<torch::optim::RMSprop> {
30 | using type = torch::optim::RMSpropOptions;
31 | };
32 |
33 | template<>
34 | struct OptimizerOptionsMap<torch::optim::SGD> {
35 | using type = torch::optim::SGDOptions;
36 | };
37 |
38 | /**
39 | * Learning rate scheduler base.
40 | *
41 | * Based on the Python implementation at
42 | * https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py.
43 | * @tparam TOptimizer Optimizer type
44 | */
45 | template<typename TOptimizer>
46 | class LRScheduler {
47 | public:
48 | explicit LRScheduler(TOptimizer& optimizer, int64_t last_epoch = -1)
49 | : optimizer_(optimizer), last_epoch_(last_epoch), base_lrs(get_current_lr()) {}
50 |
51 | virtual std::vector<double> get_lr() = 0;
52 |
53 | void step() {
54 | ++last_epoch_;
55 |
56 | const auto values = get_lr();
57 | auto &param_groups = optimizer_.param_groups();
58 |
59 | for (decltype(param_groups.size()) i = 0; i != param_groups.size(); ++i) {
60 | dynamic_cast<typename OptimizerOptionsMap<TOptimizer>::type &>(param_groups[i].options()).lr(values[i]);
61 | }
62 | }
63 |
64 | virtual ~LRScheduler() = default;
65 |
66 | protected:
67 | TOptimizer& optimizer_;
68 | int64_t last_epoch_;
69 | std::vector<double> base_lrs;
70 |
71 | std::vector<double> get_current_lr() {
72 | std::vector<double> lrs;
73 | lrs.reserve(optimizer_.param_groups().size());
74 |
75 | for (auto &param_group : optimizer_.param_groups()) {
76 | lrs.push_back(
77 | dynamic_cast<typename OptimizerOptionsMap<TOptimizer>::type &>(param_group.options()).lr());
78 | }
79 |
80 | return lrs;
81 | }
82 | };
83 |
84 | /**
85 | * Step learning rate scheduler.
86 | *
87 | * Based on the python implementation at
88 | * https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py.
89 | * @tparam TOptimizer Optimizer type
90 | */
91 | template<typename TOptimizer>
92 | class StepLR : public LRScheduler<TOptimizer> {
93 | public:
94 | StepLR(TOptimizer& optimizer, int64_t step_size, double gamma = 0.1, int64_t last_epoch = -1)
95 | : LRScheduler<TOptimizer>(optimizer, last_epoch), step_size_(step_size), gamma_(gamma) {}
96 |
97 | std::vector<double> get_lr() override {
98 | auto new_lr = this->get_current_lr();
99 |
100 | if (this->last_epoch_ != 0 && (this->last_epoch_ % step_size_ == 0)) {
101 | std::transform(new_lr.cbegin(), new_lr.cend(), new_lr.begin(),
102 | [gamma_ = gamma_](auto value) { return value * gamma_; });
103 | }
104 |
105 | return new_lr;
106 | }
107 |
108 | private:
109 | int64_t step_size_;
110 | double gamma_;
111 | };
112 | } // namespace scheduler
--------------------------------------------------------------------------------
/beginner_source/README.txt:
--------------------------------------------------------------------------------
1 | Beginner Tutorials
2 | ------------------
3 |
4 | 1. blitz/* and deep_learning_60min_blitz.rst
5 | Deep Learning with PyTorch: A 60 Minute Blitz
6 | https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html
7 |
8 | 2. former_torchies/* and former_torchies_tutorial.rst
9 | PyTorch for Former Torch Users
10 | https://pytorch.org/tutorials/beginner/former_torchies_tutorial.html
11 |
12 | 3. examples_*/* and pytorch_with_examples.rst
13 | Learning PyTorch with Examples
14 | https://pytorch.org/tutorials/beginner/pytorch_with_examples.html
15 |
16 | 4. transfer_learning_tutorial.py
17 | Transfer Learning Tutorial
18 | https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
19 |
20 | 5. nlp/* and deep_learning_nlp_tutorial.rst
21 | Deep Learning for NLP with Pytorch
22 | https://pytorch.org/tutorials/beginner/deep_learning_nlp_tutorial.html
23 |
24 | 6. transformer_translation.py
25 | Language Translation with Transformers
26 | https://pytorch.org/tutorials/beginner/transformer_translation.html
--------------------------------------------------------------------------------
/beginner_source/basics/README.txt:
--------------------------------------------------------------------------------
1 | Learn the Basics
2 | ------------------
3 |
4 | 1. intro.py
5 | Learn the Basics
6 | https://pytorch.org/tutorials/beginner/basics/intro.html
7 |
8 | 2. quickstart_tutorial.py
9 | Quickstart
10 | https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html
11 |
12 | 3. tensors_tutorial.py
13 | Tensors
14 | https://pytorch.org/tutorials/beginner/basics/tensor_tutorial.html
15 |
16 | 4. dataquickstart_tutorial.py
17 | Datasets & DataLoaders
18 | https://pytorch.org/tutorials/beginner/basics/data_tutorial.html
19 |
20 | 5. transforms_tutorial.py
21 | Transforms
22 | https://pytorch.org/tutorials/beginner/basics/transforms_tutorial.html
23 |
24 | 6. buildmodel_tutorial.py
25 | Building the Neural Network
26 | https://pytorch.org/tutorials/beginner/basics/buildmodel_tutorial.html
27 |
28 | 7. autograd_tutorial.py
29 | Automatic Differentiation with torch.autograd
30 | https://pytorch.org/tutorials/beginner/basics/autograd_tutorial.html
31 |
32 | 8. optimization_tutorial.py
33 | Optimizing Model Parameters
34 | https://pytorch.org/tutorials/beginner/basics/optimization_tutorial.html
35 |
36 | 9. saveloadrun_tutorial.py
37 | Save and Load the Model
38 | https://pytorch.org/tutorials/beginner/basics/saveloadrun_tutorial.html
39 |
40 |
--------------------------------------------------------------------------------
/beginner_source/basics/intro.py:
--------------------------------------------------------------------------------
1 | """
2 | **Learn the Basics** ||
3 | `Quickstart <quickstart_tutorial.html>`_ ||
4 | `Tensors <tensor_tutorial.html>`_ ||
5 | `Datasets & DataLoaders <data_tutorial.html>`_ ||
6 | `Transforms <transforms_tutorial.html>`_ ||
7 | `Build Model <buildmodel_tutorial.html>`_ ||
8 | `Autograd <autograd_tutorial.html>`_ ||
9 | `Optimization <optimization_tutorial.html>`_ ||
10 | `Save & Load Model <saveloadrun_tutorial.html>`_
11 |
12 | Learn the Basics
13 | ===================
14 |
15 | Authors:
16 | `Suraj Subramanian `_,
17 | `Seth Juarez `_,
18 | `Cassie Breviu `_,
19 | `Dmitry Soshnikov `_,
20 | `Ari Bornstein `_
21 |
22 | Most machine learning workflows involve working with data, creating models, optimizing model
23 | parameters, and saving the trained models. This tutorial introduces you to a complete ML workflow
24 | implemented in PyTorch, with links to learn more about each of these concepts.
25 |
26 | We'll use the FashionMNIST dataset to train a neural network that predicts if an input image belongs
27 | to one of the following classes: T-shirt/top, Trouser, Pullover, Dress, Coat, Sandal, Shirt, Sneaker,
28 | Bag, or Ankle boot.
29 |
30 | `This tutorial assumes a basic familiarity with Python and Deep Learning concepts.`
31 |
32 |
33 | Running the Tutorial Code
34 | -------------------------
35 | You can run this tutorial in a couple of ways:
36 |
37 | - **In the cloud**: This is the easiest way to get started! Each section has a Colab link at the top, which opens a notebook with the code in a fully-hosted environment. Pro tip: use Colab with a GPU runtime to speed things up (*Runtime > Change runtime type > GPU*).
38 | - **Locally**: This option requires you to set up PyTorch and TorchVision first on your local machine (`installation instructions <https://pytorch.org/get-started/locally/>`_). Download the notebook or copy the code into your favorite IDE.
39 |
40 |
41 | How to Use this Guide
42 | ---------------------
43 | If you're familiar with other deep learning frameworks, check out the `0. Quickstart <quickstart_tutorial.html>`_ first
44 | to quickly familiarize yourself with PyTorch's API.
45 |
46 | If you're new to deep learning frameworks, head right into the first section of our step-by-step guide: `1. Tensors <tensor_tutorial.html>`_.
47 |
48 |
49 | .. include:: /beginner_source/basics/qs_toc.txt
50 |
51 | .. toctree::
52 | :hidden:
53 |
54 | """
--------------------------------------------------------------------------------
/beginner_source/basics/qs_toc.txt:
--------------------------------------------------------------------------------
1 | | 0. `Quickstart <quickstart_tutorial.html>`_
2 | | 1. `Tensors <tensor_tutorial.html>`_
3 | | 2. `Datasets and DataLoaders <data_tutorial.html>`_
4 | | 3. `Transforms <transforms_tutorial.html>`_
5 | | 4. `Build Model <buildmodel_tutorial.html>`_
6 | | 5. `Automatic Differentiation <autograd_tutorial.html>`_
7 | | 6. `Optimization Loop <optimization_tutorial.html>`_
8 | | 7. `Save, Load and Use Model <saveloadrun_tutorial.html>`_
9 |
--------------------------------------------------------------------------------
/beginner_source/basics/saveloadrun_tutorial.py:
--------------------------------------------------------------------------------
1 | """
2 | `Learn the Basics <intro.html>`_ ||
3 | `Quickstart <quickstart_tutorial.html>`_ ||
4 | `Tensors <tensor_tutorial.html>`_ ||
5 | `Datasets & DataLoaders <data_tutorial.html>`_ ||
6 | `Transforms <transforms_tutorial.html>`_ ||
7 | `Build Model <buildmodel_tutorial.html>`_ ||
8 | `Autograd <autograd_tutorial.html>`_ ||
9 | `Optimization <optimization_tutorial.html>`_ ||
10 | **Save & Load Model**
11 |
12 | Save and Load the Model
13 | ============================
14 |
15 | In this section we will look at how to persist model state with saving, loading and running model predictions.
16 | """
17 |
18 | import torch
19 | import torch.onnx as onnx
20 | import torchvision.models as models
21 |
22 |
23 | #######################################################################
24 | # Saving and Loading Model Weights
25 | # --------------------------------
26 | # PyTorch models store the learned parameters in an internal
27 | # state dictionary, called ``state_dict``. These can be persisted via the ``torch.save``
28 | # method:
29 |
30 | model = models.vgg16(pretrained=True)
31 | torch.save(model.state_dict(), 'model_weights.pth')
32 |
33 | ##########################
34 | # To load model weights, you need to create an instance of the same model first, and then load the parameters
35 | # using the ``load_state_dict()`` method.
36 |
37 | model = models.vgg16() # we do not specify pretrained=True, i.e. do not load default weights
38 | model.load_state_dict(torch.load('model_weights.pth'))
39 | model.eval()
40 |
41 | ###########################
42 | # .. note:: Be sure to call the ``model.eval()`` method before inferencing to set the dropout and batch normalization layers to evaluation mode. Failing to do this will yield inconsistent inference results.
43 |
44 | #######################################################################
45 | # Saving and Loading Models with Shapes
46 | # -------------------------------------
47 | # When loading model weights, we needed to instantiate the model class first, because the class
48 | # defines the structure of a network. We might want to save the structure of this class together with
49 | # the model, in which case we can pass ``model`` (and not ``model.state_dict()``) to the saving function:
50 |
51 | torch.save(model, 'model.pth')
52 |
53 | ########################
54 | # We can then load the model like this:
55 |
56 | model = torch.load('model.pth')
57 |
58 | ########################
59 | # .. note:: This approach uses the Python `pickle <https://docs.python.org/3/library/pickle.html>`_ module when serializing the model, so it relies on the actual class definition being available when loading the model.
60 |
61 | #######################################################################
62 | # Exporting Model to ONNX
63 | # -----------------------
64 | # PyTorch also has native ONNX export support. Given the dynamic nature of the
65 | # PyTorch execution graph, however, the export process must
66 | # traverse the execution graph to produce a persisted ONNX model. For this reason, a
67 | # test variable of the appropriate size should be passed in to the
68 | # export routine (in our case, we will create a dummy zero tensor of the correct size):
69 |
70 | input_image = torch.zeros((1,3,224,224))
71 | onnx.export(model, input_image, 'model.onnx')
72 |
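###################################################################
# As a quick sanity check (a sketch, not part of the original tutorial; it
# assumes the standalone ``onnx`` package is installed, which is separate
# from ``torch.onnx``), we can confirm the exported file parses as a
# well-formed ONNX model:

import onnx as onnx_format  # standalone onnx package, aliased to avoid clashing with torch.onnx above

onnx_model = onnx_format.load('model.onnx')
onnx_format.checker.check_model(onnx_model)  # raises an exception if the model is malformed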
73 | ###########################
74 | # There are a lot of things you can do with an ONNX model, including running inference on different platforms
75 | # and in different programming languages. For more details, we recommend
76 | # visiting the `ONNX tutorials <https://github.com/onnx/tutorials>`_ repository.
77 | #
78 | # Congratulations! You have completed the PyTorch beginner tutorial! Try
79 | # `revisiting the first page <intro.html>`_ to see the tutorial in its entirety
80 | # again. We hope this tutorial has helped you get started with deep learning on PyTorch.
81 | # Good luck!
82 | #
83 |
84 |
--------------------------------------------------------------------------------
/beginner_source/basics/transforms_tutorial.py:
--------------------------------------------------------------------------------
1 | """
2 | `Learn the Basics <intro.html>`_ ||
3 | `Quickstart <quickstart_tutorial.html>`_ ||
4 | `Tensors <tensor_tutorial.html>`_ ||
5 | `Datasets & DataLoaders <data_tutorial.html>`_ ||
6 | **Transforms** ||
7 | `Build Model <buildmodel_tutorial.html>`_ ||
8 | `Autograd <autograd_tutorial.html>`_ ||
9 | `Optimization <optimization_tutorial.html>`_ ||
10 | `Save & Load Model <saveloadrun_tutorial.html>`_
11 |
12 | Transforms
13 | ===================
14 |
15 | Data does not always come in its final processed form that is required for
16 | training machine learning algorithms. We use **transforms** to perform some
17 | manipulation of the data and make it suitable for training.
18 |
19 | All TorchVision datasets have two parameters, ``transform`` to modify the features and
20 | ``target_transform`` to modify the labels, that accept callables containing the transformation logic.
21 | The `torchvision.transforms `_ module offers
22 | several commonly-used transforms out of the box.
23 |
24 | The FashionMNIST features are in PIL Image format, and the labels are integers.
25 | For training, we need the features as normalized tensors, and the labels as one-hot encoded tensors.
26 | To make these transformations, we use ``ToTensor`` and ``Lambda``.
27 | """
28 |
29 | import torch
30 | from torchvision import datasets
31 | from torchvision.transforms import ToTensor, Lambda
32 | ds = datasets.FashionMNIST(
33 | root="data",
34 | train=True,
35 | download=True,
36 | transform=ToTensor(),
37 | target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
38 | )
39 |
40 | #################################################
41 | # ToTensor()
42 | # -------------------------------
43 | #
44 | # `ToTensor `_
45 | # converts a PIL image or NumPy ``ndarray`` into a ``FloatTensor`` and scales
46 | # the image's pixel intensity values to the range [0., 1.]
47 | #
48 |
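#################################################
# For instance (a minimal sketch, not in the original tutorial; it assumes
# Pillow and NumPy are available), a uint8 PIL image becomes a float tensor
# with a leading channel dimension:

import numpy as np
from PIL import Image

pil_img = Image.fromarray(np.full((28, 28), 255, dtype=np.uint8))  # all-white 28x28 grayscale image
t = ToTensor()(pil_img)
print(t.shape, t.dtype, t.max().item())  # torch.Size([1, 28, 28]) torch.float32 1.0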
49 | ##############################################
50 | # Lambda Transforms
51 | # -------------------------------
52 | #
53 | # Lambda transforms apply any user-defined lambda function. Here, we define a function
54 | # to turn the integer into a one-hot encoded tensor.
55 | # It first creates a zero tensor of size 10 (the number of labels in our dataset) and calls
56 | # `scatter_ <https://pytorch.org/docs/stable/generated/torch.Tensor.scatter_.html>`_ which assigns a
57 | # ``value=1`` on the index as given by the label ``y``.
58 |
59 | target_transform = Lambda(lambda y: torch.zeros(
60 | 10, dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))
61 |
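##############################################
# As a quick check (not part of the original tutorial), label ``3`` maps to a
# 10-element vector with a single one at index 3:

print(target_transform(3))
# tensor([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])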
62 | ######################################################################
63 | # --------------
64 | #
65 |
66 | #################################################################
67 | # Further Reading
68 | # ~~~~~~~~~~~~~~~~~
69 | # - `torchvision.transforms API `_
70 |
--------------------------------------------------------------------------------
/beginner_source/blitz/README.txt:
--------------------------------------------------------------------------------
1 | Deep Learning with PyTorch: A 60 Minute Blitz
2 | ---------------------------------------------
3 |
4 | 1. tensor_tutorial.py
5 | What is PyTorch?
6 | https://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html
7 |
8 | 2. autograd_tutorial.py
9 | Autograd: Automatic Differentiation
10 | https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html
11 |
12 | 3. neural_networks_tutorial.py
13 | Neural Networks
14 | https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html
15 |
16 | 4. cifar10_tutorial.py
17 | Training a Classifier
18 | https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
23 |
24 |
25 |
--------------------------------------------------------------------------------
/beginner_source/colab.rst:
--------------------------------------------------------------------------------
1 | Using Tutorial Data from Google Drive in Colab
2 | ==============================================
3 |
4 | We've added a new feature to tutorials that allows users to open the
5 | notebook associated with a tutorial in Google Colab. You may need to
6 | copy data to your Google Drive account to get the more complex tutorials
7 | to work.
8 |
9 | In this example, we'll demonstrate how to change the notebook in Colab
10 | to work with the Chatbot Tutorial. To do this, you'll first need to be
11 | logged into Google Drive. (For a full description of how to access data
12 | in Colab, you can view their example notebook
13 | `here `__.)
14 |
15 | To get started open the `Chatbot
16 | Tutorial `__
17 | in your browser.
18 |
19 | At the top of the page click **Run in Google Colab**.
20 |
21 | The file will open in Colab.
22 |
23 | If you choose **Runtime** and then **Run All**, you'll get an error because
24 | the file can't be found.
25 |
26 | To fix this, we'll copy the required file into our Google Drive account.
27 |
28 | 1. Log into Google Drive.
29 | 2. In Google Drive, make a folder named **data**, with a subfolder named
30 | **cornell**.
31 | 3. Visit the Cornell Movie Dialogs Corpus and download the ZIP file.
32 | 4. Unzip the file on your local machine.
33 | 5. Copy the file **movie\_lines.txt** to the **data/cornell** folder you
34 | created in Google Drive.
35 |
36 | Now we'll need to edit the file in Colab to point to the file on
37 | Google Drive.
38 |
39 | In Colab, add the following to the top of the code section, above the line
40 | that begins *corpus\_name*:
41 |
42 | ::
43 |
44 | from google.colab import drive
45 | drive.mount('/content/gdrive')
46 |
47 | Change the two lines that follow:
48 |
49 | 1. Change the **corpus\_name** value to **"cornell"**.
50 | 2. Change the line that begins with **corpus** to this:
51 |
52 | ::
53 |
54 | corpus = os.path.join("/content/gdrive/My Drive/data", corpus_name)
55 |
56 | We're now pointing to the file we uploaded to Drive.
57 |
58 | Now when you click on the **Run cell** button for the code section,
59 | you'll be prompted to authorize Google Drive and you'll get an
60 | authorization code. Paste the code into the prompt in Colab and you
61 | should be set.
62 |
63 | Rerun the notebook from **Runtime** / **Run All** menu command and
64 | you'll see it process. (Note that this tutorial takes a long time to
65 | run.)
66 |
67 | Hopefully this example will give you a good starting point for running
68 | some of the more complex tutorials in Colab. As we evolve our use of
69 | Colab on the PyTorch tutorials site, we'll look at ways to make this
70 | easier for users.
71 |
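72 | For reference, after the edits above, the top of the modified cell should
73 | look roughly like this:
74 |
75 | ::
76 |
77 | from google.colab import drive
78 | drive.mount('/content/gdrive')
79 |
80 | corpus_name = "cornell"
81 | corpus = os.path.join("/content/gdrive/My Drive/data", corpus_name)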
--------------------------------------------------------------------------------
/beginner_source/deep_learning_60min_blitz.rst:
--------------------------------------------------------------------------------
1 | Deep Learning with PyTorch: A 60 Minute Blitz
2 | ---------------------------------------------
3 | **Author**: `Soumith Chintala `_
4 |
5 | .. raw:: html
6 |
7 |
8 |
9 |
10 |
11 | What is PyTorch?
12 | ~~~~~~~~~~~~~~~~~~~~~
13 | PyTorch is a Python-based scientific computing package serving two broad purposes:
14 |
15 | - A replacement for NumPy to use the power of GPUs and other accelerators.
16 | - An automatic differentiation library that is useful to implement neural networks.
17 |
18 | Goal of this tutorial:
19 | ~~~~~~~~~~~~~~~~~~~~~~~~
20 | - Understand PyTorch’s Tensor library and neural networks at a high level.
21 | - Train a small neural network to classify images.
22 |
23 |
24 | .. Note::
25 | Make sure you have the `torch`_ and `torchvision`_ packages installed.
26 |
27 | .. _torch: https://github.com/pytorch/pytorch
28 | .. _torchvision: https://github.com/pytorch/vision
29 |
30 |
31 | .. toctree::
32 | :hidden:
33 |
34 | /beginner/blitz/tensor_tutorial
35 | /beginner/blitz/autograd_tutorial
36 | /beginner/blitz/neural_networks_tutorial
37 | /beginner/blitz/cifar10_tutorial
38 |
39 | .. galleryitem:: /beginner/blitz/tensor_tutorial.py
40 | :figure: /_static/img/tensor_illustration_flat.png
41 |
42 | .. galleryitem:: /beginner/blitz/autograd_tutorial.py
43 | :figure: /_static/img/autodiff.png
44 |
45 | .. galleryitem:: /beginner/blitz/neural_networks_tutorial.py
46 | :figure: /_static/img/mnist.png
47 |
48 | .. galleryitem:: /beginner/blitz/cifar10_tutorial.py
49 | :figure: /_static/img/cifar10.png
50 |
51 | .. raw:: html
52 |
53 |
54 |
--------------------------------------------------------------------------------
/beginner_source/deep_learning_nlp_tutorial.rst:
--------------------------------------------------------------------------------
1 | Deep Learning for NLP with PyTorch
2 | **********************************
3 | **Author**: `Robert Guthrie `_
4 |
5 | This tutorial will walk you through the key ideas of deep learning
6 | programming using PyTorch. Many of the concepts (such as the computation
7 | graph abstraction and autograd) are not unique to PyTorch and are
8 | relevant to any deep learning toolkit out there.
9 |
10 | I am writing this tutorial to focus specifically on NLP for people who
11 | have never written code in any deep learning framework (e.g., TensorFlow,
12 | Theano, Keras, Dynet). It assumes working knowledge of core NLP
13 | problems: part-of-speech tagging, language modeling, etc. It also
14 | assumes familiarity with neural networks at the level of an intro AI
15 | class (such as one from the Russell and Norvig book). Usually, these
16 | courses cover the basic backpropagation algorithm on feed-forward neural
17 | networks, and make the point that they are chains of compositions of
18 | linearities and non-linearities. This tutorial aims to get you started
19 | writing deep learning code, given you have this prerequisite knowledge.
20 |
21 | Note this is about *models*, not data. For all of the models, I just
22 | create a few test examples with small dimensionality so you can see how
23 | the weights change as they train. If you have some real data you want to
24 | try, you should be able to rip out any of the models from this notebook
25 | and use them on it.
26 |
27 |
28 | .. toctree::
29 | :hidden:
30 |
31 | /beginner/nlp/pytorch_tutorial
32 | /beginner/nlp/deep_learning_tutorial
33 | /beginner/nlp/word_embeddings_tutorial
34 | /beginner/nlp/sequence_models_tutorial
35 | /beginner/nlp/advanced_tutorial
36 |
37 |
38 | .. galleryitem:: /beginner/nlp/pytorch_tutorial.py
39 | :intro: All of deep learning is computations on tensors, which are generalizations of a matrix that can be
40 |
41 | .. galleryitem:: /beginner/nlp/deep_learning_tutorial.py
42 | :intro: Deep learning consists of composing linearities with non-linearities in clever ways. The introduction of non-linearities allows
43 |
44 | .. galleryitem:: /beginner/nlp/word_embeddings_tutorial.py
45 | :intro: Word embeddings are dense vectors of real numbers, one per word in your vocabulary. In NLP, it is almost always the case that your features are
46 |
47 | .. galleryitem:: /beginner/nlp/sequence_models_tutorial.py
48 | :intro: At this point, we have seen various feed-forward networks. That is, there is no state maintained by the network at all.
49 |
50 | .. galleryitem:: /beginner/nlp/advanced_tutorial.py
51 | :intro: Dynamic versus Static Deep Learning Toolkits. PyTorch is a *dynamic* neural network kit.
52 |
53 |
54 | .. raw:: html
55 |
56 |
57 |
--------------------------------------------------------------------------------
/beginner_source/examples_autograd/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/beginner_source/examples_autograd/README.txt
--------------------------------------------------------------------------------
/beginner_source/examples_autograd/polynomial_autograd.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: Tensors and autograd
4 | -------------------------------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance.
8 |
9 | This implementation computes the forward pass using operations on PyTorch
10 | Tensors, and uses PyTorch autograd to compute gradients.
11 |
12 |
13 | A PyTorch Tensor represents a node in a computational graph. If ``x`` is a
14 | Tensor that has ``x.requires_grad=True`` then ``x.grad`` is another Tensor
15 | holding the gradient of some scalar value with respect to ``x``.
16 | """
17 | import torch
18 | import math
19 |
20 | dtype = torch.float
21 | device = torch.device("cpu")
22 | # device = torch.device("cuda:0") # Uncomment this to run on GPU
23 |
24 | # Create Tensors to hold input and outputs.
25 | # By default, requires_grad=False, which indicates that we do not need to
26 | # compute gradients with respect to these Tensors during the backward pass.
27 | x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
28 | y = torch.sin(x)
29 |
30 | # Create random Tensors for weights. For a third order polynomial, we need
31 | # 4 weights: y = a + b x + c x^2 + d x^3
32 | # Setting requires_grad=True indicates that we want to compute gradients with
33 | # respect to these Tensors during the backward pass.
34 | a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
35 | b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
36 | c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
37 | d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
38 |
39 | learning_rate = 1e-6
40 | for t in range(2000):
41 | # Forward pass: compute predicted y using operations on Tensors.
42 | y_pred = a + b * x + c * x ** 2 + d * x ** 3
43 |
44 | # Compute and print loss using operations on Tensors.
45 | # Now loss is a Tensor of shape (1,)
46 | # loss.item() gets the scalar value held in the loss.
47 | loss = (y_pred - y).pow(2).sum()
48 | if t % 100 == 99:
49 | print(t, loss.item())
50 |
51 | # Use autograd to compute the backward pass. This call will compute the
52 | # gradient of loss with respect to all Tensors with requires_grad=True.
53 | # After this call a.grad, b.grad, c.grad and d.grad will be Tensors holding
54 | # the gradient of the loss with respect to a, b, c, d respectively.
55 | loss.backward()
56 |
57 | # Manually update weights using gradient descent. Wrap in torch.no_grad()
58 | # because weights have requires_grad=True, but we don't need to track this
59 | # in autograd.
60 | with torch.no_grad():
61 | a -= learning_rate * a.grad
62 | b -= learning_rate * b.grad
63 | c -= learning_rate * c.grad
64 | d -= learning_rate * d.grad
65 |
66 | # Manually zero the gradients after updating weights
67 | a.grad = None
68 | b.grad = None
69 | c.grad = None
70 | d.grad = None
71 |
72 | print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
73 |
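74 | ###############################################################
75 | # A minimal standalone illustration of the autograd mechanics described
76 | # above: for z = (x ** 2).sum(), calling z.backward() fills x.grad with
77 | # dz/dx = 2 * x.
78 |
79 | x_demo = torch.ones(3, requires_grad=True)
80 | z = (x_demo ** 2).sum()
81 | z.backward()
82 | print(x_demo.grad)  # tensor([2., 2., 2.])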
--------------------------------------------------------------------------------
/beginner_source/examples_autograd/polynomial_custom_function.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: Defining New autograd Functions
4 | ----------------------------------------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance. Instead of writing the
8 | polynomial as :math:`y=a+bx+cx^2+dx^3`, we write the polynomial as
9 | :math:`y=a+b P_3(c+dx)` where :math:`P_3(x)=\frac{1}{2}\left(5x^3-3x\right)` is
10 | the `Legendre polynomial`_ of degree three.
11 |
12 | .. _Legendre polynomial:
13 | https://en.wikipedia.org/wiki/Legendre_polynomials
14 |
15 | This implementation computes the forward pass using operations on PyTorch
16 | Tensors, and uses PyTorch autograd to compute gradients.
17 |
18 | In this implementation we define our own custom autograd function to compute
19 | :math:`P_3(x)`; its derivative, used in the backward pass, is :math:`P_3'(x)=\frac{3}{2}\left(5x^2-1\right)`
20 | """
21 | import torch
22 | import math
23 |
24 |
25 | class LegendrePolynomial3(torch.autograd.Function):
26 | """
27 | We can implement our own custom autograd Functions by subclassing
28 | torch.autograd.Function and implementing the forward and backward passes
29 | which operate on Tensors.
30 | """
31 |
32 | @staticmethod
33 | def forward(ctx, input):
34 | """
35 | In the forward pass we receive a Tensor containing the input and return
36 | a Tensor containing the output. ctx is a context object that can be used
37 | to stash information for backward computation. You can cache arbitrary
38 | objects for use in the backward pass using the ctx.save_for_backward method.
39 | """
40 | ctx.save_for_backward(input)
41 | return 0.5 * (5 * input ** 3 - 3 * input)
42 |
43 | @staticmethod
44 | def backward(ctx, grad_output):
45 | """
46 | In the backward pass we receive a Tensor containing the gradient of the loss
47 | with respect to the output, and we need to compute the gradient of the loss
48 | with respect to the input.
49 | """
50 | input, = ctx.saved_tensors
51 | return grad_output * 1.5 * (5 * input ** 2 - 1)
52 |
53 |
54 | dtype = torch.float
55 | device = torch.device("cpu")
56 | # device = torch.device("cuda:0") # Uncomment this to run on GPU
57 |
58 | # Create Tensors to hold input and outputs.
59 | # By default, requires_grad=False, which indicates that we do not need to
60 | # compute gradients with respect to these Tensors during the backward pass.
61 | x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
62 | y = torch.sin(x)
63 |
64 | # Create random Tensors for weights. For this example, we need
65 | # 4 weights: y = a + b * P3(c + d * x), these weights need to be initialized
66 | # not too far from the correct result to ensure convergence.
67 | # Setting requires_grad=True indicates that we want to compute gradients with
68 | # respect to these Tensors during the backward pass.
69 | a = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
70 | b = torch.full((), -1.0, device=device, dtype=dtype, requires_grad=True)
71 | c = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
72 | d = torch.full((), 0.3, device=device, dtype=dtype, requires_grad=True)
73 |
74 | learning_rate = 5e-6
75 | for t in range(2000):
76 | # To apply our Function, we use Function.apply method. We alias this as 'P3'.
77 | P3 = LegendrePolynomial3.apply
78 |
79 | # Forward pass: compute predicted y using operations; we compute
80 | # P3 using our custom autograd operation.
81 | y_pred = a + b * P3(c + d * x)
82 |
83 | # Compute and print loss
84 | loss = (y_pred - y).pow(2).sum()
85 | if t % 100 == 99:
86 | print(t, loss.item())
87 |
88 | # Use autograd to compute the backward pass.
89 | loss.backward()
90 |
91 | # Update weights using gradient descent
92 | with torch.no_grad():
93 | a -= learning_rate * a.grad
94 | b -= learning_rate * b.grad
95 | c -= learning_rate * c.grad
96 | d -= learning_rate * d.grad
97 |
98 | # Manually zero the gradients after updating weights
99 | a.grad = None
100 | b.grad = None
101 | c.grad = None
102 | d.grad = None
103 |
104 | print(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')
105 |
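106 | ###############################################################
107 | # A custom backward is easy to get wrong, so it is worth checking it
108 | # numerically. torch.autograd.gradcheck compares our analytic gradient
109 | # against finite differences (it expects double-precision inputs).
110 |
111 | from torch.autograd import gradcheck
112 |
113 | test_input = torch.randn(20, dtype=torch.double, requires_grad=True)
114 | print(gradcheck(LegendrePolynomial3.apply, (test_input,), eps=1e-6, atol=1e-4))  # True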
--------------------------------------------------------------------------------
/beginner_source/examples_nn/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/beginner_source/examples_nn/README.txt
--------------------------------------------------------------------------------
/beginner_source/examples_nn/dynamic_net.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: Control Flow + Weight Sharing
4 | --------------------------------------
5 |
6 | To showcase the power of PyTorch dynamic graphs, we will implement a very strange
7 | model: a polynomial whose order is chosen randomly between 3 and 5 on each
8 | forward pass, reusing the same weight multiple times to compute the fourth
9 | and fifth order terms.
10 | """
11 | import random
12 | import torch
13 | import math
14 |
15 |
16 | class DynamicNet(torch.nn.Module):
17 | def __init__(self):
18 | """
19 | In the constructor we instantiate five parameters and assign them as members.
20 | """
21 | super().__init__()
22 | self.a = torch.nn.Parameter(torch.randn(()))
23 | self.b = torch.nn.Parameter(torch.randn(()))
24 | self.c = torch.nn.Parameter(torch.randn(()))
25 | self.d = torch.nn.Parameter(torch.randn(()))
26 | self.e = torch.nn.Parameter(torch.randn(()))
27 |
28 | def forward(self, x):
29 | """
30 | For the forward pass of the model, we randomly choose the polynomial
31 | order (3, 4, or 5) and reuse the e parameter to compute the fourth and fifth order terms.
32 |
33 | Since each forward pass builds a dynamic computation graph, we can use normal
34 | Python control-flow operators like loops or conditional statements when
35 | defining the forward pass of the model.
36 |
37 | Here we also see that it is perfectly safe to reuse the same parameter many
38 | times when defining a computational graph.
39 | """
40 | y = self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
41 | for exp in range(4, random.randint(4, 6)):
42 | y = y + self.e * x ** exp
43 | return y
44 |
45 | def string(self):
46 | """
47 | Just like any class in Python, you can also define custom methods on PyTorch modules
48 | """
49 | return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3 + {self.e.item()} x^4 ? + {self.e.item()} x^5 ?'
50 |
51 |
52 | # Create Tensors to hold input and outputs.
53 | x = torch.linspace(-math.pi, math.pi, 2000)
54 | y = torch.sin(x)
55 |
56 | # Construct our model by instantiating the class defined above
57 | model = DynamicNet()
58 |
59 | # Construct our loss function and an Optimizer. Training this strange model with
60 | # vanilla stochastic gradient descent is tough, so we use momentum
61 | criterion = torch.nn.MSELoss(reduction='sum')
62 | optimizer = torch.optim.SGD(model.parameters(), lr=1e-8, momentum=0.9)
63 | for t in range(30000):
64 | # Forward pass: Compute predicted y by passing x to the model
65 | y_pred = model(x)
66 |
67 | # Compute and print loss
68 | loss = criterion(y_pred, y)
69 | if t % 2000 == 1999:
70 | print(t, loss.item())
71 |
72 | # Zero gradients, perform a backward pass, and update the weights.
73 | optimizer.zero_grad()
74 | loss.backward()
75 | optimizer.step()
76 |
77 | print(f'Result: {model.string()}')
78 |
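79 | ###############################################################
80 | # Because the forward pass samples the polynomial order at run time,
81 | # repeated calls on the same input can return different values:
82 |
83 | with torch.no_grad():
84 |     print(model(x[:1]).item(), model(x[:1]).item(), model(x[:1]).item())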
--------------------------------------------------------------------------------
/beginner_source/examples_nn/polynomial_module.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: Custom nn Modules
4 | --------------------------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance.
8 |
9 | This implementation defines the model as a custom Module subclass. Whenever you
10 | want a model more complex than a simple sequence of existing Modules you will
11 | need to define your model this way.
12 | """
13 | import torch
14 | import math
15 |
16 |
17 | class Polynomial3(torch.nn.Module):
18 | def __init__(self):
19 | """
20 | In the constructor we instantiate four parameters and assign them as
21 | member parameters.
22 | """
23 | super().__init__()
24 | self.a = torch.nn.Parameter(torch.randn(()))
25 | self.b = torch.nn.Parameter(torch.randn(()))
26 | self.c = torch.nn.Parameter(torch.randn(()))
27 | self.d = torch.nn.Parameter(torch.randn(()))
28 |
29 | def forward(self, x):
30 | """
31 | In the forward function we accept a Tensor of input data and we must return
32 | a Tensor of output data. We can use Modules defined in the constructor as
33 | well as arbitrary operators on Tensors.
34 | """
35 | return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
36 |
37 | def string(self):
38 | """
39 | Just like any class in Python, you can also define custom methods on PyTorch modules
40 | """
41 | return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3'
42 |
43 |
44 | # Create Tensors to hold input and outputs.
45 | x = torch.linspace(-math.pi, math.pi, 2000)
46 | y = torch.sin(x)
47 |
48 | # Construct our model by instantiating the class defined above
49 | model = Polynomial3()
50 |
51 | # Construct our loss function and an Optimizer. The call to model.parameters()
52 | # in the SGD constructor will contain the learnable parameters (defined
53 | # with torch.nn.Parameter) which are members of the model.
54 | criterion = torch.nn.MSELoss(reduction='sum')
55 | optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
56 | for t in range(2000):
57 | # Forward pass: Compute predicted y by passing x to the model
58 | y_pred = model(x)
59 |
60 | # Compute and print loss
61 | loss = criterion(y_pred, y)
62 | if t % 100 == 99:
63 | print(t, loss.item())
64 |
65 | # Zero gradients, perform a backward pass, and update the weights.
66 | optimizer.zero_grad()
67 | loss.backward()
68 | optimizer.step()
69 |
70 | print(f'Result: {model.string()}')
71 |
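72 | ###############################################################
73 | # The parameters registered in the constructor are exactly what
74 | # model.parameters() exposed to the optimizer above; we can list them
75 | # by name:
76 |
77 | for name, param in model.named_parameters():
78 |     print(name, param.item())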
--------------------------------------------------------------------------------
/beginner_source/examples_nn/polynomial_nn.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: nn
4 | -----------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance.
8 |
9 | This implementation uses the nn package from PyTorch to build the network.
10 | PyTorch autograd makes it easy to define computational graphs and take gradients,
11 | but raw autograd can be a bit too low-level for defining complex neural networks;
12 | this is where the nn package can help. The nn package defines a set of Modules,
13 | which you can think of as a neural network layer that produces output from
14 | input and may have some trainable weights.
15 | """
16 | import torch
17 | import math
18 |
19 |
20 | # Create Tensors to hold input and outputs.
21 | x = torch.linspace(-math.pi, math.pi, 2000)
22 | y = torch.sin(x)
23 |
24 | # For this example, the output y is a linear function of (x, x^2, x^3), so
25 | # we can consider it as a linear layer neural network. Let's prepare the
26 | # tensor (x, x^2, x^3).
27 | p = torch.tensor([1, 2, 3])
28 | xx = x.unsqueeze(-1).pow(p)
29 |
30 | # In the above code, x.unsqueeze(-1) has shape (2000, 1), and p has shape
31 | # (3,), for this case, broadcasting semantics will apply to obtain a tensor
32 | # of shape (2000, 3)
33 |
34 | # Use the nn package to define our model as a sequence of layers. nn.Sequential
35 | # is a Module which contains other Modules, and applies them in sequence to
36 | # produce its output. The Linear Module computes output from input using a
37 | # linear function, and holds internal Tensors for its weight and bias.
38 | # The Flatten layer flattens the output of the linear layer to a 1D tensor,
39 | # to match the shape of `y`.
40 | model = torch.nn.Sequential(
41 | torch.nn.Linear(3, 1),
42 | torch.nn.Flatten(0, 1)
43 | )
44 |
45 | # The nn package also contains definitions of popular loss functions; in this
46 | # case we will use Mean Squared Error (MSE) as our loss function.
47 | loss_fn = torch.nn.MSELoss(reduction='sum')
48 |
49 | learning_rate = 1e-6
50 | for t in range(2000):
51 |
52 | # Forward pass: compute predicted y by passing x to the model. Module objects
53 | # override the __call__ operator so you can call them like functions. When
54 | # doing so you pass a Tensor of input data to the Module and it produces
55 | # a Tensor of output data.
56 | y_pred = model(xx)
57 |
58 | # Compute and print loss. We pass Tensors containing the predicted and true
59 | # values of y, and the loss function returns a Tensor containing the
60 | # loss.
61 | loss = loss_fn(y_pred, y)
62 | if t % 100 == 99:
63 | print(t, loss.item())
64 |
65 | # Zero the gradients before running the backward pass.
66 | model.zero_grad()
67 |
68 | # Backward pass: compute gradient of the loss with respect to all the learnable
69 | # parameters of the model. Internally, the parameters of each Module are stored
70 | # in Tensors with requires_grad=True, so this call will compute gradients for
71 | # all learnable parameters in the model.
72 | loss.backward()
73 |
74 | # Update the weights using gradient descent. Each parameter is a Tensor, so
75 | # we can access its gradients like we did before.
76 | with torch.no_grad():
77 | for param in model.parameters():
78 | param -= learning_rate * param.grad
79 |
80 | # You can access the first layer of `model` like accessing the first item of a list
81 | linear_layer = model[0]
82 |
83 | # For linear layer, its parameters are stored as `weight` and `bias`.
84 | print(f'Result: y = {linear_layer.bias.item()} + {linear_layer.weight[:, 0].item()} x + {linear_layer.weight[:, 1].item()} x^2 + {linear_layer.weight[:, 2].item()} x^3')
85 |
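86 | ###############################################################
87 | # A quick check of the broadcasting described earlier: x.unsqueeze(-1)
88 | # has shape (2000, 1) and p has shape (3,), so xx has shape (2000, 3).
89 |
90 | print(x.unsqueeze(-1).shape, p.shape, xx.shape)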
--------------------------------------------------------------------------------
/beginner_source/examples_nn/polynomial_optim.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: optim
4 | --------------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance.
8 |
9 | This implementation uses the nn package from PyTorch to build the network.
10 |
11 | Rather than manually updating the weights of the model as we have been doing,
12 | we use the optim package to define an Optimizer that will update the weights
13 | for us. The optim package defines many optimization algorithms that are commonly
14 | used for deep learning, including SGD+momentum, RMSProp, Adam, etc.
15 | """
16 | import torch
17 | import math
18 |
19 |
20 | # Create Tensors to hold input and outputs.
21 | x = torch.linspace(-math.pi, math.pi, 2000)
22 | y = torch.sin(x)
23 |
24 | # Prepare the input tensor (x, x^2, x^3).
25 | p = torch.tensor([1, 2, 3])
26 | xx = x.unsqueeze(-1).pow(p)
27 |
28 | # Use the nn package to define our model and loss function.
29 | model = torch.nn.Sequential(
30 | torch.nn.Linear(3, 1),
31 | torch.nn.Flatten(0, 1)
32 | )
33 | loss_fn = torch.nn.MSELoss(reduction='sum')
34 |
35 | # Use the optim package to define an Optimizer that will update the weights of
36 | # the model for us. Here we will use RMSprop; the optim package contains many other
37 | # optimization algorithms. The first argument to the RMSprop constructor tells the
38 | # optimizer which Tensors it should update.
39 | learning_rate = 1e-3
40 | optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
41 | for t in range(2000):
42 | # Forward pass: compute predicted y by passing x to the model.
43 | y_pred = model(xx)
44 |
45 | # Compute and print loss.
46 | loss = loss_fn(y_pred, y)
47 | if t % 100 == 99:
48 | print(t, loss.item())
49 |
50 | # Before the backward pass, use the optimizer object to zero all of the
51 | # gradients for the variables it will update (which are the learnable
52 | # weights of the model). This is because by default, gradients are
53 | # accumulated in buffers (i.e., not overwritten) whenever .backward()
54 | # is called. Check out the docs of torch.autograd.backward for more details.
55 | optimizer.zero_grad()
56 |
57 | # Backward pass: compute gradient of the loss with respect to model
58 | # parameters
59 | loss.backward()
60 |
61 | # Calling the step function on an Optimizer makes an update to its
62 | # parameters
63 | optimizer.step()
64 |
65 |
66 | linear_layer = model[0]
67 | print(f'Result: y = {linear_layer.bias.item()} + {linear_layer.weight[:, 0].item()} x + {linear_layer.weight[:, 1].item()} x^2 + {linear_layer.weight[:, 2].item()} x^3')
68 |
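69 | ###############################################################
70 | # As noted above, the optim package provides many alternatives; for
71 | # example, swapping RMSprop for Adam is a one-line change:
72 |
73 | # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)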
--------------------------------------------------------------------------------
/beginner_source/examples_tensor/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ageron/tutorials/6fb4757ad2ce0b3bcdf67c3a840672b4038443ac/beginner_source/examples_tensor/README.txt
--------------------------------------------------------------------------------
/beginner_source/examples_tensor/polynomial_numpy.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Warm-up: numpy
4 | --------------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance.
8 |
9 | This implementation uses numpy to manually compute the forward pass, loss, and
10 | backward pass.
11 |
12 | A numpy array is a generic n-dimensional array; it does not know anything about
13 | deep learning or gradients or computational graphs, and is just a way to perform
14 | generic numeric computations.
15 | """
16 | import numpy as np
17 | import math
18 |
19 | # Create random input and output data
20 | x = np.linspace(-math.pi, math.pi, 2000)
21 | y = np.sin(x)
22 |
23 | # Randomly initialize weights
24 | a = np.random.randn()
25 | b = np.random.randn()
26 | c = np.random.randn()
27 | d = np.random.randn()
28 |
29 | learning_rate = 1e-6
30 | for t in range(2000):
31 | # Forward pass: compute predicted y
32 | # y = a + b x + c x^2 + d x^3
33 | y_pred = a + b * x + c * x ** 2 + d * x ** 3
34 |
35 | # Compute and print loss
36 | loss = np.square(y_pred - y).sum()
37 | if t % 100 == 99:
38 | print(t, loss)
39 |
40 | # Backprop to compute gradients of a, b, c, d with respect to loss
41 | grad_y_pred = 2.0 * (y_pred - y)
42 | grad_a = grad_y_pred.sum()
43 | grad_b = (grad_y_pred * x).sum()
44 | grad_c = (grad_y_pred * x ** 2).sum()
45 | grad_d = (grad_y_pred * x ** 3).sum()
46 |
47 | # Update weights
48 | a -= learning_rate * grad_a
49 | b -= learning_rate * grad_b
50 | c -= learning_rate * grad_c
51 | d -= learning_rate * grad_d
52 |
53 | print(f'Result: y = {a} + {b} x + {c} x^2 + {d} x^3')
54 |
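55 | # The gradient formulas above follow from the chain rule: with
56 | # loss = sum((y_pred - y) ** 2) we get d(loss)/d(y_pred) = 2 * (y_pred - y),
57 | # and, for example, d(y_pred)/dc = x ** 2, hence
58 | # grad_c = (grad_y_pred * x ** 2).sum().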
--------------------------------------------------------------------------------
/beginner_source/examples_tensor/polynomial_tensor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | PyTorch: Tensors
4 | ----------------
5 |
6 | A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
7 | to :math:`\pi` by minimizing squared Euclidean distance.
8 |
9 | This implementation uses PyTorch tensors to manually compute the forward pass,
10 | loss, and backward pass.
11 |
12 | A PyTorch Tensor is basically the same as a numpy array: it does not know
13 | anything about deep learning or computational graphs or gradients, and is just
14 | a generic n-dimensional array to be used for arbitrary numeric computation.
15 |
16 | The biggest difference between a numpy array and a PyTorch Tensor is that
17 | a PyTorch Tensor can run on either CPU or GPU. To run operations on the GPU,
18 | just create the Tensor on a CUDA device.
19 | """
20 |
21 | import torch
22 | import math
23 |
24 |
25 | dtype = torch.float
26 | device = torch.device("cpu")
27 | # device = torch.device("cuda:0") # Uncomment this to run on GPU
28 |
29 | # Create random input and output data
30 | x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
31 | y = torch.sin(x)
32 |
33 | # Randomly initialize weights
34 | a = torch.randn((), device=device, dtype=dtype)
35 | b = torch.randn((), device=device, dtype=dtype)
36 | c = torch.randn((), device=device, dtype=dtype)
37 | d = torch.randn((), device=device, dtype=dtype)
38 |
39 | learning_rate = 1e-6
40 | for t in range(2000):
41 | # Forward pass: compute predicted y
42 | y_pred = a + b * x + c * x ** 2 + d * x ** 3
43 |
44 | # Compute and print loss
45 | loss = (y_pred - y).pow(2).sum().item()
46 | if t % 100 == 99:
47 | print(t, loss)
48 |
49 | # Backprop to compute gradients of a, b, c, d with respect to loss
50 | grad_y_pred = 2.0 * (y_pred - y)
51 | grad_a = grad_y_pred.sum()
52 | grad_b = (grad_y_pred * x).sum()
53 | grad_c = (grad_y_pred * x ** 2).sum()
54 | grad_d = (grad_y_pred * x ** 3).sum()
55 |
56 | # Update weights using gradient descent
57 | a -= learning_rate * grad_a
58 | b -= learning_rate * grad_b
59 | c -= learning_rate * grad_c
60 | d -= learning_rate * grad_d
61 |
62 |
63 | print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
64 |
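65 | ###############################################################
66 | # To run the same computation on a GPU, only the device needs to change;
67 | # for example:
68 |
69 | # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")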
--------------------------------------------------------------------------------
/beginner_source/former_torchies/README.txt:
--------------------------------------------------------------------------------
1 | PyTorch for former Torch users
2 | ------------------------------
3 |
4 | 1. tensor_tutorial_old.py
5 | Tensors
6 | https://pytorch.org/tutorials/beginner/former_torchies/tensor_tutorial_old.html
7 |
8 | 2. autograd_tutorial_old.py
9 | Autograd
10 | https://pytorch.org/tutorials/beginner/former_torchies/autograd_tutorial_old.html
11 |
12 | 3. nnft_tutorial.py
13 | nn package
14 | https://pytorch.org/tutorials/beginner/former_torchies/nnft_tutorial.html
15 |
16 | 4. parallelism_tutorial.py
17 | Multi-GPU examples
18 | https://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html
19 |
--------------------------------------------------------------------------------
/beginner_source/former_torchies_tutorial.rst:
--------------------------------------------------------------------------------
1 | PyTorch for Former Torch Users
2 | ------------------------------
3 | **Author**: `Soumith Chintala `_
4 |
5 | In this tutorial, you will learn the following:
6 |
7 | 1. Using torch Tensors, and important differences from (Lua)Torch
8 | 2. Using the autograd package
9 | 3. Building neural networks
10 |
11 | - Building a ConvNet
12 | - Building a Recurrent Net
13 |
14 | 4. Using multiple GPUs
15 |
16 |
17 | .. toctree::
18 | :hidden:
19 |
20 | /beginner/former_torchies/tensor_tutorial_old
21 | /beginner/former_torchies/autograd_tutorial_old
22 | /beginner/former_torchies/nnft_tutorial
23 | /beginner/former_torchies/parallelism_tutorial
24 |
25 | .. galleryitem:: /beginner/former_torchies/tensor_tutorial_old.py
26 | :figure: /_static/img/tensor_illustration_flat.png
27 |
28 | .. galleryitem:: /beginner/former_torchies/autograd_tutorial_old.py
29 |
30 | .. galleryitem:: /beginner/former_torchies/nnft_tutorial.py
31 | :figure: /_static/img/torch-nn-vs-pytorch-nn.png
32 |
33 | .. galleryitem:: /beginner/former_torchies/parallelism_tutorial.py
34 |
35 | .. raw:: html
36 |
37 |
38 |
--------------------------------------------------------------------------------
/beginner_source/hybrid_frontend/README.txt:
--------------------------------------------------------------------------------
1 | Hybrid Frontend Tutorials
2 | -------------------------
3 |
4 | 1. learning_hybrid_frontend_through_example_tutorial.py
5 | Learning Hybrid Frontend Through Example
6 | https://pytorch.org/tutorials/beginner/hybrid_frontend/learning_hybrid_frontend_through_example_tutorial.html
7 |
8 | 2. introduction_to_hybrid_frontend_tutorial.py
9 | Introduction to Hybrid Frontend
10 | https://pytorch.org/tutorials/beginner/hybrid_frontend/introduction_to_hybrid_frontend_tutorial.html
11 |
--------------------------------------------------------------------------------
/beginner_source/hybrid_frontend_tutorial.rst:
--------------------------------------------------------------------------------
1 | Hybrid Frontend Tutorials
2 | -------------------------
3 | **Authors**: `Nathan Inkawhich `_ and `Matthew Inkawhich `_
4 |
5 | In this set of tutorials, you will learn the following:
6 |
7 | 1. What the hybrid frontend is and the suggested workflow
8 | 2. Basic syntax
9 | 3. How to transition an eager model to graph mode
10 |
11 |
12 | .. toctree::
13 | :hidden:
14 |
15 | /beginner/hybrid_frontend/learning_hybrid_frontend_through_example_tutorial
16 |
17 | .. galleryitem:: /beginner/hybrid_frontend/learning_hybrid_frontend_through_example_tutorial.py
18 |
19 | .. raw:: html
20 |
21 |
22 |
--------------------------------------------------------------------------------
/beginner_source/nlp/README.txt:
--------------------------------------------------------------------------------
1 | Deep Learning for NLP with Pytorch
2 | ----------------------------------
3 |
4 | 1. pytorch_tutorial.py
5 | Introduction to PyTorch
6 | https://pytorch.org/tutorials/beginner/nlp/pytorch_tutorial.html
7 |
8 | 2. deep_learning_tutorial.py
9 | Deep Learning with PyTorch
10 | https://pytorch.org/tutorials/beginner/nlp/deep_learning_tutorial.html
11 |
12 | 3. word_embeddings_tutorial.py
13 | Word Embeddings: Encoding Lexical Semantics
14 | https://pytorch.org/tutorials/beginner/nlp/word_embeddings_tutorial.html
15 |
16 | 4. sequence_models_tutorial.py
17 | Sequence Models and Long-Short Term Memory Networks
18 | https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html
19 |
20 | 5. advanced_tutorial.py
21 | Advanced: Making Dynamic Decisions and the Bi-LSTM CRF
22 | https://pytorch.org/tutorials/beginner/nlp/advanced_tutorial.html
--------------------------------------------------------------------------------
/build.sh:
--------------------------------------------------------------------------------
1 | # TODO: make sure pytorch installed
2 | pip install -r requirements.txt
3 | make docs
4 |
--------------------------------------------------------------------------------
/cleanup.sh:
--------------------------------------------------------------------------------
1 | rm -rf __pycache__/ _build/ advanced/ beginner/ intermediate/
2 |
--------------------------------------------------------------------------------
/intermediate_source/README.txt:
--------------------------------------------------------------------------------
1 | Intermediate tutorials
2 | ----------------------
3 |
4 | 1. tensorboard_tutorial.py
5 | Visualizing Models, Data, and Training with TensorBoard
6 | https://pytorch.org/tutorials/beginner/tensorboard_tutorial.html
7 |
8 | 2. char_rnn_classification_tutorial.py
9 | Classifying Names with a Character-Level RNN
10 | https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
11 |
12 | 3. char_rnn_generation_tutorial.py
13 | Generating Names with a Character-Level RNN
14 | https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
15 |
16 | 4. seq2seq_translation_tutorial.py
17 | Translation with a Sequence to Sequence Network and Attention
18 | https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
19 |
20 | 5. reinforcement_q_learning.py
21 | Reinforcement Learning (DQN) Tutorial
22 | https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
23 |
24 | 6. dist_tuto.rst
25 | Writing Distributed Applications with PyTorch
26 | https://pytorch.org/tutorials/intermediate/dist_tuto.html
27 |
28 | 7. spatial_transformer_tutorial
29 | Spatial Transformer Networks Tutorial
30 | https://pytorch.org/tutorials/intermediate/spatial_transformer_tutorial.html
31 |
32 | 8. flask_rest_api_tutorial.py
33 | Deploying PyTorch and Building a REST API using Flask
34 | https://pytorch.org/tutorials/beginner/flask_rest_api_tutorial.html
35 |
--------------------------------------------------------------------------------
/prototype_source/README.md:
--------------------------------------------------------------------------------
1 | # Prototype Tutorials and Recipes
2 |
3 | This directory contains tutorials and recipes demonstrating prototype features in PyTorch.
4 |
5 | **Prototype features** are not available as part of binary distributions like PyPI or Conda (except maybe behind run-time flags). To test these features we recommend, depending on the feature, building from master or using the nightly wheels that are made available on pytorch.org.
6 |
7 | These are intentionally left out of the pytorch.org/tutorials build and will not show up on the website.
8 |
9 | *Level of commitment:* We are committing to gathering high bandwidth feedback only on these features. Based on this feedback and potential further engagement between community members, we as a community will decide if we want to upgrade the level of commitment or to fail fast.
10 |
--------------------------------------------------------------------------------
/prototype_source/README.txt:
--------------------------------------------------------------------------------
1 | Prototype Tutorials
2 | -------------------
3 | 1. distributed_rpc_profiling.rst
4 | Profiling PyTorch RPC-Based Workloads
5 | https://github.com/pytorch/tutorials/blob/release/1.6/prototype_source/distributed_rpc_profiling.rst
6 |
7 | 2. graph_mode_static_quantization_tutorial.py
8 | Graph Mode Post Training Static Quantization in PyTorch
9 | https://pytorch.org/tutorials/prototype/graph_mode_static_quantization_tutorial.html
10 |
11 | 3. graph_mode_dynamic_bert_tutorial.rst
12 | Graph Mode Dynamic Quantization on BERT
13 | https://github.com/pytorch/tutorials/blob/master/prototype_source/graph_mode_dynamic_bert_tutorial.rst
14 |
15 | 4. numeric_suite_tutorial.py
16 | PyTorch Numeric Suite Tutorial
17 | https://github.com/pytorch/tutorials/blob/master/prototype_source/numeric_suite_tutorial.py
18 |
19 | 5. torchscript_freezing.py
20 | Model Freezing in TorchScript
21 | https://github.com/pytorch/tutorials/blob/master/prototype_source/torchscript_freezing.py
22 |
23 | 6. vulkan_workflow.rst
24 | Vulkan Backend User Workflow
25 | https://pytorch.org/tutorials/intermediate/vulkan_workflow.html
26 |
27 | 7. fx_graph_mode_ptq_static.rst
28 | FX Graph Mode Post Training Static Quantization
29 | https://pytorch.org/tutorials/prototype/fx_graph_mode_ptq_static.html
30 |
31 | 8. fx_graph_mode_ptq_dynamic.py
32 | FX Graph Mode Post Training Dynamic Quantization
33 | https://pytorch.org/tutorials/prototype/fx_graph_mode_ptq_dynamic.html
34 |
35 | 9. fx_graph_mode_quant_guide.py
36 | FX Graph Mode Quantization User Guide
37 | https://pytorch.org/tutorials/prototype/fx_graph_mode_quant_guide.html
38 |
39 |
--------------------------------------------------------------------------------
/recipes_source/README.txt:
--------------------------------------------------------------------------------
1 | Recipes
2 | ------------------
3 | 1. recipes/* and recipes_index.rst
4 | PyTorch Recipes
5 | https://pytorch.org/tutorials/recipes/recipes_index.html
6 |
7 |
8 |
--------------------------------------------------------------------------------
/recipes_source/model_preparation_android.rst:
--------------------------------------------------------------------------------
1 | Model Preparation for Android Recipe
2 | =====================================
3 |
4 | This recipe demonstrates how to prepare a PyTorch MobileNet v2 image classification model for Android apps, and how to set up Android projects to use the mobile-ready model file.
5 |
6 | Introduction
7 | -----------------
8 |
9 | After a PyTorch model is trained or a pre-trained model is made available, it is normally not ready to be used in mobile apps yet. It needs to be quantized (see the `Quantization Recipe `_), converted to TorchScript so Android apps can load it, and optimized for mobile apps. Furthermore, Android apps need to be set up correctly to enable the use of PyTorch Mobile libraries, before they can load and use the model for inference.
10 |
11 | Pre-requisites
12 | -----------------
13 |
14 | PyTorch 1.6.0 or 1.7.0
15 |
16 | torchvision 0.6.0 or 0.7.0
17 |
18 | Android Studio 3.5.1 or above with NDK installed
19 |
20 | Steps
21 | -----------------
22 |
23 | 1. Get Pretrained and Quantized MobileNet v2 Model
24 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
25 |
26 | To get the MobileNet v2 quantized model, simply do:
27 |
28 | ::
29 |
30 | import torchvision
31 |
32 | model_quantized = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
33 |
34 | 2. Script and Optimize the Model for Mobile Apps
35 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
36 |
37 | Use either the `script` or `trace` method to convert the quantized model to the TorchScript format:
38 |
39 | ::
40 |
41 | import torch
42 |
43 | dummy_input = torch.rand(1, 3, 224, 224)
44 | torchscript_model = torch.jit.trace(model_quantized, dummy_input)
45 |
46 | or
47 |
48 | ::
49 |
50 | torchscript_model = torch.jit.script(model_quantized)
51 |
52 |
53 | .. warning::
54 | The `trace` method only scripts the code path executed during the trace, so it will not work properly for models that include decision branches. See the `Script and Optimize for Mobile Recipe `_ for more details.
55 |
56 | Then optimize the TorchScript formatted model for mobile and save it:
57 |
58 | ::
59 |
60 | from torch.utils.mobile_optimizer import optimize_for_mobile
61 | torchscript_model_optimized = optimize_for_mobile(torchscript_model)
62 | torch.jit.save(torchscript_model_optimized, "mobilenetv2_quantized.pt")
63 |
64 | In total, the two steps above take 7 or 8 lines of code, depending on whether the `script` or `trace` method is used, and give us a model ready to be added to mobile apps.
65 |
66 | 3. Add the Model and PyTorch Library on Android
67 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
68 |
69 | * In your current or a new Android Studio project, open the build.gradle file, and add the following two lines (the second one is required only if you plan to use a TorchVision model):
70 |
71 | ::
72 |
73 | implementation 'org.pytorch:pytorch_android:1.6.0'
74 | implementation 'org.pytorch:pytorch_android_torchvision:1.6.0'
75 |
76 | * Drag and drop the model file `mobilenetv2_quantized.pt` to your project's assets folder.
77 |
78 | That's it! Now you can build your Android app with the PyTorch library and the model ready to use. To actually write code to use the model, refer to the PyTorch Mobile `Android Quickstart with a HelloWorld Example `_ and `Android Hackathon Example `_.
79 |
80 | Learn More
81 | -----------------
82 |
83 | 1. `PyTorch Mobile site `_
84 |
85 | 2. `Introduction to TorchScript `_
86 |
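87 | For convenience, here is the Python side of steps 1 and 2 combined into a
88 | single snippet (using the `trace` variant shown above):
89 |
90 | ::
91 |
92 | import torch
93 | import torchvision
94 | from torch.utils.mobile_optimizer import optimize_for_mobile
95 |
96 | model_quantized = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
97 | dummy_input = torch.rand(1, 3, 224, 224)
98 | torchscript_model = torch.jit.trace(model_quantized, dummy_input)
99 | torchscript_model_optimized = optimize_for_mobile(torchscript_model)
100 | torch.jit.save(torchscript_model_optimized, "mobilenetv2_quantized.pt")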
--------------------------------------------------------------------------------
/recipes_source/ptmobile_recipes_summary.rst:
--------------------------------------------------------------------------------
1 | Summary of PyTorch Mobile Recipes
2 | =====================================
3 |
4 | This summary provides a top level overview of recipes for PyTorch Mobile to help developers choose which recipes to follow for their PyTorch-powered mobile app development.
5 |
6 | Introduction
7 | ----------------
8 |
9 | When a PyTorch model is trained or retrained, or when a pre-trained model is available, for mobile deployment, follow the recipes outlined in this summary so mobile apps can successfully use the model.
10 |
11 | Pre-requisites
12 | ----------------
13 |
14 | PyTorch 1.6.0 or 1.7.0
15 |
16 | (Optional) torchvision 0.6.0 or 0.7.0
17 |
18 | For iOS development: Xcode 11 or 12
19 |
20 | For Android development: Android Studio 3.5.1 or above (with NDK installed); or Android SDK, NDK, Gradle, JDK.
21 |
22 | New Recipes for PyTorch Mobile
23 | --------------------------------
24 |
25 | * (Recommended) To fuse a list of PyTorch modules into a single module to reduce the model size before quantization, read the `Fuse Modules recipe `_.
26 |
27 | * (Recommended) To reduce the model size and make it run faster without losing much on accuracy, read the `Quantization Recipe `_.
28 |
29 | * (Must) To convert the model to TorchScript and (optionally) optimize it for mobile apps, read the `Script and Optimize for Mobile Recipe `_.
30 |
31 | * (Must for iOS development) To add the model in an iOS project and use PyTorch pod for iOS, read the `Model preparation for iOS Recipe `_.
32 |
33 | * (Must for Android development) To add the model in an Android project and use the PyTorch library for Android, read the `Model preparation for Android Recipe `_.
34 |
35 |
36 | Learn More
37 | -----------------
38 |
39 | 1. `PyTorch Mobile site `_
40 | 2. `PyTorch Mobile Performance Recipes `_
41 |
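42 | Putting the recipes together, the end-to-end preparation flow looks
43 | roughly like this sketch (assuming `model` is an already-quantized
44 | PyTorch model):
45 |
46 | ::
47 |
48 | import torch
49 | from torch.utils.mobile_optimizer import optimize_for_mobile
50 |
51 | # (Must) convert to TorchScript, then optimize and save for mobile
52 | scripted_model = torch.jit.script(model)
53 | optimized_model = optimize_for_mobile(scripted_model)
54 | torch.jit.save(optimized_model, "model_mobile.pt")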
--------------------------------------------------------------------------------
/recipes_source/recipes/README.txt:
--------------------------------------------------------------------------------
1 | PyTorch Recipes
2 | ---------------------------------------------
3 | 1. loading_data_recipe.py
4 | Loading Data in PyTorch
5 | https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe.html
6 |
7 | 2. defining_a_neural_network.py
8 | Defining a Neural Network in PyTorch
9 | https://pytorch.org/tutorials/recipes/recipes/defining_a_neural_network.html
10 |
11 | 3. what_is_state_dict.py
12 | What is a state_dict in PyTorch
13 | https://pytorch.org/tutorials/recipes/recipes/what_is_state_dict.html
14 |
15 | 4. saving_and_loading_models_for_inference.py
16 | Saving and loading models for inference in PyTorch
17 | https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_models_for_inference.html
18 |
19 | 5. custom_dataset_transforms_loader.py
20 | Developing Custom PyTorch Dataloaders
21 | https://pytorch.org/tutorials/recipes/recipes/custom_dataset_transforms_loader.html
22 |
23 |
24 | 6. Captum_Recipe.py
25 | Model Interpretability using Captum
26 | https://pytorch.org/tutorials/recipes/recipes/Captum_Recipe.html
27 |
28 | 7. dynamic_quantization.py
29 | Dynamic Quantization
30 | https://pytorch.org/tutorials/recipes/recipes/dynamic_quantization.html
31 |
32 | 8. save_load_across_devices.py
33 | Saving and loading models across devices in PyTorch
34 | https://pytorch.org/tutorials/recipes/recipes/save_load_across_devices.html
35 |
36 | 9. saving_and_loading_a_general_checkpoint.py
37 | Saving and loading a general checkpoint in PyTorch
38 | https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html
39 |
40 | 10. saving_multiple_models_in_one_file.py
41 | Saving and loading multiple models in one file using PyTorch
42 | https://pytorch.org/tutorials/recipes/recipes/saving_multiple_models_in_one_file.html
43 |
44 | 11. warmstarting_model_using_parameters_from_a_different_model.py
45 | Warmstarting models using parameters from a different model
46 | https://pytorch.org/tutorials/recipes/recipes/warmstarting_model_using_parameters_from_a_different_model.html
47 |
48 | 12. zeroing_out_gradients.py
49 | Zeroing out gradients
50 | https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
51 |
52 | 13. mobile_perf.py
53 | PyTorch Mobile Performance Recipes
54 | https://pytorch.org/tutorials/recipes/mobile_perf.html
55 |
56 | 14. amp_recipe.py
57 | Automatic Mixed Precision
58 | https://pytorch.org/tutorials/recipes/amp_recipe.html
59 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Refer to ./.jenkins/build.sh for tutorial build instructions
2 |
3 | sphinx==1.8.2
4 | sphinx-gallery==0.3.1
5 | sphinx-copybutton
6 | tqdm
7 | numpy
8 | matplotlib
9 | torch
10 | torchvision
11 | torchtext
12 | torchaudio
13 | PyHamcrest
14 | bs4
15 | awscli==1.16.35
16 | flask
17 | spacy==2.3.2
18 | ray[tune]
19 |
20 | # PyTorch Theme
21 | -e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
22 |
23 | ipython
24 |
25 | # to run examples
26 | boto3
27 | pandas
28 | requests
29 | scikit-image
30 | scipy
31 | pillow==8.1.1
32 | wget
33 | gym
34 | gym-super-mario-bros==7.3.0
35 | timm
36 |
--------------------------------------------------------------------------------
/runtime.txt:
--------------------------------------------------------------------------------
1 | 3.6
2 |
--------------------------------------------------------------------------------