├── .gitignore
├── LICENSE
├── README.md
├── doc
├── en
│ ├── Makefile
│ ├── make.bat
│ ├── requirements.txt
│ └── source
│ │ ├── _static
│ │ ├── css
│ │ │ └── pytorch_theme.css
│ │ └── img
│ │ │ ├── dynamic_graph.gif
│ │ │ ├── pytorch-logo-dark.png
│ │ │ ├── pytorch-logo-dark.svg
│ │ │ ├── pytorch-logo-flame.png
│ │ │ ├── pytorch-logo-flame.svg
│ │ │ └── tensor_illustration.png
│ │ ├── _templates
│ │ └── layout.html
│ │ ├── autograd.rst
│ │ ├── conf.py
│ │ ├── cuda.rst
│ │ ├── data.rst
│ │ ├── distributed.rst
│ │ ├── distributions.rst
│ │ ├── ffi.rst
│ │ ├── index.rst
│ │ ├── legacy.rst
│ │ ├── model_zoo.rst
│ │ ├── multiprocessing.rst
│ │ ├── nn.rst
│ │ ├── notes
│ │ ├── autograd.rst
│ │ ├── broadcasting.rst
│ │ ├── cuda.rst
│ │ ├── extending.rst
│ │ ├── multiprocessing.rst
│ │ └── serialization.rst
│ │ ├── onnx.rst
│ │ ├── optim.rst
│ │ ├── sparse.rst
│ │ ├── storage.rst
│ │ ├── tensors.rst
│ │ ├── torch.rst
│ │ └── torchvision
│ │ ├── datasets.rst
│ │ ├── index.rst
│ │ ├── models.rst
│ │ ├── transforms.rst
│ │ └── utils.rst
└── zh
│ ├── Makefile
│ ├── make.bat
│ ├── requirements.txt
│ └── source
│ ├── _static
│ ├── css
│ │ └── pytorch_theme.css
│ └── img
│ │ ├── dynamic_graph.gif
│ │ ├── pytorch-logo-dark.png
│ │ ├── pytorch-logo-dark.svg
│ │ ├── pytorch-logo-flame.png
│ │ ├── pytorch-logo-flame.svg
│ │ └── tensor_illustration.png
│ ├── _templates
│ └── layout.html
│ ├── apachecn-learning-group.rst
│ ├── autograd.rst
│ ├── code
│ ├── torch
│ │ ├── __init__.py
│ │ ├── _six.py
│ │ ├── _storage_docs.py
│ │ ├── _tensor_docs.py
│ │ ├── _tensor_str.py
│ │ ├── _thnn
│ │ │ ├── __init__.py
│ │ │ └── utils.py
│ │ ├── _torch_docs.py
│ │ ├── _utils.py
│ │ ├── autograd
│ │ │ ├── __init__.py
│ │ │ ├── _functions
│ │ │ │ ├── __init__.py
│ │ │ │ ├── basic_ops.py
│ │ │ │ ├── blas.py
│ │ │ │ ├── compare.py
│ │ │ │ ├── initializers.py
│ │ │ │ ├── linalg.py
│ │ │ │ ├── pointwise.py
│ │ │ │ ├── reduce.py
│ │ │ │ ├── stochastic.py
│ │ │ │ ├── tensor.py
│ │ │ │ └── utils.py
│ │ │ ├── function.py
│ │ │ ├── gradcheck.py
│ │ │ ├── profiler.py
│ │ │ ├── stochastic_function.py
│ │ │ └── variable.py
│ │ ├── backends
│ │ │ ├── __init__.py
│ │ │ └── cudnn
│ │ │ │ ├── __init__.py
│ │ │ │ └── rnn.py
│ │ ├── contrib
│ │ │ ├── __init__.py
│ │ │ └── _graph_vis.py
│ │ ├── cuda
│ │ │ ├── __init__.py
│ │ │ ├── comm.py
│ │ │ ├── error.py
│ │ │ ├── nccl.py
│ │ │ ├── nvtx.py
│ │ │ ├── profiler.py
│ │ │ ├── random.py
│ │ │ ├── sparse.py
│ │ │ └── streams.py
│ │ ├── distributed
│ │ │ ├── __init__.py
│ │ │ └── remote_types.py
│ │ ├── distributions.py
│ │ ├── for_onnx
│ │ │ └── __init__.py
│ │ ├── functional.py
│ │ ├── jit
│ │ │ ├── __init__.py
│ │ │ └── passes
│ │ │ │ ├── __init__.py
│ │ │ │ └── inplace.py
│ │ ├── legacy
│ │ │ ├── __init__.py
│ │ │ ├── nn
│ │ │ │ ├── Abs.py
│ │ │ │ ├── AbsCriterion.py
│ │ │ │ ├── Add.py
│ │ │ │ ├── AddConstant.py
│ │ │ │ ├── BCECriterion.py
│ │ │ │ ├── BatchNormalization.py
│ │ │ │ ├── Bilinear.py
│ │ │ │ ├── CAddTable.py
│ │ │ │ ├── CDivTable.py
│ │ │ │ ├── CMul.py
│ │ │ │ ├── CMulTable.py
│ │ │ │ ├── CSubTable.py
│ │ │ │ ├── Clamp.py
│ │ │ │ ├── ClassNLLCriterion.py
│ │ │ │ ├── ClassSimplexCriterion.py
│ │ │ │ ├── Concat.py
│ │ │ │ ├── ConcatTable.py
│ │ │ │ ├── Container.py
│ │ │ │ ├── Contiguous.py
│ │ │ │ ├── Copy.py
│ │ │ │ ├── Cosine.py
│ │ │ │ ├── CosineDistance.py
│ │ │ │ ├── CosineEmbeddingCriterion.py
│ │ │ │ ├── Criterion.py
│ │ │ │ ├── CriterionTable.py
│ │ │ │ ├── CrossEntropyCriterion.py
│ │ │ │ ├── DepthConcat.py
│ │ │ │ ├── DistKLDivCriterion.py
│ │ │ │ ├── DotProduct.py
│ │ │ │ ├── Dropout.py
│ │ │ │ ├── ELU.py
│ │ │ │ ├── Euclidean.py
│ │ │ │ ├── Exp.py
│ │ │ │ ├── FlattenTable.py
│ │ │ │ ├── GradientReversal.py
│ │ │ │ ├── HardShrink.py
│ │ │ │ ├── HardTanh.py
│ │ │ │ ├── HingeEmbeddingCriterion.py
│ │ │ │ ├── Identity.py
│ │ │ │ ├── Index.py
│ │ │ │ ├── JoinTable.py
│ │ │ │ ├── L1Cost.py
│ │ │ │ ├── L1HingeEmbeddingCriterion.py
│ │ │ │ ├── L1Penalty.py
│ │ │ │ ├── LeakyReLU.py
│ │ │ │ ├── Linear.py
│ │ │ │ ├── Log.py
│ │ │ │ ├── LogSigmoid.py
│ │ │ │ ├── LogSoftMax.py
│ │ │ │ ├── LookupTable.py
│ │ │ │ ├── MM.py
│ │ │ │ ├── MSECriterion.py
│ │ │ │ ├── MV.py
│ │ │ │ ├── MarginCriterion.py
│ │ │ │ ├── MarginRankingCriterion.py
│ │ │ │ ├── MaskedSelect.py
│ │ │ │ ├── Max.py
│ │ │ │ ├── Mean.py
│ │ │ │ ├── Min.py
│ │ │ │ ├── MixtureTable.py
│ │ │ │ ├── Module.py
│ │ │ │ ├── Mul.py
│ │ │ │ ├── MulConstant.py
│ │ │ │ ├── MultiCriterion.py
│ │ │ │ ├── MultiLabelMarginCriterion.py
│ │ │ │ ├── MultiLabelSoftMarginCriterion.py
│ │ │ │ ├── MultiMarginCriterion.py
│ │ │ │ ├── Narrow.py
│ │ │ │ ├── NarrowTable.py
│ │ │ │ ├── Normalize.py
│ │ │ │ ├── PReLU.py
│ │ │ │ ├── Padding.py
│ │ │ │ ├── PairwiseDistance.py
│ │ │ │ ├── Parallel.py
│ │ │ │ ├── ParallelCriterion.py
│ │ │ │ ├── ParallelTable.py
│ │ │ │ ├── PartialLinear.py
│ │ │ │ ├── Power.py
│ │ │ │ ├── RReLU.py
│ │ │ │ ├── ReLU.py
│ │ │ │ ├── ReLU6.py
│ │ │ │ ├── Replicate.py
│ │ │ │ ├── Reshape.py
│ │ │ │ ├── Select.py
│ │ │ │ ├── SelectTable.py
│ │ │ │ ├── Sequential.py
│ │ │ │ ├── Sigmoid.py
│ │ │ │ ├── SmoothL1Criterion.py
│ │ │ │ ├── SoftMarginCriterion.py
│ │ │ │ ├── SoftMax.py
│ │ │ │ ├── SoftMin.py
│ │ │ │ ├── SoftPlus.py
│ │ │ │ ├── SoftShrink.py
│ │ │ │ ├── SoftSign.py
│ │ │ │ ├── SpatialAdaptiveMaxPooling.py
│ │ │ │ ├── SpatialAveragePooling.py
│ │ │ │ ├── SpatialBatchNormalization.py
│ │ │ │ ├── SpatialClassNLLCriterion.py
│ │ │ │ ├── SpatialContrastiveNormalization.py
│ │ │ │ ├── SpatialConvolution.py
│ │ │ │ ├── SpatialConvolutionLocal.py
│ │ │ │ ├── SpatialConvolutionMap.py
│ │ │ │ ├── SpatialCrossMapLRN.py
│ │ │ │ ├── SpatialDilatedConvolution.py
│ │ │ │ ├── SpatialDivisiveNormalization.py
│ │ │ │ ├── SpatialDropout.py
│ │ │ │ ├── SpatialFractionalMaxPooling.py
│ │ │ │ ├── SpatialFullConvolution.py
│ │ │ │ ├── SpatialFullConvolutionMap.py
│ │ │ │ ├── SpatialLPPooling.py
│ │ │ │ ├── SpatialMaxPooling.py
│ │ │ │ ├── SpatialMaxUnpooling.py
│ │ │ │ ├── SpatialReflectionPadding.py
│ │ │ │ ├── SpatialReplicationPadding.py
│ │ │ │ ├── SpatialSoftMax.py
│ │ │ │ ├── SpatialSubSampling.py
│ │ │ │ ├── SpatialSubtractiveNormalization.py
│ │ │ │ ├── SpatialUpSamplingNearest.py
│ │ │ │ ├── SpatialZeroPadding.py
│ │ │ │ ├── SplitTable.py
│ │ │ │ ├── Sqrt.py
│ │ │ │ ├── Square.py
│ │ │ │ ├── Squeeze.py
│ │ │ │ ├── Sum.py
│ │ │ │ ├── Tanh.py
│ │ │ │ ├── TanhShrink.py
│ │ │ │ ├── TemporalConvolution.py
│ │ │ │ ├── TemporalMaxPooling.py
│ │ │ │ ├── TemporalSubSampling.py
│ │ │ │ ├── Threshold.py
│ │ │ │ ├── Transpose.py
│ │ │ │ ├── Unsqueeze.py
│ │ │ │ ├── View.py
│ │ │ │ ├── VolumetricAveragePooling.py
│ │ │ │ ├── VolumetricBatchNormalization.py
│ │ │ │ ├── VolumetricConvolution.py
│ │ │ │ ├── VolumetricDropout.py
│ │ │ │ ├── VolumetricFullConvolution.py
│ │ │ │ ├── VolumetricMaxPooling.py
│ │ │ │ ├── VolumetricMaxUnpooling.py
│ │ │ │ ├── VolumetricReplicationPadding.py
│ │ │ │ ├── WeightedEuclidean.py
│ │ │ │ ├── WeightedMSECriterion.py
│ │ │ │ ├── __init__.py
│ │ │ │ └── utils.py
│ │ │ └── optim
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adadelta.py
│ │ │ │ ├── adagrad.py
│ │ │ │ ├── adam.py
│ │ │ │ ├── adamax.py
│ │ │ │ ├── asgd.py
│ │ │ │ ├── cg.py
│ │ │ │ ├── lbfgs.py
│ │ │ │ ├── nag.py
│ │ │ │ ├── rmsprop.py
│ │ │ │ ├── rprop.py
│ │ │ │ └── sgd.py
│ │ ├── multiprocessing
│ │ │ ├── __init__.py
│ │ │ ├── pool.py
│ │ │ ├── queue.py
│ │ │ └── reductions.py
│ │ ├── nn
│ │ │ ├── __init__.py
│ │ │ ├── _functions
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dropout.py
│ │ │ │ ├── linear.py
│ │ │ │ ├── loss.py
│ │ │ │ ├── padding.py
│ │ │ │ ├── rnn.py
│ │ │ │ ├── thnn
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── activation.py
│ │ │ │ │ ├── auto.py
│ │ │ │ │ ├── auto_double_backwards.py
│ │ │ │ │ ├── auto_symbolic.py
│ │ │ │ │ ├── batchnorm_double_backwards.py
│ │ │ │ │ ├── loss.py
│ │ │ │ │ ├── normalization.py
│ │ │ │ │ ├── pooling.py
│ │ │ │ │ ├── rnnFusedPointwise.py
│ │ │ │ │ ├── sparse.py
│ │ │ │ │ └── upsampling.py
│ │ │ │ └── vision.py
│ │ │ ├── backends
│ │ │ │ ├── __init__.py
│ │ │ │ ├── backend.py
│ │ │ │ └── thnn.py
│ │ │ ├── functional.py
│ │ │ ├── init.py
│ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ ├── activation.py
│ │ │ │ ├── batchnorm.py
│ │ │ │ ├── container.py
│ │ │ │ ├── conv.py
│ │ │ │ ├── distance.py
│ │ │ │ ├── dropout.py
│ │ │ │ ├── instancenorm.py
│ │ │ │ ├── linear.py
│ │ │ │ ├── loss.py
│ │ │ │ ├── module.py
│ │ │ │ ├── normalization.py
│ │ │ │ ├── padding.py
│ │ │ │ ├── pixelshuffle.py
│ │ │ │ ├── pooling.py
│ │ │ │ ├── rnn.py
│ │ │ │ ├── sparse.py
│ │ │ │ ├── upsampling.py
│ │ │ │ └── utils.py
│ │ │ ├── parallel
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _functions.py
│ │ │ │ ├── data_parallel.py
│ │ │ │ ├── distributed.py
│ │ │ │ ├── parallel_apply.py
│ │ │ │ ├── replicate.py
│ │ │ │ └── scatter_gather.py
│ │ │ ├── parameter.py
│ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── clip_grad.py
│ │ │ │ ├── convert_parameters.py
│ │ │ │ ├── rnn.py
│ │ │ │ └── weight_norm.py
│ │ ├── onnx
│ │ │ ├── __init__.py
│ │ │ └── symbolic.py
│ │ ├── optim
│ │ │ ├── __init__.py
│ │ │ ├── adadelta.py
│ │ │ ├── adagrad.py
│ │ │ ├── adam.py
│ │ │ ├── adamax.py
│ │ │ ├── asgd.py
│ │ │ ├── lbfgs.py
│ │ │ ├── lr_scheduler.py
│ │ │ ├── optimizer.py
│ │ │ ├── rmsprop.py
│ │ │ ├── rprop.py
│ │ │ ├── sgd.py
│ │ │ └── sparse_adam.py
│ │ ├── random.py
│ │ ├── serialization.py
│ │ ├── sparse
│ │ │ └── __init__.py
│ │ ├── storage.py
│ │ ├── tensor.py
│ │ ├── utils
│ │ │ ├── __init__.py
│ │ │ ├── backcompat
│ │ │ │ └── __init__.py
│ │ │ ├── data
│ │ │ │ ├── __init__.py
│ │ │ │ ├── dataloader.py
│ │ │ │ ├── dataset.py
│ │ │ │ ├── distributed.py
│ │ │ │ └── sampler.py
│ │ │ ├── dlpack.py
│ │ │ ├── ffi
│ │ │ │ └── __init__.py
│ │ │ ├── hooks.py
│ │ │ ├── model_zoo.py
│ │ │ ├── serialization
│ │ │ │ ├── __init__.py
│ │ │ │ └── read_lua_file.py
│ │ │ └── trainer
│ │ │ │ ├── __init__.py
│ │ │ │ ├── plugins
│ │ │ │ ├── __init__.py
│ │ │ │ ├── accuracy.py
│ │ │ │ ├── logger.py
│ │ │ │ ├── loss.py
│ │ │ │ ├── monitor.py
│ │ │ │ ├── plugin.py
│ │ │ │ ├── progress.py
│ │ │ │ └── time.py
│ │ │ │ └── trainer.py
│ │ └── version.py
│ └── torchvision
│ │ ├── __init__.py
│ │ ├── datasets
│ │ ├── __init__.py
│ │ ├── cifar.py
│ │ ├── coco.py
│ │ ├── fakedata.py
│ │ ├── folder.py
│ │ ├── lsun.py
│ │ ├── mnist.py
│ │ ├── phototour.py
│ │ ├── semeion.py
│ │ ├── stl10.py
│ │ ├── svhn.py
│ │ └── utils.py
│ │ ├── models
│ │ ├── __init__.py
│ │ ├── alexnet.py
│ │ ├── densenet.py
│ │ ├── inception.py
│ │ ├── resnet.py
│ │ ├── squeezenet.py
│ │ └── vgg.py
│ │ ├── transforms.py
│ │ ├── transforms
│ │ ├── __init__.py
│ │ ├── functional.py
│ │ └── transforms.py
│ │ └── utils.py
│ ├── conf.py
│ ├── cuda.rst
│ ├── data.rst
│ ├── distributed.rst
│ ├── distributions.rst
│ ├── ffi.rst
│ ├── index.rst
│ ├── legacy.rst
│ ├── model_zoo.rst
│ ├── multiprocessing.rst
│ ├── nn.rst
│ ├── notes
│ ├── autograd.rst
│ ├── broadcasting.rst
│ ├── cuda.rst
│ ├── extending.rst
│ ├── multiprocessing.rst
│ └── serialization.rst
│ ├── onnx.rst
│ ├── optim.rst
│ ├── project-contributors.rst
│ ├── sparse.rst
│ ├── storage.rst
│ ├── tensors.rst
│ ├── torch.rst
│ └── torchvision
│ ├── datasets.rst
│ ├── index.rst
│ ├── models.rst
│ ├── transforms.rst
│ └── utils.rst
├── example
└── tutorials
│ └── beginner
│ └── deep_learning_60min_bltiz.ipynb
└── tutorial
├── en
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── _static
│ ├── css
│ │ └── pytorch_theme.css
│ └── img
│ │ ├── SRResNet.png
│ │ ├── Variable.png
│ │ ├── cartpole.gif
│ │ ├── cat.jpg
│ │ ├── cat_224x224.jpg
│ │ ├── cat_output1.png
│ │ ├── char_rnn_generation.png
│ │ ├── cifar10.png
│ │ ├── data_parallel.png
│ │ ├── distributed
│ │ ├── DistPyTorch.jpg
│ │ ├── all_gather.pdf
│ │ ├── all_gather.png
│ │ ├── all_reduce.pdf
│ │ ├── all_reduce.png
│ │ ├── broadcast.png
│ │ ├── gather.png
│ │ ├── reduce.png
│ │ ├── scatter.png
│ │ ├── send_recv.png
│ │ └── send_recv_big.png
│ │ ├── dynamic_graph.gif
│ │ ├── landmarked_face2.png
│ │ ├── mnist.png
│ │ ├── neural-style
│ │ ├── dancing.jpg
│ │ ├── neuralstyle.png
│ │ └── picasso.jpg
│ │ ├── pytorch-logo-dark.png
│ │ ├── pytorch-logo-dark.svg
│ │ ├── seq-seq-images
│ │ ├── attention-decoder-network.dot
│ │ ├── attention-decoder-network.png
│ │ ├── decoder-network.dot
│ │ ├── decoder-network.png
│ │ ├── decoder.png
│ │ ├── decoder@2x.png
│ │ ├── encoder-network.dot
│ │ ├── encoder-network.png
│ │ ├── seq2seq.png
│ │ ├── seq2seq@2x.png
│ │ ├── word-encoding.png
│ │ └── word-encoding@2x.png
│ │ ├── seq2seq_flat.png
│ │ ├── stn
│ │ ├── FSeq.png
│ │ ├── Five.gif
│ │ ├── stn-arch.png
│ │ └── tr.png
│ │ ├── tensor_illustration.png
│ │ ├── tensor_illustration_flat.png
│ │ ├── thumbnails
│ │ ├── babel.jpg
│ │ ├── default.png
│ │ ├── examples.png
│ │ ├── pytorch-logo-flat.png
│ │ └── torch-logo.png
│ │ └── torch-nn-vs-pytorch-nn.png
├── _templates
│ └── layout.html
├── advanced_source
│ ├── README.txt
│ ├── c_extension.rst
│ ├── neural_style_tutorial.py
│ ├── numpy_extensions_tutorial.py
│ └── super_resolution_with_caffe2.py
├── beginner_source
│ ├── README.txt
│ ├── blitz
│ │ ├── README.txt
│ │ ├── autograd_tutorial.py
│ │ ├── cifar10_tutorial.py
│ │ ├── data_parallel_tutorial.py
│ │ ├── neural_networks_tutorial.py
│ │ └── tensor_tutorial.py
│ ├── data_loading_tutorial.py
│ ├── deep_learning_60min_blitz.rst
│ ├── deep_learning_nlp_tutorial.rst
│ ├── examples_autograd
│ │ ├── README.txt
│ │ ├── tf_two_layer_net.py
│ │ ├── two_layer_net_autograd.py
│ │ └── two_layer_net_custom_function.py
│ ├── examples_nn
│ │ ├── README.txt
│ │ ├── dynamic_net.py
│ │ ├── two_layer_net_module.py
│ │ ├── two_layer_net_nn.py
│ │ └── two_layer_net_optim.py
│ ├── examples_tensor
│ │ ├── README.txt
│ │ ├── two_layer_net_numpy.py
│ │ └── two_layer_net_tensor.py
│ ├── former_torchies
│ │ ├── README.txt
│ │ ├── autograd_tutorial.py
│ │ ├── nn_tutorial.py
│ │ ├── parallelism_tutorial.py
│ │ └── tensor_tutorial.py
│ ├── former_torchies_tutorial.rst
│ ├── nlp
│ │ ├── README.txt
│ │ ├── advanced_tutorial.py
│ │ ├── deep_learning_tutorial.py
│ │ ├── pytorch_tutorial.py
│ │ ├── sequence_models_tutorial.py
│ │ └── word_embeddings_tutorial.py
│ ├── pytorch_with_examples.rst
│ └── transfer_learning_tutorial.py
├── build.sh
├── conf.py
├── custom_directives.py
├── index.rst
├── intermediate_source
│ ├── README.txt
│ ├── char_rnn_classification_tutorial.py
│ ├── char_rnn_generation_tutorial.py
│ ├── dist_tuto.rst
│ ├── reinforcement_q_learning.py
│ ├── seq2seq_translation_tutorial.py
│ └── spatial_transformer_tutorial.py
└── requirements.txt
└── zh
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── _static
├── css
│ └── pytorch_theme.css
└── img
│ ├── SRResNet.png
│ ├── Variable.png
│ ├── cartpole.gif
│ ├── cat.jpg
│ ├── cat_224x224.jpg
│ ├── cat_output1.png
│ ├── char_rnn_generation.png
│ ├── cifar10.png
│ ├── data_parallel.png
│ ├── distributed
│ ├── DistPyTorch.jpg
│ ├── all_gather.pdf
│ ├── all_gather.png
│ ├── all_reduce.pdf
│ ├── all_reduce.png
│ ├── broadcast.png
│ ├── gather.png
│ ├── reduce.png
│ ├── scatter.png
│ ├── send_recv.png
│ └── send_recv_big.png
│ ├── dynamic_graph.gif
│ ├── landmarked_face2.png
│ ├── mnist.png
│ ├── neural-style
│ ├── dancing.jpg
│ ├── neuralstyle.png
│ └── picasso.jpg
│ ├── pytorch-logo-dark.png
│ ├── pytorch-logo-dark.svg
│ ├── seq-seq-images
│ ├── attention-decoder-network.dot
│ ├── attention-decoder-network.png
│ ├── decoder-network.dot
│ ├── decoder-network.png
│ ├── decoder.png
│ ├── decoder@2x.png
│ ├── encoder-network.dot
│ ├── encoder-network.png
│ ├── seq2seq.png
│ ├── seq2seq@2x.png
│ ├── word-encoding.png
│ └── word-encoding@2x.png
│ ├── seq2seq_flat.png
│ ├── stn
│ ├── FSeq.png
│ ├── Five.gif
│ ├── stn-arch.png
│ └── tr.png
│ ├── tensor_illustration.png
│ ├── tensor_illustration_flat.png
│ ├── thumbnails
│ ├── babel.jpg
│ ├── default.png
│ ├── examples.png
│ ├── pytorch-logo-flat.png
│ └── torch-logo.png
│ └── torch-nn-vs-pytorch-nn.png
├── _templates
└── layout.html
├── advanced_source
├── README.txt
├── c_extension.rst
├── neural_style_tutorial.py
├── numpy_extensions_tutorial.py
└── super_resolution_with_caffe2.py
├── apachecn-learning-group.rst
├── beginner_source
├── README.txt
├── blitz
│ ├── README.txt
│ ├── autograd_tutorial.py
│ ├── cifar10_tutorial.py
│ ├── data_parallel_tutorial.py
│ ├── neural_networks_tutorial.py
│ └── tensor_tutorial.py
├── data_loading_tutorial.py
├── deep_learning_60min_blitz.rst
├── deep_learning_nlp_tutorial.rst
├── examples_autograd
│ ├── README.txt
│ ├── tf_two_layer_net.py
│ ├── two_layer_net_autograd.py
│ └── two_layer_net_custom_function.py
├── examples_nn
│ ├── README.txt
│ ├── dynamic_net.py
│ ├── two_layer_net_module.py
│ ├── two_layer_net_nn.py
│ └── two_layer_net_optim.py
├── examples_tensor
│ ├── README.txt
│ ├── two_layer_net_numpy.py
│ └── two_layer_net_tensor.py
├── former_torchies
│ ├── README.txt
│ ├── autograd_tutorial.py
│ ├── nn_tutorial.py
│ ├── parallelism_tutorial.py
│ └── tensor_tutorial.py
├── former_torchies_tutorial.rst
├── nlp
│ ├── README.txt
│ ├── advanced_tutorial.py
│ ├── deep_learning_tutorial.py
│ ├── pytorch_tutorial.py
│ ├── sequence_models_tutorial.py
│ └── word_embeddings_tutorial.py
├── pytorch_with_examples.rst
└── transfer_learning_tutorial.py
├── build.sh
├── conf.py
├── custom_directives.py
├── index.rst
├── intermediate_source
├── README.txt
├── char_rnn_classification_tutorial.py
├── char_rnn_generation_tutorial.py
├── dist_tuto.rst
├── reinforcement_q_learning.py
├── seq2seq_translation_tutorial.py
└── spatial_transformer_tutorial.py
├── project-contributors.rst
└── requirements.txt
/doc/en/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = PyTorch
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | docset: html
16 | doc2dash --name $(SPHINXPROJ) --icon $(SOURCEDIR)/_static/img/pytorch-logo-flame.png --enable-js --online-redirect-url http://pytorch.org/docs/ --force $(BUILDDIR)/html/
17 |
18 | # Manually fix because Zeal doesn't deal well with `icon.png`-only at 2x resolution.
19 | cp $(SPHINXPROJ).docset/icon.png $(SPHINXPROJ).docset/icon@2x.png
20 | convert $(SPHINXPROJ).docset/icon@2x.png -resize 16x16 $(SPHINXPROJ).docset/icon.png
21 |
22 | .PHONY: help Makefile docset
23 |
24 | # Catch-all target: route all unknown targets to Sphinx using the new
25 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
26 | %: Makefile
27 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
28 |
--------------------------------------------------------------------------------
/doc/en/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 | set SPHINXPROJ=PyTorch
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/doc/en/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | -e git://github.com/snide/sphinx_rtd_theme.git#egg=sphinx_rtd_theme
3 |
--------------------------------------------------------------------------------
/doc/en/source/_static/img/dynamic_graph.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/en/source/_static/img/dynamic_graph.gif
--------------------------------------------------------------------------------
/doc/en/source/_static/img/pytorch-logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/en/source/_static/img/pytorch-logo-dark.png
--------------------------------------------------------------------------------
/doc/en/source/_static/img/pytorch-logo-flame.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/en/source/_static/img/pytorch-logo-flame.png
--------------------------------------------------------------------------------
/doc/en/source/_static/img/pytorch-logo-flame.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/doc/en/source/_static/img/tensor_illustration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/en/source/_static/img/tensor_illustration.png
--------------------------------------------------------------------------------
/doc/en/source/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
6 |
13 |
22 |
36 | {% endblock %}
--------------------------------------------------------------------------------
/doc/en/source/cuda.rst:
--------------------------------------------------------------------------------
1 | torch.cuda
2 | ===================================
3 |
4 | .. currentmodule:: torch.cuda
5 |
6 | .. automodule:: torch.cuda
7 | :members:
8 |
9 | Random Number Generator
10 | -------------------------
11 | .. autofunction:: get_rng_state
12 | .. autofunction:: set_rng_state
13 | .. autofunction:: manual_seed
14 | .. autofunction:: manual_seed_all
15 | .. autofunction:: seed
16 | .. autofunction:: seed_all
17 | .. autofunction:: initial_seed
18 |
19 |
20 | Communication collectives
21 | -------------------------
22 |
23 | .. autofunction:: torch.cuda.comm.broadcast
24 |
25 | .. autofunction:: torch.cuda.comm.reduce_add
26 |
27 | .. autofunction:: torch.cuda.comm.scatter
28 |
29 | .. autofunction:: torch.cuda.comm.gather
30 |
31 | Streams and events
32 | ------------------
33 |
34 | .. autoclass:: Stream
35 | :members:
36 |
37 | .. autoclass:: Event
38 | :members:
39 |
40 | Memory management
41 | -----------------
42 | .. autofunction:: empty_cache
43 |
44 | NVIDIA Tools Extension (NVTX)
45 | -----------------------------
46 |
47 | .. autofunction:: torch.cuda.nvtx.mark
48 | .. autofunction:: torch.cuda.nvtx.range_push
49 | .. autofunction:: torch.cuda.nvtx.range_pop
50 |
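51 | Example
52 | -------
53 |
54 | A minimal usage sketch tying these pieces together (guarded so it is a
55 | no-op on machines without a GPU)::
56 |
57 |     import torch
58 |
59 |     if torch.cuda.is_available():
60 |         torch.cuda.manual_seed_all(42)           # seed every device
61 |         stream = torch.cuda.Stream()
62 |         with torch.cuda.stream(stream):           # queue work on a side stream
63 |             x = torch.cuda.FloatTensor(10).normal_()
64 |         torch.cuda.synchronize()                  # wait for all queued kernels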
--------------------------------------------------------------------------------
/doc/en/source/data.rst:
--------------------------------------------------------------------------------
1 | torch.utils.data
2 | ===================================
3 |
4 | .. automodule:: torch.utils.data
5 | .. autoclass:: Dataset
6 | .. autoclass:: TensorDataset
7 | .. autoclass:: ConcatDataset
8 | .. autoclass:: DataLoader
9 | .. autoclass:: torch.utils.data.sampler.Sampler
10 | .. autoclass:: torch.utils.data.sampler.SequentialSampler
11 | .. autoclass:: torch.utils.data.sampler.RandomSampler
12 | .. autoclass:: torch.utils.data.sampler.SubsetRandomSampler
13 | .. autoclass:: torch.utils.data.sampler.WeightedRandomSampler
14 | .. autoclass:: torch.utils.data.distributed.DistributedSampler
15 |
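16 | A minimal usage sketch (:class:`TensorDataset` wraps tensors so that
17 | :class:`DataLoader` can batch and shuffle them)::
18 |
19 |     import torch
20 |     from torch.utils.data import TensorDataset, DataLoader
21 |
22 |     inputs = torch.randn(100, 3)
23 |     targets = torch.randn(100, 1)
24 |     loader = DataLoader(TensorDataset(inputs, targets),
25 |                         batch_size=10, shuffle=True)
26 |
27 |     for batch_inputs, batch_targets in loader:
28 |         pass  # one training step per batch of 10 samples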
--------------------------------------------------------------------------------
/doc/en/source/distributions.rst:
--------------------------------------------------------------------------------
1 | .. role:: hidden
2 | :class: hidden-section
3 |
4 | Probability distributions - torch.distributions
5 | ==================================================
6 |
7 | .. automodule:: torch.distributions
8 | .. currentmodule:: torch.distributions
9 |
10 | :hidden:`Distribution`
11 | ~~~~~~~~~~~~~~~~~~~~~~~
12 |
13 | .. autoclass:: Distribution
14 | :members:
15 |
16 | :hidden:`Bernoulli`
17 | ~~~~~~~~~~~~~~~~~~~~~~~
18 |
19 | .. autoclass:: Bernoulli
20 | :members:
21 |
22 | :hidden:`Categorical`
23 | ~~~~~~~~~~~~~~~~~~~~~~~
24 |
25 | .. autoclass:: Categorical
26 | :members:
27 |
28 | :hidden:`Normal`
29 | ~~~~~~~~~~~~~~~~~~~~~~~
30 |
31 | .. autoclass:: Normal
32 | :members:
33 |
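34 | A minimal sampling sketch (``sample()`` draws a value, ``log_prob()``
35 | evaluates the log density at it)::
36 |
37 |     import torch
38 |     from torch.distributions import Bernoulli, Normal
39 |
40 |     coin = Bernoulli(torch.Tensor([0.7]))
41 |     print(coin.sample())            # 1.0 with probability 0.7
42 |
43 |     gauss = Normal(torch.Tensor([0.0]), torch.Tensor([1.0]))
44 |     x = gauss.sample()
45 |     print(gauss.log_prob(x))        # log density of the draw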
--------------------------------------------------------------------------------
/doc/en/source/ffi.rst:
--------------------------------------------------------------------------------
1 | torch.utils.ffi
2 | ===============
3 |
4 | .. currentmodule:: torch.utils.ffi
5 | .. autofunction:: create_extension
6 |
7 |
--------------------------------------------------------------------------------
/doc/en/source/index.rst:
--------------------------------------------------------------------------------
1 | .. PyTorch documentation master file, created by
2 | sphinx-quickstart on Fri Dec 23 13:31:47 2016.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | :github_url: https://github.com/pytorch/pytorch
7 |
8 | PyTorch documentation
9 | ===================================
10 |
11 | PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.
12 |
13 | .. toctree::
14 | :glob:
15 | :maxdepth: 1
16 | :caption: Notes
17 |
18 | notes/*
19 |
20 |
21 | .. toctree::
22 | :maxdepth: 1
23 | :caption: Package Reference
24 |
25 | torch
26 | tensors
27 | sparse
28 | storage
29 | nn
30 | optim
31 | torch.autograd
32 | torch.distributions
33 | torch.multiprocessing
34 | torch.distributed
35 | torch.legacy
36 | cuda
37 | ffi
38 | data
39 | model_zoo
40 | onnx
41 |
42 | .. toctree::
43 | :glob:
44 | :maxdepth: 2
45 | :caption: torchvision Reference
46 |
47 | torchvision/index
48 |
49 |
50 | Indices and tables
51 | ==================
52 |
53 | * :ref:`genindex`
54 | * :ref:`modindex`
55 |
--------------------------------------------------------------------------------
/doc/en/source/legacy.rst:
--------------------------------------------------------------------------------
1 | Legacy package - torch.legacy
2 | ===================================
3 |
4 | .. automodule:: torch.legacy
5 |
--------------------------------------------------------------------------------
/doc/en/source/model_zoo.rst:
--------------------------------------------------------------------------------
1 | torch.utils.model_zoo
2 | ===================================
3 |
4 | .. automodule:: torch.utils.model_zoo
5 | .. autofunction:: load_url
6 |
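7 | For example, fetching a pretrained checkpoint (a minimal sketch; the URL is
8 | the torchvision ResNet-18 checkpoint, cached locally after the first call)::
9 |
10 |     import torch.utils.model_zoo
11 |
12 |     state_dict = torch.utils.model_zoo.load_url(
13 |         'https://download.pytorch.org/models/resnet18-5c106cde.pth')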
--------------------------------------------------------------------------------
/doc/en/source/notes/serialization.rst:
--------------------------------------------------------------------------------
1 |
2 | Serialization semantics
3 | =======================
4 |
5 | Best practices
6 | --------------
7 |
8 | .. _recommend-saving-models:
9 |
10 | Recommended approach for saving a model
11 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
12 |
13 | There are two main approaches for serializing and restoring a model.
14 |
15 | The first (recommended) saves and loads only the model parameters::
16 |
17 | torch.save(the_model.state_dict(), PATH)
18 |
19 | Then later::
20 |
21 | the_model = TheModelClass(*args, **kwargs)
22 | the_model.load_state_dict(torch.load(PATH))
23 |
24 | The second saves and loads the entire model::
25 |
26 | torch.save(the_model, PATH)
27 |
28 | Then later::
29 |
30 | the_model = torch.load(PATH)
31 |
32 | However, in this case the serialized data is bound to the specific classes
33 | and the exact directory structure used, so it can break in various ways when
34 | used in other projects, or after some serious refactors.
35 |
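36 | Put together, the recommended approach looks like this (a minimal sketch;
37 | ``TheModelClass`` stands in for your own module)::
38 |
39 |     import torch
40 |     import torch.nn as nn
41 |
42 |     class TheModelClass(nn.Module):
43 |         def __init__(self):
44 |             super(TheModelClass, self).__init__()
45 |             self.fc = nn.Linear(4, 2)
46 |
47 |         def forward(self, x):
48 |             return self.fc(x)
49 |
50 |     the_model = TheModelClass()
51 |     torch.save(the_model.state_dict(), 'model.pth')
52 |
53 |     # later, possibly in another process: rebuild the module, then restore
54 |     the_model = TheModelClass()
55 |     the_model.load_state_dict(torch.load('model.pth'))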
--------------------------------------------------------------------------------
/doc/en/source/storage.rst:
--------------------------------------------------------------------------------
1 | torch.Storage
2 | ===================================
3 |
4 | A :class:`torch.Storage` is a contiguous, one-dimensional array of a single
5 | data type.
6 |
7 | Every :class:`torch.Tensor` has a corresponding storage of the same data type.
8 |
9 | .. autoclass:: torch.FloatStorage
10 | :members:
11 | :undoc-members:
12 | :inherited-members:
13 |
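14 | For example, a tensor and its storage share the same memory::
15 |
16 |     import torch
17 |
18 |     t = torch.FloatTensor([[1, 2], [3, 4]])
19 |     s = t.storage()          # torch.FloatStorage of size 4
20 |     s[0] = 10.0              # writes through to the tensor
21 |     print(t[0][0])           # 10.0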
--------------------------------------------------------------------------------
/doc/en/source/torchvision/index.rst:
--------------------------------------------------------------------------------
1 | torchvision
2 | ===========
3 |
4 | The :mod:`torchvision` package consists of popular datasets, model
5 | architectures, and common image transformations for computer vision.
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 | :caption: Package Reference
10 |
11 | datasets
12 | models
13 | transforms
14 | utils
15 |
16 | .. automodule:: torchvision
17 | :members:
--------------------------------------------------------------------------------
/doc/en/source/torchvision/transforms.rst:
--------------------------------------------------------------------------------
1 | torchvision.transforms
2 | ======================
3 |
4 | .. currentmodule:: torchvision.transforms
5 |
6 | Transforms are common image transforms. They can be chained together using :class:`Compose`
7 |
8 | .. autoclass:: Compose
9 |
10 | Transforms on PIL Image
11 | -----------------------
12 |
13 | .. autoclass:: Resize
14 |
15 | .. autoclass:: Scale
16 |
17 | .. autoclass:: CenterCrop
18 |
19 | .. autoclass:: RandomCrop
20 |
21 | .. autoclass:: RandomHorizontalFlip
22 |
23 | .. autoclass:: RandomVerticalFlip
24 |
25 | .. autoclass:: RandomResizedCrop
26 |
27 | .. autoclass:: RandomSizedCrop
28 |
29 | .. autoclass:: Grayscale
30 |
31 | .. autoclass:: RandomGrayscale
32 |
33 | .. autoclass:: FiveCrop
34 |
35 | .. autoclass:: TenCrop
36 |
37 | .. autoclass:: Pad
38 |
39 | .. autoclass:: ColorJitter
40 |
41 | Transforms on torch.\*Tensor
42 | ----------------------------
43 |
44 | .. autoclass:: Normalize
45 | :members: __call__
46 | :special-members:
47 |
48 |
49 | Conversion Transforms
50 | ---------------------
51 |
52 | .. autoclass:: ToTensor
53 | :members: __call__
54 | :special-members:
55 |
56 | .. autoclass:: ToPILImage
57 | :members: __call__
58 | :special-members:
59 |
60 | Generic Transforms
61 | ------------------
62 |
63 | .. autoclass:: Lambda
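64 |
65 | For example, several transforms chained with :class:`Compose` (a minimal
66 | sketch; the normalization constants are the usual ImageNet statistics)::
67 |
68 |     from torchvision import transforms
69 |
70 |     preprocess = transforms.Compose([
71 |         transforms.Resize(256),
72 |         transforms.CenterCrop(224),
73 |         transforms.ToTensor(),
74 |         transforms.Normalize(mean=[0.485, 0.456, 0.406],
75 |                              std=[0.229, 0.224, 0.225]),
76 |     ])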
--------------------------------------------------------------------------------
/doc/en/source/torchvision/utils.rst:
--------------------------------------------------------------------------------
1 | torchvision.utils
2 | =================
3 |
4 | .. currentmodule:: torchvision.utils
5 |
6 | .. autofunction:: make_grid
7 |
8 | .. autofunction:: save_image
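9 |
10 | A minimal usage sketch (``save_image`` calls ``make_grid`` internally with
11 | the same keyword arguments)::
12 |
13 |     import torch
14 |     from torchvision import utils
15 |
16 |     batch = torch.randn(16, 3, 32, 32)    # a fake batch of 16 RGB images
17 |     grid = utils.make_grid(batch, nrow=4, normalize=True)   # 3 x H x W tile
18 |     utils.save_image(batch, 'grid.png', nrow=4, normalize=True)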
--------------------------------------------------------------------------------
/doc/zh/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = PyTorch
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | docset: html
16 | doc2dash --name $(SPHINXPROJ) --icon $(SOURCEDIR)/_static/img/pytorch-logo-flame.png --enable-js --online-redirect-url http://pytorch.org/docs/ --force $(BUILDDIR)/html/
17 |
18 | # Manually fix because Zeal doesn't deal well with `icon.png`-only at 2x resolution.
19 | cp $(SPHINXPROJ).docset/icon.png $(SPHINXPROJ).docset/icon@2x.png
20 | convert $(SPHINXPROJ).docset/icon@2x.png -resize 16x16 $(SPHINXPROJ).docset/icon.png
21 |
22 | .PHONY: help Makefile docset
23 |
24 | # Catch-all target: route all unknown targets to Sphinx using the new
25 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
26 | %: Makefile
27 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
28 |
--------------------------------------------------------------------------------
/doc/zh/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 | set SPHINXPROJ=PyTorch
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/doc/zh/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | -e git://github.com/snide/sphinx_rtd_theme.git#egg=sphinx_rtd_theme
3 |
--------------------------------------------------------------------------------
/doc/zh/source/_static/img/dynamic_graph.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/_static/img/dynamic_graph.gif
--------------------------------------------------------------------------------
/doc/zh/source/_static/img/pytorch-logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/_static/img/pytorch-logo-dark.png
--------------------------------------------------------------------------------
/doc/zh/source/_static/img/pytorch-logo-flame.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/_static/img/pytorch-logo-flame.png
--------------------------------------------------------------------------------
/doc/zh/source/_static/img/pytorch-logo-flame.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/doc/zh/source/_static/img/tensor_illustration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/_static/img/tensor_illustration.png
--------------------------------------------------------------------------------
/doc/zh/source/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
6 |
13 |
22 |
36 | {% endblock %}
--------------------------------------------------------------------------------
/doc/zh/source/apachecn-learning-group.rst:
--------------------------------------------------------------------------------
1 | 组织学习交流群
2 | ==================
3 |
4 | 机器学习交流群: `629470233 `__ (2000人)
5 |
6 | 大数据交流群: `214293307 `__ (2000人)
7 |
8 | Kaggle 竞赛交流群: `686932392 `__ (2000人)
9 |
10 | 了解我们: ``__
11 |
12 | 加入组织: ``__
13 |
14 | 更多学(zhuang)习(bi)交流群请参阅: ``__
--------------------------------------------------------------------------------
/doc/zh/source/autograd.rst:
--------------------------------------------------------------------------------
1 | .. role:: hidden
2 | :class: hidden-section
3 |
4 | Automatic differentiation package - torch.autograd
5 | ==================================================
6 |
7 | .. automodule:: torch.autograd
8 | .. currentmodule:: torch.autograd
9 |
10 | .. autofunction:: backward
11 |
12 | .. autofunction:: grad
13 |
14 | Variable(变量)
15 | ----------------
16 |
17 | API compatibility
18 | ^^^^^^^^^^^^^^^^^
19 |
20 | Variable API 几乎与常规 Tensor API 相同(除了一些会覆盖梯度计算所需输入的就地(in-place)方法).
21 | 在大多数情况下, 变量可以安全地替换张量, 代码将保持正常工作.
22 | 因此, 我们没有单独记录变量上的所有操作, 请参阅 :class:`torch.Tensor` 文档查看这些操作.
23 |
24 | In-place operations on Variables
25 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
26 |
27 | 在 autograd 中支持就地操作是一件困难的事情, 在大多数情况下我们不鼓励使用.
28 | Autograd 积极的缓冲区释放和重用使其非常高效, 就地操作能显著降低内存使用量的情况其实很少.
29 | 除非你承受着巨大的内存压力, 否则你可能永远都不需要使用它们.
30 |
31 | In-place correctness checks
32 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
33 |
34 | 所有的 :class:`Variable` 都会跟踪应用于它们的就地操作. 如果实现检测到某个变量已在某个函数中被保存以用于反向传播, 但之后又被就地修改, 那么在开始求导时就会报出异常.
35 | 这确保了只要你使用了就地函数而没有看到任何错误, 就可以确信计算出的梯度是正确的.
36 |
37 |
38 | .. autoclass:: Variable
39 | :members:
40 |
41 | :hidden:`Function(函数)`
42 | ---------------------------
43 |
44 | .. autoclass:: Function
45 | :members:
46 |
47 | Profiler(分析器)
48 | ------------------
49 |
50 | Autograd 包含一个分析器, 可以让你检查模型中不同运算在 CPU 和 GPU 上的成本.
51 | 目前实现了两种模式 - 只使用 CPU 的 :class:`~torch.autograd.profiler.profile`,
52 | 和基于 nvprof(同时记录 CPU 和 GPU 活动)的 :class:`~torch.autograd.profiler.emit_nvtx`.
53 |
54 | .. autoclass:: torch.autograd.profiler.profile
55 | :members:
56 |
57 | .. autoclass:: torch.autograd.profiler.emit_nvtx
58 | :members:
59 |
60 | .. autofunction:: torch.autograd.profiler.load_nvprof
61 |
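62 | 用法示意(一个最小示例, 假设模型运行在 CPU 上)::
63 |
64 |     import torch
65 |     from torch.autograd import Variable, profiler
66 |
67 |     x = Variable(torch.randn(8, 8), requires_grad=True)
68 |     with profiler.profile() as prof:
69 |         y = (x * x).sum()
70 |         y.backward()
71 |     print(prof)   # 按运算打印 CPU 耗时表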
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/_storage_docs.py:
--------------------------------------------------------------------------------
1 | """Adds docstrings to Storage functions"""
2 |
3 | import torch._C
4 | from torch._C import _add_docstr as add_docstr
5 |
6 |
7 | storage_classes = [
8 | 'DoubleStorageBase',
9 | 'FloatStorageBase',
10 | 'LongStorageBase',
11 | 'IntStorageBase',
12 | 'ShortStorageBase',
13 | 'CharStorageBase',
14 | 'ByteStorageBase',
15 | ]
16 |
17 |
18 | def add_docstr_all(method, docstr):
19 | for cls_name in storage_classes:
20 | cls = getattr(torch._C, cls_name)
21 | try:
22 | add_docstr(getattr(cls, method), docstr)
23 | except AttributeError:
24 | pass
25 |
26 |
27 | add_docstr_all('from_file',
28 | """
29 | from_file(filename, shared=False, size=0) -> Storage
30 |
31 | 如果 shared 为 True, 那么内存将会在所有进程间共享. 所有的更改都会被写入文件. 如果 shared 为 False,
32 | 那么对于内存的修改, 则不会影响到文件.
33 |
34 | size 是存储中所包含的元素个数. 如果 shared 为 False, 则文件必须至少包含 `size * sizeof(Type)` 字节
35 | ( `Type` 是存储的数据类型). 如果 shared 为 True, 则文件会在需要时被创建.
36 |
37 | Args:
38 | filename (str): 要映射到的文件名
39 | shared (bool): 是否共享内存
40 | size (int): 存储中包含元素的个数
41 | """)
42 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/autograd/_functions/__init__.py:
--------------------------------------------------------------------------------
1 | from .basic_ops import *
2 | from .tensor import *
3 | from .pointwise import *
4 | from .reduce import *
5 | from .linalg import *
6 | from .blas import *
7 | from .stochastic import *
8 | from .compare import *
9 | from .initializers import *
10 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/autograd/_functions/compare.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from ..function import Function
4 | from .utils import maybe_unexpand, maybe_unexpand_or_view
5 |
6 |
7 | # TODO: once Cpp-style functions are implemented we can detach a and b
8 | # before calling forward.
9 | class _CompareOp(Function):
10 |
11 | @classmethod
12 | def forward(cls, ctx, a, b):
13 | ctx.a_size = a.size()
14 | ctx.b_tensor = torch.is_tensor(b)
15 | ctx.b_size = b.size() if ctx.b_tensor else None
16 | ctx.input_type = type(a)
17 | mask = getattr(a, cls.fn_name)(b)
18 | ctx.mark_non_differentiable(mask)
19 | return mask
20 |
21 | @staticmethod
22 | def backward(ctx, grad_output):
23 | grad_input = (grad_output * 0).type(ctx.input_type)
24 | return (maybe_unexpand(grad_input, ctx.a_size),
25 | maybe_unexpand_or_view(grad_input, ctx.b_size) if ctx.b_tensor else None)
26 |
27 |
28 | class Eq(_CompareOp):
29 | fn_name = 'eq'
30 |
31 |
32 | class Ne(_CompareOp):
33 | fn_name = 'ne'
34 |
35 |
36 | class Gt(_CompareOp):
37 | fn_name = 'gt'
38 |
39 |
40 | class Ge(_CompareOp):
41 | fn_name = 'ge'
42 |
43 |
44 | class Lt(_CompareOp):
45 | fn_name = 'lt'
46 |
47 |
48 | class Le(_CompareOp):
49 | fn_name = 'le'
50 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/autograd/_functions/initializers.py:
--------------------------------------------------------------------------------
1 | from ..function import InplaceFunction
2 | from ..variable import Variable
3 |
4 | class Zero(InplaceFunction):
5 |
6 | @staticmethod
7 | def forward(ctx, i, inplace=False):
8 | if inplace:
9 | ctx.mark_dirty(i)
10 | result = i.zero_()
11 | else:
12 | result = i.new(1).zero_().expand_as(i)
13 |
14 | ctx.save_for_backward(result)
15 | return result
16 |
17 | @staticmethod
18 | def backward(ctx, grad_output):
19 | result, = ctx.saved_variables
20 | return Variable(result.data.new(1).zero_().expand_as(result)), None  # None 对应不可导的 inplace 标志
21 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/autograd/_functions/stochastic.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from ..function import Function
3 |
4 |
5 | class Categorical(Function):
6 | @staticmethod
7 | def forward(ctx, probs, num_samples, with_replacement):
8 | samples = probs.multinomial(num_samples, with_replacement)
9 | ctx.mark_non_differentiable(samples)
10 | return samples
11 |
12 | @staticmethod
13 | def backward(ctx, grad_output):
14 | return None, None, None
15 |
16 |
17 | class Bernoulli(Function):
18 | @staticmethod
19 | def forward(ctx, probs):
20 | samples = probs.new().resize_as_(probs).bernoulli_(probs)
21 | ctx.mark_non_differentiable(samples)
22 | return samples
23 |
24 | @staticmethod
25 | def backward(ctx, grad_output):
26 | return None
27 |
28 |
29 | class Normal(Function):
30 | @staticmethod
31 | def forward(ctx, means, stddevs=None):
32 | samples = torch.normal(means, stddevs)
33 | ctx.mark_non_differentiable(samples)
34 | return samples
35 |
36 | @staticmethod
37 | def backward(ctx, grad_output):
38 | return None, None
39 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/backends/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/code/torch/backends/__init__.py
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/contrib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/code/torch/contrib/__init__.py
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/cuda/error.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/code/torch/cuda/error.py
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/cuda/nvtx.py:
--------------------------------------------------------------------------------
1 | import ctypes
2 |
3 | lib = None
4 |
5 | __all__ = ['range_push', 'range_pop', 'mark']
6 |
7 |
8 | def _libnvToolsExt():
9 | global lib
10 | if lib is None:
11 | lib = ctypes.cdll.LoadLibrary(None)
12 | lib.nvtxMarkA.restype = None
13 | return lib
14 |
15 |
16 | def range_push(msg):
17 | """
18 | 将一个范围压入嵌套范围跨度的堆栈. 返回所开始范围的深度(从 0 开始计数).
19 |
20 | Arguments:
21 | msg (string): 与该范围关联的消息(以 ASCII 编码传入)
22 | """
23 | if _libnvToolsExt() is None:
24 | raise RuntimeError('Unable to load nvToolsExt library')
25 | return lib.nvtxRangePushA(ctypes.c_char_p(msg.encode("ascii")))
26 |
27 |
28 | def range_pop():
29 | """
30 | 从嵌套范围跨度的堆栈中弹出一个范围. 返回所结束范围的深度(从 0 开始计数).
31 | """
32 | if _libnvToolsExt() is None:
33 | raise RuntimeError('Unable to load nvToolsExt library')
34 | return lib.nvtxRangePop()
35 |
36 |
37 | def mark(msg):
38 | """
39 | 描述在某个时刻发生的瞬间事件.
40 |
41 | Arguments:
42 | msg (string): 与该事件关联的消息(以 ASCII 编码传入).
43 | """
44 | if _libnvToolsExt() is None:
45 | raise RuntimeError('Unable to load nvToolsExt library')
46 | return lib.nvtxMarkA(ctypes.c_char_p(msg.encode("ascii")))
47 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/cuda/profiler.py:
--------------------------------------------------------------------------------
1 | import ctypes
2 | import tempfile
3 | import contextlib
4 | from . import cudart, check_error
5 |
6 |
7 | class cudaOutputMode(object):
8 | cudaKeyValuePair = ctypes.c_int(0)
9 | cudaCSV = ctypes.c_int(1)
10 |
11 | @staticmethod
12 | def for_key(key):
13 | if key == 'key_value':
14 | return cudaOutputMode.cudaKeyValuePair
15 | elif key == 'csv':
16 | return cudaOutputMode.cudaCSV
17 | else:
18 | raise RuntimeError("supported CUDA profiler output modes are: key_value and csv")
19 |
20 | DEFAULT_FLAGS = [
21 | "gpustarttimestamp",
22 | "gpuendtimestamp",
23 | "gridsize3d",
24 | "threadblocksize",
25 | "streamid",
26 | "enableonstart 0",
27 | "conckerneltrace",
28 | ]
29 |
30 |
31 | def init(output_file, flags=None, output_mode='key_value'):
32 | flags = DEFAULT_FLAGS if flags is None else flags
33 | output_mode = cudaOutputMode.for_key(output_mode)
34 | with tempfile.NamedTemporaryFile(delete=True) as f:
35 | f.write(b'\n'.join(map(lambda f: f.encode('ascii'), flags)))
36 | f.flush()
37 | check_error(cudart().cudaProfilerInitialize(
38 | ctypes.c_char_p(f.name.encode('ascii')), ctypes.c_char_p(output_file.encode('ascii')), output_mode))
39 |
40 |
41 | def start():
42 | check_error(cudart().cudaProfilerStart())
43 |
44 |
45 | def stop():
46 | check_error(cudart().cudaProfilerStop())
47 |
48 |
49 | @contextlib.contextmanager
50 | def profile():
51 | try:
52 | start()
53 | yield
54 | finally:
55 | stop()
56 |
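57 | # 用法示意(最小示例; 需在 nvprof 下运行才会产生输出,
58 | # 'prof.log' 和 run_model 仅为示例占位):
59 | #
60 | #   from torch.cuda import profiler
61 | #
62 | #   profiler.init('prof.log', output_mode='csv')
63 | #   with profiler.profile():
64 | #       run_model()   # 替换为你自己的 CUDA 计算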
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/for_onnx/__init__.py:
--------------------------------------------------------------------------------
1 | from .onnx import *
2 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/jit/passes/__init__.py:
--------------------------------------------------------------------------------
1 | from .inplace import _check_inplace
2 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/jit/passes/inplace.py:
--------------------------------------------------------------------------------
1 |
2 | def _check_inplace(trace):
3 | """Checks that all PythonOps that were not translated into JIT format are out of place.
4 |
5 | Should be run after the ONNX pass.
6 | """
7 | graph = trace.graph()
8 | for node in graph.nodes():
9 | if node.kind() == 'PythonOp':
10 | if node.i('inplace'):
11 | raise RuntimeError("inplace {} not supported in the JIT".format(node.pyname()))
12 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/__init__.py:
--------------------------------------------------------------------------------
1 | """包含从 Lua torch 移植的代码的包.
2 |
3 | 为了能够与现有的模型一起工作, 并简化当前 Lua torch 用户的过渡, 我们特意创建了这个包.
4 | 您可以在 ``torch.legacy.nn`` 中找到 ``nn`` 的代码, 在 ``torch.legacy.optim`` 中找到 ``optim`` 的代码.
5 | 该 API 应该与 Lua torch 完全兼容.
6 | """
7 |
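8 | # 用法示意(最小示例; 旧式 API 需要显式调用 forward):
9 | #
10 | #   import torch
11 | #   import torch.legacy.nn as nn
12 | #
13 | #   module = nn.Linear(10, 5)
14 | #   output = module.forward(torch.randn(4, 10))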
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Abs.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Abs(Module):
6 |
7 | def __init__(self):
8 | super(Abs, self).__init__()
9 |
10 | def updateOutput(self, input):
11 | self._backend.Abs_updateOutput(
12 | self._backend.library_state,
13 | input,
14 | self.output
15 | )
16 | return self.output
17 |
18 | def updateGradInput(self, input, gradOutput):
19 | self._backend.Abs_updateGradInput(
20 | self._backend.library_state,
21 | input,
22 | gradOutput,
23 | self.gradInput
24 | )
25 | return self.gradInput
26 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/AbsCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class AbsCriterion(Criterion):
6 |
7 | def __init__(self, sizeAverage=True):
8 | super(AbsCriterion, self).__init__()
9 | self.sizeAverage = sizeAverage
10 | self.output_tensor = torch.Tensor(1)
11 |
12 | def updateOutput(self, input, target):
13 | if self.output_tensor is None:
14 | self.output_tensor = input.new(1)
15 | self._backend.AbsCriterion_updateOutput(
16 | self._backend.library_state,
17 | input,
18 | target,
19 | self.output_tensor,
20 | self.sizeAverage,
21 | True, # reduce
22 | )
23 | self.output = self.output_tensor[0]
24 | return self.output
25 |
26 | def updateGradInput(self, input, target):
27 | implicit_gradOutput = torch.ones(1).type_as(input)
28 | self._backend.AbsCriterion_updateGradInput(
29 | self._backend.library_state,
30 | input,
31 | target,
32 | implicit_gradOutput,
33 | self.gradInput,
34 | self.sizeAverage,
35 | True, # reduce
36 | )
37 | return self.gradInput
38 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/AddConstant.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class AddConstant(Module):
6 |
7 | def __init__(self, constant_scalar, inplace=False):
8 | super(AddConstant, self).__init__()
9 | self.constant_scalar = constant_scalar
10 | self.inplace = inplace
11 |
12 | def updateOutput(self, input):
13 | if self.inplace:
14 | input.add_(self.constant_scalar)
15 | self.output.set_(input)
16 | else:
17 | self.output.resize_as_(input)
18 | self.output.copy_(input)
19 | self.output.add_(self.constant_scalar)
20 |
21 | return self.output
22 |
23 | def updateGradInput(self, input, gradOutput):
24 | if self.inplace:
25 | self.gradInput.set_(gradOutput)
26 | # restore previous input value
27 | input.add_(-self.constant_scalar)
28 | else:
29 | self.gradInput.resize_as_(gradOutput)
30 | self.gradInput.copy_(gradOutput)
31 |
32 | return self.gradInput
33 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/CAddTable.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class CAddTable(Module):
6 |
7 | def __init__(self, inplace=False):
8 | super(CAddTable, self).__init__()
9 | self.inplace = inplace
10 | self.gradInput = []
11 |
12 | def updateOutput(self, input):
13 | if self.inplace:
14 | self.output.set_(input[0])
15 | else:
16 | self.output.resize_as_(input[0]).copy_(input[0])
17 |
18 | for i in range(1, len(input)):
19 | self.output.add_(input[i])
20 |
21 | return self.output
22 |
23 | def updateGradInput(self, input, gradOutput):
24 | for i in range(len(input)):
25 | if i >= len(self.gradInput):
26 | assert i == len(self.gradInput)
27 | self.gradInput.append(input[0].new())
28 |
29 | if self.inplace:
30 | self.gradInput[i].set_(gradOutput)
31 | else:
32 | self.gradInput[i].resize_as_(input[i]).copy_(gradOutput)
33 |
34 | del self.gradInput[len(input):]
35 |
36 | return self.gradInput
37 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/CDivTable.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class CDivTable(Module):
6 |
7 | def __init__(self):
8 | super(CDivTable, self).__init__()
9 | self.gradInput = []
10 |
11 | def updateOutput(self, input):
12 | self.output.resize_as_(input[0]).copy_(input[0])
13 | self.output.div_(input[1])
14 | return self.output
15 |
16 | def updateGradInput(self, input, gradOutput):
17 | while len(self.gradInput) < 2:
18 | self.gradInput.append(input[0].new())
19 | self.gradInput[0].resize_as_(input[0]).copy_(gradOutput, broadcast=False).div_(input[1])
20 | self.gradInput[1].resize_as_(input[1]).zero_().addcdiv_(-1, self.gradInput[0], input[1]).mul_(input[0])
21 |
22 | del self.gradInput[len(input):]
23 |
24 | return self.gradInput
25 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/CSubTable.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class CSubTable(Module):
6 |
7 | def __init__(self):
8 | super(CSubTable, self).__init__()
9 | self.gradInput = [torch.Tensor(), torch.Tensor()]
10 |
11 | def updateOutput(self, input):
12 | self.output.resize_as_(input[0]).copy_(input[0])
13 | self.output.add_(-1, input[1])
14 | return self.output
15 |
16 | def updateGradInput(self, input, gradOutput):
17 | if self.gradInput[0] is None:
18 | self.gradInput[0] = input[0].new()
19 | if self.gradInput[1] is None:
20 | self.gradInput[1] = input[1].new()
21 | self.gradInput[0].resize_as_(input[0]).copy_(gradOutput)
22 | self.gradInput[1].resize_as_(input[1]).copy_(gradOutput).mul_(-1)
23 |
24 | self.gradInput = self.gradInput[:2]
25 | return self.gradInput
26 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Clamp.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .HardTanh import HardTanh
3 |
4 |
5 | class Clamp(HardTanh):
6 |
7 | def __init__(self, min_value, max_value):
8 | super(Clamp, self).__init__(min_value, max_value)
9 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Contiguous.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Contiguous(Module):
6 |
7 | def updateOutput(self, input):
8 | if not input.is_contiguous():
9 | self.output.resize_as_(input).copy_(input)
10 | else:
11 | self.output.set_(input)
12 |
13 | return self.output
14 |
15 | def updateGradInput(self, input, gradOutput):
16 | if not gradOutput.is_contiguous():
17 | self.gradInput.resize_as_(gradOutput).copy_(gradOutput)
18 | else:
19 | self.gradInput.set_(gradOutput)
20 |
21 | return self.gradInput
22 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Copy.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Copy(Module):
6 |
7 | def __init__(self, intype, outtype, dontCast=False):
8 | self.dontCast = dontCast
9 | super(Copy, self).__init__()
10 | self.gradInput = intype()
11 | self.output = outtype()
12 |
13 | def updateOutput(self, input):
14 | self.output.resize_(input.size()).copy_(input)
15 | return self.output
16 |
17 | def updateGradInput(self, input, gradOutput):
18 | self.gradInput.resize_(gradOutput.size()).copy_(gradOutput)
19 | return self.gradInput
20 |
21 | def type(self, type=None, tensorCache=None):
22 | if type and self.dontCast:
23 | return self
24 |
25 |         return super(Copy, self).type(type, tensorCache)
26 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Criterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import recursiveType
4 | import torch._thnn
5 |
6 |
7 | class Criterion(object):
8 |
9 | def __init__(self):
10 | self.gradInput = torch.Tensor()
11 | self.output = 0
12 | self._backend = torch._thnn.type2backend[type(self.gradInput)]
13 |
14 | def updateOutput(self, input, target):
15 | raise NotImplementedError
16 |
17 | def forward(self, input, target):
18 | return self.updateOutput(input, target)
19 |
20 | def backward(self, input, target):
21 | return self.updateGradInput(input, target)
22 |
23 | def updateGradInput(self, input, target):
24 | raise NotImplementedError
25 |
26 | def clone(self):
27 | raise NotImplementedError
28 |
29 | def type(self, type, tensorCache=None):
30 | # find all tensors and convert them
31 | for key, param in self.__dict__.items():
32 | setattr(self, key, recursiveType(param, type, tensorCache or {}))
33 |
34 | self._backend = torch._thnn.type2backend[type]
35 | return self
36 |
37 | def float(self):
38 | return self.type('torch.FloatTensor')
39 |
40 | def double(self):
41 | return self.type('torch.DoubleTensor')
42 |
43 | def cuda(self):
44 | return self.type('torch.cuda.FloatTensor')
45 |
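Note: every criterion in this package follows the protocol defined above — subclasses implement updateOutput/updateGradInput, while forward/backward merely dispatch to them. A minimal sketch of a custom subclass, assuming the legacy package is importable as torch.legacy.nn (the class name AbsDiffCriterion is hypothetical, for illustration only):

import torch
from torch.legacy.nn.Criterion import Criterion

class AbsDiffCriterion(Criterion):
    # hypothetical mean-absolute-error criterion
    def updateOutput(self, input, target):
        self.output = (input - target).abs().mean()
        return self.output

    def updateGradInput(self, input, target):
        # d|x - t|/dx = sign(x - t), averaged over elements
        self.gradInput.resize_as_(input).copy_(input).add_(-1, target)
        self.gradInput.sign_().div_(input.nelement())
        return self.gradInput

Usage mirrors the base class: crit.forward(x, t) returns the scalar loss, and crit.backward(x, t) returns the gradient with respect to x.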
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/CriterionTable.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class CriterionTable(Module):
6 |
7 | def __init__(self, criterion):
8 | super(CriterionTable, self).__init__()
9 | self.criterion = criterion
10 | self.gradInput = [criterion.gradInput]
11 |
12 | def updateOutput(self, input):
13 | self.output = self.criterion.updateOutput(*input)
14 | return self.output
15 |
16 | def updateGradInput(self, input, grad_output):
17 | self.criterion.updateGradInput(*input)
18 | return self.gradInput
19 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/CrossEntropyCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 | from .LogSoftMax import LogSoftMax
4 | from .ClassNLLCriterion import ClassNLLCriterion
5 |
6 |
7 | class CrossEntropyCriterion(Criterion):
8 |
9 | def __init__(self, weights=None):
10 | super(CrossEntropyCriterion, self).__init__()
11 | self.lsm = LogSoftMax()
12 | self.nll = ClassNLLCriterion(weights)
13 |
14 | def updateOutput(self, input, target):
15 | input = input.squeeze()
16 | target = target.squeeze()
17 | self.lsm.updateOutput(input)
18 | self.nll.updateOutput(self.lsm.output, target)
19 | self.output = self.nll.output
20 | return self.output
21 |
22 | def updateGradInput(self, input, target):
23 | size = input.size()
24 | input = input.squeeze()
25 | target = target.squeeze()
26 | self.nll.updateGradInput(self.lsm.output, target)
27 | self.lsm.updateGradInput(input, self.nll.gradInput)
28 | self.gradInput = self.lsm.gradInput.view(size)
29 | return self.gradInput
30 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/DistKLDivCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class DistKLDivCriterion(Criterion):
6 |
7 | def __init__(self, sizeAverage=True):
8 | super(DistKLDivCriterion, self).__init__()
9 | self.sizeAverage = sizeAverage
10 | self.output_tensor = torch.Tensor(1)
11 |
12 | def updateOutput(self, input, target):
13 | assert input.is_same_size(target)
14 | if self.output_tensor is None:
15 | self.output_tensor = input.new(1)
16 | self._backend.DistKLDivCriterion_updateOutput(
17 | self._backend.library_state,
18 | input,
19 | target,
20 | self.output_tensor,
21 | self.sizeAverage,
22 | True, # reduce
23 | )
24 | self.output = self.output_tensor[0]
25 | return self.output
26 |
27 | def updateGradInput(self, input, target):
28 | assert input.is_same_size(target)
29 | implicit_gradOutput = torch.ones(1).type_as(input)
30 | self._backend.DistKLDivCriterion_updateGradInput(
31 | self._backend.library_state,
32 | input,
33 | target,
34 | implicit_gradOutput,
35 | self.gradInput,
36 | self.sizeAverage,
37 | True, # reduce
38 | )
39 | return self.gradInput
40 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/DotProduct.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class DotProduct(Module):
7 |
8 | def __init__(self):
9 | super(DotProduct, self).__init__()
10 | self.gradInput = [torch.Tensor(), torch.Tensor()]
11 | self.buffer = None
12 |
13 | def updateOutput(self, input):
14 | input1, input2 = input[0], input[1]
15 |
16 | if self.buffer is None:
17 | self.buffer = input1.new()
18 |
19 | torch.mul(input1, input2, out=self.buffer)
20 | torch.sum(self.buffer, 1, True, out=self.output)
21 | self.output.resize_(input1.size(0))
22 | return self.output
23 |
24 | def updateGradInput(self, input, gradOutput):
25 | v1 = input[0]
26 | v2 = input[1]
27 | not_batch = False
28 |
29 | if len(self.gradInput) != 2:
30 | if self.gradInput[0] is None:
31 | self.gradInput[0] = input[0].new()
32 | if self.gradInput[1] is None:
33 | self.gradInput[1] = input[1].new()
34 | self.gradInput = self.gradInput[:2]
35 |
36 | gw1 = self.gradInput[0]
37 | gw2 = self.gradInput[1]
38 | gw1.resize_as_(v1).copy_(v2)
39 | gw2.resize_as_(v2).copy_(v1)
40 |
41 | go = gradOutput.contiguous().view(-1, 1).expand_as(v1)
42 | gw1.mul_(go)
43 | gw2.mul_(go)
44 |
45 | return self.gradInput
46 |
47 | def clearState(self):
48 | clear(self, 'buffer')
49 | return super(DotProduct, self).clearState()
50 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Dropout.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class Dropout(Module):
7 |
8 | def __init__(self, p=0.5, inplace=False):
9 | super(Dropout, self).__init__()
10 | self.p = p
11 | self.inplace = inplace
12 | self.train = True
13 | self.noise = torch.Tensor()
14 |
15 | def updateOutput(self, input):
16 | if self.inplace:
17 | self.output.set_(input)
18 | else:
19 | self.output.resize_as_(input).copy_(input)
20 |
21 | if self.p > 0 and self.train:
22 | self.noise.resize_as_(input)
23 | self.noise.bernoulli_(1 - self.p)
24 | self.noise.div_(1 - self.p)
25 | self.output.mul_(self.noise)
26 |
27 | return self.output
28 |
29 | def updateGradInput(self, input, gradOutput):
30 | if self.inplace:
31 | self.gradInput.set_(gradOutput)
32 | else:
33 | self.gradInput.resize_as_(gradOutput).copy_(gradOutput)
34 |
35 | if self.p > 0 and self.train:
36 | self.gradInput.mul_(self.noise) # simply mask the gradients with the noise vector
37 |
38 | return self.gradInput
39 |
40 | def setp(self, p):
41 | self.p = p
42 |
43 | def __repr__(self):
44 | return super(Dropout, self).__repr__() + '({:.4f})'.format(self.p)
45 |
46 | def clearState(self):
47 | clear(self, 'noise')
48 | return super(Dropout, self).clearState()
49 |
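The bernoulli_(1 - p) / div_(1 - p) pair above is the usual "inverted dropout" scaling: dividing the surviving units by 1 - p at training time keeps the expected activation unchanged, so nothing needs rescaling at test time. A quick numeric check in plain Python (illustrative only):

import random

p = 0.5                 # drop probability
x = 1.0                 # one activation
trials = 100000
total = 0.0
for _ in range(trials):
    keep = 1.0 if random.random() > p else 0.0   # bernoulli(1 - p)
    total += x * keep / (1 - p)                  # inverted-dropout scaling
print(total / trials)   # ~= x: the expectation is preserved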
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/ELU.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf8 -*-
2 | import torch
3 | from .Module import Module
4 |
5 |
6 | class ELU(Module):
7 | """
8 | Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter
9 | Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
10 |     http://arxiv.org/pdf/1511.07289.pdf
11 | """
12 |
13 | def __init__(self, alpha=1., inplace=False):
14 | assert type(alpha) == float
15 | super(ELU, self).__init__()
16 | self.alpha = alpha
17 | self.inplace = inplace
18 |
19 | def updateOutput(self, input):
20 | self._backend.ELU_updateOutput(
21 | self._backend.library_state,
22 | input,
23 | self.output,
24 | self.alpha,
25 | self.inplace
26 | )
27 | return self.output
28 |
29 | def updateGradInput(self, input, gradOutput):
30 | self._backend.ELU_updateGradInput(
31 | self._backend.library_state,
32 | gradOutput,
33 | self.gradInput,
34 | self.output,
35 | self.alpha,
36 | self.inplace
37 | )
38 | return self.gradInput
39 |
40 | def __repr__(self):
41 | return '{}(alpha={:.3f})'.format(str(type(self)), self.alpha)
42 |
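For reference, the backend calls above compute the ELU from the cited paper: f(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise. The derivative can be expressed through the output itself — f'(x) = 1 for x > 0 and f(x) + alpha otherwise — which is why updateGradInput only needs self.output. A plain-Python sketch of the same function (illustrative, not the backend code):

import math

def elu(x, alpha=1.0):
    return x if x > 0 else alpha * (math.exp(x) - 1)

def elu_grad_from_output(y, alpha=1.0):
    # valid because y > 0 exactly when x > 0
    return 1.0 if y > 0 else y + alpha

print(elu(-1.0))                        # ~ -0.6321
print(elu_grad_from_output(elu(-1.0)))  # ~ 0.3679 == exp(-1)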
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Exp.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Exp(Module):
6 |
7 | def updateOutput(self, input):
8 | return torch.exp(input, out=self.output)
9 |
10 | def updateGradInput(self, input, gradOutput):
11 | return torch.mul(self.output, gradOutput, out=self.gradInput)
12 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/GradientReversal.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class GradientReversal(Module):
6 |
7 | def __init__(self, lambd=1):
8 | super(GradientReversal, self).__init__()
9 | self.lambd = lambd
10 |
11 | def setLambda(self, lambd):
12 | self.lambd = lambd
13 |
14 | def updateOutput(self, input):
15 | self.output.set_(input)
16 | return self.output
17 |
18 | def updateGradInput(self, input, gradOutput):
19 | self.gradInput.resize_as_(gradOutput)
20 | self.gradInput.copy_(gradOutput)
21 |         self.gradInput.mul_(-self.lambd)  # forward is the identity; the gradient is reversed (and scaled by lambd)
22 | return self.gradInput
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/HardShrink.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class HardShrink(Module):
6 |
7 | def __init__(self, lambd=0.5):
8 | assert type(lambd) == float
9 | super(HardShrink, self).__init__()
10 | self.lambd = lambd
11 |
12 | def updateOutput(self, input):
13 | self._backend.HardShrink_updateOutput(
14 | self._backend.library_state,
15 | input,
16 | self.output,
17 | self.lambd
18 | )
19 | return self.output
20 |
21 | def updateGradInput(self, input, gradOutput):
22 | self._backend.HardShrink_updateGradInput(
23 | self._backend.library_state,
24 | input,
25 | gradOutput,
26 | self.gradInput,
27 | self.lambd
28 | )
29 | return self.gradInput
30 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/HardTanh.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class HardTanh(Module):
6 |
7 | def __init__(self, min_value=-1, max_value=1, inplace=False):
8 | super(HardTanh, self).__init__()
9 | self.min_val = min_value
10 | self.max_val = max_value
11 | self.inplace = inplace
12 | assert self.max_val > self.min_val
13 |
14 | def updateOutput(self, input):
15 | self._backend.HardTanh_updateOutput(
16 | self._backend.library_state,
17 | input,
18 | self.output,
19 | self.min_val,
20 | self.max_val,
21 | self.inplace
22 | )
23 | return self.output
24 |
25 | def updateGradInput(self, input, gradOutput):
26 | self._backend.HardTanh_updateGradInput(
27 | self._backend.library_state,
28 | input,
29 | gradOutput,
30 | self.gradInput,
31 | self.min_val,
32 | self.max_val,
33 | self.inplace
34 | )
35 | return self.gradInput
36 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/HingeEmbeddingCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class HingeEmbeddingCriterion(Criterion):
6 |
7 | def __init__(self, margin=1, sizeAverage=True):
8 | super(HingeEmbeddingCriterion, self).__init__()
9 | self.margin = margin
10 | self.sizeAverage = sizeAverage
11 | self.buffer = None
12 |
13 | def updateOutput(self, input, y):
14 | if self.buffer is None:
15 | self.buffer = input.new()
16 | self.buffer.resize_as_(input).copy_(input)
17 | self.buffer[torch.eq(y, -1.)] = 0
18 | self.output = self.buffer.sum()
19 |
20 | self.buffer.fill_(self.margin).add_(-1, input)
21 | self.buffer.clamp_(min=0)
22 | self.buffer[torch.eq(y, 1.)] = 0
23 | self.output = self.output + self.buffer.sum()
24 |
25 | if self.sizeAverage:
26 | self.output = self.output / input.nelement()
27 |
28 | return self.output
29 |
30 | def updateGradInput(self, input, y):
31 | self.gradInput.resize_as_(input).copy_(y)
32 | self.gradInput[torch.mul(torch.eq(y, -1), torch.gt(input, self.margin))] = 0
33 |
34 | if self.sizeAverage:
35 | self.gradInput.mul_(1. / input.nelement())
36 |
37 | return self.gradInput
38 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Identity.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class Identity(Module):
7 |
8 | def updateOutput(self, input):
9 | self.output = input
10 | return self.output
11 |
12 | def updateGradInput(self, input, gradOutput):
13 | self.gradInput = gradOutput
14 | return self.gradInput
15 |
16 | def clearState(self):
17 | clear(self, 'gradInput')
18 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Index.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Index(Module):
6 |
7 | def __init__(self, dimension):
8 | super(Index, self).__init__()
9 | self.dimension = dimension
10 | self.gradInput = [self.gradInput]
11 |
12 | def updateOutput(self, input):
13 | t = input[0]
14 | index = input[1]
15 | torch.index_select(t, self.dimension, index, out=self.output)
16 | return self.output
17 |
18 | def updateGradInput(self, input, gradOutput):
19 | t = input[0]
20 | index = input[1]
21 |
22 | gradInput = self.gradInput[0] # no gradient for the index variable
23 | gradInput.resize_as_(t).zero_()
24 | gradInput.index_add_(self.dimension, index, gradOutput)
25 | return self.gradInput
26 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/L1Cost.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 | from .utils import clear
4 |
5 |
6 | class L1Cost(Criterion):
7 |
8 | def __init__(self):
9 | super(L1Cost, self).__init__()
10 | self.output_tensor = torch.Tensor(1)
11 |
12 | def updateOutput(self, input, target=None):
13 | assert target is None
14 | if self.output_tensor is None:
15 | self.output_tensor = input.new(1)
16 | self._backend.L1Cost_updateOutput(
17 | self._backend.library_state,
18 | input,
19 | self.output_tensor
20 | )
21 | self.output = self.output_tensor[0]
22 | return self.output
23 |
24 | def updateGradInput(self, input, target=None):
25 | assert target is None
26 | self._backend.L1Cost_updateGradInput(
27 | self._backend.library_state,
28 | input,
29 | None,
30 | self.gradInput
31 | )
32 | return self.gradInput
33 |
34 | def clearState(self):
35 | clear(self, 'output_tensor')
36 | return super(L1Cost, self).clearState()
37 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/L1HingeEmbeddingCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class L1HingeEmbeddingCriterion(Criterion):
6 |
7 | def __init__(self, margin=1):
8 | super(L1HingeEmbeddingCriterion, self).__init__()
9 | self.margin = margin
10 | self.gradInput = [torch.Tensor(), torch.Tensor()]
11 |
12 | def updateOutput(self, input, y):
13 | self.output = input[0].dist(input[1], 1)
14 | if y == -1:
15 | self.output = max(0, self.margin - self.output)
16 |
17 | return self.output
18 |
19 |     def _mathsign(self, t):
20 |         return 1 if t > 0 else -1
21 |
22 | def updateGradInput(self, input, y):
23 | self.gradInput[0].resize_as_(input[0])
24 | self.gradInput[1].resize_as_(input[1])
25 | self.gradInput[0].copy_(input[0])
26 | self.gradInput[0].add_(-1, input[1])
27 | dist = self.gradInput[0].norm(1)
28 | self.gradInput[0].sign_()
29 | if y == -1: # just to avoid a mul by 1
30 | if dist > self.margin:
31 | self.gradInput[0].zero_()
32 | else:
33 | self.gradInput[0].mul_(-1)
34 |
35 | self.gradInput[1].zero_().add_(-1, self.gradInput[0])
36 | return self.gradInput
37 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/L1Penalty.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 | # This module acts as an L1 latent state regularizer, adding the
5 | # [gradOutput] to the gradient of the L1 loss. The [input] is copied to
6 | # the [output].
7 |
8 |
9 | class L1Penalty(Module):
10 |
11 | def __init__(self, l1weight, sizeAverage=False, provideOutput=True):
12 | super(L1Penalty, self).__init__()
13 | self.l1weight = l1weight
14 | self.sizeAverage = sizeAverage
15 | self.provideOutput = provideOutput
16 |
17 | def updateOutput(self, input):
18 | m = self.l1weight
19 | if self.sizeAverage:
20 | m = m / input.nelement()
21 |
22 | loss = m * input.norm(1)
23 | self.loss = loss
24 | self.output = input
25 | return self.output
26 |
27 | def updateGradInput(self, input, gradOutput):
28 | m = self.l1weight
29 | if self.sizeAverage:
30 | m = m / input.nelement()
31 |
32 | self.gradInput.resize_as_(input).copy_(input).sign_().mul_(m)
33 |
34 | if self.provideOutput:
35 | self.gradInput.add_(gradOutput)
36 |
37 | return self.gradInput
38 |
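As the header comment says, the forward pass is the identity and the penalty only enters through the gradient: m * sign(input) is the subgradient of m * ||input||_1, which updateGradInput then adds to the incoming gradOutput. A finite-difference check of that identity on one element (away from the kink at 0):

m = 0.01    # l1weight
x = -3.0
eps = 1e-6
numeric = (m * abs(x + eps) - m * abs(x - eps)) / (2 * eps)
analytic = m * (1.0 if x > 0 else -1.0)
print(numeric, analytic)   # both ~ -0.01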
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/LeakyReLU.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class LeakyReLU(Module):
6 |
 7 |     def __init__(self, negval=0.01, inplace=False):
8 | super(LeakyReLU, self).__init__()
9 | if isinstance(negval, bool):
10 | inplace = negval
11 |             self.negval = 0.01
12 | else:
13 | self.negval = negval
14 |
15 | # default for inplace is False
16 | self.inplace = inplace
17 | if self.negval < 0:
18 | # TODO: warning here
19 | self.inplace = False
20 |
21 | def updateOutput(self, input):
22 | self._backend.LeakyReLU_updateOutput(
23 | self._backend.library_state,
24 | input,
25 | self.output,
26 | self.negval,
27 | self.inplace
28 | )
29 | return self.output
30 |
31 | def updateGradInput(self, input, gradOutput):
32 | self._backend.LeakyReLU_updateGradInput(
33 | self._backend.library_state,
34 | input,
35 | gradOutput,
36 | self.gradInput,
37 | self.negval,
38 | self.inplace
39 | )
40 | return self.gradInput
41 |
42 | def __repr__(self):
43 | return str(type(self)) + '({:.4f})'.format(self.negval)
44 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Log.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Log(Module):
6 |
7 | def updateOutput(self, input):
8 | self.output.resize_as_(input)
9 | self.output.copy_(input)
10 | self.output.log_()
11 | return self.output
12 |
13 | def updateGradInput(self, input, gradOutput):
14 | self.gradInput.resize_as_(input)
15 | self.gradInput.fill_(1)
16 | self.gradInput.div_(input)
17 | self.gradInput.mul_(gradOutput)
18 | return self.gradInput
19 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/LogSigmoid.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class LogSigmoid(Module):
7 |
8 | def __init__(self):
9 | super(LogSigmoid, self).__init__()
10 | self.buffer = None
11 |
12 | def updateOutput(self, input):
13 | if self.buffer is None:
14 | self.buffer = input.new()
15 | self._backend.LogSigmoid_updateOutput(
16 | self._backend.library_state,
17 | input,
18 | self.output,
19 | self.buffer
20 | )
21 | return self.output
22 |
23 | def updateGradInput(self, input, gradOutput):
24 | self._backend.LogSigmoid_updateGradInput(
25 | self._backend.library_state,
26 | input,
27 | gradOutput,
28 | self.gradInput,
29 | self.buffer
30 | )
31 | return self.gradInput
32 |
33 | def clearState(self):
34 | clear(self, 'buffer')
35 | return super(LogSigmoid, self).clearState()
36 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/LogSoftMax.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class LogSoftMax(Module):
6 |
7 | def __init__(self, dim=None):
8 | super(LogSoftMax, self).__init__()
9 | if dim is not None:
10 | self.dim = dim
11 |
12 | def _get_dim(self, input):
13 |         return getattr(self, 'dim', 0 if input.dim() == 1 or input.dim() == 3 else 1)  # 1D/3D input: dim 0; batched 2D/4D: dim 1
14 |
15 | def updateOutput(self, input):
16 | self._backend.LogSoftMax_updateOutput(
17 | self._backend.library_state,
18 | input,
19 | self.output,
20 | self._get_dim(input)
21 | )
22 | return self.output
23 |
24 | def updateGradInput(self, input, gradOutput):
25 | self._backend.LogSoftMax_updateGradInput(
26 | self._backend.library_state,
27 | input,
28 | gradOutput,
29 | self.gradInput,
30 | self.output,
31 | self._get_dim(input)
32 | )
33 | return self.gradInput
34 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MSECriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class MSECriterion(Criterion):
6 |
7 | def __init__(self, sizeAverage=True):
8 | super(MSECriterion, self).__init__()
9 | self.sizeAverage = sizeAverage
10 | self.output_tensor = None
11 |
12 | def updateOutput(self, input, target):
13 | if self.output_tensor is None:
14 | self.output_tensor = input.new(1)
15 | self._backend.MSECriterion_updateOutput(
16 | self._backend.library_state,
17 | input,
18 | target,
19 | self.output_tensor,
20 | self.sizeAverage,
21 | True, # reduce
22 | )
23 | self.output = self.output_tensor[0]
24 | return self.output
25 |
26 | def updateGradInput(self, input, target):
27 | implicit_gradOutput = torch.Tensor([1]).type(input.type())
28 |
29 | self._backend.MSECriterion_updateGradInput(
30 | self._backend.library_state,
31 | input,
32 | target,
33 | implicit_gradOutput,
34 | self.gradInput,
35 | self.sizeAverage,
36 | True, # reduce
37 | )
38 | return self.gradInput
39 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MarginCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class MarginCriterion(Criterion):
6 |
7 | def __init__(self, margin=1, sizeAverage=True):
8 | super(MarginCriterion, self).__init__()
 9 |         self.sizeAverage = sizeAverage
10 | self.margin = margin
11 | self.output_tensor = None
12 |
13 | def updateOutput(self, input, target):
14 | if self.output_tensor is None:
15 | self.output_tensor = input.new(1)
16 | self._backend.MarginCriterion_updateOutput(
17 | self._backend.library_state,
18 | input,
19 | target,
20 | self.output_tensor,
21 | self.sizeAverage,
22 | self.margin
23 | )
24 | self.output = self.output_tensor[0]
25 | return self.output
26 |
27 | def updateGradInput(self, input, target):
28 | self._backend.MarginCriterion_updateGradInput(
29 | self._backend.library_state,
30 | input,
31 | target,
32 | self.gradInput,
33 | self.sizeAverage,
34 | self.margin
35 | )
36 | return self.gradInput
37 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Mean.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Sum import Sum
3 |
4 | """
5 |
 6 | This file is kept only for backward compatibility.
 7 |
 8 | Please use "nn.Sum(dimension, nInputDims, sizeAverage)" instead.
9 |
10 | """
11 |
12 |
13 | class Mean(Sum):
14 |
15 | def __init__(self, dimension):
16 | super(Mean, self).__init__(dimension, True)
17 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Mul.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | from .Module import Module
4 |
5 |
6 | class Mul(Module):
7 |
8 | def __init__(self):
9 | super(Mul, self).__init__()
10 | self.weight = torch.Tensor(1)
11 | self.gradWeight = torch.Tensor(1)
12 | self.reset()
13 |
14 | def reset(self, stdv=None):
15 | if stdv is not None:
16 | stdv = stdv * math.sqrt(3)
17 | else:
18 | stdv = 1. / math.sqrt(self.weight.size(0))
19 | self.weight.uniform_(-stdv, stdv)
20 |
21 | def updateOutput(self, input):
22 | self.output.resize_as_(input).copy_(input)
23 | self.output.mul_(self.weight[0])
24 | return self.output
25 |
26 | def updateGradInput(self, input, gradOutput):
27 | self.gradInput.resize_as_(input).zero_()
28 | self.gradInput.add_(self.weight[0], gradOutput)
29 | return self.gradInput
30 |
31 | def accGradParameters(self, input, gradOutput, scale=1):
32 | self.gradWeight[0] = (self.gradWeight[0] +
33 | scale * input.contiguous().view(-1).dot(gradOutput.contiguous().view(-1)))
34 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MulConstant.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class MulConstant(Module):
6 |
7 | def __init__(self, constant_scalar, inplace=False):
8 | super(MulConstant, self).__init__()
9 | self.constant_scalar = constant_scalar
10 | self.inplace = inplace
11 |
12 | def updateOutput(self, input):
13 | if self.inplace:
14 | input.mul_(self.constant_scalar)
15 | self.output.set_(input)
16 | else:
17 | self.output.resize_as_(input)
18 | self.output.copy_(input)
19 | self.output.mul_(self.constant_scalar)
20 |
21 | return self.output
22 |
23 | def updateGradInput(self, input, gradOutput):
24 | if self.gradInput is None:
25 | return
26 |
27 | if self.inplace:
28 | gradOutput.mul_(self.constant_scalar)
29 | self.gradInput.set_(gradOutput)
30 | # restore previous input value
31 | input.div_(self.constant_scalar)
32 | else:
33 | self.gradInput.resize_as_(gradOutput)
34 | self.gradInput.copy_(gradOutput)
35 | self.gradInput.mul_(self.constant_scalar)
36 |
37 | return self.gradInput
38 |
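The inplace branch of updateGradInput deserves a note: it scales gradOutput in place and then restores input by dividing the constant back out, because the inplace forward pass overwrote input with its output. That only works for a nonzero constant and mutates the caller's tensors; the non-inplace branch is the safe default. The arithmetic being relied on is just:

c = 2.5
x = 4.0
y = c * x                   # the inplace forward overwrites x with y
grad_out = 1.0
grad_in = c * grad_out      # d(c*x)/dx = c
x_restored = y / c          # undo the inplace forward; requires c != 0
print(grad_in, x_restored)  # 2.5 4.0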
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MultiCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 | from .utils import recursiveResizeAs, recursiveFill, recursiveAdd
4 |
5 |
6 | class MultiCriterion(Criterion):
7 |
 8 |     def __init__(self):
9 | super(MultiCriterion, self).__init__()
10 | self.criterions = []
11 | self.weights = torch.DoubleStorage()
12 |
13 | def add(self, criterion, weight=1):
14 | self.criterions.append(criterion)
15 | new_weights = torch.DoubleStorage(len(self.criterions))
16 | for i, v in enumerate(self.weights):
17 | new_weights[i] = v
18 | new_weights[len(self.criterions) - 1] = weight
19 | self.weights = new_weights
20 | return self
21 |
22 | def updateOutput(self, input, target):
23 | self.output = 0
24 | for i in range(len(self.criterions)):
25 | self.output = self.output + self.weights[i] * self.criterions[i].updateOutput(input, target)
26 |
27 | return self.output
28 |
29 | def updateGradInput(self, input, target):
30 | self.gradInput = recursiveResizeAs(self.gradInput, input)[0]
31 | recursiveFill(self.gradInput, 0)
32 | for i in range(len(self.criterions)):
33 | recursiveAdd(self.gradInput, self.weights[i], self.criterions[i].updateGradInput(input, target))
34 |
35 | return self.gradInput
36 |
37 | def type(self, type):
38 | for criterion in self.criterions:
39 | criterion.type(type)
40 |
41 | return super(MultiCriterion, self).type(type)
42 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MultiLabelMarginCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class MultiLabelMarginCriterion(Criterion):
6 |
7 | def __init__(self, sizeAverage=True):
8 | super(MultiLabelMarginCriterion, self).__init__()
9 | self.sizeAverage = sizeAverage
10 | self.isTarget = torch.Tensor()
11 | self.output_tensor = None
12 |
13 | def updateOutput(self, input, target):
14 | if self.output_tensor is None:
15 | self.output_tensor = input.new(1)
16 | target = target.long()
17 | self._backend.MultiLabelMarginCriterion_updateOutput(
18 | self._backend.library_state,
19 | input,
20 | target,
21 | self.output_tensor,
22 | self.isTarget,
23 | self.sizeAverage
24 | )
25 | self.output = self.output_tensor[0]
26 | return self.output
27 |
28 | def updateGradInput(self, input, target):
29 | target = target.long()
30 | self._backend.MultiLabelMarginCriterion_updateGradInput(
31 | self._backend.library_state,
32 | input,
33 | target,
34 | self.gradInput,
35 | self.isTarget,
36 | self.sizeAverage
37 | )
38 | return self.gradInput
39 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MultiLabelSoftMarginCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 | from .Sigmoid import Sigmoid
4 | from .BCECriterion import BCECriterion
5 |
6 |
7 | class MultiLabelSoftMarginCriterion(Criterion):
8 | """
9 | A MultiLabel multiclass criterion based on sigmoid:
10 |
11 | the loss is:
12 |     l(x, y) = - sum_i ( y[i] * log(p[i]) + (1 - y[i]) * log(1 - p[i]) )
13 | where p[i] = exp(x[i]) / (1 + exp(x[i]))
14 |
15 | and with weights:
16 |     l(x, y) = - sum_i weights[i] * ( y[i] * log(p[i]) + (1 - y[i]) * log(1 - p[i]) )
17 |
18 |
19 | """
20 |
21 | def __init__(self, weights=None):
22 | super(MultiLabelSoftMarginCriterion, self).__init__()
23 | self.lsm = Sigmoid()
24 | self.nll = BCECriterion(weights)
25 |
26 | def updateOutput(self, input, target):
27 | input = input if input.nelement() == 1 else input.squeeze()
28 | target = target if target.nelement() == 1 else target.squeeze()
29 | self.lsm.updateOutput(input)
30 | self.nll.updateOutput(self.lsm.output, target)
31 | self.output = self.nll.output
32 | return self.output
33 |
34 | def updateGradInput(self, input, target):
35 | size = input.size()
36 | input = input if input.nelement() == 1 else input.squeeze()
37 | target = target if target.nelement() == 1 else target.squeeze()
38 | self.nll.updateGradInput(self.lsm.output, target)
39 | self.lsm.updateGradInput(input, self.nll.gradInput)
40 | self.gradInput = self.lsm.gradInput.view(size)
41 | return self.gradInput
42 |
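The docstring's loss can be sanity-checked directly: with p = sigmoid(x), each term is the binary cross-entropy y*log(p) + (1 - y)*log(1 - p), negated and summed. A small check in plain Python (illustrative; note that the BCECriterion wired in above may additionally average over elements, depending on its sizeAverage setting):

import math

def sigmoid(x):
    return 1.0 / (1.0 + math.exp(-x))

def multilabel_soft_margin(xs, ys):
    # l(x, y) = - sum_i ( y[i]*log(p[i]) + (1 - y[i])*log(1 - p[i]) )
    total = 0.0
    for x, y in zip(xs, ys):
        p = sigmoid(x)
        total -= y * math.log(p) + (1 - y) * math.log(1 - p)
    return total

print(multilabel_soft_margin([0.0, 2.0], [1, 0]))  # ~ 0.6931 + 2.1269 = 2.8200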
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/MultiMarginCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class MultiMarginCriterion(Criterion):
6 |
7 | def __init__(self, p=1, weights=None, margin=1, sizeAverage=True):
8 | super(MultiMarginCriterion, self).__init__()
9 | if p != 1 and p != 2:
10 | raise ValueError("only p == 1 and p == 2 supported")
11 | self.p = p
12 | self.margin = margin
13 | self.sizeAverage = sizeAverage
14 | if weights is not None:
15 | assert weights.dim() == 1
16 |         self.weights = weights
17 | self.output_tensor = None
18 |
19 | def updateOutput(self, input, target):
20 | if self.output_tensor is None:
21 | self.output_tensor = input.new(1)
22 | target = target.long()
23 | self._backend.MultiMarginCriterion_updateOutput(
24 | self._backend.library_state,
25 | input,
26 | target,
27 | self.output_tensor,
28 | self.sizeAverage,
29 | self.p,
30 | self.weights,
31 | self.margin
32 | )
33 | self.output = self.output_tensor[0]
34 | return self.output
35 |
36 | def updateGradInput(self, input, target):
37 | target = target.long()
38 | self._backend.MultiMarginCriterion_updateGradInput(
39 | self._backend.library_state,
40 | input,
41 | target,
42 | self.gradInput,
43 | self.sizeAverage,
44 | self.p,
45 | self.weights,
46 | self.margin
47 | )
48 | return self.gradInput
49 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Narrow.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Narrow(Module):
6 |
7 | def __init__(self, dimension, offset, length=1):
8 | super(Narrow, self).__init__()
9 | self.dimension = dimension
10 | self.index = offset
11 | self.length = length
12 |
13 | def updateOutput(self, input):
14 | length = self.length
15 | if length < 0:
16 | length = input.size(self.dimension) - self.index + self.length + 1
17 |
18 | output = input.narrow(self.dimension, self.index, length)
19 | self.output = self.output.type_as(output)
20 | self.output.resize_as_(output).copy_(output)
21 | return self.output
22 |
23 | def updateGradInput(self, input, gradOutput):
24 | length = self.length
25 | if length < 0:
26 | length = input.size(self.dimension) - self.index + self.length + 1
27 |
28 | self.gradInput = self.gradInput.type_as(input)
29 | self.gradInput.resize_as_(input).zero_()
30 | self.gradInput.narrow(self.dimension, self.index, length).copy_(gradOutput)
31 | return self.gradInput
32 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/NarrowTable.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear, recursiveResizeAs, recursiveFill
4 |
5 |
6 | class NarrowTable(Module):
7 |
8 | def __init__(self, offset, length=1):
9 | super(NarrowTable, self).__init__()
10 | self.offset = offset
11 | self.length = length
12 | self.output = []
13 | self.gradInput = []
14 |
15 | def updateOutput(self, input):
16 | self.output[:] = [input[self.offset + i] for i in range(self.length)]
17 | return self.output
18 |
19 | def updateGradInput(self, input, gradOutput):
20 | if len(self.gradInput) != len(input):
21 | self.gradInput[:] = [None for i in range(len(input))]
22 |
23 | assert len(gradOutput) == self.length
24 | for i in range(self.length):
25 | self.gradInput[self.offset + i] = gradOutput[i]
26 |
27 | for i in range(len(input)):
28 | if i < self.offset or i >= self.offset + self.length:
29 | gi = self.gradInput[i]
30 | if gi is None:
31 | gi = input[i].new()
32 | self.gradInput[i] = recursiveResizeAs(gi, input[i])[0]
33 | recursiveFill(self.gradInput[i], 0)
34 |
35 | return self.gradInput
36 |
37 | def type(self, type=None, tensorCache=None):
38 | if not type:
39 | return self._type
40 | clear(self, 'output', 'gradInput')
41 |         return super(NarrowTable, self).type(type, tensorCache)
42 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/PReLU.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class PReLU(Module):
7 |
8 | def __init__(self, nOutputPlane=0):
9 | super(PReLU, self).__init__()
10 | # if no argument provided, use shared model (weight is scalar)
11 | self.nOutputPlane = nOutputPlane
12 | self.weight = torch.Tensor(nOutputPlane or 1).fill_(0.25)
13 | self.gradWeight = torch.Tensor(nOutputPlane or 1)
14 |
15 | def updateOutput(self, input):
16 | self._backend.PReLU_updateOutput(
17 | self._backend.library_state,
18 | input,
19 | self.output,
20 | self.weight
21 | )
22 | return self.output
23 |
24 | def updateGradInput(self, input, gradOutput):
25 | self._backend.PReLU_updateGradInput(
26 | self._backend.library_state,
27 | input,
28 | gradOutput,
29 | self.gradInput,
30 | self.weight
31 | )
32 | return self.gradInput
33 |
34 | def accGradParameters(self, input, gradOutput, scale=1):
35 | self._backend.PReLU_accGradParameters(
36 | self._backend.library_state,
37 | input,
38 | gradOutput,
39 | self.gradInput,
40 | self.weight,
41 | self.gradWeight,
42 | scale
43 | )
44 | return self.gradWeight
45 |
46 | def clearState(self):
47 | clear(self, 'gradWeightBuf', 'gradWeightBuf2')
48 | return super(PReLU, self).clearState()
49 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/ParallelCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 | from .utils import recursiveResizeAs, recursiveFill, recursiveAdd
4 |
5 |
6 | class ParallelCriterion(Criterion):
7 |
8 | def __init__(self, repeatTarget=False):
9 | super(ParallelCriterion, self).__init__()
10 | self.criterions = []
11 | self.weights = []
12 | self.gradInput = []
13 | self.repeatTarget = repeatTarget
14 |
15 | def add(self, criterion, weight=1):
16 | self.criterions.append(criterion)
17 | self.weights.append(weight)
18 | return self
19 |
20 | def updateOutput(self, input, target):
21 | self.output = 0
22 | for i, criterion in enumerate(self.criterions):
23 | current_target = target if self.repeatTarget else target[i]
24 | self.output += self.weights[i] * criterion.updateOutput(input[i], current_target)
25 |
26 | return self.output
27 |
28 | def updateGradInput(self, input, target):
29 | self.gradInput = recursiveResizeAs(self.gradInput, input)[0]
30 | recursiveFill(self.gradInput, 0)
31 | for i, criterion in enumerate(self.criterions):
32 | current_target = target if self.repeatTarget else target[i]
33 | recursiveAdd(self.gradInput[i], self.weights[i], criterion.updateGradInput(input[i], current_target))
34 |
35 | return self.gradInput
36 |
37 | def type(self, type=None, tensorCache=None):
38 | self.gradInput = []
39 | return super(ParallelCriterion, self).type(type, tensorCache)
40 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Power.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Power(Module):
6 |
7 | def __init__(self, p):
8 | super(Power, self).__init__()
9 | self.pow = p
10 |
11 | def updateOutput(self, input):
12 | self.output.resize_as_(input).copy_(input)
13 | self.output.pow_(self.pow)
14 | return self.output
15 |
16 | def updateGradInput(self, input, gradOutput):
17 | self.gradInput.resize_as_(input).copy_(input)
18 | self.gradInput.pow_(self.pow - 1)
19 | self.gradInput.mul_(gradOutput).mul_(self.pow)
20 | return self.gradInput
21 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/RReLU.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class RReLU(Module):
7 |
8 | def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False):
9 | super(RReLU, self).__init__()
10 | self.lower = lower
11 | self.upper = upper
12 | self.inplace = inplace
13 |
14 | assert self.lower <= self.upper and self.lower >= 0 and self.upper >= 0
15 | self.noise = torch.Tensor()
16 | self.train = True
17 |
18 | def updateOutput(self, input):
19 | self._backend.RReLU_updateOutput(
20 | self._backend.library_state,
21 | input,
22 | self.output,
23 | self.noise,
24 | self.lower,
25 | self.upper,
26 | self.train,
27 | self.inplace,
28 | torch.default_generator if not input.is_cuda else 0
29 | )
30 | return self.output
31 |
32 | def updateGradInput(self, input, gradOutput):
33 | self._backend.RReLU_updateGradInput(
34 | self._backend.library_state,
35 | input,
36 | gradOutput,
37 | self.gradInput,
38 | self.noise,
39 | self.lower,
40 | self.upper,
41 | self.train,
42 | self.inplace
43 | )
44 | return self.gradInput
45 |
46 | def __repr__(self):
47 | return super(RReLU, self).__repr__() + '({:.4f}, {:.4f})'.format(self.lower, self.upper)
48 |
49 | def clearState(self):
50 | clear(self, 'noise')
51 | return super(RReLU, self).clearState()
52 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/ReLU.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Threshold import Threshold
3 |
4 |
5 | class ReLU(Threshold):
6 |
7 | def __init__(self, inplace=False):
8 | super(ReLU, self).__init__(0, 0, inplace)
9 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/ReLU6.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class ReLU6(Module):
6 |
7 | def __init__(self, inplace=False):
8 | super(ReLU6, self).__init__()
9 | self.inplace = inplace
10 |
11 | def updateOutput(self, input):
12 | self._backend.HardTanh_updateOutput(
13 | self._backend.library_state,
14 | input,
15 | self.output,
16 | 0, 6, self.inplace
17 | )
18 | return self.output
19 |
20 | def updateGradInput(self, input, gradOutput):
21 | self._backend.HardTanh_updateGradInput(
22 | self._backend.library_state,
23 | input,
24 | gradOutput,
25 | self.gradInput,
26 | 0, 6, self.inplace
27 | )
28 | return self.gradInput
29 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Replicate.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Replicate(Module):
6 |
7 | def __init__(self, nf, dim=0):
8 | super(Replicate, self).__init__()
9 | self.nfeatures = nf
10 | self.dim = dim
11 | assert self.dim >= 0
12 |
13 | def updateOutput(self, input):
14 | assert self.dim < input.dim()
15 |
16 | size = list(input.size())
17 | size.insert(self.dim, self.nfeatures)
18 |
19 | stride = list(input.stride())
20 |         stride.insert(self.dim, 0)  # zero stride repeats the same data along the new dim without copying
21 |
22 | self.output.set_(input.storage(), input.storage_offset(),
23 | torch.Size(size), tuple(stride))
24 | return self.output
25 |
26 | def updateGradInput(self, input, gradOutput):
27 | self.gradInput.resize_as_(input).zero_()
28 | size = list(input.size())
29 | size.insert(self.dim, 1)
30 |
31 | gradInput = self.gradInput.view(*size)
32 | torch.sum(gradOutput, self.dim, True, out=gradInput)
33 | return self.gradInput
34 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Select.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Select(Module):
6 |
7 | def __init__(self, dimension, index):
8 | super(Select, self).__init__()
9 | self.dimension = dimension
10 | self.index = index
11 |
12 | def updateOutput(self, input):
13 | index = self.index if self.index >= 0 else input.size(self.dimension) + self.index
14 | output = input.select(self.dimension, index)
15 | self.output.resize_as_(output)
16 | return self.output.copy_(output)
17 |
18 | def updateGradInput(self, input, gradOutput):
19 | index = self.index if self.index >= 0 else input.size(self.dimension) + self.index
20 | self.gradInput.resize_as_(input)
21 | self.gradInput.zero_()
22 | self.gradInput.select(self.dimension, index).copy_(gradOutput)
23 | return self.gradInput
24 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Sigmoid.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Sigmoid(Module):
6 |
7 | def updateOutput(self, input):
8 | self._backend.Sigmoid_updateOutput(
9 | self._backend.library_state,
10 | input,
11 | self.output
12 | )
13 | return self.output
14 |
15 | def updateGradInput(self, input, gradOutput):
16 | self._backend.Sigmoid_updateGradInput(
17 | self._backend.library_state,
18 | gradOutput,
19 | self.gradInput,
20 | self.output
21 | )
22 | return self.gradInput
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SmoothL1Criterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class SmoothL1Criterion(Criterion):
6 |
7 | def __init__(self, sizeAverage=True):
8 | super(SmoothL1Criterion, self).__init__()
9 | self.sizeAverage = sizeAverage
10 | self.output_tensor = None
11 |
12 | def updateOutput(self, input, target):
13 | if self.output_tensor is None:
14 | self.output_tensor = input.new(1)
15 | self._backend.SmoothL1Criterion_updateOutput(
16 | self._backend.library_state,
17 | input,
18 | target,
19 | self.output_tensor,
20 | self.sizeAverage,
21 | True, # reduce
22 | )
23 | self.output = self.output_tensor[0]
24 | return self.output
25 |
26 | def updateGradInput(self, input, target):
27 | implicit_gradOutput = torch.ones(1).type_as(input)
28 | self._backend.SmoothL1Criterion_updateGradInput(
29 | self._backend.library_state,
30 | input,
31 | target,
32 | implicit_gradOutput,
33 | self.gradInput,
34 | self.sizeAverage,
35 | True, # reduce
36 | )
37 | return self.gradInput
38 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SoftMarginCriterion.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Criterion import Criterion
3 |
4 |
5 | class SoftMarginCriterion(Criterion):
6 |
 7 |     def __init__(self):
8 | super(SoftMarginCriterion, self).__init__()
9 | self.sizeAverage = True
10 | self.output_tensor = None
11 |
12 | def updateOutput(self, input, target):
13 | if self.output_tensor is None:
14 | self.output_tensor = input.new(1)
15 | self._backend.SoftMarginCriterion_updateOutput(
16 | self._backend.library_state,
17 | input,
18 | target,
19 | self.output_tensor,
20 | self.sizeAverage
21 | )
22 | self.output = self.output_tensor[0]
23 | return self.output
24 |
25 | def updateGradInput(self, input, target):
26 | self._backend.SoftMarginCriterion_updateGradInput(
27 | self._backend.library_state,
28 | input,
29 | target,
30 | self.gradInput,
31 | self.sizeAverage
32 | )
33 | return self.gradInput
34 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SoftMax.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class SoftMax(Module):
6 |
7 | def __init__(self, dim=None):
8 | super(SoftMax, self).__init__()
9 | if dim is not None:
10 | self.dim = dim
11 |
12 | def _get_dim(self, input):
13 | return getattr(self, 'dim', 0 if input.dim() == 1 or input.dim() == 3 else 1)
14 |
15 | def updateOutput(self, input):
16 | self._backend.SoftMax_updateOutput(
17 | self._backend.library_state,
18 | input,
19 | self.output,
20 | self._get_dim(input)
21 | )
22 | return self.output
23 |
24 | def updateGradInput(self, input, gradOutput):
25 | self._backend.SoftMax_updateGradInput(
26 | self._backend.library_state,
27 | input,
28 | gradOutput,
29 | self.gradInput,
30 | self.output,
31 | self._get_dim(input)
32 | )
33 | return self.gradInput
34 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SoftMin.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class SoftMin(Module):
7 |
8 | def __init__(self, dim=None):
9 | super(SoftMin, self).__init__()
10 | self.mininput = None
11 | if dim is not None:
12 | self.dim = dim
13 |
14 | def _get_dim(self, input):
15 | return getattr(self, 'dim', 0 if input.dim() == 1 or input.dim() == 3 else 1)
16 |
17 | def updateOutput(self, input):
18 | if self.mininput is None:
19 | self.mininput = input.new()
20 | self.mininput.resize_as_(input).copy_(input).mul_(-1)
21 | self._backend.SoftMax_updateOutput(
22 | self._backend.library_state,
23 | self.mininput,
24 | self.output,
25 | self._get_dim(input)
26 | )
27 | return self.output
28 |
29 | def updateGradInput(self, input, gradOutput):
30 | if self.mininput is None:
31 | self.mininput = input.new()
32 | self.mininput.resize_as_(input).copy_(input).mul_(-1)
33 | self._backend.SoftMax_updateGradInput(
34 | self._backend.library_state,
35 | self.mininput,
36 | gradOutput,
37 | self.gradInput,
38 | self.output,
39 | self._get_dim(input)
40 | )
41 |
42 | self.gradInput.mul_(-1)
43 | return self.gradInput
44 |
45 | def clearState(self):
46 | clear(self, 'mininput')
47 | return super(SoftMin, self).clearState()
48 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SoftPlus.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class SoftPlus(Module):
6 |
7 | def __init__(self, beta=1, threshold=20):
8 | super(SoftPlus, self).__init__()
9 | self.beta = beta # Beta controls sharpness of transfer function
10 | self.threshold = threshold # Avoid floating point issues with exp(x), x>20
11 |
12 | def updateOutput(self, input):
13 | # f(x) = 1/beta * log(1 + exp(beta * x))
14 | self._backend.SoftPlus_updateOutput(
15 | self._backend.library_state,
16 | input,
17 | self.output,
18 | self.beta,
19 | self.threshold
20 | )
21 | return self.output
22 |
23 | def updateGradInput(self, input, gradOutput):
24 | # d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1)
25 | # SINCE
26 |         # y = (1/k)*log(1+exp(k*x))  =>  x = (1/k)*log(exp(k*y)-1)
27 | # THEREFORE:
28 | # d/dx(f(x)) = (exp(k*y) - 1) / exp(k*y)
29 | self._backend.SoftPlus_updateGradInput(
30 | self._backend.library_state,
31 | input,
32 | gradOutput,
33 | self.gradInput,
34 | self.output,
35 | self.beta,
36 | self.threshold
37 | )
38 | return self.gradInput
39 |
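The comment block in updateGradInput compresses a short derivation; spelled out with k = beta: from y = (1/k)*log(1 + exp(k*x)) it follows that exp(k*y) = 1 + exp(k*x), so the derivative exp(k*x) / (1 + exp(k*x)) can be rewritten purely in terms of the output as (exp(k*y) - 1) / exp(k*y). That is why the backend call only needs self.output. A numeric confirmation:

import math

k, x = 2.0, 0.7
y = math.log(1 + math.exp(k * x)) / k                    # softplus forward
d_from_input = math.exp(k * x) / (1 + math.exp(k * x))
d_from_output = (math.exp(k * y) - 1) / math.exp(k * y)
print(d_from_input, d_from_output)                       # identical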
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SoftShrink.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class SoftShrink(Module):
6 |
7 | def __init__(self, lambd=0.5):
8 | super(SoftShrink, self).__init__()
9 | self.lambd = lambd
10 |
11 | def updateOutput(self, input):
12 | self._backend.SoftShrink_updateOutput(
13 | self._backend.library_state,
14 | input,
15 | self.output,
16 | self.lambd
17 | )
18 | return self.output
19 |
20 | def updateGradInput(self, input, gradOutput):
21 | self._backend.SoftShrink_updateGradInput(
22 | self._backend.library_state,
23 | input,
24 | gradOutput,
25 | self.gradInput,
26 | self.lambd
27 | )
28 | return self.gradInput
29 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SoftSign.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class SoftSign(Module):
7 |
8 | def __init__(self):
9 | super(SoftSign, self).__init__()
10 | self.temp = None
11 | self.tempgrad = None
12 |
13 | def updateOutput(self, input):
14 | if self.temp is None:
15 | self.temp = input.new()
16 | self.temp.resize_as_(input).copy_(input).abs_().add_(1)
17 | self.output.resize_as_(input).copy_(input).div_(self.temp)
18 | return self.output
19 |
20 | def updateGradInput(self, input, gradOutput):
21 | if self.tempgrad is None:
22 | self.tempgrad = input.new()
23 |         self.tempgrad.resize_as_(self.output).copy_(input).abs_().add_(1).mul_(self.tempgrad)  # (1 + |x|)^2: the chain multiplies the buffer by itself
24 | self.gradInput.resize_as_(input).copy_(gradOutput).div_(self.tempgrad)
25 | return self.gradInput
26 |
27 | def clearState(self):
28 | clear(self, 'temp', 'tempgrad')
29 | return super(SoftSign, self).clearState()
30 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SpatialAdaptiveMaxPooling.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class SpatialAdaptiveMaxPooling(Module):
7 |
8 | def __init__(self, w, h):
9 | super(SpatialAdaptiveMaxPooling, self).__init__()
10 | self.w = w
11 | self.h = h
12 | self.indices = None
13 |
14 | def updateOutput(self, input):
15 | if self.indices is None:
16 | self.indices = input.new()
17 | self.indices = self.indices.long()
18 | self._backend.SpatialAdaptiveMaxPooling_updateOutput(
19 | self._backend.library_state,
20 | input,
21 | self.output,
22 | self.indices,
23 | self.w,
24 | self.h
25 | )
26 | return self.output
27 |
28 | def updateGradInput(self, input, gradOutput):
29 | self._backend.SpatialAdaptiveMaxPooling_updateGradInput(
30 | self._backend.library_state,
31 | input,
32 | gradOutput,
33 | self.gradInput,
34 | self.indices
35 | )
36 | return self.gradInput
37 |
38 | def clearState(self):
39 | clear(self, 'indices')
40 | return super(SpatialAdaptiveMaxPooling, self).clearState()
41 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SpatialBatchNormalization.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .BatchNormalization import BatchNormalization
3 |
4 |
5 | class SpatialBatchNormalization(BatchNormalization):
6 | """
7 | This class implements Batch Normalization as described in the paper:
8 | "Batch Normalization: Accelerating Deep Network Training
9 | by Reducing Internal Covariate Shift"
10 | by Sergey Ioffe, Christian Szegedy
11 |
12 | This implementation is useful for inputs coming from convolution layers.
13 |     For non-convolutional layers, see BatchNormalization.py
14 |
15 | The operation implemented is:
16 | (x - mean(x))
17 | y = --------------------- * gamma + beta
18 | standard-deviation(x)
19 | where gamma and beta are learnable parameters.
20 |
21 | The learning of gamma and beta is optional.
22 |
23 | Usage:
24 | with learnable parameters: nn.SpatialBatchNormalization(N [, eps] [, momentum])
25 | where N = dimensionality of input
26 | without learnable parameters: nn.SpatialBatchNormalization(N [, eps] [, momentum], False)
27 |
28 | eps is a small value added to the variance to avoid divide-by-zero.
29 | Defaults to 1e-5
30 |
31 |     During training, this layer keeps a running estimate of its computed mean and std.
32 |     The running estimate is kept with a default momentum of 0.1 (unless overridden).
33 |     At test time, this running mean/std is used to normalize.
34 | """
35 |
36 | # expected dimension of input
37 | nDim = 4
38 |
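A minimal numeric sketch of the normalization the docstring describes, on a single feature (illustrative only; the real layer applies this per channel over a 4D batch and maintains running statistics for test time):

import math

xs = [1.0, 2.0, 3.0, 4.0]
gamma, beta, eps = 1.0, 0.0, 1e-5

mean = sum(xs) / len(xs)
var = sum((x - mean) ** 2 for x in xs) / len(xs)
ys = [(x - mean) / math.sqrt(var + eps) * gamma + beta for x in xs]
print(ys)   # zero mean and (up to eps) unit variance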
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SpatialLPPooling.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .Sequential import Sequential
4 | from .Square import Square
5 | from .Power import Power
6 | from .SpatialAveragePooling import SpatialAveragePooling
7 | from .MulConstant import MulConstant
8 | from .Sqrt import Sqrt
9 |
10 |
11 | class SpatialLPPooling(Sequential):
12 |
13 | def __init__(self, nInputPlane, pnorm, kW, kH, dW=None, dH=None):
14 | super(SpatialLPPooling, self).__init__()
15 |
16 | dW = dW or kW
17 | dH = dH or kH
18 |
19 | self.kW = kW
20 | self.kH = kH
21 | self.dW = dW
22 | self.dH = dH
23 |
24 | if pnorm == 2:
25 | self.add(Square())
26 | else:
27 | self.add(Power(pnorm))
28 |
29 | self.add(SpatialAveragePooling(kW, kH, dW, dH))
30 | self.add(MulConstant(kW * kH))
31 | if pnorm == 2:
32 | self.add(Sqrt())
33 | else:
34 | self.add(Power(1. / pnorm))
35 |
36 |     # the module is a Sequential: by default it would try to learn the parameters
37 |     # of its submodules; we avoid that by redefining the methods below.
38 | def reset(self, stdev=None):
39 | pass
40 |
41 | def accGradParameters(self, input, gradOutput):
42 | pass
43 |
44 | def accUpdateGradParameters(self, input, gradOutput, lr):
45 | pass
46 |
47 | def zeroGradParameters(self):
48 | pass
49 |
50 | def updateParameters(self, learningRate):
51 | pass
52 |
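The pipeline assembled in __init__ computes LP pooling, (sum over each kW x kH window of x^p)^(1/p): the average pooling produces a window mean, and MulConstant(kW * kH) rescales that mean back into a sum before the 1/p-th root is taken. A minimal sketch of the same computation with plain tensor ops (torch.nn.functional.avg_pool2d stands in for the SpatialAveragePooling stage):

    import torch
    import torch.nn.functional as F
    from torch.autograd import Variable

    x = Variable(torch.randn(1, 1, 4, 4).abs())
    p, k = 2, 2
    # mean-pool of x^p, rescaled from a mean to a sum, then the 1/p-th root
    lp = (F.avg_pool2d(x ** p, k) * (k * k)) ** (1.0 / p)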
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SpatialMaxUnpooling.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .SpatialMaxPooling import SpatialMaxPooling
4 |
5 |
6 | class SpatialMaxUnpooling(Module):
7 |
8 | def __init__(self, poolingModule):
9 | super(SpatialMaxUnpooling, self).__init__()
10 | assert isinstance(poolingModule, SpatialMaxPooling)
11 | assert poolingModule.kH == poolingModule.dH
12 | assert poolingModule.kW == poolingModule.dW
13 | self.pooling = poolingModule
14 |
15 | def _setParams(self):
16 | self.indices = self.pooling.indices
17 | self.oheight = self.pooling.iheight
18 | self.owidth = self.pooling.iwidth
19 |
20 | def updateOutput(self, input):
21 | self._setParams()
22 | self._backend.SpatialMaxUnpooling_updateOutput(
23 | self._backend.library_state,
24 | input,
25 | self.output,
26 | self.indices,
27 | self.owidth, self.oheight
28 | )
29 | return self.output
30 |
31 | def updateGradInput(self, input, gradOutput):
32 | self._setParams()
33 | self._backend.SpatialMaxUnpooling_updateGradInput(
34 | self._backend.library_state,
35 | input,
36 | gradOutput,
37 | self.gradInput,
38 | self.indices,
39 | self.owidth, self.oheight
40 | )
41 | return self.gradInput
42 |
43 | def __repr__(self):
44 | return 'nn.SpatialMaxUnpooling associated to ' + self.pooling.__repr__()
45 |
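The module only makes sense paired with the SpatialMaxPooling instance it was constructed from, since _setParams pulls the indices and the original input size from it. A minimal sketch under the legacy API shown above:

    import torch
    import torch.legacy.nn as nn

    pool = nn.SpatialMaxPooling(2, 2, 2, 2)   # kW == dW and kH == dH, as the asserts require
    unpool = nn.SpatialMaxUnpooling(pool)

    x = torch.randn(1, 1, 4, 4)
    y = pool.updateOutput(x)        # 1x1x2x2; pool.indices records the argmax locations
    x_hat = unpool.updateOutput(y)  # 1x1x4x4; maxima restored in place, zeros elsewhere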
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SpatialSoftMax.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class SpatialSoftMax(Module):
6 |
7 | def updateOutput(self, input):
8 | self._backend.SoftMax_updateOutput(
9 | self._backend.library_state,
10 | input,
11 | self.output,
12 | 0 if input.dim() == 1 or input.dim() == 3 else 1
13 | )
14 | return self.output
15 |
16 | def updateGradInput(self, input, gradOutput):
17 | self._backend.SoftMax_updateGradInput(
18 | self._backend.library_state,
19 | input,
20 | gradOutput,
21 | self.gradInput,
22 | self.output,
23 | 0 if input.dim() == 1 or input.dim() == 3 else 1
24 | )
25 | return self.gradInput
26 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/SplitTable.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class SplitTable(Module):
6 |
7 | def __init__(self, dimension):
8 | super(SplitTable, self).__init__()
9 | self.dimension = dimension
10 |
11 | def _getPositiveDimension(self, input):
12 | dimension = self.dimension
13 | if dimension < 0:
14 | dimension = input.dim() + dimension
15 |
16 | return dimension
17 |
18 | def updateOutput(self, input):
19 | dimension = self._getPositiveDimension(input)
20 | slices = input.size(dimension)
21 |
22 | currentOutput = []
23 | for i in range(slices):
24 | currentOutput.append(input.select(dimension, i))
25 |
26 | self.output = currentOutput
27 | return self.output
28 |
29 | def updateGradInput(self, input, gradOutput):
30 | if self.gradInput is None:
31 | return
32 | dimension = self._getPositiveDimension(input)
33 | slices = input.size(dimension)
34 | self.gradInput.resize_as_(input)
35 |
36 | for i in range(slices):
37 | self.gradInput.select(dimension, i).copy_(gradOutput[i])
38 |
39 | return self.gradInput
40 |
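updateOutput is just a loop of select calls along the (possibly negative) dimension, so the output slices are views sharing storage with the input. The same slicing, standalone:

    import torch

    t = torch.arange(0, 6).view(2, 3)
    # what SplitTable(0) would produce: one slice per row, no copies
    slices = [t.select(0, i) for i in range(t.size(0))]
    print([s.tolist() for s in slices])   # the two rows of t as separate tensors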
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Sqrt.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Sqrt(Module):
6 |
7 | def __init__(self, b=0, eps=0):
8 | super(Sqrt, self).__init__()
 9 |         # 'b' is the legacy Lua name for this argument; only 'eps' takes effect
10 |         self.eps = eps
11 |
12 | def updateOutput(self, input):
13 | self._backend.Sqrt_updateOutput(
14 | self._backend.library_state,
15 | input,
16 | self.output,
17 | self.eps
18 | )
19 | return self.output
20 |
21 | def updateGradInput(self, input, gradOutput):
22 | self._backend.Sqrt_updateGradInput(
23 | self._backend.library_state,
24 | input,
25 | gradOutput,
26 | self.gradInput,
27 | self.output
28 | )
29 | return self.gradInput
30 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Square.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Square(Module):
6 |
7 | def updateOutput(self, input):
8 | self._backend.Square_updateOutput(
9 | self._backend.library_state,
10 | input,
11 | self.output
12 | )
13 | return self.output
14 |
15 | def updateGradInput(self, input, gradOutput):
16 | self._backend.Square_updateGradInput(
17 | self._backend.library_state,
18 | input,
19 | gradOutput,
20 | self.gradInput
21 | )
22 | return self.gradInput
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Squeeze.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Squeeze(Module):
6 |
7 | def __init__(self, dim=None):
8 | super(Squeeze, self).__init__()
9 | self.dim = dim
10 |
11 | def updateOutput(self, input):
12 | dim = self.dim
13 | self.output.set_(input.squeeze(dim) if dim is not None else input.squeeze())
14 | return self.output
15 |
16 | def updateGradInput(self, input, gradOutput):
17 | assert input.nelement() == gradOutput.nelement()
18 | self.gradInput.set_(gradOutput.contiguous().view_as(input))
19 | return self.gradInput
20 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Tanh.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Tanh(Module):
6 |
7 | def updateOutput(self, input):
8 | self._backend.Tanh_updateOutput(
9 | self._backend.library_state,
10 | input,
11 | self.output
12 | )
13 | return self.output
14 |
15 | def updateGradInput(self, input, gradOutput):
16 | self._backend.Tanh_updateGradInput(
17 | self._backend.library_state,
18 | gradOutput,
19 | self.gradInput,
20 | self.output
21 | )
22 | return self.gradInput
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/TanhShrink.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .Tanh import Tanh
4 |
5 |
6 | class TanhShrink(Module):
7 |
8 | def __init__(self):
9 | super(TanhShrink, self).__init__()
10 | self.tanh = Tanh()
11 |
12 | def updateOutput(self, input):
13 | th = self.tanh.updateOutput(input)
14 | self.output.resize_as_(input).copy_(input)
15 | self.output.add_(-1, th)
16 | return self.output
17 |
18 | def updateGradInput(self, input, gradOutput):
19 | dth = self.tanh.updateGradInput(input, gradOutput)
20 | self.gradInput.resize_as_(input).copy_(gradOutput)
21 | self.gradInput.add_(-1, dth)
22 | return self.gradInput
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/TemporalMaxPooling.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import clear
4 |
5 |
6 | class TemporalMaxPooling(Module):
7 |
8 | def __init__(self, kW, dW=None):
9 | super(TemporalMaxPooling, self).__init__()
10 | self.kW = kW
11 | self.dW = dW or kW
12 | self.indices = None
13 |
14 | def updateOutput(self, input):
15 | if self.indices is None:
16 | self.indices = input.new()
17 | self._backend.TemporalMaxPooling_updateOutput(
18 | self._backend.library_state,
19 | input,
20 | self.output,
21 | self.indices,
22 | self.kW,
23 | self.dW
24 | )
25 | return self.output
26 |
27 | def updateGradInput(self, input, gradOutput):
28 | if self.gradInput is None:
29 | return
30 | self._backend.TemporalMaxPooling_updateGradInput(
31 | self._backend.library_state,
32 | input,
33 | gradOutput,
34 | self.gradInput,
35 | self.indices,
36 | self.kW,
37 | self.dW
38 | )
39 | return self.gradInput
40 |
41 | def clearState(self):
42 | clear(self, 'indices')
43 | return super(TemporalMaxPooling, self).clearState()
44 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Threshold.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Threshold(Module):
6 |
7 | def __init__(self, threshold=0, value=0, inplace=False):
8 | super(Threshold, self).__init__()
9 | self.threshold = threshold
10 | self.value = value
11 |
12 | # default for inplace is False
13 | self.inplace = inplace
14 | self.validateParameters()
15 |
16 | def updateOutput(self, input):
17 | self.validateParameters()
18 | self._backend.Threshold_updateOutput(
19 | self._backend.library_state,
20 | input,
21 | self.output,
22 | self.threshold,
23 | self.value,
24 | self.inplace
25 | )
26 | return self.output
27 |
28 | def updateGradInput(self, input, gradOutput):
29 | self.validateParameters()
30 | self._backend.Threshold_updateGradInput(
31 | self._backend.library_state,
32 | input,
33 | gradOutput,
34 | self.gradInput,
35 | self.threshold,
36 | self.value,
37 | self.inplace
38 | )
39 | return self.gradInput
40 |
41 | def validateParameters(self):
42 | if self.inplace:
43 | if self.value > self.threshold:
44 | raise RuntimeError('in-place processing requires value ({}) to not '
45 | 'exceed threshold ({})'.format(self.value, self.threshold))
46 |
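The check in validateParameters exists because, with inplace=True, the output overwrites the input, and the backward pass re-tests those overwritten values against the threshold; a replacement value above the threshold would then be indistinguishable from an input that genuinely passed it. A small sketch using the class above:

    from torch.legacy.nn import Threshold

    Threshold(0.5, 0.0, inplace=True)       # fine: value 0.0 <= threshold 0.5
    try:
        Threshold(0.5, 1.0, inplace=True)   # value 1.0 > threshold 0.5
    except RuntimeError as e:
        print(e)                            # in-place processing requires value ... threshold ...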
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Transpose.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class Transpose(Module):
6 | # transpose dimensions:
 7 |     # n = nn.Transpose((1, 4), (1, 3))
 8 |     # will transpose dims 1 and 4, then dims 1 and 3, and so on
9 |
10 | def __init__(self, *args):
11 | super(Transpose, self).__init__()
12 | self.permutations = args
13 |
14 | def updateOutput(self, input):
15 | for perm in self.permutations:
16 | input = input.transpose(*perm)
17 | self.output.resize_as_(input).copy_(input)
18 | return self.output
19 |
20 | def updateGradInput(self, input, gradOutput):
21 | for perm in self.permutations[::-1]:
22 | gradOutput = gradOutput.transpose(*perm)
23 | self.gradInput.resize_as_(gradOutput).copy_(gradOutput)
24 | return self.gradInput
25 |
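A short illustration of the permutation handling: updateOutput applies the pairs in order, and updateGradInput replays them in reverse, which undoes them because each transpose is its own inverse:

    import torch

    x = torch.randn(2, 3, 4)
    perms = [(0, 2), (0, 1)]
    y = x
    for perm in perms:            # forward pass order
        y = y.transpose(*perm)
    for perm in perms[::-1]:      # reverse order, as in updateGradInput
        y = y.transpose(*perm)
    assert y.size() == x.size()   # back to the original layout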
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/Unsqueeze.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .utils import addSingletondimension
4 |
5 |
6 | class Unsqueeze(Module):
7 |
8 | def __init__(self, dim):
9 | super(Unsqueeze, self).__init__()
10 | self.dim = dim
11 |
12 | def updateOutput(self, input):
13 | addSingletondimension(self.output, input, self.dim)
14 | return self.output
15 |
16 | def updateGradInput(self, input, gradOutput):
17 | assert input.nelement() == gradOutput.nelement()
18 | self.gradInput = gradOutput.contiguous().view(input.size())
19 | return self.gradInput
20 |
21 | def __repr__(self):
22 | return super(Unsqueeze, self).__repr__() + '({})'.format(self.dim)
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/View.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 |
4 |
5 | class View(Module):
6 |
7 | def resetSize(self, *args):
8 | if len(args) == 1 and isinstance(args[0], torch.Size):
9 | self.size = args[0]
10 | else:
11 | self.size = torch.Size(args)
12 |
13 | self.numElements = 1
14 | inferdim = False
15 | for i in range(len(self.size)):
16 | szi = self.size[i]
17 | if szi >= 0:
18 | self.numElements = self.numElements * self.size[i]
19 | else:
20 | assert szi == -1
21 | assert not inferdim
22 | inferdim = True
23 |
24 | return self
25 |
26 | def __init__(self, *args):
27 | super(View, self).__init__()
28 | self.resetSize(*args)
29 |
30 | def updateOutput(self, input):
31 | if self.output is None:
32 | self.output = input.new()
33 | self.output = input.view(self.size)
34 | return self.output
35 |
36 | def updateGradInput(self, input, gradOutput):
37 | if self.gradInput is None:
38 | self.gradInput = gradOutput.new()
39 | self.gradInput = gradOutput.contiguous().view(input.size())
40 | return self.gradInput
41 |
42 | def __repr__(self):
43 | return super(View, self).__repr__() + '({})'.format(', '.join(map(str, self.size)))
44 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/nn/VolumetricBatchNormalization.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .Module import Module
3 | from .BatchNormalization import BatchNormalization
4 |
5 |
6 | class VolumetricBatchNormalization(BatchNormalization):
7 | nDim = 5
8 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/legacy/optim/__init__.py:
--------------------------------------------------------------------------------
1 | from .adadelta import adadelta
2 | from .adagrad import adagrad
3 | from .adam import adam
4 | from .adamax import adamax
5 | from .asgd import asgd
6 | from .cg import cg
7 | from .nag import nag
8 | from .rmsprop import rmsprop
9 | from .rprop import rprop
10 | from .sgd import sgd
11 | from .lbfgs import lbfgs
12 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/multiprocessing/queue.py:
--------------------------------------------------------------------------------
1 | import io
2 | import multiprocessing
3 | import multiprocessing.queues
4 | from multiprocessing.reduction import ForkingPickler
5 | import pickle
6 |
7 |
8 | class ConnectionWrapper(object):
9 | """Proxy class for _multiprocessing.Connection which uses ForkingPickler to
10 | serialize objects"""
11 |
12 | def __init__(self, conn):
13 | self.conn = conn
14 |
15 | def send(self, obj):
16 | buf = io.BytesIO()
17 | ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
18 | self.send_bytes(buf.getvalue())
19 |
20 | def recv(self):
21 | buf = self.recv_bytes()
22 | return pickle.loads(buf)
23 |
24 | def __getattr__(self, name):
25 | return getattr(self.conn, name)
26 |
27 |
28 | class Queue(multiprocessing.queues.Queue):
29 |
30 | def __init__(self, *args, **kwargs):
31 | super(Queue, self).__init__(*args, **kwargs)
32 | self._reader = ConnectionWrapper(self._reader)
33 | self._writer = ConnectionWrapper(self._writer)
34 | self._send = self._writer.send
35 | self._recv = self._reader.recv
36 |
37 |
38 | class SimpleQueue(multiprocessing.queues.SimpleQueue):
39 |
40 | def _make_methods(self):
41 | if not isinstance(self._reader, ConnectionWrapper):
42 | self._reader = ConnectionWrapper(self._reader)
43 | self._writer = ConnectionWrapper(self._writer)
44 | super(SimpleQueue, self)._make_methods()
45 |
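The send/recv round-trip implemented by ConnectionWrapper can be seen in isolation: ForkingPickler serializes an object into a buffer exactly as it would onto the pipe, and pickle.loads recovers it. A minimal sketch:

    import io
    import pickle
    from multiprocessing.reduction import ForkingPickler

    buf = io.BytesIO()
    ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump({'step': 1})
    payload = buf.getvalue()   # the bytes ConnectionWrapper.send pushes over the pipe
    assert pickle.loads(payload) == {'step': 1}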
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/__init__.py:
--------------------------------------------------------------------------------
1 | from .modules import *
2 | from .parameter import Parameter
3 | from .parallel import DataParallel
4 | from . import init
5 | from . import utils
6 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/_functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/code/torch/nn/_functions/__init__.py
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/_functions/thnn/__init__.py:
--------------------------------------------------------------------------------
1 | _all_functions = []
2 |
3 | from .auto import *
4 | from .normalization import *
5 | from .activation import *
6 | from .pooling import *
7 | from .sparse import *
8 | from .upsampling import *
9 | from .rnnFusedPointwise import *
10 | from .batchnorm_double_backwards import batchnorm_double_backwards_fn
11 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/_functions/thnn/auto_symbolic.py:
--------------------------------------------------------------------------------
1 | from torch.autograd._functions.utils import prepare_onnx_paddings
2 |
3 |
4 | def reflectionpad_symbolic(g, input, *params):
5 | mode = "reflect"
6 | paddings = prepare_onnx_paddings(len(input.type().sizes()), params)
7 | return g.op("Pad", input, pads_i=paddings, mode_s=mode)
8 |
9 |
10 | def replicationpad_symbolic(g, input, *params):
11 | mode = "edge"
12 | paddings = prepare_onnx_paddings(len(input.type().sizes()), params)
13 | return g.op("Pad", input, pads_i=paddings, mode_s=mode)
14 |
15 |
16 | symbolic_fns = {
17 | 'ReflectionPad1d': reflectionpad_symbolic,
18 | 'ReflectionPad2d': reflectionpad_symbolic,
19 | 'ReplicationPad1d': replicationpad_symbolic,
20 | 'ReplicationPad2d': replicationpad_symbolic,
21 | 'ReplicationPad3d': replicationpad_symbolic,
22 | }
23 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/_functions/thnn/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch._thnn import type2backend
3 | from torch.autograd import Function
4 |
5 | from . import _all_functions
6 | from .auto import _BCELoss
7 | import warnings
8 |
9 |
10 | def _resize_weight(ctx, target):
11 | ctx.old_weight = ctx.weight
12 | if ctx.weight is not None and target.dim() != 1:
13 | ctx.weight = ctx.weight.view(1, target.size(1)).expand_as(target)
14 |
15 |
16 | def _unresize_weight(ctx):
17 | ctx.weight = ctx.old_weight
18 | del ctx.old_weight
19 |
20 |
21 | # TODO: move this code to THNN and remove _BCELoss from auto.py
22 | class BCELoss(_BCELoss):
23 |
24 | @staticmethod
25 | def forward(ctx, input, target, weight, *args):
26 | if not target.is_same_size(input):
27 | warnings.warn("Using a target size ({}) that is different to the input size ({}) is deprecated. "
28 | "Please ensure they have the same size.".format(target.size(), input.size()))
29 | assert input.nelement() == target.nelement()
30 | ctx.weight = weight
31 | _resize_weight(ctx, target)
32 | result = _BCELoss.forward(ctx, input, target, ctx.weight, *args)
33 | _unresize_weight(ctx)
34 | return result
35 |
36 | @staticmethod
37 | def backward(ctx, grad_output):
38 | target = ctx.saved_tensors[1]
39 | _resize_weight(ctx, target)
40 | result = _BCELoss.backward(ctx, grad_output)
41 | _unresize_weight(ctx)
42 | return result
43 |
44 |
45 | _all_functions.append(BCELoss)
46 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/backends/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/code/torch/nn/backends/__init__.py
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/backends/backend.py:
--------------------------------------------------------------------------------
1 |
2 | class FunctionBackend(object):
3 |
4 | def __init__(self):
5 | self.function_classes = {}
6 |
7 | def __getattr__(self, name):
8 | fn = self.function_classes.get(name)
9 | if fn is None:
10 | raise NotImplementedError
11 | return fn
12 |
13 | def register_function(self, name, function_class):
14 | if self.function_classes.get(name):
15 | raise RuntimeError("Trying to register second function under name " + name + " in " + type(self).__name__)
16 | self.function_classes[name] = function_class
17 |
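Usage is a plain register-then-look-up pattern, with missing names surfacing as NotImplementedError through __getattr__. A small sketch using the class above (MyFunction is a hypothetical stand-in for a real function class):

    backend = FunctionBackend()

    class MyFunction(object):
        pass

    backend.register_function('MyFunction', MyFunction)
    assert backend.MyFunction is MyFunction   # resolved via __getattr__
    try:
        backend.Missing                       # unregistered name
    except NotImplementedError:
        pass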
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/modules/normalization.py:
--------------------------------------------------------------------------------
1 | from .module import Module
2 |
3 |
4 | class CrossMapLRN2d(Module):
5 |
6 | def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
7 | super(CrossMapLRN2d, self).__init__()
8 | self.size = size
9 | self.alpha = alpha
10 | self.beta = beta
11 | self.k = k
12 |
13 | def forward(self, input):
14 | return self._backend.CrossMapLRN2d(self.size, self.alpha, self.beta,
15 | self.k)(input)
16 |
17 | def __repr__(self):
18 | return self.__class__.__name__ + '(' \
19 | + str(self.size) \
20 | + ', alpha=' + str(self.alpha) \
21 | + ', beta=' + str(self.beta) \
22 | + ', k=' + str(self.k) + ')'
23 |
24 |
25 | # TODO: ContrastiveNorm2d
26 | # TODO: DivisiveNorm2d
27 | # TODO: SubtractiveNorm2d
28 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/modules/pixelshuffle.py:
--------------------------------------------------------------------------------
1 | from .module import Module
2 | from .. import functional as F
3 |
4 |
5 | class PixelShuffle(Module):
6 | r"""
7 | 对张量中形如 :math:`(*, C * r^2, H, W]` 的元素, 重新排列成 :math:`(C, H * r, W * r)`.
8 |
9 | 当使用 stride = :math:`1/r` 的高效子像素卷积很有用.
10 |
11 | 参考如下论文获得更多信息:
12 | `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
13 | Shi et. al (2016) .
14 |
15 | 参数:
16 | upscale_factor (int): 增加空间分辨率的因子
17 |
18 | 形状:
19 | - 输入: :math:`(N, C * {upscale\_factor}^2, H, W)`
20 | - 输出: :math:`(N, C, H * {upscale\_factor}, W * {upscale\_factor})`
21 |
22 | Examples::
23 |
24 | >>> ps = nn.PixelShuffle(3)
25 | >>> input = autograd.Variable(torch.Tensor(1, 9, 4, 4))
26 | >>> output = ps(input)
27 | >>> print(output.size())
28 | torch.Size([1, 1, 12, 12])
29 |
30 | .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
31 | https://arxiv.org/abs/1609.05158
32 | """
33 |
34 | def __init__(self, upscale_factor):
35 | super(PixelShuffle, self).__init__()
36 | self.upscale_factor = upscale_factor
37 |
38 | def forward(self, input):
39 | return F.pixel_shuffle(input, self.upscale_factor)
40 |
41 | def __repr__(self):
42 | return self.__class__.__name__ + '(upscale_factor=' + str(self.upscale_factor) + ')'
43 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/modules/utils.py:
--------------------------------------------------------------------------------
1 | import collections
2 | from itertools import repeat
3 |
4 |
5 | def _ntuple(n):
6 | def parse(x):
7 | if isinstance(x, collections.Iterable):
8 | return x
9 | return tuple(repeat(x, n))
10 | return parse
11 |
12 | _single = _ntuple(1)
13 | _pair = _ntuple(2)
14 | _triple = _ntuple(3)
15 | _quadruple = _ntuple(4)
16 |
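These helpers normalize scalar-or-iterable arguments such as kernel sizes: a bare int is repeated into an n-tuple, while an iterable passes through unchanged. Using the helpers defined above:

    assert _pair(3) == (3, 3)
    assert _pair((3, 5)) == (3, 5)
    assert _triple(7) == (7, 7, 7)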
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/parallel/__init__.py:
--------------------------------------------------------------------------------
1 | from .parallel_apply import parallel_apply
2 | from .replicate import replicate
3 | from .data_parallel import DataParallel, data_parallel
4 | from .scatter_gather import scatter, gather
5 | from .distributed import DistributedDataParallel
6 |
7 | __all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
8 | 'DataParallel', 'DistributedDataParallel']
9 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/parameter.py:
--------------------------------------------------------------------------------
1 | from torch.autograd import Variable
2 |
3 |
4 | class Parameter(Variable):
5 | r"""Variable 的一种, 常被用于 module parameter(模块参数).
6 |
7 | Parameters 是 :class:`~torch.autograd.Variable` 的子类, 当它和 :class:`Module`
8 | 一起使用的时候会有一些特殊的属性 - 当它们被赋值给 Module 属性时,
9 | 它会自动的被加到 Module 的参数列表中, 并且会出现在 :meth:`~Module.parameters` iterator 迭代器方法中.
10 | 将 Varibale 赋值给 Module 属性则不会有这样的影响.
11 | 这样做的原因是: 我们有时候会需要缓存一些临时的 state(状态),
12 | 例如: 模型 RNN 中的最后一个隐藏状态.
13 | 如果没有 :class:`Parameter` 这个类的话,
14 | 那么这些临时表也会注册为模型变量.
15 |
16 | Variable 与 Parameter 的另一个不同之处在于,
17 | Parameter 不能被 volatile (即: 无法设置 volatile=True) 而且默认 requires_grad=True.
18 | Variable 默认 requires_grad=False.
19 |
20 | Arguments:
21 | data (Tensor): parameter tensor.
22 | requires_grad (bool, optional): 如果参数需要梯度. 更多细节请参阅 :ref:`excluding-subgraphs`.
23 | """
24 | def __new__(cls, data=None, requires_grad=True):
25 | return super(Parameter, cls).__new__(cls, data, requires_grad=requires_grad)
26 |
27 | def __repr__(self):
28 | return 'Parameter containing:' + self.data.__repr__()
29 |
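The registration behaviour described in the docstring can be checked directly; a minimal sketch (class and attribute names are illustrative):

    import torch
    import torch.nn as nn
    from torch.autograd import Variable

    class Cell(nn.Module):
        def __init__(self):
            super(Cell, self).__init__()
            self.weight = nn.Parameter(torch.randn(3, 3))   # auto-registered
            self.hidden = Variable(torch.zeros(3))          # cached state, not registered

    print(len(list(Cell().parameters())))   # 1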
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from . import rnn
2 | from .clip_grad import clip_grad_norm
3 | from .weight_norm import weight_norm, remove_weight_norm
4 | from .convert_parameters import parameters_to_vector, vector_to_parameters
5 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/nn/utils/clip_grad.py:
--------------------------------------------------------------------------------
1 |
2 | def clip_grad_norm(parameters, max_norm, norm_type=2):
3 | r"""接收一个包含 Variable 的可迭代对象, 对 Variable 的梯度按范数进行裁剪.
4 |
5 | 范数是对所有梯度进行计算的, 等价于把所有输入变量的梯度连接成一个向量, 然后对这个向量按范数进行裁剪. 梯度将会被原地修改.
6 |
7 | Arguments:
8 | parameters (Iterable[Variable]): 一个可迭代对象, 其包含将要进行梯度正规化的 Variable
9 | max_norm (float or int): 梯度的最大范数
10 | norm_type (float or int): p 范数(指定 p ). 用 ``'inf'`` 表示无穷范数
11 |
12 | Returns:
13 | 梯度的范数 (视为单个向量的).
14 | """
15 | parameters = list(filter(lambda p: p.grad is not None, parameters))
16 | max_norm = float(max_norm)
17 | norm_type = float(norm_type)
18 | if norm_type == float('inf'):
19 | total_norm = max(p.grad.data.abs().max() for p in parameters)
20 | else:
21 | total_norm = 0
22 | for p in parameters:
23 | param_norm = p.grad.data.norm(norm_type)
24 | total_norm += param_norm ** norm_type
25 | total_norm = total_norm ** (1. / norm_type)
26 | clip_coef = max_norm / (total_norm + 1e-6)
27 | if clip_coef < 1:
28 | for p in parameters:
29 | p.grad.data.mul_(clip_coef)
30 | return total_norm
31 |
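A worked check of the arithmetic for norm_type=2: with a single gradient [3, 4] the total norm is 5, so max_norm=1 gives a clip_coef of roughly 0.2 and the clipped gradient ends up with unit norm. A sketch on a plain tensor:

    import torch

    grad = torch.Tensor([3.0, 4.0])
    total_norm = (grad.norm(2) ** 2) ** (1. / 2)   # 5.0, computed as in the loop above
    clip_coef = 1.0 / (total_norm + 1e-6)          # max_norm = 1  ->  ~0.2
    if clip_coef < 1:
        grad.mul_(clip_coef)
    print(grad.norm(2))                            # ~1.0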
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/optim/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | :mod:`torch.optim` is a package implementing various optimization algorithms.
3 | Most commonly used methods are already supported, and the interface is general
4 | enough, so that more sophisticated ones can be also easily integrated in the
5 | future.
6 | """
7 |
8 | from .adadelta import Adadelta
9 | from .adagrad import Adagrad
10 | from .adam import Adam
11 | from .sparse_adam import SparseAdam
12 | from .adamax import Adamax
13 | from .asgd import ASGD
14 | from .sgd import SGD
15 | from .rprop import Rprop
16 | from .rmsprop import RMSprop
17 | from .optimizer import Optimizer
18 | from .lbfgs import LBFGS
19 | from . import lr_scheduler
20 |
21 | del adadelta
22 | del adagrad
23 | del adam
24 | del adamax
25 | del asgd
26 | del sgd
27 | del rprop
28 | del rmsprop
29 | del optimizer
30 | del lbfgs
31 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/doc/zh/source/code/torch/utils/__init__.py
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/backcompat/__init__.py:
--------------------------------------------------------------------------------
1 | from torch._C import _set_backcompat_broadcast_warn
2 | from torch._C import _get_backcompat_broadcast_warn
3 | from torch._C import _set_backcompat_keepdim_warn
4 | from torch._C import _get_backcompat_keepdim_warn
5 |
6 |
7 | class Warning(object):
8 | def __init__(self, setter, getter):
9 | self.setter = setter
10 | self.getter = getter
11 |
12 | def set_enabled(self, value):
13 | self.setter(value)
14 |
15 | def get_enabled(self):
16 | return self.getter()
17 |
18 | enabled = property(get_enabled, set_enabled)
19 |
20 | broadcast_warning = Warning(_set_backcompat_broadcast_warn, _get_backcompat_broadcast_warn)
21 | keepdim_warning = Warning(_set_backcompat_keepdim_warn, _get_backcompat_keepdim_warn)
22 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/data/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .dataset import Dataset, TensorDataset, ConcatDataset
3 | from .dataloader import DataLoader
4 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/dlpack.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from torch._C import _from_dlpack as from_dlpack
4 | from torch._C import _to_dlpack as to_dlpack
5 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/hooks.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import weakref
3 |
4 |
5 | class RemovableHandle(object):
6 | """A handle which provides the capability to remove a hook."""
7 |
8 | next_id = 0
9 |
10 | def __init__(self, hooks_dict):
11 | self.hooks_dict_ref = weakref.ref(hooks_dict)
12 | self.id = RemovableHandle.next_id
13 | RemovableHandle.next_id += 1
14 |
15 | def remove(self):
16 | hooks_dict = self.hooks_dict_ref()
17 | if hooks_dict is not None and self.id in hooks_dict:
18 | del hooks_dict[self.id]
19 |
20 | def __getstate__(self):
21 | return (self.hooks_dict_ref(), self.id)
22 |
23 | def __setstate__(self, state):
24 | if state[0] is None:
25 | # create a dead reference
26 | self.hooks_dict_ref = weakref.ref(collections.OrderedDict())
27 | else:
28 | self.hooks_dict_ref = weakref.ref(state[0])
29 | self.id = state[1]
30 | RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1)
31 |
32 | def __enter__(self):
33 | return self
34 |
35 | def __exit__(self, type, value, tb):
36 | self.remove()
37 |
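A minimal sketch of the handle life-cycle using the class above: the hooks dict holds the hook under the handle's id, and remove() (also triggered by the with-statement exit) deletes it, while the handle itself only weakly references the dict:

    import collections

    hooks = collections.OrderedDict()
    handle = RemovableHandle(hooks)
    hooks[handle.id] = lambda *args: None   # register a dummy hook

    with handle:                            # __exit__ calls remove()
        assert handle.id in hooks
    assert handle.id not in hooks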
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/serialization/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .read_lua_file import load_lua, T7Reader
3 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .trainer import Trainer
3 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | from .progress import ProgressMonitor
2 | from .accuracy import AccuracyMonitor
3 | from .time import TimeMonitor
4 | from .loss import LossMonitor
5 | from .logger import Logger
6 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/plugins/accuracy.py:
--------------------------------------------------------------------------------
1 | from .monitor import Monitor
2 |
3 |
4 | class AccuracyMonitor(Monitor):
5 | stat_name = 'accuracy'
6 |
7 | def __init__(self, *args, **kwargs):
8 | kwargs.setdefault('unit', '%')
9 | kwargs.setdefault('precision', 2)
10 | super(AccuracyMonitor, self).__init__(*args, **kwargs)
11 |
12 | def _get_value(self, iteration, input, target, output, loss):
13 | batch_size = input.size(0)
14 | predictions = output.max(1)[1].type_as(target)
15 | correct = predictions.eq(target)
16 | if not hasattr(correct, 'sum'):
17 | correct = correct.cpu()
18 | correct = correct.sum()
19 | return 100. * correct / batch_size
20 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/plugins/loss.py:
--------------------------------------------------------------------------------
1 | from .monitor import Monitor
2 |
3 |
4 | class LossMonitor(Monitor):
5 | stat_name = 'loss'
6 |
7 | def _get_value(self, iteration, input, target, output, loss):
8 | return loss[0]
9 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/plugins/plugin.py:
--------------------------------------------------------------------------------
1 |
2 | class Plugin(object):
3 |
4 | def __init__(self, interval=None):
5 | if interval is None:
6 | interval = []
7 | self.trigger_interval = interval
8 |
9 | def register(self, trainer):
10 | raise NotImplementedError
11 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/plugins/progress.py:
--------------------------------------------------------------------------------
1 | from .plugin import Plugin
2 |
3 |
4 | class ProgressMonitor(Plugin):
5 | stat_name = 'progress'
6 |
7 | def __init__(self):
8 | super(ProgressMonitor, self).__init__([(1, 'iteration'), (1, 'epoch')])
9 |
10 | def register(self, trainer):
11 | self.trainer = trainer
12 | stats = self.trainer.stats.setdefault(self.stat_name, {})
13 | stats['samples_used'] = 0
14 | stats['epoch_size'] = len(trainer.dataset)
15 | stats['log_iter_fields'] = [
16 | '{samples_used}/{epoch_size}',
17 | '({percent:.2f}%)'
18 | ]
19 |
20 | def iteration(self, iteration, input, *args):
21 | stats = self.trainer.stats.setdefault(self.stat_name, {})
22 | stats['samples_used'] += 1
23 | stats['percent'] = 100. * stats['samples_used'] / stats['epoch_size']
24 |
25 | def epoch(self, *args):
26 | stats = self.trainer.stats.setdefault(self.stat_name, {})
27 | stats['samples_used'] = 0
28 | stats['percent'] = 0
29 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/utils/trainer/plugins/time.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import time
3 |
4 | from .monitor import Monitor
5 |
6 |
7 | class TimeMonitor(Monitor):
8 | stat_name = 'time'
9 |
10 | def __init__(self, *args, **kwargs):
11 | kwargs.setdefault('unit', 'ms')
12 | kwargs.setdefault('precision', 0)
13 | super(TimeMonitor, self).__init__(*args, **kwargs)
14 | self.last_time = None
15 |
16 | def _get_value(self, *args):
17 | if self.last_time:
18 | now = time.time()
19 | duration = now - self.last_time
20 | self.last_time = now
21 | return duration * 1000
22 | else:
23 | self.last_time = time.time()
24 | return 0
25 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torch/version.py:
--------------------------------------------------------------------------------
1 | __version__ = '0.3.0.post4'
2 | debug = False
3 | cuda = '8.0.61'
4 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torchvision/__init__.py:
--------------------------------------------------------------------------------
1 | from torchvision import models
2 | from torchvision import datasets
3 | from torchvision import transforms
4 | from torchvision import utils
5 |
6 | __version__ = '0.2.0'
7 |
8 | _image_backend = 'PIL'
9 |
10 |
11 | def set_image_backend(backend):
12 | """
13 |     Specifies the package used to load images.
14 |
15 |     Args:
16 |         backend (string): name of the image backend. One of {'PIL', 'accimage'}.
17 |             :mod:`accimage` uses the Intel IPP library (high-performance image loading and augmentation routines mimicking the PIL API).
18 |             It is generally faster than PIL, but does not support as many operations.
19 | """
20 | global _image_backend
21 | if backend not in ['PIL', 'accimage']:
22 | raise ValueError("Invalid backend '{}'. Options are 'PIL' and 'accimage'"
23 | .format(backend))
24 | _image_backend = backend
25 |
26 |
27 | def get_image_backend():
28 | """
29 |     Gets the name of the package used to load images.
30 | """
31 | return _image_backend
32 |
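Switching the backend, and the rejection of unknown names, end to end:

    import torchvision

    torchvision.set_image_backend('PIL')
    print(torchvision.get_image_backend())       # 'PIL'
    try:
        torchvision.set_image_backend('opencv')  # not one of {'PIL', 'accimage'}
    except ValueError as e:
        print(e)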
--------------------------------------------------------------------------------
/doc/zh/source/code/torchvision/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .lsun import LSUN, LSUNClass
2 | from .folder import ImageFolder
3 | from .coco import CocoCaptions, CocoDetection
4 | from .cifar import CIFAR10, CIFAR100
5 | from .stl10 import STL10
6 | from .mnist import MNIST, FashionMNIST
7 | from .svhn import SVHN
8 | from .phototour import PhotoTour
9 | from .fakedata import FakeData
10 | from .semeion import SEMEION
11 |
12 | __all__ = ('LSUN', 'LSUNClass',
13 | 'ImageFolder', 'FakeData',
14 | 'CocoCaptions', 'CocoDetection',
15 | 'CIFAR10', 'CIFAR100', 'FashionMNIST',
16 | 'MNIST', 'STL10', 'SVHN', 'PhotoTour', 'SEMEION')
17 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torchvision/datasets/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import os.path
3 | import hashlib
4 | import errno
5 |
6 |
7 | def check_integrity(fpath, md5):
8 | if not os.path.isfile(fpath):
9 | return False
10 | md5o = hashlib.md5()
11 | with open(fpath, 'rb') as f:
12 | # read in 1MB chunks
13 | for chunk in iter(lambda: f.read(1024 * 1024), b''):
14 | md5o.update(chunk)
15 | md5c = md5o.hexdigest()
16 | if md5c != md5:
17 | return False
18 | return True
19 |
20 |
21 | def download_url(url, root, filename, md5):
22 | from six.moves import urllib
23 |
24 | root = os.path.expanduser(root)
25 | fpath = os.path.join(root, filename)
26 |
27 | try:
28 | os.makedirs(root)
29 | except OSError as e:
30 | if e.errno == errno.EEXIST:
31 | pass
32 | else:
33 | raise
34 |
35 | # downloads file
36 | if os.path.isfile(fpath) and check_integrity(fpath, md5):
37 | print('Using downloaded and verified file: ' + fpath)
38 | else:
39 | try:
40 | print('Downloading ' + url + ' to ' + fpath)
41 | urllib.request.urlretrieve(url, fpath)
42 |         except Exception:
43 | if url[:5] == 'https':
44 | url = url.replace('https:', 'http:')
45 | print('Failed download. Trying https -> http instead.'
46 | ' Downloading ' + url + ' to ' + fpath)
47 | urllib.request.urlretrieve(url, fpath)
48 |
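check_integrity's contract, end to end: write a file, compute its md5 the same way, and verify. A small sketch using the function above (the temporary file is illustrative):

    import hashlib
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'hello world')
        fpath = f.name

    md5 = hashlib.md5(b'hello world').hexdigest()
    print(check_integrity(fpath, md5))   # True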
--------------------------------------------------------------------------------
/doc/zh/source/code/torchvision/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .alexnet import *
2 | from .resnet import *
3 | from .vgg import *
4 | from .squeezenet import *
5 | from .inception import *
6 | from .densenet import *
7 |
--------------------------------------------------------------------------------
/doc/zh/source/code/torchvision/transforms/__init__.py:
--------------------------------------------------------------------------------
1 | from .transforms import *
2 |
--------------------------------------------------------------------------------
/doc/zh/source/cuda.rst:
--------------------------------------------------------------------------------
1 | torch.cuda
2 | ===================================
3 |
4 | .. currentmodule:: torch.cuda
5 |
6 | .. automodule:: torch.cuda
7 | :members:
8 |
9 | Random Number Generator
10 | -------------------------
11 | .. autofunction:: get_rng_state
12 | .. autofunction:: set_rng_state
13 | .. autofunction:: manual_seed
14 | .. autofunction:: manual_seed_all
15 | .. autofunction:: seed
16 | .. autofunction:: seed_all
17 | .. autofunction:: initial_seed
18 |
19 |
20 | Communication collectives
21 | -------------------------
22 |
23 | .. autofunction:: torch.cuda.comm.broadcast
24 |
25 | .. autofunction:: torch.cuda.comm.reduce_add
26 |
27 | .. autofunction:: torch.cuda.comm.scatter
28 |
29 | .. autofunction:: torch.cuda.comm.gather
30 |
31 | Streams and events
32 | ------------------
33 |
34 | .. autoclass:: Stream
35 | :members:
36 |
37 | .. autoclass:: Event
38 | :members:
39 |
40 | Memory management
41 | -----------------
42 | .. autofunction:: empty_cache
43 |
44 | NVIDIA Tools Extension (NVTX)
45 | -----------------------------
46 |
47 | .. autofunction:: torch.cuda.nvtx.mark
48 | .. autofunction:: torch.cuda.nvtx.range_push
49 | .. autofunction:: torch.cuda.nvtx.range_pop
50 |
--------------------------------------------------------------------------------
/doc/zh/source/data.rst:
--------------------------------------------------------------------------------
1 | torch.utils.data
2 | ===================================
3 |
4 | .. automodule:: torch.utils.data
5 | .. autoclass:: Dataset
6 | .. autoclass:: TensorDataset
7 | .. autoclass:: ConcatDataset
8 | .. autoclass:: DataLoader
9 | .. autoclass:: torch.utils.data.sampler.Sampler
10 | .. autoclass:: torch.utils.data.sampler.SequentialSampler
11 | .. autoclass:: torch.utils.data.sampler.RandomSampler
12 | .. autoclass:: torch.utils.data.sampler.SubsetRandomSampler
13 | .. autoclass:: torch.utils.data.sampler.WeightedRandomSampler
14 | .. autoclass:: torch.utils.data.distributed.DistributedSampler
15 |
--------------------------------------------------------------------------------
/doc/zh/source/distributions.rst:
--------------------------------------------------------------------------------
1 | .. role:: hidden
2 | :class: hidden-section
3 |
4 | Probability distributions - torch.distributions
5 | ==================================================
6 |
7 | .. automodule:: torch.distributions
8 | .. currentmodule:: torch.distributions
9 |
10 | Distribution
11 | ~~~~~~~~~~~~~~~~~~~~~~~
12 |
13 | .. autoclass:: Distribution
14 | :members:
15 |
16 | Bernoulli
17 | ~~~~~~~~~~~~~~~~~~~~~~~
18 |
19 | .. autoclass:: Bernoulli
20 | :members:
21 |
22 | Categorical
23 | ~~~~~~~~~~~~~~~~~~~~~~~
24 |
25 | .. autoclass:: Categorical
26 | :members:
27 |
28 | Normal
29 | ~~~~~~~~~~~~~~~~~~~~~~~
30 |
31 | .. autoclass:: Normal
32 | :members:
33 |
--------------------------------------------------------------------------------
/doc/zh/source/ffi.rst:
--------------------------------------------------------------------------------
1 | torch.utils.ffi
2 | ===============
3 |
4 | .. currentmodule:: torch.utils.ffi
5 | .. autofunction:: create_extension
6 |
7 |
--------------------------------------------------------------------------------
/doc/zh/source/index.rst:
--------------------------------------------------------------------------------
1 | .. PyTorch documentation master file, created by
2 | sphinx-quickstart on Fri Dec 23 13:31:47 2016.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | :github_url: https://github.com/apachecn/pytorch-doc-zh
7 |
 8 | PyTorch 0.3.0 Chinese Documentation
9 | ===================================
10 |
11 | PyTorch is a tensor library for deep learning, optimized for both GPUs and CPUs.
12 |
13 | .. toctree::
14 | :glob:
15 | :maxdepth: 1
16 |    :caption: Introduction
17 |
18 | notes/*
19 |
20 |
21 | .. toctree::
22 | :maxdepth: 1
23 |    :caption: Package Reference
24 |
25 | torch
26 | tensors
27 | sparse
28 | storage
29 | nn
30 | optim
31 | torch.autograd
32 | torch.distributions
33 | torch.multiprocessing
34 | torch.distributed
35 | torch.legacy
36 | cuda
37 | ffi
38 | data
39 | model_zoo
40 | onnx
41 |
42 | .. toctree::
43 | :glob:
44 | :maxdepth: 2
45 |    :caption: torchvision Reference
46 |
47 | torchvision/index
48 |
49 | .. toctree::
50 | :maxdepth: 1
51 |    :caption: About the Project
52 |
53 | 项目贡献者
54 | 组织学习交流群
55 |
56 |
57 |
58 | Indices and tables
59 | ==================
60 |
61 | * :ref:`genindex`
62 | * :ref:`modindex`
63 |
64 | Related links
65 | ==================
66 | - GitHub: `apachecn/pytorch-doc-zh <https://github.com/apachecn/pytorch-doc-zh>`__
67 | - Project contributors: ``__
68 | - Study and discussion group: ``__
--------------------------------------------------------------------------------
/doc/zh/source/legacy.rst:
--------------------------------------------------------------------------------
1 | Legacy package - torch.legacy
2 | ===================================
3 |
4 | .. automodule:: torch.legacy
5 |
--------------------------------------------------------------------------------
/doc/zh/source/model_zoo.rst:
--------------------------------------------------------------------------------
1 | torch.utils.model_zoo
2 | ===================================
3 |
4 | .. automodule:: torch.utils.model_zoo
5 | .. autofunction:: load_url
6 |
--------------------------------------------------------------------------------
/doc/zh/source/notes/serialization.rst:
--------------------------------------------------------------------------------
1 |
 2 | Serialization semantics
3 | =======================
4 |
 5 | Best practices
6 | --------------
7 |
8 | .. _recommend-saving-models:
9 |
10 | Recommended approach for saving a model
11 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
12 |
13 | There are two main approaches for serializing and restoring a model.
14 |
15 | The first (recommended) saves and loads only the model's parameters::
16 |
17 | torch.save(the_model.state_dict(), PATH)
18 |
19 | Then later::
20 |
21 | the_model = TheModelClass(*args, **kwargs)
22 | the_model.load_state_dict(torch.load(PATH))
23 |
24 | The second saves and loads the entire model::
25 |
26 | torch.save(the_model, PATH)
27 |
28 | Then later::
29 |
30 | the_model = torch.load(PATH)
31 |
32 | However in this case, the serialized data is bound to the specific classes and the exact directory structure used, so it can break in various ways when used in other projects, or after some serious refactoring.
33 |
--------------------------------------------------------------------------------
/doc/zh/source/storage.rst:
--------------------------------------------------------------------------------
1 | torch.Storage
2 | ===================================
3 |
 4 | A :class:`torch.Storage` is a contiguous, one-dimensional array of a single data type.
 5 |
 6 | Every :class:`torch.Tensor` has a corresponding storage of the same data type.
7 |
8 | .. autoclass:: torch.FloatStorage
9 | :members:
10 | :undoc-members:
11 | :inherited-members:
12 |
--------------------------------------------------------------------------------
/doc/zh/source/torchvision/index.rst:
--------------------------------------------------------------------------------
1 | torchvision
2 | ===========
3 |
 4 | The :mod:`torchvision` package consists of popular datasets, model
 5 | architectures, and common image transformations for computer vision.
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 | :caption: Package Reference
10 |
11 | datasets
12 | models
13 | transforms
14 | utils
15 |
16 | .. automodule:: torchvision
17 | :members:
--------------------------------------------------------------------------------
/doc/zh/source/torchvision/transforms.rst:
--------------------------------------------------------------------------------
1 | torchvision.transforms
2 | ======================
3 |
4 | .. currentmodule:: torchvision.transforms
5 |
 6 | Transforms are common image transforms. They can be chained together using :class:`Compose`.
7 |
8 | .. autoclass:: Compose
9 |
10 | Transforms on PIL Image
11 | -----------------------------
12 |
13 | .. autoclass:: Resize
14 |
15 | .. autoclass:: Scale
16 |
17 | .. autoclass:: CenterCrop
18 |
19 | .. autoclass:: RandomCrop
20 |
21 | .. autoclass:: RandomHorizontalFlip
22 |
23 | .. autoclass:: RandomVerticalFlip
24 |
25 | .. autoclass:: RandomResizedCrop
26 |
27 | .. autoclass:: RandomSizedCrop
28 |
29 | .. autoclass:: Grayscale
30 |
31 | .. autoclass:: RandomGrayscale
32 |
33 | .. autoclass:: FiveCrop
34 |
35 | .. autoclass:: TenCrop
36 |
37 | .. autoclass:: Pad
38 |
39 | .. autoclass:: ColorJitter
40 |
41 | Transforms on torch.\*Tensor
42 | ----------------------------
43 |
44 | .. autoclass:: Normalize
45 | :members: __call__
46 | :special-members:
47 |
48 |
49 | Conversion Transforms
50 | ---------------------
51 |
52 | .. autoclass:: ToTensor
53 | :members: __call__
54 | :special-members:
55 |
56 | .. autoclass:: ToPILImage
57 | :members: __call__
58 | :special-members:
59 |
60 | Generic Transforms
61 | ------------------
62 |
63 | .. autoclass:: Lambda
--------------------------------------------------------------------------------
/doc/zh/source/torchvision/utils.rst:
--------------------------------------------------------------------------------
1 | torchvision.utils
2 | =================
3 |
4 | .. currentmodule:: torchvision.utils
5 |
6 | .. autofunction:: make_grid
7 |
8 | .. autofunction:: save_image
--------------------------------------------------------------------------------
/tutorial/en/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, Pytorch contributors
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/tutorial/en/_static/img/SRResNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/SRResNet.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/Variable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/Variable.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/cartpole.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/cartpole.gif
--------------------------------------------------------------------------------
/tutorial/en/_static/img/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/cat.jpg
--------------------------------------------------------------------------------
/tutorial/en/_static/img/cat_224x224.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/cat_224x224.jpg
--------------------------------------------------------------------------------
/tutorial/en/_static/img/cat_output1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/cat_output1.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/char_rnn_generation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/char_rnn_generation.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/cifar10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/cifar10.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/data_parallel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/data_parallel.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/DistPyTorch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/DistPyTorch.jpg
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/all_gather.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/all_gather.pdf
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/all_gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/all_gather.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/all_reduce.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/all_reduce.pdf
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/all_reduce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/all_reduce.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/broadcast.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/broadcast.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/gather.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/reduce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/reduce.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/scatter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/scatter.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/send_recv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/send_recv.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/distributed/send_recv_big.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/distributed/send_recv_big.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/dynamic_graph.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/dynamic_graph.gif
--------------------------------------------------------------------------------
/tutorial/en/_static/img/landmarked_face2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/landmarked_face2.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/mnist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/mnist.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/neural-style/dancing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/neural-style/dancing.jpg
--------------------------------------------------------------------------------
/tutorial/en/_static/img/neural-style/neuralstyle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/neural-style/neuralstyle.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/neural-style/picasso.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/neural-style/picasso.jpg
--------------------------------------------------------------------------------
/tutorial/en/_static/img/pytorch-logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/pytorch-logo-dark.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/attention-decoder-network.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 |
3 | // Main styles
4 | nodesep=0.3; ranksep=0.15;
5 |
6 | node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
7 | edge [color=gray, arrowsize=0.5];
8 |
9 | // Layout
10 | {rank=same;input;prev_hidden;encoder_outputs}
11 |
12 |
13 | input -> embedding;
14 | embedding -> dropout;
15 | dropout -> embedded;
16 |
17 | embedded -> attn;
18 | prev_hidden -> attn;
19 | attn -> attn_softmax;
20 | attn_softmax -> attn_weights;
21 | attn_weights -> bmm;
22 | encoder_outputs -> bmm;
23 | bmm -> attn_applied;
24 | attn_applied -> attn_combine;
25 | embedded -> attn_combine;
26 |
27 | attn_combine -> relu -> gru;
28 | prev_hidden -> gru;
29 | gru -> out;
30 | gru -> hidden;
31 |
32 | out -> softmax;
33 | softmax -> output;
34 |
35 | {rank=same;output;hidden}
36 |
37 | // Layer nodes
38 | embedding [fillcolor=dodgerblue, fontcolor=white];
39 | attn [fillcolor=dodgerblue, fontcolor=white];
40 | attn_combine [fillcolor=dodgerblue, fontcolor=white];
41 | bmm [fillcolor=dodgerblue, fontcolor=white];
42 | gru [fillcolor=dodgerblue, fontcolor=white];
43 | out [fillcolor=dodgerblue, fontcolor=white];
44 |
45 | // Function nodes
46 | dropout [fillcolor=palegreen];
47 | relu [fillcolor=palegreen];
48 | softmax [fillcolor=palegreen];
49 | attn_softmax [fillcolor=palegreen];
50 |
51 | }
52 |
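As a reading aid: this DOT file is the source for the attention-decoder figure used by the seq2seq translation tutorial. A PyTorch module with the same data flow might look like the minimal sketch below; the class name, `max_length`, the single-step tensor shapes, and the use of `log_softmax` in place of the graph's softmax node (common for NLL training) are illustrative assumptions, not code taken from this repo.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class AttnDecoderRNN(nn.Module):
        def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=10):
            super(AttnDecoderRNN, self).__init__()
            self.embedding = nn.Embedding(output_size, hidden_size)
            self.dropout = nn.Dropout(dropout_p)                       # embedding -> dropout -> embedded
            self.attn = nn.Linear(hidden_size * 2, max_length)         # embedded + prev_hidden -> attn
            self.attn_combine = nn.Linear(hidden_size * 2, hidden_size)
            self.gru = nn.GRU(hidden_size, hidden_size)
            self.out = nn.Linear(hidden_size, output_size)

        def forward(self, input, prev_hidden, encoder_outputs):
            # input: (1,) token index; prev_hidden: (1, 1, H); encoder_outputs: (max_length, H)
            embedded = self.dropout(self.embedding(input).view(1, 1, -1))
            attn_weights = F.softmax(                                  # attn -> attn_softmax -> attn_weights
                self.attn(torch.cat((embedded[0], prev_hidden[0]), 1)), dim=1)
            attn_applied = torch.bmm(attn_weights.unsqueeze(0),        # attn_weights x encoder_outputs -> bmm
                                     encoder_outputs.unsqueeze(0))
            combined = self.attn_combine(                              # attn_applied + embedded -> attn_combine
                torch.cat((embedded[0], attn_applied[0]), 1))
            gru_out, hidden = self.gru(F.relu(combined).unsqueeze(0), prev_hidden)
            output = F.log_softmax(self.out(gru_out[0]), dim=1)        # out -> softmax -> output
            return output, hidden, attn_weights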
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/attention-decoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/attention-decoder-network.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/decoder-network.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 |
3 | // Main styles
4 | nodesep=0.3; ranksep=0.15;
5 |
6 | node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
7 | edge [color=gray, arrowsize=0.5];
8 |
9 | // Layout
10 | {rank=same;input;prev_hidden}
11 |
12 | input -> embedding;
13 | embedding -> relu;
14 | relu -> gru;
15 |
16 | prev_hidden -> gru;
17 | gru -> out;
18 | gru -> hidden;
19 |
20 | out -> softmax;
21 | softmax -> output;
22 |
23 | {rank=same;output;hidden}
24 |
25 | // Layer nodes
26 | embedding [fillcolor=dodgerblue, fontcolor=white];
27 | gru [fillcolor=dodgerblue, fontcolor=white];
28 | out [fillcolor=dodgerblue, fontcolor=white];
29 |
30 | // Function nodes
31 | relu [fillcolor=palegreen];
32 | softmax [fillcolor=palegreen];
33 |
34 | }
35 |
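A minimal PyTorch sketch of the plain decoder this graph describes; names and single-step shapes are illustrative, and `log_softmax` stands in for the graph's softmax node, as is common when training with NLL loss.

    import torch.nn as nn
    import torch.nn.functional as F

    class DecoderRNN(nn.Module):
        def __init__(self, hidden_size, output_size):
            super(DecoderRNN, self).__init__()
            self.embedding = nn.Embedding(output_size, hidden_size)
            self.gru = nn.GRU(hidden_size, hidden_size)
            self.out = nn.Linear(hidden_size, output_size)

        def forward(self, input, prev_hidden):
            embedded = self.embedding(input).view(1, 1, -1)             # input -> embedding
            gru_out, hidden = self.gru(F.relu(embedded), prev_hidden)   # relu -> gru
            output = F.log_softmax(self.out(gru_out[0]), dim=1)         # out -> softmax -> output
            return output, hidden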
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/decoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/decoder-network.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/decoder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/decoder.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/decoder@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/decoder@2x.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/encoder-network.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 |
3 | // Main styles
4 | nodesep=0.3; ranksep=0.15;
5 |
6 | node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
7 | edge [color=gray, arrowsize=0.5];
8 |
9 | // Layout
10 | {rank=same;input;prev_hidden}
11 |
12 | input -> embedding;
13 | embedding -> embedded;
14 | embedded -> gru;
15 | prev_hidden -> gru;
16 | gru -> output;
17 | gru -> hidden;
18 |
19 | embedding [fillcolor=dodgerblue, fontcolor=white];
20 | gru [fillcolor=dodgerblue, fontcolor=white];
21 |
22 | }
23 |
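A minimal PyTorch sketch of the encoder this graph describes; the class name and single-step shapes are illustrative assumptions, not code from this repo.

    import torch.nn as nn

    class EncoderRNN(nn.Module):
        def __init__(self, input_size, hidden_size):
            super(EncoderRNN, self).__init__()
            self.embedding = nn.Embedding(input_size, hidden_size)
            self.gru = nn.GRU(hidden_size, hidden_size)

        def forward(self, input, prev_hidden):
            embedded = self.embedding(input).view(1, 1, -1)    # input -> embedding -> embedded
            output, hidden = self.gru(embedded, prev_hidden)   # embedded + prev_hidden -> gru -> output, hidden
            return output, hidden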
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/encoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/encoder-network.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/seq2seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/seq2seq.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/seq2seq@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/seq2seq@2x.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/word-encoding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/word-encoding.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq-seq-images/word-encoding@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq-seq-images/word-encoding@2x.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/seq2seq_flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/seq2seq_flat.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/stn/FSeq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/stn/FSeq.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/stn/Five.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/stn/Five.gif
--------------------------------------------------------------------------------
/tutorial/en/_static/img/stn/stn-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/stn/stn-arch.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/stn/tr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/stn/tr.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/tensor_illustration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/tensor_illustration.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/tensor_illustration_flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/tensor_illustration_flat.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/thumbnails/babel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/thumbnails/babel.jpg
--------------------------------------------------------------------------------
/tutorial/en/_static/img/thumbnails/default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/thumbnails/default.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/thumbnails/examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/thumbnails/examples.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/thumbnails/pytorch-logo-flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/thumbnails/pytorch-logo-flat.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/thumbnails/torch-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/thumbnails/torch-logo.png
--------------------------------------------------------------------------------
/tutorial/en/_static/img/torch-nn-vs-pytorch-nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/_static/img/torch-nn-vs-pytorch-nn.png
--------------------------------------------------------------------------------
/tutorial/en/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
6 |
13 |
22 |
36 | {% endblock %}
--------------------------------------------------------------------------------
/tutorial/en/advanced_source/README.txt:
--------------------------------------------------------------------------------
1 | Advanced Tutorials
2 | ------------------
3 |
4 | 1. neural_style_tutorial.py
5 | Neural Transfer with PyTorch
6 | http://pytorch.org/tutorials/advanced/neural_style_tutorial.html
7 |
8 | 2. numpy_extensions_tutorial.py
9 | Creating extensions using numpy and scipy
10 | http://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html
11 |
12 | 3. c_extension.rst
13 | Custom C extensions for PyTorch
14 | http://pytorch.org/tutorials/advanced/c_extension.html
15 |
16 | 4. super_resolution_with_caffe2.py
17 | Transferring a model from PyTorch to Caffe2 and Mobile using ONNX
18 | http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
19 |
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/README.txt:
--------------------------------------------------------------------------------
1 | Beginner Tutorials
2 | ------------------
3 |
4 | 1. blitz/* and deep_learning_60min_blitz.rst
5 | Deep Learning with PyTorch: A 60 Minute Blitz
6 | http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html
7 |
8 | 2. former_torchies/* and former_torchies_tutorial.rst
9 | PyTorch for former Torch users
10 | http://pytorch.org/tutorials/beginner/former_torchies_tutorial.html
11 |
12 | 3. examples_*/* and pytorch_with_examples.rst
13 | Learning PyTorch with Examples
14 | http://pytorch.org/tutorials/beginner/pytorch_with_examples.html
15 |
16 | 4. transfer_learning_tutorial.py
17 | Transfer Learning tutorial
18 | http://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
19 |
20 | 5. nlp/* and deep_learning_nlp_tutorial.rst
21 | Deep Learning for NLP with PyTorch
22 | http://pytorch.org/tutorials/beginner/deep_learning_nlp_tutorial.html
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/blitz/README.txt:
--------------------------------------------------------------------------------
1 | Deep Learning with PyTorch: A 60 Minute Blitz
2 | ---------------------------------------------
3 |
4 | 1. tensor_tutorial.py
5 | What is PyTorch?
6 | http://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html
7 |
8 | 2. autograd_tutorial.py
9 | Autograd: automatic differentiation
10 | http://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html
11 |
12 | 3. neural_networks_tutorial.py
13 | Neural Networks
14 | http://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html
15 |
16 | 4. cifar10_tutorial.py
17 | Training a classifier
18 | http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/deep_learning_60min_blitz.rst:
--------------------------------------------------------------------------------
1 | Deep Learning with PyTorch: A 60 Minute Blitz
2 | ---------------------------------------------
3 | **Author**: `Soumith Chintala `_
4 |
5 | Goals of this tutorial:
6 |
7 | - Understand PyTorch’s Tensor library and neural networks at a high
8 | level.
9 | - Train a small neural network to classify images
10 |
11 | *This tutorial assumes that you have a basic familiarity with numpy*
12 |
13 | .. Note::
14 | Make sure you have the `torch`_ and `torchvision`_ packages installed.
15 |
16 | .. _torch: https://github.com/pytorch/pytorch
17 | .. _torchvision: https://github.com/pytorch/vision
18 |
19 |
20 | .. toctree::
21 | :hidden:
22 |
23 | /beginner/blitz/tensor_tutorial
24 | /beginner/blitz/autograd_tutorial
25 | /beginner/blitz/neural_networks_tutorial
26 | /beginner/blitz/cifar10_tutorial
27 | /beginner/blitz/data_parallel_tutorial
28 |
29 | .. galleryitem:: /beginner/blitz/tensor_tutorial.py
30 | :figure: /_static/img/tensor_illustration_flat.png
31 |
32 | .. galleryitem:: /beginner/blitz/autograd_tutorial.py
33 | :figure: /_static/img/Variable.png
34 |
35 | .. galleryitem:: /beginner/blitz/neural_networks_tutorial.py
36 | :figure: /_static/img/mnist.png
37 |
38 | .. galleryitem:: /beginner/blitz/cifar10_tutorial.py
39 | :figure: /_static/img/cifar10.png
40 |
41 | .. galleryitem:: /beginner/blitz/data_parallel_tutorial.py
42 | :figure: /_static/img/data_parallel.png
43 |
44 | .. raw:: html
45 |
46 |
47 |
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/examples_autograd/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/beginner_source/examples_autograd/README.txt
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/examples_nn/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/beginner_source/examples_nn/README.txt
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/examples_tensor/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/en/beginner_source/examples_tensor/README.txt
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/examples_tensor/two_layer_net_numpy.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Warm-up: numpy
4 | --------------
5 |
6 | A fully-connected ReLU network with one hidden layer and no biases, trained to
7 | predict y from x using Euclidean error.
8 |
9 | This implementation uses numpy to manually compute the forward pass, loss, and
10 | backward pass.
11 |
12 | A numpy array is a generic n-dimensional array; it does not know anything about
13 | deep learning or gradients or computational graphs, and is just a way to perform
14 | generic numeric computations.
15 | """
16 | import numpy as np
17 |
18 | # N is batch size; D_in is input dimension;
19 | # H is hidden dimension; D_out is output dimension.
20 | N, D_in, H, D_out = 64, 1000, 100, 10
21 |
22 | # Create random input and output data
23 | x = np.random.randn(N, D_in)
24 | y = np.random.randn(N, D_out)
25 |
26 | # Randomly initialize weights
27 | w1 = np.random.randn(D_in, H)
28 | w2 = np.random.randn(H, D_out)
29 |
30 | learning_rate = 1e-6
31 | for t in range(500):
32 | # Forward pass: compute predicted y
33 | h = x.dot(w1)
34 | h_relu = np.maximum(h, 0)
35 | y_pred = h_relu.dot(w2)
36 |
37 | # Compute and print loss
38 | loss = np.square(y_pred - y).sum()
39 | print(t, loss)
40 |
41 | # Backprop to compute gradients of w1 and w2 with respect to loss
42 | grad_y_pred = 2.0 * (y_pred - y)
43 | grad_w2 = h_relu.T.dot(grad_y_pred)
44 | grad_h_relu = grad_y_pred.dot(w2.T)
45 | grad_h = grad_h_relu.copy()
46 | grad_h[h < 0] = 0
47 | grad_w1 = x.T.dot(grad_h)
48 |
49 | # Update weights
50 | w1 -= learning_rate * grad_w1
51 | w2 -= learning_rate * grad_w2
52 |
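For contrast with the manual backward pass above, the same two-layer network can be written with PyTorch tensors so that autograd computes the gradients. This is a sketch using the current tensor/autograd API (`requires_grad`, PyTorch 0.4+), not the Variable-based style the repo's examples_autograd files use.

    import torch

    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 64, 1000, 100, 10
    x = torch.randn(N, D_in)
    y = torch.randn(N, D_out)
    w1 = torch.randn(D_in, H, requires_grad=True)
    w2 = torch.randn(H, D_out, requires_grad=True)

    learning_rate = 1e-6
    for t in range(500):
        y_pred = x.mm(w1).clamp(min=0).mm(w2)   # forward: linear -> ReLU -> linear
        loss = (y_pred - y).pow(2).sum()
        loss.backward()                          # autograd fills w1.grad and w2.grad
        with torch.no_grad():                    # update weights outside the graph
            w1 -= learning_rate * w1.grad
            w2 -= learning_rate * w2.grad
            w1.grad.zero_()
            w2.grad.zero_()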
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/former_torchies/README.txt:
--------------------------------------------------------------------------------
1 | PyTorch for former Torch users
2 | ------------------------------
3 |
4 | 1. tensor_tutorial.py
5 | Tensors
6 | http://pytorch.org/tutorials/beginner/former_torchies/tensor_tutorial.html
7 |
8 | 2. autograd.py
9 | Autograd
10 | http://pytorch.org/tutorials/beginner/former_torchies/autograd_tutorial.html
11 |
12 | 3. nn_tutorial.py
13 | nn package
14 | http://pytorch.org/tutorials/beginner/former_torchies/nn_tutorial.html
15 |
16 | 4. parallelism_tutorial.py
17 | Multi-GPU examples
18 | http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html
19 |
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/former_torchies_tutorial.rst:
--------------------------------------------------------------------------------
1 | PyTorch for former Torch users
2 | ------------------------------
3 | **Author**: `Soumith Chintala `_
4 |
5 | In this tutorial, you will learn the following:
6 |
7 | 1. Using torch Tensors, and the important differences from (Lua)Torch
8 | 2. Using the autograd package
9 | 3. Building neural networks
10 |
11 | - Building a ConvNet
12 | - Building a Recurrent Net
13 |
14 | 4. Using multiple GPUs
15 |
16 |
17 | .. toctree::
18 | :hidden:
19 |
20 | /beginner/former_torchies/tensor_tutorial
21 | /beginner/former_torchies/autograd_tutorial
22 | /beginner/former_torchies/nn_tutorial
23 | /beginner/former_torchies/parallelism_tutorial
24 |
25 | .. galleryitem:: /beginner/former_torchies/tensor_tutorial.py
26 | :figure: /_static/img/tensor_illustration_flat.png
27 |
28 | .. galleryitem:: /beginner/former_torchies/autograd_tutorial.py
29 | :figure: /_static/img/Variable.png
30 |
31 | .. galleryitem:: /beginner/former_torchies/nn_tutorial.py
32 | :figure: /_static/img/torch-nn-vs-pytorch-nn.png
33 |
34 | .. galleryitem:: /beginner/former_torchies/parallelism_tutorial.py
35 |
36 | .. raw:: html
37 |
38 |
--------------------------------------------------------------------------------
/tutorial/en/beginner_source/nlp/README.txt:
--------------------------------------------------------------------------------
1 | Deep Learning for NLP with PyTorch
2 | ----------------------------------
3 |
4 | 1. pytorch_tutorial.py
5 | Introduction to PyTorch
6 | http://pytorch.org/tutorials/beginner/nlp/pytorch_tutorial.html
7 |
8 | 2. deep_learning_tutorial.py
9 | Deep Learning with PyTorch
10 | http://pytorch.org/tutorials/beginner/nlp/deep_learning_tutorial.html
11 |
12 | 3. word_embeddings_tutorial.py
13 | Word Embeddings: Encoding Lexical Semantics
14 | http://pytorch.org/tutorials/beginner/nlp/word_embeddings_tutorial.html
15 |
16 | 4. sequence_models_tutorial.py
17 | Sequence Models and Long Short-Term Memory Networks
18 | http://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html
19 |
20 | 5. advanced_tutorial.py
21 | Advanced: Making Dynamic Decisions and the Bi-LSTM CRF
22 | http://pytorch.org/tutorials/beginner/nlp/advanced_tutorial.html
--------------------------------------------------------------------------------
/tutorial/en/build.sh:
--------------------------------------------------------------------------------
1 | # TODO: make sure PyTorch is installed
2 | pip install -r requirements.txt
3 | make docs
4 |
--------------------------------------------------------------------------------
/tutorial/en/intermediate_source/README.txt:
--------------------------------------------------------------------------------
1 | Intermediate Tutorials
2 | ----------------------
3 |
4 | 1. char_rnn_classification_tutorial.py
5 | Classifying Names with a Character-Level RNN
6 | http://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
7 |
8 | 2. char_rnn_generation_tutorial.py
9 | Generating Names with a Character-Level RNN
10 | http://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
11 |
12 | 3. seq2seq_translation_tutorial.py
13 | Translation with a Sequence to Sequence Network and Attention
14 | http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
15 |
16 | 4. reinforcement_q_learning.py
17 | Reinforcement Learning (DQN) tutorial
18 | http://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
19 |
20 | 5. dist_tuto.rst
21 | Writing Distributed Applications with PyTorch
22 | http://pytorch.org/tutorials/intermediate/dist_tuto.html
23 |
24 | 6. spatial_transformer_tutorial.py
25 | Spatial Transformer Networks Tutorial
26 | http://pytorch.org/tutorials/intermediate/spatial_transformer_tutorial.html
27 |
--------------------------------------------------------------------------------
/tutorial/en/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx_rtd_theme
3 | sphinx-gallery
4 | numpy
5 | matplotlib
6 | torchvision
7 | torch
8 |
9 | # to run examples
10 | pandas
11 | scikit-image
12 | pillow
13 |
--------------------------------------------------------------------------------
/tutorial/zh/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2017, Pytorch contributors
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/SRResNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/SRResNet.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/Variable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/Variable.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/cartpole.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/cartpole.gif
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/cat.jpg
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/cat_224x224.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/cat_224x224.jpg
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/cat_output1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/cat_output1.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/char_rnn_generation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/char_rnn_generation.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/cifar10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/cifar10.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/data_parallel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/data_parallel.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/DistPyTorch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/DistPyTorch.jpg
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/all_gather.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/all_gather.pdf
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/all_gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/all_gather.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/all_reduce.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/all_reduce.pdf
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/all_reduce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/all_reduce.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/broadcast.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/broadcast.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/gather.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/gather.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/reduce.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/reduce.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/scatter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/scatter.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/send_recv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/send_recv.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/distributed/send_recv_big.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/distributed/send_recv_big.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/dynamic_graph.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/dynamic_graph.gif
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/landmarked_face2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/landmarked_face2.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/mnist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/mnist.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/neural-style/dancing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/neural-style/dancing.jpg
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/neural-style/neuralstyle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/neural-style/neuralstyle.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/neural-style/picasso.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/neural-style/picasso.jpg
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/pytorch-logo-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/pytorch-logo-dark.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/attention-decoder-network.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 |
3 | // Main styles
4 | nodesep=0.3; ranksep=0.15;
5 |
6 | node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
7 | edge [color=gray, arrowsize=0.5];
8 |
9 | // Layout
10 | {rank=same;input;prev_hidden;encoder_outputs}
11 |
12 |
13 | input -> embedding;
14 | embedding -> dropout;
15 | dropout -> embedded;
16 |
17 | embedded -> attn;
18 | prev_hidden -> attn;
19 | attn -> attn_softmax;
20 | attn_softmax -> attn_weights;
21 | attn_weights -> bmm;
22 | encoder_outputs -> bmm;
23 | bmm -> attn_applied;
24 | attn_applied -> attn_combine;
25 | embedded -> attn_combine;
26 |
27 | attn_combine -> relu -> gru;
28 | prev_hidden -> gru;
29 | gru -> out;
30 | gru -> hidden;
31 |
32 | out -> softmax;
33 | softmax -> output;
34 |
35 | {rank=same;output;hidden}
36 |
37 | // Layer nodes
38 | embedding [fillcolor=dodgerblue, fontcolor=white];
39 | attn [fillcolor=dodgerblue, fontcolor=white];
40 | attn_combine [fillcolor=dodgerblue, fontcolor=white];
41 | bmm [fillcolor=dodgerblue, fontcolor=white];
42 | gru [fillcolor=dodgerblue, fontcolor=white];
43 | out [fillcolor=dodgerblue, fontcolor=white];
44 |
45 | // Function nodes
46 | dropout [fillcolor=palegreen];
47 | relu [fillcolor=palegreen];
48 | softmax [fillcolor=palegreen];
49 | attn_softmax [fillcolor=palegreen];
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/attention-decoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/attention-decoder-network.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/decoder-network.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 |
3 | // Main styles
4 | nodesep=0.3; ranksep=0.15;
5 |
6 | node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
7 | edge [color=gray, arrowsize=0.5];
8 |
9 | // Layout
10 | {rank=same;input;prev_hidden}
11 |
12 | input -> embedding;
13 | embedding -> relu;
14 | relu -> gru;
15 |
16 | prev_hidden -> gru;
17 | gru -> out;
18 | gru -> hidden;
19 |
20 | out -> softmax;
21 | softmax -> output;
22 |
23 | {rank=same;output;hidden}
24 |
25 | // Layer nodes
26 | embedding [fillcolor=dodgerblue, fontcolor=white];
27 | gru [fillcolor=dodgerblue, fontcolor=white];
28 | out [fillcolor=dodgerblue, fontcolor=white];
29 |
30 | // Function nodes
31 | relu [fillcolor=palegreen];
32 | softmax [fillcolor=palegreen];
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/decoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/decoder-network.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/decoder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/decoder.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/decoder@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/decoder@2x.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/encoder-network.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 |
3 | // Main styles
4 | nodesep=0.3; ranksep=0.15;
5 |
6 | node [shape=rect, fillcolor=darkorange, color=white, style=filled, fontsize=11, fontname="arial", height=0.2];
7 | edge [color=gray, arrowsize=0.5];
8 |
9 | // Layout
10 | {rank=same;input;prev_hidden}
11 |
12 | input -> embedding;
13 | embedding -> embedded;
14 | embedded -> gru;
15 | prev_hidden -> gru;
16 | gru -> output;
17 | gru -> hidden;
18 |
19 | embedding [fillcolor=dodgerblue, fontcolor=white];
20 | gru [fillcolor=dodgerblue, fontcolor=white];
21 |
22 | }
23 |
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/encoder-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/encoder-network.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/seq2seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/seq2seq.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/seq2seq@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/seq2seq@2x.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/word-encoding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/word-encoding.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq-seq-images/word-encoding@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq-seq-images/word-encoding@2x.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/seq2seq_flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/seq2seq_flat.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/stn/FSeq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/stn/FSeq.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/stn/Five.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/stn/Five.gif
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/stn/stn-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/stn/stn-arch.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/stn/tr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/stn/tr.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/tensor_illustration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/tensor_illustration.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/tensor_illustration_flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/tensor_illustration_flat.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/thumbnails/babel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/thumbnails/babel.jpg
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/thumbnails/default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/thumbnails/default.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/thumbnails/examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/thumbnails/examples.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/thumbnails/pytorch-logo-flat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/thumbnails/pytorch-logo-flat.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/thumbnails/torch-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/thumbnails/torch-logo.png
--------------------------------------------------------------------------------
/tutorial/zh/_static/img/torch-nn-vs-pytorch-nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/_static/img/torch-nn-vs-pytorch-nn.png
--------------------------------------------------------------------------------
/tutorial/zh/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
6 |
13 |
22 |
36 | {% endblock %}
--------------------------------------------------------------------------------
/tutorial/zh/advanced_source/README.txt:
--------------------------------------------------------------------------------
1 | Advanced Tutorials
2 | ------------------
3 |
4 | 1. neural_style_tutorial.py
5 | Neural Transfer with PyTorch
6 | http://pytorch.org/tutorials/advanced/neural_style_tutorial.html
7 |
8 | 2. numpy_extensions_tutorial.py
9 | Creating extensions using numpy and scipy
10 | http://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html
11 |
12 | 3. c_extension.rst
13 | Custom C extensions for PyTorch
14 | http://pytorch.org/tutorials/advanced/c_extension.html
15 |
16 | 4. super_resolution_with_caffe2.py
17 | Transferring a model from PyTorch to Caffe2 and Mobile using ONNX
18 | http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
19 |
--------------------------------------------------------------------------------
/tutorial/zh/apachecn-learning-group.rst:
--------------------------------------------------------------------------------
1 | Organization Learning & Discussion Groups
2 | ==========================================
3 |
4 | Machine learning group: `629470233 `__ (2000 members)
5 |
6 | Big data group: `214293307 `__ (2000 members)
7 |
8 | Kaggle competition group: `686932392 `__ (2000 members)
9 |
10 | About us: ``__
11 |
12 | Join the organization: ``__
13 |
14 | For more learning & discussion groups, see: ``__
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/README.txt:
--------------------------------------------------------------------------------
1 | Beginner Tutorials
2 | ------------------
3 |
4 | 1. blitz/* and deep_learning_60min_blitz.rst
5 | Deep Learning with PyTorch: A 60 Minute Blitz
6 | http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html
7 |
8 | 2. former_torchies/* and former_torchies_tutorial.rst
9 | PyTorch for former Torch users
10 | http://pytorch.org/tutorials/beginner/former_torchies_tutorial.html
11 |
12 | 3. examples_*/* and pytorch_with_examples.rst
13 | Learning PyTorch with Examples
14 | http://pytorch.org/tutorials/beginner/pytorch_with_examples.html
15 |
16 | 4. transfer_learning_tutorial.py
17 | Transfer Learning tutorial
18 | http://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
19 |
20 | 5. nlp/* and deep_learning_nlp_tutorial.rst
21 | Deep Learning for NLP with PyTorch
22 | http://pytorch.org/tutorials/beginner/deep_learning_nlp_tutorial.html
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/blitz/README.txt:
--------------------------------------------------------------------------------
1 | Deep Learning with PyTorch: A 60 Minute Blitz
2 | ---------------------------------------------
3 |
4 | 1. tensor_tutorial.py
5 | What is PyTorch?
6 | http://pytorch.org/tutorials/beginner/blitz/tensor_tutorial.html
7 |
8 | 2. autograd_tutorial.py
9 | Autograd: automatic differentiation
10 | http://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html
11 |
12 | 3. neural_networks_tutorial.py
13 | Neural Networks
14 | http://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html
15 |
16 | 4. cifar10_tutorial.py
17 | Training a classifier
18 | http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/deep_learning_60min_blitz.rst:
--------------------------------------------------------------------------------
1 | PyTorch Deep Learning: A 60 Minute Blitz
2 | ---------------------------------------------
3 | **Author**: `Soumith Chintala `_
4 |
5 | Goals of this tutorial:
6 |
7 | - Understand PyTorch's Tensor library and neural networks at a high level.
8 | - Learn to train a small neural network to classify images
9 |
10 | *This tutorial assumes a basic familiarity with numpy*
11 |
12 | .. Note::
13 | Make sure you have the `torch`_ and `torchvision`_ packages installed.
14 |
15 | .. _torch: https://github.com/pytorch/pytorch
16 | .. _torchvision: https://github.com/pytorch/vision
17 |
18 |
19 | .. toctree::
20 | :hidden:
21 |
22 | /beginner/blitz/tensor_tutorial
23 | /beginner/blitz/autograd_tutorial
24 | /beginner/blitz/neural_networks_tutorial
25 | /beginner/blitz/cifar10_tutorial
26 | /beginner/blitz/data_parallel_tutorial
27 |
28 | .. galleryitem:: /beginner/blitz/tensor_tutorial.py
29 | :figure: /_static/img/tensor_illustration_flat.png
30 |
31 | .. galleryitem:: /beginner/blitz/autograd_tutorial.py
32 | :figure: /_static/img/Variable.png
33 |
34 | .. galleryitem:: /beginner/blitz/neural_networks_tutorial.py
35 | :figure: /_static/img/mnist.png
36 |
37 | .. galleryitem:: /beginner/blitz/cifar10_tutorial.py
38 | :figure: /_static/img/cifar10.png
39 |
40 | .. galleryitem:: /beginner/blitz/data_parallel_tutorial.py
41 | :figure: /_static/img/data_parallel.png
42 |
43 | .. raw:: html
44 |
45 |
46 |
--------------------------------------------------------------------------------
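The note in deep_learning_60min_blitz.rst above asks the reader to install `torch` and `torchvision` before starting. A minimal sanity check, assuming a standard pip installation; this snippet is illustrative and not part of the repository:

    # Hypothetical pre-flight check for the 60-minute blitz prerequisites.
    import torch
    import torchvision  # raises ImportError if the package is missing

    print(torch.__version__)          # e.g. "0.3.0"
    print(torch.cuda.is_available())  # True only if a usable CUDA build is installed
    print(torch.randn(2, 3))          # a random 2x3 Tensor, confirming the core works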
/tutorial/zh/beginner_source/examples_autograd/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/beginner_source/examples_autograd/README.txt
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/examples_nn/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/beginner_source/examples_nn/README.txt
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/examples_tensor/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wizardforcel/pytorch-doc-zh/a722cff892c7cc30fab952e6a7562742197c0f74/tutorial/zh/beginner_source/examples_tensor/README.txt
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/former_torchies/README.txt:
--------------------------------------------------------------------------------
1 | PyTorch for former Torch users
2 | ------------------------------
3 |
4 | 1. tensor_tutorial.py
5 | Tensors
6 | http://pytorch.org/tutorials/beginner/former_torchies/tensor_tutorial.html
7 |
8 | 2. autograd.py
9 | Autograd
10 | http://pytorch.org/tutorials/beginner/former_torchies/autograd_tutorial.html
11 |
12 | 3. nn_tutorial.py
13 | nn package
14 | http://pytorch.org/tutorials/beginner/former_torchies/nn_tutorial.html
15 |
16 | 4. parallelism_tutorial.py
17 | Multi-GPU examples
18 | http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html
19 |
--------------------------------------------------------------------------------
/tutorial/zh/beginner_source/former_torchies_tutorial.rst:
--------------------------------------------------------------------------------
1 | PyTorch for former Torch users
2 | ------------------------------
3 | **Author**: `Soumith Chintala `_
4 |
5 | In this tutorial, you will learn how to:
6 |
7 | 1. Use torch Tensors, which are very different from (Lua)Torch's
8 | 2. Use the autograd package
9 | 3. Build neural networks
10 |
11 |    - Build a ConvNet
12 |    - Build a Recurrent Net
13 |
14 | 4. Use multiple GPUs (a minimal sketch follows this file listing)
15 |
16 |
17 | .. toctree::
18 | :hidden:
19 |
20 | /beginner/former_torchies/tensor_tutorial
21 | /beginner/former_torchies/autograd_tutorial
22 | /beginner/former_torchies/nn_tutorial
23 | /beginner/former_torchies/parallelism_tutorial
24 |
25 | .. galleryitem:: /beginner/former_torchies/tensor_tutorial.py
26 | :figure: /_static/img/tensor_illustration_flat.png
27 |
28 | .. galleryitem:: /beginner/former_torchies/autograd_tutorial.py
29 | :figure: /_static/img/Variable.png
30 |
31 | .. galleryitem:: /beginner/former_torchies/nn_tutorial.py
32 | :figure: /_static/img/torch-nn-vs-pytorch-nn.png
33 |
34 | .. galleryitem:: /beginner/former_torchies/parallelism_tutorial.py
35 |
36 | .. raw:: html
37 |
38 |
--------------------------------------------------------------------------------
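Item 4 above ("Use multiple GPUs") points to the parallelism tutorial, whose core pattern is wrapping a module in torch.nn.DataParallel. A minimal sketch, assuming the 0.3-era Variable API and at least one CUDA device for the GPU path; illustrative, not part of the repository:

    import torch
    import torch.nn as nn
    from torch.autograd import Variable

    model = nn.Linear(10, 5)                # any nn.Module works here
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)      # split each batch across the GPUs
    if torch.cuda.is_available():
        model = model.cuda()

    x = Variable(torch.randn(32, 10))       # a batch of 32 inputs
    if torch.cuda.is_available():
        x = x.cuda()
    print(model(x).size())                  # torch.Size([32, 5])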
/tutorial/zh/beginner_source/nlp/README.txt:
--------------------------------------------------------------------------------
1 | Deep Learning for NLP with PyTorch
2 | ----------------------------------
3 |
4 | 1. pytorch_tutorial.py
5 | Introduction to PyTorch
6 | http://pytorch.org/tutorials/beginner/nlp/pytorch_tutorial.html
7 |
8 | 2. deep_learning_tutorial.py
9 | Deep Learning with PyTorch
10 | http://pytorch.org/tutorials/beginner/nlp/deep_learning_tutorial.html
11 |
12 | 3. word_embeddings_tutorial.py
13 | Word Embeddings: Encoding Lexical Semantics
14 | http://pytorch.org/tutorials/beginner/nlp/word_embeddings_tutorial.html
15 |
16 | 4. sequence_models_tutorial.py
17 | Sequence Models and Long Short-Term Memory Networks (a minimal sketch follows this listing)
18 | http://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html
19 |
20 | 5. advanced_tutorial.py
21 | Advanced: Making Dynamic Decisions and the Bi-LSTM CRF
22 | http://pytorch.org/tutorials/beginner/nlp/advanced_tutorial.html
--------------------------------------------------------------------------------
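Item 4 above lists the sequence-models tutorial; its central object is torch.nn.LSTM. A tiny sketch of the shapes involved, assuming the 0.3-era Variable API; illustrative, not part of the repository:

    import torch
    import torch.nn as nn
    from torch.autograd import Variable

    lstm = nn.LSTM(input_size=3, hidden_size=3)  # one layer, input dim 3, hidden dim 3
    inputs = Variable(torch.randn(5, 1, 3))      # (seq_len=5, batch=1, input_size=3)
    h0 = Variable(torch.zeros(1, 1, 3))          # (num_layers, batch, hidden_size)
    c0 = Variable(torch.zeros(1, 1, 3))          # initial cell state, same shape
    out, (hn, cn) = lstm(inputs, (h0, c0))
    print(out.size())                            # torch.Size([5, 1, 3])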
/tutorial/zh/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # TODO: make sure pytorch installed
3 | set -e
4 | pip install -r requirements.txt
5 | make docs
6 |
--------------------------------------------------------------------------------
/tutorial/zh/intermediate_source/README.txt:
--------------------------------------------------------------------------------
1 | Intermediate tutorials
2 | ----------------------
3 |
4 | 1. char_rnn_classification_tutorial.py
5 | Classifying Names with a Character-Level RNN
6 | http://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
7 |
8 | 2. char_rnn_generation_tutorial.py
9 | Generating Names with a Character-Level RNN
10 | http://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
11 |
12 | 3. seq2seq_translation_tutorial.py
13 | Translation with a Sequence to Sequence Network and Attention
14 | http://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
15 |
16 | 4. reinforcement_q_learning.py
17 | Reinforcement Learning (DQN) tutorial
18 | http://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
19 |
20 | 5. dist_tuto.rst
21 | Writing Distributed Applications with PyTorch
22 | http://pytorch.org/tutorials/intermediate/dist_tuto.html
23 |
24 | 6. spatial_transformer_tutorial
25 | Spatial Transformer Networks Tutorial
26 | http://pytorch.org/tutorials/intermediate/spatial_transformer_tutorial.html
27 |
--------------------------------------------------------------------------------
/tutorial/zh/project-contributors.rst:
--------------------------------------------------------------------------------
1 | Project Contributors
2 | ====================
3 |
4 | This page lists the contributors to the `apachecn/pytorch-doc-zh `__ project.
5 |
6 | Project Lead
7 | ------------
8 | - `@那伊抹微笑 `__
9 |
10 | Contributors
11 | ------------
12 |
13 | PyTorch Chinese tutorial contributors
14 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
15 |
16 | - `@那伊抹微笑 `__
17 |
18 | Feedback
19 | --------
20 |
21 | - Contact the project lead `@那伊抹微笑 `__.
22 | - Open an issue on our `apachecn/pytorch-doc-zh `__ GitHub.
23 | - Send an email to: pytorch#apachecn.org (replace # with @).
24 | - Contact a group owner/admin in one of our `learning & discussion groups <./apachecn-learning-group.rst>`__.
25 |
--------------------------------------------------------------------------------
/tutorial/zh/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx_rtd_theme
3 | sphinx-gallery
4 | numpy
5 | matplotlib
6 | torchvision
7 | torch
8 |
9 | # to run examples
10 | pandas
11 | scikit-image
12 | pillow
13 |
--------------------------------------------------------------------------------