├── fpn.pdf
├── model
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-37.pyc
│ ├── _utils.cpython-37.pyc
│ ├── anchor_utils.cpython-37.pyc
│ ├── backbone_utils.cpython-37.pyc
│ ├── faster_rcnn.cpython-37.pyc
│ ├── feature_pyramid_network.cpython-37.pyc
│ ├── generalized_rcnn.cpython-37.pyc
│ ├── image_list.cpython-37.pyc
│ ├── roi_heads.cpython-37.pyc
│ ├── rpn.cpython-37.pyc
│ └── transform.cpython-37.pyc
├── _utils.py
├── anchor_utils.py
├── backbone_utils.py
├── faster_rcnn.py
├── feature_pyramid_network.py
├── generalized_rcnn.py
├── image_list.py
├── ops
│ ├── __pycache__
│ │ └── poolers.cpython-37.pyc
│ └── poolers.py
├── roi_heads.py
├── rpn.py
└── transform.py
├── readme.assets
├── image-20211216112059762.png
├── image-20211216112509835.png
└── image-20211216112526108.png
├── readme.md
├── tensorrt_code
├── .gitignore
├── CMakeLists.txt
├── Makefile
├── README.md
├── README.zh-cn.md
├── TensorRT.sln
├── TensorRT.vcxproj
├── TensorRT.vcxproj.filters
├── TensorRT.vcxproj.user
├── YoloTRT_speed.xlsx
├── dll_export.def
├── onnx
│ ├── make_pb.sh
│ ├── onnx-ml.proto
│ └── onnx-operators-ml.proto
├── onnx_parser
│ ├── onnx_parser_7.x
│ │ ├── ImporterContext.hpp
│ │ ├── LoopHelpers.cpp
│ │ ├── LoopHelpers.hpp
│ │ ├── ModelImporter.cpp
│ │ ├── ModelImporter.hpp
│ │ ├── NvOnnxParser.cpp
│ │ ├── NvOnnxParser.h
│ │ ├── OnnxAttrs.cpp
│ │ ├── OnnxAttrs.hpp
│ │ ├── RNNHelpers.cpp
│ │ ├── RNNHelpers.hpp
│ │ ├── ShapeTensor.cpp
│ │ ├── ShapeTensor.hpp
│ │ ├── ShapedWeights.cpp
│ │ ├── ShapedWeights.hpp
│ │ ├── Status.hpp
│ │ ├── TensorOrWeights.hpp
│ │ ├── builtin_op_importers.cpp
│ │ ├── builtin_op_importers.hpp
│ │ ├── common.hpp
│ │ ├── onnx2trt.hpp
│ │ ├── onnx2trt_common.hpp
│ │ ├── onnx2trt_runtime.hpp
│ │ ├── onnx2trt_utils.cpp
│ │ ├── onnx2trt_utils.hpp
│ │ ├── onnx_utils.hpp
│ │ ├── toposort.hpp
│ │ ├── trt_utils.hpp
│ │ └── utils.hpp
│ ├── onnx_parser_8.x
│ │ ├── ImporterContext.hpp
│ │ ├── LoopHelpers.cpp
│ │ ├── LoopHelpers.hpp
│ │ ├── ModelImporter.cpp
│ │ ├── ModelImporter.hpp
│ │ ├── NvOnnxParser.cpp
│ │ ├── NvOnnxParser.h
│ │ ├── OnnxAttrs.cpp
│ │ ├── OnnxAttrs.hpp
│ │ ├── RNNHelpers.cpp
│ │ ├── RNNHelpers.hpp
│ │ ├── ShapeTensor.cpp
│ │ ├── ShapeTensor.hpp
│ │ ├── ShapedWeights.cpp
│ │ ├── ShapedWeights.hpp
│ │ ├── Status.hpp
│ │ ├── TensorOrWeights.hpp
│ │ ├── builtin_op_importers.cpp
│ │ ├── builtin_op_importers.hpp
│ │ ├── onnx2trt.hpp
│ │ ├── onnx2trt_common.hpp
│ │ ├── onnx2trt_runtime.hpp
│ │ ├── onnx2trt_utils.cpp
│ │ ├── onnx2trt_utils.hpp
│ │ ├── onnxErrorRecorder.cpp
│ │ ├── onnxErrorRecorder.hpp
│ │ ├── onnx_utils.hpp
│ │ ├── readme.md
│ │ ├── toposort.hpp
│ │ ├── trt_utils.hpp
│ │ └── utils.hpp
│ ├── readme.md
│ ├── use_tensorrt_7.x.sh
│ └── use_tensorrt_8.x.sh
├── python
│ ├── copy_dll_to_trtpy.bat
│ ├── setup.py
│ ├── test_centernet.py
│ ├── test_retinaface.py
│ ├── test_scrfd.py
│ ├── test_torch.py
│ ├── test_yolov5.py
│ ├── test_yolox.py
│ └── trtpy
│ │ └── __init__.py
├── src
│ ├── application
│ │ ├── app_alphapose.cpp
│ │ ├── app_alphapose
│ │ │ ├── alpha_pose.cpp
│ │ │ └── alpha_pose.hpp
│ │ ├── app_arcface.cpp
│ │ ├── app_arcface
│ │ │ ├── arcface.cpp
│ │ │ └── arcface.hpp
│ │ ├── app_cat.cpp
│ │ ├── app_cat
│ │ │ ├── yolo.cpp
│ │ │ ├── yolo.hpp
│ │ │ └── yolo_decode.cu
│ │ ├── app_centernet.cpp
│ │ ├── app_centernet
│ │ │ ├── centernet.cpp
│ │ │ ├── centernet.hpp
│ │ │ └── centernet_decode.cu
│ │ ├── app_dbface.cpp
│ │ ├── app_dbface
│ │ │ ├── dbface.cpp
│ │ │ ├── dbface.hpp
│ │ │ └── dbface_decode.cu
│ │ ├── app_fall_gcn
│ │ │ ├── fall_gcn.cpp
│ │ │ └── fall_gcn.hpp
│ │ ├── app_fall_recognize.cpp
│ │ ├── app_fasterrcnn.cpp
│ │ ├── app_fasterrcnn
│ │ │ ├── fasterrcnn.cpp
│ │ │ ├── fasterrcnn.hpp
│ │ │ ├── fasterrcnn_decode.cu
│ │ │ └── roialign.cu
│ │ ├── app_high_performance.cpp
│ │ ├── app_high_performance
│ │ │ ├── alpha_pose_high_perf.cpp
│ │ │ ├── alpha_pose_high_perf.hpp
│ │ │ ├── high_performance.cpp
│ │ │ ├── high_performance.hpp
│ │ │ ├── yolo_high_perf.cpp
│ │ │ └── yolo_high_perf.hpp
│ │ ├── app_lesson.cpp
│ │ ├── app_plugin.cpp
│ │ ├── app_python
│ │ │ └── interface.cpp
│ │ ├── app_retinaface.cpp
│ │ ├── app_retinaface
│ │ │ ├── retinaface.cpp
│ │ │ ├── retinaface.hpp
│ │ │ └── retinaface_decode.cu
│ │ ├── app_scrfd.cpp
│ │ ├── app_scrfd
│ │ │ ├── scrfd.cpp
│ │ │ ├── scrfd.hpp
│ │ │ └── scrfd_decode.cu
│ │ ├── app_shufflenet
│ │ │ ├── shufflenetv2.cpp
│ │ │ └── shufflenetv2.hpp
│ │ ├── app_shufflenetv2.cpp
│ │ ├── app_yolo.cpp
│ │ ├── app_yolo
│ │ │ ├── yolo.cpp
│ │ │ ├── yolo.hpp
│ │ │ └── yolo_decode.cu
│ │ ├── app_yolo_fast.cpp
│ │ ├── app_yolo_fast
│ │ │ ├── yolo_fast.cpp
│ │ │ ├── yolo_fast.hpp
│ │ │ ├── yolov5_decode.cu
│ │ │ └── yolox_decode.cu
│ │ ├── common
│ │ │ ├── face_detector.hpp
│ │ │ └── object_detector.hpp
│ │ └── tools
│ │ │ ├── Eigen
│ │ │ ├── CMakeLists.txt
│ │ │ ├── Cholesky
│ │ │ ├── CholmodSupport
│ │ │ ├── Core
│ │ │ ├── Dense
│ │ │ ├── Eigen
│ │ │ ├── Eigenvalues
│ │ │ ├── Geometry
│ │ │ ├── Householder
│ │ │ ├── IterativeLinearSolvers
│ │ │ ├── Jacobi
│ │ │ ├── LU
│ │ │ ├── MetisSupport
│ │ │ ├── OrderingMethods
│ │ │ ├── PaStiXSupport
│ │ │ ├── PardisoSupport
│ │ │ ├── QR
│ │ │ ├── QtAlignedMalloc
│ │ │ ├── SPQRSupport
│ │ │ ├── SVD
│ │ │ ├── Sparse
│ │ │ ├── SparseCholesky
│ │ │ ├── SparseCore
│ │ │ ├── SparseLU
│ │ │ ├── SparseQR
│ │ │ ├── StdDeque
│ │ │ ├── StdList
│ │ │ ├── StdVector
│ │ │ ├── SuperLUSupport
│ │ │ ├── UmfPackSupport
│ │ │ └── src
│ │ │ │ ├── Cholesky
│ │ │ │ ├── LDLT.h
│ │ │ │ ├── LLT.h
│ │ │ │ └── LLT_LAPACKE.h
│ │ │ │ ├── CholmodSupport
│ │ │ │ └── CholmodSupport.h
│ │ │ │ ├── Core
│ │ │ │ ├── Array.h
│ │ │ │ ├── ArrayBase.h
│ │ │ │ ├── ArrayWrapper.h
│ │ │ │ ├── Assign.h
│ │ │ │ ├── AssignEvaluator.h
│ │ │ │ ├── Assign_MKL.h
│ │ │ │ ├── BandMatrix.h
│ │ │ │ ├── Block.h
│ │ │ │ ├── BooleanRedux.h
│ │ │ │ ├── CommaInitializer.h
│ │ │ │ ├── ConditionEstimator.h
│ │ │ │ ├── CoreEvaluators.h
│ │ │ │ ├── CoreIterators.h
│ │ │ │ ├── CwiseBinaryOp.h
│ │ │ │ ├── CwiseNullaryOp.h
│ │ │ │ ├── CwiseTernaryOp.h
│ │ │ │ ├── CwiseUnaryOp.h
│ │ │ │ ├── CwiseUnaryView.h
│ │ │ │ ├── DenseBase.h
│ │ │ │ ├── DenseCoeffsBase.h
│ │ │ │ ├── DenseStorage.h
│ │ │ │ ├── Diagonal.h
│ │ │ │ ├── DiagonalMatrix.h
│ │ │ │ ├── DiagonalProduct.h
│ │ │ │ ├── Dot.h
│ │ │ │ ├── EigenBase.h
│ │ │ │ ├── ForceAlignedAccess.h
│ │ │ │ ├── Fuzzy.h
│ │ │ │ ├── GeneralProduct.h
│ │ │ │ ├── GenericPacketMath.h
│ │ │ │ ├── GlobalFunctions.h
│ │ │ │ ├── IO.h
│ │ │ │ ├── Inverse.h
│ │ │ │ ├── Map.h
│ │ │ │ ├── MapBase.h
│ │ │ │ ├── MathFunctions.h
│ │ │ │ ├── MathFunctionsImpl.h
│ │ │ │ ├── Matrix.h
│ │ │ │ ├── MatrixBase.h
│ │ │ │ ├── NestByValue.h
│ │ │ │ ├── NoAlias.h
│ │ │ │ ├── NumTraits.h
│ │ │ │ ├── PermutationMatrix.h
│ │ │ │ ├── PlainObjectBase.h
│ │ │ │ ├── Product.h
│ │ │ │ ├── ProductEvaluators.h
│ │ │ │ ├── Random.h
│ │ │ │ ├── Redux.h
│ │ │ │ ├── Ref.h
│ │ │ │ ├── Replicate.h
│ │ │ │ ├── ReturnByValue.h
│ │ │ │ ├── Reverse.h
│ │ │ │ ├── Select.h
│ │ │ │ ├── SelfAdjointView.h
│ │ │ │ ├── SelfCwiseBinaryOp.h
│ │ │ │ ├── Solve.h
│ │ │ │ ├── SolveTriangular.h
│ │ │ │ ├── SolverBase.h
│ │ │ │ ├── StableNorm.h
│ │ │ │ ├── Stride.h
│ │ │ │ ├── Swap.h
│ │ │ │ ├── Transpose.h
│ │ │ │ ├── Transpositions.h
│ │ │ │ ├── TriangularMatrix.h
│ │ │ │ ├── VectorBlock.h
│ │ │ │ ├── VectorwiseOp.h
│ │ │ │ ├── Visitor.h
│ │ │ │ ├── arch
│ │ │ │ │ ├── AVX
│ │ │ │ │ │ ├── Complex.h
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ ├── PacketMath.h
│ │ │ │ │ │ └── TypeCasting.h
│ │ │ │ │ ├── AVX512
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ └── PacketMath.h
│ │ │ │ │ ├── AltiVec
│ │ │ │ │ │ ├── Complex.h
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ └── PacketMath.h
│ │ │ │ │ ├── CUDA
│ │ │ │ │ │ ├── Complex.h
│ │ │ │ │ │ ├── Half.h
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ ├── PacketMath.h
│ │ │ │ │ │ ├── PacketMathHalf.h
│ │ │ │ │ │ └── TypeCasting.h
│ │ │ │ │ ├── Default
│ │ │ │ │ │ └── Settings.h
│ │ │ │ │ ├── NEON
│ │ │ │ │ │ ├── Complex.h
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ └── PacketMath.h
│ │ │ │ │ ├── SSE
│ │ │ │ │ │ ├── Complex.h
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ ├── PacketMath.h
│ │ │ │ │ │ └── TypeCasting.h
│ │ │ │ │ └── ZVector
│ │ │ │ │ │ ├── Complex.h
│ │ │ │ │ │ ├── MathFunctions.h
│ │ │ │ │ │ └── PacketMath.h
│ │ │ │ ├── functors
│ │ │ │ │ ├── AssignmentFunctors.h
│ │ │ │ │ ├── BinaryFunctors.h
│ │ │ │ │ ├── NullaryFunctors.h
│ │ │ │ │ ├── StlFunctors.h
│ │ │ │ │ ├── TernaryFunctors.h
│ │ │ │ │ └── UnaryFunctors.h
│ │ │ │ ├── products
│ │ │ │ │ ├── GeneralBlockPanelKernel.h
│ │ │ │ │ ├── GeneralMatrixMatrix.h
│ │ │ │ │ ├── GeneralMatrixMatrixTriangular.h
│ │ │ │ │ ├── GeneralMatrixMatrixTriangular_BLAS.h
│ │ │ │ │ ├── GeneralMatrixMatrix_BLAS.h
│ │ │ │ │ ├── GeneralMatrixVector.h
│ │ │ │ │ ├── GeneralMatrixVector_BLAS.h
│ │ │ │ │ ├── Parallelizer.h
│ │ │ │ │ ├── SelfadjointMatrixMatrix.h
│ │ │ │ │ ├── SelfadjointMatrixMatrix_BLAS.h
│ │ │ │ │ ├── SelfadjointMatrixVector.h
│ │ │ │ │ ├── SelfadjointMatrixVector_BLAS.h
│ │ │ │ │ ├── SelfadjointProduct.h
│ │ │ │ │ ├── SelfadjointRank2Update.h
│ │ │ │ │ ├── TriangularMatrixMatrix.h
│ │ │ │ │ ├── TriangularMatrixMatrix_BLAS.h
│ │ │ │ │ ├── TriangularMatrixVector.h
│ │ │ │ │ ├── TriangularMatrixVector_BLAS.h
│ │ │ │ │ ├── TriangularSolverMatrix.h
│ │ │ │ │ ├── TriangularSolverMatrix_BLAS.h
│ │ │ │ │ └── TriangularSolverVector.h
│ │ │ │ └── util
│ │ │ │ │ ├── BlasUtil.h
│ │ │ │ │ ├── Constants.h
│ │ │ │ │ ├── DisableStupidWarnings.h
│ │ │ │ │ ├── ForwardDeclarations.h
│ │ │ │ │ ├── MKL_support.h
│ │ │ │ │ ├── Macros.h
│ │ │ │ │ ├── Memory.h
│ │ │ │ │ ├── Meta.h
│ │ │ │ │ ├── NonMPL2.h
│ │ │ │ │ ├── ReenableStupidWarnings.h
│ │ │ │ │ ├── StaticAssert.h
│ │ │ │ │ └── XprHelper.h
│ │ │ │ ├── Eigenvalues
│ │ │ │ ├── ComplexEigenSolver.h
│ │ │ │ ├── ComplexSchur.h
│ │ │ │ ├── ComplexSchur_LAPACKE.h
│ │ │ │ ├── EigenSolver.h
│ │ │ │ ├── GeneralizedEigenSolver.h
│ │ │ │ ├── GeneralizedSelfAdjointEigenSolver.h
│ │ │ │ ├── HessenbergDecomposition.h
│ │ │ │ ├── MatrixBaseEigenvalues.h
│ │ │ │ ├── RealQZ.h
│ │ │ │ ├── RealSchur.h
│ │ │ │ ├── RealSchur_LAPACKE.h
│ │ │ │ ├── SelfAdjointEigenSolver.h
│ │ │ │ ├── SelfAdjointEigenSolver_LAPACKE.h
│ │ │ │ └── Tridiagonalization.h
│ │ │ │ ├── Geometry
│ │ │ │ ├── AlignedBox.h
│ │ │ │ ├── AngleAxis.h
│ │ │ │ ├── EulerAngles.h
│ │ │ │ ├── Homogeneous.h
│ │ │ │ ├── Hyperplane.h
│ │ │ │ ├── OrthoMethods.h
│ │ │ │ ├── ParametrizedLine.h
│ │ │ │ ├── Quaternion.h
│ │ │ │ ├── Rotation2D.h
│ │ │ │ ├── RotationBase.h
│ │ │ │ ├── Scaling.h
│ │ │ │ ├── Transform.h
│ │ │ │ ├── Translation.h
│ │ │ │ ├── Umeyama.h
│ │ │ │ └── arch
│ │ │ │ │ └── Geometry_SSE.h
│ │ │ │ ├── Householder
│ │ │ │ ├── BlockHouseholder.h
│ │ │ │ ├── Householder.h
│ │ │ │ └── HouseholderSequence.h
│ │ │ │ ├── IterativeLinearSolvers
│ │ │ │ ├── BasicPreconditioners.h
│ │ │ │ ├── BiCGSTAB.h
│ │ │ │ ├── ConjugateGradient.h
│ │ │ │ ├── IncompleteCholesky.h
│ │ │ │ ├── IncompleteLUT.h
│ │ │ │ ├── IterativeSolverBase.h
│ │ │ │ ├── LeastSquareConjugateGradient.h
│ │ │ │ └── SolveWithGuess.h
│ │ │ │ ├── Jacobi
│ │ │ │ └── Jacobi.h
│ │ │ │ ├── LU
│ │ │ │ ├── Determinant.h
│ │ │ │ ├── FullPivLU.h
│ │ │ │ ├── InverseImpl.h
│ │ │ │ ├── PartialPivLU.h
│ │ │ │ ├── PartialPivLU_LAPACKE.h
│ │ │ │ └── arch
│ │ │ │ │ └── Inverse_SSE.h
│ │ │ │ ├── MetisSupport
│ │ │ │ └── MetisSupport.h
│ │ │ │ ├── OrderingMethods
│ │ │ │ ├── Amd.h
│ │ │ │ ├── Eigen_Colamd.h
│ │ │ │ └── Ordering.h
│ │ │ │ ├── PaStiXSupport
│ │ │ │ └── PaStiXSupport.h
│ │ │ │ ├── PardisoSupport
│ │ │ │ └── PardisoSupport.h
│ │ │ │ ├── QR
│ │ │ │ ├── ColPivHouseholderQR.h
│ │ │ │ ├── ColPivHouseholderQR_LAPACKE.h
│ │ │ │ ├── CompleteOrthogonalDecomposition.h
│ │ │ │ ├── FullPivHouseholderQR.h
│ │ │ │ ├── HouseholderQR.h
│ │ │ │ └── HouseholderQR_LAPACKE.h
│ │ │ │ ├── SPQRSupport
│ │ │ │ └── SuiteSparseQRSupport.h
│ │ │ │ ├── SVD
│ │ │ │ ├── BDCSVD.h
│ │ │ │ ├── JacobiSVD.h
│ │ │ │ ├── JacobiSVD_LAPACKE.h
│ │ │ │ ├── SVDBase.h
│ │ │ │ └── UpperBidiagonalization.h
│ │ │ │ ├── SparseCholesky
│ │ │ │ ├── SimplicialCholesky.h
│ │ │ │ └── SimplicialCholesky_impl.h
│ │ │ │ ├── SparseCore
│ │ │ │ ├── AmbiVector.h
│ │ │ │ ├── CompressedStorage.h
│ │ │ │ ├── ConservativeSparseSparseProduct.h
│ │ │ │ ├── MappedSparseMatrix.h
│ │ │ │ ├── SparseAssign.h
│ │ │ │ ├── SparseBlock.h
│ │ │ │ ├── SparseColEtree.h
│ │ │ │ ├── SparseCompressedBase.h
│ │ │ │ ├── SparseCwiseBinaryOp.h
│ │ │ │ ├── SparseCwiseUnaryOp.h
│ │ │ │ ├── SparseDenseProduct.h
│ │ │ │ ├── SparseDiagonalProduct.h
│ │ │ │ ├── SparseDot.h
│ │ │ │ ├── SparseFuzzy.h
│ │ │ │ ├── SparseMap.h
│ │ │ │ ├── SparseMatrix.h
│ │ │ │ ├── SparseMatrixBase.h
│ │ │ │ ├── SparsePermutation.h
│ │ │ │ ├── SparseProduct.h
│ │ │ │ ├── SparseRedux.h
│ │ │ │ ├── SparseRef.h
│ │ │ │ ├── SparseSelfAdjointView.h
│ │ │ │ ├── SparseSolverBase.h
│ │ │ │ ├── SparseSparseProductWithPruning.h
│ │ │ │ ├── SparseTranspose.h
│ │ │ │ ├── SparseTriangularView.h
│ │ │ │ ├── SparseUtil.h
│ │ │ │ ├── SparseVector.h
│ │ │ │ ├── SparseView.h
│ │ │ │ └── TriangularSolver.h
│ │ │ │ ├── SparseLU
│ │ │ │ ├── SparseLU.h
│ │ │ │ ├── SparseLUImpl.h
│ │ │ │ ├── SparseLU_Memory.h
│ │ │ │ ├── SparseLU_Structs.h
│ │ │ │ ├── SparseLU_SupernodalMatrix.h
│ │ │ │ ├── SparseLU_Utils.h
│ │ │ │ ├── SparseLU_column_bmod.h
│ │ │ │ ├── SparseLU_column_dfs.h
│ │ │ │ ├── SparseLU_copy_to_ucol.h
│ │ │ │ ├── SparseLU_gemm_kernel.h
│ │ │ │ ├── SparseLU_heap_relax_snode.h
│ │ │ │ ├── SparseLU_kernel_bmod.h
│ │ │ │ ├── SparseLU_panel_bmod.h
│ │ │ │ ├── SparseLU_panel_dfs.h
│ │ │ │ ├── SparseLU_pivotL.h
│ │ │ │ ├── SparseLU_pruneL.h
│ │ │ │ └── SparseLU_relax_snode.h
│ │ │ │ ├── SparseQR
│ │ │ │ └── SparseQR.h
│ │ │ │ ├── StlSupport
│ │ │ │ ├── StdDeque.h
│ │ │ │ ├── StdList.h
│ │ │ │ ├── StdVector.h
│ │ │ │ └── details.h
│ │ │ │ ├── SuperLUSupport
│ │ │ │ └── SuperLUSupport.h
│ │ │ │ ├── UmfPackSupport
│ │ │ │ └── UmfPackSupport.h
│ │ │ │ ├── misc
│ │ │ │ ├── Image.h
│ │ │ │ ├── Kernel.h
│ │ │ │ ├── RealSvd2x2.h
│ │ │ │ ├── blas.h
│ │ │ │ ├── lapack.h
│ │ │ │ ├── lapacke.h
│ │ │ │ └── lapacke_mangling.h
│ │ │ │ └── plugins
│ │ │ │ ├── ArrayCwiseBinaryOps.h
│ │ │ │ ├── ArrayCwiseUnaryOps.h
│ │ │ │ ├── BlockMethods.h
│ │ │ │ ├── CommonCwiseBinaryOps.h
│ │ │ │ ├── CommonCwiseUnaryOps.h
│ │ │ │ ├── MatrixCwiseBinaryOps.h
│ │ │ │ └── MatrixCwiseUnaryOps.h
│ │ │ ├── auto_download.cpp
│ │ │ ├── deepsort.cpp
│ │ │ ├── deepsort.hpp
│ │ │ ├── pybind11.hpp
│ │ │ ├── zmq_remote_show.cpp
│ │ │ ├── zmq_remote_show.hpp
│ │ │ ├── zmq_u.cpp
│ │ │ └── zmq_u.hpp
│ ├── main.cpp
│ └── tensorRT
│ │ ├── builder
│ │ ├── trt_builder.cpp
│ │ └── trt_builder.hpp
│ │ ├── common
│ │ ├── cuda_tools.cpp
│ │ ├── cuda_tools.hpp
│ │ ├── ilogger.cpp
│ │ ├── ilogger.hpp
│ │ ├── infer_controller.hpp
│ │ ├── json.cpp
│ │ ├── json.hpp
│ │ ├── monopoly_allocator.hpp
│ │ ├── preprocess_kernel.cu
│ │ ├── preprocess_kernel.cuh
│ │ ├── trt_tensor.cpp
│ │ └── trt_tensor.hpp
│ │ ├── import_lib.cpp
│ │ ├── infer
│ │ ├── trt_infer.cpp
│ │ └── trt_infer.hpp
│ │ ├── onnx
│ │ ├── onnx-ml.pb.cpp
│ │ ├── onnx-ml.pb.h
│ │ ├── onnx-operators-ml.pb.cpp
│ │ ├── onnx-operators-ml.pb.h
│ │ ├── onnx_pb.h
│ │ ├── onnxifi.h
│ │ └── readme.md
│ │ ├── onnx_parser
│ │ ├── ImporterContext.hpp
│ │ ├── LoopHelpers.cpp
│ │ ├── LoopHelpers.hpp
│ │ ├── ModelImporter.cpp
│ │ ├── ModelImporter.hpp
│ │ ├── NvOnnxParser.cpp
│ │ ├── NvOnnxParser.h
│ │ ├── OnnxAttrs.cpp
│ │ ├── OnnxAttrs.hpp
│ │ ├── RNNHelpers.cpp
│ │ ├── RNNHelpers.hpp
│ │ ├── ShapeTensor.cpp
│ │ ├── ShapeTensor.hpp
│ │ ├── ShapedWeights.cpp
│ │ ├── ShapedWeights.hpp
│ │ ├── Status.hpp
│ │ ├── TensorOrWeights.hpp
│ │ ├── builtin_op_importers.cpp
│ │ ├── builtin_op_importers.hpp
│ │ ├── onnx2trt.hpp
│ │ ├── onnx2trt_common.hpp
│ │ ├── onnx2trt_runtime.hpp
│ │ ├── onnx2trt_utils.cpp
│ │ ├── onnx2trt_utils.hpp
│ │ ├── onnxErrorRecorder.cpp
│ │ ├── onnxErrorRecorder.hpp
│ │ ├── onnx_utils.hpp
│ │ ├── readme.md
│ │ ├── toposort.hpp
│ │ ├── trt_utils.hpp
│ │ └── utils.hpp
│ │ └── onnxplugin
│ │ ├── onnxplugin.cpp
│ │ ├── onnxplugin.hpp
│ │ ├── plugin_binary_io.cpp
│ │ ├── plugin_binary_io.hpp
│ │ └── plugins
│ │ ├── DCNv2.cu
│ │ ├── HSigmoid.cu
│ │ └── HSwish.cu
├── tools
│ └── show.py
├── tutorial
│ ├── 1.0framework
│ │ ├── README.md
│ │ ├── app_yolo.cpp
│ │ ├── infer_controller.hpp
│ │ ├── inference
│ │ │ ├── car.jpg
│ │ │ ├── f3_0.jpg
│ │ │ ├── f3_2.jpg
│ │ │ ├── f3_21.jpg
│ │ │ ├── f3_210.jpg
│ │ │ ├── f3_211.jpg
│ │ │ ├── f3_212.jpg
│ │ │ ├── f3_213.jpg
│ │ │ ├── f3_214.jpg
│ │ │ ├── f3_215.jpg
│ │ │ ├── f3_216.jpg
│ │ │ ├── f3_217.jpg
│ │ │ ├── f3_218.jpg
│ │ │ ├── f3_219.jpg
│ │ │ ├── f3_22.jpg
│ │ │ ├── f3_220.jpg
│ │ │ ├── f3_221.jpg
│ │ │ ├── f3_222.jpg
│ │ │ ├── f3_223.jpg
│ │ │ ├── f3_224.jpg
│ │ │ ├── f3_225.jpg
│ │ │ ├── f3_226.jpg
│ │ │ ├── f3_227.jpg
│ │ │ ├── f3_228.jpg
│ │ │ ├── f3_229.jpg
│ │ │ ├── f3_27.jpg
│ │ │ ├── f3_270.jpg
│ │ │ ├── f3_271.jpg
│ │ │ ├── f3_272.jpg
│ │ │ ├── f3_273.jpg
│ │ │ ├── f3_274.jpg
│ │ │ ├── f3_275.jpg
│ │ │ ├── f3_276.jpg
│ │ │ ├── f3_277.jpg
│ │ │ ├── f3_278.jpg
│ │ │ ├── f3_279.jpg
│ │ │ ├── f3_29.jpg
│ │ │ ├── f3_290.jpg
│ │ │ ├── f3_291.jpg
│ │ │ ├── f3_292.jpg
│ │ │ ├── f3_293.jpg
│ │ │ ├── f3_294.jpg
│ │ │ ├── f3_295.jpg
│ │ │ ├── f3_296.jpg
│ │ │ ├── f3_297.jpg
│ │ │ ├── f3_298.jpg
│ │ │ ├── f3_299.jpg
│ │ │ ├── gril.jpg
│ │ │ ├── group.jpg
│ │ │ ├── show.jpg
│ │ │ ├── torch_affine.jpg
│ │ │ ├── trt_affine.jpg
│ │ │ ├── yq.jpg
│ │ │ ├── zand.jpg
│ │ │ ├── zand_copy.jpg
│ │ │ └── zgjr.jpg
│ │ ├── pipeline.goodnotes
│ │ ├── pipeline.jpg
│ │ ├── pipeline.pdf
│ │ ├── yolo.cpp
│ │ ├── yolo.hpp
│ │ └── yolo_decode.cu
│ └── 2.0CenterNet_from_torch_trt
│ │ ├── 0_to_1_python_to_cuda
│ │ ├── cpp_cuda_centernet
│ │ │ ├── .vscode
│ │ │ │ ├── c_cpp_properties.json
│ │ │ │ ├── launch.json
│ │ │ │ ├── settings.json
│ │ │ │ └── tasks.json
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── src
│ │ │ │ ├── app_centernet.cpp
│ │ │ │ ├── centernet.cpp
│ │ │ │ ├── centernet.hpp
│ │ │ │ ├── centernet_decode.cu
│ │ │ │ ├── comments.jpg
│ │ │ │ ├── cpp_utils.hpp
│ │ │ │ ├── cuda_nms.jpg
│ │ │ │ ├── cuda_utils.cuh
│ │ │ │ └── main.cpp
│ │ │ └── workspace
│ │ │ │ ├── affined_result_cpp.jpg
│ │ │ │ ├── car.jpg
│ │ │ │ ├── check_preprocessed.ipynb
│ │ │ │ ├── final_result_cpp.jpg
│ │ │ │ ├── final_result_cuda.jpg
│ │ │ │ └── test.jpg
│ │ ├── model_output
│ │ ├── pre_post_processing.ipynb
│ │ └── truck_and_person.jpg
│ │ ├── 5_outputs.jpg
│ │ ├── DCNmodule_res18.png
│ │ ├── README.md
│ │ ├── README.zh-cn.md
│ │ ├── all_in_one_output.jpg
│ │ ├── comments.jpg
│ │ ├── cuda_nms.jpg
│ │ ├── dcn_v2.py
│ │ ├── demo.py
│ │ ├── export2onnx.py
│ │ └── resnet_dcn.py
└── workspace
│ ├── cvt_dbface.py
│ ├── exp
│ ├── face_tracker.mp4
│ └── fall_video.mp4
│ ├── face
│ ├── library
│ │ └── 2ys2.jpg
│ └── recognize
│ │ ├── 2ys1.jpg
│ │ ├── 2ys3.jpg
│ │ └── 2ys5.jpg
│ ├── inference
│ ├── car.jpg
│ ├── cat.jpg
│ ├── dog_cat.png
│ ├── gril.jpg
│ ├── group.jpg
│ ├── yq.jpg
│ ├── zand.jpg
│ └── zgjr.jpg
│ ├── inference_fasterrcnn
│ ├── car.jpg
│ ├── cat.jpg
│ ├── dog_cat.png
│ ├── gril.jpg
│ ├── group.jpg
│ ├── yq.jpg
│ ├── zand.jpg
│ └── zgjr.jpg
│ ├── labels.imagenet.txt
│ ├── lesson1.py
│ ├── makesure_input.py
│ ├── my-yoloxs-car.jpg
│ ├── perf.result.std.log
│ ├── pose.show.jpg
│ ├── python_test.ipynb
│ ├── test.jpg
│ ├── test_dcnv2.py
│ ├── test_hswish.py
│ ├── wget.exe
│ └── yq.jpg
└── test
├── car.jpg
├── x01export_FasterRCNN_onnx.py
├── x02test_FasterRCNN_onnx.py
├── x03extract_RPN.py
├── x04testRPNonnx.py
├── x05extract_ROIHeader.py
├── x06reduceRpnOnnx.py
├── x07reduce_header_onnx.py
└── x08test_header_onnx.py
/fpn.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/fpn.pdf
--------------------------------------------------------------------------------
/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .faster_rcnn import *
2 |
--------------------------------------------------------------------------------
/model/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/anchor_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/anchor_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/backbone_utils.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/backbone_utils.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/faster_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/faster_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/feature_pyramid_network.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/feature_pyramid_network.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/generalized_rcnn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/generalized_rcnn.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/image_list.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/image_list.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/roi_heads.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/roi_heads.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/rpn.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/rpn.cpython-37.pyc
--------------------------------------------------------------------------------
/model/__pycache__/transform.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/__pycache__/transform.cpython-37.pyc
--------------------------------------------------------------------------------
/model/image_list.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 | from torch.jit.annotations import List, Tuple
4 | from torch import Tensor
5 |
6 |
7 | class ImageList(object):
8 |     """
9 |     Structure that holds a list of images (of possibly
10 |     varying sizes) as a single tensor.
11 |     This works by padding the images to the same size,
12 |     and storing in a field the original sizes of each image
13 |     """
14 |
15 |     def __init__(self, tensors, image_sizes):
16 |         # type: (Tensor, List[Tuple[int, int]]) -> None
17 |         """
18 |         Arguments:
19 |             tensors (tensor)
20 |             image_sizes (list[tuple[int, int]])
21 |         """
22 |         self.tensors = tensors  # one padded tensor holding the whole batch
23 |         self.image_sizes = image_sizes  # original size of each image before padding
24 |
25 |     def to(self, device):
26 |         # type: (Device) -> ImageList # noqa
27 |         cast_tensor = self.tensors.to(device)  # only the tensor moves; the size list holds plain ints
28 |         return ImageList(cast_tensor, self.image_sizes)  # note: image_sizes list is shared, not copied
--------------------------------------------------------------------------------
/model/ops/__pycache__/poolers.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/model/ops/__pycache__/poolers.cpython-37.pyc
--------------------------------------------------------------------------------
/readme.assets/image-20211216112059762.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/readme.assets/image-20211216112059762.png
--------------------------------------------------------------------------------
/readme.assets/image-20211216112509835.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/readme.assets/image-20211216112509835.png
--------------------------------------------------------------------------------
/readme.assets/image-20211216112526108.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/readme.assets/image-20211216112526108.png
--------------------------------------------------------------------------------
/tensorrt_code/.gitignore:
--------------------------------------------------------------------------------
1 | #
2 | tutorial/2.0CenterNet_from_torch_trt/0_to_1_python_to_cuda/cpp_cuda_centernet/src/tensorRT
3 | tutorial/2.0CenterNet_from_torch_trt/0_to_1_python_to_cuda/cpp_cuda_centernet/objs
4 | tutorial/2.0CenterNet_from_torch_trt/0_to_1_python_to_cuda/cpp_cuda_centernet/workspace/pro
5 |
6 | # compressed files
7 | *.tar.gz
8 | *.zip
9 |
10 | # temp tensor and data
11 | *.tensor
12 | *.data
13 |
14 |
15 | # Prerequisites
16 | *.d
17 |
18 | # Compiled Object files
19 | *.slo
20 | *.lo
21 | *.o
22 | *.obj
23 |
24 | # Precompiled Headers
25 | *.gch
26 | *.pch
27 |
28 | # Compiled Dynamic libraries
29 | *.so
30 | *.dylib
31 | *.dll
32 |
33 | # Fortran module files
34 | *.mod
35 | *.smod
36 |
37 | # Compiled Static libraries
38 | *.lai
39 | *.la
40 | *.a
41 | *.lib
42 |
43 | # Executables
44 | *.exe
45 | *.out
46 | *.app
47 |
48 | /objs
49 |
50 | *.trtmodel
51 | *.onnx
52 | /workspace/pro
53 | /build
54 | /workspace/*.avi
55 | /workspace/.ipynb_checkpoints
56 | /workspace/*_result
57 | /workspace/face/library_draw
58 | /workspace/face/result
59 | /workspace/face/library/laq.jpg
60 | __pycache__
61 | /tools/process_so.sh
62 | /tools/proc2.sh
63 | /python/trtpy.egg-info
64 | /python/dist
65 | /python/build
66 | /workspace/formtest.ipynb
67 | /workspace/meta.json
68 | /.vs
69 | *.pyd
70 | *.zip
71 | *.pdb
72 | *.ilk
73 | *.lib
74 | *.exp
75 |
76 | /lean/cuda10.1
77 | /lean/cudnn8.2.2.26
78 | /lean/opencv3.4.6
79 | /lean/protobuf3.11.4
80 | /lean/TensorRT-8.0.1.6
81 |
82 | __pycache__
83 |
84 | !/workspace/wget.exe
85 | /workspace/*.mp4
86 | /workspace/single_inference
87 | /workspace/exp/tracker.final.mp4
88 | /workspace/perf.result.log
89 |
--------------------------------------------------------------------------------
/tensorrt_code/TensorRT.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio 15
4 | VisualStudioVersion = 15.0.28307.136
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "TensorRT", "TensorRT.vcxproj", "{FBF775F5-DAB4-4BC1-97A9-D36301073438}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|x64 = Debug|x64
11 | Python|x64 = Python|x64
12 | Release|x64 = Release|x64
13 | EndGlobalSection
14 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
15 | {FBF775F5-DAB4-4BC1-97A9-D36301073438}.Debug|x64.ActiveCfg = Debug|x64
16 | {FBF775F5-DAB4-4BC1-97A9-D36301073438}.Debug|x64.Build.0 = Debug|x64
17 | {FBF775F5-DAB4-4BC1-97A9-D36301073438}.Python|x64.ActiveCfg = Python|x64
18 | {FBF775F5-DAB4-4BC1-97A9-D36301073438}.Python|x64.Build.0 = Python|x64
19 | {FBF775F5-DAB4-4BC1-97A9-D36301073438}.Release|x64.ActiveCfg = Release|x64
20 | {FBF775F5-DAB4-4BC1-97A9-D36301073438}.Release|x64.Build.0 = Release|x64
21 | EndGlobalSection
22 | GlobalSection(SolutionProperties) = preSolution
23 | HideSolutionNode = FALSE
24 | EndGlobalSection
25 | GlobalSection(ExtensibilityGlobals) = postSolution
26 | SolutionGuid = {679F35F0-20AA-4D18-8610-D369E2BE97E8}
27 | EndGlobalSection
28 | EndGlobal
29 |
--------------------------------------------------------------------------------
/tensorrt_code/TensorRT.vcxproj.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | workspace
5 | PATH=$(projectDir)lean/cuda10.1/bin;$(projectDir)lean/opencv3.4.6/lib;$(projectDir)lean/cudnn8.2.2.26;$(projectDir)lean/TensorRT-8.0.1.6/lib
6 | WindowsLocalDebugger
7 | true
8 |
9 |
10 | workspace
11 | PATH=$(projectDir)lean/cuda10.1/bin;$(projectDir)lean/opencv3.4.6/lib;$(projectDir)lean/cudnn8.2.2.26;$(projectDir)lean/TensorRT-8.0.1.6/lib
12 | WindowsLocalDebugger
13 | true
14 |
15 |
16 | WindowsLocalDebugger
17 | PATH=$(projectDir)lean/cuda10.1/bin;$(projectDir)lean/opencv3.4.6/lib;$(projectDir)lean/cudnn8.2.2.26;$(projectDir)lean/TensorRT-8.0.1.6/lib
18 | workspace
19 |
20 |
--------------------------------------------------------------------------------
/tensorrt_code/YoloTRT_speed.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thb1314/tensorrt-onnx-fasterrcnn-fpn-roialign/b3b8fc8f5c6bf60946ed5e14520a073611cd8aa0/tensorrt_code/YoloTRT_speed.xlsx
--------------------------------------------------------------------------------
/tensorrt_code/dll_export.def:
--------------------------------------------------------------------------------
1 | EXPORTS
2 | PyInit_libtrtpyc
--------------------------------------------------------------------------------
/tensorrt_code/onnx/make_pb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Please change protoc below to the protoc version you want to use
4 | protoc=/data/sxai/lean/protobuf3.11.4/bin/protoc  # hard-coded local path; adjust per machine
5 | #protoc=/data/sxai/temp/protobuf-build3.18.x/bin/protoc
6 |
7 | echo Create directory "pbout"
8 | rm -rf pbout
9 | mkdir -p pbout
10 |
11 | $protoc onnx-ml.proto --cpp_out=pbout
12 | $protoc onnx-operators-ml.proto --cpp_out=pbout
13 |
14 | echo Copy pbout/onnx-ml.pb.cc to ../src/tensorRT/onnx/onnx-ml.pb.cpp
15 | cp pbout/onnx-ml.pb.cc ../src/tensorRT/onnx/onnx-ml.pb.cpp
16 |
17 | echo Copy pbout/onnx-operators-ml.pb.cc to ../src/tensorRT/onnx/onnx-operators-ml.pb.cpp
18 | cp pbout/onnx-operators-ml.pb.cc ../src/tensorRT/onnx/onnx-operators-ml.pb.cpp
19 |
20 | echo Copy pbout/onnx-ml.pb.h to ../src/tensorRT/onnx/onnx-ml.pb.h
21 | cp pbout/onnx-ml.pb.h ../src/tensorRT/onnx/onnx-ml.pb.h
22 |
23 | echo Copy pbout/onnx-operators-ml.pb.h to ../src/tensorRT/onnx/onnx-operators-ml.pb.h
24 | cp pbout/onnx-operators-ml.pb.h ../src/tensorRT/onnx/onnx-operators-ml.pb.h
25 |
26 | echo Remove directory "pbout"
27 | rm -rf pbout
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/LoopHelpers.cpp:
--------------------------------------------------------------------------------
1 | #include "LoopHelpers.hpp"
2 | #include "onnx2trt_utils.hpp"
3 |
4 | namespace onnx2trt
5 | {
6 |
7 | nvinfer1::ITensor* addLoopCounter(IImporterContext* ctx, nvinfer1::ILoop* loop, int32_t initial)
8 | {
9 | nvinfer1::ITensor* initialTensor = addConstantScalar(ctx, initial, ::onnx::TensorProto::INT32, nvinfer1::Dims{1, 1})->getOutput(0);
10 | nvinfer1::ITensor* one = addConstantScalar(ctx, 1, ::onnx::TensorProto::INT32, nvinfer1::Dims{1, 1})->getOutput(0);
11 |
12 | auto counter = loop->addRecurrence(*initialTensor);
13 | nvinfer1::ITensor* addOne = ctx->network()->addElementWise(*counter->getOutput(0), *one, nvinfer1::ElementWiseOperation::kSUM)->getOutput(0);
14 | counter->setInput(1, *addOne);
15 | return counter->getOutput(0);
16 | }
17 |
18 | } // namespace onnx2trt
19 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/LoopHelpers.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 |
5 | #include "ImporterContext.hpp"
6 |
namespace onnx2trt
{

// Adds an INT32 loop counter to `loop` that starts at `initial` (default 0)
// and increments once per loop iteration; returns the current-count tensor.
nvinfer1::ITensor* addLoopCounter(IImporterContext* ctx, nvinfer1::ILoop* loop, int32_t initial = 0);

} // namespace onnx2trt
13 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/NvOnnxParser.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "NvOnnxParser.h"
24 | #include "ModelImporter.hpp"
25 |
26 | extern "C" void* createNvOnnxParser_INTERNAL(void* network_, void* logger_, int version, const std::vector& input_dims)
27 | {
28 | auto network = static_cast(network_);
29 | auto logger = static_cast(logger_);
30 | return new onnx2trt::ModelImporter(network, logger, input_dims);
31 | }
32 |
33 | extern "C" int getNvOnnxParserVersion()
34 | {
35 | return NV_ONNX_PARSER_VERSION;
36 | }
37 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/RNNHelpers.hpp:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 |
7 | #include "TensorOrWeights.hpp"
8 | #include "ImporterContext.hpp"
9 |
namespace onnx2trt
{

// Helpers for importing ONNX RNN/GRU/LSTM nodes as TensorRT loops.
// NOTE(review): angle-bracket arguments (e.g. the std::vector element type)
// were stripped during text extraction; confirm the full signatures against
// the original header.

// Sets up the per-timestep input tensor for an RNN loop body.
nvinfer1::ITensor* addRNNInput(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, std::vector& inputs, const std::string& direction);

// Zeros out invalid timesteps in toMask. maxLen must be provided if reverse is true
nvinfer1::ITensor* clearMissingSequenceElements(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* toMask, nvinfer1::ITensor* maxLen, bool reverse = false, nvinfer1::ITensor* counter = nullptr);

// Returns a bool tensor which is true during valid timesteps
nvinfer1::ITensor* getRaggedMask(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* maxLen = nullptr, bool reverse = false, nvinfer1::ITensor* counter = nullptr);

// Selects between prevH and Ht to forward previous hidden state through invalid timesteps
nvinfer1::ITensor* maskRNNHidden(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* prevH, nvinfer1::ITensor* Ht, nvinfer1::ITensor* maxLen = nullptr, bool reverse = false, nvinfer1::ITensor* counter = nullptr);

// Splits a bidirectional hidden state into forward and reverse passes, masks each using maskRNNHidden, then concatenates
nvinfer1::ITensor* maskBidirRNNHidden(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* maxLen, nvinfer1::ITensor* Ht1, nvinfer1::ITensor* Ht, nvinfer1::ITensor* singlePassShape);

} // namespace onnx2trt
28 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/ShapedWeights.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 |
namespace onnx2trt
{

// Lightweight, non-owning view of a constant tensor (ONNX initializer):
// a dtype tag, a raw data pointer, and a shape. Ownership of `values`
// stays with whoever allocated it.
class ShapedWeights
{
public:
    // ONNX TensorProto::DataType code identifying the element type.
    using DataType = int32_t;
    DataType type;
    void* values;          // non-owning pointer to the raw weight data
    nvinfer1::Dims shape;
    const char* name = nullptr;
    // Zero-element weights object of the given type.
    static ShapedWeights empty(DataType type);
    ShapedWeights();
    explicit ShapedWeights(DataType type, void* values, nvinfer1::Dims shape_);
    size_t count() const;        // element count (implementation not visible here)
    size_t size_bytes() const;   // total payload size in bytes
    const char* getName() const;
    void setName(const char* name);
    explicit operator bool() const;
    // Implicit conversion to the TensorRT weights struct.
    operator nvinfer1::Weights() const;
};

// Presumably writes a permuted copy of `weights` into `result` according to
// `perm` — implementation not visible here; confirm in the .cpp.
bool transposeWeights(ShapedWeights const& weights, nvinfer1::Permutation const& perm, ShapedWeights* result);

} // namespace onnx2trt
53 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/builtin_op_importers.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #pragma once
24 |
25 | #include "onnx2trt.hpp"
26 | #include "utils.hpp"
27 |
namespace onnx2trt
{

// Registry mapping ONNX op names to their importer functions.
// NOTE(review): the template arguments of string_map were stripped during
// text extraction; confirm the full return type against the original header.
string_map& getBuiltinOpImporterMap();

} // namespace onnx2trt
34 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/onnx2trt_runtime.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #pragma once
24 |
25 | #include "onnx2trt_common.hpp"
26 |
namespace onnx2trt
{

// Factory signature for reconstructing a Plugin from its serialized bytes.
typedef Plugin* (*plugin_deserializer)(const void* serialData, size_t serialLength);

} // namespace onnx2trt
33 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_7.x/utils.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 |
// Alias: maps std::string keys to values of the parameter type.
// NOTE(review): the template parameter list and the unordered_map arguments
// were stripped during text extraction (upstream declares
// `template <typename T> using string_map = std::unordered_map<std::string, T>`);
// confirm against the original header before building.
template
using string_map = std::unordered_map;
29 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/LoopHelpers.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #include "LoopHelpers.hpp"
6 | #include "onnx2trt_utils.hpp"
7 |
8 | namespace onnx2trt
9 | {
10 |
11 | nvinfer1::ITensor* addLoopCounter(IImporterContext* ctx, nvinfer1::ILoop* loop, int32_t initial)
12 | {
13 | nvinfer1::ITensor* initialTensor = addConstantScalar(ctx, initial, ::onnx::TensorProto::INT32, nvinfer1::Dims{1, 1})->getOutput(0);
14 | nvinfer1::ITensor* one = addConstantScalar(ctx, 1, ::onnx::TensorProto::INT32, nvinfer1::Dims{1, 1})->getOutput(0);
15 |
16 | auto counter = loop->addRecurrence(*initialTensor);
17 | nvinfer1::ITensor* addOne = ctx->network()->addElementWise(*counter->getOutput(0), *one, nvinfer1::ElementWiseOperation::kSUM)->getOutput(0);
18 | counter->setInput(1, *addOne);
19 | return counter->getOutput(0);
20 | }
21 |
22 | } // namespace onnx2trt
23 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/LoopHelpers.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include
8 |
9 | #include "ImporterContext.hpp"
10 |
namespace onnx2trt
{

// Adds an INT32 loop counter to `loop` that starts at `initial` (default 0)
// and increments once per loop iteration; returns the current-count tensor.
nvinfer1::ITensor* addLoopCounter(IImporterContext* ctx, nvinfer1::ILoop* loop, int32_t initial = 0);

} // namespace onnx2trt
17 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/NvOnnxParser.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #include "NvOnnxParser.h"
6 | #include "ModelImporter.hpp"
7 |
8 | extern "C" void* createNvOnnxParser_INTERNAL(void* network_, void* logger_, int version, const std::vector& input_dims)
9 | {
10 | auto network = static_cast(network_);
11 | auto logger = static_cast(logger_);
12 | return new onnx2trt::ModelImporter(network, logger, input_dims);
13 | }
14 |
15 | extern "C" int getNvOnnxParserVersion()
16 | {
17 | return NV_ONNX_PARSER_VERSION;
18 | }
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/OnnxAttrs.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | #include "ImporterContext.hpp"
13 |
// Convenience wrapper over an ONNX node's attribute list: indexes the
// attributes by name and offers keyed lookup, type queries, and typed
// accessors with optional defaults.
// NOTE(review): the template parameter lists below were stripped during
// text extraction; confirm against the original header before building.
class OnnxAttrs
{
    template
    using string_map = std::unordered_map;
    typedef string_map<::onnx::AttributeProto const*> AttrMap;
    AttrMap _attrs;                   // name -> attribute proto (non-owning)
    onnx2trt::IImporterContext* mCtx; // importer context for typed lookups

public:
    // Indexes every attribute of `onnx_node` by name. The protos are
    // borrowed from the node, which must outlive this object.
    explicit OnnxAttrs(::onnx::NodeProto const& onnx_node, onnx2trt::IImporterContext* ctx)
        : mCtx{ctx}
    {
        for (auto const& attr : onnx_node.attribute())
        {
            _attrs.insert({attr.name(), &attr});
        }
    }

    // True when an attribute with this name exists.
    bool count(const std::string& key) const
    {
        return _attrs.count(key);
    }

    // Returns the named attribute proto; throws std::out_of_range if absent.
    ::onnx::AttributeProto const* at(std::string key) const
    {
        if (!_attrs.count(key))
        {
            throw std::out_of_range("Attribute not found: " + key);
        }
        return _attrs.at(key);
    }

    // Wire type of the named attribute (throws if absent).
    ::onnx::AttributeProto::AttributeType type(const std::string& key) const
    {
        return this->at(key)->type();
    }


    // Typed accessor; the specializations are defined elsewhere.
    template
    T get(const std::string& key) const;

    // Typed accessor returning `default_value` when the attribute is absent.
    template
    T get(const std::string& key, T const& default_value) const
    {
        return _attrs.count(key) ? this->get(key) : default_value;
    }
};
61 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/RNNHelpers.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include
8 | #include
9 | #include
10 |
11 | #include "TensorOrWeights.hpp"
12 | #include "ImporterContext.hpp"
13 |
namespace onnx2trt
{

// Helpers for importing ONNX RNN/GRU/LSTM nodes as TensorRT loops.
// NOTE(review): angle-bracket arguments (e.g. the std::vector element type)
// were stripped during text extraction; confirm the full signatures against
// the original header.

// Sets up the per-timestep input tensor for an RNN loop body.
nvinfer1::ITensor* addRNNInput(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, std::vector& inputs, const std::string& direction);

// Zeros out invalid timesteps in toMask. maxLen must be provided if reverse is true
nvinfer1::ITensor* clearMissingSequenceElements(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* toMask, nvinfer1::ITensor* maxLen, bool reverse = false, nvinfer1::ITensor* counter = nullptr);

// Returns a bool tensor which is true during valid timesteps
nvinfer1::ITensor* getRaggedMask(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* maxLen = nullptr, bool reverse = false, nvinfer1::ITensor* counter = nullptr);

// Selects between prevH and Ht to forward previous hidden state through invalid timesteps
nvinfer1::ITensor* maskRNNHidden(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* prevH, nvinfer1::ITensor* Ht, nvinfer1::ITensor* maxLen = nullptr, bool reverse = false, nvinfer1::ITensor* counter = nullptr);

// Splits a bidirectional hidden state into forward and reverse passes, masks each using maskRNNHidden, then concatenates
nvinfer1::ITensor* maskBidirRNNHidden(IImporterContext* ctx, const ::onnx::NodeProto& node, nvinfer1::ILoop* loop, nvinfer1::ITensor* seqLens, nvinfer1::ITensor* maxLen, nvinfer1::ITensor* Ht1, nvinfer1::ITensor* Ht, nvinfer1::ITensor* singlePassShape);

} // namespace onnx2trt
32 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/ShapedWeights.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include
8 | #include
9 |
namespace onnx2trt
{

// Non-owning view of a constant tensor (ONNX initializer): a dtype tag,
// a raw data pointer, and a shape. Ownership of `values` stays with
// whoever allocated it.
// NOTE(review): the template parameter lists and static_cast arguments
// below were stripped during text extraction; confirm against the
// original header before building.
class ShapedWeights
{
public:
    // ONNX TensorProto::DataType code identifying the element type.
    using DataType = int32_t;

    // Zero-element weights object of the given type.
    static ShapedWeights empty(DataType type);

    ShapedWeights();

    explicit ShapedWeights(DataType type, void* values, nvinfer1::Dims shape_);

    // Element count (implementation not visible here).
    size_t count() const;

    // Total payload size in bytes.
    size_t size_bytes() const;

    const char* getName() const;

    void setName(const char* name);

    explicit operator bool() const;

    // Implicit conversion to the TensorRT weights struct.
    operator nvinfer1::Weights() const;

    // Typed element access; asserts the byte offset stays within the payload.
    template
    T& at(size_t index)
    {
        assert(index >= 0 && (index * sizeof(T)) < size_bytes());
        return static_cast(values)[index];
    }

    template
    const T& at(size_t index) const
    {
        assert(index >= 0 && (index * sizeof(T)) < size_bytes());
        return static_cast(values)[index];
    }

public:
    DataType type;
    void* values;          // non-owning pointer to the raw weight data
    nvinfer1::Dims shape;
    const char* name{};
};

class IImporterContext;
// Presumably writes a permuted copy of `weights` into `result` according to
// `perm` — implementation not visible here; confirm in the .cpp.
bool transposeWeights(ShapedWeights const& weights, nvinfer1::Permutation const& perm, ShapedWeights* result, IImporterContext* ctx);

} // namespace onnx2trt
61 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/builtin_op_importers.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include "onnx2trt.hpp"
8 | #include "utils.hpp"
9 |
namespace onnx2trt
{

// Registry mapping ONNX op names to their importer functions.
// NOTE(review): the template arguments of string_map were stripped during
// text extraction; confirm the full return type against the original header.
string_map& getBuiltinOpImporterMap();

} // namespace onnx2trt
16 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/onnx2trt.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include "NvOnnxParser.h"
8 | #include "ShapedWeights.hpp"
9 | #include "Status.hpp"
10 | #include "TensorOrWeights.hpp"
11 |
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 |
namespace onnx2trt
{

class IImporterContext;

// TODO: Find ABI-safe alternative approach for this:
//   Can't use std::vector
//   Can't use ::onnx::NodeProto
//   Can't use std::function
// NOTE(review): the angle-bracket arguments of the two typedefs below were
// stripped during text extraction; confirm the full types against the
// original header before building.
typedef ValueOrStatus> NodeImportResult;
typedef std::function& inputs)>
    NodeImporter;

template
using StringMap = std::unordered_map;

// Abstract build-time context handed to every op importer. Exposes the
// network being built, name-keyed lookup tables, and weight-allocation
// helpers; concrete implementations live elsewhere in the parser.
class IImporterContext
{
public:
    // Network definition under construction.
    virtual nvinfer1::INetworkDefinition* network() = 0;
    virtual StringMap& tensors() = 0;
    virtual StringMap& tensorLocations() = 0;
    virtual StringMap& tensorRangeMins() = 0;
    virtual StringMap& tensorRangeMaxes() = 0;
    virtual StringMap& layerPrecisions() = 0;
    virtual std::unordered_set& unsupportedShapeTensors() = 0;
    virtual StringMap& loopTensors() = 0;
    // Path of the ONNX model being parsed (used for external-data lookup,
    // presumably — confirm in the implementation).
    virtual void setOnnxFileLocation(std::string location) = 0;
    virtual std::string getOnnxFileLocation() = 0;
    virtual void registerTensor(TensorOrWeights tensor, const std::string& basename) = 0;
    virtual void registerLayer(nvinfer1::ILayer* layer, const std::string& basename) = 0;
    // Allocates scratch weights owned by the context, filled with `value`.
    virtual ShapedWeights createTempWeights(ShapedWeights::DataType type, nvinfer1::Dims shape, uint8_t value = 0) = 0;
    virtual int64_t getOpsetVersion(const char* domain = "") const = 0;
    virtual nvinfer1::ILogger& logger() = 0;
    virtual bool hasError() const = 0;
    virtual nvinfer1::IErrorRecorder* getErrorRecorder() const = 0;

protected:
    // Protected non-virtual-style destructor: contexts are never destroyed
    // through this interface pointer.
    virtual ~IImporterContext()
    {
    }
};

} // namespace onnx2trt
65 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/onnx2trt_common.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include
8 | #include
9 |
// Back-compat shim: TensorRT < 4 lacks PluginFormat/IPluginExt, so they are
// re-declared here for old builds.
#if NV_TENSORRT_MAJOR < 4
namespace nvinfer1
{

enum class PluginFormat : uint8_t
{
    kNCHW = 0,   //!< NCHW
    kNC2HW2 = 1, //!< NCHW with 2-element packed channels
    kNHWC8 = 2   //!< NHWC with 8-element packed channels (C
                 //! must be a multiple of 8)
};
// from NvInfer.h
class IPluginExt : public IPlugin
{
public:
    virtual int getTensorRTVersion() const noexcept
    {
        return NV_TENSORRT_VERSION;
    }
    virtual bool supportsFormat(DataType type, PluginFormat format) const noexcept = 0;
    virtual void configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs,
        DataType type, PluginFormat format, int maxBatchSize) noexcept
        = 0;

protected:
    // Bridges the legacy configure() callback to configureWithFormat(),
    // assuming FLOAT/kLINEAR.
    void configure(
        const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, int maxBatchSize) noexcept final
    {
        try
        {
            DataType type = nvinfer1::DataType::kFLOAT;
            PluginFormat format = nvinfer1::PluginFormat::kLINEAR;
            return this->configureWithFormat(inputDims, nbInputs, outputDims, nbOutputs, type, format, maxBatchSize);
        }
        catch (const std::exception& e)
        {
            // NOTE(review): e.what() returns const char*, which has no
            // c_str(); this line looks garbled by extraction (or is a
            // latent bug) — confirm against the original source.
            nvinfer1::getLogger()->log(nvinfer1::ILogger::Severity::kERROR, e.what().c_str());
        }
    }
    virtual ~IPluginExt()
    {
    }
};

} // namespace nvinfer1
#endif

namespace onnx2trt
{

// Base for objects destroyed via destroy() rather than delete (keeps
// allocation and deallocation on the same side of a DLL boundary).
struct IOwnable
{
    virtual void destroy() = 0;

protected:
    virtual ~IOwnable()
    {
    }
};

// Deleter that routes unique_ptr cleanup through IOwnable::destroy().
struct OwnableDeleter
{
    void operator()(IOwnable* obj) const
    {
        obj->destroy();
    }
};

using UniqueOwnable = std::unique_ptr;
class Plugin;
class PluginV2;

} // namespace onnx2trt
83 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/onnx2trt_runtime.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include "onnx2trt_common.hpp"
8 |
namespace onnx2trt
{

// Factory signature for reconstructing a Plugin from its serialized bytes.
typedef Plugin* (*plugin_deserializer)(const void* serialData, size_t serialLength);

} // namespace onnx2trt
15 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/readme.md:
--------------------------------------------------------------------------------
1 | # ONNX Parser
2 | - 这几个文件提取自官方的onnx-tensorrt,去掉python方面,其他都在
3 | - 另外增加了Plugin节点的支持
4 | - https://github.com/onnx/onnx-tensorrt
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/onnx_parser_8.x/utils.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-License-Identifier: Apache-2.0
3 | */
4 |
5 | #pragma once
6 |
7 | #include
8 |
// Alias: maps std::string keys to values of the parameter type.
// NOTE(review): the template parameter list and the unordered_map arguments
// were stripped during text extraction (upstream declares
// `template <typename T> using string_map = std::unordered_map<std::string, T>`);
// confirm against the original header before building.
template
using string_map = std::unordered_map;
11 |
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/readme.md:
--------------------------------------------------------------------------------
1 | # Onnx parser for 7.x/8.x
2 | - Origin Code 7.x: https://github.com/onnx/onnx-tensorrt/releases/tag/release%2F7.2.1
3 | - Origin Code 8.x: https://github.com/onnx/onnx-tensorrt/releases/tag/release%2F8.0
4 |
5 | # TensorRT 7.x support
6 | 1. Replace src/tensorRT/onnx_parser with onnx_parser/onnx_parser_7.x:
7 | - `rm -rf src/tensorRT/onnx_parser`
8 | - `cp -r onnx_parser/onnx_parser_7.x src/tensorRT/onnx_parser`
9 | - or execute `bash onnx_parser/use_tensorrt_7.x.sh`
10 | 2. Configure Makefile/CMakeLists.txt path to TensorRT7.x
11 | 3. Execute `make yolo -j64`
12 |
13 | # TensorRT 8.x support
14 | 1. Replace src/tensorRT/onnx_parser with onnx_parser/onnx_parser_8.x:
15 | - `rm -rf src/tensorRT/onnx_parser`
16 | - `cp -r onnx_parser/onnx_parser_8.x src/tensorRT/onnx_parser`
17 | - or execute `bash onnx_parser/use_tensorrt_8.x.sh`
18 | 2. Configure Makefile/CMakeLists.txt path to TensorRT8.x
19 | 3. Execute `make yolo -j64`
20 |
21 | # TensorRT versions below 7.x are not supported
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/use_tensorrt_7.x.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Switch the vendored onnx parser sources to the TensorRT 7.x variant.
src_dir=onnx_parser/onnx_parser_7.x
dst_dir=src/tensorRT/onnx_parser

echo Remove ${dst_dir}
rm -rf "${dst_dir}"

echo Copy [${src_dir}] to [${dst_dir}]
cp -r "${src_dir}" "${dst_dir}"

echo Configure your tensorRT path to 7.x
echo After that, you can execute the command 'make yolo -j64'
--------------------------------------------------------------------------------
/tensorrt_code/onnx_parser/use_tensorrt_8.x.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Switch the vendored onnx parser sources to the TensorRT 8.x variant.
src_dir=onnx_parser/onnx_parser_8.x
dst_dir=src/tensorRT/onnx_parser

echo Remove ${dst_dir}
rm -rf "${dst_dir}"

echo Copy [${src_dir}] to [${dst_dir}]
cp -r "${src_dir}" "${dst_dir}"

echo Configure your tensorRT path to 8.x
echo After that, you can execute the command 'make yolo -j64'
--------------------------------------------------------------------------------
/tensorrt_code/python/copy_dll_to_trtpy.bat:
--------------------------------------------------------------------------------
@echo off
rem Stage the CUDA, cuDNN, TensorRT, and OpenCV runtime DLLs into .\trtpy\
rem so the Python package is self-contained on Windows. Invoked by setup.py
rem during a Windows build.
copy ..\lean\cuda10.1\bin\cublas64_100.dll .\trtpy\
copy ..\lean\cuda10.1\bin\cublasLt64_10.dll .\trtpy\
copy ..\lean\cuda10.1\bin\cudart64_101.dll .\trtpy\
copy ..\lean\cuda10.1\bin\cublas64_10.dll .\trtpy\

rem OpenCV, TensorRT, and cuDNN runtime libraries.
copy ..\lean\opencv3.4.6\lib\opencv_world346.dll .\trtpy\
copy ..\lean\TensorRT-8.0.1.6\lib\nvinfer.dll .\trtpy\
copy ..\lean\TensorRT-8.0.1.6\lib\nvinfer_plugin.dll .\trtpy\
copy ..\lean\cudnn8.2.2.26\*.dll .\trtpy\
11 |
12 |
--------------------------------------------------------------------------------
/tensorrt_code/python/setup.py:
--------------------------------------------------------------------------------
1 |
"""Packaging script for the trtpy wheel.

Bundles the compiled extension module plus, on Windows, the CUDA/cuDNN/
TensorRT/OpenCV runtime DLLs staged by copy_dll_to_trtpy.bat.
"""
from setuptools import find_packages
from setuptools import setup
import platform
import os


def _native_libraries(system):
    """Return the native binaries to bundle for the given platform name."""
    if system == "Linux":
        return ["libtrtpyc.so"]
    if system == "Windows":
        # Stage every runtime DLL next to the extension module first.
        os.system("copy_dll_to_trtpy.bat")
        return [
            "libtrtpyc.pyd",
            "cublas64_10.dll",
            "cublas64_100.dll",
            "cublasLt64_10.dll",
            "cudart64_101.dll",
            "cudnn64_8.dll",
            "cudnn_adv_infer64_8.dll",
            "cudnn_adv_train64_8.dll",
            "cudnn_cnn_infer64_8.dll",
            "cudnn_cnn_train64_8.dll",
            "cudnn_ops_infer64_8.dll",
            "cudnn_ops_train64_8.dll",
            "nvinfer.dll",
            "nvinfer_plugin.dll",
            "opencv_world346.dll",
        ]
    raise RuntimeError(f"Unsupport platform {system}")


os_name = platform.system()
cpp_library = _native_libraries(os_name)

setup(
    name="trtpy",
    version="1.0",
    author="Wish",
    url="https://github.com/shouxieai/tensorRT_cpp",
    description="tensorRT CPP/Python",
    packages=find_packages(),
    package_data={
        "": cpp_library
    },
    zip_safe=False
)
--------------------------------------------------------------------------------
/tensorrt_code/python/test_centernet.py:
--------------------------------------------------------------------------------
"""Single-image CenterNet demo: compile the engine if needed, run one
inference, draw the detections, and save the annotated image."""
import os
import cv2
import numpy as np
import trtpy as tp

# Run relative to the project workspace directory.
os.chdir("../workspace/")

# If execution fails, delete the cached models under ~/.trtpy
# (rm -rf ~/.trtpy) and let them re-download.
engine_file = "centernet_r18_dcn.fp32.trtmodel"
if not os.path.exists(engine_file):
    tp.compile_onnx_to_file(5, tp.onnx_hub("centernet_r18_dcn"), engine_file)

detector = tp.CenterNet(engine_file)
frame = cv2.imread("inference/car.jpg")
detections = detector.commit(frame).get()
print(f"{len(detections)} objects")

for det in detections:
    print(det)
    l, t, r, b = map(int, [det.left, det.top, det.right, det.bottom])
    cv2.rectangle(frame, (l, t), (r, b), tp.random_color(det.class_label), 5)

os.makedirs("single_inference", exist_ok=True)
saveto = "single_inference/centernet.car.jpg"
print(f"Save to {saveto}")

cv2.imwrite(saveto, frame)
30 |
31 |
--------------------------------------------------------------------------------
/tensorrt_code/python/test_retinaface.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import trtpy as tp
5 |
# All model/image paths below are relative to the workspace directory.
os.chdir("../workspace/")

def compile_model(width, height):
    """Build (or reuse) a RetinaFace TensorRT engine for a width x height input."""

    width = tp.upbound(width)
    height = tp.upbound(height)
    reshape_counter = 0

    def hook_reshape(name, shape):
        # The exporter visits nine Reshape layers in a fixed order; each one
        # corresponds to an output level with the stride listed below.
        nonlocal reshape_counter
        strides = [8, 16, 32, 8, 16, 32, 8, 16, 32]
        stride = strides[reshape_counter]
        reshape_counter += 1
        return [-1, height * width // stride // stride * 2, shape[2]]

    engine_file = f"retinaface.{width}x{height}.fp32.trtmodel"
    if not os.path.exists(engine_file):
        tp.set_compile_hook_reshape_layer(hook_reshape)
        tp.compile_onnx_to_file(
            5, tp.onnx_hub("mb_retinaface"), engine_file,
            inputs_dims=np.array([[1, 3, height, width]], dtype=np.int32)
        )
    return engine_file
40 |
41 |
engine_file = compile_model(640, 640)
face_detector = tp.Retinaface(engine_file, nms_threshold=0.4)
frame = cv2.imread("inference/group.jpg")
detections = face_detector.commit(frame).get()

# Draw one magenta box per face plus its five green landmark points.
for det in detections:
    l, t, r, b = (int(v) for v in (det.left, det.top, det.right, det.bottom))
    cv2.rectangle(frame, (l, t), (r, b), (255, 0, 255), 5)
    for px, py in det.landmark.astype(int):
        cv2.circle(frame, (px, py), 3, (0, 255, 0), -1, 16)

os.makedirs("single_inference", exist_ok=True)
saveto = "single_inference/retinaface.group.jpg"
print(f"{len(detections)} faces, Save to {saveto}")
cv2.imwrite(saveto, frame)
--------------------------------------------------------------------------------
/tensorrt_code/python/test_scrfd.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import trtpy as tp
5 |
# All model/image paths below are relative to the workspace directory.
os.chdir("../workspace/")
tp.set_log_level(tp.LogLevel.Verbose)

def compile_model(width, height):
    """Build (or reuse) an SCRFD TensorRT engine for a width x height input."""

    def hook_reshape(name, shape):
        # Map each of the nine known Reshape layers to its feature-map
        # stride; every other layer keeps its original shape.
        layer_to_stride = {
            "Reshape_108": 8,  "Reshape_110": 8,  "Reshape_112": 8,
            "Reshape_126": 16, "Reshape_128": 16, "Reshape_130": 16,
            "Reshape_144": 32, "Reshape_146": 32, "Reshape_148": 32,
        }
        stride = layer_to_stride.get(name)
        if stride is None:
            return shape
        return [-1, height * width // stride // stride * 2, shape[2]]

    engine_file = f"scrfd.{width}x{height}.fp32.trtmodel"
    if not os.path.exists(engine_file):
        tp.set_compile_hook_reshape_layer(hook_reshape)
        tp.compile_onnx_to_file(
            5, tp.onnx_hub("scrfd_2.5g_bnkps"), engine_file,
            inputs_dims=np.array([[1, 3, height, width]], dtype=np.int32)
        )
    return engine_file
38 |
39 |
engine_file = compile_model(640, 640)
face_detector = tp.Scrfd(engine_file, nms_threshold=0.5)
frame = cv2.imread("inference/group.jpg")
detections = face_detector.commit(frame).get()

# Draw one magenta box per face plus its five green landmark points.
for det in detections:
    l, t, r, b = (int(v) for v in (det.left, det.top, det.right, det.bottom))
    cv2.rectangle(frame, (l, t), (r, b), (255, 0, 255), 5)
    for px, py in det.landmark.astype(int):
        cv2.circle(frame, (px, py), 3, (0, 255, 0), -1, 16)

os.makedirs("single_inference", exist_ok=True)
saveto = "single_inference/scrfd.group.jpg"
print(f"{len(detections)} faces, Save to {saveto}")
cv2.imwrite(saveto, frame)
--------------------------------------------------------------------------------
/tensorrt_code/python/test_torch.py:
--------------------------------------------------------------------------------
1 |
2 | import torch
3 | import torchvision.models as models
4 | import trtpy as tp
5 | import numpy as np
6 | import os
7 |
os.chdir("../workspace/")
device = "cuda" if torch.cuda.is_available() else "cpu"

# Torch-tensor path: build a TRT engine directly from a torch module.
# (Renamed the local `input` so it no longer shadows the builtin.)
input_tensor = torch.full((5, 3, 224, 224), 0.3).to(device)
model = models.resnet18(True).eval().to(device)
trt_model = tp.from_torch(model, input_tensor, engine_save_file="torch.engine.trtmodel", onnx_save_file="torch.onnx")
torch_out = model(input_tensor)
trt_out = trt_model(input_tensor)

trt_model.save("torch.trtmodel")

abs_diff = (torch_out - trt_out).abs().max()
print(f"Torch and TRTModel abs diff is {abs_diff}")


# Exercise the dynamic batch dimension: shrink to 1, then restore to 5.
print(trt_model.input().shape)
trt_model.input().resize_single_dim(0, 1)
print(trt_model.input().shape)
trt_model.input().resize_single_dim(0, 5)

# Fill the engine's input tensor with a constant 0.5.
trt_model.input().numpy[:] = 0.5

# Run inference directly on the engine.
trt_model.forward()

# Fetch the raw output.
trt_out = trt_model.output().numpy

# Run torch on the same constant input for comparison.
input_tensor[:] = 0.5
torch_out = model(input_tensor).cpu().data.numpy()

# Compare the maximum absolute difference between the two backends.
abs_diff = np.abs(torch_out - trt_out).max()
print(f"Torch and TRTModel abs diff is {abs_diff}")
--------------------------------------------------------------------------------
/tensorrt_code/python/test_yolov5.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import trtpy as tp
5 |
# Run from the workspace directory where models and test images live.
os.chdir("../workspace/")

# If execution fails, delete the cached model and re-download:
#   rm -rf ~/.trtpy
engine_file = "yolov5m.fp32.trtmodel"
if not os.path.exists(engine_file):
    tp.compile_onnx_to_file(5, tp.onnx_hub("yolov5m"), engine_file)

detector = tp.Yolo(engine_file, type=tp.YoloType.V5)
frame = cv2.imread("inference/car.jpg")
detections = detector.commit(frame).get()
print(f"{len(detections)} objects")

for det in detections:
    l, t, r, b = (int(v) for v in (det.left, det.top, det.right, det.bottom))
    cv2.rectangle(frame, (l, t), (r, b), tp.random_color(det.class_label), 5)

os.makedirs("single_inference", exist_ok=True)
saveto = "single_inference/yolov5.car.jpg"
print(f"Save to {saveto}")

cv2.imwrite(saveto, frame)
--------------------------------------------------------------------------------
/tensorrt_code/python/test_yolox.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import trtpy as tp
5 |
# Run from the workspace directory where models and test images live.
os.chdir("../workspace/")

# If execution fails, delete the cached model and re-download:
#   rm -rf ~/.trtpy
engine_file = "yolox_m.fp32.trtmodel"
if not os.path.exists(engine_file):
    tp.compile_onnx_to_file(5, tp.onnx_hub("yolox_m"), engine_file)

detector = tp.Yolo(engine_file, type=tp.YoloType.X)
frame = cv2.imread("inference/car.jpg")
detections = detector.commit(frame).get()
print(f"{len(detections)} objects")

for det in detections:
    print(f"{det}")
    l, t, r, b = (int(v) for v in (det.left, det.top, det.right, det.bottom))
    cv2.rectangle(frame, (l, t), (r, b), tp.random_color(det.class_label), 5)

os.makedirs("single_inference", exist_ok=True)
saveto = "single_inference/yolox.car.jpg"
print(f"Save to {saveto}")

cv2.imwrite(saveto, frame)
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_alphapose.cpp:
--------------------------------------------------------------------------------
1 |
2 | /**
3 | * @file _main.cpp
4 | * @author 手写AI (zifuture.com:8090)
5 | * @date 2021-07-26
6 | *
* 实现了基于TensorRT对AlphaPose的推理工作
8 | * 1. 基于FP32的模型编译、和推理执行
9 | * 2. 基于INT8的模型编译、和推理执行
10 | * 3. 自定义插件的实现,从pytorch导出到推理编译,并支持FP16
11 | *
12 | * 预处理、后处理采用CPU实现(若想GPU可以自行实现)
13 | * 一次推理5张图获取结果
14 | *
15 | * 我们是一群热血的个人组织者,力图发布免费高质量内容
16 | * 我们的博客地址:http://zifuture.com:8090
17 | * 我们的B站地址:https://space.bilibili.com/1413433465
18 | *
* 如果想要深入学习关于tensorRT的技术栈,请通过博客中的二维码联系我们(免费催更即可)
20 | * 请关注B站,我们根据情况发布相关教程视频(免费)
21 | */
22 |
23 | #include
24 | #include
25 | #include
26 | #include "app_alphapose/alpha_pose.hpp"
27 |
28 | using namespace std;
29 | using namespace cv;
30 |
31 | bool requires(const char* name);
32 |
33 | int app_alphapose(){
34 |
35 | TRT::set_device(0);
36 | INFO("===================== test alphapose fp32 ==================================");
37 |
38 | const char* name = "sppe";
39 | if(not requires(name))
40 | return 0;
41 |
42 | string onnx_file = iLogger::format("%s.onnx", name);
43 | string model_file = iLogger::format("%s.FP32.trtmodel", name);
44 | int test_batch_size = 16;
45 |
46 | if(!iLogger::exists(model_file)){
47 | TRT::compile(
48 | TRT::Mode::FP32, // FP32、FP16、INT8
49 | test_batch_size, // max_batch_size
50 | onnx_file, // source
51 | model_file // save to
52 | );
53 | }
54 |
55 | Mat image = imread("inference/gril.jpg");
56 | auto engine = AlphaPose::create_infer(model_file, 0);
57 | auto box = Rect(158, 104, 176, 693);
58 | auto keys = engine->commit(make_tuple(image, box)).get();
59 | for(int i = 0; i < keys.size(); ++i){
60 | float x = keys[i].x;
61 | float y = keys[i].y;
62 | cv::circle(image, Point(x, y), 5, Scalar(0, 255, 0), -1, 16);
63 | }
64 |
65 | auto save_file = "pose.show.jpg";
66 | INFO("Save to %s", save_file);
67 |
68 | imwrite(save_file, image);
69 | INFO("Done");
70 | return 0;
71 | }
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_alphapose/alpha_pose.hpp:
--------------------------------------------------------------------------------
1 | #ifndef ALPHA_POSE_HPP
2 | #define ALPHA_POSE_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
// Asynchronous AlphaPose single-person keypoint estimator.
namespace AlphaPose{

    using namespace std;
    using namespace cv;

    // One request: the full image plus the person's bounding box within it.
    // NOTE(review): template arguments were stripped by extraction here
    // (presumably tuple<Mat, Rect>) - restore before compiling.
    typedef tuple Input;

    class Infer{
    public:
        // Queue one request; the future resolves to the predicted keypoints.
        virtual shared_future> commit(const Input& input) = 0;
        // Batched variant: one future per input.
        virtual vector>> commits(const vector& inputs) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid);

}; // namespace AlphaPose
26 |
27 | #endif // ALPHA_POSE_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_arcface/arcface.hpp:
--------------------------------------------------------------------------------
1 | #ifndef ARCFACE_HPP
2 | #define ARCFACE_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
// Asynchronous ArcFace face-recognition feature extractor.
namespace Arcface{

    using namespace std;

    // Five facial landmark points stored as (x0,y0, x1,y1, ..., x4,y4).
    struct landmarks{
        float points[10];
    };

    // NOTE(review): template arguments were stripped by extraction below
    // (feature was presumably cv::Mat_<float>, commit_input a tuple of
    // image + landmarks) - restore before compiling.
    typedef cv::Mat_ feature;
    typedef tuple commit_input;

    class Infer{
    public:
        // Queue one face; the future resolves to its feature vector.
        virtual shared_future commit (const commit_input& input) = 0;
        // Batched variant: one future per input.
        virtual vector> commits(const vector& inputs) = 0;
    };

    // Warp the face to the canonical crop using the five landmarks.
    cv::Mat face_alignment(const cv::Mat& image, const landmarks& landmark);
    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid=0);

}; // namespace Arcface
31 |
32 | #endif // ARCFACE_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_cat/yolo.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLO_HPP
2 | #define YOLO_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | /**
13 | * @brief 发挥极致的性能体验
14 | * 支持YoloX和YoloV5
15 | */
16 | namespace CatYolo{
17 |
18 | using namespace std;
19 | using namespace ObjectDetector;
20 |
21 | enum class Type : int{
22 | V5 = 0,
23 | X = 1
24 | };
25 |
26 | void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, Type type, int ibatch);
27 |
28 | class Infer{
29 | public:
30 | virtual shared_future commit(const cv::Mat& image) = 0;
31 | virtual vector> commits(const vector& images) = 0;
32 | };
33 |
34 | shared_ptr create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold=0.25f, float nms_threshold=0.5f);
35 | const char* type_name(Type type);
36 |
37 | }; // namespace Yolo
38 |
39 | #endif // YOLO_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_centernet/centernet.hpp:
--------------------------------------------------------------------------------
1 | #ifndef CENTERNET_HPP
2 | #define CENTERNET_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
// Asynchronous CenterNet object detector.
// NOTE(review): template arguments in the declarations below were stripped
// by extraction (e.g. shared_future<BoxArray>) - restore before compiling.
namespace CenterNet{

    using namespace std;
    using namespace ObjectDetector;

    // Preprocess `image` into batch slot `ibatch` of `tensor`.
    void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, int ibatch);

    class Infer{
    public:
        // Queue one image; the future resolves to its detected boxes.
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid, float confidence_threshold=0.25f, float nms_threshold=0.5f);

}; // namespace CenterNet
28 |
29 |
30 | #endif // CENTERNET_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_dbface/dbface.hpp:
--------------------------------------------------------------------------------
1 | #ifndef DBFACE_HPP
2 | #define DBFACE_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
// Asynchronous DBFace face detector.
// NOTE(review): template arguments in the declarations below were stripped
// by extraction (e.g. shared_future<BoxArray>) - restore before compiling.
namespace DBFace{

    using namespace std;
    using namespace FaceDetector;

    // Preprocess `image` into batch slot `ibatch` of `tensor`.
    void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, int ibatch);

    class Infer{
    public:
        // Queue one image; the future resolves to detected faces.
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid, float confidence_threshold=0.25f, float nms_threshold=0.5f);

}; // namespace DBFace
28 |
29 |
30 | #endif // DBFACE_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_fall_gcn/fall_gcn.hpp:
--------------------------------------------------------------------------------
1 | #ifndef FALL_GCN_HPP
2 | #define FALL_GCN_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
// Fall-detection classifier (GCN) over skeleton keypoints.
namespace FallGCN{

    using namespace std;
    using namespace cv;

    // One request: the keypoints plus the person box they came from.
    // NOTE(review): template arguments partially stripped by extraction
    // (presumably tuple<vector<Point3f>, Rect>) - restore before compiling.
    typedef tuple, Rect> Input;

    // Classification outcome for one skeleton.
    enum class FallState : int{
        Fall = 0,
        Stand = 1,
        UnCertain = 2
    };

    // Human-readable name for a FallState value.
    const char* state_name(FallState state);

    class Infer{
    public:
        // Queue one skeleton; the future resolves to (state, confidence) -
        // template arguments stripped by extraction, confirm the pair type.
        virtual shared_future> commit(const Input& input) = 0;
        // Batched variant: one future per input.
        virtual vector>> commits(const vector& inputs) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid);

}; // namespace FallGCN
34 |
35 | #endif // FALL_GCN_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_fasterrcnn/fasterrcnn.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLO_HPP
2 | #define YOLO_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | /**
13 | * @brief 发挥极致的性能体验
14 | * 支持YoloX和YoloV5
15 | */
16 | namespace FasterRCNN{
17 |
18 | using namespace std;
19 | using namespace ObjectDetector;
20 |
21 | enum class Type : int{
22 | FasterRCNN = 0,
23 | };
24 |
25 | void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, Type type, int ibatch);
26 |
27 | class Infer{
28 | public:
29 | virtual shared_future commit(const cv::Mat& image) = 0;
30 | virtual vector> commits(const vector& images) = 0;
31 | };
32 |
33 | shared_ptr create_infer(const vector& engine_file, Type type, int gpuid, float confidence_threshold=0.25f, float nms_threshold=0.5f);
34 | const char* type_name(Type type);
35 |
36 | }; // namespace Yolo
37 |
38 | #endif // YOLO_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_high_performance/alpha_pose_high_perf.hpp:
--------------------------------------------------------------------------------
1 | #ifndef ALPHA_POSE_HIGH_PERF_HPP
2 | #define ALPHA_POSE_HIGH_PERF_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "high_performance.hpp"
10 |
// High-performance pipeline variant of the AlphaPose keypoint estimator.
namespace AlphaPoseHighPerf{

    using namespace std;
    using namespace cv;
    using namespace HighPerformance;

    // One request: image plus person box. NOTE(review): template arguments
    // stripped by extraction (presumably tuple<Mat, Rect>) - restore.
    typedef tuple Input;

    // Pipeline data node wrapping the predicted keypoints
    // (vector element type also stripped by extraction).
    class PointArray : public Data, public vector{
    public:
        SetupData(PointArray);
    };

    class Infer{
    public:
        // Queue one request; the future resolves to the keypoint array.
        virtual shared_future commit(const Input& input) = 0;
        // Batched variant: one future per input.
        virtual vector> commits(const vector& inputs) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid);

}; // namespace AlphaPoseHighPerf
33 |
34 | #endif // ALPHA_POSE_HIGH_PERF_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_high_performance/yolo_high_perf.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLO_HIGHPERF_HPP
2 | #define YOLO_HIGHPERF_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "high_performance.hpp"
10 |
/**
 * @brief High-performance YOLO detector interface.
 * Supports both YoloX and YoloV5 style decoding.
 */
namespace YoloHighPerf{

    using namespace std;
    using namespace HighPerformance;

    // Which YOLO flavor the engine was exported from.
    enum class Type : int{
        V5 = 0,
        X = 1
    };

    // One detection: corner coordinates, score and class index.
    struct Box{
        float left, top, right, bottom, confidence;
        int class_label;

        Box() = default;

        Box(float left, float top, float right, float bottom, float confidence, int class_label)
        :left(left), top(top), right(right), bottom(bottom), confidence(confidence), class_label(class_label){}
    };

    // Pipeline data node wrapping a list of detections. NOTE(review): the
    // vector's element type was stripped by extraction (presumably
    // vector<Box>) - restore before compiling.
    class BoxArray : public Data, public vector{
    public:
        SetupData(BoxArray);
    };

    class Infer{
    public:
        // Queue one image; the future resolves to its detections.
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold=0.25f, float nms_threshold=0.5f);
    // Human-readable name of the yolo flavor.
    const char* type_name(Type type);

}; // namespace YoloHighPerf
50 |
51 | #endif // YOLO_HIGHPERF_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_retinaface/retinaface.hpp:
--------------------------------------------------------------------------------
1 | #ifndef RETINAFACE_HPP
2 | #define RETINAFACE_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "../common/face_detector.hpp"
10 |
// Asynchronous RetinaFace face detector.
namespace RetinaFace{

    using namespace std;
    using namespace FaceDetector;

    class Infer{
    public:
        // Queue one image; the future resolves to detected faces.
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;

    };

    // Crop a (scaled) face region and remap its landmarks into crop space.
    // NOTE(review): the tuple's template arguments were stripped by
    // extraction - restore before compiling.
    tuple crop_face_and_landmark(
        const cv::Mat& image, const Box& box, float scale_box=1.5f
    );

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid, float confidence_threshold=0.5f, float nms_threshold=0.5f);

}; // namespace RetinaFace
30 |
31 | #endif // RETINAFACE_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_scrfd/scrfd.hpp:
--------------------------------------------------------------------------------
1 | #ifndef SCRFD_HPP
2 | #define SCRFD_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "../common/face_detector.hpp"
10 |
// Asynchronous SCRFD face detector.
namespace Scrfd{

    using namespace std;
    using namespace FaceDetector;

    class Infer{
    public:
        // Queue one image; the future resolves to detected faces.
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;
    };

    // Crop a (scaled) face region and remap its landmarks into crop space.
    // NOTE(review): the tuple's template arguments were stripped by
    // extraction - restore before compiling.
    tuple crop_face_and_landmark(
        const cv::Mat& image, const Box& box, float scale_box=1.5f
    );

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, int gpuid, float confidence_threshold=0.5f, float nms_threshold=0.5f);

}; // namespace Scrfd
29 |
30 | #endif // SCRFD_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_shufflenet/shufflenetv2.hpp:
--------------------------------------------------------------------------------
#ifndef SHUFFLENETV2
#define SHUFFLENETV2

// NOTE(review): the #include targets below were stripped by extraction -
// restore them before compiling.
#include
#include
#include
#include
#include
#include


// Asynchronous ShuffleNetV2 image classifier.
namespace ShuffleNetV2 {

    using namespace std;

    // Preprocess `image` into batch slot `ibatch` of `tensor`.
    void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, int ibatch);

    class Infer{
    public:
        // Queue one image; the future resolves to the classification result
        // (result type's template arguments stripped by extraction - confirm).
        virtual shared_future> commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector>> commits(const vector& images) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file,int gpu_id = 0);

}; // namespace ShuffleNetV2

#endif // SHUFFLENETV2
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_yolo/yolo.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLO_HPP
2 | #define YOLO_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
/**
 * @brief Standard YOLO detector interface.
 * Supports both YoloX and YoloV5 style decoding.
 */
namespace Yolo{

    using namespace std;
    using namespace ObjectDetector;

    // Which YOLO flavor the engine was exported from.
    enum class Type : int{
        V5 = 0,
        X = 1
    };

    // Preprocess `image` into batch slot `ibatch` of `tensor`.
    void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, Type type, int ibatch);

    class Infer{
    public:
        // Queue one image; the future resolves to its detected boxes
        // (template arguments stripped by extraction - confirm types).
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;
    };

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold=0.25f, float nms_threshold=0.5f);
    // Human-readable name of the yolo flavor.
    const char* type_name(Type type);

}; // namespace Yolo
38 |
39 | #endif // YOLO_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/app_yolo_fast/yolo_fast.hpp:
--------------------------------------------------------------------------------
1 | #ifndef YOLO_FAST_HPP
2 | #define YOLO_FAST_HPP
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
/**
 * @brief Fast-path YOLO detector interface.
 * Supports YoloV5 (P5 and P6 variants) and YoloX.
 */
namespace YoloFast{

    using namespace std;
    using namespace ObjectDetector;

    enum class Type : int{
        V5_P5 = 0,
        V5_P6 = 1,
        X = 2
    };

    // Anchor/stride layout used to decode the raw network output.
    // NOTE(review): relationship between num_anchor/num_level and the
    // 16-slot arrays is presumed - confirm against the decode kernel.
    struct DecodeMeta{
        int num_anchor;
        int num_level;
        float w[16], h[16];
        int strides[16];

        // Canned layouts matching the three supported exports.
        static DecodeMeta v5_p5_default_meta();
        static DecodeMeta v5_p6_default_meta();
        static DecodeMeta x_default_meta();
    };

    class Infer{
    public:
        // Queue one image; the future resolves to its detected boxes.
        virtual shared_future commit(const cv::Mat& image) = 0;
        // Batched variant: one future per image.
        virtual vector> commits(const vector& images) = 0;
    };

    // Preprocess `image` into batch slot `ibatch` of `tensor`.
    void image_to_tensor(const cv::Mat& image, shared_ptr& tensor, Type type, int ibatch);

    // Create an inference instance bound to engine_file on the given GPU.
    shared_ptr create_infer(
        const string& engine_file,
        Type type,
        int gpuid,
        float confidence_threshold=0.25f,
        float nms_threshold=0.5f,
        const DecodeMeta& meta = DecodeMeta::v5_p5_default_meta()
    );
    // Human-readable name of the yolo flavor.
    const char* type_name(Type type);

}; // namespace YoloFast
57 |
58 | #endif // YOLO_FAST_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/common/face_detector.hpp:
--------------------------------------------------------------------------------
1 | #ifndef FACE_DETECTOR_HPP
2 | #define FACE_DETECTOR_HPP
3 |
4 | #include
5 | #include
6 |
// Shared result types for the face-detection applications.
namespace FaceDetector{

    // One detected face: box corners, score and five landmark points
    // stored as (x0,y0, ..., x4,y4).
    struct Box{
        float left, top, right, bottom, confidence;
        float landmark[10];

        // Convert to an integer cv::Rect (float coordinates are truncated).
        cv::Rect cvbox() const{return cv::Rect(left, top, right-left, bottom-top);}
        float width() const{return std::max(0.0f, right-left);}
        float height() const{return std::max(0.0f, bottom-top);}
        float area() const{return width() * height();}
        // The accessor pairs below simply mirror the public fields -
        // presumably kept for a property-style language binding; confirm.
        float get_left() {return left;}
        void set_left(float value) {left = value;}
        float get_top() {return top;}
        void set_top(float value) {top = value;}
        float get_right() {return right;}
        void set_right(float value) {right = value;}
        float get_bottom() {return bottom;}
        void set_bottom(float value) {bottom = value;}
        float get_confidence() {return confidence;}
        void set_confidence(float value){confidence = value;}
    };

    // NOTE(review): the vector's element type was stripped by extraction
    // (presumably std::vector<Box>) - restore before compiling.
    typedef std::vector BoxArray;
};
31 |
32 | #endif // FACE_DETECTOR_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/common/object_detector.hpp:
--------------------------------------------------------------------------------
1 | #ifndef OBJECT_DETECTOR_HPP
2 | #define OBJECT_DETECTOR_HPP
3 |
4 | #include
5 |
// Shared result types for the object-detection applications.
namespace ObjectDetector {

    // One detection: corner coordinates, score and class index.
    struct Box{
        float left, top, right, bottom, confidence;
        int class_label;

        Box() = default;

        Box(float left, float top, float right, float bottom, float confidence, int class_label)
        :left(left), top(top), right(right), bottom(bottom), confidence(confidence), class_label(class_label){}
    };

    // NOTE(review): the vector's element type was stripped by extraction
    // (presumably std::vector<Box>) - restore before compiling.
    typedef std::vector BoxArray;
};
20 |
21 |
22 | #endif // OBJECT_DETECTOR_HPP
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/CMakeLists.txt:
--------------------------------------------------------------------------------
# Install the Eigen top-level module headers (the extension-less files such
# as Core, Dense, ...) into ${INCLUDE_INSTALL_DIR}/Eigen.
include(RegexUtils)
test_escape_string_as_regex()

file(GLOB Eigen_directory_files "*")

escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")

# Keep everything except *.txt files, dot-files and the src/ directory.
foreach(f ${Eigen_directory_files})
  if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src")
    list(APPEND Eigen_directory_files_to_install ${f})
  endif()
endforeach(f ${Eigen_directory_files})

install(FILES
  ${Eigen_directory_files_to_install}
  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel
  )

# The implementation headers live under src/ and are installed separately.
install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel FILES_MATCHING PATTERN "*.h")
20 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Cholesky:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_CHOLESKY_MODULE_H
9 | #define EIGEN_CHOLESKY_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | /** \defgroup Cholesky_Module Cholesky module
16 | *
17 | *
18 | *
19 | * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
20 | * Those decompositions are also accessible via the following methods:
21 | * - MatrixBase::llt()
22 | * - MatrixBase::ldlt()
23 | * - SelfAdjointView::llt()
24 | * - SelfAdjointView::ldlt()
25 | *
26 | * \code
27 | * #include
28 | * \endcode
29 | */
30 |
31 | #include "src/Cholesky/LLT.h"
32 | #include "src/Cholesky/LDLT.h"
33 | #ifdef EIGEN_USE_LAPACKE
34 | #include "src/misc/lapacke.h"
35 | #include "src/Cholesky/LLT_LAPACKE.h"
36 | #endif
37 |
38 | #include "src/Core/util/ReenableStupidWarnings.h"
39 |
40 | #endif // EIGEN_CHOLESKY_MODULE_H
41 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
42 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/CholmodSupport:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
9 | #define EIGEN_CHOLMODSUPPORT_MODULE_H
10 |
11 | #include "SparseCore"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | extern "C" {
16 | #include
17 | }
18 |
19 | /** \ingroup Support_modules
20 | * \defgroup CholmodSupport_Module CholmodSupport module
21 | *
22 | * This module provides an interface to the Cholmod library which is part of the suitesparse package.
23 | * It provides the two following main factorization classes:
24 | * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
25 | * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
26 | *
27 | * For the sake of completeness, this module also propose the two following classes:
28 | * - class CholmodSimplicialLLT
29 | * - class CholmodSimplicialLDLT
30 | * Note that these classes does not bring any particular advantage compared to the built-in
31 | * SimplicialLLT and SimplicialLDLT factorization classes.
32 | *
33 | * \code
34 | * #include
35 | * \endcode
36 | *
37 | * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies.
38 | * The dependencies depend on how cholmod has been compiled.
39 | * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task.
40 | *
41 | */
42 |
43 | #include "src/CholmodSupport/CholmodSupport.h"
44 |
45 | #include "src/Core/util/ReenableStupidWarnings.h"
46 |
47 | #endif // EIGEN_CHOLMODSUPPORT_MODULE_H
48 |
49 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Dense:
--------------------------------------------------------------------------------
1 | #include "Core"
2 | #include "LU"
3 | #include "Cholesky"
4 | #include "QR"
5 | #include "SVD"
6 | #include "Geometry"
7 | #include "Eigenvalues"
8 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Eigen:
--------------------------------------------------------------------------------
1 | #include "Dense"
2 | #include "Sparse"
3 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Eigenvalues:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_EIGENVALUES_MODULE_H
9 | #define EIGEN_EIGENVALUES_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | #include "Cholesky"
16 | #include "Jacobi"
17 | #include "Householder"
18 | #include "LU"
19 | #include "Geometry"
20 |
21 | /** \defgroup Eigenvalues_Module Eigenvalues module
22 | *
23 | *
24 | *
25 | * This module mainly provides various eigenvalue solvers.
26 | * This module also provides some MatrixBase methods, including:
27 | * - MatrixBase::eigenvalues(),
28 | * - MatrixBase::operatorNorm()
29 | *
30 | * \code
31 | * #include <Eigen/Eigenvalues>
32 | * \endcode
33 | */
34 |
35 | #include "src/misc/RealSvd2x2.h"
36 | #include "src/Eigenvalues/Tridiagonalization.h"
37 | #include "src/Eigenvalues/RealSchur.h"
38 | #include "src/Eigenvalues/EigenSolver.h"
39 | #include "src/Eigenvalues/SelfAdjointEigenSolver.h"
40 | #include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
41 | #include "src/Eigenvalues/HessenbergDecomposition.h"
42 | #include "src/Eigenvalues/ComplexSchur.h"
43 | #include "src/Eigenvalues/ComplexEigenSolver.h"
44 | #include "src/Eigenvalues/RealQZ.h"
45 | #include "src/Eigenvalues/GeneralizedEigenSolver.h"
46 | #include "src/Eigenvalues/MatrixBaseEigenvalues.h"
47 | #ifdef EIGEN_USE_LAPACKE
48 | #include "src/misc/lapacke.h"
49 | #include "src/Eigenvalues/RealSchur_LAPACKE.h"
50 | #include "src/Eigenvalues/ComplexSchur_LAPACKE.h"
51 | #include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h"
52 | #endif
53 |
54 | #include "src/Core/util/ReenableStupidWarnings.h"
55 |
56 | #endif // EIGEN_EIGENVALUES_MODULE_H
57 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
58 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Geometry:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_GEOMETRY_MODULE_H
9 | #define EIGEN_GEOMETRY_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | #include "SVD"
16 | #include "LU"
17 | #include <limits>
18 |
19 | /** \defgroup Geometry_Module Geometry module
20 | *
21 | * This module provides support for:
22 | * - fixed-size homogeneous transformations
23 | * - translation, scaling, 2D and 3D rotations
24 | * - \link Quaternion quaternions \endlink
25 | * - cross products (\ref MatrixBase::cross, \ref MatrixBase::cross3)
26 | * - orthognal vector generation (\ref MatrixBase::unitOrthogonal)
27 | * - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes \endlink
28 | * - \link AlignedBox axis aligned bounding boxes \endlink
29 | * - \link umeyama least-square transformation fitting \endlink
30 | *
31 | * \code
32 | * #include <Eigen/Geometry>
33 | * \endcode
34 | */
35 |
36 | #include "src/Geometry/OrthoMethods.h"
37 | #include "src/Geometry/EulerAngles.h"
38 |
39 | #include "src/Geometry/Homogeneous.h"
40 | #include "src/Geometry/RotationBase.h"
41 | #include "src/Geometry/Rotation2D.h"
42 | #include "src/Geometry/Quaternion.h"
43 | #include "src/Geometry/AngleAxis.h"
44 | #include "src/Geometry/Transform.h"
45 | #include "src/Geometry/Translation.h"
46 | #include "src/Geometry/Scaling.h"
47 | #include "src/Geometry/Hyperplane.h"
48 | #include "src/Geometry/ParametrizedLine.h"
49 | #include "src/Geometry/AlignedBox.h"
50 | #include "src/Geometry/Umeyama.h"
51 |
52 | // Use the SSE optimized version whenever possible. At the moment the
53 | // SSE version doesn't compile when AVX is enabled
54 | #if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
55 | #include "src/Geometry/arch/Geometry_SSE.h"
56 | #endif
57 |
58 | #include "src/Core/util/ReenableStupidWarnings.h"
59 |
60 | #endif // EIGEN_GEOMETRY_MODULE_H
61 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
62 |
63 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Householder:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_HOUSEHOLDER_MODULE_H
9 | #define EIGEN_HOUSEHOLDER_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | /** \defgroup Householder_Module Householder module
16 | * This module provides Householder transformations.
17 | *
18 | * \code
19 | * #include <Eigen/Householder>
20 | * \endcode
21 | */
22 |
23 | #include "src/Householder/Householder.h"
24 | #include "src/Householder/HouseholderSequence.h"
25 | #include "src/Householder/BlockHouseholder.h"
26 |
27 | #include "src/Core/util/ReenableStupidWarnings.h"
28 |
29 | #endif // EIGEN_HOUSEHOLDER_MODULE_H
30 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
31 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/IterativeLinearSolvers:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
9 | #define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
10 |
11 | #include "SparseCore"
12 | #include "OrderingMethods"
13 |
14 | #include "src/Core/util/DisableStupidWarnings.h"
15 |
16 | /**
17 | * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module
18 | *
19 | * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse.
20 | * Those solvers are accessible via the following classes:
21 | * - ConjugateGradient for selfadjoint (hermitian) matrices,
22 | * - LeastSquaresConjugateGradient for rectangular least-square problems,
23 | * - BiCGSTAB for general square matrices.
24 | *
25 | * These iterative solvers are associated with some preconditioners:
26 | * - IdentityPreconditioner - not really useful
27 | * - DiagonalPreconditioner - also called Jacobi preconditioner, work very well on diagonal dominant matrices.
28 | * - IncompleteLUT - incomplete LU factorization with dual thresholding
29 | *
30 | * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport.
31 | *
32 | \code
33 | #include <Eigen/IterativeLinearSolvers>
34 | \endcode
35 | */
36 |
37 | #include "src/IterativeLinearSolvers/SolveWithGuess.h"
38 | #include "src/IterativeLinearSolvers/IterativeSolverBase.h"
39 | #include "src/IterativeLinearSolvers/BasicPreconditioners.h"
40 | #include "src/IterativeLinearSolvers/ConjugateGradient.h"
41 | #include "src/IterativeLinearSolvers/LeastSquareConjugateGradient.h"
42 | #include "src/IterativeLinearSolvers/BiCGSTAB.h"
43 | #include "src/IterativeLinearSolvers/IncompleteLUT.h"
44 | #include "src/IterativeLinearSolvers/IncompleteCholesky.h"
45 |
46 | #include "src/Core/util/ReenableStupidWarnings.h"
47 |
48 | #endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
49 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Jacobi:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_JACOBI_MODULE_H
9 | #define EIGEN_JACOBI_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | /** \defgroup Jacobi_Module Jacobi module
16 | * This module provides Jacobi and Givens rotations.
17 | *
18 | * \code
19 | * #include <Eigen/Jacobi>
20 | * \endcode
21 | *
22 | * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation:
23 | * - MatrixBase::applyOnTheLeft()
24 | * - MatrixBase::applyOnTheRight().
25 | */
26 |
27 | #include "src/Jacobi/Jacobi.h"
28 |
29 | #include "src/Core/util/ReenableStupidWarnings.h"
30 |
31 | #endif // EIGEN_JACOBI_MODULE_H
32 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
33 |
34 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/LU:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_LU_MODULE_H
9 | #define EIGEN_LU_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | /** \defgroup LU_Module LU module
16 | * This module includes %LU decomposition and related notions such as matrix inversion and determinant.
17 | * This module defines the following MatrixBase methods:
18 | * - MatrixBase::inverse()
19 | * - MatrixBase::determinant()
20 | *
21 | * \code
22 | * #include <Eigen/LU>
23 | * \endcode
24 | */
25 |
26 | #include "src/misc/Kernel.h"
27 | #include "src/misc/Image.h"
28 | #include "src/LU/FullPivLU.h"
29 | #include "src/LU/PartialPivLU.h"
30 | #ifdef EIGEN_USE_LAPACKE
31 | #include "src/misc/lapacke.h"
32 | #include "src/LU/PartialPivLU_LAPACKE.h"
33 | #endif
34 | #include "src/LU/Determinant.h"
35 | #include "src/LU/InverseImpl.h"
36 |
37 | // Use the SSE optimized version whenever possible. At the moment the
38 | // SSE version doesn't compile when AVX is enabled
39 | #if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
40 | #include "src/LU/arch/Inverse_SSE.h"
41 | #endif
42 |
43 | #include "src/Core/util/ReenableStupidWarnings.h"
44 |
45 | #endif // EIGEN_LU_MODULE_H
46 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
47 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/MetisSupport:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_METISSUPPORT_MODULE_H
9 | #define EIGEN_METISSUPPORT_MODULE_H
10 |
11 | #include "SparseCore"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | extern "C" {
16 | #include <metis.h>
17 | }
18 |
19 |
20 | /** \ingroup Support_modules
21 | * \defgroup MetisSupport_Module MetisSupport module
22 | *
23 | * \code
24 | * #include <Eigen/MetisSupport>
25 | * \endcode
26 | * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis).
27 | * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. \endlink
28 | */
29 |
30 |
31 | #include "src/MetisSupport/MetisSupport.h"
32 |
33 | #include "src/Core/util/ReenableStupidWarnings.h"
34 |
35 | #endif // EIGEN_METISSUPPORT_MODULE_H
36 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/PaStiXSupport:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_PASTIXSUPPORT_MODULE_H
9 | #define EIGEN_PASTIXSUPPORT_MODULE_H
10 |
11 | #include "SparseCore"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | extern "C" {
16 | #include <pastix_nompi.h>
17 | #include <pastix.h>
18 | }
19 |
20 | #ifdef complex
21 | #undef complex
22 | #endif
23 |
24 | /** \ingroup Support_modules
25 | * \defgroup PaStiXSupport_Module PaStiXSupport module
26 | *
27 | * This module provides an interface to the PaSTiX library.
28 | * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver.
29 | * It provides the two following main factorization classes:
30 | * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization.
31 | * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization.
32 | * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern).
33 | *
34 | * \code
35 | * #include <Eigen/PaStiXSupport>
36 | * \endcode
37 | *
38 | * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies.
39 | * The dependencies depend on how PaSTiX has been compiled.
40 | * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task.
41 | *
42 | */
43 |
44 | #include "src/PaStiXSupport/PaStiXSupport.h"
45 |
46 | #include "src/Core/util/ReenableStupidWarnings.h"
47 |
48 | #endif // EIGEN_PASTIXSUPPORT_MODULE_H
49 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/PardisoSupport:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_PARDISOSUPPORT_MODULE_H
9 | #define EIGEN_PARDISOSUPPORT_MODULE_H
10 |
11 | #include "SparseCore"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | #include <mkl_pardiso.h>
16 |
17 | /** \ingroup Support_modules
18 | * \defgroup PardisoSupport_Module PardisoSupport module
19 | *
20 | * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers.
21 | *
22 | * \code
23 | * #include <Eigen/PardisoSupport>
24 | * \endcode
25 | *
26 | * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies.
27 | * See this \ref TopicUsingIntelMKL "page" for more information on MKL-Eigen integration.
28 | *
29 | */
30 |
31 | #include "src/PardisoSupport/PardisoSupport.h"
32 |
33 | #include "src/Core/util/ReenableStupidWarnings.h"
34 |
35 | #endif // EIGEN_PARDISOSUPPORT_MODULE_H
36 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/QR:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_QR_MODULE_H
9 | #define EIGEN_QR_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | #include "Cholesky"
16 | #include "Jacobi"
17 | #include "Householder"
18 |
19 | /** \defgroup QR_Module QR module
20 | *
21 | *
22 | *
23 | * This module provides various QR decompositions
24 | * This module also provides some MatrixBase methods, including:
25 | * - MatrixBase::householderQr()
26 | * - MatrixBase::colPivHouseholderQr()
27 | * - MatrixBase::fullPivHouseholderQr()
28 | *
29 | * \code
30 | * #include <Eigen/QR>
31 | * \endcode
32 | */
33 |
34 | #include "src/QR/HouseholderQR.h"
35 | #include "src/QR/FullPivHouseholderQR.h"
36 | #include "src/QR/ColPivHouseholderQR.h"
37 | #include "src/QR/CompleteOrthogonalDecomposition.h"
38 | #ifdef EIGEN_USE_LAPACKE
39 | #include "src/misc/lapacke.h"
40 | #include "src/QR/HouseholderQR_LAPACKE.h"
41 | #include "src/QR/ColPivHouseholderQR_LAPACKE.h"
42 | #endif
43 |
44 | #include "src/Core/util/ReenableStupidWarnings.h"
45 |
46 | #endif // EIGEN_QR_MODULE_H
47 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
48 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/QtAlignedMalloc:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_QTMALLOC_MODULE_H
9 | #define EIGEN_QTMALLOC_MODULE_H
10 |
11 | #include "Core"
12 |
13 | #if (!EIGEN_MALLOC_ALREADY_ALIGNED)
14 |
15 | #include "src/Core/util/DisableStupidWarnings.h"
16 |
17 | void *qMalloc(std::size_t size)
18 | {
19 | return Eigen::internal::aligned_malloc(size);
20 | }
21 |
22 | void qFree(void *ptr)
23 | {
24 | Eigen::internal::aligned_free(ptr);
25 | }
26 |
27 | void *qRealloc(void *ptr, std::size_t size)
28 | {
29 | void* newPtr = Eigen::internal::aligned_malloc(size);
30 | memcpy(newPtr, ptr, size);
31 | Eigen::internal::aligned_free(ptr);
32 | return newPtr;
33 | }
34 |
35 | #include "src/Core/util/ReenableStupidWarnings.h"
36 |
37 | #endif
38 |
39 | #endif // EIGEN_QTMALLOC_MODULE_H
40 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
41 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/SPQRSupport:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_SPQRSUPPORT_MODULE_H
9 | #define EIGEN_SPQRSUPPORT_MODULE_H
10 |
11 | #include "SparseCore"
12 |
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | #include "SuiteSparseQR.hpp"
16 |
17 | /** \ingroup Support_modules
18 | * \defgroup SPQRSupport_Module SuiteSparseQR module
19 | *
20 | * This module provides an interface to the SPQR library, which is part of the suitesparse package.
21 | *
22 | * \code
23 | * #include <Eigen/SPQRSupport>
24 | * \endcode
25 | *
26 | * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...).
27 | * For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules
28 | *
29 | */
30 |
31 | #include "src/CholmodSupport/CholmodSupport.h"
32 | #include "src/SPQRSupport/SuiteSparseQRSupport.h"
33 |
34 | #endif
35 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/SVD:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_SVD_MODULE_H
9 | #define EIGEN_SVD_MODULE_H
10 |
11 | #include "QR"
12 | #include "Householder"
13 | #include "Jacobi"
14 |
15 | #include "src/Core/util/DisableStupidWarnings.h"
16 |
17 | /** \defgroup SVD_Module SVD module
18 | *
19 | *
20 | *
21 | * This module provides SVD decomposition for matrices (both real and complex).
22 | * Two decomposition algorithms are provided:
23 | * - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very slow for larger ones.
24 | * - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast for large problems.
25 | * These decompositions are accessible via the respective classes and following MatrixBase methods:
26 | * - MatrixBase::jacobiSvd()
27 | * - MatrixBase::bdcSvd()
28 | *
29 | * \code
30 | * #include <Eigen/SVD>
31 | * \endcode
32 | */
33 |
34 | #include "src/misc/RealSvd2x2.h"
35 | #include "src/SVD/UpperBidiagonalization.h"
36 | #include "src/SVD/SVDBase.h"
37 | #include "src/SVD/JacobiSVD.h"
38 | #include "src/SVD/BDCSVD.h"
39 | #if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
40 | #include "src/misc/lapacke.h"
41 | #include "src/SVD/JacobiSVD_LAPACKE.h"
42 | #endif
43 |
44 | #include "src/Core/util/ReenableStupidWarnings.h"
45 |
46 | #endif // EIGEN_SVD_MODULE_H
47 | /* vim: set filetype=cpp et sw=2 ts=2 ai: */
48 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/Sparse:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_SPARSE_MODULE_H
9 | #define EIGEN_SPARSE_MODULE_H
10 |
11 | /** \defgroup Sparse_Module Sparse meta-module
12 | *
13 | * Meta-module including all related modules:
14 | * - \ref SparseCore_Module
15 | * - \ref OrderingMethods_Module
16 | * - \ref SparseCholesky_Module
17 | * - \ref SparseLU_Module
18 | * - \ref SparseQR_Module
19 | * - \ref IterativeLinearSolvers_Module
20 | *
21 | \code
22 | #include <Eigen/Sparse>
23 | \endcode
24 | */
25 |
26 | #include "SparseCore"
27 | #include "OrderingMethods"
28 | #ifndef EIGEN_MPL2_ONLY
29 | #include "SparseCholesky"
30 | #endif
31 | #include "SparseLU"
32 | #include "SparseQR"
33 | #include "IterativeLinearSolvers"
34 |
35 | #endif // EIGEN_SPARSE_MODULE_H
36 |
37 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/SparseCholesky:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // Copyright (C) 2008-2013 Gael Guennebaud <gael.guennebaud@inria.fr>
5 | //
6 | // This Source Code Form is subject to the terms of the Mozilla
7 | // Public License v. 2.0. If a copy of the MPL was not distributed
8 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 |
10 | #ifndef EIGEN_SPARSECHOLESKY_MODULE_H
11 | #define EIGEN_SPARSECHOLESKY_MODULE_H
12 |
13 | #include "SparseCore"
14 | #include "OrderingMethods"
15 |
16 | #include "src/Core/util/DisableStupidWarnings.h"
17 |
18 | /**
19 | * \defgroup SparseCholesky_Module SparseCholesky module
20 | *
21 | * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices.
22 | * Those decompositions are accessible via the following classes:
23 | * - SimplicialLLt,
24 | * - SimplicialLDLt
25 | *
26 | * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module.
27 | *
28 | * \code
29 | * #include <Eigen/SparseCholesky>
30 | * \endcode
31 | */
32 |
33 | #ifdef EIGEN_MPL2_ONLY
34 | #error The SparseCholesky module has nothing to offer in MPL2 only mode
35 | #endif
36 |
37 | #include "src/SparseCholesky/SimplicialCholesky.h"
38 |
39 | #ifndef EIGEN_MPL2_ONLY
40 | #include "src/SparseCholesky/SimplicialCholesky_impl.h"
41 | #endif
42 |
43 | #include "src/Core/util/ReenableStupidWarnings.h"
44 |
45 | #endif // EIGEN_SPARSECHOLESKY_MODULE_H
46 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/SparseLU:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
5 | // Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
6 | //
7 | // This Source Code Form is subject to the terms of the Mozilla
8 | // Public License v. 2.0. If a copy of the MPL was not distributed
9 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 |
11 | #ifndef EIGEN_SPARSELU_MODULE_H
12 | #define EIGEN_SPARSELU_MODULE_H
13 |
14 | #include "SparseCore"
15 |
16 | /**
17 | * \defgroup SparseLU_Module SparseLU module
18 | * This module defines a supernodal factorization of general sparse matrices.
19 | * The code is fully optimized for supernode-panel updates with specialized kernels.
20 | * Please, see the documentation of the SparseLU class for more details.
21 | */
22 |
23 | // Ordering interface
24 | #include "OrderingMethods"
25 |
26 | #include "src/SparseLU/SparseLU_gemm_kernel.h"
27 |
28 | #include "src/SparseLU/SparseLU_Structs.h"
29 | #include "src/SparseLU/SparseLU_SupernodalMatrix.h"
30 | #include "src/SparseLU/SparseLUImpl.h"
31 | #include "src/SparseCore/SparseColEtree.h"
32 | #include "src/SparseLU/SparseLU_Memory.h"
33 | #include "src/SparseLU/SparseLU_heap_relax_snode.h"
34 | #include "src/SparseLU/SparseLU_relax_snode.h"
35 | #include "src/SparseLU/SparseLU_pivotL.h"
36 | #include "src/SparseLU/SparseLU_panel_dfs.h"
37 | #include "src/SparseLU/SparseLU_kernel_bmod.h"
38 | #include "src/SparseLU/SparseLU_panel_bmod.h"
39 | #include "src/SparseLU/SparseLU_column_dfs.h"
40 | #include "src/SparseLU/SparseLU_column_bmod.h"
41 | #include "src/SparseLU/SparseLU_copy_to_ucol.h"
42 | #include "src/SparseLU/SparseLU_pruneL.h"
43 | #include "src/SparseLU/SparseLU_Utils.h"
44 | #include "src/SparseLU/SparseLU.h"
45 |
46 | #endif // EIGEN_SPARSELU_MODULE_H
47 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/SparseQR:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // This Source Code Form is subject to the terms of the Mozilla
5 | // Public License v. 2.0. If a copy of the MPL was not distributed
6 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
7 |
8 | #ifndef EIGEN_SPARSEQR_MODULE_H
9 | #define EIGEN_SPARSEQR_MODULE_H
10 |
11 | #include "SparseCore"
12 | #include "OrderingMethods"
13 | #include "src/Core/util/DisableStupidWarnings.h"
14 |
15 | /** \defgroup SparseQR_Module SparseQR module
16 | * \brief Provides QR decomposition for sparse matrices
17 | *
18 | * This module provides a simplicial version of the left-looking Sparse QR decomposition.
19 | * The columns of the input matrix should be reordered to limit the fill-in during the
20 | * decomposition. Built-in methods (COLAMD, AMD) or external methods (METIS) can be used to this end.
21 | * See the \link OrderingMethods_Module OrderingMethods\endlink module for the list
22 | * of built-in and external ordering methods.
23 | *
24 | * \code
25 | * #include <Eigen/SparseQR>
26 | * \endcode
27 | *
28 | *
29 | */
30 |
31 | #include "OrderingMethods"
32 | #include "src/SparseCore/SparseColEtree.h"
33 | #include "src/SparseQR/SparseQR.h"
34 |
35 | #include "src/Core/util/ReenableStupidWarnings.h"
36 |
37 | #endif
38 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/StdDeque:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
5 | // Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
6 | //
7 | // This Source Code Form is subject to the terms of the Mozilla
8 | // Public License v. 2.0. If a copy of the MPL was not distributed
9 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 |
11 | #ifndef EIGEN_STDDEQUE_MODULE_H
12 | #define EIGEN_STDDEQUE_MODULE_H
13 |
14 | #include "Core"
15 | #include <deque>
16 |
17 | #if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
18 |
19 | #define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...)
20 |
21 | #else
22 |
23 | #include "src/StlSupport/StdDeque.h"
24 |
25 | #endif
26 |
27 | #endif // EIGEN_STDDEQUE_MODULE_H
28 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/StdList:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>
5 | //
6 | // This Source Code Form is subject to the terms of the Mozilla
7 | // Public License v. 2.0. If a copy of the MPL was not distributed
8 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 |
10 | #ifndef EIGEN_STDLIST_MODULE_H
11 | #define EIGEN_STDLIST_MODULE_H
12 |
13 | #include "Core"
14 | #include <list>
15 |
16 | #if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
17 |
18 | #define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)
19 |
20 | #else
21 |
22 | #include "src/StlSupport/StdList.h"
23 |
24 | #endif
25 |
26 | #endif // EIGEN_STDLIST_MODULE_H
27 |
--------------------------------------------------------------------------------
/tensorrt_code/src/application/tools/Eigen/StdVector:
--------------------------------------------------------------------------------
1 | // This file is part of Eigen, a lightweight C++ template library
2 | // for linear algebra.
3 | //
4 | // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>