├── .gitignore
├── docs
├── cnn
│ ├── 引言.md
│ ├── imgs
│ │ └── alexnet
│ │ │ ├── lrn.png
│ │ │ ├── pca.png
│ │ │ ├── relu.png
│ │ │ ├── AlexNet.png
│ │ │ └── alexnet-model.PNG
│ ├── 过拟合和欠拟合.md
│ └── 线性和非线性.md
├── python
│ ├── imgs
│ │ ├── xml.png
│ │ ├── tqdm-1.gif
│ │ ├── tqdm-2.gif
│ │ ├── tqdm-3.gif
│ │ └── decorator.png
│ ├── [numpy]提取数组中属于某一条件的数据.md
│ ├── [numpy]元素累加.md
│ ├── [numpy]增加或者减少一维.md
│ ├── [numpy][clip]限制取值范围.md
│ ├── [python3.6][f-strings]字符串连接.md
│ ├── [around]四舍五入.md
│ ├── json文件读写.md
│ ├── [pprint]更易读的打印.md
│ ├── pip-python-bad-interpreter.md
│ ├── [collections][defaultdict]更安全的dict.md
│ ├── pip-更新国内镜像源.md
│ ├── [enumerate]遍历.md
│ ├── [tqdm]进度条.md
│ ├── [list]排序.md
│ ├── [itertools][product]嵌套循环.md
│ ├── 保存json或者dict数据为voc-xml文件.md
│ ├── [collections][deque]双向队列的使用.md
│ ├── [slice]扩展切片操作.md
│ ├── [xmltodict]读取XML文件.md
│ ├── [setup.py]保存额外数据.md
│ ├── [抽象基类]abc.md
│ └── [easydict]访问属性的方式来访问字典.md
├── matplotlib
│ ├── imgs
│ │ ├── gray.png
│ │ ├── pie-1.png
│ │ ├── pie-2.png
│ │ ├── pie-3.png
│ │ ├── pie-4.png
│ │ ├── pie-5.png
│ │ ├── pie-6.png
│ │ ├── xaxis.png
│ │ ├── anatomy.png
│ │ ├── axes-2-2.png
│ │ ├── curve-3d.png
│ │ ├── figure-1.png
│ │ ├── figure-2.png
│ │ ├── figure-3.png
│ │ ├── fmt-line.png
│ │ ├── gray-2-3.png
│ │ ├── plot-x-y.png
│ │ ├── plot-y.png
│ │ ├── unorder.png
│ │ ├── contour_1.png
│ │ ├── contour_2.png
│ │ ├── contour_3.png
│ │ ├── contour_4.png
│ │ ├── contour_5.png
│ │ ├── fmt-color.png
│ │ ├── fmt-marker.png
│ │ ├── line-chart.png
│ │ ├── line-text.png
│ │ ├── line_spots.png
│ │ ├── mat_image.png
│ │ ├── plot-x-y-+.png
│ │ ├── scatter-3d.png
│ │ ├── surface-3d.png
│ │ ├── coordinate-3d.png
│ │ ├── line-legend-1.png
│ │ ├── multi-scatter.png
│ │ ├── subplot-1-2-2.png
│ │ ├── figure-suptitle.png
│ │ ├── single-scatter.png
│ │ ├── line2d-properties-1.png
│ │ ├── line2d-properties-2.png
│ │ ├── sphx_glr_pyplot_001.png
│ │ ├── sphx_glr_pyplot_002.png
│ │ ├── sphx_glr_pyplot_003.png
│ │ ├── sphx_glr_pyplot_004.png
│ │ ├── sphx_glr_pyplot_005.png
│ │ ├── sphx_glr_pyplot_006.png
│ │ ├── sphx_glr_pyplot_007.png
│ │ ├── sphx_glr_pyplot_008.png
│ │ ├── sphx_glr_pyplot_009.png
│ │ └── sphx_glr_pyplot_010.png
│ ├── y轴坐标错乱.md
│ ├── 手动设置轴刻度间隔.md
│ ├── 引言.md
│ ├── 中文乱码.md
│ ├── 属性配置.md
│ ├── 矩阵显示.md
│ └── 散点图.md
├── opencv
│ ├── draw
│ │ ├── imgs
│ │ │ ├── line.png
│ │ │ ├── text.png
│ │ │ ├── freetype.png
│ │ │ └── rectangle.png
│ │ ├── [rectangle]绘制边框.md
│ │ ├── [掩码]绘制多边形.md
│ │ ├── [line]绘制线段.md
│ │ └── [text]绘制文本.md
│ ├── code
│ │ ├── imgs
│ │ │ ├── canny.png
│ │ │ ├── kernel.png
│ │ │ ├── sobel-1.png
│ │ │ ├── sobel-2.png
│ │ │ ├── sobel-3.png
│ │ │ ├── affine
│ │ │ │ ├── X.png
│ │ │ │ ├── compute.png
│ │ │ │ ├── affine-1.png
│ │ │ │ ├── affine-2.png
│ │ │ │ ├── warp-affine.png
│ │ │ │ ├── affine-matrix.png
│ │ │ │ ├── affine-result.png
│ │ │ │ ├── rotate-python.png
│ │ │ │ ├── get-rotation-matrix.png
│ │ │ │ └── Warp_Affine_Tutorial_Theory_0.jpg
│ │ │ ├── filter2d.png
│ │ │ ├── gradient.png
│ │ │ ├── scharr-1.png
│ │ │ ├── scharr-2.png
│ │ │ ├── scharr-3.png
│ │ │ ├── cartToPolar.png
│ │ │ ├── convert-to.png
│ │ │ ├── laplacian-1.png
│ │ │ ├── laplacian-2.png
│ │ │ ├── laplacian-3.png
│ │ │ ├── laplacian-math.png
│ │ │ ├── scharr-kernel.png
│ │ │ ├── sift
│ │ │ │ ├── sift_dog.jpg
│ │ │ │ ├── sift_keypoints.jpg
│ │ │ │ ├── sift_keypoints-gray.jpg
│ │ │ │ └── sift_local_extrema.jpg
│ │ │ ├── sobel-vertical.png
│ │ │ ├── constraint_border.png
│ │ │ ├── gaussian-filter.png
│ │ │ ├── gradient-compute.png
│ │ │ ├── laplacian-kernel.png
│ │ │ ├── replicate_border.png
│ │ │ ├── sobel-horizontal.png
│ │ │ ├── matcher
│ │ │ │ ├── match_sift.png
│ │ │ │ └── knnmatch_sift.png
│ │ │ ├── gradient-like-compute.png
│ │ │ ├── normalize
│ │ │ │ ├── norm-type-1.png
│ │ │ │ └── norm-type-2.png
│ │ │ ├── thresh
│ │ │ │ ├── thresh-binary.png
│ │ │ │ ├── thresh-tozero.png
│ │ │ │ ├── thresh-truncate.png
│ │ │ │ ├── thresh-binary-inv.png
│ │ │ │ ├── thresh-tozero-inv.png
│ │ │ │ ├── Threshold_Tutorial_Theory_Zero.png
│ │ │ │ ├── Threshold_Tutorial_Theory_Binary.png
│ │ │ │ ├── Threshold_Tutorial_Theory_Truncate.png
│ │ │ │ ├── Threshold_Tutorial_Theory_Base_Figure.png
│ │ │ │ ├── Threshold_Tutorial_Theory_Zero_Inverted.png
│ │ │ │ └── Threshold_Tutorial_Theory_Binary_Inverted.png
│ │ │ ├── sample-edge-second-derivative.jpg
│ │ │ ├── Laplace_Operator_Tutorial_Theory_Previous.jpg
│ │ │ └── Laplace_Operator_Tutorial_Theory_ddIntensity.jpg
│ │ ├── 运行时间统计.md
│ │ ├── Understanding-Features.md
│ │ ├── [convertTo]数据转换.md
│ │ └── [cartToPolar]二维向量的大小和角度.md
│ ├── OpenCV概述.md
│ └── install-configure
│ │ ├── [PyCharm]解码opencv python库.md
│ │ ├── OpenCV-4.0.1测试.md
│ │ └── [opencv-python]编译与安装.md
├── pytorch
│ ├── imgs
│ │ └── fivecrop.png
│ ├── model
│ │ ├── imgs
│ │ │ ├── mnist.png
│ │ │ ├── alexnet-500.png
│ │ │ ├── alexnet-loss-500.png
│ │ │ ├── spp-pretrained-acc.png
│ │ │ └── spp-pretrained-loss.png
│ │ ├── 查询模型参数总数.md
│ │ └── 为什么推荐使用static_dict方式保存模型.md
│ ├── preprocessing
│ │ ├── imgs
│ │ │ ├── preprocess.png
│ │ │ ├── cifar-sample-4.png
│ │ │ ├── voc-aeroplane.png
│ │ │ ├── voc-dataloader.png
│ │ │ ├── sphx_glr_data_loading_tutorial_001.png
│ │ │ └── sphx_glr_data_loading_tutorial_004.png
│ │ ├── 数据预处理.md
│ │ ├── [torchvision][ConcatDataset]连接多个数据集.md
│ │ └── [Ten Crops]多样本检测.md
│ ├── 可视化.md
│ ├── error
│ │ ├── RuntimeError: CUDA error: initialization error.md
│ │ ├── RuntimeError: invalid argument 0: Sizes of tensors must match.md
│ │ ├── OSError: [Errno 12] Cannot allocate memory.md
│ │ └── Process finished with exit code 137 (interrupted by signal 9: SIGKILL) .md
│ ├── cuda
│ │ ├── 监控显存使用.md
│ │ ├── [CUDA_VISIBLE_DEVICES]指定哪张卡运行.md
│ │ ├── [benchmark]训练加速.md
│ │ └── [empty_cache]清空显存.md
│ ├── [nonzero]非零元素下标.md
│ ├── [clamp]限制取值范围.md
│ ├── [Conv][Pool]实现原理.md
│ ├── [transpose][permute]维度转换.md
│ ├── 引言.md
│ ├── [index_fill]在给定维度填充指定val.md
│ ├── [AdaptiveMaxPool][AdaptiveAvgPool]自适应池化层操作.md
│ └── [softmax]分类概率计算.md
├── uml
│ ├── plantuml
│ │ ├── imgs
│ │ │ ├── class.png
│ │ │ ├── title.png
│ │ │ ├── version.png
│ │ │ ├── class-line.png
│ │ │ ├── visibility.png
│ │ │ ├── class-namespace.png
│ │ │ ├── visibility-icon.png
│ │ │ ├── vscode-preview.png
│ │ │ ├── extesion-plantuml.png
│ │ │ ├── relationship-identify.png
│ │ │ └── class-field-methods-define.png
│ │ ├── PlantUML简介.md
│ │ ├── VSCode插件使用.md
│ │ ├── 常用命令.md
│ │ └── 本地安装和使用.md
│ ├── imgs
│ │ ├── Class_Dependency.png
│ │ ├── Uml_classes_en.svg.png
│ │ ├── class-multiplicity.png
│ │ ├── 330px-BankAccount1.svg.png
│ │ ├── 450px-UML_role_example.gif
│ │ ├── 450px-UML_role_example.png
│ │ ├── 450px-KP-UML-Aggregation-20060420.svg.png
│ │ ├── 450px-KP-UML-Generalization-20060325.svg.png
│ │ └── 330px-AggregationAndComposition-edited.svg.png
│ ├── 统一建模语言UML.md
│ └── 类图小结.md
├── algorithm
│ ├── imgs
│ │ └── tp-fp-tn-fn.png
│ ├── ROC曲线.md
│ ├── dataset.md
│ ├── evaluation-metrics.md
│ ├── TP-FP-TN-FN.md
│ ├── machine-learning.md
│ └── optimization.md
├── cplusplus
│ ├── advanced
│ │ ├── class
│ │ │ ├── imgs
│ │ │ │ ├── base_access.png
│ │ │ │ ├── access_control.png
│ │ │ │ └── multiple_inheritance.png
│ │ │ ├── 类、结构体和共同体.md
│ │ │ ├── static成员.md
│ │ │ ├── 嵌套类定义.md
│ │ │ ├── 析构器.md
│ │ │ ├── 基本类结构.md
│ │ │ ├── 成员函数概述.md
│ │ │ └── 类定义.md
│ │ ├── smart-pointer
│ │ │ ├── imgs
│ │ │ │ ├── shared_ptr.png
│ │ │ │ └── unique_ptr.png
│ │ │ ├── 使用原始指针还是智能指针.md
│ │ │ ├── weak_ptr.md
│ │ │ └── 智能指针类型.md
│ │ ├── reference
│ │ │ ├── 指针引用.md
│ │ │ ├── 引用类型函数操作.md
│ │ │ └── 引用概述.md
│ │ ├── stl
│ │ │ ├── [shuffle]随机重排列.md
│ │ │ ├── find.md
│ │ │ ├── for_each.md
│ │ │ ├── map.md
│ │ │ ├── stack.md
│ │ │ ├── queue.md
│ │ │ └── vector.md
│ │ └── template
│ │ │ ├── 模板和名称解析.md
│ │ │ └── 函数模板.md
│ ├── get-started
│ │ ├── basic-concepts
│ │ │ ├── imgs
│ │ │ │ ├── include.png
│ │ │ │ ├── Comp-link.png
│ │ │ │ └── overloading_consideration.png
│ │ │ ├── 指针引用.md
│ │ │ ├── 临时对象.md
│ │ │ └── 声明和定义.md
│ │ ├── type-cast-deduce
│ │ │ ├── imgs
│ │ │ │ ├── decltype.png
│ │ │ │ └── promotion.png
│ │ │ ├── 类型概述.md
│ │ │ ├── decltype.md
│ │ │ ├── void类型.md
│ │ │ ├── auto.md
│ │ │ └── 标准转换.md
│ │ ├── pointer-array
│ │ │ ├── 原始数组.md
│ │ │ ├── 指针常量和常量指针.md
│ │ │ ├── 二维数组和二级指针.md
│ │ │ ├── 指针名和数组名的区别.md
│ │ │ ├── 指针类型.md
│ │ │ ├── 指针和数组.md
│ │ │ └── const指针和volatile指针.md
│ │ ├── operator-overload
│ │ │ ├── 二元运算符重载.md
│ │ │ ├── 函数调用运算符重载.md
│ │ │ ├── 赋值运算符重载.md
│ │ │ └── 下标运算符重载.md
│ │ └── keywords
│ │ │ ├── size_t.md
│ │ │ ├── nullptr.md
│ │ │ └── main.md
│ ├── C++标准.md
│ ├── faq
│ │ ├── ISO C++ forbids converting a string constant to char*.md
│ │ └── multiple-definition-of.md
│ └── 学习C++之路.md
└── index.md
├── samples
└── plantuml
│ ├── version.puml
│ ├── common.puml
│ └── class.puml
├── py
├── .gitignore
├── data
│ ├── baboon.jpg
│ ├── lena.jpg
│ ├── HappyFish.jpg
│ └── butterfly.jpg
├── lr
│ └── __init__.py
└── data_preprocessing
│ ├── __init__.py
│ ├── compose.py
│ ├── resize.py
│ ├── crop.py
│ ├── flip.py
│ ├── erase.py
│ └── color.py
├── patches
└── simhei.ttf
├── requirements.txt
└── tools
├── createsamples
├── CMakeLists.txt
└── .vscode
│ ├── c_cpp_properties.json
│ └── tasks.json
└── traincascade
├── CMakeLists.txt
├── .vscode
├── c_cpp_properties.json
└── settings.json
└── imagestorage.h

/.gitignore:
--------------------------------------------------------------------------------

.vscode

docs/build/
--------------------------------------------------------------------------------
/docs/cnn/引言.md:
--------------------------------------------------------------------------------

# Introduction

This module introduces the structure and concepts of layers, activation functions, and network architectures related to convolutional neural networks.
--------------------------------------------------------------------------------
/samples/plantuml/version.puml:
--------------------------------------------------------------------------------
@startuml
version
@enduml
--------------------------------------------------------------------------------
/py/.gitignore:
--------------------------------------------------------------------------------
lr/__pycache__/

data/

.idea/
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
mkdocs==1.0.4
mkdocs-material==4.6.0
mkdocs-minify-plugin==0.2.1
Markdown==3.1.1
markdown-katex==201912.11b0
--------------------------------------------------------------------------------
/samples/plantuml/common.puml:
--------------------------------------------------------------------------------
@startuml
scale 300*200
title 第一行bababa\n第二行bababa

class Hello {
  + hi: int
}
@enduml
--------------------------------------------------------------------------------
/py/lr/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

"""
@date: 2020/5/2 下午2:36
@file: __init__.py.py
@author: zj
@description:
"""
--------------------------------------------------------------------------------
/py/data_preprocessing/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

"""
@date: 2020/4/30 下午1:51
@file: __init__.py.py
@author: zj
@description:
"""
--------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Truncate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Truncate.png -------------------------------------------------------------------------------- /docs/opencv/code/imgs/Laplace_Operator_Tutorial_Theory_ddIntensity.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/opencv/code/imgs/Laplace_Operator_Tutorial_Theory_ddIntensity.jpg -------------------------------------------------------------------------------- /docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Base_Figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Base_Figure.png -------------------------------------------------------------------------------- /docs/pytorch/preprocessing/imgs/sphx_glr_data_loading_tutorial_001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/pytorch/preprocessing/imgs/sphx_glr_data_loading_tutorial_001.png -------------------------------------------------------------------------------- /docs/pytorch/preprocessing/imgs/sphx_glr_data_loading_tutorial_004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/pytorch/preprocessing/imgs/sphx_glr_data_loading_tutorial_004.png -------------------------------------------------------------------------------- /docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Zero_Inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Zero_Inverted.png -------------------------------------------------------------------------------- /docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Binary_Inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/opencv/code/imgs/thresh/Threshold_Tutorial_Theory_Binary_Inverted.png -------------------------------------------------------------------------------- /docs/cplusplus/get-started/basic-concepts/imgs/overloading_consideration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Document-Collection/image-processing/HEAD/docs/cplusplus/get-started/basic-concepts/imgs/overloading_consideration.png -------------------------------------------------------------------------------- /docs/pytorch/可视化.md: -------------------------------------------------------------------------------- 1 | 2 | # 可视化 3 | 4 | * [[PyTorch]Tensorboard可视化实现](https://blog.zhujian.life/posts/eb6f2b71.html) 5 | * [[PyTorch]Tensorboard使用实践](https://blog.zhujian.life/posts/f793688d.html) 6 | * [模型可视化工具和库](https://blog.zhujian.life/posts/d813343e.html) 
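上述链接主要围绕 `TensorBoard` 展开,下面补充一个最小的标量记录示意(基于 `torch.utils.tensorboard`,其中日志目录 `runs/demo`、标签 `train/loss` 以及损失数值均为假设的示例值),仅作参考草图:

```
from torch.utils.tensorboard import SummaryWriter

# 创建写入器,日志写入假设的 runs/demo 目录
writer = SummaryWriter('runs/demo')
for step in range(100):
    # 记录随训练步数变化的标量曲线,此处用假设数值代替真实损失
    writer.add_scalar('train/loss', 1.0 / (step + 1), step)
writer.close()
```

训练过程中执行 `tensorboard --logdir=runs` 即可在浏览器中查看曲线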
-------------------------------------------------------------------------------- /tools/createsamples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | file(GLOB SRCS *.cpp) 2 | ocv_add_application(opencv_createsamples 3 | MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_calib3d opencv_features2d opencv_videoio 4 | SRCS ${SRCS}) 5 | -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/原始数组.md: -------------------------------------------------------------------------------- 1 | 2 | # 原始数组 3 | 4 | ``` 5 | const int LENGTH = 3; 6 | const int WIDTH = 10; 7 | const int HEIGHT = 5; 8 | 9 | int a1[LENGTH] = {1, 2, 3}; 10 | int a2[LENGTH][WIDTH]{}; 11 | int a3[LENGTH][WIDTH][HEIGHT]{}; 12 | ``` 13 | 14 | 通过列表进行初始化 15 | -------------------------------------------------------------------------------- /docs/pytorch/error/RuntimeError: CUDA error: initialization error.md: -------------------------------------------------------------------------------- 1 | 2 | # RuntimeError: CUDA error: initialization error 3 | 4 | 参考:[RuntimeError: CUDA error: initialization error](https://blog.csdn.net/yyhaohaoxuexi/article/details/90718501) 5 | 6 | >不可在DataLoader或DataSet内将任何数据放到CUDA上,而是等到程序运行出DataLoader之后(也就是到了train里的时候)将数据放到CUDA上。 -------------------------------------------------------------------------------- /docs/cplusplus/C++标准.md: -------------------------------------------------------------------------------- 1 | 2 | # C++标准 3 | 4 | 参考:[C++ 的历史](https://zh.cppreference.com/w/cpp/language/history) 5 | 6 | `C++`标准一直在进步,从远到近有以下版本: 7 | 8 | * `C++98` 9 | * `C++03` 10 | * `C++11` 11 | * `C++14` 12 | * `C++17` 13 | * `C++20` 14 | 15 | ## 使用哪个标准 16 | 17 | 之前编译`OpenCV 4.0`的时候,发现其中一个新特征就是完全符合`C++11`标准,所以当前学习和使用`C++11`标准 -------------------------------------------------------------------------------- /docs/python/[numpy]提取数组中属于某一条件的数据.md: -------------------------------------------------------------------------------- 1 | 2 | # [numpy]提取数组中属于某一条件的数据 3 | 4 | 参考:[从numpy数组中取出满足条件的元素](https://blog.csdn.net/qq_27972567/article/details/82889376) 5 | 6 | ``` 7 | import numpy as np 8 | 9 | data = np.arange(10) 10 | print(data) 11 | # 取偶数 12 | print(data[data % 2 == 0]) 13 | # 取奇数 14 | print(data[data % 2 == 1]) 15 | ``` -------------------------------------------------------------------------------- /tools/traincascade/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | ocv_warnings_disable(CMAKE_CXX_FLAGS -Woverloaded-virtual -Winconsistent-missing-override -Wsuggest-override) 2 | file(GLOB SRCS *.cpp) 3 | ocv_add_application(opencv_traincascade 4 | MODULES opencv_core opencv_imgproc opencv_objdetect opencv_imgcodecs opencv_highgui opencv_calib3d opencv_features2d 5 | SRCS ${SRCS}) 6 | -------------------------------------------------------------------------------- /docs/python/[numpy]元素累加.md: -------------------------------------------------------------------------------- 1 | 2 | # [numpy]元素累加 3 | 4 | `Numpy`提供了函数`cumsum`用于元素累加操作 5 | 6 | ``` 7 | import numpy as np 8 | 9 | if __name__ == '__main__': 10 | data = np.arange(10) 11 | print(data) 12 | 13 | res = np.cumsum(data) 14 | print(res) 15 | ################### 输出 16 | [0 1 2 3 4 5 6 7 8 9] 17 | [ 0 1 3 6 10 15 21 28 36 45] 18 | ``` -------------------------------------------------------------------------------- /docs/uml/plantuml/PlantUML简介.md: 
-------------------------------------------------------------------------------- 1 | 2 | # PlantUML简介 3 | 4 | 尝试过许多种`UML`建模工具,包括`Visio、StartUML、EdrawMax、Draw.io`等等。这些建模工具大都通过拖拽控件的方式进行`UML`绘图,实现方式很清晰,但是个人体验不佳,感觉操作很繁琐 5 | 6 | `PlantUML`是一种标记语言,能够通过编写语言的方式生成`UML`图例,相比较之下更加符合程序员的要求:快速、简洁、干净 7 | 8 | * 官网:[https://plantuml.com/zh/](https://plantuml.com/zh/) 9 | 10 | * 教程:[PlantUML教程](http://plantuml.com/zh/guide) -------------------------------------------------------------------------------- /docs/uml/plantuml/VSCode插件使用.md: -------------------------------------------------------------------------------- 1 | 2 | # VSCode插件使用 3 | 4 | `VSCode`提供了`PlantUML`插件,在左侧菜单栏`->Extensions`中搜索`PlantUML`即可 5 | 6 | ![](./imgs/extesion-plantuml.png) 7 | 8 | 安装完成后,即可在`VSCode`中编辑`.puml`文件 9 | 10 | ``` 11 | @startuml 12 | version 13 | @enduml 14 | ``` 15 | 16 | 在编辑框中右键`Preview Current Diagram`,或者使用快捷键`Alt+D`即可完成预览 17 | 18 | ![](./imgs/vscode-preview.png) -------------------------------------------------------------------------------- /docs/cplusplus/advanced/smart-pointer/使用原始指针还是智能指针.md: -------------------------------------------------------------------------------- 1 | 2 | # 使用原始指针还是智能指针 3 | 4 | 参考:[C++智能指针的正确使用方式](https://cloud.tencent.com/developer/article/1517336) 5 | 6 | 智能指针能够自动操作内存分配和删除,所以相比较于指针而言,其更能够确保内存和资源不被泄漏 7 | 8 | 不过由于智能指针额外增加了对内存和引用的操作,所以性能上会弱于原始指针操作 9 | 10 | 使用关键在于是否需要关心指针内存: 11 | 12 | * 如果指向已有数组和对象,不需要指针进行内存管理,那么应该使用原始指针 13 | * 对于使用`new`关键字进行显式内存分配的指针而言,因为需要指针自己完成内存新建和删除操作,所以使用智能指针更加安全 -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/指针常量和常量指针.md: -------------------------------------------------------------------------------- 1 | 2 | # 指针常量和常量指针 3 | 4 | 参考:[const和volatile指针](https://zj-image-processing.readthedocs.io/zh_CN/latest/c++/const%E5%92%8Cvolatile%E6%8C%87%E9%92%88.html) 5 | 6 | ## 解析 7 | 8 | * 指针常量:指针指向的地址为常量,不能修改指针保存的地址,但可以修改地址保存的值。类似于**数组** 9 | * 常量指针:指针指向的对象为常量,可以修改指针保存的地址,但不能修改地址保存的值 10 | 11 | ## 声明 12 | 13 | 指针常量 14 | 15 | ``` 16 | char const *p; 17 | ``` 18 | 19 | 常量指针 20 | 21 | ``` 22 | const char *p; 23 | ``` -------------------------------------------------------------------------------- /docs/python/[numpy]增加或者减少一维.md: -------------------------------------------------------------------------------- 1 | 2 | # [numpy]增加或者减少一维 3 | 4 | 参考:[numpy.ndarray 增加一维](https://blog.csdn.net/a362682954/article/details/81220035) 5 | 6 | 增加维度使用`np.newaxis`,减少维度使用`np.squeeze` 7 | 8 | ## np.newaxis 9 | 10 | ``` 11 | >>> a = np.arange(3) 12 | >>> a.shape 13 | (3,) 14 | # 增加第一维 15 | >>> b = a[np.newaxis, :] 16 | >>> b.shape 17 | (1, 3) 18 | ``` 19 | 20 | ## np.squeeze 21 | 22 | ``` 23 | # 减少第一维 24 | >>> c = b.squeeze(0) 25 | >>> c.shape 26 | (3,) 27 | ``` -------------------------------------------------------------------------------- /docs/pytorch/error/RuntimeError: invalid argument 0: Sizes of tensors must match.md: -------------------------------------------------------------------------------- 1 | 2 | # RuntimeError: invalid argument 0: Sizes of tensors must match except in dimension 0 3 | 4 | 参考:[12 invalid argument 0: Sizes of tensors must match except in dimension 1. 
Got 14 and 13 in dimension 0 at /home/prototype/Downloads/pytorch/aten/src/THC/generic/THCTensorMath.cu:83](https://oldpan.me/archives/pytorch-conmon-problem-in-training) 5 | 6 | 调用`DataSet`的`__getitem__`方法返回的`image`数据维度应该一致 -------------------------------------------------------------------------------- /docs/cplusplus/advanced/class/类、结构体和共同体.md: -------------------------------------------------------------------------------- 1 | 2 | # 类、结构体和共同体 3 | 4 | 类、结构体和共同体是`3`种类的类型,分别通过关键字`class、struct`和`union`定义 5 | 6 | ## 类 vs. 结构体 7 | 8 | 这两个构造在`C++`中是相同的,除了在结构中默认的可访问性是公共的,而在类中默认是私有的 9 | 10 | 类和结构体是用于自定义类型的构造。类和结构体都可以包含数据成员和成员函数,能够描述类型的状态和行为 11 | 12 | ## 访问控制和限制 13 | 14 | 类、结构体和共同体的访问控制(`access control`)和限制(`constraint`)有所差异。如下图所示: 15 | 16 | ![](./imgs/access_control.png) 17 | 18 | * 就访问控制而言,结构体和共同体默认访问权限是`public`,而类的访问权限是`private` 19 | * 就访问限制而言,结构体和类没有任何限制,而共同体每次只能使用一个成员 20 | -------------------------------------------------------------------------------- /docs/cplusplus/get-started/type-cast-deduce/类型概述.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11]类型概述 3 | 4 | 参考:[Type Conversions and Type Safety (Modern C++)](https://docs.microsoft.com/en-us/cpp/cpp/type-conversions-and-type-safety-modern-cpp?view=vs-2019) 5 | 6 | `c++`是强类型编程语言,每个变量、函数参数、函数返回值都必须拥有一个类型 7 | 8 | 类型的作用如下: 9 | 10 | 1. 指定变量(或表达式结果)分配的内存 11 | 2. 指定可能存储的值的类型 12 | 3. 指定编译器如何解释这些值(在位模式下) 13 | 4. 指定可以对其进行的操作 14 | 15 | `c++`的类型分为以下几种: 16 | 17 | 1. 基本类型 18 | 2. `void`类型 19 | 3. `string`类型 20 | 4. 用户自定义类型 21 | 5. 指针类型 22 | 23 | 另外也经常使用`const`类型限定符 -------------------------------------------------------------------------------- /tools/createsamples/.vscode/c_cpp_properties.json: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "name": "Linux", 5 | "includePath": [ 6 | "${workspaceFolder}/**", 7 | "/home/zhujian/opencv-3.4.4/install/**" 8 | ], 9 | "defines": [], 10 | "compilerPath": "/usr/bin/gcc", 11 | "cStandard": "c11", 12 | "cppStandard": "c++17", 13 | "intelliSenseMode": "clang-x64" 14 | } 15 | ], 16 | "version": 4 17 | } -------------------------------------------------------------------------------- /docs/opencv/OpenCV概述.md: -------------------------------------------------------------------------------- 1 | 2 | # OpenCV概述 3 | 4 | `OpenCV`(`Open Source Computer Vision Library`,开源计算机视觉库),是一个基于`BSD`协议的计算机视觉库 5 | 6 | `OpenCV`支持多语言开发,包括`C++,Python和Java` 7 | 8 | `OpenCV`支持多平台开发,包括`Windows,Linux,Max OS,Ios和Android` 9 | 10 | 官网地址:[OpenCV](https://opencv.org/) 11 | 12 | ## `opencv_contrib` 13 | 14 | `OpenCV`将稳定功能和`API`接口的代码放置在`opencv`库,将新特征和新功能的代码放置在`opencv_contrib`库 15 | 16 | `github`地址: 17 | 18 | [opencv/opencv](https://github.com/opencv/opencv) 19 | 20 | [opencv/opencv_contrib](https://github.com/opencv/opencv_contrib) 21 | -------------------------------------------------------------------------------- /docs/pytorch/cuda/监控显存使用.md: -------------------------------------------------------------------------------- 1 | 2 | # 监控显存使用 3 | 4 | `PyTorch`提供了两个函数用于显存查询: 5 | 6 | * [memory_allocated](https://pytorch.org/docs/stable/cuda.html#torch.cuda.memory_allocated) 7 | * [max_memory_allocated](https://pytorch.org/docs/stable/cuda.html?highlight=max_memory_allocated#torch.cuda.max_memory_allocated) 8 | 9 | ## memory_allocated 10 | 11 | >torch.cuda.memory_allocated(device=None) 12 | 13 | 查询指定`GPU`中使用的显存大小(字节) 14 | 15 | ## max_memory_allocated 16 | 17 | >torch.cuda.max_memory_allocated(device=None) 18 | 19 | 
返回给定设备的张量占用的最大`GPU`内存(字节) -------------------------------------------------------------------------------- /tools/traincascade/.vscode/c_cpp_properties.json: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "name": "Linux", 5 | "includePath": [ 6 | "${workspaceFolder}/**", 7 | "/home/zhujian/opencv-3.4.4/install/include/**" 8 | ], 9 | "defines": [], 10 | "compilerPath": "/usr/bin/gcc", 11 | "cStandard": "c11", 12 | "cppStandard": "c++17", 13 | "intelliSenseMode": "clang-x64" 14 | } 15 | ], 16 | "version": 4 17 | } -------------------------------------------------------------------------------- /docs/python/[numpy][clip]限制取值范围.md: -------------------------------------------------------------------------------- 1 | 2 | # [numpy][clip]限制取值范围 3 | 4 | >clip(a, a_min, a_max, out=None, **kwargs) 5 | 6 | 将数组取值限制为给定最小值和最大值之间 7 | 8 | ``` 9 | >>> import numpy as np 10 | >>> 11 | >>> a = np.arange(5) 12 | >>> a 13 | array([0, 1, 2, 3, 4]) 14 | >>> np.clip(a, 2, 3) 15 | array([2, 2, 2, 3, 3]) 16 | >>> a = np.arange(12).reshape(3,4) 17 | >>> a 18 | array([[ 0, 1, 2, 3], 19 | [ 4, 5, 6, 7], 20 | [ 8, 9, 10, 11]]) 21 | >>> np.clip(a, 3, 9) 22 | array([[3, 3, 3, 3], 23 | [4, 5, 6, 7], 24 | [8, 9, 9, 9]]) 25 | ``` -------------------------------------------------------------------------------- /docs/pytorch/cuda/[CUDA_VISIBLE_DEVICES]指定哪张卡运行.md: -------------------------------------------------------------------------------- 1 | 2 | # [CUDA_VISIBLE_DEVICES]指定哪张卡运行 3 | 4 | 参考:[CUDA Pro Tip: Control GPU Visibility with CUDA_VISIBLE_DEVICES](https://devblogs.nvidia.com/cuda-pro-tip-control-gpu-visibility-cuda_visible_devices/) 5 | 6 | ## 使用 7 | 8 | 在多卡环境下,可以设置环境变量`CUDA_VISIBLE_DEVICES`来指定要运行的`GPU` 9 | 10 | ## 示例 11 | 12 | 单张卡 13 | 14 | ``` 15 | # 设置方式一 16 | CUDA_VISIBLE_DEVICES=0 python xxx.py 17 | # 设置方式二 18 | export CUDA_VISIBLE_DEVICES=0 19 | pytohn xxx.py 20 | ``` 21 | 22 | 多张卡 23 | 24 | ``` 25 | CUDA_VISIBLE_DEVICES='0,1' 26 | ``` -------------------------------------------------------------------------------- /docs/opencv/code/运行时间统计.md: -------------------------------------------------------------------------------- 1 | 2 | # 运行时间统计 3 | 4 | 参考:[Performance Measurement and Improvement Techniques](https://docs.opencv.org/4.1.0/dc/d71/tutorial_py_optimization.html) 5 | 6 | `OpenCV`提供了函数[getTickCount](https://docs.opencv.org/master/db/de0/group__core__utils.html#gae73f58000611a1af25dd36d496bf4487)和[getTickFrequency](https://docs.opencv.org/master/db/de0/group__core__utils.html#ga705441a9ef01f47acdc55d87fbe5090c)来计算程序运行时间(单位:秒) 7 | 8 | ``` 9 | double t = (double) getTickCount(); 10 | // do something ... 
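// 例如可在此处执行一次耗时操作(假设 src、dst 为已有的 cv::Mat):cv::GaussianBlur(src, dst, cv::Size(5, 5), 0);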
11 | t = ((double) getTickCount() - t) / getTickFrequency(); 12 | ``` -------------------------------------------------------------------------------- /docs/pytorch/[nonzero]非零元素下标.md: -------------------------------------------------------------------------------- 1 | 2 | # [nonzero]非零元素下标 3 | 4 | [torch.nonzero](https://pytorch.org/docs/stable/torch.html?highlight=nonzero#torch.nonzero)能够返回张量中所有非零元素下标 5 | 6 | ## 定义 7 | 8 | ``` 9 | torch.nonzero(input, *, out=None, as_tuple=False) → LongTensor or tuple of LongTensors 10 | ``` 11 | 12 | 返回一个`2`维张量,每一行表示一个元素的下标 13 | 14 | ## 测试 15 | 16 | ``` 17 | >>> import torch 18 | >>> a = torch.tensor([1, 1, 1, 0, 1]) 19 | >>> torch.nonzero(a) 20 | tensor([[0], 21 | [1], 22 | [2], 23 | [4]]) 24 | >>> torch.nonzero(a).shape 25 | torch.Size([4, 1]) 26 | ``` -------------------------------------------------------------------------------- /docs/python/[python3.6][f-strings]字符串连接.md: -------------------------------------------------------------------------------- 1 | 2 | # [python3.6][f-strings]字符串连接 3 | 4 | 最近接触到一种新的字符串连接方式:[f-strings](https://www.python.org/dev/peps/pep-0498/#how-to-denote-f-strings)。这是从`Python3.6`开始的一个新的语法糖 5 | 6 | ## 实现 7 | 8 | ``` 9 | str_demo = f'balabala {TEXT} balabala' 10 | ``` 11 | 12 | 在字符串引号前面添加前缀`f`,在字符串内部的大括号里面添加前面定义的变量`TEXT`。在编译期间,`Python`解释器会自动解析该字符串的连接操作,此操作类似于之前的`str.format()`方法 13 | 14 | ## 示例 15 | 16 | ``` 17 | >>> text='abcd' 18 | >>> text 19 | 'abcd' 20 | >>> f'this is {text}' 21 | 'this is abcd' 22 | >>> 23 | >>> num=100 24 | >>> f'num is {num}' 25 | 'num is 100' 26 | ``` -------------------------------------------------------------------------------- /docs/opencv/install-configure/[PyCharm]解码opencv python库.md: -------------------------------------------------------------------------------- 1 | 2 | # [PyCharm]解码opencv python库 3 | 4 | `opencv`源码编译得到的`python`库仅是一个`.so`文件,在`vscode`中编辑代码时无法跳转到内部,但是在`pycharm`中可以查看函数头和常量定义 5 | 6 | 进入(`Ctrl+B`)之后发现是一个`__init__.py`文件,存储`python_stubs`路径下 7 | 8 | /home/zj/.PyCharm2018.3/system/python_stubs/-1678504091/cv2/__init__.py 9 | 10 | 上网查找了许久,参考 11 | 12 | [pycharm的python_stubs](https://blog.csdn.net/u013128262/article/details/81491009) 13 | 14 | [PyCharm, what is python_stubs?](https://stackoverflow.com/questions/24266114/pycharm-what-is-python-stubs) 15 | 16 | `pycharm`自己解码了`cv2.so`文件,生成了类头文件以便更好的编程 -------------------------------------------------------------------------------- /docs/pytorch/model/查询模型参数总数.md: -------------------------------------------------------------------------------- 1 | 2 | # 查询模型参数总数 3 | 4 | ## numel 5 | 6 | 参考:[torch.numel(input) → int](https://pytorch.org/docs/stable/torch.html?highlight=numel#torch.numel) 7 | 8 | 函数`numel`作用是返回输入张量的元素总数 9 | 10 | ``` 11 | >>> import torch 12 | >>> 13 | >>> a = torch.randn((2,3,4)) 14 | >>> a.shape 15 | torch.Size([2, 3, 4]) 16 | >>> torch.numel(a) 17 | 24 18 | >>> a.numel() 19 | 24 20 | ``` 21 | 22 | ## 查询模型参数总数 23 | 24 | 参考:[5.查看网络总参数](https://www.jianshu.com/p/fcafcfb3d887) 25 | 26 | ``` 27 | net = Model() 28 | print('# Model parameters:', sum(param.numel() for param in net.parameters())) 29 | ``` -------------------------------------------------------------------------------- /docs/python/[around]四舍五入.md: -------------------------------------------------------------------------------- 1 | 2 | # [around]四舍五入 3 | 4 | 函数[np.around](https://numpy.org/doc/1.18/reference/generated/numpy.around.html)能够对数据执行四舍五入操作 5 | 6 | ## 定义 7 | 8 | >numpy.around(a, decimals=0, out=None)[source] 9 | 10 | * `a`:输入数据,可以是单个数字,或者列表/数组 11 | * 
`decimals`:保留几位小数。默认不保留小数 12 | 13 | ## 示例 14 | 15 | ``` 16 | # 单个数字 17 | >>> np.around(3.33) 18 | 3.0 19 | # 数组 20 | >>> a = np.random.randn(3) 21 | >>> a 22 | array([ 0.02557499, -0.05847877, -1.53689999]) 23 | >>> 24 | >>> np.around(a) 25 | array([ 0., -0., -2.]) 26 | # 保留2位小数 27 | >>> np.around(a, decimals=2) 28 | array([ 0.03, -0.06, -1.54]) 29 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/operator-overload/二元运算符重载.md: -------------------------------------------------------------------------------- 1 | 2 | # 二元运算符重载 3 | 4 | 参考:[Binary Operators](https://docs.microsoft.com/en-us/cpp/cpp/binary-operators?view=vs-2019) 5 | 6 | 二元运算符重载和一元运算符重载类似,区别仅在于参数个数 7 | 8 | ## 二元运算符 9 | 10 | 可被重定义的二元运算符:[Redefinable Binary Operators](https://docs.microsoft.com/en-us/cpp/cpp/binary-operators?view=vs-2019#redefinable-binary-operators) 11 | 12 | ## 语法 13 | 14 | ``` 15 | // 非静态成员函数 16 | ret-type operator op(arg) 17 | // 全局函数 18 | ret-type operator op(arg1, arg2) 19 | ``` 20 | 21 | * `ret-type`表示返回值 22 | * `op`表示运算符号 23 | * `arg_`表示参数 24 | 25 | 对二元运算符的返回类型没有限制,通常返回类类型或对类类型的引用 -------------------------------------------------------------------------------- /docs/python/json文件读写.md: -------------------------------------------------------------------------------- 1 | 2 | # json文件读写 3 | 4 | 参考:[6.2 读写JSON数据](https://python3-cookbook.readthedocs.io/zh_CN/latest/c06/p02_read-write_json_data.html) 5 | 6 | ## 导入 7 | 8 | ``` 9 | import json 10 | ``` 11 | 12 | ## 示例 13 | 14 | 读取文件 15 | 16 | ``` 17 | import json 18 | 19 | if __name__ == '__main__': 20 | ... 21 | ... 22 | with open(file_path, 'r') as f: 23 | json_data = json.load(f) 24 | 25 | ``` 26 | 27 | 写入文件 28 | 29 | ``` 30 | import json 31 | 32 | if __name__ == '__main__': 33 | ... 34 | ... 
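# 说明:json_data 为上文读取或自行构造的数据;若需保留中文并格式化输出,可改用 json.dump(json_data, f, ensure_ascii=False, indent=4)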
35 | with open(file_path, 'w') as f: 36 | json.dump(json_data, f) 37 | ``` -------------------------------------------------------------------------------- /docs/matplotlib/y轴坐标错乱.md: -------------------------------------------------------------------------------- 1 | 2 | # y轴坐标错乱 3 | 4 | 今天遇到一个问题,`y`轴坐标值出现了乱序,如下图所示: 5 | 6 | ![](./imgs/unorder.png) 7 | 8 | 参考[plt作图时出现横坐标或者纵坐标乱序的解决方法](https://blog.csdn.net/weixin_43748786/article/details/96432047),发现是因为输入`y`轴数据类型不是`np.int/np.float`,在程序中查了一下,发现果真如此,数据类型为`np.str` 9 | 10 | 复现代码如下: 11 | 12 | ``` 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | 16 | 17 | def draw(y): 18 | f = plt.figure() 19 | 20 | x = list(range(len(y))) 21 | plt.scatter(x, y) 22 | 23 | plt.show() 24 | 25 | 26 | if __name__ == '__main__': 27 | a = np.arange(10).astype(np.str) 28 | np.random.shuffle(a) 29 | draw(a) 30 | ``` -------------------------------------------------------------------------------- /docs/python/[pprint]更易读的打印.md: -------------------------------------------------------------------------------- 1 | 2 | # [pprint]更易读的打印 3 | 4 | 参考: 5 | 6 | [pprint — Data pretty printer](https://docs.python.org/3/library/pprint.html) 7 | 8 | [python中pprint模块](https://blog.csdn.net/ZEroJAVAson/article/details/88649650) 9 | 10 | 相比于`print`,`pprint`提供了更加易读的打印结果 11 | 12 | ``` 13 | >>> import pprint 14 | >>> stuff = ['spam', 'eggs', 'lumberjack', 'knights', 'ni'] 15 | >>> stuff.insert(0, stuff) 16 | >>> print(stuff) 17 | [[...], 'spam', 'eggs', 'lumberjack', 'knights', 'ni'] 18 | >>> pprint.pprint(stuff) 19 | [<Recursion on list with id=...>, 20 | 'spam', 21 | 'eggs', 22 | 'lumberjack', 23 | 'knights', 24 | 'ni'] 25 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/keywords/size_t.md: -------------------------------------------------------------------------------- 1 | 2 | # size_t 3 | 4 | 参考: 5 | 6 | [size_t](http://www.cplusplus.com/reference/cstddef/size_t/?kw=size_t) 7 | 8 | [std::size_t](https://en.cppreference.com/w/cpp/types/size_t) 9 | 10 | 无符号整数值,可作为基本无符号整数类型的别名 11 | 12 | 它是一种能够以字节表示任何对象大小的类型:`size_t`是`sizeof`运算符返回的类型,在标准库中广泛用于表示大小和计数 13 | 14 | ``` 15 | #include <cstddef> 16 | #include <iostream> 17 | #include <array> 18 | 19 | int main() 20 | { 21 | std::array<std::size_t, 10> a; 22 | for (std::size_t i = 0; i != a.size(); ++i) 23 | a[i] = i; 24 | for (std::size_t i = a.size()-1; i < a.size(); --i) 25 | std::cout << a[i] << " "; 26 | } 27 | ``` -------------------------------------------------------------------------------- /docs/opencv/code/Understanding-Features.md: -------------------------------------------------------------------------------- 1 | 2 | # 特征/特征检测/特征描述 3 | 4 | `OpenCV`提供了一篇很好的文章来介绍什么是特征、特征检测以及特征描述符:[Understanding Features](https://docs.opencv.org/master/df/d54/tutorial_py_features_meaning.html) 5 | 6 | ## 什么是特征 7 | 8 | 对计算机视觉而言,特征就是图像中的一部分区域 9 | 10 | ## 什么是好的特征 11 | 12 | 好的特征就是从该区域向图像任何方向移动时,其内容会发生最大的变化,比如角特征(`corner features`)或者斑点特征(`blob features`) 13 | 14 | ## 什么是特征检测 15 | 16 | 发现这些图像特征的方法就是特征检测(`Feature Detection`) 17 | 18 | ## 什么是特征描述 19 | 20 | 如何用计算机语言描述图像特征所在区域,使得计算机可以在其他图像上发现相同的特征,这一方法称为特征描述(`Feature Description`) 21 | 22 | ## 小结 23 | 24 | * 好的特征 = 图像中最独特的区域 25 | * 特征检测 = 发现图像中的特征 26 | * 特征描述 = 发现不同图像中相同的特征 27 | 28 | 之后就可以进行图像对齐、图像分类、图像检测等任务了 -------------------------------------------------------------------------------- /docs/python/pip-python-bad-interpreter.md: -------------------------------------------------------------------------------- 1 | 2 | # [pip]python: bad interpreter 3 | 4 | ## 问题 5 | 6 | 使用`pip`命令安装`python`库时发现了如下错误 7 | 8 |
``` 9 | $ pip install imgaug 10 | -bash: /home/zj/anaconda3/envs/cv2/bin/pip: /home/zj/anaconda3/envs/cv/bin/python: bad interpreter: No such file or directory 11 | ``` 12 | 13 | ## 解析 14 | 15 | 使用`Anaconda`配置`Python`开发环境,除了`base`环境外还创建了一个新的环境`cv`,后来想要配置另一套环境,就直接在`envs`环境下复制了`cv`,重命名为`cv2`,把`cv`环境给删除了 16 | 17 | 在`cv2`环境下能够正常的运行`Python`程序,但是使用`pip`安装新库时发现了如上错误 18 | 19 | ## 解决 20 | 21 | 所有还是需要使用`Anaconda`提供的`clone`命令进行环境移植 22 | 23 | 另外在网上找到一种解决方法,就是在执行`pip`命令时指定使用哪个`python` 24 | 25 | ``` 26 | $ pythoh -m pip install imgaug 27 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/class/static成员.md: -------------------------------------------------------------------------------- 1 | 2 | # static成员 3 | 4 | 参考:[Static Members (C++)](https://docs.microsoft.com/en-us/cpp/cpp/static-members-cpp?view=vs-2019) 5 | 6 | 类可以包含静态成员数据和成员函数。当一个数据成员声明为静态时,该类的所有对象只维护一个数据副本 7 | 8 | 静态数据成员不是给定类类型的对象的一部分。因此,静态数据成员的声明不被视为定义。数据成员在类作用域中声明,但定义在文件作用域中执行。这些静态成员具有外部链接。以下示例说明了这一点: 9 | 10 | ## 使用 11 | 12 | 可以引用静态数据成员而不引用类类型的对象 13 | 14 | ``` 15 | long nBytes = BufferedOutput::bytecount; 16 | ``` 17 | 18 | 也可以通过类对象引用 19 | 20 | ``` 21 | BufferedOutput Console; 22 | long nBytes = Console.bytecount; 23 | ``` 24 | 25 | ## 访问规则 26 | 27 | 静态数据成员受类成员访问规则的约束。对于私有定义的静态数据成员而言,只允许类成员函数和友元函数进行私有访问。例外情况是,不管静态数据成员的访问限制如何,都必须在文件作用域中定义它们。如果要显式初始化数据成员,则必须为该定义提供初始值设定项 -------------------------------------------------------------------------------- /docs/uml/plantuml/常用命令.md: -------------------------------------------------------------------------------- 1 | 2 | # 常用命令 3 | 4 | 参考:[通用命令](https://plantuml.com/zh/commons) 5 | 6 | 除了绘制各种`UML`图的命令外,常常需要一些额外操作,比如放大图形、添加标题等等 7 | 8 | ## 注释 9 | 10 | 单行注释使用单引号`'` 11 | 12 | 多行注释使用`/'`和`'/`作为注释的起始和结束 13 | 14 | ## 缩放图 15 | 16 | 使用`scale`命令缩放生存的`UML`图。示例如下: 17 | 18 | ``` 19 | @startuml 20 | ' 放大1.5倍 21 | scale 1.5 22 | ' 指定宽度为200 23 | scale 200 width 24 | ' 指定图像大小为200*100 25 | scale 200*100 26 | ' 指定图像最大为300*200 27 | scale max 300*200 28 | @enduml 29 | ``` 30 | 31 | ## 标题 32 | 33 | 使用`title`命令添加标题。示例如下: 34 | 35 | ``` 36 | @startuml 37 | scale 300*200 38 | title 第一行bababa\n第二行bababa 39 | 40 | class Hello { 41 | + hi: int 42 | } 43 | @enduml 44 | ``` 45 | 46 | ![](./imgs/title.png) 47 | 48 | ## -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # 本仓库不再维护,更新的内容前往:[ZJDoc/VisionGuide](https://github.com/ZJDoc/VisionGuide) 2 | 3 | # 引言 4 | 5 | 图像处理是一个综合性研究和开发领域,涉及基础概念、图像算法、代码实现等等。本文档小结相关的内容 6 | 7 | * 编程语言 8 | * [Python](./python/类操作.md) 9 | * [C++11](./cplusplus/学习C++之路.md) 10 | * 代码库 11 | * [OpenCV](./opencv/OpenCV概述.md) 12 | * [Matplotlib](./matplotlib/引言.md) 13 | * [PyTorch](./pytorch/引言.md) 14 | * 算法 15 | * [机器学习](./algorithm/machine-learning.md) 16 | * [深度学习](./algorithm/deep-learning.md) 17 | * [最优化](./algorithm/optimization.md) 18 | * [数据集](./algorithm/dataset.md) 19 | * [评价标准](./algorithm/evaluation-metrics.md) 20 | * 建模 21 | * [UML](./uml/统一建模语言UML.md) 22 | * [PlantUML](./uml/plantuml/PlantUML简介.md) -------------------------------------------------------------------------------- /docs/cplusplus/faq/ISO C++ forbids converting a string constant to char*.md: -------------------------------------------------------------------------------- 1 | 2 | # warning: ISO C++ forbids converting a string constant to 'char*' [-Wwrite-strings] 3 | 4 | 测试如下代码时遇到上述问题 5 | 6 | ``` 7 | char *pArray[] = {"apple", "pear", "banana", 
"orange", "pineApple"}; 8 | for (int i = 0; i < sizeof(pArray) / sizeof(*pArray); i++) { 9 | std::cout << pArray[i] << std::endl; 10 | } 11 | ``` 12 | 13 | ``` 14 | warning: ISO C++ forbids converting a string constant to ‘char*’ [-Wwrite-strings] 15 | ``` 16 | 17 | 参考[warning: ISO C++ forbids converting a string constant to 'char*' [-Wwrite-strings]](https://blog.csdn.net/creambean/article/details/89459858) 18 | 19 | `C++11`禁止将字符串常量赋值给`char*`类型,解决方式之一就是将`char*`设置为`const char*` -------------------------------------------------------------------------------- /docs/cplusplus/faq/multiple-definition-of.md: -------------------------------------------------------------------------------- 1 | 2 | # multiple definition of 3 | 4 | ## 问题描述 5 | 6 | 新建头文件`macro.h`,添加全局变量 7 | 8 | #ifndef C_MACRO_H 9 | #define C_MACRO_H 10 | 11 | #include 12 | using namespace std; 13 | 14 | // 人脸检测模型路径 15 | string FACE_CASCADE_PATH = "../../models/haarcascade_frontalface_default.xml"; 16 | 17 | #endif //C_MACRO_H 18 | 19 | 编译时出错 20 | 21 | CMakeFiles/c__.dir/OpencvDetect.cpp.o:(.bss+0x0): multiple definition of `WINDOWS_NAME[abi:cxx11]' 22 | CMakeFiles/c__.dir/main.cpp.o:(.bss+0x0): first defined here 23 | 24 | ## 问题解析 25 | 26 | 头文件被多次引用,导致重复定义 27 | 28 | 解决:设置成常量,添加`const`关键字 29 | 30 | const string FACE_CASCADE_PATH = "../../models/haarcascade_frontalface_default.xml"; -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/二维数组和二级指针.md: -------------------------------------------------------------------------------- 1 | 2 | # 二维数组和二级指针 3 | 4 | * 二维数组是指向数组的数组 5 | * 二级指针是指向指针的指针 6 | 7 | 一维数组名可以赋值给一级指针,但是二级数组名不可以赋值给二级指针 8 | 9 | 参考:[二维数组名不能赋值给二级指针](https://zhidao.baidu.com/question/1512707882225632860.html?qbl=relate_question_0&word=c%2B%2B%20%CE%AA%CA%B2%C3%B4%B6%FE%BC%B6%D6%B8%D5%EB%B2%BB%C4%DC%B8%B3%D6%B5%B8%F8%B6%FE%BC%B6%CA%FD%D7%E9) 10 | 11 | ``` 12 | const int LENGTH = 3; 13 | const int WIDTH = 2; 14 | 15 | int arr[LENGTH][WIDTH]={}; 16 | int **p; 17 | 18 | p = arr; // Assigning to 'int **' from incompatible type 'int [3][2]' 19 | ``` 20 | 21 | * 对于二维指针`p`而言,其声明为`int*`类型的一维指针 22 | * 对于二维数组`arr`而言,其声明为`int[4]`类型的一维数组 23 | 24 | 因为两者声明类型不一致,所以无法兼容。如果将`p`定义为数组指针即可操作 25 | 26 | ``` 27 | char (*p2)[WIDTH] = arr; 28 | ``` 29 | -------------------------------------------------------------------------------- /docs/python/[collections][defaultdict]更安全的dict.md: -------------------------------------------------------------------------------- 1 | 2 | # [collections][defaultdict]更安全的dict 3 | 4 | 参考: 5 | 6 | [是时候用 defaultdict 和 Counter 代替 dictionary 了](https://zhuanlan.zhihu.com/p/68407137) 7 | 8 | [1-2 collections中deque,defaultdict,OrderedDict](https://www.jianshu.com/p/291bb5641c56) 9 | 10 | [Python 3 collections.defaultdict() 与 dict的使用和区别](https://www.cnblogs.com/herbert/archive/2013/01/09/2852843.html) 11 | 12 | `defaultdict`支持`dict`的用法,并且提供了更加安全的设置 13 | 14 | ## 默认值设置 15 | 16 | `defaultdict`内置了一个工厂函数,当访问不存在的键时,会自动生成一个内置类型的对象 17 | 18 | ``` 19 | # 设置内置类型为int 20 | from collections import defaultdict 21 | 22 | if __name__ == '__main__': 23 | a = defaultdict(int) 24 | print(a) 25 | # 当访问不存在键`a`时,得到0 26 | print(a['a']) 27 | ################# 输出 28 | defaultdict(, {}) 29 | 0 30 | ``` -------------------------------------------------------------------------------- /docs/pytorch/[clamp]限制取值范围.md: -------------------------------------------------------------------------------- 1 | 2 | # [clamp]限制取值范围 3 | 4 | 参考:[[numpy][clip]限制取值范围](../python/[numpy][clip]限制取值范围.md) 5 | 6 
| [torch.clamp](https://pytorch.org/docs/stable/torch.html#torch.clamp)和`numpy.clip`作用一致,用于限制数组取值范围 7 | 8 | ## 定义 9 | 10 | >torch.clamp(input, min, max, out=None) → Tensor 11 | 12 | $$ 13 | y_{i}=\left\{\begin{matrix} 14 | min & if \ x_{i} < min \\ 15 | x_{i} & if \ min \leq x_{i} \leq max \\ 16 | max & if \ x_{i} > max 17 | \end{matrix}\right. 18 | $$ 19 | 20 | 如果要作用于`input`,则使用`torch.clamp_` 21 | 22 | ## 示例 23 | 24 | ``` 25 | >>> import torch 26 | >>> a = torch.arange(12).reshape(3,4) 27 | >>> a 28 | tensor([[ 0, 1, 2, 3], 29 | [ 4, 5, 6, 7], 30 | [ 8, 9, 10, 11]]) 31 | >>> 32 | >>> torch.clamp(a, 3, 5) 33 | tensor([[3, 3, 3, 3], 34 | [4, 5, 5, 5], 35 | [5, 5, 5, 5]]) 36 | ``` -------------------------------------------------------------------------------- /docs/uml/统一建模语言UML.md: -------------------------------------------------------------------------------- 1 | 2 | # 统一建模语言UML 3 | 4 | 参考: 5 | 6 | [统一建模语言](https://baike.baidu.com/item/%E7%BB%9F%E4%B8%80%E5%BB%BA%E6%A8%A1%E8%AF%AD%E8%A8%80/3160571?fromtitle=UML&fromid=446747&fr=aladdin) 7 | 8 | [UML 教程](https://www.w3cschool.cn/uml_tutorial/) 9 | 10 | >统一建模语言(Unified Modeling Language,UML)是一种为面向对象系统的产品进行说明、可视化和编制文档的一种标准语言,是非专利的第三代建模和规约语言。UML使用面向对象设计的的建模工具,但独立于任何具体程序设计语言 11 | 12 | `UML`通常表示为图例。为了让大家在不需要沟通的情况下能够通过文档有效的对产品进行了解,需要一种统一的,高效的绘图方式,也就是`UML` 13 | 14 | ## 模型 15 | 16 | `UML`包含了多种模型: 17 | 18 | 1. 功能模型:从用户的角度展示系统的功能,包括用例图 19 | 2. 对象模型:采用对象,属性,操作,关联等概念展示系统的结构和基础,包括类别图、对象图 20 | 3. 动态模型:展现系统的内部行为。包括序列图,活动图,状态图 21 | 22 | ## 使用 23 | 24 | `UML`是一个非常具有想象力的产品,它希望通过统一的标准,完美的解构不同语言实现的内容。但是在实际操作时,`UML`的标准和`UML`工具的实现总会有差距,使得绘图过程中常常出现一些疑惑:这些功能如何实现?应不应该加入这些语法? 25 | 26 | 在网上也找了许多的资料,自己也学习了很久,小结使用的心得,就是注重内容大于实现:不再过度关注于实现的细节,而是根据实现的语言,更加符合自己的阅读习惯,实现`UML`绘图 -------------------------------------------------------------------------------- /docs/cplusplus/advanced/reference/指针引用.md: -------------------------------------------------------------------------------- 1 | 2 | # 指针引用 3 | 4 | 参考:[References to pointers](https://docs.microsoft.com/en-us/cpp/cpp/references-to-pointers?view=vs-2019) 5 | 6 | 对指针的引用(`Reference to pointer`)可以用与对对象的引用(`reference to object`)几乎相同的方式声明。指针的引用是一个可修改的值,可以像普通指针一样使用 7 | 8 | ## 示例 9 | 10 | ``` 11 | void f(int *&b) { 12 | for (int i = 0; i < 10; i++) { 13 | cout << b[i] << " "; 14 | b[i] = 10 - i; 15 | } 16 | cout << endl; 17 | } 18 | 19 | int main(int argc, char *argv[]) { 20 | int *a; 21 | // 指针引用b和指针a指向同一个地址 22 | int *&b = a; 23 | 24 | b = new int[10]; 25 | for (int i = 0; i < 10; i++) { 26 | b[i] = i; 27 | } 28 | 29 | cout << (void *) a << endl; 30 | cout << (void *) b << endl; 31 | 32 | f(b); 33 | for (int i = 0; i < 10; i++) { 34 | cout << a[i] << " "; 35 | } 36 | } 37 | ``` -------------------------------------------------------------------------------- /docs/python/pip-更新国内镜像源.md: -------------------------------------------------------------------------------- 1 | 2 | # [pip]更新国内镜像源 3 | 4 | 有两种方式配置国内镜像源: 5 | 6 | 1. 参数配置 7 | 2.
文件配置 8 | 9 | ## 参数配置 10 | 11 | 参考:[python - pip换源,更换pip源到国内镜像](https://blog.csdn.net/xuezhangjun0121/article/details/81664260) 12 | 13 | 添加`-i`参数,指定国内镜像源 14 | 15 | ``` 16 | $ pip install <软件名> -i https://pypi.tuna.tsinghua.edu.cn/simple 17 | ``` 18 | 19 | 可选的有 20 | 21 | * 阿里云 http://mirrors.aliyun.com/pypi/simple/ 22 | * 中国科技大学 https://pypi.mirrors.ustc.edu.cn/simple/ 23 | * 豆瓣(douban) http://pypi.douban.com/simple/ 24 | * 清华大学 https://pypi.tuna.tsinghua.edu.cn/simple/ 25 | * 中国科学技术大学 http://pypi.mirrors.ustc.edu.cn/simple/ 26 | 27 | ## 文件配置 28 | 29 | 参考:[Python pip 修改镜像源为豆瓣源](https://www.douban.com/note/672475302/) 30 | 31 | 修改配置文件`~/.pip/pip.conf`,添加 32 | 33 | ``` 34 | [global] 35 | index-url = https://pypi.doubanio.com/simple 36 | trusted-host = pypi.doubanio.com 37 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/basic-concepts/指针引用.md: -------------------------------------------------------------------------------- 1 | 2 | # 指针引用 3 | 4 | 参考:[References to pointers](https://docs.microsoft.com/en-us/cpp/cpp/references-to-pointers?view=vs-2019) 5 | 6 | 对指针的引用(`Reference to pointer`)可以用与对对象的引用(`reference to object`)几乎相同的方式声明。指针的引用是一个可修改的值,可以像普通指针一样使用 7 | 8 | ## 示例 9 | 10 | ``` 11 | void f(int *&b) { 12 | for (int i = 0; i < 10; i++) { 13 | cout << b[i] << " "; 14 | b[i] = 10 - i; 15 | } 16 | cout << endl; 17 | } 18 | 19 | int main(int argc, char *argv[]) { 20 | int *a; 21 | // 指针引用b和指针a指向同一个地址 22 | int *&b = a; 23 | 24 | b = new int[10]; 25 | for (int i = 0; i < 10; i++) { 26 | b[i] = i; 27 | } 28 | 29 | cout << (void *) a << endl; 30 | cout << (void *) b << endl; 31 | 32 | f(b); 33 | for (int i = 0; i < 10; i++) { 34 | cout << a[i] << " "; 35 | } 36 | } 37 | ``` -------------------------------------------------------------------------------- /docs/pytorch/[Conv][Pool]实现原理.md: -------------------------------------------------------------------------------- 1 | 2 | # [Conv][Pool]实现原理 3 | 4 | 之前一直觉得卷积层和池化层的计算如下所示: 5 | 6 | $$ 7 | n_{out} = (n_{in}−F+2P)/S+1 (卷积层)\\ 8 | n_{out} = (n_{in}−F)/S+1 (池化层) 9 | $$ 10 | 11 | * $n_{out}$表示输出维度的特征数 12 | * $n_{in}$表示输入维度的特征数 13 | * $F$表示卷积核大小 14 | * $P$表示零填充大小 15 | * $S$表示步长 16 | 17 | 最近发现`PyTorch`并没有严格按照上述公式实现,其实现参考[A guide to convolution arithmetic for deeplearnin](https://arxiv.org/pdf/1603.07285.pdf)中$2.4$节以及第$3$节所示 18 | 19 | $$ 20 | n_{out} = \left \lfloor \frac {n_{in} + 2p - k}{s} \right \rfloor + 1 (卷积层) 21 | $$ 22 | 23 | $$ 24 | n_{out} = \left \lfloor \frac {n_{in} - k}{s} \right \rfloor + 1 (池化层) 25 | $$ 26 | 27 | 使用了一个向下取整(`floor`)计算,所以在`PyTorch`实现中,不同输入大小(比如`224`和`227`)能够得到相同大小的输出 28 | 29 | `PyTorch`关于卷积层和池化层的实现参考: 30 | 31 | * [Conv2d](https://pytorch.org/docs/master/nn.html#conv2d) 32 | * [MaxPool2d](https://pytorch.org/docs/master/nn.html#maxpool2d) -------------------------------------------------------------------------------- /docs/cplusplus/get-started/operator-overload/函数调用运算符重载.md: -------------------------------------------------------------------------------- 1 | 2 | # 函数调用运算符重载 3 | 4 | ## 语法 5 | 6 | 函数调用运算符`operator()`是一个二元运算符,语法如下: 7 | 8 | ``` 9 | primary-expression ( expression-list ) 10 | ``` 11 | 12 | * `primary-expression`是第一个操作数 13 | * `expression-list`是第二个操作数,有可能为空的参数列表 14 | 15 | **注意 1:函数调用运算符重载必须是非静态成员函数** 16 | 17 | **注意 2:函数调用运算符是应用于对象,而不是函数** 18 | 19 | ## 示例 20 | 21 | 定义类`Point`,重定义函数调用运算符 22 | 23 | ``` 24 | class Point { 25 | public: 26 | Point() { _x = _y = 0; } 27 | 28 | Point &operator()(int dx, int dy) { 29 | _x += dx; 30 | _y += dy; 31 | return 
*this; 32 | } 33 | 34 | inline void print() { 35 | cout << "_x = " << _x << " _y = " << _y << endl; 36 | } 37 | 38 | private: 39 | int _x, _y; 40 | }; 41 | 42 | int main() { 43 | Point pt; 44 | 45 | pt.print(); 46 | pt(3, 2); 47 | pt.print(); 48 | } 49 | ``` -------------------------------------------------------------------------------- /docs/algorithm/ROC曲线.md: -------------------------------------------------------------------------------- 1 | 2 | # ROC曲线 3 | 4 | 参考:[Receiver operating characteristic](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) 5 | 6 | `ROC`曲线,全称是接受者操作特征曲线(`receiver operating characteristic curve`),能够反映二值分类器在不同阈值条件下的检测性能 7 | 8 | ## true positive rate 9 | 10 | 参考:[Sensitivity and specificity](https://en.wikipedia.org/wiki/Sensitivity_and_specificity) 11 | 12 | `ROC`曲线的`y`轴表示真阳性率(`true positive rate(TPR)`),也称为召回率(`recall rate`)或者检测率(`probability of detection`) 13 | 14 | `TPR`计算的是预测为正的正样本(`true positive`)个数占整个正样本的比例,计算公式为`TPR = TP / (TP+FN)` 15 | 16 | ## false positive rate 17 | 18 | 参考:[False positive rate](https://en.wikipedia.org/wiki/False_positive_rate) 19 | 20 | `ROC`曲线的`x`轴表示假阳性率(`false positive rate(FPR)`),也称为误报率(`probability of false alarm`) 21 | 22 | `FPR`计算的是预测为正的负样本(`false positive`)个数占整个负样本的比例,计算公式为`FPR = FP / (FP+TN)` 23 | 24 | ## 解析 25 | 26 | 最好的检测效果位于坐标点(0,1),指的是所有正样本都被检测为正,同时没有负样本被误检为正 -------------------------------------------------------------------------------- /docs/pytorch/cuda/[benchmark]训练加速.md: -------------------------------------------------------------------------------- 1 | 2 | # [benchmark]训练加速 3 | 4 | 阅读工程源码时发现了一种加速训练的方法: 5 | 6 | ``` 7 | if torch.cuda.is_available(): 8 | # This flag allows you to enable the inbuilt cudnn auto-tuner to 9 | # find the best algorithm to use for your hardware.
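# 注意:只有在每次迭代的输入尺寸固定时才能获得加速(原理见下文)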
10 | torch.backends.cudnn.benchmark = True 11 | ``` 12 | 13 | ## 原理 14 | 15 | 参考: 16 | 17 | [What does torch.backends.cudnn.benchmark do?](https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936) 18 | 19 | [Can you use torch.backends.cudnn.benchmark = True after resizing images?](https://discuss.pytorch.org/t/can-you-use-torch-backends-cudnn-benchmark-true-after-resizing-images/40659) 20 | 21 | 因为存在多种卷积实现算法,所以设置`true`将会启动内置`cudnn auto-tuner`,帮助寻找最优的计算算法,从而实现加速目的。其前提条件是每次输入的尺寸固定,如果不固定,那么找到的加速算法不一定适合不同大小的输入计算,不会得到加速效果 22 | 23 | ## 测试 24 | 25 | 参考:[torch.backends.cudnn.benchmark ?!](https://zhuanlan.zhihu.com/p/73711222) -------------------------------------------------------------------------------- /docs/algorithm/dataset.md: -------------------------------------------------------------------------------- 1 | 2 | # 数据集 3 | 4 | * [[数据集]German Credit Data](https://blog.zhujian.life/posts/833d7df4.html) 5 | * [[数据集]Iris](https://blog.zhujian.life/posts/ffa9d775.html) 6 | * [[数据集]Image Localization Dataset](https://blog.zhujian.life/posts/a2d65e1.html) 7 | * [[数据集]Penn-Fudan](https://blog.zhujian.life/posts/6c61a203.html) 8 | * MNIST 9 | * [Python MNIST解压](https://blog.csdn.net/u012005313/article/details/84453316) 10 | * [[数据集]Fashion-MNIST](https://blog.zhujian.life/posts/631c599a.html) 11 | * CIFAR 12 | * [cifar-10数据集解析](https://blog.zhujian.life/posts/43d7ec86.html) 13 | * [cifar-100数据集解析](https://blog.zhujian.life/posts/adb6e880.html) 14 | * PASCAL VOC 15 | * [[数据集]PASCAL VOC 2007](https://blog.zhujian.life/posts/5a56cd45.html) 16 | * [[数据集]PASCAL VOC 2012](https://blog.zhujian.life/posts/d3cd45d1.html) 17 | * [[数据集][PASCAL VOC]07+12](https://blog.zhujian.life/posts/db93f7d2.html) 18 | -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/[shuffle]随机重排列.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl][shuffle]随机重排列 3 | 4 | 参考:[std::shuffle](http://www.cplusplus.com/reference/algorithm/shuffle/) 5 | 6 | `c++`实现了`shuffle`函数用于随机重新排列指定范围内的元素,使用均匀随机数发生器 7 | 8 | ``` 9 | // shuffle algorithm example 10 | #include <iostream> // std::cout 11 | #include <algorithm> // std::shuffle 12 | #include <array> // std::array 13 | #include <random> // std::default_random_engine 14 | #include <chrono> // std::chrono::system_clock 15 | 16 | int main() { 17 | std::array<int, 5> foo{1, 2, 3, 4, 5}; 18 | 19 | // obtain a time-based seed: 20 | long seed = std::chrono::system_clock::now().time_since_epoch().count(); 21 | 22 | shuffle(foo.begin(), foo.end(), std::default_random_engine(seed)); 23 | 24 | std::cout << "shuffled elements:"; 25 | for (int &x: foo) std::cout << ' ' << x; 26 | std::cout << '\n'; 27 | 28 | return 0; 29 | } 30 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/type-cast-deduce/decltype.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11]decltype 3 | 4 | 参考: 5 | 6 | [decltype (C++)](https://docs.microsoft.com/en-us/cpp/cpp/decltype-cpp?view=vs-2019) 7 | 8 | [decltype specifier](https://en.cppreference.com/w/cpp/language/decltype) 9 | 10 | `decltype`类型说明符是`c++11`新增的特性,能够生成指定表达式的类型。语法如下: 11 | 12 | ``` 13 | decltype( expression ) 14 | ``` 15 | 16 | ## 推导规则 17 | 18 | 编译器使用以下规则推导参数`expression`的类型 19 | 20 | 1. 如果参数`expression`是一个标识符(`identifier`)或者类成员访问(`class member access`),那么`decltype(expression)`是该实体(`entity`)的类型 21 | 2. 如果参数`expression`是一个函数或者重载操作符的调用,那么`decltype(expression)`返回函数值类型。忽略重载运算符周围的括号 22 | 3.
如果参数`expression`是一个`rvalue`,那么`decltype(expression)`是`expression`的类型;如果是一个`lvalue`,那么结果是对`expression`类型的`lvalue`引用 23 | 24 | ## 示例 25 | 26 | ``` 27 | int var; 28 | const int&& fx(); 29 | struct A { double x; } 30 | const A* a = new A(); 31 | ``` 32 | 33 | 推导结果如下: 34 | 35 | ![](./imgs/decltype.png) -------------------------------------------------------------------------------- /docs/python/[enumerate]遍历.md: -------------------------------------------------------------------------------- 1 | 2 | # [enumerate]遍历 3 | 4 | 参考:[Python enumerate() 函数](https://www.runoob.com/python/python-func-enumerate.html) 5 | 6 | ``` 7 | class enumerate(object) 8 | | enumerate(iterable, start=0) 9 | | 10 | | Return an enumerate object. 11 | | 12 | | iterable 13 | | an object supporting iteration 14 | ``` 15 | 16 | 遍历可迭代对象,同时返回数组下标和值 17 | 18 | ``` 19 | >>> import numpy as np 20 | >>> a = np.arange(30, 40) 21 | >>> a 22 | array([30, 31, 32, 33, 34, 35, 36, 37, 38, 39]) 23 | >>> for idx, item in enumerate(a, 0): 24 | ... print(idx, item) 25 | ... 26 | 0 30 27 | 1 31 28 | 2 32 29 | 3 33 30 | 4 34 31 | 5 35 32 | 6 36 33 | 7 37 34 | 8 38 35 | 9 39 36 | ``` 37 | 38 | 可以指定起始位置下标值(**Note:仅改变下标起始值,仍旧会完整遍历数组**) 39 | 40 | ``` 41 | >>> for idx, item in enumerate(a, 10): 42 | ... print(idx, item) 43 | ... 44 | 10 30 45 | 11 31 46 | 12 32 47 | 13 33 48 | 14 34 49 | 15 35 50 | 16 36 51 | 17 37 52 | 18 38 53 | 19 39 54 | ``` -------------------------------------------------------------------------------- /tools/createsamples/.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | // See https://go.microsoft.com/fwlink/?LinkId=733558 3 | // for the documentation about the tasks.json format 4 | "version": "2.0.0", 5 | "tasks": [ 6 | { 7 | "label": "create", 8 | "type": "shell", 9 | "command": "g++ -o create -std=c++17 createsamples.cpp utility.cpp `pkg-config --libs --cflags opencv`", 10 | "args": [], 11 | "group": { 12 | "kind": "build", 13 | "isDefault": true 14 | } 15 | }, 16 | { 17 | "label": "create", 18 | "type": "shell", 19 | "command": "g++ -g -o create -std=c++17 createsamples.cpp utility.cpp `pkg-config --libs --cflags opencv`", 20 | "args": [], 21 | "group": { 22 | "kind": "test", 23 | "isDefault": true 24 | } 25 | } 26 | ] 27 | } -------------------------------------------------------------------------------- /docs/pytorch/error/OSError: [Errno 12] Cannot allocate memory.md: -------------------------------------------------------------------------------- 1 | 2 | # OSError: [Errno 12] Cannot allocate memory 3 | 4 | 参考: 5 | 6 | [死亡Error:OSError: [Errno 12] Cannot allocate memory](https://blog.csdn.net/breeze210/article/details/99679048) 7 | 8 | [OSError: [Errno 12] Cannot allocate memory. But memory usage is actually normal](https://discuss.pytorch.org/t/oserror-errno-12-cannot-allocate-memory-but-memory-usage-is-actually-normal/56027) 9 | 10 | 执行`PyTorch`程序,发生内存不足错误 11 | 12 | ## 内存查询 13 | 14 | 监视内存,查看是否是内存不足 15 | 16 | ``` 17 | # 打开两个窗口,分别查看CPU内存和显卡内存 18 | # 每隔1秒查询一次 19 | $ watch -n 1 free -m 20 | $ wathc -n 1 nvidia-smi 21 | ``` 22 | 23 | ## num_workers 24 | 25 | 确实不是因为内存不足,那么修改`DataLoader`的`num_workers`为`0`,再重新运行即可 26 | 27 | ``` 28 | num_workers (int, optional): how many subprocesses to use for data 29 | loading. ``0`` means that the data will be loaded in the main process. 
30 | (default: ``0``) 31 | ``` 32 | 33 | -------------------------------------------------------------------------------- /docs/pytorch/[transpose][permute]维度转换.md: -------------------------------------------------------------------------------- 1 | 2 | # [transpose][permute]维度转换 3 | 4 | `PyTorch`提供了两个函数用于维度转换 5 | 6 | * [transpose](https://pytorch.org/docs/stable/torch.html#torch.transpose) 7 | * [permute](https://pytorch.org/docs/stable/tensors.html?highlight=permute#torch.Tensor.permute) 8 | 9 | ## transpose 10 | 11 | >torch.transpose(input, dim0, dim1) → Tensor 12 | 13 | 函数`transpose`每次仅能调整两个维度 14 | 15 | ``` 16 | >>> import torch 17 | >>> a = torch.arange(24).reshape(2, 3, 4) 18 | >>> a.shape 19 | torch.Size([2, 3, 4]) 20 | # 切换第1维和第2维 21 | >>> torch.transpose(a, 1, 2).shape 22 | torch.Size([2, 4, 3]) 23 | # 切换第0维和第2维 24 | >>> torch.transpose(a, 2, 0).shape 25 | torch.Size([4, 3, 2]) 26 | ``` 27 | 28 | ## permute 29 | 30 | >permute(*dims) → Tensor 31 | 32 | 使用`permute`能够一次性调整多个维度 33 | 34 | ``` 35 | >>> import torch 36 | >>> a = torch.arange(24).reshape(2, 3, 4) 37 | >>> a.shape 38 | torch.Size([2, 3, 4]) 39 | >>> a.permute(2, 0, 1).shape 40 | torch.Size([4, 2, 3]) 41 | ``` -------------------------------------------------------------------------------- /docs/algorithm/evaluation-metrics.md: -------------------------------------------------------------------------------- 1 | 2 | # 评价标准 3 | 4 | * 分类 5 | * 二分类 6 | * [[二分类]混淆矩阵](https://blog.zhujian.life/posts/74ea027a.html) 7 | * [准确率 vs. 精确率](https://blog.zhujian.life/posts/5b516f3c.html) 8 | * [[ROC][AUC]二分类任务评判标准](https://blog.zhujian.life/posts/887dcf29.html) 9 | * [[二分类]ROC曲线](https://blog.zhujian.life/posts/71a847e.html) 10 | * [[二分类]PR曲线](https://blog.zhujian.life/posts/bca792b4.html) 11 | * [[二分类]F1-score](https://blog.zhujian.life/posts/50c7d392.html) 12 | * 多分类 13 | * [[多分类]混淆矩阵](https://blog.zhujian.life/posts/c35edb41.html) 14 | * [[多分类]ROC曲线](https://blog.zhujian.life/posts/48526d13.html) 15 | * [[多分类]PR曲线](https://blog.zhujian.life/posts/2bbcad17.html) 16 | * 检测 17 | * [[目标检测]IoU](https://blog.zhujian.life/posts/796ebd4e.html) 18 | * [[目标检测][目标识别]模型性能测试](https://blog.zhujian.life/posts/4bc9fe45.html) 19 | * [[目标检测][PASCAL VOC]mAP](https://blog.zhujian.life/posts/d817618d.html) -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/指针名和数组名的区别.md: -------------------------------------------------------------------------------- 1 | 2 | # 指针名和数组名的区别 3 | 4 | 参考: 5 | 6 | [指针和数组的区别](https://blog.csdn.net/u012124564/article/details/47323817) 7 | 8 | [c中,数组名跟指针有区别吗?](https://www.zhihu.com/question/41805285) 9 | 10 | 有如下区别: 11 | 12 | 1. 可以用数组名初始化指针,但是数组名必须用列表进行初始化 13 | 2. 数组名指向固定内存地址,不能修改,而指针名可以指向其他地址 14 | 3. 
使用`sizeof`计算两者所占的内存字节数,数组名返回整个数组所占字节数,而指针名返回单个地址的字节大小(当前`CPU`的最大位数:`8`字节) 15 | 16 | 测试代码如下: 17 | 18 | ``` 19 | int arr[3] = {1, 2, 3}; 20 | int *p = arr; 21 | 22 | cout << arr << endl; 23 | cout << p << endl; 24 | 25 | cout << arr[1] << endl; 26 | cout << *(p + 1) << endl; 27 | 28 | cout << sizeof(arr) << endl; 29 | cout << sizeof(p) << endl; 30 | ``` 31 | 32 | * 新建整型数组`arr`并初始化 33 | * 新建整型指针`p`,指向数组`arr` 34 | * 打印数组名和指针名,此时两者均指向数组首地址 35 | * 通过数组方式和指针方式访问第二个元素 36 | * 打印数组名和指针名所占内存字节数 37 | 38 | 结果如下: 39 | 40 | ``` 41 | 0x7fff2b649180 42 | 0x7fff2b649180 43 | 2 44 | 2 45 | 12 46 | 8 47 | ``` 48 | 49 | 总的来说,**数组名就是指针常量,而指针名是指针变量** -------------------------------------------------------------------------------- /docs/pytorch/error/Process finished with exit code 137 (interrupted by signal 9: SIGKILL) .md: -------------------------------------------------------------------------------- 1 | 2 | # Process finished with exit code 137 (interrupted by signal 9: SIGKILL) 3 | 4 | 前几天突然遇到这个问题: 5 | 6 | ``` 7 | Process finished with exit code 137 (interrupted by signal 9: SIGKILL) 8 | ``` 9 | 10 | 程序启动后被系统`KILLED`,在网上找了资料,发现是说内存不足的问题 11 | 12 | 查询内存使用情况,发现大多数内存都被缓存占据了 13 | 14 | ``` 15 | $ free -h 16 | total used free shared buff/cache available 17 | Mem: 62G 13G 450M 308M 49G 48G 18 | Swap: 7.6G 4.2G 3.4G 19 | ``` 20 | 21 | ## 额外阅读 22 | 23 | * [Process finished with exit code 137 in PyCharm](https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method) 24 | * [multiprocessing.set_start_method(method)](https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method) 25 | * [Semaphore leaks in dataloader](https://github.com/pytorch/pytorch/issues/11727) -------------------------------------------------------------------------------- /docs/cplusplus/get-started/type-cast-deduce/void类型.md: -------------------------------------------------------------------------------- 1 | 2 | # void类型 3 | 4 | ## void类型 5 | 6 | 参考: 7 | 8 | [The void type](https://docs.microsoft.com/en-us/cpp/cpp/cpp-type-system-modern-cpp?view=vs-2019#the-void-type) 9 | 10 | [void (C++)](https://docs.microsoft.com/en-us/cpp/cpp/void-cpp?view=vs-2019) 11 | 12 | `void`类型有两个用处 13 | 14 | 1. 作为函数返回值类型,表示不返回一个值 15 | 2. 定义在函数参数列表,表示该函数没有任何参数 16 | 3. 使用`void *`作为指针,可以指向任何类型的变量 17 | 18 | ``` 19 | #include 20 | 21 | using std::cout; 22 | using std::endl; 23 | 24 | void f(void *a) { 25 | int *b = (int *) a; 26 | cout << sizeof(b) << endl; 27 | cout << sizeof(*b) << endl; 28 | 29 | cout << *b << endl; 30 | } 31 | 32 | 33 | int main() { 34 | 35 | int a = 3; 36 | f(&a); 37 | 38 | return 0; 39 | } 40 | ``` 41 | 42 | 结果 43 | 44 | ``` 45 | 8 46 | 4 47 | 3 48 | ``` 49 | 50 | `void *`可以指向任何类型的指针(除了`const`和`volatile`声明的),但是如果想要使用具体的变量值必须重新经过转换 51 | 52 | `void`指针同样可以指向函数,但是不能是类成员 53 | 54 | ## C++规范 55 | 56 | 1. 尽量避免使用`void`指针,其涉及到类型安全 57 | 2. 
设置函数无参数,使用`f()`代替`f(void)` -------------------------------------------------------------------------------- /docs/pytorch/引言.md: -------------------------------------------------------------------------------- 1 | 2 | # 引言 3 | 4 | 之前操作过`torch`,是一个`lua`编写的深度学习训练框架,后来`facebook`发布了`pytorch`,使用`python`语言进行开发 5 | 6 | `pytorch`是在`torch`的基础上发展而来的,它继承了许多内容,包括各种包的命名和类的定义,比如张量(`tensor`) 7 | 8 | 参考:[pytorch](https://pytorch.org/) 9 | 10 | ## 目标 11 | 12 | * 替代`NumPy`进行`GPU`的运算 13 | * 提供最大灵活性和速度的深度学习平台 14 | 15 | ## 安装 16 | 17 | 参考:[Start Locally](https://pytorch.org/get-started/locally/) 18 | 19 | 指定版本/操作系统/安装方式/`python`语言/`cuda`版本 20 | 21 | 当前配置: 22 | 23 | * PyTorch Stable(1.0) 24 | * Ubuntu 16.04 25 | * Anaconda3 26 | * Python 3.6 27 | * CUDA 10.0 28 | 29 | 安装命令如下: 30 | 31 | $ conda install pytorch torchvision cudatoolkit=9.0 -c pytorch 32 | 33 | ## 加载`torch` 34 | 35 | 命令行方式 36 | 37 | $ python 38 | Python 3.6.8 |Anaconda, Inc.| (default, Dec 30 2018, 01:22:34) 39 | [GCC 7.3.0] on linux 40 | Type "help", "copyright", "credits" or "license" for more information. 41 | >>> import torch 42 | >>> torch.__version__ 43 | '1.0.1.post2' 44 | >>> 45 | 46 | 文件方式 47 | 48 | from __future__ import print_function 49 | import torch -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/find.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl]find 3 | 4 | 参考:[std::find](http://www.cplusplus.com/reference/algorithm/find/) 5 | 6 | `C++`提供了丰富的查询函数 7 | 8 | * `find` 9 | * `find_end` 10 | * `find_first_of` 11 | * `find_if` 12 | * `find_if_not` 13 | 14 | ## find_if 15 | 16 | 参考:[std::find_if](http://www.cplusplus.com/reference/algorithm/find_if/) 17 | 18 | 函数`find_if`发现指定范围内(不包含最后一个位置)是否存在数值符合条件 19 | 20 | 如果存在,返回第一个符合条件的迭代器;如果不存在,返回末尾迭代器`last` 21 | 22 | ``` 23 | // find_if example 24 | #include <iostream> // std::cout 25 | #include <algorithm> // std::find_if 26 | #include <vector> // std::vector 27 | 28 | bool IsOdd(int i) { 29 | return ((i % 2) == 1); 30 | } 31 | 32 | int main() { 33 | std::vector<int> myvector; 34 | 35 | myvector.push_back(10); 36 | myvector.push_back(25); 37 | myvector.push_back(40); 38 | myvector.push_back(55); 39 | 40 | std::vector<int>::iterator it = std::find_if(myvector.begin(), myvector.end(), IsOdd); 41 | std::cout << "The first odd value is " << *it << '\n'; 42 | 43 | return 0; 44 | } 45 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/for_each.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl]for_each 3 | 4 | 参考:[std::for_each](http://www.cplusplus.com/reference/algorithm/for_each/?kw=for_each) 5 | 6 | 使用`for_each`函数对指定范围内的数值逐个进行函数操作 7 | 8 | 最常用的就是遍历操作 9 | 10 | ``` 11 | #include <iostream> // std::cout 12 | #include <algorithm> // std::for_each 13 | #include <vector> // std::vector 14 | 15 | void myfunction(int i) { // function: 16 | std::cout << ' ' << i; 17 | } 18 | 19 | struct myclass { // function object type: 20 | void operator()(int i) { std::cout << ' ' << i; } 21 | } myobject; 22 | 23 | int main() { 24 | std::vector<int> myvector; 25 | myvector.emplace_back(10); 26 | myvector.emplace_back(20); 27 | myvector.emplace_back(30); 28 | 29 | std::cout << "myvector contains:"; 30 | for_each(myvector.begin(), myvector.end(), myfunction); 31 | std::cout << '\n'; 32 | 33 | // or: 34 | std::cout << "myvector contains:"; 35 | for_each(myvector.begin(), myvector.end(), myobject); 36 | std::cout << '\n'; 37 | 38 | return 0; 39 | } 40 | ```
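补充:在 `C++11` 中也可以直接向 `for_each` 传入 `lambda` 表达式,无需单独定义函数或函数对象。下面给出一个等价的极简示意(仅为草图):

```
#include <algorithm>    // std::for_each
#include <iostream>     // std::cout
#include <vector>       // std::vector

int main() {
    std::vector<int> myvector{10, 20, 30};

    std::cout << "myvector contains:";
    // 使用lambda逐个打印元素
    std::for_each(myvector.begin(), myvector.end(), [](int i) { std::cout << ' ' << i; });
    std::cout << '\n';

    return 0;
}
```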
-------------------------------------------------------------------------------- /docs/cnn/过拟合和欠拟合.md: -------------------------------------------------------------------------------- 1 | 2 | # 过拟合和欠拟合 3 | 4 | 过拟合(`overfitting`)和欠拟合(`underfitting`)都是分类训练过程中经常遇到的现象,理清它们之间的区别和含义,有助于得到更好的分类器 5 | 6 | ## 过拟合 7 | 8 | 参考: 9 | 10 | [过拟合](https://baike.baidu.com/item/%E8%BF%87%E6%8B%9F%E5%90%88) 11 | 12 | [机器学习中用来防止过拟合的方法有哪些?](https://www.zhihu.com/question/59201590/answer/167392763) 13 | 14 | 过拟合(`over fitting`)现象常出现在分类器训练过程中,指的是分类器对训练集数据能够得到很好的结果,但是在测试集(或者其他数据)上不能够很好的分类,分类器泛化能力差 15 | 16 | 出现原因: 17 | 18 | 1. 训练集数据不够大 19 | 2. 训练集数据存在噪音 20 | 3. 模型过于复杂导致不仅能够拟合数据还能够拟合噪音 21 | 22 | 解决方法: 23 | 24 | 1. 给予足够多的数据 25 | 2. 提高训练集质量 26 | 3. 选用合适的模型,限制模型的拟合能力 27 | 28 | 针对卷积神经网络,限制网络复杂度的方法包括 29 | 30 | 1. 减少网络层数 31 | 2. 减小神经元个数 32 | 3. 激活函数 33 | 34 | ## 欠拟合 35 | 36 | 参考:[欠拟合](https://baike.baidu.com/item/%E6%AC%A0%E6%8B%9F%E5%90%88) 37 | 38 | 欠拟合(`under fitting`)指分类器在训练集上不能够得到很好的检测效果,同样在测试集上也不能够得到很好的检测效果,分类器泛化能力差 39 | 40 | 欠拟合的原因是由于分类模型没有很好的捕捉到数据特征,真实数据离拟合曲线较远 41 | 42 | 解决方法: 43 | 44 | 1. 增加新特征 45 | 2. 减少参数正则化 46 | 3. 使用非线性模型 47 | 4. 集成多个学习模型 48 | 49 | 针对卷积神经网络,提高网络复杂度的方法包括 50 | 51 | 1. 扩展网络层数 52 | 2. 扩大神经元个数 53 | 3. 减少激活函数 54 | 4. 使用稀疏网络结构 55 | 5. 集成多个网络模型 56 | 6. 随机激活权值 -------------------------------------------------------------------------------- /docs/cplusplus/advanced/class/嵌套类定义.md: -------------------------------------------------------------------------------- 1 | 2 | # 嵌套类定义 3 | 4 | 参考:[Nested Class Declarations](https://docs.microsoft.com/en-us/cpp/cpp/nested-class-declarations?view=vs-2019) 5 | 6 | ## 声明 7 | 8 | 一个类可以在另一个类的范围内声明,这样的类称为`嵌套类`。嵌套类被视为在封闭类的范围内,并可在该范围内使用。若要从其直接封闭作用域以外的作用域引用嵌套类,必须使用完全限定名 9 | 10 | ``` 11 | class Cls { 12 | public: 13 | class NestA { 14 | public: 15 | void print() { 16 | std::cout << "NestA" << std::endl; 17 | } 18 | }; 19 | 20 | class NestB { 21 | public: 22 | void print() { 23 | std::cout << "NestB" << std::endl; 24 | } 25 | }; 26 | 27 | void print() { 28 | NestA nestA; 29 | NestB nestB; 30 | nestA.print(); 31 | nestB.print(); 32 | std::cout << "Cls" << std::endl; 33 | } 34 | }; 35 | 36 | int main() { 37 | Cls cls; 38 | cls.print(); 39 | 40 | Cls::NestA nestA; 41 | nestA.print(); 42 | Cls::NestB nestB; 43 | nestB.print(); 44 | } 45 | ``` 46 | 47 | ## 使用 48 | 49 | * 对于封闭类中的成员/函数,嵌套类可以直接使用 50 | * 对于其他类的成员/函数,必须通过指针、引用或对象名来使用 51 | * 对于嵌套类的友元函数,其仅能访问嵌套类,不能访问封闭类 -------------------------------------------------------------------------------- /py/data_preprocessing/compose.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | @date: 2020/4/30 下午2:52 5 | @file: compose.py 6 | @author: zj 7 | @description: 组合实现多种图像预处理 8 | """ 9 | 10 | import torchvision.transforms as transforms 11 | from PIL import Image 12 | import matplotlib.pyplot as plt 13 | 14 | if __name__ == '__main__': 15 | src = Image.open('../data/lena.jpg') 16 | 17 | # 预处理顺序如下: 18 | # 1. 按较短边缩放 19 | # 2. 随机裁剪224x224 20 | # 3. 随机水平翻转 21 | # 4. 
随机颜色抖动:亮度、对比度、饱和度、色调 22 | transform = transforms.Compose([ 23 | transforms.Resize(224), 24 | transforms.RandomCrop(224), 25 | transforms.RandomHorizontalFlip(), 26 | transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1), 27 | transforms.ToTensor(), 28 | transforms.RandomErasing(), 29 | transforms.ToPILImage() 30 | ]) 31 | 32 | cols = 3 33 | rows = 3 34 | for i in range(rows): 35 | for j in range(cols): 36 | plt.subplot(rows, cols, i * cols + j + 1) 37 | plt.imshow(transform(src)) 38 | plt.axis('off') 39 | plt.show() 40 | -------------------------------------------------------------------------------- /docs/matplotlib/手动设置轴刻度间隔.md: -------------------------------------------------------------------------------- 1 | 2 | # 手动设置轴刻度间隔 3 | 4 | 默认情况下,`Matplotlib`会自动调整`x`轴刻度,需要手动设置 5 | 6 | ## 定义 7 | 8 | 参考:[Python设置matplotlib.plot的坐标轴刻度间隔以及刻度范围](https://www.jb51.net/article/163842.htm) 9 | 10 | ``` 11 | import matplotlib.pyplot as plt 12 | from matplotlib.pyplot import MultipleLocator 13 | 14 | # 设置`x`轴刻度间隔为`1` 15 | x_major_locator = MultipleLocator(1) 16 | ax = plt.gca() 17 | ax.xaxis.set_major_locator(x_major_locator) 18 | ``` 19 | 20 | ## 示例 21 | 22 | 参考:[折线图](./折线图.md) 23 | 24 | ``` 25 | import matplotlib.pyplot as plt 26 | from matplotlib.pyplot import MultipleLocator 27 | 28 | def show(): 29 | x = list(range(10)) 30 | y = random.sample(list(range(100)), 10) 31 | 32 | plt.figure(1, figsize=(9, 3)) 33 | 34 | plt.title('test') 35 | plt.subplot(1, 2, 1) 36 | plt.plot(x, y, label='unset') 37 | plt.legend() 38 | 39 | plt.subplot(122) 40 | 41 | x_major_locator = MultipleLocator(1) 42 | ax = plt.gca() 43 | ax.xaxis.set_major_locator(x_major_locator) 44 | 45 | plt.plot(x, y, label='set') 46 | plt.legend() 47 | 48 | plt.show() 49 | ``` 50 | 51 | ![](./imgs/xaxis.png) -------------------------------------------------------------------------------- /docs/uml/plantuml/本地安装和使用.md: -------------------------------------------------------------------------------- 1 | 2 | # 本地安装和使用 3 | 4 | `PlantUML`提供了编译包,能够在本地实现图例编译 5 | 6 | *当前操作系统:`Ubuntu 18.04`* 7 | 8 | ## 配置环境 9 | 10 | 1. `Java`环境配置 11 | 2. 
`Graphviz`安装 12 | 13 | ### Java环境配置 14 | 15 | 参考:[Java安装](https://zj-linux-guide.readthedocs.io/zh_CN/latest/tool-install-configure/Java%E5%AE%89%E8%A3%85/) 16 | 17 | ### Graphviz安装 18 | 19 | 可以从`Graphviz`官网下载安装包:[Download](https://www.graphviz.org/download/) 20 | 21 | 在`Ubuntu`上直接安装: 22 | 23 | ``` 24 | $ sudo apt install graphviz 25 | ``` 26 | 27 | ## PlantUML安装 28 | 29 | 下载`plantuml.jar`文件即可 30 | 31 | 下载地址:[https://plantuml.com/zh/download](https://plantuml.com/zh/download) 32 | 33 | ## 使用 34 | 35 | 参考:[这个成熟了吗?我没有看到任何新版本吗?](https://plantuml.com/zh/faq) 36 | 37 | 支持文件格式:`*.wsd, *.pu, *.puml, *.plantuml, *.iuml` 38 | 39 | 有两种方式能够打印`PlantUML`版本 40 | 41 | * 命令行模式 42 | 43 | $ java -jar plantuml.jar -version 44 | 45 | * 文件模式 46 | * 编写文件version.puml 47 | 48 | @startuml 49 | version 50 | @enduml 51 | 52 | * 编译该文件,生成`png`格式图 53 | 54 | $ java -jar plantuml.jar version.puml 55 | 56 | ![](./imgs/version.png) -------------------------------------------------------------------------------- /docs/pytorch/cuda/[empty_cache]清空显存.md: -------------------------------------------------------------------------------- 1 | 2 | # [empty_cache]清空显存 3 | 4 | 查看工程源码时发现在训练完成后,测试模型之前调用了函数[torch.cuda.empty_cache()](https://pytorch.org/docs/stable/cuda.html#torch.cuda.empty_cache) 5 | 6 | ``` 7 | logger.info('Start evaluating...') 8 | torch.cuda.empty_cache() # speed up evaluating after training finished 9 | do_evaluation(cfg, model, distributed=args.distributed) 10 | ``` 11 | 12 | 其作用是释放缓存分配器当前持有的所有未占用的缓存内存,以便这些内存可以在其他GPU应用程序中使用,并在`nvidia-smi`中可见 13 | 14 | ## 使用 15 | 16 | 对于何时使用该函数清空缓存内存,参考: 17 | 18 | [About torch.cuda.empty_cache()](https://discuss.pytorch.org/t/about-torch-cuda-empty-cache/34232) 19 | 20 | [What is torch.cuda.empty_cache do and where should i add it?](https://discuss.pytorch.org/t/what-is-torch-cuda-empty-cache-do-and-where-should-i-add-it/40975) 21 | 22 | [Why does torch.cuda.empty_cache() make the GPU utilization near 0 and slow down the training time?](https://discuss.pytorch.org/t/why-does-torch-cuda-empty-cache-make-the-gpu-utilization-near-0-and-slow-down-the-training-time/65196) 23 | 24 | [pytorch GPU显存释放的问题?](https://www.zhihu.com/question/68509057/answer/566619040) 25 | 26 | 并不推荐在实现中频繁调用该函数。仅在显存不足时进行调用即可 -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/指针类型.md: -------------------------------------------------------------------------------- 1 | 2 | # 指针类型 3 | 4 | 参考:[Pointers (C++)](https://docs.microsoft.com/en-us/cpp/cpp/pointers-cpp?view=vs-2019) 5 | 6 | 指针是`c/c++`相对于其他语言来说最不同的内容之一。通过指针的使用,`c/c++`可以更加自由的操作内存地址,下面学习指针的相关概念和用法 7 | 8 | ## 语法 9 | 10 | 通用语法如下: 11 | 12 | ``` 13 | [storage-class-specifiers] [cv-qualifiers] type-specifiers declarator ; 14 | ``` 15 | 16 | 简化版本如下: 17 | 18 | ``` 19 | * [cv-qualifiers] identifier [= expression] 20 | ``` 21 | 22 | 1. 声明说明符 23 | * 可选的存储类说明符 24 | * 可选的`cv`限定符,应用于要指向的对象的类型 25 | * 类型说明符:表示要指向的对象类型的类型的名称 26 | 2. 
声明符 27 | * `*`运算符 28 | * 可选的`cv`限定符,应用于指针本身 29 | * 标识符 30 | * 可选的初始化器 31 | 32 | ### 函数指针 33 | 34 | 指向函数的指针的声明符如下所示: 35 | 36 | ``` 37 | (* [cv-qualifiers] identifier )( argument-list ) [cv-qualifers] [exception-specification] [= expression] ; 38 | ``` 39 | 40 | ### 指针数组 41 | 42 | 指针数组(`array of pointer`)的语法如下: 43 | 44 | ``` 45 | * identifier [ [constant-expression] ] 46 | ``` 47 | 48 | ## 示例 49 | 50 | * 声明指向`char`类型对象的指针 51 | 52 | ``` 53 | char *pch; 54 | ``` 55 | 56 | * 声明指向`unsigned int`类型的静态对象的常量指针 57 | 58 | ``` 59 | static unsigned int * const ptr; 60 | ``` -------------------------------------------------------------------------------- /docs/pytorch/[index_fill]在给定维度填充指定val.md: -------------------------------------------------------------------------------- 1 | 2 | # [index_fill]在给定维度填充指定val 3 | 4 | `PyTorch`提供了函数[index_fill_](https://pytorch.org/docs/stable/tensors.html?highlight=index_fill_#torch.Tensor.index_fill_)用于在张量的指定维度上填充指定`val` 5 | 6 | ## 定义 7 | 8 | >index_fill_(dim, index, val) → Tensor 9 | 10 | * `dim`:给定维度。`0`表示行,`1`表示列 11 | * `index`:`LongTensor`。给定维度下的指定下标 12 | * `val`:待填充值 13 | 14 | ## 示例 15 | 16 | 对于大小为$3\times 4$的张量 17 | 18 | ``` 19 | >>> import torch 20 | >>> a = torch.arange(12, dtype=torch.float).reshape(3, 4) 21 | >>> a 22 | tensor([[ 0., 1., 2., 3.], 23 | [ 4., 5., 6., 7.], 24 | [ 8., 9., 10., 11.]]) 25 | ``` 26 | 27 | 填充第`1/3`行,大小为`33` 28 | 29 | ``` 30 | >>> index=torch.LongTensor([0, 2]) 31 | >>> index 32 | tensor([0, 2]) 33 | >>> a.index_fill(0, index, 33) 34 | tensor([[33., 33., 33., 33.], 35 | [ 4., 5., 6., 7.], 36 | [33., 33., 33., 33.]]) 37 | ``` 38 | 39 | 填充第`2/3`列,大小为`-1` 40 | 41 | ``` 42 | >>> index=torch.LongTensor([1,2]) 43 | >>> index 44 | tensor([1, 2]) 45 | >>> a.index_fill(1, index, -1) 46 | tensor([[ 0., -1., -1., 3.], 47 | [ 4., -1., -1., 7.], 48 | [ 8., -1., -1., 11.]]) 49 | ``` -------------------------------------------------------------------------------- /docs/opencv/draw/[rectangle]绘制边框.md: -------------------------------------------------------------------------------- 1 | 2 | # [rectangle]绘制边框 3 | 4 | 参考: 5 | 6 | [opencv 绘图 cvLine cvRectangle cvCircle cvEllipse cvEllipseBox cvFillPoly cvConvexPoly cvPolyLine](https://blog.csdn.net/u012005313/article/details/46802565) 7 | 8 | [ rectangle() ](https://docs.opencv.org/4.1.0/d6/d6e/group__imgproc__draw.html#ga07d2f74cadcf8e305e810ce8eed13bc9) 9 | 10 | ## 定义 11 | 12 | ``` 13 | def rectangle(img, pt1, pt2, color, thickness=None, lineType=None, shift=None): 14 | ``` 15 | 16 | * `img`:图像 17 | * `pt1`:左上角坐标点 18 | * `pt2`:右下角坐标点 19 | * `color`:颜色 20 | * `thickness`:线条粗细程度 21 | * `lineType`:线条类型,参考[LineTypes](https://docs.opencv.org/4.1.0/d6/d6e/group__imgproc__draw.html#gaf076ef45de481ac96e0ab3dc2c29a777) 22 | 23 | **Note:在输入图像上进行边框绘制** 24 | 25 | ## 示例 26 | 27 | ``` 28 | import cv2 29 | import numpy as np 30 | 31 | if __name__ == '__main__': 32 | # 创建空白3通道图像 33 | img = np.ones((500, 500, 3)) * 255 34 | 35 | cv2.rectangle(img, (20, 20), (120, 120), (0, 0, 255), thickness=1) 36 | cv2.rectangle(img, (50, 100), (380, 450), (0, 255, 0), thickness=2) 37 | 38 | cv2.imshow('img', img) 39 | cv2.waitKey(0) 40 | ``` 41 | 42 | ![](./imgs/rectangle.png) -------------------------------------------------------------------------------- /docs/python/[tqdm]进度条.md: -------------------------------------------------------------------------------- 1 | 2 | # [tqdm]进度条 3 | 4 | [tqdm](https://github.com/tqdm/tqdm)提供了一个简易的方式实现进度条 5 | 6 | ## 安装 7 | 8 | ``` 9 | pip install tqdm 10 | ``` 11 | 12 | ## 示例 13 | 14 | 
`tqdm`提供了多个公式以实现进度条,常用的有以下几种方式 15 | 16 | 1. 基于可迭代对象 17 | 2. 手动设置迭代进度 18 | 19 | ### tqdm() 20 | 21 | ``` 22 | from tqdm import tqdm 23 | from time import sleep 24 | 25 | text = "" 26 | for char in tqdm(["a", "b", "c", "d"]): 27 | sleep(0.25) 28 | text = text + char 29 | print(text) 30 | ``` 31 | 32 | ![](./imgs/tqdm-1.gif) 33 | 34 | ### trange() 35 | 36 | ``` 37 | from time import sleep 38 | from tqdm import trange 39 | 40 | for i in trange(100): 41 | sleep(0.01) 42 | ``` 43 | 44 | ![](./imgs/tqdm-2.gif) 45 | 46 | ### with tqdm(total=xxx) as pbar 47 | 48 | ``` 49 | from tqdm import tqdm 50 | from time import sleep 51 | 52 | with tqdm(total=100) as pbar: 53 | for i in range(10): 54 | sleep(0.1) 55 | pbar.update(10) 56 | ``` 57 | 58 | ![](./imgs/tqdm-3.gif) 59 | 60 | ### pbar = tqdm(total=100) 61 | 62 | ``` 63 | from tqdm import tqdm 64 | from time import sleep 65 | 66 | pbar = tqdm(total=100) 67 | for i in range(10): 68 | sleep(0.1) 69 | pbar.update(10) 70 | pbar.close() 71 | ``` 72 | -------------------------------------------------------------------------------- /docs/pytorch/preprocessing/数据预处理.md: -------------------------------------------------------------------------------- 1 | 2 | # 数据预处理 3 | 4 | 参考: 5 | 6 | [深度学习入门之Pytorch——数据增强](https://blog.csdn.net/ 7 | weixin_40793406/article/details/84867143) 8 | 9 | [深度神经网络模型训练中的最新tricks总结【原理与代码汇总】](https://zhuanlan.zhihu.com/p/66080948) 10 | 11 | `PyTorch`提供了丰富的预处理函数,组合不同的预处理函数能够极大的扩充图像数据库 12 | 13 | 实现代码位于`py/data_preprocessing`目录下 14 | 15 | ## 预处理功能 16 | 17 | 1. 缩放(`Resize`) 18 | 2. 裁剪(`CenterCrop/RandomCrop`) 19 | 3. 翻转(`RandomHorizontalFlip/RandomVerticalFlip`) 20 | 4. 颜色抖动(`ColorJitter`) 21 | 5. 随机擦除(`RandomErasing`) 22 | 23 | ## 组合 24 | 25 | `Torchvision`提供了`Compose`函数来组合多种预处理功能 26 | 27 | ``` 28 | # 预处理顺序如下: 29 | # 1. 按较短边缩放 30 | # 2. 随机裁剪224x224 31 | # 3. 随机水平翻转 32 | # 4. 随机颜色抖动:亮度、对比度、饱和度、色调 33 | # 5. 转换成Tensor张量 34 | # 6. 随机擦除 35 | # 7. 
转换成PIL Image 36 | transform = transforms.Compose([ 37 | transforms.Resize(224), 38 | transforms.RandomCrop(224), 39 | transforms.RandomHorizontalFlip(), 40 | transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1), 41 | transforms.ToTensor(), 42 | transforms.RandomErasing(), 43 | transforms.ToPILImage() 44 | ]) 45 | ``` 46 | 47 | ![](./imgs/preprocess.png) -------------------------------------------------------------------------------- /docs/python/[list]排序.md: -------------------------------------------------------------------------------- 1 | 2 | # [list]排序 3 | 4 | 参考:[Python List sort()方法](https://www.runoob.com/python/att-list-sort.html) 5 | 6 | ## 定义 7 | 8 | ``` 9 | list.sort(cmp=None, key=None, reverse=False) 10 | ``` 11 | 12 | * `cmp`:指定排序方法 13 | * `key`:指定比较元素 14 | * `reverse`:排序规则:`True`表示降序,`False`表示升序 15 | 16 | ## 示例一 17 | 18 | ``` 19 | import random 20 | 21 | if __name__ == '__main__': 22 | a = random.sample(range(10), 3) 23 | print(a) 24 | 25 | # 降序 26 | a.sort(reverse=True) 27 | print(a) 28 | # 升序 29 | a.sort() 30 | print(a) 31 | ######################3 输出 32 | [8, 1, 5] 33 | [8, 5, 1] 34 | [1, 5, 8] 35 | ``` 36 | 37 | ## 示例二 38 | 39 | 列表中包含列表,按子列表中指定元素进行排序 40 | 41 | ``` 42 | import random 43 | 44 | if __name__ == '__main__': 45 | a = [random.sample(range(10), 3), random.sample(range(10), 3), random.sample(range(10), 3)] 46 | print(a) 47 | 48 | # 按子列表中最后一个元素进行排序 49 | a.sort(key=lambda x: x[2], reverse=True) 50 | print(a) 51 | ######################## 输出 52 | [[0, 9, 5], [0, 2, 6], [5, 7, 0]] 53 | [[0, 2, 6], [0, 9, 5], [5, 7, 0]] 54 | ``` 55 | 56 | ## 示例三 57 | 58 | 列表中包含字典,按字典中指定元素进行排序。其操作和示例二类似 59 | 60 | ``` 61 | a.sort(key=lambda x: x['指定key'], reverse=True) 62 | ``` -------------------------------------------------------------------------------- /docs/matplotlib/引言.md: -------------------------------------------------------------------------------- 1 | 2 | # 引言 3 | 4 | [matplotlib](https://matplotlib.org/index.html)是`Python 2D`绘图库 5 | 6 | 之前对它的概念不太理解,都是在网上找的示例代码,所以很难在原先代码基础上添加一些特性 7 | 8 | 这一次深入`matplotlib`的绘图架构,争取能够实现好的绘图 9 | 10 | ## matplotlib.plot 11 | 12 | 参考:[Matplotlib, pyplot and pylab: how are they related?](https://matplotlib.org/tutorials/introductory/usage.html#matplotlib-pyplot-and-pylab-how-are-they-related) 13 | 14 | `matplotlib.plot`是`matplotlib`的一个模块,为底层的面向对象绘图库提供状态机接口,状态机隐式并自动创建图形和轴以实现所需的绘图 15 | 16 | 其`API`风格类似于`MATLIB`,更加简单直观 17 | 18 | ## 输入数据格式 19 | 20 | 参考:[Types of inputs to plotting functions](https://matplotlib.org/tutorials/introductory/usage.html#types-of-inputs-to-plotting-functions) 21 | 22 | `matplotlib`支持多种格式数据输入,特别是`np.array`对象,所以最好在数据输入之前转换成`np.array`对象 23 | 24 | ``` 25 | b = np.matrix([[1,2],[3,4]]) 26 | b_asarray = np.asarray(b) 27 | ``` 28 | 29 | ## 代码风格 30 | 31 | 参考:[coding styles](https://matplotlib.org/tutorials/introductory/usage.html#coding-styles) 32 | 33 | 引用`matplotlib.plot`类库以及`numpy`类库如下 34 | 35 | ``` 36 | import matplotlib.pyplot as plt 37 | import numpy as np 38 | ``` 39 | 40 | ## `jupyter notebook`嵌入 41 | 42 | `matplotlib`支持在`jupyter notebook`嵌入绘图,仅需在最开始执行以下语句: 43 | 44 | ``` 45 | %matplotlib inline 46 | ``` -------------------------------------------------------------------------------- /docs/pytorch/preprocessing/[torchvision][ConcatDataset]连接多个数据集.md: -------------------------------------------------------------------------------- 1 | 2 | # [torchvision][ConcatDataset]连接多个数据集 3 | 4 | 
`PyTorch`提供了类[torch.utils.data.ConcatDataset](https://pytorch.org/docs/stable/data.html#torch.utils.data.ConcatDataset),能够连接多个不同的数据集 5 | 6 | ## 定义 7 | 8 | >CLASS torch.utils.data.ConcatDataset(datasets) 9 | 10 | * `datasets`:是一个列表,保存了多个数据集对象 11 | 12 | ## 示例 13 | 14 | 连接`MNIST`和`CIFAR100` 15 | 16 | ``` 17 | from torchvision.datasets import MNIST 18 | from torchvision.datasets import CIFAR100 19 | from torch.utils.data import ConcatDataset 20 | 21 | import numpy as np 22 | 23 | if __name__ == "__main__": 24 | mnist_data = MNIST('./data', train=True, download=True) 25 | print('mnist: ', len(mnist_data)) 26 | cifar10_data = CIFAR100('./data', train=True, download=True) 27 | print('cifar: ', len(cifar10_data)) 28 | 29 | concat_data = ConcatDataset([mnist_data, cifar10_data]) 30 | print('concat_data: ', len(concat_data)) 31 | 32 | img, target = concat_data.__getitem__(133) 33 | print(np.array(img).shape) 34 | print(target) 35 | ``` 36 | 37 | 输出如下: 38 | 39 | ``` 40 | mnist: 60000 41 | Files already downloaded and verified 42 | cifar: 50000 43 | concat_data: 110000 44 | (28, 28) 45 | 9 46 | ``` -------------------------------------------------------------------------------- /docs/opencv/draw/[掩码]绘制多边形.md: -------------------------------------------------------------------------------- 1 | 2 | # [掩码]绘制多边形 3 | 4 | 参考[python opencv cv2在图片中画mask掩码/掩膜](https://blog.csdn.net/xjtdw/article/details/107073396)和[cv2.fillConvexPoly()与cv2.fillPoly()填充多边形](https://www.cnblogs.com/Ph-one/p/12082692.html),进行多边形的绘制和填充 5 | 6 | ## 示例程序 7 | 8 | ``` 9 | import cv2 10 | import numpy as np 11 | 12 | img = cv2.imread('box.png') 13 | 14 | # binary mask 15 | coordinates = [] 16 | coordinate = np.array([[[100, 100], [300, 100], [200, 200], [100, 200]]]) 17 | coordinate2 = np.array([[[100, 100], [300, 200], [100, 300], [100, 200]]]) 18 | print(coordinate.shape) 19 | print(coordinate2.shape) 20 | coordinates.append(coordinate) 21 | coordinates.append(coordinate2) 22 | 23 | mask = np.zeros(img.shape[:2], dtype=np.int8) 24 | mask = cv2.fillPoly(mask, coordinates, 255) 25 | 26 | # 掩码实现 27 | image = cv2.add(img, np.zeros(np.shape(img), dtype=np.uint8), mask=mask) 28 | 29 | cv2.imshow('mask', mask) 30 | cv2.imshow('image', image) 31 | cv2.waitKey(0) 32 | ``` 33 | 34 | ## 出错 35 | 36 | ``` 37 | cv2.error: OpenCV(4.4.0) /tmp/pip-req-build-f9hglo4e/opencv/modules/imgproc/src/drawing.cpp:2395: error: (-215:Assertion failed) p.checkVector(2, CV_32S) >= 0 in function 'fillPoly' 38 | ``` 39 | 40 | **注意:每个多边形坐标点数组大小为(1, 4, 2),其数据格式为np.int** -------------------------------------------------------------------------------- /docs/cplusplus/advanced/reference/引用类型函数操作.md: -------------------------------------------------------------------------------- 1 | 2 | # 引用类型函数操作 3 | 4 | 引用可作用于函数参数和函数返回值 5 | 6 | ## 引用类型函数参数 7 | 8 | 参考:[Reference-Type Function Arguments](https://docs.microsoft.com/en-us/cpp/cpp/reference-type-function-arguments?view=vs-2019) 9 | 10 | 将引用作为函数参数,通过传递对象地址的方式进行对象访问,避免对象复制带来的额外开销,通常比直接输入对象更有效 11 | 12 | ### 语法 13 | 14 | 分两种情况,一是函数能够**修改**对象信息,而是函数能够**访问**对象信息 15 | 16 | ``` 17 | // 可修改 18 | ret-type func(type& declarator); 19 | // 可访问 20 | ret-type func(const type& declarator); 21 | ``` 22 | 23 | ## 引用类型函数返回值 24 | 25 | 参考:[Reference-Type Function Returns](https://docs.microsoft.com/en-us/cpp/cpp/reference-type-function-returns?view=vs-2019) 26 | 27 | 正如通过引用将大对象传递给函数更有效一样,通过引用从函数返回大对象也更有效。引用返回协议消除了在返回之前将对象复制到临时位置的必要性 28 | 29 | 将引用作为函数返回值有以下要求: 30 | 31 | 1. 函数返回类型一定是`lvalue` 32 | 2. 
当函数返回时,引用的对象不能超出其作用域范围 34 | 35 | ### 示例 36 | 37 | ``` 38 | struct S { 39 | short i; 40 | }; 41 | 42 | S &f(S &s) { 43 | // S s; 44 | s.i = 333; 45 | 46 | cout << (void *) &s << endl; 47 | 48 | return s; 49 | } 50 | 51 | int main() { 52 | S a; 53 | 54 | S &s = f(a); 55 | 56 | cout << (void *) &s << endl; 57 | } 58 | ``` 59 | 60 | 在`main`函数内将结构体对象`a`输入函数`f`,再返回其引用到`main`函数,赋值给引用对象`s` 61 | 62 | **注意:此时对象`a`的作用域是`main`函数,所以函数返回时没有超出其作用域** -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/map.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl]map 3 | 4 | 参考:[std::map](http://www.cplusplus.com/reference/map/map/) 5 | 6 | `map`是存储键/值对的关联容器 7 | 8 | ## 头文件 9 | 10 | ``` 11 | #include <map> 12 | ``` 13 | 14 | ## 使用 15 | 16 | ``` 17 | #include <iostream> 18 | 19 | template <typename K, typename V> 20 | void forward_print(std::map<K, V> maps) { 21 | // for (auto it = maps.begin(); it != maps.end(); ++it) 22 | // std::cout << it->first << " => " << it->second << ' '; 23 | 24 | for (auto &x:maps) { 25 | std::cout << x.first << " => " << x.second << ' '; 26 | } 27 | std::cout << std::endl; 28 | } 29 | 30 | int main() { 31 | // 创建 32 | std::map<int, int> maps; 33 | // 添加 34 | for (int i = 0; i < 10; i++) { 35 | maps.emplace(i, i + 1); 36 | } 37 | forward_print(maps); 38 | 39 | // 修改 40 | // 第二个位置,从0开始 41 | maps[1] = 444; 42 | forward_print(maps); 43 | 44 | // 删除 45 | // 先查找再删除 46 | std::map<int, int>::iterator it = maps.find(3); 47 | maps.erase(it); 48 | // 按键删除 49 | maps.erase(4); 50 | forward_print(maps); 51 | // 删除所有 52 | maps.clear(); 53 | std::cout << "size: " << maps.size() << std::endl; 54 | std::cout << "isEmpty: " << maps.empty() << std::endl; 55 | } 56 | ``` -------------------------------------------------------------------------------- /docs/python/[itertools][product]嵌套循环.md: -------------------------------------------------------------------------------- 1 | 2 | # [itertools][product]嵌套循环 3 | 4 | 参考:[product](https://docs.python.org/zh-cn/3.7/library/itertools.html#itertools.product) 5 | 6 | 可迭代对象输入的笛卡儿积。大致相当于生成器表达式中的嵌套循环 7 | 8 | ## product(A, B) 9 | 10 | 等同于`((x,y) for x in A for y in B)` 11 | 12 | ``` 13 | >>> from itertools import product 14 | >>> import numpy as np 15 | >>> a = np.arange(3) 16 | >>> b = np.arange(5, 9) 17 | >>> a 18 | array([0, 1, 2]) 19 | >>> b 20 | array([5, 6, 7, 8]) 21 | >>> ((x, y) for x in a for y in b) 22 | <generator object <genexpr> at 0x7fb108a9fed0> 23 | >>> list(((x, y) for x in a for y in b)) 24 | [(0, 5), (0, 6), (0, 7), (0, 8), (1, 5), (1, 6), (1, 7), (1, 8), (2, 5), (2, 6), (2, 7), (2, 8)] 25 | >>> product(a,b) 26 | <itertools.product object at 0x...> 27 | >>> list(product(a,b)) 28 | [(0, 5), (0, 6), (0, 7), (0, 8), (1, 5), (1, 6), (1, 7), (1, 8), (2, 5), (2, 6), (2, 7), (2, 8)] 29 | ``` 30 | 31 | ## product(A, repeat=2) 32 | 33 | 等同于`product(A, A)`,也就是`((x, y) for x in A for y in A)` 34 | 35 | ``` 36 | >>> product(a, a) 37 | <itertools.product object at 0x...> 38 | >>> list(product(a, a)) 39 | [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)] 40 | >>> list(product(a, repeat=2)) 41 | [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)] 42 | ``` -------------------------------------------------------------------------------- /docs/opencv/draw/[line]绘制线段.md: -------------------------------------------------------------------------------- 1 | 2 | # [line]绘制线段 3 | 4 | 使用`OpenCV`函数[line](https://docs.opencv.org/4.1.0/d6/d6e/group__imgproc__draw.html#ga7078a9fae8c7e7d13d24dac2520ae4a2)绘制两点之间的线段 5 | 6 | ## 函数解析 7 | 8 | ``` 9 | CV_EXPORTS_W void line(InputOutputArray img, 
Point pt1, Point pt2, const Scalar& color, 10 | int thickness = 1, int lineType = LINE_8, int shift = 0); 11 | ``` 12 | 13 | * `img`:绘制图像 14 | * `pt1`:起始点 15 | * `pt2`:终止点 16 | * `color`:线条颜色 17 | * `thickness`:线条粗细 18 | * `lineType`:线条绘制类型。参考[LineTypes](https://docs.opencv.org/4.1.0/d6/d6e/group__imgproc__draw.html#gaf076ef45de481ac96e0ab3dc2c29a777) 19 | * `shift`:点坐标中的小数位数 20 | 21 | ## 示例 22 | 23 | ``` 24 | #include 25 | #include 26 | 27 | using namespace std; 28 | using namespace cv; 29 | 30 | int main() { 31 | int width = 400; 32 | int height = 200; 33 | 34 | // 3通道8位大小图像 35 | Mat src = Mat(height, width, CV_8UC3); 36 | cout << src.size() << endl; 37 | 38 | // 过中心点的斜线 39 | line(src, Point(10, 10), Point(390, 190), Scalar(255, 0, 0), 2); 40 | // 过中心点的直线 41 | line(src, Point(10, 100), Point(390, 100), Scalar(0, 0, 255), 2); 42 | 43 | imshow("line", src); 44 | waitKey(0); 45 | 46 | return 0; 47 | } 48 | ``` 49 | 50 | 新建图像`src`,大小为`200x400`,绘制两条线段 51 | 52 | ![](./imgs/line.png) -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/stack.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl]stack 3 | 4 | `c++`提供了栈的实现:[queue](http://www.cplusplus.com/reference/queue/queue/),实现后进先出(`last-in first-out, LIFO`)功能 5 | 6 | ## 创建栈 7 | 8 | 引入头文件`stack`,创建时指定数据类型: 9 | 10 | ``` 11 | #include 12 | 13 | stack s; 14 | ``` 15 | 16 | ## 栈功能 17 | 18 | `stack`提供了如下常用功能实现: 19 | 20 | * empty():判断栈是否问空,为空返回`true`,不为空返回`false` 21 | * size():返回栈长度 22 | * top():返回栈顶数据 23 | * push(value_type&& __x):插入数据到栈顶 24 | * pop():移除栈顶数据。注意,返回值为空 25 | 26 | `c++11`提供了两个新特性: 27 | 28 | * [swap](http://www.cplusplus.com/reference/stack/stack/swap/):交换两个栈的值 29 | * [emplace](http://www.cplusplus.com/reference/stack/stack/emplace/):添加数据到栈顶。这个新元素是就地(`in place`)构造的,它传递参数作为其构造函数的参数,可替换push操作 30 | 31 | *参考:[C++11中emplace的使用](https://blog.csdn.net/u013700358/article/details/52623985):emplace能通过参数构造对象,不需要拷贝或者移动内存,相比push能更好地避免内存的拷贝与移动,使容器插入元素的性能得到进一步提升* 32 | 33 | 34 | ## 实现 35 | 36 | ``` 37 | stack s; 38 | // 栈大小 39 | cout << s.size() << endl; 40 | // 栈是否为空 41 | cout << s.empty() << endl; 42 | 43 | s.push(3); 44 | s.push(4); 45 | s.emplace(5); 46 | 47 | // 栈顶元素 48 | cout << s.top() << endl; 49 | 50 | stack s2; 51 | s2.push(3232); 52 | 53 | s2.swap(s); 54 | cout << s.size() << endl; 55 | cout << s.empty() << endl; 56 | cout << s.top() << endl; 57 | } 58 | ``` -------------------------------------------------------------------------------- /tools/traincascade/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "cctype": "cpp", 4 | "cmath": "cpp", 5 | "cstddef": "cpp", 6 | "cstdio": "cpp", 7 | "cstdlib": "cpp", 8 | "cstring": "cpp", 9 | "ctime": "cpp", 10 | "cwchar": "cpp", 11 | "atomic": "cpp", 12 | "strstream": "cpp", 13 | "chrono": "cpp", 14 | "cstdint": "cpp", 15 | "iosfwd": "cpp", 16 | "ratio": "cpp", 17 | "thread": "cpp", 18 | "cinttypes": "cpp", 19 | "typeindex": "cpp", 20 | "array": "cpp", 21 | "*.tcc": "cpp", 22 | "clocale": "cpp", 23 | "complex": "cpp", 24 | "cwctype": "cpp", 25 | "unordered_map": "cpp", 26 | "vector": "cpp", 27 | "exception": "cpp", 28 | "fstream": "cpp", 29 | "functional": "cpp", 30 | "initializer_list": "cpp", 31 | "iostream": "cpp", 32 | "istream": "cpp", 33 | "limits": "cpp", 34 | "new": "cpp", 35 | "ostream": "cpp", 36 | "numeric": "cpp", 37 | "sstream": "cpp", 38 | "stdexcept": "cpp", 39 | "streambuf": "cpp", 40 | 
"system_error": "cpp", 41 | "type_traits": "cpp", 42 | "tuple": "cpp", 43 | "typeinfo": "cpp", 44 | "utility": "cpp" 45 | } 46 | } -------------------------------------------------------------------------------- /docs/opencv/code/[convertTo]数据转换.md: -------------------------------------------------------------------------------- 1 | 2 | # [convertTo]数据转换 3 | 4 | 参考:[convertTo()](https://docs.opencv.org/4.1.0/d3/d63/classcv_1_1Mat.html#adf88c60c5b4980e05bb556080916978b) 5 | 6 | 图像处理过程中经常需要缩放数据值以及转换数据类型,`OpenCV`提供了函数`cv::convertTo`来完成 7 | 8 | ## 函数解析 9 | 10 | ``` 11 | void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; 12 | ``` 13 | 14 | * `m`:输出矩阵;如果在操作之前没有分配适当的大小或类型,则会根据操作重新分配 15 | * `rtype`:输出数据类型,如果为负,表示和原图一致 16 | * `alpha`:缩放因子 17 | * `beta`:增加到缩放后数据的因子 18 | 19 | 函数`convertTo`执行如下操作: 20 | 21 | ![](./imgs/convert-to.png) 22 | 23 | ## 示例 24 | 25 | ``` 26 | #include 27 | #include 28 | #include 29 | 30 | using namespace std; 31 | using namespace cv; 32 | 33 | void print(const Mat &src, const Mat &dst) { 34 | cout << "数据类型" << endl; 35 | cout << src.type() << endl; 36 | cout << dst.type() << endl; 37 | 38 | cout << "结果" << endl; 39 | cout << src << endl; 40 | cout << dst << endl; 41 | } 42 | 43 | int main() { 44 | Mat src = Mat(1, 3, CV_8UC1); 45 | 46 | src.at(0) = 3; 47 | src.at(1) = 4; 48 | src.at(2) = 5; 49 | 50 | Mat dst; 51 | src.convertTo(dst, CV_32F, 0.5); 52 | print(src, dst); 53 | } 54 | ``` 55 | 56 | 转换成浮点类型,并进行缩放 57 | 58 | ``` 59 | 数据类型 60 | 0 61 | 5 62 | 结果 63 | [ 3, 4, 5] 64 | [1.5, 2, 2.5] 65 | ``` -------------------------------------------------------------------------------- /py/data_preprocessing/resize.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | @date: 2020/4/30 上午11:22 5 | @file: resize.py 6 | @author: zj 7 | @description: 缩放 8 | """ 9 | 10 | import torchvision.transforms as transforms 11 | from PIL import Image 12 | import matplotlib.pyplot as plt 13 | 14 | plt.rcParams['font.sans-serif'] = ['simhei'] # 用来正常显示中文标签 15 | plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 16 | 17 | 18 | def plot(src, dst, dst2): 19 | f = plt.figure() 20 | 21 | plt.subplot(311) 22 | h, w = src.size 23 | plt.title('原图 (w,h)=(%d, %d)' % (h, w)) 24 | plt.imshow(src), # plt.axis('off') 25 | 26 | plt.subplot(312) 27 | h, w = dst.size 28 | plt.title('按较短边进行缩放 (w,h)=(%d, %d)' % (h, w)) 29 | plt.imshow(dst), # plt.axis('off') 30 | 31 | plt.subplot(313) 32 | h, w = dst2.size 33 | plt.title('指定长/宽 (w,h)=(%d, %d)' % (h, w)) 34 | plt.imshow(dst2), # plt.axis('off') 35 | 36 | plt.show() 37 | 38 | 39 | if __name__ == '__main__': 40 | src = Image.open('../data/butterfly.jpg') 41 | 42 | # 按较短边进行缩放 43 | transform = transforms.Compose([ 44 | transforms.Resize(224) 45 | ]) 46 | dst = transform(src) 47 | 48 | # 指定图像长宽 49 | transform2 = transforms.Compose([ 50 | transforms.Resize((224, 224)) 51 | ]) 52 | dst2 = transform2(src) 53 | 54 | plot(src, dst, dst2) 55 | -------------------------------------------------------------------------------- /docs/python/保存json或者dict数据为voc-xml文件.md: -------------------------------------------------------------------------------- 1 | 2 | # 保存json或者dict数据为voc-xml文件 3 | 4 | 需要将标注数据保存为`VOC XML`格式,在网上查了一些资料。参考: 5 | 6 | * [Convert JSON to XML in Python](https://stackoverflow.com/questions/8988775/convert-json-to-xml-in-python/19474571) 7 | * [Python – JSON to XML](https://www.geeksforgeeks.org/python-json-to-xml/) 8 | * [How to convert JSON to XML using 
Python](https://www.codespeedy.com/how-to-convert-json-to-xml-using-python/) 9 | 10 | 有多种方式可以实现,下面使用[martinblech/xmltodict](https://github.com/martinblech/xmltodict)将`json/dict`保存为`xml`文件 11 | 12 | ## 函数定义 13 | 14 | ### parse 15 | 16 | 使用`parse`读取`xml`文件,保存为`dict`数据 17 | 18 | ``` 19 | def parse(xml_input, encoding=None, expat=expat, process_namespaces=False, 20 | namespace_separator=':', disable_entities=True, **kwargs): 21 | ``` 22 | 23 | * `xml_input`:`xml`文件路径或者`file-like object` 24 | 25 | ### unparse 26 | 27 | 使用`unparse`将`dict`数据保存为`xml`文件 28 | 29 | ``` 30 | def unparse(input_dict, output=None, encoding='utf-8', full_document=True, 31 | short_empty_elements=False, 32 | **kwargs): 33 | ``` 34 | 35 | * `input_dict`:字典数据 36 | * `output`:默认为`None`,则函数将转换后的`xml`数据字符串返回;如果设置文件路径,则保存在本地 37 | 38 | ## 示例 39 | 40 | 参考:[python中xml和json数据相互转换](https://blog.csdn.net/qq_33196814/article/details/99992771) 41 | 42 | ## VOC-xml 43 | 44 | 生成`VOC`格式`xml`文件,参考:[ zjykzj/pnno](https://github.com/zjykzj/pnno) -------------------------------------------------------------------------------- /docs/cplusplus/get-started/type-cast-deduce/auto.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11]auto 3 | 4 | 参考: 5 | 6 | [auto (C++)](https://docs.microsoft.com/en-us/cpp/cpp/auto-cpp?view=vs-2019) 7 | 8 | [placeholder type specifiers](https://en.cppreference.com/w/cpp/language/auto) 9 | 10 | ## 定义 11 | 12 | 关键字`auto`是`c++11`新增的,其目的是用于自动类型推断。语法如下: 13 | 14 | ``` 15 | auto declarator initializer; 16 | ``` 17 | 18 | `auto`本身不是类型,它是一个类型占位符。它能够指导编译器根据声明变量的初始化表达式或`lambda`表达式参数进行类型推断 19 | 20 | 使用`auto`代替固定类型声明有以下优点: 21 | 22 | * 鲁棒性:即使表达式的类型会更改也能工作,比如函数返回不同类型 23 | * 高性能:能够保证不会发生类型转换 24 | * 易用性:不需要关心拼写困难或打字错误 25 | * 高效率:使得编码更有效率 26 | 27 | 以下情况可能需要使用固定类型: 28 | 29 | 1. 只有某一类型能够起作用 30 | 2. 表达式模板辅助类型,比如`(valarray+valarray)` 31 | 32 | 网上也有关于`auto`的讨论:[如何评价 C++ 11 auto 关键字?](https://www.zhihu.com/question/35517805) 33 | 34 | ## 示例 35 | 36 | ``` 37 | # BEFORE 38 | float getSum(int A, float B) { 39 | return A + B; 40 | } 41 | 42 | int main(int argc, char *argv[]) { 43 | float sum = getSum(2, 33.33); 44 | 45 | vector src; 46 | src.emplace_back(1); 47 | src.emplace_back(4); 48 | src.emplace_back(3); 49 | for (vector::iterator it = src.begin(); it != src.end(); it++) { 50 | cout << *it << " "; 51 | } 52 | } 53 | 54 | # AFTER 55 | ... 56 | auto sum = getSum(2, 33.33); 57 | ... 58 | for (auto it = src.begin(); it != src.end(); it++) { 59 | cout << *it << " "; 60 | } 61 | ``` -------------------------------------------------------------------------------- /docs/pytorch/model/为什么推荐使用static_dict方式保存模型.md: -------------------------------------------------------------------------------- 1 | 2 | # 为什么推荐使用static_dict方式保存模型 3 | 4 | 在官网教程[[译]保存和加载模型](./[译]保存和加载模型.md)中给出了多种模型使用方式,其中最常用的有 5 | 6 | 1. 保存/加载`static_dict` 7 | 2. 
保存/加载完整模型 8 | 9 | 对于第一种方式,只保存训练好的模型的学习参数,但是加载时需要额外提供定义的模型结构;对于第二种方式,直接使用`PyTorch`的保存和加载函数即可,不过教程中也提到了第二种方式的缺陷,就是需要在调用时维护模型类文件的路径,否则会出错 10 | 11 | 之前一直使用第一种方式进行模型的读写,直到遇到了下面这个问题,才真正理解了第二种方式的缺陷 12 | 13 | ## ModuleNotFoundError: No module named 'models' 14 | 15 | 使用[ultralytics/yolov5](https://github.com/ultralytics/yolov5)的时候出现了如上错误。在网上查找了资料后发现这就是保存/加载完整模型带来的问题。参考 16 | 17 | * [torch.load() requires model module in the same folder #3678](https://github.com/pytorch/pytorch/issues/3678) 18 | * [ModuleNotFoundError: No module named 'models' #18325](https://github.com/pytorch/pytorch/issues/18325) 19 | * [Pytorch.load() error:No module named ‘model’](https://discuss.pytorch.org/t/pytorch-load-error-no-module-named-model/25821) 20 | 21 | ## 解析 22 | 23 | `PyTorch`集成了`Pickle`工具进行模型的保存和加载。如果直接保存完整模型,那么附带的需要在调用时维持和模型定义文件的相对位置,否则会出现错误 24 | 25 | ## 解决 26 | 27 | 解决方式就是维护调用文件和模型定义文件之间的相对位置,保证`Pickle`能够找到模型定义文件 28 | 29 | 1. 使用`sys.path`设置 30 | 31 | ``` 32 | import sys 33 | sys.path.insert(0, './yolov5') 34 | ``` 35 | 36 | 2. 设置`PYTHONPATH`环境变量 37 | 38 | ## 相关 39 | 40 | 在[ultralytics/yolov5](https://github.com/ultralytics/yolov5)提了一个问题 41 | 42 | * [ModuleNotFoundError: No module named 'models' #353](https://github.com/ultralytics/yolov5/issues/353) -------------------------------------------------------------------------------- /docs/cplusplus/advanced/class/析构器.md: -------------------------------------------------------------------------------- 1 | 2 | # 析构器 3 | 4 | 参考:[Destructors (C++)](https://docs.microsoft.com/en-us/cpp/cpp/destructors-cpp?view=vs-2019) 5 | 6 | 当对象超出作用域时,自动调用析构器进行删除 7 | 8 | ## 声明 9 | 10 | * 不接受参数 11 | * 不返回值 12 | * 无法声明为`const,volatile`或`static` 13 | * 可以声明为`virtual`。使用虚拟析构函数,可以在不知道对象类型的情况下销毁对象 - 使用虚拟函数机制调用对象的正确析构函数。注意,析构函数也可以声明为抽象类的纯虚拟函数 14 | 15 | ## 使用 16 | 17 | 当以下事件发生时调用析构器: 18 | 19 | * 具有块作用域的本地(自动)对象超出作用域 20 | * 使用`new`运算符分配的对象使用`delete`显式释放 21 | * 临时对象的生存期结束 22 | * 程序结束后为全局或静态对象调用析构器 23 | * 使用析构函数的完全限定名显式调用 24 | 25 | 使用限制如下: 26 | 27 | * 无法获取析构器地址 28 | * 派生类无法继承基类的析构器 29 | 30 | ## 调用顺序 31 | 32 | * 首先调用对象类的析构器,执行函数体 33 | * `nonstatic`成员对象的析构器以声明的相反顺序调用 34 | * 以声明的相反顺序调用非虚拟基类的析构器 35 | * 以声明的相反顺序调用虚拟基类的析构器 36 | 37 | ### 虚拟基类 38 | 39 | 虚拟基类的析构函数的调用顺序与它们在有向无环图中的出现顺序相反(深度优先、从左到右、后序遍历)。下图描述了继承关系图 40 | 41 | ![](https://docs.microsoft.com/en-us/cpp/cpp/media/vc392j1.gif?view=vs-2019) 42 | 43 | ``` 44 | class A 45 | class B 46 | class C : virtual public A, virtual public B 47 | class D : virtual public A, virtual public B 48 | class E : public C, public D, virtual public B 49 | ``` 50 | 51 | 首先`C/D`是非虚拟基类调用,非虚拟基类的析构函数的调用顺序与基类名称的声明顺序相反;然后才是虚拟基类的析构器调用。销毁顺序为`E->D->C->B->A` 52 | 53 | ## 显式析构调用 54 | 55 | ``` 56 | s.String::~String(); // non-virtual call 57 | ps->String::~String(); // non-virtual call 58 | 59 | s.~String(); // Virtual call 60 | ps->~String(); // Virtual call 61 | ``` 62 | 63 | 对未定义析构函数的显式调用没有任何效果 -------------------------------------------------------------------------------- /py/data_preprocessing/crop.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | @date: 2020/4/30 下午2:15 5 | @file: crop.py 6 | @author: zj 7 | @description: 中心裁剪、随机裁剪 8 | """ 9 | 10 | import torchvision.transforms as transforms 11 | from PIL import Image 12 | import matplotlib.pyplot as plt 13 | 14 | plt.rcParams['font.sans-serif'] = ['simhei'] # 用来正常显示中文标签 15 | plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 16 | 17 | 18 | def plot(src, dst, dst2): 19 | f = plt.figure() 20 | 21 | plt.subplot(311) 22 | h, w = src.size 23 | 
plt.title('原图 (w,h)=(%d, %d)' % (h, w)) 24 | plt.imshow(src), # plt.axis('off') 25 | 26 | plt.subplot(312) 27 | h, w = dst.size 28 | plt.title('中心裁剪 (w,h)=(%d, %d)' % (h, w)) 29 | plt.imshow(dst), # plt.axis('off') 30 | 31 | plt.subplot(313) 32 | h, w = dst2.size 33 | plt.title('随机裁剪 (w,h)=(%d, %d)' % (h, w)) 34 | plt.imshow(dst2), # plt.axis('off') 35 | 36 | plt.show() 37 | 38 | 39 | if __name__ == '__main__': 40 | src = Image.open('../data/butterfly.jpg') 41 | 42 | # 先缩放,再中心裁剪 43 | transform = transforms.Compose([ 44 | transforms.Resize(224), 45 | transforms.CenterCrop(224) 46 | ]) 47 | dst = transform(src) 48 | 49 | # 先缩放,再随机裁剪 50 | transform2 = transforms.Compose([ 51 | transforms.Resize(224), 52 | transforms.RandomCrop(224) 53 | ]) 54 | dst2 = transform2(src) 55 | 56 | plot(src, dst, dst2) 57 | -------------------------------------------------------------------------------- /tools/traincascade/imagestorage.h: -------------------------------------------------------------------------------- 1 | #ifndef _OPENCV_IMAGESTORAGE_H_ 2 | #define _OPENCV_IMAGESTORAGE_H_ 3 | 4 | 5 | class CvCascadeImageReader 6 | { 7 | public: 8 | bool create( const std::string _posFilename, const std::string _negFilename, cv::Size _winSize ); 9 | void restart() { posReader.restart(); } 10 | bool getNeg(cv::Mat &_img) { return negReader.get( _img ); } 11 | bool getPos(cv::Mat &_img) { return posReader.get( _img ); } 12 | 13 | private: 14 | class PosReader 15 | { 16 | public: 17 | PosReader(); 18 | virtual ~PosReader(); 19 | bool create( const std::string _filename ); 20 | bool get( cv::Mat &_img ); 21 | void restart(); 22 | 23 | short* vec; 24 | FILE* file; 25 | int count; 26 | int vecSize; 27 | int last; 28 | int base; 29 | } posReader; 30 | 31 | class NegReader 32 | { 33 | public: 34 | NegReader(); 35 | bool create( const std::string _filename, cv::Size _winSize ); 36 | bool get( cv::Mat& _img ); 37 | bool nextImg(); 38 | 39 | cv::Mat src, img; 40 | std::vector imgFilenames; 41 | cv::Point offset, point; 42 | float scale; 43 | float scaleFactor; 44 | float stepFactor; 45 | size_t last, round; 46 | cv::Size winSize; 47 | } negReader; 48 | }; 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /docs/matplotlib/中文乱码.md: -------------------------------------------------------------------------------- 1 | 2 | # 中文乱码 3 | 4 | 参考: 5 | 6 | [Linux 系统下 matplotlib 中文乱码解决办法](https://www.cnblogs.com/michael-xiang/p/10466807.html) 7 | 8 | [matplotlib图例中文乱码?](https://www.zhihu.com/question/25404709) 9 | 10 | ## 下载中文字体 11 | 12 | [simhei](https://fontzone.net/downloadfile/simhei) 13 | 14 | ## 存放 15 | 16 | 找到`matplotlib`字体存放位置 17 | 18 | ``` 19 | >>> import matplotlib 20 | >>> matplotlib.matplotlib_fname() 21 | '/home/zj/software/anaconda/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/matplotlibrc' 22 | ``` 23 | 24 | 进入`mpl-data/fonts/ttf`文件夹,存放刚才下载的`simhei.ttf` 25 | 26 | ## 配置 27 | 28 | 可以全局配置,也可以局部配置 29 | 30 | ### 全局配置 31 | 32 | 在`mpl-data`有配置文件`matplotlibrc`,添加以下配置 33 | 34 | ``` 35 | font.family : sans-serif 36 | font.sans-serif : SimHei, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Verdana, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 37 | axes.unicode_minus : False 38 | ``` 39 | 40 | ### 局部配置 41 | 42 | 在程序中配置使用中文字体 43 | 44 | ``` 45 | plt.rcParams['font.sans-serif']=['simhei'] #用来正常显示中文标签 46 | plt.rcParams['axes.unicode_minus']=False #用来正常显示负号 47 | ``` 48 | 49 | ## 缓存 50 | 51 | 
参考:[第四步:删除缓存](https://www.jianshu.com/p/d20a0971756b) 53 | 54 | 删除之前的缓存 55 | 56 | ``` 57 | $ rm -rf ~/.cache/matplotlib 58 | # 或 59 | $ rm -rf ~/.matplotlib 60 | ``` 61 | 62 | 也可以在文件中使用命令重载字体 63 | 64 | ``` 65 | from matplotlib.font_manager import _rebuild 66 | _rebuild() # reload一下 67 | ``` -------------------------------------------------------------------------------- /py/data_preprocessing/flip.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | @date: 2020/4/30 下午2:18 5 | @file: flip.py 6 | @author: zj 7 | @description: 水平/垂直翻转 8 | """ 9 | 10 | import torchvision.transforms as transforms 11 | from PIL import Image 12 | import matplotlib.pyplot as plt 13 | 14 | plt.rcParams['font.sans-serif'] = ['simhei'] # 用来正常显示中文标签 15 | plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 16 | 17 | 18 | def plot(src, dst, dst2): 19 | f = plt.figure() 20 | 21 | plt.subplot(311) 22 | h, w = src.size 23 | plt.title('原图 (w,h)=(%d, %d)' % (h, w)) 24 | plt.imshow(src), # plt.axis('off') 25 | 26 | plt.subplot(312) 27 | h, w = dst.size 28 | plt.title('水平翻转 (w,h)=(%d, %d)' % (h, w)) 29 | plt.imshow(dst), # plt.axis('off') 30 | 31 | plt.subplot(313) 32 | h, w = dst2.size 33 | plt.title('垂直翻转 (w,h)=(%d, %d)' % (h, w)) 34 | plt.imshow(dst2), # plt.axis('off') 35 | 36 | plt.show() 37 | 38 | 39 | if __name__ == '__main__': 40 | src = Image.open('../data/butterfly.jpg') 41 | 42 | # 随机水平翻转 43 | transform = transforms.Compose([ 44 | # transforms.ToPILImage(), 45 | transforms.RandomHorizontalFlip() 46 | ]) 47 | dst = transform(src) 48 | 49 | # 随机竖直翻转 50 | transform2 = transforms.Compose([ 51 | # transforms.ToPILImage(), 52 | transforms.RandomVerticalFlip() 53 | ]) 54 | dst2 = transform2(src) 55 | 56 | plot(src, dst, dst2) 57 | -------------------------------------------------------------------------------- /docs/algorithm/TP-FP-TN-FN.md: -------------------------------------------------------------------------------- 1 | 2 | # TP-FP-TN-FN 3 | 4 | 参考: 5 | 6 | [关于false positive和false negative的定义的疑惑?](https://www.zhihu.com/question/302985367/answer/535024467) 7 | 8 | [机器学习之分类器性能指标之ROC曲线、AUC值](https://blog.csdn.net/zdy0_2004/article/details/44948511) 9 | 10 | [false positive 与 false negative](https://blog.csdn.net/u013264172/article/details/51972152) 11 | 12 | ## 解析 13 | 14 | 在二分类问题上,会出现以下四种分类情况 15 | 16 | * 实例是正类 17 | * 预测结果是正类,称为真阳性(`true positive`,简称`TP`) 18 | * 预测结果是负类,称为假阴性(`false negative`,简称`FN`) 19 | * 实例是负类 20 | * 预测结果是正类,称为假阳性(`false positive`,简称`FP`) 21 | * 预测结果是负类,称为真阴性(`true negative`,简称`TN`) 22 | 23 | 完整的计算结果如下表所示 24 | 25 | 31 | 32 | ![](./imgs/tp-fp-tn-fn.png) 33 | 34 | ## 准确率 35 | 36 | 计算准确率(`precision rate`),指的是预测为正的样本中实际是正样本的概率,计算公式为`TP / (TP+FP)` 37 | 38 | ## 漏检率 39 | 40 | 计算漏检率(`miss rate`,即假阴性率`false negative rate`),指的是实际为正样本但预测为负的概率,计算公式为`FN/(TP+FN)` 41 | 42 | ## 召回率 43 | 44 | 计算召回率(`recall rate`),指的是预测为正的正样本占整个正样本的概率,计算公式为`TP/(TP+FN)` -------------------------------------------------------------------------------- /docs/matplotlib/属性配置.md: -------------------------------------------------------------------------------- 1 | 2 | # 属性配置 3 | 4 | ## 查找配置文件 5 | 6 | 使用命令查找配置文件 7 | 8 | ``` 9 | >>> import matplotlib 10 | >>> matplotlib.matplotlib_fname() 11 | 'xxx/xxx/matplotlib/mpl-data/matplotlibrc' 12 | ``` 13 | 14 | ## 重载配置文件 15 | 16 | 修改完成配置文件后需要重新加载,或者重启系统,或者输入以下命令 17 | 18 | ``` 19 | from matplotlib.font_manager import _rebuild 20 | _rebuild() 21 | ``` 22 | 23 | ## 属性查找 24 | 25 | 使用命令查看当前属性 26 | 27 | ``` 28 | >>> import matplotlib.pyplot as plt 
29 | >>> plt.rcParams 30 | /home/zj/software/anaconda/anaconda3/lib/python3.7/site-packages/matplotlib/__init__.py:886: MatplotlibDeprecationWarning: 31 | examples.directory is deprecated; in the future, examples will be found relative to the 'datapath' directory. 32 | "found relative to the 'datapath' directory.".format(key)) 33 | RcParams({'_internal.classic_mode': False, 34 | 'agg.path.chunksize': 0, 35 | 'animation.avconv_args': [], 36 | 'animation.avconv_path': 'avconv', 37 | 'animation.bitrate': -1, 38 | 'animation.codec': 'h264', 39 | 'animation.convert_args': [], 40 | 'animation.convert_path': 'convert', 41 | 'animation.embed_limit': 20.0, 42 | 'animation.ffmpeg_args': [], 43 | 'animation.ffmpeg_path': 'ffmpeg', 44 | 'animation.frame_format': 'png', 45 | 'animation.html': 'none', 46 | ... 47 | ... 48 | ``` 49 | 50 | ## 属性设置 51 | 52 | 使用命令进行属性设置 53 | 54 | ``` 55 | plt.rcParams[xxx] = xxx 56 | ``` -------------------------------------------------------------------------------- /py/data_preprocessing/erase.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | @date: 2020/5/1 上午10:09 5 | @file: erase.py 6 | @author: zj 7 | @description: 8 | """ 9 | 10 | import torchvision.transforms as transforms 11 | from PIL import Image 12 | import matplotlib.pyplot as plt 13 | 14 | plt.rcParams['font.sans-serif'] = ['simhei'] # 用来正常显示中文标签 15 | plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 16 | 17 | 18 | def plot(src, dst, dst2): 19 | f = plt.figure() 20 | 21 | plt.subplot(311) 22 | h, w = src.size 23 | plt.title('原图 (w,h)=(%d, %d)' % (h, w)) 24 | plt.imshow(src), # plt.axis('off') 25 | 26 | plt.subplot(312) 27 | h, w = dst.size 28 | plt.title('随机擦除1 (w,h)=(%d, %d)' % (h, w)) 29 | plt.imshow(dst), # plt.axis('off') 30 | 31 | plt.subplot(313) 32 | h, w = dst2.size 33 | plt.title('随机擦除2 (w,h)=(%d, %d)' % (h, w)) 34 | plt.imshow(dst2), # plt.axis('off') 35 | 36 | plt.show() 37 | 38 | 39 | if __name__ == '__main__': 40 | src = Image.open('../data/butterfly.jpg') 41 | 42 | # 随机擦除1 43 | transform = transforms.Compose([ 44 | transforms.ToTensor(), 45 | transforms.RandomErasing(), 46 | transforms.ToPILImage() 47 | ]) 48 | dst = transform(src) 49 | 50 | # 随机擦除2 51 | transform2 = transforms.Compose([ 52 | transforms.ToTensor(), 53 | transforms.RandomErasing(), 54 | transforms.ToPILImage() 55 | ]) 56 | dst2 = transform2(src) 57 | 58 | plot(src, dst, dst2) 59 | -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/queue.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl]queue 3 | 4 | `c++`提供了队列的实现:[queue](http://www.cplusplus.com/reference/queue/queue/),实现先进先出(`first-in first-out, FIFO`)功能 5 | 6 | ## 创建队列 7 | 8 | 引入头文件`queue`,创建时指定数据类型: 9 | 10 | ``` 11 | #include 12 | 13 | queue q; 14 | ``` 15 | 16 | ## 队列功能 17 | 18 | `queue`提供了如下常用功能实现: 19 | 20 | * empty():判断队列是否问空,为空返回`true`,不为空返回`false` 21 | * size():返回队列长度 22 | * front():返回队头(第一个出队)数据 23 | * back():返回队尾(第一个入队)数据 24 | * push(value_type&& __x):添加数据到队尾 25 | * pop():移除队头数据。注意,返回值为空 26 | 27 | `c++11`提供了两个新特性: 28 | 29 | * [swap](http://www.cplusplus.com/reference/queue/queue/swap/):交换两个队列的值 30 | * [emplace](http://www.cplusplus.com/reference/queue/queue/emplace/):添加数据到队尾。这个新元素是就地(`in place`)构造的,它传递参数作为其构造函数的参数,可替换push操作 31 | 32 | 
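下面用一个自定义类型(假设的`Task`类型,仅作演示)简单示意`push`与`emplace`在用法上的差别:

```
#include <iostream>
#include <queue>
#include <string>

struct Task {
    int id;
    std::string name;

    Task(int _id, std::string _name) : id(_id), name(std::move(_name)) {}
};

int main() {
    std::queue<Task> q;

    // push:先构造一个临时Task对象,再拷贝/移动进队列
    q.push(Task(1, "download"));
    // emplace:直接把构造参数转发给Task的构造函数,在队列内部就地构造
    q.emplace(2, "decode");

    std::cout << q.size() << std::endl;        // 2
    std::cout << q.front().name << std::endl;  // download

    return 0;
}
```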
*参考:[C++11中emplace的使用](https://blog.csdn.net/u013700358/article/details/52623985):emplace能通过参数构造对象,不需要拷贝或者移动内存,相比push能更好地避免内存的拷贝与移动,使容器插入元素的性能得到进一步提升* 33 | 34 | ## 实现 35 | 36 | ``` 37 | #include 38 | #include 39 | using namespace std; 40 | 41 | int main() { 42 | queue q; 43 | // 队列大小 44 | cout << q.size() << endl; 45 | // 队列是否为空 46 | cout << q.empty() << endl; 47 | 48 | q.push(3); 49 | q.push(4); 50 | q.emplace(5); 51 | 52 | // 队头元素 53 | cout << q.front() << endl; 54 | // 队尾元素 55 | cout << q.back() << endl; 56 | 57 | queue s; 58 | s.push(3232); 59 | 60 | q.swap(s); 61 | cout << q.size() << endl; 62 | cout << q.empty() << endl; 63 | } 64 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/basic-concepts/临时对象.md: -------------------------------------------------------------------------------- 1 | 2 | # 临时对象 3 | 4 | 参考: 5 | 6 | [Temporary Objects](https://docs.microsoft.com/en-us/cpp/cpp/temporary-objects?view=vs-2019) 7 | 8 | [c++ 临时变量问题?](https://www.zhihu.com/question/41137408) 9 | 10 | 临时对象的创建有如下原因: 11 | 12 | * 要初始化一个`const`引用,但是其初始化器的类型不同于引用的基础类型 13 | * 存储返回用户定义类型的函数返回值 14 | 15 | * 只有当程序不会将返回值复制到对象时,才会创建这些临时文件 16 | 17 | // 函数声明,使用自定义类型作为返回值 18 | UDT Func1(); 19 | ... 20 | // 调用该函数,不接受返回值。此时编译器创建一个临时对象来保存返回值 21 | Func1(); 22 | 23 | * 创建临时变量的更常见情况是在表达式的计算过程中,必须调用重载的运算符函数。这些重载的运算符函数返回一个用户定义的类型,该类型通常不会复制到另一个对象 24 | 25 | class Complex { 26 | 27 | public: 28 | Complex(int num) : num(num) {} 29 | 30 | Complex operator+(Complex &other) { 31 | return Complex{this->getNum() + other.getNum()}; 32 | } 33 | 34 | int getNum() { 35 | return num; 36 | } 37 | 38 | private: 39 | int num; 40 | }; 41 | 42 | int main() { 43 | Complex complex1(1), complex2(2), complex3(3); 44 | // complex1和complex2的加法结果被存储在一个临时对象tmp中 45 | // tmp再继续和complex3进行加法计算,将结果复制到result 46 | Complex result = complex1 + complex2 + complex3; 47 | 48 | cout << result.getNum() << endl; 49 | } 50 | 51 | * 将强制转换的结果存储到用户定义的类型。当给定类型的对象显式转换为用户定义的类型时,该新对象将被构造为临时对象 -------------------------------------------------------------------------------- /docs/pytorch/[AdaptiveMaxPool][AdaptiveAvgPool]自适应池化层操作.md: -------------------------------------------------------------------------------- 1 | 2 | # [AdaptiveMaxPool][AdaptiveAvgPool]自适应池化层操作 3 | 4 | 空间金字塔池化操作解放了固定输入的限制,保证了输出固定大小,在`PyTorch`中使用`AdaptiveMaxPool`和`AdaptiveAvgPool`实现 5 | 6 | ## AdaptiveMaxPool 7 | 8 | 包含了一维/二维/三维实现 9 | 10 | * [AdaptiveMaxPool1d](https://pytorch.org/docs/stable/nn.html#adaptivemaxpool1d) 11 | * [AdaptiveMaxPool2d](https://pytorch.org/docs/stable/nn.html#adaptivemaxpool2d) 12 | * [AdaptiveMaxPool3d](https://pytorch.org/docs/stable/nn.html#adaptivemaxpool3d) 13 | 14 | ### 一维示例 15 | 16 | ``` 17 | >>> import torch 18 | >>> import torch.nn as nn 19 | >>> 20 | >>> input = torch.randn(1, 1, 8) 21 | >>> input 22 | tensor([[[ 1.6188, -0.0436, 1.8603, 0.9043, 0.1372, 0.6567, -0.5700, 23 | 0.8480]]]) 24 | >>> 25 | >>> m = nn.AdaptiveMaxPool1d(5) 26 | >>> m 27 | AdaptiveMaxPool1d(output_size=5) 28 | >>> 29 | >>> output = m(input) 30 | >>> output 31 | tensor([[[1.6188, 1.8603, 0.9043, 0.6567, 0.8480]]]) 32 | >>> output.size() 33 | torch.Size([1, 1, 5]) 34 | ``` 35 | 36 | 在定义`AdaptiveMaxPool1d`对象时确定固定输出大小即可 37 | 38 | ### 二维示例 39 | 40 | 二维操作和一维操作一样,设置输出大小即可 41 | 42 | ``` 43 | >>> input = torch.randn((1, 1, 8, 8)) 44 | >>> m = nn.AdaptiveMaxPool2d((3, 3)) 45 | >>> output = m(input) 46 | >>> output.size() 47 | torch.Size([1, 1, 3, 3]) 48 | >>> output 49 | tensor([[[[1.4030, 2.3893, 1.1493], 50 | [0.8610, 1.9903, 0.9673], 51 | 
[1.1998, 1.9903, 1.9642]]]]) 52 | ``` 53 | 54 | ## AdaptiveAvgPool 55 | 56 | 参考:[自适应平均池化层](https://blog.zhujian.life/posts/ba337bfa.html) -------------------------------------------------------------------------------- /docs/cplusplus/get-started/basic-concepts/声明和定义.md: -------------------------------------------------------------------------------- 1 | 2 | # 声明和定义 3 | 4 | 参考: 5 | 6 | [Declarations and Definitions (C++)](https://docs.microsoft.com/en-us/cpp/cpp/declarations-and-definitions-cpp?view=vs-2019) 7 | 8 | [Declarations, Prototypes, Definitions, and Implementations](http://www.cplusplus.com/articles/yAqpX9L8/) 9 | 10 | ## 声明和定义 11 | 12 | * 声明(`declaration`)用于为程序引入名称 13 | * 定义(`definition`)是在内存中创建实体位置 14 | 15 | 声明通常可看成定义,除了以下情况: 16 | 17 | 1. 函数原型(`function prototype`):只有函数声明,没有函数体 18 | 2. 包含`extern`标识符,同时没有初始化值(对于对象和变量而言)或没有函数体(对于函数而言)。这意味着不一定在当前单元进行定义,给予其外部链接 19 | 3. 类声明中的静态数据成员:因为静态类数据成员是类中所有对象共享的离散变量,所以必须在类声明之外定义和初始化它们 20 | 4. 不包含定义的类声明,比如`class T;` 21 | 5. `typedef`表达式 22 | 23 | 声明后都需要进行定义,除了以下两种情况: 24 | 25 | 1. 函数已声明,但从未被函数调用或者被表达式引用其地址 26 | 2. 类的使用方式不需要知道其定义,但是必须声明类。比如 27 | 28 | ``` 29 | class WindowCounter; // Forward declaration; no definition 30 | 31 | class Window 32 | { 33 | // Definition of WindowCounter not required 34 | static WindowCounter windowCounter; 35 | }; 36 | 37 | int main() 38 | { 39 | } 40 | ``` 41 | 42 | ## 为什么要区分声明和定义 43 | 44 | 从源文件编译得到程序可分为两个过程:编译(`compile`)和链接(`link`) 45 | 46 | ![](./imgs/Comp-link.png) 47 | 48 | * 编译阶段:独立编译每一个`.cpp`文件。将所有`#include`文件插入到`.cpp`中,然后从头到尾进行编译,生成机器码输出为`.obj`文件 49 | * 链接阶段:组合所有`.obj`文件,生成内存寻址以及函数调用,最后输出一个可执行程序 50 | 51 | 在编译阶段编译器只需要知道函数参数类型以及返回值类型即可,不关心具体实现过程 52 | 53 | 所以声明作用于编译阶段,定义作用于链接阶段 54 | 55 | ## 使用声明和定义 56 | 57 | 在编译阶段编译器从头到尾处理`.cpp`文件,所以在使用变量、函数等名称之前必须有声明,有两种方式: 58 | 59 | 1. 前向声明(`Forward declaration`):仅包含声明,在之后进行定义(**推荐**) 60 | 2. 
在使用之前同时进行声明和定义 -------------------------------------------------------------------------------- /docs/python/[collections][deque]双向队列的使用.md: -------------------------------------------------------------------------------- 1 | 2 | # [collections][deque]双向队列的使用 3 | 4 | 参考: 5 | 6 | [python3:deque和list的区别](https://blog.csdn.net/qq_34979346/article/details/83540389) 7 | 8 | [python list与deque在存储超大数组的区别](https://blog.csdn.net/qq_37887537/article/details/93722103) 9 | 10 | `list`是单向队列,而`deque`是双向队列 11 | 12 | ## list方法 13 | 14 | `deque`支持`list`常用的用法,包括 15 | 16 | ``` 17 | >>> from collections import deque 18 | >>> a = deque(range(3)) 19 | >>> a 20 | deque([0, 1, 2]) 21 | # 队尾添加 22 | >>> a.append(4) 23 | >>> a 24 | deque([0, 1, 2, 4]) 25 | # 检索下标 26 | >>> a.index(2) 27 | 2 28 | # 队尾弹出 29 | >>> a.pop() 30 | 4 31 | >>> a 32 | deque([0, 1, 2]) 33 | # 读取指定下标值 34 | >>> a[2] 35 | 2 36 | ``` 37 | 38 | ## deque方法 39 | 40 | 除此之外,`deque`还支持前向操作 41 | 42 | ``` 43 | appendleft(x) 头部添加元素 44 | extendleft(iterable) 头部添加多个元素 45 | popleft() 头部返回并删除 46 | rotate(n=1) 旋转 47 | maxlen 最大空间,如果是无边界的,返回None 48 | ``` 49 | 50 | ## maxlen 51 | 52 | `deque`支持有限长度 53 | 54 | ``` 55 | >>> a = deque(range(3), maxlen=3) 56 | >>> a 57 | deque([0, 1, 2], maxlen=3) 58 | >>> a.append(4) 59 | >>> a 60 | deque([1, 2, 4], maxlen=3) 61 | >>> a.appendleft(22) 62 | >>> a 63 | deque([22, 1, 2], maxlen=3) 64 | ``` 65 | 66 | 当队列到达最大长度后,再次添加元素,会进行替换操作 67 | 68 | ## rotate 69 | 70 | 将序列队头/队尾元素进行移动 71 | 72 | ``` 73 | >>> a = deque(range(8)) 74 | >>> a 75 | deque([0, 1, 2, 3, 4, 5, 6, 7]) 76 | # 将队尾元素向队头移动 77 | >>> a.rotate(2) 78 | >>> a 79 | deque([6, 7, 0, 1, 2, 3, 4, 5]) 80 | # 将队头元素向队尾移动 81 | >>> a.rotate(-2) 82 | >>> a 83 | deque([0, 1, 2, 3, 4, 5, 6, 7]) 84 | ``` -------------------------------------------------------------------------------- /docs/matplotlib/矩阵显示.md: -------------------------------------------------------------------------------- 1 | 2 | # 矩阵显示 3 | 4 | 通过图形显示矩阵信息,依据图像颜色反映矩阵各个位置上的数值大小 5 | 6 | ## 函数定义 7 | 8 | 参考:[matplotlib.pyplot.matshow](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.matshow.html?highlight=matshow#matplotlib.pyplot.matshow) 9 | 10 | ``` 11 | matplotlib.pyplot.matshow(A, fignum=None, **kwargs) 12 | ``` 13 | 14 | * 参数`A`表示将要图形化的数组 15 | * 参数`fignum`指定图形窗口 16 | * 默认为`None`,表示新建一个窗口进行绘制 17 | * 如果为非零整数,那么将绘制到该窗口(如果不存在则新建) 18 | * 如果为`0`,那么使用当前窗口(如果不存在则新建) 19 | 20 | ## 示例 21 | 22 | 参考:[Matshow](https://matplotlib.org/gallery/images_contours_and_fields/matshow.html#sphx-glr-gallery-images-contours-and-fields-matshow-py) 23 | 24 | ``` 25 | import matplotlib.pyplot as plt 26 | import numpy as np 27 | import warnings 28 | 29 | warnings.filterwarnings("ignore") 30 | 31 | def samplemat(dims): 32 | """Make a matrix with all zeros and increasing elements on the diagonal""" 33 | aa = np.zeros(dims) 34 | for i in range(min(dims)): 35 | aa[i, i] = i 36 | return aa 37 | 38 | 39 | ma = samplemat((10, 10)) 40 | print(ma) 41 | # Display matrix 42 | plt.matshow(ma) 43 | plt.show() 44 | ``` 45 | 46 | 输出矩阵信息: 47 | 48 | ``` 49 | [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] 50 | [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] 51 | [0. 0. 2. 0. 0. 0. 0. 0. 0. 0.] 52 | [0. 0. 0. 3. 0. 0. 0. 0. 0. 0.] 53 | [0. 0. 0. 0. 4. 0. 0. 0. 0. 0.] 54 | [0. 0. 0. 0. 0. 5. 0. 0. 0. 0.] 55 | [0. 0. 0. 0. 0. 0. 6. 0. 0. 0.] 56 | [0. 0. 0. 0. 0. 0. 0. 7. 0. 0.] 57 | [0. 0. 0. 0. 0. 0. 0. 0. 8. 0.] 58 | [0. 0. 0. 0. 0. 0. 0. 0. 0. 
9.]] 59 | ``` 60 | 61 | ![](./imgs/mat_image.png) -------------------------------------------------------------------------------- /docs/cplusplus/学习C++之路.md: -------------------------------------------------------------------------------- 1 | 2 | # 学习c++之路 3 | 4 | 从大学开始,陆陆续续的学习和使用`C++`。最开始是从`C`入门,然后自学过`C++`,当时看的是书籍:`《C程序设计》`和`《C++ Primer》`等;后来做项目的时候需要`C++`编程,看得更多的是博客,专注于要解决的困难点;最近实践深度学习需要使用`C++`,所以在网上找一些教程和参考网站 5 | 6 | *以下涉及的网站同样提供了`C`语言规范和教程* 7 | 8 | ## 语法 9 | 10 | 网站[cplusplus](http://www.cplusplus.com/)和[cppreference](https://en.cppreference.com/w/)提供了全面的`C++`语法规范 11 | 12 | ## 教程 13 | 14 | 推荐以下`3`个在线教程 15 | 16 | 1. [Microsoft Docs](https://docs.microsoft.com/en-us/cpp/cpp/c-cpp-language-and-standard-libraries?view=vs-2019) 17 | 2. [cppreference - C++ language](https://en.cppreference.com/w/cpp/language) 18 | 3. [cplusplus - C++ Language](http://www.cplusplus.com/doc/tutorial/) 19 | 20 | 其中微软提供的教程排版比较好,易于阅读和理解,不过`3`个教程都有各自的角度,综合起来看比较全面 21 | 22 | ## 库参考 23 | 24 | `cplusplus`提供了标准`C++`库参考:[Standard C++ Library reference](http://www.cplusplus.com/reference/) 25 | 26 | ## 关键字 27 | 28 | [Microsoft - Keywords (C++)](https://docs.microsoft.com/en-us/cpp/cpp/keywords-cpp?view=vs-2019)提供了完整的关键字列表 29 | 30 | 关键字是具有特殊含义的预定义保留标识符,它们不能用作程序中的自定义标识符。以下标识符是微软`C++`保留的关键字,下划线开头的名字和附加(`C++/CLI`)的名字是微软扩展 31 | 32 | ## 操作符 33 | 34 | [C++ Built-in Operators, Precedence and Associativity](https://docs.microsoft.com/en-us/cpp/cpp/cpp-built-in-operators-precedence-and-associativity?view=vs-2019)提供了完整的操作符列表 35 | 36 | `C++`语言包括所有的`C`运算符,并添加了几个新的运算符。运算符指定一个或多个操作数执行计算 37 | 38 | ## 语言规范 39 | 40 | `C++`规范已经经历了多个版本的迭代(`98/03/11/14/17/20`),其实现方式从`C`语言风格转向脚本语言风格,越来越智能和现代化。当前专注于`C++11`版本的学习和使用,关于`C++11`舍弃的命令和使用方式,参考[Which C++ idioms are deprecated in C++11?](https://stackoverflow.com/questions/9299101/which-c-idioms-are-deprecated-in-c11) -------------------------------------------------------------------------------- /docs/opencv/code/[cartToPolar]二维向量的大小和角度.md: -------------------------------------------------------------------------------- 1 | 2 | # [cartToPolar]二维向量的大小和角度 3 | 4 | `OpenCV`提供函数[cv::cartToPolar](https://docs.opencv.org/4.0.1/d2/de8/group__core__array.html#gac5f92f48ec32cacf5275969c33ee837d0)用于计算`2`维向量的大小和角度 5 | 6 | ## 函数解析 7 | 8 | ``` 9 | CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, 10 | OutputArray magnitude, OutputArray angle, 11 | bool angleInDegrees = false); 12 | ``` 13 | 14 | * `x`:`x`轴坐标数组;这必须是单精度或双精度浮点数组 15 | * `y`:`y`轴坐标数组,其大小和类型必须与`x`相同 16 | * `magnitude`:输出与`x`相同大小和类型的大小数组 17 | * `angle`:与x具有相同大小和类型的角度的输出数组;角度以弧度(从`0`到`2*Pi`)或度(`0`到`360`度)度量 18 | * `angleInDegrees`:标志,指示结果是以弧度(默认情况下是以弧度)还是以度度量 19 | 20 | **注意:输入数组必须具有相同精度** 21 | 22 | 输入`x/y`均为`2`维向量,其实现如下: 23 | 24 | ![](./imgs/cartToPolar.png) 25 | 26 | 源码地址:`/path/to/modules/core/test/test_arithm.cpp` 27 | 28 | ## 示例 29 | 30 | ``` 31 | #include 32 | #include 33 | 34 | using namespace std; 35 | using namespace cv; 36 | 37 | int main() { 38 | Mat xx = Mat(2, 3, CV_32FC1, Scalar(6, 0, 0)); 39 | Mat yy = Mat(2, 3, CV_32FC1, Scalar(6, 0, 0)); 40 | 41 | cout << xx << endl; 42 | cout << yy << endl; 43 | 44 | Mat mag, angle; 45 | // 输出角度 等边直角三角形,小角=45度 46 | cartToPolar(xx, yy, mag, angle, true); 47 | cout << mag << endl; 48 | cout << angle << endl; 49 | } 50 | // out 51 | [6, 6, 6; 52 | 6, 6, 6] 53 | [6, 6, 6; 54 | 6, 6, 6] 55 | [8.485281, 8.485281, 8.485281; 56 | 8.485281, 8.485281, 8.485281] 57 | [44.990456, 44.990456, 44.990456; 58 | 44.990456, 44.990456, 44.990456] 59 | ``` 60 | 61 | 
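`OpenCV`的`Python`绑定同样提供了`cv2.cartToPolar`。下面给出一个对照上述`C++`示例的最小示意(假设输入为`float32`的`numpy`数组),并用`numpy`手动验证大小和角度的计算;输出角度约为`44.99`度而非精确的`45`度,是因为`OpenCV`内部使用了快速`atan`近似(官方文档标称精度约`0.3`度)

```
import cv2
import numpy as np

# 与上述C++示例相同的输入:2x3的float32矩阵,元素全为6
xx = np.full((2, 3), 6, dtype=np.float32)
yy = np.full((2, 3), 6, dtype=np.float32)

# angleInDegrees=True表示角度以度为单位输出
mag, angle = cv2.cartToPolar(xx, yy, angleInDegrees=True)
print(mag)    # 约 8.485281,即 sqrt(6^2 + 6^2)
print(angle)  # 约 44.99 度

# 用numpy验证同样的计算
print(np.sqrt(xx ** 2 + yy ** 2))
print(np.degrees(np.arctan2(yy, xx)))  # 精确的 45 度
```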
-------------------------------------------------------------------------------- /samples/plantuml/class.puml: -------------------------------------------------------------------------------- 1 | @startuml 2 | skinparam classAttributeIconSize 0 3 | 4 | ' class Dummy { 5 | ' String data 6 | ' void methods() 7 | ' } 8 | 9 | ' class Flight { 10 | ' +flightNumber: Integer 11 | ' -departureTime: Data 12 | 13 | ' # hahah(): void 14 | ' } 15 | 16 | ' ' 泛化 空心三角形+实线 17 | ' Class01 <|-- Class02 18 | ' ' 实现 空心三角形+虚线 19 | ' Class03 <|.. Class04 20 | ' ' 组合 实心菱形+实线 21 | ' Class05 *-- Class06 22 | ' ' 聚合 空心菱形+实线 23 | ' Class07 o-- Class08 24 | ' ' 关联 箭头+实线 25 | ' Class09 <-- Class10 26 | ' ' 依赖 箭头+虚线 27 | ' Class11 <.. Class12 28 | 29 | ' namespaceA.Class01 <|-- namespaceB.Class02 30 | 31 | ' class Dummy { 32 | ' {static} String id 33 | ' {abstract} void methods() 34 | ' {static} virtual void hi() override 35 | ' } 36 | 37 | ' Class01 "1" *-- "many" Class02 : contains 38 | 39 | ' Class03 o-- Class04 : aggregation 40 | 41 | ' Class05 --> "1" Class06 42 | 43 | ' class Dummy01 { 44 | ' String data 45 | ' void methods() 46 | ' } 47 | 48 | ' class Dummy02 { 49 | ' data: String 50 | ' methods(): void 51 | ' } 52 | 53 | ' class Dummy03 { 54 | ' String data 55 | ' void methods() override 56 | ' } 57 | 58 | class default { 59 | + int field01 60 | - float field02 61 | 62 | + void method01() 63 | - float method02() 64 | } 65 | 66 | class custom { 67 | + int field01 68 | ' 两格点号表示虚线 69 | .. 70 | - float field02 71 | ' 两格下划线表示实线 72 | __ 73 | ' 可以在线段中间添加文字 74 | .. 构造器 .. 75 | custom() 76 | __ override __ 77 | + void method01() 78 | - float method02() 79 | } 80 | 81 | @enduml -------------------------------------------------------------------------------- /docs/python/[slice]扩展切片操作.md: -------------------------------------------------------------------------------- 1 | 2 | # [slice]扩展切片操作 3 | 4 | 参考: 5 | 6 | [python 切片(slice)](https://blog.csdn.net/u012005313/article/details/48159477) 7 | 8 | [[Python]切片完全指南(语法篇)](https://zhuanlan.zhihu.com/p/79541418) 9 | 10 | 切片操作是`Python`实现中最常用的功能之一,重新小结几种不同的切片操作 11 | 12 | ## 基本切片 13 | 14 | 基本切片语法如下: 15 | 16 | ``` 17 | list[start:stop] 18 | ``` 19 | 20 | 常用以下几种实现方式: 21 | 22 | * 缺省`start` 23 | * 语法:`list[:stop]` 24 | * 作用:取前`stop`个元素 25 | * 缺省`stop` 26 | * 语法:`list[start:]` 27 | * 作用:取`start`开始的所有元素 28 | * 同时缺省`start`和`stop` 29 | * 语法:`list[:]` 30 | * 作用:取整个列表的所有元素 31 | 32 | 通常情况下,下标`start`小于`stop`,如果出现`start`大于等于`stop`,那么返回空列表 33 | 34 | ## 扩展切片 35 | 36 | 扩展切片语法如下: 37 | 38 | ``` 39 | a[start:stop:step] 40 | ``` 41 | 42 | 增加了`step`参数,表示每隔多少个位置取一个元素,如果为空,默认为`1`,即取`[start, stop)`区间内所有的元素 43 | 44 | ## 另一种扩展切片:`...` 45 | 46 | 阅读源码过程中还发现了一个切片符号`...`,其实现如下: 47 | 48 | ``` 49 | >>> import numpy as np 50 | >>> a = np.random.randn(2, 3, 4) 51 | >>> a 52 | array([[[-1.08746212, -1.98456057, 0.31604132, 0.77781412], 53 | [ 0.09395506, 0.89095723, 0.33838468, 1.7124017 ], 54 | [ 0.91403118, -0.27132943, 2.10017449, -0.05663262]], 55 | 56 | [[ 0.38814181, 0.30023491, -0.0099694 , 0.53520844], 57 | [-2.01299842, -0.17652996, 0.261344 , 0.19216268], 58 | [ 0.15511314, -0.48549088, -0.30289901, 1.46871216]]]) 59 | >>> a[:, :, 3] 60 | array([[ 0.77781412, 1.7124017 , -0.05663262], 61 | [ 0.53520844, 0.19216268, 1.46871216]]) 62 | >>> a[..., 3] 63 | array([[ 0.77781412, 1.7124017 , -0.05663262], 64 | [ 0.53520844, 0.19216268, 1.46871216]]) 65 | ``` -------------------------------------------------------------------------------- /docs/pytorch/preprocessing/[Ten Crops]多样本检测.md: 
-------------------------------------------------------------------------------- 1 | 2 | # [Ten Crops]多样本检测 3 | 4 | 参考: 5 | 6 | [PyTorch数据增强,TenCrop的用法](https://www.jianshu.com/p/aba1142c0453) 7 | 8 | [How to properly do 10-crop testing on Imagenet?](https://discuss.pytorch.org/t/how-to-properly-do-10-crop-testing-on-imagenet/11341) 9 | 10 | [[Pytorch]Pytorch中图像的基本操作(TenCrop)](https://blog.csdn.net/weixin_44538273/article/details/88406404) 11 | 12 | 在检测阶段,采集一张样本中的多个裁剪,平均其预测结果,有助于更高的检测精度 13 | 14 | 完整实现参考`py/data_preprocessing/ten-crops.py` 15 | 16 | ## FiveCrops/TenCrops 17 | 18 | 常用的有`FiveCrops`和`TenCrops` 19 | 20 | * `FiveCrops`:裁剪图像中心和`4`个角 21 | * `TenCrops`:裁剪图像中心和`4`个角,以及翻转图像后的`5`个裁剪 22 | 23 | ## PyTorch实现 24 | 25 | `PyTorch`提供了函数`torchvision.transforms.FiveCrop`和`torchvision.transforms.TenCrop`。裁剪`5`张图像并显示 26 | 27 | ``` 28 | def plot(src, dsts): 29 | f = plt.figure() 30 | 31 | cols = 3 32 | rows = 2 33 | 34 | plt.subplot(rows, cols, 1) 35 | plt.title('src') 36 | plt.imshow(src) 37 | 38 | for i in range(rows): 39 | for j in range(cols): 40 | if (i * cols + j) == 5: 41 | break 42 | 43 | plt.subplot(rows, cols, i * cols + j + 2) 44 | plt.imshow(dsts[i * cols + j]) 45 | 46 | plt.show() 47 | 48 | 49 | def draw_five_crop(): 50 | src = Image.open('./data/butterfly.jpg') 51 | 52 | transform = transforms.Compose([ 53 | transforms.Resize(256), 54 | transforms.FiveCrop(224), # this is a list of PIL Images 55 | ]) 56 | 57 | dsts = transform(src) 58 | print(len(dsts)) 59 | plot(src, dsts) 60 | ``` 61 | 62 | ![](./imgs/fivecrop.png) -------------------------------------------------------------------------------- /docs/opencv/draw/[text]绘制文本.md: -------------------------------------------------------------------------------- 1 | 2 | # [text]绘制文本 3 | 4 | 参考: 5 | 6 | [opencv 绘图 cvLine cvRectangle cvCircle cvEllipse cvEllipseBox cvFillPoly cvConvexPoly cvPolyLine](https://blog.csdn.net/u012005313/article/details/46802565) 7 | 8 | [putText()](https://docs.opencv.org/4.1.0/d6/d6e/group__imgproc__draw.html#ga5126f47f883d730f633d74f07456c576) 9 | 10 | ## 定义 11 | 12 | ``` 13 | def putText(img, text, org, fontFace, fontScale, color, thickness=None, lineType=None, bottomLeftOrigin=None): # 14 | ``` 15 | 16 | * `img`:输入图像 17 | * `text`:绘制文本 18 | * `org`:左下角坐标点 19 | * `fontFace`:字体,参考[HersheyFonts](https://docs.opencv.org/4.1.0/d6/d6e/group__imgproc__draw.html#ga0f9314ea6e35f99bb23f29567fc16e11) 20 | * `fontscale`:字体比例因子(*最后字体大小需要该因子乘以特定字体的基本大小*) 21 | * `color`:颜色 22 | * `thicknes`:粗细程度 23 | * `bottomLeftOrigin`:如果设置为`True`,那么`org`表示左上角坐标点 24 | 25 | ## 示例 26 | 27 | ``` 28 | import cv2 29 | import numpy as np 30 | 31 | if __name__ == '__main__': 32 | # 创建空白3通道图像 33 | img = np.zeros((500, 500, 3)) 34 | 35 | cv2.putText(img, 'OpenCV', (100, 100), 1, cv2.FONT_HERSHEY_PLAIN, (0, 0, 255), thickness=1) 36 | cv2.putText(img, 'OpenCV', (200, 150), 2, cv2.FONT_HERSHEY_PLAIN, (0, 255, 0), thickness=2) 37 | 38 | # 配合边框使用 39 | cv2.rectangle(img, (250, 200), (450, 400), (255, 0, 0), thickness=1) 40 | cv2.putText(img, '0.333', (250, 400), 1, cv2.FONT_HERSHEY_PLAIN, (255, 0, 0), thickness=1) 41 | # 使用左上角坐标点 42 | cv2.putText(img, '0.333', (250, 200), 1, cv2.FONT_HERSHEY_PLAIN, (255, 0, 0), thickness=1, bottomLeftOrigin=True) 43 | 44 | cv2.imshow('img', img) 45 | cv2.waitKey(0) 46 | ``` 47 | 48 | ![](./imgs/text.png) -------------------------------------------------------------------------------- /docs/python/[xmltodict]读取XML文件.md: -------------------------------------------------------------------------------- 1 | 2 | # 
[xmltodict]读取XML文件 3 | 4 | 参考:[xmltodict 0.12.0 ](https://pypi.org/project/xmltodict/) 5 | 6 | 之前学习过使用包[xml.etree.cElementTree](./[python]读取XML文件.md)来读取`XML`文件,今天发现一个新的包`xmltodict`,将`XML`文件转换成字典形式进行读取 7 | 8 | ## 安装 9 | 10 | ``` 11 | $ conda install xmltodict 12 | ``` 13 | 14 | ## 关键函数 15 | 16 | 解析给定的`XML`输入并将其转换为字典 17 | 18 | ``` 19 | def parse(xml_input, encoding=None, expat=expat, process_namespaces=False, 20 | namespace_separator=':', disable_entities=True, **kwargs): 21 | ``` 22 | 23 | 参数`xml_input`为字符串(`XML`文件,不是文件名)或者文件对象 24 | 25 | 输入字典,生成`XML`文件字符串 26 | 27 | ``` 28 | def unparse(input_dict, output=None, encoding='utf-8', full_document=True, 29 | short_empty_elements=False, 30 | **kwargs): 31 | ``` 32 | 33 | ## 示例 34 | 35 | 输入一个`xml`格式字符串,使用函数`parse`解析成字典;使用函数`unparse`将字典解析成`xml`格式字符串 36 | 37 | ``` 38 | import xmltodict 39 | 40 | xxxml_str = "OK HAHAHA" 41 | 42 | xml_parse_dict = xmltodict.parse(xxxml_str) 43 | print(xml_parse_dict) 44 | print(xml_parse_dict['source']['database']) 45 | 46 | dict_unparse_xml = xmltodict.unparse(xml_parse_dict) 47 | print(dict_unparse_xml) 48 | # 输出 49 | OrderedDict([('source', OrderedDict([('database', 'OK HAHAHA')]))]) 50 | OK HAHAHA 51 | 52 | OK HAHAHA 53 | ``` 54 | 55 | 还可以使用`parse`函数直接解析文件对象 56 | 57 | ``` 58 | import xmltodict 59 | 60 | with open('./data/image-localization-dataset/training_images/eggplant_36.xml', 'rb') as f: 61 | xml_parse_dict = xmltodict.parse(f) 62 | print(xml_parse_dict) 63 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/operator-overload/赋值运算符重载.md: -------------------------------------------------------------------------------- 1 | 2 | # 赋值运算符重载 3 | 4 | 参考:[Assignment](https://docs.microsoft.com/en-us/cpp/cpp/assignment?view=vs-2019) 5 | 6 | ## 规范 7 | 8 | 赋值运算符(`operator=`)是二元运算符,除了遵循二元运算符的规范外,还有以下限制: 9 | 10 | 1. 必须是非静态成员函数 11 | 2. 无法被派生类继承 12 | 3. 如果不存在,默认的`opeator=`函数会被编译器生成 13 | 14 | ## 赋值 vs. 复制 15 | 16 | 赋值(`copy assignment`)运算和复制(`copy`)操作有区别,后者在新对象构造期间调用 17 | 18 | ``` 19 | // Copy constructor is called--not overloaded copy assignment operator! 20 | Point pt3 = pt1; 21 | 22 | // The previous initialization is similar to the following: 23 | Point pt4(pt1); // Copy constructor call. 24 | ``` 25 | 26 | **最佳实践:定义赋值运算的同时定义复制构造器和析构器** 27 | 28 | ## 示例 29 | 30 | 定义类`Point`,重载赋值运算符,重载复制构造器和析构器 31 | 32 | ``` 33 | class Point { 34 | public: 35 | Point(int x, int y) : _x{x}, _y{y} {} 36 | 37 | Point() : _x(0), _y(0) {} 38 | 39 | Point(const Point &point) : _x{point._x}, _y{point._y} {} 40 | 41 | // Right side of copy assignment is the argument. 42 | Point &operator=(const Point &); 43 | 44 | inline void print() { 45 | cout << "x = " << _x << " y = " << _y << endl; 46 | } 47 | 48 | private: 49 | int _x, _y; 50 | }; 51 | 52 | // Define copy assignment operator. 53 | Point &Point::operator=(const Point &otherPoint) { 54 | _x = otherPoint._x; 55 | _y = otherPoint._y; 56 | 57 | // Assignment operator returns left side of assignment. 
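    // Returning a reference to *this (instead of a copy) is what enables chained assignment such as pt1 = pt2 = pt3.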
58 | return *this; 59 | } 60 | 61 | int main() { 62 | Point pt1{2, 3}, pt2{3, 5}; 63 | 64 | // 使用复制构造器 65 | Point pt3(pt2); 66 | pt3.print(); 67 | 68 | // 使用赋值运算符 69 | pt1 = pt2; 70 | pt1.print(); 71 | } 72 | ``` -------------------------------------------------------------------------------- /docs/algorithm/machine-learning.md: -------------------------------------------------------------------------------- 1 | 2 | # 机器学习 3 | 4 | * 数据降维 5 | * [主成分分析](https://blog.zhujian.life/posts/49729a62.html) 6 | * 数据分类 7 | * [决策边界](https://blog.zhujian.life/posts/c37e79f3.html) 8 | * [线性回归](https://blog.zhujian.life/posts/ec419bd2.html) 9 | * [从numpy到pytorch实现线性回归](https://blog.zhujian.life/posts/ca2079f0.html) 10 | * [逻辑回归](https://blog.zhujian.life/posts/9f2d3388.html) 11 | * [从numpy到pytorch实现逻辑回归](https://blog.zhujian.life/posts/730913b9.html) 12 | * [softmax回归](https://blog.zhujian.life/posts/2626bec3.html) 13 | * [从numpy到pytorch实现softmax回归](https://blog.zhujian.life/posts/1c195604.html) 14 | * [使用softmax回归进行mnist分类](https://blog.zhujian.life/posts/dd673751.html) 15 | * [[PyTorch][Numpy][Softmax]计算概率](https://blog.zhujian.life/posts/f6b1346b.html) 16 | * 分类器 17 | * [KNN分类器](https://blog.zhujian.life/posts/1ee29eaf.html) 18 | * [线性SVM分类器](https://blog.zhujian.life/posts/ebe205e.html) 19 | * [逻辑回归分类器](https://blog.zhujian.life/posts/96ce93d9.html) 20 | * [softmax分类器](https://blog.zhujian.life/posts/e043b7fb.html) 21 | * [神经网络分类器](https://blog.zhujian.life/posts/81a57a7.html) 22 | * [线性SVM分类器-PyTorch实现](https://blog.zhujian.life/posts/4d25cbab.html) 23 | * 目标分割 24 | * [基于图的图像分割-引言](https://blog.zhujian.life/posts/2e594804.html) 25 | * [基于图的图像分割-工程源码](https://blog.zhujian.life/posts/a4b1a6d9.html) 26 | * [基于图的图像分割-OpenCV源码](https://blog.zhujian.life/posts/18052054.html) 27 | * 目标检测/识别 28 | * [[译]作用于目标识别的选择性搜索](https://blog.zhujian.life/posts/1cb6a408.html) 29 | * [选择性搜索算法小结](https://blog.zhujian.life/posts/58ff6dae.html) -------------------------------------------------------------------------------- /docs/cplusplus/advanced/reference/引用概述.md: -------------------------------------------------------------------------------- 1 | 2 | # 引用概述 3 | 4 | 参考:[References (C++)](https://docs.microsoft.com/en-us/cpp/cpp/references-cpp?view=vs-2019) 5 | 6 | ## 引用 vs. 指针 7 | 8 | 引用(`reference`)相对于指针(`pointer`)而言,有如下异同: 9 | 10 | 1. 相似:存储其他对象的内存地址 11 | 2. 不同:初始化后的引用不能引用其他对象或设置为空 12 | 13 | ## lvalue和rvalue 14 | 15 | 引用可分为左值引用(`lvalue`)和右值引用(`rvalue`): 16 | 17 | 1. `lvalue`引用命名变量,用符号`&`表示 18 | 2. `rvalue`引用临时对象,用符号`&&`表示 19 | 20 | ## 语法 21 | 22 | 通用语法如下: 23 | 24 | ``` 25 | [storage-class-specifiers] [cv-qualifiers] type-specifiers declarator [= expression]; 26 | ``` 27 | 28 | 简化语法如下: 29 | 30 | ``` 31 | [storage-class-specifiers] [cv-qualifiers] type-specifiers [& or &&] [cv-qualifiers] identifier [= expression]; 32 | ``` 33 | 34 | 引用声明顺序如下: 35 | 36 | 1. 说明符 37 | * 可选的存储类说明符 38 | * 可选的`cv`限定符 39 | * 类说明符:类名 40 | 2. 声明符 41 | * `&`或者`&&`运算符 42 | * 可选的`cv`限定符 43 | * 标识符 44 | 3. 可选的初始化器 45 | 46 | 引用类型的声明必须包含初始化器,以下情况除外: 47 | 48 | * 显式`extern`声明 49 | * 类成员的声明 50 | * 类内声明 51 | * 函数的参数或函数的返回类型声明 52 | 53 | ## 示例 54 | 55 | 引用对象拥有对象的地址,但其操作和对象一样,可看成是对象的别名 56 | 57 | ``` 58 | struct S { 59 | short i; 60 | }; 61 | 62 | int main() { 63 | S s; // Declare the object. 64 | S &SRef = s; // Declare the reference. 
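    // SRef is now an alias for s: both names refer to the same storage, as the identical addresses printed below show.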
65 | 66 | cout << (void *) &s << endl; 67 | cout << (void *) &SRef << endl; 68 | 69 | // 已初始化的引用不能引用其他对象 70 | S ss; 71 | SRef = ss; 72 | 73 | cout << (void *) &s << endl; 74 | cout << (void *) &ss << endl; 75 | cout << (void *) &SRef << endl; 76 | } 77 | ``` 78 | 79 | 结果: 80 | 81 | ``` 82 | 0x7ffc568084d0 83 | 0x7ffc568084d0 84 | 0x7ffc568084d0 // s 85 | 0x7ffc568084e0 // ss 86 | 0x7ffc568084d0 // SRef 87 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/type-cast-deduce/标准转换.md: -------------------------------------------------------------------------------- 1 | 2 | # 标准转换 3 | 4 | 参考:[Standard conversions](https://docs.microsoft.com/en-us/cpp/cpp/standard-conversions?view=vs-2019) 5 | 6 | `C++`定义了基本类型之间的转换,也定义了指针、引用和成员指针派生类型之间的转换,这些统称为标准转换(`standard conversion`) 7 | 8 | 共分为`8`个部分: 9 | 10 | 1. 整型提升(`integral promotions`) 11 | 2. 整型转换(`integral conversions`) 12 | 3. 浮点型转换(`floating conversions`) 13 | 4. 浮点和整数转换(`floating and integral conversions`) 14 | 5. 数值型间的转换(`arithmetic conversions`) 15 | 6. 指针转换(`pointer conversions`) 16 | 7. 引用转换(`reference conversions`) 17 | 8. 成员指针的转换(`pointer-to-member conversions`) 18 | 19 | ## 隐式类型转换 20 | 21 | 当表达式包含不同内置类型的操作数且不存在显式强制转换时,编译器执行隐式转换 22 | 23 | ### 加宽转换 24 | 25 | 在加宽转换(`Widening conversions`,也称为提升,`promotion`)中,较小变量中的值被分配给较大变量,而不会丢失数据。因为扩大转换总是安全的,编译器会静默地执行它们,不会发出警告。以下转换是加宽转换。 26 | 27 | ![](./imgs/promotion.png) 28 | 29 | ### 变窄转换 30 | 31 | 如果将精度较大变量分配给精度较小的变量,有可能发生数据损失情况,编译器会因为这个情况报出一个警告(`warn`) 32 | 33 | * 对于明确知道不会发生数据损失的情况,可以执行强制类型转换以消除警告 34 | * 如果不明确是否会发生数据损失,可以增加一些运行时检查 35 | 36 | ### 有符号-无符号转换 37 | 38 | `signed-unsigned`转换不会改变变量位数,但是因为其位模式发生了变化导致数据的大小发生了变化。编译器不警告有符号和无符号整数类型之间的隐式转换,但是建议完全避免有符号到无符号的转换 39 | 40 | 如果不能避免它们,那么在代码中添加一个运行时检查,以检测正在转换的值是否大于或等于零,并且小于或等于已签名类型的最大值。此范围内的值将从有符号转换为无符号,或从无符号转换为有符号,而不需要重新解释 41 | 42 | ### 指针转换 43 | 44 | `C`风格的数组可以隐式看成指向数组第一个元素的指针。虽然进行数据操作很简单,但也容易出错,不推荐使用 45 | 46 | ``` 47 | # 示例 48 | $ char* s = "Help" + 3; 49 | ``` 50 | 51 | ## 显式类型转换 52 | 53 | 相比较于隐式类型转换,使用显式类型转换更能明确转换目标,有两种方式: 54 | 55 | 1. `C`风格转换 56 | 2. 
`C++`风格转换 57 | 58 | ### C风格转换 59 | 60 | 最常用的是使用`C`风格的转换算子,即直接在变量前添加类型,如下所示: 61 | 62 | ``` 63 | (int) x; // old-style cast, old-style syntax 64 | int(x); // old-style cast, functional syntax 65 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/keywords/nullptr.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11]nullptr 3 | 4 | 参考:[nullptr](https://docs.microsoft.com/en-us/cpp/cpp/nullptr?view=vs-2019) 5 | 6 | `std::nullptr`是类型`std::nullptr_t`的空指针常量,可以转换成任何原始指针类型 7 | 8 | 之前通常使用`NULL`或者`0`来表示空指针,这有可能造成编译错误。从`c++11`开始推荐使用常量`std::nullptr` 9 | 10 | ## 示例一 11 | 12 | 参考:[std::nullptr_t](https://en.cppreference.com/w/cpp/types/nullptr_t) 13 | 14 | ``` 15 | void f(int *pi) { 16 | std::cout << "Pointer to integer overload\n"; 17 | } 18 | 19 | void f(double *pd) { 20 | std::cout << "Pointer to double overload\n"; 21 | } 22 | 23 | void f(std::nullptr_t nullp) { 24 | std::cout << "null pointer overload\n"; 25 | } 26 | 27 | int main() { 28 | int *pi; 29 | double *pd; 30 | 31 | f(pi); 32 | f(pd); 33 | f(nullptr); // would be ambiguous without void f(nullptr_t) 34 | // f(0); // ambiguous call: all three functions are candidates 35 | // f(NULL); // ambiguous if NULL is an integral null pointer constant 36 | // (as is the case in most implementations) 37 | } 38 | ``` 39 | 40 | 定义了三个重载函数,分别使用`int/double/nullptr_t`作为参数类型。如果输入`0`或者`NULL`作为参数,会存在二义性(`ambiguous`),因为均符合这`3`个重载函数 41 | 42 | ## 示例二 43 | 44 | 参考:[nullptr, the pointer literal](https://en.cppreference.com/w/cpp/language/nullptr) 45 | 46 | `std::nullptr`能够输入模板函数,而`0`和`NULL`会发生错误 47 | 48 | ``` 49 | template 50 | void Fwd(F f, A a) { 51 | f(a); 52 | } 53 | 54 | void g(int *i) { 55 | std::cout << "Function g called\n"; 56 | } 57 | 58 | int main() { 59 | g(NULL); // Fine 60 | g(0); // Fine 61 | 62 | Fwd(g, nullptr); // Fine 63 | // Fwd(g, NULL); // ERROR: No function g(int) 64 | } 65 | ``` -------------------------------------------------------------------------------- /docs/opencv/install-configure/OpenCV-4.0.1测试.md: -------------------------------------------------------------------------------- 1 | 2 | # [Ubuntu 16.04]OpenCV-4.0.1测试 3 | 4 | 参考:[[Ubuntu 16.04]OpenCV-3.4测试](https://zj-image-processing.readthedocs.io/zh_CN/latest/opencv/[Ubuntu%2016.04]OpenCV%E6%B5%8B%E8%AF%95.html) 5 | 6 | 和之前`OpenCV`版本不同,`OpenCV-4.0.1`使用`c++11`,所以需要在配置文件中指定编译环境 7 | 8 | ## `cmake` 9 | 10 | 参考:[cmake增加C++11](https://blog.csdn.net/sinat_21190681/article/details/83508228) 11 | 12 | $ cat CMakeLists.txt 13 | cmake_minimum_required(VERSION 2.8) 14 | # 指定c++11 15 | add_definitions(-std=c++11) 16 | project( DisplayImage ) 17 | find_package( OpenCV REQUIRED ) 18 | MESSAGE("OpenCV version: ${OpenCV_VERSION}") 19 | include_directories( ${OpenCV_INCLUDE_DIRS} ) 20 | add_executable( DisplayImage DisplayImage.cpp ) 21 | target_link_libraries( DisplayImage ${OpenCV_LIBS} ) 22 | 23 | ## `make` 24 | 25 | $ cat makefile 26 | INCLUDE=$(shell pkg-config --cflags opencv) 27 | LIB=$(shell pkg-config --libs opencv) 28 | SOURCE=DisplayImage.cpp 29 | RES=DisplayImage 30 | 31 | $(RES):$(SOURCE) 32 | g++ -std=c++11 $(SOURCE) $(INCLUDE) $(LIB) -o $(RES) 33 | 34 | clean: 35 | rm $(RES) 36 | 37 | ### 错误 38 | 39 | 参考:[Linux locate ldconfig pkg-config ldd 以及 OpenCV C++库的使用](https://blog.csdn.net/u012005313/article/details/82350430#T2) 40 | 41 | $ ./DisplayImage lena.jpg 42 | ./DisplayImage: error while loading shared libraries: libopencv_highgui.so.4.0: cannot open shared object file: No 
such file or directory 43 | 44 | 系统找不到动态库,需要配置进行动态库的绑定,在路径`/etc/ld.so.conf.d`下新建配置文件`opencv.conf`并刷新 45 | 46 | $ sudo vim opencv.conf 47 | /home/zj/opencv/opencv-4.0.1/install/lib 48 | $ sudo ldconfig -------------------------------------------------------------------------------- /docs/cplusplus/get-started/operator-overload/下标运算符重载.md: -------------------------------------------------------------------------------- 1 | 2 | # 下标运算符重载 3 | 4 | 下标运算符`operator[]`和函数调用运算符类似,也是二元运算符。除了二元运算符的规范外,下标运算符必须是**非静态成员函数** 5 | 6 | ## 示例 7 | 8 | ``` 9 | class IntVector { 10 | public: 11 | IntVector(int cElements); 12 | 13 | ~IntVector() { delete[] _iElements; } 14 | 15 | int &operator[](int nSubscript); 16 | 17 | private: 18 | int *_iElements; 19 | int _iUpperBound; 20 | }; 21 | 22 | // Construct an IntVector. 23 | IntVector::IntVector(int cElements) { 24 | _iElements = new int[cElements]; 25 | _iUpperBound = cElements; 26 | } 27 | 28 | // Subscript operator for IntVector. 29 | int &IntVector::operator[](int nSubscript) { 30 | static int iErr = -1; 31 | 32 | if (nSubscript >= 0 && nSubscript < _iUpperBound) 33 | return _iElements[nSubscript]; 34 | else { 35 | cout << "Array bounds violation." << endl; 36 | return iErr; 37 | } 38 | } 39 | 40 | // Test the IntVector class. 41 | int main() { 42 | IntVector v(10); 43 | int i; 44 | 45 | for (i = 0; i <= 10; ++i) 46 | v[i] = i; 47 | 48 | v[3] = v[9]; 49 | 50 | for (i = 0; i <= 10; ++i) 51 | cout << "Element: [" << i << "] = " << v[i] << endl; 52 | } 53 | ``` 54 | 55 | 结果 56 | 57 | ``` 58 | Array bounds violation. 59 | Array bounds violation. 60 | Element: [0] = 0 61 | Element: [1] = 1 62 | Element: [2] = 2 63 | Element: [3] = 9 64 | Element: [4] = 4 65 | Element: [5] = 5 66 | Element: [6] = 6 67 | Element: [7] = 7 68 | Element: [8] = 8 69 | Element: [9] = 9 70 | Array bounds violation. 
71 | Element: [10] = 10 72 | ``` 73 | 74 | * 重载下标运算符函数返回的是左值引用,所以可用于等号左右侧 75 | * 当下标值为`10`时,超出了数组界限,返回静态`int`值引用并赋值为`10`,所以之后调用过程中超出数组界限的返回值为`10` -------------------------------------------------------------------------------- /docs/pytorch/[softmax]分类概率计算.md: -------------------------------------------------------------------------------- 1 | 2 | # [softmax]分类概率计算 3 | 4 | 参考: 5 | 6 | [class torch.nn.Softmax(dim=None)](https://pytorch.org/docs/master/nn.html?highlight=softmax#torch.nn.Softmax) 7 | 8 | [torch.nn.functional.softmax(input, dim=None, _stacklevel=3, dtype=None)](https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax) 9 | 10 | 模型输出置信度后,使用`softmax`函数计算每类成绩,`pytorch`提供了`softmax`实现 11 | 12 | ## 解析 13 | 14 | 可以使用类`nn.Softmax`或者使用函数`nn.functional.softmax`进行分类概率的计算 15 | 16 | ``` 17 | def softmax(input, dim=None, _stacklevel=3, dtype=None): 18 | # type: (Tensor, Optional[int], int, Optional[int]) -> Tensor 19 | ``` 20 | 21 | * `dim`:计算维度,`0`表示按列计算,`1`表示按行计算 22 | 23 | 计算公式如下: 24 | 25 | $$ 26 | \text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)} 27 | $$ 28 | 29 | ## 示例 30 | 31 | ``` 32 | # -*- coding: utf-8 -*- 33 | 34 | """ 35 | @author: zj 36 | @file: softmax.py 37 | @time: 2020-01-27 38 | """ 39 | 40 | import torch 41 | import torch.nn as nn 42 | import torch.nn.functional as F 43 | 44 | if __name__ == '__main__': 45 | inputs = torch.randn((2, 4)) 46 | print('输入:', inputs) 47 | 48 | # 按行计算概率 49 | # 使用softmax类 50 | softmax = nn.Softmax(dim=1) 51 | res = softmax.forward(inputs) 52 | print('结果:', res) 53 | 54 | # 使用softmax函数 55 | res2 = F.softmax(inputs, dim=1) 56 | print('结果:', res) 57 | ``` 58 | 59 | 计算结果如下: 60 | 61 | ``` 62 | 输入: tensor([[-0.5450, 0.3742, 0.8121, 0.1191], 63 | [-0.8926, 0.2907, -0.1550, 0.1468]]) 64 | 结果: tensor([[0.1071, 0.2686, 0.4162, 0.2081], 65 | [0.1089, 0.3555, 0.2277, 0.3079]]) 66 | 结果: tensor([[0.1071, 0.2686, 0.4162, 0.2081], 67 | [0.1089, 0.3555, 0.2277, 0.3079]]) 68 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/指针和数组.md: -------------------------------------------------------------------------------- 1 | 2 | # 指针和数组 3 | 4 | 从`C++11`开始,除了提供原始数组形式外,`STL`容器库还提供了`std::array`;与此同时,除了提供原始指针形式外,`C++`还提供了智能指针操作 5 | 6 | * 原始指针 7 | * 原始数组 8 | * 指针名和数组名的区别 9 | * 指针数组 10 | * 数组指针 11 | 12 | ## 一维/二维/三维数组 13 | 14 | 实现原始数组和`std::array`的一维/二维/三维创建 15 | 16 | ``` 17 | const int LENGTH = 3; 18 | const int WIDTH = 10; 19 | const int HEIGHT = 5; 20 | 21 | int a1[LENGTH] = {1, 2, 3}; 22 | int a2[LENGTH][WIDTH]{}; 23 | int a3[LENGTH][WIDTH][HEIGHT]{}; 24 | 25 | using std::array; 26 | array aa1 = {1, 2, 3}; 27 | array, WIDTH> aa2{}; 28 | array, WIDTH>, HEIGHT> aa3{}; 29 | ``` 30 | 31 | 使用大括号进行列表初始化,`std::array`的存储形式是`第3维->第2维->第1维`(从外到里) 32 | 33 | ### 数组大小 34 | 35 | 打印第一维/第二维/第三维大小 36 | 37 | ``` 38 | cout << aa1.size() << endl; 39 | cout << aa2[0].size() << endl; 40 | cout << aa3[0][0].size() << endl; 41 | ``` 42 | 43 | 结果如下 44 | 45 | ``` 46 | 3 47 | 3 48 | 3 49 | ``` 50 | 51 | ### 数组遍历 52 | 53 | 通过迭代器方式可以快速遍历`std::array` 54 | 55 | ``` 56 | for (auto it = aa1.begin(); it != aa1.end(); ++it) { 57 | cout << *it << " "; 58 | } 59 | ``` 60 | 61 | 结果如下: 62 | 63 | ``` 64 | 1 2 3 65 | ``` 66 | 67 | 或者直接使用`for`循环 68 | 69 | ``` 70 | int i = 0; 71 | for (auto &items: aa2) { 72 | for (auto &x: items) { 73 | x = i + 1; 74 | i++; 75 | cout << x << " "; 76 | } 77 | cout << endl; 78 | } 79 | ``` 80 | 81 | 外边的`for`循环遍历了二维数组`aa2`的第二维,里面的`for`循环遍历了第一维。结果如下: 82 | 83 | ``` 84 | 1 2 3 85 | 
4 5 6 86 | 7 8 9 87 | 10 11 12 88 | 13 14 15 89 | 16 17 18 90 | 19 20 21 91 | 22 23 24 92 | 25 26 27 93 | 28 29 30 94 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/template/模板和名称解析.md: -------------------------------------------------------------------------------- 1 | 2 | # 模板和名称解析 3 | 4 | 参考:[Templates and Name Resolution](https://docs.microsoft.com/en-us/cpp/cpp/templates-and-name-resolution?view=vs-2019) 5 | 6 | ## 名称类型 7 | 8 | 在模板定义中分为`3`种类型名称: 9 | 10 | 1. 局部声明名称,包括模板名以及在模板定义中声明的名称 11 | 2. 模板定义之外的封闭作用域中的名称 12 | 3. 以某种方式依赖于模板参数的名称,称为依赖名称 13 | 14 | 模板的名称解析规则主要处理第三类依赖名称,因为编译器在模板被实例化之前对这些名称知之甚少,因为它们可能是完全不同的类型,具体取决于使用的模板参数 15 | 16 | 依赖名称可能使用如下几种方式定义: 17 | 18 | * 使用模板参数本身定义 19 | 20 | ``` 21 | T 22 | ``` 23 | 24 | * 限定名的限定部分包含了依赖类型 25 | 26 | ``` 27 | T::myType 28 | ``` 29 | 30 | * 限定名的非限定部分包含了依赖类型 31 | 32 | ``` 33 | N::T 34 | ``` 35 | 36 | * 基于依赖类型的`const/volatile`定义 37 | 38 | ``` 39 | const T 40 | ``` 41 | 42 | * 基于依赖类型的指针、引用、数组和函数指针类型 43 | 44 | ``` 45 | T *、T &、T [10]、T (*)() 46 | ``` 47 | 48 | * 大小基于模板参数的数组 49 | 50 | ``` 51 | template class X { 52 | int x[arg] ; // dependent type 53 | } 54 | ``` 55 | 56 | * 从模板参数构造的模板类型 57 | 58 | ``` 59 | T, MyTemplate 60 | ``` 61 | 62 | ## 类型依赖和值依赖 63 | 64 | * 依赖于类型参数的模板,称为类型依赖 65 | * 依赖于值参数的模板,称为值依赖 66 | 67 | ## 依赖类型名称解析 68 | 69 | 参考:[Name Resolution for Dependent Types](https://docs.microsoft.com/en-us/cpp/cpp/name-resolution-for-dependent-types?view=vs-2019) 70 | 71 | 在模板定义中使用`typename`作为限定名,告诉编译器给定的限定名称标识类型 72 | 73 | ``` 74 | template 75 | class X 76 | { 77 | public: 78 | typename T::myType f(typename T::myType* mt) { 79 | mt->aa = 3; 80 | return *mt; 81 | } 82 | }; 83 | 84 | class Yarg 85 | { 86 | public: 87 | struct myType { 88 | int aa; 89 | }; 90 | }; 91 | 92 | int main() 93 | { 94 | X x; 95 | Yarg::myType type = x.f(new Yarg::myType()); 96 | std::cout < 24 | struct array_deleter { 25 | void operator()(T const *p) { 26 | delete[] p; 27 | } 28 | }; 29 | 30 | int main() { 31 | // 创建整型数组 32 | std::shared_ptr ints(new int[10], array_deleter()); 33 | for (int i = 0; i < 5; i++) { 34 | ints.get()[i] = i; 35 | } 36 | 37 | // 创建weak_ptr 38 | std::weak_ptr wints(ints); 39 | 40 | cout << wints.use_count() << endl; 41 | cout << wints.expired() << endl; 42 | 43 | auto sptr = wints.lock(); 44 | for (int i = 0; i < 5; i++) { 45 | cout << sptr.get()[i] << endl; 46 | } 47 | } 48 | ``` -------------------------------------------------------------------------------- /py/data_preprocessing/color.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | @date: 2020/4/30 下午2:41 5 | @file: color.py 6 | @author: zj 7 | @description: 随机改变图像的亮度、对比度和饱和度 8 | """ 9 | 10 | import torchvision.transforms as transforms 11 | from PIL import Image 12 | import matplotlib.pyplot as plt 13 | 14 | plt.rcParams['font.sans-serif'] = ['simhei'] # 用来正常显示中文标签 15 | plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 16 | 17 | 18 | def plot(src, dst, dst2, dst3, dst4): 19 | f = plt.figure() 20 | 21 | plt.subplot(231) 22 | plt.title('原图') 23 | plt.imshow(src), plt.axis('off') 24 | 25 | plt.subplot(232) 26 | plt.title('随机亮度') 27 | plt.imshow(dst), plt.axis('off') 28 | 29 | plt.subplot(233) 30 | plt.title('随机对比度') 31 | plt.imshow(dst2), plt.axis('off') 32 | 33 | plt.subplot(234) 34 | plt.title('随机饱和度') 35 | plt.imshow(dst3), plt.axis('off') 36 | 37 | plt.subplot(235) 38 | plt.title('随机色调') 39 | plt.imshow(dst4), plt.axis('off') 40 | 41 | plt.show() 42 | 43 
| 44 | if __name__ == '__main__': 45 | src = Image.open('../data/butterfly.jpg') 46 | 47 | # 随机改变亮度 48 | transform = transforms.Compose([ 49 | transforms.ColorJitter(brightness=1) 50 | ]) 51 | dst = transform(src) 52 | 53 | # 随机改变对比度 54 | transform2 = transforms.Compose([ 55 | transforms.ColorJitter(contrast=1) 56 | ]) 57 | dst2 = transform2(src) 58 | 59 | # 随机改变饱和度 60 | transform3 = transforms.Compose([ 61 | transforms.ColorJitter(saturation=1) 62 | ]) 63 | dst3 = transform3(src) 64 | 65 | # 随机改变色调 66 | transform4 = transforms.Compose([ 67 | transforms.ColorJitter(hue=0.5) 68 | ]) 69 | dst4 = transform4(src) 70 | 71 | plot(src, dst, dst2, dst3, dst4) 72 | -------------------------------------------------------------------------------- /docs/python/[setup.py]保存额外数据.md: -------------------------------------------------------------------------------- 1 | 2 | # [setup.py]保存额外数据 3 | 4 | 使用`setuptools`打包`Python`包时,需要加入额外数据,比如图片、文档、压缩包等等。其配置方式参考: 5 | 6 | * [2.6. Installing Package Data](https://docs.python.org/3/distutils/setupscript.html#installing-package-data) 7 | * [2.7. Installing Additional Files](https://docs.python.org/3/distutils/setupscript.html#installing-additional-files) 8 | * [4.1. Specifying the files to distribute](https://docs.python.org/3/distutils/sourcedist.html#specifying-the-files-to-distribute) 9 | 10 | 有多种方式可以实现: 11 | 12 | 1. 配置属性`package_data` 13 | 2. 配置属性`data_files` 14 | 3. 编辑文件`MANIFEST.in` 15 | 16 | ## package_data 17 | 18 | ### 实现 19 | 20 | 其编辑方式如下: 21 | 22 | ``` 23 | setuptools.setup( 24 | ... 25 | ... 26 | package_data={'目录名': ['文件一', '文件二']}, 27 | ... 28 | ... 29 | } 30 | ``` 31 | 32 | 通过键值对的方式指定要添加的文件 33 | 34 | 1. 如果同一目录下有多个子文件夹的文件,可以使用列表的方式保存在同一键下 35 | 2. 可以使用通配符的方式指定相同格式的文件,比如`*.jpg` 36 | 37 | 注意:其根目录为`setup.py`所在路径 38 | 39 | ### 示例 40 | 41 | 参考:[ zjykzj/zlogo ](https://github.com/zjykzj/zlogo) 42 | 43 | ``` 44 | setuptools.setup( 45 | name=NAME, # Replace with your own username 46 | version=get_version(), 47 | author=AUTHOR, 48 | author_email=AUTHOR_EMAIL, 49 | description=DESCRIPTION, 50 | long_description=long_description, 51 | long_description_content_type="text/markdown", 52 | url=URL, 53 | packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), 54 | package_data={'zlogo': ['tool/logo', 'config/*. 
logorc']}, <----------------- 这里 55 | classifiers=CLASSIFIERS, 56 | python_requires=PYTHON_REQUIRES, 57 | entry_points={ 58 | 'console_scripts': [ 59 | CONSOLE_SCRIPTS 60 | ] 61 | }, 62 | cmdclass={ 63 | 'upload': UploadCommand, 64 | }, 65 | ) 66 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/stl/vector.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11][stl]vector 3 | 4 | 参考:[std::vector](http://www.cplusplus.com/reference/vector/vector/) 5 | 6 | `vector`是序列容器,它是可以改变大小的数组 7 | 8 | ## 头文件 9 | 10 | ``` 11 | #include 12 | ``` 13 | 14 | ## 把一个vector追加到另一个vector 15 | 16 | 参考:[Vector 把一个vector追加到另一个vector](https://blog.csdn.net/Fly_as_tadpole/article/details/82710781) 17 | 18 | ``` 19 | std::vector src; 20 | std::vector dest; 21 | dest.insert(dest.end(), src.begin(), src.end()); 22 | ``` 23 | 24 | ## 使用 25 | 26 | ``` 27 | #include 28 | 29 | void forward_print(std::vector vecs) { 30 | // for (auto it = vecs.cbegin(); it != vecs.cend(); ++it) { 31 | // std::cout << " " << *it; 32 | // } 33 | // std::cout << std::endl; 34 | 35 | for (auto &x: vecs) { 36 | std::cout << " " << x; 37 | } 38 | std::cout << std::endl; 39 | } 40 | 41 | void backward_print(std::vector vecs) { 42 | for (auto it = vecs.crbegin(); it != vecs.crend(); ++it) { 43 | std::cout << " " << *it; 44 | } 45 | std::cout << std::endl; 46 | } 47 | 48 | int main() { 49 | // 创建 50 | std::vector vectors; 51 | // 添加 52 | for (int i = 0; i < 10; i++) { 53 | vectors.emplace_back(i + 1); 54 | } 55 | forward_print(vectors); 56 | 57 | // 插入 58 | // 第二个位置 59 | vectors.emplace(vectors.begin() + 1, 333); 60 | forward_print(vectors); 61 | 62 | // 修改 63 | // 第二个位置,从0开始 64 | vectors.at(1) = 444; 65 | forward_print(vectors); 66 | 67 | // 删除 68 | // 最后一个位置 69 | vectors.pop_back(); 70 | forward_print(vectors); 71 | // 删除第3个 72 | vectors.erase(vectors.begin() + 2); 73 | forward_print(vectors); 74 | // 删除所有 75 | vectors.clear(); 76 | std::cout << "size: " << vectors.size() << std::endl; 77 | } 78 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/smart-pointer/智能指针类型.md: -------------------------------------------------------------------------------- 1 | 2 | # [c++11]智能指针类型 3 | 4 | 参考:[Smart Pointers (Modern C++)](https://docs.microsoft.com/en-us/cpp/cpp/smart-pointers-modern-cpp?view=vs-2019) 5 | 6 | 使用原始指针需要手动操作内存分配和删除,同时需要密切关注指针引用。在现代`C++`编程中,标准库包括智能指针(`smart pointer`),用于确保程序不受内存(`free of memory`)和资源泄漏(`resource leaks`)的影响,并且是异常安全(`exception-safe`)的 7 | 8 | ## 头文件 9 | 10 | 智能指针的声明位于头文件`` 11 | 12 | ## 原理 13 | 14 | `C++`没有单独的垃圾收集器(`garbage collector`)在后台运行,是通过标准`C++`范围规则(`scoping rule`)来管理内存,以便运行时环境更快、更高效 15 | 16 | 智能指针是在栈上声明的类模板,通过使用指向堆分配对象的原始指针进行初始化。智能指针初始化后,它拥有原始指针。这意味着智能指针负责删除原始指针指定的内存。智能指针析构函数包含对`delete`的调用,并且由于智能指针在栈上声明,因此当智能指针超出作用域时将调用其析构函数,即使在栈上的某个位置引发异常 17 | 18 | 使用熟悉的指针运算符`->`和`*`访问封装的指针,智能指针类重载这些运算符以返回封装的原始指针 19 | 20 | ## 类别 21 | 22 | 学习`3`种不同的智能指针: 23 | 24 | * `unique_ptr` 25 | * `shared_ptr` 26 | * `weak_ptr` 27 | 28 | ## 编程规范 29 | 30 | 智能指针对原始指针进行了封装,能够保证其安全使用,与此同时也造成了效率的小小降低 31 | 32 | * 大多数情况下,当初始化原始指针或资源句柄以指向实际资源时,立即将指针传递给智能指针 33 | * 原始指针只用于有限范围、循环或辅助函数的小代码块中。在这些代码块中,性能至关重要,并且不可能混淆所有权 34 | * 始终在单独的代码行上创建智能指针,而不是在参数列表中,这样就不会由于某些参数列表分配规则而发生细微的资源泄漏 35 | 36 | 使用智能指针的基本步骤如下: 37 | 38 | 1. 声明智能指针作为自动(`automatic`)或局部(`local`)变量(不要对智能指针使用`new`或`malloc`表达式) 39 | 2. 在类型参数中,指定封装指针的指向类型 40 | 3. 在智能指针构造函数中传递原始指针(已指向对象)(一些实用程序函数或智能指针构造函数可以辅助执行此操作) 41 | 4. 
使用重载的`*`或`->`运算符访问对象 42 | 5. 让智能指针删除对象 43 | 44 | ## 示例 45 | 46 | 创建结构体`S`,分别使用原始指针`rptr`和智能智能`sptr`创建对象,输入函数`print`进行打印 47 | 48 | ``` 49 | struct S { 50 | S(char a, int b) : a(a), b(b) {} 51 | 52 | char a; 53 | int b; 54 | }; 55 | 56 | void print(const struct S &ptr) { 57 | cout << ptr.a << " " << ptr.b << endl; 58 | } 59 | 60 | int main(int argc, char *argv[]) { 61 | std::unique_ptr sptr(new struct S('b', 3)); 62 | auto *rptr = new struct S('b', 3); 63 | 64 | print(*sptr); 65 | print(*rptr); 66 | 67 | delete (rptr); 68 | } 69 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/template/函数模板.md: -------------------------------------------------------------------------------- 1 | 2 | # 函数模板 3 | 4 | 参考:[Function Templates](https://docs.microsoft.com/en-us/cpp/cpp/function-templates?view=vs-2019) 5 | 6 | 函数模板定义示例如下: 7 | 8 | ``` 9 | template< class T > 10 | void MySwap( T& a, T& b ) { 11 | T c(a); 12 | a = b; 13 | b = c; 14 | } 15 | ``` 16 | 17 | 实例化示例如下: 18 | 19 | ``` 20 | // function_template_instantiation.cpp 21 | template void f(T) { } 22 | 23 | // Instantiate f with the explicitly specified template. 24 | // argument 'int' 25 | // 26 | template void f (int); 27 | 28 | // Instantiate f with the deduced template argument 'char'. 29 | template void f(char); 30 | int main() 31 | { 32 | } 33 | ``` 34 | 35 | ## 显式实例化 36 | 37 | 参考:[Explicit Instantiation](https://docs.microsoft.com/en-us/cpp/cpp/explicit-instantiation?view=vs-2019) 38 | 39 | 在创建使用模板进行分发的库(`.lib`)文件时,未实例化的模板定义不会放入对象(`.obj`)文件中 40 | 41 | ``` 42 | # 显式实例化模板MyStack 43 | template class MyStack; 44 | ``` 45 | 46 | 可使用关键字extern阻止模板的实例化 47 | 48 | ``` 49 | extern template class MyStack; 50 | ``` 51 | 52 | 模板专门化中的`extern`关键字仅适用于在类主体外部定义的成员函数。类声明中定义的函数被视为内联函数,总是被实例化 53 | 54 | ## 函数模板的偏序 55 | 56 | 参考:[Partial Ordering of Function Templates (C++)](https://docs.microsoft.com/en-us/cpp/cpp/partial-ordering-of-function-templates-cpp?view=vs-2019) 57 | 58 | ## 成员函数模板 59 | 60 | 成员模板指的是成员函数模板和嵌套类模板 61 | 62 | 成员函数模板指的是类或类模板的成员的函数模板 63 | 64 | ``` 65 | struct X 66 | { 67 | template void mf(T* t) {} 68 | }; 69 | 70 | template 71 | class X 72 | { 73 | public: 74 | template 75 | void mf(const U &u) 76 | { 77 | } 78 | }; 79 | 80 | template 81 | class X 82 | { 83 | public: 84 | template 85 | void mf(const U &u); 86 | }; 87 | 88 | template template 89 | void X::mf(const U &u) 90 | { 91 | } 92 | ``` -------------------------------------------------------------------------------- /docs/cplusplus/advanced/class/基本类结构.md: -------------------------------------------------------------------------------- 1 | 2 | # 基本类结构 3 | 4 | 新建类`BaseClass`,头文件`BaseClass.h`如下 5 | 6 | // 7 | // Created by zj on 19-2-28. 8 | // 9 | 10 | #ifndef FIRST_BASECLASS_H 11 | #define FIRST_BASECLASS_H 12 | 13 | 14 | #include 15 | 16 | using namespace std; 17 | 18 | class BaseClass { 19 | public: 20 | BaseClass(); 21 | 22 | virtual ~BaseClass(); 23 | 24 | public: 25 | string publicFunc(); 26 | 27 | private: 28 | string privateFunc(); 29 | 30 | }; 31 | 32 | 33 | #endif //FIRST_BASECLASS_H 34 | 35 | 类文件`BaseClass.cpp`如下: 36 | 37 | // 38 | // Created by zj on 19-2-28. 
39 | // 40 | 41 | #include "BaseClass.h" 42 | 43 | /*** 44 | * 构造器 45 | */ 46 | BaseClass::BaseClass() { 47 | cout << "BaseClass Constructor" << endl; 48 | } 49 | 50 | /** 51 | * 析构函数 52 | */ 53 | BaseClass::~BaseClass() { 54 | cout << "BaseClass Destructor" << endl; 55 | } 56 | 57 | /** 58 | * 公有函数 59 | * @return 60 | */ 61 | string BaseClass::publicFunc() { 62 | cout << "public func" << endl; 63 | privateFunc(); 64 | return std::__cxx11::string(); 65 | } 66 | 67 | /** 68 | * 私有函数 69 | * @return 70 | */ 71 | string BaseClass::privateFunc() { 72 | cout << "private func" << endl; 73 | return std::__cxx11::string(); 74 | } 75 | 76 | 实现如下: 77 | 78 | #include 79 | #include "BaseClass.h" 80 | 81 | using namespace std; 82 | 83 | int main() { 84 | BaseClass *baseClass = new BaseClass(); 85 | baseClass->publicFunc(); 86 | delete (baseClass); 87 | 88 | return 0; 89 | } 90 | 91 | 结果如下: 92 | 93 | BaseClass Constructor 94 | public func 95 | private func 96 | BaseClass Destructor -------------------------------------------------------------------------------- /docs/opencv/install-configure/[opencv-python]编译与安装.md: -------------------------------------------------------------------------------- 1 | 2 | # [opencv-python]编译与安装 3 | 4 | 仓库[skvark/opencv-python](https://github.com/skvark/opencv-python)配置了`Python OpenCV`包编译环境 5 | 6 | 相比较于自编译的`Python`实现,`skvark/opencv-python`有以下优点: 7 | 8 | 1. 可以通过`pip`方式安装预编译包 9 | 2. 编译得到的`cv2`文件能够提供相应的`Python`定义(*就是`PyCharm`编程时可以点击函数查看相关定义*) 10 | 11 | 其缺点就是实现速度比自编译包慢(参考[OpenCV-4.4.0安装](./OpenCV-4.4.0安装.md)) 12 | 13 | ## pip安装 14 | 15 | `skvark/opencv-python`提供了多种形式的预编译包 16 | 17 | ``` 18 | $ pip install opencv-python 19 | $ pip install opencv-contrib-python 20 | $ pip install opencv-python-headless 21 | $ pip install opencv-contrib-python-headless 22 | ``` 23 | 24 | 其中`*-contrib-*`表示包含了第三方模块,`*-headless`表示不包含`GUI`函数,适用于服务器版本`/Docker`版本 25 | 26 | 也可以指定版本安装,比如 27 | 28 | ``` 29 | $ pip install opencv-contrib-python==4.4.0.46 30 | ``` 31 | 32 | ## 编译安装 33 | 34 | 当前想要通过加载中文字体,需要使用`freetype`库,`OpenCV`集成了该模块,不过在`skvark/opencv-python`中默认没有编译,需要自编译实现 35 | 36 | 1. 下载源码 37 | 38 | ``` 39 | git clone --recursive https://github.com/skvark/opencv-python.git 40 | ``` 41 | 42 | 2. 设置`CMake`符号 43 | 44 | ``` 45 | export CMAKE_ARGS="-DWITH_FREETYPE=ON" 46 | ``` 47 | 48 | 3. 设置第三方模块编译 49 | 50 | ``` 51 | export ENABLE_CONTRIB=1 52 | ``` 53 | 54 | 4. 编译 55 | 56 | ``` 57 | pip wheel . --verbose. 58 | ``` 59 | 60 | 编译完成后即可生成对应的`wheel - opencv_contrib_python-4.4.0.46-cp37-cp37m-linux_x86_64.whl` 61 | 62 | 5. 
安装 63 | 64 | ``` 65 | pip install opencv_contrib_python-4.4.0.46-cp37-cp37m-linux_x86_64.whl 66 | ``` 67 | 68 | ## 问题一:xfeatures2d/boostdesc: Download failed: 28;"Timeout was reached" 69 | 70 | 参考[安装opencv时,xfeatures2d模块缺失boostdesc_bgm.i文件,下载超时问题](https://blog.csdn.net/sazass/article/details/108406518) 71 | 72 | ## 问题二: ippicv_2020_lnx_intel64_20191018_general.tgz 73 | 74 | 参考[ubuntu安装opencv无法下载IPPICV的问题 ippicv_2020_lnx_intel64_20191018_general.tgz](https://blog.csdn.net/gadwgdsk/article/details/107423625) -------------------------------------------------------------------------------- /docs/cplusplus/get-started/keywords/main.md: -------------------------------------------------------------------------------- 1 | 2 | # main 3 | 4 | 参考:[Main function](https://en.cppreference.com/w/cpp/language/main_function) 5 | 6 | `main`函数是所有`C`和`C++`程序的执行起点 7 | 8 | ## 语法 9 | 10 | 参考:[Argument Definitions](https://docs.microsoft.com/en-us/cpp/cpp/argument-definitions?view=vs-2019) 11 | 12 | ``` 13 | int main () { body } 14 | int main (int argc, char *argv[]) { body } 15 | ``` 16 | 17 | * `argc(argument count)`:包含后面参数`argv`数组的计数,`argc`参数始终大于或等于`1` 18 | * `argv(argument vector)`:字符串数组,表示程序输入的命令行参数。按照惯例,`argv[0]`是用来调用程序的命令,`argv[1]`是第一个命令行参数,依此类推,直到`argv[argc]=null` 19 | 20 | `argc`和`argv`的名称是任意的,并且使用指针表示数组同样有效:`int main(int ac,char**av)` 21 | 22 | ## 限制 23 | 24 | 参考:[main Function Restrictions](https://docs.microsoft.com/en-us/cpp/cpp/main-function-restrictions?view=vs-2019) 25 | 26 | 在`C++`编程中`main`函数有以下限制: 27 | 28 | 1. 不能被重载(`overloaded`) 29 | 2. 不能声明为内联(`inline`) 30 | 3. 不能声明为`static` 31 | 4. 不能传递其地址 32 | 5. 不能被调用 33 | 34 | ## 打印命令行参数 35 | 36 | 参考:[How to parse command line parameters.](http://www.cplusplus.com/articles/DEN36Up4/) 37 | 38 | ``` 39 | int main(int argc, char **argv) { 40 | // Walk through list of strings until a NULL is encountered. 41 | for (int i = 0; argv[i] != nullptr; ++i) { 42 | cout << i << ": " << argv[i] << "\n"; 43 | } 44 | } 45 | ``` 46 | 47 | 输入: 48 | 49 | ``` 50 | $ ./first hi zj 51 | ``` 52 | 53 | 结果: 54 | 55 | ``` 56 | 0: ./first 57 | 1: hi 58 | 2: zj 59 | ``` 60 | 61 | ## 启动注意事项 62 | 63 | 参考:[Additional Startup Considerations](https://docs.microsoft.com/en-us/cpp/cpp/additional-startup-considerations?view=vs-2019) 64 | 65 | 在`C++`中,对象构造(`constructor`)和析构(`destructor`)涉及执行用户代码(`executing user code`)。因此,重要的是要了解哪些初始化发生在进入`main`之前,哪些析构函数在退出`main`之后被调用 66 | 67 | 在进入`main`之前进行以下初始化: 68 | 69 | 1. 静态数据的默认初始化为零。在执行任何其他代码(包括运行时初始化)之前,所有没有显式初始值设定项的静态数据都设置为零。静态数据成员必须显式定义 70 | 2. 
在翻译单元中初始化全局静态对象。这可能发生在进入`main`之前,也可能发生在对象所在的翻译单元中的任何函数或对象首次使用之前 -------------------------------------------------------------------------------- /docs/python/[抽象基类]abc.md: -------------------------------------------------------------------------------- 1 | 2 | # [抽象基类]abc 3 | 4 | `python`提供了[abc](https://docs.python.org/zh-cn/3/library/abc.html?highlight=decorator)模块用于抽象基类的创建 5 | 6 | ## 简单实现 7 | 8 | * 创建一个抽象基类`Person`,定义抽象方法`print` 9 | * 创建子类`Men`和`Women`,实现抽象方法`print` 10 | 11 | ``` 12 | # -*- coding: utf-8 -*- 13 | 14 | # @Time : 19-6-12 上午11:02 15 | # @Author : zj 16 | 17 | from abc import ABCMeta 18 | from abc import abstractmethod 19 | 20 | 21 | class Person(metaclass=ABCMeta): 22 | 23 | @abstractmethod 24 | def print(self): 25 | pass 26 | 27 | 28 | class Men(Person): 29 | 30 | def print(self): 31 | print("men") 32 | 33 | 34 | class Women(Person): 35 | 36 | def print(self): 37 | print('women') 38 | 39 | 40 | if __name__ == '__main__': 41 | b = Men() 42 | c = Women() 43 | 44 | print(type(Person)) 45 | print(type(b)) 46 | print(type(c)) 47 | ``` 48 | 49 | 在上面代码中,使用`abc.ABCMeta`作为抽象基类的元类,使用装饰器`@abstractmethod`声明抽象方法 50 | 51 | ## ABC 52 | 53 | 类`ABC`是`abc`模块中定义的类,其继承了元类`ABCMeta`,可以作为辅助类使用标准方式进行类定义,上面的抽象基类可改写成 54 | 55 | ``` 56 | class Person(ABC): 57 | 58 | @abstractmethod 59 | def print(self): 60 | pass 61 | ``` 62 | 63 | ## 抽象基类特性 64 | 65 | 1. 无法实例化抽象基类 66 | ``` 67 | TypeError: Can't instantiate abstract class Person with abstract methods print 68 | ``` 69 | 2. 无法实例化未重写抽象方法的子类 70 | ``` 71 | TypeError: Can't instantiate abstract class Men with abstract methods print 72 | ``` 73 | 74 | ## 什么是元类 75 | 76 | 参考: 77 | 78 | [使用元类](https://www.liaoxuefeng.com/wiki/1016959663602400/1017592449371072) 79 | 80 | [What are metaclasses in Python?](https://stackoverflow.com/questions/100003/what-are-metaclasses-in-python) 81 | 82 | 元类就是定义类的类。在面向对象思想中,所有类都是对象,包括定义的类 83 | 84 | 默认情况下,[type](https://docs.python.org/zh-cn/3/library/functions.html?highlight=type#type)是所有的类的元类 85 | 86 | ## 小结 87 | 88 | 元类/抽象基类的出现进一步完善了`python`面向对象特性 89 | 90 | *这些思想在`Java`中已经有了实现* 91 | -------------------------------------------------------------------------------- /docs/cplusplus/get-started/pointer-array/const指针和volatile指针.md: -------------------------------------------------------------------------------- 1 | 2 | # const指针和volatile指针 3 | 4 | 参考:[const and volatile Pointers](https://docs.microsoft.com/en-us/cpp/cpp/const-and-volatile-pointers?view=vs-2019) 5 | 6 | ## const 7 | 8 | `const`可用于指针的两方面,一是指针所指对象值,二是指针存储地址值 9 | 10 | ### 常量指针 11 | 12 | 声明指针所指对象为`const`,即为常量指针,语法如下: 13 | 14 | ``` 15 | const char *p; 16 | ``` 17 | 18 | 声明常量指针后可以赋值指针另一个对象地址,但是无法通过指针修改对象值(*可以通过对象本身进行修改*),比如 19 | 20 | ``` 21 | char a = 'A'; 22 | const char *p = &a; 23 | // *p = 'D'; // error, *p只读 24 | 25 | cout << (void *) &a << endl; 26 | cout << (void *) p << endl; 27 | 28 | a = 'B'; 29 | 30 | cout << (void *) &a << endl; 31 | cout << (void *) p << endl; 32 | 33 | char b = 'C'; 34 | p = &b; 35 | 36 | cout << (void *) &a << endl; 37 | cout << (void *) p << endl; 38 | cout << (void *) &b << endl; 39 | ``` 40 | 41 | ### 指针常量 42 | 43 | 声明指针值(即指针存储地址)为`const`,即为指针常量,语法如下: 44 | 45 | ``` 46 | char const *p; 47 | ``` 48 | 49 | 声明指针常量后可以通过指针修改对象值,但是无法赋值指针另一个对象地址。示例如下 50 | 51 | ``` 52 | char a = 'A'; 53 | char *const p = &a; 54 | 55 | cout << (void *) &a << endl; 56 | cout << (void *) p << endl; 57 | 58 | a = 'B'; 59 | 60 | cout << (void *) &a << endl; 61 | cout << (void *) p << endl; 62 | 63 | cout << a << endl; 64 | cout << *p << endl; 65 | 66 | char b = 'C'; 
67 | // p = &b; // error,指针p存储的地址固定为a 68 | ``` 69 | 70 | ### 常量指针 vs. 指针常量 71 | 72 | 1. 常量指针可看成对象的`const`类型,只能读取对象值而不能修改 73 | 2. 指针常量可看成对象的别名,其存储地址固定为初始对象地址 74 | 75 | 可同时声明指针为常量指针和指针常量 76 | 77 | ``` 78 | const char *const p = &a; 79 | ``` 80 | 81 | 此时指针`p`可看成对象`a`的别名,同时不能通过`p`修改对象值 82 | 83 | ## volatile 84 | 85 | `volatile`关键字的语法和`const`一样,可作用于所指对象或者指针存储地址 86 | 87 | ``` 88 | // 作用于对象 89 | volatile char *vpch; 90 | // 作用于指针地址 91 | char * volatile pchv; 92 | ``` 93 | 94 | `volatile`关键字指定了可以通过用户应用程序中的操作以外的操作进行修改,对于在共享内存中声明可由多个进程或用于与中断服务例程通信的全局数据区域访问的对象非常有用 95 | 96 | 当对象声明为`volatile`时,每次程序访问编译器都将从存储器中获取对象值。这极大地减少了可能的优化。如果对象的状态无法预期时,这是确保可预测的程序性能的唯一途径 -------------------------------------------------------------------------------- /docs/cplusplus/advanced/class/成员函数概述.md: -------------------------------------------------------------------------------- 1 | 2 | # 成员函数概述 3 | 4 | 参考:[Overview of Member Functions](https://docs.microsoft.com/en-us/cpp/cpp/overview-of-member-functions?view=vs-2019) 5 | 6 | ## 函数定义 7 | 8 | 在类声明中定义函数时,不需要添加类名限定;当定义在类外时,需要使用作用域解析运算符(`::`)进行类名限定 9 | 10 | ## static 11 | 12 | 成员函数可分为静态和非静态,非静态参数隐式包含了`this`参数 13 | 14 | ## inline 15 | 16 | 定义在类声明内部的成员函数称为内联函数,也可以使用关键字`inline`在类外部定义 17 | 18 | ## virtual 19 | 20 | 参考:[virtual Specifier](https://docs.microsoft.com/en-us/cpp/cpp/virtual-specifier?view=vs-2019) 21 | 22 | `virtual`关键字仅适用于非静态成员函数,它表示调用函数的绑定被推迟到运行时 23 | 24 | 在类函数中添加关键字virtual表示声明该函数为虚函数:当通过基类的指针或者引用调用该成员函数时,将根据指针指向的对象类型确定调用的函数,而非指针的类型 25 | 26 | ``` 27 | #include 28 | using namespace std; 29 | 30 | class clsA { 31 | public: 32 | virtual void haha() { 33 | cout << "clsA" << endl; 34 | }; 35 | }; 36 | 37 | class clsB : public clsA { 38 | public: 39 | void haha() override { 40 | cout << "clsB" << endl; 41 | }; 42 | }; 43 | 44 | int main(int argc, char *argv[]) { 45 | clsA *cls = new clsB(); 46 | cls->haha(); 47 | 48 | return 0; 49 | } 50 | ``` 51 | 52 | 如果基类函数没有定义,那么在基类函数声明后添加`=0`,表示纯虚函数定义 53 | 54 | ``` 55 | class clsA { 56 | public: 57 | virtual void haha() = 0; 58 | }; 59 | ``` 60 | 61 | ## override 62 | 63 | 参考:[override Specifier](https://docs.microsoft.com/en-us/cpp/cpp/override-specifier?view=vs-2019) 64 | 65 | 可以使用`override`关键字指定要重写基类虚拟函数的成员函数 66 | 67 | *`override`是上下文敏感的,只有在成员函数声明之后使用时才具有特殊意义;否则,它不是保留关键字* 68 | 69 | 语法如下: 70 | 71 | ``` 72 | function-declaration override; 73 | ``` 74 | 75 | 关键字`override`的使用能够辅助进行代码检查,有利于程序的纠错 76 | 77 | ## final 78 | 79 | 参考:[final Specifier](https://docs.microsoft.com/en-us/cpp/cpp/final-specifier?view=vs-2019) 80 | 81 | 使用`final`关键字可以指定不能在派生类中重写的虚拟函数。还可以使用它来指定不能继承的类 82 | 83 | *`final`是上下文敏感的,只有当它在函数声明或类名之后使用时才有特殊意义;否则,它不是保留关键字* 84 | 85 | 语法如下: 86 | 87 | ``` 88 | function-declaration final; 89 | class class-name final base-classes(基类可选) 90 | ``` -------------------------------------------------------------------------------- /docs/uml/类图小结.md: -------------------------------------------------------------------------------- 1 | 2 | # 类图小结 3 | 4 | 参考:[UML类图](https://www.jianshu.com/p/57620b762160) 5 | 6 | 学习[wiki Class diagram](./[译][wiki]Class-Diagram.md),小结以下内容: 7 | 8 | 1. 类与类之间的关系 9 | 2. 如何表示属性可见性 10 | 11 | ## 类与类之间的关系 12 | 13 | 类与类之间包含以来`6`种关系 14 | 15 | 1. 泛化(`Generalization/Inheritance`) 16 | 2. 实现(`Relization/Implementation`) 17 | 3. 组合(`Composition`) 18 | 4. 聚合(`Aggregation`) 19 | 5. 关联(`Association`) 20 | 6. 
依赖(`Dependency`) 21 | 22 | 上述`6`种关系之间的依赖强度:**泛化=实现>组合>聚合>关联>依赖** 23 | 24 | ![](./imgs/Uml_classes_en.svg.png) 25 | 26 | ### 泛化 27 | 28 | 定义:类之间的泛化关系就是继承关系 29 | 30 | 表示:使用空心三角形+实线,从子类指向父类 31 | 32 | ### 实现 33 | 34 | 定义: 类实现接口的功能 35 | 36 | 表示:使用空心三角形+虚线,从类指向接口 37 | 38 | ### 组合 39 | 40 | 定义:表示`contains a`,体现了严格的整体和部分之间的关系,两者的生命周期相同,不能分离 41 | 42 | 表示:使用实心菱形+实线,菱形这一端表示整体,箭头这一端表示部分 43 | 44 | ### 聚合 45 | 46 | 定义:表示`has a`,`A`对象可以包含`B`对象,但B对象不是`A`对象的一部分。 两个对象具有各自的生命周期 47 | 48 | 表示:使用空心菱形+实线,菱形这一端表示整体,箭头这一端表示部分 49 | 50 | ### 关联 51 | 52 | 定义:对于两个相对独立的对象,当一个对象的实例与另一个对象的一些特定实例存在固定的对应关系时,这两个对象之间为关联关系 53 | 54 | 表示:使用箭头+实线,从一个对象指向关联的实例 55 | 56 | ### 依赖 57 | 58 | 定义:对于两个相对独立的对象,一个对象负责构造另一个对象的实例,或者依赖另一个对象的服务时,这两个对象之间主要体现为依赖关系 59 | 60 | 表示:使用箭头+虚线,从一个对象指向依赖的对象 61 | 62 | ### 组合 vs. 聚合 63 | 64 | 组合关系 65 | 66 | * 当试图表示真实世界的整体-部分关系时,例如,发动机是汽车的一部分 67 | * 当容器被销毁时,内容物也被销毁,例如一所大学及其系 68 | 69 | 聚合关系 70 | 71 | * 当表示软件或数据库关系时,例如,汽车模型引擎`ENG01`是汽车模型`CM01`的一部分,引擎`ENG01`也可以是不同汽车模型的一部分[8] 72 | * 当容器被销毁时,内容物通常不会被销毁,例如,教授有学生;当教授去世时,学生们不会和他或她一起死去 73 | 74 | **聚合关系通常是“目录”包容,以区别于组合的“物理”包容** 75 | 76 | ### 关联 vs. 依赖 77 | 78 | 参考: 79 | 80 | [UML类图依赖与关联的区别](https://www.cnblogs.com/liuzhang/archive/2013/03/17/2964095.html) 81 | 82 | [Difference between association and dependency?](https://stackoverflow.com/questions/1230889/difference-between-association-and-dependency) 83 | 84 | 关系是一种强依赖关系 85 | 86 | 1. 当对象以类属性的形式出现在另一个对象中,那么就是关联关系 87 | 2. 当对象以函数参数或者仅调用其方法时,就是依赖关系 88 | 89 | ## 如何表示属性可见性 90 | 91 | 在成员名称前添加以下符号来表示属性的可见性 92 | 93 | + 公共 94 | - 私有 95 | # 受保护 96 | ~ 包 97 | -------------------------------------------------------------------------------- /docs/matplotlib/散点图.md: -------------------------------------------------------------------------------- 1 | 2 | # 散点图 3 | 4 | 绘制散点图使用函数[matplotlib.pyplot.scatter](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.scatter.html#matplotlib.pyplot.scatter) 5 | 6 | ``` 7 | matplotlib.pyplot.scatter(x, y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, verts=None, edgecolors=None, *, data=None, **kwargs)[source] 8 | ``` 9 | 10 | 参数`x,y`是数组形式,表示数据位置 11 | 12 | 参数`c`表示颜色,可选颜色值参考[格式化绘图样式](https://zj-image-processing.readthedocs.io/zh_CN/latest/matplotlib/%E6%8A%98%E7%BA%BF%E5%9B%BE.html#id2) 13 | 14 | ![](./imgs/fmt-color.png) 15 | 16 | * b:蓝色 17 | * g:绿色 18 | * r:红色 19 | * c:青色 20 | * m:紫红 21 | * y:黄色 22 | * k:黑色 23 | * w:白色 24 | 25 | 参数`marker`表示图标形状,可选值参考[MarkerStyle](https://matplotlib.org/api/_as_gen/matplotlib.markers.MarkerStyle.html#matplotlib.markers.MarkerStyle.filled_markers) 26 | 27 | ``` 28 | filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X') 29 | ``` 30 | 31 | 参数`s`表示点大小,默认为`rcParams['lines.markersize'] ** 2` 32 | 33 | ``` 34 | >>> import matplotlib.pyplot as plt 35 | >>> plt.rcParams['lines.markersize'] 36 | 6.0 37 | ``` 38 | 39 | 40 | ## 示例 41 | 42 | 简单的散点图 43 | 44 | ``` 45 | # -*- coding: utf-8 -*- 46 | 47 | import matplotlib.pyplot as plt 48 | import numpy as np 49 | 50 | if __name__ == '__main__': 51 | fig = plt.figure() 52 | 53 | x = np.random.rand(10) 54 | y = np.random.rand(10) 55 | 56 | plt.scatter(x, y) 57 | 58 | plt.show() 59 | ``` 60 | 61 | ![](./imgs/single-scatter.png) 62 | 63 | 使用颜色和`maker`标记多条散点图 64 | 65 | ``` 66 | import matplotlib.pyplot as plt 67 | import numpy as np 68 | 69 | if __name__ == '__main__': 70 | fig = plt.figure() 71 | 72 | x = np.random.rand(10) 73 | y = np.random.rand(10) 74 | 75 | plt.scatter(x, y, c='r', marker='<') 76 | plt.scatter(x, y ** 2, c='g', 
marker='8') 77 | plt.scatter(x ** 2, y, c='y', marker='*') 78 | 79 | plt.show() 80 | ``` 81 | 82 | ![](./imgs/multi-scatter.png) -------------------------------------------------------------------------------- /docs/algorithm/optimization.md: -------------------------------------------------------------------------------- 1 | 2 | # 最优化 3 | 4 | * [过拟合与欠拟合](https://blog.zhujian.life/posts/2f1a2a1c.html) 5 | * 预处理 6 | * [特征缩放](https://blog.zhujian.life/posts/dea583b1.html) 7 | * [合理性检查](https://blog.zhujian.life/posts/869619ac.html) 8 | * [权重初始化](https://blog.zhujian.life/posts/cfd35552.html) 9 | * 反向传播 10 | * [成绩函数、目标函数、代价函数和损失函数](https://blog.zhujian.life/posts/5d2f01d1.html) 11 | * [小批量随机梯度下降](https://blog.zhujian.life/posts/3c50d4b7.html) 12 | * [动量更新](https://blog.zhujian.life/posts/2b34c959.html) 13 | * [Nesterov加速梯度](https://blog.zhujian.life/posts/e51acd5.html) 14 | * [AdaGrad、RMSProp和Adam](https://blog.zhujian.life/posts/2bdd8f16.html) 15 | * [梯度检查](https://blog.zhujian.life/posts/d91a1c6f.html) 16 | * [超参数优化](https://blog.zhujian.life/posts/a042eba2.html) 17 | * [正则化](https://blog.zhujian.life/posts/ce0afb50.html) 18 | * 学习率调度 19 | * [学习率退火](https://blog.zhujian.life/posts/936eda30.html) 20 | * [[LR Scheduler]余弦退火](https://blog.zhujian.life/posts/6eb7f24f.html) 21 | * [[LR Scheduler]warmup](https://blog.zhujian.life/posts/f311f0.html) 22 | * [[LR Scheduler]如何找到最优学习率](https://blog.zhujian.life/posts/78a36c78.html) 23 | * [如何找到最优权重衰减值](https://blog.zhujian.life/posts/b2e2c47b.html) 24 | * [随机失活](https://blog.zhujian.life/posts/20cc7a49.html) 25 | * [随机失活-pytorch](https://blog.zhujian.life/posts/2bee4fce.html) 26 | * [模型集成](https://blog.zhujian.life/posts/e0761e53.html) 27 | * [Hard Negative Mining](https://blog.zhujian.life/posts/bc29003.html) 28 | * [[目标检测]Non-Maximum Suppression](https://blog.zhujian.life/posts/7b326d08.html) 29 | * 损失函数 30 | * [标签平滑正则化](https://blog.zhujian.life/posts/9a85fe27.html) 31 | * 迁移学习 32 | * [[译]TorchVision Object Detection Finetuning Tutorial](https://blog.zhujian.life/posts/1a1c504e.html) 33 | * [[译]Transfer Learning for Computer Vision Tutorial](https://blog.zhujian.life/posts/c8566254.html) 34 | * [迁移学习](https://blog.zhujian.life/posts/c7511b44.html) -------------------------------------------------------------------------------- /docs/python/[easydict]访问属性的方式来访问字典.md: -------------------------------------------------------------------------------- 1 | 2 | # [easydict]访问属性的方式来访问字典 3 | 4 | [EasyDict](https://github.com/makinacorpus/easydict)提供了一种更便捷的方式来访问键值对,像访问属性一样来访问 5 | 6 | ## 安装 7 | 8 | ``` 9 | $ pip install easydict 10 | ``` 11 | 12 | ## 示例一:解析Dict 13 | 14 | ``` 15 | from easydict import EasyDict as edict 16 | 17 | if __name__ == '__main__': 18 | d = edict({'foo': 3, 'bar': {'x': 1, 'y': 2}}) 19 | print(d) 20 | 21 | print(d.foo) 22 | # 可以递归调用 23 | print(d.bar) 24 | print(d.bar.x) 25 | ################# 输出 26 | {'foo': 3, 'bar': {'x': 1, 'y': 2}} 27 | 3 28 | {'x': 1, 'y': 2} 29 | 1 30 | ``` 31 | 32 | ## 示例二:解析Json 33 | 34 | `EasyDict`同样能够解析`Json`格式内容 35 | 36 | ``` 37 | from easydict import EasyDict as edict 38 | from simplejson import loads 39 | import pprint 40 | 41 | if __name__ == '__main__': 42 | j = """{ 43 | "Buffer": 12, 44 | "List1": [ 45 | {"type" : "point", "coordinates" : [100.1,54.9] }, 46 | {"type" : "point", "coordinates" : [109.4,65.1] }, 47 | {"type" : "point", "coordinates" : [115.2,80.2] }, 48 | {"type" : "point", "coordinates" : [150.9,97.8] } 49 | ] 50 | }""" 51 | 52 | pprint.pprint(j) 53 | json_j = loads(j) 54 | 
--------------------------------------------------------------------------------
/docs/algorithm/optimization.md:
--------------------------------------------------------------------------------

# Optimization

* [Overfitting and underfitting](https://blog.zhujian.life/posts/2f1a2a1c.html)
* Preprocessing
* [Feature scaling](https://blog.zhujian.life/posts/dea583b1.html)
* [Sanity checks](https://blog.zhujian.life/posts/869619ac.html)
* [Weight initialization](https://blog.zhujian.life/posts/cfd35552.html)
* Backpropagation
* [Score, objective, cost and loss functions](https://blog.zhujian.life/posts/5d2f01d1.html)
* [Mini-batch stochastic gradient descent](https://blog.zhujian.life/posts/3c50d4b7.html)
* [Momentum update](https://blog.zhujian.life/posts/2b34c959.html)
* [Nesterov accelerated gradient](https://blog.zhujian.life/posts/e51acd5.html)
* [AdaGrad, RMSProp and Adam](https://blog.zhujian.life/posts/2bdd8f16.html)
* [Gradient checking](https://blog.zhujian.life/posts/d91a1c6f.html)
* [Hyperparameter optimization](https://blog.zhujian.life/posts/a042eba2.html)
* [Regularization](https://blog.zhujian.life/posts/ce0afb50.html)
* Learning-rate scheduling
* [Learning-rate annealing](https://blog.zhujian.life/posts/936eda30.html)
* [[LR Scheduler] Cosine annealing](https://blog.zhujian.life/posts/6eb7f24f.html)
* [[LR Scheduler] Warmup](https://blog.zhujian.life/posts/f311f0.html)
* [[LR Scheduler] How to find the optimal learning rate](https://blog.zhujian.life/posts/78a36c78.html)
* [How to find the optimal weight decay value](https://blog.zhujian.life/posts/b2e2c47b.html)
* [Dropout](https://blog.zhujian.life/posts/20cc7a49.html)
* [Dropout in PyTorch](https://blog.zhujian.life/posts/2bee4fce.html)
* [Model ensembles](https://blog.zhujian.life/posts/e0761e53.html)
* [Hard Negative Mining](https://blog.zhujian.life/posts/bc29003.html)
* [[Object detection] Non-Maximum Suppression](https://blog.zhujian.life/posts/7b326d08.html)
* Loss functions
* [Label smoothing regularization](https://blog.zhujian.life/posts/9a85fe27.html)
* Transfer learning
* [[Translation] TorchVision Object Detection Finetuning Tutorial](https://blog.zhujian.life/posts/1a1c504e.html)
* [[Translation] Transfer Learning for Computer Vision Tutorial](https://blog.zhujian.life/posts/c8566254.html)
* [Transfer learning](https://blog.zhujian.life/posts/c7511b44.html)

--------------------------------------------------------------------------------
/docs/python/[easydict]访问属性的方式来访问字典.md:
--------------------------------------------------------------------------------

# [easydict] Access dictionary values as attributes

[EasyDict](https://github.com/makinacorpus/easydict) provides a more convenient way to work with key/value pairs: values can be read like object attributes.

## Installation

```
$ pip install easydict
```

## Example 1: parsing a dict

```
from easydict import EasyDict as edict

if __name__ == '__main__':
    d = edict({'foo': 3, 'bar': {'x': 1, 'y': 2}})
    print(d)

    print(d.foo)
    # nested access works as well
    print(d.bar)
    print(d.bar.x)
################# output
{'foo': 3, 'bar': {'x': 1, 'y': 2}}
3
{'x': 1, 'y': 2}
1
```

## Example 2: parsing JSON

`EasyDict` can also parse `JSON` content:

```
from easydict import EasyDict as edict
from simplejson import loads
import pprint

if __name__ == '__main__':
    j = """{
    "Buffer": 12,
    "List1": [
        {"type" : "point", "coordinates" : [100.1,54.9] },
        {"type" : "point", "coordinates" : [109.4,65.1] },
        {"type" : "point", "coordinates" : [115.2,80.2] },
        {"type" : "point", "coordinates" : [150.9,97.8] }
    ]
    }"""

    pprint.pprint(j)
    json_j = loads(j)
    pprint.pprint(json_j)
    d = edict(json_j)
    print(d.Buffer)
    print(d.List1[2].type)
####################### output
('{\n'
 '    "Buffer": 12,\n'
 '    "List1": [\n'
 '        {"type" : "point", "coordinates" : [100.1,54.9] },\n'
 '        {"type" : "point", "coordinates" : [109.4,65.1] },\n'
 '        {"type" : "point", "coordinates" : [115.2,80.2] },\n'
 '        {"type" : "point", "coordinates" : [150.9,97.8] }\n'
 '    ]\n'
 '    }')
{'Buffer': 12,
 'List1': [{'coordinates': [100.1, 54.9], 'type': 'point'},
           {'coordinates': [109.4, 65.1], 'type': 'point'},
           {'coordinates': [115.2, 80.2], 'type': 'point'},
           {'coordinates': [150.9, 97.8], 'type': 'point'}]}
12
point
```
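`EasyDict` also supports attribute-style writes. A quick sketch based on the examples in the EasyDict README; worth verifying against the installed version:

```
from easydict import EasyDict as edict

if __name__ == '__main__':
    d = edict()
    d.foo = 3                  # equivalent to d['foo'] = 3
    d.bar = {'x': 1, 'y': 2}   # plain dicts assigned this way are wrapped too
    print(d.foo)               # 3
    print(d.bar.x)             # 1
```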
--------------------------------------------------------------------------------
/docs/cplusplus/advanced/class/类定义.md:
--------------------------------------------------------------------------------

# Class Definition

Reference: [class (C++)](https://docs.microsoft.com/en-us/cpp/cpp/class-cpp?view=vs-2019)

The `class` keyword declares a class type or defines an object of a class type. The basic syntax is:

```
[template-spec]
class [tag [: base-list ]]
{
   member-list
} [declarators];
[ class ] tag declarators;
```

## Parameters

* `template-spec`: optional template specification
* `class`: the keyword itself
* `tag`: the type name given to the class. The tag becomes a reserved word within the scope of the class; if it is omitted, an anonymous class is defined
* `base-list`: the list of classes or structures this class derives from
* `member-list`: the list of class members
* `declarators`: a list of declarators specifying the names of one or more instances of the class type. Declarators may include initializer lists if all data members of the class are public; this is more common in structures, whose data members are public by default, than in classes

## Example

* Define class `dog` with private data members and public inline member functions, and declare the function `setEars` as `virtual`
* Define class `breed`, which inherits from `dog` and redefines the `virtual` function `setEars` (note that the parameter list differs, so it overloads rather than overrides)

```
#include <iostream>
#include <string>
#define TRUE = 1
using namespace std;

class dog
{
public:
    dog()
    {
        _legs = 4;
        _bark = true;
    }

    void setDogSize(string dogSize)
    {
        _dogSize = dogSize;
    }
    virtual void setEars(string type)      // virtual function
    {
        _earType = type;
    }

private:
    string _dogSize, _earType;
    int _legs;
    bool _bark;

};

class breed : public dog
{
public:
    breed( string color, string size)
    {
        _color = color;
        setDogSize(size);
    }

    string getColor()
    {
        return _color;
    }

    // virtual function redefined
    void setEars(string length, string type)
    {
        _earLength = length;
        _earType = type;
    }

protected:
    string _color, _earLength, _earType;
};

int main()
{
    dog mongrel;
    breed labrador("yellow", "large");
    mongrel.setEars("pointy");
    labrador.setEars("long", "floppy");
    cout << "Cody is a " << labrador.getColor() << " labrador" << endl;
}
```

--------------------------------------------------------------------------------
/docs/cnn/线性和非线性.md:
--------------------------------------------------------------------------------

# Linear and Nonlinear

The paper [Network In Network](https://arxiv.org/abs/1312.4400) points out that traditional convolutional neural networks mostly abstract the data with a linear model (the convolution operation), so they do not fit nonlinear data well.

This note sorts out the relevant concepts of linearity and nonlinearity.

## What are linear / nonlinear functions

References:

[Linear function](https://baike.baidu.com/item/%E7%BA%BF%E6%80%A7%E5%87%BD%E6%95%B0/1085447?fr=aladdin)

[Nonlinear function](https://baike.baidu.com/item/%E9%9D%9E%E7%BA%BF%E6%80%A7%E5%87%BD%E6%95%B0/16029251?fr=aladdin)

[Linear transformation](https://baike.baidu.com/item/%E7%BA%BF%E6%80%A7%E5%8F%98%E6%8D%A2/5904192?fr=aladdin)

[Nonlinear model](https://baike.baidu.com/item/%E9%9D%9E%E7%BA%BF%E6%80%A7%E6%A8%A1%E5%9E%8B/10463547?fr=aladdin)

In elementary mathematics and analytic geometry, a linear function (`linear function`) is a first-degree polynomial in a single variable:

$$y=ax+b$$

A nonlinear function (`nonlinear function`) is any other kind of function (its graph is not a straight line), including exponential, power and logarithmic functions, polynomials of degree two or higher, and so on:

$$
Y_{i}=f(X_{1},X_{2},...,X_{m};a_{1},a_{2},...,a_{n})+\mu
$$

*where $f$ is a nonlinear function, $\mu$ is a disturbance term, and the number of variables $x$ and the number of parameters $a$ need not be equal*

In advanced mathematics and linear algebra, a linear function refers to a linear mapping (`linear mapping`): a map from a vector space $V$ to a vector space $W$ realized through addition and scalar multiplication; a nonlinear function is any transformation that does not satisfy these linearity conditions.

## What is linear / nonlinear

Reference: [Linear and nonlinear](https://baike.baidu.com/item/%E7%BA%BF%E6%80%A7%E4%B8%8E%E9%9D%9E%E7%BA%BF%E6%80%A7/22412673?fr=aladdin)

**Linearity and nonlinearity describe how different factors interact.**

If the combined effect of several factors is simply the sum of their individual effects, the interaction is linear.

If changes in some factors produce effects that cannot be obtained by such simple summation, the interaction is nonlinear.

The key to distinguishing linear from nonlinear is therefore **whether superposition holds**.

## What are linear / nonlinear models

References:

[The difference between linear and nonlinear models in machine learning](https://blog.csdn.net/wbcnb/article/details/78306970)

[Decision boundary](https://baike.baidu.com/item/%E5%86%B3%E7%AD%96%E8%BE%B9%E7%95%8C/22778546?fr=aladdin)

[Linear model](https://baike.baidu.com/item/%E7%BA%BF%E6%80%A7%E6%A8%A1%E5%9E%8B/9857200?fr=aladdin)

If each independent variable is affected by only a single parameter, the model is linear; otherwise it is nonlinear.

* A linear model may well contain several independent variables, as long as each of them is affected by only a single parameter (matrix)
* The decision boundary of a linear model is a hyperplane; in a two-dimensional plane, for example, a single straight line separates the two subsets

## Convolutional neural networks

A convolutional neural network generally consists of convolution, activation, pooling, regularization and other operations.

The convolution operation takes dot products between the input data and the kernel, so it is a linear operation.

Activation, pooling and regularization, on the other hand, are nonlinear operations.

Newly developed network structures and functions, such as dropout, global average pooling and the `Inception` module, are mostly about adding nonlinear operations; the goal is to strengthen the network's ability to abstract nonlinear data and thereby improve generalization.
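The superposition criterion above can be checked numerically. A minimal `numpy` sketch, in which the matrix `W` and the inputs are random values chosen only for illustration: a plain matrix multiplication satisfies f(a + b) = f(a) + f(b), while the same map followed by a `ReLU` generally does not.

```
import numpy as np

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    W = rng.randn(3, 3)
    a = rng.randn(3)
    b = rng.randn(3)

    def linear(x):
        return W.dot(x)                   # linear map: matrix multiplication only

    def nonlinear(x):
        return np.maximum(W.dot(x), 0)    # the same map followed by ReLU

    # superposition holds for the linear map
    print(np.allclose(linear(a + b), linear(a) + linear(b)))            # True
    # it almost always fails once the ReLU is added
    print(np.allclose(nonlinear(a + b), nonlinear(a) + nonlinear(b)))   # False for most inputs
```

This is the sense in which convolution on its own is a linear operation, and activations such as `ReLU` are what give the network its nonlinear modeling capacity.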