├── hello-world
└── HelloWorld.py
├── python-spider
├── first_scrapy
│ ├── first_scrapy
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── items.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── settings.cpython-37.pyc
│ │ │ ├── middlewares.cpython-37.pyc
│ │ │ └── pipelines.cpython-37.pyc
│ │ ├── spiders
│ │ │ ├── __pycache__
│ │ │ │ ├── __init__.cpython-37.pyc
│ │ │ │ ├── quotes.cpython-37.pyc
│ │ │ │ ├── HttpbinSpider.cpython-37.pyc
│ │ │ │ └── MzituSpider.cpython-37.pyc
│ │ │ ├── __init__.py
│ │ │ ├── HttpbinSpider.py
│ │ │ ├── quotes.py
│ │ │ └── MzituSpider.py
│ │ ├── others
│ │ │ └── selectorsdemo.py
│ │ └── items.py
│ ├── scrapy.cfg
│ └── .gitignore
├── scrapy_splash_demo
│ ├── scrapy_splash_demo
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ └── settings.cpython-37.pyc
│ │ ├── spiders
│ │ │ ├── __pycache__
│ │ │ │ ├── jd.cpython-37.pyc
│ │ │ │ └── __init__.cpython-37.pyc
│ │ │ ├── __init__.py
│ │ │ └── jd.py
│ │ ├── pipelines.py
│ │ └── items.py
│ ├── scrapy.cfg
│ └── .gitignore
├── scrapy_selenium_demo
│ ├── scrapy_selenium_demo
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── items.cpython-37.pyc
│ │ │ ├── __init__.cpython-37.pyc
│ │ │ ├── pipelines.cpython-37.pyc
│ │ │ ├── settings.cpython-37.pyc
│ │ │ └── middlewares.cpython-37.pyc
│ │ ├── spiders
│ │ │ ├── __pycache__
│ │ │ │ ├── jd.cpython-37.pyc
│ │ │ │ └── __init__.cpython-37.pyc
│ │ │ └── __init__.py
│ │ ├── items.py
│ │ └── pipelines.py
│ ├── scrapy.cfg
│ └── .gitignore
├── splash-demo
│ ├── jd.png
│ ├── .gitignore
│ ├── execute_demo.py
│ └── demo.py
├── mafengwo_demo
│ ├── mfw.xlsx
│ └── .gitignore
├── requests-demo
│ ├── baidu_logo.png
│ ├── time_demo.py
│ ├── response_demo.py
│ ├── session_demo.py
│ ├── proxy_demo.py
│ ├── post_demo.py
│ ├── .gitignore
│ └── get_demo.py
├── proxy-pool
│ ├── __pycache__
│ │ ├── MysqlClient.cpython-37.pyc
│ │ └── VerifyProxy.cpython-37.pyc
│ └── .gitignore
├── selenium-demo
│ ├── execute_script_demo.py
│ ├── implicit_waits_demo.py
│ ├── back_forward_demo.py
│ ├── interaction_demo.py
│ ├── cookies_demo.py
│ ├── Demo2.py
│ ├── .gitignore
│ ├── explicit_waits_demo.py
│ ├── get_data_demo.py
│ └── Demo1.py
├── aiohttp-demo
│ ├── request-demo.py
│ ├── .gitignore
│ ├── aio-demo.py
│ └── aio-basic-demo.py
├── proxy-set-demo
│ ├── selenium_proxy_demo.py
│ ├── requests_proxy_demo.py
│ ├── urllib_proxy_demo.py
│ └── .gitignore
├── bs4-demo
│ ├── .gitignore
│ ├── bs4_demo.py
│ └── bs4_demo1.py
├── douyin
│ ├── .gitignore
│ └── demo-video.py
├── douban-2019
│ └── .gitignore
├── gupiao-demo
│ └── .gitignore
├── pyquery-demo
│ └── .gitignore
├── urllib-request
│ ├── .gitignore
│ ├── Demo_Robotparser.py
│ ├── cookies.txt
│ ├── cookies_mozilla.txt
│ ├── Demo_Request.py
│ ├── cookies_lwp.txt
│ └── Demo_Error.py
├── xpath-demo
│ ├── .gitignore
│ ├── xpath_demo.py
│ └── xpath_demo_advanced.py
├── jd-spider-demo
│ └── .gitignore
├── lianjia-spider
│ └── .gitignore
├── urllib-spider-mzitu
│ └── .gitignore
└── database-docker-conf
│ └── mysql
│ │ └── my.cnf
├── myqr-demo
├── 1.gif
├── 3.gif
├── .gitignore
└── demo.py
├── base-excel
├── wx.jpg
├── demo.xlsx
├── test.xlsx
├── .gitignore
└── Demo1.py
├── python-opencv
├── blog9-open
│ ├── demo.png
│ ├── demo_noise_black.jpg
│ ├── demo_noise_white.jpg
│ ├── .gitignore
│ ├── demo-gradient-1.py
│ ├── demo-open-1.py
│ ├── demo-close-1.py
│ ├── demo-open.py
│ ├── demo-close.py
│ ├── demo-gradient.py
│ └── demo-noise.py
├── blog1-start
│ ├── demo.jpg
│ ├── maliao.jpg
│ ├── demo2.py
│ ├── demo1.py
│ └── .gitignore
├── blog7-blur
│ ├── maliao.jpg
│ ├── maliao_noise.jpg
│ ├── demo-noise.py
│ ├── .gitignore
│ ├── demo-medianblur.py
│ ├── demo-bilateralfilter.py
│ ├── demo-gaussianblur.py
│ ├── demo-boxfilter.py
│ ├── demo-filter2D.py
│ └── demo-blur.py
├── blog8-erode
│ ├── demo.png
│ ├── test.png
│ ├── quanjiafu.png
│ ├── demo-dilate.py
│ ├── demo-erode.py
│ ├── .gitignore
│ └── quanjiafu.py
├── blog11-canny
│ ├── maliao.jpg
│ ├── edge_result1.png
│ └── canny.py
├── blog12-sobel
│ ├── maliao.jpg
│ ├── demo-laplacian.py
│ ├── demo-sobel.py
│ ├── demo-roberts.py
│ └── demo-prewitt.py
├── blog13-scharr
│ ├── maliao.jpg
│ ├── demo-log.py
│ ├── demo-scharr.py
│ └── demo-summary.py
├── blog2-pixel
│ ├── maliao.jpg
│ ├── demo7.py
│ ├── demo4.py
│ ├── demo1.py
│ ├── demo6.py
│ ├── demo5.py
│ ├── demo8.py
│ ├── demo2.py
│ ├── .gitignore
│ └── demo3.py
├── blog4-calculate
│ ├── rain.jpg
│ ├── maliao.jpg
│ ├── demo-flag.py
│ ├── demo-cvt.py
│ ├── demo-addWeighted.py
│ ├── demo-add.py
│ └── .gitignore
├── blog5-resize
│ ├── flip_1.png
│ ├── maliao.jpg
│ ├── demo-resize.py
│ ├── demo-resize-fxfy.py
│ ├── demo-resize-scale.py
│ ├── demo-matrix2D.py
│ ├── demo-warpAffine.py
│ ├── .gitignore
│ └── demo-flip.py
├── blog14-pyramid
│ ├── maliao.jpg
│ └── demo-pyramid.py
├── blog15-contours
│ ├── black.png
│ ├── convex.png
│ ├── number.png
│ ├── findContours.py
│ ├── drawContours.py
│ ├── moments.py
│ ├── convex.py
│ ├── rect.py
│ ├── minEnclosingCircle.py
│ └── approx.py
├── blog16-histogram
│ ├── dahai.jpg
│ ├── bgr_equ.png
│ ├── grey_equ.png
│ ├── maliao.jpg
│ ├── tiankong.jpg
│ ├── xueshan.jpg
│ ├── clahe_src.jpg
│ ├── clahe_result.jpg
│ ├── matplotlib-hist.py
│ ├── clahe.py
│ ├── bgr-hist.py
│ ├── calcHist.py
│ └── equalize.py
├── blog3-attribute
│ ├── maliao.jpg
│ ├── demo2-size.py
│ ├── demo1-shape.py
│ ├── demo3-dtype.py
│ ├── demo7-merge.py
│ ├── demo4-roi.py
│ ├── demo5-roi.py
│ ├── demo6-split.py
│ ├── demo8-merge.py
│ └── .gitignore
├── blog6-threshold
│ ├── maliao.jpg
│ ├── demo-trunc.py
│ ├── demo-binary.py
│ ├── demo-tozero.py
│ ├── demo-binary-inv.py
│ ├── demo-tozero-inv.py
│ ├── .gitignore
│ └── demo-quanjiafu.py
├── bilateral-filter
│ ├── zhaopian.jpg
│ ├── demo.py
│ └── .gitignore
└── blog10-hat
│ ├── demo_noise_black.jpg
│ ├── demo_noise_white.jpg
│ ├── .gitignore
│ ├── demo-tophat-1.py
│ ├── demo-blackhat-1.py
│ ├── demo-tophat.py
│ └── demo-blackhat.py
├── python-data-analysis
├── pyecharts
│ ├── bar.png
│ ├── Gauge_demo.py
│ ├── Liquid_base_demo.py
│ ├── .gitignore
│ ├── Graph_base_demo.py
│ ├── Calendar_base_demo.py
│ └── Line3d_autorotate_demo.py
├── bilibili
│ ├── wordcloud.png
│ ├── .gitignore
│ └── houlang.py
├── mojito
│ ├── wordcloud.png
│ ├── emoji-demo.py
│ ├── .gitignore
│ ├── barrage-spder.py
│ └── damu-wordcloud.py
├── pandas-demo
│ ├── demo.xlsx
│ ├── epidemic_dxy.xlsx
│ ├── result_data.xlsx
│ ├── table_join_exp.xlsx
│ ├── epidemic_history.xlsx
│ ├── GetEpidemicDxy.py
│ ├── GetEpidemicHistory.py
│ ├── .gitignore
│ ├── PivotTableDemo.py
│ ├── DataPre.py
│ ├── ExportDemo.py
│ ├── GroupByDemo.py
│ ├── TableJoinDemo.py
│ ├── DataOperation.py
│ ├── demo1.py
│ └── demo2.py
├── dingtoujijin
│ ├── 110020.txt
│ └── .gitignore
├── matplotlib
│ ├── barh_demo.png
│ ├── pie_demo.png
│ ├── pie_demo1.png
│ ├── plot_demo.png
│ ├── bar_demo_1.png
│ ├── bar_demo_2.png
│ ├── bar_demo_3.png
│ ├── imshow_demo.png
│ ├── polar_demo.png
│ ├── scatter_demo.png
│ ├── scatter_demo1.png
│ ├── stackplot_demo.png
│ ├── ImshowDemo.py
│ ├── ScatterDemo.py
│ ├── .gitignore
│ ├── StackplotDemo.py
│ ├── BarhDemo.py
│ ├── PlotDemo.py
│ ├── ScatterDemo1.py
│ ├── BarDemo1.py
│ ├── BarDemo2.py
│ ├── BarDemo3.py
│ ├── XYDemo.py
│ ├── FirstMatplotlibDemo.py
│ ├── PieDemo.py
│ ├── PieDemo1.py
│ └── PolarDemo.py
├── dynamic_yiqing
│ ├── demo.py
│ └── .gitignore
├── 2019-nCoV-global
│ ├── __pycache__
│ │ └── namemap.cpython-37.pyc
│ └── .gitignore
├── series
│ ├── .gitignore
│ └── demo1.py
├── dataframe
│ └── .gitignore
└── pyecharts_map
│ ├── .gitignore
│ ├── ChinaMap.py
│ ├── GlobeMap.py
│ └── ShanghaiMap.py
├── spider-blog
├── tongji
│ ├── .mvn
│ │ └── wrapper
│ │ │ ├── maven-wrapper.jar
│ │ │ └── maven-wrapper.properties
│ ├── src
│ │ ├── test
│ │ │ └── java
│ │ │ │ └── com
│ │ │ │ └── geekdigging
│ │ │ │ └── tongji
│ │ │ │ └── TongjiApplicationTests.java
│ │ └── main
│ │ │ ├── java
│ │ │ └── com
│ │ │ │ └── geekdigging
│ │ │ │ └── tongji
│ │ │ │ ├── TongjiApplication.java
│ │ │ │ ├── model
│ │ │ │ └── SpiderDataModel.java
│ │ │ │ └── mapper
│ │ │ │ └── SpiderDataMapper.java
│ │ │ └── resources
│ │ │ ├── mybatis
│ │ │ └── mybatis-config.xml
│ │ │ └── application.yml
│ └── .gitignore
├── README.md
├── .gitignore
├── spider_data.sql
└── 报表sql.sql
├── data_structure
├── QueueTest.py
├── .gitignore
├── Stack.py
└── StackNode.py
├── .gitignore
├── base-data-def
├── .gitignore
├── Demo1.py
└── Demo.py
├── base-data-set
├── .gitignore
├── Demo.py
└── Demo1.py
├── base-data-str
├── .gitignore
└── Demo.py
├── base-except
├── .gitignore
└── Demo.py
├── base-file
├── .gitignore
└── Demo.py
├── base-iter
├── .gitignore
└── Demo.py
├── base-operator
├── .gitignore
└── Demo-1.py
├── base-process
├── .gitignore
├── Demo1.py
└── Demo.py
├── base-time
├── .gitignore
└── Demo.py
├── base-variable
├── .gitignore
└── Demo.py
├── base-data-dict
├── .gitignore
├── Demo.py
└── Demo1.py
├── base-data-list
├── .gitignore
├── Demo.py
└── Demo1.py
├── base-data-tuple
├── .gitignore
└── Demo.py
├── base-generator
├── .gitignore
└── Demo.py
├── python-lottery
└── .gitignore
├── base-data-number
└── Demo.py
└── LICENSE
/hello-world/HelloWorld.py:
--------------------------------------------------------------------------------
1 | print('Hello World')
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/myqr-demo/1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/myqr-demo/1.gif
--------------------------------------------------------------------------------
/myqr-demo/3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/myqr-demo/3.gif
--------------------------------------------------------------------------------
/base-excel/wx.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/base-excel/wx.jpg
--------------------------------------------------------------------------------
/base-excel/demo.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/base-excel/demo.xlsx
--------------------------------------------------------------------------------
/base-excel/test.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/base-excel/test.xlsx
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog9-open/demo.png
--------------------------------------------------------------------------------
/python-spider/splash-demo/jd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/splash-demo/jd.png
--------------------------------------------------------------------------------
/python-opencv/blog1-start/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog1-start/demo.jpg
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog7-blur/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog8-erode/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog8-erode/demo.png
--------------------------------------------------------------------------------
/python-opencv/blog8-erode/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog8-erode/test.png
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts/bar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/pyecharts/bar.png
--------------------------------------------------------------------------------
/python-opencv/blog1-start/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog1-start/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog11-canny/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog11-canny/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog12-sobel/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog12-sobel/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog13-scharr/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog13-scharr/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog2-pixel/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog4-calculate/rain.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog4-calculate/rain.jpg
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/flip_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog5-resize/flip_1.png
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog5-resize/maliao.jpg
--------------------------------------------------------------------------------
/python-spider/mafengwo_demo/mfw.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/mafengwo_demo/mfw.xlsx
--------------------------------------------------------------------------------
/python-opencv/blog14-pyramid/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog14-pyramid/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog15-contours/black.png
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/convex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog15-contours/convex.png
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/number.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog15-contours/number.png
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/dahai.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/dahai.jpg
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog3-attribute/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog4-calculate/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog4-calculate/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog6-threshold/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog8-erode/quanjiafu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog8-erode/quanjiafu.png
--------------------------------------------------------------------------------
/python-data-analysis/bilibili/wordcloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/bilibili/wordcloud.png
--------------------------------------------------------------------------------
/python-data-analysis/mojito/wordcloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/mojito/wordcloud.png
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/demo.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/pandas-demo/demo.xlsx
--------------------------------------------------------------------------------
/python-opencv/bilateral-filter/zhaopian.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/bilateral-filter/zhaopian.jpg
--------------------------------------------------------------------------------
/python-opencv/blog11-canny/edge_result1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog11-canny/edge_result1.png
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/bgr_equ.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/bgr_equ.png
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/grey_equ.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/grey_equ.png
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/maliao.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/maliao.jpg
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/tiankong.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/tiankong.jpg
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/xueshan.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/xueshan.jpg
--------------------------------------------------------------------------------
/python-opencv/blog4-calculate/demo-flag.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | flags = [i for i in dir(cv) if i.startswith('COLOR_')]
4 |
5 | print(flags)
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/maliao_noise.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog7-blur/maliao_noise.jpg
--------------------------------------------------------------------------------
/python-spider/requests-demo/baidu_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/requests-demo/baidu_logo.png
--------------------------------------------------------------------------------
/python-data-analysis/dingtoujijin/110020.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/dingtoujijin/110020.txt
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/barh_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/barh_demo.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/pie_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/pie_demo.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/pie_demo1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/pie_demo1.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/plot_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/plot_demo.png
--------------------------------------------------------------------------------
/python-opencv/blog10-hat/demo_noise_black.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog10-hat/demo_noise_black.jpg
--------------------------------------------------------------------------------
/python-opencv/blog10-hat/demo_noise_white.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog10-hat/demo_noise_white.jpg
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/clahe_src.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/clahe_src.jpg
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo_noise_black.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog9-open/demo_noise_black.jpg
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo_noise_white.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog9-open/demo_noise_white.jpg
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/bar_demo_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/bar_demo_1.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/bar_demo_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/bar_demo_2.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/bar_demo_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/bar_demo_3.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/imshow_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/imshow_demo.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/polar_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/polar_demo.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/scatter_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/scatter_demo.png
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/clahe_result.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-opencv/blog16-histogram/clahe_result.jpg
--------------------------------------------------------------------------------
/python-spider/requests-demo/time_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | r = requests.get("https://www.geekdigging.com/", timeout=1)
4 | print(r.status_code)
--------------------------------------------------------------------------------
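Note on the timeout above: if geekdigging.com does not answer within 1 second, requests raises requests.exceptions.Timeout (a ConnectTimeout or ReadTimeout) instead of returning a response, so the status code on line 4 only prints on a fast reply.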
/python-data-analysis/matplotlib/scatter_demo1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/scatter_demo1.png
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/stackplot_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/matplotlib/stackplot_demo.png
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/epidemic_dxy.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/pandas-demo/epidemic_dxy.xlsx
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/result_data.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/pandas-demo/result_data.xlsx
--------------------------------------------------------------------------------
/spider-blog/tongji/.mvn/wrapper/maven-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/spider-blog/tongji/.mvn/wrapper/maven-wrapper.jar
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/table_join_exp.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/pandas-demo/table_join_exp.xlsx
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/epidemic_history.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/pandas-demo/epidemic_history.xlsx
--------------------------------------------------------------------------------
/spider-blog/README.md:
--------------------------------------------------------------------------------
1 | The script provided in `报表sql.sql` is for reference only. If anything in it is wrong, feel free to point it out; you can contact the author directly or leave a message on the official account.
2 |
3 | The report display will be fleshed out gradually as time permits. The backend can expose a REST API in either Java or Python, and the frontend page only needs to fetch the data and render it with a charting plugin such as ECharts.
--------------------------------------------------------------------------------
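A minimal sketch of the Python option mentioned in the README, assuming Flask and PyMySQL are installed; the connection settings and column names below are illustrative placeholders, not the actual schema from spider_data.sql:

```python
from flask import Flask, jsonify
import pymysql

app = Flask(__name__)

@app.route("/api/spider-data")
def spider_data():
    # Illustrative connection settings and columns; adjust to spider_data.sql.
    conn = pymysql.connect(host="localhost", user="root",
                           password="secret", database="spider")
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT platform, read_count FROM spider_data")
            rows = cur.fetchall()
    finally:
        conn.close()
    # The frontend can feed this JSON straight into an ECharts series.
    return jsonify([{"platform": p, "read_count": c} for p, c in rows])

if __name__ == "__main__":
    app.run()
```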
/spider-blog/tongji/.mvn/wrapper/maven-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip
2 |
--------------------------------------------------------------------------------
/python-data-analysis/dynamic_yiqing/demo.py:
--------------------------------------------------------------------------------
1 | import akshare as ak
2 | covid_19_history_df = ak.covid_19_history()
3 | covid_19_history_df.to_csv('data.csv')
4 | print(covid_19_history_df)
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/GetEpidemicDxy.py:
--------------------------------------------------------------------------------
1 | import akshare as ak
2 |
3 | epidemic_dxy_df = ak.epidemic_dxy(indicator="global")
4 | epidemic_dxy_df.to_excel('epidemic_dxy.xlsx')
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/GetEpidemicHistory.py:
--------------------------------------------------------------------------------
1 | import akshare as ak
2 |
3 | epidemic_history_df = ak.epidemic_history()
4 | epidemic_history_df.to_excel('epidemic_history.xlsx')
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo7.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | from matplotlib import pyplot as plt
3 |
4 | img = cv.imread('maliao.jpg', cv.IMREAD_COLOR)
5 | # OpenCV returns BGR while matplotlib assumes RGB, so the colors look swapped
6 | plt.imshow(img)
7 | plt.show()
--------------------------------------------------------------------------------
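The demo above runs into the classic gotcha: cv.imread returns BGR channel order while plt.imshow assumes RGB, so the colors render swapped. A minimal fix is to convert before plotting:

```python
import cv2 as cv
from matplotlib import pyplot as plt

img = cv.imread('maliao.jpg', cv.IMREAD_COLOR)
# convert BGR (OpenCV's channel order) to RGB (matplotlib's) before plotting
plt.imshow(cv.cvtColor(img, cv.COLOR_BGR2RGB))
plt.show()
```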
/python-spider/proxy-pool/__pycache__/MysqlClient.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/proxy-pool/__pycache__/MysqlClient.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/proxy-pool/__pycache__/VerifyProxy.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/proxy-pool/__pycache__/VerifyProxy.cpython-37.pyc
--------------------------------------------------------------------------------
/python-data-analysis/2019-nCoV-global/__pycache__/namemap.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-data-analysis/2019-nCoV-global/__pycache__/namemap.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/__pycache__/items.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/__pycache__/items.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/__pycache__/settings.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/__pycache__/settings.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/__pycache__/middlewares.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/__pycache__/middlewares.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/__pycache__/pipelines.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/__pycache__/pipelines.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/quotes.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/quotes.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/HttpbinSpider.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/HttpbinSpider.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/MzituSpider.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/first_scrapy/first_scrapy/spiders/__pycache__/MzituSpider.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/items.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/items.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_splash_demo/scrapy_splash_demo/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/__pycache__/settings.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_splash_demo/scrapy_splash_demo/__pycache__/settings.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/spiders/__pycache__/jd.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_splash_demo/scrapy_splash_demo/spiders/__pycache__/jd.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/selenium-demo/execute_script_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 |
3 | driver = webdriver.Chrome()
4 | driver.get('https://www.taobao.com/')
5 | driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
--------------------------------------------------------------------------------
/python-data-analysis/mojito/emoji-demo.py:
--------------------------------------------------------------------------------
1 | import emoji
2 |
3 | with open("dan_mu.txt", encoding="utf-8") as f:
4 |     txt = f.read()
5 | danmu_list = txt.split("\n")
6 |
7 | for item in danmu_list:
8 |     print(emoji.demojize(item))
9 |
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/pipelines.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/pipelines.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/settings.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/settings.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/spiders/__pycache__/jd.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/spiders/__pycache__/jd.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/middlewares.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/__pycache__/middlewares.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/spiders/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_splash_demo/scrapy_splash_demo/spiders/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo4.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | color_img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)
4 | color_img[50:100, 50:100] = [255, 255, 255]
5 |
6 | cv.imshow("color_img", color_img)
7 | cv.waitKey()
8 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/spiders/__pycache__/__init__.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteor1993/python-learning/HEAD/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/spiders/__pycache__/__init__.cpython-37.pyc
--------------------------------------------------------------------------------
/python-spider/selenium-demo/implicit_waits_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 |
3 | driver = webdriver.Chrome()
4 |
5 | driver.implicitly_wait(10) # seconds
6 | driver.get("https://www.jd.com/")
7 | key = driver.find_element_by_id("key")
8 |
9 | print(key)
--------------------------------------------------------------------------------
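The implicit wait above applies globally to every find call. The repo's explicit_waits_demo.py covers the targeted alternative; a minimal sketch in the same Selenium 3 style, waiting only for the search box:

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://www.jd.com/")
# wait up to 10 seconds for the search box to appear, then proceed
key = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, "key"))
)
print(key)
```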
/python-data-analysis/matplotlib/ImshowDemo.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | x = np.random.rand(10, 10)
5 | plt.imshow(x, cmap=plt.cm.hot)
6 |
7 | # show the colorbar on the right
8 | plt.colorbar()
9 |
10 | plt.savefig('imshow_demo.png')
11 |
--------------------------------------------------------------------------------
/python-opencv/blog1-start/demo2.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the image (flag 1 == cv.IMREAD_COLOR)
4 | img = cv.imread("maliao.jpg", 1)
5 |
6 | # display the image
7 | cv.imshow("demo", img)
8 |
9 | # wait for a key press
10 | cv.waitKey(0)
11 | cv.destroyAllWindows()
12 |
13 | # write the image to disk
14 | cv.imwrite("demo.jpg", img)
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/others/selectorsdemo.py:
--------------------------------------------------------------------------------
1 | from scrapy import Selector
2 |
3 | body = '<html><head><title>Hello Python</title></head><body></body></html>'
4 | selector = Selector(text=body)
5 | title = selector.xpath('//title/text()').extract_first()
6 | print(title)
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/matplotlib-hist.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | img = cv.imread("maliao.jpg")
5 |
6 | cv.imshow("img", img)
7 | cv.waitKey(0)
8 | cv.destroyAllWindows()
9 |
10 | plt.hist(img.ravel(), 256, [0, 256])
11 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the image as grayscale
4 | gray_img = cv.imread("maliao.jpg", cv.IMREAD_GRAYSCALE)
5 | print(gray_img[20, 30])
6 |
7 | # display the image
8 | cv.imshow("gray_img", gray_img)
9 |
10 | # wait for a key press
11 | cv.waitKey()
12 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/demo2-size.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the color image
4 | color_img = cv.imread("maliao.jpg", cv.IMREAD_ANYCOLOR)
5 |
6 | print(color_img.size)
7 |
8 | # read the grayscale image
9 | gray_img = cv.imread("maliao.jpg", cv.IMREAD_GRAYSCALE)
10 |
11 | print(gray_img.size)
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/demo1-shape.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the color image
4 | color_img = cv.imread("maliao.jpg", cv.IMREAD_ANYCOLOR)
5 |
6 | print(color_img.shape)
7 |
8 | # read the grayscale image
9 | gray_img = cv.imread("maliao.jpg", cv.IMREAD_GRAYSCALE)
10 |
11 | print(gray_img.shape)
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/demo3-dtype.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the color image
4 | color_img = cv.imread("maliao.jpg", cv.IMREAD_ANYCOLOR)
5 |
6 | print(color_img.dtype)
7 |
8 | # read the grayscale image
9 | gray_img = cv.imread("maliao.jpg", cv.IMREAD_GRAYSCALE)
10 |
11 | print(gray_img.dtype)
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts/Gauge_demo.py:
--------------------------------------------------------------------------------
1 | from pyecharts import options as opts
2 | from pyecharts.charts import Gauge
3 |
4 | c = (
5 |     Gauge()
6 |     .add("", [("Completion rate", 80)])
7 |     .set_global_opts(title_opts=opts.TitleOpts(title="Gauge - basic example"))
8 |     .render("gauge_base.html")
9 | )
--------------------------------------------------------------------------------
/python-opencv/bilateral-filter/demo.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | source = cv.imread("zhaopian.jpg")
4 | dst = cv.bilateralFilter(src=source, d=-1, sigmaColor=30, sigmaSpace=15)
5 |
6 | cv.imshow("source", source)
7 | cv.imshow("dst", dst)
8 |
9 | cv.waitKey()
10 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
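Parameter note: passing d=-1 (non-positive) lets OpenCV derive the filter diameter from sigmaSpace, and sigmaColor sets how large a color difference is still averaged over, so raising it smooths more aggressively while strong edges are preserved.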
/python-spider/aiohttp-demo/request-demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from datetime import datetime
3 |
4 | start = datetime.now()
5 |
6 | for i in range(100):
7 |     print(requests.get('https://www.baidu.com/').text)
8 |
9 | end = datetime.now()
10 |
11 | print("request花费时间为:", end - start)
--------------------------------------------------------------------------------
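The loop above is the synchronous baseline that the repo's aio-demo.py is measured against. A minimal asynchronous sketch of the same 100 requests, assuming aiohttp is installed (names here are illustrative, not the repo file's exact code):

```python
import asyncio
from datetime import datetime

import aiohttp

async def fetch(session, url):
    # each coroutine awaits its own response body
    async with session.get(url) as response:
        return await response.text()

async def main():
    async with aiohttp.ClientSession() as session:
        # schedule all 100 requests concurrently instead of one by one
        tasks = [fetch(session, 'https://www.baidu.com/') for _ in range(100)]
        await asyncio.gather(*tasks)

start = datetime.now()
asyncio.run(main())
print("aiohttp elapsed time:", datetime.now() - start)
```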
/python-data-analysis/pyecharts/Liquid_base_demo.py:
--------------------------------------------------------------------------------
1 | from pyecharts import options as opts
2 | from pyecharts.charts import Liquid
3 |
4 | c = (
5 |     Liquid()
6 |     .add("lq", [0.6, 0.7])
7 |     .set_global_opts(title_opts=opts.TitleOpts(title="Liquid - basic example"))
8 |     .render("liquid_base.html")
9 | )
--------------------------------------------------------------------------------
/data_structure/QueueTest.py:
--------------------------------------------------------------------------------
1 | import queue
2 |
3 | q1 = queue.Queue(maxsize=5)
4 | q2 = queue.LifoQueue(maxsize=5)
5 |
6 | for i in range(5):
7 |     q1.put(i)
8 |     q2.put(i)
9 |
10 | while not q1.empty():
11 |     print('q1:', q1.get())
12 |
13 | while not q2.empty():
14 |     print('q2:', q2.get())
--------------------------------------------------------------------------------
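Running this shows the two disciplines side by side: the FIFO queue q1 prints 0 through 4 in insertion order, while the LIFO queue q2 prints 4 down to 0.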
/python-opencv/blog3-attribute/demo7-merge.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
4 |
5 | # split the channels
6 | b, g, r = cv.split(img)
7 |
8 | # merge the channels back into one image
9 | m = cv.merge([b, g, r])
10 |
11 | cv.imshow('merge', m)
12 |
13 | # wait for a key press
14 | cv.waitKey(0)
15 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/demo4-roi.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
4 |
5 | face = img[10:175, 100:260]
6 |
7 | # show the original image
8 | cv.imshow("demo", img)
9 |
10 | # show Mario's face (the ROI)
11 | cv.imshow("face", face)
12 |
13 | # wait for a key press
14 | cv.waitKey(0)
15 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo6.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the color image
4 | color_img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)
5 |
6 | print(color_img[20, 30])
7 |
8 | color_img.itemset((20, 30, 0), 255)
9 | color_img.itemset((20, 30, 1), 255)
10 | color_img.itemset((20, 30, 2), 255)
11 |
12 | print(color_img[20, 30])
--------------------------------------------------------------------------------
/python-spider/proxy-set-demo/selenium_proxy_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 |
3 | proxy = '222.95.241.6:3000'
4 | chrome_options = webdriver.ChromeOptions()
5 | chrome_options.add_argument('--proxy-server=https://' + proxy)
6 | driver = webdriver.Chrome(chrome_options=chrome_options)
7 | driver.get('https://httpbin.org/get')
--------------------------------------------------------------------------------
/python-spider/selenium-demo/back_forward_demo.py:
--------------------------------------------------------------------------------
1 | import time
2 | from selenium import webdriver
3 |
4 | browser = webdriver.Chrome()
5 | browser.get('https://www.jd.com/')
6 | browser.get('https://www.taobao.com/')
7 | browser.get('https://www.geekdigging.com/')
8 | browser.back()
9 | time.sleep(1)
10 | browser.forward()
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/demo5-roi.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
4 |
5 | # grab the ROI region
6 | face = img[10:175, 100:260]
7 | # paste the ROI back into the image
8 | img[0:165, 0:160] = face
9 |
10 | # show the resulting image
11 | cv.imshow("demo", img)
12 |
13 | # wait for a key press
14 | cv.waitKey(0)
15 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/requests-demo/response_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | r = requests.get('https://www.baidu.com')
4 | print(type(r.content), r.content)
5 | print(type(r.status_code), r.status_code)
6 | print(type(r.headers), r.headers)
7 | print(type(r.cookies), r.cookies)
8 | print(type(r.url), r.url)
9 | print(type(r.history), r.history)
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/findContours.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("black.png")
4 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
5 | # binarize the image (findContours expects a binary input)
6 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
7 | # find the contours on the thresholded image
8 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
9 |
10 | print(len(contours[0]))
--------------------------------------------------------------------------------
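To see the result rather than a count, the contours can be drawn back onto the image (the repo's drawContours.py covers this in full; the sketch below uses illustrative color and thickness values):

```python
import cv2 as cv

img = cv.imread("black.png")
gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray_img, 127, 255, 0)
contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)

# contourIdx=-1 draws every contour, here in green with a 2-pixel line
cv.drawContours(img, contours, -1, (0, 255, 0), 2)

cv.imshow("contours", img)
cv.waitKey(0)
cv.destroyAllWindows()
```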
/python-opencv/blog4-calculate/demo-cvt.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the image
4 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
5 |
6 | # convert the color space; OpenCV loads images as BGR, so BGR2GRAY is the correct flag
7 | result = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
8 |
9 | # show the images
10 | cv.imshow("img", img)
11 | cv.imshow("result", result)
12 |
13 | # wait for a key press
14 | cv.waitKey()
15 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/first_scrapy/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = first_scrapy.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = first_scrapy
12 |
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/demo-resize.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # read the image
4 | src = cv.imread('maliao.jpg')
5 | print(src.shape)
6 |
7 | # resize to an absolute dsize of (width, height)
8 | result = cv.resize(src, (300, 150))
9 | print(result.shape)
10 |
11 | # show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", result)
14 |
15 | # wait for a key press
16 | cv.waitKey()
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
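Note the axis order: src.shape reports (height, width, channels) while cv.resize takes its dsize argument as (width, height), so the (300, 150) call above prints a result shape of (150, 300, 3).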
/python-spider/first_scrapy/first_scrapy/spiders/HttpbinSpider.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 |
4 | class HttpbinSpider(scrapy.Spider):
5 |     name = 'httpbin'
6 |     allowed_domains = ['httpbin.org']
7 |     start_urls = ['https://httpbin.org/get']
8 |
9 |     def parse(self, response):
10 |         self.logger.debug(response.text)
--------------------------------------------------------------------------------
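A hedged sketch of a possible extension, not a file from the repo: httpbin.org/get returns JSON, so the callback can decode the body instead of logging raw text. The spider name httpbin_json is made up for illustration; it would be run from the project root with `scrapy crawl httpbin_json`.

# -*- coding: utf-8 -*-
import json

import scrapy


class HttpbinJsonSpider(scrapy.Spider):
    # Hypothetical spider, modeled on HttpbinSpider above
    name = 'httpbin_json'
    allowed_domains = ['httpbin.org']
    start_urls = ['https://httpbin.org/get']

    def parse(self, response):
        # httpbin echoes the request back as JSON
        data = json.loads(response.text)
        self.logger.debug(data.get('origin'))
        yield data
--------------------------------------------------------------------------------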
/spider-blog/tongji/src/test/java/com/geekdigging/tongji/TongjiApplicationTests.java:
--------------------------------------------------------------------------------
1 | package com.geekdigging.tongji;
2 |
3 | import org.junit.jupiter.api.Test;
4 | import org.springframework.boot.test.context.SpringBootTest;
5 |
6 | @SpringBootTest
7 | class TongjiApplicationTests {
8 |
9 |     @Test
10 |     void contextLoads() {
11 |     }
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapy_splash_demo.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapy_splash_demo
12 |
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/demo-resize-fxfy.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read the image
4 | src = cv.imread('maliao.jpg')
5 | print(src.shape)
6 |
7 | # Scale the image by a factor of 0.5 along each axis
8 | result = cv.resize(src, None, fx=0.5, fy=0.5)
9 | print(result.shape)
10 |
11 | # Show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", result)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey()
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapy_selenium_demo.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapy_selenium_demo
12 |
--------------------------------------------------------------------------------
/python-spider/requests-demo/session_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | # Without a Session, cookies do not persist across requests
4 | requests.get('https://httpbin.org/cookies/set/number/123456789')
5 | r = requests.get('https://httpbin.org/cookies')
6 | print(r.text)
7 |
8 | # With a Session, the cookie set above is sent on the next request
9 | s = requests.Session()
10 | s.get('https://httpbin.org/cookies/set/number/123456789')
11 | r = s.get('https://httpbin.org/cookies')
12 | print(r.text)
--------------------------------------------------------------------------------
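One more variant worth noting, a sketch rather than repo code: requests.Session also works as a context manager, which releases the pooled connections when the block exits.

import requests

with requests.Session() as s:
    s.get('https://httpbin.org/cookies/set/number/123456789')
    r = s.get('https://httpbin.org/cookies')
    # The cookie set above persists within the session
    print(r.text)
--------------------------------------------------------------------------------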
/python-spider/proxy-set-demo/requests_proxy_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | # Sample proxy addresses; replace them with live proxies before running
4 | proxies = {
5 |     'http': 'http://59.52.186.117:9999',
6 |     'https': 'https://222.95.241.6:3000',
7 | }
8 | try:
9 |     response = requests.get('https://httpbin.org/get', proxies = proxies)
10 |     print(response.text)
11 | except requests.exceptions.ConnectionError as e:
12 |     print('Error', e.args)
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/clahe.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | img = cv.imread('clahe_src.jpg', 0)
5 |
6 | # Global histogram equalization
7 | equ = cv.equalizeHist(img)
8 |
9 | # Contrast Limited Adaptive Histogram Equalization (CLAHE)
10 | clahe = cv.createCLAHE(clipLimit = 2.0, tileGridSize = (8, 8))
11 | cl1 = clahe.apply(img)
12 |
13 | # Stack the three results horizontally for comparison
14 | result1 = np.hstack((img, equ, cl1))
15 |
16 | cv.imwrite('clahe_result.jpg', result1)
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/demo-trunc.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | src = cv.imread("maliao.jpg")
4 |
5 | # Convert the BGR image to grayscale
6 | gray_img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
7 |
8 | # Truncation thresholding: values above 127 are clamped to 127
9 | r, b = cv.threshold(gray_img, 127, 255, cv.THRESH_TRUNC)
10 |
11 | # Show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", b)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey(0)
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/demo-binary.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | src = cv.imread("maliao.jpg")
4 |
5 | # Convert the BGR image to grayscale
6 | gray_img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
7 |
8 | # Binary thresholding: pixels above 127 become 255, the rest become 0
9 | r, b = cv.threshold(gray_img, 127, 255, cv.THRESH_BINARY)
10 |
11 | # Show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", b)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey(0)
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/demo-tozero.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | src = cv.imread("maliao.jpg")
4 |
5 | # Convert the BGR image to grayscale
6 | gray_img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
7 |
8 | # To-zero thresholding: pixels at or below 127 become 0, the rest are unchanged
9 | r, b = cv.threshold(gray_img, 127, 255, cv.THRESH_TOZERO)
10 |
11 | # Show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", b)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey(0)
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | class ScrapySplashDemoPipeline(object):
10 |     def process_item(self, item, spider):
11 |         return item
12 |
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/demo-binary-inv.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | src = cv.imread("maliao.jpg")
4 |
5 | # Convert the BGR image to grayscale
6 | gray_img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
7 |
8 | # Inverse binary thresholding: pixels above 127 become 0, the rest become 255
9 | r, b = cv.threshold(gray_img, 127, 255, cv.THRESH_BINARY_INV)
10 |
11 | # Show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", b)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey(0)
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/demo-tozero-inv.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | src = cv.imread("maliao.jpg")
4 |
5 | # Convert the BGR image to grayscale
6 | gray_img = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
7 |
8 | # Inverse to-zero thresholding: pixels above 127 become 0, the rest are unchanged
9 | r, b = cv.threshold(gray_img, 127, 255, cv.THRESH_TOZERO_INV)
10 |
11 | # Show the images
12 | cv.imshow("src", src)
13 | cv.imshow("result", b)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey(0)
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
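The five threshold demos above differ only in the flag they pass. A compact sketch, not a file from the repo, that plots all five results side by side with matplotlib, assuming the same maliao.jpg input:

import cv2 as cv
from matplotlib import pyplot as plt

gray_img = cv.cvtColor(cv.imread("maliao.jpg"), cv.COLOR_BGR2GRAY)

flags = [cv.THRESH_BINARY, cv.THRESH_BINARY_INV, cv.THRESH_TRUNC,
         cv.THRESH_TOZERO, cv.THRESH_TOZERO_INV]
titles = ['BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']

for i, flag in enumerate(flags):
    r, b = cv.threshold(gray_img, 127, 255, flag)
    plt.subplot(2, 3, i + 1)
    plt.imshow(b, 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
--------------------------------------------------------------------------------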
/python-spider/scrapy_splash_demo/scrapy_splash_demo/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://docs.scrapy.org/en/latest/topics/items.html
7 |
8 | import scrapy
9 |
10 |
11 | class ScrapySplashDemoItem(scrapy.Item):
12 |     # define the fields for your item here like:
13 |     # name = scrapy.Field()
14 |     pass
15 |
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/bgr-hist.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | img = cv.imread("tiankong.jpg")
5 | color = ('b', 'g', 'r')
6 |
7 | cv.imshow("img", img)
8 | cv.waitKey(0)
9 | cv.destroyAllWindows()
10 |
11 | # Plot a histogram for each of the B, G and R channels
12 | for i, col in enumerate(color):
13 |     histr = cv.calcHist([img], [i], None, [256], [0, 256])
14 |     plt.plot(histr, color = col)
15 |     plt.xlim([0, 256])
16 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo5.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read the image as grayscale and fetch a single pixel value
4 | gray_img = cv.imread("maliao.jpg", cv.IMREAD_GRAYSCALE)
5 | print(gray_img.item(20, 30))
6 |
7 | # Read the image in color
8 | color_img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)
9 |
10 | # item(row, col, channel): channels are ordered B, G, R
11 | blue = color_img.item(20, 30, 0)
12 | print(blue)
13 |
14 | green = color_img.item(20, 30, 1)
15 | print(green)
16 |
17 | red = color_img.item(20, 30, 2)
18 | print(red)
--------------------------------------------------------------------------------
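The reverse operation, sketched here and not part of the repo: ndarray.itemset writes a single channel value. Note that itemset was deprecated in NumPy 1.25, so this assumes an older NumPy; plain indexing (img[20, 30, 0] = 255) always works.

import cv2 as cv

img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)

# Set the blue channel of the pixel at row 20, column 30
img.itemset((20, 30, 0), 255)
print(img.item(20, 30, 0))
--------------------------------------------------------------------------------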
/python-opencv/blog8-erode/demo-dilate.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | # Read the image as grayscale
5 | source = cv.imread("demo.png", cv.IMREAD_GRAYSCALE)
6 |
7 | # Build the convolution kernel
8 | kernel = np.ones((5, 5), np.uint8)
9 |
10 | # Dilate the image (iterations defaults to 1)
11 | dst = cv.dilate(source, kernel)
12 |
13 | # Show the images
14 | cv.imshow("source", source)
15 | cv.imshow("dst", dst)
16 |
17 | # Wait for a key press, then close
18 | cv.waitKey(0)
19 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/demo6-split.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
4 |
5 | # Split the channels
6 | b, g, r = cv.split(img)
7 |
8 | # The same split via NumPy indexing
9 | # b = img[:, :, 0]
10 | # g = img[:, :, 1]
11 | # r = img[:, :, 2]
12 |
13 | # Show each channel separately (each displays as a grayscale image)
14 | cv.imshow("B", b)
15 | cv.imshow("G", g)
16 | cv.imshow("R", r)
17 |
18 | # Wait for a key press, then close
19 | cv.waitKey(0)
20 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog4-calculate/demo-addWeighted.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read the images (they must share the same size and type)
4 | img1 = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
5 | img2 = cv.imread("rain.jpg", cv.IMREAD_UNCHANGED)
6 |
7 | # Blend the images: dst = img1 * 0.4 + img2 * 0.6 + 10
8 | img = cv.addWeighted(img1, 0.4, img2, 0.6, 10)
9 |
10 | # Show the images
11 | cv.imshow("img1", img1)
12 | cv.imshow("img2", img2)
13 | cv.imshow("img", img)
14 |
15 | # Wait for a key press, then close
16 | cv.waitKey()
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog8-erode/demo-erode.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | # Read the image as grayscale
5 | source = cv.imread("test.png", cv.IMREAD_GRAYSCALE)
6 |
7 | # Build the convolution kernel
8 | kernel = np.ones((5, 5), np.uint8)
9 |
10 | # Erode the image, running 5 iterations
11 | dst = cv.erode(source, kernel, iterations=5)
12 |
13 | # Show the images
14 | cv.imshow("source", source)
15 | cv.imshow("dst", dst)
16 |
17 | # Wait for a key press, then close
18 | cv.waitKey(0)
19 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/selenium-demo/interaction_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | import time
3 |
4 | driver = webdriver.Chrome()
5 | driver.implicitly_wait(10)
6 | driver.get('https://www.taobao.com/')
7 | # Named input_box to avoid shadowing the built-in input()
8 | input_box = driver.find_element_by_id('q')
9 | input_box.send_keys('IPad')
10 | time.sleep(1)
11 | input_box.clear()
12 | input_box.send_keys('Surface Pro')
13 | button = driver.find_element_by_class_name('btn-search')
14 | button.click()
--------------------------------------------------------------------------------
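Beyond send_keys and click, Selenium chains lower-level mouse actions through ActionChains. A sketch, not repo code; the drag-and-drop demo page URL below is an assumption, so substitute any page with draggable elements.

from selenium import webdriver
from selenium.webdriver import ActionChains

driver = webdriver.Chrome()
driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
driver.switch_to.frame('iframeResult')

source = driver.find_element_by_css_selector('#draggable')
target = driver.find_element_by_css_selector('#droppable')

# Queue the drag-and-drop, then run the whole chain with perform()
actions = ActionChains(driver)
actions.drag_and_drop(source, target)
actions.perform()
--------------------------------------------------------------------------------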
/python-opencv/blog4-calculate/demo-add.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read the image
4 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
5 |
6 | test = img
7 |
8 | # NumPy addition wraps around (modulo 256)
9 | result1 = img + test
10 |
11 | # OpenCV addition saturates at 255
12 | result2 = cv.add(img, test)
13 |
14 | # Show the images
15 | cv.imshow("img", img)
16 | cv.imshow("result1", result1)
17 | cv.imshow("result2", result2)
18 |
19 | # Wait for a key press, then close
20 | cv.waitKey()
21 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
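The difference between the two additions is easiest to see on a single value. A sketch, not repo code: 250 + 10 wraps to 4 under NumPy's modulo-256 arithmetic but clips to 255 under OpenCV's saturating add.

import cv2 as cv
import numpy as np

x = np.uint8([250])
y = np.uint8([10])

print(x + y)         # [4]     -> (250 + 10) % 256, NumPy wraps around
print(cv.add(x, y))  # [[255]] -> 250 + 10 clipped to 255, OpenCV saturates
--------------------------------------------------------------------------------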
/python-opencv/blog5-resize/demo-resize-scale.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Scaling factor
4 | scale = 0.5
5 |
6 | # Read the image
7 | src = cv.imread('maliao.jpg')
8 | rows, cols = src.shape[:2]
9 |
10 | # Resize the image; dsize is (width, height)
11 | result = cv.resize(src, (int(cols * scale), int(rows * scale)))
12 | print(result.shape)
13 |
14 | # Show the images
15 | cv.imshow("src", src)
16 | cv.imshow("result", result)
17 |
18 | # Wait for a key press, then close
19 | cv.waitKey()
20 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog1-start/demo1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Print the OpenCV version
4 | print(cv.getVersionString())
5 |
6 | # Read the image
7 | img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)
8 | cv.imshow("read_img", img)
9 | # Grayscale image (OpenCV loads BGR, so convert with BGR2GRAY)
10 | img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
11 | cv.imshow("gray_img", img_gray)
12 | # Binary image
13 | ret, binary = cv.threshold(img_gray, 127, 255, cv.THRESH_BINARY)
14 | cv.imshow("binary_img", binary)
15 |
16 | cv.waitKey()
17 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo8.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | from matplotlib import pyplot as plt
3 |
4 | img = cv.imread('maliao.jpg', cv.IMREAD_COLOR)
5 |
6 | # Method 1: split the channels and re-merge them in RGB order
7 | b, g, r = cv.split(img)
8 | img2 = cv.merge([r, g, b])
9 | plt.imshow(img2)
10 | plt.show()
11 |
12 | # Method 2: reverse the channel axis with NumPy slicing
13 | img3 = img[:, :, ::-1]
14 | plt.imshow(img3)
15 | plt.show()
16 |
17 | # Method 3: use cvtColor
18 | img4 = cv.cvtColor(img, cv.COLOR_BGR2RGB)
19 | plt.imshow(img4)
20 | plt.show()
--------------------------------------------------------------------------------
/python-spider/selenium-demo/cookies_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 |
3 | browser = webdriver.Chrome()
4 | browser.get('https://www.geekdigging.com/')
5 | # Get all cookies
6 | print(browser.get_cookies())
7 | # Add a cookie
8 | browser.add_cookie({'name': 'name', 'domain': 'www.geekdigging.com', 'value': 'geekdigging'})
9 | print(browser.get_cookies())
10 | # Delete all cookies
11 | browser.delete_all_cookies()
12 | print(browser.get_cookies())
--------------------------------------------------------------------------------
/python-spider/selenium-demo/Demo2.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.by import By
3 |
4 | browser = webdriver.Chrome()
5 |
6 | browser.get('https://www.jd.com/')
7 | input_key = browser.find_element_by_id('key')
8 | input_key1 = browser.find_element(By.ID, 'key')
9 | print(input_key)
10 | print(input_key1)
11 |
12 | lis = browser.find_elements_by_css_selector('.cate_menu li')
13 | print(lis)
14 | browser.close()
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo2.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read the image in color
4 | color_img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)
5 |
6 | # A pixel is a [B, G, R] triple
7 | print(color_img[20, 30])
8 |
9 | blue = color_img[20, 30, 0]
10 | print(blue)
11 |
12 | green = color_img[20, 30, 1]
13 | print(green)
14 |
15 | red = color_img[20, 30, 2]
16 | print(red)
17 |
18 | # Show the image
19 | cv.imshow("color_img", color_img)
20 |
21 | # Wait for a key press, then close
22 | cv.waitKey()
23 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/requests-demo/proxy_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | proxies = {
4 |     "http": "http://10.10.1.10:3128",
5 |     "https": "http://10.10.1.10:1080",
6 | }
7 |
8 | requests.get("https://www.geekdigging.com/", proxies = proxies)
9 |
10 | # SOCKS proxies need the extra dependency: pip install requests[socks]
11 | proxies_socket = {
12 |     'http': 'socks5://user:pass@host:port',
13 |     'https': 'socks5://user:pass@host:port'
14 | }
15 |
16 | requests.get("https://www.geekdigging.com/", proxies = proxies_socket)
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/demo-matrix2D.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read the image
4 | src = cv.imread('maliao.jpg')
5 |
6 | # Height and width of the source image
7 | rows, cols = src.shape[:2]
8 |
9 | # Rotate around the image center
10 | # Arguments: center, rotation angle in degrees, scale
11 | M = cv.getRotationMatrix2D((cols/2, rows/2), 90, 1)
12 | # Apply the affine transform
13 | dst = cv.warpAffine(src, M, (cols, rows))
14 |
15 | # Show the images
16 | cv.imshow("src", src)
17 | cv.imshow("dst", dst)
18 |
19 | # Wait for a key press, then close
20 | cv.waitKey()
21 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
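With the fixed (cols, rows) output size above, a rotated image gets its corners cropped. A sketch of the usual fix, not a file from the repo: enlarge the output canvas and shift the rotation center.

import cv2 as cv

src = cv.imread('maliao.jpg')
rows, cols = src.shape[:2]

M = cv.getRotationMatrix2D((cols / 2, rows / 2), 45, 1)

# New canvas size that fits the rotated image
cos, sin = abs(M[0, 0]), abs(M[0, 1])
new_w = int(rows * sin + cols * cos)
new_h = int(rows * cos + cols * sin)

# Move the rotation center to the center of the new canvas
M[0, 2] += new_w / 2 - cols / 2
M[1, 2] += new_h / 2 - rows / 2

dst = cv.warpAffine(src, M, (new_w, new_h))
cv.imshow("dst", dst)
cv.waitKey()
cv.destroyAllWindows()
--------------------------------------------------------------------------------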
/python-spider/requests-demo/post_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | headers = {
4 |     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
5 |     'referer': 'https://www.geekdigging.com/'
6 | }
7 |
8 | params = {
9 |     'name': 'geekdigging',
10 |     'age': '18'
11 | }
12 |
13 | r = requests.post('https://httpbin.org/post', data = params, headers = headers)
14 | print(r.text)
--------------------------------------------------------------------------------
/python-opencv/blog11-canny/canny.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | from matplotlib import pyplot as plt
3 |
4 | # Read the image as grayscale
5 | img = cv.imread('maliao.jpg', 0)
6 | edges = cv.Canny(img, 100, 200)
7 |
8 | # Titles and images for display
9 | titles = ['Original Img', 'Edge Img']
10 | images = [img, edges]
11 |
12 | # Plot both images with matplotlib
13 | for i in range(2):
14 |     plt.subplot(1, 2, i + 1)
15 |     plt.imshow(images[i], 'gray')
16 |     plt.title(titles[i])
17 |     plt.xticks([]), plt.yticks([])
18 |
19 | plt.show()
--------------------------------------------------------------------------------
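The 100/200 thresholds above are hand-picked. A common heuristic, sketched here rather than taken from the repo, derives them from the median intensity of the image:

import cv2 as cv
import numpy as np

img = cv.imread('maliao.jpg', 0)

# Place the two thresholds in a band of +/- sigma around the median
sigma = 0.33
v = np.median(img)
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))

edges = cv.Canny(img, lower, upper)
cv.imshow("auto_canny", edges)
cv.waitKey(0)
cv.destroyAllWindows()
--------------------------------------------------------------------------------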
/python-opencv/blog3-attribute/demo8-merge.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | # Read the image
5 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
6 | rows, cols, chn = img.shape
7 |
8 | # Keep the blue channel and zero out green and red
9 | b = img[:, :, 0]
10 | g = np.zeros((rows, cols), dtype=img.dtype)
11 | r = np.zeros((rows, cols), dtype=img.dtype)
12 |
13 | # Merge the channels back into one image
14 | m = cv.merge([b, g, r])
15 |
16 | cv.imshow('merge', m)
17 |
18 | # Wait for a key press, then close
19 | cv.waitKey(0)
20 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/demo-warpAffine.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | # Read the image
5 | src = cv.imread('maliao.jpg')
6 | rows, cols = src.shape[:2]
7 |
8 | # Translation distances
9 | tx = 50
10 | ty = 100
11 |
12 | # Build the 2x3 translation matrix M = [[1, 0, tx], [0, 1, ty]]
13 | affine = np.float32([[1, 0, tx], [0, 1, ty]])
14 | dst = cv.warpAffine(src, affine, (cols, rows))
15 |
16 | # Show the images
17 | cv.imshow('src', src)
18 | cv.imshow("dst", dst)
19 |
20 | # Wait for a key press, then close
21 | cv.waitKey(0)
22 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/ScatterDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # Use the SimHei font so the Chinese labels render correctly
5 | plt.rcParams['font.sans-serif'] = ['SimHei']
6 |
7 | x_data = np.array([2011, 2012, 2013, 2014, 2015, 2016, 2017])
8 | y_data = np.array([58000, 60200, 63000, 71000, 84000, 90500, 107000])
9 |
10 | plt.scatter(x_data, y_data, s = 100, c = 'green', marker='o', edgecolor='black', alpha=0.5, label = '产品销量')
11 |
12 | plt.legend()
13 |
14 | plt.savefig("scatter_demo.png")
--------------------------------------------------------------------------------
/python-spider/proxy-set-demo/urllib_proxy_demo.py:
--------------------------------------------------------------------------------
1 | from urllib.error import URLError
2 | from urllib.request import ProxyHandler, build_opener
3 |
4 | # Sample proxy addresses; replace them with live proxies before running
5 | proxy_handler = ProxyHandler({
6 |     'http': 'http://182.34.37.0:9999',
7 |     'https': 'https://117.69.150.84:9999'
8 | })
9 | opener = build_opener(proxy_handler)
10 | try:
11 |     response = opener.open('https://httpbin.org/get')
12 |     print(response.read().decode('utf-8'))
13 | except URLError as e:
14 |     print(e.reason)
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-noise.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | # Read the image
5 | img = cv.imread("maliao.jpg", cv.IMREAD_UNCHANGED)
6 | rows, cols, chn = img.shape
7 |
8 | # Add salt noise: set 5000 random pixels to white
9 | for i in range(5000):
10 |     x = np.random.randint(0, rows)
11 |     y = np.random.randint(0, cols)
12 |     img[x, y, :] = 255
13 |
14 | cv.imshow("noise", img)
15 |
16 | # Save the noisy image
17 | cv.imwrite("maliao_noise.jpg", img)
18 |
19 | # Wait for a key press, then close
20 | cv.waitKey()
21 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
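A natural follow-up sketch, not a file from the repo: salt noise like the maliao_noise.jpg written above responds well to median filtering.

import cv2 as cv

img = cv.imread("maliao_noise.jpg")

# A 3x3 median filter replaces each pixel with the neighborhood median,
# which removes isolated white specks
result = cv.medianBlur(img, 3)

cv.imshow("noise", img)
cv.imshow("denoised", result)
cv.waitKey()
cv.destroyAllWindows()
--------------------------------------------------------------------------------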
/python-data-analysis/matplotlib/StackplotDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Use the SimHei font so the Chinese labels render correctly
4 | plt.rcParams['font.sans-serif'] = ['SimHei']
5 |
6 | x_data = [2011, 2012, 2013, 2014, 2015, 2016, 2017]
7 | y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
8 | y_data_1 = [78000, 80200, 93000, 101000, 64000, 70500, 87000]
9 |
10 | plt.title(label='xxx 公司 xxx 产品销量')
11 |
12 | plt.stackplot(x_data, y_data, y_data_1, labels=['产品销量', '用户增长数'])
13 |
14 | plt.legend()
15 |
16 | plt.savefig("stackplot_demo.png")
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-opencv/bilateral-filter/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-opencv/blog3-attribute/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-opencv/blog4-calculate/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-spider/jd-spider-demo/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-spider/lianjia-spider/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-spider/splash-demo/execute_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from urllib.parse import quote
3 |
4 | lua = '''
5 | function main(splash, args)
6 | splash:go("https://www.geekdigging.com/")
7 | return {
8 | url = splash:url(),
9 | jpeg = splash:jpeg(),
10 | har = splash:har(),
11 | cookies = splash:get_cookies()
12 | }
13 | end
14 | '''
15 |
16 | url = 'http://localhost:8050/execute?lua_source=' + quote(lua)
17 | response = requests.get(url)
18 | print(response.text)
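19 |
20 | # Note: splash:jpeg() returns binary image data; the /execute endpoint
21 | # base64-encodes binary fields inside the JSON body printed above.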
--------------------------------------------------------------------------------
/python-spider/urllib-spider-mzitu/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-data-analysis/2019-nCoV-global/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-data-analysis/dynamic_yiqing/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts_map/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 |
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 |
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 |
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 |
30 | ### VS Code ###
31 | .vscode/
32 |
--------------------------------------------------------------------------------
/base-variable/Demo.py:
--------------------------------------------------------------------------------
1 | # name is not defined yet, so printing it here would raise a NameError
2 | # print(name)
3 |
4 | name = "小明"
5 |
6 | print(name)
7 |
8 | name = "小红"
9 |
10 | print(name)
11 |
12 | del name
13 |
14 | # The variable name was deleted above, so printing it again would raise a NameError
15 | # print(name)
16 |
17 | # print('123' + 123)
18 |
19 | print('123' + str(123))
20 |
21 | print(int('123') + 123)
22 |
23 | print(123.5 + 123)
24 |
25 | print(int(123.7))
26 |
27 | print(int(123.7 + 0.5))
28 |
29 | print(int(round(123.4)))
30 |
31 | print(int(round(123.5)))
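32 |
33 | # Note: Python 3 rounds half to even ("banker's rounding"), so round(123.5)
34 | # gives 124 (even) while round(122.5) would give 122.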
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/drawContours.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("black.png")
4 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
5 | cv.imshow("img", img)
6 | # Threshold to reduce noise
7 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
8 | # Find contours on the thresholded image
9 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
10 |
11 | print(len(contours[0]))
12 |
13 | # Draw the contours in green
14 | cv.drawContours(img, contours, -1, (0,255,0), 3)
15 |
16 | cv.imshow("draw", img)
17 |
18 | cv.waitKey(0)
19 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://docs.scrapy.org/en/latest/topics/items.html
7 |
8 | import scrapy
9 |
10 |
11 | class ProductItem(scrapy.Item):
12 | collection = 'products'
13 | image = scrapy.Field()
14 | price = scrapy.Field()
15 | name = scrapy.Field()
16 | commit = scrapy.Field()
17 | shop = scrapy.Field()
18 | icons = scrapy.Field()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/BarhDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # Avoid garbled Chinese characters in labels
5 | plt.rcParams['font.sans-serif']=['SimHei']
6 |
7 | x_data = np.array([2011,2012,2013,2014,2015,2016,2017])
8 | y_data = np.array([58000,60200,63000,71000,84000,90500,107000])
9 |
10 | plt.title(label='xxx 公司 xxx 产品销量')
11 |
12 |
13 | plt.barh(x_data, y_data, alpha=0.6, facecolor = 'deeppink', edgecolor = 'deeppink', label='产品销量')
14 |
15 | plt.legend()
16 |
17 | plt.savefig("barh_demo.png")
--------------------------------------------------------------------------------
/python-spider/selenium-demo/explicit_waits_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.by import By
3 | from selenium.webdriver.support.ui import WebDriverWait
4 | from selenium.webdriver.support import expected_conditions as EC
5 |
6 | driver = webdriver.Chrome()
7 |
8 | driver.get("https://www.jd.com/")
9 | try:
10 | element = WebDriverWait(driver, 10).until(
11 | EC.presence_of_element_located((By.ID, "key"))
12 | )
13 | print(element)
14 | finally:
15 | driver.quit()
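16 |
17 | # presence_of_element_located only waits for the element to exist in the DOM;
18 | # use EC.visibility_of_element_located when it also has to be visible.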
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/calcHist.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | img = cv.imread("xueshan.jpg")
5 | # Args: source image, channel ([0] = B), mask, 256 bins, pixel range 0-255 (the upper bound 256 is exclusive)
6 | histB = cv.calcHist([img], [0], None, [256], [0, 256])
7 | histG = cv.calcHist([img], [1], None, [256], [0, 256])
8 | histR = cv.calcHist([img], [2], None, [256], [0, 256])
9 |
10 | cv.imshow("img", img)
11 | cv.waitKey(0)
12 | cv.destroyAllWindows()
13 |
14 | plt.plot(histB, color='b')
15 | plt.plot(histG, color='g')
16 | plt.plot(histR, color='r')
17 | plt.show()
18 |
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-medianblur.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread('maliao_noise.jpg')
6 | source = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Median blur
9 | result = cv.medianBlur(source, 3)
10 |
11 | # Display the results
12 | titles = ['Source Image', 'medianBlur Image']
13 | images = [source, result]
14 |
15 | for i in range(2):
16 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
17 | plt.title(titles[i])
18 | plt.xticks([]), plt.yticks([])
19 |
20 | plt.show()
--------------------------------------------------------------------------------
/python-spider/urllib-request/Demo_Robotparser.py:
--------------------------------------------------------------------------------
1 | import urllib.robotparser
2 |
3 | rp = urllib.robotparser.RobotFileParser()
4 | rp.set_url("https://www.taobao.com/robots.txt")
5 | rp.read()
6 |
7 | print(rp.can_fetch('Googlebot', 'https://www.taobao.com/article'))
8 | print(rp.can_fetch('Googlebot', "https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&q=iphone&suggest=history_1&_input_charset=utf-8&wq=&suggest_query=&source=suggest"))
--------------------------------------------------------------------------------
/base-data-def/Demo1.py:
--------------------------------------------------------------------------------
1 | a = 0
2 |
3 | def print_1():
4 | # a = 1
5 | print('a1 =', a)
6 |
7 | def print_2():
8 | a = 2
9 | print('a2 =', a)
10 |
11 | print_2()
12 |
13 | print('a3 =', a)
14 | print_1()
15 |
16 | add = lambda x,y: x + y
17 |
18 | print(add(1, 2))
19 |
20 | max_num = lambda x,y: x if x >= y else y
21 |
22 | print(max_num(5, 9))
23 |
24 | def factorial(n):
25 | if n == 1:
26 | return 1
27 | return n * factorial(n - 1)
28 |
29 | print('10! =', factorial(10))
--------------------------------------------------------------------------------
/base-data-set/Demo.py:
--------------------------------------------------------------------------------
1 | # Show that set elements must be immutable
2 | set1 = {1, 2, 3, 'Python', (1, 'geekdigging')}
3 | print(set1)
4 | print(type(set1))
5 |
6 | # Show that duplicates are not allowed
7 | set2 = {1, 2, 2}
8 | print(set2)
9 |
10 | # An empty set
11 | set3 = set()
12 | print(set3)
13 | print(type(set3))
14 |
15 | # Create a set from a list
16 | list1 = [1, 1, 2, 2, 3, 4]
17 | set4 = set(list1)
18 | print(set4)
19 |
20 | # Create a set from a tuple
21 | tup1 = (1, 1, 2, 2, 3, 4)
22 | set5 = set(tup1)
23 | print(set5)
24 |
25 | # Create a set from a string
26 | str1 = 'geekdigging'
27 | set6 = set(str1)
28 | print(set6)
--------------------------------------------------------------------------------
/python-opencv/blog2-pixel/demo3.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Read as a grayscale image
4 | gray_img = cv.imread("maliao.jpg", cv.IMREAD_GRAYSCALE)
5 | print(gray_img[20, 30])
6 | # Assign a single pixel
7 | gray_img[20, 30] = 255
8 | print(gray_img[20, 30])
9 |
10 | # Read as a color image
11 | color_img = cv.imread("maliao.jpg", cv.IMREAD_COLOR)
12 | print(color_img[20, 30])
13 | # Assign the channels one at a time
14 | color_img[20, 30, 0] = 255
15 | color_img[20, 30, 1] = 255
16 | color_img[20, 30, 2] = 255
17 | print(color_img[20, 30])
18 | # Assign all channels at once
19 | color_img[20, 30] = [0, 0, 0]
20 | print(color_img[20, 30])
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-bilateralfilter.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread('maliao_noise.jpg')
6 | source = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Bilateral filter: needs the diameter plus the color and space sigmas
9 | result = cv.bilateralFilter(source, 15, 75, 75)
10 |
11 | # Display the results
12 | titles = ['Source Image', 'bilateralFilter Image']
13 | images = [source, result]
14 |
15 | for i in range(2):
16 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
17 | plt.title(titles[i])
18 | plt.xticks([]), plt.yticks([])
19 |
20 | plt.show()
--------------------------------------------------------------------------------
/spider-blog/tongji/src/main/java/com/geekdigging/tongji/TongjiApplication.java:
--------------------------------------------------------------------------------
1 | package com.geekdigging.tongji;
2 |
3 | import org.mybatis.spring.annotation.MapperScan;
4 | import org.springframework.boot.SpringApplication;
5 | import org.springframework.boot.autoconfigure.SpringBootApplication;
6 |
7 | @SpringBootApplication
8 | @MapperScan("com.geekdigging.tongji.mapper")
9 | public class TongjiApplication {
10 |
11 | public static void main(String[] args) {
12 | SpringApplication.run(TongjiApplication.class, args);
13 | }
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-gaussianblur.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread('maliao_noise.jpg')
6 | source = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Gaussian blur
9 | result = cv.GaussianBlur(source, (3, 3), 0)
10 |
11 | # Display the results
12 | titles = ['Source Image', 'GaussianBlur Image']
13 | images = [source, result]
14 |
15 | for i in range(2):
16 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
17 | plt.title(titles[i])
18 | plt.xticks([]), plt.yticks([])
19 |
20 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-boxfilter.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread('maliao_noise.jpg')
6 | source = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Box filter (normalize=0 disables averaging, so results can saturate)
9 | result = cv.boxFilter(source, -1, (5, 5), normalize=0)
10 |
11 | # Display the results
12 | titles = ['Source Image', 'BoxFilter Image']
13 | images = [source, result]
14 |
15 | for i in range(2):
16 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
17 | plt.title(titles[i])
18 | plt.xticks([]), plt.yticks([])
19 |
20 | plt.show()
--------------------------------------------------------------------------------
/myqr-demo/demo.py:
--------------------------------------------------------------------------------
1 | from MyQR import myqr
2 | import os
3 |
4 | version, level, qr_name = myqr.run(
5 |         words="http://weixin.qq.com/r/Lym2rp7Ev8PArdrN93w9",  # a plain string or a URL (URLs must start with http(s)://)
6 |         version=1,  # QR code version (symbol size), from 1 to 40
7 |         level='H',  # error correction level: L, M, Q or H, rising from left to right
8 |         picture="3.gif",  # merge the QR code with this picture
9 |         colorized=True,  # colored QR code
10 |         contrast=1.0,  # picture contrast; 1.0 keeps the original, smaller lowers it, larger raises it (default 1.0)
11 |         brightness=1.0,  # picture brightness; same usage and range as contrast
12 |         save_name="4.gif",  # output file name; jpg, png, bmp or gif
13 |         save_dir=os.getcwd()  # output directory
14 | )
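15 |
16 | # Note: per the MyQR docs, when picture is a .gif the save_name must end in .gif as well, as it does here.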
--------------------------------------------------------------------------------
/base-file/Demo.py:
--------------------------------------------------------------------------------
1 | str1 = open('F:/project/python-learning/base-data-def/Demo.py', mode='r').read()
2 | # print(str1)
3 |
4 |
5 | str2 = '好好学习,天天向上'
6 | # print(type(str2))
7 | a = str2.encode('utf-8')
8 | # print(type(a))
9 | # print(a.decode('utf-8'))
10 | # Raises an error: utf-8 encoded content cannot be decoded with gbk
11 | # print(a.decode('gbk'))
12 |
13 | import os
14 | os.chdir('F:/project')
15 | # file = open('test.txt', mode='a+')
16 | # print(file.read())
17 | # file.write('\n关注公众号,好好学习,天天向上')
18 | # file.close()
19 |
20 | file = open('test.txt')
21 | print(file.read())
22 | print(file.read())
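23 |
24 | # The second read() prints an empty string: after the first read() the file
25 | # pointer already sits at the end of the file.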
--------------------------------------------------------------------------------
/python-spider/urllib-request/cookies.txt:
--------------------------------------------------------------------------------
1 | # Netscape HTTP Cookie File
2 | # http://curl.haxx.se/rfc/cookie_spec.html
3 | # This is a generated file! Do not edit.
4 |
5 | .baidu.com TRUE / FALSE 1606703804 BAIDUID 0A7A76A3705A730B35A559B601425953:FG=1
6 | .baidu.com TRUE / FALSE 3722651451 BIDUPSID 0A7A76A3705A730BE64A1F6D826869B5
7 | .baidu.com TRUE / FALSE H_PS_PSSID 1461_21102_30211_30125_26350_30239
8 | .baidu.com TRUE / FALSE 3722651451 PSTM 1575167805
9 | .baidu.com TRUE / FALSE delPer 0
10 | www.baidu.com FALSE / FALSE BDSVRTM 0
11 | www.baidu.com FALSE / FALSE BD_HOME 0
12 |
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/PlotDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Avoid garbled Chinese characters in labels
4 | plt.rcParams['font.sans-serif']=['SimHei']
5 |
6 | x_data = [2011,2012,2013,2014,2015,2016,2017]
7 | y_data = [58000,60200,63000,71000,84000,90500,107000]
8 | y_data_1 = [78000,80200,93000,101000,64000,70500,87000]
9 |
10 | # Set the title
11 | plt.title(label='xxx 公司 xxx 产品销量')
12 | plt.plot(x_data, y_data, linestyle = '-.', label = '产品销量')
13 | plt.plot(x_data, y_data_1, label = '用户增长数')
14 | # Turn on the grid lines
15 | plt.grid(True)
16 | # Add the legend
17 | plt.legend()
18 | # Save the figure
19 | plt.savefig("plot_demo.png")
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/ScatterDemo1.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # Avoid garbled Chinese characters in labels
5 | plt.rcParams['font.sans-serif']=['SimHei']
6 |
7 | x_data = np.array([2011,2012,2013,2014,2015,2016,2017])
8 | y_data = np.array([58000,60200,63000,71000,84000,90500,107000])
9 |
10 | # Derive a different color from each y value
11 | colors = y_data * 10
12 | # Derive a different marker size from each y value
13 | area = y_data / 300
14 |
15 | plt.scatter(x_data, y_data, s = area, c = colors, marker='o', edgecolor='black', alpha=0.5, label = '产品销量')
16 |
17 | plt.legend()
18 |
19 | plt.savefig("scatter_demo1.png")
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/PivotTableDemo.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | # Load the data
4 | epidemic_dxy = pd.read_excel("epidemic_dxy.xlsx")
5 |
6 | df = pd.pivot_table(epidemic_dxy, values='currentConfirmedCount', index='continents', aggfunc='sum')
7 |
8 | print(df)
9 |
10 | df1 = pd.pivot_table(epidemic_dxy, values='currentConfirmedCount', index='continents', columns='provinceName', aggfunc='sum')
11 |
12 | print(df1)
13 |
14 | df2 = pd.pivot_table(epidemic_dxy, values='currentConfirmedCount', index=['continents', 'createTime'], columns='provinceName', aggfunc='sum')
15 | print(df2)
--------------------------------------------------------------------------------
/python-spider/aiohttp-demo/aio-demo.py:
--------------------------------------------------------------------------------
1 | import aiohttp
2 | import asyncio
3 | from datetime import datetime
4 |
5 | async def main():
6 | async with aiohttp.ClientSession() as client:
7 | html = await client.get('https://www.baidu.com/')
8 | print(html)
9 |
10 | loop = asyncio.get_event_loop()
11 |
12 | tasks = []
13 | for i in range(100):
14 | task = loop.create_task(main())
15 | tasks.append(task)
16 |
17 | start = datetime.now()
18 |
19 | loop.run_until_complete(asyncio.wait(tasks))
20 |
21 | end = datetime.now()
22 |
23 | print("aiohttp time taken:", end - start)
--------------------------------------------------------------------------------
/python-spider/urllib-request/cookies_mozilla.txt:
--------------------------------------------------------------------------------
1 | # Netscape HTTP Cookie File
2 | # http://curl.haxx.se/rfc/cookie_spec.html
3 | # This is a generated file! Do not edit.
4 |
5 | .baidu.com TRUE / FALSE 1606704556 BAIDUID 823ED2595594806EFB96B98DCF67960C:FG=1
6 | .baidu.com TRUE / FALSE 3722652203 BIDUPSID 823ED2595594806E24971A2B24620EA4
7 | .baidu.com TRUE / FALSE H_PS_PSSID 1450_21089_30210_30087_26350
8 | .baidu.com TRUE / FALSE 3722652203 PSTM 1575168556
9 | .baidu.com TRUE / FALSE delPer 0
10 | www.baidu.com FALSE / FALSE BDSVRTM 0
11 | www.baidu.com FALSE / FALSE BD_HOME 0
12 |
--------------------------------------------------------------------------------
/python-opencv/blog16-histogram/equalize.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | img = cv.imread("dahai.jpg")
5 | gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
6 |
7 | # Equalize the grayscale image
8 | equ = cv.equalizeHist(gray)
9 | # Stack the original and the equalized image horizontally
10 | result1 = np.hstack((gray, equ))
11 | cv.imwrite('grey_equ.png', result1)
12 |
13 | # For a color image, split the channels and equalize each one separately
14 | (b, g, r) = cv.split(img)
15 | bH = cv.equalizeHist(b)
16 | gH = cv.equalizeHist(g)
17 | rH = cv.equalizeHist(r)
18 | # Merge the channels back
19 | equ2 = cv.merge((bH, gH, rH))
20 | # Stack the original and the equalized image horizontally
21 | result2 = np.hstack((img, equ2))
22 | cv.imwrite('bgr_equ.png', result2)
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/BarDemo1.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # Avoid garbled Chinese characters in labels
5 | plt.rcParams['font.sans-serif']=['SimHei']
6 |
7 | x_data = np.array([2011,2012,2013,2014,2015,2016,2017])
8 | y_data = np.array([58000,60200,63000,71000,84000,90500,107000])
9 | y_data_1 = np.array([78000,80200,93000,101000,64000,70500,87000])
10 |
11 | plt.title(label='xxx 公司 xxx 产品销量')
12 |
13 | plt.bar(x_data, y_data, width=0.5, alpha=0.6, facecolor = 'deeppink', edgecolor = 'darkblue', lw=2, label='产品销量')
14 |
15 | plt.legend()
16 |
17 | plt.savefig("bar_demo_1.png")
--------------------------------------------------------------------------------
/base-data-def/Demo.py:
--------------------------------------------------------------------------------
1 | def add(a, b):
2 | c = a + b
3 | return c
4 |
5 |
6 | c = add(1, 2)
7 | print(c)
8 |
9 |
10 | def subtraction(a, b):
11 | return a - b
12 |
13 |
14 | print(subtraction(b=5, a=10))
15 |
16 |
17 | def division(a, b=1):
18 | return a / b
19 |
20 |
21 | print(division(5))
22 | print(division(10, 5))
23 |
24 |
25 | def print_a(a, *b):
26 | print(a, b)
27 |
28 |
29 | print_a(1, 2, 3, 4, 5, 6)
30 |
31 |
32 | def print_b(a, **b):
33 | print_a(a, b)
34 |
35 |
36 | print_b(1, q='q', w='w', e='e')
37 |
38 | print_a(1)
39 | print_b(1)
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-filter2D.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2 as cv
3 | from matplotlib import pyplot as plt
4 |
5 | # Read the image
6 | img = cv.imread("maliao_noise.jpg", cv.IMREAD_UNCHANGED)
7 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
8 |
9 | kernel = np.ones((5,5),np.float32)/25  # 5x5 averaging kernel
10 |
11 | dst = cv.filter2D(rgb_img, -1, kernel)
12 |
13 | titles = ['Source Image', 'filter2D Image']
14 | images = [rgb_img, dst]
15 |
16 | for i in range(2):
17 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
18 | plt.title(titles[i])
19 | plt.xticks([]), plt.yticks([])
20 |
21 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-gradient-1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo.png", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Morphological gradient
12 | dst = cv.morphologyEx(source, cv.MORPH_GRADIENT, kernel)
13 |
14 | # Display the results
15 | titles = ['Source Img','Dst Img']
16 | images = [source, dst]
17 |
18 | # Plot with matplotlib
19 | for i in range(2):
20 | plt.subplot(1, 2, i+1), plt.imshow(images[i],'gray')
21 | plt.title(titles[i])
22 | plt.xticks([]),plt.yticks([])
23 |
24 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-open-1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_white.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Morphological opening
12 | dst = cv.morphologyEx(source, cv.MORPH_OPEN, kernel)
13 |
14 | # Display the results
15 | titles = ['Source Img','Dst Img']
16 | images = [source, dst]
17 |
18 | # Plot with matplotlib
19 | for i in range(2):
20 | plt.subplot(1, 2, i+1), plt.imshow(images[i],'gray')
21 | plt.title(titles[i])
22 | plt.xticks([]),plt.yticks([])
23 |
24 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-close-1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_black.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Morphological closing
12 | dst = cv.morphologyEx(source, cv.MORPH_CLOSE, kernel)
13 |
14 | # Display the results
15 | titles = ['Source Img','Dst Img']
16 | images = [source, dst]
17 |
18 | # Plot with matplotlib
19 | for i in range(2):
20 | plt.subplot(1, 2, i+1), plt.imshow(images[i],'gray')
21 | plt.title(titles[i])
22 | plt.xticks([]),plt.yticks([])
23 |
24 | plt.show()
--------------------------------------------------------------------------------
/python-data-analysis/bilibili/houlang.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 | import wordcloud
4 |
5 | res = requests.get("https://api.bilibili.com/x/player/pagelist?bvid=BV1FV411d7u7&jsonp=jsonp")
6 | cid = res.json()['data'][0]['cid']
7 | print(cid)
8 |
9 | danmu_url = f"https://api.bilibili.com/x/v1/dm/list.so?oid={cid}"
10 | result = requests.get(danmu_url).content.decode('utf-8')
11 | pattern = re.compile('<d.*?>(.*?)</d>')  # each danmaku is a <d ...>text</d> element in the XML
12 | danmu_list = pattern.findall(result)
13 |
14 | wordcloud = wordcloud.WordCloud(font_path='msyh.ttc', width=900, height=1600).generate("".join(danmu_list))
15 | wordcloud.to_file('wordcloud.png')
--------------------------------------------------------------------------------
/python-opencv/blog10-hat/demo-tophat-1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_white.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Top-hat transform
12 | dst = cv.morphologyEx(source, cv.MORPH_TOPHAT, kernel)
13 |
14 | # Display the results
15 | titles = ['Source Img', 'Tophat Img']
16 | images = [source, dst]
17 |
18 | # Plot with matplotlib
19 | for i in range(2):
20 | plt.subplot(1, 2, i+1), plt.imshow(images[i],'gray')
21 | plt.title(titles[i])
22 | plt.xticks([]),plt.yticks([])
23 |
24 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog10-hat/demo-blackhat-1.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_black.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Black-hat transform
12 | dst = cv.morphologyEx(source, cv.MORPH_BLACKHAT, kernel)
13 |
14 | # Build the display arrays
15 | titles = ['Source Img', 'Blackhat Img']
16 | images = [source, dst]
17 |
18 | # Plot with matplotlib
19 | for i in range(2):
20 | plt.subplot(1, 2, i+1), plt.imshow(images[i],'gray')
21 | plt.title(titles[i])
22 | plt.xticks([]),plt.yticks([])
23 |
24 | plt.show()
--------------------------------------------------------------------------------
/base-time/Demo.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | for i in range(0, 5):
4 | print(i)
5 | # time.sleep(1)
6 |
7 | print(time.time())
8 |
9 | print(time.localtime())
10 |
11 | print(time.mktime(time.localtime()))
12 |
13 | print(time.asctime(time.localtime()))
14 |
15 | print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
16 |
17 | print(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()))
18 |
19 | import calendar
20 |
21 | print(calendar.calendar(theyear=2020, w=2, l=1, c=6))
22 |
23 | print(calendar.month(2019, 11))
24 |
25 | print(calendar.monthlen(2019, 11))
26 |
27 | print(calendar.weekday(2019, 11, 7))
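28 |
29 | # calendar.weekday() numbers the days 0 (Monday) through 6 (Sunday).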
--------------------------------------------------------------------------------
/python-data-analysis/mojito/barrage-spder.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 |
4 | # Get the cid
5 | res = requests.get("https://api.bilibili.com/x/player/pagelist?bvid=BV1PK4y1b7dt&jsonp=jsonp")
6 | cid = res.json()['data'][0]['cid']
7 |
8 | # Extract the danmaku from the XML with a regex and build a list
9 | danmu_url = f"https://api.bilibili.com/x/v1/dm/list.so?oid={cid}"
10 | result = requests.get(danmu_url).content.decode('utf-8')
11 | pattern = re.compile('<d.*?>(.*?)</d>')  # each danmaku is a <d ...>text</d> element
12 | danmu_list = pattern.findall(result)
13 |
14 | # Save the danmaku list to a txt file
15 | with open("dan_mu.txt", mode="w", encoding="utf-8") as f:
16 | for item in danmu_list:
17 | f.write(item)
18 | f.write("\n")
--------------------------------------------------------------------------------
/python-opencv/blog7-blur/demo-blur.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread("maliao_noise.jpg", cv.IMREAD_UNCHANGED)
6 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Mean blur
9 | blur_img = cv.blur(rgb_img, (3, 3))
10 | # blur_img = cv.blur(rgb_img, (5, 5))
11 | # blur_img = cv.blur(rgb_img, (10, 10))
12 | # blur_img = cv.blur(rgb_img, (20, 20))
13 |
14 | titles = ['Source Image', 'Blur Image']
15 | images = [rgb_img, blur_img]
16 |
17 | for i in range(2):
18 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
19 | plt.title(titles[i])
20 | plt.xticks([]), plt.yticks([])
21 |
22 | plt.show()
--------------------------------------------------------------------------------
/python-spider/selenium-demo/get_data_demo.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.chrome.options import Options
3 |
4 | # Create a launch options object
5 | chrome_options = Options()
6 | # Set the browser window size
7 | chrome_options.add_argument('--window-size=1366,768')
8 | # Start the browser
9 | driver = webdriver.Chrome(chrome_options=chrome_options)
10 | url = 'https://www.geekdigging.com/'
11 | driver.get(url)
12 | title = driver.find_element_by_xpath('//*[@id="text-4"]/div/div/div[1]/div[2]/a')
13 | print(title)
14 | # Get attribute values
15 | print(title.get_attribute('href'))
16 | # Get the text content
17 | print(title.text)
18 | # Get the position
19 | print(title.location)
20 | # Get the size
21 | print(title.size)
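22 |
23 | # Note: find_element_by_xpath is the Selenium 3 API; Selenium 4 replaces it
24 | # with driver.find_element(By.XPATH, ...).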
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/moments.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("number.png")
4 |
5 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
6 | # Threshold to reduce noise
7 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
8 | # Find contours on the thresholded image
9 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
10 |
11 | cnt = contours[0]
12 | # Image moments
13 | M = cv.moments(cnt)
14 | print(M)
15 |
16 | # Centroid
17 | cx = int(M['m10'] / M['m00'])
18 | cy = int(M['m01'] / M['m00'])
19 |
20 | print(f'Contour centroid: [{cx}, {cy}]')
21 |
22 | # Contour area
23 | area = cv.contourArea(cnt)
24 | print(f'Contour area: {area}')
25 |
26 | # Contour perimeter
27 | perimeter = cv.arcLength(cnt, True)
28 | print(f'Contour perimeter: {perimeter}')
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/convex.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("number.png")
4 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
5 | # Threshold to reduce noise
6 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
7 | # Find contours on the thresholded image
8 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
9 | cnt = contours[0]
10 | # Draw the contours
11 | image = cv.cvtColor(gray_img, cv.COLOR_GRAY2BGR)
12 | cv.drawContours(image, contours, -1, (0, 0, 255), 2)
13 |
14 | # Find the convex hull and its corner points
15 | hull = cv.convexHull(cnt)
16 |
17 | # Draw the convex hull
18 | cv.polylines(image, [hull], True, (0, 255, 0), 2)
19 |
20 | print(cv.isContourConvex(hull))
21 |
22 | cv.imshow("image", image)
23 | cv.waitKey(0)
24 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog8-erode/quanjiafu.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 |
5 | # Read the image
6 | source = cv.imread('demo.png', cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Erode the image
12 | erode_img = cv.erode(source, kernel)
13 |
14 | # Dilate the image
15 | dilate_result = cv.dilate(source, kernel)
16 |
17 | # Display the results
18 | titles = ['Source Img','Erode Img','Dilate Img']
19 | images = [source, erode_img, dilate_result]
20 |
21 | # Plot with matplotlib
22 | for i in range(3):
23 | plt.subplot(1, 3, i+1), plt.imshow(images[i],'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]),plt.yticks([])
26 |
27 | plt.show()
--------------------------------------------------------------------------------
/base-data-number/Demo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 |
3 | print(type(123))
4 |
5 | print(type(123.0))
6 |
7 | print(type('123'))
8 |
9 | print(type("123"))
10 |
11 | print(isinstance(123, int))
12 |
13 | print(isinstance(123.0, float))
14 |
15 | print(isinstance('123', str))
16 |
17 | print(10**1000)
18 |
19 | print(0b10101010101)
20 |
21 | print(0o12345670)
22 |
23 | print(0xdb273dc)
24 |
25 | print(0xDB273DC)
26 |
27 | print(1/2)
28 |
29 | print(1/3)
30 |
31 | print(1/6)
32 |
33 | print(1+1j)
34 |
35 | print(type(1+1j))
36 |
37 | print((2.46+1.37j).real)
38 |
39 | print((2.46+1.37j).imag)
40 |
41 | print(123 == 123.0)
42 |
43 | print(123 == '123')
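44 |
45 | # int and float compare by value (123 == 123.0 is True), while int and str
46 | # never compare equal (123 == '123' is False).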
--------------------------------------------------------------------------------
/python-opencv/blog5-resize/demo-flip.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image and convert BGR to RGB
5 | img = cv.imread('maliao.jpg')
6 | src = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Flip the image
9 | # flipCode == 0 flips around the X axis, flipCode > 0 around the Y axis, and flipCode < 0 around both axes at once
10 | img1 = cv.flip(src, 0)
11 | img2 = cv.flip(src, 1)
12 | img3 = cv.flip(src, -1)
13 |
14 | # Display with plt
15 | titles = ['Source', 'Img1', 'Img2', 'Img3']
16 | images = [src, img1, img2, img3]
17 |
18 | for i in range(4):
19 | plt.subplot(2, 2, i + 1)
20 | plt.imshow(images[i])
21 | plt.title(titles[i])
22 | plt.xticks([])
23 | plt.yticks([])
24 |
25 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-open.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_white.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Erode the image
12 | erode_img = cv.erode(source, kernel)
13 |
14 | # Dilate the image
15 | dilate_result = cv.dilate(erode_img, kernel)
16 |
17 | # Display the results
18 | titles = ['Source Img','Erode Img','Dilate Img']
19 | images = [source, erode_img, dilate_result]
20 |
21 | # Plot with matplotlib
22 | for i in range(3):
23 | plt.subplot(1, 3, i+1), plt.imshow(images[i],'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]),plt.yticks([])
26 |
27 | plt.show()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/BarDemo2.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # Avoid garbled Chinese characters in labels
5 | plt.rcParams['font.sans-serif']=['SimHei']
6 |
7 | x_data = np.array([2011,2012,2013,2014,2015,2016,2017])
8 | y_data = np.array([58000,60200,63000,71000,84000,90500,107000])
9 | y_data_1 = np.array([78000,80200,93000,101000,64000,70500,87000])
10 |
11 | plt.title(label='xxx 公司 xxx 产品销量')
12 |
13 | plt.bar(x_data, y_data, width=0.3, alpha=0.6, facecolor = 'pink', edgecolor = 'blue', lw=1, label='产品销量')
14 | plt.bar(x_data + 0.3, y_data_1, width=0.3, alpha=0.6, facecolor = 'blue', edgecolor = 'blue', lw=1, label='用户增长数')
15 |
16 | plt.legend()
17 |
18 | plt.savefig("bar_demo_2.png")
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-close.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_black.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Dilate the image
12 | dilate_result = cv.dilate(source, kernel)
13 |
14 | # Erode the image
15 | erode_img = cv.erode(dilate_result, kernel)
16 |
17 | # Display the results
18 | titles = ['Source Img','Dilate Img','Erode Img']
19 | images = [source, dilate_result, erode_img]
20 |
21 | # Plot with matplotlib
22 | for i in range(3):
23 | plt.subplot(1, 3, i+1), plt.imshow(images[i],'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]),plt.yticks([])
26 |
27 | plt.show()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/BarDemo3.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # Avoid garbled Chinese characters in labels
5 | plt.rcParams['font.sans-serif']=['SimHei']
6 |
7 | x_data = np.array([2011,2012,2013,2014,2015,2016,2017])
8 | y_data = np.array([58000,60200,63000,71000,84000,90500,107000])
9 | y_data_1 = np.array([78000,80200,93000,101000,64000,70500,87000])
10 |
11 | plt.title(label='xxx 公司 xxx 产品销量')
12 |
13 | plt.bar(x_data, y_data, width=0.3, alpha=0.6, facecolor = 'pink', edgecolor = 'blue', lw=1, label='产品销量')
14 | plt.bar(x_data, y_data_1, bottom=y_data, width=0.3, alpha=0.6, facecolor = 'blue', edgecolor = 'blue', lw=1, label='用户增长数')
15 |
16 | plt.legend()
17 |
18 | plt.savefig("bar_demo_3.png")
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts_map/ChinaMap.py:
--------------------------------------------------------------------------------
1 | from pyecharts import options as opts
2 | from pyecharts.charts import Map
3 | from pyecharts.faker import Faker
4 | from pyecharts.globals import ThemeType
5 |
6 | c = (
7 | Map(init_opts=opts.InitOpts(
8 | theme=ThemeType.DARK,
9 | bg_color='#404a59',
10 | width='1600px',
11 | height='900px'
12 | ))
13 | .add(
14 | "中国地图",
15 | [list(z) for z in zip(Faker.provinces, Faker.values())],
16 | "china"
17 | )
18 | .set_global_opts(
19 | title_opts=opts.TitleOpts(title="中国地图-示例"),
20 | visualmap_opts=opts.VisualMapOpts(),
21 | )
22 | .render("china_map.html")
23 | )
--------------------------------------------------------------------------------
/data_structure/Stack.py:
--------------------------------------------------------------------------------
1 | class Stack(object):
2 |
3 |     def __init__(self):
4 |         '''
5 |         Implement the stack with an empty list
6 |         '''
7 |         self.__list = []
8 |
9 |     def is_empty(self):
10 |         '''
11 |         Check whether the stack is empty
12 |         :return:
13 |         '''
14 |         return self.__list == []
15 |
16 |     def push(self, item):
17 |         '''
18 |         Push: add an element to the top
19 |         :param item:
20 |         :return:
21 |         '''
22 |         self.__list.append(item)
23 |
24 |     def pop(self):
25 |         '''
26 |         Pop: remove and return the top element
27 |         :return:
28 |         '''
29 |         if self.is_empty():
30 |             return
31 |         else:
32 |             return self.__list.pop()
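33 |
34 | # Usage sketch (illustrative only, not part of the original file):
35 | # s = Stack()
36 | # s.push(1)
37 | # s.push(2)
38 | # print(s.pop())       # -> 2
39 | # print(s.is_empty())  # -> False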
--------------------------------------------------------------------------------
/python-opencv/blog10-hat/demo-tophat.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_white.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Opening
12 | open = cv.morphologyEx(source, cv.MORPH_OPEN, kernel)
13 |
14 | # Top-hat transform
15 | dst = cv.morphologyEx(source, cv.MORPH_TOPHAT, kernel)
16 |
17 | # Display the results
18 | titles = ['Source Img','Open Img', 'Tophat Img']
19 | images = [source, open, dst]
20 |
21 | # Plot with matplotlib
22 | for i in range(3):
23 | plt.subplot(1, 3, i+1), plt.imshow(images[i],'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]),plt.yticks([])
26 |
27 | plt.show()
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://docs.scrapy.org/en/latest/topics/items.html
7 |
8 | import scrapy
9 |
10 |
11 | class QuoteItem(scrapy.Item):
12 | text = scrapy.Field()
13 | author = scrapy.Field()
14 | tags = scrapy.Field()
15 |
16 | class ImageItem(scrapy.Item):
17 | collection = table = 'image'
18 | id = scrapy.Field()
19 | url = scrapy.Field()
20 | title = scrapy.Field()
21 | thumb = scrapy.Field()
22 |
23 | class FirstScrapyItem(scrapy.Item):
24 | # define the fields for your item here like:
25 | # name = scrapy.Field()
26 | pass
27 |
--------------------------------------------------------------------------------
/spider-blog/tongji/src/main/resources/mybatis/mybatis-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/python-opencv/blog10-hat/demo-blackhat.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo_noise_black.jpg", cv.IMREAD_GRAYSCALE)
7 |
8 | # Build the convolution kernel
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Closing
12 | close = cv.morphologyEx(source, cv.MORPH_CLOSE, kernel)
13 |
14 | # Black-hat transform
15 | dst = cv.morphologyEx(source, cv.MORPH_BLACKHAT, kernel)
16 |
17 | # Build the display arrays
18 | titles = ['Source Img','Close Img', 'Blackhat Img']
19 | images = [source, close, dst]
20 |
21 | # Plot with matplotlib
22 | for i in range(3):
23 | plt.subplot(1, 3, i+1), plt.imshow(images[i],'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]),plt.yticks([])
26 |
27 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/rect.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 |
4 | img = cv.imread("number.png")
5 |
6 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
7 | # Threshold to reduce noise
8 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
9 | # Find contours on the thresholded image
10 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
11 |
12 | cnt = contours[0]
13 |
14 | # Upright bounding rectangle
15 | x, y, w, h = cv.boundingRect(cnt)
16 | cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
17 |
18 | # Minimum-area bounding rectangle
19 | min_rect = cv.minAreaRect(cnt)
20 | print(min_rect)
21 |
22 | box = cv.boxPoints(min_rect)
23 | box = np.int0(box)
24 | cv.drawContours(img, [box], 0, (0, 0, 255), 2)
25 |
26 | cv.imshow("draw", img)
27 |
28 | cv.waitKey(0)
29 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog12-sobel/demo-laplacian.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread('maliao.jpg')
6 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Convert the image to grayscale
9 | grayImage = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
10 |
11 | # Laplacian
12 | dst = cv.Laplacian(grayImage, cv.CV_16S, ksize = 3)
13 | Laplacian = cv.convertScaleAbs(dst)
14 |
15 | # Needed to display Chinese labels correctly
16 | plt.rcParams['font.sans-serif'] = ['SimHei']
17 |
18 | # Display the results
19 | titles = ['原始图像', 'Laplacian 算子']
20 | images = [rgb_img, Laplacian]
21 |
22 | for i in range(2):
23 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]), plt.yticks([])
26 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog13-scharr/demo-log.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image
5 | img = cv.imread("maliao.jpg")
6 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
9 |
10 | # First denoise with a Gaussian blur
11 | gaussian = cv.GaussianBlur(gray_img, (3, 3), 0)
12 |
13 | # Then run edge detection with the Laplacian operator
14 | dst = cv.Laplacian(gaussian, cv.CV_16S, ksize=3)
15 | LOG = cv.convertScaleAbs(dst)
16 |
17 | # Needed to display Chinese labels correctly
18 | plt.rcParams['font.sans-serif'] = ['SimHei']
19 |
20 | # Display the results
21 | titles = ['原始图像', 'LOG 算子']
22 | images = [rgb_img, LOG]
23 |
24 | for i in range(2):
25 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
26 | plt.title(titles[i])
27 | plt.xticks([]), plt.yticks([])
28 | plt.show()
29 |
--------------------------------------------------------------------------------
/base-iter/Demo.py:
--------------------------------------------------------------------------------
1 | from collections.abc import Iterable
2 |
3 | print(isinstance('geekdigging', Iterable))
4 | print(isinstance([], Iterable))
5 | print(isinstance([], Iterable))
6 | print(isinstance({x for x in range(5)}, Iterable))
7 | print(isinstance(123, Iterable))
8 |
9 | list1 = [1, 2, 3, 4]
10 | # Wrong usage: a list is iterable but not an iterator
11 | # next(list1)
12 |
13 | from collections.abc import Iterator
14 |
15 | list1 = iter(list1)
16 | print(type(list1))
17 |
18 | print(next(list1))
19 | print(next(list1))
20 | print(next(list1))
21 | print(next(list1))
22 | # Raises StopIteration
23 | # print(next(list1))
24 |
25 | set1 = {1, 2, 3, 4, 5}
26 | set1 = iter(set1)
27 | print(next(set1))
28 | print(next(set1))
29 | print(next(set1))
30 | print(next(set1))
31 | print(next(set1))
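32 |
33 | # Note: sets are unordered, so the order in which next() yields these elements is not guaranteed.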
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/minEnclosingCircle.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("number.png")
4 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
5 | # Threshold to reduce noise
6 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
7 | # Find contours on the thresholded image
8 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
9 | cnt = contours[0]
10 |
11 | # Draw the minimum enclosing circle
12 | # (x, y), radius = cv.minEnclosingCircle(cnt)
13 | # center = (int(x), int(y))
14 | # radius = int(radius)
15 | # cv.circle(img, center, radius, (0, 255, 0), 2)
16 | #
17 | # cv.imshow("img", img)
18 | # cv.waitKey(0)
19 | # cv.destroyAllWindows()
20 | # Draw the fitted ellipse
21 | ellipse = cv.fitEllipse(cnt)
22 | cv.ellipse(img, ellipse, (0, 255, 0), 2)
23 |
24 | cv.imshow("img", img)
25 | cv.waitKey(0)
26 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/XYDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | plt.rcParams['font.sans-serif']=['SimHei']
4 |
5 | x_data = [2011,2012,2013,2014,2015,2016,2017]
6 | y_data = [58000,60200,63000,71000,84000,90500,107000]
7 |
8 | # plt.xlabel('年份', labelpad=50, fontsize='xx-large', fontweight='bold', rotation='vertical', backgroundcolor='red')
9 | # plt.ylabel('销量', labelpad=50)
10 |
11 | # plt.xticks(x_data)
12 | # plt.yticks(y_data)
13 |
14 | # plt.xlim(2011, 2020)
15 | # plt.ylim(50000, 90000)
16 |
17 | plt.title(label='xxx 公司 xxx 产品销量')
18 |
19 | plt.plot(x_data, y_data, label = '折线图')
20 | plt.bar(x_data, y_data, label = '柱状图')
21 |
22 | plt.legend()
23 |
24 | # plt.axis("off")
25 |
26 | # plt.grid(b=True, axis='x')
27 |
28 | plt.show()
--------------------------------------------------------------------------------
/python-opencv/blog13-scharr/demo-scharr.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | img = cv.imread("maliao.jpg")
5 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
6 |
7 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
8 |
9 | # Scharr operator
10 | x = cv.Scharr(gray_img, cv.CV_16S, 1, 0)  # X direction
11 | y = cv.Scharr(gray_img, cv.CV_16S, 0, 1)  # Y direction
12 | absX = cv.convertScaleAbs(x)
13 | absY = cv.convertScaleAbs(y)
14 | Scharr = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
15 |
16 | # Display the results
17 | plt.rcParams['font.sans-serif']=['SimHei']
18 |
19 | titles = ['原始图像', 'Scharr 算子']
20 | images = [rgb_img, Scharr]
21 |
22 | for i in range(2):
23 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]), plt.yticks([])
26 | plt.show()
27 |
--------------------------------------------------------------------------------
/python-spider/scrapy_splash_demo/scrapy_splash_demo/spiders/jd.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 | from scrapy_splash import SplashRequest
4 |
5 |
6 | lua_script = """
7 | function main(splash, args)
8 | splash:go(args.url)
9 | return {
10 | url = splash:url(),
11 | jpeg = splash:jpeg(),
12 | har = splash:har(),
13 | cookies = splash:get_cookies()
14 | }
15 | end
16 | """
17 |
18 |
19 | class JdSpider(scrapy.Spider):
20 | name = 'jd'
21 | allowed_domains = ['www.jd.com']
22 | start_urls = ['http://www.jd.com/']
23 |
24 | def start_requests(self):
25 | url = 'https://www.jd.com/'
26 | yield SplashRequest(url=url, callback=self.parse)
27 |
28 | def parse(self, response):
29 | self.logger.debug(response.text)
30 |
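31 | # Note: SplashRequest requires the scrapy-splash middleware to be enabled in
32 | # settings.py (SPLASH_URL, the splash downloader/spider middlewares and its
33 | # dupefilter); this project is assumed to configure that there (not shown).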
--------------------------------------------------------------------------------
/base-data-dict/Demo.py:
--------------------------------------------------------------------------------
1 | dict1 = {'name': 'geekdigging', 'age': 2}
2 | print(dict1)
3 | print(type(dict1))
4 |
5 | dict2 = {(1, 2, 3): '123', 'name': 'geekdigging', 2: [1, 2, 3]}
6 | print(dict2)
7 | print(type(dict2))
8 |
9 | dict3 = dict(name = 'geekdigging', age = 2)
10 | print(dict3)
11 | print(type(dict3))
12 |
13 | # Syntax error, kept commented out
14 | # dict4 = dict(1 = 'geekdigging', 2 = 2)
15 |
16 | print(dict1['name'])
17 | # Looking up a missing key raises KeyError
18 | # print(dict1['geekdigging'])
19 |
20 | str = 'geekdigging'  # note: this shadows the built-in str; fine only in a demo
21 |
22 | if str in dict1:
23 |     print(dict1['geekdigging'])
24 | else:
25 |     print('The key', str, 'does not exist')
26 |
27 | # Add a key
28 | dict1['a'] = 18
29 | print(dict1)
30 | # Update a key
31 | dict1['name'] = 'www.geekdigging.com'
32 | print(dict1)
33 | # Delete a key
34 | del dict1['a']
35 | print(dict1)
--------------------------------------------------------------------------------
/python-spider/selenium-demo/Demo1.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from selenium.webdriver.common.keys import Keys
3 |
4 | browser = webdriver.Chrome()
5 |
6 | browser.get('https://www.baidu.com')
7 | input_box = browser.find_element_by_id('kw')
8 | input_box.send_keys('极客挖掘机')
9 | input_box.send_keys(Keys.ENTER)
10 | print(browser.current_url)
11 | print(browser.get_cookies())
12 | print(browser.page_source)
13 | # Close the browser
14 | browser.close()
15 |
16 | # Declaring a browser object requires the matching driver to be available
17 | # browser = webdriver.Android()
18 | # browser = webdriver.BlackBerry()
19 | # browser = webdriver.Chrome()
20 | # browser = webdriver.Edge()
21 | # browser = webdriver.Firefox()
22 | # browser = webdriver.Ie()
23 | # browser = webdriver.Opera()
24 | # browser = webdriver.PhantomJS()
25 | # browser = webdriver.Safari()
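26 |
27 | # Note: Selenium 4 removed find_element_by_id; the modern equivalent is:
28 | # from selenium.webdriver.common.by import By
29 | # input_box = browser.find_element(By.ID, 'kw')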
--------------------------------------------------------------------------------
/python-spider/splash-demo/demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | url = 'http://localhost:8050/render.html?url=https://www.jd.com'
4 | response = requests.get(url)
5 | print(response.text)
6 |
7 | url = 'http://localhost:8050/render.png?url=https://www.jd.com&width=1000&height=700'
8 | response = requests.get(url)
9 | with open('jd.png', 'wb') as f:
10 | f.write(response.content)
11 |
12 | url = 'http://localhost:8050/render.har?url=https://www.jd.com'
13 | response = requests.get(url)
14 | print(response.text)
15 |
16 | url = 'http://localhost:8050/render.json?url=https://httpbin.org/get'
17 | response = requests.get(url)
18 | print(response.text)
19 |
20 | url = 'http://localhost:8050/render.json?url=https://httpbin.org/get&html=1&har=1'
21 | response = requests.get(url)
22 | print(response.text)
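23 |
24 | # Note: all of the above assumes a Splash instance listening on
25 | # localhost:8050, e.g. one started with:
26 | # docker run -p 8050:8050 scrapinghub/splash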
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/FirstMatplotlibDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 |
4 | # fig = plt.figure(figsize=(6, 6))
5 | #
6 | # ax1 = fig.add_subplot(2,2,1)
7 | # ax2 = fig.add_subplot(2,2,2)
8 | # ax3 = fig.add_subplot(2,2,3)
9 | # ax4 = fig.add_subplot(2,2,4)
10 |
11 |
12 | x = np.arange(4)
13 | y = np.arange(4)
14 |
15 | # Draw a line chart
16 | plt.subplot2grid((2,2),(0,0))
17 | plt.plot(x, y)
18 |
19 | # Draw a bar chart
20 | plt.subplot2grid((2,2),(0,1))
21 | plt.bar(x, y)
22 |
23 | # plt.show()
24 |
25 | # Draw a line chart
26 | plt.subplot(221)
27 | plt.plot(x, y)
28 |
29 | # Draw a bar chart
30 | plt.subplot(222)
31 | plt.bar(x, y)
32 |
33 | # plt.show()
34 |
35 | fig, axes = plt.subplots(2, 2)
36 | # Draw a line chart
37 | axes[0,0].plot(x,y)
38 | # Draw a bar chart
39 | axes[0,1].bar(x,y)
40 | plt.show()
--------------------------------------------------------------------------------
/spider-blog/tongji/src/main/java/com/geekdigging/tongji/model/SpiderDataModel.java:
--------------------------------------------------------------------------------
1 | package com.geekdigging.tongji.model;
2 |
3 | import lombok.AllArgsConstructor;
4 | import lombok.Data;
5 | import lombok.NoArgsConstructor;
6 |
7 | import java.io.Serializable;
8 | import java.util.Date;
9 |
10 | /**
11 | * Created with IntelliJ IDEA.
12 | *
13 | * @Date: 2019/11/2
14 | * @Time: 17:12
15 | * @email: inwsy@hotmail.com
16 | * Description:
17 | */
18 | @Data
19 | @AllArgsConstructor
20 | @NoArgsConstructor
21 | public class SpiderDataModel implements Serializable {
22 | private String id;
23 | private String plantform;
24 | private int read_num;
25 | private int fans_num;
26 | private int rank_num;
27 | private int like_num;
28 | private Date create_date;
29 | }
30 |
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-gradient.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | source = cv.imread("demo.png", cv.IMREAD_GRAYSCALE)
7 |
8 | # Define the structuring element (kernel)
9 | kernel = np.ones((5, 5), np.uint8)
10 |
11 | # Dilation
12 | dilate = cv.morphologyEx(source, cv.MORPH_DILATE, kernel)
13 |
14 | # Erosion
15 | erode = cv.morphologyEx(source, cv.MORPH_ERODE, kernel)
16 |
17 | # Morphological gradient
18 | gradient = cv.morphologyEx(source, cv.MORPH_GRADIENT, kernel)
19 |
20 | # Display results
21 | titles = ['Source Img', 'Dilate Img', 'Erode Img', 'Gradient Img']
22 | images = [source, dilate, erode, gradient]
23 |
24 | # Plot with matplotlib
25 | for i in range(4):
26 | plt.subplot(1, 4, i+1), plt.imshow(images[i],'gray')
27 | plt.title(titles[i])
28 | plt.xticks([]),plt.yticks([])
29 |
30 | plt.show()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/PieDemo.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Display Chinese characters and minus signs correctly
4 | plt.rcParams['font.sans-serif']=['SimHei']
5 | plt.rcParams['axes.unicode_minus'] = False
6 |
7 | # Data
8 | edu = [0.2515,0.3724,0.3336,0.0368,0.0057]
9 | labels = ['中专','大专','本科','硕士','其他']
10 |
11 | # Offset the bachelor's-degree ('本科') slice away from the center
12 | explode = [0,0,0.1,0,0]
13 |
14 | # Normalize the axes so the pie is a perfect circle rather than an ellipse
15 | plt.axes(aspect='equal')
16 |
17 | # Custom colors
18 | colors = ['#9999ff','#ff9999','#7777aa','#2442aa','#dd5555']
19 |
20 | # Draw the pie chart
21 | plt.pie(x=edu,             # plot data
22 |         explode=explode,   # offset the bachelor's-degree slice
23 |         labels=labels,     # education-level labels
24 |         colors=colors,     # custom fill colors
25 |         autopct='%.1f%%',  # percentage format, one decimal place
26 |         )
27 |
28 | # Add the chart title
29 | plt.title('xxx 公司员工教育水平分布')
30 |
31 | # Save the figure
32 | plt.savefig('pie_demo.png')
--------------------------------------------------------------------------------
/python-spider/urllib-request/Demo_Request.py:
--------------------------------------------------------------------------------
1 | import urllib.request, urllib.parse
2 | import json
3 | request = urllib.request.Request('https://www.geekdigging.com/')
4 | response = urllib.request.urlopen(request)
5 | # print(response.read().decode('utf-8'))
6 |
7 | url = 'https://httpbin.org/post'
8 | headers = {
9 |     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
10 |     'Content-Type': 'application/json; charset=utf-8',
11 |     'Host': 'httpbin.org'
12 | }
13 | data = {
14 | 'name': 'geekdigging',
15 | 'hello':'world'
16 | }
17 | data = bytes(json.dumps(data), encoding='utf8')
18 | req = urllib.request.Request(url=url, data=data, headers=headers, method='POST')
19 | resp = urllib.request.urlopen(req)
20 | print(resp.read().decode('utf-8'))
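21 |
22 | # For a form-encoded POST the payload would instead be built with
23 | # urllib.parse.urlencode, e.g.:
24 | # data = bytes(urllib.parse.urlencode({'name': 'geekdigging'}), encoding='utf8')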
--------------------------------------------------------------------------------
/base-process/Demo1.py:
--------------------------------------------------------------------------------
1 | happy = 0
2 |
3 | while happy < 10:
4 | print("学习使我快乐,快乐 + 1,当前快乐值为:", happy)
5 | happy += 1
6 |
7 | print("我不快乐了")
8 |
9 |
10 | # happy = 0
11 | #
12 | # while True:
13 | # print("学习使大佬快乐,快乐 + 1,当前快乐值为:", happy)
14 | # happy += 1
15 |
16 | for index in "Python":
17 | print(index)
18 |
19 | for index in range(5):
20 | print(index)
21 |
22 | for index in range(0, 10, 3):
23 | print(index)
24 |
25 | happy = 0
26 |
27 | while happy < 10:
28 | happy += 1
29 | if happy == 5:
30 | break
31 | print("学习使我快乐,快乐 + 1,当前快乐值为:", happy)
32 |
33 | print("还是开黑更快乐一些~~~")
34 |
35 | happy = 0
36 |
37 | while happy < 10:
38 | happy += 1
39 | if happy == 5:
40 | continue
41 | print("学习使我快乐,快乐 + 1,当前快乐值为:", happy)
42 |
43 | print("还是学习会更快乐~~~")
--------------------------------------------------------------------------------
/python-spider/requests-demo/get_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | r = requests.get('https://httpbin.org/get')
4 | print(r.text)
5 |
6 | params = {
7 | 'name': 'geekdigging',
8 | 'age': '18'
9 | }
10 |
11 | r1 = requests.get('https://httpbin.org/get', params)
12 | print(r1.text)
13 |
14 | print(type(r1.text))
15 | print(r1.json())
16 | print(type(r1.json()))
17 |
18 | headers = {
19 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
20 | 'referer': 'https://www.geekdigging.com/'
21 | }
22 | r2 = requests.get('https://httpbin.org/get', headers = headers)
23 | print(r2.text)
24 |
25 | r3 = requests.get("https://www.baidu.com/img/superlogo_c4d7df0a003d3db9b65e9ef0fe6da1ec.png")
26 | with open('baidu_logo.png', 'wb') as f:
27 | f.write(r3.content)
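28 |
29 | # Note: r3.content holds the raw response bytes (what a binary file needs),
30 | # while r3.text would decode them to str and corrupt the image data.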
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/DataPre.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | # Relative path
4 | df = pd.read_excel("result_data.xlsx")
5 | print(df)
6 |
7 | print(df.info())
8 |
9 | # Show all rows
10 | # pd.set_option('display.max_rows', None)
11 | print(df.isnull())
12 |
13 | print(df.dropna())
14 |
15 | print(df.dropna(how="any"))
16 |
17 | print(df.fillna(0))
18 |
19 | print(df.fillna({'read_num': 10}))
20 |
21 | print(df)
22 | print(df.drop_duplicates())
23 |
24 | print(df.drop_duplicates(subset='read_num'))
25 |
26 | print(df.drop_duplicates(subset='plantform', keep='last'))
27 |
28 | print(df.dtypes)
29 |
30 | print(df['read_num'].dtypes)
31 |
32 | print(df['fans_num'].astype('float64'))
33 |
34 | df1 = pd.read_excel("demo.xlsx")
35 | print(df1)
36 |
37 | df1.columns = ['编号', '序号', '姓名', '消费金额']
38 | print(df1)
39 |
40 | print(df1.set_index('编号'))
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts/Graph_base_demo.py:
--------------------------------------------------------------------------------
1 | from pyecharts import options as opts
2 | from pyecharts.charts import Graph
3 |
4 | nodes = [
5 | {"name": "肖恩", "symbolSize": 10},
6 | {"name": "海棠朵朵", "symbolSize": 20},
7 | {"name": "长公主", "symbolSize": 30},
8 | {"name": "陈萍萍", "symbolSize": 40},
9 | {"name": "范闲", "symbolSize": 50},
10 | {"name": "林婉儿", "symbolSize": 40},
11 | {"name": "庆帝", "symbolSize": 30},
12 | {"name": "范若若", "symbolSize": 20},
13 | {"name": "司理理", "symbolSize": 10}
14 | ]
15 | links = []
16 | for i in nodes:
17 | for j in nodes:
18 | links.append({"source": i.get("name"), "target": j.get("name")})
19 | c = (
20 | Graph()
21 | .add("", nodes, links, repulsion=8000)
22 | .set_global_opts(title_opts=opts.TitleOpts(title="庆余年人物关系图"))
23 | .render("graph_base.html")
24 | )
25 |
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/ExportDemo.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | # Read the data
4 | df = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet1')
5 |
6 | # Export the data
7 | df.to_excel(excel_writer=r'D:\Development\Projects\demo.xlsx')
8 |
9 | df.to_excel(excel_writer=r'D:\Development\Projects\demo.xlsx',
10 |             sheet_name='测试文档', # sheet name to create
11 |             index=False, # do not write the index
12 |             columns=['编号', '姓名'], # columns to export
13 |             encoding='utf-8', # encoding
14 |             na_rep='0', # representation for missing values
15 |             inf_rep='inf' # representation for infinite values
16 |             )
17 |
18 | df.to_csv(path_or_buf=r'D:\Development\Projects\demo.csv', # export path
19 |           index=False, # do not write the index
20 |           sep=',', # field separator
21 |           na_rep='0', # representation for missing values
22 |           columns=['编号', '姓名'], # columns to export
23 |           encoding='utf-8', # encoding
24 |           )
--------------------------------------------------------------------------------
/spider-blog/tongji/src/main/java/com/geekdigging/tongji/mapper/SpiderDataMapper.java:
--------------------------------------------------------------------------------
1 | package com.geekdigging.tongji.mapper;
2 |
3 | import com.geekdigging.tongji.model.SpiderDataModel;
4 | import org.apache.ibatis.annotations.Param;
5 |
6 | import java.util.Date;
7 | import java.util.List;
8 |
9 | /**
10 | * Created with IntelliJ IDEA.
11 | *
12 | * @Date: 2019/11/2
13 | * @Time: 17:15
14 | * @email: inwsy@hotmail.com
15 | * Description:
16 | */
17 | public interface SpiderDataMapper {
18 | // Query incremental data
19 | List<SpiderDataModel> getIncrementalData(@Param("plantform") String plantform, @Param("start_date") Date start_date, @Param("end_date") Date end_date);
20 |
21 | // Query accumulated data
22 | List<SpiderDataModel> getAccumulatedData(@Param("plantform") String plantform, @Param("start_date") Date start_date, @Param("end_date") Date end_date);
23 | }
24 |
--------------------------------------------------------------------------------
/python-opencv/blog12-sobel/demo-sobel.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import matplotlib.pyplot as plt
3 |
4 | # Read the image (imread expects an IMREAD_* flag, not cv.COLOR_BGR2GRAY)
5 | img = cv.imread('maliao.jpg')
6 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
7 |
8 | # Convert to grayscale
9 | grayImage = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
10 |
11 | # Sobel operator
12 | x = cv.Sobel(grayImage, cv.CV_16S, 1, 0)
13 | y = cv.Sobel(grayImage, cv.CV_16S, 0, 1)
14 |
15 | # Convert back to uint8 and blend the two gradient images
16 | absX = cv.convertScaleAbs(x)
17 | absY = cv.convertScaleAbs(y)
18 | Sobel = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
19 |
20 | # Display Chinese labels correctly
21 | plt.rcParams['font.sans-serif'] = ['SimHei']
22 |
23 | # Display the results
24 | titles = ['原始图像', 'Sobel 算子']
25 | images = [rgb_img, Sobel]
26 |
27 | for i in range(2):
28 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
29 | plt.title(titles[i])
30 | plt.xticks([]), plt.yticks([])
31 | plt.show()
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/quotes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 | from first_scrapy.items import QuoteItem
4 |
5 | class QuotesSpider(scrapy.Spider):
6 | name = 'quotes'
7 | allowed_domains = ['quotes.toscrape.com']
8 | start_urls = ['http://quotes.toscrape.com/']
9 |
10 | def parse(self, response):
11 | quotes = response.css('.quote')
12 | for quote in quotes:
13 | item = QuoteItem()
14 | item['text'] = quote.css('.text::text').extract_first()
15 | item['author'] = quote.css('.author::text').extract_first()
16 | item['tags'] = quote.css('.tags .tag::text').extract()
17 | yield item
18 |
19 |         next_page = response.css('.pager .next a::attr("href")').extract_first()
20 |         if next_page:
21 |             yield scrapy.Request(url=response.urljoin(next_page), callback=self.parse)
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/PieDemo1.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | # Display Chinese characters and minus signs correctly
4 | plt.rcParams['font.sans-serif']=['SimHei']
5 | plt.rcParams['axes.unicode_minus'] = False
6 |
7 | # Data
8 | edu = [0.2515,0.3724,0.3336,0.0368,0.0057]
9 | labels = ['中专','大专','本科','硕士','其他']
10 |
11 | # Offset the bachelor's-degree ('本科') slice away from the center
12 | explode = [0,0,0.1,0,0]
13 |
14 | # Normalize the axes so the pie is a perfect circle rather than an ellipse
15 | plt.axes(aspect='equal')
16 |
17 | # Custom colors
18 | colors = ['#9999ff','#ff9999','#7777aa','#2442aa','#dd5555']
19 |
20 | # Draw the pie chart
21 | plt.pie(x=edu,             # plot data
22 |         explode=explode,   # offset the bachelor's-degree slice
23 |         labels=labels,     # education-level labels
24 |         colors=colors,     # custom fill colors
25 |         autopct='%.1f%%',  # percentage format, one decimal place
26 |         wedgeprops={'width': 0.3, 'edgecolor': 'green'}
27 |         )
28 |
29 | # Add the chart title
30 | plt.title('xxx 公司员工教育水平分布')
31 |
32 | # Save the figure
33 | plt.savefig('pie_demo1.png')
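34 |
35 | # Note: a 'width' below 1 in wedgeprops is what turns the pie into a
36 | # donut (ring) chart; 'edgecolor' outlines each wedge.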
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts_map/GlobeMap.py:
--------------------------------------------------------------------------------
1 | import pyecharts.options as opts
2 | from pyecharts.charts import MapGlobe
3 | from pyecharts.faker import POPULATION
4 |
5 |
6 | data = [x for _, x in POPULATION[1:]]
7 | low, high = min(data), max(data)
8 |
9 | c = (
10 | MapGlobe()
11 | .add_schema()
12 | .add(
13 | maptype="world",
14 | series_name="World Population",
15 | data_pair=POPULATION[1:],
16 | is_map_symbol_show=False,
17 | label_opts=opts.LabelOpts(is_show=False),
18 | )
19 | .set_global_opts(
20 | visualmap_opts=opts.VisualMapOpts(
21 | min_=low,
22 | max_=high,
23 | range_text=["max", "min"],
24 | is_calculable=True,
25 | range_color=["lightskyblue", "yellow", "orangered"],
26 | )
27 |
28 | )
29 | .render("map_globe.html")
30 | )
--------------------------------------------------------------------------------
/python-spider/urllib-request/cookies_lwp.txt:
--------------------------------------------------------------------------------
1 | #LWP-Cookies-2.0
2 | Set-Cookie3: BAIDUID="823ED2595594806E60BCE6477089CABB:FG=1"; path="/"; domain=".baidu.com"; path_spec; domain_dot; expires="2020-11-30 02:49:16Z"; comment=bd; version=0
3 | Set-Cookie3: BIDUPSID=823ED2595594806EF45F17B3045C8D5B; path="/"; domain=".baidu.com"; path_spec; domain_dot; expires="2087-12-19 06:03:23Z"; version=0
4 | Set-Cookie3: H_PS_PSSID=1427_21109_30210_26350; path="/"; domain=".baidu.com"; path_spec; domain_dot; discard; version=0
5 | Set-Cookie3: PSTM=1575168556; path="/"; domain=".baidu.com"; path_spec; domain_dot; expires="2087-12-19 06:03:23Z"; version=0
6 | Set-Cookie3: delPer=0; path="/"; domain=".baidu.com"; path_spec; domain_dot; discard; version=0
7 | Set-Cookie3: BDSVRTM=0; path="/"; domain="www.baidu.com"; path_spec; discard; version=0
8 | Set-Cookie3: BD_HOME=0; path="/"; domain="www.baidu.com"; path_spec; discard; version=0
9 |
--------------------------------------------------------------------------------
/python-spider/xpath-demo/xpath_demo.py:
--------------------------------------------------------------------------------
1 | from lxml import etree
2 | import requests
3 |
4 | response = requests.get('https://www.geekdigging.com/')
5 | html_str = response.content.decode('UTF-8')
6 | html = etree.HTML(html_str)
7 | result = etree.tostring(html, encoding = 'UTF-8').decode('UTF-8')
8 | # Output is long, commented out for now
9 | # print(result)
10 |
11 | result_1 = html.xpath('//*')
12 | print(result_1)
13 |
14 | result_2 = html.xpath('//meta')
15 | print(result_2)
16 |
17 | result_3 = html.xpath('//main/article')
18 | print(result_3)
19 |
20 | result_4 = html.xpath('//main//div')
21 | print(result_4)
22 |
23 | result_5 = html.xpath('//img[@alt="小白学 Python 爬虫(16):urllib 实战之爬取妹子图"]/../@href')
24 | print(result_5)
25 |
26 | result_6 = html.xpath('//img[@alt="小白学 Python 爬虫(16):urllib 实战之爬取妹子图"]/parent::*/@href')
27 | print(result_6)
28 |
29 | result_7 = html.xpath('//section/div[@class="container"]')
30 | print(result_7)
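31 |
32 | # Text nodes are selected the same way with the text() node test; a
33 | # hypothetical example against this page:
34 | # print(html.xpath('//main/article//a/text()'))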
--------------------------------------------------------------------------------
/data_structure/StackNode.py:
--------------------------------------------------------------------------------
1 | class Node(object):
2 | '''
3 |     Node implementation
4 | '''
5 | def __init__(self,elem):
6 | self.elem = elem
7 | self.next = None
8 |
9 |
10 | class Stack(object):
11 | def __init__(self):
12 | '''
13 |         Initialize the head of the linked list
14 | '''
15 | self.__head = None
16 |
17 | def is_empty(self):
18 | return self.__head is None
19 |
20 | def push(self, item):
21 | '''
22 |         Push onto the stack
23 | :param item:
24 | :return:
25 | '''
26 | node = Node(item)
27 | node.next = self.__head
28 | self.__head = node
29 |
30 | def pop(self):
31 | '''
32 |         Pop from the stack
33 | :return:
34 | '''
35 | if self.is_empty():
36 | return
37 | else:
38 | p = self.__head
39 | self.__head = p.next
40 | return p.elem
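41 |
42 | # Minimal usage sketch (assumes only the classes above):
43 | if __name__ == '__main__':
44 |     stack = Stack()
45 |     stack.push(1)
46 |     stack.push(2)
47 |     print(stack.pop())       # 2
48 |     print(stack.pop())       # 1
49 |     print(stack.is_empty())  # True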
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts/Calendar_base_demo.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import random
3 |
4 | from pyecharts import options as opts
5 | from pyecharts.charts import Calendar
6 |
7 |
8 | begin = datetime.date(2019, 1, 1)
9 | end = datetime.date(2019, 12, 31)
10 | data = [
11 | [str(begin + datetime.timedelta(days=i)), random.randint(1, 20)]
12 | for i in range((end - begin).days + 1)
13 | ]
14 |
15 | c = (
16 | Calendar()
17 | .add("", data, calendar_opts=opts.CalendarOpts(range_="2019"))
18 | .set_global_opts(
19 | title_opts=opts.TitleOpts(title="Calendar-2019 Github 提交情况"),
20 | visualmap_opts=opts.VisualMapOpts(
21 | max_=20,
22 | min_=1,
23 | orient="horizontal",
24 | is_piecewise=True,
25 | pos_top="230px",
26 | pos_left="100px",
27 | ),
28 | )
29 | .render("calendar_base.html")
30 | )
31 |
--------------------------------------------------------------------------------
/spider-blog/spider_data.sql:
--------------------------------------------------------------------------------
1 | SET NAMES utf8mb4;
2 | SET FOREIGN_KEY_CHECKS = 0;
3 |
4 | -- ----------------------------
5 | -- Table structure for spider_data
6 | -- ----------------------------
7 | DROP TABLE IF EXISTS `spider_data`;
8 | CREATE TABLE `spider_data` (
9 | `id` varchar(36) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '主键 UUID',
10 | `plantform` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '平台类型',
11 | `read_num` int(11) NULL DEFAULT NULL COMMENT '总阅读数',
12 | `fans_num` int(11) NULL DEFAULT NULL COMMENT '总粉丝数',
13 | `rank_num` int(11) NULL DEFAULT NULL COMMENT '排名',
14 | `like_num` int(11) NULL DEFAULT NULL COMMENT '总点赞数',
15 | `create_date` datetime(0) NULL DEFAULT NULL COMMENT '创建时间',
16 | PRIMARY KEY (`id`) USING BTREE
17 | ) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
18 |
19 | SET FOREIGN_KEY_CHECKS = 1;
20 |
--------------------------------------------------------------------------------
/base-data-str/Demo.py:
--------------------------------------------------------------------------------
1 | print('www.geekdigging.com')
2 |
3 | print("www.geekdigging.com")
4 |
5 | print('小明说:"吃了吗?"')
6 |
7 | print("小明说:\"吃了吗?\"")
8 |
9 | print('''
10 | 这是一个多行字符串的实例
11 | 多行字符串可以使用制表符
12 | TAB ( \t )。
13 | 也可以使用换行符 [ \n ]。
14 | ''')
15 |
16 | print('I \nlike Python!')
17 | print("")
18 | print('I\tlike\tPython!')
19 |
20 | print("横向制表符:\\t")
21 |
22 | print(r"横向制表符:\t")
23 |
24 | print(len('I like Python!'))
25 |
26 | print(len('I like 小明!'))
27 |
28 | print(len(''))
29 |
30 | print(len(' '))
31 |
32 | print(len("\n"))
33 |
34 | a = "Hello"
35 | b = "Python"
36 |
37 | print("a + b 输出结果:", a + " " + b)
38 |
39 | print("a * 2 输出结果:", a * 2)
40 |
41 | print("a[1] 输出结果:", a[1])
42 |
43 | print("a[1:4] 输出结果:", a[1:4])
44 |
45 | if "H" in a:
46 | print("H 在变量 a 中")
47 | else:
48 | print("H 不在变量 a 中")
49 |
50 | if "M" not in a:
51 | print("M 不在变量 a 中")
52 | else:
53 | print("M 在变量 a 中")
--------------------------------------------------------------------------------
/python-spider/douyin/demo-video.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 |
4 | # Build the request headers
5 | headers = {
6 | 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
7 | }
8 |
9 | # Share link
10 | share_url = "https://v.douyin.com/JefvNdx/"
11 |
12 | session = requests.Session()
13 | res = session.get(share_url, headers = headers)
14 |
15 | # Extract the video id
16 | item_ids = re.compile(r'itemId: "([0-9]+)"').findall(res.text)[0]
17 |
18 | # Build the item-info request
19 | item_info_url = f"https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids={item_ids}"
20 | res_json = session.get(item_info_url, headers = headers).json()
21 |
22 | # Get the video source URL
23 | video_url = res_json['item_list'][0]['video']['play_addr']['url_list'][0]
24 |
25 | res = requests.get(video_url, headers = headers)
26 | with open('demo.mp4', 'wb') as fb:
27 | fb.write(res.content)
28 |
29 | print("视频下载完成~~~")
--------------------------------------------------------------------------------
/python-opencv/blog15-contours/approx.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | img = cv.imread("number.png")
4 |
5 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
6 | # Threshold to get a binary image
7 | ret, thresh = cv.threshold(gray_img, 127, 255, 0)
8 | # Find contours on the thresholded image (not the raw grayscale)
9 | contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
10 |
11 | cnt = contours[0]
12 |
13 | # Compute epsilon as a fraction of the perimeter: 10% and 1% respectively
14 | epsilon_1 = 0.1 * cv.arcLength(cnt, True)
15 | epsilon_2 = 0.01 * cv.arcLength(cnt, True)
16 |
17 | # Polygon approximation
18 | approx_1 = cv.approxPolyDP(cnt, epsilon_1, True)
19 | approx_2 = cv.approxPolyDP(cnt, epsilon_2, True)
20 |
21 | # Draw the polygons
22 | image_1 = cv.cvtColor(gray_img, cv.COLOR_GRAY2BGR)
23 | image_2 = cv.cvtColor(gray_img, cv.COLOR_GRAY2BGR)
24 |
25 | cv.polylines(image_1, [approx_1], True, (0, 0, 255), 2)
26 | cv.polylines(image_2, [approx_2], True, (0, 0, 255), 2)
27 |
28 | cv.imshow("image_1", image_1)
29 | cv.imshow("image_2", image_2)
30 | cv.waitKey(0)
31 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
/python-opencv/blog6-threshold/demo-quanjiafu.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | img = cv.imread('maliao.jpg')
7 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
8 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
9 |
10 | # Threshold with the five THRESH_* modes
11 | ret1, thresh1 = cv.threshold(gray_img, 127, 255, cv.THRESH_BINARY)
12 | ret2, thresh2 = cv.threshold(gray_img, 127, 255, cv.THRESH_BINARY_INV)
13 | ret3, thresh3 = cv.threshold(gray_img, 127, 255, cv.THRESH_TRUNC)
14 | ret4, thresh4 = cv.threshold(gray_img, 127, 255, cv.THRESH_TOZERO)
15 | ret5, thresh5 = cv.threshold(gray_img, 127, 255, cv.THRESH_TOZERO_INV)
16 |
17 | # Display results
18 | titles = ['Gray Img','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
19 | images = [gray_img, thresh1, thresh2, thresh3, thresh4, thresh5]
20 |
21 | # Plot with matplotlib
22 | for i in range(6):
23 | plt.subplot(2, 3, i+1), plt.imshow(images[i],'gray')
24 | plt.title(titles[i])
25 | plt.xticks([]),plt.yticks([])
26 |
27 | plt.show()
28 |
--------------------------------------------------------------------------------
/base-data-list/Demo.py:
--------------------------------------------------------------------------------
1 | list1 = [1, 2, 3, 4, 5]
2 | print(list1)
3 |
4 | list2 = ['a', 'b', 'c', 'd', 'e']
5 | print(list2)
6 |
7 | list3 = [1, 2, 3, 'a', 'b']
8 | print(list3)
9 |
10 | list4 = [1, 2.33, 'a', list3]
11 | print(list4)
12 |
13 | list5 = []
14 | print(list5)
15 |
16 | print(type(list4))
17 |
18 | list1 = [1, 2, 3, 4, 5]
19 | print(list1[0])
20 | # 异常索引打印
21 | # print(list1[5])
22 |
23 | print(list1[-1])
24 |
25 | print(list1 + list2)
26 |
27 | for i in list1:
28 | print(i)
29 |
30 | print(len(list1))
31 |
32 | print(len(list1 + list2))
33 |
34 | print('a' in list1)
35 | print(1 in list1)
36 |
37 | # Example: deleting list1 entirely
38 | # list1 = [1, 2, 3, 4, 5]
39 | #
40 | # del list1
41 | # print(list1)
42 |
43 | list1 = [1, 2, 3, 4, 5]
44 |
45 | del list1[2]
46 | print(list1)
47 |
48 | print(list1)
49 | print(max(list1))
50 | # Comparing mixed types raises TypeError, commented out
51 | # print(list4)
52 | # print(max(list4))
53 |
54 | print(list1)
55 | print(min(list1))
56 | # min() on the mixed-type list4 raises TypeError too, commented out
57 | # print(min(list4))
--------------------------------------------------------------------------------
/python-data-analysis/mojito/damu-wordcloud.py:
--------------------------------------------------------------------------------
1 | import jieba
2 | import pandas as pd
3 | import wordcloud
4 |
5 | # Read the danmu (bullet-comment) txt file
6 | with open("dan_mu.txt", encoding="utf-8") as f:
7 |     txt = f.read()
8 | danmu_list = txt.split("\n")
9 |
10 | # Tokenize with jieba
11 | danmu_cut = [jieba.lcut(item) for item in danmu_list]
12 |
13 | # Load stopwords
14 | with open("baidu_stopwords.txt", encoding="utf-8") as f:
15 |     stop = f.read()
16 | stop_words = stop.split()
17 |
18 | # Final tokens after stopword removal (filter on the stop_words list, not the raw string)
19 | s_data_cut = pd.Series(danmu_cut)
20 | all_words_after = s_data_cut.apply(lambda x: [i for i in x if i not in stop_words])
21 |
22 | # Word-frequency count
23 | all_words = []
24 | for i in all_words_after:
25 | all_words.extend(i)
26 | word_count = pd.Series(all_words).value_counts()
27 |
28 | wordcloud.WordCloud(
29 | font_path='msyh.ttc',
30 | background_color="#fff",
31 | max_words=1000,
32 | max_font_size=200,
33 | random_state=42,
34 | width=900,
35 | height=1600
36 | ).fit_words(word_count).to_file("wordcloud.png")
--------------------------------------------------------------------------------
/python-opencv/blog12-sobel/demo-roberts.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image (imread expects an IMREAD_* flag, not cv.COLOR_BGR2GRAY)
6 | img = cv.imread('maliao.jpg')
7 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
8 |
9 | # Convert to grayscale
10 | grayImage = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
11 |
12 | # Roberts operator
13 | kernelx = np.array([[-1, 0], [0, 1]], dtype=int)
14 | kernely = np.array([[0, -1], [1, 0]], dtype=int)
15 |
16 | x = cv.filter2D(grayImage, cv.CV_16S, kernelx)
17 | y = cv.filter2D(grayImage, cv.CV_16S, kernely)
18 |
19 | # Convert back to uint8 and blend the two gradient images
20 | absX = cv.convertScaleAbs(x)
21 | absY = cv.convertScaleAbs(y)
22 | Roberts = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
23 |
24 | # Display Chinese labels correctly
25 | plt.rcParams['font.sans-serif'] = ['SimHei']
26 |
27 | # Display the results
28 | titles = ['原始图像', 'Roberts 算子']
29 | images = [rgb_img, Roberts]
30 |
31 | for i in range(2):
32 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
33 | plt.title(titles[i])
34 | plt.xticks([]), plt.yticks([])
35 | plt.show()
--------------------------------------------------------------------------------
/base-except/Demo.py:
--------------------------------------------------------------------------------
1 | def division(x, y):
2 | try:
3 | return x / y
4 | except:
5 | print('程序报错啦!!!')
6 | return None
7 |
8 | print(division(15, 5))
9 |
10 | def division1(x, y):
11 | try:
12 | return x / y
13 | except ZeroDivisionError:
14 | print('程序报错啦!!!')
15 | return None
16 |
17 | print(division1(15, 0))
18 |
19 | def division2(x, y):
20 | try:
21 | return x / y
22 | except ZeroDivisionError:
23 | print('您输出的除数为 0 !!!')
24 | return None
25 | except TypeError:
26 | print('您输出的参数类型非法!!!')
27 | return None
28 |
29 | print(division2('python', 0))
30 |
31 | def division3(x, y):
32 | try:
33 | return x / y
34 | except ZeroDivisionError:
35 | print('您输出的除数为 0 !!!')
36 | return None
37 | except TypeError:
38 | print('您输出的参数类型非法!!!')
39 | return None
40 | finally:
41 | print('你一定能看到我!!!')
42 |
43 | print(division3(15, 3))
44 | print(division3('python', 0))
--------------------------------------------------------------------------------
/python-opencv/blog12-sobel/demo-prewitt.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image (imread expects an IMREAD_* flag, not cv.COLOR_BGR2GRAY)
6 | img = cv.imread('maliao.jpg')
7 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
8 |
9 | # Convert to grayscale
10 | grayImage = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
11 |
12 | # Prewitt operator
13 | kernelx = np.array([[1,1,1],[0,0,0],[-1,-1,-1]],dtype=int)
14 | kernely = np.array([[-1,0,1],[-1,0,1],[-1,0,1]],dtype=int)
15 |
16 | x = cv.filter2D(grayImage, cv.CV_16S, kernelx)
17 | y = cv.filter2D(grayImage, cv.CV_16S, kernely)
18 |
19 | # Convert back to uint8 and blend the two gradient images
20 | absX = cv.convertScaleAbs(x)
21 | absY = cv.convertScaleAbs(y)
22 | Prewitt = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
23 |
24 | # Display Chinese labels correctly
25 | plt.rcParams['font.sans-serif'] = ['SimHei']
26 |
27 | # Display the results
28 | titles = ['原始图像', 'Prewitt 算子']
29 | images = [rgb_img, Prewitt]
30 |
31 | for i in range(2):
32 | plt.subplot(1, 2, i + 1), plt.imshow(images[i], 'gray')
33 | plt.title(titles[i])
34 | plt.xticks([]), plt.yticks([])
35 | plt.show()
--------------------------------------------------------------------------------
/python-data-analysis/matplotlib/PolarDemo.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | # Display Chinese characters and minus signs correctly
5 | plt.rcParams['font.sans-serif']=['SimHei']
6 | plt.rcParams['axes.unicode_minus'] = False
7 |
8 | # Use the ggplot plotting style
9 | plt.style.use('ggplot')
10 |
11 | # Build the data
12 | values = [3.2, 2.1, 3.5, 2.8, 3]
13 | feature = ['攻击力', '防御力', '恢复力', '法术强度', '生命值']
14 |
15 | N = len(values)
16 | # Angles that split the circle evenly, one per feature
17 | angles = np.linspace(0, 2 * np.pi, N, endpoint=False)
18 |
19 | # Close the radar loop by repeating the first point at the end
20 | values = np.concatenate((values, [values[0]]))
21 | angles = np.concatenate((angles, [angles[0]]))
22 |
23 | # Plot
24 | fig = plt.figure()
25 | # The subplot must be created in polar coordinates
26 | ax = fig.add_subplot(111, polar=True)
27 | # Draw the line
28 | ax.plot(angles, values, 'o-', linewidth=2)
29 | # Fill with color
30 | ax.fill(angles, values, alpha=0.25)
31 | # Label each feature (drop the duplicated closing angle so counts match the labels)
32 | ax.set_thetagrids(angles[:-1] * 180 / np.pi, feature)
33 | # Set the radial range
34 | ax.set_ylim(0, 5)
35 | # Add the title
36 | plt.title('游戏人物属性')
37 | # Add grid lines
38 | ax.grid(True)
39 | # Save the figure
40 | plt.savefig('polar_demo.png')
--------------------------------------------------------------------------------
/python-spider/aiohttp-demo/aio-basic-demo.py:
--------------------------------------------------------------------------------
1 | import aiohttp
2 | import asyncio
3 |
4 | async def aio_1():
5 | async with aiohttp.ClientSession() as session:
6 | async with session.get('https://www.baidu.com/') as resp:
7 | print(resp.status)
8 | # print(await resp.text())
9 |
10 | loop = asyncio.get_event_loop()
11 | loop.run_until_complete(aio_1())
12 |
13 | async def aio_2():
14 | async with aiohttp.ClientSession() as session:
15 | async with session.get('https://www.geekdigging.com/') as resp:
16 | print(resp.status)
17 | print(await resp.text())
18 |
19 | loop = asyncio.get_event_loop()
20 | loop.run_until_complete(aio_2())
21 |
22 | async def aio_3():
23 | timeout = aiohttp.ClientTimeout(total=60)
24 | async with aiohttp.ClientSession(timeout = timeout) as session:
25 | async with session.get('https://www.geekdigging.com/', timeout = timeout) as resp:
26 | print(resp.status)
27 |
28 | loop = asyncio.get_event_loop()
29 | loop.run_until_complete(aio_3())
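30 |
31 | # Note: on Python 3.7+ each coroutine can also be driven with asyncio.run(),
32 | # e.g. asyncio.run(aio_3()), which creates and closes the event loop itself.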
--------------------------------------------------------------------------------
/base-data-set/Demo1.py:
--------------------------------------------------------------------------------
1 | set1 = {1, 2, 3, 4, 5, 6}
2 | set2 = {4, 5, 6, 7, 7, 9}
3 |
4 | # Intersection
5 | set3 = set1.intersection(set2)
6 | print('交集:', set3)
7 |
8 | # Union
9 | set4 = set1.union(set2)
10 | print('并集:', set4)
11 |
12 | # Difference
13 | set5 = set1.difference(set2)
14 | print('做差:', set5)
15 |
16 | set6 = {1, 2, 3}
17 | set6.add(4)
18 | print(set6)
19 | set6.add('python')
20 | print(set6)
21 | set6.add((1, 2))
22 | print(set6)
23 |
24 | set7 = {1, 2}
25 | set7.update({3, 4, 'python', (4, 5)})
26 | print(set7)
27 |
28 | set7.pop()
29 | print(set7)
30 |
31 | set8 = {1, 2, 3, 4}
32 | set8.remove(4)
33 | print(set8)
34 | # Removing a missing element raises KeyError
35 | # set8.remove(9)
36 |
37 | set8.discard(9)
38 | print(set8)
39 |
40 | set9 = {1, 2, 3}
41 | set9.clear()
42 | print(set9)
43 |
44 | set10 = {1, 2, 3}
45 | set11 = {1, 2}
46 | set12 = {4, 5}
47 | print(set10.isdisjoint(set11))
48 | print(set10.isdisjoint(set12))
49 |
50 | print(set11.issubset(set10))
51 | print(set12.issubset(set10))
52 |
53 | print(set10.issuperset(set11))
54 | print(set10.issuperset(set12))
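55 |
56 | # Note: the operator forms give the same results:
57 | # set1 & set2 (intersection), set1 | set2 (union), set1 - set2 (difference)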
--------------------------------------------------------------------------------
/base-process/Demo.py:
--------------------------------------------------------------------------------
1 | # Example 1
2 |
3 | # width = input("请输入长方形的宽:")
4 | # height = input("请输入长方形的高:")
5 | # area = int(width) * int(height)
6 | # print("长方形的面积为:", area)
7 |
8 | # Example 2
9 |
10 | # weight = input("请输入当前的体重:")
11 | #
12 | # if float(weight) >= 200:
13 | # print("你和加菲猫一样肥!!")
14 | # else:
15 | # print("你还是很苗条的么!!")
16 |
17 | # Example 3
18 |
19 | # weight = input("请输入您当前的体重:")
20 | #
21 | # if float(weight) >= 200:
22 | # print("你和加菲猫一样肥!!")
23 | # elif float(weight) >= 100:
24 | # print("你的身材真棒!!")
25 | # else:
26 | # print("有点瘦哦,要多吃肉!!")
27 |
28 | # Example 4
29 | gender = input("请输入您的性别(M或者F):")
30 | height = input("请输入您的身高:")
31 |
32 | if gender == 'M':
33 | if float(height) >= 185:
34 | print("海拔太高了,可能会导致缺氧!!!")
35 | elif float(height) >= 175:
36 | print("男神身高!!!")
37 | else:
38 | print("哥们,该补钙了!!!")
39 | else:
40 | if float(height) >= 175:
41 | print("您可以去当模特了!!!")
42 | elif float(height) >= 165:
43 | print("女神身高,您是一位美丽的女孩子!!!")
44 | else:
45 | print("美女,多晒晒太阳吧!!!")
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts_map/ShanghaiMap.py:
--------------------------------------------------------------------------------
1 | from pyecharts import options as opts
2 | from pyecharts.charts import Map
3 | from pyecharts.globals import ThemeType
4 |
5 | shanghai_list = ['黄浦区', '徐汇区', '长宁区', '静安区', '普陀区', '虹口区', '杨浦区', '闵行区', '宝山区', '嘉定区', '金山区', '松江区', '青浦区', '奉贤区', '崇明区', '浦东新区']
6 |
7 | shanghai_people = [65.38, 108.44, 69.4, 106.28, 128.19, 79.7, 131.27, 254.35, 204.23, 158.89, 80.5, 176.22, 121.9, 115.2, 68.81, 555.02]
8 |
9 | BAIDU_LINK='https://baike.baidu.com/item/%E4%B8%8A%E6%B5%B7%E8%A1%8C%E6%94%BF%E5%8C%BA%E5%88%92/7426389?fr=aladdin'
10 |
11 | c = (
12 | Map(init_opts=opts.InitOpts(theme=ThemeType.DARK, bg_color='#404a59', width='1600px', height='900px'))
13 | .add("上海市-常住人口", [list(z) for z in zip(shanghai_list, shanghai_people)], "上海")
14 | .set_global_opts(
15 | title_opts=opts.TitleOpts(
16 | title="上海地图-常住人口(单位:万人)",
17 | subtitle="常住人口数据来自百度百科",
18 | subtitle_link=BAIDU_LINK,
19 | ),
20 | visualmap_opts=opts.VisualMapOpts()
21 | )
22 | .render("map_shanghai.html")
23 | )
--------------------------------------------------------------------------------
/python-data-analysis/pyecharts/Line3d_autorotate_demo.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | from pyecharts import options as opts
4 | from pyecharts.charts import Line3D
5 | from pyecharts.faker import Faker
6 |
7 | data = []
8 | for t in range(0, 25000):
9 | _t = t / 1000
10 | x = (1 + 0.25 * math.cos(75 * _t)) * math.cos(_t)
11 | y = (1 + 0.25 * math.cos(75 * _t)) * math.sin(_t)
12 | z = _t + 2.0 * math.sin(75 * _t)
13 | data.append([x, y, z])
14 | c = (
15 | Line3D()
16 | .add(
17 | "",
18 | data,
19 | xaxis3d_opts=opts.Axis3DOpts(Faker.clock, type_="value"),
20 | yaxis3d_opts=opts.Axis3DOpts(Faker.week_en, type_="value"),
21 | grid3d_opts=opts.Grid3DOpts(
22 | width=100, depth=100, rotate_speed=150, is_rotate=True
23 | ),
24 | )
25 | .set_global_opts(
26 | visualmap_opts=opts.VisualMapOpts(
27 | max_=30, min_=0, range_color=Faker.visual_color
28 | ),
29 | title_opts=opts.TitleOpts(title="Line3D-旋转的弹簧"),
30 | )
31 | .render("line3d_autorotate.html")
32 | )
33 |
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/GroupByDemo.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | # Import the data
4 | epidemic_dxy = pd.read_excel("epidemic_dxy.xlsx")
5 |
6 | print(epidemic_dxy.groupby(['continents']))
7 |
8 | print(epidemic_dxy.groupby(['continents']).count())
9 |
10 | # pd.set_option('display.max_columns', None)
11 | print(epidemic_dxy.groupby(['continents']).sum())
12 |
13 | print(epidemic_dxy.groupby(['continents'])[['confirmedCount', 'suspectedCount', 'curedCount', 'deadCount']].sum())
14 |
15 | print(epidemic_dxy.groupby(epidemic_dxy['continents'])[['confirmedCount', 'suspectedCount', 'curedCount', 'deadCount']].sum())
16 |
17 | print(epidemic_dxy.groupby([epidemic_dxy['continents'], epidemic_dxy['provinceName']])[['confirmedCount', 'suspectedCount', 'curedCount', 'deadCount']].sum())
18 |
19 | print(epidemic_dxy.groupby(epidemic_dxy['continents'])[['confirmedCount', 'suspectedCount', 'curedCount', 'deadCount']].aggregate(['count', 'sum']))
20 |
21 | new_dataframe = epidemic_dxy.groupby(epidemic_dxy['continents'])[['confirmedCount', 'suspectedCount', 'curedCount', 'deadCount']].sum().reset_index()
22 |
23 | print(new_dataframe)
--------------------------------------------------------------------------------
/python-spider/bs4-demo/bs4_demo.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup
3 |
4 | response = requests.get('https://www.geekdigging.com/')
5 | soup = BeautifulSoup(response.content, "html5lib")
6 | # Output is long, commented out for now
7 | # print(soup.prettify())
8 |
9 | print(soup.title)
10 | print(type(soup.title))
11 | print(soup.title.string)
12 | print(soup.a)
13 |
14 | tag = soup.section
15 | print(tag.name)
16 |
17 | print(tag['class'])
18 | print(tag.attrs)
19 |
20 | print(soup.title.string)
21 |
22 | print(soup.a.img)
23 | print(type(soup.a.img))
24 | print(soup.a.img.attrs)
25 |
26 | # Get direct children
27 | print(soup.article.contents)
28 |
29 | for i, child in enumerate(soup.article.children):
30 | print(i, child)
31 |
32 | # Get all descendants
33 | for i, child in enumerate(soup.article.descendants):
34 | print(i, child)
35 | # Parent node
36 | print(soup.title.parent)
37 |
38 | # Sibling nodes
39 | print('next_sibling:', soup.title.next_sibling)
40 | print('previous_sibling:', soup.title.previous_sibling)
41 | print('next_siblings:', soup.title.next_siblings)
42 | print('previous_siblings:', soup.title.previous_siblings)
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/TableJoinDemo.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | df1 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet1')
4 | print(df1)
5 |
6 | df2 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet2')
7 | print(df2)
8 |
9 | print(pd.merge(df1, df2))
10 |
11 | df3 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet3')
12 | print(df3)
13 |
14 | print(pd.merge(df1, df3, on='编号'))
15 |
16 | df4 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet4')
17 | print(df4)
18 |
19 | print(pd.merge(df4, df3, on='编号'))
20 |
21 | df5 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet5')
22 | print(df5)
23 |
24 | # Inner join
25 | print(pd.merge(df5, df3, on='编号', how='inner'))
26 |
27 | # Left join
28 | print(pd.merge(df5, df3, on='编号', how='left'))
29 |
30 | # Right join
31 | print(pd.merge(df5, df3, on='编号', how='right'))
32 |
33 | # Outer join
34 | print(pd.merge(df5, df3, on='编号', how='outer'))
35 |
36 | df6 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet6')
37 | print(df6)
38 |
39 | # Vertical concatenation
40 | print(pd.concat([df5, df6]))
41 |
42 | print(pd.concat([df5, df6], ignore_index=True))
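43 |
44 | # Note: how='inner' is the default for pd.merge, so the earlier merges
45 | # without the keyword are inner joins as well.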
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 meteor1993
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/python-opencv/blog13-scharr/demo-summary.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | img = cv.imread("maliao.jpg")
7 | rgb_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
8 |
9 | gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
10 |
11 | # Gaussian blur
12 | gaussianBlur = cv.GaussianBlur(gray_img, (3,3), 0)
13 |
14 | # Thresholding
15 | ret, binary = cv.threshold(gaussianBlur, 127, 255, cv.THRESH_BINARY)
16 |
17 | # Scharr operator
18 | x = cv.Scharr(gray_img, cv.CV_32F, 1, 0)  # X direction
19 | y = cv.Scharr(gray_img, cv.CV_32F, 0, 1)  # Y direction
20 | absX = cv.convertScaleAbs(x)
21 | absY = cv.convertScaleAbs(y)
22 | Scharr = cv.addWeighted(absX, 0.5, absY, 0.5, 0)
23 |
24 | # LoG operator
25 | gaussian = cv.GaussianBlur(gray_img, (3,3), 0)  # Gaussian denoising first
26 | dst = cv.Laplacian(gaussian, cv.CV_16S, ksize = 3)  # then Laplacian edge detection
27 | LOG = cv.convertScaleAbs(dst)
28 |
29 | # Result figures
30 | titles = ['Source Image', 'Gray Image', 'Scharr Image', 'LOG Image']
31 | images = [rgb_img, gray_img, Scharr, LOG]
32 |
33 | for i in np.arange(4):
34 | plt.subplot(2, 2, i+1),plt.imshow(images[i],'gray')
35 | plt.title(titles[i])
36 | plt.xticks([]),plt.yticks([])
37 | plt.show()
38 |
--------------------------------------------------------------------------------
/python-spider/urllib-request/Demo_Error.py:
--------------------------------------------------------------------------------
1 | from urllib import request, error
2 | import socket
3 |
4 | # Request a clearly nonexistent page; prints: Not Found
5 | try:
6 | response = request.urlopen('https://www.geekdigging.com/aa')
7 | except error.URLError as e:
8 | print(e.reason)
9 |
10 | # Force a timeout; prints: timed out
11 | try:
12 | response = request.urlopen('https://www.baidu.com', timeout=0.001)
13 | except error.URLError as e:
14 | print(e.reason)
15 |
16 | # Inspect the type of the exception's reason
17 | try:
18 | response = request.urlopen('https://www.baidu.com', timeout=0.001)
19 | except error.URLError as e:
20 | print(type(e.reason))
21 | if isinstance(e.reason, socket.timeout):
22 | print('TIME OUT')
23 |
24 | # Catch the nonexistent page with HTTPError instead
25 | try:
26 | response = request.urlopen('https://www.geekdigging.com/aa')
27 | except error.HTTPError as e:
28 | print(e.reason, e.code, e.headers, sep='\n')
29 |
30 | # Tidier exception handling: HTTPError first, then URLError
31 | try:
32 | response = request.urlopen('https://www.geekdigging.com/aa')
33 | except error.HTTPError as e:
34 | print(e.reason, e.code, e.headers, sep='\n')
35 | except error.URLError as e:
36 | print(e.reason)
37 | else:
38 | print('Request Success!')
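39 |
40 | # Note: HTTPError is a subclass of URLError, which is why the HTTPError
41 | # clause must come first; a URLError clause placed first would swallow
42 | # HTTP errors as well.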
--------------------------------------------------------------------------------
/base-data-tuple/Demo.py:
--------------------------------------------------------------------------------
1 | tuple1 = "Python", "Java", 2011, 2015
2 | print(tuple1)
3 |
4 | tuple2 = ("Python", "Java", 2011, 2015)
5 | print(tuple2)
6 | print(type(tuple2))
7 |
8 | tuple3 = ("Python", "Java", [1 ,2, 'python', 'java'], 2011, 2015)
9 | print(tuple3)
10 |
11 | tuple4 = (0 ,1, 2, 3, 4, 5, 6, 7, 8, 9)
12 | print(tuple4)
13 | # Indexing
14 | print(tuple4[2])
15 | # Negative indexing
16 | print(tuple4[-2])
17 | # Slicing
18 | print(tuple4[0:8:2])
19 | # Reverse slicing
20 | print(tuple4[8:1:-1])
21 |
22 | tuple5 = (2333, '98k')
23 | # Concatenation
24 | print(tuple4 + tuple5)
25 | # Looping
26 | for index in tuple4:
27 |     print(index)
28 | # Membership tests
29 | print(1 in tuple4)
30 | print(11 in tuple4)
31 | # Deleting a tuple, commented out
32 | # del tuple5
33 | # print(tuple5)
34 |
35 | # Maximum
36 | print(max(tuple4))
37 | # Minimum
38 | print(min(tuple4))
39 | # Tuple length
40 | print(len(tuple4))
41 | # Modifying a tuple raises TypeError, commented out
42 | # tuple4[0] = 11
43 |
44 | # Converting between tuple and list
45 | print(type(tuple4))
46 | print(list(tuple4))
47 | print(type(list(tuple4)))
48 | list1 = [0 ,1, 2, 3, 4, 5, 6, 7, 8, 9]
49 | print(type(list1))
50 | print(tuple(list1))
51 | print(type(tuple(list1)))
52 |
53 | # Tuple unpacking
54 | tuple6 = (1, 2, 3)
55 | print(tuple6)
56 | a, b, c = tuple6
57 | print(a, b, c)
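58 |
59 | # Note: a one-element tuple needs the trailing comma:
60 | # t = (1,) is a tuple, while t = (1) is just the int 1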
--------------------------------------------------------------------------------
/python-opencv/blog9-open/demo-noise.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # Read the image
6 | img = cv.imread("demo.png", cv.IMREAD_UNCHANGED)
7 | source = cv.cvtColor(img, cv.COLOR_BGR2RGB)
8 | rows, cols, chn = source.shape
9 |
10 | # Add noise: white dots (salt)
11 | for i in range(500):
12 | x = np.random.randint(0, rows)
13 | y = np.random.randint(0, cols)
14 | source[x, y, :] = 255
15 |
16 | # Save the white-noise image
17 | cv.imwrite("demo_noise_white.jpg", source)
18 | print("白点噪声添加完成")
19 |
20 | # Re-read the original image
21 | img1 = cv.imread("demo.png", cv.IMREAD_UNCHANGED)
22 | source1 = cv.cvtColor(img1, cv.COLOR_BGR2RGB)
23 |
24 | # Add noise: black dots (pepper)
25 | for i in range(1000):
26 | x = np.random.randint(0, rows)
27 | y = np.random.randint(0, cols)
28 | source1[x, y, :] = 0
29 |
30 | # Save the black-noise image
31 | cv.imwrite("demo_noise_black.jpg", source1)
32 | print("黑点噪声添加完成")
33 |
34 | # Display results
35 | titles = ['White Img','Black Img']
36 | images = [source, source1]
37 |
38 | # Plot with matplotlib
39 | for i in range(2):
40 | plt.subplot(1, 2, i+1), plt.imshow(images[i],'gray')
41 | plt.title(titles[i])
42 | plt.xticks([]),plt.yticks([])
43 |
44 | plt.show()
45 |
--------------------------------------------------------------------------------
/python-data-analysis/series/demo1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 | s = pd.Series(np.random.rand(5), index=['a', 'b', 'c', 'd', 'e'])
5 | print(s)
6 | print(s.index)
7 |
8 | s1 = pd.Series(np.random.randn(5))
9 | print(s1)
10 |
11 | d = {'b': 1, 'a': 0, 'c': 2}
12 | s2 = pd.Series(d)
13 | print(s2)
14 |
15 | s3 = pd.Series(d, index=['b', 'c', 'd', 'a'])
16 | print(s3)
17 |
18 | s4 = pd.Series(5., index=['a', 'b', 'c', 'd', 'e'])
19 | print(s4)
20 |
21 | print(s[0])
22 | print(s[:3])
23 | print(s[s > s.median()])
24 | print(s[[4, 3, 1]])
25 | # Element-wise exponential of s (e is the constant 2.71828...)
26 | print(np.exp(s))
27 | # Element-wise square root of s
28 | print(np.sqrt(s))
29 | print(s.dtype)
30 | print(s.array)
31 | print(s.to_numpy())
32 |
33 | print(s['a'])
34 | s['e'] = 12.
35 | print(s)
36 |
37 | print('e' in s)
38 | print('f' in s)
39 | # Raises KeyError, commented out
40 | # print(s['f'])
41 |
42 | print(s.get('f'))
43 | print(s.get('f', np.nan))
44 |
45 | print(s[1:] + s[:-1])
46 |
47 | s5 = pd.Series(np.random.randn(5), name='my_series')
48 | print(s5)
49 | print(s5.name)
50 | print(id(s5))
51 |
52 | # Rename the Series (returns a new object)
53 | s6 = s5.rename("my_series_different")
54 | print(s6)
55 | print(id(s6))
--------------------------------------------------------------------------------
/python-spider/scrapy_selenium_demo/scrapy_selenium_demo/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | class ScrapySeleniumDemoPipeline(object):
10 | def process_item(self, item, spider):
11 | return item
12 |
13 |
14 | import pymongo
15 |
16 | class MongoPipeline(object):
17 | def __init__(self, mongo_uri, mongo_db):
18 | self.mongo_uri = mongo_uri
19 | self.mongo_db = mongo_db
20 |
21 | @classmethod
22 | def from_crawler(cls, crawler):
23 | return cls(mongo_uri=crawler.settings.get('MONGO_URI'),
24 | mongo_db=crawler.settings.get('MONGO_DB')
25 | )
26 |
27 | def open_spider(self, spider):
28 | self.client = pymongo.MongoClient(self.mongo_uri)
29 | self.db = self.client[self.mongo_db]
30 |
31 | def process_item(self, item, spider):
32 | name = item.__class__.__name__
33 |         self.db[name].insert_one(dict(item))  # insert_one replaces the insert() removed in pymongo 4
34 | return item
35 |
36 | def close_spider(self, spider):
37 | self.client.close()
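38 |
39 | # Note: a pipeline only runs once it is registered in settings.py, e.g.:
40 | # ITEM_PIPELINES = {'scrapy_selenium_demo.pipelines.MongoPipeline': 300}
41 | # with MONGO_URI and MONGO_DB defined in the same settings module.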
--------------------------------------------------------------------------------
/base-data-dict/Demo1.py:
--------------------------------------------------------------------------------
1 | import copy
2 |
3 | dict1 = {'name': 'geekdigging', 'age': 2}
4 |
5 | print(dict1.keys())
6 | print(list(dict1.keys()))
7 | print(type(list(dict1.keys())))
8 |
9 | print(dict1.values())
10 | print(list(dict1.values()))
11 | print(type(list(dict1.values())))
12 |
13 | print(dict1.items())
14 | print(list(dict1.items()))
15 | print(type(list(dict1.items())))
16 |
17 | print(dict1.get('name'))
18 | print(dict1.get('geekdigging'))
19 |
20 | print(dict1.pop('age'))
21 | print(dict1)
22 |
23 | dict1.setdefault('age')
24 | print(dict1)
25 |
26 | dict2 = {'sex': 'male'}
27 | dict1.update(dict2)
28 | print(dict1)
29 |
30 | dict2.clear()
31 | print(dict2)
32 |
33 | dict3 = {'name': 'geekdigging', 'age': [1, 2, 3]}
34 | # Plain assignment: both names reference the same object
35 | dict4 = dict3
36 | print(id(dict3))
37 | print(id(dict4))
38 | # Shallow copy: the top-level dict is copied, but nested objects are still shared references
39 | dict5 = dict3.copy()
40 | dict3['age'].remove(1)
41 | print(dict3)
42 | print(dict5)
43 | print(id(dict3))
44 | print(id(dict5))
45 |
46 | dict3 = {'name': 'geekdigging', 'age': [1, 2, 3]}
47 | dict6 = copy.deepcopy(dict3)
48 | dict3['age'].remove(1)
49 | print(dict3)
50 | print(dict6)
51 | print(id(dict3))
52 | print(id(dict6))
--------------------------------------------------------------------------------
/python-spider/first_scrapy/first_scrapy/spiders/MzituSpider.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from scrapy import Spider, Request
3 | from first_scrapy.items import ImageItem
4 |
5 | class MziTuSpider(Spider):
6 | name = 'MziTuSpider'
7 | allowed_domains = ['www.mzitu.com']
8 | start_urls = ['https://www.mzitu.com/mm/']
9 |
10 | def start_requests(self):
11 | headers = {
12 | 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
13 | 'referer': 'https://www.mzitu.com/'
14 | }
15 | yield Request('https://www.mzitu.com/mm/', self.parse, headers = headers)
16 |
17 | def parse(self, response):
18 | imageList = response.css('.postlist ul li')
19 | for image in imageList:
20 | item = ImageItem()
21 | item['id'] = image.css('a::attr("href")').extract_first().split('/')[3]
22 | item['url'] = image.css('a::attr("href")').extract_first()
23 | item['title'] = image.css('a img::attr("alt")').extract_first()
24 | item['thumb'] = image.css('a img::attr("data-original")').extract_first()
25 | yield item
--------------------------------------------------------------------------------
/base-excel/Demo1.py:
--------------------------------------------------------------------------------
1 | import xlsxwriter
2 | import datetime
3 |
4 | workbook = xlsxwriter.Workbook('demo.xlsx')
5 |
6 | sheet1 = workbook.add_worksheet('test_sheet')
7 |
8 | workfomat = workbook.add_format()
9 | # Bold font
10 | workfomat.set_bold(True)
11 | # Cell border width
12 | workfomat.set_border(1)
13 | # Alignment
14 | workfomat.set_align('left')
15 | # Number format: two decimal places
16 | workfomat.set_num_format('0.00')
17 |
18 | heads = ['', '语文', '数学', '英语']
19 | datas = [
20 | ['小明', 76, 85, 95],
21 | ['小红', 85, 58, 92],
22 | ['小王', 98, 96, 91]
23 | ]
24 |
25 | sheet1.write_row('A1', heads, workfomat)
26 |
27 | sheet1.write_row('A2', datas[0], workfomat)
28 | sheet1.write_row('A3', datas[1], workfomat)
29 | sheet1.write_row('A4', datas[2], workfomat)
30 |
31 | fomat1 = workbook.add_format({'num_format': 'yyyy/mm/dd/ hh:mm:ss'})
32 |
33 | sheet1.write_datetime('E5', datetime.datetime(2019, 11, 9, 22, 44, 26), fomat1)
34 |
35 | sheet1.insert_image('I6', 'wx.jpg')
36 |
37 | chart = workbook.add_chart({'type': 'column'})
38 |
39 | chart.add_series({'values': '=test_sheet!$B$2:$B$4'})
40 | chart.add_series({'values': '=test_sheet!$C$2:$C$4'})
41 | chart.add_series({'values': '=test_sheet!$D$2:$D$4'})
42 |
43 | sheet1.insert_chart('A7', chart)
44 |
45 | workbook.close()
--------------------------------------------------------------------------------
/python-data-analysis/pandas-demo/DataOperation.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | # Import the data
4 | data_train = pd.read_csv("train.csv")
5 |
6 | # Inspect the data
7 | print(data_train.info())
8 |
9 | pd.set_option('display.max_columns', None)
10 | print(data_train.describe())
11 |
12 | print(data_train)
13 |
14 | print(data_train['SibSp'] + data_train['Parch'])
15 |
16 | print(data_train['SibSp'] + data_train['Parch'] + 1 - data_train['Survived'])
17 |
18 | print(data_train['Survived'] > (data_train['SibSp'] + data_train['Parch']))
19 |
20 | # Count per column
21 | print(data_train.count())
22 | # Count per row
23 | print(data_train.count(axis=1))
24 | # Count a single column
25 | print(data_train['Age'].count())
26 |
27 | # Sum per column
28 | print(data_train.sum())
29 | # Sum per row
30 | print(data_train.sum(axis=1))
31 |
32 | print(data_train.mean())
33 |
34 | print(data_train.max())
35 | print('-----------------------------------------')
36 | print(data_train.min())
37 |
38 | print(data_train.median())
39 |
40 | print(data_train.mode())
41 | # mode of a single column
42 | print(data_train['Sex'].mode())
43 |
44 | print(data_train.var())
45 |
46 | print(data_train.std())
47 |
48 | print(data_train.quantile(0.25))
49 | print('-----------------------------------------')
50 | print(data_train.quantile(0.5))
51 |
52 | print(data_train.corr(method='pearson'))
--------------------------------------------------------------------------------
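One portability caveat for the script above: on recent pandas (2.0+), whole-frame reductions such as sum(), mean(), var() and corr() no longer silently skip non-numeric columns like Name or Sex, so several of the calls above can raise a TypeError there. The numeric_only flag restores the intended behavior explicitly:

    # restrict reductions to numeric columns on pandas >= 2.0
    print(data_train.sum(numeric_only=True))
    print(data_train.mean(numeric_only=True))
    print(data_train.corr(method='pearson', numeric_only=True))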
/python-data-analysis/pandas-demo/demo1.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 | dates = pd.date_range('20200101', periods=6)
5 | df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
6 | print(df)
7 |
8 | # first rows
9 | print(df.head(1))
10 | # last rows
11 | print(df.tail(2))
12 | # the index
13 | print(df.index)
14 | # the column labels
15 | print(df.columns)
16 | # quick statistical summary
17 | print(df.describe())
18 | # transpose
19 | print(df.T)
20 | # reference: https://www.jianshu.com/p/f0ed06cd5003
21 | # two ways to sort: sort_index() and sort_values()
22 | df1 = pd.DataFrame({'b': [1, 2, 3, 2], 'a': [4, 3, 2, 1], 'c': [1, 3, 8, 2]}, index=[2, 0, 1, 3])
23 | print(df1)
24 | # sorting with sort_values()
25 | # ascending sort on column b
26 | print(df1.sort_values(by='b'))
27 | # column b descending, then column a ascending
28 | print(df1.sort_values(by=['b', 'a'], axis=0, ascending=[False, True]))
29 | # sort the columns by the values in row 3; axis=1 is required
30 | print(df1.sort_values(by=3, axis=1))
31 | # row 3 ascending, then row 0 descending
32 | print(df1.sort_values(by=[3, 0], axis=1, ascending=[True, False]))
33 | # sorting with sort_index()
34 | # default: sort by the row labels in ascending order
35 | print(df1.sort_index())
36 | # sort by the column labels in ascending order
37 | print(df1.sort_index(axis=1))
38 | # the old sort_index(by=...) spelling was a deprecated alias of sort_values
39 | # and is gone in modern pandas, so sort_values is used here instead.
40 | # column b descending; b has ties, which are broken by column a ascending
41 | print(df1.sort_values(by=['b', 'a'], ascending=[False, True]))
42 | # column a descending; a has no duplicates, so the secondary ascending
43 | # sort on b never comes into play
44 | print(df1.sort_values(by=['a', 'b'], ascending=[False, True]))
--------------------------------------------------------------------------------
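Worth knowing alongside the sorting calls above: since pandas 1.1, sort_values also takes a key function that is applied to the column before sorting. A small hypothetical example, sorting by absolute value:

    import pandas as pd

    df2 = pd.DataFrame({'x': [-3, 1, -2]})  # hypothetical data
    print(df2.sort_values(by='x', key=lambda s: s.abs()))  # order: 1, -2, -3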
/python-data-analysis/pandas-demo/demo2.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 |
4 | dates = pd.date_range('20200101', periods=6)
5 | df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
6 | print(df)
7 |
8 | # select a single column, which yields a Series
9 | print(df['A'])
10 | # slice rows
11 | print(df[0:3])
12 | print(df['20200101':'20200103'])
13 |
14 | # loc selects by label, using column names and index labels
15 | # a single row by label
16 | print(df.loc[dates[0]])
17 | # several columns by label
18 | print(df.loc[:, ['A', 'B']])
19 | # label slicing; both the row and column endpoints are included
20 | print(df.loc['20200101':'20200103', ['A', 'B']])
21 | # two columns of one row
22 | print(df.loc['20200101', ['A', 'B']])
23 | # a single scalar value
24 | print(df.loc[dates[0], 'A'])
25 |
26 | # iloc is integer-location based indexing, so its arguments are integer positions
27 | # a single row by position
28 | print(df.iloc[3])
29 | # slice rows and columns by integer position
30 | print(df.iloc[3:5, 0:2])
31 | # lists of positions
32 | print(df.iloc[[1, 2, 4], [0, 2]])
33 | # whole-row slice
34 | print(df.iloc[1:3, :])
35 | # whole-column slice
36 | print(df.iloc[:, 1:3])
37 | # a single scalar value, as above
38 | print(df.iloc[1, 1])
39 |
40 | # at works like loc but is faster and can only access a single element
41 | print(df.at[dates[0], 'A'])
42 |
43 | # iat is to iloc what at is to loc: faster, position-based, single element only
44 | print(df.iat[1, 1])
45 |
46 | # select rows using a single column's values
47 | print(df[df.A > 0])
48 |
49 | # select the values in df that satisfy a condition
50 | print(df[df < 0])
51 |
52 |
--------------------------------------------------------------------------------
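One selection idiom the script above stops short of: filtering with isin(), which pairs naturally with the boolean selections shown at the end. A sketch on a copy of the same frame (the E column is invented for the example):

    df2 = df.copy()
    df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three']
    # keep only the rows whose E value is in the given set
    print(df2[df2['E'].isin(['two', 'four'])])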
/python-spider/xpath-demo/xpath_demo_advanced.py:
--------------------------------------------------------------------------------
1 | from lxml import etree
2 | import requests
3 |
4 | response = requests.get('https://www.geekdigging.com/')
5 | html_str = response.content.decode('UTF-8')
6 | html = etree.HTML(html_str)
7 |
8 | result_1 = html.xpath('/html/body/section/div/div/main/article[1]/div[2]/div/h3/a/text()')
9 | print(result_1)
10 |
11 | result_2 = html.xpath('/html/body/section/div/div/main/article[1]/div[2]/div/h3/a/@href')
12 | print(result_2)
13 |
14 | result_3 = html.xpath('//div[contains(@class, "post-head")]')
15 | print(result_3)
16 |
17 | result_4 = html.xpath('//img[@class="img-ajax" and @alt="小白学 Python 爬虫(18):Requests 进阶操作"]')
18 | print(result_4)
19 |
20 | result_5 = html.xpath('//article/div/div/h3[@class="post-title"]/a/text()')
21 | print(result_5)
22 | result_6 = html.xpath('//article[1]/div/div/h3[@class="post-title"]/a/text()')
23 | print(result_6)
24 | result_7 = html.xpath('//article[last()]/div/div/h3[@class="post-title"]/a/text()')
25 | print(result_7)
26 | result_8 = html.xpath('//article[position() < 5]/div/div/h3[@class="post-title"]/a/text()')
27 | print(result_8)
28 |
29 | # node axis examples
30 | # all ancestor nodes
31 | result_9 = html.xpath('//article/ancestor::*')
32 | print(result_9)
33 | # only the main element among the ancestors
34 | result_10 = html.xpath('//article/ancestor::main')
35 | print(result_10)
--------------------------------------------------------------------------------
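The two ancestor queries at the end only scratch the node-axis syntax; lxml supports the full set of XPath axes. A few more in the same spirit, assuming the page keeps the structure used above:

    # direct children of the first article
    print(html.xpath('//article[1]/child::*'))
    # every attribute node of the first article
    print(html.xpath('//article[1]/attribute::*'))
    # articles that follow the first one at the same level
    print(html.xpath('//article[1]/following-sibling::article'))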
/base-data-list/Demo1.py:
--------------------------------------------------------------------------------
1 | list1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
2 |
3 | # when the step is omitted it defaults to 1
4 | print(list1[3:8])
5 | # step of 2
6 | print(list1[3:8:2])
7 | # from index 3 to the end
8 | print(list1[3:])
9 | # from the start up to index 8; index 8 itself is excluded
10 | print(list1[:8])
11 | # the whole list with a step of 3
12 | print(list1[::3])
13 | # from index 1 up to the second-to-last element, which is excluded
14 | print(list1[1:-2])
15 | # the whole list
16 | print(list1[:])
17 | # the list reversed
18 | print(list1[::-1])
19 | # reversed, with a step of 2
20 | print(list1[8:1:-2])
21 |
22 | list1.append("Python")
23 | print(list1)
24 |
25 | list2 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
26 |
27 | list2.append("Python")
28 | list2.append("Python")
29 | list2.append("Python")
30 | list2.append(1)
31 | print(list2.count("Python"))
32 | print(list2.count(1))
33 |
34 | list1.extend(list2)
35 | print(list1)
36 |
37 | print(list1.index("Python"))
38 |
39 | list1.insert(0, "Hello")
40 | print(list1)
41 |
42 | list3 = [0, 1, 2]
43 | list4 = [2, 2]
44 | list3.insert(1, list4)
45 | print(list3)
46 |
47 | list3.pop()
48 | print(list3)
49 |
50 | list3.pop(1)
51 | print(list3)
52 |
53 | list5 = [1, 2, 3, 4, 4, 5]
54 | list5.remove(4)  # removes only the first matching element
55 | print(list5)
56 | print(id(list5))
57 | list5.reverse()  # reverses in place: same id before and after
58 | print(list5)
59 | print(id(list5))
60 | print(id(list5[::-1]))  # slicing builds a new list, hence a different id
61 |
62 | list6 = [2, 5, 1, 9, 6, 3]
63 | list6.sort()
64 | print(list6)
65 | list6.sort(reverse=True)
66 | print(list6)
--------------------------------------------------------------------------------
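A companion detail to the sort() calls at the end: list.sort() works in place and returns None, while the built-in sorted() returns a new list and leaves the original untouched:

    list7 = [2, 5, 1]
    print(sorted(list7))  # [1, 2, 5] -- a new list
    print(list7)          # [2, 5, 1] -- original unchanged
    print(list7.sort())   # None -- the sort happened in place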
/base-generator/Demo.py:
--------------------------------------------------------------------------------
1 | list1 = [x*x for x in range(10)]
2 | print(list1)
3 | # careful with the list2 line below: it would try to build the entire list in memory, so it stays commented out
4 | # list2 = [x*x for x in range(1000000000000000000000000)]
5 |
6 | generator1 = (x*x for x in range(1000000000000000000000000))
7 | print(generator1)
8 | print(type(generator1))
9 |
10 | generator2 = (x*x for x in range(3))
11 | print(next(generator2))
12 | print(next(generator2))
13 | print(next(generator2))
14 | # print(next(generator2))  # a fourth next() would raise StopIteration
15 |
16 | generator3 = (x*x for x in range(5))
17 | for index in generator3:
18 | print(index)
19 |
20 | def print_a(max):
21 | i = 0
22 | while i < max:
23 | i += 1
24 | yield i
25 |
26 | a = print_a(10)
27 | print(a)
28 | print(type(a))
29 |
30 | print(next(a))
31 | print(next(a))
32 | print(next(a))
33 | print(next(a))
34 |
35 | print(a.__next__())
36 | print(a.__next__())
37 |
38 | def print_b(max):
39 | i = 0
40 | while i < max:
41 | i += 1
42 | args = yield i
43 |         print('received argument: ' + args)
44 |
45 | b = print_b(20)
46 | print(next(b))
47 | print(b.send('Python'))
48 |
49 | def print_c():
50 | while True:
51 |         print('running A')
52 | yield None
53 | def print_d():
54 | while True:
55 |         print('running B')
56 | yield None
57 |
58 | c = print_c()
59 | d = print_d()
60 | while True:  # the two generators take turns forever; interrupt with Ctrl+C
61 | c.__next__()
62 | d.__next__()
--------------------------------------------------------------------------------
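One generator feature the demo above does not touch: "yield from", which delegates to a sub-generator and forwards next()/send() to it transparently. A minimal sketch:

    def chain(*iterables):
        # delegate to each iterable in turn
        for it in iterables:
            yield from it

    print(list(chain([1, 2], (3, 4))))  # [1, 2, 3, 4]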
/base-operator/Demo-1.py:
--------------------------------------------------------------------------------
1 | # Companion code for 《小白学 Python(6):基础运算符(下)》 (Python for beginners (6): basic operators, part 2)
2 |
3 | a = 10
4 | b = 20
5 |
6 | c = a + b
7 | print("value of c = a + b:", c)
8 |
9 | c += a
10 | print("value of c += a:", c)
11 |
12 | c *= a
13 | print("value of c *= a:", c)
14 |
15 | c /= a
16 | print("value of c /= a:", c)
17 |
18 | c = 2
19 | c %= a
20 | print("value of c %= a:", c)
21 |
22 | c **= a
23 | print("value of c **= a:", c)
24 |
25 | c //= a
26 | print("value of c //= a:", c)
27 |
28 | print(True and True)
29 | # True
30 | print(True and False)
31 | # False
32 | print(True or True)
33 | # True
34 | print(True or False)
35 | # True
36 | print(False or False)
37 | # False
38 | print(not True)
39 | # False
40 | print(not False)
41 | # True
42 |
43 | s = "asdfghjkl"  # renamed from str so the built-in str type is not shadowed
44 |
45 | if 'a' in s:
46 |     print("'a' is in string s")
47 | else:
48 |     print("'a' is not in string s")
49 |
50 | if 'a' not in s:
51 |     print("'a' is not in string s")
52 | else:
53 |     print("'a' is in string s")
54 |
55 | a = 20
56 | b = 20
57 |
58 | if a is b:
59 |     print("a and b share the same identity")
60 | else:
61 |     print("a and b do not share the same identity")
62 |
63 | if id(a) == id(b):
64 |     print("a and b share the same identity")
65 | else:
66 |     print("a and b do not share the same identity")
67 |
68 | # change the value of b
69 | b = 30
70 | if a is b:
71 |     print("a and b share the same identity")
72 | else:
73 |     print("a and b do not share the same identity")
74 |
75 | if a is not b:
76 |     print("a and b do not share the same identity")
77 | else:
78 |     print("a and b share the same identity")
--------------------------------------------------------------------------------
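Why "a is b" held for 20 above: CPython interns the small integers -5 through 256, so every variable bound to 20 points at the same object. That is an implementation detail, not a language guarantee, which is why values are compared with == and identity with is. Outside the cached range the identity check becomes unreliable:

    x = 257
    y = 257
    print(x == y)  # True
    print(x is y)  # not guaranteed; may be False, since 257 is outside the small-int cache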
/spider-blog/报表sql.sql:
--------------------------------------------------------------------------------
1 | -- Juejin ranks by JPower (掘力值) rather than by position, so the rank delta extracted here comes out negative
2 | SELECT a.read_num - (
3 | SELECT b.read_num
4 | FROM spider_data b
5 | WHERE b.plantform = a.plantform
6 | AND DATE_FORMAT(b.create_date, '%Y-%m-%d') = date_sub(DATE_FORMAT(a.create_date, '%Y-%m-%d'), INTERVAL 1 DAY)
7 | ORDER BY b.create_date DESC LIMIT 1
8 | ) AS read_num, a.fans_num - (
9 | SELECT b.fans_num
10 | FROM spider_data b
11 | WHERE b.plantform = a.plantform
12 | AND DATE_FORMAT(b.create_date, '%Y-%m-%d') = date_sub(DATE_FORMAT(a.create_date, '%Y-%m-%d'), INTERVAL 1 DAY)
13 | ORDER BY b.create_date DESC LIMIT 1
14 | ) AS fans_num
15 | , a.like_num - (
16 | SELECT b.like_num
17 | FROM spider_data b
18 | WHERE b.plantform = a.plantform
19 | AND DATE_FORMAT(b.create_date, '%Y-%m-%d') = date_sub(DATE_FORMAT(a.create_date, '%Y-%m-%d'), INTERVAL 1 DAY)
20 | ORDER BY b.create_date DESC LIMIT 1
21 | ) AS like_num, (
22 | SELECT b.rank_num
23 | FROM spider_data b
24 | WHERE b.plantform = a.plantform
25 | AND DATE_FORMAT(b.create_date, '%Y-%m-%d') = date_sub(DATE_FORMAT(a.create_date, '%Y-%m-%d'), INTERVAL 1 DAY)
26 | ORDER BY b.create_date DESC LIMIT 1
27 | ) - a.rank_num AS rank_num
28 | , a.create_date,a.plantform
29 | -- the oversized LIMIT forces MySQL to keep the ORDER BY inside the derived table
30 | FROM (SELECT * FROM spider_data ORDER BY create_date DESC LIMIT 1000000000000000) a
31 | GROUP BY DATE_FORMAT(a.create_date, '%Y-%m-%d'), a.plantform
32 | ORDER BY a.create_date DESC;
--------------------------------------------------------------------------------
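Each output column above re-runs a correlated subquery against the previous day's snapshot. On MySQL 8.0+ the same day-over-day deltas can be sketched far more cheaply with the LAG() window function; this rewrite assumes one snapshot per plantform per day:

    SELECT read_num - LAG(read_num) OVER w AS read_num,
           fans_num - LAG(fans_num) OVER w AS fans_num,
           like_num - LAG(like_num) OVER w AS like_num,
           LAG(rank_num) OVER w - rank_num AS rank_num,
           create_date, plantform
    FROM spider_data
    WINDOW w AS (PARTITION BY plantform ORDER BY create_date)
    ORDER BY create_date DESC;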
/spider-blog/tongji/src/main/resources/application.yml:
--------------------------------------------------------------------------------
1 | server:
2 | port: 8080
3 | spring:
4 | application:
5 | name: tongji
6 | thymeleaf:
7 |     # disable the Thymeleaf cache while developing, otherwise template changes are not picked up live
8 |     cache: false
9 |     # check that the template exists before rendering it
10 |     check-template-location: true
11 |     # Content-Type value.
12 |     servlet:
13 |       content-type: text/html
14 |     # enable Thymeleaf view resolution for Spring MVC
15 |     enabled: true
16 |     # Template encoding.
17 |     encoding: UTF-8
18 |     # disable strict template validation
19 |     mode: LEGACYHTML5
20 | # Prefix that gets prepended to view names when building a URL.
21 | prefix: classpath:/templates/
22 | # Suffix that gets appended to view names when building a URL.
23 | suffix: .html
24 | datasource:
25 | url: jdbc:mysql://ip:port/blog_data?serverTimezone=Asia/Shanghai&useUnicode=true&characterEncoding=UTF-8&useSSL=false
26 | username: root
27 | password: xxxxxxxxxxx
28 | driver-class-name: com.mysql.cj.jdbc.Driver
29 | type: com.zaxxer.hikari.HikariDataSource
30 | hikari:
31 | auto-commit: true
32 | minimum-idle: 2
33 | idle-timeout: 60000
34 | connection-timeout: 30000
35 | max-lifetime: 1800000
36 | pool-name: DatebookHikariCP
37 | maximum-pool-size: 5
38 | # MyBatis configuration; SQL log printing is set up via mybatis-config.xml
39 | mybatis:
40 | type-aliases-package: com.geekdigging.tongji.model
41 | config-location: classpath:mybatis/mybatis-config.xml
42 | mapper-locations: classpath:mybatis/mapper/*.xml
--------------------------------------------------------------------------------
/python-spider/database-docker-conf/mysql/my.cnf:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
2 | #
3 | # This program is free software; you can redistribute it and/or modify
4 | # it under the terms of the GNU General Public License, version 2.0,
5 | # as published by the Free Software Foundation.
6 | #
7 | # This program is also distributed with certain software (including
8 | # but not limited to OpenSSL) that is licensed under separate terms,
9 | # as designated in a particular file or component or in included license
10 | # documentation. The authors of MySQL hereby grant you an additional
11 | # permission to link the program and your derivative works with the
12 | # separately licensed software that they have included with MySQL.
13 | #
14 | # This program is distributed in the hope that it will be useful,
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 | # GNU General Public License, version 2.0, for more details.
18 | #
19 | # You should have received a copy of the GNU General Public License
20 | # along with this program; if not, write to the Free Software
21 | # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 |
23 | !includedir /etc/mysql/conf.d/
24 | !includedir /etc/mysql/mysql.conf.d/
25 |
26 | sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
--------------------------------------------------------------------------------
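A version note on the last line: NO_AUTO_CREATE_USER was removed in MySQL 8.0, so this sql_mode only parses on MySQL 5.7; the 8.0-compatible equivalent simply drops that flag:

    sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION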
/python-spider/bs4-demo/bs4_demo1.py:
--------------------------------------------------------------------------------
1 | from bs4 import BeautifulSoup
2 |
3 | html_doc = """
4 | <html><head><title>The Dormouse's story</title></head>
5 | <body>
6 | <p class="title"><b>The Dormouse's story</b></p>
7 |
8 | <p class="story">Once upon a time there were three little sisters; and their names were
9 | <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
10 | <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
11 | <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
12 | and they lived at the bottom of a well.</p>
13 |
14 | <p class="story">...</p>
15 | """
16 |
17 | soup = BeautifulSoup(html_doc, 'lxml')
18 |
19 | print(soup.find_all(name="a"))
20 | print(type(soup.find_all(name="a")[0]))
21 |
22 | for a in soup.find_all(name="a"):
23 |     print(a.string)
24 |
25 | print(soup.find_all(attrs={'id': 'link1'}))
26 | print(soup.find_all(attrs={'id': 'link2'}))
27 | print(type(soup.find_all(attrs={'id': 'link1'})))
28 | print(type(soup.find_all(attrs={'id': 'link2'})))
29 |
30 | import re
31 |
32 | print(soup.find_all(text=re.compile('sisters')))
33 |
34 | print(soup.find_all(id='link1'))
35 | print(soup.find_all(class_='title'))
36 |
37 | print(soup.find_all(href=re.compile("elsie"), id='link1'))
38 |
39 | print(soup.find(name="a"))
40 | print(type(soup.find(name="a")))
41 |
42 | print(soup.select('#link1'))
43 | print(type(soup.select('#link1')[0]))
44 | print(soup.select('.story .sister'))
--------------------------------------------------------------------------------
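A naming note for the regex search above: find_all(text=...) has been a deprecated alias of the string argument since Beautiful Soup 4.4, so the modern spelling of line 32 is:

    print(soup.find_all(string=re.compile('sisters')))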
/python-opencv/blog14-pyramid/demo-pyramid.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv
2 |
3 | # Gaussian pyramid
4 | def gaussian_pyramid(image):
5 |     level = 3  # number of pyramid levels
6 |     temp = image.copy()  # work on a copy of the input
7 |     gaussian_images = []  # collected pyramid levels
8 |     for i in range(level):
9 |         dst = cv.pyrDown(temp)  # Gaussian smoothing, then downsampling (halves both dimensions)
10 |         gaussian_images.append(dst)
11 |         cv.imshow("gaussian" + str(i), dst)
12 |         temp = dst.copy()
13 |     return gaussian_images
14 |
15 |
16 | # Laplacian pyramid
17 | def laplacian_pyramid(image):
18 |     gaussian_images = gaussian_pyramid(image)  # built on top of the Gaussian pyramid levels
19 |     level = len(gaussian_images)
20 |     for i in range(level - 1, -1, -1):
21 |         if (i - 1) < 0:
22 |             expand = cv.pyrUp(gaussian_images[i], dstsize=image.shape[:2])
23 |             laplacian = cv.subtract(image, expand)
24 |             # show the difference image
25 |             cv.imshow("laplacian_down_" + str(i), laplacian)
26 |         else:
27 |             expand = cv.pyrUp(gaussian_images[i], dstsize=gaussian_images[i - 1].shape[:2])
28 |             laplacian = cv.subtract(gaussian_images[i - 1], expand)
29 |             # show the difference image
30 |             cv.imshow("laplacian_down_" + str(i), laplacian)
31 |
32 |
33 | src = cv.imread('maliao.jpg')
34 | print(src.shape)
35 | # resize to a square first; see the note below on pyrUp's dstsize argument order
36 | input_image = cv.resize(src, (560, 560))
37 | # WINDOW_NORMAL would allow free resizing of the window
38 | cv.namedWindow('input_image', cv.WINDOW_AUTOSIZE)
39 | cv.imshow('input_image', input_image)
40 | laplacian_pyramid(input_image)
41 | cv.waitKey(0)
42 | cv.destroyAllWindows()
--------------------------------------------------------------------------------
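The square resize in the script above works around an argument-order pitfall rather than a real requirement: cv.pyrUp's dstsize expects an OpenCV (width, height) pair, while numpy's shape[:2] is (height, width), so passing shape[:2] only lines up when the image is square. A sketch of the order-safe variant that would lift the restriction:

    # inside laplacian_pyramid(), swap shape into OpenCV's (width, height) order
    h, w = gaussian_images[i - 1].shape[:2]
    expand = cv.pyrUp(gaussian_images[i], dstsize=(w, h))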