├── toutiao
├── nba
│ ├── __init__.py
│ ├── config.py
│ └── __pycache__
│ │ ├── config.cpython-36.pyc
│ │ └── __init__.cpython-36.pyc
├── readme.txt
└── .idea
│ ├── modules.xml
│ ├── misc.xml
│ └── toutiao.iml
├── douban
├── douban
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── items.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── settings.cpython-36.pyc
│ │ ├── pipelines.cpython-36.pyc
│ │ └── middlewares.cpython-36.pyc
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── movie.cpython-36.pyc
│ │ │ └── __init__.cpython-36.pyc
│ │ └── __init__.py
│ ├── items.py
│ └── pipelines.py
├── begin.py
├── readme
├── .idea
│ ├── modules.xml
│ ├── misc.xml
│ └── douban.iml
└── scrapy.cfg
├── python
├── .idea
│ ├── py2
│ │ ├── excel.csv
│ │ ├── wc2.py
│ │ ├── 三国演义.txt
│ │ ├── img
│ │ │ ├── 548663.jpg
│ │ │ └── 5486633.jpg
│ │ ├── csvs.py
│ │ └── jsons.py
│ ├── py4
│ │ ├── __init__.py
│ │ ├── image1.py
│ │ ├── image
│ │ ├── rank.py
│ │ └── rank1.py
│ ├── __init__.py
│ ├── pachong1
│ │ ├── __init__.py
│ │ ├── mycookie.txt
│ │ ├── login.py
│ │ ├── excepts.py
│ │ ├── loginsina.py
│ │ └── cookie.py
│ ├── study
│ │ ├── __init__.py
│ │ ├── ipget.py
│ │ ├── hehe.pkl
│ │ ├── __pycache__
│ │ │ └── jiandan.cpython-36.pyc
│ │ ├── theieves.py
│ │ ├── haha.txt
│ │ ├── oo.py
│ │ ├── pickles.py
│ │ ├── oo1.py
│ │ ├── oo3.py
│ │ ├── easyguis.py
│ │ ├── mygen.py
│ │ ├── mylist.py
│ │ ├── myIterator.py
│ │ ├── translate2.py
│ │ ├── Retangle.py
│ │ ├── proxy.py
│ │ ├── property.py
│ │ ├── translate.py
│ │ ├── mytimer.py
│ │ ├── file.py
│ │ ├── myproperty.py
│ │ ├── pachong.py
│ │ └── regix.py
│ ├── pachongdemo
│ │ └── __init__.py
│ ├── py3
│ │ ├── heat.jpg
│ │ ├── james.jpg
│ │ ├── sins.py
│ │ ├── sanwei.py
│ │ ├── dikaerheat.py
│ │ └── leida.py
│ ├── exam
│ │ ├── image.png
│ │ ├── Febinaci.py
│ │ ├── ceshi.py
│ │ ├── JumpFloor.py
│ │ ├── JumpFloor2.py
│ │ ├── Retangle.py
│ │ └── __init__.py
│ ├── vcs.xml
│ ├── .idea
│ │ ├── vcs.xml
│ │ ├── modules.xml
│ │ ├── misc.xml
│ │ └── .idea.iml
│ ├── modules.xml
│ ├── misc.xml
│ └── py1
│ │ ├── hannuota.py
│ │ ├── kehequxian.py
│ │ ├── ImageCut.py
│ │ ├── GetName.py
│ │ ├── daxiaoxie.py
│ │ ├── qipan.py
│ │ └── love.py
└── python.iml
├── wangzherongyao
├── __init__.py
├── 1.jpg
├── 2s.jpg
└── 王者荣耀
│ ├── 三太子哪吒.jpg
│ ├── 亚瑟狮心王.jpg
│ ├── 兰陵王隐刃.jpg
│ ├── 典韦穷奇.jpg
│ ├── 刘备皮肤.jpg
│ ├── 刘邦吸血鬼.jpg
│ ├── 后羿精灵王.jpg
│ ├── 墨子龙骑士.jpg
│ ├── 姜子牙皮肤.jpg
│ ├── 孙膑妖精王.jpg
│ ├── 庄周蜃楼王.jpg
│ ├── 张飞皮肤.jpg
│ ├── 德古拉伯爵.jpg
│ ├── 扁鹊星元.jpg
│ ├── 扁鹊炼金王.jpg
│ ├── 日式女巫.jpg
│ ├── 曹操烛龙.jpg
│ ├── 李白范海辛.jpg
│ ├── 杨戬皮肤.jpg
│ ├── 王昭君皮肤.jpg
│ ├── 电玩小子.jpg
│ ├── 白色死神.jpg
│ ├── 白起巫毒师.jpg
│ ├── 芈月重明.jpg
│ ├── 花木兰星元.jpg
│ ├── 街头霸王.jpg
│ ├── 裴擒虎梅西.jpg
│ ├── 赵云忍炎影.jpg
│ ├── 赵云白执事.jpg
│ ├── 赵云皮肤.jpg
│ ├── 达摩拳王.jpg
│ ├── 达摩皮肤.jpg
│ ├── 钟馗皮肤.jpg
│ ├── 铠龙域领主.jpg
│ ├── 韩信白龙吟.jpg
│ ├── 黑帮老大.jpg
│ ├── SNK霸王丸.jpg
│ ├── 亚瑟心灵战警.jpg
│ ├── 亚瑟死亡骑士.jpg
│ ├── 元歌午夜歌剧院.jpg
│ ├── 公孙离花间舞.jpg
│ ├── 公孙离蜜橘之夏.jpg
│ ├── 关羽-冰锋战神.jpg
│ ├── 关羽天启骑士.jpg
│ ├── 关羽龙腾万里.jpg
│ ├── 典韦黄金武士.jpg
│ ├── 刘备汉昭烈帝.jpg
│ ├── 刘备纽约教父.jpg
│ ├── 刘禅天才门将.jpg
│ ├── 刘禅绅士熊喵.jpg
│ ├── 刘禅英喵野望.jpg
│ ├── 刘邦圣殿之光.jpg
│ ├── 司马懿伴生皮肤.jpg
│ ├── 后羿-恶魔猎人.jpg
│ ├── 后羿辉光之辰.jpg
│ ├── 后羿铠甲勇士.jpg
│ ├── 后羿阿尔法小队.jpg
│ ├── 后羿黄金射手座.jpg
│ ├── 吕布圣诞狂欢.jpg
│ ├── 吕布天魔缭乱.jpg
│ ├── 吕布末日机甲.jpg
│ ├── 周瑜海军大将.jpg
│ ├── 周瑜真爱至上.jpg
│ ├── 哪吒逐梦之翼.jpg
│ ├── 墨子进击墨子号.jpg
│ ├── 墨子金属风暴.jpg
│ ├── 夏侯惇乘风破浪.jpg
│ ├── 夏侯惇战争骑士.jpg
│ ├── 大乔伊势女巫.jpg
│ ├── 大乔守护之力.jpg
│ ├── 太乙真人饕餮.jpg
│ ├── 女娲尼罗河女神.jpg
│ ├── 妲己仙境爱丽丝.jpg
│ ├── 妲己女仆咖啡.jpg
│ ├── 妲己少女阿狸.jpg
│ ├── 妲己热情桑巴.jpg
│ ├── 妲己魅力维加斯.jpg
│ ├── 嬴政优雅恋人.jpg
│ ├── 嬴政摇滚巨星.jpg
│ ├── 嬴政暗夜贵公子.jpg
│ ├── 孙尚香末日机甲.jpg
│ ├── 孙尚香水果甜心.jpg
│ ├── 孙尚香沉稳之力.jpg
│ ├── 孙尚香火炮千金.jpg
│ ├── 孙尚香蔷薇恋人.jpg
│ ├── 孙悟空全息碎影.jpg
│ ├── 孙悟空地狱火.jpg
│ ├── 孙悟空美猴王.jpg
│ ├── 孙悟空至尊宝.jpg
│ ├── 孙策海之征途.jpg
│ ├── 孙膑天使之翼.jpg
│ ├── 孙膑未来旅行.jpg
│ ├── 安琪拉心灵骇客.jpg
│ ├── 宫本地狱之眼.jpg
│ ├── 宫本武藏皮肤.jpg
│ ├── 宫本鬼剑武藏.jpg
│ ├── 小乔万圣前夜.jpg
│ ├── 小乔天鹅之梦.jpg
│ ├── 小乔纯白花嫁.jpg
│ ├── 小乔缤纷独角兽.jpg
│ ├── 庄周云端筑梦师.jpg
│ ├── 庄周鲤鱼之梦.jpg
│ ├── 庞统死亡笔记.jpg
│ ├── 廉颇地狱岩魂.jpg
│ ├── 弈星踏雪寻梅.jpg
│ ├── 张良一千零一夜.jpg
│ ├── 张良天堂福音.jpg
│ ├── 张飞乱世虎臣.jpg
│ ├── 扁鹊化身博士.jpg
│ ├── 扁鹊红莲之瞳.jpg
│ ├── 明世隐占星术士.jpg
│ ├── 曹操幽灵船长.jpg
│ ├── 曹操死神来了.jpg
│ ├── 曹操超能战警.jpg
│ ├── 李元芳特种部队.jpg
│ ├── 李元芳逐浪之夏.jpg
│ ├── 李白凤求凰皮肤.jpg
│ ├── 李白千年之狐.jpg
│ ├── 李白敏锐之力.jpg
│ ├── 李白毁灭机甲.jpg
│ ├── 李白皮肤曝光.jpg
│ ├── 杨戬埃及法老.jpg
│ ├── 杨戬永耀之星.jpg
│ ├── 杨玉环霓裳曲.jpg
│ ├── 梦奇美梦成真.jpg
│ ├── 梦见猫新皮肤.jpg
│ ├── 武则天东方不败.jpg
│ ├── 武则天海洋之心.jpg
│ ├── 牛魔制霸全明星.jpg
│ ├── 牛魔西部大镖客.jpg
│ ├── 狂铁命运角斗场.jpg
│ ├── 狄仁杰锦衣卫.jpg
│ ├── 狄仁杰阴阳师.jpg
│ ├── 狄仁杰魔术师.jpg
│ ├── 王昭君凤凰于飞.jpg
│ ├── 王昭君精灵公主.jpg
│ ├── 甄姬冰雪圆舞曲.jpg
│ ├── 甄姬游园惊梦.jpg
│ ├── 甄姬花好人间.jpg
│ ├── 白起白色死神.jpg
│ ├── 盾山极冰防御线.jpg
│ ├── 程咬金功夫厨神.jpg
│ ├── 程咬金爱与正义.jpg
│ ├── 老夫子功夫老勺.jpg
│ ├── 老夫子圣诞老人.jpg
│ ├── 老夫子潮流仙人.jpg
│ ├── 芈月大秦宣太后.jpg
│ ├── 芈月红桃皇后.jpg
│ ├── 花木兰兔女郎.jpg
│ ├── 花木兰剑舞者.jpg
│ ├── 苏烈爱与和平.jpg
│ ├── 蔡文姬舞动绿茵.jpg
│ ├── 蔡文姬蔷薇王座.jpg
│ ├── 虞姬凯尔特女王.jpg
│ ├── 虞姬加勒比小姐.jpg
│ ├── 虞姬霸王别姬.jpg
│ ├── 裴擒虎街头旋风.jpg
│ ├── 诸葛亮掌控之力.jpg
│ ├── 诸葛亮暗鸦之灵.jpg
│ ├── 诸葛亮武陵仙君.jpg
│ ├── 诸葛亮海军少将.jpg
│ ├── 貂蝉仲夏夜之梦.jpg
│ ├── 貂蝉圣诞恋歌.jpg
│ ├── 貂蝉异域舞娘.jpg
│ ├── 貂蝉逐梦之音.jpg
│ ├── 貂蝉金色仲夏夜.jpg
│ ├── 赵云嘻哈天王.jpg
│ ├── 赵云引擎之心.jpg
│ ├── 赵云未来纪元.jpg
│ ├── 赵云皇家上将.jpg
│ ├── 达摩大发明家.jpg
│ ├── 钟无艳海滩丽影.jpg
│ ├── 钟无艳王者之锤.jpg
│ ├── 钟无艳生化警戒.jpg
│ ├── 铠曙光守护者.jpg
│ ├── 阿轲暗夜猫娘.jpg
│ ├── 阿轲爱心护理.jpg
│ ├── 阿轲致命风华.jpg
│ ├── 雅典娜冰冠公主.jpg
│ ├── 雅典娜埃及艳后.jpg
│ ├── 雅典娜战争女神.jpg
│ ├── 雅典娜神奇女侠.jpg
│ ├── 露娜哥特玫瑰.jpg
│ ├── 露娜圣辉骑士.jpg
│ ├── 露娜紫霞仙子.jpg
│ ├── 露娜绯红之刃.jpg
│ ├── 韩信教廷特使.jpg
│ ├── 韩信街头霸王.jpg
│ ├── 韩信逐梦之影.jpg
│ ├── 项羽帝国元帅.jpg
│ ├── 项羽海滩派对.jpg
│ ├── 项羽职棒王牌.jpg
│ ├── 项羽苍穹之光.jpg
│ ├── 项羽霸王别姬.jpg
│ ├── 高渐离死亡摇滚.jpg
│ ├── 高渐离金属狂潮.jpg
│ ├── 鬼谷子幻乐之宴.jpg
│ ├── 黄忠芝加哥教父.jpg
│ ├── 龙且海军少将.jpg
│ ├── 东皇太一东海龙王.jpg
│ ├── 兰陵王暗隐狩猎者.jpg
│ ├── 墨子进击的墨子号.jpg
│ ├── 太乙真人圆桌骑士.jpg
│ ├── 孙尚香杀手不太冷.jpg
│ ├── 孙悟空西部大镖客.jpg
│ ├── 安琪拉-电子纪元.jpg
│ ├── 安琪拉玩偶对对碰.jpg
│ ├── 安琪拉魔法小厨娘.jpg
│ ├── 宫本武藏万象初新.jpg
│ ├── 干将莫邪第七人偶.jpg
│ ├── 成吉思汗维京掠夺者.jpg
│ ├── 李元芳黑猫爱糖果.jpg
│ ├── 狄仁杰超时空战士.jpg
│ ├── 王昭君幻想奇妙夜皮肤.jpg
│ ├── 白起无畏之灵-狰.jpg
│ ├── 百里守约全军出击.jpg
│ ├── 百里守约特工魅影.jpg
│ ├── 百里守约绝影神枪.jpg
│ ├── 百里玄策威尼斯狂欢.jpg
│ ├── 程咬金华尔街大亨.jpg
│ ├── 程咬金星际陆战队.jpg
│ ├── 米莱狄精准探案法.jpg
│ ├── 花木兰水晶猎龙者.jpg
│ ├── 花木兰青春决赛季.jpg
│ ├── 诸葛亮-冬日舞会.jpg
│ ├── 诸葛亮星航指挥官.jpg
│ ├── 诸葛亮黄金分割率.jpg
│ ├── 马可波罗激情绿茵.jpg
│ ├── 鲁班七号星空梦想.jpg
│ ├── 鲁班七号木偶奇遇记.jpg
│ ├── 鲁班七号电玩小子.jpg
│ ├── 鲁班七号福禄兄弟.jpg
│ └── 兰陵王隐刃重做造型曝光.jpg
├── Images360II
├── images360
│ ├── __init__.py
│ ├── spiders
│ │ ├── __init__.py
│ │ └── images.py
│ └── items.py
├── .gitignore
├── .DS_Store
├── README.md
├── begin.py
└── scrapy.cfg
├── tutorial
├── tutorial
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── items.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── pipelines.cpython-36.pyc
│ │ └── settings.cpython-36.pyc
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── quotes.cpython-36.pyc
│ │ │ └── __init__.cpython-36.pyc
│ │ ├── __init__.py
│ │ └── quotes.py
│ └── items.py
├── requirements.txt
├── Dockerfile
├── .idea
│ ├── modules.xml
│ ├── misc.xml
│ ├── tutorial.iml
│ └── inspectionProfiles
│ │ └── Project_Default.xml
└── scrapy.cfg
├── python3pra
├── .idea
│ ├── proxypool
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── db.cpython-36.pyc
│ │ │ ├── api.cpython-36.pyc
│ │ │ ├── error.cpython-36.pyc
│ │ │ ├── utils.cpython-36.pyc
│ │ │ ├── crawler.cpython-36.pyc
│ │ │ ├── getter.cpython-36.pyc
│ │ │ ├── setting.cpython-36.pyc
│ │ │ ├── tester.cpython-36.pyc
│ │ │ └── scheduler.cpython-36.pyc
│ │ ├── error.py
│ │ ├── run.py
│ │ ├── importer.py
│ │ ├── setting.py
│ │ ├── api.py
│ │ ├── utils.py
│ │ ├── getter.py
│ │ └── scheduler.py
│ ├── part5
│ │ ├── test.csv
│ │ ├── __init__.py
│ │ ├── redise.py
│ │ ├── mysqlc.py
│ │ ├── csvs.py
│ │ └── zhihu.py
│ ├── __init__.py
│ ├── part10
│ │ ├── __init__.py
│ │ └── logingithub2.py
│ ├── part11
│ │ └── __init__.py
│ ├── part3
│ │ ├── __init__.py
│ │ ├── github.ico
│ │ ├── regix.py
│ │ ├── github.py
│ │ ├── postex.py
│ │ ├── demo.py
│ │ └── userequests.py
│ ├── part6
│ │ └── __init__.py
│ ├── part7
│ │ ├── __init__.py
│ │ ├── splashapi.py
│ │ ├── executejs.py
│ │ ├── fb.py
│ │ ├── Cookies.py
│ │ ├── actionchain.py
│ │ ├── select.py
│ │ ├── getattr.py
│ │ ├── taobao.py
│ │ ├── seleniume.py
│ │ └── ceshi.py
│ ├── part9
│ │ ├── __init__.py
│ │ ├── seleniumproxy.py
│ │ ├── requestproxy.py
│ │ └── urlproxy.py
│ ├── proxypool1
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── api.cpython-36.pyc
│ │ │ ├── db.cpython-36.pyc
│ │ │ ├── error.cpython-36.pyc
│ │ │ ├── getter.cpython-36.pyc
│ │ │ ├── tester.cpython-36.pyc
│ │ │ ├── utils.cpython-36.pyc
│ │ │ ├── crawler.cpython-36.pyc
│ │ │ ├── scheduler.cpython-36.pyc
│ │ │ └── setting.cpython-36.pyc
│ │ ├── error.py
│ │ ├── run.py
│ │ ├── setting.py
│ │ ├── api.py
│ │ ├── utils.py
│ │ ├── getter.py
│ │ └── scheduler.py
│ ├── weixingongzhonghao
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── db.cpython-36.pyc
│ │ │ ├── config.cpython-36.pyc
│ │ │ ├── mysql.cpython-36.pyc
│ │ │ └── request.cpython-36.pyc
│ │ ├── config.py
│ │ ├── request.py
│ │ ├── mysql.py
│ │ ├── weixin.py
│ │ └── db.py
│ ├── part8
│ │ ├── Code.jpg
│ │ ├── captcha1.png
│ │ ├── captcha2.png
│ │ └── zhiwang.py
│ ├── ceshi
│ │ └── __init__.py
│ ├── vcs.xml
│ ├── .idea
│ │ ├── vcs.xml
│ │ ├── modules.xml
│ │ ├── misc.xml
│ │ └── .idea.iml
│ ├── misc.xml
│ └── modules.xml
└── python3pra.iml
├── scrapychinacom
├── scrapychinacom
│ ├── __init__.py
│ ├── configs
│ │ └── __init__.py
│ ├── __pycache__
│ │ ├── urls.cpython-36.pyc
│ │ ├── items.cpython-36.pyc
│ │ ├── loaders.cpython-36.pyc
│ │ ├── rules.cpython-36.pyc
│ │ ├── utils.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── pipelines.cpython-36.pyc
│ │ └── settings.cpython-36.pyc
│ ├── urls.py
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── china.cpython-36.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ └── universal.cpython-36.pyc
│ │ ├── __init__.py
│ │ └── china.py
│ ├── utils.py
│ ├── items.py
│ ├── loaders.py
│ ├── rules.py
│ └── pipelines.py
├── begin.py
├── readme.txt
├── .idea
│ ├── misc.xml
│ ├── modules.xml
│ └── scrapychinacom.iml
├── scrapy.cfg
└── run.py
├── scrapytoutiao
├── scrapytoutiao
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── items.cpython-36.pyc
│ │ ├── links.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── pipelines.cpython-36.pyc
│ │ ├── settings.cpython-36.pyc
│ │ └── middlewares.cpython-36.pyc
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ └── toutiao.cpython-36.pyc
│ │ ├── __init__.py
│ │ ├── toutiao.py
│ │ └── ghostdriver.log
│ ├── items.py
│ ├── links.py
│ ├── pipelines.py
│ └── ghostdriver.log
├── README.md
├── readme
├── begin.py
├── .idea
│ ├── modules.xml
│ ├── misc.xml
│ └── scrapytoutiao.iml
└── scrapy.cfg
├── scrapysplashtest
├── scrapysplashtest
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── items.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── settings.cpython-36.pyc
│ │ └── pipelines.cpython-36.pyc
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── taobao.cpython-36.pyc
│ │ │ └── __init__.cpython-36.pyc
│ │ └── __init__.py
│ ├── items.py
│ └── pipelines.py
├── readme
├── begin.py
├── .idea
│ ├── misc.xml
│ ├── modules.xml
│ └── scrapysplashtest.iml
└── scrapy.cfg
├── scrapydownloadertest
├── scrapydownloadertest
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ ├── settings.cpython-36.pyc
│ │ └── middlewares.cpython-36.pyc
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── httpbin.cpython-36.pyc
│ │ │ └── __init__.cpython-36.pyc
│ │ ├── __init__.py
│ │ └── httpbin.py
│ ├── pipelines.py
│ └── items.py
├── begin.py
├── .idea
│ ├── modules.xml
│ ├── misc.xml
│ └── scrapydownloadertest.iml
└── scrapy.cfg
├── scrapyseleniumtest
├── scrapyseleniumtest
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── items.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── pipelines.cpython-36.pyc
│ │ ├── settings.cpython-36.pyc
│ │ └── middlewares.cpython-36.pyc
│ ├── spiders
│ │ ├── __pycache__
│ │ │ ├── taobao.cpython-36.pyc
│ │ │ └── __init__.cpython-36.pyc
│ │ └── __init__.py
│ ├── items.py
│ └── pipelines.py
├── begin.py
├── .idea
│ ├── misc.xml
│ ├── modules.xml
│ └── scrapyseleniumtest.iml
└── scrapy.cfg
├── dataanalysis
├── readme
├── .idea
│ ├── modules.xml
│ ├── misc.xml
│ └── youdao.iml
└── extract_json.py
└── README.md
/toutiao/nba/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/douban/douban/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/python/.idea/py2/excel.csv:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/python/.idea/py4/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/wangzherongyao/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Images360II/images360/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tutorial/tutorial/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tutorial/requirements.txt:
--------------------------------------------------------------------------------
1 | scrapy
2 | pymongo
--------------------------------------------------------------------------------
/python/.idea/__init__.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 |
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Images360II/.gitignore:
--------------------------------------------------------------------------------
1 | /images
2 | .idea
3 | *.pyc
--------------------------------------------------------------------------------
/python/.idea/pachong1/__init__.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 |
--------------------------------------------------------------------------------
/python/.idea/study/__init__.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 |
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/configs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scrapysplashtest/readme:
--------------------------------------------------------------------------------
1 | Crawl Taobao products with Scrapy integrated with Splash
2 |
--------------------------------------------------------------------------------
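A minimal sketch of the Scrapy-plus-Splash pattern the readme describes, assuming the scrapy-splash plugin and a Splash instance on localhost:8050. The spider name, URL, and selector below are illustrative placeholders, not this project's actual spider.

import scrapy
from scrapy_splash import SplashRequest  # assumes scrapy-splash is installed

class TaobaoSketchSpider(scrapy.Spider):
    name = 'taobao_sketch'  # hypothetical spider name

    def start_requests(self):
        # Render the search page in Splash so JavaScript-built results exist
        # in the HTML that Scrapy receives.
        yield SplashRequest('https://s.taobao.com/search?q=ipad',
                            callback=self.parse, args={'wait': 5})

    def parse(self, response):
        # Placeholder selector; the real product markup differs.
        for title in response.css('.title::text').getall():
            yield {'title': title.strip()}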
/python/.idea/pachongdemo/__init__.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 |
--------------------------------------------------------------------------------
/python/.idea/study/ipget.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 | # Fetch proxy IPs
--------------------------------------------------------------------------------
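ipget.py is only a stub; a minimal sketch of what its comment describes could look like the following. The listing URL is left as a parameter and the regex simply grabs anything shaped like ip:port, so treat this as an assumption-laden outline rather than the project's code.

import re
import requests

def fetch_proxies(list_url):
    # Pull candidate ip:port strings out of a proxy-listing page.
    html = requests.get(list_url, timeout=10).text
    return re.findall(r'\d{1,3}(?:\.\d{1,3}){3}:\d{2,5}', html)

def is_alive(proxy):
    # Keep a proxy only if a test request routed through it succeeds.
    try:
        r = requests.get('http://httpbin.org/ip', timeout=5,
                         proxies={'http': 'http://' + proxy})
        return r.status_code == 200
    except requests.RequestException:
        return False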
/scrapytoutiao/README.md:
--------------------------------------------------------------------------------
1 | # Python3Spider
2 | Small projects from teaching myself web scraping
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part5/test.csv:
--------------------------------------------------------------------------------
1 | id,name,age
2 | 10001,jordan,22
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part10/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part11/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part3/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part5/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part6/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part7/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/part9/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/toutiao/readme.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/toutiao/readme.txt
--------------------------------------------------------------------------------
/Images360II/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/Images360II/.DS_Store
--------------------------------------------------------------------------------
/Images360II/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/Images360II/README.md
--------------------------------------------------------------------------------
/douban/begin.py:
--------------------------------------------------------------------------------
1 | from scrapy import cmdline
2 |
3 | cmdline.execute('scrapy crawl movie'.split())
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # -*- coding: utf-8 -*-
3 |
--------------------------------------------------------------------------------
/wangzherongyao/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/1.jpg
--------------------------------------------------------------------------------
/wangzherongyao/2s.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/2s.jpg
--------------------------------------------------------------------------------
/python/.idea/py2/wc2.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/py2/wc2.py
--------------------------------------------------------------------------------
/scrapychinacom/begin.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | 
3 | # scrapy.cmdline only dispatches scrapy subcommands; launch run.py directly.
4 | subprocess.run('python run.py china'.split())
--------------------------------------------------------------------------------
/scrapytoutiao/readme:
--------------------------------------------------------------------------------
1 | Crawl Toutiao with Scrapy integrated with Selenium (simulating its Ajax requests)
2 | Simulate human mouse dragging to page through and scrape multiple pages
3 | Store the scraped data in MongoDB
--------------------------------------------------------------------------------
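A rough sketch of the Scrapy-plus-Selenium arrangement the readme describes: a downloader middleware drives a real browser, scrolls to trigger the feed's Ajax requests, and returns the rendered page to Scrapy. The class name and wait strategy are assumptions, not the contents of this project's middlewares.py.

import time

from scrapy.http import HtmlResponse
from selenium import webdriver

class SeleniumSketchMiddleware:  # hypothetical; enable via DOWNLOADER_MIDDLEWARES
    def __init__(self):
        self.browser = webdriver.Chrome()

    def process_request(self, request, spider):
        self.browser.get(request.url)
        # Scroll to the bottom a few times so the feed fires its Ajax
        # requests and more entries render before we hand back the DOM.
        for _ in range(3):
            self.browser.execute_script(
                'window.scrollTo(0, document.body.scrollHeight)')
            time.sleep(1)  # crude wait; an explicit WebDriverWait is sturdier
        return HtmlResponse(url=request.url, body=self.browser.page_source,
                            encoding='utf-8', request=request)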
/Images360II/begin.py:
--------------------------------------------------------------------------------
1 | # Entry script
2 | from scrapy import cmdline
3 | cmdline.execute('scrapy crawl images'.split())
--------------------------------------------------------------------------------
/python/.idea/py2/三国演义.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/py2/三国演义.txt
--------------------------------------------------------------------------------
/python/.idea/py3/heat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/py3/heat.jpg
--------------------------------------------------------------------------------
/python/.idea/py3/james.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/py3/james.jpg
--------------------------------------------------------------------------------
/scrapychinacom/readme.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/readme.txt
--------------------------------------------------------------------------------
/scrapysplashtest/begin.py:
--------------------------------------------------------------------------------
1 | from scrapy import cmdline
2 |
3 | cmdline.execute('scrapy crawl taobao'.split())
--------------------------------------------------------------------------------
/scrapytoutiao/begin.py:
--------------------------------------------------------------------------------
1 | from scrapy import cmdline
2 |
3 | cmdline.execute('scrapy crawl toutiao'.split())
--------------------------------------------------------------------------------
/douban/readme:
--------------------------------------------------------------------------------
1 | Crawl Douban's most popular movie reviews with Scrapy integrated with Selenium
2 | url='https://movie.douban.com/review/best/'
3 | Store the scraped data in MongoDB
--------------------------------------------------------------------------------
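A minimal sketch of the MongoDB storage step mentioned above, assuming a local mongod and pymongo; the database and collection names are illustrative, not necessarily what this project's pipelines.py uses.

import pymongo

class MongoSketchPipeline:  # hypothetical; enable via ITEM_PIPELINES
    def open_spider(self, spider):
        self.client = pymongo.MongoClient('localhost', 27017)
        self.db = self.client['douban']  # assumed database name

    def process_item(self, item, spider):
        self.db['reviews'].insert_one(dict(item))  # assumed collection name
        return item

    def close_spider(self, spider):
        self.client.close()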
/python/.idea/exam/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/exam/image.png
--------------------------------------------------------------------------------
/python/.idea/study/hehe.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/study/hehe.pkl
--------------------------------------------------------------------------------
/scrapyseleniumtest/begin.py:
--------------------------------------------------------------------------------
1 | from scrapy import cmdline
2 |
3 | cmdline.execute('scrapy crawl taobao'.split())
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/三太子哪吒.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/三太子哪吒.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/亚瑟狮心王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/亚瑟狮心王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/兰陵王隐刃.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/兰陵王隐刃.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/典韦穷奇.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/典韦穷奇.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘备皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘备皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘邦吸血鬼.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘邦吸血鬼.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/后羿精灵王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/后羿精灵王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/墨子龙骑士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/墨子龙骑士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/姜子牙皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/姜子牙皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙膑妖精王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙膑妖精王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/庄周蜃楼王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/庄周蜃楼王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/张飞皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/张飞皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/德古拉伯爵.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/德古拉伯爵.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/扁鹊星元.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/扁鹊星元.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/扁鹊炼金王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/扁鹊炼金王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/日式女巫.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/日式女巫.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/曹操烛龙.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/曹操烛龙.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李白范海辛.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李白范海辛.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/杨戬皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/杨戬皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/王昭君皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/王昭君皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/电玩小子.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/电玩小子.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/白色死神.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/白色死神.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/白起巫毒师.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/白起巫毒师.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/芈月重明.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/芈月重明.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/花木兰星元.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/花木兰星元.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/街头霸王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/街头霸王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/裴擒虎梅西.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/裴擒虎梅西.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云忍炎影.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云忍炎影.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云白执事.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云白执事.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/达摩拳王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/达摩拳王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/达摩皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/达摩皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/钟馗皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/钟馗皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/铠龙域领主.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/铠龙域领主.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/韩信白龙吟.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/韩信白龙吟.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/黑帮老大.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/黑帮老大.jpg
--------------------------------------------------------------------------------
/python/.idea/py2/img/548663.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/py2/img/548663.jpg
--------------------------------------------------------------------------------
/python3pra/.idea/part8/Code.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/part8/Code.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/SNK霸王丸.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/SNK霸王丸.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/亚瑟心灵战警.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/亚瑟心灵战警.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/亚瑟死亡骑士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/亚瑟死亡骑士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/元歌午夜歌剧院.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/元歌午夜歌剧院.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/公孙离花间舞.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/公孙离花间舞.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/公孙离蜜橘之夏.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/公孙离蜜橘之夏.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/关羽-冰锋战神.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/关羽-冰锋战神.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/关羽天启骑士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/关羽天启骑士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/关羽龙腾万里.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/关羽龙腾万里.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/典韦黄金武士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/典韦黄金武士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘备汉昭烈帝.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘备汉昭烈帝.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘备纽约教父.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘备纽约教父.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘禅天才门将.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘禅天才门将.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘禅绅士熊喵.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘禅绅士熊喵.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘禅英喵野望.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘禅英喵野望.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/刘邦圣殿之光.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/刘邦圣殿之光.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/司马懿伴生皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/司马懿伴生皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/后羿-恶魔猎人.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/后羿-恶魔猎人.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/后羿辉光之辰.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/后羿辉光之辰.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/后羿铠甲勇士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/后羿铠甲勇士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/后羿阿尔法小队.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/后羿阿尔法小队.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/后羿黄金射手座.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/后羿黄金射手座.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/吕布圣诞狂欢.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/吕布圣诞狂欢.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/吕布天魔缭乱.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/吕布天魔缭乱.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/吕布末日机甲.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/吕布末日机甲.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/周瑜海军大将.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/周瑜海军大将.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/周瑜真爱至上.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/周瑜真爱至上.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/哪吒逐梦之翼.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/哪吒逐梦之翼.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/墨子进击墨子号.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/墨子进击墨子号.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/墨子金属风暴.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/墨子金属风暴.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/夏侯惇乘风破浪.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/夏侯惇乘风破浪.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/夏侯惇战争骑士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/夏侯惇战争骑士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/大乔伊势女巫.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/大乔伊势女巫.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/大乔守护之力.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/大乔守护之力.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/太乙真人饕餮.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/太乙真人饕餮.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/女娲尼罗河女神.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/女娲尼罗河女神.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/妲己仙境爱丽丝.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/妲己仙境爱丽丝.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/妲己女仆咖啡.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/妲己女仆咖啡.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/妲己少女阿狸.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/妲己少女阿狸.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/妲己热情桑巴.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/妲己热情桑巴.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/妲己魅力维加斯.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/妲己魅力维加斯.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/嬴政优雅恋人.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/嬴政优雅恋人.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/嬴政摇滚巨星.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/嬴政摇滚巨星.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/嬴政暗夜贵公子.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/嬴政暗夜贵公子.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙尚香末日机甲.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙尚香末日机甲.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙尚香水果甜心.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙尚香水果甜心.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙尚香沉稳之力.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙尚香沉稳之力.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙尚香火炮千金.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙尚香火炮千金.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙尚香蔷薇恋人.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙尚香蔷薇恋人.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙悟空全息碎影.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙悟空全息碎影.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙悟空地狱火.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙悟空地狱火.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙悟空美猴王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙悟空美猴王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙悟空至尊宝.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙悟空至尊宝.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙策海之征途.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙策海之征途.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙膑天使之翼.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙膑天使之翼.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙膑未来旅行.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙膑未来旅行.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/安琪拉心灵骇客.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/安琪拉心灵骇客.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/宫本地狱之眼.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/宫本地狱之眼.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/宫本武藏皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/宫本武藏皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/宫本鬼剑武藏.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/宫本鬼剑武藏.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/小乔万圣前夜.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/小乔万圣前夜.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/小乔天鹅之梦.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/小乔天鹅之梦.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/小乔纯白花嫁.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/小乔纯白花嫁.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/小乔缤纷独角兽.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/小乔缤纷独角兽.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/庄周云端筑梦师.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/庄周云端筑梦师.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/庄周鲤鱼之梦.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/庄周鲤鱼之梦.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/庞统死亡笔记.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/庞统死亡笔记.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/廉颇地狱岩魂.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/廉颇地狱岩魂.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/弈星踏雪寻梅.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/弈星踏雪寻梅.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/张良一千零一夜.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/张良一千零一夜.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/张良天堂福音.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/张良天堂福音.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/张飞乱世虎臣.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/张飞乱世虎臣.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/扁鹊化身博士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/扁鹊化身博士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/扁鹊红莲之瞳.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/扁鹊红莲之瞳.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/明世隐占星术士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/明世隐占星术士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/曹操幽灵船长.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/曹操幽灵船长.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/曹操死神来了.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/曹操死神来了.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/曹操超能战警.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/曹操超能战警.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李元芳特种部队.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李元芳特种部队.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李元芳逐浪之夏.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李元芳逐浪之夏.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李白凤求凰皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李白凤求凰皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李白千年之狐.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李白千年之狐.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李白敏锐之力.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李白敏锐之力.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李白毁灭机甲.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李白毁灭机甲.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李白皮肤曝光.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李白皮肤曝光.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/杨戬埃及法老.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/杨戬埃及法老.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/杨戬永耀之星.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/杨戬永耀之星.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/杨玉环霓裳曲.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/杨玉环霓裳曲.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/梦奇美梦成真.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/梦奇美梦成真.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/梦见猫新皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/梦见猫新皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/武则天东方不败.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/武则天东方不败.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/武则天海洋之心.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/武则天海洋之心.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/牛魔制霸全明星.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/牛魔制霸全明星.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/牛魔西部大镖客.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/牛魔西部大镖客.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/狂铁命运角斗场.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/狂铁命运角斗场.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/狄仁杰锦衣卫.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/狄仁杰锦衣卫.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/狄仁杰阴阳师.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/狄仁杰阴阳师.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/狄仁杰魔术师.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/狄仁杰魔术师.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/王昭君凤凰于飞.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/王昭君凤凰于飞.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/王昭君精灵公主.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/王昭君精灵公主.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/甄姬冰雪圆舞曲.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/甄姬冰雪圆舞曲.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/甄姬游园惊梦.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/甄姬游园惊梦.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/甄姬花好人间.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/甄姬花好人间.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/白起白色死神.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/白起白色死神.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/盾山极冰防御线.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/盾山极冰防御线.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/程咬金功夫厨神.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/程咬金功夫厨神.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/程咬金爱与正义.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/程咬金爱与正义.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/老夫子功夫老勺.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/老夫子功夫老勺.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/老夫子圣诞老人.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/老夫子圣诞老人.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/老夫子潮流仙人.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/老夫子潮流仙人.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/芈月大秦宣太后.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/芈月大秦宣太后.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/芈月红桃皇后.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/芈月红桃皇后.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/花木兰兔女郎.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/花木兰兔女郎.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/花木兰剑舞者.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/花木兰剑舞者.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/苏烈爱与和平.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/苏烈爱与和平.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/蔡文姬舞动绿茵.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/蔡文姬舞动绿茵.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/蔡文姬蔷薇王座.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/蔡文姬蔷薇王座.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/虞姬凯尔特女王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/虞姬凯尔特女王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/虞姬加勒比小姐.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/虞姬加勒比小姐.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/虞姬霸王别姬.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/虞姬霸王别姬.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/裴擒虎街头旋风.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/裴擒虎街头旋风.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮掌控之力.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮掌控之力.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮暗鸦之灵.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮暗鸦之灵.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮武陵仙君.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮武陵仙君.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮海军少将.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮海军少将.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/貂蝉仲夏夜之梦.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/貂蝉仲夏夜之梦.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/貂蝉圣诞恋歌.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/貂蝉圣诞恋歌.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/貂蝉异域舞娘.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/貂蝉异域舞娘.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/貂蝉逐梦之音.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/貂蝉逐梦之音.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/貂蝉金色仲夏夜.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/貂蝉金色仲夏夜.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云嘻哈天王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云嘻哈天王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云引擎之心.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云引擎之心.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云未来纪元.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云未来纪元.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/赵云皇家上将.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/赵云皇家上将.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/达摩大发明家.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/达摩大发明家.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/钟无艳海滩丽影.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/钟无艳海滩丽影.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/钟无艳王者之锤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/钟无艳王者之锤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/钟无艳生化警戒.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/钟无艳生化警戒.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/铠曙光守护者.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/铠曙光守护者.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/阿轲暗夜猫娘.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/阿轲暗夜猫娘.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/阿轲爱心护理.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/阿轲爱心护理.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/阿轲致命风华.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/阿轲致命风华.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/雅典娜冰冠公主.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/雅典娜冰冠公主.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/雅典娜埃及艳后.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/雅典娜埃及艳后.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/雅典娜战争女神.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/雅典娜战争女神.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/雅典娜神奇女侠.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/雅典娜神奇女侠.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/露娜哥特玫瑰.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/露娜哥特玫瑰.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/露娜圣辉骑士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/露娜圣辉骑士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/露娜紫霞仙子.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/露娜紫霞仙子.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/露娜绯红之刃.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/露娜绯红之刃.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/韩信教廷特使.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/韩信教廷特使.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/韩信街头霸王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/韩信街头霸王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/韩信逐梦之影.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/韩信逐梦之影.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/项羽帝国元帅.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/项羽帝国元帅.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/项羽海滩派对.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/项羽海滩派对.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/项羽职棒王牌.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/项羽职棒王牌.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/项羽苍穹之光.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/项羽苍穹之光.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/项羽霸王别姬.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/项羽霸王别姬.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/高渐离死亡摇滚.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/高渐离死亡摇滚.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/高渐离金属狂潮.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/高渐离金属狂潮.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/鬼谷子幻乐之宴.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/鬼谷子幻乐之宴.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/黄忠芝加哥教父.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/黄忠芝加哥教父.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/龙且海军少将.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/龙且海军少将.jpg
--------------------------------------------------------------------------------
/python/.idea/py2/img/5486633.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/py2/img/5486633.jpg
--------------------------------------------------------------------------------
/python3pra/.idea/part3/github.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/part3/github.ico
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/东皇太一东海龙王.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/东皇太一东海龙王.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/兰陵王暗隐狩猎者.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/兰陵王暗隐狩猎者.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/墨子进击的墨子号.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/墨子进击的墨子号.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/太乙真人圆桌骑士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/太乙真人圆桌骑士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙尚香杀手不太冷.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙尚香杀手不太冷.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/孙悟空西部大镖客.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/孙悟空西部大镖客.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/安琪拉-电子纪元.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/安琪拉-电子纪元.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/安琪拉玩偶对对碰.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/安琪拉玩偶对对碰.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/安琪拉魔法小厨娘.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/安琪拉魔法小厨娘.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/宫本武藏万象初新.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/宫本武藏万象初新.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/干将莫邪第七人偶.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/干将莫邪第七人偶.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/成吉思汗维京掠夺者.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/成吉思汗维京掠夺者.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/李元芳黑猫爱糖果.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/李元芳黑猫爱糖果.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/狄仁杰超时空战士.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/狄仁杰超时空战士.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/王昭君幻想奇妙夜皮肤.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/王昭君幻想奇妙夜皮肤.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/白起无畏之灵-狰.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/白起无畏之灵-狰.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/百里守约全军出击.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/百里守约全军出击.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/百里守约特工魅影.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/百里守约特工魅影.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/百里守约绝影神枪.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/百里守约绝影神枪.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/百里玄策威尼斯狂欢.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/百里玄策威尼斯狂欢.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/程咬金华尔街大亨.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/程咬金华尔街大亨.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/程咬金星际陆战队.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/程咬金星际陆战队.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/米莱狄精准探案法.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/米莱狄精准探案法.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/花木兰水晶猎龙者.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/花木兰水晶猎龙者.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/花木兰青春决赛季.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/花木兰青春决赛季.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮-冬日舞会.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮-冬日舞会.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮星航指挥官.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮星航指挥官.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/诸葛亮黄金分割率.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/诸葛亮黄金分割率.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/马可波罗激情绿茵.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/马可波罗激情绿茵.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/鲁班七号星空梦想.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/鲁班七号星空梦想.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/鲁班七号木偶奇遇记.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/鲁班七号木偶奇遇记.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/鲁班七号电玩小子.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/鲁班七号电玩小子.jpg
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/鲁班七号福禄兄弟.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/鲁班七号福禄兄弟.jpg
--------------------------------------------------------------------------------
/python3pra/.idea/part8/captcha1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/part8/captcha1.png
--------------------------------------------------------------------------------
/python3pra/.idea/part8/captcha2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/part8/captcha2.png
--------------------------------------------------------------------------------
/wangzherongyao/王者荣耀/兰陵王隐刃重做造型曝光.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/wangzherongyao/王者荣耀/兰陵王隐刃重做造型曝光.jpg
--------------------------------------------------------------------------------
/toutiao/nba/config.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | BASE_URL = 'https://www.toutiao.com'
4 |
5 |
6 | # MongoDB connection settings
7 | MONGO_URI = 'localhost'
8 | MONGO_DB = 'news'
--------------------------------------------------------------------------------
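A minimal sketch of how these settings might be consumed by a storage pipeline
with pymongo (the import path and collection name below are assumptions for
illustration, not code from this repo):

    import pymongo

    from nba.config import BASE_URL, MONGO_URI, MONGO_DB  # assumed import path

    # Connect to the MongoDB host named in config.py and select the database.
    client = pymongo.MongoClient(MONGO_URI)
    db = client[MONGO_DB]

    # Hypothetical collection name; store one scraped news item.
    db['nba_news'].insert_one({'title': 'example', 'source': BASE_URL})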
/douban/douban/__pycache__/items.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/__pycache__/items.cpython-36.pyc
--------------------------------------------------------------------------------
/toutiao/nba/__pycache__/config.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/toutiao/nba/__pycache__/config.cpython-36.pyc
--------------------------------------------------------------------------------
/dataanalysis/readme:
--------------------------------------------------------------------------------
1 | 1. Use selenium to drive Youdao Translate for translation (supports any language)
2 |
3 | 2. Do the same with the requests library by constructing the form submission
4 |
5 | 3. Scrape Qunar (去哪儿) independent-travel package data
6 |
7 | 4. Use selenium to scrape Qunar hotel data (can fetch hotel information for all of China and abroad)
8 |
--------------------------------------------------------------------------------
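Item 2 in this readme refers to posting the translation form directly with
requests instead of driving a browser. A minimal sketch under stated
assumptions: the endpoint and form fields here are hypothetical placeholders,
since the real Youdao interface adds signed parameters such as salt and sign.

    import requests

    # Placeholder endpoint and form fields, for illustration only.
    url = 'https://example.com/translate'
    data = {'q': 'hello', 'from': 'auto', 'to': 'zh-CHS'}

    resp = requests.post(url, data=data, timeout=10)
    resp.raise_for_status()
    print(resp.json())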
/douban/douban/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/douban/douban/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/toutiao/nba/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/toutiao/nba/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/douban/douban/__pycache__/pipelines.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/__pycache__/pipelines.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapydownloadertest/begin.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | __author__ = 'WQ'
3 | from scrapy import cmdline
4 |
5 | # Equivalent to running `scrapy crawl httpbin` in this project's root directory.
6 | cmdline.execute("scrapy crawl httpbin".split())
--------------------------------------------------------------------------------
/tutorial/tutorial/__pycache__/items.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/tutorial/tutorial/__pycache__/items.cpython-36.pyc
--------------------------------------------------------------------------------
/douban/douban/__pycache__/middlewares.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/__pycache__/middlewares.cpython-36.pyc
--------------------------------------------------------------------------------
/douban/douban/spiders/__pycache__/movie.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/spiders/__pycache__/movie.cpython-36.pyc
--------------------------------------------------------------------------------
/python/.idea/study/__pycache__/jiandan.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python/.idea/study/__pycache__/jiandan.cpython-36.pyc
--------------------------------------------------------------------------------
/tutorial/tutorial/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/tutorial/tutorial/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/tutorial/tutorial/__pycache__/pipelines.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/tutorial/tutorial/__pycache__/pipelines.cpython-36.pyc
--------------------------------------------------------------------------------
/tutorial/tutorial/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/tutorial/tutorial/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/db.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/db.cpython-36.pyc
--------------------------------------------------------------------------------
/douban/douban/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/douban/douban/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/python/.idea/py3/sins.py:
--------------------------------------------------------------------------------
1 | # Plot a sine curve
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | x = np.arange(0, 2 * np.pi, 0.01)
6 | y = np.sin(x)
7 | plt.plot(x, y)
8 | plt.show()
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/api.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/api.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/error.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/error.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/api.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/api.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/db.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/db.cpython-36.pyc
--------------------------------------------------------------------------------
/tutorial/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6
2 | ENV PATH /usr/local/bin:$PATH
3 | # COPY is preferred over ADD for plain local files
4 | COPY . /code
5 | WORKDIR /code
6 | RUN pip install -r requirements.txt
7 | # Exec form avoids wrapping the crawl in a shell
8 | CMD ["scrapy", "crawl", "quotes"]
--------------------------------------------------------------------------------
/tutorial/tutorial/spiders/__pycache__/quotes.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/tutorial/tutorial/spiders/__pycache__/quotes.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/crawler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/crawler.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/getter.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/getter.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/setting.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/setting.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/tester.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/tester.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/error.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/error.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/getter.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/getter.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/tester.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/tester.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/urls.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/urls.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__pycache__/items.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/__pycache__/items.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__pycache__/links.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/__pycache__/links.cpython-36.pyc
--------------------------------------------------------------------------------
/tutorial/tutorial/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/tutorial/tutorial/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/python/.idea/exam/Febinaci.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 | # Naive recursive Fibonacci with F(1) = F(2) = 1.
3 | def function(n):
4 |     if n == 1 or n == 2:
5 |         return 1
6 |     return function(n - 2) + function(n - 1)
7 |
8 | print(function(7))  # 13
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/__pycache__/scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool/__pycache__/scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/crawler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/crawler.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/scheduler.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/scheduler.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/__pycache__/setting.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/proxypool1/__pycache__/setting.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/items.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/items.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/loaders.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/loaders.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/rules.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/rules.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__pycache__/pipelines.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/__pycache__/pipelines.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/__pycache__/db.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/weixingongzhonghao/__pycache__/db.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/pipelines.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/pipelines.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/urls.py:
--------------------------------------------------------------------------------
1 | def china(start, end):
2 |     """Yield tech.china.com article list-page URLs for pages start..end."""
3 |     for page in range(start, end + 1):
4 |         yield 'http://tech.china.com/articles/index_' + str(page) + '.html'
5 |
--------------------------------------------------------------------------------
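Usage sketch (the import path is an assumption based on the project layout):

    from scrapychinacom.urls import china

    print(list(china(1, 2)))
    # ['http://tech.china.com/articles/index_1.html',
    #  'http://tech.china.com/articles/index_2.html']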
/scrapysplashtest/scrapysplashtest/__pycache__/items.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapysplashtest/scrapysplashtest/__pycache__/items.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/__pycache__/middlewares.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/__pycache__/middlewares.cpython-36.pyc
--------------------------------------------------------------------------------
/python/.idea/study/theieves.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 | # Logic puzzle: print the suspect for whom exactly three of the
3 | # four statements below evaluate to True.
4 | for thief in ['a', 'b', 'c', 'd']:
5 |     true_count = (thief != 'a') + (thief == 'c') + (thief == 'd') + (thief != 'd')
6 |     if true_count == 3:
7 |         print(thief)
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/__pycache__/config.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/weixingongzhonghao/__pycache__/config.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/__pycache__/mysql.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/weixingongzhonghao/__pycache__/mysql.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapysplashtest/scrapysplashtest/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapysplashtest/scrapysplashtest/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/__pycache__/request.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/python3pra/.idea/weixingongzhonghao/__pycache__/request.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/spiders/__pycache__/china.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/spiders/__pycache__/china.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/__pycache__/items.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/__pycache__/items.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/__pycache__/pipelines.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapysplashtest/scrapysplashtest/__pycache__/pipelines.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/spiders/__pycache__/toutiao.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapytoutiao/scrapytoutiao/spiders/__pycache__/toutiao.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/spiders/__pycache__/universal.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapychinacom/scrapychinacom/spiders/__pycache__/universal.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/__pycache__/pipelines.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/__pycache__/pipelines.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/python/.idea/study/haha.txt:
--------------------------------------------------------------------------------
1 | 你好:11111111
2 | 我们:22222222
3 | ================================
4 | 你好:333333333333
5 | 我们:44444444444
6 | ================================
7 | 你好:77777777777
8 | 我们:666666666666
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/__pycache__/middlewares.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/__pycache__/middlewares.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/spiders/__pycache__/taobao.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapysplashtest/scrapysplashtest/spiders/__pycache__/taobao.cpython-36.pyc
--------------------------------------------------------------------------------
/douban/douban/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/error.py:
--------------------------------------------------------------------------------
1 | class PoolEmptyError(Exception):
2 |
3 |     def __init__(self):
4 |         Exception.__init__(self)
5 |
6 |     def __str__(self):
7 |         return repr('代理池已经枯竭')
8 |
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapydownloadertest/scrapydownloadertest/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/__pycache__/settings.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapydownloadertest/scrapydownloadertest/__pycache__/settings.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/spiders/__pycache__/taobao.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/spiders/__pycache__/taobao.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapysplashtest/scrapysplashtest/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/Images360II/images360/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python/.idea/study/oo.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | class Person:
3 |     name="wq"
4 | p=Person()
5 | #print(hasattr(p,'gg'))
6 | setattr(p,'rr','fda')
7 | print(getattr(p,'rr'))
8 | print(dir(Person()))
9 | print(dir(p))
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/__pycache__/middlewares.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapydownloadertest/scrapydownloadertest/__pycache__/middlewares.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapyseleniumtest/scrapyseleniumtest/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/tutorial/tutorial/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/ceshi/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 |
5 | browser=webdriver.PhantomJS()
6 | browser.get('https://www.baidu.com')
7 | print(browser.current_url)
--------------------------------------------------------------------------------
/python3pra/.idea/part3/regix.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import re
4 | content='''hello 1234567
5 | haha
6 | '''
7 | result=re.match(r'^he.*?(\d+).*?haha$',content,re.S)
8 | print(result.group(1))
9 |
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/spiders/__pycache__/httpbin.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapydownloadertest/scrapydownloadertest/spiders/__pycache__/httpbin.cpython-36.pyc
--------------------------------------------------------------------------------
/python3pra/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/spiders/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NGUWQ/Python3Spider/HEAD/scrapydownloadertest/scrapydownloadertest/spiders/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python/.idea/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/part7/splashapi.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import requests
4 | url = 'http://localhost:8050/render.html?url=https://www.baidu.com&wait=5'
5 | response=requests.get(url)
6 | print(response.text)
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python/.idea/exam/ceshi.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from flask import Flask
4 | app=Flask(__name__)
5 | @app.route('/')
6 | def hello():
7 |     return 'hello world'
8 | if __name__=='__main__':
9 |     app.run()
--------------------------------------------------------------------------------
/python/.idea/pachong1/mycookie.txt:
--------------------------------------------------------------------------------
1 | # Netscape HTTP Cookie File
2 | # http://curl.haxx.se/rfc/cookie_spec.html
3 | # This is a generated file! Do not edit.
4 |
5 | weibo.com FALSE / FALSE YF-Ugrow-G0 ad83bc19c1269e709f753b172bddb094
6 |
--------------------------------------------------------------------------------
/python3pra/.idea/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/part5/redise.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from redis import StrictRedis
4 | redis=StrictRedis(host='localhost',port=6379,db=0,password=None)
5 | redis.set('name','Bob')
6 | print(redis.get('name'))
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/python/.idea/exam/JumpFloor.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | '''
3 | A frog can jump up either 1 or 2 steps at a time. How many distinct ways
4 | can it jump up a staircase of n steps?
5 | '''
6 | def function(n):
7 |     if n==1 or n==2:
8 |         return n
9 |     return function(n-2)+function(n-1)
10 | print(function(5))
--------------------------------------------------------------------------------
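Note: JumpFloor.py implements the Fibonacci recurrence (the first jump is either 1 or 2 steps), so the plain recursion is exponential in n. A minimal memoized sketch of the same function (illustrative, not part of the repo):

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def jump_floor(n):
        # ways(n) = ways(n-1) + ways(n-2), with ways(1)=1 and ways(2)=2
        if n == 1 or n == 2:
            return n
        return jump_floor(n - 1) + jump_floor(n - 2)

    print(jump_floor(5))  # 8, the same value function(5) prints above

--------------------------------------------------------------------------------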
/python/.idea/exam/JumpFloor2.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | '''
3 | A frog can jump up 1 step, 2 steps, ... or all the way up n steps at a time.
4 | How many distinct ways can it jump up a staircase of n steps?
5 | '''
6 | def function(n):
7 |     if n==1:
8 |         return n
9 |     return 2*function(n-1)
10 | print(function(4))
--------------------------------------------------------------------------------
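Note: the 2*function(n-1) step in JumpFloor2.py is justified because with jump sizes 1..n, ways(n) = ways(n-1) + ... + ways(1) + 1, which telescopes to 2*ways(n-1) and hence to 2**(n-1). A brute-force cross-check (illustrative only):

    def brute_force(n):
        # sum over every possible size of the first jump
        return 1 if n == 0 else sum(brute_force(n - k) for k in range(1, n + 1))

    print(all(brute_force(n) == 2 ** (n - 1) for n in range(1, 10)))  # True

--------------------------------------------------------------------------------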
/python3pra/.idea/proxypool1/error.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | class PoolEmptyError(Exception):
4 |
5 |     def __init__(self):
6 |         Exception.__init__(self)
7 |
8 |     def __str__(self):
9 |         return repr('代理池已经枯竭')
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/utils.py:
--------------------------------------------------------------------------------
1 | from os.path import realpath, dirname
2 | import json
3 |
4 |
5 | def get_config(name):
6 |     path = dirname(realpath(__file__)) + '/configs/' + name + '.json'
7 |     with open(path, 'r', encoding='utf-8') as f:
8 |         return json.loads(f.read())
--------------------------------------------------------------------------------
/python/.idea/exam/Retangle.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 | '''
3 | A 2x1 domino can be placed horizontally or vertically. In how many ways can
4 | n such dominoes tile a 2xn rectangle with no overlap?
5 | '''
6 |
7 |
8 | def function(n):
9 |     if n == 1 or n == 2:
10 |         return n
11 |     return function(n - 2) + function(n - 1)
12 | print(function(5))
13 |
--------------------------------------------------------------------------------
/python/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/study/pickles.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | #Write a list to a binary file with pickle, then load it back
3 | from pickle import *
4 | l1=[123,3.3,'nihao',[1,'da']]
5 | '''
6 | filename=open('hehe.pkl','wb')
7 | dump(l1,filename)
8 | '''
9 | filename=open('hehe.pkl','rb')
10 | print(load(filename))
11 |
12 | filename.close()
--------------------------------------------------------------------------------
/python/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/douban/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/study/oo1.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 | class mystr(str):
3 |     def __new__(cls,s):
4 |         s=s.upper()
5 |         return str.__new__(cls,s)
6 |     def __init__(self, s):
7 |         self.s = s
8 |     def get(self):
9 |         return self.s
10 | m = mystr('uuuu')
11 | print(m.get())
12 |
--------------------------------------------------------------------------------
/python3pra/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/dataanalysis/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/toutiao/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/tutorial/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/douban/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = douban.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = douban
12 |
--------------------------------------------------------------------------------
/python3pra/.idea/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/douban/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapytoutiao/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/toutiao/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/tutorial/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/tutorial/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = tutorial.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = tutorial
12 |
--------------------------------------------------------------------------------
/Images360II/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.org/en/latest/deploy.html
5 |
6 | [settings]
7 | default = images360.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = images360
12 |
--------------------------------------------------------------------------------
/dataanalysis/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/part7/executejs.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Execute JavaScript to scroll the page to the bottom
4 | from selenium import webdriver
5 | browser=webdriver.Chrome()
6 | browser.get('https://www.zhihu.com/explore')
7 | browser.execute_script('window.scrollTo(0,document.body.scrollHeight)')
8 | browser.execute_script('alert("To Bottom")')
--------------------------------------------------------------------------------
/scrapychinacom/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapychinacom/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapysplashtest/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapytoutiao/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/exam/__init__.py:
--------------------------------------------------------------------------------
1 | # Recursive subset-sum: does some subset of the first n entries of w sum to s?
2 | # Prints the chosen weights as the recursion unwinds.
3 | def f(w, s, n):
4 |     if s == 0:
5 |         return True
6 |     elif (s < 0) or (s > 0 and n < 1):
7 |         return False
8 |     elif f(w, s - w[n - 1], n - 1):
9 |         print(w[n - 1])
10 |         return True
11 |     else:
12 |         return f(w, s, n - 1)
13 | w=[5,7,9,8,6]
14 | print(f(w,9,2))
--------------------------------------------------------------------------------
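Note: a worked trace of the calls above. f(w, 9, 2) only sees the first two weights [5, 7], no subset of which sums to 9, so it prints False; f(w, 9, 5) would print 9 and return True, since the subset {9} works:

    w = [5, 7, 9, 8, 6]
    print(f(w, 9, 2))  # False
    print(f(w, 9, 5))  # prints 9, then True

--------------------------------------------------------------------------------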
/scrapyseleniumtest/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapysplashtest/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapychinacom/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapychinacom.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapychinacom
12 |
--------------------------------------------------------------------------------
/scrapyseleniumtest/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapytoutiao/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapytoutiao.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapytoutiao
12 |
--------------------------------------------------------------------------------
/dataanalysis/extract_json.py:
--------------------------------------------------------------------------------
1 | #提取qunaer_sights.csv文件中的经纬度和销量信息
2 |
3 | import pandas as pd
4 | import json
5 |
6 | df=pd.read_csv('qunaer_sights.csv')
7 | points=[]
8 | df=df[['经度','纬度','月销量']]
9 | for item in df.values:
10 |     points.append({'lng':item[0],'lat':item[1],'count':item[2]})
11 | strs=json.dumps(points)
12 | print(strs)
--------------------------------------------------------------------------------
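Note: the {'lng', 'lat', 'count'} objects built above match the point format the Baidu Maps heatmap overlay consumes. One caveat: df.values yields numpy scalars, and json.dumps rejects numpy integer types, so an explicit cast is the defensive variant (same df as in the script):

    points = [{'lng': float(lng), 'lat': float(lat), 'count': float(cnt)}
              for lng, lat, cnt in df.values]

--------------------------------------------------------------------------------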
/scrapydownloadertest/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapysplashtest/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapysplashtest.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapysplashtest
12 |
--------------------------------------------------------------------------------
/python/.idea/study/oo3.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | class myint(int):
3 |     def __add__(self, other):
4 |         return int.__sub__(self,other)
5 |     def __sub__(self, other):
6 |         return int.__add__(self,other)
7 |     def __iadd__(self, other):
8 |         return int.__add__(self,other)
9 |
10 | a=myint('4')
11 | b=myint('6')
12 | print(a+b)
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapyseleniumtest.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapyseleniumtest
12 |
--------------------------------------------------------------------------------
/python/.idea/pachong1/login.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import urllib.request
3 | import urllib.parse
4 | values={'username':'18872738629','password':'wq1996122418'}
5 | data=urllib.parse.urlencode(values).encode('utf-8')
6 | url='https://weibo.com/login.php'
7 | request=urllib.request.Request(url,data)
8 | response=urllib.request.urlopen(request)
9 | print(response.read())
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html
5 |
6 | [settings]
7 | default = scrapydownloadertest.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = scrapydownloadertest
12 |
--------------------------------------------------------------------------------
/python/python.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapydownloadertest/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/study/easyguis.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import easygui as e
3 | import sys
4 | e.msgbox("haha","hehe")
5 | choice=['愿意','不愿意']
6 | e.choicebox('你愿意嫁给我吗','haha',choices=choice)
7 | e.msgbox('yyyyyyyyyyy',ok_button='enen')
8 | if e.ccbox("do you want",choices=choice):
9 |     e.choicebox('你愿意嫁给我吗','haha',choices=choice)
10 | else:
11 |     sys.exit(0)
12 | e.enterbox()
--------------------------------------------------------------------------------
/python3pra/.idea/part7/fb.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Browser back and forward navigation
4 | import time
5 | from selenium import webdriver
6 |
7 | browser=webdriver.Chrome()
8 | browser.get('https://www.baidu.com/')
9 | browser.get('https://www.zhihu.com/')
10 | browser.get('https://www.taobao.com/')
11 | browser.back()
12 | time.sleep(1)
13 | browser.forward()
14 | browser.close()
--------------------------------------------------------------------------------
/python3pra/python3pra.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | class ScrapydownloadertestPipeline(object):
10 |     def process_item(self, item, spider):
11 |         return item
--------------------------------------------------------------------------------
/python3pra/.idea/part5/mysqlc.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Connect to MySQL, print the server version and create the spiders database
4 | import pymysql
5 | db=pymysql.connect(host='localhost',user='root',password='123',port=3306)
6 | cursor=db.cursor()
7 | cursor.execute('SELECT VERSION()')
8 | data=cursor.fetchone()
9 | print('database version:',data)
10 | cursor.execute("CREATE DATABASE spiders DEFAULT CHARACTER SET utf8")
11 | db.close()
--------------------------------------------------------------------------------
/python3pra/.idea/part9/seleniumproxy.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 |
5 | proxy='119.23.64.49:3128'
6 | chrome_options=webdriver.ChromeOptions()
7 | chrome_options.add_argument('--proxy-server=http://'+proxy)
8 | browser=webdriver.Chrome(chrome_options=chrome_options)
9 | #browser.get('http://httpbin.org/get')
10 | browser.get('https://www.baidu.com')
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/run.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from scheduler import Scheduler
4 | import sys
5 | import io
6 |
7 | sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
8 |
9 |
10 | def main():
11 |     try:
12 |         s = Scheduler()
13 |         s.run()
14 |     except:
15 |         main()
16 |
17 |
18 | if __name__ == '__main__':
19 |     main()
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/run.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from scheduler import Scheduler
4 | import sys
5 | import io
6 |
7 |
8 | sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
9 |
10 |
11 | def main():
12 |     try:
13 |         s = Scheduler()
14 |         s.run()
15 |     except:
16 |         main()
17 |
18 |
19 | if __name__ == '__main__':
20 |     main()
--------------------------------------------------------------------------------
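Note: both run.py files restart the scheduler by calling main() recursively from a bare except, which also swallows KeyboardInterrupt and can eventually exceed Python's recursion limit if Scheduler keeps failing. A loop-based sketch of the same restart-on-error intent (assuming the same Scheduler from scheduler.py):

    from scheduler import Scheduler

    def main():
        while True:
            try:
                Scheduler().run()
            except Exception:
                # restart on any error; unlike a bare except, Ctrl-C still exits
                continue

    if __name__ == '__main__':
        main()

--------------------------------------------------------------------------------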
/scrapydownloadertest/.idea/scrapydownloadertest.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | import scrapy
9 |
10 |
11 | class ScrapydownloadertestItem(scrapy.Item):
12 |     # define the fields for your item here like:
13 |     # name = scrapy.Field()
14 |     pass
15 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item,Field
9 |
10 |
11 | class NBAItem(Item):
12 |     collection='nba'
13 |     title=Field()
14 |     source=Field()
15 |     datetime=Field()
16 |     content=Field()
17 |
--------------------------------------------------------------------------------
/Images360II/images360/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # http://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item, Field
9 |
10 |
11 | class ImageItem(Item):
12 |     collection = table = 'images'
13 |
14 |     id = Field()
15 |     url = Field()
16 |     title = Field()
17 |     thumb = Field()
18 |
--------------------------------------------------------------------------------
/douban/douban/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item,Field
9 |
10 |
11 | class DoubanItem(Item):
12 |     collection='review'
13 |     user=Field()
14 |     title=Field()
15 |     datetime=Field()
16 |     tag=Field()
17 |     content=Field()
18 |
19 |
--------------------------------------------------------------------------------
/python/.idea/study/mygen.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | #Generators and comprehensions
3 | '''
4 | def fibs():
5 |     a=0
6 |     b=1
7 |     while True:
8 |         a,b=b,b+a
9 |         yield a
10 |
11 | for i in fibs():
12 |     if i>100:
13 |         break
14 |     print(i)
15 | for i in fibs():
16 |     if i>200:
17 |         break
18 |     print(i)
19 | '''
20 | a=[i for i in range(100) if i%2 and i%3]
21 | b={i:i%2!=0 for i in range(10) }
22 | print(b)
--------------------------------------------------------------------------------
/python3pra/.idea/part7/Cookies.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Cookie operations with Selenium
4 | from selenium import webdriver
5 | browser=webdriver.Chrome()
6 | browser.get('https://www.zhihu.com/explore')
7 | print(browser.get_cookies())
8 | browser.add_cookie({'name':'wang','domain':'www.zhihu.com','value':'germey'})
9 | print(browser.get_cookies())
10 | browser.delete_all_cookies()
11 | print(browser.get_cookies())
--------------------------------------------------------------------------------
/scrapydownloadertest/scrapydownloadertest/spiders/httpbin.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import scrapy
4 |
5 | class HttpbinSpider(scrapy.Spider):
6 |     name = 'httpbin'
7 |     allowed_domains=['httpbin.org']
8 |     start_urls=['http://httpbin.org/get']
9 |
10 |     def parse(self, response):
11 |         self.logger.debug(response.text)
12 |         self.logger.debug('status code:'+str(response.status))
--------------------------------------------------------------------------------
/python/.idea/pachong1/excepts.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import urllib.request
3 | import urllib.error as u
4 | import http.cookiejar
5 | url='http://www.baidu.com'
6 | request=urllib.request.Request(url)
7 | try:
8 |     response=urllib.request.urlopen(request,timeout=10)
9 | except u.HTTPError as e:#HTTPError is a subclass of URLError
10 |     print(e.code)
11 | except u.URLError as e:
12 |     if hasattr(e,'reason'):
13 |         print(e.reason)
14 |
15 |
16 |
--------------------------------------------------------------------------------
/python/.idea/study/mylist.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | #Define a custom sequence type that counts item accesses
3 | class Mylist:
4 |     def __init__(self,*args):
5 |         self.values=[x for x in args]
6 |         self.count={}.fromkeys(range(len(self.values)),0)
7 |     def __len__(self):#required by the sequence protocol
8 |         return len(self.values)
9 |     def __getitem__(self, item):#required by the sequence protocol
10 |         self.count[item]+=1
11 |         return self.values[item]
12 | c1=Mylist(1,2,4)
13 | print(c1[0])
14 | print(c1.count)
--------------------------------------------------------------------------------
/python3pra/.idea/part3/github.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import requests
4 | import re
5 | headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
6 | '(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
7 | url='https://github.com/favicon.ico'
8 | r=requests.get(url,headers=headers)
9 | with open('github.ico','wb') as f:
10 |     f.write(r.content)
12 |
13 |
--------------------------------------------------------------------------------
/tutorial/tutorial/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | import scrapy
9 |
10 |
11 | class TutorialItem(scrapy.Item):
12 |     # define the fields for your item here like:
13 |     # name = scrapy.Field()
14 |     text=scrapy.Field()
15 |     authors=scrapy.Field()
16 |     tags=scrapy.Field()
17 |
--------------------------------------------------------------------------------
/python/.idea/study/myIterator.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | #实现一个迭代器
3 | class Fibs:
4 | def __init__(self,n=20):
5 | self.n=n
6 | self.a=0
7 | self.b=1
8 | def __iter__(self):#必须
9 | return self
10 | def __next__(self):#必须
11 | self.a,self.b=self.b,self.b+self.a
12 | if self.a>self.n:
13 | raise StopIteration
14 | return self.a
15 | fib=Fibs()
16 | for h in fib:
17 | print(h)
--------------------------------------------------------------------------------
/python/.idea/study/translate2.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import urllib.request
3 | import urllib.parse
4 | import json
5 | content='你好'
6 | url = "http://fanyi.baidu.com/basetrans"
7 | data = {
8 | "query": content,
9 | "from": "zh",
10 | "to": "en",
11 | }
12 | data=urllib.parse.urlencode(data).encode('utf-8')
13 | response=urllib.request.urlopen(url,data)
14 | html=response.read().decode('utf-8')
15 | target=json.loads(html)
16 | print(target)
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item,Field
9 |
10 |
11 | class NewsItem(Item):
12 |     collection='news'
13 |     title=Field()
14 |     url=Field()
15 |     text=Field()
16 |     datetime=Field()
17 |     source=Field()
18 |     website=Field()
19 |
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item,Field
9 |
10 | class ProductItem(Item):
11 |     collection='products'
12 |     image=Field()
13 |     price=Field()
14 |     deal=Field()
15 |     title=Field()
16 |     shop=Field()
17 |     location=Field()
--------------------------------------------------------------------------------
/python/.idea/py3/sanwei.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 | import numpy as np
3 | from mpl_toolkits.mplot3d import Axes3D
4 |
5 | fig = plt.figure()
6 | ax = Axes3D(fig)
7 | X = np.arange(-4, 4, 0.25)
8 | Y = np.arange(-4, 4, 0.25)
9 | X, Y = np.meshgrid(X, Y)
10 | R = np.sqrt(X**2 + Y**2)
11 | Z = np.sin(R)
12 |
13 | # See help(function) for details on each call, e.g. help(ax.plot_surface)
14 | ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')
15 |
16 | plt.show()
--------------------------------------------------------------------------------
/python3pra/.idea/part3/postex.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Advanced requests usage: POST form data together with a file upload
4 | import requests
5 | headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
6 | '(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
7 | url='http://httpbin.org/post'
8 | data={'name':'wang','age':'22'}
9 | files={'file':open('github.ico','rb')}
10 | r=requests.post(url,data=data,headers=headers,files=files)
11 | print(r.text)
12 |
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/importer.py:
--------------------------------------------------------------------------------
1 | from db import RedisClient
2 |
3 | conn = RedisClient()
4 |
5 |
6 | def set(proxy):
7 |     result = conn.add(proxy)
8 |     print(proxy)
9 |     print('录入成功' if result else '录入失败')
10 |
11 |
12 | def scan():
13 |     print('请输入代理, 输入exit退出读入')
14 |     while True:
15 |         proxy = input()
16 |         if proxy == 'exit':
17 |             break
18 |         set(proxy)
19 |
20 |
21 | if __name__ == '__main__':
22 |     scan()
23 |
--------------------------------------------------------------------------------
/python/.idea/py1/hannuota.py:
--------------------------------------------------------------------------------
1 | #Tower of Hanoi (recursive)
2 | steps=0
3 | def move():
4 |     global steps
5 |     steps+=1
6 | def towers(n,x,y,z):
7 |     if(n==1):
8 |         print('{0} from '.format(n),x,' to ',z)
9 |         move()
10 |     else:
11 |         towers(n-1,x,z,y)
12 |         move()
13 |         print('{0} from '.format(n),x,' to ',z)
14 |         towers(n-1,y,x,z)
15 | n=int(input("请输入要移动的层数:"))
16 | towers(n,'A','B','C')
17 | print('{0}层汉诺塔需要移动{1}次'.format(n,steps))
18 | #Moving 64 discs takes 2**64-1 moves
19 |
--------------------------------------------------------------------------------
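Note: the move count of the Tower of Hanoi satisfies T(n) = 2*T(n-1) + 1 with T(1) = 1, which solves to T(n) = 2**n - 1; that is where the closing comment about 64 discs comes from. A standalone check:

    def moves(n):
        # T(n) = 2*T(n-1) + 1, T(1) = 1
        return 1 if n == 1 else 2 * moves(n - 1) + 1

    print(all(moves(n) == 2 ** n - 1 for n in range(1, 20)))  # True

--------------------------------------------------------------------------------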
/python/.idea/study/Retangle.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | class Retangle:
3 |     def __init__(self,x=0,y=0):
4 |         self.x=x
5 |         self.y=y
6 |     def __setattr__(self, key, value):
7 |         if key=='square':
8 |             self.x=value
9 |             self.y=value
10 |         else:#two equivalent approaches
11 |             super().__setattr__(key,value)
12 |             #self.__dict__[key]=value
13 |     def squares(self):
14 |         return self.x*self.y
15 | r=Retangle(3,3)
16 | #r.square=10
17 | print(r.squares())
--------------------------------------------------------------------------------
/python3pra/.idea/part8/zhiwang.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Recognize an image captcha with the tesserocr library (low accuracy)
4 | import tesserocr
5 | from PIL import Image
6 |
7 |
8 | image=Image.open('Code.jpg')
9 | image=image.convert('L')#convert the image to grayscale
10 | threshold=174
11 | table=[]
12 | for i in range(256):
13 |     if i < threshold:
14 |         table.append(0)
15 |     else:
16 |         table.append(1)
17 | image=image.point(table,'1')#binarize via the mapping table
18 | result=tesserocr.image_to_text(image)
19 | print(result)
--------------------------------------------------------------------------------
/toutiao/.idea/toutiao.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Small spider projects; feel free to use them for practice
2 |
3 | 1. 360 photography image scraper
4 |
5 | 2. Data-analysis projects (includes spider cases, e.g. 100,000 rows of independent-travel data from Qunar)
6 |
7 | 3. Douban movie-review scraper
8 |
9 | 4. Spiders for Baidu Tieba, Neihan Duanzi and the like
10 |
11 | 5. python3pra contains assorted spider examples
12 |
13 | 6. Scrapy spider for tech.china.com
14 |
15 | 7. Scrapy spider for tech.china.com
16 |
17 | 8. Scrapy framework (test); can be skipped
18 |
19 | 9. Scrapy spider for Taobao products; extensible to a distributed spider
20 |
21 | 10. Scrapy spider for Taobao products 2; can be skipped
22 |
23 | 11. Scrapy spider for Toutiao
24 |
25 | 12. Toutiao project
26 |
27 | 13. Scrapy test; can be skipped
28 |
29 | 14. Honor of Kings montage mosaic (spider + image composition)
30 |
31 | 15. Peppa Pig drawing code
32 |
33 | All of the above were written by me; you are welcome to add your own spider projects (with a note)
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/dataanalysis/.idea/youdao.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/.idea/.idea.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/tutorial/.idea/tutorial.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/py1/kehequxian.py:
--------------------------------------------------------------------------------
1 | #Koch snowflake (recursive)
2 | from turtle import *
3 | def koch(size,n):
4 |     if n==0:
5 |         fd(size)
6 |     else:
7 |         for angle in [0,60,-120,60]:
8 |             left(angle)
9 |             koch(size/3,n-1)
10 | def main():
11 |     setup(600,600)
12 |     speed(0)
13 |     penup()
14 |     goto(-200,100)
15 |     pendown()
16 |     pensize(2)
17 |     level=5
18 |     koch(400,level)
19 |     right(120)
20 |     koch(400,level)
21 |     right(120)
22 |     koch(400,level)
23 |     hideturtle()
24 | main()
25 |
--------------------------------------------------------------------------------
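Note: each recursion level of koch() replaces one segment with four segments a third as long, so koch(400, 5) draws 4**5 = 1024 segments of length 400/3**5 (about 1.65) per side, 3072 for the whole snowflake. The count can be verified without opening a turtle window:

    def koch_segments(n):
        # each level turns one segment into four
        return 1 if n == 0 else 4 * koch_segments(n - 1)

    print(3 * koch_segments(5))  # 3072

--------------------------------------------------------------------------------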
/python3pra/.idea/.idea/.idea.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/request.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from config import *
4 | from requests import Request
5 |
6 |
7 | class WeixinRequest(Request):
8 |     def __init__(self, url, callback, method='GET', headers=None, need_proxy=False, fail_time=0, timeout=TIMEOUT):
9 |         Request.__init__(self, method, url, headers)
10 |         self.callback = callback#callback used to parse the response
11 |         self.need_proxy = need_proxy#whether the fetch should go through a proxy
12 |         self.fail_time = fail_time#number of failed attempts so far
13 |         self.timeout = timeout#request timeout
--------------------------------------------------------------------------------
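Note: WeixinRequest subclasses requests.Request purely to carry scheduling metadata (callback, proxy flag, failure count, timeout) alongside the request itself; TIMEOUT comes from the project's config module. A minimal usage sketch (the URL and parse callback are hypothetical):

    def parse(response):
        print(response.url)

    req = WeixinRequest('http://example.com/', callback=parse, need_proxy=True)
    print(req.url, req.need_proxy, req.fail_time)  # metadata travels with the request

--------------------------------------------------------------------------------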
/scrapychinacom/.idea/scrapychinacom.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapytoutiao/.idea/scrapytoutiao.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/scrapysplashtest/.idea/scrapysplashtest.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/py2/csvs.py:
--------------------------------------------------------------------------------
1 | #Convert an xlsx workbook to CSV
2 | import xlrd
3 | import csv
4 | import codecs
5 | def xlsx_to_csv():
6 |     workbook = xlrd.open_workbook('C:\\Users\\TTT\\Desktop\\杂\\2.xlsx')
7 |     table = workbook.sheet_by_index(0)
8 |     with codecs.open('C:\\Users\\TTT\\Desktop\\杂\\excel.csv', 'w', encoding='utf-8') as f:
9 |         write = csv.writer(f)
10 |         for row_num in range(table.nrows):
11 |             row_value = table.row_values(row_num)
12 |             write.writerow(row_value)
13 |
14 | if __name__ == '__main__':
15 |     xlsx_to_csv()
16 |
--------------------------------------------------------------------------------
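Note: xlrd 2.0 and later dropped .xlsx support, so this script needs xlrd<2.0 on a current environment. A pandas-based sketch of the same conversion (assuming openpyxl is installed; same paths as the original):

    import pandas as pd

    df = pd.read_excel('C:\\Users\\TTT\\Desktop\\杂\\2.xlsx')
    df.to_csv('C:\\Users\\TTT\\Desktop\\杂\\excel.csv', index=False, encoding='utf-8')

--------------------------------------------------------------------------------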
/scrapyseleniumtest/.idea/scrapyseleniumtest.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/python/.idea/py1/ImageCut.py:
--------------------------------------------------------------------------------
1 | #Shrink an image until the saved file is under 10 KB
2 |
3 | import os
4 | from PIL import Image
5 |
6 | def resizeImg():
7 |     im = Image.open('C:/Users/TTT/Desktop/杂/图片/548663.jpg')
8 |     w, h = im.size
9 |     ims=os.path.getsize('C:/Users/TTT/Desktop/杂/图片/548663.jpg')
10 |     while ims>10240:
11 |         w=int(w*0.9)
12 |         h=int(h*0.9)
13 |         im.thumbnail((w,h))
14 |         im.save('C:/Users/TTT/Desktop/杂/图片/548663s.jpg')
15 |         ims=os.path.getsize('C:/Users/TTT/Desktop/杂/图片/548663s.jpg')
16 |     return ims
17 | size=resizeImg()
18 | print(size)
19 |
20 |
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # https://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Field,Item
9 |
10 |
11 | class ProductItem(Item):
12 |     # define the fields for your item here like:
13 |     # name = scrapy.Field()
14 |     collection='products'
15 |     image = Field()
16 |     price = Field()
17 |     deal = Field()
18 |     title = Field()
19 |     shop = Field()
20 |     location = Field()
21 |
22 |
--------------------------------------------------------------------------------
/python3pra/.idea/part7/actionchain.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Drag-and-drop with an action chain
4 | from selenium import webdriver
5 | from selenium.webdriver import ActionChains
6 | browser=webdriver.Chrome()
7 | url='http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
8 | browser.get(url)
9 | browser.switch_to_frame('iframeResult')
10 | source=browser.find_element_by_css_selector('#draggable')
11 | target=browser.find_element_by_css_selector('#droppable')
12 | actions=ActionChains(browser)
13 | actions.drag_and_drop(source,target)
14 | actions.perform()
--------------------------------------------------------------------------------
/python3pra/.idea/part7/select.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | #Tab management by driving the browser with Selenium
4 | import time
5 | from selenium import webdriver
6 |
7 | browser=webdriver.Chrome()
8 | browser.get('https://www.baidu.com')
9 | browser.execute_script('window.open()')
10 | print(browser.window_handles)
11 | browser.switch_to_window(browser.window_handles[1])
12 | browser.get('https://www.taobao.com')
13 | time.sleep(1)
14 | '''
15 | browser.switch_to_window(browser.window_handles[0])
16 | browser.get('https://www.zhihu.com')
17 | '''
18 | print(browser.page_source)
--------------------------------------------------------------------------------
/python3pra/.idea/part9/requestproxy.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import requests
4 | headers={
5 |     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
6 |     #'Host': 'ww3.sinaimg.cn'
7 | }
8 | proxy='119.23.64.49:3128'
9 | proxies={
10 |     'http':'http://'+proxy,
11 |     'https':'https://'+proxy
12 | }
13 | try:
14 |     response=requests.get('http://httpbin.org/get',proxies=proxies)
15 |     print(response.text)
16 | except requests.ConnectionError as e:
17 |     print(e.args)
--------------------------------------------------------------------------------
/python/.idea/study/proxy.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | # hide your IP behind a proxy
3 | import urllib.request
4 | import random
5 | url='https://www.whatismyip.com'
6 | iplist=['212.8.252.106:1080']
7 | proxy_support=urllib.request.ProxyHandler({'http':random.choice(iplist)})# takes a dict
8 | opener=urllib.request.build_opener(proxy_support)
9 | opener.addheaders=[('user-agent','Mozilla/5.0 (Windows NT 10.0; WOW64) '
10 | 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36')]
11 | response=opener.open(url)# use the opener, otherwise the proxy is never applied
12 | html=response.read().decode('utf-8')
13 | print(html)
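14 | # A global alternative sketch: urllib.request.install_opener(opener) would make
15 | # every later urlopen() call go through this proxy opener as well.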
--------------------------------------------------------------------------------
/python3pra/.idea/part7/getattr.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | # read node attributes
4 | from selenium import webdriver
5 | from selenium.webdriver import ActionChains
6 | browser=webdriver.Chrome()
7 | browser.get('https://www.zhihu.com/explore')
8 | logo=browser.find_element_by_id('zh-top-link-logo')
9 | input=browser.find_element_by_class_name('zu-top-link-logo')
10 | print(logo)
11 | print(logo.get_attribute('class'))# attribute of the node
12 | print(logo.text)# text of the node
13 | print(input.id)# internal node id
14 | print(input.location)# position of the node on the page
15 | print(input.tag_name)# tag name
16 | print(input.size)# size of the node
--------------------------------------------------------------------------------
/python/.idea/py4/image1.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | import urllib.request
3 | import re
4 | # grab images from a page and save them locally
5 |
6 | # fetch the page
7 | def getHtml(url):
8 | page = urllib.request.urlopen(url)
9 | html = page.read().decode('utf-8')# decode bytes so re can search the text
10 | return html
11 |
12 | # extract image URLs with a regex
13 | def getImg(html):
14 | reg = r'src="(.*?\.png)"'# assumed pattern; captures src URLs ending in .png
15 | imgre = re.compile(reg)
16 | imglist = re.findall(imgre,html)
17 | return imglist
18 | # loop over the images and save them locally
19 | html = getHtml("http://www.hugsmxy.com/")
20 | imglist = getImg(html)
21 | x = 0
22 | for imgurl in imglist:
23 | # save to disk
24 | urllib.request.urlretrieve(imgurl,'.idea/py2/img/%s.jpg' % x)
25 | x+=1
--------------------------------------------------------------------------------
/python/.idea/py3/dikaerheat.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | # Cartesian heart curve
3 | import matplotlib.pyplot as plt
4 | from matplotlib import animation
5 | import numpy as np
6 | import math
7 |
8 | def drawHeart():
9 | t = np.linspace(0, math.pi, 1000)
10 | x = np.sin(t)
11 | y = np.cos(t) + np.power(x, 2.0/3)
12 | plt.plot(x, y, color='red', linewidth=2, label='h')
13 | plt.plot(-x, y, color='red', linewidth=2, label='-h')
14 | plt.xlabel('t')
15 | plt.ylabel('h')
16 | plt.ylim(-3, 3)
17 | plt.xlim(-3, 3)
18 |
19 | plt.legend()
20 | plt.savefig('heat.jpg')
21 | plt.show()
22 |
23 | drawHeart()
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/rules.py:
--------------------------------------------------------------------------------
1 | from scrapy.linkextractors import LinkExtractor
2 | from scrapy.spiders import Rule
3 |
4 | rules = {
5 | 'china': (
6 | Rule(LinkExtractor(allow=r'article\/.*\.html', restrict_xpaths='//div[@id="left_side"]//div[@class="con_item"]'),
7 | callback='parse_item'),
8 | Rule(LinkExtractor(restrict_xpaths='//div[@id="pageStyle"]//a[contains(., "下一页")]'))
9 | ),
10 |
11 | 'nba':(
12 | Rule(LinkExtractor(allow=r'\/group\/\d+\/', restrict_xpaths='//div[@class="wcommonFeed"]//div[@class="item"]'),
13 | callback='parse_item')
14 | )
15 | }
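16 | # These Rule tuples are presumably looked up by key ('china', 'nba') from the
17 | # universal spider launched by run.py; each Rule pairs a LinkExtractor with an
18 | # optional callback such as parse_item.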
--------------------------------------------------------------------------------
/python/.idea/py1/GetName.py:
--------------------------------------------------------------------------------
1 | # extract names from a spreadsheet
2 | import openpyxl
3 | import json
4 | wbr=openpyxl.load_workbook('C:\\Users\\TTT\\Desktop\\杂\\1.xlsx')
5 | ws=wbr['15计科本1']
6 | lst=[]
7 | for cell in list(ws.rows)[20]:
8 | lst.append(cell.value)
9 | lst0=[]
10 | for cell in list(ws.rows)[0]:
11 | lst0.append(cell.value)
12 |
13 | wbw=openpyxl.Workbook()
14 | wbw.remove(wbw.active)
15 | nws=wbw.create_sheet('mysheet',index=1)
16 | nws.append(lst)
17 | for i in range(10):
18 | nws['C%d'%(i+2)].value=i+1
19 | nws['B%d'%(i+2)].value=i+1# uppercase column letter, matching 'C' above
20 | nws['D11'].value='=sum(A2:A11)'
21 | wbw.save('C:\\Users\\TTT\\Desktop\\杂\\2.xlsx')
--------------------------------------------------------------------------------
/python/.idea/py1/daxiaoxie.py:
--------------------------------------------------------------------------------
1 | import keyword
2 | s=keyword.kwlist
3 | # build the list of reserved words
4 |
5 | n=input("Enter a file name: ")
6 | f=open(n,"r",encoding="utf-8").readlines()
7 | ls=[]
8 | for i in f:
9 | i=i.split()
10 | ls.append(i)
11 | # build one list of words per line
12 |
13 | fo=open(n,"w+")
14 | for i in range(len(ls)):
15 | if f[i].isspace():
16 | fo.write(" "+"\n")
17 | for j in range(len(ls[i])):
18 | x= ls[i][j]
19 | if x not in s:
20 | x=x.upper()
21 | else:
22 | x=x.lower()
23 | if j==len(ls[i])-1:# are we at the last word of the line? (comparing the transformed x failed once the case changed)
24 | fo.write(x+"\n")
25 | else:
26 | fo.write(x+" ")
27 |
--------------------------------------------------------------------------------
/python3pra/.idea/part3/demo.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import pyquery
4 | html='''
5 |
16 | '''
17 | doc=pyquery.PyQuery(html)
18 | #li=doc('li:first-child')
19 | l0=doc('li a:first-child')
20 | #print(li)
21 | print(l0)
--------------------------------------------------------------------------------
/python3pra/.idea/part9/urlproxy.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from urllib.error import URLError
4 | from urllib.request import ProxyHandler,build_opener
5 | headers={
6 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
7 | #'Host': 'ww3.sinaimg.cn'
8 | }
9 | proxy='119.23.64.49:3128'
10 | proxy_handler=ProxyHandler({
11 | 'http':'http://'+proxy,
12 | 'https':'https://'+proxy
13 | })
14 | opener=build_opener(proxy_handler)
15 | try:
16 | response=opener.open('http://www.zhihu.com')
17 | print(response.read().decode('utf-8'))
18 | except URLError as e:
19 | print(e.reason)
--------------------------------------------------------------------------------
/python/.idea/pachong1/loginsina.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | # simulate logging in to Sina Weibo with cookies (unsuccessful)
3 | import urllib.request
4 | import urllib.parse
5 | import http.cookiejar as c
6 | filename='mycookie.txt'
7 | cookie=c.MozillaCookieJar(filename)
8 | opener=urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
9 | data=urllib.parse.urlencode({'loginname':'18872738629',
10 | 'password':'wq1996122418'}).encode('utf-8')
11 | sinaurl='https://weibo.com/#_loginLayer_1530966056592'
12 | result=opener.open(sinaurl,data)
13 | cookie.save(ignore_discard=True,ignore_expires=True)
14 | sinaurls='http://my.sina.cn/?vt=4&pos=108&his=0'
15 | result=opener.open(sinaurls)
16 | print(result.read())
--------------------------------------------------------------------------------
/python/.idea/py2/jsons.py:
--------------------------------------------------------------------------------
1 | # convert the spreadsheet to JSON
2 | import xlrd
3 | from collections import OrderedDict
4 | import json
5 | import codecs
6 | wb = xlrd.open_workbook('C:\\Users\\TTT\\Desktop\\杂\\2.xlsx')
7 | convert_list = []
8 | sh = wb.sheet_by_index(0)
9 | title = sh.row_values(0)
10 | for rownum in range(1, sh.nrows):
11 | rowvalue = sh.row_values(rownum)
12 | single = OrderedDict()
13 | for colnum in range(0, len(rowvalue)):
14 | print(title[colnum], rowvalue[colnum])
15 | single[title[colnum]] = rowvalue[colnum]
16 | convert_list.append(single)
17 |
18 | j = json.dumps(convert_list)
19 |
20 | with codecs.open('C:\\Users\\TTT\\Desktop\\杂\\excel.json',"w","utf-8") as f:
21 | f.write(j)
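22 | # Sketch: json.dumps(convert_list, ensure_ascii=False) would keep Chinese cell
23 | # values readable in excel.json instead of \uXXXX escapes.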
--------------------------------------------------------------------------------
/scrapychinacom/run.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from scrapy.utils.project import get_project_settings
3 | from scrapychinacom.spiders.universal import UniversalSpider
4 | from scrapychinacom.utils import get_config
5 | from scrapy.crawler import CrawlerProcess
6 |
7 | def run():
8 | name = sys.argv[1]
9 | custom_settings = get_config(name)
10 | spider = custom_settings.get('spider', 'universal')
11 | project_settings = get_project_settings()
12 | settings = dict(project_settings.copy())
13 | settings.update(custom_settings.get('settings'))
14 | process = CrawlerProcess(settings)
15 | process.crawl(spider, **{'name': name})
16 | process.start()
17 |
18 |
19 | if __name__ == '__main__':
20 | run()
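21 | # Assumed usage, since the config name comes from sys.argv[1]:
22 | #   python run.py china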
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/setting.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | # Redis host
4 | REDIS_HOST = '127.0.0.1'
5 |
6 | # Redis port
7 | REDIS_PORT = 6379
8 |
9 | # Redis password; use None if not set
10 | REDIS_PASSWORD = None
11 |
12 | REDIS_KEY = 'proxies'
13 |
14 | # proxy score bounds
15 | MAX_SCORE = 100
16 | MIN_SCORE = 0
17 | INITIAL_SCORE = 10
18 |
19 | VALID_STATUS_CODES = [200, 302]
20 |
21 | # pool capacity limit
22 | POOL_UPPER_THRESHOLD = 50000
23 |
24 | # test cycle in seconds
25 | TESTER_CYCLE = 20
26 | # fetch cycle in seconds
27 | GETTER_CYCLE = 300
28 |
29 | # test URL; best to test against the site you plan to crawl
30 | TEST_URL = 'http://www.baidu.com'
31 |
32 | # API settings
33 | API_HOST = '0.0.0.0'
34 | API_PORT = 5555
35 |
36 | # feature switches
37 | TESTER_ENABLED = True
38 | GETTER_ENABLED = True
39 | API_ENABLED = True
40 |
41 | # max proxies tested per batch
42 | BATCH_TEST_SIZE = 10
--------------------------------------------------------------------------------
/python/.idea/py4/image:
--------------------------------------------------------------------------------
1 | # batch-download images from the school website
2 | import requests
3 | from bs4 import BeautifulSoup as BS
4 | import re
5 | import urllib.request
6 | def gettext(url):
7 | try:
8 | r = requests.get(url, timeout=30)
9 | r.raise_for_status()
10 | r.encoding = 'utf-8'
11 | return r.text
12 | except:
13 | return ""
14 | url = "http://www.hugsmxy.com/"
15 | soup = BS(gettext(url), 'html.parser')
16 | img = (soup.find_all('img', {"src":re.compile('.png')}))
17 | imgurl=list()
18 | i=0
19 | for imgs in img:
20 | imgurl.append(url+(imgs.attrs.get('src')))
21 | print(type(imgurl[i]))
22 | imgreq = requests.get(imgurl[i])
23 | with open(i.__str__()+'.jpg', 'wb') as f:
24 | f.write(imgreq.content)
25 | i=i+1
26 | print("have save")
27 |
--------------------------------------------------------------------------------
/python3pra/.idea/part7/taobao.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 | import time
5 | browser=webdriver.Chrome()
6 | browser.get('https://www.taobao.com')
7 | input=browser.find_element_by_id('q')
8 | input.send_keys('iphone')
9 | time.sleep(2)
10 | input.clear()
11 | input.send_keys('ipad')
12 | #button=browser.find_element_by_class_name('btn-search')
13 | #button=browser.find_element_by_css_selector('.btn-search')# locate via CSS selector
14 | button=browser.find_element_by_xpath('//div[@class="search-button"]/button')# locate via XPath
15 | button.click()
16 | '''
17 | #lis=browser.find_elements_by_css_selector('.service-bd li')# multiple nodes via CSS selector
18 | #lis=browser.find_elements_by_xpath('//ul[@class="service-bd"]//li')# multiple nodes via XPath
19 | print(lis)
20 | browser.close()
21 | '''
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/setting.py:
--------------------------------------------------------------------------------
1 | # Redis数据库地址
2 | REDIS_HOST = '127.0.0.1'
3 |
4 | # Redis端口
5 | REDIS_PORT = 6379
6 |
7 | # Redis密码,如无填None
8 | REDIS_PASSWORD = None
9 |
10 | REDIS_KEY = 'proxies'
11 |
12 | # 代理分数
13 | MAX_SCORE = 100
14 | MIN_SCORE = 98
15 | INITIAL_SCORE = 99
16 |
17 | #VALID_STATUS_CODES = [200, 302]
18 | VALID_STATUS_CODES = [200]
19 |
20 | # 代理池数量界限
21 | POOL_UPPER_THRESHOLD = 50000
22 |
23 | # 检查周期
24 | TESTER_CYCLE = 20
25 | # 获取周期
26 | GETTER_CYCLE = 300
27 |
28 | # 测试API,建议抓哪个网站测哪个
29 | TEST_URL = 'http://weixin.sogou.com/weixin?type=2&s_from=input&query=NBA'
30 |
31 | # API配置
32 | API_HOST = '127.0.0.1'
33 | API_PORT = 5555
34 |
35 | # 开关
36 | TESTER_ENABLED = True
37 | GETTER_ENABLED = True
38 | API_ENABLED = True
39 |
40 | # 最大批测试量
41 | BATCH_TEST_SIZE = 10
42 |
--------------------------------------------------------------------------------
/python/.idea/study/property.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | class defined:
3 | '''
4 | def __init__(self,size=10):
5 | self.size=size
6 | def get(self):
7 | return self.size
8 | def set(self,value):
9 | self.size=value
10 | def dele(self):
11 | del self.size
12 | x=property(get,set,dele)
13 | '''
14 | def __getattribute__(self, item):
15 | print("gggg")
16 | return super().__getattribute__(item)
17 | def __getattr__(self, item):
18 | print("haha")
19 | def __setattr__(self, key, value):
20 | print('faafa')
21 | return super().__setattr__(key,value)
22 | def __delattr__(self, item):
23 | print('fafafafa')
24 | return super().__delattr__(item)
25 | d=defined()
26 | d.x=1
27 | print(d.x)
28 | #del d.x
--------------------------------------------------------------------------------
/python3pra/.idea/part5/csvs.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import csv
4 | import pandas
5 |
6 | # write and read CSV with the csv module
7 | with open('test.csv', 'w',newline='') as csvfile:
8 |
9 | filename=['id', 'name', 'age']
10 | writer = csv.DictWriter(csvfile,fieldnames=filename)
11 | writer.writeheader()
12 | writer.writerow({'id':'10001','name':'jordan','age':'22'})
13 | with open('test.csv', 'r') as csvfile:
14 | reader=csv.reader(csvfile)
15 | for row in reader:
16 | print(row)
17 | '''
18 | # write and read CSV with pandas
19 | da=['id', 'name', 'age']
20 | content=[['10001','jordan','22'],['10002','jorda','21'],['10003','jord','20']]
21 | df=pandas.DataFrame(content,columns=da)
22 | df.to_csv('test.csv',encoding='utf-8')
23 | df=pandas.read_csv('test.csv',encoding='utf-8')
24 | print(df)
25 | '''
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/api.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, g
2 |
3 | from db import RedisClient
4 |
5 | __all__ = ['app']
6 |
7 | app = Flask(__name__)
8 |
9 |
10 | def get_conn():
11 | if not hasattr(g, 'redis'):
12 | g.redis = RedisClient()
13 | return g.redis
14 |
15 |
16 | @app.route('/')
17 | def index():
18 | return '<h2>Welcome to Proxy Pool System</h2>'
19 |
20 |
21 | @app.route('/random')
22 | def get_proxy():
23 | """
24 | Get a proxy
25 | :return: a random proxy
26 | """
27 | conn = get_conn()
28 | return conn.random()
29 |
30 |
31 | @app.route('/count')
32 | def get_counts():
33 | """
34 | Get the count of proxies
35 | :return: the total number of proxies in the pool
36 | """
37 | conn = get_conn()
38 | return str(conn.count())
39 |
40 |
41 | if __name__ == '__main__':
42 | app.run()
43 |
--------------------------------------------------------------------------------
/python/.idea/study/translate.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | # call the mobile Baidu Translate endpoint
3 | import requests
4 | import json
5 | content=0
6 | while True:
7 | content=input('Enter the sentence to translate\n')
8 | if content!='':
9 | url = "http://fanyi.baidu.com/basetrans"
10 | data = {
11 | "query": content,
12 | "from": "zh",
13 | "to": "en",
14 | }
15 | headers = {
16 | "User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Mobile Safari/537.36",
17 | }
18 | response = requests.post(url,data=data,headers=headers)
19 | html = response.content.decode('unicode-escape')
20 | target = json.loads(html)
21 | print(target['trans'][0]['dst']) # print the translated text
22 | else:
23 | break
24 |
--------------------------------------------------------------------------------
/python/.idea/study/mytimer.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import time
3 | class Timer:
4 | def __init__(self):
5 | self.ls=0# start time
6 | self.lss=0# end time
7 | self.strs='timing has not started'# message for a single timer
8 | self.strss=''# message when two timers are added
9 | self.result=0# elapsed seconds
10 | def __str__(self):# __str__ takes only self
11 | return self.strs
12 |
13 | __repr__=__str__
14 |
15 | def __add__(self, other):
16 | self.strss='combined running time: '
17 | return self.strss+str(int(self.strs[-1])+int(other.strs[-1]))# only handles single-digit seconds
18 | def start(self):
19 | self.ls=time.localtime()[5]
20 | self.strs='call the end method first'
21 | print('timing started')
22 | def end(self):
23 | print('timing stopped')
24 | self.lss=time.localtime()[5]
25 | self.result=self.lss-self.ls
26 | self.strs='running time: '+str(self.result)# keep the message so printing the object shows the result
27 |
28 | #print(self.strs)
--------------------------------------------------------------------------------
/tutorial/tutorial/spiders/quotes.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 | from tutorial.items import TutorialItem
4 |
5 |
6 | class QuotesSpider(scrapy.Spider):
7 | name = 'quotes'
8 | allowed_domains = ['quotes.toscrape.com']
9 | start_urls = ['http://quotes.toscrape.com/']
10 |
11 | def parse(self, response):
12 | quotes=response.css('.quote')
13 | for quote in quotes:
14 | item=TutorialItem()
15 | item['text']=quote.css('.text::text').extract_first()
16 | item['authors']=quote.css('.author::text').extract_first()
17 | item['tags']=quote.css('.tags .tag::text').extract()# descendant selector: .tags .tag
18 | yield item
19 |
20 | next=response.css('.pager .next a::attr(href)').extract_first()
21 | url=response.urljoin(next)
22 | yield scrapy.Request(url=url,callback=self.parse)
23 |
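24 | # Caveat: on the last page extract_first() returns None and urljoin(None) just
25 | # returns the page's own URL, so the spider re-requests it (the dupefilter drops
26 | # it); an explicit guard like `if next:` would make the stop cleaner.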
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/api.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from flask import Flask, g
4 |
5 | from db import RedisClient
6 |
7 | __all__ = ['app']
8 |
9 | app = Flask(__name__)
10 |
11 |
12 | def get_conn():
13 | if not hasattr(g, 'redis'):
14 | g.redis = RedisClient()
15 | return g.redis
16 |
17 |
18 | @app.route('/')
19 | def index():
20 | return '<h2>Welcome to Proxy Pool System</h2>'
21 |
22 |
23 | @app.route('/random')
24 | def get_proxy():
25 | """
26 | Get a proxy
27 | :return: a random proxy
28 | """
29 | conn = get_conn()
30 | return conn.random()
31 |
32 |
33 | @app.route('/count')
34 | def get_counts():
35 | """
36 | Get the count of proxies
37 | :return: the total number of proxies in the pool
38 | """
39 | conn = get_conn()
40 | return str(conn.count())
41 |
42 |
43 | if __name__ == '__main__':
44 | app.run()
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/utils.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from requests.exceptions import ConnectionError
3 |
4 | base_headers = {
5 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
6 | 'Accept-Encoding': 'gzip, deflate, sdch',
7 | 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
8 | }
9 |
10 |
11 | def get_page(url, options={}):
12 | """
13 | Fetch a page from a proxy source
14 | :param url:
15 | :param options:
16 | :return:
17 | """
18 | headers = dict(base_headers, **options)
19 | print('Crawling', url)
20 | try:
21 | response = requests.get(url, headers=headers)
22 | print('Crawled', url, response.status_code)
23 | if response.status_code == 200:
24 | return response.text
25 | except ConnectionError:
26 | print('Failed to crawl', url)
27 | return None
28 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/links.py:
--------------------------------------------------------------------------------
1 | from selenium import webdriver
2 | from bs4 import BeautifulSoup as BS
3 | import time
4 |
5 | browser=webdriver.Chrome()
6 | def getlink():
7 | """
8 | Get the link of every article
9 | :return:
10 | """
11 | url = 'https://www.toutiao.com/ch/nba/'
12 | browser.get(url)
13 | # implicit wait of up to 5s
14 | browser.implicitly_wait(5)
15 | # scroll the page to trigger lazy loading
16 | for x in range(7):
17 | js = "var q=document.documentElement.scrollTop=" + str(x * 700)
18 | browser.execute_script(js)
19 | time.sleep(2)
20 | time.sleep(5)
21 | # list of links
22 | links=[]
23 | response = browser.page_source
24 | soup = BS(response, 'lxml')
25 | groups = soup.find_all(class_='link')
26 | for group in groups:
27 | links.append(group.attrs['href'])
28 | return links
29 |
30 | if __name__ == '__main__':
31 | print(getlink())
32 | browser.close()
--------------------------------------------------------------------------------
/python/.idea/study/file.py:
--------------------------------------------------------------------------------
1 | # Created by TTT
2 | # read a document and split it into per-speaker files
3 | def method(count,l1,l2):
4 | name1 = 'l' + str(count) + '.txt'
5 | name2 = 'f' + str(count) + '.txt'
6 | ha = open(name1, 'w', encoding='utf-8')
7 | he = open(name2, 'w', encoding='utf-8')
8 | ha.writelines(l1)
9 | he.writelines(l2)
10 | ha.close()
11 | he.close()
12 | def method2(filename):
13 | l1 = []
14 | l2 = []
15 | count = 1
16 | f = open(filename, 'r', encoding='utf-8')
17 | for line in f:
18 | if line[:6] != '======':
19 | (first, second) = line.split(":", 1)
20 | if first == '你好':
21 | l1.append(second)
22 | if first== '我们':
23 | l2.append(second)
24 | else:
25 | method(count,l1,l2)
26 | l1 = []
27 | l2 = []
28 | count += 1
29 | method(count,l1,l2)
30 | f.close()
31 | method2('haha.txt')
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/utils.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import requests
4 | from requests.exceptions import ConnectionError
5 |
6 | base_headers = {
7 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
8 | 'Accept-Encoding': 'gzip, deflate, sdch',
9 | 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
10 | }
11 |
12 |
13 | def get_page(url, options={}):
14 | """
15 | Fetch a page from a proxy source
16 | :param url:
17 | :param options:
18 | :return:
19 | """
20 | headers = dict(base_headers, **options)
21 | print('Crawling', url)
22 | try:
23 | response = requests.get(url, headers=headers)
24 | print('Crawled', url, response.status_code)
25 | if response.status_code == 200:
26 | return response.text
27 | except ConnectionError:
28 | print('Failed to crawl', url)
29 | return None
30 |
31 |
32 |
--------------------------------------------------------------------------------
/python/.idea/study/myproperty.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | class Myproperty:
3 | def __init__(self,mset=None,mget=None,mdel=None):
4 | self.mset=mset
5 | self.mget=mget
6 | self.mdel=mdel
7 | def __get__(self, instance, owner):
8 | return self.mget(instance)
9 | def __set__(self, instance, value):
10 | self.mset(instance,value)
11 | def __delete__(self, instance):
12 | self.mdel(instance)
13 | class sheshi:
14 | def __init__(self,x=26.0):
15 | self.x=float(x)
16 | def __get__(self, instance, owner):
17 | return self.x
18 | def __set__(self, instance, value):
19 | self.x=float(value)
20 | class huashi:
21 | def __get__(self, instance, owner):
22 | return instance.c*1.8+32
23 | def __set__(self, instance, value):
24 | instance.c=(float(value)-32)/1.8
25 | class Wram:
26 | c=sheshi()
27 | h=huashi()
28 |
29 | w=Wram()
30 | w.c=44
31 | print(w.h)
32 |
33 |
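34 | # Myproperty above re-implements the built-in property() via the descriptor
35 | # protocol (__get__/__set__/__delete__); sheshi/huashi are Celsius/Fahrenheit
36 | # descriptors, so after w.c=44, w.h reads as 44*1.8+32 = 111.2.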
--------------------------------------------------------------------------------
/python3pra/.idea/part10/logingithub2.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 | from selenium.webdriver.common.by import By
5 | from selenium.webdriver.common.keys import Keys
6 | from selenium.webdriver.support.wait import WebDriverWait
7 | from selenium.webdriver.support import expected_conditions as EC
8 | from pyquery import PyQuery as pq
9 |
10 | url='https://github.com/login'
11 | githubu='NGUWQ'
12 | githubp='Wq1996122421'
13 | browser=webdriver.Chrome()
14 | wait=WebDriverWait(browser,10)
15 |
16 | browser.get(url)
17 | username=browser.find_element_by_id('login_field')
18 | username.send_keys(githubu)
19 | password=browser.find_element_by_id('password')
20 | password.send_keys(githubp)
21 | submit=wait.until(EC.element_to_be_clickable((By.NAME,'commit')))
22 | submit.click()
23 | html=browser.page_source
24 | doc=pq(html)
25 | items=doc('.news .d-flex .flex-items-baseline').items()
26 | for item in items:
27 | print(item)
28 |
--------------------------------------------------------------------------------
/python/.idea/py1/qipan.py:
--------------------------------------------------------------------------------
1 | # chessboard problem with L-shaped pieces (recursive)
2 | '''
3 | tr: row of the board's top-left corner
4 | tc: column of the board's top-left corner
5 | dr: row of the special square
6 | dc: column of the special square
7 | s: size of the current board
8 | '''
9 | steps=0
10 | lists=[[] for _ in range(4)]# [[]]*4 would alias the same inner list four times
11 | def move(n,a,b):
12 | global steps
13 | steps+=1
14 | def qipan(tr,tc,dr,dc,s):
15 | if s==1:
16 | return
17 | cut=steps
18 | s=s/2
19 | # top-left quadrant
20 | if dr<tr+s and dc<tc+s:
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/getter.py:
--------------------------------------------------------------------------------
1 | from tester import Tester
2 | from db import RedisClient
3 | from crawler import Crawler
4 | from setting import *
5 | import sys
6 |
7 | class Getter():
8 | def __init__(self):
9 | self.redis = RedisClient()
10 | self.crawler = Crawler()
11 |
12 | def is_over_threshold(self):
13 | """
14 | Check whether the pool has reached its capacity limit
15 | """
16 | if self.redis.count() >= POOL_UPPER_THRESHOLD:
17 | return True
18 | else:
19 | return False
20 |
21 | def run(self):
22 | print('Getter starts running')
23 | if not self.is_over_threshold():
24 | for callback_label in range(self.crawler.__CrawlFuncCount__):
25 | callback = self.crawler.__CrawlFunc__[callback_label]
26 | # fetch proxies
27 | proxies = self.crawler.get_proxies(callback)
28 | sys.stdout.flush()
29 | for proxy in proxies:
30 | self.redis.add(proxy)
31 |
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | import pymongo
10 |
11 | class MongoPipeline(object):
12 | def __init__(self,mongo_uri,mongo_db):
13 | self.mongo_uri=mongo_uri
14 | self.mongo_db=mongo_db
15 |
16 | @classmethod
17 | def from_crawler(cls,crawler):
18 | return cls(
19 | mongo_uri=crawler.settings.get('MONGO_URI'),
20 | mongo_db=crawler.settings.get('MONGO_DB')
21 | )
22 |
23 |
24 | def open_spider(self,spider):
25 | self.client=pymongo.MongoClient(self.mongo_uri)
26 | self.db=self.client[self.mongo_db]
27 |
28 | def process_item(self,item,spider):
29 | self.db[item.collection].insert(dict(item))
30 | return item
31 |
32 | def close_spider(self,spider):
33 | self.client.close()
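34 | # Note: Collection.insert() is deprecated in pymongo 3; insert_one(dict(item))
35 | # is the single-document equivalent.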
--------------------------------------------------------------------------------
/scrapyseleniumtest/scrapyseleniumtest/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 | import pymongo
8 |
9 | class MongoPipeline(object):
10 | def __init__(self,mongo_uri,mongo_db):
11 | self.mongo_uri=mongo_uri
12 | self.mongo_db=mongo_db
13 |
14 | @classmethod
15 | def from_crawler(cls,crawler):
16 | return cls(
17 | mongo_uri=crawler.settings.get('MONGO_URI'),
18 | mongo_db=crawler.settings.get('MONGO_DB')
19 | )
20 |
21 |
22 | def open_spider(self,spider):
23 | self.client=pymongo.MongoClient(self.mongo_uri)
24 | self.db=self.client[self.mongo_db]
25 |
26 | def process_item(self,item,spider):
27 | self.db[item.collection].insert(dict(item))
28 | return item
29 |
30 | def close_spider(self,spider):
31 | self.client.close()
32 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | import pymongo
10 |
11 | class MongoPipeline(object):
12 | def __init__(self,mongo_uri,mongo_db):
13 | self.mongo_uri=mongo_uri
14 | self.mongo_db=mongo_db
15 |
16 | @classmethod
17 | def from_crawler(cls,crawler):
18 | return cls(
19 | mongo_uri=crawler.settings.get('MONGO_URI'),
20 | mongo_db=crawler.settings.get('MONGO_DB')
21 | )
22 |
23 |
24 | def open_spider(self,spider):
25 | self.client=pymongo.MongoClient(self.mongo_uri)
26 | self.db=self.client[self.mongo_db]
27 |
28 | def process_item(self,item,spider):
29 | self.db[item.collection].insert(dict(item))
30 | return item
31 |
32 | def close_spider(self,spider):
33 | self.client.close()
34 |
--------------------------------------------------------------------------------
/scrapychinacom/scrapychinacom/spiders/china.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import scrapy
3 | from scrapy.linkextractors import LinkExtractor
4 | from scrapy.spiders import CrawlSpider, Rule
5 | from scrapychinacom.loaders import ChinaLoader
6 | from scrapychinacom.items import NewsItem
7 |
8 | class ChinaSpider(CrawlSpider):
9 | name = 'china'
10 | allowed_domains = ['tech.china.com']
11 | start_urls = ['http://tech.china.com/articles/']
12 |
13 | def parse_item(self, response):
14 | loader=ChinaLoader(item=NewsItem(),response=response)
15 | loader.add_xpath('title','//h1[@id="chan_newsTitle"]/text()')
16 | loader.add_value('url',response.url)
17 | loader.add_xpath('text','//div[@id="chan_newsDetail"]//text()')
18 | loader.add_xpath('datetime','//div[@id="chan_newsInfo"]/text()',re='(\d+-\d+-\d+\s\d+:\d+:\d+)')
19 | loader.add_xpath('source','//div[@id="chan_newsInfo"]/text()',re='来源:(.*)')
20 | loader.add_value('website','中华网')
21 | yield loader.load_item()
22 |
--------------------------------------------------------------------------------
/scrapysplashtest/scrapysplashtest/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | import pymongo
10 |
11 | class MongoPipeline(object):
12 | def __init__(self,mongo_uri,mongo_db):
13 | self.mongo_uri=mongo_uri
14 | self.mongo_db=mongo_db
15 |
16 | @classmethod
17 | def from_crawler(cls,crawler):
18 | return cls(
19 | mongo_uri=crawler.settings.get('MONGO_URI'),
20 | mongo_db=crawler.settings.get('MONGO_DB')
21 | )
22 |
23 |
24 | def open_spider(self,spider):
25 | self.client=pymongo.MongoClient(self.mongo_uri)
26 | self.db=self.client[self.mongo_db]
27 |
28 | def process_item(self,item,spider):
29 | self.db[item.collection].insert(dict(item))
30 | return item
31 |
32 | def close_spider(self,spider):
33 | self.client.close()
34 |
--------------------------------------------------------------------------------
/douban/douban/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 | import pymongo
9 |
10 | class MoviePipeline(object):
11 |
12 | def __init__(self,mongo_uri,mongo_db):
13 | self.mongo_uri=mongo_uri
14 | self.mongo_db=mongo_db
15 |
16 |
17 | @classmethod
18 | def from_crawler(cls,crawler):
19 |
20 | return cls(
21 | mongo_uri=crawler.settings.get('MONGO_URI'),
22 | mongo_db=crawler.settings.get('MONGO_DB')
23 | )
24 |
25 | def open_spider(self,spider):
26 | self.client=pymongo.MongoClient(self.mongo_uri)
27 | self.db=self.client[self.mongo_db]
28 |
29 | def process_item(self,item,spider):
30 | self.db[item.collection].insert(dict(item))
31 | return item
32 |
33 | def close_spider(self,spider):
34 | self.client.close()
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/getter.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from tester import Tester
4 | from db import RedisClient
5 | from crawler import Crawler
6 | from setting import *
7 | import sys
8 |
9 | class Getter():
10 | def __init__(self):
11 | self.redis = RedisClient()
12 | self.crawler = Crawler()
13 |
14 | def is_over_threshold(self):
15 | """
16 | Check whether the pool has reached its capacity limit
17 | """
18 | if self.redis.count() >= POOL_UPPER_THRESHOLD:
19 | return True
20 | else:
21 | return False
22 |
23 | def run(self):
24 | print('Getter starts running')
25 | if not self.is_over_threshold():
26 | for callback_label in range(self.crawler.__CrawlFuncCount__):
27 | callback = self.crawler.__CrawlFunc__[callback_label]
28 | # fetch proxies
29 | proxies = self.crawler.get_proxies(callback)
30 | sys.stdout.flush()
31 | for proxy in proxies:
32 | self.redis.add(proxy)
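33 | # __CrawlFunc__ and __CrawlFuncCount__ are presumably assembled by a metaclass in
34 | # crawler.py that registers every crawl_* method of Crawler, so adding a proxy
35 | # source only needs one more crawl_* generator.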
--------------------------------------------------------------------------------
/python/.idea/py4/rank.py:
--------------------------------------------------------------------------------
1 | # fetch university rankings
2 | import requests
3 | from bs4 import BeautifulSoup
4 | allUniv=[]
5 | def gettext(url):
6 | try:
7 | r=requests.get(url,timeout=30)
8 | r.raise_for_status()
9 | r.encoding='utf-8'
10 | return r.text
11 | except:
12 | return ""
13 | def fill(soup):
14 | data=soup.find_all('tr')
15 | for tr in data:
16 | ltd=tr.find_all('td')
17 | if len(ltd)==0:
18 | continue
19 | single=[]
20 | for td in ltd:
21 | single.append(td.string)
22 | allUniv.append(single)
23 | def printss(num):
24 | print("{:^4}{:^10}{:^5}{:^8}{:^10}".format("排名","学校名称","省市","总分","培养规模"))
25 | for i in range(num):
26 | u=allUniv[i]
27 | print("{:^4}{:^10}{:^5}{:^8}{:^10}".format(u[0],u[1],u[2],u[3],u[6]))
28 | def main(num):
29 | url='http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'
30 | html=gettext(url)
31 | soup=BeautifulSoup(html,"html.parser")
32 | fill(soup)
33 | printss(num)
34 | main(310)
--------------------------------------------------------------------------------
/python3pra/.idea/part7/seleniume.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 | from selenium.webdriver.common.by import By
5 | from selenium.webdriver.common.keys import Keys
6 | from selenium.webdriver.support import expected_conditions as EC
7 | from selenium.webdriver.support.wait import WebDriverWait
8 |
9 | browser = webdriver.Chrome() # initialize the browser object
10 | try:
11 | browser.get('https://www.cnblogs.com/101718qiong/') # request the page
12 | input = browser.find_element_by_id('kw') # locate the search box by its id
13 | input.send_keys('Python') # type the search term into the node
14 | input.send_keys(Keys.ENTER) # press Enter
15 | wait = WebDriverWait(browser, 10) # maximum wait time
16 | ouput = wait.until(EC.presence_of_element_located((By.ID, 'content_left'))) # wait up to 10s for this id to load
17 | other = wait.until(EC.title_is(u'Silence&QH - 博客园'))
18 | print(browser.current_url) # print the current URL
19 | print(ouput)
20 | print(other)
21 | # print(browser.get_cookies())# print the cookies
22 | # print(browser.page_source) # print the page source
23 | finally:
24 | browser.close()
25 |
--------------------------------------------------------------------------------
/Images360II/images360/spiders/images.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from scrapy import Spider, Request
3 | from urllib.parse import urlencode
4 | import json
5 |
6 | from images360.items import ImageItem
7 |
8 |
9 | class ImagesSpider(Spider):
10 | name = 'images'
11 | allowed_domains = ['images.so.com']
12 | start_urls = ['http://images.so.com/']
13 |
14 |
15 | def start_requests(self):
16 | data = {'ch': 'photography', 'listtype': 'new'}
17 | base_url = 'https://image.so.com/zj?'
18 | for page in range(1, self.settings.get('MAX_PAGE') + 1):
19 | data['sn'] = page * 30
20 | params = urlencode(data)
21 | url = base_url + params
22 | yield Request(url, self.parse)
23 |
24 | def parse(self, response):
25 | result = json.loads(response.text)
26 | for image in result.get('list'):
27 | item = ImageItem()
28 | item['id'] = image.get('imageid')
29 | item['url'] = image.get('qhimg_url')
30 | item['title'] = image.get('group_title')
31 | item['thumb'] = image.get('qhimg_thumb_url')
32 | yield item
33 |
--------------------------------------------------------------------------------
/python/.idea/py4/rank1.py:
--------------------------------------------------------------------------------
1 | from bs4 import BeautifulSoup
2 | import requests
3 | import bs4
4 |
5 | def getHTMLText(url):
6 | try:
7 | r=requests.get(url,timeout=30)
8 | r.raise_for_status()
9 | r.encoding=r.apparent_encoding
10 | return r.text
11 | except:
12 | return ""
13 |
14 | def fillUniversityList(UList,text):
15 | soup=BeautifulSoup(text,'html.parser')
16 | for tr in soup.find("tbody").children:
17 | # each tr must be a Tag; every tr row is one university, with td pairs separating its fields
18 | if isinstance(tr,bs4.element.Tag):
19 | tdlist=tr.find_all("td")
20 | UList.append([tdlist[0].string,tdlist[1].string,tdlist[3].string])
21 |
22 | def printUniversityList(UList):
23 | # center the 3 columns with a field width of 10
24 | demo="{0:^10}{1:{3}^10}{2:^10}"
25 | # chr(12288) pads with full-width (CJK) spaces
26 | print(demo.format("排名:","大学:","分数:",chr(12288)))
27 | for info in UList:
28 | print(demo.format(info[0],info[1],info[2],chr(12288)))
29 |
30 |
31 | def main():
32 | UInfo=[]
33 | html=getHTMLText("http://www.zuihaodaxue.cn/zuihaodaxuepaiming2017.html")
34 | fillUniversityList(UInfo,html)
35 | printUniversityList(UInfo)
36 |
37 | main()
--------------------------------------------------------------------------------
/python/.idea/py3/leida.py:
--------------------------------------------------------------------------------
1 | # radar chart of player stats
2 | # encoding: utf-8
3 | import pandas as pd
4 | import numpy as np
5 | import matplotlib.pyplot as plt
6 | plt.rcParams['font.sans-serif'] = ['KaiTi'] # render Chinese glyphs
7 | labels = np.array([u'得分', u'篮板', u'助攻',u'抢断',u'盖帽',u'失误']) # axis labels
8 | dataLenth = 6 # number of axes
9 | data = np.array([[27.5,8.6,9.1,1.41,0.87,4.23],
10 | [26.4,8.6,8.7,1.24,0.59,4.09],
11 | [25.3,7.4,6.8,1.37,0.64,3.28],
12 | [25.3,6.0,7.4,1.58,0.71,3.94],
13 | [27.1,6.9,6.3,1.57,0.34,3.51],
14 | [26.8,8.0,7.2,1.7,0.88,2.97]]) # data
15 | angles = np.linspace(0, 2*np.pi, dataLenth, endpoint=False) # divide the circle evenly
16 | data = np.concatenate((data, [data[0]])) # close the polygon
17 | angles = np.concatenate((angles, [angles[0]])) # close the polygon
18 | fig=plt.figure(facecolor="white")
19 | plt.subplot(111,polar=True)
20 | plt.plot(angles,data,'bo-',color='gray',linewidth=1,alpha=0.2)
21 | plt.plot(angles,data,'o-',linewidth=1.5,alpha=0.2)
22 | plt.fill(angles, data, alpha=0.25)# fill the area
23 | plt.thetagrids(angles * 180/np.pi, labels,frac=1.2) # place the labels
24 | plt.figtext(0.52,0.95,'勒布朗詹姆斯各项数据分析',ha='center',size=20)
25 | legend=plt.legend(labels,loc=(0.94,0.80),labelspacing=0.1)
26 | #plt.step(legend.get_texts(),fontsize='small')
27 | plt.grid(True)
28 | plt.savefig('james.jpg')
29 | plt.show()
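30 | # Caveat: the frac= argument of thetagrids() was removed in newer matplotlib
31 | # releases; on current versions, drop it and pad the labels instead.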
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/mysql.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import pymysql
4 | from config import *
5 |
6 |
7 | class MySQL():
8 | def __init__(self, host=MYSQL_HOST, username=MYSQL_USER, password=MYSQL_PASSWORD, port=MYSQL_PORT,
9 | database=MYSQL_DATABASE):
10 | """
11 | Initialize the MySQL connection
12 | :param host:
13 | :param username:
14 | :param password:
15 | :param port:
16 | :param database:
17 | """
18 | try:
19 | self.db = pymysql.connect(host, username, password, database, charset='utf8', port=port)
20 | self.cursor = self.db.cursor()
21 | except pymysql.MySQLError as e:
22 | print(e.args)
23 |
24 | def insert(self, table, data):
25 | """
26 | Insert a row of data
27 | :param table:
28 | :param data:
29 | :return:
30 | """
31 | keys = ', '.join(data.keys())
32 | values = ', '.join(['%s'] * len(data))
33 | sql_query = 'insert into %s (%s) values (%s)' % (table, keys, values)
34 | try:
35 | self.cursor.execute(sql_query, tuple(data.values()))
36 | self.db.commit()
37 | except pymysql.MySQLError as e:
38 | print(e.args)
39 | self.db.rollback()
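40 | # Usage sketch with an assumed table and schema:
41 | # db = MySQL()
42 | # db.insert('articles', {'title': 'NBA news', 'content': '...'})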
--------------------------------------------------------------------------------
/python/.idea/pachong1/cookie.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import urllib.request
3 | import http.cookiejar as c
4 | '''
5 | 3. Load cookies from a file and use them for a request
6 | filename='mycookie.txt'
7 | url='http://www.baidu.com'
8 | cookie=c.MozillaCookieJar()
9 | cookie.load(filename,ignore_expires=True,ignore_discard=True)
10 | request=urllib.request.Request(url)
11 | opener=urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
12 | response=opener.open(request)
13 | print(response.read())
14 | '''
15 | '''
16 | 2. Save cookies to a file
17 | filename='mycookie.txt'
18 | cookie=c.MozillaCookieJar(filename)
19 | handler=urllib.request.HTTPCookieProcessor(cookie)
20 | opener=urllib.request.build_opener(handler)
21 | url='http://www.baidu.com'
22 | request=urllib.request.Request(url)
23 | response=opener.open(request)
24 | cookie.save(ignore_discard=True,ignore_expires=True)
25 | '''
26 | '''
27 | 1. Capture cookies in a variable
28 | # create a CookieJar instance to store the cookies
29 | cookie=c.CookieJar()
30 | # build a cookie handler from urllib.request's HTTPCookieProcessor
31 | handler=urllib.request.HTTPCookieProcessor(cookie)
32 | # build an opener from the handler
33 | opener=urllib.request.build_opener(handler)
34 | url='http://www.baidu.com'
35 | request=urllib.request.Request(url)
36 | response=opener.open(request)
37 | for item in cookie:
38 | print('Name='+item.name)
39 | print('Value='+item.value)
40 | '''
--------------------------------------------------------------------------------
/python/.idea/study/pachong.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | import urllib.request
3 | import re
4 | import random
5 | import requests
6 | def daili(url):
7 | headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
8 | req=urllib.request.Request(url=url,headers=headers)
9 | iplist=['212.8.252.106:1080','61.135.217.7:80','118.190.95.43:9001','110.72.193.161:8123']
10 | proxy_support=urllib.request.ProxyHandler({'http':random.choice(iplist)})
11 | openner=urllib.request.build_opener(proxy_support)
12 | urllib.request.install_opener(openner)
13 | return req
14 |
15 | def pachong():
16 | url='http://www.hugsmxy.com/'
17 | request=daili(url)
18 | response=urllib.request.urlopen(request)
19 | content = response.read().decode('utf-8')
20 | pattern = re.compile('.png')
21 | img=re.findall(pattern,content)
22 | imgurl=list()
23 | i=0
24 | for imgs in img:
25 | #imgurl.append(url+(imgs.attrs.get('src')))
26 | #print(i.__str__()+'.jpg')
27 | #imgreq=requests.get(imgurl[i])
28 | with open(i.__str__()+'.jpg','w') as f:
29 | #f.write(imgreq.content)
30 | f.write(imgs)
31 | i+=1
32 | if i>5:
33 | break
34 | if __name__=='__main__':
35 | pachong()
36 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/ghostdriver.log:
--------------------------------------------------------------------------------
1 | [INFO - 2018-08-02T13:04:04.967Z] GhostDriver - Main - running on port 50077
2 | [INFO - 2018-08-02T13:04:07.069Z] Session [89f31a70-9654-11e8-8c99-6b7bcc14c448] - page.settings - {"XSSAuditingEnabled":false,"javascriptCanCloseWindows":true,"javascriptCanOpenWindows":true,"javascriptEnabled":true,"loadImages":true,"localToRemoteUrlAccessEnabled":false,"userAgent":"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/538.1 (KHTML, like Gecko) PhantomJS/2.1.1 Safari/538.1","webSecurityEnabled":true}
3 | [INFO - 2018-08-02T13:04:07.069Z] Session [89f31a70-9654-11e8-8c99-6b7bcc14c448] - page.customHeaders: - {}
4 | [INFO - 2018-08-02T13:04:07.070Z] Session [89f31a70-9654-11e8-8c99-6b7bcc14c448] - Session.negotiatedCapabilities - {"browserName":"phantomjs","version":"2.1.1","driverName":"ghostdriver","driverVersion":"1.2.0","platform":"windows-10-32bit","javascriptEnabled":true,"takesScreenshot":true,"handlesAlerts":false,"databaseEnabled":false,"locationContextEnabled":false,"applicationCacheEnabled":false,"browserConnectionEnabled":false,"cssSelectorsEnabled":true,"webStorageEnabled":false,"rotatable":false,"acceptSslCerts":false,"nativeEvents":true,"proxy":{"proxyType":"direct"}}
5 | [INFO - 2018-08-02T13:04:07.070Z] SessionManagerReqHand - _postNewSessionCommand - New Session Created: 89f31a70-9654-11e8-8c99-6b7bcc14c448
6 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/spiders/toutiao.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from scrapy import Spider,Request
3 | from scrapytoutiao.links import getlink
4 | from scrapytoutiao.items import NBAItem
5 |
6 | class ToutiaoSpider(Spider):
7 | name = 'toutiao'
8 | allowed_domains = ['www.toutiao.com']
9 | base_urls = 'https://www.toutiao.com'
10 |
11 | def start_requests(self):
12 | links=getlink()
13 | for link in links:
14 | url=self.base_urls+link
15 | yield Request(url=url,callback=self.parse,dont_filter=True)
16 |
17 | def parse(self, response):
18 | try:
19 | item=NBAItem()
20 | item['title']=response.xpath('//div[@class="article-box"]//h1[contains(@class,"article-title")]//text()').extract_first()
21 | item['source']=response.xpath('//div[@class="article-box"]//div[contains(@class,"article-sub")]//text()').re('[\u4e00-\u9fa5_a-zA-Z]+')[-1]# match CJK characters and letters
22 | item['datetime']=response.xpath('//div[@class="article-box"]//div[contains(@class,"article-sub")]//text()').re_first('(\d+-\d+-\d+\s\d+:\d+:\d+)')# match the timestamp
23 | item['content']=''.join(response.xpath('//div[@class="article-box"]//div[contains(@class,"article-content")]//text()').extract()).strip()
24 | yield item
25 | except:
26 | print('Page layout did not match the rules; scrape failed')
27 |
--------------------------------------------------------------------------------
/scrapytoutiao/scrapytoutiao/spiders/ghostdriver.log:
--------------------------------------------------------------------------------
1 | [INFO - 2018-08-02T02:50:19.717Z] GhostDriver - Main - running on port 64482
2 | [INFO - 2018-08-02T02:50:21.779Z] Session [cc5dc7d0-95fe-11e8-81c6-4f1ac8007243] - page.settings - {"XSSAuditingEnabled":false,"javascriptCanCloseWindows":true,"javascriptCanOpenWindows":true,"javascriptEnabled":true,"loadImages":true,"localToRemoteUrlAccessEnabled":false,"userAgent":"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/538.1 (KHTML, like Gecko) PhantomJS/2.1.1 Safari/538.1","webSecurityEnabled":true}
3 | [INFO - 2018-08-02T02:50:21.779Z] Session [cc5dc7d0-95fe-11e8-81c6-4f1ac8007243] - page.customHeaders: - {}
4 | [INFO - 2018-08-02T02:50:21.779Z] Session [cc5dc7d0-95fe-11e8-81c6-4f1ac8007243] - Session.negotiatedCapabilities - {"browserName":"phantomjs","version":"2.1.1","driverName":"ghostdriver","driverVersion":"1.2.0","platform":"windows-10-32bit","javascriptEnabled":true,"takesScreenshot":true,"handlesAlerts":false,"databaseEnabled":false,"locationContextEnabled":false,"applicationCacheEnabled":false,"browserConnectionEnabled":false,"cssSelectorsEnabled":true,"webStorageEnabled":false,"rotatable":false,"acceptSslCerts":false,"nativeEvents":true,"proxy":{"proxyType":"direct"}}
5 | [INFO - 2018-08-02T02:50:21.779Z] SessionManagerReqHand - _postNewSessionCommand - New Session Created: cc5dc7d0-95fe-11e8-81c6-4f1ac8007243
6 |
--------------------------------------------------------------------------------
/python/.idea/py1/love.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 | import requests
3 | import itchat
4 | import time
5 |
6 | def get_news():
7 | url = "http://open.iciba.com/dsapi"
8 | r = requests.get(url)
9 | contents = r.json()['content']
10 | translation = r.json()['translation']
11 | return contents, translation
12 |
13 | def send_news():
14 | try:
15 | # log in to your WeChat account; scan the QR code that pops up
16 | itchat.auto_login(hotReload=True)
17 | # look the friend up by remark name;
18 | # change it to the name of the person you love most
19 | my_friend = itchat.search_friends(name=u'骑摩托的糖')
20 | # get the internal ID string for that name
21 | TTT = my_friend[0]["UserName"]
22 | # get the iciba daily sentence
23 | message1 = str(get_news()[0])
24 | content = str(get_news()[1][17:])
25 | message2 = str(content)
26 | message3 = "From the one who loves you most"
27 | # send the messages
28 | itchat.send(message1, toUserName=TTT)
29 | itchat.send(message2, toUserName=TTT)
30 | itchat.send(message3, toUserName=TTT)
31 | # send once every 86400 seconds (1 day);
32 | # not using a linux cron job because every login needs a QR scan,
33 | # which is a hassle, so just leave it running
34 | # t = time(86400, send_news())
35 | # t.start()
36 | except:
37 | message4 = u"A bug appeared in the one who loves you most today /(ㄒoㄒ)/~~"
38 | itchat.send(message4, toUserName=TTT)# XiaoMing was undefined; reuse TTT from above
39 |
40 | def main():
41 | send_news()
42 |
43 | if __name__ == '__main__':
44 | main()
--------------------------------------------------------------------------------
/python3pra/.idea/part5/zhihu.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import requests
4 | import pyquery
5 | from requests.exceptions import RequestException
6 | def getonepage(url):
7 | try:
8 | headers = {
9 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
10 | 'Referer': 'https://www.zhihu.com/'
11 | }
12 | html=requests.get(url,headers=headers)
13 | if html.status_code==200:
14 | return html.text
15 | return None
16 | except RequestException:
17 | return None
18 |
19 | def parseonepage(html):
20 | doc=pyquery.PyQuery(html)
21 | items=doc('.explore-tab .feed-item').items()
22 | for item in items:
23 | question=item.find('h2').text()
24 | author=item.find('.author-link').text()
25 | answer=pyquery.PyQuery(item.find('.content').html()).text()
26 | print(question)
27 | '''
28 | with open('知乎热门回答.txt','a',encoding='utf-8') as f:
29 | f.write('\n'.join([question,author,answer]))
30 | f.write('\n'+'='*50+'\n')
31 | '''
32 |
33 | def main():
34 | url='https://www.zhihu.com/explore#daily-hot'
35 | html=getonepage(url)
36 | parseonepage(html)
37 |
38 | if __name__ == '__main__':
39 | main()
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/weixin.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 | from pyquery import PyQuery as pq
5 | from bs4 import BeautifulSoup as BS
6 | from lxml import etree
7 | import requests
8 |
9 | base_url = 'http://weixin.sogou.com/weixin?type=2&s_from=input&query='
10 | browser=webdriver.Chrome()
11 | keyword = 'NBA'
12 |
13 |
14 | def getsougouindex():
15 | start_url = base_url +keyword
16 | browser.get(start_url)
17 |
18 | def getarticlehref():
19 | response=browser.page_source
20 | html=etree.HTML(response)
21 | results=html.xpath('//h3/a/@href')
22 | for result in results:
23 | response=requests.get(result)
24 | doc=etree.HTML(response.text)
25 | items=doc.xpath('//div[@class="rich_media_content "]/p/text()')
26 | print(items)
27 | print('---------------------------')
28 | '''
29 | soup=BS(response,'lxml')
30 | href=soup.find_all('h3 a')
31 | print(href)
32 |
33 | items = doc('.news-box .news-list .txt-box').items()
34 | for item in items:
35 | doc=pq(item)
36 | items=doc('h3').items()
37 | for item in items:
38 | print(item)
39 |
40 | url = item.attr('href')
41 | print(url)
42 | '''
43 |
44 |
45 |
46 | if __name__ == '__main__':
47 | getsougouindex()
48 | getarticlehref()
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool/scheduler.py:
--------------------------------------------------------------------------------
1 | import time
2 | from multiprocessing import Process
3 | from api import app
4 | from getter import Getter
5 | from tester import Tester
6 | from db import RedisClient
7 | from setting import *
8 |
9 |
10 | class Scheduler():
11 | def schedule_tester(self, cycle=TESTER_CYCLE):
12 | """
13 | Test proxies on a schedule
14 | """
15 | tester = Tester()
16 | while True:
17 | print('Tester starts running')
18 | tester.run()
19 | time.sleep(cycle)
20 |
21 | def schedule_getter(self, cycle=GETTER_CYCLE):
22 | """
23 | Fetch proxies on a schedule
24 | """
25 | getter = Getter()
26 | while True:
27 | print('Start fetching proxies')
28 | getter.run()
29 | time.sleep(cycle)
30 |
31 | def schedule_api(self):
32 | """
33 | Start the API
34 | """
35 | app.run(API_HOST, API_PORT)
36 |
37 | def run(self):
39 | print('Proxy pool starts running')
39 |
40 | if TESTER_ENABLED:
41 | tester_process = Process(target=self.schedule_tester)
42 | tester_process.start()
43 |
44 | if GETTER_ENABLED:
45 | getter_process = Process(target=self.schedule_getter)
46 | getter_process.start()
47 |
48 | if API_ENABLED:
49 | api_process = Process(target=self.schedule_api)
50 | api_process.start()
51 |
--------------------------------------------------------------------------------
/python/.idea/study/regix.py:
--------------------------------------------------------------------------------
1 | #Created by TTT
2 | # scrape proxy IP addresses with a regular expression
3 | import urllib.request
4 | import urllib
5 | import re
6 | import time
7 | import random
8 | import os
9 | import sys
10 | sys.path.append('.idea/study')# append the package directory, not the .py file, so import jiandan works
11 | import jiandan
12 | jiandan.url_open()
13 | # fetch proxy IPs
14 | def ipget(folder='iplist'):
15 | os.mkdir(folder)
16 | os.chdir(folder)
17 | ip_list = []
18 | url = 'http://www.xicidaili.com/nn/'
19 | headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
20 | request = urllib.request.Request(url=url, headers=headers)
21 | reponse = urllib.request.urlopen(request)
22 | content = reponse.read().decode('utf-8')
23 | pattern = re.compile('<td>(\d.*?)</td>') # capture <td> cell contents whose first character is a digit
24 | ip_page = re.findall(pattern, str(content))
25 | ip_list.extend(ip_page)
26 | time.sleep(random.choice(range(1, 3)))
27 | ip_list=ip_list[::4]
28 | save(ip_list)
29 | def save(ip_list):
30 | for each in ip_list:
31 | #print(each)
32 | with open('ipget.txt','a') as f:# mode 'a' appends each line; 'w' would keep only the last one
33 | f.writelines(each+'\n')
34 | if __name__=='__main__':
35 | ipget()
36 | '''
37 | print('Proxy IP      ','\t','Port','\t','Speed','\t','Checked at')
38 | for i in range(0,len(ip_list),4):
39 | print(ip_list[i],' ','\t',ip_list[i+1],'\t',ip_list[i+2],'\t',ip_list[i+3])
40 | '''
--------------------------------------------------------------------------------
/python3pra/.idea/part3/userequests.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | import requests
4 | import socket
5 | import re  # used only by the commented-out scraping block at the bottom
6 | proxies = {'https': '203.130.46.108:9090', 'http': '222.41.154.119:61202'}
7 | headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
8 |                          '(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
9 |            'host': 'www.zhihu.com',
10 |            'referer': 'https://www.zhihu.com/',
11 |            'cookie': 'd_c0="AEDmtvylrw2PTjgGcgn0aB7ekvQa7I2PGGk=|1527905863"; _zap=a636d264-dc6f-4081-8720-a682748ed85f; z_c0=Mi4xYUluWUFRQUFBQUFBUU9hMl9LV3ZEUmNBQUFCaEFsVk5hMHpfV3dCcTZUTzh4ZGF3X2J6c1JJRVdHVXQxaF9SN1NR|1527905899|1d825851309e6376126ca10dee0186c5fd61c971; q_c1=6e7afb7ad9c44fa7a2cc151b8f6eb7cf|1530753153000|1527905863000; __utmv=51854390.100--|2=registration_date=20150713=1^3=entry_date=20150713=1; __utma=51854390.1924231716.1530753171.1531118074.1531638088.5; __utmz=51854390.1531638088.5.5.utmcsr=zhihu.com|utmccn=(referral)|utmcmd=referral|utmcct=/collections/mine; _xsrf=2771115d-2aca-42ef-8116-e671edf4a8c2; tgw_l7_route=4902c7c12bebebe28366186aba4ffcde'}
12 |
13 | url = 'https://www.zhihu.com'
14 | r = requests.get(url, headers=headers, proxies=proxies)
15 | print(r.text)
16 | #print(r.headers)
17 | '''
18 | print(socket.gethostbyname(socket.gethostname()))
19 |
20 | pattern = re.compile('explore-feed.*?question_link.*?>(.*?)</a>', re.S)
21 | title = re.findall(pattern, r.text)
22 | print(title)
23 | '''
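The hand-copied cookie header above goes stale quickly; a sketch of the same request using requests.Session, which stores cookies from earlier responses and replays them automatically (illustrative only; obtaining logged-in cookies in the first place is out of scope here):

import requests

session = requests.Session()
session.headers.update({'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                                      '(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'})
session.get('https://www.zhihu.com/')             # Set-Cookie values are kept on the session
r = session.get('https://www.zhihu.com/explore')  # ...and sent automatically here
print(r.status_code)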
--------------------------------------------------------------------------------
/python3pra/.idea/proxypool1/scheduler.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | # Scheduler module
4 | import time
5 | from multiprocessing import Process
6 | from api import app
7 | from getter import Getter
8 | from tester import Tester
9 | from db import RedisClient
10 | from setting import *
11 |
12 |
13 | class Scheduler():
14 |     def schedule_tester(self, cycle=TESTER_CYCLE):
15 |         """
16 |         Test proxies periodically
17 |         """
18 |         tester = Tester()
19 |         while True:
20 |             print('Tester is running')
21 |             tester.run()
22 |             time.sleep(cycle)
23 |
24 |     def schedule_getter(self, cycle=GETTER_CYCLE):
25 |         """
26 |         Fetch proxies periodically
27 |         """
28 |         getter = Getter()
29 |         while True:
30 |             print('Start fetching proxies')
31 |             getter.run()
32 |             time.sleep(cycle)
33 |
34 |     def schedule_api(self):
35 |         """
36 |         Start the API server
37 |         """
38 |         app.run(API_HOST, API_PORT)
39 |
40 |     def run(self):
41 |         print('Proxy pool is running')
42 |
43 |         if TESTER_ENABLED:
44 |             tester_process = Process(target=self.schedule_tester)
45 |             tester_process.start()
46 |
47 |         if GETTER_ENABLED:
48 |             getter_process = Process(target=self.schedule_getter)
49 |             getter_process.start()
50 |
51 |         if API_ENABLED:
52 |             api_process = Process(target=self.schedule_api)
53 |             api_process.start()
54 |
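Everything configurable comes from setting.py through the star import; a sketch of the constants this module expects (the names are taken from the code above, the values are illustrative guesses):

# setting.py: names used by scheduler.py; the values here are only examples
TESTER_CYCLE = 20       # seconds between proxy-testing rounds
GETTER_CYCLE = 300      # seconds between crawls for new proxies
TESTER_ENABLED = True   # each component can be disabled independently
GETTER_ENABLED = True
API_ENABLED = True
API_HOST = '0.0.0.0'    # address and port the Flask app listens on
API_PORT = 5555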
--------------------------------------------------------------------------------
/python3pra/.idea/part7/ceshi.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from selenium import webdriver
4 | from selenium.webdriver.support.wait import WebDriverWait
5 | from selenium.webdriver.common.by import By
6 | from selenium.webdriver.support import expected_conditions as EC
7 | from selenium.webdriver.common.keys import Keys
8 | from urllib.parse import quote
9 | from pyquery import PyQuery as pq
10 | from hashlib import md5
11 | import os
12 | import requests
13 |
14 | #chrome_options = webdriver.ChromeOptions()  # create a ChromeOptions object
15 | #chrome_options.add_argument('--headless')  # add the headless flag
16 | browser = webdriver.Chrome()
17 | wait = WebDriverWait(browser, 10)
18 | KEYWORD = '街拍'  # search keyword ("street snaps")
19 | browser.get('https://www.toutiao.com/')
20 | input_box = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#rightModule div.tt-input > input')))
21 | submit = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'tt-button')))  # way 1: click the search button
22 | input_box.clear()
23 | input_box.send_keys(KEYWORD)  # type the raw keyword (URL-quoting it would enter garbled text into the box)
24 | #input_box.send_keys(Keys.ENTER)  # way 2: press Enter instead of clicking
25 | now_handle = browser.current_window_handle  # remember the current window handle
26 | submit.click()  # submit the search
27 | '''
28 | baseurl='https://www.toutiao.com/a'
29 | browser.get(baseurl+'6580500676835541511/')
30 | wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'.imageList')))
31 | html=browser.page_source
32 | doc=pq(html)
33 | items=doc('.imageList .image-list').items()
34 | for item in items:
35 | for i in item.find('.image-item .image-item-inner').items():
36 | print(i.children().attr('data-src'))
37 | '''
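md5, os and requests are imported above but never used, which suggests an image-saving step was planned; a sketch of the usual pattern (save_image is a hypothetical helper; naming each file by the MD5 of its bytes collapses duplicate downloads into a single file):

import os
import requests
from hashlib import md5

def save_image(url, folder='images'):
    os.makedirs(folder, exist_ok=True)
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        # content hash as filename: identical images map to the same path
        path = os.path.join(folder, md5(response.content).hexdigest() + '.jpg')
        if not os.path.exists(path):
            with open(path, 'wb') as f:
                f.write(response.content)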
--------------------------------------------------------------------------------
/python3pra/.idea/weixingongzhonghao/db.py:
--------------------------------------------------------------------------------
1 | __author__ = 'WQ'
2 | # *_*coding:utf-8 *_*
3 | from redis import StrictRedis
4 | from config import *
5 | from pickle import dumps, loads
6 | from request import WeixinRequest
7 |
8 |
9 | class RedisQueue():
10 |     def __init__(self):
11 |         """
12 |         Initialize the Redis connection
13 |         """
14 |         self.db = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
15 |
16 |     def add(self, request):
17 |         """
18 |         Append a serialized Request to the queue
19 |         :param request: the WeixinRequest to enqueue
20 |         :return: result of the RPUSH, or False if the
21 |                  object is not a WeixinRequest
22 |         """
23 |         if isinstance(request, WeixinRequest):
24 |             return self.db.rpush(REDIS_KEY, dumps(request))
25 |         return False
26 |
27 |     def pop(self):
28 |         """
29 |         Pop the next Request and deserialize it
30 |         :return: Request or None
31 |         """
32 |         if self.db.llen(REDIS_KEY):
33 |             return loads(self.db.lpop(REDIS_KEY))
34 |         else:
35 |             return None  # queue is empty
36 |
37 |     def clear(self):
38 |         self.db.delete(REDIS_KEY)
39 |
40 |     def empty(self):
41 |         return self.db.llen(REDIS_KEY) == 0
42 |
43 |
44 | if __name__ == '__main__':
45 |     db = RedisQueue()
46 |     start_url = 'http://www.baidu.com'
47 |     weixin_request = WeixinRequest(url=start_url, callback='hello', need_proxy=True)
48 |     db.add(weixin_request)
49 |     request = db.pop()
50 |     print(request)
51 |     print(request.callback, request.need_proxy)
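A sketch of how a crawler loop might drain this queue (consume and handle are hypothetical names; the real loop presumably lives elsewhere in the project):

from db import RedisQueue

def consume(handle):
    queue = RedisQueue()
    while not queue.empty():
        request = queue.pop()
        if request is None:  # another consumer may have emptied the queue first
            break
        try:
            handle(request)       # fetch and parse the WeixinRequest
        except Exception:
            queue.add(request)    # push it back so it can be retried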
--------------------------------------------------------------------------------