├── .gitignore ├── README.md ├── images ├── 实现步骤.png ├── 小程序工具页.png ├── 小程序首页.png ├── 技术栈.png ├── 文本分类.png ├── 新闻分类动图演示.gif ├── 新闻数量分布.png └── 系统架构图.png ├── 后端API ├── app.py ├── mnb.model ├── new_predict.py ├── transformer └── util │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── common.cpython-37.pyc │ ├── mongo.cpython-37.pyc │ └── result.cpython-37.pyc │ ├── common.py │ ├── data.py │ ├── mongo.py │ ├── result.py │ └── stopwords.txt ├── 微信小程序 ├── app.js ├── app.json ├── app.wxss ├── images │ └── icon │ │ ├── category1.png │ │ ├── category2.png │ │ ├── home1.png │ │ ├── home2.png │ │ ├── tool1.png │ │ └── tool2.png ├── pages │ ├── detail │ │ ├── detail.js │ │ ├── detail.json │ │ ├── detail.wxml │ │ └── detail.wxss │ ├── index │ │ ├── index.js │ │ ├── index.json │ │ ├── index.wxml │ │ └── index.wxss │ └── tool │ │ ├── tool.js │ │ ├── tool.json │ │ ├── tool.wxml │ │ └── tool.wxss ├── project.config.json └── sitemap.json ├── 机器学习 ├── README.md ├── common.py ├── data.py ├── images │ └── 每个分类2000条数据.png ├── mnb.model ├── new_predict.py ├── news_train.py ├── stopwords.txt └── transformer └── 爬虫 ├── README.md ├── 测试新闻数据爬取 └── news_spider │ ├── chinanews.csv │ ├── main.py │ ├── news_spider │ ├── __init__.py │ ├── items.py │ ├── middlewares.py │ ├── pipelines.py │ ├── settings.py │ └── spiders │ │ ├── README.md │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-37.pyc │ │ ├── chinanews.cpython-37.pyc │ │ ├── common.cpython-37.pyc │ │ ├── proxy.cpython-37.pyc │ │ ├── sina.cpython-37.pyc │ │ ├── sohu.cpython-37.pyc │ │ ├── tencent.cpython-37.pyc │ │ ├── wangyi.cpython-37.pyc │ │ └── wangyi_test.cpython-37.pyc │ │ ├── chinanews.py │ │ ├── common.py │ │ └── proxy.py │ └── scrapy.cfg └── 训练新闻数据爬取 ├── spider ├── __init__.py ├── pc_user_agent.json ├── pc_user_agent.py ├── spider.py └── test.py └── 图片 └── 各个分类下的新闻数量.png /.gitignore: -------------------------------------------------------------------------------- 1 | # 忽略 __pycache__ 文件 2 | 
__pycache__/* 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 基于 Scrapy 的新闻智能分类微信小程序 2 | 3 | ## 1、项目介绍 4 | 5 | 该项目是我的毕业设计,是一个文本分类相关的应用,目的是打造出一个可以对新闻进行智能分类的微信小程序。 6 | 7 | ## 2、技术栈 8 | 9 | Python + Scrapy + MongoDB + scikit-learn + Flask + 微信小程序,涉及爬虫、文本分类、Web 开发和微信小程序。 10 | 11 | ![技术栈](./images/技术栈.png) 12 | 13 | ## 3、系统架构图 14 | 15 | 系统架构图如下图所示,分为基础设施层、服务层、交互层、应用层。 16 | 17 | ![系统架构图](./images/系统架构图.png) 18 | 19 | ## 4、实现步骤 20 | 21 | 整个系统的实现分为如下四个步骤,分别是新闻数据爬取、文本分类处理、后端 API 接口开发,微信小程序客户端构建。 22 | 23 | ![实现步骤](./images/实现步骤.png) 24 | 25 | ### 4.1 数据爬取 26 | 27 | 爬虫的目标网站为中国新闻网,新闻数据爬取分类为:国内、国际、军事、体育、社会、娱乐、财经,爬取新闻的时间跨度为 2012 年到 2019 年。 28 | 29 | ![新闻数量分布](./images/新闻数量分布.png) 30 | 31 | ### 4.2 文本分类 32 | 33 | 文本分类的效果如下图所示,橙色和蓝色的数量约接近,代表这个分类下的新闻机器分类正确率越高,可以看到由于上面军事类新闻的数量较少,所以训练出来的新闻分类模型效果不是很高,错误率较高。 34 | 35 | ![文本分类](./images/文本分类.png) 36 | 37 | ## 5、效果演示 38 | 39 | ### 5.1 小程序首页 40 | 41 | 小程序首页展示新闻列表,点击新闻条目可以查看新闻详情,可以点击分类按钮选择新闻分类,还可以输入关键字查找新闻。 42 | 43 | ![小程序首页](./images/小程序首页.png) 44 | 45 | ### 5.2 小程序工具页 46 | 47 | ![小程序工具页](./images/小程序工具页.png) 48 | 49 | ### 5.3 新闻分类功能演示 50 | 51 | 下面通过动图来演示该小程序的特色功能-新闻分类功能(建议下载下面的 gif 图片查看!)。 52 | 53 | ![新闻分类动图演示](./images/新闻分类动图演示.gif) 54 | 55 | -------------------------------------------------------------------------------- /images/实现步骤.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/实现步骤.png -------------------------------------------------------------------------------- /images/小程序工具页.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/小程序工具页.png 
-------------------------------------------------------------------------------- /images/小程序首页.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/小程序首页.png -------------------------------------------------------------------------------- /images/技术栈.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/技术栈.png -------------------------------------------------------------------------------- /images/文本分类.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/文本分类.png -------------------------------------------------------------------------------- /images/新闻分类动图演示.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/新闻分类动图演示.gif -------------------------------------------------------------------------------- /images/新闻数量分布.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/新闻数量分布.png -------------------------------------------------------------------------------- /images/系统架构图.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/images/系统架构图.png -------------------------------------------------------------------------------- /后端API/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request 2 | 3 | from new_predict import text2matrix, classfiy_predict 4 | from util.mongo import MongoDB 5 | from util.result import Result, PageResult 6 | import re 7 | 8 | app = Flask(__name__) 9 | 10 | mongo = MongoDB(db='news', collection='test_news', username='weizhiwen', password='123456') 11 | 12 | 13 | @app.route('/') 14 | def hello_world(): 15 | return '基于Scrapy的智能分类微信小程序后端API' 16 | 17 | 18 | # 新闻列表 19 | @app.route('/news//') 20 | def news(offset, limit=20): 21 | result = {} 22 | items = [] 23 | category = request.args.get('category') 24 | keyword = request.args.get('keyword') 25 | if offset < 0 or limit <= 0: 26 | result = Result(code=Result.FAIL, msg='offset或limit参数有误!') 27 | return result.to_json() 28 | # 筛选字段,过滤 _id(ObjectId) 避免序列化异常 29 | column = {'_id': 0} 30 | # 组装查询条件,无法拼接出带模糊查询的 sql,pymongo 就是个垃圾!!! 
31 | # condition = {} 32 | # if category: 33 | # condition['news_web_category'] = category 34 | # if keyword: 35 | # regex = {'$regex': '.*{}.*'.format(keyword)} 36 | # condition['new_title'] = regex 37 | # print(condition) 38 | try: 39 | if not category and not keyword: 40 | items = list(mongo.collection.find({}, column).sort('news_datetime', -1).skip(offset).limit(limit)) 41 | count = mongo.collection.find({}, column).count() 42 | result = PageResult(items, count=count) 43 | if category and not keyword: 44 | items = list(mongo.collection.find({'news_web_category': category}, column).sort('news_datetime', -1).skip( 45 | offset).limit(limit)) 46 | count = mongo.collection.find({'news_web_category': category}, column).count() 47 | result = PageResult(items, count=count) 48 | if not category and keyword: 49 | items = list( 50 | mongo.collection.find({'news_title': re.compile(keyword, re.IGNORECASE)}, column).sort('news_datetime', 51 | -1).skip( 52 | offset).limit(limit)) 53 | count = mongo.collection.find({'news_title': re.compile(keyword, re.IGNORECASE)}, column).count() 54 | result = PageResult(items, count=count) 55 | if keyword and category: 56 | items = list( 57 | mongo.collection.find({'news_web_category': category, 'news_title': re.compile(keyword, re.IGNORECASE)}, 58 | column).sort('news_datetime', -1).skip(offset).limit(limit)) 59 | count = mongo.collection.find( 60 | {'news_web_category': category, 'news_title': re.compile(keyword, re.IGNORECASE)}, column).count() 61 | result = PageResult(items, count=count) 62 | except Exception as e: 63 | result = Result(code=Result.FAIL, msg=e) 64 | return result.to_json() 65 | 66 | 67 | # 新闻分类 68 | @app.route('/news/') 69 | def category(title): 70 | news_title_list = [] 71 | news_title_list.append(title) 72 | data = text2matrix(news_title_list) 73 | # 使用 MNB 分类模型对新闻进行分类 74 | news_category_list = classfiy_predict(model_path='mnb.model', data=data) 75 | return Result(items=news_category_list[0]).to_json() 76 | 77 | 78 | 
if __name__ == '__main__': 79 | app.run(host='0.0.0.0') 80 | -------------------------------------------------------------------------------- /后端API/mnb.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/mnb.model -------------------------------------------------------------------------------- /后端API/new_predict.py: -------------------------------------------------------------------------------- 1 | # 加载模型文件,生成模型对象 2 | import pickle 3 | import scipy.sparse as sp 4 | import jieba 5 | from sklearn.externals import joblib 6 | from sklearn.feature_extraction.text import TfidfVectorizer 7 | from util.common import category_dict_reverse 8 | 9 | # 获取中文停用词数组 10 | stop_words_list = ['$', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '?', '_', '“', '”', '、', '。', '《', '》', '一', 11 | '一些', '一何', '一切', '一则', '一方面', '一旦', '一来', '一样', '一般', '一转眼', '万一', '上', '上下', '下', '不', '不仅', '不但', 12 | '不光', '不单', '不只', '不外乎', '不如', '不妨', '不尽', '不尽然', '不得', '不怕', '不惟', '不成', '不拘', '不料', '不是', '不比', 13 | '不然', '不特', '不独', '不管', '不至于', '不若', '不论', '不过', '不问', '与', '与其', '与其说', '与否', '与此同时', '且', '且不说', 14 | '且说', '两者', '个', '个别', '临', '为', '为了', '为什么', '为何', '为止', '为此', '为着', '乃', '乃至', '乃至于', '么', '之', 15 | '之一', '之所以', '之类', '乌乎', '乎', '乘', '也', '也好', '也罢', '了', '二来', '于', '于是', '于是乎', '云云', '云尔', '些', 16 | '亦', '人', '人们', '人家', '什么', '什么样', '今', '介于', '仍', '仍旧', '从', '从此', '从而', '他', '他人', '他们', '以', '以上', 17 | '以为', '以便', '以免', '以及', '以故', '以期', '以来', '以至', '以至于', '以致', '们', '任', '任何', '任凭', '似的', '但', '但凡', 18 | '但是', '何', '何以', '何况', '何处', '何时', '余外', '作为', '你', '你们', '使', '使得', '例如', '依', '依据', '依照', '便于', 19 | '俺', '俺们', '倘', '倘使', '倘或', '倘然', '倘若', '借', '假使', '假如', '假若', '傥然', '像', '儿', '先不先', '光是', '全体', 20 | '全部', '兮', '关于', '其', '其一', '其中', '其二', '其他', '其余', '其它', '其次', '具体地说', '具体说来', '兼之', '内', 
'再', 21 | '再其次', '再则', '再有', '再者', '再者说', '再说', '冒', '冲', '况且', '几', '几时', '凡', '凡是', '凭', '凭借', '出于', '出来', 22 | '分别', '则', '则甚', '别', '别人', '别处', '别是', '别的', '别管', '别说', '到', '前后', '前此', '前者', '加之', '加以', '即', 23 | '即令', '即使', '即便', '即如', '即或', '即若', '却', '去', '又', '又及', '及', '及其', '及至', '反之', '反而', '反过来', '反过来说', 24 | '受到', '另', '另一方面', '另外', '另悉', '只', '只当', '只怕', '只是', '只有', '只消', '只要', '只限', '叫', '叮咚', '可', '可以', 25 | '可是', '可见', '各', '各个', '各位', '各种', '各自', '同', '同时', '后', '后者', '向', '向使', '向着', '吓', '吗', '否则', '吧', 26 | '吧哒', '吱', '呀', '呃', '呕', '呗', '呜', '呜呼', '呢', '呵', '呵呵', '呸', '呼哧', '咋', '和', '咚', '咦', '咧', '咱', 27 | '咱们', '咳', '哇', '哈', '哈哈', '哉', '哎', '哎呀', '哎哟', '哗', '哟', '哦', '哩', '哪', '哪个', '哪些', '哪儿', '哪天', 28 | '哪年', '哪怕', '哪样', '哪边', '哪里', '哼', '哼唷', '唉', '唯有', '啊', '啐', '啥', '啦', '啪达', '啷当', '喂', '喏', '喔唷', 29 | '喽', '嗡', '嗡嗡', '嗬', '嗯', '嗳', '嘎', '嘎登', '嘘', '嘛', '嘻', '嘿', '嘿嘿', '因', '因为', '因了', '因此', '因着', 30 | '因而', '固然', '在', '在下', '在于', '地', '基于', '处在', '多', '多么', '多少', '大', '大家', '她', '她们', '好', '如', '如上', 31 | '如上所述', '如下', '如何', '如其', '如同', '如是', '如果', '如此', '如若', '始而', '孰料', '孰知', '宁', '宁可', '宁愿', '宁肯', '它', 32 | '它们', '对', '对于', '对待', '对方', '对比', '将', '小', '尔', '尔后', '尔尔', '尚且', '就', '就是', '就是了', '就是说', '就算', 33 | '就要', '尽', '尽管', '尽管如此', '岂但', '己', '已', '已矣', '巴', '巴巴', '并', '并且', '并非', '庶乎', '庶几', '开外', '开始', 34 | '归', '归齐', '当', '当地', '当然', '当着', '彼', '彼时', '彼此', '往', '待', '很', '得', '得了', '怎', '怎么', '怎么办', '怎么样', 35 | '怎奈', '怎样', '总之', '总的来看', '总的来说', '总的说来', '总而言之', '恰恰相反', '您', '惟其', '慢说', '我', '我们', '或', '或则', 36 | '或是', '或曰', '或者', '截至', '所', '所以', '所在', '所幸', '所有', '才', '才能', '打', '打从', '把', '抑或', '拿', '按', '按照', 37 | '换句话说', '换言之', '据', '据此', '接着', '故', '故此', '故而', '旁人', '无', '无宁', '无论', '既', '既往', '既是', '既然', '时候', 38 | '是', '是以', '是的', '曾', '替', '替代', '最', '有', '有些', '有关', '有及', '有时', '有的', '望', '朝', '朝着', '本', '本人', 39 | '本地', '本着', '本身', '来', '来着', '来自', '来说', '极了', '果然', '果真', '某', '某个', '某些', '某某', '根据', '欤', '正值', 40 | 
'正如', '正巧', '正是', '此', '此地', '此处', '此外', '此时', '此次', '此间', '毋宁', '每', '每当', '比', '比及', '比如', '比方', 41 | '没奈何', '沿', '沿着', '漫说', '焉', '然则', '然后', '然而', '照', '照着', '犹且', '犹自', '甚且', '甚么', '甚或', '甚而', '甚至', 42 | '甚至于', '用', '用来', '由', '由于', '由是', '由此', '由此可见', '的', '的确', '的话', '直到', '相对而言', '省得', '看', '眨眼', '着', 43 | '着呢', '矣', '矣乎', '矣哉', '离', '竟而', '第', '等', '等到', '等等', '简言之', '管', '类如', '紧接着', '纵', '纵令', '纵使', 44 | '纵然', '经', '经过', '结果', '给', '继之', '继后', '继而', '综上所述', '罢了', '者', '而', '而且', '而况', '而后', '而外', '而已', 45 | '而是', '而言', '能', '能否', '腾', '自', '自个儿', '自从', '自各儿', '自后', '自家', '自己', '自打', '自身', '至', '至于', '至今', 46 | '至若', '致', '般的', '若', '若夫', '若是', '若果', '若非', '莫不然', '莫如', '莫若', '虽', '虽则', '虽然', '虽说', '被', '要', 47 | '要不', '要不是', '要不然', '要么', '要是', '譬喻', '譬如', '让', '许多', '论', '设使', '设或', '设若', '诚如', '诚然', '该', '说来', 48 | '诸', '诸位', '诸如', '谁', '谁人', '谁料', '谁知', '贼死', '赖以', '赶', '起', '起见', '趁', '趁着', '越是', '距', '跟', '较', 49 | '较之', '边', '过', '还', '还是', '还有', '还要', '这', '这一来', '这个', '这么', '这么些', '这么样', '这么点儿', '这些', '这会儿', 50 | '这儿', '这就是说', '这时', '这样', '这次', '这般', '这边', '这里', '进而', '连', '连同', '逐步', '通过', '遵循', '遵照', '那', '那个', 51 | '那么', '那么些', '那么样', '那些', '那会儿', '那儿', '那时', '那样', '那般', '那边', '那里', '都', '鄙人', '鉴于', '针对', '阿', '除', 52 | '除了', '除外', '除开', '除此之外', '除非', '随', '随后', '随时', '随着', '难道说', '非但', '非徒', '非特', '非独', '靠', '顺', '顺着', 53 | '首先', '!', ',', ':', ';', '?'] 54 | 55 | 56 | # 新闻标题文本转文本矩阵 57 | def text2matrix(text_list, transfomer_path='transformer'): 58 | # 加载词汇表 59 | with open(transfomer_path, 'rb') as f: 60 | train_transformer = pickle.load(f) 61 | tf_transformer = TfidfVectorizer(smooth_idf=True, stop_words=stop_words_list, 62 | vocabulary=train_transformer.vocabulary_) 63 | tf_transformer._tfidf._idf_diag = sp.spdiags(train_transformer.idf_, diags=0, m=len(train_transformer.idf_), 64 | n=len(train_transformer.idf_)) 65 | text_data = [] 66 | for text in text_list: 67 | text_data.append(" ".join(jieba.cut(text))) 68 | return 
tf_transformer.transform(text_data) 69 | 70 | 71 | # 分类模型预测 72 | def classfiy_predict(model_path, data): 73 | with open(model_path, 'rb') as f: 74 | new_model = joblib.load(f) 75 | predictions = new_model.predict(data) 76 | category_list = predictions.tolist() 77 | news_category_list = [] 78 | for item in category_list: 79 | category = category_dict_reverse[item] 80 | news_category_list.append(category) 81 | return news_category_list 82 | -------------------------------------------------------------------------------- /后端API/transformer: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/transformer -------------------------------------------------------------------------------- /后端API/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/util/__init__.py -------------------------------------------------------------------------------- /后端API/util/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/util/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /后端API/util/__pycache__/common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/util/__pycache__/common.cpython-37.pyc -------------------------------------------------------------------------------- 
/后端API/util/__pycache__/mongo.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/util/__pycache__/mongo.cpython-37.pyc -------------------------------------------------------------------------------- /后端API/util/__pycache__/result.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/后端API/util/__pycache__/result.cpython-37.pyc -------------------------------------------------------------------------------- /后端API/util/common.py: -------------------------------------------------------------------------------- 1 | # 获取停用词表数组 2 | def get_stop_words_list(): 3 | with open('stopwords.txt', encoding='utf-8', errors='ignore') as file: 4 | return file.read().replace('\n', ' ').split() 5 | 6 | 7 | # 新闻分类文本转数字 8 | category_dict = { 9 | '国内': 0, 10 | '国际': 1, 11 | '军事': 2, 12 | '体育': 3, 13 | '社会': 4, 14 | '娱乐': 5, 15 | '财经': 6 16 | } 17 | 18 | # 新闻分类数字转文本 19 | category_dict_reverse = { 20 | 0: '国内', 21 | 1: '国际', 22 | 2: '军事', 23 | 3: '体育', 24 | 4: '社会', 25 | 5: '娱乐', 26 | 6: '财经' 27 | } 28 | 29 | if __name__ == '__main__': 30 | print(get_stop_words_list()) 31 | -------------------------------------------------------------------------------- /后端API/util/data.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient 2 | from pprint import pprint 3 | 4 | 5 | # 新闻数据类 6 | class NewsData(object): 7 | # 构造方法,使用指定的db和collection 8 | def __init__(self, db, collection, host='127.0.0.1', port=27017, username='weizhiwen', password='123456'): 9 | self.client = MongoClient(host=host, port=port, username=username, password=password) 10 | self.db = self.client[db] 11 
| self.collection = self.db[collection] 12 | 13 | # 获取训练数据库中的新闻数据 14 | def get_train_news(self, news_num_dict=None): 15 | new_list = [] 16 | # 如果传入了各个分类指定的新闻数量,就使用指定值,否则查询所有 17 | if news_num_dict: 18 | new_list.extend(self.collection.find({'news_category': '国内'}).limit(news_num_dict['国内'])) 19 | new_list.extend(self.collection.find({'news_category': '国际'}).limit(news_num_dict['国际'])) 20 | new_list.extend(self.collection.find({'news_category': '军事'}).limit(news_num_dict['军事'])) 21 | new_list.extend(self.collection.find({'news_category': '体育'}).limit(news_num_dict['体育'])) 22 | new_list.extend(self.collection.find({'news_category': '社会'}).limit(news_num_dict['社会'])) 23 | new_list.extend(self.collection.find({'news_category': '娱乐'}).limit(news_num_dict['娱乐'])) 24 | new_list.extend(self.collection.find({'news_category': '财经'}).limit(news_num_dict['财经'])) 25 | else: 26 | new_list.extend( 27 | self.collection.find({'news_category': {'$in': ['国内', '国际', '军事', '体育', '社会', '娱乐', '财经']}})) 28 | return new_list 29 | 30 | # 获取测试数据库中的新闻数据 31 | def get_test_news(self, news_num_dict=None): 32 | new_list = [] 33 | # 如果传入了各个分类指定的新闻数量,就使用指定值,否则查询所有 34 | if news_num_dict: 35 | new_list.extend(self.collection.find({'news_web_category': '国内'}).limit(news_num_dict['国内'])) 36 | new_list.extend(self.collection.find({'news_web_category': '国际'}).limit(news_num_dict['国际'])) 37 | new_list.extend(self.collection.find({'news_web_category': '军事'}).limit(news_num_dict['军事'])) 38 | new_list.extend(self.collection.find({'news_web_category': '体育'}).limit(news_num_dict['体育'])) 39 | new_list.extend(self.collection.find({'news_web_category': '社会'}).limit(news_num_dict['社会'])) 40 | new_list.extend(self.collection.find({'news_web_category': '娱乐'}).limit(news_num_dict['娱乐'])) 41 | new_list.extend(self.collection.find({'news_web_category': '财经'}).limit(news_num_dict['财经'])) 42 | else: 43 | new_list.extend( 44 | self.collection.find({'news_web_category': {'$in': ['国内', '国际', '军事', '体育', '社会', '娱乐', '财经']}})) 
45 | return new_list 46 | 47 | 48 | if __name__ == '__main__': 49 | train_news_data = NewsData(db='news', collection='train_news') 50 | test_news_data = NewsData(db='news', collection='test_news') 51 | pprint(test_news_data.get_test_news()) 52 | print(len(test_news_data.get_test_news())) 53 | -------------------------------------------------------------------------------- /后端API/util/mongo.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient 2 | 3 | 4 | # MongoDB 封装类 5 | class MongoDB: 6 | # 构造方法 7 | def __init__(self, db, collection, host='localhost', port=27017, username=None, password=None): 8 | self.host = host 9 | self.port = port 10 | self.client = MongoClient(host=host, port=port, username=username, password=password) 11 | self.db = self.client[db] 12 | self.collection = self.db[collection] 13 | -------------------------------------------------------------------------------- /后端API/util/result.py: -------------------------------------------------------------------------------- 1 | # 返回的JSON结果封装类 2 | import json 3 | 4 | 5 | # 基本的result类 6 | class Result: 7 | # 成功 8 | SUCCESS = 0 9 | # 失败 10 | FAIL = 1 11 | 12 | def __init__(self, items=None, code=SUCCESS, msg='success'): 13 | if items: 14 | self.data = {} 15 | self.data['items'] = items 16 | self.code = code 17 | self.msg = msg 18 | 19 | def to_json(self): 20 | return json.dumps(self.__dict__) 21 | 22 | 23 | # 带分页的result类 24 | class PageResult(Result): 25 | def __init__(self, items=None, count=0, code=Result.SUCCESS, msg='success'): 26 | if items: 27 | self.data = {} 28 | self.data['items'] = items 29 | self.data['count'] = count 30 | self.code = code 31 | self.msg = msg 32 | 33 | 34 | if __name__ == '__main__': 35 | items = None 36 | result = Result(items=items) 37 | print(result.to_json()) 38 | print(type(result.__dict__)) 39 | print(type(json.dumps(result.__dict__))) 40 | result = Result(code=Result.FAIL, msg='') 41 | 
print(result.to_json()) 42 | 43 | -------------------------------------------------------------------------------- /后端API/util/stopwords.txt: -------------------------------------------------------------------------------- 1 | $ 2 | 0 3 | 1 4 | 2 5 | 3 6 | 4 7 | 5 8 | 6 9 | 7 10 | 8 11 | 9 12 | ? 13 | _ 14 | “ 15 | ” 16 | 、 17 | 。 18 | 《 19 | 》 20 | 一 21 | 一些 22 | 一何 23 | 一切 24 | 一则 25 | 一方面 26 | 一旦 27 | 一来 28 | 一样 29 | 一般 30 | 一转眼 31 | 万一 32 | 上 33 | 上下 34 | 下 35 | 不 36 | 不仅 37 | 不但 38 | 不光 39 | 不单 40 | 不只 41 | 不外乎 42 | 不如 43 | 不妨 44 | 不尽 45 | 不尽然 46 | 不得 47 | 不怕 48 | 不惟 49 | 不成 50 | 不拘 51 | 不料 52 | 不是 53 | 不比 54 | 不然 55 | 不特 56 | 不独 57 | 不管 58 | 不至于 59 | 不若 60 | 不论 61 | 不过 62 | 不问 63 | 与 64 | 与其 65 | 与其说 66 | 与否 67 | 与此同时 68 | 且 69 | 且不说 70 | 且说 71 | 两者 72 | 个 73 | 个别 74 | 临 75 | 为 76 | 为了 77 | 为什么 78 | 为何 79 | 为止 80 | 为此 81 | 为着 82 | 乃 83 | 乃至 84 | 乃至于 85 | 么 86 | 之 87 | 之一 88 | 之所以 89 | 之类 90 | 乌乎 91 | 乎 92 | 乘 93 | 也 94 | 也好 95 | 也罢 96 | 了 97 | 二来 98 | 于 99 | 于是 100 | 于是乎 101 | 云云 102 | 云尔 103 | 些 104 | 亦 105 | 人 106 | 人们 107 | 人家 108 | 什么 109 | 什么样 110 | 今 111 | 介于 112 | 仍 113 | 仍旧 114 | 从 115 | 从此 116 | 从而 117 | 他 118 | 他人 119 | 他们 120 | 以 121 | 以上 122 | 以为 123 | 以便 124 | 以免 125 | 以及 126 | 以故 127 | 以期 128 | 以来 129 | 以至 130 | 以至于 131 | 以致 132 | 们 133 | 任 134 | 任何 135 | 任凭 136 | 似的 137 | 但 138 | 但凡 139 | 但是 140 | 何 141 | 何以 142 | 何况 143 | 何处 144 | 何时 145 | 余外 146 | 作为 147 | 你 148 | 你们 149 | 使 150 | 使得 151 | 例如 152 | 依 153 | 依据 154 | 依照 155 | 便于 156 | 俺 157 | 俺们 158 | 倘 159 | 倘使 160 | 倘或 161 | 倘然 162 | 倘若 163 | 借 164 | 假使 165 | 假如 166 | 假若 167 | 傥然 168 | 像 169 | 儿 170 | 先不先 171 | 光是 172 | 全体 173 | 全部 174 | 兮 175 | 关于 176 | 其 177 | 其一 178 | 其中 179 | 其二 180 | 其他 181 | 其余 182 | 其它 183 | 其次 184 | 具体地说 185 | 具体说来 186 | 兼之 187 | 内 188 | 再 189 | 再其次 190 | 再则 191 | 再有 192 | 再者 193 | 再者说 194 | 再说 195 | 冒 196 | 冲 197 | 况且 198 | 几 199 | 几时 200 | 凡 201 | 凡是 202 | 凭 203 | 凭借 204 | 出于 205 | 出来 206 | 分别 207 | 则 208 | 则甚 209 | 别 210 | 别人 211 | 别处 212 | 别是 213 | 别的 214 | 别管 215 
| 别说 216 | 到 217 | 前后 218 | 前此 219 | 前者 220 | 加之 221 | 加以 222 | 即 223 | 即令 224 | 即使 225 | 即便 226 | 即如 227 | 即或 228 | 即若 229 | 却 230 | 去 231 | 又 232 | 又及 233 | 及 234 | 及其 235 | 及至 236 | 反之 237 | 反而 238 | 反过来 239 | 反过来说 240 | 受到 241 | 另 242 | 另一方面 243 | 另外 244 | 另悉 245 | 只 246 | 只当 247 | 只怕 248 | 只是 249 | 只有 250 | 只消 251 | 只要 252 | 只限 253 | 叫 254 | 叮咚 255 | 可 256 | 可以 257 | 可是 258 | 可见 259 | 各 260 | 各个 261 | 各位 262 | 各种 263 | 各自 264 | 同 265 | 同时 266 | 后 267 | 后者 268 | 向 269 | 向使 270 | 向着 271 | 吓 272 | 吗 273 | 否则 274 | 吧 275 | 吧哒 276 | 吱 277 | 呀 278 | 呃 279 | 呕 280 | 呗 281 | 呜 282 | 呜呼 283 | 呢 284 | 呵 285 | 呵呵 286 | 呸 287 | 呼哧 288 | 咋 289 | 和 290 | 咚 291 | 咦 292 | 咧 293 | 咱 294 | 咱们 295 | 咳 296 | 哇 297 | 哈 298 | 哈哈 299 | 哉 300 | 哎 301 | 哎呀 302 | 哎哟 303 | 哗 304 | 哟 305 | 哦 306 | 哩 307 | 哪 308 | 哪个 309 | 哪些 310 | 哪儿 311 | 哪天 312 | 哪年 313 | 哪怕 314 | 哪样 315 | 哪边 316 | 哪里 317 | 哼 318 | 哼唷 319 | 唉 320 | 唯有 321 | 啊 322 | 啐 323 | 啥 324 | 啦 325 | 啪达 326 | 啷当 327 | 喂 328 | 喏 329 | 喔唷 330 | 喽 331 | 嗡 332 | 嗡嗡 333 | 嗬 334 | 嗯 335 | 嗳 336 | 嘎 337 | 嘎登 338 | 嘘 339 | 嘛 340 | 嘻 341 | 嘿 342 | 嘿嘿 343 | 因 344 | 因为 345 | 因了 346 | 因此 347 | 因着 348 | 因而 349 | 固然 350 | 在 351 | 在下 352 | 在于 353 | 地 354 | 基于 355 | 处在 356 | 多 357 | 多么 358 | 多少 359 | 大 360 | 大家 361 | 她 362 | 她们 363 | 好 364 | 如 365 | 如上 366 | 如上所述 367 | 如下 368 | 如何 369 | 如其 370 | 如同 371 | 如是 372 | 如果 373 | 如此 374 | 如若 375 | 始而 376 | 孰料 377 | 孰知 378 | 宁 379 | 宁可 380 | 宁愿 381 | 宁肯 382 | 它 383 | 它们 384 | 对 385 | 对于 386 | 对待 387 | 对方 388 | 对比 389 | 将 390 | 小 391 | 尔 392 | 尔后 393 | 尔尔 394 | 尚且 395 | 就 396 | 就是 397 | 就是了 398 | 就是说 399 | 就算 400 | 就要 401 | 尽 402 | 尽管 403 | 尽管如此 404 | 岂但 405 | 己 406 | 已 407 | 已矣 408 | 巴 409 | 巴巴 410 | 并 411 | 并且 412 | 并非 413 | 庶乎 414 | 庶几 415 | 开外 416 | 开始 417 | 归 418 | 归齐 419 | 当 420 | 当地 421 | 当然 422 | 当着 423 | 彼 424 | 彼时 425 | 彼此 426 | 往 427 | 待 428 | 很 429 | 得 430 | 得了 431 | 怎 432 | 怎么 433 | 怎么办 434 | 怎么样 435 | 怎奈 436 | 怎样 437 | 总之 438 | 总的来看 439 | 总的来说 440 | 总的说来 441 | 总而言之 442 | 恰恰相反 443 | 您 444 | 惟其 
445 | 慢说 446 | 我 447 | 我们 448 | 或 449 | 或则 450 | 或是 451 | 或曰 452 | 或者 453 | 截至 454 | 所 455 | 所以 456 | 所在 457 | 所幸 458 | 所有 459 | 才 460 | 才能 461 | 打 462 | 打从 463 | 把 464 | 抑或 465 | 拿 466 | 按 467 | 按照 468 | 换句话说 469 | 换言之 470 | 据 471 | 据此 472 | 接着 473 | 故 474 | 故此 475 | 故而 476 | 旁人 477 | 无 478 | 无宁 479 | 无论 480 | 既 481 | 既往 482 | 既是 483 | 既然 484 | 时候 485 | 是 486 | 是以 487 | 是的 488 | 曾 489 | 替 490 | 替代 491 | 最 492 | 有 493 | 有些 494 | 有关 495 | 有及 496 | 有时 497 | 有的 498 | 望 499 | 朝 500 | 朝着 501 | 本 502 | 本人 503 | 本地 504 | 本着 505 | 本身 506 | 来 507 | 来着 508 | 来自 509 | 来说 510 | 极了 511 | 果然 512 | 果真 513 | 某 514 | 某个 515 | 某些 516 | 某某 517 | 根据 518 | 欤 519 | 正值 520 | 正如 521 | 正巧 522 | 正是 523 | 此 524 | 此地 525 | 此处 526 | 此外 527 | 此时 528 | 此次 529 | 此间 530 | 毋宁 531 | 每 532 | 每当 533 | 比 534 | 比及 535 | 比如 536 | 比方 537 | 没奈何 538 | 沿 539 | 沿着 540 | 漫说 541 | 焉 542 | 然则 543 | 然后 544 | 然而 545 | 照 546 | 照着 547 | 犹且 548 | 犹自 549 | 甚且 550 | 甚么 551 | 甚或 552 | 甚而 553 | 甚至 554 | 甚至于 555 | 用 556 | 用来 557 | 由 558 | 由于 559 | 由是 560 | 由此 561 | 由此可见 562 | 的 563 | 的确 564 | 的话 565 | 直到 566 | 相对而言 567 | 省得 568 | 看 569 | 眨眼 570 | 着 571 | 着呢 572 | 矣 573 | 矣乎 574 | 矣哉 575 | 离 576 | 竟而 577 | 第 578 | 等 579 | 等到 580 | 等等 581 | 简言之 582 | 管 583 | 类如 584 | 紧接着 585 | 纵 586 | 纵令 587 | 纵使 588 | 纵然 589 | 经 590 | 经过 591 | 结果 592 | 给 593 | 继之 594 | 继后 595 | 继而 596 | 综上所述 597 | 罢了 598 | 者 599 | 而 600 | 而且 601 | 而况 602 | 而后 603 | 而外 604 | 而已 605 | 而是 606 | 而言 607 | 能 608 | 能否 609 | 腾 610 | 自 611 | 自个儿 612 | 自从 613 | 自各儿 614 | 自后 615 | 自家 616 | 自己 617 | 自打 618 | 自身 619 | 至 620 | 至于 621 | 至今 622 | 至若 623 | 致 624 | 般的 625 | 若 626 | 若夫 627 | 若是 628 | 若果 629 | 若非 630 | 莫不然 631 | 莫如 632 | 莫若 633 | 虽 634 | 虽则 635 | 虽然 636 | 虽说 637 | 被 638 | 要 639 | 要不 640 | 要不是 641 | 要不然 642 | 要么 643 | 要是 644 | 譬喻 645 | 譬如 646 | 让 647 | 许多 648 | 论 649 | 设使 650 | 设或 651 | 设若 652 | 诚如 653 | 诚然 654 | 该 655 | 说来 656 | 诸 657 | 诸位 658 | 诸如 659 | 谁 660 | 谁人 661 | 谁料 662 | 谁知 663 | 贼死 664 | 赖以 665 | 赶 666 | 起 667 | 起见 668 | 趁 669 | 趁着 670 | 越是 671 | 距 
672 | 跟 673 | 较 674 | 较之 675 | 边 676 | 过 677 | 还 678 | 还是 679 | 还有 680 | 还要 681 | 这 682 | 这一来 683 | 这个 684 | 这么 685 | 这么些 686 | 这么样 687 | 这么点儿 688 | 这些 689 | 这会儿 690 | 这儿 691 | 这就是说 692 | 这时 693 | 这样 694 | 这次 695 | 这般 696 | 这边 697 | 这里 698 | 进而 699 | 连 700 | 连同 701 | 逐步 702 | 通过 703 | 遵循 704 | 遵照 705 | 那 706 | 那个 707 | 那么 708 | 那么些 709 | 那么样 710 | 那些 711 | 那会儿 712 | 那儿 713 | 那时 714 | 那样 715 | 那般 716 | 那边 717 | 那里 718 | 都 719 | 鄙人 720 | 鉴于 721 | 针对 722 | 阿 723 | 除 724 | 除了 725 | 除外 726 | 除开 727 | 除此之外 728 | 除非 729 | 随 730 | 随后 731 | 随时 732 | 随着 733 | 难道说 734 | 非但 735 | 非徒 736 | 非特 737 | 非独 738 | 靠 739 | 顺 740 | 顺着 741 | 首先 742 | ! 743 | , 744 | : 745 | ; 746 | ? -------------------------------------------------------------------------------- /微信小程序/app.js: -------------------------------------------------------------------------------- 1 | //app.js 2 | App({ 3 | onLaunch: function () { 4 | // 展示本地存储能力 5 | var logs = wx.getStorageSync('logs') || [] 6 | logs.unshift(Date.now()) 7 | wx.setStorageSync('logs', logs) 8 | 9 | // 登录 10 | wx.login({ 11 | success: res => { 12 | // 发送 res.code 到后台换取 openId, sessionKey, unionId 13 | } 14 | }) 15 | // 获取用户信息 16 | wx.getSetting({ 17 | success: res => { 18 | if (res.authSetting['scope.userInfo']) { 19 | // 已经授权,可以直接调用 getUserInfo 获取头像昵称,不会弹框 20 | wx.getUserInfo({ 21 | success: res => { 22 | // 可以将 res 发送给后台解码出 unionId 23 | this.globalData.userInfo = res.userInfo 24 | 25 | // 由于 getUserInfo 是网络请求,可能会在 Page.onLoad 之后才返回 26 | // 所以此处加入 callback 以防止这种情况 27 | if (this.userInfoReadyCallback) { 28 | this.userInfoReadyCallback(res) 29 | } 30 | } 31 | }) 32 | } 33 | } 34 | }) 35 | }, 36 | globalData: { 37 | local_host: 'http://127.0.0.1:5000/', 38 | remote_host: '', 39 | host: 'http://127.0.0.1:5000/', 40 | limit: 10, 41 | } 42 | }) -------------------------------------------------------------------------------- /微信小程序/app.json: -------------------------------------------------------------------------------- 1 | { 2 | "pages": [ 3 | 
"pages/index/index", 4 | "pages/tool/tool", 5 | "pages/detail/detail" 6 | ], 7 | "window": { 8 | "backgroundTextStyle": "light", 9 | "navigationBarBackgroundColor": "#6CD128", 10 | "navigationBarTitleText": "虫虫新闻", 11 | "navigationBarTextStyle": "white" 12 | }, 13 | "tabBar": { 14 | "color": "#BFBFBF", 15 | "selectedColor": "#1AFA29", 16 | "backgroundColor": "#ffffff", 17 | "list": [ 18 | { 19 | "iconPath": "images/icon/home1.png", 20 | "selectedIconPath": "images/icon/home2.png", 21 | "text": "首页", 22 | "pagePath": "pages/index/index" 23 | }, 24 | { 25 | "iconPath": "images/icon/tool1.png", 26 | "selectedIconPath": "images/icon/tool2.png", 27 | "text": "工具", 28 | "pagePath": "pages/tool/tool" 29 | } 30 | ] 31 | }, 32 | "sitemapLocation": "sitemap.json" 33 | } -------------------------------------------------------------------------------- /微信小程序/app.wxss: -------------------------------------------------------------------------------- 1 | /**app.wxss**/ 2 | .container { 3 | height: 100%; 4 | display: flex; 5 | flex-direction: column; 6 | align-items: center; 7 | justify-content: space-between; 8 | padding: 200rpx 0; 9 | box-sizing: border-box; 10 | } 11 | -------------------------------------------------------------------------------- /微信小程序/images/icon/category1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/微信小程序/images/icon/category1.png -------------------------------------------------------------------------------- /微信小程序/images/icon/category2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/微信小程序/images/icon/category2.png -------------------------------------------------------------------------------- 
/微信小程序/images/icon/home1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/微信小程序/images/icon/home1.png -------------------------------------------------------------------------------- /微信小程序/images/icon/home2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/微信小程序/images/icon/home2.png -------------------------------------------------------------------------------- /微信小程序/images/icon/tool1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/微信小程序/images/icon/tool1.png -------------------------------------------------------------------------------- /微信小程序/images/icon/tool2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/微信小程序/images/icon/tool2.png -------------------------------------------------------------------------------- /微信小程序/pages/detail/detail.js: -------------------------------------------------------------------------------- 1 | // pages/detail/detail.js 2 | Page({ 3 | 4 | /** 5 | * 页面的初始数据 6 | */ 7 | data: { 8 | url: '' 9 | }, 10 | 11 | /** 12 | * 生命周期函数--监听页面加载 13 | */ 14 | onLoad: function (options) { 15 | that = this 16 | that.setData({ 17 | url: options.url 18 | }) 19 | }, 20 | 21 | /** 22 | * 生命周期函数--监听页面初次渲染完成 23 | */ 24 | onReady: function () { 25 | 26 | }, 27 | 28 | /** 29 | * 生命周期函数--监听页面显示 30 | */ 31 | onShow: function () { 32 | 33 | }, 34 | 35 | /** 36 | * 
生命周期函数--监听页面隐藏 37 | */ 38 | onHide: function () { 39 | 40 | }, 41 | 42 | /** 43 | * 生命周期函数--监听页面卸载 44 | */ 45 | onUnload: function () { 46 | 47 | }, 48 | 49 | /** 50 | * 页面相关事件处理函数--监听用户下拉动作 51 | */ 52 | onPullDownRefresh: function () { 53 | 54 | }, 55 | 56 | /** 57 | * 页面上拉触底事件的处理函数 58 | */ 59 | onReachBottom: function () { 60 | 61 | }, 62 | 63 | /** 64 | * 用户点击右上角分享 65 | */ 66 | onShareAppMessage: function () { 67 | 68 | } 69 | }) -------------------------------------------------------------------------------- /微信小程序/pages/detail/detail.json: -------------------------------------------------------------------------------- 1 | { 2 | "usingComponents": {} 3 | } -------------------------------------------------------------------------------- /微信小程序/pages/detail/detail.wxml: -------------------------------------------------------------------------------- 1 | <web-view src="{{url}}">百度</web-view> 2 | -------------------------------------------------------------------------------- /微信小程序/pages/detail/detail.wxss: -------------------------------------------------------------------------------- 1 | /* pages/detail/detail.wxss */ -------------------------------------------------------------------------------- /微信小程序/pages/index/index.js: -------------------------------------------------------------------------------- 1 | var app = getApp() 2 | Page({ 3 | data: { 4 | news: [], 5 | categoryIcon: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAwklEQVRYR+2WMQ7CMAxFa+U4MDnKDNyBA/VA3AGYo3iC41hGRu1QgmhrITWDO/srzov0fqHb+IONz+/aWiDn3IcQDiJy/EUGAG7MfE8p9TpnzWl2QoCIHiKyW/IsAPBExL3OWnPVAqUUWXL4OBNjfF/AmvMFnIATaI8AEV3nLDg6QG2IiKdBRKZcRWBQ6nnOhmpBZr58qHh1rlpgjQX/Nett6G04IWCtVWuuPRFZb2LNOQEn8O233NRq1hb1NvQ6VgIvfmMgMBn2T0cAAAAASUVORK5CYII=', 6 | show: false, 7 | currentCategory: '', 8 | inputValue: '', 9 | keyword: '', 10 | categories: ['国内', '国际', '军事', '体育', '社会', '财经', '娱乐'], 11 | offset: 0, 12 | totalCount: 0, 13 | searchLoading: false, //"上拉加载"的变量,默认false,隐藏 
14 | searchLoadingComplete: false //“没有数据”的变量,默认false,隐藏 15 | }, 16 | 17 | /** 18 | * 用户自定义的函数 19 | */ 20 | 21 | // 新闻搜索 22 | search_news: function(e) { 23 | that = this 24 | keyword = e.detail.value 25 | // 在发送请求前把新闻数据数组清空 26 | that.setData({ 27 | currentCategory:'', 28 | news: [], 29 | offset: 0, 30 | }) 31 | that.fetch_news(that, '', keyword) 32 | }, 33 | 34 | // 弹出分类侧栏 35 | tap_category: function(e) { 36 | var that = this 37 | if (that.data.show) { 38 | that.setData({ 39 | show: false, 40 | categoryIcon: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAwklEQVRYR+2WMQ7CMAxFa+U4MDnKDNyBA/VA3AGYo3iC41hGRu1QgmhrITWDO/srzov0fqHb+IONz+/aWiDn3IcQDiJy/EUGAG7MfE8p9TpnzWl2QoCIHiKyW/IsAPBExL3OWnPVAqUUWXL4OBNjfF/AmvMFnIATaI8AEV3nLDg6QG2IiKdBRKZcRWBQ6nnOhmpBZr58qHh1rlpgjQX/Nett6G04IWCtVWuuPRFZb2LNOQEn8O233NRq1hb1NvQ6VgIvfmMgMBn2T0cAAAAASUVORK5CYII=' 41 | }); 42 | } else { 43 | that.setData({ 44 | show: true, 45 | categoryIcon: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAB/klEQVRYR+1WPWgTYRh+3s/cpQ4KLQ720qGD4N1llQ4uUoqDi1BsK4VCCw4u4iKC3drJDt2K0KHgZClOBQehIM2mDp0kuQMLBWm+BMSfrdwFv1cu6SU9f5ofPkgCuem4e+55n+95f+4ldPmiLsfHQEBvOZAJ3Dm+wKFMebtna+PqiT0uBJZ01YtM+ysxV90BK3Cfg/hZ9IKZl0tpfy0GWaFzBGBclwAQvZFG4W7EVxeQCZ0cA7eqQQi70vCmo9sxzl5TFfVZW/Aav5SGl0kIsALnPgjbAAIl6F45VXjbcMB9CbC2FIB5NU5DbxWhVptbJBs40DsOWIE9T4JmmPFOGGLnmPLf4zRmQntJgRZbTGtTGP/i1fJFP5dow9HQfk2g2dogooVSuvDqdA6MqIr61pS1PcCBNL0bLQmIQFbocHv856MZyJVMb/KPQfT/FFz5ev2SeZme6BLxz3+BLvJ2eXqnC9pVrgvfPw6McXZEx6kFBH+hTz9irjMLib0CohkAH4Qh1o4pf9gYRO4mgx/qEHDK8Uia3otkG1acIhhWDUCPpVnYqA6iMDuhoD5qDB5R/ZSmN5wUEDpbAB5UHxLfLBr++zioVXH2wLitTQTzukz7TxMCaqsX31EKh+WhxjZUT8OJO6VFQIpU0cjv/1UDWsg7IOmfNuzgcC19MnDgNyWYqyFyPnQ0AAAAAElFTkSuQmCC' 46 | }); 47 | } 48 | }, 49 | 50 | // 选择分类 51 | select_category: function(e) { 52 | var that = this 53 | var category = e.currentTarget.dataset.text 54 | // 在发送请求前把新闻数据数组清空 55 | that.setData({ 56 | currentCategory: category, 57 | news: [], 58 | offset: 0, 59 | }) 
60 | that.fetch_news(that, category, that.data.keyword) 61 | }, 62 | 63 | // 根据分类和关键字获取新闻数据 64 | fetch_news(that, category, keyword) { 65 | wx.request({ 66 | url: app.globalData.host + 'news/' + that.data.offset + '/' + app.globalData.limit, 67 | data: { 68 | category: category, 69 | keyword: keyword, 70 | }, 71 | header: { 72 | 'content-type': 'application/json' 73 | }, 74 | // 如果请求成功,并且 msg 为 success 75 | success(res) { 76 | // 设置新闻数组和数量 77 | if (res.data.code == 0) { 78 | var news = that.data.news.concat(res.data.data.items) 79 | that.setData({ 80 | news: news, 81 | offset: that.data.offset + app.globalData.limit, 82 | totalCount: res.data.data.count, 83 | }) 84 | } 85 | } 86 | }) 87 | }, 88 | 89 | /** 90 | * 生命周期函数--监听页面加载 91 | */ 92 | onLoad: function(options) { 93 | var that = this 94 | that.fetch_news(that, that.data.currentCategory, that.data.keyword) 95 | }, 96 | 97 | /** 98 | * 生命周期函数--监听页面初次渲染完成 99 | */ 100 | onReady: function() { 101 | 102 | }, 103 | 104 | /** 105 | * 生命周期函数--监听页面显示 106 | */ 107 | onShow: function() { 108 | 109 | }, 110 | 111 | /** 112 | * 生命周期函数--监听页面隐藏 113 | */ 114 | onHide: function() { 115 | 116 | }, 117 | 118 | /** 119 | * 生命周期函数--监听页面卸载 120 | */ 121 | onUnload: function() { 122 | 123 | }, 124 | 125 | /** 126 | * 页面相关事件处理函数--监听用户下拉动作 127 | */ 128 | onPullDownRefresh: function() { 129 | 130 | }, 131 | 132 | /** 133 | * 页面上拉触底事件的处理函数 134 | */ 135 | onReachBottom: function() { 136 | that = this 137 | // 如果还有数据就加载数据 138 | if (that.data.offset < that.data.totalCount) { 139 | that.fetch_news(that, that.data.currentCategory, that.data.keyword) 140 | } 141 | }, 142 | 143 | /** 144 | * 用户点击右上角分享 145 | */ 146 | onShareAppMessage: function() { 147 | 148 | } 149 | }) -------------------------------------------------------------------------------- /微信小程序/pages/index/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "usingComponents": {} 3 | } 
-------------------------------------------------------------------------------- /微信小程序/pages/index/index.wxml: -------------------------------------------------------------------------------- 1 | <view id='home'> 2 | <!-- 顶部栏 --> 3 | <view class='top'> 4 | <image bindtap='tap_category' src='{{categoryIcon}}'></image> 5 | <view class='search'> 6 | <icon type='search' size='35rpx' color='#c8c8c8' /> 7 | <form> 8 | <input name='keyword' value='{{inputValue}}' placeholder="请输入新闻标题" confirm-type='send' bindconfirm="search_news" /> 9 | </form> 10 | </view> 11 | </view> 12 | <!-- 分类侧边栏 --> 13 | <view bindtap='tap_category' class='category {{show ? "category-show" : ""}}'> 14 | <view data-text='{{item}}' bindtap='select_category' class='wc {{(currentCategory == item) ? "wc-bold" : ""}}' wx:for='{{categories}}' wx:key='unique'> 15 | <text>{{item}}</text> 16 | </view> 17 | </view> 18 | <!-- 新闻列表 --> 19 | <view class='news-list'> 20 | <view wx:for='{{news}}' wx:key='unique'> 21 | <navigator url="../detail/detail?url={{item.news_url}}" hover-class="navigator-hover"> 22 | <view class='news' data-text='{{item.news_url}}'> 23 | <view class='news-title'>{{item.news_title}}</view> 24 | <view class='news-category'>网站分类:{{item.news_web_category}} | 机器分类:{{item.news_machine_category}}</view> 25 | <view class='news-description'>来源:{{item.news_source}} | 时间:{{item.news_datetime}}</view> 26 | </view> 27 | </navigator> 28 | </view> 29 | <view class="loading" hidden="{{!searchLoading}}">正在载入更多...</view> 30 | <view class="loading complete" hidden="{{!searchLoadingComplete}}">已加载全部</view> 31 | </view> 32 | </view> -------------------------------------------------------------------------------- /微信小程序/pages/index/index.wxss: -------------------------------------------------------------------------------- 1 | #home { 2 | height: 100%; 3 | padding: 0 0.2rem; 4 | } 5 | 6 | /* 顶部栏 */ 7 | 8 | .top { 9 | position: fixed; 10 | width: 100%; 11 | height: 70rpx; 12 | padding: 5rpx 0 0 0; 13 | 
background: white; 14 | display: flex; 15 | flex-direction: row; 16 | border-bottom: 1rpx solid #c8c8c8; 17 | } 18 | 19 | .top image { 20 | margin-left: 0.2rem; 21 | width: 60rpx; 22 | height: 60rpx; 23 | align-self: center; 24 | } 25 | 26 | .search { 27 | margin-left: 15rpx; 28 | width: 85%; 29 | height: 60rpx; 30 | display: flex; 31 | flex-direction: row; 32 | border: 1rpx solid #c8c8c8; 33 | border-radius: 10rpx; 34 | align-self: center; 35 | } 36 | 37 | .search icon { 38 | padding-left: 0.2rem; 39 | align-self: center; 40 | } 41 | 42 | .search input { 43 | padding: 0.1rem 0.1rem 0.1rem 0.4rem; 44 | width: 100%; 45 | height: 50rpx; 46 | min-height: 50rpx; 47 | align-self: center; 48 | } 49 | 50 | /* 侧边栏 */ 51 | 52 | .category { 53 | height: 100%; 54 | width: 150rpx; 55 | position: fixed; 56 | top: 75rpx; 57 | left: -200rpx; 58 | background-color: white; 59 | z-index: 0; 60 | transition: 0.5s; 61 | border-right: 1rpx solid #c8c8c8; 62 | } 63 | 64 | .category .wc { 65 | text-align: center; 66 | color: #c8c8c8; 67 | /* border-bottom: 1rpx solid #c8c8c8; */ 68 | padding-top: 1rem; 69 | padding-bottom: 1rem; 70 | font-size: 1rem; 71 | } 72 | 73 | .category .wc-bold { 74 | color: black; 75 | font-size: 1.1rem; 76 | font-weight: bold; 77 | } 78 | 79 | .category-show { 80 | left: 0; 81 | transition: 0.5s; 82 | } 83 | 84 | .page-top { 85 | height: 100%; 86 | position: fixed; 87 | width: 750rpx; 88 | background-color: rgb(57, 125, 230); 89 | z-index: 0; 90 | transition: All 0.4s ease; 91 | -webkit-transition: All 0.4s ease; 92 | } 93 | 94 | .c-state1 { 95 | transform: rotate(0deg) scale(1) translate(75%, 0%); 96 | -webkit-transform: rotate(0deg) scale(1) translate(75%, 0%); 97 | } 98 | 99 | /* 新闻列表 */ 100 | 101 | .news-list { 102 | width: 100%; 103 | height: 500px; 104 | padding-top: 80rpx; 105 | flex-direction: column; 106 | } 107 | 108 | .news { 109 | margin-bottom: 0.3rem; 110 | padding: 0.3rem; 111 | border: 0.1rem solid #c8c8c8; 112 | border-radius: 10rpx; 113 | 
// pages/tool/tool.js — "AI news classification" tool page
var app = getApp()
Page({

  /**
   * Initial page data.
   */
  data: {
    category: '??',   // predicted category shown to the user
    inputValue: '',
  },

  /**
   * Confirm handler of the title input: ask the backend to classify it.
   * Fix: `that` and `title` were assigned without `var` and leaked as
   * implicit globals; they are now local.
   */
  category_news: function (e) {
    var that = this
    var title = e.detail.value
    that.predict_news(that, title)
  },

  /**
   * Call the classification API and display the predicted category.
   * @param that  page instance
   * @param title raw news title (URL-encoded before entering the path)
   */
  predict_news: function (that, title) {
    wx.showLoading({
      title: '预测中',
    })
    wx.request({
      // The title is interpolated into the URL path and is usually
      // Chinese text, so it must be percent-encoded.
      url: app.globalData.host + 'news/' + encodeURIComponent(title),
      header: {
        'content-type': 'application/json'
      },
      // If the request succeeds and code is 0, show the prediction.
      success(res) {
        if (res.data.code == 0) {
          that.setData({
            category: res.data.data.items,
          })
        }
      },
      complete() {
        // Hide the spinner when the request actually finishes instead
        // of after an arbitrary fixed 2-second timeout.
        wx.hideLoading()
      }
    })
  },

  // Scan a QR code and classify the scanned text.
  rich_scan: function () {
    var that = this;
    wx.scanCode({
      // Allow scanning from the album as well as the camera.
      onlyFromCamera: false,
      success: (res) => {
        var title = res.result
        that.predict_news(that, title)
        wx.showToast({
          title: '成功',
          icon: 'success'
        })
      },
      fail: (res) => {
        wx.showToast({
          title: '失败',
          // 'fail' is not a valid wx.showToast icon; 'none' shows the
          // plain-text message.
          icon: 'none'
        })
      }
    })
  },

  /** 生命周期函数 — lifecycle stubs kept for the framework. */
  onLoad: function (options) {},
  onReady: function () {},
  onShow: function () {},
  onHide: function () {},
  onUnload: function () {},
  onPullDownRefresh: function () {},
  onReachBottom: function () {},
  onShareAppMessage: function () {}
})
position: absolute; 6 | display: flex; 7 | justify-content: center; 8 | align-items: center; 9 | } 10 | 11 | .category { 12 | margin-bottom: 1rem; 13 | width: 100%; 14 | } 15 | 16 | .text { 17 | width: 80%; 18 | margin: 0 auto; 19 | text-align: center; 20 | font-size: 1.5rem; 21 | margin-bottom: 1rem; 22 | } 23 | 24 | .category input { 25 | margin: 0 auto; 26 | width: 80%; 27 | height: 70rpx; 28 | min-height: 70rpx; 29 | border: 0.1rem solid #c8c8c8; 30 | border-radius: 0.5rem; 31 | margin-bottom: 1rem; 32 | text-align: center; 33 | } 34 | 35 | .category button { 36 | width: 80%; 37 | height: 70rpx; 38 | line-height: 70rpx; 39 | } -------------------------------------------------------------------------------- /微信小程序/project.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "项目配置文件", 3 | "packOptions": { 4 | "ignore": [] 5 | }, 6 | "setting": { 7 | "urlCheck": false, 8 | "es6": false, 9 | "postcss": true, 10 | "minified": true, 11 | "newFeature": true, 12 | "autoAudits": false, 13 | "uglifyFileName": true, 14 | "checkInvalidKey": true 15 | }, 16 | "compileType": "miniprogram", 17 | "libVersion": "2.6.4", 18 | "appid": "wx07fab28c53535edd", 19 | "projectname": "%E8%99%AB%E8%99%AB%E6%96%B0%E9%97%BB", 20 | "debugOptions": { 21 | "hidedInDevtools": [] 22 | }, 23 | "isGameTourist": false, 24 | "simulatorType": "wechat", 25 | "simulatorPluginLibVersion": {}, 26 | "condition": { 27 | "search": { 28 | "current": -1, 29 | "list": [] 30 | }, 31 | "conversation": { 32 | "current": -1, 33 | "list": [] 34 | }, 35 | "game": { 36 | "currentL": -1, 37 | "list": [] 38 | }, 39 | "miniprogram": { 40 | "current": -1, 41 | "list": [] 42 | } 43 | } 44 | } -------------------------------------------------------------------------------- /微信小程序/sitemap.json: -------------------------------------------------------------------------------- 1 | { 2 | "desc": "关于本文件的更多信息,请参考文档 
https://developers.weixin.qq.com/miniprogram/dev/framework/sitemap.html", 3 | "rules": [{ 4 | "action": "allow", 5 | "page": "*" 6 | }] 7 | } -------------------------------------------------------------------------------- /机器学习/README.md: -------------------------------------------------------------------------------- 1 | ## 数据准备: 2 | 3 | 从 MongoDB 数据库中查出所需分类的新闻数据,来做训练数据集,共1653384条新闻。 4 | 5 | 查询语句:`db.train_news.find({'news_category':{$in: ['国内', '国际', '军事', '体育', '社会', '娱乐', '财经']}}).count()` 6 | 7 | ## 构建 Bunch 对象 8 | 9 | Bunch 的几个属性: 10 | 11 | - **data: 数据数组** 12 | 13 | - **target:与filenames一一对应,是分类的数字化** 14 | 15 | - target_names:文本分类数组 16 | 17 | - filenames:文件名数组 18 | 19 | - DESCR:数据描述 20 | 21 | data 为进行结巴分词后的新闻标题数组,target 为进行分类数字化后的分类数组,这两个属性是 Bunch 的关键属性,为下一步计算 TF-IDF 值做准备。 22 | 23 | ## 计算词频和TF-IDF值 24 | 25 | 构建中文停用词列表,使用 CountVectorizer() 方法和 TfidfVectorizer() 方法分别计算出词频矩阵和TF-IDF矩阵。 26 | 27 | 数据维度中行数和特征值为 (1653384, 348840)。 28 | 29 | ## 六种机器学习分类算法 30 | 31 | ### 线性算法 32 | 33 | #### 1、逻辑回归(LR) 34 | 35 | 准确率:LR : 0.787259 (0.029209) 36 | 37 | #### 2、线性判别分析(LDA) 38 | 39 | 准确率: 40 | 41 | ### 非线性算法 42 | 43 | #### 1、K近邻(KNN) 44 | 45 | 准确率: 46 | 47 | #### 2、朴素贝叶斯分类器(MNB) 48 | 49 | 准确率: 50 | 51 | #### 3、分类与回归树(CART) 52 | 53 | 准确率: 54 | 55 | #### 4、支持向量机(SVM) 56 | 57 | 准确率: 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /机器学习/common.py: -------------------------------------------------------------------------------- 1 | # 获取停用词表数组 2 | def get_stop_words_list(): 3 | with open('./stopwords.txt', encoding='utf-8', errors='ignore') as file: 4 | return file.read().replace('\n', ' ').split() 5 | 6 | 7 | # 新闻分类文本转数字 8 | category_dict = { 9 | '国内': 0, 10 | '国际': 1, 11 | '军事': 2, 12 | '体育': 3, 13 | '社会': 4, 14 | '娱乐': 5, 15 | '财经': 6 16 | } 17 | 18 | # 新闻分类数字转文本 19 | category_dict_reverse = { 20 | 0: '国内', 21 | 1: '国际', 22 | 2: '军事', 23 | 3: '体育', 24 | 4: '社会', 25 | 5: '娱乐', 26 | 6: '财经' 27 | } 
# 机器学习/data.py — MongoDB access layer for the news data sets.
from pymongo import MongoClient
from pprint import pprint

# The seven news categories the classifier is trained on.
CATEGORY_LIST = ['国内', '国际', '军事', '体育', '社会', '娱乐', '财经']


class NewsData(object):
    """Thin wrapper around one MongoDB collection of news documents."""

    def __init__(self, db, collection, host='127.0.0.1', port=27017, username='weizhiwen', password='123456'):
        """Connect to MongoDB and select the given db/collection."""
        self.client = MongoClient(host=host, port=port, username=username, password=password)
        self.db = self.client[db]
        self.collection = self.db[collection]

    def _find_by_category(self, field, news_num_dict=None):
        """Fetch news documents grouped by category.

        field         -- document field carrying the category label
                         ('news_category' for the training collection,
                          'news_web_category' for the test collection)
        news_num_dict -- optional {category: limit} mapping; when omitted,
                         all news of the seven known categories are returned.
        """
        news_list = []
        if news_num_dict:
            # Per-category limited queries (previously seven copy-pasted calls).
            for category in CATEGORY_LIST:
                news_list.extend(
                    self.collection.find({field: category}).limit(news_num_dict[category]))
        else:
            news_list.extend(self.collection.find({field: {'$in': CATEGORY_LIST}}))
        return news_list

    def get_train_news(self, news_num_dict=None):
        """News from the training DB (labelled via 'news_category')."""
        return self._find_by_category('news_category', news_num_dict)

    def get_test_news(self, news_num_dict=None):
        """News from the test DB (labelled via 'news_web_category')."""
        return self._find_by_category('news_web_category', news_num_dict)

    def get_train_and_test_news(self, test_size=0.33):
        """Split the training news into train/evaluation subsets.

        Bug fix: the original called self.get_news(), a method that does
        not exist anywhere in this class; the intended source is
        get_train_news().
        """
        news = self.get_train_news()
        total_num = len(news)
        # First (1 - test_size) of the data forms the training set.
        train_news_list = news[:int((1 - test_size) * total_num)]
        # Last test_size of the data forms the evaluation set.
        # NOTE(review): because both slice bounds are truncated with int(),
        # the two slices may leave a one-element gap or overlap; behaviour
        # kept identical to the original implementation.
        test_news_list = news[-int(test_size * total_num):]
        return {'train_news': train_news_list, 'test_news': test_news_list}


if __name__ == '__main__':
    train_news_data = NewsData(db='news', collection='train_news')
    test_news_data = NewsData(db='news', collection='test_news')
    pprint(test_news_data.get_test_news())
    print(len(test_news_data.get_test_news()))
# 机器学习/new_predict.py — load the trained model and classify news titles.
import pickle

import scipy.sparse as sp
import jieba
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer

from common import get_stop_words_list, category_dict_reverse
from data import NewsData

# Chinese stop-word list shared by the vectorizer below.
stop_words_list = get_stop_words_list()


def text2matrix(text_list, transfomer_path='transformer'):
    """Turn a list of news titles into the TF-IDF matrix the model expects.

    text_list      -- list of raw (unsegmented) Chinese news titles
    transfomer_path -- pickle file holding the fitted training
                       TfidfVectorizer (parameter name kept, typo and
                       all, for backward compatibility with callers)

    Returns a sparse matrix with one row per title.
    """
    # Rebuild a vectorizer over the *training* vocabulary so that the
    # feature columns line up with what the model was fitted on.
    with open(transfomer_path, 'rb') as f:
        train_transformer = pickle.load(f)
    tf_transformer = TfidfVectorizer(smooth_idf=True, stop_words=stop_words_list,
                                     vocabulary=train_transformer.vocabulary_)
    # Reuse the IDF weights learned during training; without this the new
    # vectorizer would be unfitted.  NOTE(review): relies on the private
    # _tfidf attribute of TfidfVectorizer.
    tf_transformer._tfidf._idf_diag = sp.spdiags(train_transformer.idf_, diags=0,
                                                 m=len(train_transformer.idf_),
                                                 n=len(train_transformer.idf_))
    segmented = [" ".join(jieba.cut(text)) for text in text_list]
    return tf_transformer.transform(segmented)


def classfiy_predict(model_path, data):
    """Predict category names for a TF-IDF matrix of titles.

    model_path -- path of the joblib-serialised classifier (e.g. 'mnb.model')
    data       -- sparse matrix produced by text2matrix()

    Returns a list of Chinese category names, one per row of `data`.
    (Function name keeps the original 'classfiy' misspelling because other
    modules import it by that name.)
    """
    with open(model_path, 'rb') as f:
        model = joblib.load(f)
    # Map numeric predictions back to their Chinese category labels.
    return [category_dict_reverse[code] for code in model.predict(data).tolist()]


if __name__ == '__main__':
    # Classify every test-DB news item whose machine category is still EMPTY
    # (the original comment claimed the opposite) and write the prediction back.
    test_news = NewsData(db='news', collection='test_news')
    news_data = list(test_news.collection.find({"news_machine_category": {"$in": ['']}}))
    news_title_list = [item['news_title'] for item in news_data]
    data = text2matrix(news_title_list)
    news_category_list = classfiy_predict(model_path='mnb.model', data=data)
    total_count = len(news_data)
    predict_true_count = 0
    for i, predicted in enumerate(news_category_list):
        actual = news_data[i]['news_web_category']
        if predicted == actual:
            predict_true_count += 1
            print(
                '预测分类: ' + predicted + ', 实际分类: ' + actual + ', ' + news_title_list[
                    i])
        # Persist the machine-made category on the document(s) with this title.
        test_news.collection.update_many({'news_title': news_title_list[i]},
                                         {'$set': {'news_machine_category': predicted}})
    # Guard against division by zero when every item was already classified.
    if total_count:
        print('预测正确率: ' + str(predict_true_count / total_count))
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, VotingClassifier
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import Bunch
from data import NewsData
from common import get_stop_words_list, category_dict

# 1、准备数据

# Chinese stop-word list shared by every vectorizer below.
stop_words_list = get_stop_words_list()

# 新闻分类数组,作为 Bunch 的 target_names
category_list = ['国内', '国际', '军事', '体育', '社会', '娱乐', '财经']
train_news_data = NewsData(db='news', collection='train_news')
# test_news_data = NewsData(db='news', collection='test_news')

# Per-category sample sizes used in earlier experiments.
# NOTE(review): not referenced anywhere below -- presumably consumed by an
# earlier revision of get_train_news(); confirm before deleting.
news_num_dict = {
    '国内': 10000,
    '国际': 10000,
    '军事': 10000,
    '体育': 10000,
    '社会': 10000,
    '娱乐': 10000,
    '财经': 10000,
}
train_news = train_news_data.get_train_news()

# Segmented titles (train_data) and their numeric class labels (train_target).
train_data = []
test_data = []
train_target = []
test_target = []

for item in train_news:
    # jieba-segment each title; vectorizers expect whitespace-separated tokens.
    train_data.append(" ".join(jieba.cut(item['news_title'])))
    train_target.append(category_dict[item['news_category']])

# Training-set container for the functions below.
train_bunch = Bunch(data=train_data, target=train_target, target_names=category_list, DESCR='新闻训练数据集')


# 2、评估算法

# Cross-validation baseline settings used by the (archived) experiments.
num_folds = 10
seed = 7
scoring = 'accuracy'

# NOTE(review): a large block of commented-out experiment code was removed
# here (k-fold comparison of LR/SVM/CART/MNB/KNN, a VotingClassifier ensemble,
# and GridSearchCV tuning of LR's C and MNB's alpha). Recorded results kept
# for reference:
#   LR  : 0.5806 at 10k docs/class, 0.6293 at 80k docs/class
#   MNB : 0.6101 at 50k, 0.6192 at 80k, 0.7617 on the full ~1.5M-doc corpus
#   SVM : accuracy ~0 over several runs -- dropped.


# 保存transformer
def save_transformer(bunch, vocabulary_path='transformer.word'):
    """Fit a TF-IDF vectorizer on *bunch* and pickle the fitted vectorizer."""
    tf_transformer = TfidfVectorizer(smooth_idf=False, stop_words=stop_words_list)
    # BUG FIX: the original called fit_transform(train_bunch.data), silently
    # ignoring the *bunch* argument; the caller's data is now actually used.
    tf_transformer.fit_transform(bunch.data)
    with open(vocabulary_path, 'wb') as f:
        pickle.dump(tf_transformer, f)
    print('成功保存词汇表')


# 保存tf-idf
def save_tfidf(bunch, tfidf_path='transformer.tfidf'):
    """Fit a TF-IDF vectorizer on *bunch* and pickle the document matrix."""
    tf_transformer = TfidfVectorizer(smooth_idf=False, stop_words=stop_words_list)
    X_train_counts_tf = tf_transformer.fit_transform(bunch.data)
    with open(tfidf_path, 'wb') as f:
        pickle.dump(X_train_counts_tf, f)
    print('成功保存TF-IDF值')


# 保存transformer和TF-IDF值
def save_transformer_tfidf(bunch, vocabulary_path='transformer.word', tfidf_path='transformer.tfidf'):
    """Fit once, then pickle both the vectorizer and the TF-IDF matrix."""
    tf_transformer = TfidfVectorizer(smooth_idf=False, stop_words=stop_words_list)
    X_train_counts_tf = tf_transformer.fit_transform(bunch.data)
    with open(vocabulary_path, 'wb') as f:
        pickle.dump(tf_transformer, f)
    print('成功保存词汇表')
    with open(tfidf_path, 'wb') as f:
        pickle.dump(X_train_counts_tf, f)
    print('成功保存TF-IDF值')


# 分类算法生成模型
def classify_model(bunch, algorithm_model, tfidf_path, model_path):
    """Fit *algorithm_model* on a precomputed TF-IDF matrix and save it.

    bunch: Bunch whose .target must align row-for-row with the pickled matrix.
    tfidf_path: pickle produced by save_tfidf / save_transformer_tfidf.
    model_path: destination file for the fitted model (joblib format).
    """
    # Load the precomputed TF-IDF matrix instead of re-fitting the vectorizer.
    with open(tfidf_path, 'rb') as f:
        X_train_counts_tf = pickle.load(f)
    print('算法模型拟合中...')
    algorithm_model.fit(X_train_counts_tf, bunch.target)
    with open(model_path, 'wb') as f:
        joblib.dump(algorithm_model, f, compress=1)
    print('成功生成' + model_path + '模型')


if __name__ == '__main__':
    # Regenerate the TF-IDF pickles only when they are missing or data changed:
    # save_transformer_tfidf(bunch=train_bunch)
    # LR model:
    # model = LogisticRegression(penalty='l2', C=10, solver='sag', multi_class='auto', n_jobs=-1)
    # classify_model(train_bunch, model, 'transformer.tfidf', 'lr.model')
    # MNB model (best performer -- see the recorded results above):
    model = MultinomialNB(alpha=0.01)
    classify_model(train_bunch, model, 'transformer.tfidf', 'mnb.model')
13 | _ 14 | “ 15 | ” 16 | 、 17 | 。 18 | 《 19 | 》 20 | 一 21 | 一些 22 | 一何 23 | 一切 24 | 一则 25 | 一方面 26 | 一旦 27 | 一来 28 | 一样 29 | 一般 30 | 一转眼 31 | 万一 32 | 上 33 | 上下 34 | 下 35 | 不 36 | 不仅 37 | 不但 38 | 不光 39 | 不单 40 | 不只 41 | 不外乎 42 | 不如 43 | 不妨 44 | 不尽 45 | 不尽然 46 | 不得 47 | 不怕 48 | 不惟 49 | 不成 50 | 不拘 51 | 不料 52 | 不是 53 | 不比 54 | 不然 55 | 不特 56 | 不独 57 | 不管 58 | 不至于 59 | 不若 60 | 不论 61 | 不过 62 | 不问 63 | 与 64 | 与其 65 | 与其说 66 | 与否 67 | 与此同时 68 | 且 69 | 且不说 70 | 且说 71 | 两者 72 | 个 73 | 个别 74 | 临 75 | 为 76 | 为了 77 | 为什么 78 | 为何 79 | 为止 80 | 为此 81 | 为着 82 | 乃 83 | 乃至 84 | 乃至于 85 | 么 86 | 之 87 | 之一 88 | 之所以 89 | 之类 90 | 乌乎 91 | 乎 92 | 乘 93 | 也 94 | 也好 95 | 也罢 96 | 了 97 | 二来 98 | 于 99 | 于是 100 | 于是乎 101 | 云云 102 | 云尔 103 | 些 104 | 亦 105 | 人 106 | 人们 107 | 人家 108 | 什么 109 | 什么样 110 | 今 111 | 介于 112 | 仍 113 | 仍旧 114 | 从 115 | 从此 116 | 从而 117 | 他 118 | 他人 119 | 他们 120 | 以 121 | 以上 122 | 以为 123 | 以便 124 | 以免 125 | 以及 126 | 以故 127 | 以期 128 | 以来 129 | 以至 130 | 以至于 131 | 以致 132 | 们 133 | 任 134 | 任何 135 | 任凭 136 | 似的 137 | 但 138 | 但凡 139 | 但是 140 | 何 141 | 何以 142 | 何况 143 | 何处 144 | 何时 145 | 余外 146 | 作为 147 | 你 148 | 你们 149 | 使 150 | 使得 151 | 例如 152 | 依 153 | 依据 154 | 依照 155 | 便于 156 | 俺 157 | 俺们 158 | 倘 159 | 倘使 160 | 倘或 161 | 倘然 162 | 倘若 163 | 借 164 | 假使 165 | 假如 166 | 假若 167 | 傥然 168 | 像 169 | 儿 170 | 先不先 171 | 光是 172 | 全体 173 | 全部 174 | 兮 175 | 关于 176 | 其 177 | 其一 178 | 其中 179 | 其二 180 | 其他 181 | 其余 182 | 其它 183 | 其次 184 | 具体地说 185 | 具体说来 186 | 兼之 187 | 内 188 | 再 189 | 再其次 190 | 再则 191 | 再有 192 | 再者 193 | 再者说 194 | 再说 195 | 冒 196 | 冲 197 | 况且 198 | 几 199 | 几时 200 | 凡 201 | 凡是 202 | 凭 203 | 凭借 204 | 出于 205 | 出来 206 | 分别 207 | 则 208 | 则甚 209 | 别 210 | 别人 211 | 别处 212 | 别是 213 | 别的 214 | 别管 215 | 别说 216 | 到 217 | 前后 218 | 前此 219 | 前者 220 | 加之 221 | 加以 222 | 即 223 | 即令 224 | 即使 225 | 即便 226 | 即如 227 | 即或 228 | 即若 229 | 却 230 | 去 231 | 又 232 | 又及 233 | 及 234 | 及其 235 | 及至 236 | 反之 237 | 反而 238 | 反过来 239 | 反过来说 240 | 受到 241 | 另 242 | 另一方面 243 | 另外 244 | 另悉 245 | 只 246 | 只当 247 | 只怕 248 | 
只是 249 | 只有 250 | 只消 251 | 只要 252 | 只限 253 | 叫 254 | 叮咚 255 | 可 256 | 可以 257 | 可是 258 | 可见 259 | 各 260 | 各个 261 | 各位 262 | 各种 263 | 各自 264 | 同 265 | 同时 266 | 后 267 | 后者 268 | 向 269 | 向使 270 | 向着 271 | 吓 272 | 吗 273 | 否则 274 | 吧 275 | 吧哒 276 | 吱 277 | 呀 278 | 呃 279 | 呕 280 | 呗 281 | 呜 282 | 呜呼 283 | 呢 284 | 呵 285 | 呵呵 286 | 呸 287 | 呼哧 288 | 咋 289 | 和 290 | 咚 291 | 咦 292 | 咧 293 | 咱 294 | 咱们 295 | 咳 296 | 哇 297 | 哈 298 | 哈哈 299 | 哉 300 | 哎 301 | 哎呀 302 | 哎哟 303 | 哗 304 | 哟 305 | 哦 306 | 哩 307 | 哪 308 | 哪个 309 | 哪些 310 | 哪儿 311 | 哪天 312 | 哪年 313 | 哪怕 314 | 哪样 315 | 哪边 316 | 哪里 317 | 哼 318 | 哼唷 319 | 唉 320 | 唯有 321 | 啊 322 | 啐 323 | 啥 324 | 啦 325 | 啪达 326 | 啷当 327 | 喂 328 | 喏 329 | 喔唷 330 | 喽 331 | 嗡 332 | 嗡嗡 333 | 嗬 334 | 嗯 335 | 嗳 336 | 嘎 337 | 嘎登 338 | 嘘 339 | 嘛 340 | 嘻 341 | 嘿 342 | 嘿嘿 343 | 因 344 | 因为 345 | 因了 346 | 因此 347 | 因着 348 | 因而 349 | 固然 350 | 在 351 | 在下 352 | 在于 353 | 地 354 | 基于 355 | 处在 356 | 多 357 | 多么 358 | 多少 359 | 大 360 | 大家 361 | 她 362 | 她们 363 | 好 364 | 如 365 | 如上 366 | 如上所述 367 | 如下 368 | 如何 369 | 如其 370 | 如同 371 | 如是 372 | 如果 373 | 如此 374 | 如若 375 | 始而 376 | 孰料 377 | 孰知 378 | 宁 379 | 宁可 380 | 宁愿 381 | 宁肯 382 | 它 383 | 它们 384 | 对 385 | 对于 386 | 对待 387 | 对方 388 | 对比 389 | 将 390 | 小 391 | 尔 392 | 尔后 393 | 尔尔 394 | 尚且 395 | 就 396 | 就是 397 | 就是了 398 | 就是说 399 | 就算 400 | 就要 401 | 尽 402 | 尽管 403 | 尽管如此 404 | 岂但 405 | 己 406 | 已 407 | 已矣 408 | 巴 409 | 巴巴 410 | 并 411 | 并且 412 | 并非 413 | 庶乎 414 | 庶几 415 | 开外 416 | 开始 417 | 归 418 | 归齐 419 | 当 420 | 当地 421 | 当然 422 | 当着 423 | 彼 424 | 彼时 425 | 彼此 426 | 往 427 | 待 428 | 很 429 | 得 430 | 得了 431 | 怎 432 | 怎么 433 | 怎么办 434 | 怎么样 435 | 怎奈 436 | 怎样 437 | 总之 438 | 总的来看 439 | 总的来说 440 | 总的说来 441 | 总而言之 442 | 恰恰相反 443 | 您 444 | 惟其 445 | 慢说 446 | 我 447 | 我们 448 | 或 449 | 或则 450 | 或是 451 | 或曰 452 | 或者 453 | 截至 454 | 所 455 | 所以 456 | 所在 457 | 所幸 458 | 所有 459 | 才 460 | 才能 461 | 打 462 | 打从 463 | 把 464 | 抑或 465 | 拿 466 | 按 467 | 按照 468 | 换句话说 469 | 换言之 470 | 据 471 | 据此 472 | 接着 473 | 故 474 | 故此 475 | 故而 476 | 旁人 477 | 无 478 | 
无宁 479 | 无论 480 | 既 481 | 既往 482 | 既是 483 | 既然 484 | 时候 485 | 是 486 | 是以 487 | 是的 488 | 曾 489 | 替 490 | 替代 491 | 最 492 | 有 493 | 有些 494 | 有关 495 | 有及 496 | 有时 497 | 有的 498 | 望 499 | 朝 500 | 朝着 501 | 本 502 | 本人 503 | 本地 504 | 本着 505 | 本身 506 | 来 507 | 来着 508 | 来自 509 | 来说 510 | 极了 511 | 果然 512 | 果真 513 | 某 514 | 某个 515 | 某些 516 | 某某 517 | 根据 518 | 欤 519 | 正值 520 | 正如 521 | 正巧 522 | 正是 523 | 此 524 | 此地 525 | 此处 526 | 此外 527 | 此时 528 | 此次 529 | 此间 530 | 毋宁 531 | 每 532 | 每当 533 | 比 534 | 比及 535 | 比如 536 | 比方 537 | 没奈何 538 | 沿 539 | 沿着 540 | 漫说 541 | 焉 542 | 然则 543 | 然后 544 | 然而 545 | 照 546 | 照着 547 | 犹且 548 | 犹自 549 | 甚且 550 | 甚么 551 | 甚或 552 | 甚而 553 | 甚至 554 | 甚至于 555 | 用 556 | 用来 557 | 由 558 | 由于 559 | 由是 560 | 由此 561 | 由此可见 562 | 的 563 | 的确 564 | 的话 565 | 直到 566 | 相对而言 567 | 省得 568 | 看 569 | 眨眼 570 | 着 571 | 着呢 572 | 矣 573 | 矣乎 574 | 矣哉 575 | 离 576 | 竟而 577 | 第 578 | 等 579 | 等到 580 | 等等 581 | 简言之 582 | 管 583 | 类如 584 | 紧接着 585 | 纵 586 | 纵令 587 | 纵使 588 | 纵然 589 | 经 590 | 经过 591 | 结果 592 | 给 593 | 继之 594 | 继后 595 | 继而 596 | 综上所述 597 | 罢了 598 | 者 599 | 而 600 | 而且 601 | 而况 602 | 而后 603 | 而外 604 | 而已 605 | 而是 606 | 而言 607 | 能 608 | 能否 609 | 腾 610 | 自 611 | 自个儿 612 | 自从 613 | 自各儿 614 | 自后 615 | 自家 616 | 自己 617 | 自打 618 | 自身 619 | 至 620 | 至于 621 | 至今 622 | 至若 623 | 致 624 | 般的 625 | 若 626 | 若夫 627 | 若是 628 | 若果 629 | 若非 630 | 莫不然 631 | 莫如 632 | 莫若 633 | 虽 634 | 虽则 635 | 虽然 636 | 虽说 637 | 被 638 | 要 639 | 要不 640 | 要不是 641 | 要不然 642 | 要么 643 | 要是 644 | 譬喻 645 | 譬如 646 | 让 647 | 许多 648 | 论 649 | 设使 650 | 设或 651 | 设若 652 | 诚如 653 | 诚然 654 | 该 655 | 说来 656 | 诸 657 | 诸位 658 | 诸如 659 | 谁 660 | 谁人 661 | 谁料 662 | 谁知 663 | 贼死 664 | 赖以 665 | 赶 666 | 起 667 | 起见 668 | 趁 669 | 趁着 670 | 越是 671 | 距 672 | 跟 673 | 较 674 | 较之 675 | 边 676 | 过 677 | 还 678 | 还是 679 | 还有 680 | 还要 681 | 这 682 | 这一来 683 | 这个 684 | 这么 685 | 这么些 686 | 这么样 687 | 这么点儿 688 | 这些 689 | 这会儿 690 | 这儿 691 | 这就是说 692 | 这时 693 | 这样 694 | 这次 695 | 这般 696 | 这边 697 | 这里 698 | 进而 699 | 连 700 | 连同 701 | 逐步 702 | 通过 703 | 遵循 704 | 
遵照 705 | 那 706 | 那个 707 | 那么 708 | 那么些 709 | 那么样 710 | 那些 711 | 那会儿 712 | 那儿 713 | 那时 714 | 那样 715 | 那般 716 | 那边 717 | 那里 718 | 都 719 | 鄙人 720 | 鉴于 721 | 针对 722 | 阿 723 | 除 724 | 除了 725 | 除外 726 | 除开 727 | 除此之外 728 | 除非 729 | 随 730 | 随后 731 | 随时 732 | 随着 733 | 难道说 734 | 非但 735 | 非徒 736 | 非特 737 | 非独 738 | 靠 739 | 顺 740 | 顺着 741 | 首先 742 | ! 743 | , 744 | : 745 | ; 746 | ? -------------------------------------------------------------------------------- /机器学习/transformer: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/机器学习/transformer -------------------------------------------------------------------------------- /爬虫/README.md: -------------------------------------------------------------------------------- 1 | # 数据爬取 2 | 3 | 训练新闻分类数据的爬取直接使用 requests 库爬取,BeautifulSoup 解析,存储到 MongoDB 的 news 库中的 train_news 表中。 4 | 5 | 测试新闻数据的爬取使用 Scrapy 框架爬取,存储到 MongoDB 的 news 库中的 test_news 表中。 6 | 7 | ## 1、训练新闻分类数据的爬取 8 | 9 | **1.1 目标网站** 10 | 11 | 中国新闻网(静态页面) 12 | 域名:http://www.chinanews.com 13 | 14 | 爬取规则: 15 | 16 | 滚动新闻:http://域名/scroll-news/年/月日/news.shtml ,例如:http://www.chinanews.com/scroll-news/2018/1207/news.shtml 17 | 18 | 某个分类下的新闻:http://域名/scroll-news/分类对应的网站文件夹/年/月日/news.shtml,例如:http://www.chinanews.com/scroll-news/mil/2017/1205/news.shtml 为体育分类下的新闻 19 | 20 | > 注:分类对应的网站文件夹请以实际名称为准。 21 | 22 | **1.2 存储字段** 23 | 24 | - 新闻标题 25 | - 新闻分类 26 | 27 | **1.3 数据统计** 28 | 29 | 爬取了 2019.03.06-2012.12.07 的数据,共 3035803 条数据,每个分类下的新闻数量如下: 30 | 31 | 查询要用到的SQL语句:`db.train_news.aggregate([{$group: {_id: "$news_category", "新闻数量": {$sum: 1}}}, {$sort: {"新闻数量":-1}}])`,分组查询,并按照新闻数量倒序排序。 32 | 33 | **1.4 数据处理** 34 | 35 | 最终新闻分为七大类: 36 | 37 | - 国内 38 | - 国际 39 | - 军事 40 | - 体育 41 | - 社会 42 | - 娱乐 43 | - 财经 44 | 45 | 数据处理后各个分类下的新闻数量如下: 46 | 47 | 48 | 49 | ## 2、详细新闻数据的爬取 50 | 51 | **2.1 目标网站** 52 | 53 | 中国新闻网 54 | 55 | **2.2 存储字段** 
# Scrapy 爬虫执行脚本 -- run both news spiders one after another.
#
# BUG FIX: scrapy.cmdline.execute() never returns (it terminates the process
# via sys.exit once the crawl finishes), so in the original script the second
# execute() call was dead code and the chinanews spider never ran.
# Launch each spider in its own subprocess instead so both run sequentially.
import subprocess


def run_spiders(spider_names=('wangyi', 'chinanews')):
    """Run the given Scrapy spiders one at a time; raise if a crawl fails."""
    for name in spider_names:
        # Argument list with shell=False (the default): no shell-injection surface.
        subprocess.run(['scrapy', 'crawl', name], check=True)


if __name__ == '__main__':
    run_spiders()
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
import scrapy
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
import random


# Random User-Agent middleware: overwrites the User-Agent header of every
# outgoing request with one randomly chosen from the MY_USER_AGENT setting,
# making the crawler harder to fingerprint.
class MyUserAgentMiddleware(UserAgentMiddleware):

    def __init__(self, user_agent):
        # List of candidate User-Agent strings (from the MY_USER_AGENT setting).
        self.user_agent = user_agent

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware from crawler settings.
        return cls(
            user_agent=crawler.settings.get('MY_USER_AGENT')
        )

    def process_request(self, request, spider):
        # Pick a random UA and force it onto the request headers.
        agent = random.choice(self.user_agent)
        request.headers['User-Agent'] = agent

# Proxy-IP middleware: routes each request through a proxy chosen at random
# from the PROXIES setting. (Currently disabled in settings.py.)
class MyProxyMiddleware(object):
    def __init__(self, ip):
        # List of proxy addresses (from the PROXIES setting).
        self.ip = ip

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware from crawler settings.
        return cls(
            ip=crawler.settings.get('PROXIES')
        )

    def process_request(self, request, spider):
        # Select a proxy for this request and record it in request.meta.
        ip = random.choice(self.ip)
        print('当前使用的代理IP为', ip)
        request.meta['proxy'] = ip


# Default Scrapy spider-middleware scaffold generated by `scrapy startproject`;
# all hooks below are pass-throughs.
class NewsSpiderSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


# Default Scrapy downloader-middleware scaffold (generated boilerplate;
# every hook is a pass-through).
class NewsSpiderDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
110 | 111 | # Must either: 112 | # - return None: continue processing this request 113 | # - or return a Response object 114 | # - or return a Request object 115 | # - or raise IgnoreRequest: process_exception() methods of 116 | # installed downloader middleware will be called 117 | return None 118 | 119 | def process_response(self, request, response, spider): 120 | # Called with the response returned from the downloader. 121 | 122 | # Must either; 123 | # - return a Response object 124 | # - return a Request object 125 | # - or raise IgnoreRequest 126 | return response 127 | 128 | def process_exception(self, request, exception, spider): 129 | # Called when a download handler or a process_request() 130 | # (from other downloader middleware) raises an exception. 131 | 132 | # Must either: 133 | # - return None: continue processing this exception 134 | # - return a Response object: stops process_exception() chain 135 | # - return a Request object: stops process_exception() chain 136 | pass 137 | 138 | def spider_opened(self, spider): 139 | spider.logger.info('Spider opened: %s' % spider.name) 140 | -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/pipelines.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Define your item pipelines here 4 | # 5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting 6 | # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html 7 | 8 | from pymongo import MongoClient 9 | from scrapy import Item 10 | 11 | 12 | class NewsSpiderPipeline(object): 13 | def process_item(self, item, spider): 14 | return item 15 | 16 | 17 | class MongoDBPipeline(object): 18 | # 爬取之前调用,建立和MongoDB数据库的连接,方便后面使用 19 | def open_spider(self, spider): 20 | db_url = spider.settings.get('MONGODB_URI', 'mongodb:localhost:27017') 21 | db_name = spider.settings.get('MONGODB_DB_NAME', 'news') 22 | self.db_client = 
MongoClient(host='localhost:27017', username="weizhiwen", password="123456") 23 | self.db = self.db_client[db_name] 24 | 25 | # 爬取完全部数据之后调用,关闭与数据库的连接 26 | def close_spider(self, spider): 27 | self.db_client.close() 28 | 29 | # 处理爬取的每一项数据 30 | def process_item(self, item, spider): 31 | self.insert_db(item) 32 | return item 33 | 34 | # 具体的插入方法 35 | def insert_db(self, item): 36 | if isinstance(item, Item): 37 | item = dict(item) 38 | self.db.test_news.insert_one(item) 39 | -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/settings.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Scrapy settings for news_spider project 4 | # 5 | # For simplicity, this file contains only settings considered important or 6 | # commonly used. You can find more settings consulting the documentation: 7 | # 8 | # https://doc.scrapy.org/en/latest/topics/settings.html 9 | # https://doc.scrapy.org/en/latest/topics/downloader-middleware.html 10 | # https://doc.scrapy.org/en/latest/topics/spider-middleware.html 11 | 12 | BOT_NAME = 'news_spider' 13 | 14 | SPIDER_MODULES = ['news_spider.spiders'] 15 | NEWSPIDER_MODULE = 'news_spider.spiders' 16 | 17 | # Spalsh 服务器地址 18 | SPLASH_URL = 'http://115.159.188.47:8050' 19 | 20 | # MongoDB 地址和数据库名称 21 | MONGODB_URI = 'mongodb:localhost:27017' 22 | MONGODB_DB_NAME = 'news' 23 | 24 | # 设置编码 25 | FEED_EXPORT_ENCODING = 'utf-8' 26 | 27 | # Crawl responsibly by identifying yourself (and your website) on the user-agent 28 | # USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36' 29 | 30 | # Obey robots.txt rules,这里选择不遵守 31 | ROBOTSTXT_OBEY = False 32 | 33 | # Configure maximum concurrent requests performed by Scrapy (default: 16) 34 | # CONCURRENT_REQUESTS = 32 35 | 36 | # Configure a delay for requests for the same website (default: 0) 37 | # See 
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default) 禁用Cookie来防止目标网站发现爬虫
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
#     'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
# }

# 设置去重过滤器 (Splash-aware duplicate filter)
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# 用来支持 cache_args(可选)
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# 开启Splash的两个下载中间件,
DOWNLOADER_MIDDLEWARES = {
    # BUG FIX: the key was 'scrapy.downloadermiddleware.useragent...' (missing
    # the plural 's'), a nonexistent module path -- so Scrapy's built-in
    # UserAgentMiddleware was never actually disabled as intended.
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    # 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 123,
    'news_spider.middlewares.MyUserAgentMiddleware': 400,
    # 'news_spider.middlewares.MyProxyMiddleware': 543,
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
https://doc.scrapy.org/en/latest/topics/item-pipeline.html 87 | ITEM_PIPELINES = { 88 | 'news_spider.pipelines.MongoDBPipeline': 403, 89 | } 90 | 91 | # Enable and configure the AutoThrottle extension (disabled by default) 92 | # See https://doc.scrapy.org/en/latest/topics/autothrottle.html 93 | # AUTOTHROTTLE_ENABLED = True 94 | # The initial download delay 95 | # AUTOTHROTTLE_START_DELAY = 5 96 | # The maximum download delay to be set in case of high latencies 97 | # AUTOTHROTTLE_MAX_DELAY = 60 98 | # The average number of requests Scrapy should be sending in parallel to 99 | # each remote server 100 | # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 101 | # Enable showing throttling stats for every response received: 102 | # AUTOTHROTTLE_DEBUG = False 103 | 104 | # Enable and configure HTTP caching (disabled by default) 105 | # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings 106 | # HTTPCACHE_ENABLED = True 107 | # HTTPCACHE_EXPIRATION_SECS = 0 108 | # HTTPCACHE_DIR = 'httpcache' 109 | # HTTPCACHE_IGNORE_HTTP_CODES = [] 110 | # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' 111 | 112 | # User-Agent 数组 113 | MY_USER_AGENT = [ 114 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36", 115 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36", 116 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36", 117 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36", 118 | "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36", 119 | "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36", 120 | "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like 
Gecko) Chrome/41.0.2225.0 Safari/537.36", 121 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36", 122 | "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36", 123 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36", 124 | "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36", 125 | "Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36", 126 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36", 127 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36", 128 | "Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36", 129 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36", 130 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36", 131 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36", 132 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36", 133 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36", 134 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1866.237 Safari/537.36", 135 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F", 136 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like 
Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10", 137 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36", 138 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36", 139 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36", 140 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36", 141 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36", 142 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36", 143 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36", 144 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36", 145 | "Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36", 146 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36", 147 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36", 148 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36", 149 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36", 150 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36", 151 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 152 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 153 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, 
like Gecko) Chrome/27.0.1453.93 Safari/537.36", 154 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 155 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 156 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 157 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36", 158 | "Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36", 159 | "Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36", 160 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17", 161 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17", 162 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15", 163 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14", 164 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1", 165 | "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0", 166 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0", 167 | "Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0", 168 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0", 169 | "Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0", 170 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0", 171 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0", 172 | "Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0", 173 | "Mozilla/5.0 (X11; 
Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0", 174 | "Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3", 175 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0", 176 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0", 177 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0", 178 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0", 179 | "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0", 180 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0", 181 | "Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/23.0", 182 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0", 183 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:23.0) Gecko/20131011 Firefox/23.0", 184 | "Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/22.0", 185 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0", 186 | "Mozilla/5.0 (Windows NT 6.1; rv:22.0) Gecko/20130405 Firefox/22.0", 187 | "Mozilla/5.0 (Microsoft Windows NT 6.2.9200.0); rv:22.0) Gecko/20130405 Firefox/22.0", 188 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1", 189 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1", 190 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:21.0.0) Gecko/20121011 Firefox/21.0.0", 191 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0", 192 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20100101 Firefox/21.0", 193 | "Mozilla/5.0 (X11; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0", 194 | "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20130514 Firefox/21.0", 195 | "Mozilla/5.0 (Windows NT 6.2; rv:21.0) Gecko/20130326 Firefox/21.0", 196 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130401 Firefox/21.0", 197 | 
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0", 198 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130330 Firefox/21.0", 199 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0", 200 | "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130401 Firefox/21.0", 201 | "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130328 Firefox/21.0", 202 | "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0", 203 | "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130401 Firefox/21.0", 204 | "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0", 205 | "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101 Firefox/21.0", 206 | "Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0", 207 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0", 208 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64;) Gecko/20100101 Firefox/20.0", 209 | "Mozilla/5.0 (Windows x86; rv:19.0) Gecko/20100101 Firefox/19.0", 210 | "Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/19.0", 211 | "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/18.0.1", 212 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0", 213 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6", 214 | "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko", 215 | "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko", 216 | "Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0", 217 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)", 218 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)", 219 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)", 220 | "Mozilla/5.0 (compatible; MSIE 10.0; 
Windows NT 6.1; Trident/5.0)", 221 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)", 222 | "Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)", 223 | "Mozilla/4.0 (Compatible; MSIE 8.0; Windows NT 5.2; Trident/6.0)", 224 | "Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)", 225 | "Mozilla/1.22 (compatible; MSIE 10.0; Windows 3.1)", 226 | "Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))", 227 | "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)", 228 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)", 229 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7)", 230 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7", 231 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; InfoPath.3; MS-RTC LM 8; .NET4.0C; .NET4.0E)", 232 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; chromeframe/12.0.742.112)", 233 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)", 234 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)", 235 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; Tablet PC 2.0; InfoPath.3; .NET4.0C; .NET4.0E)", 236 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0", 237 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; yie8)", 238 | 
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET CLR 1.1.4322; .NET4.0C; Tablet PC 2.0)", 239 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; FunWebProducts)", 240 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/13.0.782.215)", 241 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)", 242 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205", 243 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.1; SV1; .NET CLR 2.8.52393; WOW64; en-US)", 244 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)", 245 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)", 246 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.2; SV1; .NET CLR 3.3.69573; WOW64; en-US)", 247 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)", 248 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.8.36217; WOW64; en-US)", 249 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; .NET CLR 2.7.58687; SLCC2; Media Center PC 5.0; Zune 3.4; Tablet PC 3.6; InfoPath.3)", 250 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; Media Center PC 4.0; SLCC1; .NET CLR 3.0.04320)", 251 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322)", 252 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727)", 253 | 
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)", 254 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; SLCC1; .NET CLR 1.1.4322)", 255 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 3.0.04506.30)", 256 | "Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.0; Trident/4.0; FBSMTWB; .NET CLR 2.0.34861; .NET CLR 3.0.3746.3218; .NET CLR 3.5.33652; msn OptimizedIE8;ENUS)", 257 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)", 258 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8)", 259 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8", 260 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C)", 261 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.3; .NET4.0C; .NET4.0E; .NET CLR 3.5.30729; .NET CLR 3.0.30729; MS-RTC LM 8)", 262 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)", 263 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 3.0)" 264 | ] 265 | 266 | # 代理IP数组 267 | PROXIES = [ 268 | 'http://112.91.218.21:9000', 269 | 'http://59.34.2.92:3128', 270 | 'http://58.243.50.184:53281', 271 | 'http://222.217.68.51:54355' 272 | ] 273 | -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/README.md: 
-------------------------------------------------------------------------------- 1 | 新闻正文内容转换规则: 2 | 3 | 1、文本内容替换成 text 标签包裹 4 | 5 | 2、去除无用的标签,如 script 标签 -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__init__.py: -------------------------------------------------------------------------------- 1 | # This package will contain the spiders of your Scrapy project 2 | # 3 | # Please refer to the documentation for information on how to create and manage 4 | # your spiders. 5 | -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/chinanews.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/chinanews.cpython-37.pyc -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/common.cpython-37.pyc 
-------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/proxy.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/proxy.cpython-37.pyc -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/sina.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/sina.cpython-37.pyc -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/sohu.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/sohu.cpython-37.pyc -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/tencent.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/tencent.cpython-37.pyc -------------------------------------------------------------------------------- /爬虫/测试新闻数据爬取/news_spider/news_spider/spiders/__pycache__/wangyi.cpython-37.pyc: 
# -*- coding: utf-8 -*-
from datetime import timedelta, datetime, date

import scrapy
from scrapy import Request

from ..items import ChinaNewsItem

# News categories to crawl (domestic, international, military, sports,
# society, entertainment, finance).
crawl_category = ['国内', '国际', '军事', '体育', '社会', '娱乐', '财经']


def get_format_news_date(date=None):
    """Format *date* as ``YYYY/MMDD`` for chinanews.com scroll-news URLs.

    The previous default ``date=date.today()`` was evaluated once at import
    time — a classic mutable/stale default pitfall — so a crawl started
    after midnight (or a long-lived process) would format the wrong day.
    ``None`` sentinel fixes that: the current day is computed per call.

    :param date: a ``datetime.date``; defaults to today when ``None``.
    :return: formatted string, e.g. ``"2019/0330"``.
    """
    if date is None:
        # Resolve "today" at call time, not at import time.
        date = datetime.now().date()
    return date.strftime("%Y/%m%d")


# Spider that crawls chinanews.com news lists for a range of dates,
# walking backwards one day at a time.
class ChinanewsSpider(scrapy.Spider):
    name = 'chinanews'
    allowed_domains = ['www.chinanews.com']
    # Date to start crawling from.
    special_date = date.today()
    # Number of days to crawl.
    special_days = 10
    # Build the first day's URL directly.
    start_urls = ['http://www.chinanews.com/scroll-news/{}/news.shtml'.format(get_format_news_date(special_date))]

    def parse(self, response):
        """Parse one scroll-news list page, then schedule the previous day."""
        for sel in response.css('div.content_list li'):
            category = sel.css('div.dd_lm>a::text').extract_first()
            # Keep only news whose category is in the crawl whitelist.
            if category in crawl_category:
                china_news_item = ChinaNewsItem()
                china_news_item['news_title'] = sel.css('div.dd_bt>a::text').extract_first()
                china_news_item['news_url'] = 'http://www.chinanews.com' + sel.css('div.dd_bt>a::attr(href)').extract_first()
                china_news_item['news_datetime'] = str(ChinanewsSpider.special_date) + ' ' + sel.css('div.dd_time::text').extract_first().split(' ')[1]
                china_news_item['news_source'] = '中国新闻网'
                china_news_item['news_web_category'] = category
                china_news_item['news_machine_category'] = ''
                yield china_news_item
        # Build the previous day's URL; decrement the remaining-day budget
        # first (the current day has just been consumed).
        if ChinanewsSpider.special_days - 1:
            ChinanewsSpider.special_days -= 1
            next_date = ChinanewsSpider.special_date + timedelta(days=-1)
            ChinanewsSpider.special_date = next_date
            url = 'http://www.chinanews.com/scroll-news/{}/news.shtml'.format(get_format_news_date(next_date))
            yield Request(url, callback=self.parse)
# Convert news content: turn HTML tags into WXML tags.
def html2wxml_news_content(news_content):
    """Convert article HTML into WXML (WeChat mini-program markup).

    Implements the conversion rules documented in this package's README:
      1. text content is wrapped in ``<text>`` tags — paragraphs carry the
         article text, so ``<p>`` open/close tags are renamed to ``<text>``;
      2. useless tags such as ``<script>`` elements are removed entirely.

    The original function was an unimplemented ``pass`` stub; this
    implementation uses only the standard library.

    :param news_content: raw HTML string (may be empty or ``None``).
    :return: the converted WXML string ('' for empty/None input).
    """
    import re
    if not news_content:
        return ''
    # Drop whole <script ...>...</script> elements (case-insensitive,
    # DOTALL so multi-line scripts are matched).
    wxml = re.sub(r'<script\b[^>]*>.*?</script>', '', news_content,
                  flags=re.I | re.S)
    # Rename <p ...> opening tags to <text ...>, preserving attributes;
    # \b prevents matching unrelated tags that merely start with 'p'.
    wxml = re.sub(r'<p(\b[^>]*)>', r'<text\1>', wxml, flags=re.I)
    # Rename the matching closing tags.
    wxml = re.sub(r'</p\s*>', '</text>', wxml, flags=re.I)
    return wxml
src="http://www.chinanews.com/cr/2019/0330/4244548533.png" /><img src="http://www.chinanews.com/cr/2019/0330/2383282182.png" /><img src="http://www.chinanews.com/cr/2019/0330/478750541.png" /> 21 | 22 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/2390654632.png" /></p> 23 | </p> 24 | 25 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/2570514240.png" /></p> 26 | 27 | <p>  课程种类丰富 专业老师指导</p> 28 | 29 | <p>  除了电工、钳工这类特种技能对一些技术人员来说是刚需,非常热门以外,像插花、烹饪、茶艺这类日常生活都能用到的技能也广受大众欢迎。</p> 30 | 31 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/3458061744.png" /><img src="http://www.chinanews.com/cr/2019/0330/300190990.png" /><img src="http://www.chinanews.com/cr/2019/0330/2168006367.png" /> 32 | 33 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/1427003489.png" /></p> 34 | </p> 35 | 36 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/265139664.png" /></p> 37 | 38 | <p>  △网友晒出的课程成果</p> 39 | 40 | <p>  很多人还提到,在许多负责认真的专业老师指导下,充分利用闲暇时光习得一门技能不是难事。</p> 41 | 42 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/3422112185.png" /> 43 | 44 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/307954894.png" /></p> 45 | </p> 46 | 47 | <p>  考核严格 通过则可获得国家认可的证书</p> 48 | 49 | <p>  与某些社会组织“光学就完事”的技能兴趣班不同,这些课程都必须通过正规严厉的最终考核,才能获得国家认可的技能证书持证上岗。</p> 50 | 51 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/2191376820.png" /> 52 | 53 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/3496522813.png" /></p> 54 | </p> 55 | 56 | <p>  各地职业培训和具体政策不相同</p> 57 | 58 | <p>  细看各个省市政府补贴的职业培训项目中,除了常见常用技能,一些省市的项目名单中还出现了一些“新奇玩意”。</p> 59 | 60 | <p>  在上海职业培训指导服务网站上,有网友发现职业资格项目中有一个名叫“白山羊饲养”的课程,课程介绍称这是一门有关白山羊饲养管理,羊病的预防和简易治疗,以及羊舍建设等技术。</p> 61 | 62 | <p>  同样在广州的职业培训名录上,也有像婚姻家庭咨询师、手语翻译员、芳香保健师、小儿推拿师等其他省市少见的技能培训。</p> 63 | 64 | <p>  职业培训服务更加人性化</p> 65 | 66 | <p>  为了方便学员能够更方便地查询相关培训信息,各地政府除了在人力资源官网上公布内容,还有些地方开始探索网络新工具——开发运营职业培训的APP和小程序。</p> 67 | 68 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/561000170.png" /></p> 69 | 70 | <p>  
今年1月底,“广州职业培训地图(线上版)”就正式上线运行,这个小程序能够帮助广州全市职业培训学员仅仅通过微信,就可以简单便捷地选择职业培训机构以及课程内容。</p> 71 | 72 | <p>  所有培训机构的管理和展示集成到一个平台,清晰展示了培训课程与机构的简介信息、场地图片、许可培训项目、联系方式等内容,在地图功能的协助下还能够提供定位导航服务。</p> 73 | 74 | <p>  较为贴心的是,广州这款小程序还引入VR实景功能,学员在报名前足不出户,只要用手机就能了解培训机构的现场实景,这个功能在全国实属创新一步。</p> 75 | 76 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/574470060.gif" /></p> 77 | 78 | <p>  △某职业培训指导中心家政护理实训室</p> 79 | 80 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/849623132.gif" /></p> 81 | 82 | <p>  △某职业培训指导中心中式烹饪实训室</p> 83 | 84 | <p>  培训有条件 不是所有人都可以随意参加</p> 85 | 86 | <p>  根据2017年财政部、人力资源社会保障部印发的《就业补助资金管理办法》通知。其中明确规定,能够享受这类职业培训补贴的人员范围只有“五类人员”和部分符合条件的企业职工。</p> 87 | 88 | <p>  “五类人员”包括贫困家庭子女、毕业年度高校毕业生(含技师学院高级工班、预备技师班和特殊教育院校职业教育类毕业生)、城乡未继续升学的应届初高中毕业生、农村转移就业劳动者、城镇登记失业人员工。</p> 89 | 90 | <p>  各地具体办法实施也会在此基础上进行更详细的要求划分。以上海为例,补贴对象的大类主要以“是否为该市户籍”划分,并且将具备该市户籍的退役士兵和残疾人等经认定者也列入可享受补贴名单。</p> 91 | 92 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/3818101938.png" /></p> 93 | 94 | <p>  而各类人群具体可享受的补贴比例也有明确规定,在培训安排互不冲突的情况下如果同时参加不同培训,一年只能享受一个项目的补贴优惠。</p> 95 | 96 | <p>  <img src="http://www.chinanews.com/cr/2019/0330/1403117411.png" /></p> 97 | 98 | <p>  ▌本文来源:观察者网</p><table border=0 cellspacing=0 cellpadding=0 align=left style="padding-right:10px;"><tr><td><div id=adhzh name=hzh> 99 | 100 | <script> 101 | (function() { 102 | var s = "_" + Math.random().toString(36).slice(2); 103 | document.write('<div id="' + s + '"></div>'); 104 | (window.slotbydup=window.slotbydup || []).push({ 105 | id: '2473874', 106 | container: s, 107 | size: '300,250', 108 | display: 'inlay-fix' 109 | }); 110 | })(); 111 | </script> 112 | <script src="http://dup.baidustatic.com/js/os.js"></script> 113 | 114 | </div> 115 | 116 | </td></tr></table><div id="function_code_page"></div> 117 | 118 | </div> 119 | """ 120 | # print(news_content) 121 | htmlsoup = BeautifulSoup(news_content, "lxml") 122 | new_tag = htmlsoup.new_tag("text") 123 | # 去除所有的script标签 124 | [s.extract() for s in htmlsoup("script")] 125 | # 126 | 
# Proxy IP smoke test: fire a few identical requests at httpbin's echo
# endpoint to observe which outbound IP each request goes through.

import scrapy


class ProxySpider(scrapy.Spider):
    """Issue four requests to httpbin /get and print the echoed response."""

    name = 'proxy'
    allowed_domains = []

    def start_requests(self):
        target = 'http://www.httpbin.org/get'
        # dont_filter lets duplicate URLs through scrapy's dedupe filter,
        # so all four requests are actually sent.
        for _ in range(4):
            yield scrapy.Request(url=target, callback=self.parse, dont_filter=True)

    def parse(self, response):
        # httpbin echoes request metadata, including the origin IP.
        print(response.text)
(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36", 5 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36", 6 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36", 7 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36", 8 | "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36", 9 | "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36", 10 | "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36", 11 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36", 12 | "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36", 13 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36", 14 | "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36", 15 | "Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36", 16 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36", 17 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36", 18 | "Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36", 19 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36", 20 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36", 21 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/35.0.2309.372 Safari/537.36", 22 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36", 23 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36", 24 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1866.237 Safari/537.36", 25 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F", 26 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10", 27 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36", 28 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36", 29 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36", 30 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36", 31 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36", 32 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36", 33 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36", 34 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36", 35 | "Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36", 36 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36", 37 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36", 38 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36", 39 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36", 40 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36", 41 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 42 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 43 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 44 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 45 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 46 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36", 47 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36", 48 | "Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36", 49 | "Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36", 50 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17", 51 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17", 52 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15", 53 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14" 54 | ], 55 | "opera": [ 56 | "Opera/9.80 (X11; Linux i686; Ubuntu/14.10) 
Presto/2.12.388 Version/12.16", 57 | "Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14", 58 | "Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14", 59 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14", 60 | "Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02", 61 | "Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00", 62 | "Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00", 63 | "Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00", 64 | "Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00", 65 | "Mozilla/5.0 (Windows NT 5.1) Gecko/20100101 Firefox/14.0 Opera/12.0", 66 | "Opera/9.80 (Windows NT 6.1; WOW64; U; pt) Presto/2.10.229 Version/11.62", 67 | "Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.10.229 Version/11.62", 68 | "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52", 69 | "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; de) Presto/2.9.168 Version/11.52", 70 | "Opera/9.80 (Windows NT 5.1; U; en) Presto/2.9.168 Version/11.51", 71 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; de) Opera 11.51", 72 | "Opera/9.80 (X11; Linux x86_64; U; fr) Presto/2.9.168 Version/11.50", 73 | "Opera/9.80 (X11; Linux i686; U; hu) Presto/2.9.168 Version/11.50", 74 | "Opera/9.80 (X11; Linux i686; U; ru) Presto/2.8.131 Version/11.11", 75 | "Opera/9.80 (X11; Linux i686; U; es-ES) Presto/2.8.131 Version/11.11", 76 | "Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/5.0 Opera 11.11", 77 | "Opera/9.80 (X11; Linux x86_64; U; bg) Presto/2.8.131 Version/11.10", 78 | "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.8.99 Version/11.10", 79 | "Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10", 80 | "Opera/9.80 (Windows NT 6.1; Opera Tablet/15165; U; en) Presto/2.8.149 Version/11.1", 81 | "Opera/9.80 (X11; Linux x86_64; U; Ubuntu/10.10 (maverick); pl) Presto/2.7.62 Version/11.01", 82 | "Opera/9.80 (X11; 
Linux i686; U; ja) Presto/2.7.62 Version/11.01", 83 | "Opera/9.80 (X11; Linux i686; U; fr) Presto/2.7.62 Version/11.01", 84 | "Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01", 85 | "Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.7.62 Version/11.01", 86 | "Opera/9.80 (Windows NT 6.1; U; sv) Presto/2.7.62 Version/11.01", 87 | "Opera/9.80 (Windows NT 6.1; U; en-US) Presto/2.7.62 Version/11.01", 88 | "Opera/9.80 (Windows NT 6.1; U; cs) Presto/2.7.62 Version/11.01", 89 | "Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.7.62 Version/11.01", 90 | "Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.7.62 Version/11.01", 91 | "Opera/9.80 (Windows NT 5.1; U;) Presto/2.7.62 Version/11.01", 92 | "Opera/9.80 (Windows NT 5.1; U; cs) Presto/2.7.62 Version/11.01", 93 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101213 Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01", 94 | "Mozilla/5.0 (Windows NT 6.1; U; nl; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01", 95 | "Mozilla/5.0 (Windows NT 6.1; U; de; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01", 96 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; de) Opera 11.01", 97 | "Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00", 98 | "Opera/9.80 (X11; Linux i686; U; it) Presto/2.7.62 Version/11.00", 99 | "Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.6.37 Version/11.00", 100 | "Opera/9.80 (Windows NT 6.1; U; pl) Presto/2.7.62 Version/11.00", 101 | "Opera/9.80 (Windows NT 6.1; U; ko) Presto/2.7.62 Version/11.00", 102 | "Opera/9.80 (Windows NT 6.1; U; fi) Presto/2.7.62 Version/11.00", 103 | "Opera/9.80 (Windows NT 6.1; U; en-GB) Presto/2.7.62 Version/11.00", 104 | "Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00", 105 | "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00" 106 | ], 107 | "firefox": [ 108 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1", 109 | "Mozilla/5.0 (Windows NT 6.3; rv:36.0) 
Gecko/20100101 Firefox/36.0", 110 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0", 111 | "Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0", 112 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0", 113 | "Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0", 114 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0", 115 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0", 116 | "Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0", 117 | "Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0", 118 | "Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3", 119 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0", 120 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0", 121 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0", 122 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0", 123 | "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0", 124 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0", 125 | "Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/23.0", 126 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0", 127 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:23.0) Gecko/20131011 Firefox/23.0", 128 | "Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/22.0", 129 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0", 130 | "Mozilla/5.0 (Windows NT 6.1; rv:22.0) Gecko/20130405 Firefox/22.0", 131 | "Mozilla/5.0 (Microsoft Windows NT 6.2.9200.0); rv:22.0) Gecko/20130405 Firefox/22.0", 132 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1", 133 | "Mozilla/5.0 (Windows NT 6.1; Win64; x64; 
rv:16.0.1) Gecko/20121011 Firefox/21.0.1", 134 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:21.0.0) Gecko/20121011 Firefox/21.0.0", 135 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0", 136 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20100101 Firefox/21.0", 137 | "Mozilla/5.0 (X11; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0", 138 | "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20130514 Firefox/21.0", 139 | "Mozilla/5.0 (Windows NT 6.2; rv:21.0) Gecko/20130326 Firefox/21.0", 140 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130401 Firefox/21.0", 141 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0", 142 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130330 Firefox/21.0", 143 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0", 144 | "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130401 Firefox/21.0", 145 | "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130328 Firefox/21.0", 146 | "Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0", 147 | "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130401 Firefox/21.0", 148 | "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0", 149 | "Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101 Firefox/21.0", 150 | "Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0", 151 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0", 152 | "Mozilla/5.0 (Windows NT 6.2; Win64; x64;) Gecko/20100101 Firefox/20.0", 153 | "Mozilla/5.0 (Windows x86; rv:19.0) Gecko/20100101 Firefox/19.0", 154 | "Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/19.0", 155 | "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/18.0.1", 156 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0", 157 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6" 158 | ], 159 | "internetexplorer": [ 160 | 
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko", 161 | "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko", 162 | "Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0", 163 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)", 164 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)", 165 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)", 166 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)", 167 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)", 168 | "Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)", 169 | "Mozilla/4.0 (Compatible; MSIE 8.0; Windows NT 5.2; Trident/6.0)", 170 | "Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)", 171 | "Mozilla/1.22 (compatible; MSIE 10.0; Windows 3.1)", 172 | "Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))", 173 | "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)", 174 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)", 175 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7)", 176 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7", 177 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; InfoPath.3; MS-RTC LM 8; .NET4.0C; .NET4.0E)", 178 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; chromeframe/12.0.742.112)", 179 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; 
Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)", 180 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)", 181 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; Tablet PC 2.0; InfoPath.3; .NET4.0C; .NET4.0E)", 182 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0", 183 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; yie8)", 184 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET CLR 1.1.4322; .NET4.0C; Tablet PC 2.0)", 185 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; FunWebProducts)", 186 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/13.0.782.215)", 187 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)", 188 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205", 189 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.1; SV1; .NET CLR 2.8.52393; WOW64; en-US)", 190 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)", 191 | "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)", 192 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.2; SV1; .NET CLR 3.3.69573; WOW64; en-US)", 193 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)", 194 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows 
NT 6.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.8.36217; WOW64; en-US)", 195 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; .NET CLR 2.7.58687; SLCC2; Media Center PC 5.0; Zune 3.4; Tablet PC 3.6; InfoPath.3)", 196 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; Media Center PC 4.0; SLCC1; .NET CLR 3.0.04320)", 197 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322)", 198 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727)", 199 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)", 200 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; SLCC1; .NET CLR 1.1.4322)", 201 | "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 3.0.04506.30)", 202 | "Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.0; Trident/4.0; FBSMTWB; .NET CLR 2.0.34861; .NET CLR 3.0.3746.3218; .NET CLR 3.5.33652; msn OptimizedIE8;ENUS)", 203 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)", 204 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8)", 205 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8", 206 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C)", 207 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.3; .NET4.0C; .NET4.0E; .NET CLR 3.5.30729; .NET CLR 3.0.30729; MS-RTC LM 8)", 208 | "Mozilla/4.0 
(compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)", 209 | "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 3.0)" 210 | ], 211 | "safari": [ 212 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A", 213 | "Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25", 214 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2", 215 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10", 216 | "Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3", 217 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1", 218 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; da-dk) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1", 219 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 220 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; ko-KR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 221 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; fr-FR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 222 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 223 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; cs-CZ) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 224 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 
Safari/533.20.27", 225 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 226 | "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; zh-cn) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 227 | "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 228 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 229 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; zh-cn) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 230 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; sv-se) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 231 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; ko-kr) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 232 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 233 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; it-it) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 234 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; fr-fr) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 235 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; es-es) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 236 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 237 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-gb) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 238 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", 239 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; 
sv-SE) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 240 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 241 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 242 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; hu-HU) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 243 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 244 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 245 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 246 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 247 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; it-IT) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 248 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 249 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-us) AppleWebKit/534.16+ (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 250 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; fr-ch) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 251 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15+ (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 252 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; ar) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 253 | "Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4", 254 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-HK) AppleWebKit/533.18.1 
(KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 255 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 256 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; tr-TR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 257 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; nb-NO) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 258 | "Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 259 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 260 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5", 261 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; zh-cn) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5" 262 | ] 263 | } 264 | } -------------------------------------------------------------------------------- /爬虫/训练新闻数据爬取/spider/pc_user_agent.py: -------------------------------------------------------------------------------- 1 | # 电脑端浏览器标识处理类 2 | 3 | import json 4 | import random 5 | 6 | 7 | # chrome 浏览器的标识头 8 | chrome = [] 9 | # opera 浏览器的标识头 10 | opera = [] 11 | # firefox 浏览器的标志头 12 | firefox = [] 13 | # ie 浏览器的标志头 14 | ie = [] 15 | # safari 浏览器的标志头 16 | safari = [] 17 | # 所有浏览器的标识头 18 | all_browser = [] 19 | 20 | # json文件读取 21 | f = open("pc_user_agent.json") 22 | pc_user_agent = json.load(f) 23 | browsers = pc_user_agent["browsers"] 24 | 25 | for key, value in browsers.items(): 26 | # all_browser 在列表中追加所有浏览器的标识头 27 | all_browser.extend(value) 28 | # 特殊浏览器只追加属于自己的那部分浏览器标识头 29 | if key == "chrome": 30 | chrome = value 31 | elif key == "opera": 32 | opera = value 33 | elif key == "firefox": 34 | firefox = value 35 | elif key == "internetexplorer": 36 | ie = value 37 | elif key == "safari": 38 | safari = value 39 | 40 | # 
得到所有浏览器的随机 User-Agent 41 | def get_random(): 42 | return random.choice(all_browser) 43 | 44 | # 得到chrome浏览器的随机 User-Agent 45 | def get_chrome_random(): 46 | return random.choice(chrome) 47 | 48 | # 得到opera浏览器的随机 User-Agent 49 | def get_opera_random(): 50 | return random.choice(opera) 51 | 52 | # 得到firefox浏览器的随机 User-Agent 53 | def get_firefox_random(): 54 | return random.choice(firefox) 55 | 56 | # 得到ie浏览器的随机 User-Agent 57 | def get_ie_random(): 58 | return random.choice(ie) 59 | 60 | # 得到safari浏览器的随机 User-Agent 61 | def get_safari_random(): 62 | return random.choice(safari) 63 | 64 | if __name__ == '__main__': 65 | # print("所有浏览器的标识头:", len(all), all) 66 | # print("chrome浏览器的标识头:", len(chrome), chrome) 67 | # print("opera浏览器的标识头:", len(opera), opera) 68 | # print("firefox浏览器的标识头:", len(firefox), firefox) 69 | # print("ie浏览器的标识头:", len(ie), ie) 70 | # print("safari浏览器的标识头:", len(safari), safari) 71 | print(get_random()) 72 | 73 | -------------------------------------------------------------------------------- /爬虫/训练新闻数据爬取/spider/spider.py: -------------------------------------------------------------------------------- 1 | # 爬取说明: 2 | # 目标网站: 3 | # 中国新闻网:http://www.chinanews.com 4 | # 爬取规则: 5 | # http://域名/scroll-news/年/月日/news.shtml 6 | # 例如,爬取2018年1月7日的新闻;http://www.chinanews.com/scroll-news/2018/0107/news.shtml 7 | # 爬取字段: 8 | # 新闻标题 9 | # 新闻分类 10 | from datetime import datetime, timedelta, date 11 | from pymongo import MongoClient 12 | import requests 13 | from bs4 import BeautifulSoup 14 | from random import randint 15 | import time 16 | 17 | # from .pc_user_agent import get_random 18 | 19 | 20 | # 封装自己的MongoDB类 21 | class MongoDB: 22 | # 构造方法 23 | def __init__(self, host, port, username, password, db, collection): 24 | self.host = host 25 | self.port = port 26 | self.client = MongoClient(host=host, port=port, username=username, password=password) 27 | self.db = self.client[db] 28 | self.collection = self.db[collection] 29 | 30 | # 批量插入方法 31 | def 
batch_add(self, dict): 32 | return self.collection.insert_many(dict) 33 | 34 | 35 | # 构建mongoDB对象 36 | mongoDB = MongoDB("localhost", 27017, "weizhiwen", "123456", "news", "train_news") 37 | 38 | 39 | # 格式化日期函数,start_date 为开始日期,days 为倒推的天数 40 | def format_news_date(start_date, days=0): 41 | return (start_date + timedelta(days=days)).strftime("%Y/%m%d") 42 | 43 | 44 | # 爬取一天的新闻数据,每次爬取都使用不同的浏览器标识头 45 | def crawl_data_by_url(url): 46 | r = requests.get(url, headers={"User-Agent": get_random()}) 47 | r.encoding = "gb2312" 48 | soup = BeautifulSoup(r.text, "lxml") 49 | lis = soup.find(class_="content_list").find_all("li") 50 | list = [] 51 | # 将每条新闻添加到list数组中 52 | for li in lis: 53 | try: 54 | # 新闻标题 55 | news_title = li.find("div", class_="dd_bt").find("a").get_text() 56 | # 新闻分类 57 | news_category = li.find(class_="dd_lm").find("a").get_text() 58 | # 构建新闻document数据 59 | news_document = {"news_category": news_category, "news_title": news_title} 60 | list.append(news_document) 61 | except: 62 | pass 63 | return list 64 | 65 | 66 | # 爬虫爬取方法,start_date 为开始日期,days 为要爬取的天数 67 | def spider_crawl(start_date, days): 68 | # 爬取每天的新闻 69 | for i in range(0, days): 70 | print("爬取", format_news_date(start_date, -i), "的新闻") 71 | # 根据中国新闻网新闻的规则构建url 72 | url = "http://www.chinanews.com/scroll-news/" + format_news_date(start_date, -i) + "/news.shtml" 73 | # 向mongoDB中批量插入数据 74 | list = crawl_data_by_url(url) 75 | mongoDB.batch_add(list) 76 | # 每爬完一天的数据就休息一下,防止被抓 77 | sleep_time = randint(5, 10) 78 | print("休息", sleep_time, "秒钟") 79 | time.sleep(sleep_time) 80 | print("爬取完毕!!!") 81 | 82 | # 爬虫爬取军事分类下的新闻 83 | def spider_crawl_mil(start_date, days): 84 | # 爬取每天的新闻 85 | for i in range(0, days): 86 | print("爬取", format_news_date(start_date, -i), "的新闻") 87 | # 根据中国新闻网新闻的规则构建url 88 | url = "http://www.chinanews.com/scroll-news/mil/" + format_news_date(start_date, -i) + "/news.shtml" 89 | # 判断list是否为空 90 | list = crawl_data_by_url(url) 91 | if len(list) > 0: 92 | # 如果list不为空,向mongoDB中批量插入数据 93 
| mongoDB.batch_add(list) 94 | # 每爬完一天的数据就休息一下,防止被抓 95 | sleep_time = randint(5, 10) 96 | print("休息", sleep_time, "秒钟") 97 | time.sleep(sleep_time) 98 | print("爬取完毕!!!") 99 | 100 | 101 | if __name__ == "__main__": 102 | # 爬取的开始日期,现已爬取 2019.03.06-2012.12.07 的数据,共 3035803 条数据 103 | start_date = datetime(2013, 12, 5) 104 | print(type(start_date)) 105 | print((start_date + timedelta(days=-1)).date()) 106 | # spider_crawl(start_date, 365) 107 | # print(date.today()) 108 | # print(format_news_date(date.today())) 109 | # special_date = datetime(2018, 12, 7) 110 | # print('http://www.chinanews.com/scroll-news/{}/news.shtml'.format(format_news_date(special_date))) 111 | # print(datetime(2013, 12, 5).date()) 112 | today = date.today() 113 | print('today', today) 114 | print(type(today)) 115 | print(datetime(today.year, today.month, today.day).date()) 116 | print(type(format_news_date(datetime(today.year, today.month, today.day)))) 117 | -------------------------------------------------------------------------------- /爬虫/训练新闻数据爬取/spider/test.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, date, timedelta 2 | 3 | 4 | # 格式化日期函数,start_date 为开始日期(默认为当前日期),days 为倒推的天数 5 | def get_format_news_date(date=date.today(), days=0): 6 | start_date = datetime(date.year, date.month, date.day) 7 | # 返回格式化好的字符串 8 | return (start_date).strftime("%Y/%m%d") 9 | 10 | 11 | if __name__ == '__main__': 12 | special_date = date.today() 13 | print(special_date) 14 | print(get_format_news_date(special_date)) 15 | print('http://www.chinanews.com/scroll-news/{}/news.shtml'.format(get_format_news_date(special_date))) 16 | next_date = special_date + timedelta(days=-1) 17 | print(next_date) 18 | print(get_format_news_date(next_date)) 19 | print('http://www.chinanews.com/scroll-news/{}/news.shtml'.format(get_format_news_date(next_date))) 20 | -------------------------------------------------------------------------------- 
/爬虫/训练新闻数据爬取/图片/各个分类下的新闻数量.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/weizhiwen/News-Intelligent-Classification-WeChat-Mini-Program/768cea0744d316452f0be31b6224fd22bc19b5c8/爬虫/训练新闻数据爬取/图片/各个分类下的新闻数量.png --------------------------------------------------------------------------------