├── .DS_Store
├── .gitignore
├── LICENSE
├── README.md
├── mysql
│   ├── __init__.py
│   ├── items.py
│   ├── middlewares.py
│   ├── pipelines.py
│   ├── settings.py
│   └── spiders
│       ├── __init__.py
│       ├── musicspider.py
│       ├── run.py
│       └── videospider.py
└── scrapy.cfg
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yanceyblog/scrapy-mysql/f426eaa94e87961cfcc9b35bb1108f6da24cbf5b/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Storing Scraped Data: a Worked Example
2 | [TOC]
3 |
4 | > This chapter implements an example of storing scraped data in a database. Blog post: http://blog.csdn.net/yancey_blog/article/details/53895821
5 |
6 | ## Data storage
7 | Scrapy can export scraped data to files in formats such as CSV, JSON, JSON lines (.jl), pickle, marshal, and XML, as shown below. That works for small data sets, but storing very large amounts of data in files (images, of course, still belong in files) quickly becomes unwieldy, since the point of collecting the data is to use it.
8 |
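A feed export, for instance, needs nothing beyond a command-line flag; with this project's music spider, the following would write all scraped items to a JSON file (the file name here is arbitrary):

```
scrapy crawl music -o music.json
```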
9 | For anything substantial, though, we usually store the data in a database; this chapter covers the most common one, MySQL. Note also that the pipelines file Scrapy generated has gone unused so far: it is precisely where the items handed down by the spiders are processed, so we can implement the storage logic in a pipeline.
10 |
11 | * Adding the MySQL library (PyMySQL)
12 |
13 | In PyCharm, open File --> Default Settings --> Project Interpreter, click the "+" in the lower-left corner, and search for PyMysql:
14 |
15 |
16 |
17 | Click "Install Package" to install it. If the installation fails, try ticking "Install to user's site packages directory" to install into the user directory instead.
18 |
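If PyCharm is not available, the same package can be installed from the command line with pip:

```
pip install pymysql
```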
19 | ## Configuring the MySQL service
20 |
21 | 1. Install MySQL
22 |
23 | ```
24 | root@ubuntu:~# sudo apt-get install mysql-server
25 | ```
26 |
27 | ```
28 | root@ubuntu:~# apt install mysql-client
29 | ```
30 |
31 | ```
32 | root@ubuntu:~# apt install libmysqlclient-dev
33 | ```
34 |
35 | During installation you will be prompted to set a password for the root account; enter the same password twice.
36 |
37 |
38 | ----------
39 |
40 |
41 | 2. Verify that the installation succeeded
42 |
43 | ```
44 | root@ubuntu:~# sudo netstat -tap | grep mysql
45 | ```
46 |
47 | ```shell
48 | root@ubuntu:~# netstat -tap | grep mysql
49 |
50 | tcp6 0 0 [::]:mysql [::]:* LISTEN 7510/mysqld
51 | ```
52 |
53 |
54 | ----------
55 |
56 |
57 | 3. Enable remote access to MySQL
58 |
59 | * Edit the MySQL configuration file and comment out "bind-address = 127.0.0.1":
60 |
61 | ```
62 | root@ubuntu:~# vi /etc/mysql/mysql.conf.d/mysqld.cnf
63 | ```
64 |
65 | ```
66 | #bind-address = 127.0.0.1
67 | ```
68 |
69 | * Log in to MySQL as root
70 |
71 | ```
72 | root@ubuntu:~# mysql -u root -p123456
73 | ```
74 |
75 | * At the mysql prompt, run grant all on *.* to username@'%' identified by 'password';
76 | * or grant all on *.* to username@'%' identified by 'password' with grant option;
77 |
78 | ```
79 | mysql> grant all on *.* to china@'%' identified by '123456';
80 | ```
81 |
82 | * Run flush privileges; and then restart MySQL with the /etc/init.d/mysql restart command
83 |
84 | ```
85 | mysql> flush privileges;
86 | ```
87 | ```
88 | root@ubuntu:~# /etc/init.d/mysql restart
89 | ```
90 |
91 | * When connecting remotely, configure the client with the host's address and the account created above.
92 |
94 |
95 | ----------
96 | 4. Common problems
97 |
98 | - Error 1045: Access denied for user 'root'@'localhost' (or an IP address) (using password: YES)
99 |
100 |
101 | ```
102 | 1. mysql -u root -p
103 | 2. GRANT ALL PRIVILEGES ON *.* TO 'myuser'@'%' IDENTIFIED BY 'mypassword' WITH GRANT OPTION;
104 | 3. FLUSH PRIVILEGES;
105 | ```
106 |
107 |
108 | ----------
109 | ## Create the four item tables in MySQL
110 |
111 |
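The original screenshot of the table definitions is not reproduced here. As a sketch, a schema for music_douban consistent with the fields in items.py and the pipeline below could look like this (the column types are assumptions; the unique key on music_url backs the URL-based dedup check). The other three tables follow the same pattern:

```sql
CREATE TABLE music_douban (
    id INT AUTO_INCREMENT PRIMARY KEY,
    music_name VARCHAR(255),
    music_alias VARCHAR(255),
    music_singer VARCHAR(255),
    music_time VARCHAR(64),
    music_rating VARCHAR(16),
    music_votes VARCHAR(16),
    music_tags VARCHAR(255),
    music_url VARCHAR(255) NOT NULL,
    UNIQUE KEY uk_music_url (music_url)
) DEFAULT CHARSET=utf8;
```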
112 |
113 | ## Create the project
114 |
115 | With PyMySQL installed, we can implement the storage logic in the pipeline. First create the project: `scrapy startproject mysql`. This example reuses the multi-spider example from the previous chapter and stores four of its items in the MySQL database.
116 | Then open the newly created mysql project and add the database connection constants to settings.py.
117 |
118 | ```python
119 | # -*- coding: utf-8 -*-
120 | BOT_NAME = 'mysql'
121 |
122 | SPIDER_MODULES = ['mysql.spiders']
123 | NEWSPIDER_MODULE = 'mysql.spiders'
124 |
125 | MYSQL_HOST = 'localhost'
126 | MYSQL_DBNAME = 'spider'
127 | MYSQL_USER = 'root'
128 | MYSQL_PASSWD = '123456'
129 |
130 | DOWNLOAD_DELAY = 1
131 |
132 | ITEM_PIPELINES = {
133 | 'mysql.pipelines.DoubanPipeline': 301,
134 | }
135 | ```
136 |
137 | pipelines.py configuration:
138 |
139 |
140 | ```python
141 | # -*- coding: utf-8 -*-
142 |
143 | # Define your item pipelines here
144 | #
145 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
146 | # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
147 | import pymysql
148 | import logging
149 |
150 | from mysql import settings
151 | from mysql.items import MusicItem, MusicReviewItem, VideoItem, VideoReviewItem
152 |
153 |
154 | class DoubanPipeline(object):
155 | def __init__(self):
156 | self.connect = pymysql.connect(
157 | host=settings.MYSQL_HOST,
158 | db=settings.MYSQL_DBNAME,
159 | user=settings.MYSQL_USER,
160 | passwd=settings.MYSQL_PASSWD,
161 | charset='utf8',
162 | use_unicode=True)
163 | self.cursor = self.connect.cursor()
164 |
165 | def process_item(self, item, spider):
166 | if item.__class__ == MusicItem:
167 | try:
168 | self.cursor.execute("""select * from music_douban where music_url = %s""", item["music_url"])
169 | ret = self.cursor.fetchone()
170 | if ret:
171 | self.cursor.execute(
172 | """update music_douban set music_name = %s,music_alias = %s,music_singer = %s,
173 | music_time = %s,music_rating = %s,music_votes = %s,music_tags = %s,music_url = %s
174 | where music_url = %s""",
175 | (item['music_name'],
176 | item['music_alias'],
177 | item['music_singer'],
178 | item['music_time'],
179 | item['music_rating'],
180 | item['music_votes'],
181 | item['music_tags'],
182 | item['music_url'],
183 | item['music_url']))
184 | else:
185 | self.cursor.execute(
186 | """insert into music_douban(music_name,music_alias,music_singer,music_time,music_rating,
187 | music_votes,music_tags,music_url)
188 | value (%s,%s,%s,%s,%s,%s,%s,%s)""",
189 | (item['music_name'],
190 | item['music_alias'],
191 | item['music_singer'],
192 | item['music_time'],
193 | item['music_rating'],
194 | item['music_votes'],
195 | item['music_tags'],
196 | item['music_url']))
197 | self.connect.commit()
198 | except Exception as error:
199 | logging.error(error)
200 | return item
201 |
202 | elif item.__class__ == MusicReviewItem:
203 | try:
204 | self.cursor.execute("""select * from music_review_douban where review_url = %s""", item["review_url"])
205 | ret = self.cursor.fetchone()
206 | if ret:
207 | self.cursor.execute(
208 | """update music_review_douban set review_title = %s,review_content = %s,review_author = %s,
209 | review_music = %s,review_time = %s,review_url = %s
210 | where review_url = %s""",
211 | (item['review_title'],
212 | item['review_content'],
213 | item['review_author'],
214 | item['review_music'],
215 | item['review_time'],
216 | item['review_url'],
217 | item['review_url']))
218 | else:
219 | self.cursor.execute(
220 | """insert into music_review_douban(review_title,review_content,review_author,review_music,review_time,
221 | review_url)
222 | value (%s,%s,%s,%s,%s,%s)""",
223 | (item['review_title'],
224 | item['review_content'],
225 | item['review_author'],
226 | item['review_music'],
227 | item['review_time'],
228 | item['review_url']))
229 | self.connect.commit()
230 | except Exception as error:
231 | logging.error(error)
232 | return item
233 |
234 | elif item.__class__ == VideoItem:
235 | try:
236 | self.cursor.execute("""select * from video_douban where video_url = %s""", item["video_url"])
237 | ret = self.cursor.fetchone()
238 | if ret:
239 | self.cursor.execute(
240 | """update video_douban set video_name= %s,video_alias= %s,video_actor= %s,video_year= %s,
241 | video_time= %s,video_rating= %s,video_votes= %s,video_tags= %s,video_url= %s,
242 | video_director= %s,video_type= %s,video_bigtype= %s,video_area= %s,video_language= %s,
243 | video_length= %s,video_writer= %s,video_desc= %s,video_episodes= %s where video_url = %s""",
244 | (item['video_name'],
245 | item['video_alias'],
246 | item['video_actor'],
247 | item['video_year'],
248 | item['video_time'],
249 | item['video_rating'],
250 | item['video_votes'],
251 | item['video_tags'],
252 | item['video_url'],
253 | item['video_director'],
254 | item['video_type'],
255 | item['video_bigtype'],
256 | item['video_area'],
257 | item['video_language'],
258 | item['video_length'],
259 | item['video_writer'],
260 | item['video_desc'],
261 | item['video_episodes'],
262 | item['video_url']))
263 | else:
264 | self.cursor.execute(
265 | """insert into video_douban(video_name,video_alias,video_actor,video_year,video_time,
266 | video_rating,video_votes,video_tags,video_url,video_director,video_type,video_bigtype,
267 | video_area,video_language,video_length,video_writer,video_desc,video_episodes)
268 | value (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
269 | (item['video_name'],
270 | item['video_alias'],
271 | item['video_actor'],
272 | item['video_year'],
273 | item['video_time'],
274 | item['video_rating'],
275 | item['video_votes'],
276 | item['video_tags'],
277 | item['video_url'],
278 | item['video_director'],
279 | item['video_type'],
280 | item['video_bigtype'],
281 | item['video_area'],
282 | item['video_language'],
283 | item['video_length'],
284 | item['video_writer'],
285 | item['video_desc'],
286 | item['video_episodes']))
287 | self.connect.commit()
288 | except Exception as error:
289 | logging.error(error)
290 | return item
291 |
292 | elif item.__class__ == VideoReviewItem:
293 | try:
294 | self.cursor.execute("""select * from video_review_douban where review_url = %s""", item["review_url"])
295 | ret = self.cursor.fetchone()
296 | if ret:
297 | self.cursor.execute(
298 | """update video_review_douban set review_title = %s,review_content = %s,review_author = %s,
299 | review_video = %s,review_time = %s,review_url = %s
300 | where review_url = %s""",
301 | (item['review_title'],
302 | item['review_content'],
303 | item['review_author'],
304 | item['review_video'],
305 | item['review_time'],
306 | item['review_url'],
307 | item['review_url']))
308 | else:
309 | self.cursor.execute(
310 | """insert into video_review_douban(review_title,review_content,review_author,review_video,review_time,
311 | review_url)
312 | value (%s,%s,%s,%s,%s,%s)""",
313 | (item['review_title'],
314 | item['review_content'],
315 | item['review_author'],
316 | item['review_video'],
317 | item['review_time'],
318 | item['review_url']))
319 | self.connect.commit()
320 | except Exception as error:
321 | logging.error(error)
322 | return item
323 | else:
324 | pass
325 |
326 | ```
327 | In the pipeline above, the select-then-update/insert logic already deduplicates rows by URL.
328 |
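As an aside, since each table carries a unique key on its URL column, the same dedup can be expressed in a single statement with MySQL's INSERT ... ON DUPLICATE KEY UPDATE. A hypothetical sketch for music items (a variant, not what this project ships):

```python
# Hypothetical upsert variant; assumes the UNIQUE KEY on music_url shown earlier.
self.cursor.execute(
    """insert into music_douban(music_name,music_alias,music_singer,music_time,
           music_rating,music_votes,music_tags,music_url)
       values (%s,%s,%s,%s,%s,%s,%s,%s)
       on duplicate key update music_name = values(music_name),
           music_rating = values(music_rating),
           music_votes = values(music_votes),
           music_tags = values(music_tags)""",
    (item['music_name'], item['music_alias'], item['music_singer'],
     item['music_time'], item['music_rating'], item['music_votes'],
     item['music_tags'], item['music_url']))
self.connect.commit()
```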
329 | ## Run the spiders
330 | Run run.py from PyCharm; afterwards the MySQL tables hold the data we want.
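The spiders can equally be started from the project root on the command line, one at a time:

```
scrapy crawl music
scrapy crawl video
```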
331 |
332 |
333 |
334 |
335 |
--------------------------------------------------------------------------------
/mysql/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yanceyblog/scrapy-mysql/f426eaa94e87961cfcc9b35bb1108f6da24cbf5b/mysql/__init__.py
--------------------------------------------------------------------------------
/mysql/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # http://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item, Field
9 |
10 |
11 | # Music
12 | class MusicItem(Item):
13 | music_name = Field()
14 | music_alias = Field()
15 | music_singer = Field()
16 | music_time = Field()
17 | music_rating = Field()
18 | music_votes = Field()
19 | music_tags = Field()
20 | music_url = Field()
21 |
22 |
23 | # Music review
24 | class MusicReviewItem(Item):
25 | review_title = Field()
26 | review_content = Field()
27 | review_author = Field()
28 | review_music = Field()
29 | review_time = Field()
30 | review_url = Field()
31 |
32 |
33 | # Video
34 | class VideoItem(Item):
35 | video_name = Field()
36 | video_alias = Field()
37 | video_actor = Field()
38 | video_year = Field()
39 | video_time = Field()
40 | video_rating = Field()
41 | video_votes = Field()
42 | video_tags = Field()
43 | video_url = Field()
44 | video_director = Field()
45 | video_type = Field()
46 | video_bigtype = Field()
47 | video_area = Field()
48 | video_language = Field()
49 | video_length = Field()
50 | video_writer = Field()
51 | video_desc = Field()
52 | video_episodes = Field()
53 |
54 |
55 | # Video review
56 | class VideoReviewItem(Item):
57 | review_title = Field()
58 | review_content = Field()
59 | review_author = Field()
60 | review_video = Field()
61 | review_time = Field()
62 | review_url = Field()
63 |
--------------------------------------------------------------------------------
/mysql/middlewares.py:
--------------------------------------------------------------------------------
1 | # -*-coding:utf-8-*-
2 |
3 |
4 | import random
5 | from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
6 |
7 |
8 | class RotateUserAgentMiddleware(UserAgentMiddleware):
9 | def __init__(self, user_agent=''):
10 | self.user_agent = user_agent
11 |
12 | def process_request(self, request, spider):
13 | ua = random.choice(self.user_agent_list)
14 | if ua:
15 | # print(ua)
16 | request.headers.setdefault('User-Agent', ua)
17 |
18 | # the default user_agent_list comprises Chrome, IE, Firefox, Mozilla, Opera and Netscape
19 | # for more user agent strings,you can find it in http://www.useragentstring.com/pages/useragentstring.php
20 | user_agent_list = [ \
21 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \
22 | "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
23 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
24 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
25 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
26 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
27 | "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
28 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
29 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
30 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
31 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
32 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
33 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
34 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
35 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
36 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
37 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
38 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
39 | ]
40 |
--------------------------------------------------------------------------------
/mysql/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 | import pymysql
8 | import logging
9 |
10 | from mysql import settings
11 | from mysql.items import MusicItem, MusicReviewItem, VideoItem, VideoReviewItem
12 |
13 |
14 | class DoubanPipeline(object):
15 | def __init__(self):
16 | self.connect = pymysql.connect(
17 | host=settings.MYSQL_HOST,
18 | db=settings.MYSQL_DBNAME,
19 | user=settings.MYSQL_USER,
20 | passwd=settings.MYSQL_PASSWD,
21 | charset='utf8',
22 | use_unicode=True)
23 | self.cursor = self.connect.cursor()
24 |
25 | def process_item(self, item, spider):
26 | if item.__class__ == MusicItem:
27 | try:
28 | self.cursor.execute("""select * from music_douban where music_url = %s""", item["music_url"])
29 | ret = self.cursor.fetchone()
30 | if ret:
31 | self.cursor.execute(
32 | """update music_douban set music_name = %s,music_alias = %s,music_singer = %s,
33 | music_time = %s,music_rating = %s,music_votes = %s,music_tags = %s,music_url = %s
34 | where music_url = %s""",
35 | (item['music_name'],
36 | item['music_alias'],
37 | item['music_singer'],
38 | item['music_time'],
39 | item['music_rating'],
40 | item['music_votes'],
41 | item['music_tags'],
42 | item['music_url'],
43 | item['music_url']))
44 | else:
45 | self.cursor.execute(
46 | """insert into music_douban(music_name,music_alias,music_singer,music_time,music_rating,
47 | music_votes,music_tags,music_url)
48 | value (%s,%s,%s,%s,%s,%s,%s,%s)""",
49 | (item['music_name'],
50 | item['music_alias'],
51 | item['music_singer'],
52 | item['music_time'],
53 | item['music_rating'],
54 | item['music_votes'],
55 | item['music_tags'],
56 | item['music_url']))
57 | self.connect.commit()
58 | except Exception as error:
59 | logging.error(error)
60 | return item
61 |
62 | elif item.__class__ == MusicReviewItem:
63 | try:
64 | self.cursor.execute("""select * from music_review_douban where review_url = %s""", item["review_url"])
65 | ret = self.cursor.fetchone()
66 | if ret:
67 | self.cursor.execute(
68 | """update music_review_douban set review_title = %s,review_content = %s,review_author = %s,
69 | review_music = %s,review_time = %s,review_url = %s
70 | where review_url = %s""",
71 | (item['review_title'],
72 | item['review_content'],
73 | item['review_author'],
74 | item['review_music'],
75 | item['review_time'],
76 | item['review_url'],
77 | item['review_url']))
78 | else:
79 | self.cursor.execute(
80 | """insert into music_review_douban(review_title,review_content,review_author,review_music,review_time,
81 | review_url)
82 | value (%s,%s,%s,%s,%s,%s)""",
83 | (item['review_title'],
84 | item['review_content'],
85 | item['review_author'],
86 | item['review_music'],
87 | item['review_time'],
88 | item['review_url']))
89 | self.connect.commit()
90 | except Exception as error:
91 | logging.error(error)
92 | return item
93 |
94 | elif item.__class__ == VideoItem:
95 | try:
96 | self.cursor.execute("""select * from video_douban where video_url = %s""", item["video_url"])
97 | ret = self.cursor.fetchone()
98 | if ret:
99 | self.cursor.execute(
100 | """update video_douban set video_name= %s,video_alias= %s,video_actor= %s,video_year= %s,
101 | video_time= %s,video_rating= %s,video_votes= %s,video_tags= %s,video_url= %s,
102 | video_director= %s,video_type= %s,video_bigtype= %s,video_area= %s,video_language= %s,
103 | video_length= %s,video_writer= %s,video_desc= %s,video_episodes= %s where video_url = %s""",
104 | (item['video_name'],
105 | item['video_alias'],
106 | item['video_actor'],
107 | item['video_year'],
108 | item['video_time'],
109 | item['video_rating'],
110 | item['video_votes'],
111 | item['video_tags'],
112 | item['video_url'],
113 | item['video_director'],
114 | item['video_type'],
115 | item['video_bigtype'],
116 | item['video_area'],
117 | item['video_language'],
118 | item['video_length'],
119 | item['video_writer'],
120 | item['video_desc'],
121 | item['video_episodes'],
122 | item['video_url']))
123 | else:
124 | self.cursor.execute(
125 | """insert into video_douban(video_name,video_alias,video_actor,video_year,video_time,
126 | video_rating,video_votes,video_tags,video_url,video_director,video_type,video_bigtype,
127 | video_area,video_language,video_length,video_writer,video_desc,video_episodes)
128 | value (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""",
129 | (item['video_name'],
130 | item['video_alias'],
131 | item['video_actor'],
132 | item['video_year'],
133 | item['video_time'],
134 | item['video_rating'],
135 | item['video_votes'],
136 | item['video_tags'],
137 | item['video_url'],
138 | item['video_director'],
139 | item['video_type'],
140 | item['video_bigtype'],
141 | item['video_area'],
142 | item['video_language'],
143 | item['video_length'],
144 | item['video_writer'],
145 | item['video_desc'],
146 | item['video_episodes']))
147 | self.connect.commit()
148 | except Exception as error:
149 | logging.error(error)
150 | return item
151 |
152 | elif item.__class__ == VideoReviewItem:
153 | try:
154 | self.cursor.execute("""select * from video_review_douban where review_url = %s""", item["review_url"])
155 | ret = self.cursor.fetchone()
156 | if ret:
157 | self.cursor.execute(
158 | """update video_review_douban set review_title = %s,review_content = %s,review_author = %s,
159 | review_video = %s,review_time = %s,review_url = %s
160 | where review_url = %s""",
161 | (item['review_title'],
162 | item['review_content'],
163 | item['review_author'],
164 | item['review_video'],
165 | item['review_time'],
166 | item['review_url'],
167 | item['review_url']))
168 | else:
169 | self.cursor.execute(
170 | """insert into video_review_douban(review_title,review_content,review_author,review_video,review_time,
171 | review_url)
172 | value (%s,%s,%s,%s,%s,%s)""",
173 | (item['review_title'],
174 | item['review_content'],
175 | item['review_author'],
176 | item['review_video'],
177 | item['review_time'],
178 | item['review_url']))
179 | self.connect.commit()
180 | except Exception as error:
181 | logging.error(error)
182 | return item
183 | else:
184 | pass
185 |
--------------------------------------------------------------------------------
/mysql/settings.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Scrapy settings for mysql project
4 | #
5 | # For simplicity, this file contains only settings considered important or
6 | # commonly used. You can find more settings consulting the documentation:
7 | #
8 | # http://doc.scrapy.org/en/latest/topics/settings.html
9 | # http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
10 | # http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
11 |
12 | BOT_NAME = 'mysql'
13 |
14 | SPIDER_MODULES = ['mysql.spiders']
15 | NEWSPIDER_MODULE = 'mysql.spiders'
16 |
17 |
18 | MYSQL_HOST = 'localhost'
19 | MYSQL_DBNAME = 'spider'
20 | MYSQL_USER = 'root'
21 | MYSQL_PASSWD = '123456'
22 |
23 | DOWNLOAD_DELAY = 1
24 |
25 | ITEM_PIPELINES = {
26 | 'mysql.pipelines.DoubanPipeline': 301,
27 |
28 | }
29 |
30 | DOWNLOADER_MIDDLEWARES = {
31 | 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
32 | 'mysql.middlewares.RotateUserAgentMiddleware': 543,
33 | }
34 |
--------------------------------------------------------------------------------
/mysql/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/mysql/spiders/musicspider.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | from scrapy.spiders import CrawlSpider, Rule
4 | from scrapy.linkextractors import LinkExtractor
5 | from mysql.items import MusicItem, MusicReviewItem
6 | import logging
7 |
8 | import re
9 |
10 |
11 | class MusicSpider(CrawlSpider):
12 | name = 'music'
13 | allowed_domains = ['music.douban.com']
14 | start_urls = ['https://music.douban.com/tag/',
15 | 'https://music.douban.com/tag/?view=cloud'
16 | ]
17 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
18 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
19 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time$")),
20 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time\&start=\d+$")),
21 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_music", follow=True),
22 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
23 | )
24 |
25 | def parse_music(self, response):
26 | item = MusicItem()
27 | try:
28 | item['music_name'] = response.xpath('//*[@id="wrapper"]/h1/span/text()').extract()[0]
29 | content = "".join(response.xpath('//*[@id="info"]').extract())
30 | info = response.xpath('//*[@id="info"]/span').extract()
31 | item['music_alias'] = ""
32 | item['music_singer'] = ""
33 | item['music_time'] = ""
34 | for i in range(0, len(info)):
35 | if "又名" in info[i]:
36 | if i == 0:
37 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[1] \
38 | .replace("\xa0", "").replace("\n", "").rstrip()
39 | elif i == 1:
40 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[2] \
41 | .replace("\xa0", "").replace("\n", "").rstrip()
42 | elif i == 2:
43 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[3] \
44 | .replace("\xa0", "").replace("\n", "").rstrip()
45 |
46 | else:
47 | item['music_alias'] = ""
48 | # break
49 | if "表演者" in info[i]:
50 | if i == 0:
51 | item['music_singer'] = "|".join(
52 | response.xpath('//*[@id="info"]/span[1]/span/a/text()').extract())
53 | elif i == 1:
54 | item['music_singer'] = "|".join(
55 | response.xpath('//*[@id="info"]/span[2]/span/a/text()').extract())
56 | elif i == 2:
57 | item['music_singer'] = "|".join(
58 | response.xpath('//*[@id="info"]/span[3]/span/a/text()').extract())
59 | else:
60 | item['music_singer'] = ""
61 | # break
62 | if "发行时间" in info[i]:
63 | nbsp = re.findall(r"发行时间:(.*?)<br", content, re.S)
64 | item['music_time'] = "".join(nbsp).replace("\xa0", "").replace("\n", "").replace(" ", "")
65 | # break
66 | try:
67 | item['music_rating'] = "".join(response.xpath(
68 | '//*[@class="rating_self clearfix"]/strong/text()').extract())
69 | item['music_votes'] = "".join(response.xpath(
70 | '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract())
71 | except Exception as error:
72 | item['music_rating'] = '0'
73 | item['music_votes'] = '0'
74 | logging.error(error)
75 | item['music_tags'] = "|".join(response.xpath('//*[@id="db-tags-section"]/div/a/text()').extract())
76 | item['music_url'] = response.url
77 | yield item
78 | except Exception as error:
79 | logging.error(error)
80 |
81 | def parse_review(self, response):
82 | try:
83 | item = MusicReviewItem()
84 | item['review_title'] = "".join(response.xpath('//*[@property="v:summary"]/text()').extract())
85 | content = "".join(
86 | response.xpath('//*[@id="link-report"]/div[@property="v:description"]/text()').extract())
87 | item['review_content'] = content.lstrip().rstrip().replace("\n", " ")
88 | item['review_author'] = "".join(response.xpath('//*[@property = "v:reviewer"]/text()').extract())
89 | item['review_music'] = "".join(response.xpath('//*[@class="main-hd"]/a[2]/text()').extract())
90 | item['review_time'] = "".join(response.xpath('//*[@class="main-hd"]/p/text()').extract())
91 | item['review_url'] = response.url
92 | yield item
93 | except Exception as error:
94 | logging.error(error)
95 |
--------------------------------------------------------------------------------
/mysql/spiders/run.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 |
4 | from scrapy.crawler import CrawlerProcess
5 | from scrapy.utils.project import get_project_settings
6 |
7 | # cmdline.execute() exits after the first crawl, so two chained execute() calls
8 | # never start the second spider; CrawlerProcess runs both. The __main__ guard
9 | # keeps Scrapy's spider discovery from re-running this module on import.
10 | if __name__ == '__main__':
11 |     process = CrawlerProcess(get_project_settings())
12 |     process.crawl('music')
13 |     process.crawl('video')
14 |     process.start()
--------------------------------------------------------------------------------
/mysql/spiders/videospider.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | from scrapy.spiders import CrawlSpider, Rule
4 | from scrapy.linkextractors import LinkExtractor
5 | from mysql.items import VideoItem, VideoReviewItem
6 | import logging
7 |
8 | import re
9 |
10 | AREA = re.compile(r"制片国家/地区: (.+?)<br")
11 | ALIAS = re.compile(r"又名: (.+?)<br")
12 | LANGUAGE = re.compile(r"语言: (.+?)<br")
13 | EPISODES = re.compile(r"集数: (.+?)<br")
14 | LENGTH = re.compile(r"单集片长: (.+?)<br")
15 |
16 |
17 | class VideoSpider(CrawlSpider):
18 | name = 'video'
19 | allowed_domains = ['movie.douban.com']
20 | start_urls = ['https://movie.douban.com/tag/',
21 | 'https://movie.douban.com/tag/?view=cloud'
22 | ]
23 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
24 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
25 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews$")),
26 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?start=\d+$")),
27 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_video", follow=True),
28 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
29 | )
30 |
31 | def parse_video(self, response):
32 | item = VideoItem()
33 | try:
34 | item["video_url"] = response.url
35 | item["video_name"] = ''.join(
36 | response.xpath('//*[@id="content"]/h1/span[@property="v:itemreviewed"]/text()').extract())
37 | try:
38 | item["video_year"] = ''.join(
39 | response.xpath('//*[@id="content"]/h1/span[@class="year"]/text()').extract()).replace(
40 | "(", "").replace(")", "")
41 | except Exception as e:
42 | print('Exception:', e)
43 | item['video_year'] = ''
44 |
45 | introduction = response.xpath('//*[@id="link-report"]/span[@property="v:summary"]/text()').extract()
46 | if introduction:
47 | item["video_desc"] = ''.join(introduction).strip().replace("\r\n", " ")
48 | else:
49 | item["video_desc"] = ''.join(
50 | response.xpath('//*[@id="link-report"]/span/text()').extract()).strip().replace("\r\n", " ")
51 |
52 | item["video_director"] = "|".join(
53 | response.xpath('//*[@id="info"]/span/span/a[@rel="v:directedBy"]/text()').extract())
54 | item["video_writer"] = "|".join(
55 | response.xpath('//*[@id="info"]/span[2]/span[2]/a/text()').extract())
56 |
57 | item["video_actor"] = "|".join(response.xpath("//a[@rel='v:starring']/text()").extract())
58 |
59 | item["video_type"] = "|".join(response.xpath('//*[@id="info"]/span[@property="v:genre"]/text()').extract())
60 |
61 | S = "".join(response.xpath("//div[@id='info']").extract())
62 | M = AREA.search(S)
63 | if M is not None:
64 | item["video_area"] = "|".join([area.strip() for area in M.group(1).split("/")])
65 | else:
66 | item['video_area'] = ''
67 |
68 | A = "".join(response.xpath("//div[@id='info']").extract())
69 | AL = ALIAS.search(A)
70 | if AL is not None:
71 | item["video_alias"] = "|".join([alias.strip() for alias in AL.group(1).split("/")])
72 | else:
73 | item["video_alias"] = ""
74 |
75 | video_info = "".join(response.xpath("//div[@id='info']").extract())
76 | language = LANGUAGE.search(video_info)
77 | episodes = EPISODES.search(video_info)
78 | length = LENGTH.search(video_info)
79 |
80 | if language is not None:
81 | item["video_language"] = "|".join([language.strip() for language in language.group(1).split("/")])
82 | else:
83 | item['video_language'] = ''
84 | if length is not None:
85 | item["video_length"] = "|".join([runtime.strip() for runtime in length.group(1).split("/")])
86 | else:
87 | item["video_length"] = "".join(
88 | response.xpath('//*[@id="info"]/span[@property="v:runtime"]/text()').extract())
89 |
90 | item['video_time'] = "/".join(
91 | response.xpath('//*[@id="info"]/span[@property="v:initialReleaseDate"]/text()').extract())
92 | if episodes is not None:
93 | item['video_bigtype'] = "电视剧"
94 | item["video_episodes"] = "|".join([episodes.strip() for episodes in episodes.group(1).split("/")])
95 | else:
96 | item['video_bigtype'] = "电影"
97 | item['video_episodes'] = ''
98 | item['video_tags'] = "|".join(
99 | response.xpath('//*[@class="tags"]/div[@class="tags-body"]/a/text()').extract())
100 |
101 | try:
102 | item['video_rating'] = "".join(response.xpath(
103 | '//*[@class="rating_self clearfix"]/strong/text()').extract())
104 | item['video_votes'] = "".join(response.xpath(
105 | '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract())
106 | except Exception as error:
107 | item['video_rating'] = '0'
108 | item['video_votes'] = '0'
109 | logging.error(error)
110 |
111 | yield item
112 | except Exception as error:
113 | logging.error(error)
114 |
115 | def parse_review(self, response):
116 | try:
117 | item = VideoReviewItem()
118 | item['review_title'] = "".join(response.xpath('//*[@property="v:summary"]/text()').extract())
119 | content = "".join(
120 | response.xpath('//*[@id="link-report"]/div[@property="v:description"]/text()').extract())
121 | item['review_content'] = content.lstrip().rstrip().replace("\n", " ")
122 | item['review_author'] = "".join(response.xpath('//*[@property = "v:reviewer"]/text()').extract())
123 | item['review_video'] = "".join(response.xpath('//*[@class="main-hd"]/a[2]/text()').extract())
124 | item['review_time'] = "".join(response.xpath('//*[@class="main-hd"]/p/text()').extract())
125 | item['review_url'] = response.url
126 | yield item
127 | except Exception as error:
128 | logging.error(error)
129 |
--------------------------------------------------------------------------------
/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.org/en/latest/deploy.html
5 |
6 | [settings]
7 | default = mysql.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = mysql
12 |
--------------------------------------------------------------------------------