├── .gitignore
├── LICENSE
├── README.md
├── multi
│   ├── __init__.py
│   ├── items.py
│   ├── pipelines.py
│   ├── settings.py
│   └── spiders
│       ├── __init__.py
│       ├── musicspider.py
│       ├── run.py
│       └── videospider.py
└── scrapy.cfg
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *.cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 | local_settings.py
55 |
56 | # Flask stuff:
57 | instance/
58 | .webassets-cache
59 |
60 | # Scrapy stuff:
61 | .scrapy
62 |
63 | # Sphinx documentation
64 | docs/_build/
65 |
66 | # PyBuilder
67 | target/
68 |
69 | # IPython Notebook
70 | .ipynb_checkpoints
71 |
72 | # pyenv
73 | .python-version
74 |
75 | # celery beat schedule file
76 | celerybeat-schedule
77 |
78 | # dotenv
79 | .env
80 |
81 | # virtualenv
82 | venv/
83 | ENV/
84 |
85 | # Spyder project settings
86 | .spyderproject
87 |
88 | # Rope project settings
89 | .ropeproject
90 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # An example of combining multiple spiders
2 | [TOC]
3 |
4 | > This chapter builds an example of several spiders working together in one project; the accompanying blog post is at http://blog.csdn.net/yancey_blog/article/details/53895062
5 |
6 | ## Requirement analysis
7 | Suppose we need to crawl music details as well as music reviews, and movie details as well as movie reviews. Does every requirement call for its own project? That would mean four separate projects, one each for music, music reviews, movies, and movie reviews. Clearly, much of the code would be duplicated, and the spiders would be hard to maintain.
8 |
9 | In fact, several spiders can be combined in a single project. Just look at the project's spiders directory: the name is plural, which is a hint in itself.
10 |
11 | The spiders use the tag pages of Douban Music and Douban Movies as their start pages.
12 |
13 |
14 |
15 |
16 |
17 | ## Determining the spider rules
18 | Douban music and music reviews:
19 |
20 | ```python
21 | name = 'music'
22 | allowed_domains = ['music.douban.com']
23 | start_urls = ['https://music.douban.com/tag/',
24 | 'https://music.douban.com/tag/?view=cloud'
25 | ]
26 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
27 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
28 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time$")),
29 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time\&start=\d+$")),
30 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_music", follow=True),
31 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
32 | )
33 | ```
34 |
35 | Douban movies and movie reviews:
36 |
37 | ```python
38 | name = 'video'
39 | allowed_domains = ['movie.douban.com']
40 | start_urls = ['https://movie.douban.com/tag/',
41 | 'https://movie.douban.com/tag/?view=cloud'
42 | ]
43 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
44 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
45 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews$")),
46 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?start=\d+$")),
47 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_video", follow=True),
48 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
49 | )
50 | ```
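Rules without a `callback` exist only so the crawler keeps following links (tag pages and review listings); the two rules with a `callback` and `follow=True` parse subject and review pages while continuing the crawl. As a quick sanity check, the `allow` patterns can be exercised with plain `re` before running anything, since `LinkExtractor` matches them against URLs with `re.search` (the URLs below are just examples):

```python
import re

# the same allow patterns as in the rules above
tag_page = re.compile(r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")
subject = re.compile(r"/subject/\d+/$")
review = re.compile(r"/review/\d+/$")

assert tag_page.search("https://music.douban.com/tag/OST")
assert subject.search("https://music.douban.com/subject/26931992/")
assert review.search("https://movie.douban.com/review/1234567/")
```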
51 |
52 | ## Creating the project
53 | Create the project with the command `scrapy startproject multi`:
54 |
55 | ```shell
56 | MACBOOK:~ yancey$ scrapy startproject multi
57 | New Scrapy project 'multi', using template directory '/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/scrapy/templates/project', created in:
58 | /Users/yancey/multi
59 |
60 | You can start your first spider with:
61 | cd multi
62 | scrapy genspider example example.com
63 | ```
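As the scaffold hint above suggests, the two spiders could also be generated with `scrapy genspider`; the `crawl` template already subclasses `CrawlSpider`, which is what we need here. The spider files are written by hand below, so this is just an alternative:

```shell
cd multi
scrapy genspider -t crawl music music.douban.com
scrapy genspider -t crawl video movie.douban.com
```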
64 | Open the multi project in PyCharm to inspect the directory structure.
65 |
66 |
67 |
68 | Next, rewrite items.py. Browsing Douban's music, music review, movie, and movie review pages shows which fields are available; they map to the items below.
69 |
70 |
71 | ```python
72 | # -*- coding: utf-8 -*-
73 |
74 | # Define here the models for your scraped items
75 | #
76 | # See documentation in:
77 | # http://doc.scrapy.org/en/latest/topics/items.html
78 |
79 | from scrapy import Item, Field
80 |
81 | # Music
82 | class MusicItem(Item):
83 | music_name = Field()
84 | music_alias = Field()
85 | music_singer = Field()
86 | music_time = Field()
87 | music_rating = Field()
88 | music_votes = Field()
89 | music_tags = Field()
90 | music_url = Field()
91 |
92 | # Music review
93 | class MusicReviewItem(Item):
94 | review_title = Field()
95 | review_content = Field()
96 | review_author = Field()
97 | review_music = Field()
98 | review_time = Field()
99 | review_url = Field()
100 |
101 | # Movie
102 | class VideoItem(Item):
103 | video_name = Field()
104 | video_alias = Field()
105 | video_actor = Field()
106 | video_year = Field()
107 | video_time = Field()
108 | video_rating = Field()
109 | video_votes = Field()
110 | video_tags = Field()
111 | video_url = Field()
112 | video_director = Field()
113 | video_type = Field()
114 | video_bigtype = Field()
115 | video_area = Field()
116 | video_language = Field()
117 | video_length = Field()
118 | video_writer = Field()
119 | video_desc = Field()
120 | video_episodes = Field()
121 |
122 | # Movie review
123 | class VideoReviewItem(Item):
124 | review_title = Field()
125 | review_content = Field()
126 | review_author = Field()
127 | review_video = Field()
128 | review_time = Field()
129 | review_url = Field()
130 | ```
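Scrapy `Item` subclasses behave like dicts, with one useful difference: assigning a key that was not declared as a `Field` raises a `KeyError`, which catches typos early. A quick illustration:

```python
from multi.items import MusicItem

item = MusicItem()
item['music_name'] = '十年'
item['music_rating'] = '6.4'
print(dict(item))            # {'music_name': '十年', 'music_rating': '6.4'}

item['music_genre'] = 'pop'  # KeyError: MusicItem does not support field: music_genre
```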
131 |
132 | Create two spiders, musicspider.py and videospider.py. videospider.py comes first; note that its module-level regexes run against the raw HTML of the info div, which is why they reference the </span> and <br> markup:
133 |
134 |
135 | ```python
136 | # coding:utf-8
137 |
138 | from scrapy.spiders import CrawlSpider, Rule
139 | from scrapy.linkextractors import LinkExtractor
140 | from multi.items import VideoItem, VideoReviewItem
141 | # logging: use the spider's built-in self.logger (scrapy.log is deprecated)
142 |
143 | import re
144 |
145 | AREA = re.compile(r"制片国家/地区:</span> (.+?)<br>")
146 | ALIAS = re.compile(r"又名:</span> (.+?)<br>")
147 | LANGUAGE = re.compile(r"语言:</span> (.+?)<br>")
148 | EPISODES = re.compile(r"集数:</span> (.+?)<br>")
149 | LENGTH = re.compile(r"单集片长:</span> (.+?)<br>")
150 |
151 |
152 | class VideoSpider(CrawlSpider):
153 | name = 'video'
154 | allowed_domains = ['movie.douban.com']
155 | start_urls = ['https://movie.douban.com/tag/',
156 | 'https://movie.douban.com/tag/?view=cloud'
157 | ]
158 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
159 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
160 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews$")),
161 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?start=\d+$")),
162 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_video", follow=True),
163 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
164 | )
165 |
166 | def parse_video(self, response):
167 | item = VideoItem()
168 | try:
169 | item["video_url"] = response.url
170 | item["video_name"] = ''.join(
171 | response.xpath('//*[@id="content"]/h1/span[@property="v:itemreviewed"]/text()').extract())
172 | try:
173 | item["video_year"] = ''.join(
174 | response.xpath('//*[@id="content"]/h1/span[@class="year"]/text()').extract()).replace(
175 | "(", "").replace(")", "")
176 | except Exception as e:
177 | print('Exception:', e)
178 | item['video_year'] = ''
179 |
180 | introduction = response.xpath('//*[@id="link-report"]/span[@property="v:summary"]/text()').extract()
181 | if introduction:
182 | item["video_desc"] = ''.join(introduction).strip().replace("\r\n", " ")
183 | else:
184 | item["video_desc"] = ''.join(
185 | response.xpath('//*[@id="link-report"]/span/text()').extract()).strip().replace("\r\n", " ")
186 |
187 | item["video_director"] = "|".join(
188 | response.xpath('//*[@id="info"]/span/span/a[@rel="v:directedBy"]/text()').extract())
189 | item["video_writer"] = "|".join(
190 | response.xpath('//*[@id="info"]/span[2]/span[2]/a/text()').extract())
191 |
192 | item["video_actor"] = "|".join(response.xpath("//a[@rel='v:starring']/text()").extract())
193 |
194 | item["video_type"] = "|".join(response.xpath('//*[@id="info"]/span[@property="v:genre"]/text()').extract())
195 |
196 | S = "".join(response.xpath("//div[@id='info']").extract())
197 | M = AREA.search(S)
198 | if M is not None:
199 | item["video_area"] = "|".join([area.strip() for area in M.group(1).split("/")])
200 | else:
201 | item['video_area'] = ''
202 |
203 |             A = S  # the info div HTML was already extracted above
204 | AL = ALIAS.search(A)
205 | if AL is not None:
206 | item["video_alias"] = "|".join([alias.strip() for alias in AL.group(1).split("/")])
207 | else:
208 | item["video_alias"] = ""
209 |
210 |             video_info = S
211 | language = LANGUAGE.search(video_info)
212 | episodes = EPISODES.search(video_info)
213 | length = LENGTH.search(video_info)
214 |
215 | if language is not None:
216 |                 item["video_language"] = "|".join([lang.strip() for lang in language.group(1).split("/")])
217 | else:
218 | item['video_language'] = ''
219 | if length is not None:
220 | item["video_length"] = "|".join([runtime.strip() for runtime in length.group(1).split("/")])
221 | else:
222 | item["video_length"] = "".join(
223 | response.xpath('//*[@id="info"]/span[@property="v:runtime"]/text()').extract())
224 |
225 | item['video_time'] = "/".join(
226 | response.xpath('//*[@id="info"]/span[@property="v:initialReleaseDate"]/text()').extract())
227 | if episodes is not None:
228 | item['video_bigtype'] = "电视剧"
229 |                 item["video_episodes"] = "|".join([ep.strip() for ep in episodes.group(1).split("/")])
230 | else:
231 | item['video_bigtype'] = "电影"
232 | item['video_episodes'] = ''
233 | item['video_tags'] = "|".join(
234 | response.xpath('//*[@class="tags"]/div[@class="tags-body"]/a/text()').extract())
235 |
236 | try:
237 | item['video_rating'] = "".join(response.xpath(
238 | '//*[@class="rating_self clearfix"]/strong/text()').extract())
239 | item['video_votes'] = "".join(response.xpath(
240 | '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract())
241 | except Exception as error:
242 | item['video_rating'] = '0'
243 | item['video_votes'] = '0'
244 |                 self.logger.error(error)
245 |
246 | yield item
247 | except Exception as error:
248 |             self.logger.error(error)
249 |
250 | def parse_review(self, response):
251 | try:
252 | item = VideoReviewItem()
253 | item['review_title'] = "".join(response.xpath('//*[@property="v:summary"]/text()').extract())
254 | content = "".join(
255 | response.xpath('//*[@id="link-report"]/div[@property="v:description"]/text()').extract())
256 | item['review_content'] = content.lstrip().rstrip().replace("\n", " ")
257 | item['review_author'] = "".join(response.xpath('//*[@property = "v:reviewer"]/text()').extract())
258 | item['review_video'] = "".join(response.xpath('//*[@class="main-hd"]/a[2]/text()').extract())
259 | item['review_time'] = "".join(response.xpath('//*[@class="main-hd"]/p/text()').extract())
260 | item['review_url'] = response.url
261 | yield item
262 | except Exception as error:
263 |             self.logger.error(error)
264 | ```
265 | And musicspider.py:
266 | ```python
267 | # coding:utf-8
268 |
269 | from scrapy.spiders import CrawlSpider, Rule
270 | from scrapy.linkextractors import LinkExtractor
271 | from multi.items import MusicItem, MusicReviewItem
272 | # logging: use the spider's built-in self.logger (scrapy.log is deprecated)
273 |
274 | import re
275 |
276 |
277 | class MusicSpider(CrawlSpider):
278 | name = 'music'
279 | allowed_domains = ['music.douban.com']
280 | start_urls = ['https://music.douban.com/tag/',
281 | 'https://music.douban.com/tag/?view=cloud'
282 | ]
283 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
284 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
285 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time$")),
286 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time\&start=\d+$")),
287 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_music", follow=True),
288 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
289 | )
290 |
291 | def parse_music(self, response):
292 | item = MusicItem()
293 | try:
294 | item['music_name'] = response.xpath('//*[@id="wrapper"]/h1/span/text()').extract()[0]
295 | content = "".join(response.xpath('//*[@id="info"]').extract())
296 | info = response.xpath('//*[@id="info"]/span').extract()
297 | item['music_alias'] = ""
298 | item['music_singer'] = ""
299 | item['music_time'] = ""
300 | for i in range(0, len(info)):
301 | if "又名" in info[i]:
302 | if i == 0:
303 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[1] \
304 | .replace("\xa0", "").replace("\n", "").rstrip()
305 | elif i == 1:
306 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[2] \
307 | .replace("\xa0", "").replace("\n", "").rstrip()
308 | elif i == 2:
309 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[3] \
310 | .replace("\xa0", "").replace("\n", "").rstrip()
311 |
312 | else:
313 | item['music_alias'] = ""
314 | # break
315 | if "表演者" in info[i]:
316 | if i == 0:
317 | item['music_singer'] = "|".join(
318 | response.xpath('//*[@id="info"]/span[1]/span/a/text()').extract())
319 | elif i == 1:
320 | item['music_singer'] = "|".join(
321 | response.xpath('//*[@id="info"]/span[2]/span/a/text()').extract())
322 | elif i == 2:
323 | item['music_singer'] = "|".join(
324 | response.xpath('//*[@id="info"]/span[3]/span/a/text()').extract())
325 | else:
326 | item['music_singer'] = ""
327 | # break
328 | if "发行时间" in info[i]:
329 |                 nbsp = re.findall(r"发行时间:</span>(.*?)<br>", content, re.S)
330 | item['music_time'] = "".join(nbsp).replace("\xa0", "").replace("\n", "").replace(" ", "")
331 | # break
332 | try:
333 | item['music_rating'] = "".join(response.xpath(
334 | '//*[@class="rating_self clearfix"]/strong/text()').extract())
335 | item['music_votes'] = "".join(response.xpath(
336 | '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract())
337 | except Exception as error:
338 | item['music_rating'] = '0'
339 | item['music_votes'] = '0'
340 |                 self.logger.error(error)
341 | item['music_tags'] = "|".join(response.xpath('//*[@id="db-tags-section"]/div/a/text()').extract())
342 | item['music_url'] = response.url
343 | yield item
344 | except Exception as error:
345 |             self.logger.error(error)
346 |
347 | def parse_review(self, response):
348 | try:
349 | item = MusicReviewItem()
350 | item['review_title'] = "".join(response.xpath('//*[@property="v:summary"]/text()').extract())
351 | content = "".join(
352 | response.xpath('//*[@id="link-report"]/div[@property="v:description"]/text()').extract())
353 | item['review_content'] = content.lstrip().rstrip().replace("\n", " ")
354 | item['review_author'] = "".join(response.xpath('//*[@property = "v:reviewer"]/text()').extract())
355 | item['review_music'] = "".join(response.xpath('//*[@class="main-hd"]/a[2]/text()').extract())
356 | item['review_time'] = "".join(response.xpath('//*[@class="main-hd"]/p/text()').extract())
357 | item['review_url'] = response.url
358 | yield item
359 | except Exception as error:
360 |             self.logger.error(error)
361 | ```
362 | I will come back to the XPath parsing used in these spiders later on.
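In the meantime, `scrapy shell` is the quickest way to experiment with these selectors yourself; run it from the project directory so the project settings (notably `USER_AGENT`) are picked up. The subject URL is just an example taken from the crawl output below:

```shell
scrapy shell "https://music.douban.com/subject/26931992/"
>>> response.xpath('//*[@id="wrapper"]/h1/span/text()').extract_first()
'十年'
```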
363 |
364 | Configure settings.py:
365 |
366 | ```python
367 | # -*- coding: utf-8 -*-
368 |
369 | BOT_NAME = 'multi'
370 | SPIDER_MODULES = ['multi.spiders']
371 | NEWSPIDER_MODULE = 'multi.spiders'
372 | USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
373 | DOWNLOAD_DELAY = 2
374 | # Obey robots.txt rules (left at the Scrapy default here, matching multi/settings.py and the run log below)
375 | # ROBOTSTXT_OBEY = True
376 | ```
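Items are not persisted anywhere yet: multi/pipelines.py is still the default stub and `ITEM_PIPELINES` is left commented out, so scraped items only show up in the log. If you want to store them, a minimal JSON-lines pipeline could look like the sketch below (the class name and output file naming are my own choice); enable it with `ITEM_PIPELINES = {'multi.pipelines.JsonLinesPipeline': 300}`:

```python
# a minimal sketch: one JSON object per line, one file per spider,
# so music and video items end up in separate files
import json


class JsonLinesPipeline(object):
    def open_spider(self, spider):
        self.file = open('%s_items.jl' % spider.name, 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item
```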
377 |
378 | Create run.py:
379 |
380 | ```python
381 | # coding:utf-8
382 |
383 | from scrapy import cmdline
384 | cmdline.execute("scrapy crawl music".split())
385 | cmdline.execute("scrapy crawl video".split())
386 |
387 | ```
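One caveat about this script: `cmdline.execute()` hands control to Scrapy and exits the process when the crawl finishes, so the second call is never reached and only the music spider actually runs (the sample output below confirms this). To drive both spiders from a single script, Scrapy's `CrawlerProcess` can be used; a sketch (the file name run_both.py is just a suggestion):

```python
# run_both.py -- run both spiders in one process (a sketch)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from multi.spiders.musicspider import MusicSpider
from multi.spiders.videospider import VideoSpider

process = CrawlerProcess(get_project_settings())
process.crawl(MusicSpider)
process.crawl(VideoSpider)
process.start()  # blocks here until both crawls finish
```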
388 | ## Running the spiders
389 | Run run.py in PyCharm; the output looks like this:
390 |
391 | ```shell
392 | /Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5 /Users/yancey/multi/multi/spiders/run.py
397 | 2016-12-27 13:53:37 [scrapy] INFO: Scrapy 1.2.0 started (bot: multi)
398 | 2016-12-27 13:53:37 [scrapy] INFO: Overridden settings: {'DOWNLOAD_DELAY': 2, 'BOT_NAME': 'multi', 'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36', 'SPIDER_MODULES': ['multi.spiders'], 'NEWSPIDER_MODULE': 'multi.spiders'}
399 | 2016-12-27 13:53:37 [scrapy] INFO: Enabled extensions:
400 | ['scrapy.extensions.corestats.CoreStats', 'scrapy.extensions.logstats.LogStats']
401 | 2016-12-27 13:53:37 [scrapy] INFO: Enabled downloader middlewares:
402 | ['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
403 | 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
404 | 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
405 | 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
406 | 'scrapy.downloadermiddlewares.retry.RetryMiddleware',
407 | 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
408 | 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
409 | 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
410 | 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
411 | 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware',
412 | 'scrapy.downloadermiddlewares.stats.DownloaderStats']
413 | 2016-12-27 13:53:37 [scrapy] INFO: Enabled spider middlewares:
414 | ['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
415 | 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
416 | 'scrapy.spidermiddlewares.referer.RefererMiddleware',
417 | 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
418 | 'scrapy.spidermiddlewares.depth.DepthMiddleware']
419 | 2016-12-27 13:53:37 [scrapy] INFO: Enabled item pipelines:
420 | []
421 | 2016-12-27 13:53:37 [scrapy] INFO: Spider opened
422 | 2016-12-27 13:53:37 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
423 | 2016-12-27 13:53:39 [scrapy] DEBUG: Crawled (200) (referer: None)
424 | 2016-12-27 13:53:41 [scrapy] DEBUG: Crawled (200) (referer: None)
425 | 2016-12-27 13:53:41 [scrapy] DEBUG: Filtered duplicate request: - no more duplicates will be shown (see DUPEFILTER_DEBUG to show all duplicates)
426 | 2016-12-27 13:53:42 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
427 | 2016-12-27 13:53:43 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
428 | 2016-12-27 13:53:46 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
429 | 2016-12-27 13:53:48 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
430 | 2016-12-27 13:53:51 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
431 | 2016-12-27 13:53:52 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
432 | 2016-12-27 13:53:55 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
433 | 2016-12-27 13:53:56 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
434 | 2016-12-27 13:53:59 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
435 | 2016-12-27 13:54:02 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
436 | 2016-12-27 13:54:03 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
437 | 2016-12-27 13:54:05 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
438 | 2016-12-27 13:54:08 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
439 | 2016-12-27 13:54:10 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
440 | 2016-12-27 13:54:11 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
441 | 2016-12-27 13:54:14 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/)
442 | 2016-12-27 13:54:16 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/?view=cloud)
443 | 2016-12-27 13:54:19 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/OST)
444 | 2016-12-27 13:54:19 [scrapy] DEBUG: Scraped from <200 https://music.douban.com/subject/26931992/>
445 | {'music_alias': '电影《摆渡人》岁月版主题曲',
446 | 'music_name': '十年',
447 | 'music_rating': '6.4',
448 | 'music_singer': '梁朝伟|李宇春',
449 | 'music_tags': '梁朝伟|李宇春|十年|OST|单曲|2016|电影歌曲|华语',
450 | 'music_time': '2016-12-15',
451 | 'music_url': 'https://music.douban.com/subject/26931992/',
452 | 'music_votes': '231'}
453 | 2016-12-27 13:54:19 [scrapy] DEBUG: Filtered offsite request to 'movie.douban.com':
454 | 2016-12-27 13:54:21 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/classical)
455 | 2016-12-27 13:54:21 [scrapy] DEBUG: Scraped from <200 https://music.douban.com/subject/3698231/>
456 | {'music_alias': 'Bach: Cantatas/Handel: Arias',
457 | 'music_name': 'The Art of Alfred Deller (Alfred Deller Edition, No. 7)',
458 | 'music_rating': '8.7',
459 | 'music_singer': 'Alfred Deller',
460 | 'music_tags': 'Bach|Cantatas|countertenor|classical|Deller|Handel|baroque|古典',
461 | 'music_time': '1997-6-24',
462 | 'music_url': 'https://music.douban.com/subject/3698231/',
463 | 'music_votes': '27'}
464 | 2016-12-27 13:54:23 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/Alternative)
465 | 2016-12-27 13:54:23 [scrapy] DEBUG: Scraped from <200 https://music.douban.com/subject/26842972/>
466 | {'music_alias': '',
467 | 'music_name': 'Skiptracing',
468 | 'music_rating': '7.9',
469 | 'music_singer': 'Mild High Club',
470 | 'music_tags': 'Alternative|2016|MildHighClub|Electronic|US|美国|Adult-Contemporary|AOR',
471 | 'music_time': '2016-09-02',
472 | 'music_url': 'https://music.douban.com/subject/26842972/',
473 | 'music_votes': '29'}
474 | 2016-12-27 13:54:25 [scrapy] DEBUG: Crawled (200) (referer: https://music.douban.com/tag/Metal)
475 | 2016-12-27 13:54:25 [scrapy] DEBUG: Scraped from <200 https://music.douban.com/subject/5404211/>
476 | {'music_alias': '',
477 | 'music_name': 'Surtur Rising',
478 | 'music_rating': '8.5',
479 | 'music_singer': 'Amon Amarth',
480 | 'music_tags': 'Melodic-Death-Metal|Viking-Metal|瑞典|2011|Metal|VikingMetal|Sweden|Amon_Amarth',
481 | 'music_time': '2011-03-23',
482 | 'music_url': 'https://music.douban.com/subject/5404211/',
483 | 'music_votes': '173'}
484 | ... ...
485 | ```
486 |
--------------------------------------------------------------------------------
/multi/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yanceyblog/scrapy-multi/b31ed8907f7b9e26fc650389ea04399ebfb27b70/multi/__init__.py
--------------------------------------------------------------------------------
/multi/items.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define here the models for your scraped items
4 | #
5 | # See documentation in:
6 | # http://doc.scrapy.org/en/latest/topics/items.html
7 |
8 | from scrapy import Item, Field
9 |
10 |
11 | # Music
12 | class MusicItem(Item):
13 | music_name = Field()
14 | music_alias = Field()
15 | music_singer = Field()
16 | music_time = Field()
17 | music_rating = Field()
18 | music_votes = Field()
19 | music_tags = Field()
20 | music_url = Field()
21 |
22 |
23 | # Music review
24 | class MusicReviewItem(Item):
25 | review_title = Field()
26 | review_content = Field()
27 | review_author = Field()
28 | review_music = Field()
29 | review_time = Field()
30 | review_url = Field()
31 |
32 |
33 | # Video
34 | class VideoItem(Item):
35 | video_name = Field()
36 | video_alias = Field()
37 | video_actor = Field()
38 | video_year = Field()
39 | video_time = Field()
40 | video_rating = Field()
41 | video_votes = Field()
42 | video_tags = Field()
43 | video_url = Field()
44 | video_director = Field()
45 | video_type = Field()
46 | video_bigtype = Field()
47 | video_area = Field()
48 | video_language = Field()
49 | video_length = Field()
50 | video_writer = Field()
51 | video_desc = Field()
52 | video_episodes = Field()
53 |
54 |
55 | # Movie review
56 | class VideoReviewItem(Item):
57 | review_title = Field()
58 | review_content = Field()
59 | review_author = Field()
60 | review_video = Field()
61 | review_time = Field()
62 | review_url = Field()
63 |
--------------------------------------------------------------------------------
/multi/pipelines.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Define your item pipelines here
4 | #
5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 | # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
7 |
8 |
9 | class MultiPipeline(object):
10 | def process_item(self, item, spider):
11 | return item
12 |
--------------------------------------------------------------------------------
/multi/settings.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Scrapy settings for multi project
4 | #
5 | # For simplicity, this file contains only settings considered important or
6 | # commonly used. You can find more settings consulting the documentation:
7 | #
8 | # http://doc.scrapy.org/en/latest/topics/settings.html
9 | # http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
10 | # http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
11 |
12 | BOT_NAME = 'multi'
13 |
14 | SPIDER_MODULES = ['multi.spiders']
15 | NEWSPIDER_MODULE = 'multi.spiders'
16 |
17 | USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
18 | DOWNLOAD_DELAY = 2
19 | # Obey robots.txt rules
20 | # ROBOTSTXT_OBEY = True
21 |
22 | # Configure maximum concurrent requests performed by Scrapy (default: 16)
23 | #CONCURRENT_REQUESTS = 32
24 |
25 | # Configure a delay for requests for the same website (default: 0)
26 | # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
27 | # See also autothrottle settings and docs
28 | #DOWNLOAD_DELAY = 3
29 | # The download delay setting will honor only one of:
30 | #CONCURRENT_REQUESTS_PER_DOMAIN = 16
31 | #CONCURRENT_REQUESTS_PER_IP = 16
32 |
33 | # Disable cookies (enabled by default)
34 | #COOKIES_ENABLED = False
35 |
36 | # Disable Telnet Console (enabled by default)
37 | #TELNETCONSOLE_ENABLED = False
38 |
39 | # Override the default request headers:
40 | #DEFAULT_REQUEST_HEADERS = {
41 | # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
42 | # 'Accept-Language': 'en',
43 | #}
44 |
45 | # Enable or disable spider middlewares
46 | # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
47 | #SPIDER_MIDDLEWARES = {
48 | # 'multi.middlewares.MyCustomSpiderMiddleware': 543,
49 | #}
50 |
51 | # Enable or disable downloader middlewares
52 | # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
53 | #DOWNLOADER_MIDDLEWARES = {
54 | # 'multi.middlewares.MyCustomDownloaderMiddleware': 543,
55 | #}
56 |
57 | # Enable or disable extensions
58 | # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
59 | #EXTENSIONS = {
60 | # 'scrapy.extensions.telnet.TelnetConsole': None,
61 | #}
62 |
63 | # Configure item pipelines
64 | # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
65 | #ITEM_PIPELINES = {
66 | # 'multi.pipelines.SomePipeline': 300,
67 | #}
68 |
69 | # Enable and configure the AutoThrottle extension (disabled by default)
70 | # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
71 | #AUTOTHROTTLE_ENABLED = True
72 | # The initial download delay
73 | #AUTOTHROTTLE_START_DELAY = 5
74 | # The maximum download delay to be set in case of high latencies
75 | #AUTOTHROTTLE_MAX_DELAY = 60
76 | # The average number of requests Scrapy should be sending in parallel to
77 | # each remote server
78 | #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
79 | # Enable showing throttling stats for every response received:
80 | #AUTOTHROTTLE_DEBUG = False
81 |
82 | # Enable and configure HTTP caching (disabled by default)
83 | # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
84 | #HTTPCACHE_ENABLED = True
85 | #HTTPCACHE_EXPIRATION_SECS = 0
86 | #HTTPCACHE_DIR = 'httpcache'
87 | #HTTPCACHE_IGNORE_HTTP_CODES = []
88 | #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
89 |
--------------------------------------------------------------------------------
/multi/spiders/__init__.py:
--------------------------------------------------------------------------------
1 | # This package will contain the spiders of your Scrapy project
2 | #
3 | # Please refer to the documentation for information on how to create and manage
4 | # your spiders.
5 |
--------------------------------------------------------------------------------
/multi/spiders/musicspider.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | from scrapy.spiders import CrawlSpider, Rule
4 | from scrapy.linkextractors import LinkExtractor
5 | from multi.items import MusicItem, MusicReviewItem
6 | # logging: use the spider's built-in self.logger (scrapy.log is deprecated)
7 |
8 | import re
9 |
10 |
11 | class MusicSpider(CrawlSpider):
12 | name = 'music'
13 | allowed_domains = ['music.douban.com']
14 | start_urls = ['https://music.douban.com/tag/',
15 | 'https://music.douban.com/tag/?view=cloud'
16 | ]
17 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
18 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
19 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time$")),
20 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?sort=time\&start=\d+$")),
21 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_music", follow=True),
22 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
23 | )
24 |
25 | def parse_music(self, response):
26 | item = MusicItem()
27 | try:
28 | item['music_name'] = response.xpath('//*[@id="wrapper"]/h1/span/text()').extract()[0]
29 | content = "".join(response.xpath('//*[@id="info"]').extract())
30 | info = response.xpath('//*[@id="info"]/span').extract()
31 | item['music_alias'] = ""
32 | item['music_singer'] = ""
33 | item['music_time'] = ""
34 | for i in range(0, len(info)):
35 | if "又名" in info[i]:
36 | if i == 0:
37 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[1] \
38 | .replace("\xa0", "").replace("\n", "").rstrip()
39 | elif i == 1:
40 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[2] \
41 | .replace("\xa0", "").replace("\n", "").rstrip()
42 | elif i == 2:
43 | item['music_alias'] = response.xpath('//*[@id="info"]/text()').extract()[3] \
44 | .replace("\xa0", "").replace("\n", "").rstrip()
45 |
46 | else:
47 | item['music_alias'] = ""
48 | # break
49 | if "表演者" in info[i]:
50 | if i == 0:
51 | item['music_singer'] = "|".join(
52 | response.xpath('//*[@id="info"]/span[1]/span/a/text()').extract())
53 | elif i == 1:
54 | item['music_singer'] = "|".join(
55 | response.xpath('//*[@id="info"]/span[2]/span/a/text()').extract())
56 | elif i == 2:
57 | item['music_singer'] = "|".join(
58 | response.xpath('//*[@id="info"]/span[3]/span/a/text()').extract())
59 | else:
60 | item['music_singer'] = ""
61 | # break
62 | if "发行时间" in info[i]:
63 |                 nbsp = re.findall(r"发行时间:</span>(.*?)<br>", content, re.S)
64 | item['music_time'] = "".join(nbsp).replace("\xa0", "").replace("\n", "").replace(" ", "")
65 | # break
66 | try:
67 | item['music_rating'] = "".join(response.xpath(
68 | '//*[@class="rating_self clearfix"]/strong/text()').extract())
69 | item['music_votes'] = "".join(response.xpath(
70 | '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract())
71 | except Exception as error:
72 | item['music_rating'] = '0'
73 | item['music_votes'] = '0'
74 |                 self.logger.error(error)
75 | item['music_tags'] = "|".join(response.xpath('//*[@id="db-tags-section"]/div/a/text()').extract())
76 | item['music_url'] = response.url
77 | yield item
78 | except Exception as error:
79 |             self.logger.error(error)
80 |
81 | def parse_review(self, response):
82 | try:
83 | item = MusicReviewItem()
84 | item['review_title'] = "".join(response.xpath('//*[@property="v:summary"]/text()').extract())
85 | content = "".join(
86 | response.xpath('//*[@id="link-report"]/div[@property="v:description"]/text()').extract())
87 | item['review_content'] = content.lstrip().rstrip().replace("\n", " ")
88 | item['review_author'] = "".join(response.xpath('//*[@property = "v:reviewer"]/text()').extract())
89 | item['review_music'] = "".join(response.xpath('//*[@class="main-hd"]/a[2]/text()').extract())
90 | item['review_time'] = "".join(response.xpath('//*[@class="main-hd"]/p/text()').extract())
91 | item['review_url'] = response.url
92 | yield item
93 | except Exception as error:
94 |             self.logger.error(error)
95 |
--------------------------------------------------------------------------------
/multi/spiders/run.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 |
4 | from scrapy import cmdline
5 |
6 | cmdline.execute("scrapy crawl music".split())
7 | cmdline.execute("scrapy crawl video".split())
8 |
--------------------------------------------------------------------------------
/multi/spiders/videospider.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | from scrapy.spiders import CrawlSpider, Rule
4 | from scrapy.linkextractors import LinkExtractor
5 | from multi.items import VideoItem, VideoReviewItem
6 | # logging: use the spider's built-in self.logger (scrapy.log is deprecated)
7 |
8 | import re
9 |
10 | AREA = re.compile(r"制片国家/地区:</span> (.+?)<br>")
11 | ALIAS = re.compile(r"又名:</span> (.+?)<br>")
12 | LANGUAGE = re.compile(r"语言:</span> (.+?)<br>")
13 | EPISODES = re.compile(r"集数:</span> (.+?)<br>")
14 | LENGTH = re.compile(r"单集片长:</span> (.+?)<br>")
15 |
16 |
17 | class VideoSpider(CrawlSpider):
18 | name = 'video'
19 | allowed_domains = ['movie.douban.com']
20 | start_urls = ['https://movie.douban.com/tag/',
21 | 'https://movie.douban.com/tag/?view=cloud'
22 | ]
23 | rules = (Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))$")),
24 | Rule(LinkExtractor(allow=r"/tag/((\d+)|([\u4e00-\u9fa5]+)|(\w+))\?start=\d+\&type=T$")),
25 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews$")),
26 | Rule(LinkExtractor(allow=r"/subject/\d+/reviews\?start=\d+$")),
27 | Rule(LinkExtractor(allow=r"/subject/\d+/$"), callback="parse_video", follow=True),
28 | Rule(LinkExtractor(allow=r"/review/\d+/$"), callback="parse_review", follow=True),
29 | )
30 |
31 | def parse_video(self, response):
32 | item = VideoItem()
33 | try:
34 | item["video_url"] = response.url
35 | item["video_name"] = ''.join(
36 | response.xpath('//*[@id="content"]/h1/span[@property="v:itemreviewed"]/text()').extract())
37 | try:
38 | item["video_year"] = ''.join(
39 | response.xpath('//*[@id="content"]/h1/span[@class="year"]/text()').extract()).replace(
40 | "(", "").replace(")", "")
41 | except Exception as e:
42 | print('Exception:', e)
43 | item['video_year'] = ''
44 |
45 | introduction = response.xpath('//*[@id="link-report"]/span[@property="v:summary"]/text()').extract()
46 | if introduction:
47 | item["video_desc"] = ''.join(introduction).strip().replace("\r\n", " ")
48 | else:
49 | item["video_desc"] = ''.join(
50 | response.xpath('//*[@id="link-report"]/span/text()').extract()).strip().replace("\r\n", " ")
51 |
52 | item["video_director"] = "|".join(
53 | response.xpath('//*[@id="info"]/span/span/a[@rel="v:directedBy"]/text()').extract())
54 | item["video_writer"] = "|".join(
55 | response.xpath('//*[@id="info"]/span[2]/span[2]/a/text()').extract())
56 |
57 | item["video_actor"] = "|".join(response.xpath("//a[@rel='v:starring']/text()").extract())
58 |
59 | item["video_type"] = "|".join(response.xpath('//*[@id="info"]/span[@property="v:genre"]/text()').extract())
60 |
61 | S = "".join(response.xpath("//div[@id='info']").extract())
62 | M = AREA.search(S)
63 | if M is not None:
64 | item["video_area"] = "|".join([area.strip() for area in M.group(1).split("/")])
65 | else:
66 | item['video_area'] = ''
67 |
68 |             A = S  # the info div HTML was already extracted above
69 | AL = ALIAS.search(A)
70 | if AL is not None:
71 | item["video_alias"] = "|".join([alias.strip() for alias in AL.group(1).split("/")])
72 | else:
73 | item["video_alias"] = ""
74 |
75 |             video_info = S
76 | language = LANGUAGE.search(video_info)
77 | episodes = EPISODES.search(video_info)
78 | length = LENGTH.search(video_info)
79 |
80 | if language is not None:
81 |                 item["video_language"] = "|".join([lang.strip() for lang in language.group(1).split("/")])
82 | else:
83 | item['video_language'] = ''
84 | if length is not None:
85 | item["video_length"] = "|".join([runtime.strip() for runtime in length.group(1).split("/")])
86 | else:
87 | item["video_length"] = "".join(
88 | response.xpath('//*[@id="info"]/span[@property="v:runtime"]/text()').extract())
89 |
90 | item['video_time'] = "/".join(
91 | response.xpath('//*[@id="info"]/span[@property="v:initialReleaseDate"]/text()').extract())
92 | if episodes is not None:
93 | item['video_bigtype'] = "电视剧"
94 |                 item["video_episodes"] = "|".join([ep.strip() for ep in episodes.group(1).split("/")])
95 | else:
96 | item['video_bigtype'] = "电影"
97 | item['video_episodes'] = ''
98 | item['video_tags'] = "|".join(
99 | response.xpath('//*[@class="tags"]/div[@class="tags-body"]/a/text()').extract())
100 |
101 | try:
102 | item['video_rating'] = "".join(response.xpath(
103 | '//*[@class="rating_self clearfix"]/strong/text()').extract())
104 | item['video_votes'] = "".join(response.xpath(
105 | '//*[@class="rating_self clearfix"]/div/div[@class="rating_sum"]/a/span/text()').extract())
106 | except Exception as error:
107 | item['video_rating'] = '0'
108 | item['video_votes'] = '0'
109 |                 self.logger.error(error)
110 |
111 | yield item
112 | except Exception as error:
113 |             self.logger.error(error)
114 |
115 | def parse_review(self, response):
116 | try:
117 | item = VideoReviewItem()
118 | item['review_title'] = "".join(response.xpath('//*[@property="v:summary"]/text()').extract())
119 | content = "".join(
120 | response.xpath('//*[@id="link-report"]/div[@property="v:description"]/text()').extract())
121 | item['review_content'] = content.lstrip().rstrip().replace("\n", " ")
122 | item['review_author'] = "".join(response.xpath('//*[@property = "v:reviewer"]/text()').extract())
123 | item['review_video'] = "".join(response.xpath('//*[@class="main-hd"]/a[2]/text()').extract())
124 | item['review_time'] = "".join(response.xpath('//*[@class="main-hd"]/p/text()').extract())
125 | item['review_url'] = response.url
126 | yield item
127 | except Exception as error:
128 |             self.logger.error(error)
129 |
--------------------------------------------------------------------------------
/scrapy.cfg:
--------------------------------------------------------------------------------
1 | # Automatically created by: scrapy startproject
2 | #
3 | # For more information about the [deploy] section see:
4 | # https://scrapyd.readthedocs.org/en/latest/deploy.html
5 |
6 | [settings]
7 | default = multi.settings
8 |
9 | [deploy]
10 | #url = http://localhost:6800/
11 | project = multi
12 |
--------------------------------------------------------------------------------