├── program
│   ├── __pycache__
│   │   ├── Conn.cpython-35.pyc
│   │   ├── Prelogin.cpython-35.pyc
│   │   ├── Sipder.cpython-35.pyc
│   │   ├── Spider.cpython-35.pyc
│   │   ├── __init__.cpython-35.pyc
│   │   └── logfile.cpython-35.pyc
│   ├── __init__.py
│   ├── logfile.py
│   ├── Conn.py
│   ├── main.py
│   ├── Prelogin.py
│   └── Spider.py
├── __init__.py
├── .idea
│   ├── misc.xml
│   ├── modules.xml
│   ├── phone_data.iml
│   └── workspace.xml
└── README.md
/program/__pycache__/Conn.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DWJWendy/Weibo_Spider/HEAD/program/__pycache__/Conn.cpython-35.pyc
--------------------------------------------------------------------------------
/program/__pycache__/Prelogin.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DWJWendy/Weibo_Spider/HEAD/program/__pycache__/Prelogin.cpython-35.pyc
--------------------------------------------------------------------------------
/program/__pycache__/Sipder.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DWJWendy/Weibo_Spider/HEAD/program/__pycache__/Sipder.cpython-35.pyc
--------------------------------------------------------------------------------
/program/__pycache__/Spider.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DWJWendy/Weibo_Spider/HEAD/program/__pycache__/Spider.cpython-35.pyc
--------------------------------------------------------------------------------
/program/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DWJWendy/Weibo_Spider/HEAD/program/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/program/__pycache__/logfile.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DWJWendy/Weibo_Spider/HEAD/program/__pycache__/logfile.cpython-35.pyc
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | """
4 | @author:毛毛虫_Wendy
5 | @license:(c) Copyright 2017-
6 | @contact:dengwenjun818@gmail.com
7 | @file:__init__.py
8 | @time:18-1-30 1:34 PM
9 | """
--------------------------------------------------------------------------------
/program/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | """
4 | @author:毛毛虫_Wendy
5 | @license:(C) Copyright 2017-
6 | @contact:dengwenjun@gmail.com
7 | @file:__init__.py
8 | @time:10/26/17 5:35 PM
9 | """
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Weibo_Spider
3 | #### Weibo crawler
4 |
5 | Scraped fields: post text, account nickname, publish time, like count, repost count, comment count. Storage: scraped items are written to a MongoDB database. Note: only the IDs of the Weibo accounts need to be changed.
6 |
7 | Updated: 2017.11.18
8 |
9 | Questions: contact dengwenjun818@gmail.com
10 |
11 |
12 | #### Added scheduled (periodic) crawling
13 |
14 | Updated: 2018.1.30
15 |
16 | ### Environment
17 | ***Python 3.6 + MongoDB***
18 |
19 | 1. Fill in your own Weibo account name
20 |
21 | 2. Collect the IDs of the Weibo accounts you want to crawl
22 |
23 | 3. Run main.py directly (see the sketch after this file)
24 |
--------------------------------------------------------------------------------
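As a sketch of the three README steps: the login placeholders and the ID list live in program/Spider.py, and main.py wraps the crawl in a daily scheduler. The snippet below (a hypothetical run_once.py; none of its values come from this repository) is a single crawl pass equivalent to domain() in main.py, without the scheduler. For the scheduled daily crawl, run main.py as the README describes.

```python
# run_once.py -- hypothetical helper: one crawl pass, no 24-hour scheduler
from program.Spider import Weibo_Spider   # logging in happens when the class is defined
from program.Prelogin import getData

spider = Weibo_Spider()                   # builds ID_page_num and ID_urls for every account ID
for account_index, urls in spider.ID_urls.items():
    for url in urls:
        # fetch each mbloglist chunk and store the parsed posts in MongoDB
        spider.get_content(text=getData(url))
```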
/.idea/phone_data.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/program/logfile.py:
--------------------------------------------------------------------------------
1 | # -*- encoding:utf-8 -*-
2 | import logging
3 |
4 |
5 |
6 | # create a logger
7 | logger = logging.getLogger('logging')
8 | logger.setLevel(logging.DEBUG)
9 |
10 | # create a handler that writes to the log file
11 | fh = logging.FileHandler('classification.log')
12 | fh.setLevel(logging.DEBUG)
13 |
14 | # create a second handler that writes to the console
15 | ch = logging.StreamHandler()
16 | ch.setLevel(logging.DEBUG)
17 |
18 | # define the output format for both handlers
19 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
20 | fh.setFormatter(formatter)
21 | ch.setFormatter(formatter)
22 |
23 | # attach the handlers to the logger
24 | logger.addHandler(fh)
25 | logger.addHandler(ch)
26 |
27 |
--------------------------------------------------------------------------------
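logfile.py configures the logger at import time, so the other modules only need to import it; a minimal usage sketch:

```python
from program.logfile import logger

logger.info("starting a crawl pass")   # written to classification.log and echoed to the console
logger.debug("verbose details")        # DEBUG level is enabled on both handlers
```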
/program/Conn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | """
4 | @author:毛毛虫_Wendy
5 | @license:(C) Copyright 2017-
6 | @contact:dengwenjun@gmail.com
7 | @file:Conn.py
8 | @time:10/26/17 5:37 PM
9 | """
10 | import pymongo
11 | from program.logfile import logger
12 |
13 | class MongoDB(object):
14 | def __init__(self):
15 | # -*- connect to the database -*-
16 | client = pymongo.MongoClient("localhost", 27017)
17 | db = client["Weibo201801"]
18 | self.data = db["data"]
19 |
20 | def process_item(self, item):
21 | """ Check the item's type, handle it accordingly, then write it into the database """
22 | if isinstance(item, dict):
23 | if self.data.find_one({"nickname":item["nickname"],"Post":item["Post"],"Pubtime":item["Pubtime"]}):
24 | return "null"
25 | else:
26 | self.data.insert_one(item)
27 | logger.info("insert data into database...")
28 | return "ok"
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
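process_item() deduplicates on the (nickname, Post, Pubtime) triple and returns "null" for an already-stored post, "ok" otherwise; Spider.py uses "null" as the signal to stop paging. A small usage sketch with made-up field values (none of them come from the repository):

```python
from program.Conn import MongoDB

mongo = MongoDB()                      # connects to the Weibo201801 database on localhost:27017
item = {
    "nickname": "example_account",     # illustrative values only
    "Post": "example post text",
    "Pubtime": "2018-01-30 13:34",
    "Transfer_num": 0,
    "Comment_num": 0,
    "Like_num": 0,
}
print(mongo.process_item(item))        # "ok" on first insert, "null" on a duplicate
```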
/program/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | """
4 | @author:毛毛虫_Wendy
5 | @license:(C) Copyright 2017-
6 | @contact:dengwenjun@gmail.com
7 | @file:main.py
8 | @time:11/9/17 11:33 AM
9 | """
10 | import sched, time
11 | from program.Spider import Weibo_Spider
12 | from program.Prelogin import getData
13 | from program.logfile import logger
14 |
15 |
16 | # initialize the scheduler class from the sched module
17 | # the first argument is a function that returns a timestamp; the second is used to block until the scheduled time arrives
18 | schedule = sched.scheduler(time.time, time.sleep)
19 |
20 | def domain():
21 | weibospider = Weibo_Spider()
22 | ID_urls = weibospider.ID_urls
23 | for i in range(len(ID_urls)):
24 | for j in range(len(ID_urls[i])):
25 | logger.info('crawling account ' + str(i) + ', page ' + str(j+1))
26 | weibospider.get_content(text=getData(ID_urls[i][j]))
27 |
28 | def perform(inc):
29 | schedule.enter(inc, 0, perform, (inc,))
30 | domain()  # the function that needs to run periodically
31 |
32 | def mymain():
33 | schedule.enter(0, 0, perform, (86400,))
34 |
35 |
36 | if __name__ == "__main__":
37 | mymain()
38 | schedule.run()  # start running until the event queue is empty
--------------------------------------------------------------------------------
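The scheduling pattern in main.py is that perform() re-registers itself before doing the work, so the crawl repeats every inc seconds (86400, i.e. once a day). A stripped-down sketch of the same pattern with a shorter interval:

```python
import sched, time

schedule = sched.scheduler(time.time, time.sleep)

def perform(inc):
    schedule.enter(inc, 0, perform, (inc,))   # re-register first, so the cycle never stops
    print("crawl pass at", time.strftime("%Y-%m-%d %H:%M:%S"))

schedule.enter(0, 0, perform, (10,))          # 10 seconds here; main.py uses 86400
schedule.run()                                # blocks and keeps firing perform() on schedule
```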
/program/Prelogin.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | """
4 | @author:毛毛虫_Wendy
5 | @license:(C) Copyright 2017-
6 | @contact:dengwenjun@gmail.com
7 | @file:Prelogin.py
8 | @time:10/26/17 5:35 PM
9 | """
10 | import re , urllib.parse , urllib.request , http.cookiejar , base64 , binascii , rsa , time, pprint, requests,json
11 | from bs4 import BeautifulSoup
12 | from program.Conn import MongoDB
13 |
14 | # -*- the next 4 lines simply make every subsequent GET and POST request carry the cookies obtained so far; on any sizeable site, login verification relies entirely on cookies -*-
15 | cj = http.cookiejar.LWPCookieJar()
16 | cookie_support = urllib.request.HTTPCookieProcessor(cj)
17 | opener = urllib.request.build_opener(cookie_support , urllib.request.HTTPHandler)
18 | urllib.request.install_opener(opener)
19 |
20 |
21 | def getData(url):
22 | """
23 | A small GET helper. Everything fetched from Sina Weibo here is utf-8 encoded, so utf-8 is effectively hard-coded; in a real project, decode according to the actual content encoding.
24 | :param url: the URL to fetch; the text result is returned
25 | :return:
26 | """
27 |
28 | request = urllib.request.Request(url)
29 | response = urllib.request.urlopen(request)
30 | text = response.read()
31 | time.sleep(3)
32 | return text
33 |
34 | def postData(url , data) :
35 | """
36 | # A small POST helper. Verifying the username and password is done via POST, so in this demo postData is used only for that step.
37 | :param url:
38 | :param data:
39 | :return:
40 | """
41 | # the request headers have to be faked by ourselves
42 | headers = {'User-Agent' : 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)'}
43 | # urlencode joins the request parameters with '&' into a single string, which is then encoded as utf-8
44 | data = urllib.parse.urlencode(data).encode('utf-8')
45 | request = urllib.request.Request(url , data , headers)
46 | response = urllib.request.urlopen(request)
47 | text = response.read().decode('gbk')
48 | return text
49 |
50 | def login_weibo(nick , pwd) :
51 | #========================== fetch servertime, pcid, pubkey, rsakv ==========================
52 | # pre-login request; it returns several parameters needed below
53 | prelogin_url = 'http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=%s&rsakt=mod&checkpin=1&client=ssologin.js(v1.4.15)&_=1400822309846' % nick
54 | preLogin = getData(prelogin_url)
55 | # the four values extracted below are all used in the following steps
56 | servertime = re.findall('"servertime":(.*?),' , preLogin.decode('utf-8'))[0]
57 | pubkey = re.findall('"pubkey":"(.*?)",' , preLogin.decode('utf-8'))[0]
58 | rsakv = re.findall('"rsakv":"(.*?)",' ,preLogin.decode('utf-8'))[0]
59 | nonce = re.findall('"nonce":"(.*?)",' , preLogin.decode('utf-8'))[0]
60 |
61 | #=============== encrypt the username and password ===============
62 | # this is the hardest part of logging in to Sina Weibo; after several rounds of encoding and encryption we end up with the encrypted su and sp
63 | su = base64.b64encode(bytes(urllib.request.quote(nick) , encoding = 'utf-8'))
64 | rsaPublickey = int(pubkey , 16)
65 | key = rsa.PublicKey(rsaPublickey , 65537)
66 | # note: some articles found online do not convert the concatenated string to bytes; in Python 3, rsa.encrypt requires a bytes argument (the same applies to base64.b64encode above)
67 | message = bytes(str(servertime) + '\t' + str(nonce) + '\n' + str(pwd) , encoding = 'utf-8')
68 | sp = binascii.b2a_hex(rsa.encrypt(message , key))
69 | #======================= log in =======================
70 | # param holds the login POST parameters; it reuses several of the values obtained in the pre-login step
71 | param = {'entry' : 'weibo' , 'gateway' : 1 , 'from' : '' , 'savestate' : 7 , 'useticket' : 1 , 'pagerefer' : 'http://login.sina.com.cn/sso/logout.php?entry=miniblog&r=http%3A%2F%2Fweibo.com%2Flogout.php%3Fbackurl%3D' , 'vsnf' : 1 , 'su' : su , 'service' : 'miniblog' , 'servertime' : servertime , 'nonce' : nonce , 'pwencode' : 'rsa2' , 'rsakv' : rsakv , 'sp' : sp , 'sr' : '1680*1050' ,
72 | 'encoding' : 'UTF-8' , 'prelt' : 961 , 'url' : 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack'}
73 | # this is the only place postData is used, and it is straightforward
74 | s = postData('http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.15)' , param)
75 | # most of the work is done by this point, but many crawlers still fail here: if you skip the next step and go straight to fetching pages, you will only get the login page back
76 | # the response s contains a script that defines a further login URL (urll); the steps above only gathered and verified parameters, and fetching urll with a GET request is what actually completes the login
77 | urll = re.findall("location.replace\(\'(.*?)\'\);" , s)[0]
78 | getData(urll)
79 | #====================== fetch followers ======================
80 | # if you did not skip the urll step above, congratulations: you are logged in and can now fetch whatever data you want
81 | # try fetching your own Weibo home page; you will see it is a file several hundred KB in size
82 |
83 |
84 |
--------------------------------------------------------------------------------
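Once login_weibo() has fetched the final location.replace URL, the cookie-aware opener installed at the top of the file is authenticated, so any page can then be fetched with getData(). A minimal sketch (the account name, password, and profile URL below are placeholders):

```python
from program.Prelogin import login_weibo, getData

login_weibo("your_weibo_account", "your_password")   # placeholder credentials
page = getData("http://weibo.com/u/1234567890")      # placeholder profile URL
print(len(page))                                      # a logged-in profile page is several hundred KB
```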
/program/Spider.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- encoding:utf-8 -*-
3 | """
4 | @author:毛毛虫_Wendy
5 | @license:(C) Copyright 2017-
6 | @contact:dengwenjun@gmail.com
7 | @file:Spider.py
8 | @time:10/26/17 5:36 PM
9 | """
10 | import re, json,time
11 | from program.Conn import MongoDB
12 | from bs4 import BeautifulSoup
13 | from program.Prelogin import login_weibo,getData
14 | from program.logfile import logger
15 |
16 | class Weibo_Spider(object):
17 | login_weibo("××××", "××")  # fill in your own Weibo username and password here
18 | def __init__(self):
19 | self.host = "https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=100606&is_search=0&visible=0&is_all=1&is_tag=0&profile_ftype=1&"
20 | self.ID = [1006062557129567,1006061902909102,1006061809745371,1006061689575103,1006061888640485,
21 | 1006061710173801,1006062183473425,1006061771925961,1006062968634427,1006062531246845,
22 | 1006061807956030,1006062156294570,1006065183764432,1006061698698394,1006062683843043,
23 | 1006061890174912,1006062798510462,]
24 |
25 | self.ID_page_num = self.get_page()
26 | self.ID_urls = self.get_urls()
27 |
28 | def get_urls(self):
29 | logger.info('generating the URLs of all pages to crawl...')
30 | ID_urls = {}
31 | for id in range(len(self.ID)):
32 | urls = []
33 | for i in range(1, self.ID_page_num[id]+1):
34 | urls.append(self.host+"page="+str(i)+"&pagebar=0&id="+str(self.ID[id]))
35 | for j in range(0, 2):
36 | urls.append(self.host+"page="+str(i)+"&pagebar="+str(j)+"&id="+str(self.ID[id])+"&pre_page="+str(i))
37 | ID_urls[id]=urls
38 | return ID_urls
39 |
40 |
41 | def get_page(self):
42 | logger.info('fetching the page count for each account...')
43 | ID_page_num = {}
44 | for id in range(len(self.ID)):
45 | text = getData(url=self.host + "page=1&pagebar=1&id=" + str(self.ID[id]) + "&pre_page=1")
46 | content = json.loads(text.decode("ascii"))['data']
47 | # -*- extract the total number of pages -*-
48 | reg = 'countPage=(\d+)"'
49 | try:
50 | page_num = int(re.findall(reg, content, re.S)[0])
51 | except IndexError:
52 | page_num = 0
53 | ID_page_num[id] = page_num
54 | return ID_page_num
55 |
56 | def get_content(self,text):
57 | mongo = MongoDB()
58 | reg = '(\d+)'
59 | logger.info('parsing the fetched page data...')
60 | content = json.loads(text.decode("ascii"))['data']
61 | soup = BeautifulSoup(content, "lxml")  # content is the HTML fragment from the JSON "data" field
62 | tmp = soup.find_all("div", attrs={"class": "WB_detail"})
63 | tmp2 = soup.find_all("div", attrs={"class":"WB_handle"})
64 | if len(tmp) > 0 :
65 | for i in range(len(tmp)):
66 | item = {}
67 | item["nickname"] = tmp[i].find("div", attrs={"class": "WB_info"}).find("a").get_text()
68 | item["Post"] = tmp[i].find("div", attrs={"class": "WB_text W_f14"}).get_text().replace("\n", "").replace(" ","").replace( "\u200b", "")
69 |
70 | # -*- scrape the publish time -*-
71 | item["Pubtime"] = tmp[i].find("a", attrs={"class": "S_txt2"}).get("title")
72 |
73 | # -*- scrape the repost count -*-
74 | if re.findall(reg,str(tmp2[i].find("span", attrs={"class": "line S_line1","node-type":"forward_btn_text"})), re.S):
75 | item["Transfer_num"] = int(re.findall(reg,str(tmp2[i].find("span", attrs={"class": "line S_line1","node-type":"forward_btn_text"})), re.S)[0])
76 | else:
77 | item["Transfer_num"] = 0
78 |
79 | # -*- scrape the comment count -*-
80 | if re.findall(reg, str(tmp2[i].find("span", attrs={"class": "line S_line1", "node-type": "comment_btn_text"})), re.S):
81 | item["Comment_num"] = int(re.findall(reg, str(tmp2[i].find("span", attrs={"class": "line S_line1", "node-type": "comment_btn_text"})), re.S)[0])
82 | else:
83 | item["Comment_num"] = 0
84 |
85 | # -*- scrape the like count -*-
86 | if re.findall(reg, str(tmp2[i].find("span", attrs={"node-type": "like_status"})), re.S):
87 | item["Like_num"] = int(re.findall(reg, str(tmp2[i].find("span", attrs={"node-type": "like_status"})), re.S)[0])
88 | else:
89 | item["Like_num"] = 0
90 | item["Scraltime"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
91 |
92 | if mongo.process_item(item)== "null":
93 | break
94 | else:
95 | continue
96 |
97 |
98 |
99 | if __name__ == "__main__":
100 | a = Weibo_Spider()
101 | print(a.ID_page_num)
102 |
--------------------------------------------------------------------------------
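Each profile page of the mbloglist endpoint is delivered in three AJAX chunks, which is why get_urls() emits three URLs per page: an initial request with pagebar=0 and two follow-ups with pagebar=0/1 and pre_page set. A short sketch that prints the URLs for one page of one account (the ID is the first entry of Spider.py's list, used purely as an example):

```python
# reproduce the URL pattern built in Weibo_Spider.get_urls() for a single page
host = ("https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=100606&is_search=0"
        "&visible=0&is_all=1&is_tag=0&profile_ftype=1&")
account_id = 1006062557129567    # first ID from Spider.py, example only
page = 1

urls = [host + "page=%d&pagebar=0&id=%d" % (page, account_id)]
for pagebar in (0, 1):
    urls.append(host + "page=%d&pagebar=%d&id=%d&pre_page=%d" % (page, pagebar, account_id, page))

for url in urls:
    print(url)
```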
/.idea/workspace.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------