├── .gitattributes
├── .gitignore
└── MorningStar_stock_finance_stat_scraping.py

/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

# Custom for Visual Studio
*.cs diff=csharp

# Standard to msysgit
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Windows image file caches
Thumbs.db
ehthumbs.db

# Folder config file
Desktop.ini

# Recycle Bin used on file shares
$RECYCLE.BIN/

# Windows Installer files
*.cab
*.msi
*.msm
*.msp

# Windows shortcuts
*.lnk

# =========================
# Operating System Files
# =========================

# OSX
# =========================

.DS_Store
.AppleDouble
.LSOverride

# Thumbnails
._*

# Files that might appear on external disk
.Spotlight-V100
.Trashes

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

--------------------------------------------------------------------------------
/MorningStar_stock_finance_stat_scraping.py:
--------------------------------------------------------------------------------
"""
Morningstar finance stats scraping.
Retrieves the information behind the "Export to CSV" button on the Morningstar
finance web page for a stock.

Updates:
    Jul 25 2015: Added MS_ValuationExtract class.
    Feb 09 2015: Resolved joining problem.

Learnings:
    How to make Selenium execute JavaScript:
    http://stackoverflow.com/questions/2767690/how-do-you-use-selenium-to-execute-javascript-within-a-frame
    http://stackoverflow.com/questions/25209523/selenium-python-javascript-execution

    Downloading a file:
    http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python
    http://financials.morningstar.com/ajax/exportKR2CSV.html?&callback=?&t=XSES:N4E&region=sgp&culture=en-US&cur=&order=asc

    XHR requests (use the Chrome developer tools to see the respective XHR):
    http://financials.morningstar.com/financials/getKeyStatPart.html?&callback=jsonp1423036133322&t=XSES:N4E&region=sgp&culture=en-US&cur=&order=asc&_=1423036135111
    http://financials.morningstar.com/financials/getFinancePart.html?&callback=jsonp1423036133321&t=XSES:N4E&region=sgp&culture=en-US&cur=&order=asc&_=1423036135109

    Merging two dicts:
    http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression

    pandas: changing the index:
    http://stackoverflow.com/questions/10457584/redefining-the-index-in-a-pandas-dataframe-object

    pandas: to_csv ascii error:
    http://stackoverflow.com/questions/16923281/pandas-writing-dataframe-to-csv-file

    Replacing a string value in an entire dataframe:
    http://stackoverflow.com/questions/17142304/replace-string-value-in-entire-dataframe

Todo:
    Add a min P/E, an average P/E across time, or the P/E from three years ago.
    The historic P/E is not shown directly; it would have to be scraped
    separately or derived by coupling historic EPS with the price.

Bugs:
    Appending may fail due to duplicated columns:
    https://github.com/pydata/pandas/issues/3487

"""

import re
import pandas
from pattern.web import URL
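
## ---------------------------------------------------------------------------
## Added illustration: a minimal sketch of the raw csv pull that this module
## wraps, using only the standard library instead of pattern.web. The endpoint
## and parameters are taken from the Learnings notes above; they are an
## observed, not documented, interface.
## ---------------------------------------------------------------------------
import urllib2

def demo_raw_csv_pull(stock_sym='N4E'):
    """ Hypothetical helper (not used by the classes below): fetch the raw
        key-ratios csv for one SGX stock and return it as text.
    """
    url = ('http://financials.morningstar.com/ajax/exportKR2CSV.html?'
           '&callback=?&t=XSES:' + stock_sym +
           '&region=sgp&culture=en-US&cur=&order=asc')
    return urllib2.urlopen(url).read() # csv text, header rows included
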
class MS_StatsExtract(object):
    """
    Uses the Morningstar ajax call.
    Can only get one stock at a time.
    """
    def __init__(self):
        """ List of url parameters -- for url formation """
        self.com_data_start_url = 'http://financials.morningstar.com/ajax/exportKR2CSV.html?&callback=?&t=XSES:'
        self.com_data_stock_portion_url = ''
        self.com_data_stock_portion_additional_url = '' # for adding an additional str to the stock url.
        self.com_data_end_url = '&region=sgp&culture=en-US&cur=&order=asc'
        self.com_data_full_url = ''
        self.stock_list = '' # list of stocks to parse.

        ## printing options
        self.__print_url = 0
        self.__print_download_fault = 0 # was referenced but never set; initialise here

        ## temp csv storage path
        self.ms_stats_extract_temp_csv = r'c:\data\temp\ms_stats.csv'
        self.ms_stats_extract_temp_csv_transpose = r'c:\data\temp\ms_stats_t.csv'

        ## temp results storage
        self.target_stock_data_df = object()

        ## full result storage
        self.com_data_allstock_df = pandas.DataFrame()
        self.hist_company_data_trends_df = pandas.DataFrame()

    def set_stock_sym_append_str(self, append_str):
        """ Set an additional append str for the stock symbol when forming the stock url.
            Set to self.com_data_stock_portion_additional_url.
            Mainly to set the '.SI' for Singapore stocks.
            Args:
                append_str (str): additional str to append to the stock symbol.
        """
        self.com_data_stock_portion_additional_url = append_str

    def set_target_stock_url(self, stock_sym):
        """ Set the target stock. Single stock again.
            Set to self.com_data_stock_portion_url.
            Args:
                stock_sym (str): Stock symbol.
        """
        self.com_data_stock_portion_url = stock_sym

    def set_stocklist(self, stocklist):
        """ Set the list of stocks to be retrieved.
            Args:
                stocklist (list): list of stocks to be retrieved.
        """
        self.stock_list = stocklist

    def form_url_str(self):
        """ Form the url str necessary to get the .csv file.
            May need to segregate into the various types.
        """
        self.com_data_full_url = self.com_data_start_url + self.com_data_stock_portion_url +\
                                 self.com_data_end_url

    def get_com_data(self):
        """ Combine the cur quotes functions.
            Form the url, download the csv and put in the header; result is a
            dataframe object. Each call handles one stock.
        """
        self.form_url_str()
        if self.__print_url: print self.com_data_full_url

        ## here will process the data set
        self.downloading_csv()

    def downloading_csv(self):
        """ Download the csv information for a particular stock. """
        self.download_fault = 0

        url = URL(self.com_data_full_url)
        with open(self.ms_stats_extract_temp_csv, 'wb') as f:
            try:
                f.write(url.download()) # if there is a problem, skip this stock
            except Exception:
                if self.__print_download_fault: print 'Problem with processing this data: ', self.com_data_full_url
                self.download_fault = 1
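
    ## Added illustration (not part of the original flow): the full url
    ## produced by form_url_str() for a given symbol, so the ajax call can be
    ## checked in a browser. The '&callback=?' parameter appears to be ignored
    ## by the csv endpoint, per the Learnings links above.
    def demo_formed_url(self, stock_sym='N4E'):
        """ Hedged sketch: print the url that would be fetched, without downloading. """
        self.set_target_stock_url(stock_sym)
        self.form_url_str()
        print self.com_data_full_url
        # -> http://financials.morningstar.com/ajax/exportKR2CSV.html?&callback=?&t=XSES:N4E&region=sgp&culture=en-US&cur=&order=asc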

    def process_dataset(self):
        """ Process the data set by converting the csv to a dataframe and
            attaching the information for the various stocks.
            Before concat of the df, change the header labels first, especially
            for the different currencies.
        """
        ## Rows with additional headers are skipped
        try:
            # thousands=',' takes care of commas in thousands
            self.target_stock_data_df = pandas.read_csv(self.ms_stats_extract_temp_csv, header=2, thousands=',',
                                                        index_col=0, skiprows=[19,20,31,41,42,43,48,53,58,64,65,72,73,95,101,102])
        except Exception:
            print 'Problem reading the downloaded csv file.'
        self.target_stock_data_df = self.target_stock_data_df.transpose().reset_index()
        self.target_stock_data_df["SYMBOL"] = self.com_data_stock_portion_url
        # after the transpose, save back to a file and read it again so pandas
        # deduplicates the repeated column labels (see the Bugs note above)
        self.target_stock_data_df.to_csv(self.ms_stats_extract_temp_csv_transpose, index=False)
        self.target_stock_data_df = pandas.read_csv(self.ms_stats_extract_temp_csv_transpose)
        # rename columns
        self.target_stock_data_df.rename(columns={'Year over Year':'Revenue yoy','3-Year Average':'Revenue 3yr avg',
                            '5-Year Average':'Revenue 5yr avg','10-Year Average':'Revenue 10yr avg',

                            'Year over Year.1':'Operating income yoy','3-Year Average.1':'Operating income 3yr avg',
                            '5-Year Average.1':'Operating income 5yr avg','10-Year Average.1':'Operating income 10yr avg',

                            'Year over Year.2':'Net income yoy','3-Year Average.2':'Net income 3yr avg',
                            '5-Year Average.2':'Net income 5yr avg','10-Year Average.2':'Net income 10yr avg',

                            'Year over Year.3':'EPS yoy','3-Year Average.3':'EPS 3yr avg',
                            '5-Year Average.3':'EPS 5yr avg','10-Year Average.3':'EPS 10yr avg',},
                            inplace=True)

        # The concat still needs to handle different currencies, or auto-convert to Singapore currency??
        ## Rename input before concat
        self.rename_columns_for_key_parameters()
        self.add_parameters_for_target_stock_df()

        if len(self.com_data_allstock_df) == 0:
            self.com_data_allstock_df = self.target_stock_data_df
        else:
            self.com_data_allstock_df = pandas.concat([self.com_data_allstock_df, self.target_stock_data_df], ignore_index=True)
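
    ## Added illustration: the write-out-and-read-back step above is a
    ## workaround for duplicated column labels, shown here on a toy frame.
    ## A csv round trip makes pandas deduplicate 'A', 'A' into 'A', 'A.1',
    ## which is also why the '.1'/'.2' suffixes appear in the rename map above.
    def demo_duplicate_column_workaround(self):
        """ Hedged sketch; assumes self.ms_stats_extract_temp_csv_transpose is writable. """
        df = pandas.DataFrame([[1, 2], [3, 4]], columns=['A', 'A'])
        df.to_csv(self.ms_stats_extract_temp_csv_transpose, index=False)
        df = pandas.read_csv(self.ms_stats_extract_temp_csv_transpose)
        print df.columns.tolist() # ['A', 'A.1']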

    def rename_columns_for_key_parameters(self):
        """ Rename columns in self.target_stock_data_df. """
        col_list = self.target_stock_data_df.columns.tolist()
        target_cols_with_currency_dff = [n for n in col_list if re.search('\w+ \w+ Mil', n)]
        currency_unit = re.search('\w+ (\w+) Mil', target_cols_with_currency_dff[0]).group(1) # take any one to extract
        self.target_stock_data_df['currency_unit'] = currency_unit

        ## for the columns rename
        rename_cols = {n: n.replace(' ' + currency_unit, '') for n in target_cols_with_currency_dff}

        ## more columns
        add_target_cols_with_currency_dff = [n for n in col_list if re.search('Earnings Per Share|Dividends', n)]
        rename_cols_2 = {n: n.replace(' ' + currency_unit, '') for n in add_target_cols_with_currency_dff}

        rename_cols.update(rename_cols_2)

        ## rename the columns in self.target_stock_data_df
        self.target_stock_data_df = self.target_stock_data_df.rename(columns=rename_cols)

    def add_parameters_for_target_stock_df(self):
        """ Add additional parameters to the target stock df, primarily because
            some of the Morningstar data lack certain resolution.
        """
        self.target_stock_data_df['Earnings Per Share cal'] = self.target_stock_data_df['Net Income Mil']/self.target_stock_data_df['Shares Mil']
        # 'Payout Ratio %' is in percent, hence the division by 100
        self.target_stock_data_df['Dividends cal'] = self.target_stock_data_df['Earnings Per Share cal']*self.target_stock_data_df['Payout Ratio %']/100.0
        self.target_stock_data_df['Return on Equity'] = self.target_stock_data_df['Net Income Mil']/self.target_stock_data_df["Total Stockholders' Equity"]

        # add the year column
        self.target_stock_data_df['Year'] = self.target_stock_data_df['index'].apply(lambda x: x[:4])

    def get_com_data_fr_all_stocks(self):
        """ Cater for all stocks. Each stock is parsed one at a time. """
        self.com_data_allstock_df = pandas.DataFrame()

        for stock in self.stock_list:
            try:
                print 'Processing stock:', stock
                self.set_target_stock_url(stock)
                self.get_com_data() # already downloads the csv; no separate downloading_csv call needed
                self.process_dataset()
            except Exception:
                print 'Problem with stock: ', stock

    ## process the data, group by each symbol and take the last 3-5 years EPS year on year??
    def get_trend_data(self):
        """ Get trend data for the dataset. Two separate checks.
            One looks at acceleration of the yoy gain: the EPS gain (for example)
            of this year over last year is higher than that of last year over
            the previous year. The other looks for a positive gain year over
            year. Note: a stock may pass the acceleration check even if the
            starting value is negative.
        """
        grouped_symbol = self.com_data_allstock_df.groupby("SYMBOL")

        self.hist_company_data_trends_df = pandas.DataFrame()
        for label in ['EPS yoy', 'Revenue yoy', 'Net income yoy']:
            for n in range(9, 5, -1):
                if n == 9:
                    prev_data = grouped_symbol.nth(n)[label]
                    accel_growth_check = (prev_data == prev_data) # initialise to True; growth accelerating every year
                    normal_growth_check = (prev_data > 0) # positive growth every year
                    continue
                current_data = grouped_symbol.nth(n)[label]
                accel_growth_check = accel_growth_check & (current_data <= prev_data)
                normal_growth_check = normal_growth_check & (current_data > 0)
                prev_data = current_data

            accel_growth_check = accel_growth_check.to_frame().rename(columns={label: label + ' 4yr_accel'}).reset_index()
            normal_growth_check = normal_growth_check.to_frame().rename(columns={label: label + ' 4yr_grow'}).reset_index()

            both_check_df = pandas.merge(accel_growth_check, normal_growth_check, on='SYMBOL')

            if len(self.hist_company_data_trends_df) == 0:
                self.hist_company_data_trends_df = both_check_df
            else:
                self.hist_company_data_trends_df = pandas.merge(self.hist_company_data_trends_df, both_check_df, on='SYMBOL')

    def modify_stock_sym_in_df(self):
        """ Modify the stock sym in the df, especially for Singapore stocks which
            require the '.SI' suffix to join in some cases.
        """
        self.hist_company_data_trends_df['SYMBOL'] = self.hist_company_data_trends_df['SYMBOL'].astype(str) + '.SI'

    def strip_additional_parm_fr_stocklist(self, stocklist, add_parm='.SI'):
        """ Strip the additional parameter from the stock list. Used where the
            input is XXX.SI and Morningstar does not require the additional .SI.
            Args:
                stocklist (list): list of stock sym.
            Kwargs:
                add_parm (str): string to omit (.SI)
        """
        return [re.search('(.*)%s' % re.escape(add_parm), n).group(1) for n in stocklist]
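

## ---------------------------------------------------------------------------
## Added illustration: the two growth checks in get_trend_data above, reduced
## to a hand-made frame with four periods per symbol so the mechanics are
## visible. 'ACC' has yoy growth rising every year, 'POS' grows but
## decelerates, 'NEG' has a negative year.
## ---------------------------------------------------------------------------
def demo_trend_checks():
    """ Hedged sketch of the accel / positive checks on toy data. """
    df = pandas.DataFrame({
        'SYMBOL': ['ACC']*4 + ['POS']*4 + ['NEG']*4,
        'EPS yoy': [1, 2, 3, 4,  4, 3, 2, 1,  1, -2, 3, 4], # oldest period first
    })
    grouped = df.groupby('SYMBOL')
    prev = grouped.nth(3)['EPS yoy']  # most recent period per symbol
    accel = (prev == prev)            # initialise to True (NaN compares False)
    grow = (prev > 0)
    for n in range(2, -1, -1):        # walk backwards in time
        cur = grouped.nth(n)['EPS yoy']
        accel = accel & (cur <= prev) # earlier growth <= later growth
        grow = grow & (cur > 0)
        prev = cur
    print accel # ACC: True, NEG: False, POS: False
    print grow  # ACC: True, NEG: False, POS: True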


class MS_ValuationExtract(object):
    """
    Morningstar valuation extract.
    Can only get one stock at a time.
    The urls are kept in a dict so that multiple pull types can be supported.
    """
    def __init__(self):
        """"""
        # tuple of start url, mid url, end url; mid url can be empty
        self.retrieval_url_dict = {
            'valuation': ('http://financials.morningstar.com/valuation/valuation-history.action?&t=XSES:', '',
                          '&region=sgp&culture=en-US&cur=&type=price-earnings&_=1427535341054'),
        }
        self.retrieval_type = 'valuation'
        self.stock_list = []

        # result storage
        self.combined_valuation_df = pandas.DataFrame()

        ## printing options
        self.__print_url = 0
        self.__print_download_fault = 0 # was referenced but never set; initialise here

    def set_stocklist(self, stock_list):
        """ Set the list of stocks.
            Args:
                stock_list (list): list of stock symbols.
        """
        self.stock_list = stock_list

    def set_retrieval_type(self, ret_type):
        """ Set the url retrieval type.
            Set to self.retrieval_type.
        """
        self.retrieval_type = ret_type

    def set_target_stock_url(self, stock_sym):
        """ Set the target stock. Single stock again.
            Set to self.stock_portion_url.
            Args:
                stock_sym (str): Stock symbol.
        """
        self.stock_portion_url = stock_sym
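
    ## Added illustration: how a second pull type would be registered in
    ## retrieval_url_dict. The endpoint below is a hypothetical placeholder,
    ## not a verified Morningstar url; only the 'valuation' entry above is real.
    def demo_register_retrieval_type(self):
        """ Hedged sketch of extending the dict for multiple pull types. """
        self.retrieval_url_dict['dividends'] = (
            'http://financials.morningstar.com/hypothetical-dividend-endpoint?&t=XSES:', # placeholder
            '',
            '&region=sgp&culture=en-US')
        self.set_retrieval_type('dividends')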

    def form_url_str(self):
        """ Form the url str depending on the retrieval type.
            Determined by self.retrieval_type.
        """
        start_url, mid_url, end_url = self.retrieval_url_dict[self.retrieval_type]
        self.target_full_url = start_url + self.stock_portion_url + mid_url + end_url

    def url_site_download(self):
        """ Download the page for a particular stock, depending on the retrieval
            type (determined by self.retrieval_type).

            Return:
                (str): output html from the url, or None on failure.
        """
        self.download_fault = 0
        self.form_url_str()

        url = URL(self.target_full_url)
        try:
            return url.download()
        except Exception:
            if self.__print_download_fault: print 'Problem with processing this data: ', self.target_full_url
            self.download_fault = 1
            return None

    def process_valuation_for_single_stock(self, stock_sym):
        """ Process the valuation information for a single stock.
            Args:
                stock_sym (str): particular stock sym.

            Return:
                (pandas.DataFrame): valuation df of the particular stock.
        """
        self.set_retrieval_type('valuation')
        self.set_target_stock_url(stock_sym)
        url_content = self.url_site_download()
        if url_content:
            return self.parse_valuation_data(url_content)

        return None

    def parse_valuation_data(self, url_html):
        """ Parse the valuation page for a particular stock and create a pandas
            dataframe for it.
            Return:
                (pandas.DataFrame): valuation df of the particular stock.
        """
        w = pandas.io.html.read_html(url_html, tupleize_cols=True, header=0)

        # select target rows --> company historical data
        target_hist_com = w[0].iloc[[0, 3, 6, 9]]

        # create the dict for renaming the columns
        org_col_list = ['Unnamed: ' + str(n) for n in range(1, 12)] + ['Price/Earnings']
        rename_col_list = ['wp_history_' + str(year) for year in range(2005, 2015)] + ['wp_history_TTM'] + ['Valuation_type']
        rename_col_dict = {}
        for org_col, rename_col in zip(org_col_list, rename_col_list):
            rename_col_dict[org_col] = rename_col

        # rename the columns
        target_hist_com = target_hist_com.rename(columns=rename_col_dict)

        # rename the rows (use .loc to avoid chained-assignment problems)
        for index, val_type in zip([0, 3, 6, 9], ['PE_ratio', 'PB', 'Price_sales_ratio', 'Price_cashflow_ratio']):
            target_hist_com.loc[index, 'Valuation_type'] = val_type

        target_hist_com = target_hist_com.set_index('Valuation_type')

        # rename the columns --> keep only the year
        target_hist_rename_cols_dict = {n: n.replace('wp_history_', '') for n in target_hist_com.columns.tolist()}
        target_hist_com = target_hist_com.rename(columns=target_hist_rename_cols_dict)

        # transpose the data set
        target_hist_com_t = target_hist_com.transpose()
        target_hist_com_t = target_hist_com_t.reset_index()
        target_hist_com_t = target_hist_com_t.rename(columns={'index': 'Year'})

        # add in the symbol
        target_hist_com_t['SYMBOL'] = self.stock_portion_url # this will contain the stock symbol

        return target_hist_com_t

    def process_all_stock_data(self):
        """ Process all stock data for valuation results. """
        for stock in self.stock_list:
            print 'processing stock: ', stock
            temp_stock_df = self.process_valuation_for_single_stock(stock)
            if temp_stock_df is None: # download failed; skip this stock
                continue
            if len(self.combined_valuation_df) == 0:
                self.combined_valuation_df = temp_stock_df
            else:
                self.combined_valuation_df = self.combined_valuation_df.append(temp_stock_df)

        ## replace the dash (u'\u2014') used for columns that have no data
        self.combined_valuation_df = self.combined_valuation_df.replace(u'\u2014', '')
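

## ---------------------------------------------------------------------------
## Added illustration: pandas.io.html.read_html, as used in
## parse_valuation_data above, returns one dataframe per <table> found in the
## html, which is why w[0] is taken. Toy example (requires lxml or
## BeautifulSoup to be installed):
## ---------------------------------------------------------------------------
def demo_read_html():
    """ Hedged sketch of the read_html step on a hand-made table. """
    html = ('<table><tr><th>Year</th><th>PE</th></tr>'
            '<tr><td>2014</td><td>10.5</td></tr></table>')
    tables = pandas.io.html.read_html(html, header=0)
    print len(tables) # 1, a list with one dataframe
    print tables[0]   # columns: Year, PE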


if __name__ == '__main__':

    choice = 1

    if choice == 1:
        """ Combine both historical stats and valuation data. """

        # Get stock symbols from file.
        # Alternatively, a list of stock symbols can be given directly.
        file = r'C:\data\compile_stockdata\full_20150719.csv'
        full_stock_data_df = pandas.read_csv(file)
        stock_list = list(full_stock_data_df['SYMBOL'])
        # use replace, not strip: strip('.SI') removes any leading/trailing
        # '.', 'S', 'I' characters and would mangle symbols such as 'S63.SI'
        stock_list = [n.replace('.SI', '') for n in stock_list]

        print 'Processing historical financial stats data'
        pp = MS_StatsExtract()
        pp.set_stocklist(stock_list)
        pp.get_com_data_fr_all_stocks()

        print 'Processing historical valuation data'
        yy = MS_ValuationExtract()
        yy.set_stocklist(stock_list)
        yy.process_all_stock_data()

        print 'Joining data set'
        combined_df = pandas.merge(pp.com_data_allstock_df, yy.combined_valuation_df, how='left', on=['SYMBOL', 'Year'])

        # Join with additional data from the compiled data.
        # The additional data have to be retrieved from other sources.
        # See: Retrieving stock news and Ex-date from SGX using python at https://simplypython.wordpress.com
        required_columns = ['SYMBOL', 'CompanyName', 'industry', 'industryGroup']
        partial_stock_df = full_stock_data_df[required_columns].copy()
        partial_stock_df['SYMBOL'] = partial_stock_df['SYMBOL'].str.replace(r'\.SI', '')

        combined_df_2 = pandas.merge(combined_df, partial_stock_df, how='left', on=['SYMBOL'])

        combined_df_2.to_csv(r'C:\data\temp\morn_star_data.csv', index=False)

    if choice == 2:

        ## Get stock symbols from file
        file = r'C:\data\compile_stockdata\full_20150521.csv'
        full_stock_data_df = pandas.read_csv(file)
        stock_list = list(full_stock_data_df['SYMBOL'])
        stock_list = [n.replace('.SI', '') for n in stock_list]

        pp = MS_StatsExtract()
        pp.set_stocklist(['BN4', 'BS6', 'N4E', 'U96', '500', 'P13', 'S63'])
        #pp.set_stocklist(stock_list)
        #pp.set_stocklist(['D38'])
        pp.get_com_data_fr_all_stocks()
        pp.com_data_allstock_df.to_csv(r'C:\data\temp\morn_star_data.csv', index=False)
        #pp.get_trend_data()
        #pp.modify_stock_sym_in_df()
        #print pp.hist_company_data_trends_df

    if choice == 3:
        """ Check on the historical valuation. """
        print 'Processing historical valuation data'
        yy = MS_ValuationExtract()
        yy.set_stocklist(['BN4', 'BS6'])
        yy.process_all_stock_data()
        yy.combined_valuation_df.to_csv(r'C:\data\temp\morn_star_data1.csv', index=False)

    if choice == 4:
        filename = r'C:\data\full_Feb08.csv'
        pp = MS_StatsExtract()
        stock_df = pandas.read_csv(filename)
        pp.set_stocklist(pp.strip_additional_parm_fr_stocklist(list(stock_df['SYMBOL'])))
        pp.get_com_data_fr_all_stocks()
        pp.get_trend_data()
        pp.modify_stock_sym_in_df()
        full_data_df = pandas.merge(stock_df, pp.hist_company_data_trends_df, on='SYMBOL')
        full_data_df.to_csv(filename, index=False)
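
    if choice == 99:
        """ Added illustration (hypothetical demo branch, not in the original
            script): the left-merge in choice 1 keys on both SYMBOL and Year,
            so valuation rows only attach where the stats frame has a matching
            year. A toy version of the same join:
        """
        left = pandas.DataFrame({'SYMBOL': ['BN4', 'BN4'], 'Year': ['2013', '2014'], 'EPS': [1.0, 1.2]})
        right = pandas.DataFrame({'SYMBOL': ['BN4'], 'Year': ['2014'], 'PE_ratio': [10.5]})
        print pandas.merge(left, right, how='left', on=['SYMBOL', 'Year'])
        # the 2013 row gets NaN for PE_ratio; the 2014 row gets 10.5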

--------------------------------------------------------------------------------