├── config.json
├── README.md
└── google-image-crawler-zyf.py

/config.json:
--------------------------------------------------------------------------------
{
    "save_dir": "./downloads/",
    "num_downloads_for_each_class": 200,
    "search_file_type": "jpg",
    "search_keywords_dict": {
        "animal": [
            "猫",
            "狗",
            "cat",
            "dog"
        ],
        "fruit": [
            "banana",
            "apple"
        ]
    },
    "search_cdr_days": 60,
    "output_prefix": "download_urls",
    "output_suffix": "google"
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Google Image Crawler (by [James Zhao](http://github.com/walkoncross))

## Introduction
### An image crawler for Google Image Search that can download more than 100 images per class by sending multiple search queries with different date ranges.

## Features
1. Download as many images as you want;
2. Crawl images by class, with each class having several keywords;
3. Check for duplicate image URLs before downloading, even after restarting the crawler;
4. Easy to configure: please refer to [config.json](./config.json).

---
## Reference
### This image crawler is based on: [hardikvasa/google-images-download](https://github.com/hardikvasa/google-images-download)

--------------------------------------------------------------------------------
/google-image-crawler-zyf.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
#-*- coding: UTF-8 -*-

# Searching and downloading Google Images / image links

# Import libraries

import time    # to measure code execution time
import sys     # system library

import os
import os.path as osp

import fnmatch

import json

#import urllib2
from urllib import urlencode

from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError

import datetime

import hashlib

from collections import OrderedDict

########### CONFIGS ###########
# Path to config file
config_file = './config.json'

########### Default CONFIGS ###########
CONFIGS = {}

# How many images you want to download for each class.
# Google returns at most 100 images for a single search query.
CONFIGS[u'num_downloads_for_each_class'] = 200

# Image type to search for
CONFIGS[u'search_file_type'] = 'jpg'
#CONFIGS[u'search_file_type'] = 'bmp'
#CONFIGS[u'search_file_type'] = 'png'

# Because Google returns at most 100 results per search query,
# we must send many queries to collect more than 100 results.
# We set cdr (a date range, in the form "tbs=cdr:1,cd_min:{start_date},cd_max:{end_date}")
# to tell Google that we only want images from the date range (start_date, end_date),
# so that each query returns a different set of results.
# CONFIGS[u'search_cdr_days'] is the number of days between cd_min and cd_max.
CONFIGS[u'search_cdr_days'] = 60
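
# Hedged illustration of the date-range mechanism described above (this helper is
# hypothetical and is not called anywhere in this script): for a 60-day window such as
# 01/01/2017 to 03/02/2017 and file type 'jpg', the resulting 'tbs' query parameter
# would look like 'cdr:1,cd_min:01/01/2017,cd_max:03/02/2017,ift:jpg'.
def example_build_tbs(cd_min_str, cd_max_str, file_type):
    # e.g. example_build_tbs('01/01/2017', '03/02/2017', 'jpg')
    return 'cdr:1,cd_min:{},cd_max:{},ift:{}'.format(cd_min_str, cd_max_str, file_type)
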
# This dict defines the search keywords. Edit it to search for the Google images of your
# choice; you can simply add and remove elements of the lists.
# Format: {class1: [list of related keywords], class2: [list of related keywords], ...}
CONFIGS[u'search_keywords_dict'] = {'animal': [u'猫', 'cat', 'dog'],
                                    'fruit': [u'apple', u'banana']}

# This list can be used to append a suffix to every search term. Each element of the list
# helps you download up to 100 more images. The first element is blank, meaning no suffix
# is added to the search keyword. You can edit the list by adding/deleting elements. For
# example, if the first search keyword is 'Australia' and the second element here is
# 'high resolution', the crawler will also search for 'Australia high resolution'.
#aux_keywords = [' high resolution']

CONFIGS[u'save_dir'] = './downloads'

CONFIGS[u'output_prefix'] = 'download_urls'
CONFIGS[u'output_suffix'] = 'google'

print '==>Default CONFIGS:'
print CONFIGS
########### End of Default CONFIGS ###########

########### Load config.json if there is one ###########
if osp.exists(config_file):
    print "Load CONFIGS from " + config_file
    fp = open(config_file, 'r')
    CONFIGS_loaded = json.load(fp, object_pairs_hook=OrderedDict)

    print '==>Loaded CONFIGS:'
    print CONFIGS_loaded

    for k, v in CONFIGS_loaded.iteritems():
        if k in CONFIGS:
            CONFIGS[k] = v

    fp.close()

    print '==>CONFIGS after loading:'
    print CONFIGS
########### End of Load config.json ###########


#CONFIGS[u'output_prefix'] = CONFIGS[u'output_prefix'] + '_'
#CONFIGS[u'output_suffix'] = '_' + CONFIGS[u'output_suffix']

CONFIGS[u'save_dir'] = CONFIGS[u'save_dir'] + '/'
if not osp.exists(CONFIGS[u'save_dir']):
    os.mkdir(CONFIGS[u'save_dir'])

########### End of CONFIGS ###########

########### Functions to load previously downloaded URLs ###########
def load_url_files(_dir, file_name_prefix):
    url_list = []

    ttl_url_list_file_name = osp.join(_dir, file_name_prefix + '_all.txt')
    if osp.exists(ttl_url_list_file_name):
        fp_urls = open(ttl_url_list_file_name, 'r')  # open the combined URL list for this class
        print 'load URLs from file: ' + ttl_url_list_file_name

        i = 0
        for line in fp_urls:
            line = line.strip()
            if len(line) > 0:
                splits = line.split('\t')
                url_list.append(splits[0].strip())
                i = i + 1

        print str(i) + ' URLs loaded'
        fp_urls.close()
    else:
        url_list = load_all_url_files(_dir, file_name_prefix)

    return url_list

def load_all_url_files(_dir, file_name_prefix):
    url_list = []

    for file_name in os.listdir(_dir):
        if fnmatch.fnmatch(file_name, file_name_prefix + '*.txt'):
            file_name = osp.join(_dir, file_name)
            fp_urls = open(file_name, 'r')  # open one of the per-run URL list files
            print 'load URLs from file: ' + file_name

            i = 0
            for line in fp_urls:
                line = line.strip()
                if len(line) > 0:
                    splits = line.split('\t')
                    url_list.append(splits[0].strip())
                    i = i + 1
            print str(i) + ' URLs loaded'
            fp_urls.close()

    return url_list
########### End of Functions to load previously downloaded URLs ###########
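
# Hedged sketch of the on-disk format the loaders above expect: each '<prefix>*.txt'
# file holds one record per line, consisting of the resolved image URL, a tab, and the
# saved file name, e.g. "http://example.com/a.jpg<TAB>./downloads/.../<md5>.jpg".
# Only the first (URL) column is used for duplicate checking. This helper is
# hypothetical and is not called by the crawler itself:
def example_parse_url_record(line):
    # returns the URL column from one "url<TAB>saved_file_name" record
    return line.strip().split('\t')[0]
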
############## Functions to get date/time strings ##############
def get_current_date():
    tm = time.gmtime()
    date = datetime.date(tm.tm_year, tm.tm_mon, tm.tm_mday)
    return date

def get_new_date_by_delta_days(date, delta_days):
    delta = datetime.timedelta(delta_days)
    new_date = date + delta
    return new_date

# Make a string from the current GMT time
def get_gmttime_string():
    _str = time.strftime("GMT%Y%m%d_%H%M%S", time.gmtime())
    return _str

# Make a string from the current local time
def get_localtime_string():
    _str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    return _str
############## End of Functions to get date/time strings ##############

############## Google Image Search functions ##############
# Build the Google image search query URL for a keyword
def google_get_query_url(keyword, file_type, cdr):
    url = None

    # if keyword is unicode, encode it into utf-8 before URL-encoding
    if isinstance(keyword, unicode):
        keyword = keyword.encode('utf-8')

    # avoid a leading ',' in tbs when no date range (cdr) is given
    tbs = cdr + ',ift:' + file_type if cdr else 'ift:' + file_type
    query = dict(q=keyword,
                 tbm='isch',
                 tbs=tbs)

    #url = 'https://www.google.com/search?q=' + keyword + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
    #url = 'https://www.google.com/search?as_oq=' + keyword + '&as_st=y&tbm=isch&safe=images&tbs=ift:jpg'
    url = 'https://www.google.com/search?' + urlencode(query)

    print "\t==>Google Query URL is: " + url
    return url

# Download an entire web document (raw page content)
def google_download_page(url):
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:  # Python 3.0 or above
        import urllib.request   # urllib library for fetching web pages
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:  # Python 2.x
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except Exception as e:
            print(str(e))
            return "Page Not Found"
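
# Hedged sketch (hypothetical helper, not used by the crawler): each result on the raw
# search page carries a JSON blob inside an 'rg_meta' element whose "ou" field holds the
# original image URL. The same field could be extracted with the json module instead of
# the string searching done in google_images_get_next_item() below, assuming the page
# markup still contains blobs of the form {"ou":"http://...", "ow":..., ...}.
def example_extract_original_url(meta_json_str):
    # e.g. example_extract_original_url('{"ou":"http://example.com/a.jpg","ow":800}')
    # -> 'http://example.com/a.jpg'
    return json.loads(meta_json_str).get('ou')
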
# Find the next image link in the given raw page
def google_images_get_next_item(s):
    start_line = s.find('rg_di')
    if start_line == -1:  # no more links found on this page
        end_quote = 0
        link = "no_links"
        return link, end_quote
    else:
        start_line = s.find('"class="rg_meta"')
        start_content = s.find('"ou"', start_line + 1)
        end_content = s.find(',"ow"', start_content + 1)
        # slice out the value of the "ou" (original URL) field
        content_raw = str(s[start_content + 6:end_content - 1])
        return content_raw, end_content

# Get all image links with the help of google_images_get_next_item()
def google_images_get_all_items(page):
    items = []
    while True:
        item, end_content = google_images_get_next_item(page)
        if item == "no_links":
            break
        else:
            items.append(item)  # append each extracted link to the 'items' list
            time.sleep(0.1)     # small delay to slow down the requests
            page = page[end_content:]
    return items

def google_search_keyword(keyword, file_type, cdr):
    query_url = google_get_query_url(keyword, file_type, cdr)
    raw_html = google_download_page(query_url)
    time.sleep(0.1)
    image_url_list = google_images_get_all_items(raw_html)
    return image_url_list
############## End of Google Image Search functions ##############

############## Functions to get real URLs and download images ##############
# Get the real (redirect-resolved) URL of an input URL
def get_real_url(url, loaded_urls):
    real_url = None
    response = None
    try:
        req = Request(url, headers={"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
        response = urlopen(req)

        real_url = response.geturl()
        print 'Real_url is: ' + str(real_url)

        if real_url in loaded_urls:
            print 'URL has already been downloaded in a previous search'
            real_url = None

    except HTTPError as e:  # HTTP-level errors (4xx/5xx); must be caught before URLError/IOError
        print("HTTPError on url " + str(url))
        print e
    except URLError as e:   # network/DNS errors
        print("URLError on url " + str(url))
        print e
    except IOError as e:    # any other I/O error
        print("IOError on url " + str(url))
        print e

    if response:
        response.close()

    return real_url
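
# Hedged sketch of the file-naming scheme used by download_image() below (this helper is
# hypothetical and is not called anywhere): each image is saved under the MD5 hex digest
# of its resolved URL, so the same URL always maps to the same file name.
def example_image_file_name(real_url, file_type):
    # e.g. example_image_file_name('http://example.com/a.jpg', 'jpg') -> '<32 hex chars>.jpg'
    return hashlib.md5(real_url).hexdigest() + '.' + file_type
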
def download_image(url, save_dir, loaded_urls=None):
    real_url = None
    response = None
    save_image_name = None
    try:
        req = Request(url, headers={"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
        response = urlopen(req)

        real_url = response.geturl()

        if loaded_urls and real_url in loaded_urls:
            print 'URL has already been downloaded in a previous search'
            real_url = None
        else:
            img_name = hashlib.md5(real_url).hexdigest()
            save_image_name = save_dir + '/' + img_name + '.' + CONFIGS[u'search_file_type']
            print 'Try to save image ' + real_url + ' into file: ' + save_image_name
            output_file = open(save_image_name, 'wb')
            data = response.read()
            output_file.write(data)
            output_file.close()

        #response.close()
    except HTTPError as e:  # HTTP-level errors (4xx/5xx); must be caught before URLError/IOError
        print("HTTPError on url " + str(url))
        print e
    except URLError as e:   # network/DNS errors
        print("URLError on url " + str(url))
        print e
    except IOError as e:    # any other I/O error
        print("IOError on url " + str(url))
        print e

    if response:
        response.close()

    return real_url, save_image_name
############## End of Functions to get real URLs and download images ##############

############## Main Program ##############
t0 = time.time()  # start the timer

# Download image links
i = 0

cur_date = get_current_date()
print "Today is: " + cur_date.strftime("%Y/%m/%d")

time_str = get_gmttime_string()

for class_name, search_keywords in CONFIGS[u'search_keywords_dict'].iteritems():
    print "Class no.: " + str(i + 1) + " --> " + "Class name = " + str(class_name)

    class_urls_file_prefix = CONFIGS[u'output_prefix'] + '_' + str(class_name).strip()

    items = load_url_files(CONFIGS[u'save_dir'], class_urls_file_prefix)
    loaded_urls_num = len(items)
    print 'Loaded URLs in total is: ', loaded_urls_num

    # load pre-saved download parameters: the last saved date becomes cd_max of the next date range
    cd_max = cur_date

    params_file = osp.join(CONFIGS[u'save_dir'], class_urls_file_prefix + '_params_' + CONFIGS[u'output_suffix'] + '.txt')
    print 'Loading pre-saved download parameters from: ' + params_file
    params_list = []
    fp_params = open(params_file, 'a+')  # 'a+' reads from the beginning and appends on write
    for line in fp_params:
        line = line.strip()
        if line != '':
            params_list.append(line)
            print "\t-->loaded parameters: ", line

    if len(params_list) > 0:
        splits = params_list[-1].split('/')
        if len(splits) == 3:
            cd_max = datetime.date(int(splits[0]), int(splits[1]), int(splits[2]))

    cd_min = get_new_date_by_delta_days(cd_max, -CONFIGS[u'search_cdr_days'])
    print 'cd_max: ', cd_max
    print 'cd_min: ', cd_min

    print ("Crawling Images...")

    class_save_dir = osp.join(CONFIGS[u'save_dir'], class_urls_file_prefix + '_' + time_str + '_' + CONFIGS[u'output_suffix'])
    if not osp.exists(class_save_dir):
        os.mkdir(class_save_dir)

    output_all_urls_file = osp.join(CONFIGS[u'save_dir'], class_urls_file_prefix + '_all.txt')
    fp_all_urls = open(output_all_urls_file, 'a+')

    output_urls_file = osp.join(CONFIGS[u'save_dir'], class_urls_file_prefix + '_' + time_str + '_' + CONFIGS[u'output_suffix'] + '.txt')
    fp_urls = open(output_urls_file, 'a+')

    # if osp.exists(output_urls_file):
    #     fp_urls = open(output_urls_file, 'a+')
    #     for line in fp_urls:
    #         items.append(line.strip())
    # else:
    #     fp_urls = open(output_urls_file, 'w+')
    #
    cdr_enabled = False
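
    # Hedged, concrete illustration of the loop below (dates are only an example): with
    # search_cdr_days = 60 and cd_max starting at 2017/03/02, successive passes use
    #   pass 1: cdr = ''                                          (no date restriction)
    #   pass 2: cdr = 'cdr:1,cd_min:01/01/2017,cd_max:03/02/2017'
    #   pass 3: cdr = 'cdr:1,cd_min:11/02/2016,cd_max:01/01/2017'
    # and so on, sliding the window back by 60 days until enough new URLs are collected.
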
    while True:
        if cdr_enabled:
            cdr = 'cdr:1,cd_min:{},cd_max:{}'.format(cd_min.strftime('%m/%d/%Y'), cd_max.strftime('%m/%d/%Y'))
            print "==>Search for Images between " + cd_min.strftime("%Y/%m/%d") + \
                  ' and ' + cd_max.strftime("%Y/%m/%d")
        else:
            cdr = ''
            print "==>Search for Images in any time range"

        j = 0

        # Google returns at most 100 images for one search, so we try each keyword of this class in turn
        while j < len(search_keywords):
            print "==>Class name=" + str(class_name) + ', search keywords=' + search_keywords[j]
            keyword = search_keywords[j]  #.replace(' ','%20')

            # query construction and page downloading are handled by google_search_keyword()
            new_items = google_search_keyword(keyword, CONFIGS[u'search_file_type'], cdr)

            for url in new_items:
                #real_url = get_real_url(url)
                real_url, save_name = download_image(url, class_save_dir, items)

                if real_url and real_url not in items:
                    items.append(real_url)
                    fp_all_urls.write(real_url + '\t' + save_name + "\n")
                    fp_urls.write(real_url + '\t' + save_name + "\n")

            fp_all_urls.flush()
            fp_urls.flush()

            print 'len(items)=', len(items)
            j = j + 1

        if cdr_enabled:
            fp_params.write('{}/{}/{}\n'.format(cd_min.year, cd_min.month, cd_min.day))
            cd_max = cd_min
            cd_min = get_new_date_by_delta_days(cd_max, -CONFIGS[u'search_cdr_days'])
        else:
            fp_params.write('{}/{}/{}\n'.format(cd_max.year, cd_max.month, cd_max.day))
            cdr_enabled = True

        fp_params.flush()

        print 'len(items)=', len(items)
        if len(items) >= loaded_urls_num + CONFIGS[u'num_downloads_for_each_class']:
            break

    fp_params.close()
    fp_all_urls.close()
    fp_urls.close()

    #print ("Image Links = " + str(items))
    print ("Total New Image Links = " + str(len(items) - loaded_urls_num))
    print ("\n")
    i = i + 1

t1 = time.time()      # stop the timer
total_time = t1 - t0  # total time taken to crawl, collect and download all the image links
print("Total time taken: " + str(total_time) + " Seconds")

print("\n")
print("=== All downloads finished")
#---- End of the main program ----#
--------------------------------------------------------------------------------