├── .gitattributes ├── .gitignore ├── README.md └── CivitaiInfoAgregator.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | CivitaiInfoAgregator_cache.json 2 | CivitaiInfoAgregator_settings.json 3 | models/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CivitaiBatchHelper 2 | ### If you used an older version, output folders now follow the same structure as the models (nested folder) 3 | --- 4 | 5 | When you run it for the first time, it will ask you for a Civitai API key and a path to your models folder. 6 | Then it will scan all of your checkpoints/loras etc... 
import hashlib
import os
import json
import glob
import re
import math
import sys
import subprocess
import importlib.util
import asyncio


def is_installed(package, package_overwrite=None):
    """Ensure the module *package* is importable, pip-installing it if missing.

    package           -- importable module name to probe for.
    package_overwrite -- pip distribution name when it differs from the
                         module name (e.g. module "PIL" -> dist "pillow").
    """
    # BUG FIX: `spec` was left unbound when find_spec raised
    # ModuleNotFoundError (the bare `pass` fell through to `if spec is None`,
    # producing a NameError). Default it to None so a missing parent package
    # is treated the same as a missing module.
    spec = None
    try:
        spec = importlib.util.find_spec(package)
    except ModuleNotFoundError:
        pass

    package = package_overwrite or package

    if spec is None:
        print(f"Installing {package}...")
        command = f'"{sys.executable}" -m pip install --no-cache-dir {package}'

        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ)

        if result.returncode != 0:
            print(f"{error_color}Couldn't install\nCommand: {command}\nError code: {result.returncode}{reset_color}")


# Model file extensions to scan for.
fileExtentions = ('*.safetensors', '*.ckpt', '*.pt')
# Terminal colour escape strings (left empty = no colouring). Defined before
# the is_installed() calls below so the failure branch can reference them.
ok_color = ''
error_color = ''
reset_color = ''

# Ensure third-party dependencies exist BEFORE importing them.
# BUG FIX: tqdm was previously imported at the top of the file, before any
# install check could run, so a missing tqdm crashed the script immediately.
is_installed("tqdm")
is_installed("aiohttp")
# BUG FIX: the pip distribution that provides the "PIL" module is "pillow";
# `pip install PIL` fails. Use the package_overwrite parameter made for this.
is_installed("PIL", "pillow")

from tqdm import tqdm
import aiohttp
from io import BytesIO
from PIL import Image
from PIL.PngImagePlugin import PngInfo
# Progress-bar colours, from "just started" (red) through to "done" (green).
bar_color = ['#ff0044', '#f77622', '#fee761', '#63c74d']
# Map Civitai image-meta keys onto the Auto1111 parameter names used in
# embedded PNG info; keys not listed are handled by str.capitalize().
exifRename = {
    'negativePrompt': 'Negative prompt',
    'cfgScale': 'CFG scale',
    'ENSD': 'ENSD'
}
# Canonical Auto1111 ordering of generation parameters.
exifOrder = ['Prompt', 'Negative prompt', 'Steps', 'Sampler', 'CFG scale', 'Seed', 'Size', 'Model hash', 'Model', 'Denoising strength', 'Clip skip', 'ENSD', 'Hires upscale', 'Hires steps', 'Hires upscaler']
# Keys of interest inside a model file's embedded safetensors metadata.
trainingMetadataAllow = ['ss_epoch', 'ss_clip_skip', 'ss_max_train_steps', 'ss_num_batches_per_epoch', 'ss_datasets', 'ss_tag_frequency']

base_url = 'https://civitai.com/api/v1'
user_agent = 'CivitaiLink:CivitaiInfoAgregator'
download_chunk_size = 8192


def update_bar(bar, amount=1):
    """Advance *bar* by *amount*, recolouring it by completion fraction."""
    index = min(math.floor(((bar.n+amount)/bar.total)*len(bar_color)), len(bar_color)-1)
    bar.colour = bar_color[index]
    bar.update(amount)

def get_separator(title, lenght=50):
    """Return *title* centred in a box-drawing banner of inner width *lenght*.

    NOTE(review): the parameter keeps its original "lenght" spelling so any
    keyword-argument callers stay compatible.
    """
    return f'┌{"─"*lenght}┐\n│{title:^{lenght}}│\n└{"─"*lenght}┘'

# Get and download preview images
async def get_all_images(images):
    """Concurrently download every preview-image dict in *images*."""
    bar = tqdm(total=len(images), bar_format='{n_fmt}/{total_fmt} |{bar}| {percentage:3.0f}%')
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*[get_image(session, image, bar) for image in images])
    bar.close()

async def get_image(session, image, bar):
    """Fetch one preview image and save it as PNG with embedded parameters.

    *image* is a dict with 'url', 'path' and 'pngInfo' keys.
    """
    headers = {'User-Agent': user_agent, 'Authorization': f"Bearer {settings['civitai_api_key']}"}
    try:
        async with session.get(url=image['url'], headers=headers) as response:

            image_data = BytesIO()

            async for chunk in response.content.iter_chunked(download_chunk_size):
                image_data.write(chunk)

            if response.status == 200:

                pil_image = Image.open(image_data)
                metadata = PngInfo()
                if image['pngInfo'] != '':
                    # Auto1111 reads generation parameters from this key.
                    metadata.add_text('parameters', image['pngInfo'])

                pil_image.save(image['path'], format='PNG', pnginfo=metadata, compress_level=4)
            else:
                bar.write(f'{error_color}{bar.n}/{bar.total} "{os.path.relpath(image["path"], start=settings["output_path"])}" failed Code: {response.status}{reset_color}\n')

            update_bar(bar, 1)

    except Exception as e:
        # BUG FIX: the original called '\n'.join() with no argument, raising
        # a TypeError that masked the real download error.
        print(f'{error_color}Unable to get url {e.__class__}.{reset_color}')
        if e.args:
            print('\n'.join(str(arg) for arg in e.args))


# Get individual model description
async def get_all_models(models):
    """Fetch the full model record for every {sha256: modelID} entry."""
    result = []
    bar = tqdm(total=len(models), bar_format='{n_fmt}/{total_fmt} |{bar}| {percentage:3.0f}% {elapsed_s:.3f}s', dynamic_ncols=True, colour=bar_color[0])

    async with aiohttp.ClientSession() as session:
        result.extend(await asyncio.gather(*[get_model(session, hashKey, modelID, bar) for hashKey, modelID in models.items()]))

    bar.close()

    return result

async def get_model(session, hashKey, modelID, bar):
    """Fetch one model record; returns [hashKey, json] (None on failure)."""
    url = f'{base_url}/models/{modelID}'
    headers = {'User-Agent': user_agent, 'Authorization': f"Bearer {settings['civitai_api_key']}"}
    try:
        async with session.get(url=url, headers=headers) as response:
            resp = await response.json()

            update_bar(bar, 1)

            return [hashKey, resp]
    except Exception as e:
        print(f'{error_color}Unable to get "{url}" due to.{reset_color}')
        # Guard: exception args may be absent or non-string.
        if e.args:
            print('\n'.join(str(arg) for arg in e.args))


# Batch 100 model hashes per request and collect the matched versions.
async def get_all_models_by_hash(hashes):
    """Look up Civitai model versions for a list of SHA256 hash strings."""
    results = []
    batches = []
    # The by-hash endpoint accepts at most 100 hashes per call.
    for i in range(0, len(hashes), 100):
        batches.append(json.dumps(hashes[i:i + 100]))

    try:
        async with aiohttp.ClientSession() as session:
            result = (await asyncio.gather(*[get_model_by_hash(session, batch) for batch in batches]))
            for x in result:
                results.extend(x)
    except Exception as e:
        print(f'{error_color}Unable to get batch of hash due to.{reset_color}')
        if e.args:
            print('\n'.join(str(arg) for arg in e.args))

    return results
async def get_model_by_hash(session, hashes):
    """POST a JSON-encoded batch of SHA256 hashes to the by-hash endpoint."""
    headers = {'User-Agent': user_agent, 'Authorization': f"Bearer {settings['civitai_api_key']}"}
    headers['Content-Type'] = 'application/json'

    async with session.post(url=f'{base_url}/model-versions/by-hash', headers=headers, data=hashes) as response:
        return await response.json()

def save_dict(dictionary, filePath):
    """Write *dictionary* to *filePath* as pretty-printed UTF-8 JSON."""
    with open(filePath, 'w', encoding='utf-8') as f:
        json.dump(dictionary, f, ensure_ascii=False, indent=1)

def get_relative_path(path):
    """Return *path* relative to the configured models folder."""
    return os.path.relpath(path, start=settings['models_path'])

def lazyHTML2Text(text):
    """Crude HTML -> indented plain text for Civitai description fields.

    NOTE(review): the original tag patterns were mangled by extraction; the
    patterns below are reconstructed from the surviving replacement templates
    and should be confirmed against the upstream repository.
    """
    text = re.sub(r'<a[^>]*href="(.*?)"[^>]*>(.*?)</a>', r' \g<2>->\g<1> ', text)  # Link
    text = re.sub(r'<img[^>]*src="(.*?)"[^>]*>', r' \g<1> \n', text)  # Image
    text = re.sub(r'<li[^>]*>(.*?)</li>', r'- \g<1>', text)  # List item
    text = re.sub(r'<code[^>]*>(.*?)</code>', r'「\g<1>」', text)  # Code Block

    text = re.sub(r'<div[^>]*>(.*?)</div>', r'\n\g<1>\n', text)  # div
    text = re.sub(r'<h[0-9][^>]*>(.*?)</h[0-9]>', r'\g<1>\n', text)  # Header
    text = re.sub(r'<p[^>]*>(.*?)</p>', r'\g<1>\n', text)  # Paragraph
    text = re.sub(r'<br[^>]*>', '\n', text)  # br

    text = re.sub(r'<[^>]*>', '', text)  # Remove all other tags
    text = text.replace('&lt;', '<').replace('&gt;', '>')  # <>

    return '\n'.join([f'\t{line.strip()}' for line in text.splitlines()])

# Settings
folderPath = os.path.join(os.path.dirname(os.path.realpath(__file__)))
os.makedirs(folderPath, exist_ok=True)

defaultSettings = {'civitai_api_key': None, 'models_path': None, 'output_path': None, 'fullres_preview': False, 'rescan': False}
settingPath = os.path.join(folderPath, 'CivitaiInfoAgregator_settings.json')
try:
    with open(settingPath, 'r') as f:
        settings = json.load(f)
except FileNotFoundError:
    with open(settingPath, 'w') as f:
        json.dump(defaultSettings, f, ensure_ascii=False, indent=1)
    settings = defaultSettings

# Merge so settings files from older versions gain any new default keys.
settings = defaultSettings | settings

def changeValue(key, text):
    """Prompt the user for settings[*key*] until the value is confirmed."""
    while True:
        inputText = input(text)
        settings[key] = inputText

        inputText = input("Confirm? (y/n): ")
        if inputText.lower() == "y" or inputText == "":
            break
        elif inputText.lower() != "n":
            print("Input a valid value")

# Api Key
if not settings['civitai_api_key']:
    print('Civitai API key can be generated at the bottom of your "Account settings" page')
    changeValue('civitai_api_key', 'API Key: ')

# Input folder
if not settings['models_path']:
    print('\nThe models folder is where all your checkpoints/loras are located')
    while True:
        changeValue('models_path', 'Model folder path: ')

        if os.path.exists(settings['models_path']):
            # A file path is accepted too; fall back to its parent folder.
            if os.path.isfile(settings['models_path']):
                settings['models_path'] = os.path.dirname(settings['models_path'])
            break
        else:
            print("Input a valid input path")

# Output folder
defaultOutputFolder = os.path.join(folderPath, "models")
if not settings['output_path']:
    print('\nThe output folder is where all the data will be generated')
    while True:
        print(f'Default output path is: "{defaultOutputFolder}"')

        inputText = input("Confirm? (y/n): ")
        if inputText.lower() == "y" or inputText == "":
            settings['output_path'] = defaultOutputFolder
            os.makedirs(defaultOutputFolder, exist_ok=True)
            break
        elif inputText.lower() == "n":
            break
        else:
            print("Input a valid value")
# Only reached still-unset when the default folder was declined above.
if not settings['output_path']:
    while True:
        changeValue('output_path', 'Output folder path: ')

        if os.path.exists(settings['output_path']):
            if os.path.isfile(settings['output_path']):
                settings['output_path'] = os.path.dirname(settings['output_path'])
            break
        else:
            print("Input a valid input path")

# Save Settings
save_dict(settings, settingPath)

# Cache
defaultCache = {'SHA256': None, 'civitai_has_data': None}
cachePath = os.path.join(folderPath, 'CivitaiInfoAgregator_cache.json')
try:
    with open(cachePath, 'r') as f:
        cache = json.load(f)
except FileNotFoundError:
    # BUG FIX: the original wrote the empty cache to settingPath here,
    # clobbering the settings file that was just saved above.
    with open(cachePath, 'w') as f:
        json.dump({}, f, ensure_ascii=False, indent=1)
    cache = {}

print(f"{ok_color}{get_separator('Civitai Agregator V1.0', os.get_terminal_size()[0]-2)}{reset_color}")

# Collect every model file under the models folder.
files = []
for ext in fileExtentions:
    files.extend(glob.glob(os.path.join(settings['models_path'], '**', ext), recursive=True))

fileCount = len(files)
lenCount = len(str(fileCount))

if fileCount == 0:
    input(f'{error_color}No files detected:\npress "enter" to exit{reset_color}')
    exit()
else:
    print(f"found: {fileCount} models\n")

# Hash every model (cached) and decide which ones need a Civitai lookup.
data = {}
hashes = []
for i, file in enumerate(files):

    fileName = os.path.split(file)[1]

    if fileName in cache:
        sha256 = cache[fileName]['SHA256']
        # Upgrade old cache entries with any newly-added default keys.
        cache[fileName] = defaultCache.copy() | cache[fileName]
    else:
        fileSize = os.stat(file).st_size

        print(f'{i+1:>{lenCount}d}/{fileCount} caching hash "{fileName}"')

        # Only display a progress bar for files larger than 1 GiB.
        if fileSize > 1024**3:
            bar = tqdm(total=fileSize, unit='B', unit_scale=True, bar_format='{n_fmt}/{total_fmt} |{bar}| {percentage:3.0f}% {elapsed_s:.3f}s', dynamic_ncols=True, colour=bar_color[0])
        else:
            bar = None

        # Calculate SHA256 hash
        hash_sha256 = hashlib.sha256()
        blksize = 1024 * 1024

        with open(file, "rb") as f:
            for chunk in iter(lambda: f.read(blksize), b""):

                if bar:
                    update_bar(bar, len(chunk))

                hash_sha256.update(chunk)

        sha256 = hash_sha256.hexdigest().upper()

        cache[fileName] = defaultCache.copy()
        cache[fileName]['SHA256'] = sha256

        if bar:
            bar.leave = False
            bar.close()
        # Persist after each new hash so an interrupted run keeps progress.
        save_dict(cache, cachePath)

    if sha256 in data:
        print(f'{error_color}"{get_relative_path(file)}" is a duplicate of "{get_relative_path(data[sha256]["filepath"])}"{reset_color}')
        continue

    data[sha256] = {}
    data[sha256]['filepath'] = file
    data[sha256]['version'] = {}

    # Mirror the models folder structure inside the output folder.
    modelFolder = os.path.relpath(file, start=settings['models_path'])
    modelFolder = os.path.join(settings['output_path'], os.path.splitext(modelFolder)[0])
    infoPath = os.path.join(modelFolder, os.path.splitext(os.path.split(file)[1])[0]+'.txt')

    data[sha256]['model_folder'] = modelFolder

    # Re-query when never matched before, when output files went missing,
    # or when a full rescan was requested.
    ifNew = cache[fileName]['civitai_has_data'] is None
    ifMissingFile = (cache[fileName]['civitai_has_data'] == True and (not os.path.exists(modelFolder) or not os.path.exists(infoPath)))
    if ifNew or ifMissingFile or settings['rescan']:
        hashes.append(sha256)


save_dict(cache, cachePath)

# 'rescan' is a one-shot flag: reset it immediately after honouring it.
if settings['rescan']:
    print('RESCAN')
    settings['rescan'] = False
    save_dict(settings, settingPath)

if len(hashes) == 0:
    input(f'No new model to match\n{ok_color}Done (press "enter" to exit){reset_color}')
    exit()
# Civitai request by hash
print(f"\nRequesting: {len(hashes)} models from Civitai.com")
results = asyncio.run(get_all_models_by_hash(hashes))

# Match each returned version back to a local file by SHA256.
models = {}
for result in results:
    for file in result['files']:

        if not 'SHA256' in file['hashes']:
            continue

        sha256 = file['hashes']['SHA256']

        if not sha256 in data:
            continue

        # Skip bundled VAE files; they belong to the version but are not
        # the model file itself.
        if file['type'] == 'VAE':
            continue

        data[sha256]['version'] = result
        models[sha256] = result['modelId']
        break

# get model data
print(f"Agregating data of {len(models)} matched models:")
result = asyncio.run(get_all_models(models))
newModels = {}
for x in result:
    newModels[x[0]] = data[x[0]]
    newModels[x[0]]['model'] = x[1]

print()


for hashKey in hashes:
    if not hashKey in newModels and not data[hashKey]['version']:
        path = get_relative_path(data[hashKey]['filepath'])
        print(f'Could not find Civitai Data for "{path}"')

        # BUG FIX: the original indexed the cache with the stale `fileName`
        # left over from the hashing loop, flagging the wrong model. Derive
        # the file name from this hash's own file path instead.
        cache[os.path.split(data[hashKey]['filepath'])[1]]['civitai_has_data'] = False

# Main loop
dataCount = len(newModels)
lenCount = len(str(dataCount))
newImages = []
for i, hashKey in enumerate(newModels):
    model = newModels[hashKey]

    counter = f'{i+1:>{lenCount}d}/{dataCount}'

    fileName = os.path.split(model['filepath'])[1]
    infoPath = os.path.join(model['model_folder'], os.path.splitext(fileName)[0]+'.txt')


    modelName = model['version']['model']['name']
    modelType = model['version']['model']['type']
    modelID = model['version']['modelId']

    versionName = model['version']['name']
    versionID = model['version']['id']

    print(f'{counter} Found {modelType} "{modelName}" version "{versionName}" on Civitai ')

    os.makedirs(model['model_folder'], exist_ok=True)

    # Header: creator, identity, and canonical Civitai link.
    creator = model['model']['creator']['username']
    fileText = f"Creator: {creator}\nhttps://civitai.com/user/{creator}/models"

    fileText += f'\n\nModel type: {modelType}\nModel Name: {modelName}\nVersion Name: {versionName}'
    fileText += f"\nBase version: {model['version']['baseModel']}"
    fileText += f'\nhttps://civitai.com/models/{modelID}?modelVersionId={versionID}'

    if len(model['version']['trainedWords']) != 0:
        fileText += f"\n\n{get_separator('Trigger Words:')}\n{', '.join(model['version']['trainedWords'])}"

    if model['version']['description']:
        fileText += f"\n\n{get_separator('Version Description:')}\n{lazyHTML2Text(model['version']['description'])}"

    if model['model']['description']:
        fileText += f"\n\n{get_separator('Model Description:')}\n{lazyHTML2Text(model['model']['description'])}"

    # Preview images: queue downloads and reconstruct Auto1111 prompt info.
    if len(model['version']['images']) != 0:
        prompts = []
        for image in model['version']['images']:

            if settings['fullres_preview']:
                # Swap the CDN's width segment for the image's native width.
                imageUrl = re.sub(r'\/width=[0-9]*\/', f"/width={image['width']}/", image['url'])
            else:
                imageUrl = image['url']

            imageID = os.path.splitext(os.path.basename(imageUrl))[0]
            imagePath = os.path.join(model['model_folder'], f'preview_{imageID}.png')

            # reconstruct auto1111 exif data
            pngInfo = ''
            if image['meta']:
                meta = {}
                for key, parameter in image['meta'].items():
                    if key in exifRename:
                        key = exifRename[key]
                    else:
                        key = key.capitalize()

                    if key in exifOrder:
                        meta[key] = parameter

                # Emit prompt and negative prompt on their own lines, then
                # the remaining parameters comma-joined in canonical order.
                parameters = []
                for key in exifOrder:

                    if key in meta:
                        v = meta[key]

                        if key == 'Prompt' and 'Prompt' in meta:
                            pngInfo += f"{meta['Prompt']}\n"

                        elif key == 'Negative prompt' and 'Negative prompt' in meta:
                            pngInfo += f"Negative prompt: {meta['Negative prompt']}\n"

                        else:
                            parameters.append(f'{key}: {v}')

                pngInfo += ', '.join(parameters)

                # Remove blank lines
                pngInfo = '\n'.join([ll.rstrip() for ll in pngInfo.splitlines() if ll.strip()])

            if not os.path.exists(imagePath):
                newImages.append({'url': imageUrl, 'path': imagePath, 'pngInfo': pngInfo})

            if pngInfo:
                prompts.append(pngInfo)

        if len(prompts) != 0:
            fileText += f'\n\n{get_separator("Example Prompts:")}\n'
            fileText += '\n\n'.join(prompts)


    fileText += f"\n\n{get_separator('Licenses:')}"
    fileText += f"\n\nAllow no credit: {model['model']['allowNoCredit']}"
    fileText += f"\nAllow Comercial Use: {model['model']['allowCommercialUse']}"
    fileText += f"\nAllow Derivative: {model['model']['allowDerivatives']}"
    fileText += f"\nAllow Different License: {model['model']['allowDifferentLicense']}"


    # File Metadata: read the first MiB of the model file and mine the
    # embedded safetensors training metadata.
    # BUG FIX: removed the redundant f.close() the original called inside
    # the `with` block; the context manager already closes the file.
    with open(model['filepath'], "rb") as f:
        text = f.read(1024 * 1024)

    try:
        text = text.decode("iso-8859-1")
        m = re.search(r'{"__metadata__":({".*"}?)', text)

        if m and m.group(1):
            metadataText = m.group(1)

            metadata = {}

            # Plain "key":"value" pairs.
            for keyPairs in re.finditer(r'"([^"]*)":"([^{"]*?)"', metadataText):
                if not keyPairs.group(1) in trainingMetadataAllow:
                    continue
                metadata[keyPairs.group(1)] = keyPairs.group(2)

            # "key":"<escaped JSON object/array>" pairs.
            for dictPairs in re.finditer(r'"([^"]*)":"([\[{].*?[\]}])"', metadataText):
                if not dictPairs.group(1) in trainingMetadataAllow:
                    continue
                try:
                    metadata[dictPairs.group(1)] = json.loads(dictPairs.group(2).replace('\\', ''))
                except json.JSONDecodeError:
                    # Malformed embedded JSON: skip this entry.
                    pass

            trainingText = f"\n\n{get_separator('Training Data:')}"

            if 'ss_clip_skip' in metadata:
                trainingText += f"\n Clip Skip: {metadata['ss_clip_skip']}"

            if 'ss_epoch' in metadata:
                trainingText += f"\n Epoch: {metadata['ss_epoch']}"

            if 'ss_max_train_steps' in metadata:
                trainingText += f"\n Steps: {metadata['ss_max_train_steps']}"

            if 'ss_num_batches_per_epoch' in metadata:
                trainingText += f"\n Batch / Epoch: {metadata['ss_num_batches_per_epoch']}"


            # Tags
            if 'ss_datasets' in metadata:
                metadata['tags'] = metadata['ss_datasets'][0]['tag_frequency']

            elif 'ss_tag_frequency' in metadata:
                metadata['tags'] = metadata['ss_tag_frequency']

            if 'tags' in metadata:
                key = next(iter(metadata['tags'].keys()))

                if len(metadata['tags'][key].keys()) != 0:
                    # Top 100 tags by frequency, rendered as a fixed table.
                    tagsSorted = (sorted(metadata['tags'][key].items(), key=lambda x: x[1], reverse=True))[:100]
                    maxKey = max(len(v[0]) for v in tagsSorted)
                    maxCount = max(len(str(tagsSorted[0][1])), 7)

                    trainingText += f'\n\n Tags:\n {"[TAGS]":<{maxKey}}│{"[COUNT]":^{maxCount}}\n ─{"─"*maxKey}┼{"─"*maxCount}─'
                    for tag in tagsSorted:
                        trainingText += f'\n {tag[0].strip():<{maxKey}}│{tag[1]:^{maxCount}}'

            if metadata:
                fileText += trainingText

    except Exception as e:
        print(f"Unable to parse metadata for {fileName} -> {e.__class__}.")
        # Guard: exception args may be non-string.
        print('\n '.join(str(arg) for arg in e.args))

    with open(infoPath, 'w', encoding='utf-8') as f:
        f.write(fileText)

    cache[fileName]['civitai_has_data'] = True

save_dict(cache, cachePath)

# Download images preview files
print(f'\nDownloading new preview images')
asyncio.run(get_all_images(newImages))

input(f'\n{ok_color}Done (press "enter" to exit){reset_color}')