├── README.md ├── cleaning ├── clean.py ├── find_duplicate_images.py ├── modify_1_pixel.py ├── order_and_clean.py ├── prune_missing_jsons.py ├── remove_extra_filename.py ├── remove_json_extensions.py ├── rename_numerical.py ├── replace_cid.py ├── shuffle_and_clean.py └── shuffle_multiple.py ├── csv └── json_to_csv.py ├── etc ├── add_overlay.py ├── get_images_average.py └── shuffle_pngs.py ├── example-files └── csv_to_json_sample.csv ├── json ├── generate_json_args_no_traits.py ├── generate_json_args_no_traits_piped.py ├── generate_json_no_traits.py ├── generate_json_unrevealed.py ├── make_from_folders.py └── make_json_from_csv.py ├── refresh └── refresh_collection_opensea.py ├── verify ├── calculate_rarity.py ├── check_metadata.py ├── find_dupes_in_input.py ├── openrarity_fromjson.py ├── openrarity_fromjson_tocsv.py ├── validate_json.py ├── validate_unique_attributes.py ├── verify_for_instareveal.py └── verify_jsons_id_in_image.py └── whitelist-snapshots ├── blacklist_checker.py ├── combine_snapshots.py └── combine_snapshots_common.py /README.md: -------------------------------------------------------------------------------- 1 | # useful-scripts 2 | A collection of useful scripts when making collections for Scatter.art 3 | 4 | Check this link for more info: https://docs.scatter.art/docs/creators/useful-scripts 5 | 6 | Webtools versions: https://webtools.scatter.art/ (not all scripts have webtool versions) 7 | 8 | > THERE MAY BE BUGS! USE AT YOUR OWN RISK! VERIFY OUTPUT IS PROPER BEFORE YOU PUBLISH! 9 | -------------------------------------------------------------------------------- /cleaning/clean.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | input_folder = "json" 5 | output_folder = "output" 6 | 7 | if not os.path.exists(output_folder): 8 | os.makedirs(output_folder) 9 | 10 | for filename in os.listdir(input_folder): 11 | if filename == '.DS_Store': # Skip macOS .DS_Store files 12 | continue 13 | #if filename.endswith(".json"): 14 | file_path = os.path.join(input_folder, filename) 15 | with open(file_path, "r", encoding="utf-8") as f: 16 | data = json.load(f) 17 | 18 | if filename.endswith(".json"): 19 | filename = filename[:-5] 20 | 21 | # Update JSON fields 22 | data["name"] = f"Name {filename}" 23 | data["image"] = f"ipfs://REPLACEME/{filename}.png" 24 | data["description"] = "Your description" 25 | 26 | if "imageHash" in data: 27 | del data["imageHash"] 28 | if "dna" in data: 29 | del data["dna"] 30 | if "edition" in data: 31 | del data["edition"] 32 | if "date" in data: 33 | del data["date"] 34 | if "custom_fields" in data: 35 | del data["custom_fields"] 36 | if "file_url" in data: #not actually part of JSON format https://docs.opensea.io/docs/metadata-standards 37 | del data["file_url"] 38 | if "id" in data: 39 | del data["id"] 40 | if "seller_fee_basis_points" in data: 41 | del data["seller_fee_basis_points"] 42 | if "compiler" in data: 43 | del data["compiler"] 44 | if "properties" in data: 45 | del data["properties"] 46 | if "collection" in data: 47 | del data["collection"] 48 | if "external_url" in data: 49 | del data["external_url"] 50 | 51 | output_file_path = os.path.join(output_folder, filename) 52 | with open(output_file_path, "w", encoding="utf-8") as f: 53 | json.dump(data, f, indent=4) 54 | -------------------------------------------------------------------------------- /cleaning/find_duplicate_images.py: -------------------------------------------------------------------------------- 1 | 
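# Added note (not part of the original script): the MD5 comparison below only
# flags files that are byte-for-byte identical. Images that look the same but
# were re-encoded, resized, or re-saved will hash differently and slip through.
# If you need to catch those, a perceptual hash is one option -- a rough sketch,
# assuming the third-party "ImageHash" package (pip install ImageHash) is available:
#
#     from PIL import Image
#     import imagehash
#
#     def get_image_phash(image_path):
#         # visually similar images yield hashes with a small Hamming distance
#         return imagehash.phash(Image.open(image_path))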
import os 2 | import hashlib 3 | from PIL import Image 4 | 5 | def get_image_hash(image_path): 6 | with open(image_path, 'rb') as f: 7 | image_data = f.read() 8 | return hashlib.md5(image_data).hexdigest() 9 | 10 | def find_duplicate_images(images_folder): 11 | image_hashes = {} 12 | duplicate_images = [] 13 | 14 | for root, _, files in os.walk(images_folder): 15 | for file in files: 16 | if file.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')): 17 | image_path = os.path.join(root, file) 18 | image_hash = get_image_hash(image_path) 19 | 20 | if image_hash in image_hashes: 21 | duplicate_images.append((image_path, image_hashes[image_hash])) 22 | else: 23 | image_hashes[image_hash] = image_path 24 | 25 | return duplicate_images 26 | 27 | def main(): 28 | images_folder = "images" 29 | duplicate_images = find_duplicate_images(images_folder) 30 | 31 | if duplicate_images: 32 | print("Duplicate images found:") 33 | for duplicate, original in duplicate_images: 34 | print(f" {duplicate} is a duplicate of {original}") 35 | else: 36 | print("No duplicate images found.") 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /cleaning/modify_1_pixel.py: -------------------------------------------------------------------------------- 1 | # This modifies images directly make sure you have a backup! 2 | 3 | import glob 4 | from PIL import Image 5 | 6 | files = [] 7 | for file in glob.glob('./images/*'): 8 | files.append(file) 9 | 10 | for file in files: 11 | if file.endswith(".png"): 12 | print(file) 13 | image = Image.open(file) 14 | rgb_image = image.convert("RGB") 15 | rgb_pixel_value = rgb_image.getpixel((0, 0)) 16 | print(rgb_pixel_value[0]) 17 | new_rgb = (rgb_pixel_value[0] - 1, 18 | rgb_pixel_value[1] - 1, rgb_pixel_value[2] - 1, 255) 19 | print(new_rgb) 20 | image.putpixel((0, 0), new_rgb) 21 | image.save(file) -------------------------------------------------------------------------------- /cleaning/order_and_clean.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import json 4 | import random 5 | import shutil 6 | import re 7 | 8 | def extract_number(filename): 9 | match = re.search(r'\d+', filename) 10 | return int(match.group()) if match else None 11 | 12 | input_dir = "input" 13 | output_dir = "output" 14 | 15 | json_files = glob.glob(os.path.join(input_dir, "json", "*")) 16 | image_files = glob.glob(os.path.join(input_dir, "images", "*")) 17 | 18 | sorted_json_files = sorted(json_files, key=extract_number) 19 | sorted_image_files = sorted(image_files, key=extract_number) 20 | 21 | # Make sure the number of JSON and image files are the same 22 | print(len(json_files), len(image_files)) 23 | assert len(json_files) == len(image_files), "The number of JSON and image files should be the same" 24 | 25 | # Create output directories if they don't exist 26 | os.makedirs(os.path.join(output_dir, "json"), exist_ok=True) 27 | os.makedirs(os.path.join(output_dir, "images"), exist_ok=True) 28 | 29 | # Create a list of pairs (json_file, image_file) 30 | pairs = list(zip(sorted_json_files, sorted_image_files)) 31 | #random.shuffle(pairs) 32 | 33 | # Process the pairs 34 | for i, (json_file, image_file) in enumerate(pairs, start=1): 35 | # Load JSON 36 | with open(json_file, "r", encoding="utf-8") as f: 37 | data = json.load(f) 38 | 39 | # Update JSON fields 40 | data["name"] = f"Name {i}" 41 | data["image"] = f"ipfs://REPLACEME/{i}.png" 42 | 
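# Note: the next line overwrites any description already present in the source JSON;
# comment it out (as shuffle_and_clean.py does) if you want to keep the original text.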
data["description"] = "Your description" 43 | 44 | if "imageHash" in data: 45 | del data["imageHash"] 46 | if "dna" in data: 47 | del data["dna"] 48 | if "edition" in data: 49 | del data["edition"] 50 | if "date" in data: 51 | del data["date"] 52 | if "custom_fields" in data: 53 | del data["custom_fields"] 54 | if "file_url" in data: #not actually part of JSON format https://docs.opensea.io/docs/metadata-standards 55 | del data["file_url"] 56 | if "id" in data: 57 | del data["id"] 58 | if "seller_fee_basis_points" in data: 59 | del data["seller_fee_basis_points"] 60 | if "compiler" in data: 61 | del data["compiler"] 62 | if "properties" in data: 63 | del data["properties"] 64 | if "collection" in data: 65 | del data["collection"] 66 | if "external_url" in data: 67 | del data["external_url"] 68 | 69 | # Save the updated JSON 70 | with open(os.path.join(output_dir, "json", f"{i}"), "w", encoding="utf-8") as f: 71 | json.dump(data, f, indent=4) 72 | 73 | # Copy the corresponding image 74 | shutil.copy(image_file, os.path.join(output_dir, "images", f"{i}.png")) 75 | -------------------------------------------------------------------------------- /cleaning/prune_missing_jsons.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | # Set input and output folder paths 5 | input_folder = "input" 6 | output_folder = "output" 7 | 8 | # Check if output folder exists, if not, create it 9 | if not os.path.exists(output_folder): 10 | os.makedirs(output_folder) 11 | 12 | # Create images and json folders inside output folder if they don't already exist 13 | for folder in ["images", "json"]: 14 | folder_path = os.path.join(output_folder, folder) 15 | if not os.path.exists(folder_path): 16 | os.makedirs(folder_path) 17 | 18 | # Loop through files in input/images folder and store them in a list 19 | input_images_folder = os.path.join(input_folder, "images") 20 | image_files = [f for f in os.listdir(input_images_folder) if os.path.isfile(os.path.join(input_images_folder, f))] 21 | 22 | # Copy each image file from input/images to output/images 23 | for image_file in image_files: 24 | src = os.path.join(input_images_folder, image_file) 25 | dst = os.path.join(output_folder, "images", image_file) 26 | shutil.copy(src, dst) 27 | 28 | # Check for matching JSON files and copy them to output/json 29 | file_name, file_ext = os.path.splitext(image_file) 30 | json_file = file_name + ".json" 31 | json_src = os.path.join(input_folder, "json", json_file) 32 | if os.path.isfile(json_src): 33 | json_dst = os.path.join(output_folder, "json", json_file) 34 | shutil.copy(json_src, json_dst) 35 | 36 | print("Prune process complete.") 37 | -------------------------------------------------------------------------------- /cleaning/remove_extra_filename.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import shutil 4 | 5 | input_folder = "input" 6 | output_folder = "output" 7 | 8 | # Create output folder if it does not exist 9 | if not os.path.exists(output_folder): 10 | os.makedirs(output_folder) 11 | 12 | # Iterate through all files in the input folder 13 | for filename in os.listdir(input_folder): 14 | # Extract the first number from the file name 15 | match = re.match(r"(\d+).*", filename) 16 | if match: 17 | new_filename = f"{match.group(1)}.png" 18 | old_path = os.path.join(input_folder, filename) 19 | new_path = os.path.join(output_folder, new_filename) 20 | # Copy the file and rename it 21 
| shutil.copy2(old_path, new_path) 22 | -------------------------------------------------------------------------------- /cleaning/remove_json_extensions.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Define the folder path containing the .json files 4 | folder_path = "json" 5 | 6 | # Iterate through all the files in the folder 7 | for filename in os.listdir(folder_path): 8 | # Check if the file has a .json extension 9 | if filename.endswith(".json"): 10 | # Remove the .json extension from the filename 11 | new_filename = filename[:-5] 12 | 13 | # Create the full file paths for both the original and the renamed files 14 | old_file_path = os.path.join(folder_path, filename) 15 | new_file_path = os.path.join(folder_path, new_filename) 16 | 17 | # Rename the file 18 | os.rename(old_file_path, new_file_path) 19 | print(f"Renamed {old_file_path} to {new_file_path}") 20 | -------------------------------------------------------------------------------- /cleaning/rename_numerical.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | # Create the "output" folder if it doesn't exist 5 | if not os.path.exists("output"): 6 | os.makedirs("output") 7 | 8 | # Loop through all the .png files in the "input" folder 9 | i = 1 10 | for filename in os.listdir("input"): 11 | if filename.lower().endswith(".png"): 12 | # Construct the new filename as a zero-padded number 13 | new_filename = f"{i}.png" 14 | # Copy the file to the "output" folder with the new filename 15 | shutil.copy(os.path.join("input", filename), os.path.join("output", new_filename)) 16 | i += 1 17 | -------------------------------------------------------------------------------- /cleaning/replace_cid.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Iterate over all files in the current directory 4 | for file in os.listdir(): 5 | # Open the file for reading 6 | if file == '.DS_Store': # Skip macOS .DS_Store files 7 | continue 8 | with open(file, 'r', encoding="utf-8") as f: 9 | # Read the contents of the file 10 | contents = f.read() 11 | # Replace "REPLACEME" with "REPLACED" 12 | contents = contents.replace("REPLACEME", "REPLACED") 13 | # Open the file for writing 14 | with open(file, 'w') as f: 15 | # Write the modified contents to the file 16 | f.write(contents) 17 | -------------------------------------------------------------------------------- /cleaning/shuffle_and_clean.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import json 4 | import random 5 | import shutil 6 | 7 | # Seed for randomness 8 | random.seed(1234) # change this number if you want a different random order 9 | 10 | # Directories 11 | input_dir = "input" 12 | output_dir = "output" 13 | 14 | # Verify input directories exist 15 | assert os.path.isdir(os.path.join(input_dir, "json")), "The directory 'input/json' does not exist" 16 | assert os.path.isdir(os.path.join(input_dir, "images")), "The directory 'input/images' does not exist" 17 | 18 | # Find all JSON and image files 19 | json_files = [jf for jf in glob.glob(os.path.join(input_dir, "json", "*")) if not jf.endswith('.DS_Store')] 20 | image_files = [ifile for ifile in glob.glob(os.path.join(input_dir, "images", "*")) if not ifile.endswith('.DS_Store')] 21 | 22 | # Create dictionaries with basename (without extension) as keys and full path as values 23 | 
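# Keying by basename (e.g. "12") pairs input/json/12 (or 12.json) with input/images/12.png
# even if the two folders list their files in a different order.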
json_files_dict = {os.path.splitext(os.path.basename(jf))[0]: jf for jf in json_files} 24 | image_files_dict = {os.path.splitext(os.path.basename(ifile))[0]: ifile for ifile in image_files} 25 | 26 | # Make sure the number of JSON and image files are the same 27 | assert set(json_files_dict.keys()) == set(image_files_dict.keys()), "The JSON and image file names should match" 28 | 29 | # Create output directories if they don't exist 30 | os.makedirs(os.path.join(output_dir, "json"), exist_ok=True) 31 | os.makedirs(os.path.join(output_dir, "images"), exist_ok=True) 32 | 33 | # Create a list of pairs (json_file, image_file) 34 | pairs = [(json_files_dict[key], image_files_dict[key]) for key in json_files_dict.keys()] 35 | random.shuffle(pairs) 36 | 37 | # Process the pairs 38 | for i, (json_file, image_file) in enumerate(pairs, start=1): 39 | # Load JSON 40 | with open(json_file, "r", encoding="utf-8") as f: 41 | data = json.load(f) 42 | 43 | # Update JSON fields 44 | data["name"] = f"Name #{i}" 45 | filename, file_extension = os.path.splitext(data['image']) 46 | file_extension = file_extension or '.png' # default to .png if no extension found 47 | data["image"] = f"ipfs://REPLACEME/{i}{file_extension}" 48 | #data["description"] = "uncomment to add description if you desire" 49 | 50 | for key in ["imageHash", "dna", "edition", "date", "custom_fields", "file_url", "id", 51 | "seller_fee_basis_points", "compiler", "properties", "collection", "external_url"]: 52 | data.pop(key, None) 53 | 54 | # Save the updated JSON without file extension 55 | with open(os.path.join(output_dir, "json", f"{i}"), "w", encoding="utf-8") as f: 56 | json.dump(data, f, indent=4) 57 | 58 | # Copy the corresponding image 59 | shutil.copy(image_file, os.path.join(output_dir, "images", f"{i}{file_extension}")) 60 | -------------------------------------------------------------------------------- /cleaning/shuffle_multiple.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import json 4 | import random 5 | import shutil 6 | 7 | # Seed for randomness 8 | random.seed(12345) # change this number if you want a different random order 9 | 10 | # Base directories, the input folder should have multiple folders that each has an images/json folders pair 11 | input_base_dir = "input" 12 | output_dir = "output" 13 | 14 | # Ensure output directories exist 15 | os.makedirs(os.path.join(output_dir, "json"), exist_ok=True) 16 | os.makedirs(os.path.join(output_dir, "images"), exist_ok=True) 17 | 18 | def process_folder(subfolder): 19 | # Construct paths to JSON and image subdirectories 20 | json_path = os.path.join(input_base_dir, subfolder, "json") 21 | image_path = os.path.join(input_base_dir, subfolder, "images") 22 | 23 | # Verify directories exist 24 | assert os.path.isdir(json_path), f"The directory '{json_path}' does not exist" 25 | assert os.path.isdir(image_path), f"The directory '{image_path}' does not exist" 26 | 27 | # Find all JSON and image files 28 | json_files = [jf for jf in glob.glob(os.path.join(json_path, "*")) if not jf.endswith('.DS_Store')] 29 | image_files = [ifile for ifile in glob.glob(os.path.join(image_path, "*")) if not ifile.endswith('.DS_Store')] 30 | 31 | # Create dictionaries with basename (without extension) as keys and full path as values 32 | json_files_dict = {os.path.splitext(os.path.basename(jf))[0]: jf for jf in json_files} 33 | image_files_dict = {os.path.splitext(os.path.basename(ifile))[0]: ifile for ifile in image_files} 34 | 35 
| # Ensure matching sets of JSON and image files 36 | assert set(json_files_dict.keys()) == set(image_files_dict.keys()), "The JSON and image file names should match" 37 | 38 | # Create list of (json_file, image_file) pairs 39 | pairs = [(json_files_dict[key], image_files_dict[key]) for key in json_files_dict.keys()] 40 | return pairs 41 | 42 | # List all subfolders in the input directory 43 | subfolders = [f.name for f in os.scandir(input_base_dir) if f.is_dir()] 44 | 45 | all_pairs = [] 46 | for subfolder in subfolders: 47 | pairs = process_folder(subfolder) 48 | all_pairs.extend(pairs) 49 | 50 | # Shuffle the combined list of pairs 51 | random.shuffle(all_pairs) 52 | 53 | # Process all shuffled pairs 54 | for i, (json_file, image_file) in enumerate(all_pairs, start=1): 55 | # Load JSON 56 | with open(json_file, "r", encoding="utf-8") as f: 57 | data = json.load(f) 58 | 59 | # Update JSON fields 60 | data["name"] = f"Name #{i}" 61 | filename, file_extension = os.path.splitext(data['image']) 62 | file_extension = file_extension or '.png' # default to .png if no extension found 63 | data["image"] = f"ipfs://REPLACEME/{i}{file_extension}" 64 | 65 | for key in ["imageHash", "dna", "edition", "date", "custom_fields", "file_url", "id", 66 | "seller_fee_basis_points", "compiler", "properties", "collection"]: 67 | data.pop(key, None) 68 | 69 | # Save the updated JSON 70 | with open(os.path.join(output_dir, "json", f"{i}"), "w", encoding="utf-8") as f: 71 | json.dump(data, f, indent=4) 72 | 73 | # Copy the corresponding image 74 | shutil.copy(image_file, os.path.join(output_dir, "images", f"{i}{file_extension}")) 75 | -------------------------------------------------------------------------------- /csv/json_to_csv.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import csv 4 | 5 | # Set the folder path containing JSON files 6 | json_folder = "json" 7 | 8 | # Create a list of JSON file paths 9 | json_files = [os.path.join(json_folder, file) for file in os.listdir(json_folder) if file.endswith('.json')] 10 | 11 | # Initialize the CSV data 12 | csv_data = [] 13 | 14 | # Loop through the JSON files 15 | for json_file in json_files: 16 | with open(json_file, 'r') as file: 17 | data = json.load(file) 18 | 19 | # Extract name, description, and image fields 20 | row = { 21 | 'name': data['name'], 22 | 'description': data['description'], 23 | 'image': data['image'] 24 | } 25 | 26 | # Loop through attributes 27 | for attribute in data['attributes']: 28 | trait_type = attribute['trait_type'] 29 | value = attribute['value'] 30 | 31 | # Add the trait_type and value to the row 32 | row[trait_type] = value 33 | 34 | # Add the row to the CSV data 35 | csv_data.append(row) 36 | 37 | # Get unique column names from the csv_data 38 | csv_columns = set() 39 | for row in csv_data: 40 | csv_columns.update(row.keys()) 41 | 42 | # Write the CSV data to the output.csv file 43 | with open('output.csv', 'w', newline='') as csvfile: 44 | writer = csv.DictWriter(csvfile, fieldnames=csv_columns) 45 | writer.writeheader() 46 | for row in csv_data: 47 | writer.writerow(row) 48 | -------------------------------------------------------------------------------- /etc/add_overlay.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import os 3 | 4 | def overlay_images(source_folder, overlay_image_path, output_folder): 5 | """ 6 | Overlays a transparent image on all images within the specified folder. 
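    The overlay is resized to each source image's dimensions and alpha-composited
    on top, so it should be a PNG with an alpha channel (transparency), ideally
    with the same aspect ratio as the source images.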
7 | 8 | :param source_folder: Folder containing images to overlay on. 9 | :param overlay_image_path: Path to the transparent image to overlay. 10 | :param output_folder: Folder to save the overlayed images. 11 | """ 12 | # Ensure the output directory exists 13 | if not os.path.exists(output_folder): 14 | os.makedirs(output_folder) 15 | 16 | # Load the overlay image 17 | overlay = Image.open(overlay_image_path) 18 | 19 | # Iterate over all files in the source directory 20 | for file_name in os.listdir(source_folder): 21 | if file_name.lower().endswith('.png'): 22 | # Construct the full file path 23 | file_path = os.path.join(source_folder, file_name) 24 | 25 | # Open the source image 26 | source_image = Image.open(file_path) 27 | 28 | # Ensure the overlay is the same size as the source image 29 | overlay_resized = overlay.resize(source_image.size) 30 | 31 | # Composite the images 32 | combined_image = Image.alpha_composite(source_image.convert("RGBA"), overlay_resized) 33 | 34 | # Save to output directory 35 | combined_image.save(os.path.join(output_folder, file_name)) 36 | 37 | if __name__ == '__main__': 38 | SOURCE_FOLDER = './input/' # Update with the path to your folder containing the images 39 | OVERLAY_IMAGE_PATH = './overlay.png' # Update with the path to your transparent overlay image 40 | OUTPUT_FOLDER = './output/' # Update with the path to the folder where you want to save the overlayed images 41 | 42 | overlay_images(SOURCE_FOLDER, OVERLAY_IMAGE_PATH, OUTPUT_FOLDER) -------------------------------------------------------------------------------- /etc/get_images_average.py: -------------------------------------------------------------------------------- 1 | # This script will average together all images in the images folder to create an avg image file 2 | 3 | from PIL import Image 4 | import os 5 | import numpy as np 6 | 7 | image_filetype = '.png' 8 | 9 | def average_images(image_folder): 10 | image_files = [f for f in os.listdir(image_folder) if f.endswith(image_filetype)] 11 | image_files.sort() 12 | 13 | avg_img = None 14 | count = 0 15 | 16 | for image_file in image_files: 17 | print(f"{image_file}.. 
", end="") 18 | img_path = os.path.join(image_folder, image_file) 19 | img = Image.open(img_path).convert('RGBA') # Ensure the image is in RGBA format 20 | img_arr = np.array(img, dtype=np.float64) 21 | 22 | if avg_img is None: 23 | avg_img = img_arr 24 | else: 25 | avg_img = (count * avg_img + img_arr) / (count + 1) 26 | 27 | count += 1 28 | 29 | # Convert averaged image back into an image object 30 | avg_img = Image.fromarray(np.uint8(avg_img)) 31 | 32 | # Save the result 33 | avg_img.save("avg" + image_filetype) 34 | 35 | # Use the function 36 | average_images("images") 37 | -------------------------------------------------------------------------------- /etc/shuffle_pngs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import random 4 | 5 | # Define input and output directories 6 | input_dir = 'input' 7 | output_dir = 'output' 8 | 9 | # Check if the output directory exists, if not create it 10 | if not os.path.exists(output_dir): 11 | os.makedirs(output_dir) 12 | 13 | # Get all png files in the input directory 14 | files = [f for f in os.listdir(input_dir) if os.path.splitext(f)[1].lower() == '.png'] 15 | 16 | # Shuffle the files 17 | random.shuffle(files) 18 | 19 | # Copy and rename the files 20 | for i, file in enumerate(files, start=1): 21 | # Create the new file name 22 | new_file_name = f'{i}.png' 23 | 24 | # Get the paths 25 | input_file_path = os.path.join(input_dir, file) 26 | output_file_path = os.path.join(output_dir, new_file_name) 27 | 28 | # Copy the file 29 | shutil.copyfile(input_file_path, output_file_path) 30 | -------------------------------------------------------------------------------- /example-files/csv_to_json_sample.csv: -------------------------------------------------------------------------------- 1 | ,ID,Fur,Shirt,Glasses,Headwear,Accessory,Special 2 | ,1,Black,Neo Outfit,Sunglasses,None,Unlimited Power,Neo Rilla 3 | ,2,Brown,Baller Outfit,None,None,Basketball,Baller Rilla 4 | ,3,Brown,Royal Outfit,None,Keffiyeh,Unlimited Wealth,Royal Rilla 5 | ,4,Black,Master Outfit,Sunglasses,None,Unlimited Wealth,Master Rilla 6 | ,5,Black,Ozaru Outfit,None,None,Unlimited Power,Ozaru Rilla 7 | ,6,Black,Lab Outfit,None,Lab Ventilator,Research Chemicals,Lab Rilla 8 | ,7,Gold,Disco Outfit,Disco Glasses,None,Rilla Stogie,Disco Rilla 9 | ,8,Brown,Surfin Outfit,None,None,Surfboard,Surfin Rilla 10 | ,9,Grey,Continental Outfit,None,Beret,Continental Chow,Continental Rilla 11 | ,10,Black,Trad Outfit,None,Flat Cap,Rilla Stogie,Trad Rilla 12 | ,11,White,None,None,Skaal Helm,Odin's Blessings,Skaal Rilla 13 | ,12,Brown,Oedipus Complex,None,None,Rilla Chow Meal, 14 | ,13,Blue,North Rilla Jacket,Spectacles,Beret,None, 15 | ,14,Brown,Rilla Suit Jacket,None,None,None, 16 | ,15,Brown,Leather Jacket,None,None,Rilla Beatz, 17 | ,16,Brown,Samurai Outfit,None,None,Katana,Samurai Rilla 18 | ,17,Gold,Denim Jacket,None,None,Katana,Banana Time! 
19 | ,18,Pink,Denim Jacket,None,Hoodie,None, 20 | ,19,Black,Heavenly Outfit,None,Halo,None,Heavenly Rilla 21 | ,20,Black,None,Sunglasses,None,None, 22 | ,21,Brown,Heavyweight Outfit,None,None,None,Heavyweight Rilla 23 | ,22,Grey,Leather Jacket,Sunglasses,Hoodie,None, 24 | ,23,Purple,Leather Jacket,None,None,Rose, 25 | ,24,Gold,Rilla Suit Jacket,None,None,Tie, 26 | ,25,Blue,Collared Shirt,None,None,Tie, 27 | ,26,Purple,Collared Shirt,Sunglasses,None,Tie, 28 | ,27,Black,Denim Jacket,None,None,None, 29 | ,28,Brown,None,Sunglasses,Bomber Hat,None, 30 | ,29,Grey,Leather Jacket,None,Visor,None, 31 | ,30,Brown,Warring Rilla,None,Helm,Bandolier, 32 | ,31,Brown,None,None,Fuzzy Hat,Boombox, 33 | ,32,Pink,Shirt,Spectacles,Visor,None, 34 | ,33,White,None,Sunglasses,None,None, 35 | ,34,Spotted,Shirt,Sunglasses,None,None, 36 | ,35,Black,Leather Jacket,Sunglasses,Beanie,None, 37 | ,36,Grey,North Rilla Jacket,None,Bucket Hat,None, 38 | ,37,Black,None,Sunglasses,None,Chow Bag, 39 | ,38,Grey,Denim Jacket,None,Cap,None, 40 | ,39,White,North Rilla Jacket,Sunglasses,Leather Hat,None, 41 | ,40,Grey,Collared Shirt,Sunglasses,Beanie,Rilla Stogie, 42 | ,41,Pink,Sailor Outfit,None,Kawaii Wig,None,Kawaii Rilla 43 | ,42,Pink,Maid Outfit,None,Kawaii Wig,None,Kawaii Rilla 44 | ,43,Pink,School Outfit,None,Kawaii Wig,None,Kawaii Rilla 45 | ,44,Green,Flame Hoodie,None,Bucket Hat,None, 46 | ,45,Green,Black Sweater,None,360 Yankee Hat,None, 47 | ,46,Green,Mecha Outfit,None,None,None,Kawaii Rilla 48 | ,47,Red,Stripy Sweater,Alien Glasses,None,None, 49 | ,48,Red,Camo Jacket,Viper Glasses,None,None, 50 | ,49,White,Bathing Rilla Hoodie,None,Bathing Rilla Beanie,None, 51 | ,50,White,Pimp Outfit,None,Pimp Hat,None,Pimp Rilla 52 | ,51,White,Safari Outfit,None,Safari Hat,None,Safari Rilla 53 | ,52,White,Maid Outfit,None,Kitty Headset,None,Kawaii Rilla 54 | ,53,White,Rilla Suit Jacket,None,Leather Hat,None, 55 | ,54,White,Lab Coat,Lab Goggles,None,None, 56 | ,55,Blue,Stripy Sweater,None,Beret,None, 57 | ,56,Blue,Camo Jacket,None,Beret,None, 58 | ,57,Blue,Rilla Suit Jacket,Monocle,Top Hat,None, 59 | ,58,Blue,None,Party Glasses,Party Hat,None, 60 | ,59,Pink,Bathing Rilla Hoodie,None,Cap,None, 61 | ,60,Pink,Minion Shirt,None,ATF Cap,None, 62 | ,61,Pink,Fortnite Hoodie,None,Kawaii Wig,None, 63 | ,62,Pink,Shirt,Viper Glasses,None,None, 64 | ,63,Green,Crustaceous Treat,Alien Glasses,360 Yankee Hat,None, 65 | ,64,Red,Crustaceous Treat,None,Top Hat,None, 66 | ,65,Red,Oedipus Complex,None,Bomber Hat,None, 67 | ,66,Pink,None,None,Bomber Hat,Bisou Kitty, 68 | ,67,Blue,None,None,Bucket Hat,Rilla Chow Meal, 69 | ,68,Brown,Oedipus Complex,None,Silly Hat,None, 70 | ,69,Brown,Leather Jacket,None,None,None, 71 | ,70,Pink,None,None,Bucket Hat,None, 72 | ,71,Black,Denim Jacket,Spectacles,None,None, 73 | ,72,Brown,None,None,Silly Hat,None, 74 | ,73,Black,Vest,None,Cap,None, 75 | ,74,Brown,None,Sunglasses,Visor,None, 76 | ,75,Black,Shirt,Sunglasses,Leather Hat,None, 77 | ,76,Black,Hoodie,Sunglasses,Cap,None, 78 | ,77,Black,Raincoat,None,Bucket Hat,None, 79 | ,78,Black,Collared Shirt,Sunglasses,None,None, 80 | ,79,Black,Oedipus Complex,None,Silly Hat,None, 81 | ,80,Black,Sailor Outfit,None,Silly Hat,None,Kawaii Rilla 82 | ,81,Black,Safari Outfit,None,Bathing Rilla Beanie,None, 83 | ,82,Black,Safari Outfit,Monocle,Safari Hat,None,Safari Rilla 84 | ,83,Black,Maid Outfit,None,Kawaii Wig,None,Kawaii Rilla 85 | ,84,Black,Bathing Rilla Hoodie,None,Kawaii Wig,None, 86 | ,85,Black,Rilla Suit Jacket,None,360 Yankee Hat,None, 87 | ,86,Black,Pink Spotted 
Jacket,None,Beanie,None, 88 | ,87,Black,Milady Jersey,None,Kawaii Wig,None, 89 | ,88,Black,Rilla Mothko,None,Beanie,None, 90 | ,89,Black,Minion Shirt,None,Beret,None, 91 | ,90,Green,Minion Shirt,None,Bathing Rilla Beanie,None, 92 | ,91,Green,Milady Jersey,None,None,None, 93 | ,92,Red,Milady Jersey,Alien Glasses,None,None, 94 | ,93,White,Rilla Mothko,None,Strawby Hat,None, 95 | ,94,Blue,Pink Spotted Jacket,None,Strawby Hat,None, 96 | ,95,Green,Milady Jersey,None,Strawby Hat,None, 97 | ,96,Pink,Flame Hoodie,None,Strawby Hat,None, 98 | ,97,Pink,Pink Spotted Jacket,None,Heihei Hat,Bisou Bunny, 99 | ,98,Black,Fortnite Hoodie,None,Fez Hat,None, 100 | ,99,Blue,Rilla Mothko,Viper Glasses,Fez Hat,None, 101 | ,100,White,Minion Shirt,None,Fez Hat,None, -------------------------------------------------------------------------------- /json/generate_json_args_no_traits.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import json 3 | import os 4 | 5 | # Usage: python generate_json_no_traits.py 6 | 7 | if len(sys.argv) < 5: 8 | print("Insufficient arguments provided.") 9 | print("Usage: python generate_json_no_traits.py ") 10 | sys.exit(1) 11 | 12 | cid_hash_image = sys.argv[1] 13 | output_folder = sys.argv[2] 14 | image_filename = sys.argv[3] 15 | supply = int(sys.argv[4]) 16 | 17 | if not os.path.exists(output_folder): 18 | os.makedirs(output_folder) 19 | print(f'Created folder: "{output_folder}"') 20 | 21 | for x in range(1, supply + 1): 22 | dictionary = { 23 | "name": "Short name #" + str(x), 24 | "description": "Description", 25 | "external_url": "https://scatter.art/", 26 | "image": "ipfs://" + cid_hash_image + "/" + str(x) + image_filename, 27 | } 28 | 29 | with open(f"./{output_folder}/{x}", "w") as outfile: 30 | json.dump(dictionary, outfile, indent=4) 31 | -------------------------------------------------------------------------------- /json/generate_json_args_no_traits_piped.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import json 3 | 4 | # Usage: python generate_json_no_traits.py 5 | 6 | if len(sys.argv) < 5: 7 | print("Insufficient arguments provided.") 8 | print("Usage: python generate_json_no_traits.py ") 9 | sys.exit(1) 10 | 11 | cid_hash_image = sys.argv[1] 12 | output_folder = sys.argv[2] 13 | image_filename = sys.argv[3] 14 | supply = int(sys.argv[4]) 15 | 16 | metadata = [] 17 | 18 | for x in range(1, supply + 1): 19 | dictionary = { 20 | "name": "Short name #" + str(x), 21 | "description": "Description", 22 | "external_url": "https://scatter.art/", 23 | "image": "ipfs://" + cid_hash_image + "/" + str(x) + image_filename, 24 | } 25 | 26 | metadata.append(dictionary) 27 | 28 | # Convert metadata list to JSON and print each JSON object 29 | for data in metadata: 30 | print(json.dumps(data, indent=4)) 31 | -------------------------------------------------------------------------------- /json/generate_json_no_traits.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | # Usage: python generate_json_no_traits.py 5 | # You can edit the values below 6 | 7 | cid_hash_image = "REPLACEME" # upload image with https://nft.storage/ then you can link to its cid here 8 | output_folder = "output" 9 | image_filename = ".png" # include this if needed 10 | supply = 1000 11 | 12 | if not os.path.exists(output_folder): 13 | os.makedirs(output_folder) 14 | print(f'Created folder: "{output_folder}"') 15 | 16 | for x in range(1, 
supply + 1): 17 | dictionary = { 18 | "name": "Short name #" + str(x), 19 | "description": "Description ", 20 | "external_url": "https://scatter.art/", 21 | "image": "ipfs://" + cid_hash_image + "/" + str(x) + image_filename, 22 | } 23 | 24 | with open("./" + output_folder + "/" + str(x), "w") as outfile: 25 | json.dump(dictionary, outfile, indent=4) 26 | -------------------------------------------------------------------------------- /json/generate_json_unrevealed.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | # Usage: python generate_json.py 5 | # Make sure you create an output folder first 6 | # You can edit the values below 7 | 8 | cid_hash_image = "REPLACEME" # upload image with https://nft.storage/ then you can link to its cid here 9 | output_folder = "output" 10 | image_filename = "" # include this if needed 11 | supply = 1000 12 | 13 | if not os.path.exists(output_folder): 14 | os.makedirs(output_folder) 15 | print(f'Created folder: "{output_folder}"') 16 | 17 | for x in range(1, supply + 1): 18 | dictionary = { 19 | "name": "Short name #" + str(x), 20 | "description": "Description ", 21 | "external_url": "https://scatter.art/", 22 | "image": "ipfs://" + cid_hash_image + "/" + image_filename, 23 | } 24 | 25 | with open("./" + output_folder + "/" + str(x), "w") as outfile: 26 | json.dump(dictionary, outfile, indent=4) 27 | -------------------------------------------------------------------------------- /json/make_from_folders.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import json 4 | import re 5 | import random 6 | 7 | # Config 8 | START_INDEX = 1 # put your starting index here 9 | INPUT_FOLDER = 'input' # put your input folder here 10 | OUTPUT_FOLDER = 'output' # put your output folder here 11 | SHUFFLE_OUTPUT = True # set to True to shuffle output 12 | RANDOM_SEED = 123 # set the seed for random number generation 13 | SUBFOLDERS_WITHOUT_ID = ['User Bonklers'] # put your subfolder names here to not add IDs 14 | IPFS_CID = 'REPLACEME' 15 | NAME_PREFIX = 'Name' 16 | FILE_TYPE = '.png' 17 | 18 | def ensure_dir(directory): 19 | if not os.path.exists(directory): 20 | os.makedirs(directory) 21 | print(f'Created directory: {directory}') 22 | 23 | def create_json(index, collection, original_id, file_extension, output_folder=OUTPUT_FOLDER): 24 | data = { 25 | "name": f"{NAME_PREFIX} #{index}", 26 | "image": f"ipfs://{IPFS_CID}/{index}{file_extension}", 27 | "description": "Description.", 28 | "external_url": "https://scatter.art", 29 | "attributes": [ 30 | { 31 | "trait_type": "Collection", 32 | "value": f"{collection}" 33 | } 34 | ] 35 | } 36 | if collection not in SUBFOLDERS_WITHOUT_ID: 37 | data["attributes"].append({ 38 | "trait_type": f"{collection} ID", 39 | "value": f"{original_id}" 40 | }) 41 | 42 | with open(f'{output_folder}/json/{index}', 'w', encoding='utf-8') as outfile: 43 | json.dump(data, outfile, ensure_ascii=False, indent=4) 44 | print(f'Created JSON file: {output_folder}/json/{index}') 45 | 46 | def process_images(): 47 | current_index = START_INDEX 48 | ensure_dir(f'{OUTPUT_FOLDER}/images') 49 | ensure_dir(f'{OUTPUT_FOLDER}/json') 50 | 51 | for root, dirs, files in os.walk(INPUT_FOLDER): 52 | for dir in dirs: 53 | for file in os.listdir(os.path.join(root, dir)): 54 | if file.endswith(FILE_TYPE): 55 | original_id, _ = os.path.splitext(file) # get filename without extension 56 | shutil.copy2(os.path.join(root, dir, file), 
f'{OUTPUT_FOLDER}/images/{current_index}{FILE_TYPE}') 57 | print(f'Copied image: {os.path.join(root, dir, file)} to {OUTPUT_FOLDER}/images/{current_index}{FILE_TYPE}') 58 | create_json(current_index, dir, original_id, FILE_TYPE) 59 | current_index += 1 60 | 61 | if SHUFFLE_OUTPUT: 62 | random.seed(RANDOM_SEED) # set the seed 63 | shuffle_folder = f'{OUTPUT_FOLDER}_shuffled' 64 | ensure_dir(f'{shuffle_folder}/images') 65 | ensure_dir(f'{shuffle_folder}/json') 66 | 67 | indices = list(range(START_INDEX, current_index)) 68 | random.shuffle(indices) 69 | 70 | for old_index, new_index in zip(range(START_INDEX, current_index), indices): 71 | shutil.copy2(f'{OUTPUT_FOLDER}/images/{old_index}{FILE_TYPE}', f'{shuffle_folder}/images/{new_index}{FILE_TYPE}') 72 | print(f'Copied image: {OUTPUT_FOLDER}/images/{old_index}{FILE_TYPE} to {shuffle_folder}/images/{new_index}{FILE_TYPE}') 73 | 74 | with open(f'{OUTPUT_FOLDER}/json/{old_index}', 'r', encoding='utf-8') as infile: 75 | data = json.load(infile) 76 | 77 | data["name"] = f"{NAME_PREFIX} #{new_index}" 78 | data["image"] = re.sub(r'(\d+)', str(new_index), data["image"]) 79 | 80 | create_json(new_index, data["attributes"][0]["value"], data["attributes"][1]["value"] if len(data["attributes"]) > 1 else '', FILE_TYPE, shuffle_folder) 81 | 82 | process_images() 83 | -------------------------------------------------------------------------------- /json/make_json_from_csv.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import os 3 | from datetime import datetime 4 | import json 5 | 6 | now = datetime.now() 7 | current_time = now.strftime("%m-%d-%Y-%I-%M-%S-%p") 8 | 9 | cid_hash_image = "REPLACEME" 10 | name = "Name #" 11 | description = "Your description" 12 | url = "https://scatter.art/" 13 | file_type = ".png" 14 | 15 | output_folder = "output_" + current_time 16 | os.mkdir(output_folder) 17 | 18 | id = 1 19 | with open('traits.csv') as csv_file: 20 | 21 | 22 | csv_reader = csv.reader(csv_file, delimiter=',') 23 | # Get the keys from the first row 24 | keys = next(csv_reader) 25 | for row in csv_reader: 26 | dictionary = { 27 | "name": name + str(id), 28 | "description": description, 29 | "external_url": url, 30 | "image": "ipfs://" + cid_hash_image + "/" + str(id) + file_type, 31 | } 32 | dictionary["attributes"] = [] 33 | # Create an associated array using the keys and the current row 34 | data = dict(zip(keys, row)) 35 | # Print the contents of each column using the keys 36 | print(id) 37 | for key in keys: 38 | if key != "ID" and key != "" and data[key] and data[key] != "None": 39 | print(key + ": " + data[key]) 40 | dictionary["attributes"].append({ 41 | "trait_type": key.strip(), 42 | "value": data[key].strip() 43 | }) 44 | with open("./" + output_folder + "/" + str(id), "w") as outfile: 45 | json.dump(dictionary, outfile, indent=4) 46 | id = id + 1 47 | -------------------------------------------------------------------------------- /refresh/refresh_collection_opensea.py: -------------------------------------------------------------------------------- 1 | # This script will no longer work as OS now requires an API key and getting an OS API key is annoying 2 | 3 | import requests 4 | import time 5 | 6 | collection = "0x441121df09c8c7f545a9444ab554ce640b566c4d" 7 | start_id = 1 8 | end_id = 3 9 | wait_time = 0.1 10 | headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0;Win64) AppleWebkit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'} 11 | 12 | 13 | for i in range(start_id, 
end_id + 1): 14 | print(i) 15 | url = "https://api.opensea.io/api/v1/asset/" + collection + "/" + str(i) + "?force_update=true" 16 | print(url) 17 | response = requests.get(url, headers=headers) 18 | print(response) 19 | time.sleep(wait_time) 20 | -------------------------------------------------------------------------------- /verify/calculate_rarity.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import csv 4 | from collections import defaultdict 5 | 6 | folder_name = "json" 7 | 8 | # Create a dictionary to keep track of trait counts 9 | traits = defaultdict(lambda: defaultdict(int)) 10 | traits_original = defaultdict(lambda: defaultdict(int)) 11 | 12 | # Loop through all the files in the "JSON" folder 13 | for filename in os.listdir(folder_name): 14 | # # Ignore non-JSON files 15 | # if not filename.endswith(".json"): 16 | # continue 17 | # Load the JSON file 18 | with open(os.path.join(folder_name, filename)) as f: 19 | data = json.load(f) 20 | # Count the traits in the attributes field 21 | for attribute in data["attributes"]: 22 | trait_type = attribute["trait_type"] 23 | # trait_type = trait_type.replace("Example ", "") 24 | value = attribute["value"] 25 | traits[trait_type][value] += 1 26 | traits_original[trait_type][value] += 1 27 | 28 | 29 | # Calculate the total count for each trait_type 30 | trait_counts = {trait_type: sum(trait_values.values()) for trait_type, trait_values in traits.items()} 31 | #print(trait_counts) 32 | 33 | # Calculate the rarity percentage for each trait value 34 | for trait_type, trait_values in traits.items(): 35 | #print(trait_values.items()) 36 | for value, count in trait_values.items(): 37 | rarity_percentage = count / trait_counts[trait_type] * 100 38 | rarity_percentage = round(rarity_percentage, 2) 39 | traits[trait_type][value] = rarity_percentage 40 | 41 | # Write the results to a CSV file 42 | with open("output.csv", "w", newline="") as f: 43 | writer = csv.writer(f) 44 | writer.writerow(["Trait Type", "Value", "Count", "Rarity Percentage"]) 45 | for trait_type, trait_values in traits.items(): 46 | for value, rarity_percentage in trait_values.items(): 47 | count = traits_original[trait_type][value] #traits[trait_type][value] * trait_counts[trait_type] / 100 48 | writer.writerow([trait_type, value, count, rarity_percentage]) 49 | -------------------------------------------------------------------------------- /verify/check_metadata.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | # set the path to the directory containing the JSON files 5 | path_to_json_files = "json" 6 | 7 | # loop through each file in the directory 8 | for filename in os.listdir(path_to_json_files): 9 | # check if the file is a JSON file 10 | #if filename.endswith(".json"): 11 | # load the file as a JSON object 12 | with open(os.path.join(path_to_json_files, filename), "r", encoding="utf-8") as f: 13 | json_data = json.load(f) 14 | 15 | # extract the expected number from the filename 16 | expected_num = filename.split(".")[0] 17 | 18 | # check if the numbers match 19 | if expected_num not in json_data["name"]: 20 | # print a message indicating the mismatch 21 | print(f"Error: Filename {filename} does not match name field in JSON object.") 22 | 23 | # check if the "image" attribute contains the expected number 24 | if expected_num not in json_data["image"]: 25 | # print a message indicating the mismatch 26 | print(f"Error: Filename 
{filename} expected image number {expected_num}, but found {json_data['image']}") -------------------------------------------------------------------------------- /verify/find_dupes_in_input.py: -------------------------------------------------------------------------------- 1 | import os 2 | import hashlib 3 | from collections import defaultdict 4 | 5 | def md5_hash(filepath): 6 | """Compute the MD5 hash of a file.""" 7 | hash_md5 = hashlib.md5() 8 | with open(filepath, "rb") as f: 9 | for chunk in iter(lambda: f.read(4096), b""): 10 | hash_md5.update(chunk) 11 | return hash_md5.hexdigest() 12 | 13 | def find_duplicates(directory): 14 | """Find and print duplicate files in a directory based on MD5 hash.""" 15 | hashes = defaultdict(list) 16 | # Walk through all files in the directory 17 | for root, dirs, files in os.walk(directory): 18 | for filename in files: 19 | filepath = os.path.join(root, filename) 20 | file_hash = md5_hash(filepath) 21 | hashes[file_hash].append(filepath) 22 | 23 | # Check for duplicate hashes and print them 24 | for paths in hashes.values(): 25 | if len(paths) > 1: 26 | print("Duplicates found:") 27 | for path in paths: 28 | print(path) 29 | 30 | # Path to the input folder 31 | input_directory = "input" 32 | find_duplicates(input_directory) 33 | -------------------------------------------------------------------------------- /verify/openrarity_fromjson.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import time 4 | import json 5 | 6 | from open_rarity import ( 7 | Collection, 8 | Token, 9 | RarityRanker, 10 | TokenMetadata, 11 | StringAttribute, 12 | ) 13 | from open_rarity.models.token_identifier import EVMContractTokenIdentifier 14 | from open_rarity.models.token_standard import TokenStandard 15 | 16 | parser = argparse.ArgumentParser(description='Calculate open rarity rankings for json folder') 17 | parser.add_argument('-f', '--folder', help='Json folder to calculate rarities', default='./json') 18 | args = parser.parse_args() 19 | INPUT_FOLDER = args.folder 20 | 21 | print("folder:", INPUT_FOLDER) 22 | 23 | trait_counts = {} 24 | tokens = [] 25 | start = time.time() 26 | 27 | for filename in os.listdir(INPUT_FOLDER): 28 | if filename == '.DS_Store': # Skip macOS .DS_Store files 29 | continue 30 | 31 | file_path = os.path.join(INPUT_FOLDER, filename) 32 | with open(file_path, "r", encoding="utf-8") as f: 33 | nft = json.load(f) 34 | 35 | tokenId = filename if not filename.endswith('.json') else filename[:-5] 36 | 37 | attributes = [] 38 | if 'attributes' in nft and nft['attributes'] is not None: 39 | attributes = nft['attributes'] 40 | if 'properties' in nft and nft['properties'] is not None and len(attributes) == 0: 41 | attributes = nft['properties'] 42 | 43 | if attributes == None or len(attributes) == 0: 44 | print("attributes not defined: ", tokenId) 45 | continue 46 | try: 47 | trait_type_used = [] 48 | for attribute in attributes: 49 | type = str(attribute['trait_type']) 50 | value = str(attribute['value']) 51 | if type in trait_type_used: 52 | continue # don't record duplicate attributes 53 | if type not in trait_counts: 54 | trait_counts[type] = {} 55 | if value not in trait_counts[type]: 56 | trait_counts[type][value] = 0 57 | 58 | trait_counts[type][value] += 1 59 | trait_type_used.append(type) 60 | 61 | tokens.append( 62 | Token( 63 | token_identifier=EVMContractTokenIdentifier( 64 | contract_address="0x0", token_id=tokenId 65 | ), 66 | token_standard=TokenStandard.ERC721, 67 
| metadata=TokenMetadata( 68 | string_attributes= {str(a['trait_type']):StringAttribute(name=str(a['trait_type']), value=str(a['value'])) for a in attributes} 69 | ) 70 | ) 71 | ) 72 | except Exception as e: 73 | print(e) 74 | print("attributes not properly formatted: ", tokenId) 75 | 76 | print(len(os.listdir(INPUT_FOLDER)), len(tokens)) 77 | stop = time.time() 78 | duration = stop-start 79 | print(trait_counts) 80 | print('duration: ', duration, '\n') 81 | 82 | collection = Collection( 83 | name= "local", 84 | tokens=tokens 85 | ) 86 | 87 | # Generate scores for a collection 88 | ranked_tokens = RarityRanker.rank_collection(collection=collection) 89 | 90 | # Iterate over the ranked and sorted tokens 91 | for token_rarity in ranked_tokens: 92 | token_id = token_rarity.token.token_identifier.token_id 93 | rank = token_rarity.rank 94 | score = token_rarity.score 95 | unique_attributes = token_rarity.token_features.unique_attribute_count 96 | print(f"\tToken {token_id} has rank {rank} score: {score} unique attributes: {unique_attributes}") 97 | 98 | print('\n') -------------------------------------------------------------------------------- /verify/openrarity_fromjson_tocsv.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import time 4 | import json 5 | import csv 6 | 7 | from open_rarity import ( 8 | Collection, 9 | Token, 10 | RarityRanker, 11 | TokenMetadata, 12 | StringAttribute, 13 | ) 14 | from open_rarity.models.token_identifier import EVMContractTokenIdentifier 15 | from open_rarity.models.token_standard import TokenStandard 16 | 17 | parser = argparse.ArgumentParser(description='Calculate open rarity rankings for json folder') 18 | parser.add_argument('-f', '--folder', help='Json folder to calculate rarities', default='./json') 19 | args = parser.parse_args() 20 | INPUT_FOLDER = args.folder 21 | 22 | print("folder:", INPUT_FOLDER) 23 | 24 | trait_counts = {} 25 | tokens = [] 26 | start = time.time() 27 | 28 | for filename in os.listdir(INPUT_FOLDER): 29 | if filename == '.DS_Store': # Skip macOS .DS_Store files 30 | continue 31 | 32 | file_path = os.path.join(INPUT_FOLDER, filename) 33 | with open(file_path, "r", encoding="utf-8") as f: 34 | nft = json.load(f) 35 | 36 | tokenId = filename if not filename.endswith('.json') else filename[:-5] 37 | 38 | attributes = [] 39 | if 'attributes' in nft and nft['attributes'] is not None: 40 | attributes = nft['attributes'] 41 | if 'properties' in nft and nft['properties'] is not None and len(attributes) == 0: 42 | attributes = nft['properties'] 43 | 44 | if attributes == None or len(attributes) == 0: 45 | print("attributes not defined: ", tokenId) 46 | continue 47 | try: 48 | trait_type_used = [] 49 | for attribute in attributes: 50 | type = str(attribute['trait_type']) 51 | value = str(attribute['value']) 52 | if type in trait_type_used: 53 | continue # don't record duplicate attributes 54 | if type not in trait_counts: 55 | trait_counts[type] = {} 56 | if value not in trait_counts[type]: 57 | trait_counts[type][value] = 0 58 | 59 | trait_counts[type][value] += 1 60 | trait_type_used.append(type) 61 | 62 | tokens.append( 63 | Token( 64 | token_identifier=EVMContractTokenIdentifier( 65 | contract_address="0x0", token_id=tokenId 66 | ), 67 | token_standard=TokenStandard.ERC721, 68 | metadata=TokenMetadata( 69 | string_attributes= {str(a['trait_type']):StringAttribute(name=str(a['trait_type']), value=str(a['value'])) for a in attributes} 70 | ) 71 | ) 72 | ) 73 | except 
Exception as e: 74 | print(e) 75 | print("attributes not properly formatted: ", tokenId) 76 | 77 | print(len(os.listdir(INPUT_FOLDER)), len(tokens)) 78 | stop = time.time() 79 | duration = stop-start 80 | print(trait_counts) 81 | print('duration: ', duration, '\n') 82 | 83 | collection = Collection( 84 | name= "local", 85 | tokens=tokens 86 | ) 87 | 88 | # Generate scores for a collection 89 | ranked_tokens = RarityRanker.rank_collection(collection=collection) 90 | 91 | # Prepare CSV file 92 | with open('token_rarity.csv', 'w', newline='') as file: 93 | writer = csv.writer(file) 94 | writer.writerow(["Token", "Rank", "Score", "Unique attributes"]) # Write header 95 | 96 | # Iterate over the ranked and sorted tokens 97 | for token_rarity in ranked_tokens: 98 | token_id = token_rarity.token.token_identifier.token_id 99 | rank = token_rarity.rank 100 | score = token_rarity.score 101 | unique_attributes = token_rarity.token_features.unique_attribute_count 102 | print(f"\tToken {token_id} has rank {rank} score: {score} unique attributes: {unique_attributes}") 103 | 104 | # Write to CSV 105 | writer.writerow([token_id, rank, score, unique_attributes]) 106 | 107 | print('\n') 108 | -------------------------------------------------------------------------------- /verify/validate_json.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | json_dir = "json" 5 | 6 | for file_name in os.listdir(json_dir): 7 | 8 | file_path = os.path.join(json_dir, file_name) 9 | with open(file_path, 'r', encoding='utf-8') as file: 10 | try: 11 | json.load(file) 12 | #print(f"File OK! {file_name}") 13 | except json.JSONDecodeError as e: 14 | print(f"Error in file {file_name}: {e}") 15 | -------------------------------------------------------------------------------- /verify/validate_unique_attributes.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import hashlib 4 | 5 | all_hashes = {} 6 | 7 | for filename in os.listdir("output_final_json"): 8 | #if filename.endswith(".json"): 9 | with open("output_final_json/" + filename) as json_file: 10 | json_data = json.load(json_file) 11 | 12 | big_hash = "" 13 | attributes = json_data["attributes"] 14 | 15 | # sort the attributes list by trait_type 16 | attributes.sort(key=lambda x: x["trait_type"]) 17 | 18 | for attribute in attributes: 19 | trait_type = attribute["trait_type"] 20 | if trait_type != "Name": 21 | value = attribute["value"] 22 | value_hash = hashlib.sha256(value.encode()).hexdigest() 23 | big_hash += value_hash 24 | 25 | final_hash = hashlib.sha256(big_hash.encode()).hexdigest() 26 | all_hashes[filename] = final_hash 27 | 28 | duplicates = {} 29 | for key, value in all_hashes.items(): 30 | if value in duplicates: 31 | duplicates[value].append(key) 32 | else: 33 | duplicates[value] = [key] 34 | 35 | for key, value in duplicates.items(): 36 | if len(value) > 1: 37 | print("Duplicate hash found for files: " + str(value)) 38 | -------------------------------------------------------------------------------- /verify/verify_for_instareveal.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import hashlib 4 | 5 | def md5(file_path): 6 | hash_md5 = hashlib.md5() 7 | with open(file_path, "rb") as f: 8 | for chunk in iter(lambda: f.read(4096), b""): 9 | hash_md5.update(chunk) 10 | return hash_md5.hexdigest() 11 | 12 | def main(): 13 | image_folder = "images" 14 | 
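# Expects images/ and json/ folders in the working directory; each JSON file's name
# (without extension) must match the image basename it references, e.g. json/7 <-> images/7.png.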
json_folder = "json" 15 | duplicate_images = {} 16 | valid_json = True 17 | malformed_json = [] 18 | unique_attributes = {} 19 | duplicate_attributes = {} 20 | image_pattern_errors = [] 21 | image_name_errors = [] 22 | missing_image_errors = [] 23 | 24 | if not os.path.exists(image_folder) or not os.path.exists(json_folder): 25 | print("Critical Error: 'images' or 'json' folder not found.") 26 | return 27 | 28 | image_files = [f for f in os.listdir(image_folder) if os.path.isfile(os.path.join(image_folder, f))] 29 | json_files = [f for f in os.listdir(json_folder) if os.path.isfile(os.path.join(json_folder, f))] #and f.endswith('.json')] 30 | 31 | for image_file in image_files: 32 | file_path = os.path.join(image_folder, image_file) 33 | file_hash = md5(file_path) 34 | if file_hash in duplicate_images: 35 | duplicate_images[file_hash].append(image_file) 36 | else: 37 | duplicate_images[file_hash] = [image_file] 38 | 39 | for json_file in json_files: 40 | file_path = os.path.join(json_folder, json_file) 41 | if json_file == '.DS_Store': # Skip macOS .DS_Store files 42 | continue 43 | try: 44 | with open(file_path, "r", encoding="utf-8") as f: 45 | json_data = json.load(f) 46 | attributes = json_data.get("attributes") 47 | image = json_data.get("image") 48 | 49 | attributes_str = json.dumps(attributes, sort_keys=True) 50 | if attributes_str in unique_attributes: 51 | if attributes_str in duplicate_attributes: 52 | duplicate_attributes[attributes_str].append(json_file) 53 | else: 54 | duplicate_attributes[attributes_str] = [unique_attributes[attributes_str], json_file] 55 | else: 56 | unique_attributes[attributes_str] = json_file 57 | 58 | 59 | 60 | if image is not None: 61 | expected_image_name = json_file.rsplit(".", 1)[0] 62 | 63 | _, image_name_with_ext = os.path.split(image) 64 | image_name, _ = os.path.splitext(image_name_with_ext) 65 | if image_name != expected_image_name: 66 | image_name_errors.append(json_file) 67 | 68 | if not image.startswith("ipfs://REPLACEME/"): 69 | image_pattern_errors.append(json_file) 70 | 71 | expected_image_exts = ['.png', '.jpg', '.jpeg', '.gif'] 72 | image_found = False 73 | for ext in expected_image_exts: 74 | if f"{expected_image_name}{ext}" in image_files: 75 | image_found = True 76 | break 77 | if not image_found: 78 | missing_image_errors.append(json_file) 79 | 80 | except json.JSONDecodeError: 81 | valid_json = False 82 | malformed_json.append(json_file) 83 | 84 | 85 | for k, v in duplicate_images.items(): 86 | if len(v) > 1: 87 | print("Duplicate images:") 88 | print(", ".join(v)) 89 | if len(duplicate_images) == len(image_files): 90 | print("No duplicate images found.") 91 | 92 | if valid_json: 93 | print("All JSON files are valid.") 94 | else: 95 | print("Malformed JSON files:", ", ".join(malformed_json)) 96 | 97 | if len(image_files) == len(json_files): 98 | print("The number of files in each folder is the same.") 99 | else: 100 | print("Error: Different number of files in each folder.") 101 | 102 | if not image_name_errors: 103 | print("All JSON files have the correct filename in the image field.") 104 | else: 105 | print("JSON files with incorrect filename in the image field:", ", ".join(image_name_errors)) 106 | 107 | if not missing_image_errors: 108 | print("All JSON files have a matching image file.") 109 | else: 110 | print("JSON files with a missing corresponding image file:", ", ".join(missing_image_errors)) 111 | 112 | if not duplicate_attributes: 113 | print("All JSON files have unique attributes.") 114 | else: 115 | for k, v in 
duplicate_attributes.items(): 116 | print("Files with same attributes:", ", ".join(v)) 117 | 118 | if not image_pattern_errors: 119 | print("All JSON files have the correct image pattern.") 120 | else: 121 | print("JSON files with incorrect image pattern:", ", ".join(image_pattern_errors)) 122 | 123 | if valid_json and not image_name_errors and not duplicate_attributes and not image_pattern_errors and len(image_files) == len(json_files) and not missing_image_errors: 124 | print("All tests passed.") 125 | else: 126 | print("Some tests did not pass.") 127 | 128 | 129 | main() 130 | -------------------------------------------------------------------------------- /verify/verify_jsons_id_in_image.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | def main(): 5 | folder = "jsons" 6 | 7 | # Check if the folder exists 8 | if not os.path.exists(folder): 9 | print(f"Folder '{folder}' not found.") 10 | return 11 | 12 | # Loop through all files in the folder 13 | for filename in os.listdir(folder): 14 | # Check if the file has a .json extension 15 | #if filename.endswith(".json"): 16 | # Remove the .json extension to get the name 17 | name = os.path.splitext(filename)[0] 18 | 19 | # Load the JSON file 20 | with open(os.path.join(folder, filename), "r") as file: 21 | data = json.load(file) 22 | 23 | # Check if the name is in both the 'name' and 'image' fields 24 | if not (name in data.get("name", "") and name in data.get("image", "")): 25 | print(f"Issue with file: {filename}") 26 | 27 | if __name__ == "__main__": 28 | main() 29 | -------------------------------------------------------------------------------- /whitelist-snapshots/blacklist_checker.py: -------------------------------------------------------------------------------- 1 | with open("input.txt", "r") as input_file: 2 | input_lines = input_file.readlines() 3 | 4 | with open("blacklist.txt", "r") as blacklist_file: 5 | blacklist_lines = set(line.strip() for line in blacklist_file) 6 | 7 | whitelist_lines = [line for line in input_lines if line.strip() not in blacklist_lines] 8 | 9 | with open("whitelist.txt", "w") as whitelist_file: 10 | whitelist_file.writelines(whitelist_lines) -------------------------------------------------------------------------------- /whitelist-snapshots/combine_snapshots.py: -------------------------------------------------------------------------------- 1 | # Get snapshots: https://holders.at/ 2 | 3 | import os 4 | 5 | # Set the path to the snapshots folder 6 | folder_path = 'snapshots' 7 | 8 | # Initialize an empty list to store the contents of the .txt files 9 | contents = [] 10 | 11 | # Loop through the files in the snapshots folder 12 | for file in os.listdir(folder_path): 13 | # Check if the file is a .txt file 14 | if file.endswith('.txt'): 15 | # Open the file and read its contents 16 | with open(os.path.join(folder_path, file), 'r') as f: 17 | file_contents = f.read() 18 | # Add the contents of the file to the list 19 | contents.append(file_contents) 20 | 21 | # Combine the contents of the .txt files into a single string 22 | combined_contents = '\n'.join(contents) 23 | 24 | # Split the combined contents into a list of lines 25 | lines = combined_contents.split('\n') 26 | 27 | # Remove duplicate lines 28 | unique_lines = set(lines) 29 | 30 | # Save the unique lines to a new file 31 | with open('output.txt', 'w') as f: 32 | f.write('\n'.join(unique_lines)) 33 | 
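# Added sketch (not in the original script): set() makes the line order differ between
# runs and can carry along an empty string from trailing newlines. For a stable,
# blank-free output.txt you could filter and sort before writing:
unique_lines = sorted(line for line in set(lines) if line.strip())
with open('output.txt', 'w') as f:
    f.write('\n'.join(unique_lines) + '\n')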
-------------------------------------------------------------------------------- /whitelist-snapshots/combine_snapshots_common.py: -------------------------------------------------------------------------------- 1 | # Get snapshots: https://holders.at/ 2 | 3 | import os 4 | 5 | # Set the directory where the snapshots are located 6 | directory = "snapshots" 7 | 8 | # Create a dictionary to store each address and how many snapshot files it appears in 9 | lines_count = dict() 10 | txt_file_count = 0 11 | 12 | # Loop through all files in the directory 13 | for filename in os.listdir(directory): 14 | # Only count and process .txt snapshot files 15 | if filename.endswith(".txt"): 16 | txt_file_count += 1 17 | # Open the file 18 | with open(os.path.join(directory, filename)) as file: 19 | # Read the lines of the file into a set so an address repeated 20 | # within one snapshot is only counted once; blank lines are skipped 21 | lines = set(line.strip() for line in file if line.strip()) 22 | # Loop through the unique lines of the file 23 | for line in lines: 24 | if line not in lines_count: 25 | lines_count[line] = 1 26 | else: 27 | lines_count[line] += 1 28 | 29 | # Write the lines common to every snapshot to the output file 30 | with open("output.txt", "w") as file: 31 | for line, count in lines_count.items(): 32 | # Compare against the number of .txt files, not every directory entry 33 | if count == txt_file_count: 34 | file.write(line + '\n') --------------------------------------------------------------------------------
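A small addendum to the whitelist-snapshot scripts above (an illustrative sketch, not a file in this repo): snapshot exports often mix lower-case and EIP-55 checksummed address casing, and the plain string comparisons in blacklist_checker.py and the combine_snapshots scripts treat "0xAbC..." and "0xabc..." as different holders. Normalizing case before comparing avoids that; the helper name and file layout below are only illustrative.

# normalize_addresses_sketch.py -- hypothetical helper, not part of useful-scripts
def load_addresses(path):
    # one address per line; drop blanks and compare case-insensitively
    with open(path, "r", encoding="utf-8") as f:
        return {line.strip().lower() for line in f if line.strip()}

blacklist = load_addresses("blacklist.txt")
whitelist = sorted(a for a in load_addresses("input.txt") if a not in blacklist)

with open("whitelist.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(whitelist) + "\n")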