├── Align Images ├── README.md └── imageAlignmentTool.py ├── Compare Folders ├── README.md └── compareFolders.py ├── Dataset Destroyer ├── README.md ├── config.ini ├── datasetDestroyer.py └── requirements.txt ├── De-dupe Images ├── ImageDeDupe.py └── README.md ├── Directory Tree Maker ├── README.md ├── directory_tree.py └── requirements.txt ├── Extract Video Frames ├── README.md └── VideoFrameExtract.py ├── Find Alpha Images ├── README.md └── findAlphaImages.py ├── Find Misaligned Images ├── README.md ├── findMisalignedImages.py └── requirements.txt ├── Hue Adjustment ├── README.md ├── hue_adjustment.py └── requirements.txt ├── ICC to sRGB ├── README.md ├── icc_to_srgb.py └── requirements.txt ├── Image Tiling ├── README.md ├── TileImages.py └── WTP Image Tiling │ ├── config.json │ ├── readme.md │ ├── requirements.txt │ ├── src │ ├── __init__.py │ ├── process.py │ └── tile_scripts │ │ ├── __init__.py │ │ ├── tiles_scripts.py │ │ └── utils.py │ └── tiling.py ├── LICENSE ├── Move Files ├── README.md ├── moveFiles.py └── requirements.txt ├── README.md ├── Re-Save Images ├── README.md └── resaveImages.py ├── Upscale Script ├── README.md ├── config.ini ├── requirements.txt └── upscale-script.py ├── Verify Images ├── README.md ├── requirements.txt └── verifyImages.py └── Video Frame Extractor ├── README.md └── vidpair.py /Align Images/README.md: -------------------------------------------------------------------------------- 1 | *Partially written by ChatGPT* 2 | 3 | **Use [ImgAlign](https://github.com/sonic41592/ImgAlign) instead. It supports all of the same features and more** 4 | 5 | This script handles basic image alignment. It looks at 2 folders, scans through and finds images with the same names, and performs projective transformations to align the image in the first folder to the image in the second folder. 
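At its core the alignment is: detect keypoints in both images, match them, fit a homography, and warp. Here is a minimal sketch of that idea for a single pair (the file names are hypothetical placeholders, and BFMatcher is used here for brevity where the full script below uses FLANN; the script also adds batch handling, transparency support, and logging):

```python
import cv2
import numpy as np

src = cv2.imread("folder1/frame.png")  # image to warp
ref = cv2.imread("folder2/frame.png")  # reference image

# SIFT keypoints and descriptors on grayscale versions
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(cv2.cvtColor(src, cv2.COLOR_BGR2GRAY), None)
kp2, des2 = sift.detectAndCompute(cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY), None)

# Match descriptors and keep the 20 closest matches
matches = sorted(cv2.BFMatcher().match(des1, des2), key=lambda m: m.distance)[:20]
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

# Fit a projective transform with RANSAC and warp src onto ref's frame
H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
aligned = cv2.warpPerspective(src, H, (ref.shape[1], ref.shape[0]))
cv2.imwrite("aligned.png", aligned)
```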
6 | 7 | `imageAlignmentTool.py --folder1 FOLDER1 --folder2 FOLDER2 --output OUTPUT` 8 | 9 | **Features:** 10 | * Transparency support 11 | * Projective transformations using keypoints 12 | * Batch image handling 13 | 14 | **Required Packages** 15 | * opencv-python 16 | * numpy 17 | 18 | (argparse, time, and logging are Python standard-library modules, so they need no installation.) 19 | 20 | -------------------------------------------------------------------------------- /Align Images/imageAlignmentTool.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import os 4 | import logging 5 | import argparse 6 | import time 7 | 8 | # Set up logging 9 | logging.basicConfig(level=logging.INFO) 10 | 11 | # Prompt the user for the file paths 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument("--folder1", "-f1", type=str, required=True, help="Path to the first folder") 14 | parser.add_argument("--folder2", "-f2", type=str, required=True, help="Path to the second folder") 15 | parser.add_argument("--output", "-o", type=str, required=True, help="Path to the output folder") 16 | 17 | # Parse arguments 18 | args = parser.parse_args() 19 | folder1_path = args.folder1 20 | folder2_path = args.folder2 21 | output_path = args.output 22 | 23 | # Normalize the file paths to parse backslashes 24 | folder1_path = os.path.normpath(folder1_path) 25 | folder2_path = os.path.normpath(folder2_path) 26 | output_path = os.path.normpath(output_path) 27 | 28 | # Get the list of images in both folders 29 | folder1_images = os.listdir(folder1_path) 30 | folder2_images = os.listdir(folder2_path) 31 | 32 | 33 | def align_image(image1, image2): 34 | # Get the size of the first image 35 | size = (image1.shape[1], image1.shape[0]) 36 | 37 | # Resize the second image to the same size as the first image 38 | image2 = cv2.resize(image2, size) 39 | 40 | # Convert the images to grayscale 41 | try: 42 | image1_gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY) 43 | image2_gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY) 44 | except Exception as e: 45 | logging.error("Error converting images to grayscale: %s", e) 46 | return 47 | 48 | # Find the keypoints and descriptors for both images 49 | # Use SIFT for keypoint detection and description 50 | try: 51 | sift = cv2.SIFT_create() 52 | keypoints1, descriptors1 = sift.detectAndCompute(image1_gray, None) 53 | keypoints2, descriptors2 = sift.detectAndCompute(image2_gray, None) 54 | except Exception as e: 55 | logging.error("Error finding keypoints and descriptors: %s", e) 56 | return 57 | # Used to debug 58 | #print(f'Number of keypoints in image1: {len(keypoints1)}') 59 | #print(f'Number of keypoints in image2: {len(keypoints2)}') 60 | 61 | # Match the keypoints 62 | # Use FLANN for keypoint matching 63 | try: 64 | FLANN_INDEX_KDTREE = 1 65 | index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) 66 | search_params = dict(checks=50) 67 | flann = cv2.FlannBasedMatcher(index_params, search_params) 68 | matches = [m[0] for m in flann.knnMatch(descriptors1, descriptors2, k=2)] 69 | except Exception as e: 70 | logging.error("Error matching keypoints: %s", e) 71 | return 72 | 73 | # Sort the matches by distance 74 | matches = sorted(matches, key=lambda x: x.distance) 75 | 76 | # Estimate the transformation using the top n matches 77 | n = 20 # Number of top matches to use 78 | src_points = np.float32([keypoints1[m.queryIdx].pt for m in matches[:n]]).reshape(-1, 1, 2) 79 | dst_points = np.float32([keypoints2[m.trainIdx].pt for m in
matches[:n]]).reshape(-1, 1, 2) 80 | M, mask = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0) 81 | 82 | # Print the homography matrix - Used to debug 83 | #print(f'Homography Matrix: \n{M}') 84 | 85 | # Warp the image 86 | h, w = image1.shape[:2] 87 | aligned_image = cv2.warpPerspective(image1, M, (w, h)) 88 | 89 | # Draw keypoints on image1 90 | image1_keypoints = cv2.drawKeypoints(aligned_image, keypoints1, outImage=np.array([]), color=(0, 0, 255), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) 91 | 92 | # Draw keypoints on image2 93 | image2_keypoints = cv2.drawKeypoints(image2, keypoints2, outImage=np.array([]), color=(0, 0, 255), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) 94 | 95 | # Save the images with keypoints drawn on them to file 96 | cv2.imwrite("image1_keypoints.png", image1_keypoints) 97 | cv2.imwrite("image2_keypoints.png", image2_keypoints) 98 | 99 | return aligned_image 100 | 101 | # Iterate through the files in both folders 102 | for file in folder1_images: 103 | # Check if the file exists in both folders 104 | if file in folder2_images: 105 | # If the file exists in both folders, align the image in folder1 with the image in folder2 106 | start_time = time.time() 107 | image1 = cv2.imread(os.path.join(folder1_path, file), cv2.IMREAD_UNCHANGED) 108 | image2 = cv2.imread(os.path.join(folder2_path, file), cv2.IMREAD_UNCHANGED) 109 | aligned_image = align_image(image1, image2) 110 | elapsed_time = time.time() - start_time 111 | 112 | # Save Files 113 | output_file = os.path.join(output_path, file) 114 | cv2.imwrite(output_file, aligned_image) 115 | 116 | # Print time taken per image 117 | print(f'Processed {file} in {elapsed_time:.2f} seconds') 118 | 119 | print('Done!') 120 | -------------------------------------------------------------------------------- /Compare Folders/README.md: -------------------------------------------------------------------------------- 1 | This script allows you to quickly compare the files in a given pair of folders. The files must have the same names to be compared. This can be useful when making datasets. 2 | 3 | **Required Packages:** 4 | 5 | * None (only the Python standard library is used) 6 | 7 | How to use: `python compareFolders.py /path/to/folder1 /path/to/folder2` 8 | 9 | This is an experimental script.
I've tested it lightly, but use at your own risk. 10 | -------------------------------------------------------------------------------- /Compare Folders/compareFolders.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | def compare_folders(folder1, folder2): 5 | # Get the list of files in both folders 6 | files1 = os.listdir(folder1) 7 | files2 = os.listdir(folder2) 8 | 9 | # Find missing files in folder1 10 | missing_in_folder1 = [f for f in files2 if f not in files1] 11 | 12 | # Find missing files in folder2 13 | missing_in_folder2 = [f for f in files1 if f not in files2] 14 | 15 | if missing_in_folder1: 16 | print(f"Files missing in {folder1}:") 17 | for file in missing_in_folder1: 18 | print(file) 19 | print() 20 | 21 | if missing_in_folder2: 22 | print(f"Files missing in {folder2}:") 23 | for file in missing_in_folder2: 24 | print(file) 25 | print() 26 | 27 | if not missing_in_folder1 and not missing_in_folder2: 28 | print("Both folders contain the same files.") 29 | 30 | if __name__ == "__main__": 31 | if len(sys.argv) != 3: 32 | print("Usage: python compareFolders.py <folder1> <folder2>") 33 | sys.exit(1) 34 | 35 | folder1 = sys.argv[1] 36 | folder2 = sys.argv[2] 37 | 38 | if not os.path.isdir(folder1): 39 | print(f"{folder1} is not a valid directory.") 40 | sys.exit(1) 41 | 42 | if not os.path.isdir(folder2): 43 | print(f"{folder2} is not a valid directory.") 44 | sys.exit(1) 45 | 46 | compare_folders(folder1, folder2) -------------------------------------------------------------------------------- /Dataset Destroyer/README.md: -------------------------------------------------------------------------------- 1 | 2 | This script's main purpose is to generate datasets for your image models. 3 | 4 | Note: Avoid running all degradations at once in combination with ffmpeg options (mpeg, mpeg2, h264, hevc, vp9). It will likely cause errors. 5 | 6 | Main features: 7 | - Adjustable degradations 8 | - Supports: Blur, noise, compression, scaling, quantization, and unsharp mask 9 | - Adjustable strengths and order for every degradation, with a randomization option (see the sketch after the filter list below) 10 | - Probability control for each degradation 11 | - Video compression support through ffmpeg-python 12 | - Progress bar 13 | 14 | 15 |
Supported filters: 16 | 17 | - Blur 18 | - Average 19 | - Gaussian 20 | - Isotropic 21 | - Anisotropic 22 | 23 | - Noise 24 | - Uniform 25 | - Gaussian 26 | - Color 27 | - Gray 28 | - ISO 29 | - Salt and Pepper 30 | 31 | - Quantization 32 | - Floyd-Steinberg 33 | - Jarvis-Judice-Ninke 34 | - Stucki 35 | - Atkinson 36 | - Burkes 37 | - Sierra 38 | - Two-Row Sierra 39 | - Sierra Lite 40 | 41 | - Compression 42 | - H264 43 | - HEVC 44 | - VP9 45 | - MPEG 46 | - MPEG-2 47 | - JPEG 48 | - WEBP 49 | 50 | - Scale 51 | - down_up 52 | - Box 53 | - Linear 54 | - Cubic_Catrom 55 | - Cubic_Mitchell 56 | - Cubic_BSpline 57 | - Lanczos 58 | - Gauss 59 | 60 | - Unsharp Mask 61 | 62 | - Chroma Blur 63 | 64 |
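Under the hood, each configured degradation is kept or dropped with its own probability, and the surviving ones can be applied in a shuffled order. Roughly like this sketch (illustrative values only; the real logic lives in `datasetDestroyer.py` and is driven by `config.ini`):

```python
import random

degradations = ["blur", "noise", "compression", "scale"]  # order from config.ini
likelihood = {"blur": 1.0, "noise": 1.0, "compression": 0.5, "scale": 1.0}

# Keep each degradation with its configured probability, then shuffle the order
order = [d for d in degradations if random.random() < likelihood[d]]
random.shuffle(order)
print("applying:", order)  # e.g. ['noise', 'scale', 'blur']
```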
65 | 66 | Usage: 67 | - Download the script and the config.ini file 68 | - Edit config.ini to your liking. Make sure to add file paths! Comments within the file describe each option 69 | - Run the .py file with python 70 | - Enjoy! 71 | 72 | __You may want to consider using [wtp_dataset_destroyer](https://github.com/umzi2/wtp_dataset_destroyer) instead. It builds on the concepts used in this original version and makes it a lot easier to use.__ 73 | -------------------------------------------------------------------------------- /Dataset Destroyer/config.ini: -------------------------------------------------------------------------------- 1 | [main] 2 | input_folder = path\\to\\input 3 | output_folder = path\\to\\output 4 | # Output format for processed images (e.g., png, jpg) 5 | #!! If you save in a lossy format here, your images will be compressed again on top of the compression settings below !! 6 | output_format = png 7 | # List of degradations to apply to images in specified order (e.g., blur,noise,compression,scale,quantization,unsharp_mask,chroma) 8 | degradations = blur,noise,compression,scale 9 | # Whether to randomize the order of degradations (True or False) 10 | randomize = True 11 | # Whether to print the degradations applied onto the image. Useful for testing. 12 | print = False 13 | # Whether to print the degradations applied into a separate text file. Useful for dataset statistics. 14 | textfile = False 15 | # The path where the applied_degradations.txt text file will be generated (or appended to if it exists), use if textfile = True 16 | textfile_path = path\\to\\output 17 | 18 | # Controls likelihood of a degradation being applied. 0-1 19 | [likelihood] 20 | blur = 1 21 | noise = 1 22 | compression = 1 23 | scale = 1 24 | quantization = 1 25 | unsharp_mask = 1 26 | chroma = 1 27 | 28 | # Blur settings 29 | [blur] 30 | # List of available blur algorithms (e.g., average,gaussian,anisotropic) 31 | algorithms = average,gaussian,anisotropic 32 | # Whether to choose a random blur algorithm each time (True or False) 33 | randomize = True 34 | # Range of values for blur kernel size or standard deviation (e.g., 1,10) 35 | range = 1,16 36 | # Adjusts the scaling of the blur range. For average and gaussian, this will add 1 when the new value is even 37 | scale_factor = 0.25 38 | 39 | # Noise settings 40 | [noise] 41 | # List of available noise algorithms (e.g., uniform,gaussian,color,gray,salt-and-pepper) 42 | algorithms = uniform,gaussian,color,gray,salt-and-pepper 43 | # Whether to choose a random noise algorithm each time (True or False) 44 | randomize = True 45 | # Range of values for noise intensity (e.g., 0,50) !!Do not go below 0!! 46 | range = 0,5 47 | # Adjusts the scaling of the noise range 48 | scale_factor = 0.05 49 | # Range of values for salt and pepper noise intensity (e.g., 0,50) !!Do not go below 0!!
50 | sp_range = 1,5 51 | # Adjusts the scaling of the salt and pepper noise range 52 | sp_scale_factor = 0.02 53 | 54 | # Configuration for quantization degradation 55 | [quantization] 56 | # List of available quantization algorithms (e.g., floyd_steinberg, jarvis_judice_ninke, stucki, atkinson, burkes, sierra, two_row_sierra, sierra_lite) 57 | algorithms = floyd_steinberg,jarvis_judice_ninke,stucki,atkinson,burkes,sierra,two_row_sierra,sierra_lite 58 | # Whether to choose a random quantization algorithm each time (True or False) 59 | randomize = True 60 | # Range of values for quantization levels (e.g., 2, 255) 61 | range = 2, 255 62 | 63 | # Chroma degradation settings 64 | [chroma] 65 | # List of available blur algorithms (Only Gaussian) 66 | algorithms = gaussian 67 | # Whether to choose a random blur algorithm each time (True or False) 68 | randomize = True 69 | # Horizontal Blur range 70 | horizontal_range = 32,32 71 | # Vertical Blur range 72 | vertical_range = 1,2 73 | # Adjusts the scaling of the blur range 74 | scale_factor = 1 75 | 76 | # Compression settings 77 | [compression] 78 | # List of available compression algorithms (e.g., mpeg,mpeg2,h264,hevc,jpeg,webp,vp9) 79 | # Using more intensive codecs (such as vp9) in combination with other degradations may result in ffmpeg errors 80 | algorithms = mpeg,mpeg2,h264,hevc,jpeg,webp,vp9 81 | # Whether to choose a random algorithm from the list 82 | randomize = True 83 | # JPEG Quality Levels 84 | jpeg_quality_range = 40, 95 85 | # WebP Quality levels 86 | webp_quality_range = 45, 90 87 | # H.264 video quality levels in CRF format 88 | h264_crf_level_range = 23,32 89 | # HEVC video quality levels in CRF format 90 | hevc_crf_level_range = 25,34 91 | # VP9 video quality levels in CRF format 92 | vp9_crf_level_range = 25,35 93 | # Quality control for MPEG codec. Range 1-31 94 | mpeg_qscale_range = 2,15 95 | # Quality control for MPEG2 codec. Range 1-31 96 | mpeg2_qscale_range = 2,15 97 | 98 | # Scale settings 99 | [scale] 100 | # List of available scale algorithms (e.g., down_up,linear,cubic_catrom,cubic_mitchell,cubic_bspline,lanczos,gauss,hamming,hann,box,hermite,lagrange) 101 | algorithms = down_up,linear,cubic_mitchell,lanczos,gauss,box 102 | # List of available scale algorithms when applying down_up 103 | down_up_algorithms = linear,cubic_mitchell,lanczos,gauss,box 104 | # Whether to choose a random scale algorithm each time (True or False) 105 | randomize = True 106 | # Factor to scale your images to (e.g., 0.25, 0.50, 0.75) (0.25 = 25%, 0.50 = 50%) 107 | size_factor = 0.25 108 | # Range of values for down_up (e.g., 0.5,2.0) (0.5 = 50%, 2.0 = 200%) 109 | range = 0.75,1.5 110 | 111 | # Unsharp mask settings 112 | [unsharp_mask] 113 | # radius_range: Range for the Gaussian blur radius in pixels. Larger values result in less detail. 114 | radius_range = 0.2, 0.5 115 | # percent_range: Range for the amount of sharpening. Higher values result in more sharpening. 116 | percent_range = 3, 15 117 | # threshold_range: Range for the contrast threshold. Only details with contrast above this are enhanced. 
118 | threshold_range = 1, 3 119 | -------------------------------------------------------------------------------- /Dataset Destroyer/datasetDestroyer.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import os 3 | import cv2 4 | import numpy as np 5 | import ffmpeg 6 | from random import random, randint, choice, shuffle, uniform 7 | import concurrent.futures 8 | from tqdm import tqdm 9 | from PIL import Image 10 | from chainner_ext import DiffusionAlgorithm, UniformQuantization, error_diffusion_dither, resize, ResizeFilter 11 | 12 | # Logging 13 | import logging 14 | logging.basicConfig(level=logging.DEBUG) 15 | 16 | # Read config file 17 | config = configparser.ConfigParser() 18 | config.read('config.ini') 19 | 20 | # Get config values 21 | input_folder = config.get('main', 'input_folder') 22 | output_folder = config.get('main', 'output_folder') 23 | output_format = config.get('main', 'output_format') 24 | degradations = config.get('main', 'degradations').split(',') 25 | degradations_randomize = config.getboolean('main', 'randomize') 26 | blur_algorithms = config.get('blur', 'algorithms').split(',') 27 | blur_randomize = config.getboolean('blur', 'randomize') 28 | blur_range = tuple(map(int, config.get('blur', 'range').split(','))) 29 | blur_scale_factor = config.getfloat('blur', 'scale_factor') 30 | noise_algorithms = config.get('noise', 'algorithms').split(',') 31 | noise_randomize = config.getboolean('noise', 'randomize') 32 | noise_range = tuple(map(int, config.get('noise', 'range').split(','))) 33 | noise_scale_factor = config.getfloat('noise', 'scale_factor') 34 | sp_noise_range = tuple(map(int, config.get('noise', 'sp_range').split(','))) 35 | sp_noise_scale_factor = config.getfloat('noise', 'sp_scale_factor') 36 | compression_algorithms = config.get('compression', 'algorithms').split(',') 37 | compression_randomize = config.getboolean('compression', 'randomize') 38 | chroma_blur_algorithms = config.get('chroma', 'algorithms').split(',') 39 | chroma_blur_randomize = config.getboolean('chroma', 'randomize') 40 | chroma_horizontal_blur_range = tuple(map(int, config.get('chroma', 'horizontal_range').split(','))) 41 | chroma_vertical_blur_range = tuple(map(int, config.get('chroma', 'vertical_range').split(','))) 42 | chroma_blur_scale_factor = config.getfloat('chroma', 'scale_factor') 43 | jpeg_quality_range = tuple(map(int, config.get('compression', 'jpeg_quality_range').split(','))) 44 | webp_quality_range = tuple(map(int, config.get('compression', 'webp_quality_range').split(','))) 45 | h264_crf_level_range = tuple(map(int, config.get('compression', 'h264_crf_level_range').split(','))) 46 | hevc_crf_level_range = tuple(map(int, config.get('compression', 'hevc_crf_level_range').split(','))) 47 | vp9_crf_level_range = tuple(int(x) for x in config.get('compression', 'vp9_crf_level_range').split(',')) 48 | mpeg_qscale_range = tuple(map(int, config.get('compression', 'mpeg_qscale_range').split(','))) 49 | mpeg2_qscale_range = tuple(map(int, config.get('compression', 'mpeg2_qscale_range').split(','))) 50 | size_factor = config.getfloat('scale', 'size_factor') 51 | scale_algorithms = config.get('scale', 'algorithms').split(',') 52 | down_up_scale_algorithms = config.get('scale', 'down_up_algorithms').split(',') 53 | scale_randomize = config.getboolean('scale', 'randomize') 54 | scale_range = tuple(map(float, config.get('scale', 'range').split(','))) 55 | unsharp_mask_radius_range = tuple(map(float, config.get('unsharp_mask', 
'radius_range').split(','))) 56 | unsharp_mask_percent_range = tuple(map(float, config.get('unsharp_mask', 'percent_range').split(','))) 57 | unsharp_mask_threshold_range = tuple(map(int, config.get('unsharp_mask', 'threshold_range').split(','))) 58 | print_to_image = config.getboolean('main', 'print') 59 | print_to_textfile = config.getboolean('main', 'textfile') 60 | path_to_textfile = config.get('main', 'textfile_path') 61 | 62 | # Add config values for quantization 63 | quantization_algorithms = config.get('quantization', 'algorithms').split(',') 64 | quantization_randomize = config.getboolean('quantization', 'randomize') 65 | quantization_range = tuple(map(int, config.get('quantization', 'range').split(','))) 66 | 67 | # Add new config values for likelihood of each degradation 68 | blur_likelihood = config.getfloat('likelihood', 'blur', fallback=0.3) 69 | noise_likelihood = config.getfloat('likelihood', 'noise', fallback=0.3) 70 | compression_likelihood = config.getfloat('likelihood', 'compression', fallback=0.2) 71 | scale_likelihood = config.getfloat('likelihood', 'scale', fallback=0.1) 72 | quantization_likelihood = config.getfloat('likelihood', 'quantization', fallback=0.1) 73 | unsharp_mask_likelihood = config.getfloat('likelihood', 'unsharp_mask', fallback=0.1) 74 | chroma_likelihood = config.getfloat('likelihood', 'chroma', fallback=0.3) 75 | 76 | def print_text_to_image(image, text, order): 77 | h, w = image.shape[:2] 78 | # More robust font scaling 79 | font_scale = min(w, h) / 1000 # Normalized scaling 80 | font_thickness = max(1, int(font_scale * 2)) 81 | 82 | # Break long text into multiple lines 83 | max_line_length = 40 # Adjust this value as needed 84 | lines = [] 85 | while len(text) > max_line_length: 86 | # Find the last space before max_line_length 87 | split_index = text[:max_line_length].rfind(' ') 88 | if split_index == -1: 89 | split_index = max_line_length 90 | lines.append(text[:split_index]) 91 | text = text[split_index:].strip() 92 | lines.append(text) 93 | 94 | # Use red in BGR color space 95 | color = (0, 0, 255) # Red in BGR 96 | 97 | # Calculate text size to adjust vertical positioning 98 | text_sizes = [cv2.getTextSize(line, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)[0] for line in lines] 99 | text_heights = [size[1] for size in text_sizes] 100 | 101 | x = 10 102 | y = int(order * text_heights[0] * 1.5) + 10 103 | 104 | # Draw each line of text 105 | for i, line in enumerate(lines): 106 | current_y = y + i * int(text_heights[0] * 1.5) 107 | cv2.putText(image, f"{order}. {line}" if i == 0 else line, 108 | (x, current_y), 109 | cv2.FONT_HERSHEY_SIMPLEX, font_scale, 110 | color, font_thickness, cv2.LINE_AA) 111 | 112 | return image 113 | 114 | # Append given text as a new line at the end of file (if file not exists it creates and inserts line, otherwise it just appends newline) 115 | def print_text_to_textfile(file_name, text_to_append): 116 | # Open the file in append & read mode ('a+') 117 | with open(file_name, "a+") as file_object: 118 | # Move read cursor to the start of file. 
119 | file_object.seek(0) 120 | # If file is not empty then append '\n' 121 | data = file_object.read(100) 122 | if len(data) > 0: 123 | file_object.write("\n") 124 | # Append text at the end of file 125 | file_object.write(text_to_append) 126 | 127 | def apply_blur(image): 128 | text = '' 129 | # Choose blur algorithm 130 | if blur_randomize: 131 | algorithm = choice(blur_algorithms) 132 | else: 133 | algorithm = blur_algorithms[0] 134 | 135 | # Normalize the image to the range [0, 1] 136 | image = image.astype(float) / 255 137 | 138 | # Apply blur with chosen algorithm 139 | if algorithm == 'average': 140 | ksize = randint(*blur_range) 141 | ksize = int(ksize * blur_scale_factor) # Scale down ksize by blur_scale_factor 142 | ksize = ksize if ksize % 2 == 1 else ksize + 1 # Ensure ksize is an odd integer 143 | image = cv2.blur(image, (ksize, ksize)) 144 | text = f"{algorithm} ksize={ksize}" 145 | elif algorithm == 'gaussian': 146 | ksize = randint(*blur_range) | 1 147 | ksize = int(ksize * blur_scale_factor) # Scale down ksize by blur_scale_factor 148 | ksize = ksize if ksize % 2 == 1 else ksize + 1 # Ensure ksize is an odd integer 149 | image = cv2.GaussianBlur(image, (ksize, ksize), 0) 150 | text = f"{algorithm} ksize={ksize}" 151 | elif algorithm == 'anisotropic': 152 | # Apply anisotropic blur using a Gaussian filter with different standard deviations in the x and y directions 153 | sigma_x = randint(*blur_range) 154 | sigma_y = randint(*blur_range) 155 | angle = uniform(0, 360) 156 | 157 | # Scale down sigma by blur_scale_factor 158 | sigma_x *= blur_scale_factor 159 | sigma_y *= blur_scale_factor 160 | 161 | # Convert angle to radians 162 | angle = np.deg2rad(angle) 163 | 164 | # Create a 2D Gaussian kernel with the desired direction 165 | kernel_size = max(2 * int(4 * max(sigma_x, sigma_y) + 0.5) + 1, 3) 166 | y, x = np.mgrid[-kernel_size//2 + 1:kernel_size//2 + 1, -kernel_size//2 + 1:kernel_size//2 + 1] 167 | rotx = x * np.cos(angle) - y * np.sin(angle) # Rotate x by the angle 168 | roty = x * np.sin(angle) + y * np.cos(angle) # Rotate y by the angle 169 | kernel = np.exp(-(rotx**2/(2*sigma_x**2) + roty**2/(2*sigma_y**2))) 170 | 171 | # Normalize the kernel 172 | kernel /= np.sum(kernel) 173 | 174 | # Apply the kernel to the image 175 | image = cv2.filter2D(image, -1, kernel) 176 | 177 | text = f"{algorithm} sigma_x={sigma_x} sigma_y={sigma_y} angle={np.rad2deg(angle)}" 178 | 179 | # Scale the image back to the range [0, 255] 180 | image = (image * 255).astype(np.uint8) 181 | 182 | return image, text 183 | 184 | def apply_noise(image): 185 | # Normalize the image to the range [0, 1] 186 | image = image.astype(float) / 255 187 | 188 | text = '' 189 | # Choose noise algorithm 190 | if noise_randomize: 191 | algorithm = choice(noise_algorithms) 192 | else: 193 | algorithm = noise_algorithms[0] 194 | 195 | # Apply noise with chosen algorithm 196 | if algorithm == 'uniform': 197 | intensity = randint(*noise_range) 198 | intensity *= noise_scale_factor # Scale down intensity by noise_scale_factor 199 | noise = np.random.uniform(-intensity, intensity, image.shape) 200 | image += noise 201 | text = f"{algorithm} intensity={intensity}" 202 | 203 | elif algorithm == 'gaussian': 204 | mean = 0 205 | var = randint(*noise_range) 206 | var *= noise_scale_factor # Scale down variance by noise_scale_factor 207 | sigma = var**0.5 208 | noise = np.random.normal(mean, sigma, image.shape) 209 | image += noise 210 | text = f"{algorithm} variance={var}" 211 | 212 | elif algorithm == 'color': 213 | 
noise = np.zeros_like(image) 214 | m = (0, 0, 0) 215 | s = (randint(*noise_range) * noise_scale_factor, randint(*noise_range) * noise_scale_factor, randint(*noise_range) * noise_scale_factor) 216 | cv2.randn(noise, m, s) 217 | image += noise 218 | text = f"{algorithm} s={s}" 219 | 220 | elif algorithm == 'gray': 221 | gray_noise = np.zeros((image.shape[0], image.shape[1])) 222 | m = (0,) 223 | s = (randint(*noise_range) * noise_scale_factor,) 224 | cv2.randn(gray_noise, m, s) 225 | for i in range(image.shape[2]): # Add noise to each channel separately 226 | image[..., i] += gray_noise 227 | text = f"{algorithm} s={s}" 228 | 229 | elif algorithm == 'salt-and-pepper': 230 | # Salt-and-pepper noise 231 | intensity = randint(*sp_noise_range) 232 | intensity *= sp_noise_scale_factor # Scale down intensity by sp_noise_scale_factor 233 | 234 | # Pepper mode 235 | num_pepper = np.ceil(intensity * image.size * 0.25) # Reduced to 25% of the image size 236 | x_pepper = np.random.randint(0, image.shape[1], int(num_pepper)) 237 | y_pepper = np.random.randint(0, image.shape[0], int(num_pepper)) 238 | image[y_pepper, x_pepper] = 0 239 | 240 | # Salt mode 241 | num_salt = np.ceil(intensity * image.size * 0.5) 242 | x_salt = np.random.randint(0, image.shape[1], int(num_salt)) 243 | y_salt = np.random.randint(0, image.shape[0], int(num_salt)) 244 | image[y_salt, x_salt] = 1 245 | text = f"{algorithm} intensity={intensity}" 246 | 247 | # Clip the values to the range [0, 1] and scale back to the range [0, 255] 248 | image = np.clip(image, 0, 1) 249 | image = (image * 255).astype(np.uint8) 250 | 251 | return image, text 252 | 253 | def apply_chroma(image): 254 | assert len(image.shape) == 3, "Input image must have 3 dimensions (height, width, channels)" 255 | 256 | text = '' 257 | 258 | # Choose chroma blur algorithm 259 | if chroma_blur_randomize: 260 | algorithm = choice(chroma_blur_algorithms) 261 | else: 262 | algorithm = chroma_blur_algorithms[0] 263 | 264 | # Convert RGB to YUV, split YUV channels 265 | yuv_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV) 266 | Y, U, V = cv2.split(yuv_image) 267 | 268 | if algorithm == 'gaussian': 269 | # Generate random kernel sizes within specified horizontal and vertical ranges 270 | horizontal_ksize = randint(*chroma_horizontal_blur_range) 271 | vertical_ksize = randint(*chroma_vertical_blur_range) 272 | 273 | # Ensure kernel sizes are odd integers 274 | horizontal_ksize = horizontal_ksize if horizontal_ksize % 2 == 1 else horizontal_ksize + 1 275 | vertical_ksize = vertical_ksize if vertical_ksize % 2 == 1 else vertical_ksize + 1 276 | 277 | # Blur U and V channels (chroma channels) with Gaussian blur 278 | blurred_U = cv2.GaussianBlur(U, (horizontal_ksize, vertical_ksize), 0) 279 | blurred_V = cv2.GaussianBlur(V, (horizontal_ksize, vertical_ksize), 0) 280 | 281 | # Merge blurred U and V channels with original Y channel 282 | blurred_yuv_image = cv2.merge([Y, blurred_U, blurred_V]) 283 | 284 | # Convert back to RGB 285 | image = cv2.cvtColor(blurred_yuv_image, cv2.COLOR_YUV2RGB) 286 | 287 | text = f"{algorithm} horizontal_ksize={horizontal_ksize} vertical_ksize={vertical_ksize}" 288 | 289 | return image, text 290 | 291 | def apply_quantization(image): 292 | # Assert that the input image has 3 dimensions 293 | assert len(image.shape) == 3, "Input image must have 3 dimensions (height, width, channels)" 294 | 295 | text = '' 296 | # Choose quantization algorithm 297 | if quantization_randomize: 298 | algorithm = choice(quantization_algorithms) 299 | else: 300 | algorithm 
= quantization_algorithms[0] 301 | 302 | # Map string algorithm names to DiffusionAlgorithm enum values 303 | algorithm_mapping = { 304 | 'floyd_steinberg': DiffusionAlgorithm.FloydSteinberg, 305 | 'jarvis_judice_ninke': DiffusionAlgorithm.JarvisJudiceNinke, 306 | 'stucki': DiffusionAlgorithm.Stucki, 307 | 'atkinson': DiffusionAlgorithm.Atkinson, 308 | 'burkes': DiffusionAlgorithm.Burkes, 309 | 'sierra': DiffusionAlgorithm.Sierra, 310 | 'two_row_sierra': DiffusionAlgorithm.TwoRowSierra, 311 | 'sierra_lite': DiffusionAlgorithm.SierraLite, 312 | } 313 | 314 | # Apply quantization with chosen algorithm 315 | if algorithm in algorithm_mapping: 316 | colors_per_channel = randint(*quantization_range) 317 | quant = UniformQuantization(colors_per_channel=colors_per_channel) 318 | image_np = np.array(image).astype(np.float32) / 255.0 319 | 320 | # Apply the chosen dithering algorithm to each color channel separately 321 | for i in range(image_np.shape[2]): 322 | dithered_channel = error_diffusion_dither(image_np[..., i], quant, algorithm_mapping[algorithm]) 323 | # Reshape the output to (height, width) if necessary 324 | if len(dithered_channel.shape) == 3: 325 | dithered_channel = dithered_channel.squeeze(-1) 326 | image_np[..., i] = dithered_channel 327 | 328 | # Convert the numpy array back to an image 329 | dithered_image_np = np.round(image_np * 255).astype(np.uint8) # Round before converting to uint8 330 | image = Image.fromarray(dithered_image_np) 331 | 332 | text = f"{algorithm} colors_per_channel={colors_per_channel}" 333 | else: 334 | raise ValueError(f"Unsupported quantization algorithm: {algorithm}") 335 | 336 | # Convert the image back to a numpy array before returning 337 | image = np.array(image) 338 | 339 | return image, text 340 | 341 | def apply_unsharp_mask(image, config): 342 | text = '' 343 | # Choose unsharp mask parameters 344 | radius = np.random.uniform(unsharp_mask_radius_range[0], unsharp_mask_radius_range[1]) 345 | percent = np.random.uniform(unsharp_mask_percent_range[0], unsharp_mask_percent_range[1]) 346 | threshold = np.random.randint(unsharp_mask_threshold_range[0], unsharp_mask_threshold_range[1]) 347 | 348 | # Apply unsharp mask with chosen parameters 349 | blurred = cv2.GaussianBlur(image, (0, 0), radius) 350 | sharpened = cv2.addWeighted(image, 1.0 + percent, blurred, -percent, threshold) 351 | image = np.clip(sharpened, 0, 255).astype(np.uint8) # Clip values to 8-bit range 352 | 353 | text = f"unsharp_mask radius={radius} percent={percent} threshold={threshold}" 354 | 355 | return image, text 356 | 357 | def apply_compression(image): 358 | text = '' 359 | # Choose compression algorithm 360 | if compression_randomize: 361 | algorithm = choice(compression_algorithms) 362 | else: 363 | algorithm = compression_algorithms[0] 364 | 365 | # Apply compression with chosen algorithm 366 | if algorithm == 'jpeg': 367 | quality = randint(*jpeg_quality_range) 368 | encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality] 369 | result, encimg = cv2.imencode('.jpg', image, encode_param) 370 | image = cv2.imdecode(encimg, 1).copy() 371 | text = f"{algorithm} quality={quality}" 372 | 373 | elif algorithm == 'webp': 374 | quality = randint(*webp_quality_range) 375 | encode_param = [int(cv2.IMWRITE_WEBP_QUALITY), quality] 376 | result, encimg = cv2.imencode('.webp', image, encode_param) 377 | image = cv2.imdecode(encimg, 1).copy() 378 | text = f"{algorithm} quality={quality}" 379 | 380 | elif algorithm in ['h264', 'hevc', 'mpeg', 'mpeg2', 'vp9']: 381 | # Convert image to video 
format 382 | height, width, _ = image.shape 383 | codec = algorithm 384 | container = 'mpeg' 385 | input_args = {} 386 | 387 | if algorithm == 'mpeg': 388 | codec = 'mpeg1video' 389 | # Add required parameters for MPEG-1 390 | input_args = {'framerate': '25'} # MPEG-1 needs a framerate 391 | output_args = { 392 | 'qscale:v': str(randint(*mpeg_qscale_range)), 393 | 'g': '1', # One keyframe per frame 394 | 'bf': '0' # No B-frames 395 | } 396 | 397 | elif algorithm == 'mpeg2': 398 | codec = 'mpeg2video' 399 | input_args = {'framerate': '25'} 400 | output_args = { 401 | 'qscale:v': str(randint(*mpeg2_qscale_range)), 402 | 'g': '1', 403 | 'bf': '0' 404 | } 405 | 406 | elif algorithm == 'h264': 407 | crf_level = randint(*h264_crf_level_range) 408 | output_args = {'crf': crf_level} 409 | 410 | elif algorithm == 'hevc': 411 | crf_level = randint(*hevc_crf_level_range) 412 | output_args = {'crf': crf_level, 'x265-params': 'log-level=0'} 413 | 414 | elif algorithm == 'vp9': 415 | codec = 'libvpx-vp9' 416 | container = 'webm' 417 | crf_level = randint(*vp9_crf_level_range) 418 | output_args = {'crf': str(crf_level), 'b:v': '0', 'cpu-used': '5'} 419 | 420 | process1 = None 421 | process2 = None 422 | 423 | try: 424 | # Encode and decode using ffmpeg 425 | process1 = ( 426 | ffmpeg 427 | .input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{width}x{height}', **input_args) 428 | .output('pipe:', format=container, vcodec=codec, **output_args) 429 | .global_args('-loglevel', 'fatal') 430 | .global_args('-max_muxing_queue_size', '300000') 431 | .run_async(pipe_stdin=True, pipe_stdout=True) 432 | ) 433 | 434 | # Write input image 435 | process1.stdin.write(image.tobytes()) 436 | process1.stdin.close() 437 | 438 | # Read compressed output 439 | compressed_output = process1.stdout.read() 440 | process1.wait(timeout=10) # 10 second timeout 441 | 442 | if process1.returncode != 0: 443 | raise RuntimeError(f"FFmpeg encoding failed with return code {process1.returncode}") 444 | 445 | process2 = ( 446 | ffmpeg 447 | .input('pipe:', format=container) 448 | .output('pipe:', format='rawvideo', pix_fmt='bgr24') 449 | .global_args('-loglevel', 'fatal') 450 | .run_async(pipe_stdin=True, pipe_stdout=True) 451 | ) 452 | 453 | # Write compressed data 454 | process2.stdin.write(compressed_output) 455 | process2.stdin.close() 456 | 457 | # Read all output data 458 | out = process2.stdout.read() 459 | process2.wait(timeout=10) 460 | 461 | if process2.returncode != 0: 462 | raise RuntimeError(f"FFmpeg decoding failed with return code {process2.returncode}") 463 | 464 | # Take only the bytes we need and reshape 465 | image = np.frombuffer(out, np.uint8)[:(height * width * 3)].reshape([height, width, 3]).copy() 466 | 467 | except Exception as e: 468 | logging.error(f"FFmpeg processing failed: {str(e)}") 469 | # Clean up processes if they're still running 470 | for p in [process1, process2]: 471 | try: 472 | if p and p.poll() is None: 473 | p.kill() 474 | except Exception: 475 | logging.exception("Error cleaning up processes") 476 | raise 477 | 478 | first_arg = list(output_args.items())[0] 479 | text = f"{algorithm} {first_arg[0]}={first_arg[1]}" 480 | 481 | return image, text 482 | 483 | def apply_scale(image): 484 | # Convert image to float32 and normalize pixel values 485 | image = np.float32(image) / 255.0 486 | 487 | text = '' 488 | # Calculate new size 489 | h, w = image.shape[:2] 490 | new_h = int(h * size_factor) 491 | new_w = int(w * size_factor) 492 | 493 | # Choose scale algorithm 494 | if scale_randomize: 495 | 
algorithm = choice(scale_algorithms) 496 | else: 497 | algorithm = scale_algorithms[0] 498 | 499 | interpolation_map = { 500 | 'nearest': ResizeFilter.Nearest, 501 | 'box' : ResizeFilter.Box, 502 | 'hermite' : ResizeFilter.Hermite, 503 | 'hamming' : ResizeFilter.Hamming, 504 | 'linear': ResizeFilter.Linear, 505 | 'hann' : ResizeFilter.Hann, 506 | 'lagrange' : ResizeFilter.Lagrange, 507 | 'cubic_catrom': ResizeFilter.CubicCatrom, 508 | 'cubic_mitchell': ResizeFilter.CubicMitchell, 509 | 'cubic_bspline': ResizeFilter.CubicBSpline, 510 | 'lanczos': ResizeFilter.Lanczos, 511 | 'gauss': ResizeFilter.Gauss 512 | } 513 | 514 | # Determine if gamma correction should be applied 515 | # Skip gamma correction for nearest neighbor since it doesn't interpolate 516 | use_gamma = algorithm != 'nearest' 517 | 518 | if algorithm == 'down_up': 519 | if scale_randomize: 520 | algorithm1 = choice(down_up_scale_algorithms) 521 | algorithm2 = choice(down_up_scale_algorithms) 522 | else: 523 | algorithm1 = down_up_scale_algorithms[0] 524 | algorithm2 = down_up_scale_algorithms[-1] 525 | scale_factor = np.random.uniform(*scale_range) 526 | # Apply gamma correction based on the algorithms used 527 | use_gamma1 = algorithm1 != 'nearest' 528 | use_gamma2 = algorithm2 != 'nearest' 529 | image = resize(image, (int(w * scale_factor), int(h * scale_factor)), interpolation_map[algorithm1], gamma_correction=use_gamma1) 530 | image = resize(image, (new_w, new_h), interpolation_map[algorithm2], gamma_correction=use_gamma2) 531 | if print_to_image: 532 | text = f"{algorithm} scale1factor={scale_factor:.2f} scale1algorithm={algorithm1} scale2factor={size_factor/scale_factor:.2f} scale2algorithm={algorithm2}" 533 | if print_to_textfile: 534 | text = f"{algorithm} scale1factor={scale_factor:.2f} scale1algorithm={algorithm1} scale2factor={size_factor/scale_factor:.2f} scale2algorithm={algorithm2}" 535 | else: 536 | image = resize(image, (new_w, new_h), interpolation_map[algorithm], gamma_correction=use_gamma) 537 | if print_to_image: 538 | text = f"{algorithm} size factor={size_factor}" 539 | if print_to_textfile: 540 | text = f"{algorithm} size factor={size_factor}" 541 | 542 | # Convert image back to uint8 after resizing for script compatibility 543 | image = (image * 255).astype(np.uint8) 544 | 545 | return image, text 546 | 547 | def process_image(image_path): 548 | image = cv2.imread(image_path) 549 | if image is None: 550 | print(f"Failed to load image at {image_path}") 551 | return 552 | 553 | degradation_order = [] 554 | all_text = [] 555 | 556 | if degradations_randomize: 557 | for degradation in degradations: 558 | if degradation == 'blur' and random() < blur_likelihood: 559 | degradation_order.append('blur') 560 | elif degradation == 'noise' and random() < noise_likelihood: 561 | degradation_order.append('noise') 562 | elif degradation == 'chroma' and random() < chroma_likelihood: 563 | degradation_order.append('chroma') 564 | elif degradation == 'compression' and random() < compression_likelihood: 565 | degradation_order.append('compression') 566 | elif degradation == 'scale' and random() < scale_likelihood: 567 | degradation_order.append('scale') 568 | elif degradation == 'quantization' and random() < quantization_likelihood: 569 | degradation_order.append('quantization') 570 | elif degradation == 'unsharp_mask' and random() < unsharp_mask_likelihood: 571 | degradation_order.append('unsharp_mask') 572 | shuffle(degradation_order) 573 | else: 574 | degradation_order = degradations.copy() 575 | 576 | for order, 
degradation in enumerate(degradation_order, 1): 577 | if degradation == 'blur': 578 | image, text = apply_blur(image) 579 | elif degradation == 'noise': 580 | image, text = apply_noise(image) 581 | elif degradation == 'chroma': 582 | image, text = apply_chroma(image) 583 | elif degradation == 'compression': 584 | image, text = apply_compression(image) 585 | elif degradation == 'scale': 586 | image, text = apply_scale(image) 587 | elif degradation == 'quantization': 588 | image, text = apply_quantization(image) 589 | elif degradation == 'unsharp_mask': 590 | image, text = apply_unsharp_mask(image, config) 591 | all_text.append(f"{degradation}: {text}") 592 | 593 | if print_to_image: 594 | for order, text in enumerate(all_text, 1): 595 | image = print_text_to_image(image, text, order) 596 | 597 | # Save image 598 | output_path = os.path.join(output_folder, os.path.relpath(image_path, input_folder)) 599 | output_path = os.path.splitext(output_path)[0] + '.' + output_format 600 | 601 | # Create output folder if it doesn't exist 602 | os.makedirs(os.path.dirname(output_path), exist_ok=True) 603 | cv2.imwrite(output_path, image) 604 | 605 | if print_to_textfile: 606 | print_text_to_textfile(path_to_textfile + "/applied_degradations.txt", os.path.basename(output_path) + ' - ' + ', '.join(all_text)) 607 | 608 | # Process images recursively 609 | image_paths = [] 610 | for subdir, dirs, files in os.walk(input_folder): 611 | for file in files: 612 | image_paths.append(os.path.join(subdir, file)) 613 | 614 | if __name__ == "__main__": 615 | with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor: 616 | futures = {executor.submit(process_image, image_path) for image_path in image_paths} 617 | kwargs = { 618 | 'total': len(futures), 619 | 'unit': 'it', 620 | 'unit_scale': True, 621 | 'leave': True 622 | } 623 | try: 624 | for f in tqdm(concurrent.futures.as_completed(futures), **kwargs): 625 | # Disable this block and replace with "pass" to hide exceptions 626 | try: 627 | f.result() # This will raise the exception if one was thrown 628 | except Exception as e: 629 | print(f"An error occurred: {e}") 630 | 631 | except KeyboardInterrupt: 632 | print("Interrupted by user, terminating processes...") 633 | executor.shutdown(wait=False) 634 | for future in futures: 635 | future.cancel() 636 | 637 | -------------------------------------------------------------------------------- /Dataset Destroyer/requirements.txt: -------------------------------------------------------------------------------- 1 | configparser 2 | opencv-python 3 | numpy 4 | ffmpeg-python 5 | tqdm 6 | pillow 7 | chainner_ext 8 | -------------------------------------------------------------------------------- /De-dupe Images/ImageDeDupe.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import shutil 4 | from pathlib import Path 5 | from hashlib import sha256 6 | 7 | from PIL import Image 8 | 9 | 10 | # Set up logging 11 | logging.basicConfig(level=logging.INFO) 12 | 13 | # Prompt the user for the file paths 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("--hr", type=str, required=True, help="Path to the HR (ground-truth) folder of images") 16 | parser.add_argument("--lr", type=str, required=True, help="Path to the LR (low-res) folder of images") 17 | parser.add_argument("--delete", action="store_true", help="Delete duplicates instead of moving them to a temporary directory") 18 | 19 | # 
Parse arguments 20 | args = parser.parse_args() 21 | 22 | # ==== 23 | 24 | HR_PATH = Path(args.hr) 25 | LR_PATH = Path(args.lr) 26 | 27 | if not HR_PATH.exists(): 28 | logging.error(f"The `--hr` path specified does not exist: {HR_PATH}") 29 | if HR_PATH.is_file(): 30 | logging.error(f"The `--hr` path specified is a file path, not a directory: {HR_PATH}") 31 | 32 | if not LR_PATH.exists(): 33 | logging.error(f"The `--lr` path specified does not exist: {LR_PATH}") 34 | if LR_PATH.is_file(): 35 | logging.error(f"The `--lr` path specified is a file path, not a directory: {LR_PATH}") 36 | 37 | HR_MOVED_PATH = HR_PATH.parent / f"{HR_PATH.name}_dupes" 38 | LR_MOVED_PATH = LR_PATH.parent / f"{LR_PATH.name}_dupes" 39 | 40 | if not args.delete: 41 | HR_MOVED_PATH.mkdir(parents=True, exist_ok=True) 42 | LR_MOVED_PATH.mkdir(parents=True, exist_ok=True) 43 | 44 | # ==== 45 | 46 | hashed_files = {} 47 | 48 | for hr_img_path in sorted(HR_PATH.iterdir()): 49 | if hr_img_path.suffix not in (".png", ".jpg", ".jpeg"): 50 | continue 51 | with Image.open(hr_img_path) as hr_img: 52 | image_hash = sha256(hr_img.tobytes()).digest() 53 | for prev_file, prev_hash in hashed_files.items(): 54 | if prev_hash == image_hash: 55 | lr_img_path = LR_PATH / hr_img_path.name 56 | if args.delete: 57 | hr_img_path.unlink() 58 | lr_img_path.unlink() 59 | op = "Deleted" 60 | else: 61 | shutil.move(hr_img_path, HR_MOVED_PATH / hr_img_path.name) 62 | shutil.move(lr_img_path, LR_MOVED_PATH / hr_img_path.name) 63 | op = "Moved" 64 | logging.info(f"{op} duplicate image {hr_img_path.name}. Matching file: {prev_file.name}") 65 | break 66 | else: 67 | hashed_files[hr_img_path] = image_hash 68 | 69 | logging.info("Done!") 70 | -------------------------------------------------------------------------------- /De-dupe Images/README.md: -------------------------------------------------------------------------------- 1 | ## Just use a tool like [czkawka](https://github.com/qarmin/czkawka) instead 2 | 3 | 4 | 5 | This script moves (or deletes) duplicate images from an Image Pair Dataset (HR/LR dataset). 6 | 7 | It only checks for duplicates in the HR images. It then duplicates the move or delete operation in the LR images folder by matching the filename. 8 | This means the LR files may not strictly be duplicates, but the differences would be caused by mis-alignment with HR or differences in compression. 9 | I recommend ensuring the LR and HR datasets match up correctly by filename. 10 | 11 | It checks for duplicates by matching each image's SHA256 hash against all other images in the HR folder. This means images are only considered a 12 | duplicate if they match exactly. Also note that by default it moves duplicate images to another folder so you can verify the operation, but if you 13 | wish to delete them instead, then pass the `--delete` flag. 14 | 15 | `ImageDeDupe.py [-h] --hr HR_OR_GT_FOLDER --lr LR_OR_LQ_FOLDER [--delete]` 16 | 17 | **Required Packages** 18 | * pillow 19 | -------------------------------------------------------------------------------- /Directory Tree Maker/README.md: -------------------------------------------------------------------------------- 1 | # Directory Tree Maker 2 | 3 | This script generates a visual representation of a directory, using emojis to represent different file types. It's designed to make trees that are easier to read 🙂. 4 | 5 | ## Features 6 | - Generate a pretty tree structure for any directory.
7 | - Categorize files with emojis 8 | - Folders: 📁 9 | - Images: 🖼️ 10 | - Audio: 🎵 11 | - Videos: 🎬 12 | - Documents: 📄 13 | - Code files: 📝 14 | - Archives: 📦 15 | - Executables: ⚙️ 16 | - Fonts: 🔤 17 | - Others: 📄 18 | - Ignore common system folders (`.git`, `__pycache__`, etc.). 19 | - Output is saved to a `directory_tree.md` file in the specified directory for later reference. 20 | 21 | ## Usage 22 | 1. Install requirements: 23 | ```bash 24 | pip install -r requirements.txt 25 | ``` 26 | 27 | 2. Run the script from the terminal: 28 | 29 | ```bash 30 | python directory_tree.py [path] [--ignore PATTERN ...] 31 | ``` 32 | 33 | - `path`: Path to the directory to generate the tree for. Defaults to the current directory. 34 | - `--ignore`: (Optional) Additional patterns to ignore (e.g., temporary or hidden files). 35 | - Example: `python directory_tree.py [path] --ignore .git node_modules` 36 | 37 | ## Example Output 38 | Here’s what the tree output looks like: 39 | 40 | ``` 41 | Directory Tree for: /example/path 42 | 43 | ├── 📁 test1 44 | │ ├── 📁 media 45 | │ │ ├── 🖼️ image2.png 46 | │ │ └── 🎬 video.mp4 47 | │ └── 🖼️ image1.png 48 | ├── 📁 test2 49 | │ ├── 🎬 video1.mp4 50 | │ └── 🎬 video2.mp4 51 | ├── 📦 compressed.zip 52 | ├── 📄 random_text_doc.odt 53 | └── 📄 text_files!.txt 54 | ``` 55 | -------------------------------------------------------------------------------- /Directory Tree Maker/directory_tree.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | from typing import Optional 4 | 5 | try: 6 | import magic 7 | except ImportError: 8 | magic = None 9 | print("❌ Error: The 'python-magic' library is not installed. Please install it using 'pip install python-magic' before running this script.") 10 | exit(1) 11 | 12 | class DirectoryTreeGenerator: 13 | def __init__(self): 14 | # Define categories with appropriate emojis 15 | self.categories = { 16 | 'directory': '📁', 17 | 'image': '🖼️', 18 | 'audio': '🎵', 19 | 'video': '🎬', 20 | 'document': '📄', 21 | 'executable': '⚙️', 22 | 'archive': '📦', 23 | 'code': '📝', 24 | 'data': '📊', 25 | 'web': '🌐', 26 | '3d': '💠', 27 | 'font': '🔤', 28 | 'other': '📄', # Default category 29 | } 30 | 31 | def get_file_emoji(self, file_path: str) -> str: 32 | """Get the appropriate emoji for a file based on its type.""" 33 | if os.path.isdir(file_path): 34 | return self.categories['directory'] 35 | 36 | try: 37 | mime_type = magic.from_file(file_path, mime=True) 38 | if mime_type.startswith('image'): 39 | return self.categories['image'] 40 | elif mime_type.startswith('audio'): 41 | return self.categories['audio'] 42 | elif mime_type.startswith('video'): 43 | return self.categories['video'] 44 | elif mime_type.startswith('text'): 45 | if 'html' in mime_type or 'xml' in mime_type: 46 | return self.categories['web'] 47 | elif 'javascript' in mime_type or 'python' in mime_type: 48 | return self.categories['code'] 49 | else: 50 | return self.categories['document'] 51 | elif mime_type.startswith('application'): 52 | if 'zip' in mime_type or 'x-tar' in mime_type or 'x-rar' in mime_type: 53 | return self.categories['archive'] 54 | elif 'pdf' in mime_type: 55 | return self.categories['document'] 56 | elif 'json' in mime_type or 'xml' in mime_type: 57 | return self.categories['data'] 58 | elif 'octet-stream' in mime_type: 59 | return self.categories['executable'] 60 | elif 'font' in mime_type: 61 | return self.categories['font'] 62 | else: 63 | return self.categories['other'] 64 | else: 65 | return 
self.categories['other'] 66 | except Exception as e: 67 | print(f"⚠️ Error detecting file type for {file_path}: {e}") 68 | return self.categories['other'] 69 | 70 | def generate_tree(self, root_path: str, prefix: str = '', ignore_patterns: Optional[list] = None, is_subdir: bool = False) -> str: 71 | """Generate a tree structure starting from the root path.""" 72 | if ignore_patterns is None: 73 | ignore_patterns = ['.git', '__pycache__', 'node_modules', '.idea'] 74 | 75 | output = [] 76 | root_path = os.path.abspath(root_path) 77 | 78 | try: 79 | items = os.listdir(root_path) 80 | except PermissionError: 81 | return f"{prefix}└── ⛔ Permission Denied\n" 82 | 83 | items.sort(key=lambda x: (not os.path.isdir(os.path.join(root_path, x)), x.lower())) 84 | items = [item for item in items if not any(pattern in item for pattern in ignore_patterns)] 85 | 86 | for index, item in enumerate(items): 87 | item_path = os.path.join(root_path, item) 88 | is_last_item = index == len(items) - 1 89 | 90 | current_prefix = '└── ' if is_last_item else '├── ' 91 | next_prefix = ' ' if is_last_item else '│ ' 92 | 93 | if os.path.isdir(item_path): 94 | output.append(f"{prefix}{current_prefix}{self.get_file_emoji(item_path)} {item}\n") 95 | output.append(self.generate_tree( 96 | item_path, 97 | prefix + next_prefix, 98 | ignore_patterns, 99 | is_subdir=True 100 | )) 101 | else: 102 | emoji = self.get_file_emoji(item_path) 103 | output.append(f"{prefix}{current_prefix}{emoji} {item}\n") 104 | 105 | return ''.join(output) 106 | 107 | def main(): 108 | """Main function to run the directory tree generator.""" 109 | import argparse 110 | 111 | parser = argparse.ArgumentParser(description='Generate a directory tree with emojis') 112 | parser.add_argument('path', nargs='?', default='.', help='Path to generate tree from (defaults to the current directory)') 113 | parser.add_argument('--ignore', nargs='+', help='Patterns to ignore', default=[]) 114 | 115 | args = parser.parse_args() 116 | 117 | if not args.path: 118 | print("❌ Error: The 'path' argument is required.") 119 | exit(1) 120 | 121 | tree_generator = DirectoryTreeGenerator() 122 | ignore_patterns = ['.git', '__pycache__', 'node_modules', '.idea'] + args.ignore 123 | 124 | print(f"Directory Tree for: {os.path.abspath(args.path)}\n") 125 | print(tree_generator.generate_tree(args.path, ignore_patterns=ignore_patterns)) 126 | 127 | # Generate the tree and save to a Markdown file 128 | tree_output = tree_generator.generate_tree(args.path, ignore_patterns=ignore_patterns) 129 | markdown_file = os.path.join(os.path.abspath(args.path), "directory_tree.md") 130 | 131 | with open(markdown_file, "w", encoding="utf-8") as md_file: 132 | md_file.write(f"# Directory Tree for: {os.path.abspath(args.path)}\n\n") 133 | md_file.write(tree_output) 134 | 135 | print(f"\nDirectory tree saved to: {markdown_file}") 136 | 137 | if __name__ == "__main__": 138 | main() 139 | -------------------------------------------------------------------------------- /Directory Tree Maker/requirements.txt: -------------------------------------------------------------------------------- 1 | python-magic==0.4.27 2 | python-magic-bin==0.4.14 3 | libmagic==1.0 -------------------------------------------------------------------------------- /Extract Video Frames/README.md: -------------------------------------------------------------------------------- 1 | *Written by ChatGPT* 2 | 3 | This script extracts frames from a given input video and saves them as images in a specified output folder.
This was tested with videos in various formats. 4 | 5 | Features: 6 | * FPS Setting 7 | * Start and End frame settings 8 | * Output format option 9 | * Progress bar 10 | 11 | Usage: 12 | `python VideoFrameExtract.py [-h] [-f FPS] [-fmt FORMAT] [-s START] [-e END] input_video output_folder` 13 | 14 | input_video: path to the input video file from which the frames will be extracted. 15 | output_folder: path to the output folder where the extracted frames will be saved. 16 | -f, --fps: frame rate at which to extract the frames. This argument is optional. If not specified, the frames will be extracted at the frame rate of the input video. 17 | -fmt, --format: image format in which to save the frames. This argument is optional. If not specified, the frames will be saved as PNG images. 18 | -s, --start: start frame number. This argument is optional. If not specified, the extraction will start from the first frame. 19 | -e, --end: end frame number. This argument is optional. If not specified, the extraction will end at the last frame. 20 | 21 | **Requires these packages:** 22 | * opencv-python 23 | * tqdm 24 | -------------------------------------------------------------------------------- /Extract Video Frames/VideoFrameExtract.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | import sys 4 | import argparse 5 | from tqdm import tqdm 6 | 7 | # Parse the command line arguments 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument("input_video", help="path to the input video file") 10 | parser.add_argument("output_folder", help="path to the output folder where the frames will be saved") 11 | parser.add_argument("-f", "--fps", type=float, help="frame rate at which to extract the frames") 12 | parser.add_argument("-fmt", "--format", help="image format in which to save the frames (e.g. 
jpg, png)") 13 | parser.add_argument("-s", "--start", type=int, help="start frame number") 14 | parser.add_argument("-e", "--end", type=int, help="end frame number") 15 | args = parser.parse_args() 16 | 17 | # Open the video file 18 | video = cv2.VideoCapture(args.input_video) 19 | 20 | # Check if the video file was opened successfully 21 | if not video.isOpened(): 22 | print(f"Error: unable to open video file {args.input_video}") 23 | sys.exit() 24 | 25 | # Get the total number of frames in the video 26 | total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) 27 | 28 | # Set the start and end frame numbers 29 | if args.start: 30 | start_frame = args.start 31 | else: 32 | start_frame = 0 33 | if args.end: 34 | end_frame = args.end 35 | else: 36 | end_frame = total_frames 37 | 38 | # Check if the start and end frame numbers are valid 39 | if start_frame >= total_frames or end_frame > total_frames or start_frame >= end_frame: 40 | print("Error: invalid start and end frame numbers") 41 | sys.exit() 42 | 43 | # Create the output folder if it doesn't exist 44 | if not os.path.exists(args.output_folder): 45 | os.makedirs(args.output_folder) 46 | 47 | # Get the frame rate of the video 48 | video_fps = video.get(cv2.CAP_PROP_FPS) 49 | 50 | # Calculate the number of frames to skip between each extracted frame 51 | if args.fps: 52 | skip_frames = int(video_fps / args.fps) 53 | else: 54 | skip_frames = 1 55 | 56 | # Extract the frames and save them to the output folder 57 | for frame_number in tqdm(range(start_frame, end_frame, skip_frames)): 58 | # Read the current frame 59 | success, frame = video.read() 60 | 61 | # Save the frame to the output folder 62 | if args.format: 63 | cv2.imwrite(f"{args.output_folder}/{frame_number}.{args.format}", frame) 64 | else: 65 | cv2.imwrite(f"{args.output_folder}/{frame_number}.png", frame) 66 | 67 | # Release the video file 68 | video.release() 69 | 70 | -------------------------------------------------------------------------------- /Find Alpha Images/README.md: -------------------------------------------------------------------------------- 1 | This script allows you to quickly identify images in a given folder that have alpha layers. 2 | 3 | **Required Packages:** 4 | 5 | * pillow 6 | 7 | How to use: `python findAlphaImages.py /path/to/folder` 8 | 9 | This is an experimental script. 
10 | 
-------------------------------------------------------------------------------- /Find Alpha Images/findAlphaImages.py: --------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from PIL import Image
4 | 
5 | def has_alpha_layer(image_path):
6 |     try:
7 |         image = Image.open(image_path)
8 |         return image.mode.endswith('A')
9 |     except Exception as e:
10 |         print(f"Error processing {image_path}: {e}")
11 |         return False
12 | 
13 | def find_images_with_alpha(folder_path):
14 |     images_with_alpha = []
15 |     for root, dirs, files in os.walk(folder_path):
16 |         for file in files:
17 |             if file.lower().endswith(('.png', '.tiff', '.bmp', '.gif')):
18 |                 image_path = os.path.join(root, file)
19 |                 if has_alpha_layer(image_path):
20 |                     images_with_alpha.append(image_path)
21 |     return images_with_alpha
22 | 
23 | if __name__ == "__main__":
24 |     if len(sys.argv) != 2:
25 |         print(f"Usage: python {sys.argv[0]} <folder_path>")
26 |         sys.exit(1)
27 | 
28 |     folder_path = sys.argv[1]
29 |     if not os.path.isdir(folder_path):
30 |         print(f"{folder_path} is not a valid directory.")
31 |         sys.exit(1)
32 | 
33 |     images_with_alpha = find_images_with_alpha(folder_path)
34 |     if images_with_alpha:
35 |         print("Images with alpha layers:")
36 |         for image_path in images_with_alpha:
37 |             print(image_path)
38 |     else:
39 |         print("No images with alpha layers found.")
-------------------------------------------------------------------------------- /Find Misaligned Images/README.md: --------------------------------------------------------------------------------
1 | *Written with the assistance of GitHub Copilot*
2 | 
3 | This script lets you automatically detect paired misaligned images and move them to a specified output folder.
4 | 
5 | **Features:**
6 | 
7 | * Compare images from two folders based on their similarity scores
8 | * Move images with low similarity scores to a specified output folder
9 | * Overlay the moved images and save them in the output folder to showcase misalignment
10 | * Multithreading support for faster image comparison and moving
11 | 
12 | **Required Packages:**
13 | * OpenCV
14 | * numpy
15 | 
16 | **How to use:** `python findMisalignedImages.py`
17 | 
18 | Before running the script, make sure to set the `hr_path`, `lr_path`, and `output_path` paths. Set these to your hr, lr, and output folders respectively.
19 | 
20 | There's an adjustable `threshold` value in the script. Increasing this makes detection more strict, though it can lead to false positives. For cartoons and anime, the current value of 0.7 has been the most accurate in my experience.
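The score comes from OpenCV's phase correlation: `cv2.phaseCorrelate` returns a `(shift, response)` pair, and the script keeps the response peak (roughly 0 to 1, higher meaning the two images line up better) as the similarity score. A minimal sketch of the same check on a single pair, assuming same-sized images and placeholder paths:

```python
import cv2
import numpy as np

# Placeholder paths; any same-sized grayscale pair works
img1 = cv2.imread("hr/0001.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("lr/0001.png", cv2.IMREAD_GRAYSCALE)

# phaseCorrelate returns (shift, response); the response peak is the score
_, response = cv2.phaseCorrelate(np.float32(img1), np.float32(img2))
print("misaligned" if response < 0.7 else "aligned", response)
```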
21 | 
-------------------------------------------------------------------------------- /Find Misaligned Images/findMisalignedImages.py: --------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import cv2
4 | import concurrent.futures
5 | import numpy as np
6 | import logging
7 | 
8 | def confirm_paths(hr_path, lr_path, output_path):
9 |     # Find the first pair of images to ensure paths are correct
10 |     try:
11 |         hr_files = {os.path.relpath(os.path.join(root, file), hr_path) for root, dirs, files in os.walk(hr_path) for file in files}
12 |         lr_files = {os.path.relpath(os.path.join(root, file), lr_path) for root, dirs, files in os.walk(lr_path) for file in files}
13 |         common_files = hr_files & lr_files
14 |         sample_file = next(iter(common_files))
15 |     except StopIteration:
16 |         sample_file = "No common files found in HR and LR paths"
17 | 
18 |     # Confirm paths are correct
19 |     print(f"HR Path: {hr_path} (Sample file: {os.path.join(hr_path, sample_file)})")
20 |     print(f"LR Path: {lr_path} (Sample file: {os.path.join(lr_path, sample_file)})")
21 |     print(f"Output Path: {output_path}")
22 |     confirm = input("Are these paths correct? (yes/no): ")
23 |     if confirm.lower() != 'yes':
24 |         print("Please correct the paths and run the script again.")
25 |         exit()
26 | 
27 | # Paths (raw strings so backslashes aren't treated as escape sequences)
28 | hr_path = r'path\to\hr\folder'
29 | lr_path = r'path\to\lr\folder'
30 | output_path = r'path\to\output\folder'
31 | 
32 | confirm_paths(hr_path, lr_path, output_path)
33 | 
34 | # Set up logging (create the output folder first so the log file can be opened)
35 | os.makedirs(output_path, exist_ok=True)
36 | log_file_path = os.path.join(output_path, 'image_comparator.log')
37 | logging.basicConfig(level=logging.INFO,
38 |                     format='%(message)s',
39 |                     handlers=[logging.FileHandler(log_file_path, mode='w'), logging.StreamHandler()])
40 | 
41 | # List to store 'Moved' messages
42 | moved_files = []
43 | 
44 | class ImageComparator:
45 |     def __init__(self, threshold=0.7):
46 |         self.threshold = threshold
47 | 
48 |     def overlay_images(self, img1_path, img2_path, dest_path):
49 |         img1 = cv2.imread(img1_path)
50 |         img2 = cv2.imread(img2_path)
51 |         # addWeighted needs matching sizes; resize the second image if necessary
52 |         if img1.shape[:2] != img2.shape[:2]:
53 |             img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
54 |         overlay = cv2.addWeighted(img1, 0.5, img2, 0.5, 0)
55 |         cv2.imwrite(dest_path, overlay)
56 | 
57 |     def compare_images(self, img1_path, img2_path):
58 |         img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
59 |         img2 = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
60 |         # phaseCorrelate needs same-sized inputs; resize the second image if necessary
61 |         if img1.shape != img2.shape:
62 |             img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
63 |         # phaseCorrelate returns (shift, response); the response peak (roughly 0-1)
64 |         # is used as the similarity score, where higher means better aligned
65 |         _, response = cv2.phaseCorrelate(np.float32(img1), np.float32(img2))
66 |         score = float(response)
67 |         logging.info(f'{img1_path}, {img2_path}, Score: {score}')
68 |         return score
69 | 
70 |     def process_image(self, filename, folder1, folder2, dest_folder):
71 |         img1_path = os.path.join(folder1, filename)
72 |         img2_path = os.path.join(folder2, filename)
73 | 
74 |         if os.path.isfile(img1_path) and os.path.isfile(img2_path):
75 |             score = self.compare_images(img1_path, img2_path)
76 | 
77 |             if score < self.threshold:
78 |                 # Get the relative paths of the source images
79 |                 rel_path1 = os.path.relpath(img1_path, folder1)
80 |                 rel_path2 = os.path.relpath(img2_path, folder2)
81 | 
82 |                 # Create the destination paths using the relative paths
83 |                 dest_path1 = os.path.join(dest_folder, 'hr', rel_path1)
84 |                 dest_path2 = os.path.join(dest_folder, 'lr', rel_path2)
85 |                 overlay_path = os.path.join(dest_folder, 'overlays', filename)
86 | 
87 |                 os.makedirs(os.path.dirname(dest_path1), exist_ok=True)
88 |                 os.makedirs(os.path.dirname(dest_path2), exist_ok=True)
89 |                 os.makedirs(os.path.dirname(overlay_path), exist_ok=True)
90 | 
91 |                 shutil.move(img1_path, dest_path1)
92 |                 shutil.move(img2_path, dest_path2)
93 |                 self.overlay_images(dest_path1, dest_path2, overlay_path)
94 |                 moved_message = f'Moved {filename} from {img1_path} and {img2_path} to {dest_folder} due to low similarity score'
95 |                 moved_files.append(moved_message)
96 |                 logging.info(moved_message)
97 | 
98 |     def scan_and_compare(self, folder1, folder2, dest_folder):
99 |         try:
100 |             with concurrent.futures.ThreadPoolExecutor() as executor:
101 |                 for dirpath, _, filenames in os.walk(folder1):
102 |                     # Get the relative path to the current directory from folder1
103 |                     rel_dir = os.path.relpath(dirpath, folder1)
104 | 
105 |                     # Construct the corresponding directory path in folder2
106 |                     dirpath2 = os.path.join(folder2, rel_dir)
107 | 
108 |                     # Check if the corresponding directory exists in folder2
109 |                     if os.path.exists(dirpath2):
110 |                         # executor.map evaluates lazily; list() drains it so the comparisons actually run
111 |                         list(executor.map(self.process_image, filenames, [dirpath]*len(filenames), [dirpath2]*len(filenames), [dest_folder]*len(filenames)))
112 |         except KeyboardInterrupt:
113 |             print("Interrupted by user. Exiting...")
114 |             return
115 | 
116 | # Usage
117 | comparator = ImageComparator()
118 | comparator.scan_and_compare(os.path.normpath(hr_path), os.path.normpath(lr_path), os.path.normpath(output_path))
119 | 
120 | # Log moved files at the end of the log file
121 | if moved_files:
122 |     for moved_file in moved_files:
123 |         logging.info(moved_file)
124 | else:
125 |     logging.info("No images were moved.")
126 | 
-------------------------------------------------------------------------------- /Find Misaligned Images/requirements.txt: --------------------------------------------------------------------------------
1 | opencv-python
2 | numpy
3 | 
-------------------------------------------------------------------------------- /Hue Adjustment/README.md: --------------------------------------------------------------------------------
1 | *Written with the assistance of GitHub Copilot*
2 | 
3 | This script lets you easily adjust hue, brightness, and contrast in a dataset. It can also duplicate the original images with various adjustments applied to them.
4 | 
5 | Consider using this script with particularly small image super-resolution datasets. It can help reduce color-based artifacts.
6 | 
7 | **Features:**
8 | 
9 | * Adjust hue, brightness, and contrast separately
10 | * Duplicate images with adjustments applied to expand the dataset
11 | 
12 | **Required Packages:**
13 | * OpenCV
14 | * numpy
15 | * Pillow
16 | 
17 | **How to use:** `python hue_adjustment.py -b -c -u -d 10`
18 | 
19 | Before running the script, make sure to set the `hr_dir`, `lr_dir`, and `output_hr_dir` / `output_lr_dir` paths. Set these to your hr, lr, and output folders respectively.
20 | 
21 | There are adjustable range values in the script. Set these to the desired ranges. The defaults are a reasonable starting point.
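One detail if you tweak the hue range: OpenCV stores hue as 0-179 in a uint8 channel, so a naive `hue + shift` can silently wrap at 255 before the modulo is applied. A small illustration of why the script upcasts to int16 first (values chosen arbitrarily):

```python
import numpy as np

hue = np.array([170, 10], dtype=np.uint8)  # OpenCV hue channel is 0-179
shift = 30
# Upcast before adding so 170 + 30 = 200 survives, then wrap back into 0-179
shifted = ((hue.astype(np.int16) + shift) % 180).astype(np.uint8)
print(shifted)  # [20 40]
```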
22 | 
-------------------------------------------------------------------------------- /Hue Adjustment/hue_adjustment.py: --------------------------------------------------------------------------------
1 | import argparse
2 | import cv2
3 | import os
4 | import random
5 | import numpy as np
6 | from PIL import Image, ImageEnhance
7 | 
8 | # Define the directories
9 | hr_dir = 'hr'
10 | lr_dir = 'lr'
11 | output_hr_dir = 'hr5'
12 | output_lr_dir = 'lr5'
13 | 
14 | # Create output directories if they don't exist
15 | os.makedirs(output_hr_dir, exist_ok=True)
16 | os.makedirs(output_lr_dir, exist_ok=True)
17 | 
18 | # Define the range for the brightness adjustments
19 | min_brightness = 0.6
20 | max_brightness = 1.4
21 | 
22 | # Define the range for the contrast adjustments
23 | min_contrast = 0.7
24 | max_contrast = 1.3
25 | 
26 | # Define the range for the hue adjustments
27 | min_hue = 0
28 | max_hue = 180
29 | 
30 | # Parse command line arguments
31 | parser = argparse.ArgumentParser()
32 | parser.add_argument('-d', '--duplicates', type=int, help='Number of duplicates to create', default=1)
33 | parser.add_argument('-b', '--brightness', action='store_true', help='Enable brightness shifting')
34 | parser.add_argument('-c', '--contrast', action='store_true', help='Enable contrast shifting')
35 | parser.add_argument('-u', '--hue', action='store_true', help='Enable hue shifting')
36 | args = parser.parse_args()
37 | 
38 | # Iterate over the images in the directories
39 | for filename in os.listdir(hr_dir):
40 |     if filename.endswith('.png'):
41 |         # Load the images (skip the pair if either file is missing or unreadable)
42 |         hr_image = cv2.imread(os.path.join(hr_dir, filename))
43 |         lr_image = cv2.imread(os.path.join(lr_dir, filename))
44 |         if hr_image is None or lr_image is None:
45 |             print(f"Skipping {filename}: could not read both images")
46 |             continue
47 | 
48 |         # Generate random adjustments and apply them
49 |         for i in range(args.duplicates):
50 |             print(f"Processing duplicate {i+1} for {filename}...")
51 |             hr_image_copy = hr_image.copy()
52 |             lr_image_copy = lr_image.copy()
53 | 
54 |             if args.brightness:
55 |                 brightness = random.uniform(min_brightness, max_brightness)
56 |                 hr_image_pil = Image.fromarray(cv2.cvtColor(hr_image_copy, cv2.COLOR_BGR2RGB))
57 |                 lr_image_pil = Image.fromarray(cv2.cvtColor(lr_image_copy, cv2.COLOR_BGR2RGB))
58 |                 enhancer = ImageEnhance.Brightness(hr_image_pil)
59 |                 hr_image_copy = cv2.cvtColor(np.array(enhancer.enhance(brightness)), cv2.COLOR_RGB2BGR)
60 |                 enhancer = ImageEnhance.Brightness(lr_image_pil)
61 |                 lr_image_copy = cv2.cvtColor(np.array(enhancer.enhance(brightness)), cv2.COLOR_RGB2BGR)
62 | 
63 |             if args.contrast:
64 |                 contrast = random.uniform(min_contrast, max_contrast)
65 |                 hr_image_pil = Image.fromarray(cv2.cvtColor(hr_image_copy, cv2.COLOR_BGR2RGB))
66 |                 lr_image_pil = Image.fromarray(cv2.cvtColor(lr_image_copy, cv2.COLOR_BGR2RGB))
67 |                 enhancer = ImageEnhance.Contrast(hr_image_pil)
68 |                 hr_image_copy = cv2.cvtColor(np.array(enhancer.enhance(contrast)), cv2.COLOR_RGB2BGR)
69 |                 enhancer = ImageEnhance.Contrast(lr_image_pil)
70 |                 lr_image_copy = cv2.cvtColor(np.array(enhancer.enhance(contrast)), cv2.COLOR_RGB2BGR)
71 | 
72 |             if args.hue:
73 |                 # Convert the image to HSV
74 |                 hr_image_hsv = cv2.cvtColor(hr_image_copy, cv2.COLOR_BGR2HSV)
75 |                 lr_image_hsv = cv2.cvtColor(lr_image_copy, cv2.COLOR_BGR2HSV)
76 | 
77 |                 # Change the hue; upcast to int16 first so values above 255
78 |                 # don't wrap at the uint8 limit before the modulo is applied
79 |                 hue_shift = random.randint(min_hue, max_hue)
80 |                 hr_image_hsv[..., 0] = ((hr_image_hsv[..., 0].astype(np.int16) + hue_shift) % 180).astype(np.uint8)
81 |                 lr_image_hsv[..., 0] = ((lr_image_hsv[..., 0].astype(np.int16) + hue_shift) % 180).astype(np.uint8)
82 | 
83 |                 # Convert the image back to BGR
84 |                 hr_image_copy = cv2.cvtColor(hr_image_hsv, cv2.COLOR_HSV2BGR)
85 |                 lr_image_copy = cv2.cvtColor(lr_image_hsv, cv2.COLOR_HSV2BGR)
86 | 
87 |             # Save the images
88 |             base_filename, ext = os.path.splitext(filename)
89 |             new_filename = f"{base_filename}_{i}{ext}"
90 |             print(f"Saving new image: {new_filename}")
91 |             cv2.imwrite(os.path.join(output_hr_dir, new_filename), hr_image_copy)
92 |             cv2.imwrite(os.path.join(output_lr_dir, new_filename), lr_image_copy)
-------------------------------------------------------------------------------- /Hue Adjustment/requirements.txt: --------------------------------------------------------------------------------
1 | opencv-python
2 | numpy
3 | pillow
4 | 
-------------------------------------------------------------------------------- /ICC to sRGB/README.md: --------------------------------------------------------------------------------
1 | # ICC to sRGB Conversion Script
2 | 
3 | This script converts images by applying their embedded ICC profiles and converting them to sRGB color space. It preserves alpha channels and supports batch processing through folder inputs.
4 | 
5 | **Features:**
6 | * Apply embedded ICC profiles to images
7 | * Convert images to sRGB color space
8 | * Preserve transparency/alpha channels
9 | * Support for batch processing via folders
10 | * Preserve directory structure in output
11 | * Convert multiple image formats (PNG, JPEG, TIFF, BMP, WebP)
12 | 
13 | **Required Packages:**
14 | * pillow
15 | 
16 | How to use: `python icc_to_srgb.py input_folder output_folder`
17 | 
18 | **Arguments:**
19 | 
20 | * `input_path` - Path to a single image file or a directory containing images to process.
21 | * `output_path` - Path to the output file (if processing a single image) or directory where processed images will be saved (if processing a folder).
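If you're not sure whether an image carries an embedded profile at all, Pillow exposes it through the image's `info` dictionary; this is the same key the script checks before converting. A quick probe with a placeholder filename:

```python
from PIL import Image

img = Image.open("photo.jpg")  # placeholder filename
print("has ICC profile:", "icc_profile" in img.info)
```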
22 | 23 | **Supported Input Formats:** 24 | * PNG 25 | * JPEG/JPG 26 | * TIFF 27 | * BMP 28 | * WebP 29 | 30 | **Notes:** 31 | * All output images are saved as PNG to ensure alpha channel support 32 | * Images without ICC profiles are simply converted to RGB/RGBA 33 | * Original alpha channels are preserved during the conversion process 34 | * Directory structure is preserved when processing folders 35 | * Subdirectories are automatically created in the output folder 36 | 37 | **Examples:** 38 | 39 | Process a folder of images: 40 | ```bash 41 | python icc_to_srgb.py input_folder output_folder 42 | ``` 43 | Process a single image: 44 | ```bash 45 | python icc_to_srgb.py input_image.jpg output_image.png 46 | ``` 47 | -------------------------------------------------------------------------------- /ICC to sRGB/icc_to_srgb.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | from PIL import Image 4 | import PIL.ImageCms as ImageCms 5 | from io import BytesIO 6 | 7 | def process_image(input_path, output_path): 8 | """Process a single image by applying its ICC profile and converting to sRGB while preserving alpha.""" 9 | try: 10 | # Open the image 11 | img = Image.open(input_path) 12 | 13 | # Store alpha channel if it exists 14 | has_alpha = 'A' in img.getbands() 15 | if has_alpha: 16 | # Extract alpha channel 17 | alpha = img.split()[-1] 18 | # Convert to RGB for color processing 19 | rgb_img = img.convert('RGB') 20 | else: 21 | rgb_img = img 22 | 23 | # Check if image has an ICC profile 24 | if 'icc_profile' in img.info: 25 | # Create profile objects 26 | input_profile = ImageCms.ImageCmsProfile(BytesIO(img.info['icc_profile'])) 27 | srgb_profile = ImageCms.createProfile('sRGB') 28 | 29 | # Convert the image 30 | rgb_converted = ImageCms.profileToProfile( 31 | rgb_img, 32 | input_profile, 33 | srgb_profile, 34 | outputMode='RGB' 35 | ) 36 | else: 37 | # If no ICC profile, just use RGB conversion 38 | rgb_converted = rgb_img 39 | 40 | # Reapply alpha channel if it existed 41 | if has_alpha: 42 | channels = list(rgb_converted.split()) 43 | channels.append(alpha) 44 | final_image = Image.merge('RGBA', channels) 45 | else: 46 | final_image = rgb_converted 47 | 48 | # Save the converted image 49 | final_image.save(output_path, 'PNG', icc_profile=None) 50 | print(f"Processed: {input_path} -> {output_path}") 51 | 52 | except Exception as e: 53 | print(f"Error processing {input_path}: {str(e)}") 54 | 55 | def process_folder(input_folder, output_folder): 56 | """Process all images in a directory.""" 57 | if not os.path.exists(output_folder): 58 | os.makedirs(output_folder) 59 | 60 | # Process each file in input directory 61 | for root, dirs, files in os.walk(input_folder): 62 | for file in files: 63 | if file.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.webp')): 64 | input_path = os.path.join(root, file) 65 | relative_path = os.path.relpath(input_path, input_folder) 66 | output_path = os.path.join(output_folder, os.path.splitext(relative_path)[0] + '.png') 67 | 68 | # Create necessary subdirectories in output folder 69 | os.makedirs(os.path.dirname(output_path), exist_ok=True) 70 | process_image(input_path, output_path) 71 | 72 | def process_input(input_path, output_path): 73 | """Process a single image or a folder of images.""" 74 | if os.path.isfile(input_path): 75 | # Process a single image 76 | process_image(input_path, output_path) 77 | elif os.path.isdir(input_path): 78 | # Process a folder of images 79 | 
process_folder(input_path, output_path)
80 |     else:
81 |         print(f"Invalid input: {input_path} is neither a file nor a directory.")
82 | 
83 | if __name__ == '__main__':
84 |     parser = argparse.ArgumentParser(description='Convert images with ICC profiles to sRGB.')
85 |     parser.add_argument('input_path', type=str, help='Input file or folder containing images')
86 |     parser.add_argument('output_path', type=str, help='Output file or folder to save converted images')
87 |     args = parser.parse_args()
88 | 
89 |     process_input(args.input_path, args.output_path)
90 | 
-------------------------------------------------------------------------------- /ICC to sRGB/requirements.txt: --------------------------------------------------------------------------------
1 | pillow
-------------------------------------------------------------------------------- /Image Tiling/README.md: --------------------------------------------------------------------------------
1 | This script allows quick tiling of your images with multithreading. ~2000% faster than MagickUtils.
2 | 
3 | **Features:**
4 | 
5 | * Split images into tiles of any size
6 | * Generate a specified number of tiles per image or generate as many non-overlapping tiles as possible
7 | * Convert tiles to grayscale
8 | * Skip tiles below a minimum size
9 | * Choose between random and "best" tile selection methods
10 | * Set a scale factor for "best" tile selection to improve performance
11 | 
12 | **Required Packages:**
13 | 
14 | * opencv-python
15 | * numpy
16 | 
17 | How to use: `python TileImages.py /path/to/image/folder /path/to/output/folder`
18 | 
19 | **Additional Arguments:**
20 | * `-t` - Specify the size of the tiles to take from the image. Use like so: `-t 512 512`
21 | * `-n` - The number of tiles to save per image. This will take a set amount of tiles from each image, pulled from random locations or using the "best" selection method. This helps with increasing variety in your dataset without saving unnecessary tiles
22 | * `-g` - Saves your images in grayscale
23 | * `-m` - Sets a minimum size for tiles. If any tiles are below the specified size, they will not be saved
24 | * `--selection` - Choose the tile selection method: 'random' (default) or 'best'
25 | * `-s` - Sets a seed for the random number generator. Only used with the random selection method
26 | * `-c` - Sets a scale factor for the "best" tile selection method to improve performance (see the example below)
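**Example:** a hypothetical run (paths are placeholders) that saves the four sharpest 256x256 tiles per image, downscaling by 2x to speed up the "best" search:

`python TileImages.py /path/to/image/folder /path/to/output/folder -t 256 256 -n 4 --selection best -c 2`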
27 | 
28 | **Credits:**
29 | Thank you @umzi2 for the best_tile code, which was used as a basis for the "best" function
30 | 
-------------------------------------------------------------------------------- /Image Tiling/TileImages.py: --------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import numpy as np
4 | import cv2
5 | from multiprocessing import Pool
6 | import math
7 | 
8 | def best_tile(img, tile_size, scale=1):
9 |     if len(img.shape) == 3:
10 |         img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
11 |     else:
12 |         img_gray = img
13 | 
14 |     laplacian_abs = np.abs(cv2.Laplacian(img_gray, -1))
15 | 
16 |     if scale > 1:
17 |         laplacian_abs = cv2.resize(laplacian_abs, (img_gray.shape[1] // scale, img_gray.shape[0] // scale),
18 |                                    interpolation=cv2.INTER_AREA)
19 |         kernel_size = tile_size[0] // scale
20 |     else:
21 |         kernel_size = tile_size[0]
22 | 
23 |     kernel = np.ones((kernel_size, kernel_size), dtype=np.float32) / (kernel_size * kernel_size)
24 |     conv = cv2.filter2D(laplacian_abs, -1, kernel)
25 |     y, x = np.unravel_index(np.argmax(conv), conv.shape)
26 |     return np.array([y, x]) * scale
27 | 
28 | def process_image(image_path, output_folder, tile_size, num_tiles, grayscale, min_size, seed, selection_method, scale):
29 |     try:
30 |         img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
31 |         if img is None:
32 |             print(f"Error: Unable to read image {image_path}")
33 |             return
34 | 
35 |         if grayscale:
36 |             img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
37 |         else:
38 |             img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
39 | 
40 |         height, width = img.shape[:2]
41 |         tiles_per_row = width // tile_size[0]
42 |         tiles_per_col = height // tile_size[1]
43 |         total_tiles = tiles_per_row * tiles_per_col
44 | 
45 |         if num_tiles == 0 or num_tiles > total_tiles:
46 |             num_tiles = total_tiles
47 | 
48 |         if selection_method == 'best':
49 |             segments = math.ceil(math.sqrt(num_tiles))
50 |             segment_height = height // segments
51 |             segment_width = width // segments
52 |             tiles = []
53 | 
54 |             for i in range(segments):
55 |                 for j in range(segments):
56 |                     if len(tiles) >= num_tiles:
57 |                         break
58 | 
59 |                     y_start = i * segment_height
60 |                     x_start = j * segment_width
61 |                     y_end = min((i + 1) * segment_height, height)
62 |                     x_end = min((j + 1) * segment_width, width)
63 | 
64 |                     segment = img[y_start:y_end, x_start:x_end]
65 |                     best_tile_coord = best_tile(segment, tile_size, scale)
66 | 
67 |                     global_y = y_start + best_tile_coord[0]
68 |                     global_x = x_start + best_tile_coord[1]
69 | 
70 |                     if global_y + tile_size[0] <= height and global_x + tile_size[1] <= width:
71 |                         tiles.append((global_y, global_x))
72 |         else:  # 'random' selection method
73 |             rng = np.random.default_rng(seed)
74 |             tile_indices = rng.choice(total_tiles, size=num_tiles, replace=False)
75 |             tiles = [(idx // tiles_per_row * tile_size[1], idx % tiles_per_row * tile_size[0]) for idx in tile_indices]
76 | 
77 |         tiles_saved = 0
78 |         for i, (y, x) in enumerate(tiles):
79 |             tile = img[y:y+tile_size[1], x:x+tile_size[0]]
80 |             if min_size and (tile.shape[1] < min_size[0] or tile.shape[0] < min_size[1]):
81 |                 continue
82 |             output_path = os.path.join(output_folder, f"{os.path.splitext(os.path.basename(image_path))[0]}_{selection_method}_tile_{i}.png")
83 |             cv2.imwrite(output_path, cv2.cvtColor(tile, cv2.COLOR_RGB2BGR) if tile.ndim == 3 else tile)  # grayscale tiles have no color channels to swap
84 |             tiles_saved += 1
85 | 
86 |         print(f"{tiles_saved} tiles saved from {image_path}")
87 | except Exception as e: 88 | print(f"Error processing {image_path}: {e}") 89 | 90 | def process_folder(input_folder, output_folder, tile_size, num_tiles, grayscale, min_size, seed, selection_method, scale): 91 | if not os.path.exists(output_folder): 92 | os.makedirs(output_folder) 93 | pool = Pool() 94 | for root, dirs, files in os.walk(input_folder): 95 | for file in files: 96 | if file.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')): 97 | image_path = os.path.join(root, file) 98 | pool.apply_async(process_image, (image_path, output_folder, tile_size, num_tiles, grayscale, min_size, seed, selection_method, scale)) 99 | pool.close() 100 | pool.join() 101 | 102 | if __name__ == '__main__': 103 | parser = argparse.ArgumentParser(description='Tile images from a folder.') 104 | parser.add_argument('input_folder', type=str, help='Input folder containing images') 105 | parser.add_argument('output_folder', type=str, help='Output folder to save tiles') 106 | parser.add_argument('-t', '--tile-size', type=int, nargs=2, help='Size of tiles (width height)', default=(512, 512)) 107 | parser.add_argument('-n', '--num-tiles', type=int, help='Number of tiles to save per image (0 for all possible tiles)', default=1) 108 | parser.add_argument('-g', '--grayscale', action='store_true', help='Convert tiles to grayscale') 109 | parser.add_argument('-m', '--min-size', type=int, nargs=2, help='Minimum size of tiles to save (width height)') 110 | parser.add_argument('-s', '--seed', type=int, help='Seed for random number generator (only used with random selection)') 111 | parser.add_argument('--selection', choices=['random', 'best'], default='random', help='Tile selection method') 112 | parser.add_argument('-c', '--scale', type=int, default=1, help='Scale factor for best tile selection (only used with best selection)') 113 | args = parser.parse_args() 114 | 115 | process_folder(args.input_folder, args.output_folder, args.tile_size, args.num_tiles, 116 | args.grayscale, args.min_size, args.seed, args.selection, args.scale) 117 | -------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "in_folder": "/run/media/umzi/H/dat/ModernAnimation1080_v2/HR", 3 | "out_folder": "/run/media/umzi/H/dat/tile_test/1", 4 | "tiler": { 5 | "type": "best", 6 | "scale": 16 7 | }, 8 | "tile_size": 1024, 9 | "shuffle": true, 10 | "real_name": true, 11 | "process": "for" 12 | } -------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/readme.md: -------------------------------------------------------------------------------- 1 | This is an alternative version of the main image tiling script, thanks to umzi for submitting it. 
You may like it more :)
2 | 
3 | ## Config:
4 | ```json
5 | {
6 |     "in_folder": "input",
7 |     "out_folder": "output",
8 |     "tiler": {
9 |     },
10 |     "tile_size": 1024,
11 |     "shuffle": true,
12 |     "real_name": false,
13 |     "process": "for"
14 | }
15 | 
16 | ```
17 | Entries marked with * are optional
18 | 
19 | `in_folder` - input folder
20 | 
21 | `out_folder` - output folder
22 | 
23 | `tiler` - dictionary with tile type and settings
24 | - `type` - one of the types "linear", "random", "overlap" or "best"
25 |   * `type` = "linear" - standard division into tiles from the upper left to the lower right
26 |     * `n_tiles` - maximum number of tiles. default: 10k
27 |   * `type` = "random" - splits the image into n tiles at random coordinates
28 |     * `n_tiles` - number of tiles. default: 1
29 |   * `type` = "overlap" - splits the image into overlapping tiles
30 |     * `n_tiles` - maximum number of tiles. default: 10k
31 |     * `overlap` - overlap percentage; 0.25 is approximately equal to 25%. default: 0.25
32 |   * `type` = "best" - finds the best tile in the image and saves it
33 |     * `scale` - reduces the image by the chosen factor, which speeds up the search for the best tile, although with some loss of accuracy. Additionally, the larger the value, the more the script focuses on larger details. default: 1
34 | 
35 | `tile_size` - final tile size
36 | 
37 | `shuffle` - shuffles the order of the saved tiles
38 | 
39 | `real_name` - saves the image not by index but by its real name. Makes shuffle useless when enabled
40 | 
41 | `process` - Valid processing types: process, thread and for
42 | * If you run into issues on Windows, swap to thread
-------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/requirements.txt: --------------------------------------------------------------------------------
1 | chainner-ext~=0.3.9
2 | numpy~=1.26.4
3 | tqdm~=4.66.2
4 | opencv-python~=4.9.0
5 | pepeline~=0.3.13
-------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/src/__init__.py: --------------------------------------------------------------------------------
1 | from .process import Tiler
2 | 
3 | __all__ = ['Tiler']
4 | 
-------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/src/process.py: --------------------------------------------------------------------------------
1 | from os import listdir, makedirs
2 | from os.path import exists, join
3 | 
4 | import cv2
5 | import numpy as np
6 | from chainner_ext import resize, ResizeFilter
7 | 
8 | from .tile_scripts import lin_tile, random_tile, overlap_tile, best_tile_list_index
9 | from pepeline import read, save, cvt_color, CvtType, best_tile, ImgColor, ImgFormat
10 | from tqdm import tqdm
11 | from tqdm.contrib.concurrent import process_map, thread_map
12 | from numpy.random import shuffle
13 | 
14 | TILE_FUNC = {
15 |     "linear": lin_tile,
16 |     "random": random_tile
17 | }
18 | 
19 | 
20 | class Tiler:
21 |     def __init__(self, config: dict):
22 |         """
23 |         Initializes the Tiler object with the given configuration.
24 | 
25 |         Args:
26 |             config (dict): Configuration dictionary containing:
27 |                 - "in_folder" (str): Input folder path.
28 |                 - "out_folder" (str): Output folder path.
29 |                 - "tiler" (dict): Tiler configuration with type and other parameters.
30 |                 - "process" (str): Processing mode ("thread" or "process").
31 |                 - "num_work" (int): Number of workers.
32 |                 - "tile_size" (int, optional): Size of each tile. Defaults to 512.
33 | - "shuffle" (bool, optional): Whether to shuffle the tile indices. Defaults to False. 34 | 35 | Raises: 36 | ValueError: If mandatory keys are missing in the configuration. 37 | """ 38 | self.in_folder = config.get("in_folder") 39 | self.out_folder = config.get("out_folder") 40 | tiler = config.get("tiler") 41 | self.process_map = config.get("process", "thread") 42 | self.num_work = config.get("num_work") 43 | self.real_name = config.get("real_name") 44 | if self.out_folder is None: 45 | raise ValueError("You didn't include out_folder in config") 46 | elif self.in_folder is None: 47 | raise ValueError("You didn't include in_folder in config") 48 | elif tiler is None: 49 | raise ValueError("You didn't include tiler in config") 50 | if not exists(self.out_folder): 51 | makedirs(self.out_folder) 52 | self.img_list = listdir(self.in_folder) 53 | self.tile_size = config.get("tile_size", 512) 54 | self.tiler_type = tiler.get("type", "linear") 55 | if self.tiler_type in ["linear", "random", "overlap"]: 56 | number_tiles = tiler.get("n_tiles") 57 | if self.tiler_type == "overlap": 58 | overlap = tiler.get("overlap", 0.25) 59 | self.img_dict, self.list_index = overlap_tile(self.in_folder, self.img_list, self.tile_size, 60 | number_tiles, overlap) 61 | else: 62 | tile_func = TILE_FUNC[self.tiler_type] 63 | self.img_dict, self.list_index = tile_func(self.in_folder, self.img_list, self.tile_size, number_tiles) 64 | self.img_list = list(self.img_dict.keys()) 65 | elif self.tiler_type == "best": 66 | self.scale = tiler.get("scale", 1) 67 | self.img_list = best_tile_list_index(self.in_folder, self.img_list, self.tile_size) 68 | self.list_index = self.img_list.copy() 69 | else: 70 | raise ValueError("Unknown type") 71 | if config.get("shuffle"): 72 | shuffle(self.list_index) 73 | 74 | def __tile(self, origin_img, tile_cord: list[int, int]) -> np.ndarray: 75 | """ 76 | Extracts a tile from the original image based on given coordinates. 77 | 78 | Args: 79 | origin_img (np.ndarray): The original image array. 80 | tile_cord (list[int, int]): Coordinates of the top-left corner of the tile. 81 | 82 | Returns: 83 | np.ndarray: The extracted tile. 84 | """ 85 | return origin_img[ 86 | tile_cord[0]:tile_cord[0] + self.tile_size, 87 | tile_cord[1]:tile_cord[1] + self.tile_size 88 | ] 89 | 90 | def __name(self, img_name: str, cord: list | None) -> str: 91 | if self.real_name: 92 | base_name = ".".join(img_name.split(".")[:-1]) 93 | if cord is None: 94 | name = base_name + ".png" 95 | else: 96 | name = f"{base_name}_{cord[0]}_{cord[1]}.png" 97 | 98 | else: 99 | if cord is None: 100 | name = str(self.list_index.index(img_name)) + ".png" 101 | else: 102 | name = str(self.list_index.index(img_name + str(cord))) + ".png" 103 | 104 | return name 105 | 106 | def best_tile(self, img_name: str) -> None: 107 | """ 108 | Finds and saves the best tile for an image based on the Laplacian focus measure. 109 | 110 | Args: 111 | img_name (str): The image filename. 
112 | """ 113 | img = read(join(self.in_folder, img_name), ImgColor.RGB, ImgFormat.F32) 114 | img_shape = img.shape 115 | result_name = self.__name(img_name,None) 116 | if img_shape[0] == self.tile_size or img_shape[1] == self.tile_size: 117 | save(img, join(self.out_folder, result_name)) 118 | return 119 | img_gray = cvt_color(img, CvtType.RGB2GrayBt2020) 120 | laplacian_abs = np.abs(cv2.Laplacian(img_gray, -1)) 121 | if self.scale > 1: 122 | laplacian_abs = resize(laplacian_abs, (img_shape[1] // self.scale, img_shape[0] // self.scale), 123 | ResizeFilter.Box, False).squeeze() 124 | left_up_cord = best_tile(laplacian_abs, self.tile_size // self.scale) * self.scale 125 | else: 126 | left_up_cord = best_tile(laplacian_abs, self.tile_size // self.scale) 127 | save(img[left_up_cord[0]:left_up_cord[0] + self.tile_size, left_up_cord[1]:left_up_cord[1] + self.tile_size], 128 | join(self.out_folder, result_name)) 129 | 130 | def process(self, img_name: str) -> None: 131 | """ 132 | Processes an image by generating and saving tiles. 133 | 134 | Args: 135 | img_name (str): The image filename. 136 | """ 137 | img_path = join(self.in_folder, img_name) 138 | img = read(img_path, ImgColor.RGB, ImgFormat.U8) 139 | for tile_cord in self.img_dict[img_name]: 140 | out_name = self.__name(img_name, tile_cord) 141 | tile_img = self.__tile(img, tile_cord) 142 | save(tile_img, join(self.out_folder, out_name)) 143 | 144 | def run(self): 145 | """ 146 | Runs the tiling process using the specified processing method. 147 | """ 148 | if self.tiler_type in ["linear", "random", "overlap"]: 149 | process = self.process 150 | elif self.tiler_type == "best": 151 | process = self.best_tile 152 | else: 153 | raise ValueError("Unknown type") 154 | if self.process_map == "thread": 155 | thread_map(process, self.img_list, max_workers=self.num_work, desc="Process") 156 | elif self.process_map == "process": 157 | process_map(process, self.img_list, max_workers=self.num_work, desc="Process") 158 | else: 159 | for img_name in tqdm(self.img_list, desc="Process"): 160 | process(img_name) 161 | -------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/src/tile_scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from .tiles_scripts import lin_tile, random_tile, overlap_tile, best_tile_list_index 2 | 3 | __all__ = ['lin_tile', 'random_tile', 'overlap_tile', 'best_tile_list_index'] 4 | -------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/src/tile_scripts/tiles_scripts.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | from .utils import img_size 3 | import numpy as np 4 | from numpy.random import randint 5 | 6 | 7 | def lin_tile(input_folder: str, img_list: list[str], tile_size: int, number_tiles: int | None) -> (dict, list): 8 | """ 9 | Generates a linear grid of tiles for each image in the provided list. 10 | 11 | Args: 12 | input_folder (str): Path to the folder containing images. 13 | img_list (list[str]): List of image filenames. 14 | tile_size (int): Size of each tile (both width and height). 15 | number_tiles (int | None): Maximum number of tiles to generate per image. 16 | If None, defaults to 10000. 17 | 18 | Returns: 19 | (dict, list): A dictionary with image filenames as keys and arrays of tile coordinates as values, 20 | and a list of indices of generated tiles. 
21 | """ 22 | if number_tiles is None: 23 | number_tiles = 10000 24 | img_dict = {} 25 | list_index = [] 26 | for img_name in tqdm(img_list, desc="Tile generation"): 27 | try: 28 | w, h = img_size(input_folder, img_name) 29 | except OSError as e: 30 | continue 31 | if w < tile_size or h < tile_size: 32 | continue 33 | x_cords = np.arange(0, w // tile_size) * tile_size 34 | y_cords = np.arange(0, h // tile_size) * tile_size 35 | x_grid, y_grid = np.meshgrid(x_cords, y_cords) 36 | 37 | tiles = np.column_stack((y_grid.ravel(), x_grid.ravel()))[:number_tiles] 38 | list_index.extend([img_name + str(tile) for tile in tiles]) 39 | img_dict[img_name] = tiles 40 | return img_dict, list_index 41 | 42 | 43 | def random_tile(input_folder: str, img_list: list[str], tile_size: int, number_tiles: int | None) -> (dict, list): 44 | """ 45 | Generates a random set of tiles for each image in the provided list. 46 | 47 | Args: 48 | input_folder (str): Path to the folder containing images. 49 | img_list (list[str]): List of image filenames. 50 | tile_size (int): Size of each tile (both width and height). 51 | number_tiles (int | None): Number of tiles to generate per image. 52 | If None, defaults to 1. 53 | 54 | Returns: 55 | (dict, list): A dictionary with image filenames as keys and arrays of tile coordinates as values, 56 | and a list of indices of generated tiles. 57 | """ 58 | if number_tiles is None: 59 | number_tiles = 1 60 | img_dict = {} 61 | list_index = [] 62 | for img_name in tqdm(img_list, desc="Tile generation"): 63 | w, h = img_size(input_folder, img_name) 64 | if w < tile_size or h < tile_size: 65 | continue 66 | x_cords = randint(0, w - tile_size - 1, number_tiles) 67 | y_cords = randint(0, h - tile_size - 1, number_tiles) 68 | 69 | tiles = np.column_stack((y_cords.ravel(), x_cords.ravel())) 70 | list_index.extend([img_name + str(tile) for tile in tiles]) 71 | img_dict[img_name] = tiles 72 | return img_dict, list_index 73 | 74 | 75 | def overlap_tile(input_folder: str, img_list: list[str], tile_size: int, number_tiles: int | None, 76 | overlap: int = 0.25) -> (dict, list): 77 | """ 78 | Generates a set of overlapping tiles for each image in the provided list. 79 | 80 | Args: 81 | input_folder (str): Path to the folder containing images. 82 | img_list (list[str]): List of image filenames. 83 | tile_size (int): Size of each tile (both width and height). 84 | number_tiles (int | None): Maximum number of tiles to generate per image. 85 | If None, defaults to 10000. 86 | overlap (int): Overlap fraction between tiles. Defaults to 0.25. 87 | 88 | Returns: 89 | (dict, list): A dictionary with image filenames as keys and arrays of tile coordinates as values, 90 | and a list of indices of generated tiles. 
91 | """ 92 | if number_tiles is None: 93 | number_tiles = 10000 94 | img_dict = {} 95 | list_index = [] 96 | for img_name in tqdm(img_list, desc="Tile generation"): 97 | w, h = img_size(input_folder, img_name) 98 | if w < tile_size or h < tile_size: 99 | continue 100 | x_cords = np.arange(0, w // tile_size, overlap) * tile_size 101 | y_cords = np.arange(0, h // tile_size, overlap) * tile_size 102 | x_grid, y_grid = np.meshgrid(x_cords.astype(np.uint32), y_cords.astype(np.uint32)) 103 | 104 | tiles = np.column_stack((y_grid.ravel(), x_grid.ravel()))[:number_tiles] 105 | list_index.extend([img_name + str(tile) for tile in tiles]) 106 | img_dict[img_name] = tiles 107 | return img_dict, list_index 108 | 109 | 110 | def best_tile_list_index(input_folder: str, img_list: list[str], tile_size: int) -> list: 111 | """ 112 | Generates a list of image names that can accommodate tiles of the specified size. 113 | 114 | Args: 115 | input_folder (str): Path to the folder containing images. 116 | img_list (list[str]): List of image filenames. 117 | tile_size (int): Size of each tile (both width and height). 118 | 119 | Returns: 120 | list: A list of image filenames that can accommodate tiles of the specified size. 121 | """ 122 | list_index = [] 123 | for img_name in tqdm(img_list, desc="Tile generation"): 124 | w, h = img_size(input_folder, img_name) 125 | if w < tile_size or h < tile_size: 126 | continue 127 | list_index.append(img_name) 128 | return list_index 129 | -------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/src/tile_scripts/utils.py: -------------------------------------------------------------------------------- 1 | from pepeline import read_size 2 | from os.path import join 3 | 4 | 5 | def img_size(in_folder: str, img_name: str): 6 | return read_size(join(in_folder, img_name)) 7 | -------------------------------------------------------------------------------- /Image Tiling/WTP Image Tiling/tiling.py: -------------------------------------------------------------------------------- 1 | import json 2 | from src import Tiler 3 | with open(r"config.json") as f: 4 | config = json.load(f) 5 | process = Tiler(config) 6 | process.run() 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. 
Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 
88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 
146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 
209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. 
This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 | 
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year> <name of author>
636 | 
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 | 
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 | 
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 | 
650 | Also add information on how to contact you by electronic and paper mail.
651 | 
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 | 
655 | <program> Copyright (C) <year> <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 | 
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 | 
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 | 
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 | 
--------------------------------------------------------------------------------
/Move Files/README.md:
--------------------------------------------------------------------------------
1 | *Written with the assistance of GitHub Copilot*
2 | 
3 | This script's main purpose is to let you easily move files from one directory to another, with various controls.
4 | 
5 | Main features:
6 | - Moves files from a source directory to a destination directory
7 | - Option to move only a percentage of the files, with an optional seed for reproducible selection
8 | - Option to move only files with a specific extension
9 | - Progress bar to track the file moving process
10 | 
11 | Usage:
12 | - Download the script
13 | - Run the script with the source directory and destination directory as arguments (see the example below)
14 | - Optional arguments include:
15 |     - `-p` or `--move_percentage` to move only a percentage of the files (default: 100; `-s` or `--seed` makes the selection reproducible)
16 |     - `-f` or `--file_extension` to move only files with a specific extension
17 | - Enjoy!
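Example (a sketch using the flags defined in `moveFiles.py` below; the paths are placeholders):
```bash
python moveFiles.py /path/to/source /path/to/destination -p 50 -f .png -s 42
```
This moves a random 50% of the `.png` files into the destination while preserving the relative folder structure (the percentage is applied per subdirectory); the seed makes the same files get selected on every run.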
18 | -------------------------------------------------------------------------------- /Move Files/moveFiles.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import argparse 4 | import random 5 | from tqdm import tqdm 6 | 7 | def move_files(source_folder, destination_folder, move_percentage=100, file_extension=None, seed=None): 8 | # Set the random seed to ensure consistent file selection 9 | if seed is not None: 10 | random.seed(seed) 11 | 12 | for root, dirs, files in os.walk(source_folder): 13 | files_to_move = [] 14 | for file in files: 15 | if file_extension and not file.endswith(file_extension): 16 | continue 17 | files_to_move.append(os.path.join(root, file)) 18 | 19 | random.shuffle(files_to_move) 20 | num_files_to_move = int(len(files_to_move) * (move_percentage / 100)) 21 | files_to_move = files_to_move[:num_files_to_move] 22 | 23 | for source_path in tqdm(files_to_move, desc="Moving files", unit="file"): 24 | relative_path = os.path.relpath(source_path, source_folder) 25 | destination_path = os.path.join(destination_folder, relative_path) 26 | os.makedirs(os.path.dirname(destination_path), exist_ok=True) 27 | shutil.move(source_path, destination_path) 28 | 29 | if __name__ == "__main__": 30 | parser = argparse.ArgumentParser(description="Move files from a folder to a secondary folder.") 31 | parser.add_argument("source_folder", help="Path to the source folder") 32 | parser.add_argument("destination_folder", help="Path to the destination folder") 33 | parser.add_argument("-p", "--move_percentage", type=float, default=100, help="Percentage of files to move (default: 100%)") 34 | parser.add_argument("-f", "--file_extension", help="Move only files with the specified extension type") 35 | parser.add_argument("-s", "--seed", type=int, help="Seed value to ensure consistent file selection") 36 | args = parser.parse_args() 37 | 38 | print(f"Source folder: {args.source_folder}") 39 | print(f"Destination folder: {args.destination_folder}") 40 | move_files(args.source_folder, args.destination_folder, args.move_percentage, args.file_extension, args.seed) 41 | -------------------------------------------------------------------------------- /Move Files/requirements.txt: -------------------------------------------------------------------------------- 1 | tqdm 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/J3J3BCC3L) 2 | 3 | This is a collection of various scripts written with the intention of improving your experience training AI models (specifically aimed at image upscaling/restoration). Hopefully they're of help! 4 | -------------------------------------------------------------------------------- /Re-Save Images/README.md: -------------------------------------------------------------------------------- 1 | This script was originally made by sudo. His github page can be found here: https://github.com/styler00dollar 2 | 3 | The script's main function is to re-save images in a given directory. For example, this can be used to "fix" corrupted images to allow training to continue without finding the corrupted image manually. It doesn't actually fix the corruption, but it allows training software to read it as a valid file. 
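How to use (a sketch based on the arguments defined in `resaveImages.py` below; the paths are placeholders):
```bash
python resaveImages.py /path/to/input /path/to/output --grayscale
```
Note that the script does not create the output directory for you, so make sure it exists first; every image is re-saved as a `.png`.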
4 | 
5 | I asked ChatGPT to modify it and add additional functions such as:
6 | * Grayscale output support
7 | * Subfolder support
8 | * Exception handling
9 | * Progress bar
10 | * Command-line argument support
11 | * Parallel processing
12 | 
13 | **Required Packages:**
14 | * argparse
15 | * opencv-python
16 | * tqdm
17 | 
--------------------------------------------------------------------------------
/Re-Save Images/resaveImages.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import concurrent.futures
3 | import os
4 | import cv2
5 | from tqdm import tqdm
6 | 
7 | def process_image(input_path, dest_dir, grayscale):
8 |     try:
9 |         # Read the image
10 |         image = cv2.imread(input_path)
11 | 
12 |         # Convert the image to grayscale if specified
13 |         if grayscale:
14 |             image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
15 | 
16 |         # Get the base name of the file and split it to get the file name and extension
17 |         filename_only = os.path.basename(input_path)
18 |         file_name, file_ext = os.path.splitext(filename_only)
19 | 
20 |         # Create the output path
21 |         new_path = os.path.join(dest_dir, file_name + ".png")
22 | 
23 |         # Write the image to the output path
24 |         cv2.imwrite(new_path, image)
25 |     except Exception as e:
26 |         # Print an error message if there was an issue processing the image
27 |         print(f"Error processing {input_path}: {e}")
28 | 
29 | def main(input_dir, dest_dir, grayscale):
30 |     # Create a list of input paths
31 |     input_paths = []
32 |     with os.scandir(input_dir) as entries:
33 |         for entry in entries:
34 |             # Skip directories and anything that isn't a .png file
35 |             if not entry.is_file() or not entry.name.endswith(".png"):
36 |                 continue
37 |             input_paths.append(entry.path)
38 | 
39 |     # Use a progress bar to track the progress of the image processing
40 |     with tqdm(total=len(input_paths)) as pbar:
41 |         with concurrent.futures.ProcessPoolExecutor() as executor:
42 |             # Process the images in parallel
43 |             futures = []
44 |             for input_path in input_paths:
45 |                 futures.append(executor.submit(process_image, input_path, dest_dir, grayscale))
46 |             for future in concurrent.futures.as_completed(futures):
47 |                 pbar.update(1)
48 | 
49 | if __name__ == "__main__":
50 |     # Parse command-line arguments
51 |     parser = argparse.ArgumentParser()
52 |     parser.add_argument("input_dir", help="Directory containing the input images")
53 |     parser.add_argument("dest_dir", help="Directory to store the output images")
54 |     parser.add_argument("--grayscale", action="store_true", help="Convert the output images to grayscale")
55 |     args = parser.parse_args()
56 | 
57 |     # Call the main function
58 |     main(args.input_dir, args.dest_dir, args.grayscale)
59 | 
--------------------------------------------------------------------------------
/Upscale Script/README.md:
--------------------------------------------------------------------------------
1 | # Image Upscaling Script
2 | 
3 | This script allows for quick upscaling of your images with support for [spandrel](https://github.com/chaiNNer-org/spandrel). Just provide an input & output folder and the model you want to use.
4 | 
5 | **Features:**
6 | * Upscale images using AI models
7 | * Support for [multiple AI architectures](https://github.com/chaiNNer-org/spandrel?tab=readme-ov-file#model-architecture-support) via [spandrel](https://github.com/chaiNNer-org/spandrel)
8 | * Convert output to various formats (PNG, JPEG, WebP, etc.)
9 | * Enforce precision settings (FP32, FP16, BF16, or auto)
10 | 
11 | **Required Packages:**
12 | * torch
13 | * spandrel
14 | * spandrel_extra_arches
15 | * pillow
16 | * numpy
17 | * tqdm
18 | * chainner-ext
19 | 
20 | How to use: `python upscale-script.py --input /path/to/image/folder --output /path/to/output/folder --model /path/to/model/file`
21 | 
22 | **Arguments:**
23 | * `--input` - Path to an image file or directory containing images to upscale
24 | * `--output` - Directory where upscaled images will be saved
25 | * `--model` - Path to the AI model file used for upscaling. [Supported models here](https://github.com/chaiNNer-org/spandrel?tab=readme-ov-file#model-architecture-support)
26 | 
27 | **Configuration Options (in config.ini):**
28 | * `TileSize` - Size of tiles for processing. Use "native" for no tiling or specify a number (e.g., 512)
29 | * `Precision` - Set computation precision ("auto", "fp32", "fp16", or "bf16")
30 | * `ThreadPoolWorkers` - Number of worker threads for CPU tasks
31 | * `OutputFormat` - Output image format (e.g., "png", "jpeg", "webp")
32 | * `AlphaHandling` - Whether to resize, upscale, or discard the alpha channel
33 | * `GammaCorrection` - Whether or not to gamma correct the resized alpha channel
34 | 
35 | **Notes:**
36 | * Experiment with different `TileSize` values to find the optimal setting for your hardware
37 | * Adjust `ThreadPoolWorkers` based on your CPU capabilities
38 | * If you encounter memory issues with large images, try reducing the `TileSize`
39 | 
--------------------------------------------------------------------------------
/Upscale Script/config.ini:
--------------------------------------------------------------------------------
1 | [Processing]
2 | ; TileSize = native,512,384,etc.
3 | TileSize = native
4 | ; Precision = auto,bf16,fp16,fp32
5 | Precision = auto
6 | ; ThreadPoolWorkers = number of threads for processing
7 | ThreadPoolWorkers = 4
8 | ; OutputFormat = png,jpeg,tga,etc.
9 | OutputFormat = png 10 | ; AlphaHandling = resize,upscale,discard 11 | AlphaHandling = resize 12 | ; GammaCorrection = true, false | Selects whether or not to apply gamma correction while AlphaHandling = resize 13 | GammaCorrection = False 14 | -------------------------------------------------------------------------------- /Upscale Script/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.24.3 2 | Pillow==10.4.0 3 | spandrel==0.4.0 4 | spandrel_extra_arches==0.2.0 5 | torch==2.4.1+cu124 6 | tqdm==4.66.5 7 | chainner-ext==0.3.10 8 | -------------------------------------------------------------------------------- /Upscale Script/upscale-script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import configparser 3 | import torch 4 | from PIL import Image 5 | import spandrel 6 | import spandrel_extra_arches 7 | import numpy as np 8 | from concurrent.futures import ThreadPoolExecutor 9 | import time 10 | import traceback 11 | import gc 12 | import argparse 13 | import sys 14 | from tqdm import tqdm 15 | import chainner_ext 16 | 17 | # Install extra architectures 18 | spandrel_extra_arches.install() 19 | 20 | # Read configuration 21 | config = configparser.ConfigParser() 22 | config.read('config.ini') 23 | 24 | TILE_SIZE = config['Processing'].get('TileSize', '512').lower() 25 | PRECISION = config['Processing'].get('Precision', 'auto').lower() 26 | THREAD_POOL_WORKERS = int(config['Processing'].get('ThreadPoolWorkers', 1)) 27 | OUTPUT_FORMAT = config['Processing'].get('OutputFormat', 'png').lower() 28 | ALPHA_HANDLING = config['Processing'].get('AlphaHandling', 'resize').lower() 29 | GAMMA_CORRECTION = config['Processing'].getboolean('GammaCorrection', False) 30 | 31 | # Create a ThreadPoolExecutor for running CPU-bound tasks 32 | thread_pool = ThreadPoolExecutor(max_workers=THREAD_POOL_WORKERS) 33 | 34 | # Supported image formats 35 | SUPPORTED_FORMATS = ('.png', '.jpg', '.jpeg', '.webp', '.tga', '.bmp', '.tiff') 36 | 37 | def upscale_tensor(img_tensor, model, tile_size): 38 | _, _, h, w = img_tensor.shape 39 | output_h, output_w = h * model.scale, w * model.scale 40 | 41 | output_dtype = torch.float32 if PRECISION == 'fp32' else torch.float16 42 | output_tensor = torch.zeros((1, img_tensor.shape[1], output_h, output_w), dtype=output_dtype, device='cuda') 43 | 44 | if tile_size == "native": 45 | tile_size = max(h, w) 46 | 47 | tile_size = int(tile_size) 48 | 49 | for y in range(0, h, tile_size): 50 | for x in range(0, w, tile_size): 51 | tile = img_tensor[:, :, y:min(y+tile_size, h), x:min(x+tile_size, w)] 52 | 53 | with torch.inference_mode(): 54 | if model.supports_bfloat16 and PRECISION in ['auto', 'bf16']: 55 | with torch.autocast(device_type='cuda', dtype=torch.bfloat16): 56 | upscaled_tile = model(tile) 57 | elif model.supports_half and PRECISION in ['auto', 'fp16']: 58 | with torch.autocast(device_type='cuda', dtype=torch.float16): 59 | upscaled_tile = model(tile) 60 | else: 61 | upscaled_tile = model(tile) 62 | 63 | output_tensor[:, :, y*model.scale:min((y+tile_size)*model.scale, output_h), 64 | x*model.scale:min((x+tile_size)*model.scale, output_w)].copy_(upscaled_tile) 65 | 66 | return output_tensor 67 | 68 | def load_model(model_path): 69 | if not os.path.exists(model_path): 70 | raise ValueError(f"Model file not found: {model_path}") 71 | 72 | try: 73 | model = spandrel.ModelLoader().load_from_file(model_path) 74 | if isinstance(model, 
spandrel.ImageModelDescriptor):
75 |             return model.cuda().eval()
76 |         else:
77 |             raise ValueError(f"Invalid model type for {model_path}")
78 |     except Exception as e:
79 |         print(f"Failed to load model {model_path}: {str(e)}")
80 |         raise
81 | 
82 | def upscale_image(image, model, tile_size, alpha_handling, gamma_correction):
83 |     has_alpha = image.mode == 'RGBA'
84 |     if has_alpha:
85 |         rgb_image, alpha = image.convert('RGB'), image.split()[3]
86 |     else:
87 |         rgb_image = image
88 | 
89 |     # Upscale RGB
90 |     rgb_tensor = torch.from_numpy(np.array(rgb_image)).permute(2, 0, 1).float().div_(255.0).unsqueeze(0).cuda()
91 |     upscaled_rgb_tensor = upscale_tensor(rgb_tensor, model, tile_size)
92 |     upscaled_rgb = Image.fromarray((upscaled_rgb_tensor[0].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8))
93 | 
94 |     if has_alpha:
95 |         if alpha_handling == 'upscale':
96 |             # Create a 3-channel tensor from the alpha channel
97 |             alpha_array = np.array(alpha)
98 |             alpha_3channel = np.stack([alpha_array, alpha_array, alpha_array], axis=2)
99 |             alpha_tensor = torch.from_numpy(alpha_3channel).permute(2, 0, 1).float().div_(255.0).unsqueeze(0).cuda()
100 | 
101 |             # Upscale the 3-channel alpha tensor
102 |             upscaled_alpha_tensor = upscale_tensor(alpha_tensor, model, tile_size)
103 | 
104 |             # Extract a single channel from the result
105 |             upscaled_alpha = Image.fromarray((upscaled_alpha_tensor[0, 0].cpu().numpy() * 255).astype(np.uint8))
106 |         elif alpha_handling == 'resize':
107 |             # Resize alpha using chainner_ext.resize with CubicMitchell filter
108 |             alpha_np = np.array(alpha, dtype=np.float32) / 255.0  # Normalize to [0, 1]
109 |             alpha_np = alpha_np.reshape(alpha_np.shape[0], alpha_np.shape[1], 1)  # Add channel dimension
110 |             upscaled_alpha_np = chainner_ext.resize(
111 |                 alpha_np,
112 |                 (upscaled_rgb.width, upscaled_rgb.height),
113 |                 chainner_ext.ResizeFilter.CubicMitchell,
114 |                 gamma_correction=gamma_correction
115 |             )
116 |             # Convert back to 0-255 range and clip values
117 |             upscaled_alpha_np = np.clip(upscaled_alpha_np * 255, 0, 255)
118 |             upscaled_alpha = Image.fromarray(upscaled_alpha_np.squeeze().astype(np.uint8))
119 |         else:
120 |             # 'discard' (or any unrecognized setting) drops the alpha channel
121 |             return upscaled_rgb
122 | 
123 |         # Merge upscaled RGB and alpha
124 |         upscaled_rgba = upscaled_rgb.copy()
125 |         upscaled_rgba.putalpha(upscaled_alpha)
126 |         return upscaled_rgba
127 |     else:
128 |         return upscaled_rgb
129 | 
130 | def process_image(input_path, output_path, model):
131 |     try:
132 |         image = Image.open(input_path)
133 | 
134 |         start_time = time.time()
135 | 
136 |         result = upscale_image(image, model, TILE_SIZE, ALPHA_HANDLING, GAMMA_CORRECTION)
137 | 
138 |         upscale_time = time.time() - start_time
139 | 
140 |         # Ensure the output directory exists
141 |         os.makedirs(os.path.dirname(output_path), exist_ok=True)
142 | 
143 |         result.save(output_path, 'JPEG' if OUTPUT_FORMAT in ('jpg', 'jpeg') else OUTPUT_FORMAT.upper())  # Pillow's format name is "JPEG", not "JPG"
144 | 
145 |         save_time = time.time() - start_time - upscale_time  # measured but not currently reported
146 |         total_time = time.time() - start_time
147 | 
148 |         return total_time
149 | 
150 |     except Exception as e:
151 |         print(f"Error processing {input_path}: {str(e)}")
152 |         traceback.print_exc()
153 |         return None
154 | 
155 | def process_directory(input_dir, output_dir, model):
156 |     image_files = []
157 |     for root, _, files in os.walk(input_dir):
158 |         for file in files:
159 |             if file.lower().endswith(SUPPORTED_FORMATS):
160 |                 image_files.append((root, file))
161 | 
162 |     print(f"Found {len(image_files)} image(s) to process.")
163 | 
164 |     total_time = 0
165 |     successful_images = 0
166 | 
167 |     with
tqdm(total=len(image_files), desc="Processing Images", unit="image") as pbar: 168 | for root, file in image_files: 169 | input_path = os.path.join(root, file) 170 | relative_path = os.path.relpath(input_path, input_dir) 171 | output_path = os.path.join(output_dir, relative_path) 172 | output_path = os.path.splitext(output_path)[0] + f'.{OUTPUT_FORMAT}' 173 | 174 | processing_time = process_image(input_path, output_path, model) 175 | 176 | if processing_time is not None: 177 | total_time += processing_time 178 | successful_images += 1 179 | 180 | pbar.update(1) 181 | 182 | print(f"Successfully processed {successful_images} out of {len(image_files)} images.") 183 | print(f"Total processing time: {total_time:.2f} seconds") 184 | if successful_images > 0: 185 | print(f"Average time per image: {total_time / successful_images:.2f} seconds") 186 | 187 | def main(): 188 | parser = argparse.ArgumentParser(description="Image Upscaling Tool") 189 | parser.add_argument("--input", required=True, help="Input image file or directory") 190 | parser.add_argument("--output", required=True, help="Output directory") 191 | parser.add_argument("--model", required=True, help="Path to the model file") 192 | args = parser.parse_args() 193 | 194 | print(f"Input path: {args.input}") 195 | print(f"Output path: {args.output}") 196 | print(f"Model path: {args.model}") 197 | print(f"Tile size: {TILE_SIZE}") 198 | print(f"Output format: {OUTPUT_FORMAT}") 199 | 200 | if not os.path.exists(args.input): 201 | print(f"Error: Input path not found: {args.input}") 202 | return 203 | 204 | if not os.path.exists(args.output): 205 | print(f"Creating output directory: {args.output}") 206 | os.makedirs(args.output) 207 | 208 | if not os.path.exists(args.model): 209 | print(f"Error: Model file not found: {args.model}") 210 | return 211 | 212 | try: 213 | print("Loading model...") 214 | model = load_model(args.model) 215 | print("Model loaded successfully.") 216 | 217 | if os.path.isfile(args.input): 218 | print(f"Processing single file: {args.input}") 219 | output_path = os.path.join(args.output, os.path.basename(args.input)) 220 | output_path = os.path.splitext(output_path)[0] + f'.{OUTPUT_FORMAT}' 221 | process_image(args.input, output_path, model) 222 | else: 223 | print(f"Processing directory: {args.input}") 224 | process_directory(args.input, args.output, model) 225 | 226 | print("All processing completed.") 227 | 228 | except Exception as e: 229 | print(f"Error: {str(e)}") 230 | traceback.print_exc() 231 | finally: 232 | torch.cuda.empty_cache() 233 | gc.collect() 234 | print("Cleanup completed.") 235 | 236 | if __name__ == "__main__": 237 | main() 238 | print("Script execution finished.") 239 | -------------------------------------------------------------------------------- /Verify Images/README.md: -------------------------------------------------------------------------------- 1 | *Written with the assistance of GitHub Copilot* 2 | 3 | This script's main purpose is to search for corrupted image files in a directory, with additional arguments for finer control. 
4 | 
5 | Main features:
6 | - Searches for corrupted image files in a specified directory
7 | - By default searches for '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.ico', '.tiff', '.webp'
8 | - Option to search for a specific image file type
9 | - Progress bar to track the file searching process
10 | - Creates a log file ('searchlog.txt') in the script's directory listing all searched files and detected corrupted files
11 | 
12 | Usage:
13 | - Download the script
14 | - Run the script with the directory to be searched as an argument
15 | - Optional arguments include:
16 |     - `-f` or `--file_type` to search only for a specific image file type
17 |     - `-d` or `--deep` to do a "deep" scan. It loads the images individually to ensure they are not corrupted. Much slower than the default method, but it may catch more corrupted images
18 | - Enjoy!
19 | 
20 | Example:
21 | ```bash
22 | python verifyImages.py /path/to/directory -f .jpg
23 | ```
24 | This will search for corrupted '.jpg' files in the specified directory. If no file type is specified, the script will search for all supported image types.
25 | 
--------------------------------------------------------------------------------
/Verify Images/requirements.txt:
--------------------------------------------------------------------------------
1 | tqdm
2 | pillow
3 | 
--------------------------------------------------------------------------------
/Verify Images/verifyImages.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from PIL import Image
4 | from tqdm import tqdm
5 | 
6 | # Supported image types
7 | IMAGE_TYPES = ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.ico', '.tiff', '.webp']
8 | 
9 | def is_image_file(filename, file_type=None):
10 |     if file_type:
11 |         return filename.endswith(file_type)
12 |     return any(filename.endswith(image_type) for image_type in IMAGE_TYPES)
13 | 
14 | def check_image(file_path, in_depth):
15 |     try:
16 |         with Image.open(file_path) as img:
17 |             if in_depth:
18 |                 img.load()
19 |             else:
20 |                 img.verify()
21 |         return True
22 |     except (IOError, SyntaxError):
23 |         return False
24 | 
25 | def search_for_corrupted_files(input_folder, file_type=None, in_depth=False):
26 |     searched_files = []
27 |     corrupted_files = []
28 | 
29 |     # Get the total number of files to be searched for the progress bar
30 |     total_files = sum([len(files) for r, d, files in os.walk(input_folder)])
31 | 
32 |     with tqdm(total=total_files, desc="Processing files", bar_format="{l_bar}{bar} [ time left: {remaining} ]") as pbar:
33 |         for dirpath, dirnames, filenames in os.walk(input_folder):
34 |             for filename in filenames:
35 |                 if is_image_file(filename, file_type):
36 |                     file_path = os.path.join(dirpath, filename)
37 |                     if not check_image(file_path, in_depth):
38 |                         corrupted_files.append(file_path)
39 |                     searched_files.append(file_path)
40 |                 pbar.update()  # update progress bar once per file so the bar matches total_files
41 | 
42 |     return searched_files, corrupted_files
43 | 
44 | def write_log(searched_files, corrupted_files):
45 |     with open('searchlog.txt', 'w') as log_file:
46 |         log_file.write('Searched Files:\n')
47 |         log_file.write('\n'.join(searched_files))
48 |         log_file.write('\n\nCorrupted Files:\n')
49 |         log_file.write('\n'.join(corrupted_files))
50 | 
51 |     print('Search log has been created.')
52 | 
53 | if __name__ == "__main__":
54 |     parser = argparse.ArgumentParser(description='Search for corrupted files in a directory.')
55 |     parser.add_argument('input_folder', type=str, help='The input folder to search.')
56 |     parser.add_argument('-f',
'--file_type', type=str, default=None, help='The file type to search for.') 57 | parser.add_argument('-d', '--deep', action='store_true', help='Perform an in-depth scan.') 58 | args = parser.parse_args() 59 | 60 | searched_files, corrupted_files = search_for_corrupted_files(args.input_folder, args.file_type, args.deep) 61 | write_log(searched_files, corrupted_files) 62 | -------------------------------------------------------------------------------- /Video Frame Extractor/README.md: -------------------------------------------------------------------------------- 1 | # Video Frame Extraction Script 2 | 3 | This script enables precise frame extraction from paired high-resolution (HR) and low-resolution (LR) videos using scene detection. It's designed to extract synchronized frames at scene changes, making it ideal for training video super-resolution models. 4 | 5 | Thank you to Bendel on Enhance Everything! for providing the original script as a baseline. 6 | 7 | **Features:** 8 | * Scene-based frame extraction using PySceneDetect 9 | * Synchronized extraction from HR and LR video pairs 10 | * Support for frame offset compensation between video pairs 11 | * Configurable number of consecutive frames per scene 12 | * Adjustable scene detection threshold 13 | 14 | **Required Packages:** 15 | * opencv-python 16 | * numpy 17 | * scenedetect 18 | 19 | How to use: `python vidpair.py --hr /path/to/hr/video --lr /path/to/lr/video --output /path/to/output/folder` 20 | 21 | **Required Arguments:** 22 | * `--hr` - Path to the high-resolution video file 23 | * `--lr` - Path to the low-resolution video file 24 | * `--output` - Directory where extracted frames will be saved 25 | 26 | **Optional Arguments:** 27 | * `--frames` - Number of consecutive frames to extract per scene (default: 10) 28 | * `--offset` - Frame offset between HR and LR videos (default: 0) 29 | * `--threshold` - Scene detection threshold (default: 30.0) 30 | 31 | **Notes:** 32 | * Higher threshold values result in fewer detected scenes 33 | * Adjust `--frames` based on your training requirements 34 | * Use `--offset` if your HR and LR videos are not perfectly synchronized 35 | * Frame pairs are saved as PNG files to preserve quality 36 | * Frames are numbered sequentially within each detected scene 37 | * Each scene's frames are prefixed with "showXXXX" where XXXX is the scene number 38 | 39 | **Example Usage:** 40 | ```bash 41 | # Extract 5 frames per scene with default settings 42 | python vidpair.py --hr video_hr.mp4 --lr video_lr.mp4 --output ./frames --frames 5 43 | 44 | # Compensate for 2-frame offset between videos 45 | python vidpair.py --hr video_hr.mp4 --lr video_lr.mp4 --output ./frames --offset 2 46 | 47 | # Adjust scene detection sensitivity 48 | python vidpair.py --hr video_hr.mp4 --lr video_lr.mp4 --output ./frames --threshold 25.0 49 | ``` 50 | -------------------------------------------------------------------------------- /Video Frame Extractor/vidpair.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import cv2 3 | import os 4 | import numpy as np 5 | from scenedetect import SceneManager, open_video, ContentDetector 6 | 7 | def detect_scenes(video_path, threshold=30.0): 8 | """Detect scene changes in video using the current PySceneDetect API.""" 9 | print(f"Detecting scenes in {video_path} with threshold {threshold}...") 10 | 11 | # Open video with the new API 12 | video = open_video(video_path) 13 | 14 | # Create scene manager and add detector 15 | 
scene_manager = SceneManager() 16 | scene_manager.add_detector(ContentDetector(threshold=threshold)) 17 | 18 | # Detect scenes 19 | scene_manager.detect_scenes(video) 20 | 21 | # Get list of scenes 22 | scene_list = scene_manager.get_scene_list() 23 | print(f"Detected {len(scene_list)} scenes") 24 | 25 | # Print scene information for debugging 26 | for i, scene in enumerate(scene_list): 27 | start_frame = scene[0].get_frames() 28 | end_frame = scene[1].get_frames() 29 | print(f"Scene {i}: frames {start_frame} to {end_frame} (length: {end_frame - start_frame})") 30 | 31 | return scene_list 32 | 33 | def extract_frames(hr_path, lr_path, output_dir, frames_per_scene=10, lr_offset=0, threshold=30.0): 34 | """Extract consecutive frames from HR and LR videos at scene changes.""" 35 | # Create output directories 36 | hr_output = os.path.join(output_dir, 'hr') 37 | lr_output = os.path.join(output_dir, 'lr') 38 | os.makedirs(hr_output, exist_ok=True) 39 | os.makedirs(lr_output, exist_ok=True) 40 | 41 | print(f"Output directories created: {hr_output} and {lr_output}") 42 | 43 | # Detect scenes in HR video 44 | scene_list = detect_scenes(hr_path, threshold) 45 | 46 | if not scene_list: 47 | print("No scenes detected. Try adjusting the threshold value.") 48 | return 49 | 50 | # Open both videos 51 | hr_cap = cv2.VideoCapture(hr_path) 52 | lr_cap = cv2.VideoCapture(lr_path) 53 | 54 | if not hr_cap.isOpened(): 55 | print(f"Error: Could not open HR video {hr_path}") 56 | return 57 | 58 | if not lr_cap.isOpened(): 59 | print(f"Error: Could not open LR video {lr_path}") 60 | hr_cap.release() 61 | return 62 | 63 | # Get video properties 64 | hr_fps = hr_cap.get(cv2.CAP_PROP_FPS) 65 | lr_fps = lr_cap.get(cv2.CAP_PROP_FPS) 66 | hr_total_frames = int(hr_cap.get(cv2.CAP_PROP_FRAME_COUNT)) 67 | lr_total_frames = int(lr_cap.get(cv2.CAP_PROP_FRAME_COUNT)) 68 | 69 | print(f"HR video: {hr_total_frames} frames at {hr_fps} fps") 70 | print(f"LR video: {lr_total_frames} frames at {lr_fps} fps") 71 | print(f"LR offset: {lr_offset} frames") 72 | 73 | # Check if fps is the same 74 | if abs(hr_fps - lr_fps) > 0.1: 75 | print(f"Warning: HR and LR videos have different frame rates: {hr_fps} vs {lr_fps}") 76 | 77 | frames_extracted = 0 78 | 79 | # Initialize frame counters 80 | hr_current_frame = 0 81 | lr_current_frame = 0 82 | frames_extracted = 0 83 | frame_number = 1 # Start frame numbering from 1 84 | 85 | # Process each scene 86 | for i, scene in enumerate(scene_list): 87 | scene_start, scene_end = scene 88 | scene_start_frame = scene_start.get_frames() 89 | scene_end_frame = scene_end.get_frames() 90 | scene_length = scene_end_frame - scene_start_frame 91 | 92 | print(f"Processing scene {i}: {scene_length} frames") 93 | 94 | if scene_length < frames_per_scene: 95 | print(f"Scene {i} is too short ({scene_length} frames), skipping") 96 | continue 97 | 98 | # Seek to scene start by reading frames sequentially 99 | while hr_current_frame < scene_start_frame: 100 | ret = hr_cap.read()[0] 101 | if not ret: 102 | print(f"Error: Could not read HR frame while seeking to scene start") 103 | break 104 | hr_current_frame += 1 105 | 106 | # Extract consecutive frames from the beginning of the scene 107 | start_frame = scene_start_frame 108 | end_frame = min(scene_start_frame + frames_per_scene, scene_end_frame) 109 | 110 | print(f"Extracting consecutive frames from {start_frame} to {end_frame-1}") 111 | 112 | # Extract frames 113 | for frame_idx in range(start_frame, end_frame): 114 | # Read HR frame sequentially 115 | ret, 
hr_frame = hr_cap.read() 116 | if not ret: 117 | print(f"Warning: Could not read HR frame {frame_idx}") 118 | continue 119 | hr_current_frame += 1 120 | 121 | # Calculate LR frame index with offset 122 | lr_frame_idx = frame_idx - lr_offset 123 | 124 | # Seek LR frame by reading sequentially 125 | while lr_current_frame < lr_frame_idx: 126 | ret = lr_cap.read()[0] 127 | if not ret: 128 | print(f"Error: Could not read LR frame while seeking") 129 | break 130 | lr_current_frame += 1 131 | 132 | # Read LR frame 133 | ret, lr_frame = lr_cap.read() 134 | if not ret: 135 | print(f"Warning: Could not read LR frame {lr_frame_idx}") 136 | continue 137 | lr_current_frame += 1 138 | 139 | # Calculate frame index relative to the scene start 140 | relative_frame_idx = frame_idx - scene_start_frame 141 | 142 | # Save frames with new naming convention 143 | hr_frame_path = os.path.join(hr_output, f'show{i+1:04d}_frame{relative_frame_idx+1:04d}.png') 144 | lr_frame_path = os.path.join(lr_output, f'show{i+1:04d}_frame{relative_frame_idx+1:04d}.png') 145 | 146 | cv2.imwrite(hr_frame_path, hr_frame) 147 | cv2.imwrite(lr_frame_path, lr_frame) 148 | 149 | frames_extracted += 1 150 | frame_number += 1 # Increment frame number for next iteration 151 | 152 | if frames_extracted % 10 == 0: 153 | print(f"Saved {frames_extracted} frame pairs so far...") 154 | 155 | print(f"Extracted {end_frame - start_frame} consecutive frames from scene {i}") 156 | 157 | # Release video captures 158 | hr_cap.release() 159 | lr_cap.release() 160 | 161 | print(f"Extraction complete. Total frames extracted: {frames_extracted}") 162 | 163 | def main(): 164 | parser = argparse.ArgumentParser(description='Extract consecutive frames from HR and LR videos at scene changes') 165 | parser.add_argument('--hr', required=True, help='Path to high resolution video') 166 | parser.add_argument('--lr', required=True, help='Path to low resolution video') 167 | parser.add_argument('--output', required=True, help='Output directory') 168 | parser.add_argument('--frames', type=int, default=10, help='Number of consecutive frames to extract per scene') 169 | parser.add_argument('--offset', type=int, default=0, help='Frame offset for LR video') 170 | parser.add_argument('--threshold', type=float, default=30.0, help='Scene detection threshold') 171 | 172 | args = parser.parse_args() 173 | 174 | extract_frames( 175 | args.hr, 176 | args.lr, 177 | args.output, 178 | frames_per_scene=args.frames, 179 | lr_offset=args.offset, 180 | threshold=args.threshold 181 | ) 182 | 183 | if __name__ == '__main__': 184 | main() 185 | --------------------------------------------------------------------------------