├── .streamlit
│   ├── config.toml
│   └── credentials.toml
├── original_code
│   ├── PHOG.zip
│   ├── BalanceMeasures_RonaldHuebner.zip
│   ├── Fourier_Slope_and_Sigma_Redies.zip
│   ├── pp-spatiotemp-main_Isherwood_Spehar.zip
│   ├── edge_density_and_edge_orientation_entropy.zip
│   ├── 3D_FractalDimension_and_ FourierSlope_GeorgeMather.zip
│   └── 2D_FractalDimension_Spehar.m
├── AT
│   ├── bvlc_alexnet_conv1.npy
│   ├── AT_misc.py
│   ├── color_and_simple_qips.py
│   ├── fractal_dimension_qips.py
│   ├── edge_entropy_qips.py
│   ├── CNN_qips.py
│   ├── resize_functions.py
│   ├── PHOG_qips.py
│   ├── balance_qips.py
│   └── fourier_qips.py
├── images
│   ├── toolbox_screenshot.png
│   ├── GestatltReVision_Logo.png
│   ├── LogoDesign EAJ final.png
│   └── GestatltReVision_Logo_mod.png
├── supplemental_material
│   └── QIP_correlations
│       ├── random_phase_images_corr_pearson.jpg
│       ├── JA_corr_pearson.csv
│       └── AVA_1000_corr_pearson.csv
├── requirements.txt
├── LICENSE
├── .devcontainer
│   └── devcontainer.json
├── docs
│   ├── Supplemental_material.md
│   ├── InstallationInstructions_Linux_MacOS.md
│   └── InstallationInstructions_Windows.md
├── README.md
├── aesthetics_toolbox.py
├── pages
│   ├── 4_📚_References.py
│   ├── 3_📒_QIP_Documentation.py
│   ├── 2_🔧_Image_preprocessing.py
│   └── 1_📊_QIP_Machine.py
└── QIP_machine_script.py

-------------------------------------------------------------------------------- /.streamlit/config.toml: --------------------------------------------------------------------------------
1 | [theme]
2 | base="light"
-------------------------------------------------------------------------------- /.streamlit/credentials.toml: --------------------------------------------------------------------------------
1 | [general]
2 | email = ""
-------------------------------------------------------------------------------- /original_code/PHOG.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/original_code/PHOG.zip
-------------------------------------------------------------------------------- /AT/bvlc_alexnet_conv1.npy: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/AT/bvlc_alexnet_conv1.npy
-------------------------------------------------------------------------------- /images/toolbox_screenshot.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/images/toolbox_screenshot.png
-------------------------------------------------------------------------------- /images/GestatltReVision_Logo.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/images/GestatltReVision_Logo.png
-------------------------------------------------------------------------------- /images/LogoDesign EAJ final.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/images/LogoDesign EAJ final.png
-------------------------------------------------------------------------------- /images/GestatltReVision_Logo_mod.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/images/GestatltReVision_Logo_mod.png
-------------------------------------------------------------------------------- /original_code/BalanceMeasures_RonaldHuebner.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/original_code/BalanceMeasures_RonaldHuebner.zip
-------------------------------------------------------------------------------- /original_code/Fourier_Slope_and_Sigma_Redies.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/original_code/Fourier_Slope_and_Sigma_Redies.zip
-------------------------------------------------------------------------------- /original_code/pp-spatiotemp-main_Isherwood_Spehar.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/original_code/pp-spatiotemp-main_Isherwood_Spehar.zip
-------------------------------------------------------------------------------- /original_code/edge_density_and_edge_orientation_entropy.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/original_code/edge_density_and_edge_orientation_entropy.zip
-------------------------------------------------------------------------------- /original_code/3D_FractalDimension_and_ FourierSlope_GeorgeMather.zip: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/original_code/3D_FractalDimension_and_ FourierSlope_GeorgeMather.zip
-------------------------------------------------------------------------------- /supplemental_material/QIP_correlations/random_phase_images_corr_pearson.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/RBartho/Aesthetics-Toolbox/HEAD/supplemental_material/QIP_correlations/random_phase_images_corr_pearson.jpg
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | # This file may be used to create an environment using:
2 | # $ conda create --name <env> --file <this file>
3 | streamlit==1.34.0
4 | numpy
5 | Pillow
6 | scikit-image
7 | scipy
8 | statsmodels
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
 1 | Copyright (c) 2018 The Python Packaging Authority
 2 | 
 3 | Permission is hereby granted, free of charge, to any person obtaining a copy
 4 | of this software and associated documentation files (the "Software"), to deal
 5 | in the Software without restriction, including without limitation the rights
 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 7 | copies of the Software, and to permit persons to whom the Software is
 8 | furnished to do so, subject to the following conditions:
 9 | 
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 | 
-------------------------------------------------------------------------------- /original_code/2D_FractalDimension_Spehar.m: --------------------------------------------------------------------------------
 1 | function box_count(filename);
 2 | 
 3 | % calculates fractal dimension using box counting
 4 | % CC/ZJI
 5 | %
 6 | 
 7 | b = imread(filename)
 8 | 
 9 | threshold = mean(mean(b));
10 | 
11 | b = double(b) < threshold;   % comparison operator restored; it was stripped from this dump (cf. the Python port in AT/fractal_dimension_qips.py)
12 | 
13 | i = 1;
14 | 
20 | while size(b,1) > 6
21 |     x(i) = size(b,1);
22 |     y(i) = sum(sum(b));
23 |     c = zeros(size(b)./2);
24 |     for xx = 1:size(c,1)
25 |         for yy = 1:size(c,2)
26 |             c(xx,yy) = b(xx*2,yy*2)+b(xx*2-1,yy*2)+b(xx*2,yy*2-1)+b(xx*2-1,yy*2-1);
27 |         end
28 |     end
29 |     b = c>0 & c<4;
30 |     i = i+1;
31 | end
32 | 
33 | params = polyfit(log2(x(2:end)'),log2(y(2:end)'),1)
34 | %params = regress(log(y'),log(x'))
35 | %nlinfit(log(x),log(y),'linear',[1 1]);
36 | 
37 | D = params(1);
38 | title(D)
39 | 
40 | subplot 122
41 | plot(log2(x(2:end)),log2(y(2:end)),'bo');
42 | hold on
43 | plot([0 8],[params(2) (params(2)+8*params(1))],'r-')
44 | %plot([0 8],[0 8*params(1)],'r-')
45 | axis([0 8 0 16]);
46 | hold off
47 | 
48 | 
-------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: --------------------------------------------------------------------------------
 1 | {
 2 |   "name": "Python 3",
 3 |   // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
 4 |   "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye",
 5 |   "customizations": {
 6 |     "codespaces": {
 7 |       "openFiles": [
 8 |         "README.md",
 9 |         "aesthetics_toolbox.py"
10 |       ]
11 |     },
12 |     "vscode": {
13 |       "settings": {},
14 |       "extensions": [
15 |         "ms-python.python",
16 |         "ms-python.vscode-pylance"
17 |       ]
18 |     }
19 |   },
20 |   // the remainder of this file was damaged in this dump; restored from the standard Streamlit devcontainer template that lines 1-19 follow
21 |   "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y <packages.txt; [ -f requirements.txt ] && pip3 install --user -r requirements.txt; pip3 install --user streamlit; echo '✅ Packages installed and Requirements met'",
22 |   "postAttachCommand": {
23 |     "server": "streamlit run aesthetics_toolbox.py --server.enableCORS false --server.enableXsrfProtection false"
24 |   },
25 |   "portsAttributes": {
26 |     "8501": {
27 |       "label": "Application",
28 |       "onAutoForward": "openPreview"
29 |     }
30 |   },
31 |   "forwardPorts": [
32 |     8501
33 |   ]
34 | }
-------------------------------------------------------------------------------- /AT/AT_misc.py: --------------------------------------------------------------------------------
78 |     if w>=h:
79 |         if w<1024:
80 |             return True
81 |     else:
82 |         if h<1024:
83 |             return True
84 |         else:
85 |             raise NotImplementedError('Not implemented error')
86 | 
87 | 
88 |     return False
89 | 
90 | def callback_upload_img_files():
91 |     st.session_state.new_files_uploaded = True
92 | 
93 | def build_heading(head,notes):
94 | 
95 |     st.markdown(""" """, unsafe_allow_html=True)
98 | 
99 |     st.markdown(""" """, unsafe_allow_html=True)
102 | 
103 |     image1 = Image.open('images/LogoDesign EAJ final.png')
104 |     image2 = Image.open('images/GestatltReVision_Logo_mod.png')
105 | 
106 |     #Create two columns with different width
107 |     col1, col2, col3 = st.columns( [0.10, 0.7, 0.2])
108 |     with col2: # To display the header text using css style
109 |         st.markdown('<div>' + head + '</div>', unsafe_allow_html=True)  # plain <div> is a placeholder; the original style attributes were lost
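        # unsafe_allow_html is needed here so the HTML wrapper is rendered instead of being shown as literal text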
110 |         st.markdown('<div>' + notes + '</div>', unsafe_allow_html=True)
111 |     with col1:
112 |         st.image(image1, use_column_width=True)
113 |     with col3:
114 |         st.image(image2, use_column_width=True)
115 | 
116 | 
117 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
 1 | # Aesthetics Toolbox v1.0.2
 2 | 
 3 | This project contains the Python scripts to run the Streamlit application "Aesthetics Toolbox" in your browser. The Toolbox includes an interface for computing a number of commonly studied quantitative image properties (QIPs) in aesthetics research, and it also contains many common methods for image preprocessing.
 4 | 
 5 | # Scientific paper
 6 | 
 7 | A detailed description of the toolbox and the implemented image properties is available here: https://doi.org/10.3758/s13428-025-02632-3. If you use the Aesthetics Toolbox in your work, please consider citing the published paper.
 8 | 
 9 | # Cloud version with limited resources (but no installation)
10 | 
11 | You can test the toolbox without installation on the Streamlit Community Cloud: https://aesthetics-toolbox.streamlit.app/. Please note the privacy and security information below when using the cloud version.
12 | 
13 | # Local installation instructions
14 | 
15 | For a local installation, download all files from this GitHub repository to your computer (use the ZIP download under the green "Code" button). Then follow the installation instructions for your operating system:
16 | 
17 | [Linux and MacOS Installation](docs/InstallationInstructions_Linux_MacOS.md) \
18 | [Windows Installation](docs/InstallationInstructions_Windows.md)
19 | 
20 | # Starting the Toolbox locally (after installation)
21 | 
22 | 1. On MacOS and Linux, open a terminal. On Windows, open an Anaconda prompt. Navigate to the downloaded folder containing the aesthetics_toolbox.py file.
23 | 
24 | 2. Activate the created Python environment by typing into the terminal:
25 | ```shell
26 | conda activate aesthetics_toolbox
27 | ```
28 | 3. Now start the toolbox with:
29 | 
30 | ```shell
31 | python -m streamlit run aesthetics_toolbox.py
32 | ```
33 | 
34 | # Notes on using the Toolbox
35 | 
36 | 1. To restart the Toolbox, just refresh your browser. All loaded data will be removed, and all active calculations will stop.
37 | 
38 | 2. If you want to close the Toolbox, just close the browser tab as well as the terminal or Anaconda prompt.
39 | 
40 | 3. While computations are running, do not interact with the Toolbox (do not select QIPs or sidebar entries, and do not upload or delete images). Doing so refreshes the Toolbox, and all calculated data will be lost.
41 | 
42 | 4. Multithreading is not supported, as it would limit platform independence. To speed up calculations, you may want to consider installing the local version, splitting the data, and running multiple instances of the toolbox (see the example below the Supplemental material section).
43 | 
44 | 5. The number of images you can load into the toolbox at one time is limited by the amount of RAM on your computer (or the server). Also, large images require more processing time than smaller images.
45 | 
46 | # Script version of the QIP Machine
47 | 
48 | The file QIP_machine_script.py is a pure script version (no GUI) of the QIP Machine interface of the Toolbox. It can be used to run multiple local instances of the QIP Machine or for deployment on an HPC cluster.
49 | 
50 | # Supplemental material
51 | 
52 | Detailed information about the data provided in the supplemental material can be found [here](docs/Supplemental_material.md).
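
As an example for note 4 above, a second, independent instance of the Toolbox can be started on another port with Streamlit's `--server.port` option (the port number 8502 below is only an example):

```shell
python -m streamlit run aesthetics_toolbox.py --server.port 8502
```

Each instance then runs its calculations independently in its own browser tab.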
53 | 
54 | # Privacy and security
55 | If you use the local installation version, all calculations and data transfers of the Toolbox take place on your local computer. The browser is used only as an interface, and no data is uploaded to the Internet. The opposite is true for the Streamlit Community Cloud version: there, your images are transferred to and processed on an external server.
56 | 
57 | # Contributors
58 | Ralf Bartho: Toolbox concept, code development, maintenance, bugfixes, Development Dataset feature
59 | Christoph Redies: Toolbox concept, supervision of the project, documentation of image properties
60 | Gregor Hayn-Leichsenring: Toolbox concept
61 | Lisa Kossmann, Johan Wagemans: Development Dataset feature
62 | Branka Spehar: Provided code to compute image properties
63 | Ronald Hübner: Provided code to compute image properties
64 | George Mather: Provided code to compute image properties
65 | 66 | -------------------------------------------------------------------------------- /AT/color_and_simple_qips.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy import stats 3 | from skimage.measure import shannon_entropy 4 | 5 | 6 | 7 | ################################# Size QIPs ################################ 8 | 9 | 10 | def aspect_ratio(img_rgb): 11 | ''' 12 | Calculates the "Aspect ratio" QIP 13 | 14 | Input: Takes a rgb image in Pillow format as input. Grayscale works as well. 15 | Output: Aspect ratio 16 | 17 | Usage: 18 | Import Image from PIL 19 | img_gray = np.asarray(Image.open( path_to_image_file )) 20 | aspect_ratio(img_rgb) 21 | ''' 22 | return img_rgb.shape[1] / img_rgb.shape[0] 23 | 24 | 25 | 26 | 27 | def image_size(img_rgb, kind = 'sum'): 28 | ''' 29 | Calculates the "Image size" QIP 30 | 31 | Input: Takes a rgb image in Pillow format as input. Grayscale works as well. 32 | kind: Select Type of image size, default = 'sum' ; valid alternatives are: sum, num_pixels, diagonal, average, minumum, maximum 33 | Output: Image size 34 | 35 | Usage: 36 | Import Image from PIL 37 | img_rgb = np.asarray(Image.open( path_to_image_file )) 38 | image_size(img_rgb) 39 | ''' 40 | 41 | if kind == 'sum': 42 | return img_rgb.shape[1] + img_rgb.shape[0] 43 | 44 | elif kind == 'num_pixel': 45 | return img_rgb.shape[1] * img_rgb.shape[0] 46 | 47 | elif kind == 'diagonal': 48 | return int( np.linalg.norm([ img_rgb.shape[1] , img_rgb.shape[0]]) ) 49 | 50 | elif kind == 'average': 51 | return int( np.mean([ img_rgb.shape[1] , img_rgb.shape[0]]) ) 52 | 53 | elif kind == 'minimum': 54 | return np.min([ img_rgb.shape[1] , img_rgb.shape[0]]) 55 | 56 | elif kind == 'maximum': 57 | return np.max([ img_rgb.shape[1] , img_rgb.shape[0]]) 58 | 59 | 60 | else: 61 | raise NotImplementedError ('not implemented, wrong kind image size selected') 62 | 63 | 64 | 65 | ################################# Color QIPs ################################ 66 | 67 | 68 | def mean_channels(img): 69 | ''' 70 | Calculates the "Mean values for color channels" 71 | 72 | Input: RGB or grayscale images in Pillow format 73 | Output: returns 1 mean value for grayscale and 3 mean color channels values for rgb, hsv or Lab images 74 | 75 | Usage: 76 | Import Image from PIL 77 | img_gray = np.asarray(Image.open( path_to_image_file )) 78 | mean_channels(img_rgb) 79 | ''' 80 | return np.mean(img, axis=(0,1)) # returns 1 value for grayscale and 3 values for color channels 81 | 82 | 83 | ### STD of RGB, HSV, LAB; STD(L)="RMSContrast" 84 | def std_channels(img): 85 | ''' 86 | Calculates the "Standard deviation for color channels" 87 | 88 | Input: RGB or grayscale images in Pillow format 89 | Output: returns 1 std value for grayscale and 3 std color channels values for rgb, hsv or Lab images 90 | 91 | Usage: 92 | Import Image from PIL 93 | img_gray = np.asarray(Image.open( path_to_image_file )) 94 | std_channels(img_rgb) 95 | ''' 96 | return np.std(img, axis=(0,1)) 97 | 98 | 99 | def circ_stats(img_hsv): 100 | ''' 101 | Calculates the "Circular mean and circular standard deviation for color channels" 102 | 103 | Input: hsv images in Pillow format 104 | Output: returns circular mean and std for H channel of the hsv color space 105 | 106 | Usage: 107 | Import Image from PIL 108 | from skimage import color 109 | img_rgb = Image.open( path_to_image_file ).convert('RGB') 110 | img_hsv = color.rgb2hsv(img_rgb) 111 | circ_stats(img_hsv) 112 | ''' 113 | hue = 
img_hsv[:,:,0].astype("float") 114 | circ_mean = stats.circmean(hue, high=1, low=0) 115 | circ_std = stats.circstd(hue, high=1, low=0) 116 | return circ_mean, circ_std 117 | 118 | 119 | def shannonentropy_channels(img): 120 | ''' 121 | Used to calculate the "Color entropy" and "Ligtness entropy) QIPs 122 | 123 | Input: single color channel, for Color entropy use Pillow hsv image, for ligthness entropy Lab image 124 | Output: returns shannon entropy for color channels 125 | 126 | Usage: 127 | Import Image from PIL 128 | from skimage import color 129 | img_rgb = Image.open( path_to_image_file ).convert('RGB') 130 | img_hsv = color.rgb2hsv(img_rgb) 131 | color_entropy = shannonentropy_channels(img_hsv[:,:,0]) 132 | ''' 133 | 134 | # change range of values to 256 bins [0-255] 135 | if np.max(img) > 0: 136 | img = img / np.max(img) 137 | img = img*255 138 | img = np.round(img) 139 | 140 | return shannon_entropy(img) 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /AT/fractal_dimension_qips.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PIL 3 | 4 | 5 | 6 | ######################################################################################################################## 7 | ######################################## BOX Count Algos ############################################################# 8 | ######################################################################################################################## 9 | 10 | 11 | ### Branka Spehar, "2D" Box Count Algo using binary images 12 | def fractal_dimension_2d(img_gray): 13 | ''' 14 | Calculates the "2-dimensional Fractal Dimension" QIP 15 | 16 | Input: Takes a grayscale image in Pillow format as input. 17 | Output: 2-dimensional Fractal Dimension QIP 18 | 19 | Usage: 20 | Load images like this: 21 | 22 | Import Image from PIL 23 | 24 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 25 | fractal_dimension_2d(img_gray) 26 | ''' 27 | 28 | nr, nc = img_gray.shape # get y & x dimensions of image 29 | 30 | # Scale image to square if not already square 31 | if nr > nc: 32 | img_gray = np.asarray(PIL.Image.fromarray(img_gray).resize((nc, nc))) 33 | elif nr < nc: 34 | img_gray = np.asarray(PIL.Image.fromarray(img_gray).resize((nr, nr))) 35 | 36 | threshold = np.mean(img_gray) 37 | b = img_gray < threshold 38 | b = b.astype(np.uint8) 39 | 40 | x, y = [], [] 41 | i = 1 42 | while b.shape[0] > 6: 43 | x.append(b.shape[0]) 44 | y.append(np.sum(b)) 45 | 46 | c = np.zeros((b.shape[0]//2, b.shape[1]//2)) 47 | for xx in range(c.shape[0]): 48 | for yy in range(c.shape[1]): 49 | 50 | c[xx, yy] = np.sum ( b[xx*2 : xx*2 + 2 , yy*2 : yy*2 + 2 ] ) 51 | 52 | b = (c > 0) & (c < 4) 53 | i += 1 54 | 55 | params = np.polyfit(np.log2(x[1:]), np.log2(y[1:]), 1) 56 | D = params[0] 57 | return D 58 | 59 | 60 | 61 | ### George Mather, "3D" Box Count Algo unsing graylevels 62 | def fractal_dimension_3d(img_gray): 63 | ''' 64 | Calculates the "3-dimensional Fractal Dimension" QIP 65 | 66 | Input: Takes a grayscale image in Pillow format as input. 
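           (before box counting, the image is center-cropped to the largest square whose side length is a power of two)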
67 | Output: 3-dimensional Fractal Dimension QIP 68 | 69 | Usage: 70 | Load images like this: 71 | 72 | Import Image from PIL 73 | 74 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 75 | fractal_dimension_3d(img_gray) 76 | ''' 77 | 78 | ### center crop largest rectangle image with power of 2 79 | nr, nc = img_gray.shape 80 | # Find largest square that is a power of 2 (for box count) 81 | if nr < nc: 82 | nk = 2**np.ceil(np.log2(nr)) 83 | nnr = 2**(np.ceil(np.log2(nr)) - 1) if nr < nk else 2**np.ceil(np.log2(nr)) 84 | dc = nc - nnr 85 | dr = nr - nnr 86 | nnc = nnr 87 | else: 88 | nk = 2**np.ceil(np.log2(nc)) 89 | nnc = 2**(np.ceil(np.log2(nc)) - 1) if nc < nk else 2**np.ceil(np.log2(nc)) 90 | dr = nr - nnc 91 | dc = nc - nnc 92 | nnr = nnc 93 | nr, nc = int(nnr), int(nnc) 94 | # Centred crop 95 | I = img_gray[int(round(dr / 2)):int(round(dr / 2 + nr)), int(round( dc / 2)):int(round(dc / 2 + nc))] 96 | 97 | ### calc box counts 98 | nr, nc = I.shape 99 | # Calculate min and max box sizes 100 | minpow = int(np.ceil(np.log2(nr**(1/3)))) 101 | 102 | bmin = 2**minpow 103 | bmax = bmin 104 | 105 | while np.ceil(nr / bmax + 1) <= np.ceil(nr / (bmax - 1)): 106 | bmax = bmax + 1 107 | 108 | boxes = np.arange(bmin, bmax + 1, 2) 109 | 110 | boxHeights = boxes * (1 / nr) # box size in greylevels 111 | 112 | boxCounts = np.zeros(len(boxes)) 113 | boxSizes = np.zeros(len(boxes)) 114 | 115 | # loop through the box sizes 116 | for b in range(len(boxes)): 117 | 118 | bs = boxes[b] 119 | bh = boxHeights[b] # box size in graylevels 120 | 121 | # Divide the image into a grid of boxes (bs x bs). 122 | # Loop through the cells in the grid, calculating the box count for 123 | # each and adding it to the running total. 124 | # Overlap columns by one x-pixel 125 | boxCount = 0 126 | for by in range(1, nc - bs, bs): 127 | 128 | for bx in range(1, nr - bs + 1, bs-1): 129 | submat = I[by-1: by + bs - 1 , bx-1 : bx + bs - 1 ] 130 | l = np.max(submat) 131 | k = np.min(submat) 132 | 133 | if l == k: 134 | b1 = 1 135 | else: 136 | b1 = np.ceil((l - k) / bh) 137 | boxCount = boxCount + b1 138 | 139 | # Now use the range of box sizes to calculate D 140 | boxCounts[b] = boxCount 141 | boxSizes[b] = 1.0 / bs 142 | 143 | dfit = np.polyfit(np.log(boxSizes), np.log(boxCounts), 1) 144 | D = dfit[0] 145 | return D 146 | -------------------------------------------------------------------------------- /aesthetics_toolbox.py: -------------------------------------------------------------------------------- 1 | #Import the required libraries 2 | import streamlit as st 3 | from PIL import Image 4 | 5 | from AT import AT_misc 6 | 7 | Image.MAX_IMAGE_PIXELS = 1e14 8 | 9 | 10 | st.set_page_config(layout="wide") 11 | 12 | 13 | 14 | 15 | AT_misc.build_heading(head= 'Aesthetics Toolbox', 16 | notes= 'This is a toolbox for aesthetics research. \ 17 | The features of this toolbox can be selected from the sidebar and are briefly explained below. \ 18 | The toolbox is designed as an open source project and we hereby encourage any feedback \ 19 | or extensions to the toolbox (see contacts below). A detailed description of the toolbox \ 20 | and the implemented image properties is available here: https://doi.org/10.3758/s13428-025-02632-3. \ 21 | If you use the Aesthetics Toolbox in your work, please consider citing the published paper.' 
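                       # build_heading (defined in AT/AT_misc.py) renders this text as a centered heading between the two project logos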
22 | 
23 |                        )
24 | 
25 | st.divider()
26 | 
27 | 
28 | 
29 | ### Define custom markdowns
30 | st.markdown(""" """, unsafe_allow_html=True)
33 | 
34 | 
35 | st.markdown(""" """, unsafe_allow_html=True)
38 | 
39 | st.markdown(""" """, unsafe_allow_html=True)
42 | 
43 | st.markdown(""" """, unsafe_allow_html=True)
46 | 
47 | 
48 | st.markdown('<div>Toolbox Features</div>', unsafe_allow_html=True)  # plain <div> placeholders throughout this file; the original class/style markup was lost
49 | 
50 | 
51 | left, cen, right = st.columns( [ 0.45, 0.1 , 0.45])
52 | with left:
53 | 
54 |     ### QIP Machine
55 |     st.markdown('<div>QIP Machine</div>', unsafe_allow_html=True)
56 |     st.markdown('<div>The QIP Machine is an interface for calculating commonly \
57 |                 studied quantitative image properties (QIPs).</div>', unsafe_allow_html=True)
58 |     st.page_link("pages/1_📊_QIP_Machine.py", label='Go to the QIP Machine', icon="▶️")
59 | 
60 | with right:
61 |     ### QIP Documentation
62 |     st.markdown('<div>QIP documentation</div>', unsafe_allow_html=True)
63 |     st.markdown('<div>The QIP documentation provides the user with detailed information for each of the \
64 |                 quantitative image properties (QIPs) that can be calculated with the QIP Machine.</div>', unsafe_allow_html=True)
65 |     st.page_link("pages/3_📒_QIP_Documentation.py", label='Go to the QIP Documentation', icon="▶️")
66 | 
67 | # st.write(' ')
68 | # left, cen, right = st.columns( [ 0.45, 0.1 , 0.45])
69 | 
70 | # with left:
71 | #     ### Aesthetics datasets
72 | #     st.markdown('<div>Aesthetics Datasets</div>', unsafe_allow_html=True)
73 | #     st.markdown('<div>This feature lists an extensive collection of image datasets \
74 | #                 used in aesthetics research, along with important metrics for each dataset. \
75 | #                 It allows you to search and filter for specific datasets. A download link is provided for each dataset.</div>', unsafe_allow_html=True)
76 | # with right:
77 | #     ### Datasets Documentation
78 | #     st.markdown('<div>Datasets Documentation</div>', unsafe_allow_html=True)
79 | #     st.markdown('<div>Additional information for each aesthetics dataset can be found in the Dataset Documentation.</div>', unsafe_allow_html=True)
80 | 
81 | 
82 | st.write(' ')
83 | left, cen, right = st.columns( [ 0.45, 0.1 , 0.45])
84 | 
85 | 
86 | with left:
87 |     ### Resizing and Cropping
88 |     st.markdown('<div>Image Preprocessing</div>', unsafe_allow_html=True)
89 |     st.markdown('<div>This feature allows you to preprocess images. \
90 |                 A variety of resizing, cropping, padding and other options are implemented here.</div>', unsafe_allow_html=True)
91 |     st.page_link("pages/2_🔧_Image_preprocessing.py", label='Go to Image preprocessing', icon="▶️")
92 | 
93 | with right:
94 |     ### References
95 |     st.markdown('<div>References</div>', unsafe_allow_html=True)
96 |     st.markdown('<div>Lists all references cited in this toolbox.</div>', unsafe_allow_html=True)
97 |     st.page_link("pages/4_📚_References.py", label='Go to the References', icon="▶️")
98 | 
99 | st.divider()
100 | 
101 | st.markdown('<div>Contributors</div>', unsafe_allow_html=True)
102 | st.markdown('Ralf Bartho: Toolbox concept, code development, maintenance, bugfixes', unsafe_allow_html=True)
103 | st.markdown('Christoph Redies: Toolbox concept, supervision of the project, QIP documentation', unsafe_allow_html=True)
104 | st.markdown('Gregor Hayn-Leichsenring: Toolbox concept', unsafe_allow_html=True)
105 | st.markdown('Lisa Kossmann, Johan Wagemans: Development of the dataset feature', unsafe_allow_html=True)
106 | st.markdown('Branka Spehar, Ronald Hübner, George Mather: Provided code to compute image properties', unsafe_allow_html=True)
107 | 
108 | 
109 | st.write('')
110 | 
111 | st.markdown('<div>Contact and GitHub</div>
', unsafe_allow_html=True) 112 | st.markdown('Questions, suggestions, bugs: ralf.bartho@gmail.com', unsafe_allow_html=True) 113 | st.markdown('GitHub repository: https://github.com/RBartho/Aesthetics-Toolbox', unsafe_allow_html=True) 114 | st.markdown('GitHub repository: https://github.com/RBartho/Aesthetics-Toolbox', unsafe_allow_html=True) 115 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /AT/edge_entropy_qips.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.ndimage import convolve 3 | import PIL 4 | import warnings 5 | 6 | 7 | 8 | def create_gabor(size, theta=0, octave=3): 9 | 10 | amplitude = 1.0 11 | phase = np.pi/2.0 12 | frequency = 0.5**octave # 0.5**(octave+0.5) 13 | hrsf = 4 # half response spatial frequency bandwith 14 | sigma = 1/(np.pi*frequency) * np.sqrt(np.log(2)/2) * (2.0**hrsf+1)/(2.0**hrsf-1) 15 | valsy = np.linspace(-size//2+1, size//2, size) 16 | valsx = np.linspace(-size//2+1, size//2, size) 17 | xgr,ygr = np.meshgrid(valsx, valsy); 18 | 19 | omega = 2*np.pi*frequency 20 | gaussian = np.exp(-(xgr*xgr + ygr*ygr)/(2*sigma*sigma)) 21 | slant = xgr*(omega*np.sin(theta)) + ygr*(omega*np.cos(theta)) 22 | 23 | gabor = np.round(gaussian, decimals=4) * amplitude*np.cos(slant + phase); 24 | # e^(-(x^2+y^2)/(2*1.699^2)) *cos(pi/4*(x*sin(2)+y*cos(2)) + pi/2) 25 | 26 | return np.round(gabor, decimals=4) 27 | 28 | def create_filterbank(flt_size=31 , num_filters=24): 29 | flt_raw = np.zeros([num_filters, flt_size, flt_size]) 30 | BINS_VEC = np.linspace(0, 2*np.pi, num_filters+1)[:-1] 31 | for i in range(num_filters): 32 | flt_raw[i,:,:] = create_gabor(flt_size, theta=BINS_VEC[i], octave=3) 33 | #print(i, flt_size, BINS_VEC[i]) 34 | return flt_raw 35 | 36 | 37 | def run_filterbank(flt_raw, img, num_filters=24): 38 | (h, w) = img.shape 39 | num_filters = flt_raw.shape[0] 40 | image_flt = np.zeros((num_filters,h,w)) 41 | 42 | for i in range(num_filters): 43 | image_flt[i,:,:] = convolve(img, flt_raw[i,:,:]) 44 | 45 | resp_bin = np.argmax(image_flt, axis=0) 46 | resp_val = np.max(image_flt, axis=0) 47 | 48 | return resp_bin, resp_val 49 | 50 | 51 | def edge_density(resp_val): 52 | normalize_fac = float(resp_val.shape[0] * resp_val.shape[1]) 53 | edge_d = np.sum(resp_val)/normalize_fac 54 | return edge_d 55 | 56 | 57 | def do_counting(resp_val, resp_bin, CIRC_BINS=48, GABOR_BINS=24, MAX_DIAGONAL = 500): 58 | """creates histogram (distance, relative orientation in image, relative gradient)""" 59 | 60 | h, w = resp_val.shape; 61 | 62 | # cutoff minor filter responses 63 | cutoff = np.sort(resp_val.flatten())[-10000] # get 10000th highest response for cutting of beneath 64 | resp_val[resp_val=MAX_DIAGONAL] = MAX_DIAGONAL-1 83 | 84 | direction = np.round(np.arctan2(ey-ey[cp], ex-ex[cp]) / (2.0*np.pi)*CIRC_BINS + (orientations[cp]/float(GABOR_BINS)*CIRC_BINS)).astype("uint32") 85 | direction = np.mod(direction+CIRC_BINS, CIRC_BINS) 86 | np.add.at(counts, tuple([distance_rel, direction, orientations_rel]), resp_val[ey,ex] * resp_val[ey[cp],ex[cp]]) 87 | 88 | return counts, resp_val 89 | 90 | 91 | def entropy(a): 92 | if np.sum(a)!=1.0 and np.sum(a)>0: 93 | a = a / np.sum(a) 94 | v = a>0.0 95 | return -np.sum(a[v] * np.log2(a[v])) 96 | 97 | 98 | def do_statistics(counts, GABOR_BINS=24): 99 | # normalize by sum 100 | counts_sum = np.sum(counts, axis=2) + 0.00001 101 | normalized_counts = counts / (counts_sum[:,:,np.newaxis]) 102 | d,a,_ = normalized_counts.shape 103 | 
shannon_nan = np.zeros((d,a)) 104 | for di in range(d): 105 | for ai in range(a): 106 | if counts_sum[di,ai]>1: ## ignore bins without pixels 107 | shannon_nan[di,ai] = entropy(normalized_counts[di,ai,:]) 108 | else: 109 | shannon_nan[di,ai] = np.nan 110 | return shannon_nan 111 | 112 | 113 | def edge_resize (img_gray_np, max_pixels = 300*400): 114 | if max_pixels != None: 115 | img_gray_PIL = PIL.Image.fromarray(img_gray_np) 116 | s0,s1 = img_gray_PIL.size 117 | a = np.sqrt(max_pixels / float(s0*s1)) 118 | img_gray_PIL_rez = img_gray_PIL.resize((int(s0*a),int(s1*a)), PIL.Image.LANCZOS) 119 | img_gray_np = np.asarray(img_gray_PIL_rez, dtype='float') 120 | return img_gray_np 121 | 122 | 123 | def do_first_and_second_order_entropy_and_edge_density (img_gray, GABOR_BINS=24): 124 | ''' 125 | Calculates the 'Edge density, 1st-order and 2nd-order Edge orientation entropy' QIPs 126 | 127 | Input: 8 bit grayscale image in Pillow format 128 | Output: Edge density, 1st-order and 2nd-order Edge orientation entropy 129 | 130 | Usage: 131 | Import Image from PIL 132 | 133 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 134 | do_first_and_second_order_entropy_and_edge_density (img_gray) 135 | ''' 136 | 137 | flt_raw = create_filterbank() 138 | img = edge_resize (img_gray) 139 | resp_bin, resp_val = run_filterbank(flt_raw, img) 140 | 141 | ### edge density 142 | edge_d = edge_density(resp_val) 143 | 144 | ### do_counting must run before first_order_entropy but after edge density because it modifies resp_val!!! 145 | counts, resp_val = do_counting(resp_val, resp_bin) 146 | 147 | ### first order entropy 148 | first_order_bin = np.zeros(GABOR_BINS) 149 | for b in range(GABOR_BINS): 150 | first_order_bin[b] = np.sum(resp_val[resp_bin==b]) 151 | first_order = entropy(first_order_bin) 152 | ###second order entropy 153 | shannon_nan = do_statistics(counts) 154 | ## suppress "mean of empty slice warning" 155 | with warnings.catch_warnings(): 156 | warnings.simplefilter("ignore", category=RuntimeWarning) 157 | second_order = np.nanmean(np.nanmean(shannon_nan, axis=1)[20:240]) 158 | return first_order, second_order, edge_d 159 | 160 | -------------------------------------------------------------------------------- /AT/CNN_qips.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.signal import correlate 3 | from skimage.transform import resize 4 | 5 | ################################ helper functions ##################################### 6 | 7 | 8 | def resize_and_add_ImageNet_mean(img): 9 | ### resize img to desired dimension 10 | img = resize(img, [512,512], order=1) ### Not the same "resize" function as the old Caffe code, which leads to different results depending on the extent of the resizing. 
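    ### the channel order was swapped to BGR in conv2d before this call, so the
    ### constants below are the standard Caffe ImageNet channel means in BGR order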
11 | ### normalize with image_net mean 12 | img = img - np.array([104.00698793 , 116.66876762 , 122.67891434]) 13 | ### add new additional axis and return 14 | return img 15 | 16 | 17 | def conv2d(input_img, kernel, bias): 18 | 19 | input_img = input_img[:,:,(2,1,0)].astype(np.float32) ## Caffe Net used different channel orders 20 | 21 | input_img = resize_and_add_ImageNet_mean(input_img ) 22 | 23 | # Get input data dimensions 24 | in_height, in_width, in_channels = input_img.shape 25 | 26 | # Get kernel dimensions 27 | k_height, k_width, in_channels, out_channels = kernel.shape 28 | 29 | # Calculate output dimensions 30 | out_height = int(np.ceil(float(in_height - k_height + 1) / float(4))) 31 | out_width = int(np.ceil(float(in_width - k_width + 1) / float(4))) 32 | 33 | # Allocate output data 34 | output_data = np.zeros((out_height, out_width, out_channels)) 35 | 36 | # Convolve each input channel with its corresponding kernel and sum the results 37 | for j in range(out_channels): 38 | for i in range(in_channels): 39 | output_data[:, :, j] += correlate( 40 | input_img[:, :, i], 41 | kernel[:, :, i, j], 42 | mode='valid' 43 | )[::4, ::4] 44 | 45 | # Add bias to the output 46 | output_data[:, :, j] += bias[j] 47 | 48 | ## relu activation function 49 | output_data[output_data < 0] = 0 50 | 51 | ### swap axis to order: filters, dim1_filters, dim2_filters (96,126,126) 52 | output_data = np.swapaxes(output_data,2,0) 53 | output_data = np.swapaxes(output_data,1,2) 54 | 55 | return output_data 56 | 57 | 58 | def max_pooling (resp, patches ): 59 | (i_filters, ih, iw) = resp.shape 60 | max_pool_map = np.zeros((patches,patches,i_filters)) 61 | patch_h = ih/float(patches) 62 | patch_w = iw/float(patches) 63 | 64 | for h in range(patches): 65 | for w in range(patches): 66 | ph = h*patch_h 67 | pw = w*patch_w 68 | patch_val = resp[:,int(ph):int(ph+patch_h), int(pw):int(pw+patch_w)] 69 | 70 | for b in range(i_filters): 71 | max_pool_map[h,w,b] = np.max(patch_val[b]) 72 | 73 | max_pool_map_sum = np.sum(max_pool_map, axis=2) 74 | normalized_max_pool_map = max_pool_map / max_pool_map_sum[:,:,np.newaxis] 75 | 76 | return max_pool_map, normalized_max_pool_map 77 | 78 | 79 | def get_differences(max_pooling_map_orig, max_pooling_map_flip): 80 | assert(max_pooling_map_orig.shape == max_pooling_map_flip.shape) 81 | sum_abs = np.sum(np.abs(max_pooling_map_orig - max_pooling_map_flip)) 82 | sum_max = np.sum(np.maximum(max_pooling_map_orig, max_pooling_map_flip)) 83 | return 1.0 - sum_abs / sum_max 84 | 85 | 86 | ###################### Variances #################################################### 87 | 88 | 89 | def CNN_Variance(normalized_max_pool_map, kind): 90 | ''' 91 | Calculates the 'variability' or 'sparseness' QIP 92 | 93 | Input: Takes the CNN-features of the first layer of an AlexNet as input 94 | Output: CNN_Variance, Sparseness or Variability 95 | 96 | Usage: 97 | Import Image from PIL 98 | 99 | img_rgb = np.asarray(Image.open( path_to_image_file ).convert('RGB')) 100 | 101 | patches = 12 # default is 12 for variability and 22 for sparseness 102 | kind = 'variability' or 'sparseness' 103 | 104 | [kernel,bias] = np.load(open("AT/bvlc_alexnet_conv1.npy", "rb"), encoding="latin1", allow_pickle=True) 105 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 106 | _, normalized_max_pooling_map_Variability = CNN_qips.max_pooling (resp_scipy, patches=patches ) 107 | variability = CNN_qips.CNN_Variance (normalized_max_pooling_map_Variability , kind=kind ) 108 | ''' 109 | 110 | result = 0 111 | if kind == 
'sparseness': 112 | result = np.var( normalized_max_pool_map) 113 | elif kind == 'variability': 114 | result = np.median(np.var(normalized_max_pool_map , axis=(0,1))) 115 | else: 116 | raise ValueError("Wrong input for kind of CNN_Variance. Use sparseness or variability") 117 | return result 118 | 119 | 120 | 121 | ################### Self-Similarity ################################ 122 | 123 | def CNN_selfsimilarity(histogram_ground, histogram_level): 124 | ''' 125 | Calculates the 'CNN-based Self-similarity' QIP 126 | 127 | Input: Takes the CNN-features of the first layer of an AlexNet as input 128 | Output: CNN-based Self-similarity 129 | 130 | Usage: 131 | Import Image from PIL 132 | 133 | img_rgb = np.asarray(Image.open( path_to_image_file ).convert('RGB')) 134 | 135 | [kernel,bias] = np.load(open("AT/bvlc_alexnet_conv1.npy", "rb"), encoding="latin1", allow_pickle=True) 136 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 137 | _, normalized_max_pooling_map_8 = CNN_qips.max_pooling (resp_scipy, patches=8 ) 138 | _, normalized_max_pooling_map_1 = CNN_qips.max_pooling (resp_scipy, patches=1 ) 139 | cnn_self_sym = CNN_qips.CNN_selfsimilarity (normalized_max_pooling_map_1 , normalized_max_pooling_map_8 ) 140 | ''' 141 | 142 | ph, pw, n = histogram_level.shape 143 | hiks = [] 144 | for ih in range(ph): 145 | for iw in range(pw): 146 | hiks.append( np.sum(np.minimum( histogram_ground, histogram_level[ih,iw])) ) 147 | sesim = np.median(hiks) 148 | return sesim 149 | 150 | 151 | ################### CNN Symmetry ################################ 152 | 153 | 154 | def CNN_symmetry(input_img, kernel, bias): 155 | ''' 156 | Calculates the 'CNN-feature-based Symmetry' QIP 157 | 158 | Input: Takes the CNN-features of the first layer of an AlexNet as input 159 | Output: CNN-based Symmetry, left-rigth Symmetry, up-down Symmetry and left-right-up-down Symmetry 160 | 161 | Usage: 162 | Import Image from PIL 163 | 164 | img_rgb = np.asarray(Image.open( path_to_image_file ).convert('RGB')) 165 | 166 | [kernel,bias] = np.load(open("AT/bvlc_alexnet_conv1.npy", "rb"), encoding="latin1", allow_pickle=True) 167 | sym_lr,sym_ud,sym_lrud = CNN_qips.CNN_symmetry(img_rgb, kernel, bias) 168 | ''' 169 | 170 | ### get max pooling map for orig. 
image 171 | resp_orig = conv2d(input_img, kernel, bias) 172 | max_pooling_map_orig, _ = max_pooling (resp_orig, patches=17) 173 | 174 | ### get max pooling map for left-right fliped image 175 | img_lr = np.fliplr(input_img) 176 | resp_lr = conv2d(img_lr, kernel, bias) 177 | max_pooling_map_lr, _ = max_pooling (resp_lr, patches=17) 178 | sym_lr = get_differences(max_pooling_map_orig, max_pooling_map_lr) 179 | 180 | ### get max pooling map for up-down fliped image 181 | img_ud = np.flipud(input_img) 182 | resp_ud = conv2d(img_ud, kernel, bias) 183 | max_pooling_map_ud, _ = max_pooling (resp_ud, patches=17) 184 | sym_ud = get_differences(max_pooling_map_orig, max_pooling_map_ud) 185 | 186 | ### get max pooling map for up-down and left-right fliped image 187 | img_lrud = np.fliplr(np.flipud(input_img)) 188 | resp_lrud = conv2d(img_lrud, kernel, bias) 189 | max_pooling_map_lrud, _ = max_pooling (resp_lrud, patches=17) 190 | sym_lrud = get_differences(max_pooling_map_orig, max_pooling_map_lrud) 191 | 192 | return sym_lr, sym_ud, sym_lrud 193 | -------------------------------------------------------------------------------- /pages/4_📚_References.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | st.set_page_config(layout="wide") 4 | 5 | st.markdown(""" """, unsafe_allow_html=True) 8 | 9 | st.markdown('
<div>References</div>
', unsafe_allow_html=True) 10 | 11 | st.write( 12 | 13 | """ 14 | Bosch, A., Zisserman, A., & Munoz, X. (2007). Representing shape with a spatial pyramid kernel. Proceedings of the 6th ACM International Conference on Image and Video Retrieval, 401-408. https://doi.org/10.1145/1282280.1282340 15 | 16 | Brachmann, A., Barth, E., & Redies, C. (2017). Using CNN features to better understand what makes visual artworks special. Frontiers in Psychology, 8, 830. https://doi.org/10.3389/fpsyg.2017.00830 17 | 18 | Brachmann, A., & Redies, C. (2016). Using convolutional neural network filters to measure left-right mirror symmetry in images. Symmetry, 8, 144. https://doi.org/10.3390/sym8120144 19 | 20 | Brachmann, A., & Redies, C. (2017). Defining self-similarity of images using features learned by convolutional neural networks Electronic Imaging, Human Vision and Electronic Imaging 2017, Burlingame, CA. 21 | 22 | Burton, G. J., & Moorhead, I. R. (1987). Color and spatial structure in natural scenes. Applied Physics, 26, 157-170. 23 | 24 | Dalal, N., & Triggs, B. (2005). Histograms of oriented gradients for human detection. International Conference on Computer Vision & Pattern Recognition, 2, 886-893. https://doi.org/10.1109/CVPR.2005.177 25 | 26 | Datta, R., Joshi, D., Li, J., & Wang, J. Z. (2006). Studying aesthetics in photographic images using a computational approach. Lecture Notes in Computer Science, 3953, 288-301. https://doi.org/10.1007/11744078_23 27 | 28 | Geller, H. A., Bartho, R., Thommes, K., & Redies, C. (2022). Statistical image properties predict aesthetic ratings in abstract paintings created by neural style transfer. Frontiers in Neuroscience, 16, 999720. https://doi.org/10.3389/fnins.2022.999720 29 | 30 | Graham, D. J., & Field, D. J. (2007). Statistical regularities of art images and natural scenes: spectra, sparseness and nonlinearities. Spatial Vision, 21(1-2), 149-164. https://doi.org/10.1163/156856807782753877 31 | 32 | Graham, D. J., & Field, D. J. (2008). Variations in intensity statistics for representational and abstract art, and for art from the Eastern and Western hemispheres. Perception, 37(9), 1341-1352. 33 | 34 | Hübner, R., & Fillinger, M. G. (2016). Comparison of objective measures for predicting perceptual balance and visual aesthetic preference. Frontiers in Psychology, 7, 335. https://doi.org/10.3389/fpsyg.2016.00335 35 | 36 | Iigaya, K., Yi, S., Wahle, I. A., Tanwisuth, K., & O'Doherty, J. P. (2021). Aesthetic preference for art can be predicted from a mixture of low- and high-level visual features. Nature Human Behaviour, 5(6), 743-755. https://doi.org/10.1038/s41562-021-01124-6 37 | 38 | Isherwood, Z. J., Schira, M. M., & Spehar, B. (2017). The tuning of human visual cortex to variations in the 1/f amplitude spectra and fractal properties of synthetic noise images. Neuroimage, 146, 642-657. https://doi.org/10.1016/j.neuroimage.2016.10.013 39 | 40 | Kersten, D. (1987). Predictability and redundancy of natural images. Journal of the Optical Society of America, Series A, 4(12), 2395-2400. http://www.ncbi.nlm.nih.gov/pubmed/3430226 41 | 42 | Koch, M., Denzler, J., & Redies, C. (2010). 1/f2 Characteristics and isotropy in the Fourier power spectra of visual art, cartoons, comics, mangas, and different categories of photographs. PLoS One, 5(8), e12268. https://doi.org/10.1371/journal.pone.0012268 43 | 44 | Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). Imagenet classification with deep convolutional neural networks. 
Advances in Neural Information Processing Systems, 25, 1097-1105. 45 | 46 | Li, C., & Chen, T. (2009). Aesthetic visual quality assessment of paintings. IEEE Journal of Selected Topics in Signal Processing, 3(2), 236-252. 47 | 48 | Li, J., Datta, R., Joshi, D., & Wang, J. (2006). Studying aesthetics in photographic images using a computational approach. Lecture Notes in Computer Science, 3953, 288-301. 49 | 50 | Mallon, B., Redies, C., & Hayn-Leichsenring, G. U. (2014). Beauty in abstract paintings: Perceptual contrast and statistical properties. Frontiers in Human Neuroscience, 8, 161. https://doi.org/10.3389/fnhum.2014.00161 51 | 52 | Mandelbrot, B. (1983). The fractal geometry of nature. San Francisco: W. H. Freeman. 53 | 54 | Mather, G. (2014). Artistic adjustment of image spectral slope. Art & Perception, 2, 11-22. 55 | 56 | Mather, G. (2018). Visual image statistics in the history of Western art. Art & Perception, 6(2-3), 97-115. https://doi.org/10.1163/22134913-20181092 57 | 58 | McManus, I. C., Stöver, K., & Kim, D. (2011). Arnheim's Gestalt theory of visual balance: Examining the compositional structure of art photographs and abstract images. i-Perception, 2, 615-647. 59 | 60 | Nakauchi, S., Kondo, T., Kinzuka, Y., Taniyama, Y., Tamura, H., Higashi, H., Hine, K., Minami, T., Linhares, J. M. M., & Nascimento, S. M. C. (2022). Universality and superiority in preference for chromatic composition of art paintings. Scientific Reports, 12(1). https://doi.org/10.1038/s41598-022-08365-z 61 | 62 | Peng, Y. (2022). Athec: A Python library for computational aesthetic analysis of visual media in social science research. Computational Communication Research, 4.1, 323-349. https://doi.org/10.5117CCR2022.1.009.PENG 63 | 64 | Redies, C., Brachmann, A., & Wagemans, J. (2017). High entropy of edge orientations characterizes visual artworks from diverse cultural backgrounds. Vision Research, 133, 130-144. https://doi.org/10.1016/j.visres.2017.02.004 65 | 66 | Redies, C., & Gross, F. (2013). Frames as visual links between paintings and the museum environment: an analysis of statistical image properties. Frontiers in Psychology, 4, 831. https://doi.org/10.3389/fpsyg.2013.00831 67 | 68 | Redies, C., Hasenstein, J., & Denzler, J. (2007). Fractal-like image statistics in visual art: similarity to natural scenes. Spatial Vision, 21(1-2), 137-148. https://doi.org/10.1163/156856807782753921 69 | 70 | Schifanella, R., Redi, M., Aiello, L.M. (2015). An image is worth more than a thousand favorites: Surfacing the hidden beauty of flickr pictures. Proceedings of the International AAAI Conference on Web and Social Media, 9, 397-406. https://doi.org/10.1609/icwsm.v9i1.14612 71 | 72 | Shannon, C. E. (1948). A mathematical theory of communication. Bell System Technical Journal, 27(4), 623-656. https://doi.org/10.1002/j.1538-7305.1948.tb00917.x 73 | 74 | Spehar, B., Clifford, C. W. G., Newell, B. R., & Taylor, R. P. (2003). Universal aesthetic of fractals. Computers & Graphics, 27, 813-820. https://doi.org/10.1016/S0097-8493(03)00154-7 75 | 76 | Spehar, B., & Taylor, R. P. (2013). Fractals in art and nature: Why do we like them? S&T/SPIE Electronic Imaging, Burlingame, California, United States, 8651, 865118. https://doi.org/https://doi.org/10.1117/12.2012076 77 | 78 | Taylor, R. P. (2002). Order in Pollock's chaos - Computer analysis is helping to explain the appeal of Jackson Pollock's paintings. Scientific American, 287(6), 116-121. 
https://doi.org/10.1038/scientificamerican1202-116 79 | 80 | Thieleking, R., Medawar, E., Disch, L., & Witte, A. V. (2020). Art. pics database: An open access database for art stimuli for experimental research. Frontiers in Psychology, 11, 3537. 81 | 82 | Viengkham, C., Isherwood, Z., & Spehar, B. (2022). Fractal‑scaling properties as aesthetic primitives in vision and touch. Axiomathes, 32, 869-888. 83 | 84 | Viengkham, C., & Spehar, B. (2018). Preference for fractal-scaling properties across synthetic noise images and artworks. Frontiers in Psychology, 9, 1439. https://doi.org/10.3389/fpsyg.2018.01439 85 | 86 | Wagemans, J. (1995). Detection of visual symmetries. Spatial Vision, 9(1), 9-32. https://doi.org/10.1163/156856895x00098 87 | 88 | Wilson, A., & Chatterjee, A. (2005). The assessment of preference for balance: Introducing a new test. Empirical Studies of the Arts, 23, 165-180. 89 | 90 | 91 | """ 92 | ) 93 | 94 | -------------------------------------------------------------------------------- /AT/resize_functions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PIL 3 | import io 4 | from skimage.color import lab2rgb, rgb2lab 5 | 6 | 7 | PIL.Image.MAX_IMAGE_PIXELS = 933120000 8 | 9 | 10 | 11 | ### helper functions 12 | 13 | def file_process_in_memory(images): 14 | """ Converts PIL image objects into BytesIO in-memory bytes buffers. """ 15 | 16 | for i, (image_name, pil_image) in enumerate(images): 17 | file_object = io.BytesIO() 18 | pil_image.save(file_object, "PNG") 19 | pil_image.close() 20 | images[i][1] = file_object # Replace PIL image object with BytesIO memory buffer. 21 | 22 | return images # Return modified list. 23 | 24 | 25 | ############################################### 26 | 27 | def resize_using_longer_side_kepp_aspect_ratio(img, longer_side): 28 | ''' 29 | 'Input 8 bit img in PILLOW format 30 | ''' 31 | 32 | if img.size[0] >= img.size[1]: 33 | a = longer_side / float(img.size[0]) 34 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 35 | else: 36 | a = longer_side / float(img.size[1]) 37 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 38 | return img 39 | 40 | 41 | def resize_using_shorter_side_kepp_aspect_ratio(img, shorter_side): 42 | ''' 43 | 'Input 8 bit img in PILLOW format 44 | ''' 45 | if img.size[0] <= img.size[1]: 46 | a = shorter_side / float(img.size[0]) 47 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 48 | else: 49 | a = shorter_side / float(img.size[1]) 50 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 51 | return img 52 | 53 | 54 | def resize_width_keep_aspect_ratio(img, width=1000): 55 | ''' 56 | 'Input 8 bit img in PILLOW format 57 | ''' 58 | a = width / float(img.size[0]) 59 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 60 | return img 61 | 62 | def resize_height_keep_aspect_ratio(img, height=1000): 63 | ''' 64 | 'Input 8 bit img in PILLOW format 65 | ''' 66 | a = height / float(img.size[1]) 67 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 68 | return img 69 | 70 | def resize_to_resolution(img, width=1000, height=1000): 71 | ''' 72 | 'Input 8 bit img in PILLOW format 73 | ''' 74 | img = img.resize((int(width),int(height)), PIL.Image.Resampling.LANCZOS) 75 | return img 76 | 77 | def 
resize_to_number_of_pixels_keep_aspect_ratio(img, num_pixels=100000): 78 | ''' 79 | 'Input 8 bit img in PILLOW format 80 | ''' 81 | s = img.size 82 | old_num_pixels = s[0]*s[1] 83 | d = np.sqrt(num_pixels/old_num_pixels) 84 | s_new = np.round([s[0]*d, s[1]*d]).astype(np.int32) 85 | img = img.resize((s_new[0],s_new[1]), PIL.Image.Resampling.LANCZOS) 86 | return img 87 | 88 | def resize_to_fit_display(img, disp_width=1920, disp_height=1080): 89 | ''' 90 | 'Input 8 bit img in PILLOW format 91 | ''' 92 | disp_ratio = disp_width/disp_height 93 | s = img.size 94 | img_ratio = s[0] / s[1] 95 | if img_ratio > disp_ratio: # --> resize img to disp_hight,while maintaining image aspect ratio 96 | a = disp_width / float(img.size[0]) 97 | img = img.resize((int(s[0]*a),int(s[1]*a)), PIL.Image.Resampling.LANCZOS) 98 | else: # resize img to disp_width ,while maintaining image aspect ratio 99 | a = disp_height / float(img.size[1]) 100 | img = img.resize((int(s[0]*a),int(s[1]*a)), PIL.Image.Resampling.LANCZOS) 101 | return img 102 | 103 | ########################### cropping 104 | 105 | def center_crop (img): 106 | ''' 107 | 'Input 8 bit img in PILLOW format 108 | ''' 109 | width, height = img.size # Get dimensions 110 | 111 | if width > height: 112 | # center 113 | c_width = np.floor(width/2) 114 | c_height = np.floor(height/2) 115 | # define corp borders 116 | top=0 117 | bottom = height 118 | left = c_width - c_height 119 | right = c_width + c_height 120 | if height%2 == 1: # for uneven pixels 121 | right += 1 122 | img = img.crop((left, top, right, bottom)) 123 | 124 | elif width < height: 125 | # center 126 | c_width = np.floor(width/2) 127 | c_height = np.floor(height/2) 128 | # define corp borders 129 | top = c_height - c_width 130 | bottom = c_height + c_width 131 | left = 0 132 | right = width 133 | if width%2 == 1: # for uneven pixels 134 | bottom += 1 135 | img = img.crop((left, top, right, bottom)) 136 | else: 137 | img = img 138 | 139 | return img 140 | 141 | 142 | def center_crop_to_square_power_of_two (img): 143 | ''' 144 | 'Input 8 bit img in PILLOW format 145 | ''' 146 | # crop to largest center square with power of two 147 | width, height = img.size # Get dimensions 148 | 149 | # find largest power of two in pixel 150 | if width > height: 151 | npow = np.floor(np.log2(height)) 152 | else: 153 | npow = np.floor(np.log2(width)) 154 | 155 | c_new = 2**(npow-1) # middle of new img length 156 | 157 | # find image center 158 | c_width = np.floor(width/2) 159 | c_height = np.floor(height/2) 160 | 161 | # define corp borders 162 | top = c_height - c_new 163 | bottom = c_height + c_new 164 | left = c_width - c_new 165 | right = c_width + c_new 166 | 167 | img = img.crop((left, top, right, bottom)) 168 | 169 | return img 170 | 171 | 172 | def resize_to_image_size(img, des_img_size=900): 173 | ''' 174 | 'Input 8 bit img in PILLOW format 175 | ''' 176 | 177 | width, height = img.size 178 | img_size = width + height 179 | 180 | resize_faktor = des_img_size / img_size 181 | 182 | n_width = int(width*resize_faktor) 183 | n_height = int(height*resize_faktor) 184 | 185 | img = img.resize((n_width,n_height), PIL.Image.Resampling.LANCZOS) 186 | 187 | return img 188 | 189 | 190 | 191 | def padding_and_resizing_to_square_X_pixel(img, resize_to=1024): 192 | ''' 193 | 'Input 8 bit img in PILLOW format 194 | ''' 195 | mean = np.round(np.mean(img, axis=(0,1))).astype(np.uint8) # mean RGB values or mean gray value for padding 196 | 197 | ### resize first if needes 198 | if resize_to != -1: 199 | ### resize longer 
side while maintaining aspect ratio 200 | if img.size[0] >= img.size[1]: 201 | a = resize_to / float(img.size[0]) 202 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 203 | else: 204 | a = resize_to / float(img.size[1]) 205 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 206 | 207 | ### padding with mean gray value or mean RGB values 208 | img = np.asarray(img) 209 | h,w = img.shape[:2] 210 | w_c = int(w/2) 211 | h_c = int(h/2) 212 | if h > w: 213 | img_pad = np.full([h,h,3], mean) 214 | if w%2 == 1: # uneven width 215 | img_pad[ : , h_c - w_c : h_c + w_c +1 ] = img 216 | else: 217 | img_pad[ : , h_c - w_c : h_c + w_c ] = img 218 | img = img_pad 219 | elif h < w: 220 | img_pad = np.full([w,w,3], mean) 221 | if h%2 == 1: # uneven height 222 | img_pad[w_c - h_c : w_c + h_c +1 , : ] = img 223 | else: 224 | img_pad[w_c - h_c : w_c + h_c , : ] = img 225 | img = img_pad 226 | 227 | return PIL.Image.fromarray(img) 228 | 229 | ################################# LAB Color rotation ################################ 230 | 231 | 232 | def cart2pol(x, y): 233 | rho = np.sqrt(x**2 + y**2) 234 | phi = np.arctan2(y, x) 235 | return rho, phi 236 | 237 | def pol2cart(rho, phi): 238 | x = rho * np.cos(phi) 239 | y = rho * np.sin(phi) 240 | return x, y 241 | 242 | 243 | def rotate_image_in_LAB_colorspace(img_rgb, degree): 244 | ''' 245 | 'Input 8 bit RGB image in PILLOW format 246 | ''' 247 | 248 | img_lab = rgb2lab(img_rgb) 249 | 250 | degree_in_pi = degree/(180) 251 | 252 | ### get polar coordinates for each pixel 253 | rho, phi = cart2pol(img_lab[:,:,1], img_lab[:,:,2]) 254 | 255 | ### change angle 256 | phi = phi+ degree_in_pi * np.pi 257 | 258 | ### convert back to polar coordinates 259 | x, y = pol2cart(rho, phi) 260 | 261 | ## assign to image, ceeping original luminance 262 | img_lab[:,:,1] = x 263 | img_lab[:,:,2] = y 264 | 265 | # convert to RGB 266 | img_RGB_rotated = lab2rgb(img_lab) * 255 267 | 268 | return PIL.Image.fromarray(img_RGB_rotated.astype(np.uint8)) -------------------------------------------------------------------------------- /AT/PHOG_qips.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from skimage.transform import resize 3 | from skimage.color import rgb2lab 4 | 5 | 6 | ################################# PHOG SIPs (Self-similarity, Anisotropy, Complexity) ######################################## 7 | 8 | 9 | def resize_img(img, re): 10 | if re>1: 11 | s = img.shape 12 | b = s[0]*s[1] 13 | d = np.sqrt(re/b) 14 | s_new = np.round([s[0]*d, s[1]*d]).astype(np.int32) 15 | img = resize(img, s_new, order=3, anti_aliasing=True) *255 16 | return img.astype(np.uint8) 17 | 18 | else: 19 | return img 20 | 21 | 22 | def absmaxND(a, axis=2): 23 | amax = a.max(axis) 24 | amin = a.min(axis) 25 | return np.where(-amin > amax, amin, amax) 26 | 27 | 28 | 29 | 30 | def maxGradient_fast(Img): 31 | 32 | gradY_t, gradX_t = np.gradient(Img, axis = [0,1], edge_order=1) 33 | 34 | gradientX = absmaxND(gradX_t) 35 | gradientY = absmaxND(gradY_t) 36 | 37 | return gradientX, gradientY 38 | 39 | 40 | 41 | def computeDescriptor(GradientValue, GradientAngle, bins, angle, levels, section, is_global=False): 42 | descriptor = [] 43 | 44 | intervalSize = angle / bins 45 | halfIntervalSize = (angle / bins) / 2 46 | 47 | # Level 0 48 | ind = ((GradientAngle >= angle - halfIntervalSize) | (GradientAngle < halfIntervalSize)) 49 | descriptor.append(np.sum(GradientValue[ind])) 50 | 
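    # Note: bin 0 above collects the wrap-around interval (gradient angles just
    # below the maximum angle and just above 0 fall into the same bin); the
    # remaining bins are filled in the loop below.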
51 | for b in range(1, bins): 52 | ind = ((GradientAngle >= (b * intervalSize) - halfIntervalSize) & (GradientAngle < ((b + 1) * intervalSize) - halfIntervalSize)) 53 | descriptor.append(np.sum(GradientValue[ind])) 54 | 55 | ### local normalization (only used for the global descriptor) 56 | if is_global: 57 | descriptor = normalizeDescriptor(descriptor, bins) 58 | 59 | # Other levels 60 | for l in range(1, levels + 1): 61 | cellSizeX = GradientAngle.shape[1] / (section ** l) 62 | cellSizeY = GradientAngle.shape[0] / (section ** l) 63 | 64 | if cellSizeX < 1 or cellSizeY < 1: 65 | raise ValueError("Cell size is less than 1. Adjust the number of levels.") 66 | 67 | for j in range(1, section ** l + 1): 68 | 69 | leftX = 1 + np.round((j - 1) * cellSizeX).astype(np.int64) 70 | rightX = np.round(j * cellSizeX).astype(np.int64) 71 | 72 | for i in range(1, section ** l + 1): 73 | 74 | topY = 1 + np.round((i - 1) * cellSizeY).astype(np.int64) 75 | bottomY = np.round(i * cellSizeY).astype(np.int64) 76 | 77 | GradientValueCell = GradientValue[topY - 1:bottomY, leftX - 1:rightX] 78 | GradientAngleCell = GradientAngle[topY - 1:bottomY, leftX - 1:rightX] 79 | 80 | ind = ((GradientAngleCell >= angle - halfIntervalSize) | (GradientAngleCell < halfIntervalSize)) 81 | local_descriptor = [np.sum(GradientValueCell[ind])] 82 | 83 | for b in range(1, bins): 84 | ind = ((GradientAngleCell >= (b * intervalSize) - halfIntervalSize) & (GradientAngleCell < ((b + 1) * intervalSize) - halfIntervalSize)) 85 | local_descriptor.append(np.sum(GradientValueCell[ind])) 86 | 87 | if is_global: 88 | local_descriptor = normalizeDescriptor(local_descriptor, bins) 89 | descriptor.extend(local_descriptor) 90 | else: 91 | descriptor.extend(local_descriptor) 92 | 93 | if is_global: 94 | descriptorglobal = normalizeDescriptorGlobal(descriptor) 95 | return descriptorglobal 96 | else: 97 | return descriptor 98 | 99 | 100 | def computePHOGLAB(Img, angle, bins, levels, section): 101 | 102 | GradientX, GradientY = maxGradient_fast(Img) 103 | 104 | # Calculate the norm (strength) of the gradient values 105 | GradientValue = np.sqrt( np.square(GradientX) + np.square(GradientY) ) 106 | 107 | # Replace zeros in GradientX with a small value to avoid division by zero 108 | GradientX[np.where(GradientX == 0)] = 1e-5 109 | 110 | YX = GradientY / GradientX 111 | 112 | if angle == 180: 113 | GradientAngle = ((np.arctan(YX) + (np.pi / 2)) * 180) / np.pi 114 | elif angle == 360: 115 | GradientAngle = ((np.arctan2(GradientY, GradientX) + np.pi) * 180) / np.pi 116 | else: 117 | raise ValueError("Invalid angle value. Use 180 or 360.") 118 | 119 | descriptor = computeDescriptor(GradientValue, GradientAngle, bins, angle, levels, section) 120 | 121 | return descriptor, GradientValue, GradientAngle 122 | 123 | 124 | def convert_to_matlab_lab(img_rgb): 125 | ''' 126 | Matlab has a different range for the channels of the LAB color space.
127 | We need to scale to the Matlab ranges, to get the same results 128 | ''' 129 | 130 | img = rgb2lab(img_rgb) 131 | 132 | #L: 0 to 100, a: -127 to 128, b: -128 to 127 133 | img[:,:,0] = np.round(np.array((img[:,:,0] / 100) * 255 )).astype(np.int32) 134 | img[:,:,1] = np.round(np.array(img[:,:,1] + 128).astype(np.int32)) 135 | img[:,:,2]= np.round(np.array(img[:,:,2] + 128).astype(np.int32)) 136 | 137 | return img.astype(np.uint16) 138 | 139 | 140 | def normalizeDescriptor(descriptor, bins): 141 | b = np.reshape(descriptor, (bins, len(descriptor) // bins), order='F') 142 | c = np.sum(b, axis=0) 143 | s = b.shape 144 | 145 | temp = np.zeros((s[0], s[1])) 146 | 147 | for i in range(s[1]): 148 | if c[i] != 0: 149 | temp[:, i] = b[:, i] / c[i] 150 | else: 151 | temp[:, i] = b[:, i] 152 | 153 | normalizeddescriptor = np.reshape(temp, len(descriptor), order='F') 154 | return list(normalizeddescriptor) 155 | 156 | 157 | def normalizeDescriptorGlobal(descriptor): 158 | if np.sum(descriptor) != 0: 159 | normalizeddescriptorGlobal = descriptor / np.sum(descriptor) 160 | return normalizeddescriptorGlobal 161 | else: 162 | return list(descriptor) 163 | 164 | 165 | def computeWeightedDistances(descriptor, bins, levels, section, descriptornn): 166 | distances = [] 167 | 168 | comparisonglobal = descriptor[:bins] 169 | 170 | temp = np.zeros((levels, 2), dtype=int) 171 | 172 | temp[0, 0] = bins + 1 173 | temp[0, 1] = section ** (2) * bins + temp[0, 0] - 1 174 | 175 | for i in range(1, levels): 176 | temp[i, 0] = temp[i - 1, 1] + 1 177 | temp[i, 1] = section ** ((i+1) * 2) * bins + temp[i, 0] - 1 178 | 179 | distances.append(np.sum(comparisonglobal)) 180 | 181 | for i in range(levels): 182 | for j in range(temp[i, 0], temp[i, 1] + 1, bins): 183 | j = j-1 184 | part = descriptor[j:j + bins] 185 | 186 | if (np.max(comparisonglobal) > 1e-8) and (np.max(part) > 1e-8): 187 | dist1 = np.sum(np.minimum(comparisonglobal, part)) 188 | 189 | m1 = np.mean(descriptornn[:bins]) 190 | m2 = np.mean(descriptornn[j:j + bins]) 191 | 192 | area = section ** ((i+1) * 2) 193 | m2 = m2 * area 194 | 195 | if m1 < 1e-8 or m2 < 1e-8: 196 | strengthsimilarity = 0 197 | elif m1 > m2: 198 | strengthsimilarity = m2 / m1 199 | else: 200 | strengthsimilarity = m1 / m2 201 | 202 | dist1 = dist1 * strengthsimilarity 203 | distances.append(dist1) 204 | else: 205 | distances.append(0) 206 | 207 | return distances 208 | 209 | def computeSD(descriptorglobal, bins, levels, section): 210 | temp = np.zeros((levels, 2), dtype=int) 211 | temp[0, 0] = bins + 1 212 | temp[0, 1] = section ** (2) * bins + temp[0, 0] - 1 213 | 214 | for i in range(1, levels): 215 | temp[i, 0] = temp[i - 1, 1] + 1 216 | temp[i, 1] = section ** ((i+1) * 2) * bins + temp[i, 0] - 1 217 | 218 | descript = descriptorglobal[temp[levels - 1, 0]-1 : temp[levels - 1, 1]] 219 | 220 | sdvalue = np.std(descript) 221 | return sdvalue 222 | 223 | 224 | def displayDistances(distances, bins, levels, section): 225 | distanceatlevel = [] 226 | 227 | temp3 = np.zeros([levels+1, 2], dtype=int) 228 | # print('###########', temp3, levels) 229 | temp3[0, 0] = 1 230 | temp3[0, 1] = 1 231 | 232 | for i in range(levels): 233 | temp3[i+1, 0] = section ** (2 * (i+1)) 234 | temp3[i+1, 1] = temp3[i+1, 0] + temp3[i, 1] 235 | 236 | distanceatlevel = np.median(distances[temp3[levels - 1, 1] :temp3[levels, 1]]) 237 | 238 | return distanceatlevel 239 | 240 | 241 | def PHOGfromImage(img_rgb, section=2, bins=16, angle=360, levels=3, re=-1, sesfweight=[1,1,1] ): 242 | ''' 243 | Calculates the PHOG QIPs 
'Anisotropy, Complexity, PHOG-based Self-similarity' 244 | 245 | Input: 8 bit RGB image in Pillow format 246 | Output: Anisotropy, Complexity, PHOG-based Self-similarity 247 | 248 | Usage: 249 | from PIL import Image 250 | 251 | img_rgb = np.asarray(Image.open( path_to_image_file ).convert('RGB')) 252 | PHOGfromImage(img_rgb) 253 | ''' 254 | 255 | img = resize_img(img_rgb, re) 256 | img = convert_to_matlab_lab(img) 257 | 258 | descriptor, GradientValue, GradientAngle = computePHOGLAB(img, angle, bins, levels, section) 259 | descriptornn = descriptor 260 | 261 | descriptor = normalizeDescriptor(descriptor, bins) 262 | 263 | descriptorglobal = computeDescriptor(GradientValue, GradientAngle, bins, angle, levels, section, is_global=True) 264 | 265 | distances = computeWeightedDistances(descriptor, bins, levels, section, descriptornn) 266 | 267 | anisotropy = computeSD(descriptorglobal, bins, levels, section) 268 | complexity = np.mean(GradientValue) 269 | 270 | # self_sim 271 | distancesatlevel = 0 272 | distancesateachlevel = [] 273 | for i in range(levels): 274 | distancesateachlevel.append(displayDistances(distances, bins, i+1, section)) 275 | distancesatlevel = distancesatlevel + distancesateachlevel[i]*sesfweight[i] 276 | self_sim = distancesatlevel / sum(sesfweight) 277 | 278 | return self_sim, complexity, anisotropy 279 | 280 | 281 | -------------------------------------------------------------------------------- /AT/balance_qips.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from skimage.transform import rotate 3 | from skimage.filters import threshold_otsu 4 | 5 | ######################################################################################## 6 | ################################# Huebner Group ######################################## 7 | ######################################################################################## 8 | 9 | 10 | def Balance(img_gray): 11 | ''' 12 | Calculates the "Balance" QIP from the Ronald Hübner group 13 | 14 | Input: Takes a grayscale image in Pillow format as input. 15 | Output: Balance QIP 16 | 17 | Usage: 18 | Load images like this: 19 | 20 | from PIL import Image 21 | 22 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 23 | Balance(img_gray) 24 | ''' 25 | 26 | height, width = img_gray.shape 27 | 28 | hist = np.histogram(img_gray, bins=256, range=(0, 256)) 29 | 30 | counts = hist[0] 31 | 32 | thres = 128 33 | 34 | sum1 = sum(counts[:thres]) 35 | sum2 = sum(counts[thres-1:]) # in the original Matlab code, the histogram bin at the threshold (gray value 126 there) is added to both sums. This programming error has hardly any effect on the results and has been adopted here in the Python code.
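    # sum1 counts pixels below mid-gray, sum2 the rest; because the second slice starts at thres-1, the bin for gray value 127 enters both sums (kept for parity with the Matlab original, see comment above).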
36 | 37 | if sum1 <= sum2: 38 | im_comp = 255 - img_gray 39 | else: 40 | im_comp = img_gray 41 | 42 | nall = np.sum(im_comp) 43 | 44 | ## avoid division by zero 45 | if nall == 0: 46 | nall = 1 47 | 48 | # Horizontal balance 49 | w = width // 2 50 | 51 | s1 = np.sum(im_comp[:, :w], dtype=int) 52 | s2 = np.sum(im_comp[:, -w:], dtype=int) 53 | bh = (abs(s1 - s2) / nall) * 100 54 | 55 | w2 = width // 4 # the middle (inner) area absorbs any leftover center columns if the width is not divisible by 4 56 | 57 | s1 = np.sum(im_comp[:, :w2], dtype=int) 58 | s2 = np.sum(im_comp[:, -w2:], dtype=int) 59 | 60 | bioh = (abs((nall - (s1 + s2)) - (s1 + s2)) / nall) * 100 # % inner-outer horizontal 61 | 62 | # Vertical balance 63 | h = height // 2 64 | 65 | s1 = np.sum(im_comp[:h, :], dtype=int) 66 | s2 = np.sum(im_comp[-h:, :], dtype=int) 67 | bv = (abs(s1 - s2) / nall) * 100 68 | 69 | h2 = height // 4 70 | s1 = np.sum(im_comp[:h2, :], dtype=int) 71 | s2 = np.sum(im_comp[-h2:, :], dtype=int) 72 | 73 | biov = (abs((nall - (s1 + s2)) - (s1 + s2)) / nall) * 100 74 | 75 | # Main diagonal and inner-outer (bottom right top left) 76 | s1 = np.sum(np.triu(im_comp, 1), dtype=int) 77 | s2 = np.sum(np.tril(im_comp, -1), dtype=int) 78 | bmd = (abs(s1 - s2) / nall) * 100 79 | 80 | prop = 1 / np.sqrt(2) 81 | b1 = height - int(height * prop) 82 | b2 = width - int(width * prop) 83 | s1 = np.sum(np.tril(im_comp, -b1), dtype=int) 84 | s2 = np.sum(np.triu(im_comp, b2), dtype=int) 85 | biomd = (abs((nall - (s1 + s2)) - (s1 + s2)) / nall) * 100 86 | 87 | # Anti-diagonal and inner-outer (bottom right top left) 88 | im_comp = np.rot90(im_comp) 89 | s1 = np.sum(np.triu(im_comp, 1), dtype=int) 90 | s2 = np.sum(np.tril(im_comp, -1), dtype=int) 91 | bad = (abs(s1 - s2) / nall) * 100 92 | 93 | s1 = np.sum(np.tril(im_comp, -b2), dtype=int) 94 | s2 = np.sum(np.triu(im_comp, b1), dtype=int) 95 | bioad = (abs((nall - (s1 + s2)) - (s1 + s2)) / nall) * 100 96 | 97 | bs = (bh + bv + bioh + biov + bmd + biomd + bad + bioad) / 8 98 | 99 | return bs 100 | 101 | 102 | def DCM(img_gray): 103 | ''' 104 | Calculates the "DCM" QIP from the Ronald Hübner group 105 | 106 | Input: Takes a grayscale image in Pillow format as input.
107 | Output: DCM QIP 108 | 109 | Usage: 110 | Load images like this: 111 | 112 | from PIL import Image 113 | 114 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 115 | DCM(img_gray) 116 | ''' 117 | 118 | height, width = img_gray.shape 119 | 120 | hist = np.histogram(img_gray, bins=256, range=(0, 256)) 121 | counts = hist[0] 122 | 123 | thres = 128 124 | 125 | sum1 = sum(counts[:thres]) 126 | sum2 = sum(counts[thres:]) 127 | 128 | if sum1 <= sum2: 129 | im_comp = 255 - img_gray # Invert image 130 | else: 131 | im_comp = img_gray 132 | 133 | nall = np.sum(im_comp) ### total sum of pixel values, i.e. the image "mass" 134 | 135 | # Horizontal balance point 136 | r = 0 137 | for i in range(width): 138 | w = np.sum(im_comp[:, i],dtype=float) 139 | r += w * i 140 | Rh = np.round(r / nall) + 1 # x position of fulcrum 141 | Rhnorm = Rh / width # Normalized 142 | 143 | # Vertical balance point 144 | r = 0 145 | for i in range(height): 146 | w = np.sum(im_comp[i, :],dtype=float) 147 | r += w * i 148 | Rv = np.round(r / nall) + 1 # y position of fulcrum 149 | Rvnorm = Rv / height # Normalized 150 | 151 | htmp = 0.5 - Rhnorm 152 | vtmp = 0.5 - Rvnorm 153 | 154 | dist = np.sqrt(htmp ** 2 + vtmp ** 2) 155 | rdist = (dist / 0.5) * 100 156 | 157 | return rdist, htmp, vtmp 158 | 159 | 160 | def Mirror_symmetry(img_gray): 161 | ''' 162 | Calculates the "Mirror symmetry" QIP from the Ronald Hübner group 163 | 164 | Input: Takes a grayscale image in Pillow format as input. 165 | Output: Mirror symmetry QIP 166 | 167 | Usage: 168 | Load images like this: 169 | 170 | from PIL import Image 171 | 172 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 173 | Mirror_symmetry(img_gray) 174 | ''' 175 | 176 | # Automatically find optimal threshold level 177 | level = threshold_otsu(img_gray) 178 | 179 | # Convert image to binary 180 | BW = img_gray <= level 181 | 182 | s = BW.shape 183 | height = s[0] 184 | width = s[1] 185 | 186 | # Horizontal axis of reflection (vertical reflection) 187 | if height % 2 == 0: # even number 188 | h2 = height // 2 189 | else: 190 | h2 = (height - 1) // 2 191 | n1 = h2 - 1
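    # n1 scales the weights below: each mirrored pixel pair is weighted by 1 + j/n1, growing linearly from 1 at the image border to 2 next to the reflection axis; the factor 2/(3*width*h2) rescales the maximum attainable sum (mean weight 1.5 over width*h2 pairs) to 1.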
192 | 193 | 194 | sym = 0 195 | for i in range(width): 196 | for j in range(h2): 197 | #print(i,j, sym) 198 | sym += (BW[j, i] * BW[ (height-1) - (j), i]) * (1 + j / n1) 199 | Sh = sym * (2 / (3 * width * h2)) 200 | 201 | # Vertical axis of reflection (horizontal reflection) 202 | if width % 2 == 0: # even number 203 | w2 = width // 2 204 | else: 205 | w2 = (width - 1) // 2 206 | n1 = w2 - 1 207 | 208 | sym = 0 209 | for i in range(height): 210 | for j in range(w2): 211 | sym += (BW[i, j] * BW[i, (width-1) - j]) * (1 + j / n1) 212 | Sv = sym * (2 / (3 * height * w2)) 213 | 214 | if width == height: 215 | # Major diagonal of reflection (ONLY FOR SQUARES) 216 | sym = 0 217 | n = 1 # Pixels until diagonal 218 | for i in range(1,height): 219 | for j in range(n): 220 | #print(i,j,n) 221 | sym += (BW[i, j] * BW[j, i]) * (1 + (j+1) / n) 222 | n += 1 223 | 224 | Smd = sym * (2 / (3 * height * (width - 1) / 2)) 225 | 226 | # Minor diagonal of reflection (ONLY FOR SQUARES) 227 | BW = rotate(BW, 90) 228 | sym = 0 229 | n = 1 # Pixels until diagonal 230 | for i in range(1, height): 231 | for j in range(n): 232 | sym += (BW[i, j] * BW[j, i]) * (1 + (j+1) / n) 233 | n += 1 234 | Sad = sym * (2 / (3 * height * (width - 1) / 2)) 235 | 236 | ms = ((Sh + Sv + Smd + Sad) / 4) * 100 237 | else: 238 | ms = ((Sh + Sv) / 2) * 100 239 | 240 | return ms 241 | 242 | 243 | def Homogeneity(img_gray): 244 | ''' 245 | Calculates the "Homogeneity" QIP from the Ronald Hübner group 246 | 247 | Input: Takes a grayscale image in Pillow format as input. 248 | Output: Homogeneity QIP 249 | 250 | Usage: 251 | Load images like this: 252 | 253 | from PIL import Image 254 | 255 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 256 | Homogeneity(img_gray) 257 | ''' 258 | 259 | # number of bins taken from the original paper: Hübner & Fillinger, Comparison of Objective Measures for Predicting Perceptual Balance and Visual Aesthetic Preference, page 4
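    # The image is divided into a 10 x 10 grid of cells and the foreground pixels are counted per cell; these cell counts feed the homogeneity measure.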
260 | hbins = 10 261 | vbins = 10 262 | 263 | height, width = img_gray.shape 264 | 265 | hist = np.histogram(img_gray, bins=256, range=(0, 256)) 266 | counts = hist[0] 267 | thres = 128 268 | sum1 = sum(counts[:thres]) 269 | sum2 = sum(counts[thres-1:]) # same threshold off-by-one as in Balance() above; hardly any effect on the results 270 | if sum1 <= sum2: 271 | im = 255 - img_gray # Invert image 272 | #print('inverted') 273 | else: 274 | im = img_gray 275 | 276 | level = threshold_otsu(im) 277 | 278 | BW = im > level 279 | 280 | hinc = width // hbins 281 | vinc = height // vbins 282 | 283 | x = np.zeros((vbins, hbins)) 284 | 285 | ## summing up black pixels in cells 286 | for i in range(hbins+1): 287 | for j in range(vbins+1): 288 | if (i!=hbins) and (j!=vbins): # inner pieces 289 | x[j,i] = np.sum( BW[ vinc * j : (j+1) * vinc , hinc*i : hinc * (i+1) ] ) 290 | elif (i len(val): 29 | to_val = len(val) 30 | # mean 31 | i-=1 32 | bin_mean_bar[0, i] = np.mean(np.arange(from_val, to_val + 1)) # x-val 33 | bin_mean_bar[1, i] = np.mean(val[np.arange(from_val-1, to_val)]) # y-val 34 | bin_x_val[i] = from_val 35 | 36 | return bin_mean_bar, bin_x_val 37 | 38 | 39 | def var_eigen(log_m, log_n, log_x, log_y): 40 | fit = log_m * log_x + log_n 41 | diff = fit - log_y 42 | return np.var(diff) 43 | 44 | 45 | def rotavg(array): 46 | N = array.shape[0] 47 | X, Y = np.meshgrid(np.arange(-N//2, N//2), np.arange(-N//2, N//2)) 48 | theta, rho = np.arctan2(Y, X), np.sqrt(X**2 + Y**2) 49 | rho = np.round(rho).astype(int) 50 | I = np.zeros((N//2+1, 9)) 51 | f = np.zeros((N//2+1, 9)) 52 | a = np.arange(0, np.pi + np.pi/8, np.pi/8) 53 | 54 | for i in range(rho.shape[0]): 55 | for j in range(rho.shape[1]): 56 | rh = rho[i, j] 57 | value = array[i, j] 58 | 59 | if rh <= N/2: 60 | I[rh, 8] += 1 61 | if I[rh, 8] == 1: 62 | f[rh, 8] = value 63 | #print(value) 64 | else: 65 | f[rh, 8] = f[rh, 8] + (value - f[rh, 8]) / I[rh, 8] 66 | 67 | for k in range(8): 68 | if a[k] <= theta[i, j] < a[k+1]: 69 | if rh <= N/2: 70 | I[rh, k] += 1 71 | if I[rh, k] == 1: 72 | f[rh, k] = value 73 | else: 74 | f[rh, k] = f[rh, k] + (value - f[rh, k]) / I[rh, k] 75 | break 76 | return f 77 | 78 | 79 | def padding_and_resizing_to_square_1024_pixel(img): 80 | 81 | 82 | mean = np.round(np.mean(img)).astype(np.uint8) # mean gray value for padding 83 | 84 | ### resize longer side to 1024 pixels, maintain aspect ratio 85 | img = PIL.Image.fromarray(img) 86 | if img.size[0] >= img.size[1]: 87 | a = 1024 / float(img.size[0]) 88 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 89 | else: 90 | a = 1024 / float(img.size[1]) 91 | img = img.resize((int(img.size[0]*a),int(img.size[1]*a)), PIL.Image.Resampling.LANCZOS) 92 | 93 | ### padding with mean gray value 94 | img = np.asarray(img) 95 | h,w = img.shape 96 | w_c = int(w/2) 97 | h_c = int(h/2) 98 | if h > w: 99 | img_pad = np.full([h,h], mean) 100 | if w%2 == 1: # odd width 101 | img_pad[ : , h_c - w_c : h_c + w_c +1 ] = img 102 | else: 103 | img_pad[ : , h_c - w_c : h_c + w_c ] = img 104 | img = img_pad 105 | elif h < w: 106 | img_pad = np.full([w,w], mean) 107 | if h%2 == 1: # odd height 108 | img_pad[w_c - h_c : w_c + h_c +1 , : ] = img 109 | else: 110 | img_pad[w_c - h_c : w_c + h_c , : ] = img 111 | img = img_pad 112 | 113 | return img 114 | 115 | 116 | def fourier_redies(img_gray, bin_size=2, cycles_min=10, cycles_max=256): 117 | ''' 118 | Calculates the 'Fourier Slope **Redies**' and the 'Fourier Sigma' QIPs 119 | 120 | Input: 8 bit grayscale image in
Pillow format 121 | Output: Fourier Slope **Redies** and the Fourier Sigma 122 | 123 | Usage: 124 | from PIL import Image 125 | 126 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 127 | fourier_redies(img_gray) 128 | ''' 129 | 130 | 131 | ### pad (and resize) the image to a 1024 x 1024 square 132 | img_gray_resized = padding_and_resizing_to_square_1024_pixel(img_gray) 133 | 134 | power = np.fft.fftshift(np.fft.fft2(img_gray_resized.astype(float))) 135 | A = rotavg(np.abs(power)**2) 136 | A = A[:,8].copy() 137 | 138 | ### limit max frequency for small images 139 | if cycles_max > len(A): 140 | cycles_max = len(A) 141 | 142 | rang = np.arange(cycles_min, cycles_max + 1) - 1 143 | 144 | x_min = rang[0] 145 | x_max = rang[-1] 146 | 147 | using_range = A[rang] 148 | log_rang = np.log10(rang+1) 149 | log_using_range = np.log10(using_range) 150 | 151 | 152 | bin_mean_bar, bin_x_val = calc_bin(A, bin_size, x_min, x_max) 153 | 154 | param = np.linalg.lstsq(np.vstack([np.ones(len(bin_mean_bar[1])), np.log10(bin_mean_bar[0])]).T, np.log10(bin_mean_bar[1]), rcond=None)[0] 155 | 156 | log_m_bin = param[1] 157 | log_n_bin = param[0] 158 | 159 | SIGMA = var_eigen(log_m_bin, log_n_bin, log_rang, log_using_range) 160 | SLOPE = log_m_bin 161 | 162 | return SIGMA, SLOPE 163 | 164 | 165 | ################################ Zoey Isherwood & Branka Spehar ################################# 166 | 167 | 168 | ''' 169 | Translation to Python 3 from Zoey Isherwood's Matlab code on GitHub: 170 | https://github.com/zoeyisherwood/pp-spatiotemp 171 | 172 | Isherwood, Z. J., Clifford, C. W. G., Schira, M. M., Roberts, M. M. & Spehar, B. (2021) 173 | Nice and slow: Measuring sensitivity and visual preference toward naturalistic stimuli 174 | varying in their amplitude spectra in space and time. Vision Research 181, 47-60, 175 | doi:10.1016/j.visres.2021.01.001. 176 | 177 | rot_avg.m function by Bruno Olshausen 178 | 179 | Before fitting the data, outliers are removed in the linear fit (not the log-log fit) if Cook's distance > 4/n. 180 | This basically removes the low frequencies for most images. 181 | 182 | ''' 183 | 184 | 185 | def rot_avg(array): 186 | """ 187 | rotavg.m - Matlab function to compute rotational average of (square) array 188 | by Bruno Olshausen 189 | N should be even.
190 | """ 191 | N = array.shape[0] 192 | 193 | X, Y = np.meshgrid(np.arange(-N/2, N/2), np.arange(-N/2, N/2)) 194 | rho = np.sqrt(X**2 + Y**2).round().astype(int) 195 | 196 | f = np.zeros((N//2 + 1)) 197 | 198 | for r in range(N//2 + 1): 199 | mask = (rho == r) 200 | if np.any(mask): 201 | f[r] = np.mean(array[mask]) 202 | 203 | return f 204 | 205 | 206 | def CooksDistance_SM(X, y): 207 | ''' 208 | computes the Cook's distance using the statsmodels package 209 | ''' 210 | 211 | # add constant value 212 | X = sm.add_constant(X.reshape(-1, 1)) 213 | # fit the model 214 | model = sm.OLS(y,X).fit() 215 | # Get influence measures 216 | influence = model.get_influence() 217 | # Calculate Cook's distance 218 | cooks_d = influence.cooks_distance[0] 219 | # Output the results 220 | return cooks_d 221 | 222 | 223 | def fourier_slope_branka_Spehar_Isherwood(img_gray): 224 | ''' 225 | Calculates the 'Fourier Slope **Spehar**' QIP 226 | 227 | Input: 8 bit grayscale image in Pillow format 228 | Output: Fourier Slope **Spehar** 229 | 230 | Usage: 231 | from PIL import Image 232 | 233 | img_gray = np.asarray(Image.open( path_to_image_file ).convert('L')) 234 | fourier_slope_branka_Spehar_Isherwood(img_gray) 235 | ''' 236 | 237 | img_gray = np.asarray(img_gray) 238 | 239 | 240 | # Adjust image size to be even 241 | if img_gray.shape[0] % 2 == 1: 242 | img_gray = img_gray[:-1, :] 243 | if img_gray.shape[1] % 2 == 1: 244 | img_gray = img_gray[:, :-1] 245 | 246 | # Ensure the input is square, center cropping to the largest square with a power-of-two side length 247 | if img_gray.shape[0] != img_gray.shape[1]: 248 | s_original = img_gray.shape 249 | s_trim = min(2**int(np.log2(s_original[0])), 2**int(np.log2(s_original[1]))) 250 | center_x, center_y = s_original[0] // 2, s_original[1] // 2 251 | img_gray = img_gray[center_x - s_trim // 2:center_x + s_trim // 2, 252 | center_y - s_trim // 2:center_y + s_trim // 2] 253 | 254 | # Calculate spatial slope 255 | # remove outliers (e.g. low frequencies) first
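    # The rule-of-thumb threshold "Cook's distance > 4/n" is used below to flag influential points.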
256 | xsize, ysize = img_gray.shape 257 | 258 | imf = np.fft.fftshift(np.fft.fft2(img_gray.astype(float))) 259 | 260 | impf = np.abs(imf) 261 | Pf = rot_avg(impf) 262 | x_vec = np.arange(1, xsize // 2 + 1) 263 | y_vec = Pf[1:ysize // 2 + 1] 264 | 265 | cook_distance = CooksDistance_SM(x_vec, y_vec) 266 | outliers = cook_distance > 4 / len(x_vec) 267 | x_vec = x_vec[~outliers] 268 | y_vec = y_vec[~outliers] 269 | 270 | # Log-log fit 271 | A_loglog = np.log(x_vec) 272 | B_loglog = np.log(y_vec) 273 | b_loglog, _ = np.polyfit(A_loglog, B_loglog, 1) 274 | 275 | spatialSlope_logFit = b_loglog 276 | 277 | return spatialSlope_logFit 278 | 279 | 280 | ################################ George Mather ################################# 281 | 282 | 283 | def rotavg_mather(array): 284 | N, _ = array.shape 285 | X, Y = np.meshgrid(np.arange(-N/2, N/2), np.arange(-N/2, N/2)) 286 | rho = np.round(np.sqrt(X**2 + Y**2)).astype(int) 287 | f = np.zeros(int(N/2) + 1) 288 | for r in range(int(N/2) + 1): 289 | f[r] = np.mean(array[np.where(rho == r)]) 290 | return f 291 | 292 | def center_crop_mather(img_rgb): 293 | # crop to largest center square with power-of-two side length 294 | 295 | img_PIL = PIL.Image.fromarray(img_rgb) 296 | width, height = img_PIL.size # Get dimensions 297 | 298 | # find largest power of two that fits into the shorter side 299 | if width > height: 300 | npow = np.floor(np.log2(height)) 301 | else: 302 | npow = np.floor(np.log2(width)) 303 | 304 | c_new = 2**(npow-1) # half of the new side length 305 | 306 | # find image center 307 | c_width = np.floor(width/2) 308 | c_height = np.floor(height/2) 309 | 310 | # define crop borders 311 | top = c_height - c_new 312 | bottom = c_height + c_new 313 | left = c_width - c_new 314 | right = c_width + c_new 315 | 316 | img = img_PIL.crop((left, top, right, bottom)) 317 | 318 | return np.asarray(img) 319 | 320 | 321 | def fourier_slope_mather(img_rgb): 322 | ''' 323 | Calculates the 'Fourier Slope **Mather**' QIP 324 | 325 | Input: 8 bit RGB image in Pillow format 326 | Output: Fourier Slope **Mather** 327 | 328 | Usage: 329 | from PIL import Image 330 | 331 | img_rgb = np.asarray(Image.open( path_to_image_file ).convert('RGB')) 332 | fourier_slope_mather(img_rgb) 333 | ''' 334 | 335 | ci = center_crop_mather(img_rgb) 336 | 337 | nr, nc, _ = ci.shape 338 | 339 | ci = color.rgb2lab(ci) 340 | 341 | tmp = (ci[:, :, 0] / 100.0) * 255.0 # rescale the L* (lightness) channel to [0-255] 342 | cg = tmp.astype(np.uint8) 343 | 344 | # Spectral slope calculation 345 | minf = int(np.round((nr / 2) * (1 / 25))) 346 | maxf = int(np.round((nr / 2) * 0.5)) 347 | 348 | f = np.fft.fftshift(np.fft.fft2(cg.astype(float) / 255)) 349 | impf = np.abs(f)**2 350 | 351 | # Rotational average of the power spectrum 352 | Pf = rotavg_mather(impf) 353 | f1 = np.arange(nr // 2 + 1) 354 | 355 | f11 = f1[minf-1:maxf] 356 | Pf1 = Pf[minf-1:maxf] 357 | c = np.polyfit(np.log(f11), np.log(Pf1), 1) 358 | slope = c[0] / 2 359 | 360 | return slope 361 | -------------------------------------------------------------------------------- /pages/3_📒_QIP_Documentation.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | st.set_page_config(layout="wide") 4 | 5 | st.markdown(""" """, unsafe_allow_html=True) 8 | 9 | st.markdown(""" """, unsafe_allow_html=True) 12 | 13 | st.markdown(""" """, unsafe_allow_html=True) 16 | 17 | def build_entry(QIP, Image_preproc, CComplex, Range, API, References, Notes): 18 | 19 | Button = st.sidebar.checkbox(QIP, value=True) 20 |
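    # Each QIP gets its own sidebar checkbox; the documentation entry below is rendered only while the box is checked.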
21 | if Button: 22 | 23 | st.divider() 24 | st.write('') 25 | 26 | 27 | st.markdown('**' + QIP + '**
', unsafe_allow_html=True) 28 | 29 | col1, col2 = st.columns( [0.2, 0.8]) 30 | with col1: 31 | st.markdown('**Image preprocessing:**
', unsafe_allow_html=True) 32 | st.markdown('**Computational complexity:**
', unsafe_allow_html=True) 33 | st.markdown('**Range of values:**
', unsafe_allow_html=True) 34 | st.markdown('**API:**
', unsafe_allow_html=True) 35 | st.markdown('**References:**
', unsafe_allow_html=True) 36 | st.markdown('**Notes:**
', unsafe_allow_html=True) 37 | 38 | 39 | with col2: 40 | st.markdown('' + Image_preproc + '
', unsafe_allow_html=True) 41 | st.markdown('' + CComplex + '
', unsafe_allow_html=True) 42 | st.markdown('' + Range + '
', unsafe_allow_html=True) 43 | st.markdown('' + API + '
', unsafe_allow_html=True) 44 | st.markdown('' + References + '
', unsafe_allow_html=True) 45 | st.markdown('' + Notes + '
', unsafe_allow_html=True) 46 | 47 | 48 | st.markdown('# QIP Documentation
', unsafe_allow_html=True) 49 | st.markdown('This is the documentation for the QIP machine. Detailed information on the individual QIPs can be found in the publication: \ 50 | Christoph Redies, Ralf Bartho, Lisa Koßmann, Branka Spehar, Ronald Hübner, Johan Wagemans, and Gregor U. Hayn-Leichsenring: \ 51 | A toolbox for calculating objective image properties in aesthetics research. \ 52 | The publication describes the motivation for using each QIP, the algorithm itself, \ 53 | and related publications. (https://arxiv.org/abs/2408.10616)
', unsafe_allow_html=True) 54 | 55 | build_entry( 56 | QIP = 'Image size', 57 | Image_preproc = 'RGB image, no resizing', 58 | CComplex = 'low', 59 | Range = '[0 - inf)', 60 | API = 'AT.color_and_simple_qips.image_size(img_RGB)', 61 | References = 'Datta et al., 2006', 62 | Notes = 'Default: Image size = image width + image height in pixels. There are several different ways implemented to calculate this property, including the product of height and width, the diagonal, or the maximum, average, or minimum of height and width.' 63 | ) 64 | 65 | build_entry( 66 | QIP = 'Aspect ratio', 67 | Image_preproc = 'RGB image, no resizing', 68 | CComplex = 'low', 69 | Range = '(0 - inf)', 70 | API = 'AT.color_and_simple_qips.aspect_ratio(img_RGB)', 71 | References = 'Datta et al., 2006; Li et al., 2006; Iigaya et al., 2021', 72 | Notes = 'Aspect ratio = image_width / image_height' 73 | ) 74 | 75 | build_entry( 76 | QIP = 'RMS contrast', 77 | Image_preproc = 'Converting to L*a*b* color space, no resizing', 78 | CComplex = 'low', 79 | Range = '[0 - 50)', 80 | API = 'AT.color_and_simple_qips.std_channels(img_LAB)', 81 | References = 'Peli, 1990', 82 | Notes = 'RMS contrast = standard deviation of the Lightness channel of the L*a*b* color space' 83 | ) 84 | 85 | 86 | 87 | build_entry( 88 | QIP = 'Lightness entropy', 89 | Image_preproc = 'Converting to L*a*b* color space, scaling pixel range to [0-255]', 90 | CComplex = 'low', 91 | Range = '[0 - 8]', 92 | API = 'AT.color_and_simple_qips.shannonentropy_channels(img_LAB)', 93 | References = 'Shannon, 1948; Kersten, 1987; Mather, 2018', 94 | Notes = 'Lightness entropy = Shannon entropy of the Lightness channel (L*a*b*)' 95 | ) 96 | 97 | build_entry( 98 | QIP = 'HOG Anisotropy, HOG Complexity, HOG Self-similarity', 99 | Image_preproc = "Converting to Matlab's L*a*b* color space, optional resizing to a number of pixels (-1 = no resizing)", 100 | CComplex = 'high', 101 | Range = '', 102 | API = 'AT.PHOG_qips.PHOGfromImage(img_rgb, section, bins, angle, levels, re, sesfweight )', 103 | References = 'Braun et al., 2013; Redies and Groß, 2013; Dalal and Triggs, 2005; Bosch et al., 2007', 104 | Notes = 'By default, image resizing is disabled (parameter is set to -1). The resize function of the original \ 105 | Matlab script is different from the available Python implementations. Calculating HOG-derived QIPs with resizing will \ 106 | give results different from the original Matlab script. Without resizing, the results are the same.' 107 | ) 108 | 109 | build_entry( 110 | QIP = 'Edge density, 1st-order and 2nd-order Edge orientation entropy', 111 | Image_preproc = 'Converting to 8-bit grayscale image, resizing to 120,000 pixels while maintaining aspect ratio', 112 | CComplex = 'very high', 113 | Range = '', 114 | API = 'AT.edge_entropy_qips.do_first_and_second_order_entropy_and_edge_density (img_gray)', 115 | References = 'Redies et al., 2017', 116 | Notes = 'The 2nd-order Edge orientation entropy compares the orientation and strength of the 10,000 strongest edge pixels in pairs, \ 117 | which is computationally intensive.
A fast C++ implementation of this QIP can be found in the following \ 118 | GitHub repository: https://github.com/RBartho/C-version-2nd-Order-Edge-Orientation-Entropy' 119 | ) 120 | 121 | build_entry( 122 | QIP = 'Color entropy', 123 | Image_preproc = 'Converting to HSV color space, no resizing', 124 | CComplex = 'low', 125 | Range = '[0 - 8] for range of hue values [0-255]', 126 | API = 'AT.color_and_simple_qips.shannonentropy_channels(img_HSV)', 127 | References = 'Geller et al., 2022', 128 | Notes = 'Color entropy is calculated as the Shannon entropy of the Hue channel of the HSV color space.' 129 | ) 130 | 131 | build_entry( 132 | QIP = 'Mean value of RGB, HSV and L*a*b* color channels', 133 | Image_preproc = 'Converting to respective color spaces (RGB, HSV, L*a*b*), no resizing', 134 | CComplex = 'low', 135 | Range = 'RGB [0 - 255]; HSV [0 - 1]; L* [0-100], a* [], b* []', 136 | API = 'AT.color_and_simple_qips.mean_channels(img) and AT.color_and_simple_qips.circ_stats(img_hsv)', 137 | References = 'Datta et al., 2006; Geller et al., 2022; Iigaya et al., 2021; Li & Chen, 2009; Li et al., 2006; \ 138 | Mallon et al., 2014; Nakauchi et al., 2022; Peng, 2022; Schifanella, 2015; Thieleking et al., 2020', 139 | Notes = 'The Hue channel of the HSV color space is a circular value, so the normal arithmetic mean \ 140 | cannot be applied here. Therefore, the circular mean is calculated for the Hue channel. \ 141 | For all other channels, the normal arithmetic mean is used.' 142 | ) 143 | 144 | build_entry( 145 | QIP = 'Standard deviation of RGB, HSV, L*a*b* color channels', 146 | Image_preproc = 'Converting to respective color spaces (RGB, HSV, L*a*b*), no resizing', 147 | CComplex = 'low', 148 | Range = '', 149 | API = 'AT.color_and_simple_qips.std_channels(img), AT.color_and_simple_qips.circ_stats(img_hsv)', 150 | References = 'Datta et al., 2006; Li et al., 2006; Li & Chen, 2009; Mallon et al., 2014; Schifanella, 2015; \ 151 | Thieleking et al., 2020; Iigaya et al., 2021; Geller et al., 2022; Nakauchi et al., 2022; Peng, 2022', 152 | Notes = 'The Hue channel of the HSV color space is a circular value, so the normal standard deviation \ 153 | cannot be applied here. Therefore, the circular standard deviation is calculated for the Hue channel. \ 154 | For all other channels, the normal standard deviation is used. Note that the standard deviation of \ 155 | the Lightness channel of the L*a*b* color space is the RMS contrast.'
156 | ) 157 | 158 | build_entry( 159 | QIP = 'Balance, DCM, Homogeneity', 160 | Image_preproc = 'Converting to 8-bit grayscale image, no resizing', 161 | CComplex = 'medium', 162 | Range = 'All three QIPs are percentage values [0 - 100].', 163 | API = 'AT.balance_qips.Balance(img_gray), AT.balance_qips.DCM(img_gray), AT.balance_qips.Homogeneity(img_gray)', 164 | References = 'Hübner & Fillinger, 2016; Wagemans, 1995; McManus et al., 2011; Wilson and Chatterjee, 2005', 165 | Notes = '' 166 | ) 167 | 168 | build_entry( 169 | QIP = 'Mirror symmetry', 170 | Image_preproc = 'Converting to binary image, no resizing', 171 | CComplex = 'low', 172 | Range = 'Percentage value [0 - 100].', 173 | API = 'AT.balance_qips.Mirror_symmetry(img_gray)', 174 | References = 'Wagemans, 1995; Hübner & Fillinger, 2016', 175 | Notes = '' 176 | ) 177 | 178 | build_entry( 179 | QIP = 'CNN-feature-based QIPs: Symmetry, Self-similarity, Sparseness and Variability', 180 | Image_preproc = 'RGB image resized to 512 x 512 pixels', 181 | CComplex = 'high', 182 | Range = 'Empirically found values: Sparseness (0 - 0.0014) and Variability (0 - 0.0001)', 183 | API = 'AT.CNN_qips', 184 | References = 'Brachmann & Redies, 2016; Brachmann & Redies, 2017; Brachmann et al., 2017', 185 | Notes = 'All CNN image properties are based on feature maps of the first layer of an AlexNet (Krizhevsky et al., 2012) trained \ 186 | on ImageNet from the Caffe module, which is no longer maintained. The weights of this Caffe module have been extracted, \ 187 | and the convolution and max-pooling operations have been reimplemented with NumPy to keep the \ 188 | toolbox memory footprint small (no TensorFlow or PyTorch needed).' 189 | ) 190 | 191 | build_entry( 192 | QIP = 'Fourier slope and Fourier sigma', 193 | Image_preproc = 'Differs strongly between the methods by Spehar, Mather and Redies, see notes below', 194 | CComplex = 'high', 195 | Range = 'Empirical ranges: Redies [0 - 5), Spehar and Mather [0-2.5]; theoretical ranges [0 - inf)', 196 | API = 'AT.fourier_qips', 197 | References = 'Graham & Field, 2007; Redies et al., 2007; Graham & Redies, 2010; Koch et al., 2010; Spehar & Taylor, 2013; Mather, 2014; Isherwood et al., 2021', 198 | Notes = 'Detailed information on the differences between the three methods can be found in the publication (Table 2).' 199 | ) 200 | 201 | build_entry( 202 | QIP = '2d Fractal dimension', 203 | Image_preproc = 'Input 8-bit grayscale image. Converting to binary image and resizing to square image', 204 | CComplex = 'medium', 205 | Range = '(1,2)', 206 | API = 'AT.box_count_qips.box_count_2d(img_gray)', 207 | References = 'Mandelbrot, 1983; Taylor, 2002; Spehar et al., 2003; Spehar & Taylor, 2013; Viengkham & Spehar, 2018', 208 | Notes = '' 209 | ) 210 | 211 | build_entry( 212 | QIP = '3d Fractal dimension', 213 | Image_preproc = 'Input 8-bit grayscale.
Crop to the largest square whose side length is a power of two.', 214 | CComplex = 'high', 215 | Range = '(2,3)', 216 | API = 'AT.box_count_qips.custom_differential_box_count(img_gray)', 217 | References = 'Mather, 2018', 218 | Notes = '' 219 | ) 220 | 221 | -------------------------------------------------------------------------------- /pages/2_🔧_Image_preprocessing.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import io 3 | from PIL import Image 4 | from zipfile import ZipFile 5 | 6 | from AT import resize_functions, AT_misc 7 | 8 | st.set_page_config(layout="wide") 9 | 10 | AT_misc.build_heading(head= 'Resizing, Cropping, Padding, Color rotation', 11 | notes= 'Common options for image preprocessing in aesthetic research.' 12 | ) 13 | 14 | 15 | upload_file = st.file_uploader('Load image files', type=['jpg','png','jpeg','tif'], accept_multiple_files=True, label_visibility="collapsed" ) # Check to see if files have been uploaded 16 | 17 | 18 | 19 | st.markdown(""" """, unsafe_allow_html=True) 22 | 23 | st.markdown(""" """, unsafe_allow_html=True) 26 | 27 | if upload_file: 28 | st.write('Examples of loaded images:') 29 | if len(upload_file) >= 20: 30 | st.image(upload_file[:20], width=120 ) 31 | else: 32 | st.image(upload_file, width=120 ) 33 | 34 | 35 | st.divider() 36 | 37 | 38 | resizing_selectbox = st.session_state.get( "resizing_selectbox", None) 39 | if upload_file: 40 | 41 | st.markdown(""" 42 | """,unsafe_allow_html=True) 47 | st.markdown('**Select the type of resizing:**
', unsafe_allow_html=True) 48 | 49 | resizing_selectbox = st.radio( 50 | "scaling_selectbox", 51 | label_visibility="collapsed", 52 | options=['**Resize longer side**', 53 | '**Resize shorter side**', 54 | '**Resize image width**', 55 | '**Resize image height**', 56 | '**Resize to fit display**', 57 | '**Resize to number of pixels**', 58 | '**Resize to fixed resolution**', 59 | '**Resize to image size**', 60 | '**Padding to square**', 61 | '**Center crop to square**', 62 | '**Center crop to power of two**', 63 | '**Color rotation in Lab color space**'], 64 | captions = ["Resize longer side \n while maintaining \n aspect ratio.", ### use two spaces before "\n" to get a line break 65 | "Resize shorter side \n while maintaining \n aspect ratio.", 66 | "Resize width \n while maintaining \n aspect ratio.", 67 | "Resize height \n while maintaining \n aspect ratio.", 68 | "Fit image to resolution \n on the given display while \n maintaining aspect ratio.", 69 | "Resize image to a given number \n of pixels while maintaining \n aspect ratio.", 70 | "Resize image to the given resolution, \n **not** maintaining the \n aspect ratio.", 71 | "Resize image to the given \n image size (width+height), maintaining the \n aspect ratio.", 72 | 'Pad image to square using \n the mean gray values or the \n mean RGB values, resizing optional.', 73 | "Center crop image to \n largest possible square \n image.", 74 | 'Center crop image to \n largest square with side \n length of power of two.', 75 | 'Rotate the color of \n all images by a specified \n degree in the Lab color space.' ], 76 | horizontal=True 77 | ) 78 | 79 | st.session_state.resizing_selectbox = resizing_selectbox 80 | 81 | 82 | if resizing_selectbox: 83 | with st.form('Parameter for Resizing'): 84 | if resizing_selectbox == '**Resize longer side**': 85 | st.markdown('**Parameters for resizing longer side:**
', unsafe_allow_html=True) 86 | longer_side = int(st.text_input('To how many pixels should the longer side be resized?:', value="1024", help=None, label_visibility="visible")) 87 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 88 | 89 | elif resizing_selectbox == '**Resize shorter side**': 90 | st.markdown('**Parameters for resizing shorter side:**
', unsafe_allow_html=True) 91 | shorter_side = int(st.text_input('To how many pixels should the shorter side be resized?:', value="1024", help=None, label_visibility="visible")) 92 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 93 | 94 | elif resizing_selectbox == '**Resize image width**': 95 | st.markdown('**Parameters for resizing image width:**
', unsafe_allow_html=True) 96 | img_width = int(st.text_input('To how many pixels should the image width be resized?:', value="1024", help=None, label_visibility="visible")) 97 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 98 | 99 | elif resizing_selectbox == '**Resize image height**': 100 | st.markdown('**Parameters for resizing image height:**
', unsafe_allow_html=True) 101 | img_height = int(st.text_input('To how many pixels should the image height be resized?:', value="1024", help=None, label_visibility="visible")) 102 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 103 | 104 | elif resizing_selectbox == '**Resize to fit display**': 105 | st.markdown('**Parameters for resizing to fit display:**
', unsafe_allow_html=True) 106 | disp_width = int(st.text_input('What is the width of the display you want to fit?:', value="1920", help=None, label_visibility="visible")) 107 | disp_height = int(st.text_input('What is the height of the display you want to fit?:', value="1080", help=None, label_visibility="visible")) 108 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 109 | 110 | elif resizing_selectbox == '**Resize to number of pixels**': 111 | st.markdown('**Parameters for resizing to number of pixels:**
', unsafe_allow_html=True) 112 | num_pixels = int(st.text_input('To how many pixels should the image be resized?:', value="100000", help='Number of pixels = height*width', label_visibility="visible")) 113 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 114 | 115 | elif resizing_selectbox == '**Resize to fixed resolution**': 116 | st.markdown('**Parameters for resizing to fixed resolution:**
', unsafe_allow_html=True) 117 | img_width = int(st.text_input('To what width do you want to resize the images?:', value="1024", help=None, label_visibility="visible")) 118 | img_height = int(st.text_input('To what height do you want to resize the images?:', value="1024", help=None, label_visibility="visible")) 119 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 120 | 121 | elif resizing_selectbox == '**Resize to image size**': 122 | st.markdown('**Parameters for resizing to image size:**
', unsafe_allow_html=True) 123 | des_img_size = int(st.text_input('To what image size do you want to resize the images?:', value="1024", help=None, label_visibility="visible")) 124 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 125 | 126 | elif resizing_selectbox == '**Padding to square**': 127 | st.markdown('**Parameters for padding image to square:**
', unsafe_allow_html=True) 128 | pad_resize_to = int(st.text_input('Do you want to resize the longer image side before padding (-1 = no resizing, or enter the desired pixel length for the longer side)?:', value="-1", help=None, label_visibility="visible")) 129 | st.form_submit_button("**Commit resize parameter selection**", on_click=AT_misc.click_sub_params_resizing) 130 | 131 | elif resizing_selectbox == '**Color rotation in Lab color space**': 132 | st.markdown('**Parameters for color space rotation:**
', unsafe_allow_html=True) 133 | rotation_degree = int(st.text_input('How many degrees do you want to rotate the images?:', value="45", help=None, label_visibility="visible")) 134 | st.form_submit_button("**Commit parameter selection**", on_click=AT_misc.click_sub_params_resizing) 135 | 136 | elif resizing_selectbox == '**Center crop to square**': 137 | st.session_state.params_resizing_submitted = True 138 | 139 | elif resizing_selectbox == '**Center crop to power of two**': 140 | st.session_state.params_resizing_submitted = True 141 | 142 | else: 143 | raise NotImplementedError('wrong resizing option selected, not implemented error') 144 | 145 | 146 | 147 | params_resizing_submitted = st.session_state.get("params_resizing_submitted", None) 148 | ###################################### 149 | 150 | if params_resizing_submitted: 151 | run = st.button('**Start resizing, cropping or color rotation**' ) 152 | 153 | placeholder = st.empty() 154 | if run: 155 | if upload_file: 156 | #progress_text = "Operation in progress. Please wait." 157 | my_bar = st.progress(0) 158 | # create folder to save resized images 159 | #os.makedirs(os.path.join(selected_download_path_r , 'Resized_images'), exist_ok=True) 160 | with st.spinner("Operation in progress. Please wait and don't refresh your browser."): 161 | 162 | 163 | name_images_pairs = [] 164 | 165 | for n in range(len(upload_file)): 166 | 167 | img_PIL = Image.open(upload_file[n]).convert('RGB') 168 | 169 | 170 | placeholder.text('Resizing image: ' + upload_file[n].name) 171 | 172 | #file_path = list_of_images_r[n] 173 | if resizing_selectbox == '**Resize longer side**': 174 | img_resized = resize_functions.resize_using_longer_side_kepp_aspect_ratio(img_PIL, longer_side) 175 | 176 | elif resizing_selectbox == '**Resize shorter side**': 177 | img_resized = resize_functions.resize_using_shorter_side_kepp_aspect_ratio(img_PIL, shorter_side) 178 | 179 | elif resizing_selectbox == '**Resize image width**': 180 | img_resized = resize_functions.resize_width_keep_aspect_ratio(img_PIL, img_width) 181 | 182 | elif resizing_selectbox == '**Resize image height**': 183 | img_resized = resize_functions.resize_height_keep_aspect_ratio(img_PIL, img_height) 184 | 185 | elif resizing_selectbox == '**Resize to fit display**': 186 | img_resized = resize_functions.resize_to_fit_display(img_PIL, disp_width, disp_height) 187 | 188 | elif resizing_selectbox == '**Resize to number of pixels**': 189 | img_resized = resize_functions.resize_to_number_of_pixels_keep_aspect_ratio(img_PIL, num_pixels) 190 | 191 | elif resizing_selectbox == '**Resize to fixed resolution**': 192 | img_resized = resize_functions.resize_to_resolution(img_PIL, img_width, img_height) 193 | 194 | elif resizing_selectbox == '**Resize to image size**': 195 | img_resized = resize_functions.resize_to_image_size(img_PIL, des_img_size = des_img_size) 196 | 197 | elif resizing_selectbox == '**Center crop to square**': 198 | img_resized = resize_functions.center_crop(img_PIL) 199 | 200 | elif resizing_selectbox == '**Center crop to power of two**': 201 | img_resized = resize_functions.center_crop_to_square_power_of_two(img_PIL) 202 | 203 | elif resizing_selectbox == '**Padding to square**': 204 | img_resized = resize_functions.padding_and_resizing_to_square_X_pixel(img_PIL, resize_to=pad_resize_to) 205 | 206 | elif resizing_selectbox == '**Color rotation in Lab color space**': 207 | 208 | img_resized = resize_functions.rotate_image_in_LAB_colorspace(img_PIL, degree=rotation_degree) 209 | 210 | else: 211 | raise NotImplementedError('wrong resizing option 
selected, not implemented error') 212 | 213 | img_name = upload_file[n].name 214 | name_images_pairs.append([img_name[:-4], img_resized]) 215 | 216 | my_bar.progress( int( (n+1)/len(upload_file) * 100) ) 217 | 218 | images = resize_functions.file_process_in_memory(name_images_pairs ) 219 | zip_file_bytes_io = io.BytesIO() 220 | 221 | with ZipFile(zip_file_bytes_io, 'w') as zip_file: 222 | for image_name, bytes_stream in images: 223 | zip_file.writestr(image_name+".png", bytes_stream.getvalue()) 224 | 225 | 226 | 227 | else: 228 | st.write('No image files found. Load images first.') 229 | 230 | 231 | enable_download = False 232 | if run and upload_file: 233 | enable_download = True 234 | 235 | if enable_download: 236 | st.download_button('Download resized images', zip_file_bytes_io, file_name='resized_or_cropped_images.zip') # Defaults to 'text/plain' 237 | st.success('Resizing finished.', icon="✅") 238 | 239 | 240 | -------------------------------------------------------------------------------- /QIP_machine_script.py: -------------------------------------------------------------------------------- 1 | #Import the required Libraries 2 | import numpy as np 3 | from PIL import Image 4 | from skimage import color 5 | import os 6 | import pandas as pd 7 | from tqdm import tqdm 8 | 9 | ### custom import 10 | from AT import balance_qips, CNN_qips, color_and_simple_qips, edge_entropy_qips, fourier_qips, fractal_dimension_qips, PHOG_qips 11 | 12 | 13 | ########################################## set image paths and results.csv ########################## 14 | 15 | ### set path to save the results csv files 16 | results_path = '/home/ralf/Documents/18_SIP_Machine/Full_Datasets_SIPs/Full_dataset_stats_new/' 17 | 18 | 19 | ### each entry is a pair of the name of the csv file and the path to the image folder, you can enter several datasets/pairs 20 | datasets = [ 21 | ['results.csv' , 'path_to_images'], 22 | ] 23 | 24 | ####################################### set wanted QIPs to 'True', otherwise "False" ######################### 25 | 26 | check_dict = {} 27 | 28 | check_dict['Image size (pixels)'] = True 29 | check_dict['Aspect ratio'] = True 30 | check_dict['RMS contrast'] = True 31 | check_dict['Luminance entropy'] = True 32 | check_dict['Complexity'] = True 33 | check_dict['Edge density'] = True 34 | check_dict['Color entropy'] = True 35 | check_dict['means RGB'] = True 36 | check_dict['means Lab'] = True 37 | check_dict['means HSV'] = True 38 | check_dict['std RGB'] = True 39 | check_dict['std Lab'] = True 40 | check_dict['std HSV'] = True 41 | check_dict['Mirror symmetry'] = True 42 | check_dict['DCM'] = True 43 | check_dict['Balance'] = True 44 | check_dict['left-right'] = True 45 | check_dict['up-down'] = True 46 | check_dict['left-right & up-down'] = True 47 | check_dict['Slope Redies'] = True 48 | check_dict['Slope Spehar'] = True 49 | check_dict['Slope Mather'] = True 50 | check_dict['Sigma'] = True 51 | check_dict['2-dimensional'] = True 52 | check_dict['3-dimensional'] = True 53 | check_dict['PHOG-based'] = True 54 | check_dict['CNN-based'] = True 55 | check_dict['Anisotropy'] = True 56 | check_dict['Homogeneity'] = True 57 | check_dict['1st-order'] = True 58 | check_dict['2nd-order'] = True 59 | check_dict['Sparseness'] = True 60 | check_dict['Variability'] = True 61 | 62 | 63 | ####################################################################################################### 64 | ##################################### run script ###################################################### 65 
| ####################################################################################################### 66 | 67 | 68 | Image.MAX_IMAGE_PIXELS = 1e10 69 | 70 | dict_of_multi_measures = { 71 | 'means RGB' : ['mean R channel', 'mean G channel' , 'mean B channel (RGB)'], 72 | 'means Lab' : ['mean L channel', 'mean a channel' , 'mean b channel (Lab)'], 73 | 'means HSV' : ['mean H channel', 'mean S channel' , 'mean V channel'], 74 | 'std RGB' : ['std R channel', 'std G channel' , 'std B channel'], 75 | 'std Lab' : ['std L channel', 'std a channel' , 'std b channel (Lab)'], 76 | 'std HSV' : ['std H channel', 'std S channel' , 'std V channel'], 77 | 'DCM' : ['DCM distance', 'DCM x position' , 'DCM y position'], 78 | } 79 | 80 | dict_full_names_QIPs = { 81 | 'left-right' : 'CNN symmetry left-right', 82 | 'up-down' : 'CNN symmetry up-down', 83 | 'left-right & up-down' : 'CNN symmetry left-right & up-down' , 84 | '2-dimensional' : '2D Fractal dimension', 85 | '3-dimensional' : '3D Fractal dimension', 86 | 'Sigma' : 'Fourier sigma', 87 | 'PHOG-based' : 'Self-similarity (PHOG)', 88 | 'CNN-based' : 'Self-similarity (CNN)', 89 | '1st-order' : '1st-order EOE', 90 | '2nd-order' : '2nd-order EOE', 91 | } 92 | 93 | def custom_round(num): 94 | ''' 95 | If a value is smaller than 1, round to 3 digits after the first nonzero digit, 96 | since the measures have very different ranges. 97 | ''' 98 | if num < 1: 99 | ### convert to scientific notation 100 | scientific_notation = "{:e}".format(num) 101 | ### get the e-value 102 | e_val = scientific_notation[-2:] 103 | return np.round(num , 3 + int(e_val)) 104 | 105 | else: 106 | return np.round(num,3) 107 | 108 | ###################################### 109 | 110 | for entry in datasets: 111 | csv_name = entry[0] 112 | image_path = entry[1] 113 | print('##########################') 114 | print(csv_name) 115 | print('##########################') 116 | 117 | 118 | 119 | 120 | 121 | ### load values for CNN kernel and bias 122 | [kernel,bias] = np.load(open("AT/bvlc_alexnet_conv1.npy", "rb"), encoding="latin1", allow_pickle=True) 123 | 124 | #progress_text = "Operation in progress. Please wait."
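    # Gather image files recursively; if the results CSV already exists, only images not yet listed in it are queued, so interrupted runs can be resumed without recomputing finished images.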
125 | 126 | file_names = [] 127 | for root, dirs, files in os.walk(image_path): 128 | for file in files: 129 | file_names.append( os.path.join(root,file) ) 130 | 131 | ## create new CSV file, if it does not already exist 132 | if not os.path.exists(results_path + csv_name): 133 | with open(results_path + csv_name, 'w') as log: 134 | log.write('img_file,') 135 | for key in check_dict: 136 | if check_dict[key]: 137 | if key in dict_of_multi_measures: 138 | 139 | for sub_key in dict_of_multi_measures[key]: 140 | log.write(sub_key + ',') 141 | else: 142 | log.write(dict_full_names_QIPs.get(key,key) + ',') 143 | log.write('\n') 144 | 145 | file_names = [] 146 | for root, dirs, files in os.walk(image_path): 147 | for file in files: 148 | file_names.append( os.path.join(root,file) ) 149 | 150 | else: 151 | df = pd.read_csv(results_path + csv_name, sep=',') 152 | exist_img_list = list(df['img_file']) 153 | 154 | file_names = [] 155 | for root, dirs, files in os.walk(image_path): 156 | for file in files: 157 | if file not in exist_img_list: 158 | file_names.append( os.path.join(root,file) ) 159 | 160 | for file in tqdm(file_names, total=len(file_names)): 161 | try: 162 | # print(' ') 163 | # print('Finished percent: ' , np.round(100* img_counter/num_images), ' Calculating image: ' , file_name) 164 | file_dir = os.path.join( image_path , file) 165 | 166 | # replace commas: 167 | file_name = file.split('/')[-1] 168 | file_name = file_name.replace(",", "_") 169 | 170 | with open(results_path + csv_name, 'a') as log: 171 | log.write(str(file_name) + ',') 172 | 173 | ### load images in different color spaces 174 | img_plain_PIL = Image.open(file_dir) 175 | img_plain_np = np.asarray(img_plain_PIL) 176 | img_rgb = np.asarray(img_plain_PIL.convert('RGB')) 177 | img_lab = color.rgb2lab(img_rgb) 178 | img_hsv = color.rgb2hsv(img_rgb) 179 | img_gray = np.asarray(Image.open(file_dir).convert('L')) ## skimage.color uses the range [0-1], PIL uses the range [0-255] for intensity 180 | 181 | 182 | # temp vals for edge entropy 183 | first_ord = None 184 | sec_ord = None 185 | edge_d = None 186 | # temp vals for CNN symmetry 187 | sym_lr = None 188 | sym_ud = None 189 | sym_lrud = None 190 | # temp vals for Fourier vals 191 | sigma = None 192 | slope = None 193 | # temp vals for PHOG 194 | self_sim = None 195 | complexity = None 196 | anisotropy = None 197 | 198 | 199 | for key in check_dict: 200 | 201 | if (key == 'means RGB') and check_dict[key]: 202 | #if gray_scale_img == False: 203 | res = color_and_simple_qips.mean_channels(img_rgb) 204 | with open(results_path + csv_name, 'a') as log: 205 | log.write(str(custom_round(res[0])) + ',') 206 | log.write(str(custom_round(res[1])) + ',') 207 | log.write(str(custom_round(res[2])) + ',') 208 | 209 | 210 | elif (key == 'means Lab') and check_dict[key]: 211 | #if gray_scale_img == False: 212 | res = color_and_simple_qips.mean_channels(img_lab) 213 | with open(results_path + csv_name, 'a') as log: 214 | log.write(str(custom_round(res[0])) + ',') 215 | log.write(str(custom_round(res[1])) + ',') 216 | log.write(str(custom_round(res[2])) + ',') 217 | 218 | 219 | elif (key == 'means HSV') and check_dict[key]: 220 | ## get circular statistic for H channel 221 | circ_mean, _ = color_and_simple_qips.circ_stats(img_hsv) 222 | # get normal mean for S and V 223 | res = color_and_simple_qips.mean_channels(img_hsv) 224 | with open(results_path + csv_name, 'a') as log: 225 | log.write(str(custom_round(circ_mean)) + ',') 226 | log.write(str(custom_round(res[1])) + ',') 227 | 
log.write(str(custom_round(res[2])) + ',') 228 | 229 | 230 | elif (key == 'std RGB') and check_dict[key]: 231 | res = color_and_simple_qips.std_channels(img_rgb) 232 | with open(results_path + csv_name, 'a') as log: 233 | log.write(str(custom_round(res[0])) + ',') 234 | log.write(str(custom_round(res[1])) + ',') 235 | log.write(str(custom_round(res[2])) + ',') 236 | 237 | 238 | elif (key == 'std Lab') and check_dict[key]: 239 | res = color_and_simple_qips.std_channels(img_lab) 240 | with open(results_path + csv_name, 'a') as log: 241 | log.write(str(custom_round(res[0])) + ',') 242 | log.write(str(custom_round(res[1])) + ',') 243 | log.write(str(custom_round(res[2])) + ',') 244 | 245 | elif (key == 'std HSV') and check_dict[key]: 246 | ## get circular statistic for H channel 247 | _ , circ_std = color_and_simple_qips.circ_stats(img_hsv) 248 | res = color_and_simple_qips.std_channels(img_hsv) 249 | with open(results_path + csv_name, 'a') as log: 250 | log.write(str(custom_round(circ_std)) + ',') 251 | log.write(str(custom_round(res[1])) + ',') 252 | log.write(str(custom_round(res[2])) + ',') 253 | 254 | 255 | elif (key == 'Color entropy') and check_dict[key]: 256 | res = color_and_simple_qips.shannonentropy_channels(img_hsv[:,:,0]) 257 | with open(results_path + csv_name, 'a') as log: 258 | log.write(str(custom_round(res)) + ',') 259 | 260 | elif ((key == '1st-order') and check_dict[key]) or ((key == '2nd-order') and check_dict[key]) or ((key == 'Edge density') and check_dict[key]): 261 | 262 | # if the first- or second-order entropy has already been calculated 263 | if first_ord is not None: 264 | with open(results_path + csv_name, 'a') as log: 265 | if key == '1st-order': 266 | log.write(str(custom_round(first_ord)) + ',') 267 | elif key == '2nd-order': 268 | log.write(str(custom_round(sec_ord)) + ',') 269 | elif key == 'Edge density': 270 | log.write(str(custom_round(edge_d)) + ',') 271 | # if not yet calculated, calculate all three at once 272 | else: 273 | res = edge_entropy_qips.do_first_and_second_order_entropy_and_edge_density (img_gray) 274 | first_ord = res[0] 275 | sec_ord = res[1] 276 | edge_d = res[2] 277 | with open(results_path + csv_name, 'a') as log: 278 | if key == '1st-order': 279 | log.write(str(custom_round(first_ord)) + ',') 280 | elif key == '2nd-order': 281 | log.write(str(custom_round(sec_ord)) + ',') 282 | elif key == 'Edge density': 283 | log.write(str(custom_round(edge_d)) + ',') 284 | 285 | elif (key == 'Luminance entropy') and check_dict[key]: 286 | res = color_and_simple_qips.shannonentropy_channels(img_lab[:,:,0]) 287 | with open(results_path + csv_name, 'a') as log: 288 | log.write(str(custom_round(res)) + ',') 289 | 290 | elif (key == 'Image size (pixels)') and check_dict[key]: 291 | res = color_and_simple_qips.image_size(img_rgb) 292 | with open(results_path + csv_name, 'a') as log: 293 | log.write(str(custom_round(res)) + ',') 294 | 295 | 296 | elif (key == 'Aspect ratio') and check_dict[key]: 297 | res = color_and_simple_qips.aspect_ratio(img_rgb) 298 | with open(results_path + csv_name, 'a') as log: 299 | log.write(str(custom_round(res)) + ',') 300 | 301 | elif ((key == 'left-right') and check_dict[key]) or ((key == 'up-down') and check_dict[key]) or ((key == 'left-right & up-down') and check_dict[key]): 302 | 303 | # if one CNN symmetry has already been calculated, the others have been calculated as well 304 | if sym_lr is not None: 305 | with open(results_path + csv_name, 'a') as log: 306 | if key == 'left-right': 307 | log.write(str(custom_round(sym_lr)) + ',') 308 | elif key == 
'up-down': 309 | log.write(str(custom_round(sym_ud)) + ',') 310 | elif key == 'left-right & up-down': 311 | log.write(str(custom_round(sym_lrud)) + ',') 312 | 313 | # if not yet calculated, calculate all symmetries together and store the results 314 | else: 315 | 316 | sym_lr,sym_ud,sym_lrud = CNN_qips.CNN_symmetry(img_rgb, kernel, bias) 317 | with open(results_path + csv_name, 'a') as log: 318 | if key == 'left-right': 319 | log.write(str(custom_round(sym_lr)) + ',') 320 | elif key == 'up-down': 321 | log.write(str(custom_round(sym_ud)) + ',') 322 | elif key == 'left-right & up-down': 323 | log.write(str(custom_round(sym_lrud)) + ',') 324 | 325 | 326 | elif (key == 'Sparseness') and check_dict[key]: 327 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 328 | _, normalized_max_pooling_map_Sparseness = CNN_qips.max_pooling (resp_scipy, patches=22 ) 329 | sparseness = CNN_qips.CNN_Variance (normalized_max_pooling_map_Sparseness , kind='sparseness' ) 330 | with open(results_path + csv_name, 'a') as log: 331 | log.write(str(custom_round(sparseness)) + ',') 332 | 333 | 334 | elif (key == 'Variability') and check_dict[key]: 335 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 336 | _, normalized_max_pooling_map_Variability = CNN_qips.max_pooling (resp_scipy, patches=12 ) 337 | variability = CNN_qips.CNN_Variance (normalized_max_pooling_map_Variability , kind='variability' ) 338 | with open(results_path + csv_name, 'a') as log: 339 | log.write(str(custom_round(variability)) + ',') 340 | 341 | elif (key == 'CNN-based') and check_dict[key]: 342 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 343 | _, normalized_max_pooling_map_8 = CNN_qips.max_pooling (resp_scipy, patches=8 ) 344 | _, normalized_max_pooling_map_1 = CNN_qips.max_pooling (resp_scipy, patches=1 ) 345 | cnn_self_sym = CNN_qips.CNN_selfsimilarity (normalized_max_pooling_map_1 , normalized_max_pooling_map_8 ) 346 | with open(results_path + csv_name, 'a') as log: 347 | log.write(str(custom_round(cnn_self_sym)) + ',') 348 | 349 | 350 | 351 | elif ((key == 'Sigma') and check_dict[key]) or ((key == 'Slope Redies') and check_dict[key]): 352 | 353 | # if one of the two Fourier measures has already been calculated 354 | if sigma is not None: 355 | with open(results_path + csv_name, 'a') as log: 356 | if key == 'Sigma': 357 | log.write(str(custom_round(sigma)) + ',') 358 | elif key == 'Slope Redies': 359 | log.write(str(custom_round(slope)) + ',') 360 | else: 361 | with open(results_path + csv_name, 'a') as log: 362 | sigma , slope = fourier_qips.fourier_redies(img_gray, bin_size = 2, cycles_min = 10, cycles_max=256) 363 | if key == 'Sigma': 364 | log.write(str(custom_round(sigma)) + ',') 365 | elif key == 'Slope Redies': 366 | log.write(str(custom_round(slope)) + ',') 367 | 368 | elif (key == 'Slope Spehar') and check_dict[key]: 369 | res = fourier_qips.fourier_slope_branka_Spehar_Isherwood(img_gray) 370 | with open(results_path + csv_name, 'a') as log: 371 | log.write(str(custom_round(res)) + ',') 372 | 373 | elif (key == 'Slope Mather') and check_dict[key]: 374 | res = fourier_qips.fourier_slope_mather(img_rgb) 375 | with open(results_path + csv_name, 'a') as log: 376 | log.write(str(custom_round(res)) + ',') 377 | 378 | elif (key == 'RMS contrast') and check_dict[key]: 379 | res = color_and_simple_qips.std_channels(img_lab)[0] 380 | with open(results_path + csv_name, 'a') as log: 381 | log.write(str(custom_round(res)) + ',') 382 | 383 | elif (key == 'Balance') and check_dict[key]: 384 | res = balance_qips.Balance(img_gray) 385 | with 
open(results_path + csv_name, 'a') as log: 386 | log.write(str(custom_round(res)) + ',') 387 | 388 | elif (key == 'DCM') and check_dict[key]: 389 | res = balance_qips.DCM(img_gray) 390 | with open(results_path + csv_name, 'a') as log: 391 | log.write(str(custom_round(res[0])) + ',') 392 | log.write(str(custom_round(res[1])) + ',') 393 | log.write(str(custom_round(res[2])) + ',') 394 | 395 | elif (key == 'Mirror symmetry') and check_dict[key]: 396 | res = balance_qips.Mirror_symmetry(img_gray) 397 | with open(results_path + csv_name, 'a') as log: 398 | log.write(str(custom_round(res)) + ',') 399 | 400 | elif (key == 'Homogeneity') and check_dict[key]: 401 | res = balance_qips.Homogeneity(img_gray) 402 | with open(results_path + csv_name, 'a') as log: 403 | log.write(str(custom_round(res)) + ',') 404 | 405 | elif (key == '2-dimensional') and check_dict[key]: 406 | res = fractal_dimension_qips.fractal_dimension_2d(img_gray) 407 | with open(results_path + csv_name, 'a') as log: 408 | log.write(str(custom_round(res)) + ',') 409 | 410 | elif (key == '3-dimensional') and check_dict[key]: 411 | res = fractal_dimension_qips.fractal_dimension_3d(img_gray) 412 | with open(results_path + csv_name, 'a') as log: 413 | log.write(str(custom_round(res)) + ',') 414 | 415 | 416 | 417 | ### PHOG 418 | elif ((key == 'PHOG-based') and check_dict[key]) or ((key == 'Complexity') and check_dict[key]) or ((key == 'Anisotropy') and check_dict[key]): 419 | 420 | # if one PHOG measure has already been calculated, the others have been calculated as well 421 | if self_sim is not None: 422 | with open(results_path + csv_name, 'a') as log: 423 | if key == 'PHOG-based': 424 | log.write(str(custom_round(self_sim)) + ',') 425 | elif key == 'Complexity': 426 | log.write(str(custom_round(complexity)) + ',') 427 | elif key == 'Anisotropy': 428 | log.write(str(custom_round(anisotropy)) + ',') 429 | 430 | else: 431 | self_sim, complexity, anisotropy = PHOG_qips.PHOGfromImage(img_rgb, section=2, bins=16, angle=360, levels=3, re=-1, sesfweight=[1,1,1] ) 432 | with open(results_path + csv_name, 'a') as log: 433 | if key == 'PHOG-based': 434 | log.write(str(custom_round(self_sim)) + ',') 435 | elif key == 'Complexity': 436 | log.write(str(custom_round(complexity)) + ',') 437 | elif key == 'Anisotropy': 438 | log.write(str(custom_round(anisotropy)) + ',') 439 | 440 | 441 | with open(results_path + csv_name, 'a') as log: 442 | log.write('\n') 443 | except Exception: 444 | print('############ ', file_name , ' an error occurred. QIPs for file not calculated!') 445 | -------------------------------------------------------------------------------- /pages/1_📊_QIP_Machine.py: -------------------------------------------------------------------------------- 1 | # Import the required libraries 2 | import streamlit as st 3 | import numpy as np 4 | from PIL import Image 5 | from skimage import color 6 | import timeit 7 | import sys 8 | import io 9 | from zipfile import ZipFile 10 | 11 | from AT import balance_qips, CNN_qips, color_and_simple_qips, edge_entropy_qips, fourier_qips, fractal_dimension_qips, PHOG_qips, AT_misc 12 | 13 | st.set_page_config(layout="wide") 14 | 15 | 16 | version = 'v1.0.2' 17 | 18 | 19 | AT_misc.build_heading(head= 'QIP Machine', 20 | notes= 'This is an interface to calculate Quantitative Image Properties (QIPs) for images.'
21 | ) 22 | 23 | 24 | st.markdown(""" """, unsafe_allow_html=True) 27 | 28 | st.markdown(""" """, unsafe_allow_html=True) 31 | 32 | 33 | st.markdown( 34 | """ 35 | 43 | """, 44 | unsafe_allow_html=True, 45 | ) 46 | 47 | upload_file = st.file_uploader('Load image files', type=['jpg','jpeg','png','tif'], accept_multiple_files=True, label_visibility="collapsed", on_change=AT_misc.callback_upload_img_files )# Check to see if a file has been uploaded 48 | 49 | 50 | if st.session_state.get('new_files_uploaded' , False): # check if upload files have been changed, and only then do checks again 51 | st.session_state.upload_files = upload_file 52 | # check for commas and large files 53 | st.session_state.commas = False 54 | for file in upload_file: 55 | if ',' in file.name: 56 | st.session_state.commas = True 57 | if sys.getsizeof(file) > 6e+6: 58 | st.session_state.large_files = True 59 | st.session_state.new_files_uploaded = False 60 | 61 | 62 | if upload_file: 63 | st.write('Examples of loaded images:') 64 | if len(upload_file) >=20: 65 | st.image(upload_file[:20], width=120 ) 66 | else: 67 | st.image(upload_file, width=120 ) 68 | 69 | if st.session_state.get('large_files', None): 70 | st.warning('Some loaded images are quite large (more than 6 MB). Consider reducing their size, as for most QIPs the calculation time increases (exponentially) with the image resolution.', icon="⚠️") 71 | 72 | if st.session_state.get('commas', None): 73 | st.warning('Commas found in image filenames. This is not recommended as commas are the delimiters in the result.csv file. Commas will be replaced with underscores in the image names in the CSV file.', icon="⚠️") 74 | 75 | 76 | 77 | zip_file_name = st.text_input('Type filename for download:', value="results.zip", help='File should have .zip extension to be recognized by standard software.' , label_visibility="visible") 78 | 79 | 80 | 81 | ###################################### 82 | 83 | dict_of_multi_measures = { 84 | 'means RGB' : ['mean R channel', 'mean G channel' , 'mean B channel (RGB)'], 85 | 'means Lab' : ['mean L channel', 'mean a channel' , 'mean b channel (Lab)'], 86 | 'means HSV' : ['mean H channel', 'mean S channel' , 'mean V channel'], 87 | 'std RGB' : ['std R channel', 'std G channel' , 'std B channel'], 88 | 'std Lab' : ['std L channel', 'std a channel' , 'std b channel (Lab)'], 89 | 'std HSV' : ['std H channel', 'std S channel' , 'std V channel'], 90 | 'DCM' : ['DCM distance', 'DCM x position' , 'DCM y position'], 91 | } 92 | 93 | dict_full_names_QIPs = { 94 | 'left-right' : 'CNN symmetry left-right', 95 | 'up-down' : 'CNN symmetry up-down', 96 | 'left-right & up-down' : 'CNN symmetry left-right & up-down' , 97 | '2-dimensional' : '2D Fractal dimension', 98 | '3-dimensional' : '3D Fractal dimension', 99 | 'Slope' : 'Fourier slope', 100 | 'Sigma' : 'Fourier sigma', 101 | 'PHOG-based' : 'Self-similarity (PHOG)', 102 | 'CNN-based' : 'Self-similarity (CNN)', 103 | '1st-order' : '1st-order EOE', 104 | '2nd-order' : '2nd-order EOE', 105 | } 106 | 107 | 108 | 109 | check_dict = st.session_state.get("check_dict", None) 110 | if upload_file: 111 | 112 | st.divider() 113 | 114 | 115 | ALL_QIPS = st.checkbox("Calculate all QIPs") 116 | 117 | 118 | with st.form('QIP Selection'): 119 | 120 | st.markdown(""" 121 | """,unsafe_allow_html=True) 126 | st.markdown('
Choose QIPs to calculate:', unsafe_allow_html=True) 127 | 128 | 129 | # Define the number of columns in the layout 130 | num_columns = 5 131 | columns = st.columns(num_columns) 132 | check_dict = {} 133 | # define each column
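# Each checkbox below stores its selection in check_dict under a short QIP key.
# When the results CSV is written, keys listed in dict_of_multi_measures expand
# into several columns (e.g. 'DCM' -> 'DCM distance', 'DCM x position',
# 'DCM y position'); all other keys map to a single column named via
# dict_full_names_QIPs.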
134 | with columns[0]: 135 | st.markdown('Image dimensions', unsafe_allow_html=True) 136 | check_dict['Image size (pixels)'] = st.checkbox('Image size' , help='Image size = width + height', value=ALL_QIPS) 137 | check_dict['Aspect ratio'] = st.checkbox('Aspect ratio' , help='Aspect ratio = width / height', value=ALL_QIPS) 138 | st.markdown('
Lightness & Complexity & Contrast', unsafe_allow_html=True) 139 | check_dict['RMS contrast'] = st.checkbox('RMS contrast', help='RMS contrast = standard deviation of the Lightness channel (Lab)', value=ALL_QIPS) 140 | check_dict['Lightness entropy'] = st.checkbox('Lightness entropy' , help='Lightness entropy = Shannon entropy of the Lightness channel (Lab)', value=ALL_QIPS) 141 | check_dict['Complexity'] = st.checkbox('Complexity', help='Complexity = mean of gradient strengths across the image (HOG method)' , value=ALL_QIPS) 142 | check_dict['Edge density'] = st.checkbox('Edge density', help='Edge density = density of edges in the image (Gabor filters)' , value=ALL_QIPS) 143 | with columns[1]: 144 | st.markdown('
Color', unsafe_allow_html=True) 145 | st.write('**Channel mean**') 146 | check_dict['means RGB'] = st.checkbox('RGB', key='mean RGB' , help='Arithmetic mean for each color channel (RGB)', value=ALL_QIPS) 147 | check_dict['means Lab'] = st.checkbox('Lab', key='mean Lab' , help='Arithmetic mean for each channel (Lab)' , value=ALL_QIPS) 148 | check_dict['means HSV'] = st.checkbox('HSV', key='mean HSV', help='Arithmetic mean for S and V channel. Circular mean for H channel.' , value=ALL_QIPS) 149 | st.write('**Channel standard deviation**') 150 | check_dict['std RGB'] = st.checkbox('RGB', key='std RGB', help='Standard deviation for each color channel (RGB)' , value=ALL_QIPS) 151 | check_dict['std Lab'] = st.checkbox('Lab', key='std LAB', help='Standard deviation for each channel (Lab)', value=ALL_QIPS) 152 | check_dict['std HSV'] = st.checkbox('HSV', key='std HSV', help='Standard deviation for S and V channel. Circular standard deviation for H channel' , value=ALL_QIPS) 153 | st.write('**Channel entropy**') 154 | check_dict['Color entropy'] = st.checkbox('Color entropy', help='Color entropy = Shannon entropy of the Hue channel (HSV)' , value=ALL_QIPS) 155 | with columns[2]: 156 | st.markdown('
Symmetry & Balance', unsafe_allow_html=True) 157 | st.write('**Pixel-based**') 158 | check_dict['Mirror symmetry'] = st.checkbox('Mirror symmetry' , help = 'Left-right symmetry along the vertical image axis', value=ALL_QIPS) 159 | check_dict['Balance'] = st.checkbox('Balance', help = 'Average symmetry of the vertical, horizontal and diagonal image axes' , value=ALL_QIPS) 160 | check_dict['DCM'] = st.checkbox('DCM', help = 'DCM = **D**eviation of the **C**enter of **M**ass from the image center' , value=ALL_QIPS) 161 | st.write('**CNN feature-based symmetry**') 162 | check_dict['left-right'] = st.checkbox('left-right', help = 'Left-right (vertical) symmetry of CNN layer feature maps', value=ALL_QIPS) 163 | check_dict['up-down'] = st.checkbox('up-down', help = 'Up-down (horizontal) symmetry of CNN layer feature maps.', value=ALL_QIPS) 164 | check_dict['left-right & up-down'] = st.checkbox('left-right & up-down', help = 'CNN symmetry between the original image and a left-right & up-down flipped image based on CNN-layer feature maps.', value=ALL_QIPS) 165 | with columns[3]: 166 | st.markdown('
Scale invariance & Self-similarity', unsafe_allow_html=True) 167 | st.write('**Fourier spectrum**') 168 | check_dict['Slope'] = st.checkbox('Slope', help = 'Slope of straight line fitted to log-log plot of Fourier power vs. spatial frequency', value=ALL_QIPS) 169 | check_dict['Sigma'] = st.checkbox('Sigma', help = 'Deviation of Fourier spectral power curve from a straight line in log-log plot', value=ALL_QIPS) 170 | st.write('**Fractal dimension**') 171 | check_dict['2-dimensional'] = st.checkbox('2-dimensional', help = '2d fractal dimension: two spatial axes for binarized image', value=ALL_QIPS) 172 | check_dict['3-dimensional'] = st.checkbox('3-dimensional', help = '3d fractal dimension: two spatial axes and a pixel intensity axis', value=ALL_QIPS) 173 | st.write('**Self-similarity**') 174 | check_dict['PHOG-based'] = st.checkbox('PHOG-based', help = 'Self-similarity based on pyramid of histograms of oriented gradients (PHOG)', value=ALL_QIPS) 175 | check_dict['CNN-based'] = st.checkbox('CNN-based', help = 'Self-similarity based on low-level features of a convolutional neural network (CNN)', value=ALL_QIPS) 176 | with columns[4]: 177 | st.markdown('
Feature distribution & Entropy', unsafe_allow_html=True) 178 | check_dict['Homogeneity'] = st.checkbox('Homogeneity', help = 'Relative Shannon entropy of black pixel frequency in binary image', value=ALL_QIPS) 179 | check_dict['Anisotropy'] = st.checkbox('Anisotropy', help ='Variance in the gradient strength across orientations (HOG method)', value=ALL_QIPS) 180 | st.write('**Edge-orientation entropy (EOE)**') 181 | check_dict['1st-order'] = st.checkbox('1st-order EOE', help = '1st-order Shannon entropy of the histogram of edge orientations across an image', value=ALL_QIPS) 182 | check_dict['2nd-order'] = st.checkbox('2nd-order EOE', help = '2nd-order Shannon entropy based on pairwise statistics of edge orientations across an image', value=ALL_QIPS) 183 | st.write('**CNN feature variance**') 184 | check_dict['Sparseness'] = st.checkbox('Sparseness', help = 'Total variance (Pa[n]) over all low-level CNN filter entries of all n ✕ n subregions of an image', value=ALL_QIPS) 185 | check_dict['Variability'] = st.checkbox('Variability', help = 'Median over the variance of entries for each CNN filter for all n ✕ n subregions of an image (Pf[n])', value=ALL_QIPS) 186 | 187 | ### always check for gray-scale and upscaled images (written as extra result columns) 188 | check_dict['gray_scale'] = True 189 | check_dict['upscaled'] = True 190 | 191 | st.form_submit_button('**Commit selection**' , on_click=AT_misc.click_sub_QIPs, args=(check_dict,), use_container_width=True) 192 | 193 | ######################################### 194 | ###### Add parameters for individual QIPs 195 | ######################################### 196 | 197 | 198 | if st.session_state.get("commit_qips", None): 199 | 200 | if check_dict['Sparseness'] or check_dict['Variability'] or check_dict['Anisotropy'] or check_dict['Complexity'] or check_dict['PHOG-based'] or check_dict['Slope'] or check_dict['Image size (pixels)']: 201 | 202 | st.divider() 203 | 204 | if check_dict['Slope']: 205 | st.markdown('
Select Type and Parameters for Fourier Slope:', unsafe_allow_html=True) 206 | slope_selectbox = st.radio( 207 | "slope_selectbox", 208 | label_visibility="collapsed", 209 | options=[ '**Redies**', 210 | '**Spehar**', 211 | '**Mather**',], 212 | 213 | horizontal=True 214 | ) 215 | 216 | if slope_selectbox == '**Redies**': 217 | lower_bound = int(st.text_input('Enter minimum cycles/img:', value="10", help=None, label_visibility="visible")) 218 | upper_bound = int(st.text_input('Enter maximum cycles/img:', value="256", help=None, label_visibility="visible")) 219 | bins = int(st.text_input('Enter width of bins:', value="2", help=None, label_visibility="visible")) 220 | 221 | st.divider() 222 | 
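# The values entered above (lower_bound, upper_bound, bins) are forwarded to
# fourier_qips.fourier_redies(img_gray, bin_size=bins, cycles_min=lower_bound,
# cycles_max=upper_bound) in the calculation loop further down; note that they
# are only defined when the Redies slope variant is selected.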
223 | if check_dict['Image size (pixels)']: 224 | st.markdown('Select Type of Image size. Default is the sum of image height and width:', unsafe_allow_html=True) 225 | img_size_selectbox = st.radio( 226 | "img_size_selectbox", 227 | label_visibility="collapsed", 228 | options=[ 'Sum of height and width', 229 | 'Product of height and width (number of pixels)', 230 | 'Image diagonal', 231 | 'Average of height and width', 232 | 'Minimum of height and width', 233 | 'Maximum of height and width'], 234 | 235 | horizontal=True 236 | ) 237 | 238 | st.divider() 239 | 240 | if check_dict['Sparseness'] or check_dict['Variability']: 241 | st.markdown('
Parameters for Sparseness and Variability:', unsafe_allow_html=True) 242 | 243 | if check_dict['Sparseness']: 244 | p22_Sparseness = int(st.text_input('Enter configuration for the Sparseness measure. How many image partitions should be used?', value="22", help=None, label_visibility="visible")) 245 | 246 | if check_dict['Variability']: 247 | p12_Variability = int(st.text_input('Enter configuration for the Variability measure. How many image partitions should be used?', value="12", help=None, label_visibility="visible")) 248 | 249 | st.divider() 250 | 
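# p22_Sparseness and p12_Variability set the number of image partitions (the
# `patches` argument of CNN_qips.max_pooling) used for the Sparseness and
# Variability QIPs in the calculation loop below; the defaults are 22 and 12.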
251 | if check_dict['Anisotropy'] or check_dict['Complexity'] or check_dict['PHOG-based']: 252 | st.markdown('Parameters for PHOG Measures (Complexity, Anisotropy or PHOG-based Self-similarity):', unsafe_allow_html=True) 253 | 254 | col1, col2 = st.columns(2) 255 | 256 | with col1: 257 | PHOG_resizing = int(st.text_input('Resize images to number of pixels (-1 = no resizing):', value="-1", help=None, label_visibility="visible")) 258 | 259 | 260 | 261 | bins = int(st.text_input('Number of Bins:', value="16", help=None, label_visibility="visible")) 262 | angle = int(st.text_input('Angle:', value="360", help=None, label_visibility="visible")) 263 | with col2: 264 | 265 | levels = st.radio( 266 | "Number of levels", 267 | #label_visibility="visib", 268 | options=[ '1', 269 | '2', 270 | '3',], 271 | horizontal=True, 272 | index=2 273 | ) 274 | 275 | col2a, col2b, col2c = st.columns(3) 276 | with col2a: 277 | weigths1 = int(st.text_input('Weights for level1:', value=1, help=None, label_visibility="visible")) 278 | with col2b: 279 | weigths2 = int(st.text_input('Weights for level2:', value=1, help=None, label_visibility="visible")) 280 | with col2c: 281 | weigths3 = int(st.text_input('Weights for level3:', value=1, help=None, label_visibility="visible")) 282 | 283 | 284 | Commit_Parameter_Selection = st.button("**Commit parameter selection**") 285 | if Commit_Parameter_Selection: 286 | st.session_state.params_submitted = Commit_Parameter_Selection 287 | else: 288 | AT_misc.click_sub_params() 289 | 290 | 291 | counter_checked_keys = 0 292 | run = st.session_state.get("run", None) 293 | if st.session_state.get("params_submitted", None): 294 | 295 | st.divider() 296 | run = st.button('**Run calculation**' ) 297 | st.session_state.run = run 298 | 299 | placeholder = st.empty() 300 | placeholder_QIP = st.empty() 301 | placeholder_remaining_time = st.empty() 302 | 303 | if run: 304 | if upload_file: 305 | ## check if at least one QIP is selected 306 | for key in check_dict: 307 | counter_checked_keys += check_dict[key] 308 | if counter_checked_keys > 0: 309 | # create the results CSV: write the column headings for the selected QIPs 310 | 311 | sep = ',' 312 | 313 | result_csv = 'sep='+sep + '\n' # denote the column separator for Excel software 314 | result_csv += 'img_file,' 315 | 316 | 317 | for key in check_dict: 318 | if check_dict[key]: 319 | if key in dict_of_multi_measures: 320 | for sub_key in dict_of_multi_measures[key]: 321 | result_csv += sub_key + ',' 322 | else: 323 | result_csv += dict_full_names_QIPs.get(key,key) + sep 324 | 325 | result_csv += 'external_color_profile_found' + sep 326 | result_csv = result_csv[:-1] + '\n' 327 | 328 | ### load values for CNN kernel and bias 329 | [kernel,bias] = np.load(open("AT/bvlc_alexnet_conv1.npy", "rb"), encoding="latin1", allow_pickle=True) 330 | 331 | #progress_text = "Operation in progress. Please wait." 332 | my_bar = st.progress(0) 333 | 334 | 335 | 336 | with st.spinner("Operation in progress. Please wait and don't refresh your browser."): 337 | 338 | num_images = len(upload_file) 339 | start = timeit.default_timer() 340 | expected_time_h = "..." 341 | expected_time_m = "..."
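# The loop below builds the results CSV in memory: one row per uploaded image,
# one or more columns per selected QIP, plus three trailing flag columns
# (gray_scale, upscaled, external_color_profile_found) matching the header
# written above. A sketch of the resulting file, with illustrative values only:
#   sep=,
#   img_file,mean R channel,mean G channel,mean B channel (RGB),gray_scale,upscaled,external_color_profile_found
#   photo_01.jpg,123.45,110.2,98.765,0,0,1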
342 | for n in range(num_images): 343 | try: 344 | 345 | file_name = upload_file[n].name 346 | 347 | if st.session_state.commas: 348 | file_name = file_name.replace(",", "_") 349 | 350 | result_csv += file_name + sep 351 | 352 | 353 | ### load images in different color spaces 354 | img_plain_PIL = Image.open(upload_file[n]) 355 | img_plain_np = np.asarray(img_plain_PIL) 356 | img_rgb = np.asarray(img_plain_PIL.convert('RGB')) 357 | img_lab = color.rgb2lab(img_rgb) 358 | img_hsv = color.rgb2hsv(img_rgb) 359 | img_gray = np.asarray(Image.open(upload_file[n]).convert('L')) ## skimage.color uses the range [0-1], PIL uses the range [0-255] for intensity 360 | 361 | 362 | # temp vals for edge entropy 363 | first_ord = None 364 | sec_ord = None 365 | edge_d = None 366 | # temp vals for CNN symmetry 367 | sym_lr = None 368 | sym_ud = None 369 | sym_lrud = None 370 | # temp vals for Fourier measures 371 | sigma = None 372 | slope = None 373 | # temp vals for PHOG 374 | self_sim = None 375 | complexity = None 376 | anisotropy = None 377 | 378 | calculated_QIP = '' 379 | for key in check_dict: 380 | 381 | if check_dict[key] and key not in ['upscaled', 'gray_scale']: 382 | calculated_QIP = key 383 | 384 | placeholder.text('Number of completed images: ' + str(n) + ' Number of remaining images: ' + str(num_images - n) ) 385 | placeholder_QIP.text('Calculating image: ' + file_name + ' Calculating QIP: ' + dict_full_names_QIPs.get(calculated_QIP, calculated_QIP)) 386 | placeholder_remaining_time.text( 'Remaining time: ' + expected_time_h + ' hours and ' + expected_time_m + ' minutes.' ) 387 | 388 | 389 | if (key == 'means RGB') and check_dict[key]: 390 | res = color_and_simple_qips.mean_channels(img_rgb) 391 | result_csv += str(AT_misc.custom_round(res[0])) + sep 392 | result_csv += str(AT_misc.custom_round(res[1])) + sep 393 | result_csv += str(AT_misc.custom_round(res[2])) + sep 394 | 395 | 396 | elif (key == 'means Lab') and check_dict[key]: 397 | res = color_and_simple_qips.mean_channels(img_lab) 398 | result_csv += str(AT_misc.custom_round(res[0])) + sep 399 | result_csv += str(AT_misc.custom_round(res[1])) + sep 400 | result_csv += str(AT_misc.custom_round(res[2])) + sep 401 | 402 | 403 | elif (key == 'means HSV') and check_dict[key]: 404 | ## get circular statistic for H channel 405 | circ_mean, _ = color_and_simple_qips.circ_stats(img_hsv) 406 | # get normal mean for S and V 407 | res = color_and_simple_qips.mean_channels(img_hsv) 408 | result_csv += str(AT_misc.custom_round(circ_mean)) + sep 409 | result_csv += str(AT_misc.custom_round(res[1])) + sep 410 | result_csv += str(AT_misc.custom_round(res[2])) + sep 411 | 412 | 413 | elif (key == 'std RGB') and check_dict[key]: 414 | res = color_and_simple_qips.std_channels(img_rgb) 415 | result_csv += str(AT_misc.custom_round(res[0])) + sep 416 | result_csv += str(AT_misc.custom_round(res[1])) + sep 417 | result_csv += str(AT_misc.custom_round(res[2])) + sep 418 | 419 | elif (key == 'std Lab') and check_dict[key]: 420 | res = color_and_simple_qips.std_channels(img_lab) 421 | result_csv += str(AT_misc.custom_round(res[0])) + sep 422 | result_csv += str(AT_misc.custom_round(res[1])) + sep 423 | result_csv += str(AT_misc.custom_round(res[2])) + sep 424 | 425 | elif (key == 'std HSV') and check_dict[key]: 426 | ## get circular statistic for H channel 427 | _ , circ_std = color_and_simple_qips.circ_stats(img_hsv) 428 | res = color_and_simple_qips.std_channels(img_hsv) 429 | result_csv += str(AT_misc.custom_round(circ_std)) + sep 430 | result_csv += 
str(AT_misc.custom_round(res[1])) + sep 431 | result_csv += str(AT_misc.custom_round(res[2])) + sep 432 | 433 | 434 | elif (key == 'Color entropy') and check_dict[key]: 435 | res = color_and_simple_qips.shannonentropy_channels(img_hsv[:,:,0]) 436 | result_csv += str(AT_misc.custom_round(res)) + sep 437 | 438 | 439 | elif ((key == '1st-order' ) and check_dict['1st-order']) or ((key == '2nd-order' ) and check_dict['2nd-order']) or ((key == 'Edge density' ) and check_dict['Edge density']): 440 | 441 | # if the first- or second-order entropy has already been calculated 442 | if first_ord is not None: 443 | 444 | if key == '1st-order': 445 | result_csv += str(AT_misc.custom_round(first_ord)) + sep 446 | elif key == '2nd-order': 447 | result_csv += str(AT_misc.custom_round(sec_ord)) + sep 448 | elif key == 'Edge density': 449 | result_csv += str(AT_misc.custom_round(edge_d)) + sep 450 | 451 | # if not yet calculated, calculate all three at once 452 | else: 453 | res = edge_entropy_qips.do_first_and_second_order_entropy_and_edge_density (img_gray) 454 | first_ord = res[0] 455 | sec_ord = res[1] 456 | edge_d = res[2] 457 | if key == '1st-order': 458 | result_csv += str(AT_misc.custom_round(first_ord)) + sep 459 | elif key == '2nd-order': 460 | result_csv += str(AT_misc.custom_round(sec_ord)) + sep 461 | elif key == 'Edge density': 462 | result_csv += str(AT_misc.custom_round(edge_d)) + sep 463 | 464 | elif (key == 'Lightness entropy') and check_dict[key]: 465 | res = color_and_simple_qips.shannonentropy_channels(img_lab[:,:,0]) 466 | result_csv += str(AT_misc.custom_round(res)) + sep 467 | 468 | 469 | elif (key == 'Image size (pixels)') and check_dict[key]: 470 | 471 | if img_size_selectbox == 'Sum of height and width': 472 | res = color_and_simple_qips.image_size(img_rgb, kind = 'sum') 473 | result_csv += str(AT_misc.custom_round(res)) + sep 474 | 475 | elif img_size_selectbox == 'Product of height and width (number of pixels)': 476 | res = color_and_simple_qips.image_size(img_rgb, kind = 'num_pixel') 477 | result_csv += str(AT_misc.custom_round(res)) + sep 478 | 479 | elif img_size_selectbox == 'Image diagonal': 480 | res = color_and_simple_qips.image_size(img_rgb, kind = 'diagonal') 481 | result_csv += str(AT_misc.custom_round(res)) + sep 482 | 483 | elif img_size_selectbox == 'Average of height and width': 484 | res = color_and_simple_qips.image_size(img_rgb, kind = 'average') 485 | result_csv += str(AT_misc.custom_round(res)) + sep 486 | 487 | elif img_size_selectbox == 'Minimum of height and width': 488 | res = color_and_simple_qips.image_size(img_rgb, kind = 'minimum') 489 | result_csv += str(AT_misc.custom_round(res)) + sep 490 | 491 | elif img_size_selectbox == 'Maximum of height and width': 492 | res = color_and_simple_qips.image_size(img_rgb, kind = 'maximum') 493 | result_csv += str(AT_misc.custom_round(res)) + sep 494 | 495 | elif (key == 'Aspect ratio') and check_dict[key]: 496 | res = color_and_simple_qips.aspect_ratio(img_rgb) 497 | result_csv += str(AT_misc.custom_round(res)) + sep 498 | 499 | elif ((key == 'left-right') and check_dict[key]) or ((key == 'up-down') and check_dict[key]) or ((key == 'left-right & up-down') and check_dict[key]): 500 | 501 | 502 | # if one CNN symmetry has already been calculated, the others have been calculated as well 503 | if sym_lr is not None: 504 | 505 | if key == 'left-right': 506 | result_csv += str(AT_misc.custom_round(sym_lr)) + sep 507 | elif key == 'up-down': 508 | result_csv += str(AT_misc.custom_round(sym_ud)) + sep 509 | elif key == 'left-right & up-down': 510 | 
result_csv += str(AT_misc.custom_round(sym_lrud)) + sep 511 | 512 | # if not yet calculated, calculate all symmetries together and store the results 513 | else: 514 | 515 | sym_lr,sym_ud,sym_lrud = CNN_qips.CNN_symmetry(img_rgb, kernel, bias) 516 | if key == 'left-right': 517 | result_csv += str(AT_misc.custom_round(sym_lr)) + sep 518 | elif key == 'up-down': 519 | result_csv += str(AT_misc.custom_round(sym_ud)) + sep 520 | elif key == 'left-right & up-down': 521 | result_csv += str(AT_misc.custom_round(sym_lrud)) + sep 522 | 523 | 524 | elif (key == 'Sparseness') and check_dict[key]: 525 | 526 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 527 | _, normalized_max_pooling_map_Sparseness = CNN_qips.max_pooling (resp_scipy, patches=p22_Sparseness ) 528 | sparseness = CNN_qips.CNN_Variance (normalized_max_pooling_map_Sparseness , kind='sparseness' ) 529 | result_csv += str(AT_misc.custom_round(sparseness)) + sep 530 | 531 | 532 | elif (key == 'Variability') and check_dict[key]: 533 | 534 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 535 | _, normalized_max_pooling_map_Variability = CNN_qips.max_pooling (resp_scipy, patches=p12_Variability ) 536 | variability = CNN_qips.CNN_Variance (normalized_max_pooling_map_Variability , kind='variability' ) 537 | result_csv += str(AT_misc.custom_round(variability)) + sep 538 | 539 | elif (key == 'CNN-based') and check_dict[key]: 540 | 541 | resp_scipy = CNN_qips.conv2d(img_rgb, kernel, bias) 542 | _, normalized_max_pooling_map_8 = CNN_qips.max_pooling (resp_scipy, patches=8 ) 543 | _, normalized_max_pooling_map_1 = CNN_qips.max_pooling (resp_scipy, patches=1 ) 544 | cnn_self_sym = CNN_qips.CNN_selfsimilarity (normalized_max_pooling_map_1 , normalized_max_pooling_map_8 ) 545 | 546 | result_csv += str(AT_misc.custom_round(cnn_self_sym)) + sep 547 | 548 | 549 | elif ((key == 'Slope') and check_dict[key]): 550 | 551 | if slope_selectbox == '**Redies**': 552 | 553 | _, slope = fourier_qips.fourier_redies(img_gray, bin_size = bins, cycles_min = lower_bound, cycles_max=upper_bound) 554 | result_csv += str(AT_misc.custom_round(slope)) + sep 555 | 556 | elif slope_selectbox == '**Spehar**': 557 | slope = fourier_qips.fourier_slope_branka_Spehar_Isherwood(img_gray) 558 | 559 | result_csv += str(AT_misc.custom_round(slope)) + sep 560 | 561 | elif slope_selectbox == '**Mather**': 562 | slope = fourier_qips.fourier_slope_mather(img_rgb) 563 | result_csv += str(AT_misc.custom_round(slope)) + sep 564 | 565 | elif ((key == 'Sigma') and check_dict[key]): 566 | sigma, _ = fourier_qips.fourier_redies(img_gray, bin_size = 2, cycles_min = 10, cycles_max=256) 567 | result_csv += str(AT_misc.custom_round(sigma)) + sep 568 | 569 | elif (key == 'RMS contrast') and check_dict[key]: 570 | res = color_and_simple_qips.std_channels(img_lab)[0] 571 | result_csv += str(AT_misc.custom_round(res)) + sep 572 | 573 | 574 | elif (key == 'Balance') and check_dict[key]: 575 | res = balance_qips.Balance(img_gray) 576 | result_csv += str(AT_misc.custom_round(res)) + sep 577 | 578 | elif (key == 'DCM') and check_dict[key]: 579 | res = balance_qips.DCM(img_gray) 580 | result_csv += str(AT_misc.custom_round(res[0])) + sep 581 | result_csv += str(AT_misc.custom_round(res[1])) + sep 582 | result_csv += str(AT_misc.custom_round(res[2])) + sep 583 | 584 | elif (key == 'Mirror symmetry') and check_dict[key]: 585 | res = balance_qips.Mirror_symmetry(img_gray) 586 | result_csv += str(AT_misc.custom_round(res)) + sep 587 | 588 | elif (key == 'Homogeneity') and check_dict[key]: 589 | res = 
balance_qips.Homogeneity(img_gray) 590 | result_csv += str(AT_misc.custom_round(res)) + sep 591 | 592 | elif (key == '2-dimensional') and check_dict[key]: 593 | res = fractal_dimension_qips.fractal_dimension_2d(img_gray) 594 | result_csv += str(AT_misc.custom_round(res)) + sep 595 | 596 | 597 | elif (key == '3-dimensional') and check_dict[key]: 598 | res = fractal_dimension_qips.fractal_dimension_3d(img_gray) 599 | result_csv += str(AT_misc.custom_round(res)) + sep 600 | 601 | 602 | ### PHOG 603 | elif ((key == 'PHOG-based') and check_dict[key]) or ((key == 'Complexity') and check_dict[key]) or ((key == 'Anisotropy') and check_dict[key]): 604 | 605 | # if one PHOG measure has already been calculated, the others have been calculated as well 606 | if self_sim is not None: 607 | if key == 'PHOG-based': 608 | result_csv += str(AT_misc.custom_round(self_sim)) + sep 609 | elif key == 'Complexity': 610 | result_csv += str(AT_misc.custom_round(complexity)) + sep 611 | elif key == 'Anisotropy': 612 | result_csv += str(AT_misc.custom_round(anisotropy)) + sep 613 | 614 | else: 615 | self_sim, complexity, anisotropy = PHOG_qips.PHOGfromImage(img_rgb, section=2, bins=bins, angle=angle, levels=int(levels), re=PHOG_resizing, sesfweight=[weigths1,weigths2,weigths3] ) 616 | if key == 'PHOG-based': 617 | result_csv += str(AT_misc.custom_round(self_sim)) + sep 618 | elif key == 'Complexity': 619 | result_csv += str(AT_misc.custom_round(complexity)) + sep 620 | elif key == 'Anisotropy': 621 | result_csv += str(AT_misc.custom_round(anisotropy)) + sep 622 | 623 | 624 | 625 | 626 | ### estimate the remaining time 627 | 628 | if n < 3: 629 | expected_time_h = "..." 630 | expected_time_m = "..." 631 | else: 632 | stop = timeit.default_timer() 633 | 634 | temp_time = int( np.round( ((stop - start)/n) * (num_images - n)/60 )) # mean seconds per image so far, extrapolated over the remaining images and converted to minutes 635 | 636 | expected_time_h = str( temp_time // 60 ) 637 | expected_time_m = str( temp_time % 60 ) 638 | 639 | 640 | 641 | 642 | 643 | ### outside of key loop 644 | 645 | ### check if images are grayscale or upscaled; these flags always go in the last columns of the results 646 | # check for grayscale with the QIP "Color entropy" 647 | color_check = color_and_simple_qips.shannonentropy_channels(img_hsv[:,:,0]) 648 | if color_check < 0.01: 649 | result_csv += '1' + sep 650 | else: 651 | result_csv += '0' + sep 652 | 653 | ### check for upscaled images 654 | # PHOG scaling 655 | upscaled = False 656 | if check_dict['PHOG-based'] or check_dict['Complexity'] or check_dict['Anisotropy']: 657 | if AT_misc.check_upscaling_img(img_plain_PIL, res_type='PHOG', PHOG_Pixel = PHOG_resizing): 658 | upscaled = True 659 | # CNN scaling 660 | if check_dict['left-right'] or check_dict['up-down'] or check_dict['left-right & up-down'] or check_dict['Sparseness'] or check_dict['Variability'] or check_dict['CNN-based']: 661 | if AT_misc.check_upscaling_img(img_plain_PIL, res_type='CNN'): 662 | upscaled = True 663 | 664 | # EOE scaling 665 | if check_dict['1st-order'] or check_dict['2nd-order'] or check_dict['Edge density']: 666 | if AT_misc.check_upscaling_img(img_plain_PIL, res_type='EOE'): 667 | upscaled = True 668 | 669 | # Fourier scaling 670 | if check_dict['Sigma'] or (check_dict['Slope'] and (slope_selectbox == '**Redies**')): 671 | if AT_misc.check_upscaling_img(img_plain_PIL, res_type='Fourier'): 672 | upscaled = True 673 | 674 | if upscaled: 675 | result_csv += '1' + sep 676 | else: 677 | result_csv += '0' + sep 678 | 679 | ### check if images have a color profile 680 | if 
img_plain_PIL.info.get("icc_profile"): 681 | st.session_state.color_profile = True 682 | result_csv += '1' + sep 683 | else: # no (or empty) ICC profile 684 | result_csv += '0' + sep 685 | 686 | 687 | 688 | ## finish the line in result.csv 689 | result_csv += '\n' 690 | 691 | my_bar.progress( int( (n+1)/len(upload_file) * 100) ) 692 | except Exception: 693 | 694 | file_name = upload_file[n].name 695 | 696 | print('An error occurred', file_name) 697 | 698 | if st.session_state.commas: 699 | file_name = file_name.replace(",", "_") 700 | 701 | result_csv += file_name + sep 702 | result_csv += 'This image could not be processed. Check image properties (image may be truncated)' 703 | result_csv += '\n' 704 | 705 | 706 | placeholder.text('') 707 | 708 | else: 709 | st.write('Select QIP(s) to compute first.') 710 | else: 711 | st.write('No image files found. Load images first.') 712 | 713 | enable_download = False 714 | if run and upload_file and (counter_checked_keys>0): 715 | enable_download = True 716 | 717 | ### write the used QIP parameters to the info CSV 718 | params_vers_csv = 'Aesthetics Toolbox version used:' + sep + version + '\n' 719 | if check_dict['Slope']: 720 | params_vers_csv += 'Fourier Slope Type:' + sep + slope_selectbox + '\n' 721 | if slope_selectbox == '**Redies**': 722 | params_vers_csv += 'Fourier Slope Redies min cycles:' + sep + str(lower_bound) + '\n' 723 | params_vers_csv += 'Fourier Slope Redies max cycles:' + sep + str(upper_bound) + '\n' 724 | params_vers_csv += 'Fourier Slope Redies width of bins:' + sep + str(bins) + '\n' 725 | if check_dict['Sparseness']: 726 | params_vers_csv += 'Parameter for Sparseness:' + sep + str(p22_Sparseness) + '\n' 727 | if check_dict['Variability']: 728 | params_vers_csv += 'Parameter for Variability:' + sep + str(p12_Variability) + '\n' 729 | if check_dict['PHOG-based'] or check_dict['Complexity'] or check_dict['Anisotropy']: 730 | params_vers_csv += 'Parameter for PHOG: Resize images to number of pixels:' + sep + str(PHOG_resizing) + '\n' 731 | params_vers_csv += 'Parameter for PHOG: Number of bins:' + sep + str(bins) + '\n' 732 | params_vers_csv += 'Parameter for PHOG: Range of angles:' + sep + str(angle) + '\n' 733 | params_vers_csv += 'Parameter for PHOG: Number of levels:' + sep + str(levels) + '\n' 734 | params_vers_csv += 'Parameter for PHOG: Weight Level 1:' + sep + str(weigths1) + '\n' 735 | params_vers_csv += 'Parameter for PHOG: Weight Level 2:' + sep + str(weigths2) + '\n' 736 | params_vers_csv += 'Parameter for PHOG: Weight Level 3:' + sep + str(weigths3) + '\n' 737 | if check_dict['Image size (pixels)']: 738 | params_vers_csv += 'Type of Image size:' + sep + str(img_size_selectbox) + '\n' 739 | 740 | 741 | zip_file_bytes_io = io.BytesIO() 742 | with ZipFile(zip_file_bytes_io, 'w') as zip_file: 743 | zip_file.writestr('QIP_results.csv', result_csv) 744 | zip_file.writestr('QIP_parameters_used_and_Toolbox_version.csv', params_vers_csv) 745 | 746 | if enable_download: 747 | if st.session_state.get('color_profile', None): 748 | st.warning('Some images have specific color profiles (e.g. Photoshop RGB or similar). Make sure that all your images have the same color profile as this may affect the QIP results.', icon="⚠️") 749 | st.success('Calculations finished. 
A zip file with the calculated QIPs and used parameters is ready for download.', icon="✅") 750 | st.download_button('Download Results', file_name=zip_file_name, data=zip_file_bytes_io) 751 | 752 | --------------------------------------------------------------------------------