├── .gitignore
├── samples
│   ├── erich
│   │   └── color_cal
│   ├── kirito_new
│   │   └── color_cal
│   ├── kirito_old
│   │   └── color_cal
│   └── kirito_new_like_old
│       └── color_cal
├── .roo
│   └── rules
│       └── 01-hello.md
├── README.md
├── generate_cal.py
├── analyze_cal.py
├── adapt.py
├── simulate_correction.py
├── WORK.md
└── gl-renderer-src
    └── upstream.c

/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
--------------------------------------------------------------------------------
/samples/erich/color_cal:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nelsonjchen/color_cal_re/master/samples/erich/color_cal
--------------------------------------------------------------------------------
/samples/kirito_new/color_cal:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nelsonjchen/color_cal_re/master/samples/kirito_new/color_cal
--------------------------------------------------------------------------------
/samples/kirito_old/color_cal:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nelsonjchen/color_cal_re/master/samples/kirito_old/color_cal
--------------------------------------------------------------------------------
/samples/kirito_new_like_old/color_cal:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nelsonjchen/color_cal_re/master/samples/kirito_new_like_old/color_cal
--------------------------------------------------------------------------------
/.roo/rules/01-hello.md:
--------------------------------------------------------------------------------
1 | You are a reverse engineer.
2 |
3 | See the notes in [README.md](README.md).
4 |
5 | Use Python to test your findings and try to reverse engineer the format. Please feel free to record your findings in `WORK.md` and refer back to them as you go.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # comma Color Calibration Reverse Engineering
2 |
3 | > [!NOTE]
4 | > This repo was discussed and produced in this thread on [comma's Discord](https://discord.comma.ai): https://discord.com/channels/469524606043160576/1354453342000255199
5 |
6 | comma devices have a `color_cal` file in `/persist/comma`. It is a binary file that holds the color calibration data for the display.
7 |
8 | It is read by `weston`, which in AGNOS (the operating system used by comma devices) is currently a closed-source fork of the upstream `weston` compositor.
9 |
10 | The format is undocumented. This repository contains a reverse engineering effort to document the format and provide tools for working with the `color_cal` file.
11 |
12 | The goal is to understand the format of the `color_cal` file so that a custom color calibration file can be created for the display in the future.
13 |
14 | Much of this work was done by Roo Code with Gemini 2 backing it. See the rules files for the prompt.
15 |
16 | ## Notes
17 |
18 | Repair displays purchased directly from [comma's shop](https://comma.ai/shop) come with this sticker.
19 |
20 | ![Image](https://github.com/user-attachments/assets/74c5acc8-50a3-4e76-96f9-dbd0521a09c0)
21 |
22 | It has a QR code which appears to contain the contents of the `color_cal` file in hexadecimal format:
23 |
24 | 4b3d9e388b29b1184333273a9f2645320032c63b003cf53d92430000000024
25 |
26 | The following text is on the sticker as well, next to the QR code:
27 |
28 | ```
29 | Gamma: 1.32
30 | CCM:
31 | 0.58 0.04 0.
32 | 0.23 0.77 0.03
33 | 0.2 0.19 0.97
34 | WB gains:
35 | 1.00 1.49 3.78
36 |
37 | 2025-03-10 09:24:40
38 | ```
39 |
40 | ### Samples
41 |
42 | Erich provided a sample of the `color_cal` file from his c3x. It is located in `samples/erich/color_cal`. This is from a finished c3x, so there is no sticker data to correlate it with.
43 |
44 | #### gl-renderer-src
45 |
46 | `gl-renderer.so` contains the log strings for the color calibration code added by comma's modifications. It is linked by `weston`.
47 |
48 | `gl-renderer-src/comma-modified-decompiled.c` is comma's modified version, decompiled with Ghidra's decompiler.
49 |
50 | `gl-renderer-src/upstream.c` is the upstream version of the file from [weston's project](https://gitlab.freedesktop.org/wayland/weston/-/raw/master/libweston/renderer-gl/gl-renderer.c). It is unknown how closely this matches the version used in AGNOS; it is included for reference.
51 |
--------------------------------------------------------------------------------
/generate_cal.py:
--------------------------------------------------------------------------------
1 | import struct
2 | import sys
3 | import binascii
4 | import os
5 |
6 | def generate_color_cal(output_path, gamma, ccm_values, wb_gains):
7 |     """
8 |     Generates a 31-byte color_cal file from calibration values.
9 |
10 |     Args:
11 |         output_path: Path to save the generated binary file.
12 |         gamma: Float gamma value.
13 |         ccm_values: List of 9 float CCM values (row-major).
14 |         wb_gains: List of 3 float WB gains [R, G, B].
15 | """ 16 | if len(ccm_values) != 9: 17 | raise ValueError("ccm_values must contain exactly 9 floats.") 18 | if len(wb_gains) != 3: 19 | raise ValueError("wb_gains must contain exactly 3 floats.") 20 | 21 | all_values = [gamma] + ccm_values + wb_gains 22 | 23 | if len(all_values) != 13: 24 | # This should not happen with the checks above, but as a safeguard 25 | raise ValueError("Internal error: Expected 13 values total.") 26 | 27 | try: 28 | # Create the output directory if it doesn't exist 29 | output_dir = os.path.dirname(output_path) 30 | if output_dir: 31 | os.makedirs(output_dir, exist_ok=True) 32 | 33 | # Pack the 13 floats as little-endian half-precision floats 34 | packed_data = struct.pack('<13e', *all_values) # Use '<' for little-endian 35 | 36 | # Add the 5 ignored bytes (using null bytes as a default) 37 | # The actual value doesn't matter to weston based on decompiled code 38 | ignored_bytes = b'\x00\x00\x00\x00\x00' 39 | final_data = packed_data + ignored_bytes 40 | 41 | if len(final_data) != 31: 42 | raise RuntimeError(f"Internal error: Generated data length is {len(final_data)}, expected 31.") 43 | 44 | with open(output_path, 'wb') as f: 45 | f.write(final_data) 46 | 47 | print(f"Successfully generated {output_path} ({len(final_data)} bytes)") 48 | print(f"Hex: {binascii.hexlify(final_data).decode()}") 49 | 50 | except struct.error as e: 51 | print(f"Error packing data: {e}") 52 | except IOError as e: 53 | print(f"Error writing file: {e}") 54 | except Exception as e: 55 | print(f"An unexpected error occurred: {e}") 56 | 57 | if __name__ == "__main__": 58 | # --- Generate Identity Calibration File --- 59 | print("Generating identity color_cal file (identity.bin)...") 60 | identity_gamma = 1.0 61 | identity_ccm = [1.0, 0.0, 0.0, 62 | 0.0, 1.0, 0.0, 63 | 0.0, 0.0, 1.0] 64 | identity_wb = [1.0, 1.0, 1.0] 65 | generate_color_cal("identity.bin", identity_gamma, identity_ccm, identity_wb) 66 | 67 | # --- Example: Generate file from Erich's sample values (for verification) --- 68 | # print("\nGenerating file from Erich's sample values (erich_regen.bin)...") 69 | # erich_gamma = 0.9399 70 | # erich_ccm = [0.6777, 0.0483, 0.0100, 0.2363, 0.8604, 0.0364, 0.0860, 0.0915, 0.9536] 71 | # erich_wb = [1.0000, 1.1211, 2.0840] 72 | # generate_color_cal("erich_regen.bin", erich_gamma, erich_ccm, erich_wb) 73 | 74 | # --- How to use for custom values --- 75 | # You would call generate_color_cal() with your desired values, e.g.: 76 | # my_gamma = 1.2 77 | # my_ccm = [ ... your 9 values ... ] 78 | # my_wb = [ ... your 3 values ... ] 79 | # generate_color_cal("my_custom_cal.bin", my_gamma, my_ccm, my_wb) -------------------------------------------------------------------------------- /analyze_cal.py: -------------------------------------------------------------------------------- 1 | import binascii 2 | import struct 3 | import sys 4 | import os 5 | 6 | def analyze_color_cal(file_path): 7 | """Analyzes a color_cal binary file.""" 8 | if not os.path.exists(file_path): 9 | print(f"Error: File not found at {file_path}") 10 | return 11 | 12 | print(f"--- Analyzing file: {file_path} ---") 13 | try: 14 | with open(file_path, 'rb') as f: 15 | binary_data = f.read() 16 | except IOError as e: 17 | print(f"Error reading file: {e}") 18 | return 19 | 20 | file_size = len(binary_data) 21 | print(f"File Size: {file_size} bytes") 22 | print(f"Binary Data (hex): {binascii.hexlify(binary_data).decode()}") 23 | 24 | if file_size != 31: 25 | print(f"\nWarning: Expected file size of 31 bytes, but got {file_size}. 
Analysis assumes 31-byte structure.") 26 | # Continue analysis, but results might be unreliable if size differs 27 | 28 | # --- Analysis Logic --- 29 | num_expected_halfs = 13 30 | bytes_for_halfs = num_expected_halfs * 2 # 26 bytes 31 | 32 | print(f"\nAttempting to unpack first {bytes_for_halfs} bytes as {num_expected_halfs} half-precision floats (little-endian, format 'e'):") 33 | if file_size >= bytes_for_halfs: 34 | try: 35 | calibration_data_bytes = binary_data[:bytes_for_halfs] 36 | half_floats = struct.unpack(f'<{num_expected_halfs}e', calibration_data_bytes) 37 | 38 | print("Unpacked Calibration Values (Half-Floats):") 39 | gamma = half_floats[0] 40 | ccm = list(half_floats[1:10]) # 9 values 41 | wb_gains = list(half_floats[10:13]) # 3 values 42 | 43 | print(f" Gamma: {gamma:.4f}") 44 | print(" CCM (Color Correction Matrix):") 45 | print(f" [{ccm[0]:.4f}, {ccm[1]:.4f}, {ccm[2]:.4f}]") 46 | print(f" [{ccm[3]:.4f}, {ccm[4]:.4f}, {ccm[5]:.4f}]") 47 | print(f" [{ccm[6]:.4f}, {ccm[7]:.4f}, {ccm[8]:.4f}]") 48 | print(" WB Gains (White Balance):") 49 | print(f" [R={wb_gains[0]:.4f}, G={wb_gains[1]:.4f}, B={wb_gains[2]:.4f}]") 50 | 51 | except struct.error as e: 52 | print(f"Could not unpack as half-floats: {e}") 53 | except Exception as e: 54 | print(f"An error occurred during half-float unpacking: {e}") 55 | else: 56 | print(f"Binary data is too short ({file_size} bytes) to unpack {num_expected_halfs} half-floats.") 57 | 58 | # Analyze remaining bytes 59 | if file_size > bytes_for_halfs: 60 | remaining_bytes = binary_data[bytes_for_halfs:] 61 | num_remaining = len(remaining_bytes) 62 | print(f"\nRemaining {num_remaining} bytes: {remaining_bytes}") 63 | print(f"Hex representation: {binascii.hexlify(remaining_bytes).decode()}") 64 | 65 | # Checksum hypothesis test (only if file size is exactly 31) 66 | if file_size == 31 and num_remaining == 5: 67 | calculated_checksum = sum(calibration_data_bytes) % 256 68 | last_byte = remaining_bytes[-1] 69 | print(f"\nChecksum Test (Simple Sum Mod 256):") 70 | print(f" Sum of first {bytes_for_halfs} bytes: {sum(calibration_data_bytes)}") 71 | print(f" Sum mod 256: {calculated_checksum}") 72 | print(f" Last byte of data: {last_byte} (0x{last_byte:02x})") 73 | if calculated_checksum == last_byte: 74 | print(" Checksum matches!") 75 | else: 76 | print(" Checksum does NOT match.") 77 | elif file_size == 31: # Should have 5 remaining bytes if size is 31 78 | print("\nCould not perform checksum test: Incorrect number of remaining bytes for a 31-byte file.") 79 | elif file_size == bytes_for_halfs: 80 | print("\nNo remaining bytes after unpacking floats.") 81 | else: # file_size < bytes_for_halfs 82 | print("\nNo remaining bytes to analyze (file too short).") 83 | 84 | print(f"\n--- Analysis complete for: {file_path} ---") 85 | 86 | 87 | if __name__ == "__main__": 88 | if len(sys.argv) != 2: 89 | print("Usage: python analyze_cal.py ") 90 | sys.exit(1) 91 | 92 | input_file = sys.argv[1] 93 | analyze_color_cal(input_file) -------------------------------------------------------------------------------- /adapt.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | from simulate_correction import read_calibration_values_from_file 4 | from generate_cal import generate_color_cal 5 | 6 | def adapt_calibration(old_cal_path, new_cal_path, output_path): 7 | """ 8 | Creates a new calibration file to make a new screen look like an old screen. 
9 | """ 10 | print(f"Reading old calibration file: {old_cal_path}") 11 | old_cal = read_calibration_values_from_file(old_cal_path) 12 | if not old_cal: 13 | sys.exit(1) 14 | gamma_old, ccm_old, wb_gains_old = old_cal 15 | 16 | print(f"Reading new calibration file: {new_cal_path}") 17 | new_cal = read_calibration_values_from_file(new_cal_path) 18 | if not new_cal: 19 | sys.exit(1) 20 | gamma_new, ccm_new, wb_gains_new = new_cal 21 | 22 | # --- Adaptation Logic --- 23 | 24 | # 1. Adapt Gamma 25 | # The gamma values are applied as powers, so they multiply in the exponent. 26 | # To go from new to old: adapt_gamma / 2.2 = (gamma_old / 2.2) / (gamma_new / 2.2) 27 | # This simplifies to adapt_gamma = gamma_old / gamma_new, but since the 28 | # shader uses gamma_cal / 2.2, and we want the final exponent to be 29 | # (gamma_old / 2.2), we must combine the operations. 30 | # Let T(C) = C^(g/2.2). We want T_adapt(T_new(C)) = T_old(C). 31 | # ( (C^(g_new/2.2))^(g_adapt/2.2) ) = C^(g_old/2.2) 32 | # g_new * g_adapt / (2.2^2) = g_old / 2.2 33 | # g_adapt = g_old * 2.2 / g_new 34 | # This seems too complex. Let's reconsider the transformation chain. 35 | # The net effect is a power transformation. To combine them, we multiply the gamma values. 36 | # To go from native_new -> identity -> native_old, the gamma factor is combined. 37 | # Total_gamma = gamma_old * (1 / gamma_new) * gamma_new_screen_cal 38 | # We are creating the calibration file, so we just need the final values. 39 | 40 | # Let's simplify: The transformation is a chain. 41 | # Corrected = T_old(Input) 42 | # We want NewCorrected = T_adapt(T_new(Input)) to be Corrected. 43 | # T_adapt(T_new(Input)) = T_old(Input) 44 | # T_adapt = T_old * T_new^-1 45 | 46 | # For gamma, the exponents add. So g_adapt = g_old - g_new is wrong. 47 | # The powers multiply. So gamma_adapt = gamma_old / gamma_new is also wrong. 48 | # The actual operation is pow(color, gamma_cal / 2.2). 49 | # To combine transformations, we multiply the gamma values. 50 | # So, gamma_adapt = gamma_old 51 | 52 | # Let's re-verify the shader logic from WORK.md 53 | # d. Apply Gamma correction and forward EOTF (`pow(..., gamma_cal / 2.2)`). 54 | # The gamma from the file is a direct factor in the final exponent. 55 | # To make the new screen behave like the old one, the final exponent must be the same. 56 | # Therefore, the gamma value in the *adapted* file should simply be the *old* gamma value. 57 | gamma_adapted = gamma_old 58 | 59 | # 2. Adapt White Balance Gains 60 | # WB is applied by multiplying by the inverse gains. 61 | # Corrected_wb = Linear * (1 / gains_adapt) * (1 / gains_new) 62 | # We want: Corrected_wb = Linear * (1 / gains_old) 63 | # So: (1 / gains_adapt) * (1 / gains_new) = (1 / gains_old) 64 | # gains_adapt = gains_old / gains_new 65 | wb_gains_old_np = np.array(wb_gains_old) 66 | wb_gains_new_np = np.array(wb_gains_new) 67 | # Avoid division by zero 68 | wb_gains_new_np[wb_gains_new_np == 0] = 1e-9 69 | wb_gains_adapted = wb_gains_old_np / wb_gains_new_np 70 | 71 | # 3. Adapt Color Correction Matrix (CCM) 72 | # The CCMs are multiplied. 
73 | # Corrected_ccm = CCM_adapt * CCM_new * Corrected_wb 74 | # We want: Corrected_ccm = CCM_old * Corrected_wb 75 | # So: CCM_adapt * CCM_new = CCM_old 76 | # CCM_adapt = CCM_old * inverse(CCM_new) 77 | ccm_old_matrix = np.array(ccm_old).reshape((3, 3)) 78 | ccm_new_matrix = np.array(ccm_new).reshape((3, 3)) 79 | ccm_new_inv_matrix = np.linalg.inv(ccm_new_matrix) 80 | ccm_adapted_matrix = np.dot(ccm_old_matrix, ccm_new_inv_matrix) 81 | ccm_adapted_flat = ccm_adapted_matrix.flatten().tolist() 82 | 83 | print("\n--- Calculated Adapted Calibration ---") 84 | print(f" Adapted Gamma: {gamma_adapted:.4f}") 85 | print(" Adapted CCM:") 86 | print(f" [{ccm_adapted_flat[0]:.4f}, {ccm_adapted_flat[1]:.4f}, {ccm_adapted_flat[2]:.4f}]") 87 | print(f" [{ccm_adapted_flat[3]:.4f}, {ccm_adapted_flat[4]:.4f}, {ccm_adapted_flat[5]:.4f}]") 88 | print(f" [{ccm_adapted_flat[6]:.4f}, {ccm_adapted_flat[7]:.4f}, {ccm_adapted_flat[8]:.4f}]") 89 | print(" Adapted WB Gains:") 90 | print(f" [R={wb_gains_adapted[0]:.4f}, G={wb_gains_adapted[1]:.4f}, B={wb_gains_adapted[2]:.4f}]") 91 | 92 | # Generate the new file 93 | print(f"\nGenerating new calibration file: {output_path}") 94 | generate_color_cal(output_path, gamma_adapted, ccm_adapted_flat, wb_gains_adapted.tolist()) 95 | 96 | 97 | if __name__ == "__main__": 98 | if len(sys.argv) != 4: 99 | print("Usage: python adapt.py ") 100 | sys.exit(1) 101 | 102 | old_file = sys.argv[1] 103 | new_file = sys.argv[2] 104 | output_file = sys.argv[3] 105 | 106 | adapt_calibration(old_file, new_file, output_file) -------------------------------------------------------------------------------- /simulate_correction.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import struct 4 | import binascii 5 | import sys 6 | import os 7 | 8 | def read_calibration_values_from_file(file_path): 9 | """Reads 13 half-floats from a color_cal binary file.""" 10 | if not os.path.exists(file_path): 11 | print(f"Error: File not found at {file_path}") 12 | return None 13 | 14 | try: 15 | with open(file_path, 'rb') as f: 16 | binary_data = f.read(26) # Read only the first 26 bytes 17 | except IOError as e: 18 | print(f"Error reading file: {e}") 19 | return None 20 | 21 | if len(binary_data) < 26: 22 | print(f"Error: File too short ({len(binary_data)} bytes), expected 26.") 23 | return None 24 | 25 | try: 26 | # '<' for little-endian, '13e' for 13 half-precision floats 27 | half_floats = struct.unpack('<13e', binary_data) 28 | gamma = half_floats[0] 29 | ccm = list(half_floats[1:10]) # 9 values 30 | wb_gains = list(half_floats[10:13]) # 3 values 31 | return gamma, ccm, wb_gains 32 | except struct.error as e: 33 | print(f"Could not unpack as half-floats: {e}") 34 | return None 35 | except Exception as e: 36 | print(f"An error occurred during half-float unpacking: {e}") 37 | return None 38 | 39 | 40 | def apply_color_correction_simulation(rgb_in, gamma_cal, ccm_values, wb_gains): 41 | """ 42 | Simulates the color correction shader logic in Python. 43 | rgb_in: list/tuple/array of [r, g, b] floats (0.0 to 1.0) 44 | gamma_cal: float gamma value from color_cal 45 | ccm_values: list of 9 float CCM values from color_cal (row-major) 46 | wb_gains: list of 3 float WB gains [R, G, B] from color_cal 47 | """ 48 | 49 | # Ensure input is numpy array for vectorized operations 50 | rgb = np.array(rgb_in, dtype=float) 51 | 52 | # --- Clamp input to avoid issues with pow --- 53 | rgb = np.clip(rgb, 0.0, 1.0) 54 | 55 | # 1. 
Apply inverse EOTF (Linearize from approx. sRGB gamma 2.2) 56 | # Avoid pow(0, x) issues if component is 0 57 | rgb = np.power(rgb + 1e-9, 2.2) # Add small epsilon 58 | 59 | # 2. Calculate and apply inverse WB gains 60 | # Avoid division by zero if gains are zero 61 | inv_gains = np.array([1.0 / g if g != 0 else 0 for g in wb_gains]) 62 | rgb *= inv_gains 63 | 64 | # 3. Apply CCM 65 | # Reshape CCM values into a 3x3 matrix (row-major) 66 | ccm_matrix = np.array(ccm_values).reshape((3, 3)) 67 | # Perform matrix multiplication: Corrected_RGB = CCM * WB_Corrected_RGB 68 | rgb = np.dot(ccm_matrix, rgb) 69 | 70 | # Clamp negative values resulting from CCM 71 | rgb = np.maximum(rgb, 0.0) 72 | 73 | # 4. Apply Gamma correction and forward EOTF 74 | # Avoid division by zero if 2.2 is somehow zero 75 | gamma_factor = gamma_cal / 2.2 if 2.2 != 0 else 1.0 76 | # Avoid issues if gamma_factor is zero or negative, or if rgb components are zero 77 | if gamma_factor > 0: 78 | rgb = np.power(rgb + 1e-9, gamma_factor) # Add small epsilon 79 | else: 80 | # If gamma factor is invalid, maybe return linear rgb clamped? 81 | rgb = np.clip(rgb, 0.0, 1.0) # Return linear clamped value as fallback 82 | 83 | # Clamp final result to [0.0, 1.0] 84 | rgb = np.clip(rgb, 0.0, 1.0) 85 | 86 | return rgb.tolist() 87 | 88 | if __name__ == "__main__": 89 | if len(sys.argv) != 2: 90 | print("Usage: python simulate_correction.py ") 91 | sys.exit(1) 92 | 93 | cal_file_path = sys.argv[1] 94 | calibration_data = read_calibration_values_from_file(cal_file_path) 95 | 96 | if calibration_data: 97 | gamma, ccm, wb = calibration_data 98 | print(f"Read calibration data from: {cal_file_path}") 99 | print(f" Gamma: {gamma:.4f}") 100 | print(f" CCM: {ccm}") 101 | print(f" WB Gains: {wb}") 102 | 103 | # --- Example Simulations --- 104 | print("\n--- Simulation Examples ---") 105 | 106 | # Test with pure red 107 | input_color_red = [1.0, 0.0, 0.0] 108 | corrected_red = apply_color_correction_simulation(input_color_red, gamma, ccm, wb) 109 | print(f"Input Red: {input_color_red}") 110 | print(f"Corrected Red: [{corrected_red[0]:.4f}, {corrected_red[1]:.4f}, {corrected_red[2]:.4f}]") 111 | 112 | # Test with pure green 113 | input_color_green = [0.0, 1.0, 0.0] 114 | corrected_green = apply_color_correction_simulation(input_color_green, gamma, ccm, wb) 115 | print(f"\nInput Green: {input_color_green}") 116 | print(f"Corrected Green: [{corrected_green[0]:.4f}, {corrected_green[1]:.4f}, {corrected_green[2]:.4f}]") 117 | 118 | # Test with pure blue 119 | input_color_blue = [0.0, 0.0, 1.0] 120 | corrected_blue = apply_color_correction_simulation(input_color_blue, gamma, ccm, wb) 121 | print(f"\nInput Blue: {input_color_blue}") 122 | print(f"Corrected Blue: [{corrected_blue[0]:.4f}, {corrected_blue[1]:.4f}, {corrected_blue[2]:.4f}]") 123 | 124 | # Test with white 125 | input_color_white = [1.0, 1.0, 1.0] 126 | corrected_white = apply_color_correction_simulation(input_color_white, gamma, ccm, wb) 127 | print(f"\nInput White: {input_color_white}") 128 | print(f"Corrected White: [{corrected_white[0]:.4f}, {corrected_white[1]:.4f}, {corrected_white[2]:.4f}]") 129 | 130 | # Test with gray (0.5) 131 | input_color_gray = [0.5, 0.5, 0.5] 132 | corrected_gray = apply_color_correction_simulation(input_color_gray, gamma, ccm, wb) 133 | print(f"\nInput Gray (0.5): {input_color_gray}") 134 | print(f"Corrected Gray (0.5): [{corrected_gray[0]:.4f}, {corrected_gray[1]:.4f}, {corrected_gray[2]:.4f}]") 135 | 136 | else: 137 | print(f"Failed to read or parse calibration 
data from {cal_file_path}") 138 | sys.exit(1) -------------------------------------------------------------------------------- /WORK.md: -------------------------------------------------------------------------------- 1 | # Color Calibration Reverse Engineering Work Log 2 | 3 | This document tracks the reverse engineering process for the `color_cal` file format. 4 | 5 | ## Initial Data (from README.md) 6 | 7 | - **Hex String (from QR Code):** `4b3d9e388b29b1184333273a9f2645320032c63b003cf53d92430000000024` 8 | - **Text Data (from Sticker):** 9 | - Gamma: 1.32 10 | - CCM: 11 | - 0.58 0.04 0. 12 | - 0.23 0.77 0.03 13 | - 0.2 0.19 0.97 14 | - WB gains: 15 | - 1.00 1.49 3.78 16 | - Timestamp: 2025-03-10 09:24:40 17 | 18 | ## Analysis Steps & Findings 19 | 20 | 1. **Decode Hex:** The hex string `4b3d9e388b29b1184333273a9f2645320032c63b003cf53d92430000000024` decodes to 31 bytes: `b"K=\x9e8\x8b)\xb1\x18C3':\x9f&E2\x002\xc6;\x00<\xf5=\x92C\x00\x00\x00\x00$"`. 21 | 2. **Interpret Data Types:** Initial attempts with single-precision floats failed. Testing with half-precision floats (binary16, little-endian) was successful. 22 | 3. **Correlate Values:** The first 26 bytes correspond to the 13 known calibration values in the order presented on the sticker: 23 | * Bytes 0-1: Gamma (1.32) 24 | * Bytes 2-3: CCM[0,0] (0.58) 25 | * Bytes 4-5: CCM[0,1] (0.04) 26 | * Bytes 6-7: CCM[0,2] (0.0) 27 | * Bytes 8-9: CCM[1,0] (0.23) 28 | * Bytes 10-11: CCM[1,1] (0.77) 29 | * Bytes 12-13: CCM[1,2] (0.03) 30 | * Bytes 14-15: CCM[2,0] (0.2) 31 | * Bytes 16-17: CCM[2,1] (0.19) 32 | * Bytes 18-19: CCM[2,2] (0.97) 33 | * Bytes 20-21: WB gains[0] (R) (1.00) 34 | * Bytes 22-23: WB gains[1] (G) (1.49) 35 | * Bytes 24-25: WB gains[2] (B) (3.78) 36 | * All values are stored as **little-endian half-precision floats (binary16)**. 37 | 38 | 4. **Remaining Bytes:** The last 5 bytes are `b'\x00\x00\x00\x00$'` (hex: `0000000024`). 39 | * Bytes 26-29: `0x00000000` - Purpose unknown (Padding? Reserved?). 40 | * Byte 30: `0x24` (ASCII '$') - Purpose unknown (Checksum? Version? Delimiter?). Unlikely related to the sticker timestamp. 41 | 42 | ## Next Steps & Findings (cont.) 43 | 44 | 5. **Checksum Test:** A simple checksum (sum of the first 26 bytes modulo 256) was calculated. 45 | * Sum = 2148 46 | * Sum mod 256 = 100 47 | * Last byte = 36 (0x24) 48 | * **Result:** The checksum does **not** match. The purpose of the final byte `0x24` remains unknown. It is not a simple sum checksum. 49 | 50 | ## Current Understanding of Format (31 bytes total) 51 | 52 | ## Sample 2: Erich's C3X (`samples/erich/color_cal`) 53 | 54 | Analysis performed using the updated `analyze_cal.py` script. 55 | 56 | - **File Size:** 31 bytes 57 | - **Hex Data:** `853b6c392e2a25219033e23aa928812ddb2da13b003c7c3c2b400000000064` 58 | - **Unpacked Values (13 half-floats):** 59 | - Gamma: 0.9399 60 | - CCM: [0.6777, 0.0483, 0.0100], [0.2363, 0.8604, 0.0364], [0.0860, 0.0915, 0.9536] 61 | - WB Gains: [R=1.0000, G=1.1211, B=2.0840] 62 | - **Remaining Bytes (5 bytes):** `b'\x00\x00\x00\x00d'` (hex: `0000000064`) 63 | - **Checksum Test (Simple Sum Mod 256):** Failed (Sum mod 256 = 164, Last byte = 100 (0x64)). 64 | 65 | **Observations:** 66 | - Consistent 31-byte structure. 67 | - First 26 bytes are calibration data (13 half-floats). 68 | - Next 4 bytes are consistently null (`0x00000000`). 69 | - Final byte varies between samples (`0x24` vs `0x64`), ruling out a fixed delimiter and simple sum checksum. Its purpose remains unknown. 
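As a quick cross-check of these observations, the decoding can be reproduced with a few lines of standalone Python (a minimal sketch using only the standard library; the hex string below is the sticker value from `README.md`):

```python
import binascii
import struct

# Hex string from the sticker QR code (see README.md)
hex_string = "4b3d9e388b29b1184333273a9f2645320032c63b003cf53d92430000000024"
data = binascii.unhexlify(hex_string)
assert len(data) == 31

# First 26 bytes: 13 little-endian half-precision floats (binary16)
values = struct.unpack("<13e", data[:26])
gamma, ccm, wb_gains = values[0], values[1:10], values[10:13]

print(f"Gamma:    {gamma:.2f}")                        # 1.32
print(f"CCM:      {[round(v, 2) for v in ccm]}")       # rounds to the 3x3 matrix on the sticker
print(f"WB gains: {[round(v, 2) for v in wb_gains]}")  # 1.00, 1.49, 3.78

# Trailing 5 bytes: 4 null bytes plus the varying final byte
print(f"Trailing: {data[26:].hex()}")                  # 0000000024
```

Running the same unpacking over the Sample 2 hex string reproduces the values listed above.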
70 | 71 | 72 | * **Bytes 0-25 (26 bytes):** Calibration Data 73 | * 13 x little-endian half-precision floats (binary16) 74 | * Order: Gamma (1), CCM (9), WB Gains (3) 75 | * **Bytes 26-29 (4 bytes):** Unknown (`0x00000000`) 76 | * Likely padding or reserved. 77 | * **Byte 30 (1 byte):** Unknown (`0x24`) 78 | * Not a simple checksum. Could be version, display type ID, delimiter, etc. 79 | 80 | ## Further Investigation Ideas 81 | 82 | ## Shader Logic and Simulation 83 | 84 | Analysis of the `color_correction_get_shader` function in the decompiled code revealed how the calibration values are used: 85 | 86 | 1. **Half-Float Decoding:** The `uint16_t` values are manually decoded into standard floats. 87 | 2. **GLSL Generation:** A GLSL fragment shader snippet is dynamically generated, hardcoding the calibration values (or derived values like inverse WB gains and gamma factor). 88 | 3. **Correction Steps (in GLSL):** 89 | a. Linearize input color (inverse EOTF using `pow(..., 2.2)`). 90 | b. Apply White Balance correction (multiply by inverse gains). 91 | c. Apply Color Correction Matrix (CCM) via matrix multiplication. 92 | d. Apply Gamma correction and forward EOTF (`pow(..., gamma_cal / 2.2)`). 93 | 94 | **Simulation Tool:** 95 | A Python script `simulate_correction.py` has been created to replicate this process. It reads a `color_cal` file and applies the same sequence of transformations to input RGB values, allowing for testing and verification of the correction effect. 96 | 97 | 98 | * ~~Obtain more `color_cal` examples (different devices/batches) to see how the last 5 bytes vary.~~ (Still useful for understanding *generation*, but not *usage* by weston). 99 | * **Analyze the `weston` binary:** Done via decompiled `gl-renderer-src/comma-modified-decompiled.c`. 100 | * ~~Check if the timestamp `2025-03-10 09:24:40` relates to the data in any non-obvious way (unlikely given the remaining bytes).~~ 101 | 102 | ## Final Findings from Decompiled Code (`gl-renderer-src/comma-modified-decompiled.c`) 103 | 104 | Analysis of the `read_correction_values` function (responsible for loading the calibration data) revealed the following: 105 | 106 | 1. **Structure Definition:** The code uses a `struct color_correction_values` defined as: 107 | ```c 108 | struct color_correction_values { 109 | uint16_t gamma; // 2 bytes 110 | uint16_t ccm[9]; // 18 bytes 111 | uint16_t rgb_color_gains[3]; // 6 bytes 112 | }; // Total: 26 bytes 113 | ``` 114 | This matches the 13 half-float values identified earlier. The `uint16_t` values are treated as half-float bit patterns. 115 | 116 | 2. **Memory Allocation & Reading:** The function allocates exactly 26 bytes (`malloc(0x1a)`) for this structure and reads exactly 26 bytes from the `color_cal` file using `fread(ptr, 0x1a, 1, stream)`. 117 | 118 | 3. **Ignored Bytes:** The code **does not read or use any data beyond the first 26 bytes**. 119 | 120 | **Conclusion:** The final 5 bytes (`00000000` + `XX`) present in the `color_cal` files are **ignored** by the `weston` gl-renderer component responsible for applying the color calibration. Their purpose might relate to the manufacturing/calibration process or other tools, but they do not affect the runtime color correction performed by `weston` based on this code. 121 | ## Color Adaptation 122 | 123 | The primary goal of this reverse engineering effort was to create a method to make a new screen display colors as if it were an old screen. 
This was achieved by creating an `adapt.py` script that generates a new `color_cal` file. 124 | 125 | ### Theory 126 | 127 | The color transformation pipeline is a series of mathematical operations. To make the new screen look like the old one, we need to apply a transformation that is equivalent to applying the old screen's calibration and undoing the new screen's calibration. 128 | 129 | The transformation `T_adapt` is calculated as: `T_adapt = T_old * T_new^-1` 130 | 131 | This is broken down for each component: 132 | 133 | * **Gamma:** The adapted gamma is simply the old gamma value. `gamma_adapted = gamma_old` 134 | * **White Balance (WB):** The adapted gains are the old gains divided by the new gains. `gains_adapted = gains_old / gains_new` 135 | * **Color Correction Matrix (CCM):** The adapted CCM is the old CCM multiplied by the inverse of the new CCM. `CCM_adapted = CCM_old * inverse(CCM_new)` 136 | 137 | ### Implementation 138 | 139 | The `adapt.py` script implements this theory. It reads the two calibration files, calculates the new adapted values, and generates a new `color_cal` file. 140 | -------------------------------------------------------------------------------- /gl-renderer-src/upstream.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright © 2012 Intel Corporation 3 | * Copyright © 2015,2019,2021 Collabora, Ltd. 4 | * Copyright © 2016 NVIDIA Corporation 5 | * 6 | * Permission is hereby granted, free of charge, to any person obtaining 7 | * a copy of this software and associated documentation files (the 8 | * "Software"), to deal in the Software without restriction, including 9 | * without limitation the rights to use, copy, modify, merge, publish, 10 | * distribute, sublicense, and/or sell copies of the Software, and to 11 | * permit persons to whom the Software is furnished to do so, subject to 12 | * the following conditions: 13 | * 14 | * The above copyright notice and this permission notice (including the 15 | * next paragraph) shall be included in all copies or substantial 16 | * portions of the Software. 17 | * 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 21 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 22 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 23 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 24 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | * SOFTWARE. 
26 | */ 27 | 28 | #include "config.h" 29 | 30 | #include 31 | #include 32 | #include 33 | 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | 44 | #include "linux-sync-file.h" 45 | #include "timeline.h" 46 | 47 | #include "gl-renderer.h" 48 | #include "gl-renderer-internal.h" 49 | #include "vertex-clipping.h" 50 | #include "linux-dmabuf.h" 51 | #include "linux-dmabuf-unstable-v1-server-protocol.h" 52 | #include "linux-explicit-synchronization.h" 53 | #include "pixel-formats.h" 54 | 55 | #include "shared/fd-util.h" 56 | #include "shared/helpers.h" 57 | #include "shared/platform.h" 58 | #include "shared/timespec-util.h" 59 | #include "shared/weston-drm-fourcc.h" 60 | #include "shared/weston-egl-ext.h" 61 | 62 | #define BUFFER_DAMAGE_COUNT 2 63 | 64 | enum gl_border_status { 65 | BORDER_STATUS_CLEAN = 0, 66 | BORDER_TOP_DIRTY = 1 << GL_RENDERER_BORDER_TOP, 67 | BORDER_LEFT_DIRTY = 1 << GL_RENDERER_BORDER_LEFT, 68 | BORDER_RIGHT_DIRTY = 1 << GL_RENDERER_BORDER_RIGHT, 69 | BORDER_BOTTOM_DIRTY = 1 << GL_RENDERER_BORDER_BOTTOM, 70 | BORDER_ALL_DIRTY = 0xf, 71 | BORDER_SIZE_CHANGED = 0x10 72 | }; 73 | 74 | struct gl_border_image { 75 | GLuint tex; 76 | int32_t width, height; 77 | int32_t tex_width; 78 | void *data; 79 | }; 80 | 81 | struct gl_fbo_texture { 82 | GLuint fbo; 83 | GLuint tex; 84 | int32_t width; 85 | int32_t height; 86 | }; 87 | 88 | struct gl_output_state { 89 | EGLSurface egl_surface; 90 | pixman_region32_t buffer_damage[BUFFER_DAMAGE_COUNT]; 91 | int buffer_damage_index; 92 | enum gl_border_status border_damage[BUFFER_DAMAGE_COUNT]; 93 | struct gl_border_image borders[4]; 94 | enum gl_border_status border_status; 95 | bool swap_behavior_is_preserved; 96 | 97 | struct weston_matrix output_matrix; 98 | 99 | EGLSyncKHR begin_render_sync, end_render_sync; 100 | 101 | /* struct timeline_render_point::link */ 102 | struct wl_list timeline_render_point_list; 103 | 104 | struct gl_fbo_texture shadow; 105 | }; 106 | 107 | enum buffer_type { 108 | BUFFER_TYPE_NULL, 109 | BUFFER_TYPE_SOLID, /* internal solid color surfaces without a buffer */ 110 | BUFFER_TYPE_SHM, 111 | BUFFER_TYPE_EGL 112 | }; 113 | 114 | struct gl_renderer; 115 | 116 | struct egl_image { 117 | struct gl_renderer *renderer; 118 | EGLImageKHR image; 119 | int refcount; 120 | }; 121 | 122 | enum import_type { 123 | IMPORT_TYPE_INVALID, 124 | IMPORT_TYPE_DIRECT, 125 | IMPORT_TYPE_GL_CONVERSION 126 | }; 127 | 128 | struct dmabuf_image { 129 | struct linux_dmabuf_buffer *dmabuf; 130 | int num_images; 131 | struct egl_image *images[3]; 132 | struct wl_list link; 133 | 134 | enum import_type import_type; 135 | enum gl_shader_texture_variant shader_variant; 136 | }; 137 | 138 | struct dmabuf_format { 139 | uint32_t format; 140 | struct wl_list link; 141 | 142 | uint64_t *modifiers; 143 | unsigned *external_only; 144 | int num_modifiers; 145 | }; 146 | 147 | struct yuv_plane_descriptor { 148 | int width_divisor; 149 | int height_divisor; 150 | uint32_t format; 151 | int plane_index; 152 | }; 153 | 154 | enum texture_type { 155 | TEXTURE_Y_XUXV_WL, 156 | TEXTURE_Y_UV_WL, 157 | TEXTURE_Y_U_V_WL, 158 | TEXTURE_XYUV_WL 159 | }; 160 | 161 | struct yuv_format_descriptor { 162 | uint32_t format; 163 | int input_planes; 164 | int output_planes; 165 | enum texture_type texture_type; 166 | struct yuv_plane_descriptor plane[4]; 167 | }; 168 | 169 | struct gl_surface_state { 170 | GLfloat color[4]; 171 | 172 | GLuint textures[3]; 173 | int num_textures; 174 | bool 
needs_full_upload; 175 | pixman_region32_t texture_damage; 176 | 177 | /* These are only used by SHM surfaces to detect when we need 178 | * to do a full upload to specify a new internal texture 179 | * format */ 180 | GLenum gl_format[3]; 181 | GLenum gl_pixel_type; 182 | 183 | struct egl_image* images[3]; 184 | int num_images; 185 | enum gl_shader_texture_variant shader_variant; 186 | 187 | struct weston_buffer_reference buffer_ref; 188 | struct weston_buffer_release_reference buffer_release_ref; 189 | enum buffer_type buffer_type; 190 | int pitch; /* in pixels */ 191 | int height; /* in pixels */ 192 | bool y_inverted; 193 | bool direct_display; 194 | 195 | /* Extension needed for SHM YUV texture */ 196 | int offset[3]; /* offset per plane */ 197 | int hsub[3]; /* horizontal subsampling per plane */ 198 | int vsub[3]; /* vertical subsampling per plane */ 199 | 200 | struct weston_surface *surface; 201 | 202 | /* Whether this surface was used in the current output repaint. 203 | Used only in the context of a gl_renderer_repaint_output call. */ 204 | bool used_in_output_repaint; 205 | 206 | struct wl_listener surface_destroy_listener; 207 | struct wl_listener renderer_destroy_listener; 208 | }; 209 | 210 | enum timeline_render_point_type { 211 | TIMELINE_RENDER_POINT_TYPE_BEGIN, 212 | TIMELINE_RENDER_POINT_TYPE_END 213 | }; 214 | 215 | struct timeline_render_point { 216 | struct wl_list link; /* gl_output_state::timeline_render_point_list */ 217 | 218 | enum timeline_render_point_type type; 219 | int fd; 220 | struct weston_output *output; 221 | struct wl_event_source *event_source; 222 | }; 223 | 224 | static uint32_t 225 | gr_gl_version(uint16_t major, uint16_t minor) 226 | { 227 | return ((uint32_t)major << 16) | minor; 228 | } 229 | 230 | static int 231 | gr_gl_version_major(uint32_t ver) 232 | { 233 | return ver >> 16; 234 | } 235 | 236 | static int 237 | gr_gl_version_minor(uint32_t ver) 238 | { 239 | return ver & 0xffff; 240 | } 241 | 242 | static inline const char * 243 | dump_format(uint32_t format, char out[4]) 244 | { 245 | #if BYTE_ORDER == BIG_ENDIAN 246 | format = __builtin_bswap32(format); 247 | #endif 248 | memcpy(out, &format, 4); 249 | return out; 250 | } 251 | 252 | static inline struct gl_output_state * 253 | get_output_state(struct weston_output *output) 254 | { 255 | return (struct gl_output_state *)output->renderer_state; 256 | } 257 | 258 | static int 259 | gl_renderer_create_surface(struct weston_surface *surface); 260 | 261 | static inline struct gl_surface_state * 262 | get_surface_state(struct weston_surface *surface) 263 | { 264 | if (!surface->renderer_state) 265 | gl_renderer_create_surface(surface); 266 | 267 | return (struct gl_surface_state *)surface->renderer_state; 268 | } 269 | 270 | static bool 271 | shadow_exists(const struct gl_output_state *go) 272 | { 273 | return go->shadow.fbo != 0; 274 | } 275 | 276 | static void 277 | timeline_render_point_destroy(struct timeline_render_point *trp) 278 | { 279 | wl_list_remove(&trp->link); 280 | wl_event_source_remove(trp->event_source); 281 | close(trp->fd); 282 | free(trp); 283 | } 284 | 285 | static int 286 | timeline_render_point_handler(int fd, uint32_t mask, void *data) 287 | { 288 | struct timeline_render_point *trp = data; 289 | const char *tp_name = trp->type == TIMELINE_RENDER_POINT_TYPE_BEGIN ? 
290 | "renderer_gpu_begin" : "renderer_gpu_end"; 291 | 292 | if (mask & WL_EVENT_READABLE) { 293 | struct timespec tspec = { 0 }; 294 | 295 | if (weston_linux_sync_file_read_timestamp(trp->fd, 296 | &tspec) == 0) { 297 | TL_POINT(trp->output->compositor, tp_name, TLP_GPU(&tspec), 298 | TLP_OUTPUT(trp->output), TLP_END); 299 | } 300 | } 301 | 302 | timeline_render_point_destroy(trp); 303 | 304 | return 0; 305 | } 306 | 307 | static EGLSyncKHR 308 | create_render_sync(struct gl_renderer *gr) 309 | { 310 | static const EGLint attribs[] = { EGL_NONE }; 311 | 312 | if (!gr->has_native_fence_sync) 313 | return EGL_NO_SYNC_KHR; 314 | 315 | return gr->create_sync(gr->egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, 316 | attribs); 317 | } 318 | 319 | static void 320 | timeline_submit_render_sync(struct gl_renderer *gr, 321 | struct weston_output *output, 322 | EGLSyncKHR sync, 323 | enum timeline_render_point_type type) 324 | { 325 | struct gl_output_state *go; 326 | struct wl_event_loop *loop; 327 | int fd; 328 | struct timeline_render_point *trp; 329 | 330 | if (!weston_log_scope_is_enabled(gr->compositor->timeline) || 331 | !gr->has_native_fence_sync || 332 | sync == EGL_NO_SYNC_KHR) 333 | return; 334 | 335 | go = get_output_state(output); 336 | loop = wl_display_get_event_loop(gr->compositor->wl_display); 337 | 338 | fd = gr->dup_native_fence_fd(gr->egl_display, sync); 339 | if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID) 340 | return; 341 | 342 | trp = zalloc(sizeof *trp); 343 | if (trp == NULL) { 344 | close(fd); 345 | return; 346 | } 347 | 348 | trp->type = type; 349 | trp->fd = fd; 350 | trp->output = output; 351 | trp->event_source = wl_event_loop_add_fd(loop, fd, 352 | WL_EVENT_READABLE, 353 | timeline_render_point_handler, 354 | trp); 355 | 356 | wl_list_insert(&go->timeline_render_point_list, &trp->link); 357 | } 358 | 359 | static struct egl_image* 360 | egl_image_create(struct gl_renderer *gr, EGLenum target, 361 | EGLClientBuffer buffer, const EGLint *attribs) 362 | { 363 | struct egl_image *img; 364 | 365 | img = zalloc(sizeof *img); 366 | img->renderer = gr; 367 | img->refcount = 1; 368 | img->image = gr->create_image(gr->egl_display, EGL_NO_CONTEXT, 369 | target, buffer, attribs); 370 | 371 | if (img->image == EGL_NO_IMAGE_KHR) { 372 | free(img); 373 | return NULL; 374 | } 375 | 376 | return img; 377 | } 378 | 379 | static struct egl_image* 380 | egl_image_ref(struct egl_image *image) 381 | { 382 | image->refcount++; 383 | 384 | return image; 385 | } 386 | 387 | static int 388 | egl_image_unref(struct egl_image *image) 389 | { 390 | struct gl_renderer *gr = image->renderer; 391 | 392 | assert(image->refcount > 0); 393 | 394 | image->refcount--; 395 | if (image->refcount > 0) 396 | return image->refcount; 397 | 398 | gr->destroy_image(gr->egl_display, image->image); 399 | free(image); 400 | 401 | return 0; 402 | } 403 | 404 | static struct dmabuf_image* 405 | dmabuf_image_create(void) 406 | { 407 | struct dmabuf_image *img; 408 | 409 | img = zalloc(sizeof *img); 410 | wl_list_init(&img->link); 411 | 412 | return img; 413 | } 414 | 415 | static void 416 | dmabuf_image_destroy(struct dmabuf_image *image) 417 | { 418 | int i; 419 | 420 | for (i = 0; i < image->num_images; ++i) 421 | egl_image_unref(image->images[i]); 422 | 423 | if (image->dmabuf) 424 | linux_dmabuf_buffer_set_user_data(image->dmabuf, NULL, NULL); 425 | 426 | wl_list_remove(&image->link); 427 | free(image); 428 | } 429 | 430 | #define max(a, b) (((a) > (b)) ? (a) : (b)) 431 | #define min(a, b) (((a) > (b)) ? 
(b) : (a)) 432 | 433 | /* 434 | * Compute the boundary vertices of the intersection of the global coordinate 435 | * aligned rectangle 'rect', and an arbitrary quadrilateral produced from 436 | * 'surf_rect' when transformed from surface coordinates into global coordinates. 437 | * The vertices are written to 'ex' and 'ey', and the return value is the 438 | * number of vertices. Vertices are produced in clockwise winding order. 439 | * Guarantees to produce either zero vertices, or 3-8 vertices with non-zero 440 | * polygon area. 441 | */ 442 | static int 443 | calculate_edges(struct weston_view *ev, pixman_box32_t *rect, 444 | pixman_box32_t *surf_rect, GLfloat *ex, GLfloat *ey) 445 | { 446 | 447 | struct clip_context ctx; 448 | int i, n; 449 | GLfloat min_x, max_x, min_y, max_y; 450 | struct polygon8 surf = { 451 | { surf_rect->x1, surf_rect->x2, surf_rect->x2, surf_rect->x1 }, 452 | { surf_rect->y1, surf_rect->y1, surf_rect->y2, surf_rect->y2 }, 453 | 4 454 | }; 455 | 456 | ctx.clip.x1 = rect->x1; 457 | ctx.clip.y1 = rect->y1; 458 | ctx.clip.x2 = rect->x2; 459 | ctx.clip.y2 = rect->y2; 460 | 461 | /* transform surface to screen space: */ 462 | for (i = 0; i < surf.n; i++) 463 | weston_view_to_global_float(ev, surf.x[i], surf.y[i], 464 | &surf.x[i], &surf.y[i]); 465 | 466 | /* find bounding box: */ 467 | min_x = max_x = surf.x[0]; 468 | min_y = max_y = surf.y[0]; 469 | 470 | for (i = 1; i < surf.n; i++) { 471 | min_x = min(min_x, surf.x[i]); 472 | max_x = max(max_x, surf.x[i]); 473 | min_y = min(min_y, surf.y[i]); 474 | max_y = max(max_y, surf.y[i]); 475 | } 476 | 477 | /* First, simple bounding box check to discard early transformed 478 | * surface rects that do not intersect with the clip region: 479 | */ 480 | if ((min_x >= ctx.clip.x2) || (max_x <= ctx.clip.x1) || 481 | (min_y >= ctx.clip.y2) || (max_y <= ctx.clip.y1)) 482 | return 0; 483 | 484 | /* Simple case, bounding box edges are parallel to surface edges, 485 | * there will be only four edges. We just need to clip the surface 486 | * vertices to the clip rect bounds: 487 | */ 488 | if (!ev->transform.enabled) 489 | return clip_simple(&ctx, &surf, ex, ey); 490 | 491 | /* Transformed case: use a general polygon clipping algorithm to 492 | * clip the surface rectangle with each side of 'rect'. 493 | * The algorithm is Sutherland-Hodgman, as explained in 494 | * http://www.codeguru.com/cpp/misc/misc/graphics/article.php/c8965/Polygon-Clipping.htm 495 | * but without looking at any of that code. 
496 | */ 497 | n = clip_transformed(&ctx, &surf, ex, ey); 498 | 499 | if (n < 3) 500 | return 0; 501 | 502 | return n; 503 | } 504 | 505 | static bool 506 | merge_down(pixman_box32_t *a, pixman_box32_t *b, pixman_box32_t *merge) 507 | { 508 | if (a->x1 == b->x1 && a->x2 == b->x2 && a->y1 == b->y2) { 509 | merge->x1 = a->x1; 510 | merge->x2 = a->x2; 511 | merge->y1 = b->y1; 512 | merge->y2 = a->y2; 513 | return true; 514 | } 515 | return false; 516 | } 517 | 518 | static int 519 | compress_bands(pixman_box32_t *inrects, int nrects, pixman_box32_t **outrects) 520 | { 521 | bool merged = false; 522 | pixman_box32_t *out, merge_rect; 523 | int i, j, nout; 524 | 525 | if (!nrects) { 526 | *outrects = NULL; 527 | return 0; 528 | } 529 | 530 | /* nrects is an upper bound - we're not too worried about 531 | * allocating a little extra 532 | */ 533 | out = malloc(sizeof(pixman_box32_t) * nrects); 534 | out[0] = inrects[0]; 535 | nout = 1; 536 | for (i = 1; i < nrects; i++) { 537 | for (j = 0; j < nout; j++) { 538 | merged = merge_down(&inrects[i], &out[j], &merge_rect); 539 | if (merged) { 540 | out[j] = merge_rect; 541 | break; 542 | } 543 | } 544 | if (!merged) { 545 | out[nout] = inrects[i]; 546 | nout++; 547 | } 548 | } 549 | *outrects = out; 550 | return nout; 551 | } 552 | 553 | static int 554 | texture_region(struct weston_view *ev, 555 | pixman_region32_t *region, 556 | pixman_region32_t *surf_region) 557 | { 558 | struct gl_surface_state *gs = get_surface_state(ev->surface); 559 | struct weston_compositor *ec = ev->surface->compositor; 560 | struct gl_renderer *gr = get_renderer(ec); 561 | GLfloat *v, inv_width, inv_height; 562 | unsigned int *vtxcnt, nvtx = 0; 563 | pixman_box32_t *rects, *surf_rects; 564 | pixman_box32_t *raw_rects; 565 | int i, j, k, nrects, nsurf, raw_nrects; 566 | bool used_band_compression; 567 | raw_rects = pixman_region32_rectangles(region, &raw_nrects); 568 | surf_rects = pixman_region32_rectangles(surf_region, &nsurf); 569 | 570 | if (raw_nrects < 4) { 571 | used_band_compression = false; 572 | nrects = raw_nrects; 573 | rects = raw_rects; 574 | } else { 575 | nrects = compress_bands(raw_rects, raw_nrects, &rects); 576 | used_band_compression = true; 577 | } 578 | /* worst case we can have 8 vertices per rect (ie. clipped into 579 | * an octagon): 580 | */ 581 | v = wl_array_add(&gr->vertices, nrects * nsurf * 8 * 4 * sizeof *v); 582 | vtxcnt = wl_array_add(&gr->vtxcnt, nrects * nsurf * sizeof *vtxcnt); 583 | 584 | inv_width = 1.0 / gs->pitch; 585 | inv_height = 1.0 / gs->height; 586 | 587 | for (i = 0; i < nrects; i++) { 588 | pixman_box32_t *rect = &rects[i]; 589 | for (j = 0; j < nsurf; j++) { 590 | pixman_box32_t *surf_rect = &surf_rects[j]; 591 | GLfloat sx, sy, bx, by; 592 | GLfloat ex[8], ey[8]; /* edge points in screen space */ 593 | int n; 594 | 595 | /* The transformed surface, after clipping to the clip region, 596 | * can have as many as eight sides, emitted as a triangle-fan. 597 | * The first vertex in the triangle fan can be chosen arbitrarily, 598 | * since the area is guaranteed to be convex. 599 | * 600 | * If a corner of the transformed surface falls outside of the 601 | * clip region, instead of emitting one vertex for the corner 602 | * of the surface, up to two are emitted for two corresponding 603 | * intersection point(s) between the surface and the clip region. 604 | * 605 | * To do this, we first calculate the (up to eight) points that 606 | * form the intersection of the clip rect and the transformed 607 | * surface. 
608 | */ 609 | n = calculate_edges(ev, rect, surf_rect, ex, ey); 610 | if (n < 3) 611 | continue; 612 | 613 | /* emit edge points: */ 614 | for (k = 0; k < n; k++) { 615 | weston_view_from_global_float(ev, ex[k], ey[k], 616 | &sx, &sy); 617 | /* position: */ 618 | *(v++) = ex[k]; 619 | *(v++) = ey[k]; 620 | /* texcoord: */ 621 | weston_surface_to_buffer_float(ev->surface, 622 | sx, sy, 623 | &bx, &by); 624 | *(v++) = bx * inv_width; 625 | if (gs->y_inverted) { 626 | *(v++) = by * inv_height; 627 | } else { 628 | *(v++) = (gs->height - by) * inv_height; 629 | } 630 | } 631 | 632 | vtxcnt[nvtx++] = n; 633 | } 634 | } 635 | 636 | if (used_band_compression) 637 | free(rects); 638 | return nvtx; 639 | } 640 | 641 | /** Create a texture and a framebuffer object 642 | * 643 | * \param fbotex To be initialized. 644 | * \param width Texture width in pixels. 645 | * \param height Texture heigh in pixels. 646 | * \param internal_format See glTexImage2D. 647 | * \param format See glTexImage2D. 648 | * \param type See glTexImage2D. 649 | * \return True on success, false otherwise. 650 | */ 651 | static bool 652 | gl_fbo_texture_init(struct gl_fbo_texture *fbotex, 653 | int32_t width, 654 | int32_t height, 655 | GLint internal_format, 656 | GLenum format, 657 | GLenum type) 658 | { 659 | int fb_status; 660 | GLuint shadow_fbo; 661 | GLuint shadow_tex; 662 | 663 | glActiveTexture(GL_TEXTURE0); 664 | glGenTextures(1, &shadow_tex); 665 | glBindTexture(GL_TEXTURE_2D, shadow_tex); 666 | glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, 667 | format, type, NULL); 668 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); 669 | glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); 670 | glBindTexture(GL_TEXTURE_2D, 0); 671 | 672 | glGenFramebuffers(1, &shadow_fbo); 673 | glBindFramebuffer(GL_FRAMEBUFFER, shadow_fbo); 674 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, 675 | GL_TEXTURE_2D, shadow_tex, 0); 676 | 677 | fb_status = glCheckFramebufferStatus(GL_FRAMEBUFFER); 678 | 679 | glBindFramebuffer(GL_FRAMEBUFFER, 0); 680 | 681 | if (fb_status != GL_FRAMEBUFFER_COMPLETE) { 682 | glDeleteFramebuffers(1, &shadow_fbo); 683 | glDeleteTextures(1, &shadow_tex); 684 | return false; 685 | } 686 | 687 | fbotex->fbo = shadow_fbo; 688 | fbotex->tex = shadow_tex; 689 | fbotex->width = width; 690 | fbotex->height = height; 691 | 692 | return true; 693 | } 694 | 695 | static void 696 | gl_fbo_texture_fini(struct gl_fbo_texture *fbotex) 697 | { 698 | glDeleteFramebuffers(1, &fbotex->fbo); 699 | fbotex->fbo = 0; 700 | glDeleteTextures(1, &fbotex->tex); 701 | fbotex->tex = 0; 702 | } 703 | 704 | static void 705 | gl_renderer_send_shader_error(struct weston_view *view) 706 | { 707 | struct wl_resource *resource = view->surface->resource; 708 | 709 | if (!resource) 710 | return; 711 | 712 | wl_client_post_implementation_error(wl_resource_get_client(resource), 713 | "Weston GL-renderer shader failed for wl_surface@%u", 714 | wl_resource_get_id(resource)); 715 | } 716 | 717 | static void 718 | triangle_fan_debug(struct gl_renderer *gr, 719 | const struct gl_shader_config *sconf, 720 | int first, int count) 721 | { 722 | int i; 723 | GLushort *buffer; 724 | GLushort *index; 725 | int nelems; 726 | static int color_idx = 0; 727 | struct gl_shader_config alt; 728 | const GLfloat *col; 729 | static const GLfloat color[][4] = { 730 | { 1.0, 0.0, 0.0, 1.0 }, 731 | { 0.0, 1.0, 0.0, 1.0 }, 732 | { 0.0, 0.0, 1.0, 1.0 }, 733 | { 1.0, 1.0, 1.0, 1.0 }, 734 | }; 735 | 736 | col = 
color[color_idx++ % ARRAY_LENGTH(color)]; 737 | alt = (struct gl_shader_config) { 738 | .req = { 739 | .variant = SHADER_VARIANT_SOLID, 740 | }, 741 | .projection = sconf->projection, 742 | .view_alpha = 1.0f, 743 | .unicolor = { col[0], col[1], col[2], col[3] }, 744 | }; 745 | 746 | gl_renderer_use_program(gr, &alt); 747 | 748 | nelems = (count - 1 + count - 2) * 2; 749 | 750 | buffer = malloc(sizeof(GLushort) * nelems); 751 | index = buffer; 752 | 753 | for (i = 1; i < count; i++) { 754 | *index++ = first; 755 | *index++ = first + i; 756 | } 757 | 758 | for (i = 2; i < count; i++) { 759 | *index++ = first + i - 1; 760 | *index++ = first + i; 761 | } 762 | 763 | glDrawElements(GL_LINES, nelems, GL_UNSIGNED_SHORT, buffer); 764 | 765 | free(buffer); 766 | 767 | gl_renderer_use_program(gr, sconf); 768 | } 769 | 770 | static void 771 | repaint_region(struct gl_renderer *gr, 772 | struct weston_view *ev, 773 | pixman_region32_t *region, 774 | pixman_region32_t *surf_region, 775 | const struct gl_shader_config *sconf) 776 | { 777 | GLfloat *v; 778 | unsigned int *vtxcnt; 779 | int i, first, nfans; 780 | 781 | /* The final region to be painted is the intersection of 782 | * 'region' and 'surf_region'. However, 'region' is in the global 783 | * coordinates, and 'surf_region' is in the surface-local 784 | * coordinates. texture_region() will iterate over all pairs of 785 | * rectangles from both regions, compute the intersection 786 | * polygon for each pair, and store it as a triangle fan if 787 | * it has a non-zero area (at least 3 vertices, actually). 788 | */ 789 | nfans = texture_region(ev, region, surf_region); 790 | 791 | v = gr->vertices.data; 792 | vtxcnt = gr->vtxcnt.data; 793 | 794 | /* position: */ 795 | glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4 * sizeof *v, &v[0]); 796 | glEnableVertexAttribArray(0); 797 | 798 | /* texcoord: */ 799 | glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4 * sizeof *v, &v[2]); 800 | glEnableVertexAttribArray(1); 801 | 802 | if (!gl_renderer_use_program(gr, sconf)) { 803 | gl_renderer_send_shader_error(ev); 804 | /* continue drawing with the fallback shader */ 805 | } 806 | 807 | for (i = 0, first = 0; i < nfans; i++) { 808 | glDrawArrays(GL_TRIANGLE_FAN, first, vtxcnt[i]); 809 | if (gr->fan_debug) 810 | triangle_fan_debug(gr, sconf, first, vtxcnt[i]); 811 | first += vtxcnt[i]; 812 | } 813 | 814 | glDisableVertexAttribArray(1); 815 | glDisableVertexAttribArray(0); 816 | 817 | gr->vertices.size = 0; 818 | gr->vtxcnt.size = 0; 819 | } 820 | 821 | static int 822 | use_output(struct weston_output *output) 823 | { 824 | static int errored; 825 | struct gl_output_state *go = get_output_state(output); 826 | struct gl_renderer *gr = get_renderer(output->compositor); 827 | EGLBoolean ret; 828 | 829 | ret = eglMakeCurrent(gr->egl_display, go->egl_surface, 830 | go->egl_surface, gr->egl_context); 831 | 832 | if (ret == EGL_FALSE) { 833 | if (errored) 834 | return -1; 835 | errored = 1; 836 | weston_log("Failed to make EGL context current.\n"); 837 | gl_renderer_print_egl_error_state(); 838 | return -1; 839 | } 840 | 841 | return 0; 842 | } 843 | 844 | static int 845 | ensure_surface_buffer_is_ready(struct gl_renderer *gr, 846 | struct gl_surface_state *gs) 847 | { 848 | EGLint attribs[] = { 849 | EGL_SYNC_NATIVE_FENCE_FD_ANDROID, 850 | -1, 851 | EGL_NONE 852 | }; 853 | struct weston_surface *surface = gs->surface; 854 | struct weston_buffer *buffer = gs->buffer_ref.buffer; 855 | EGLSyncKHR sync; 856 | EGLint wait_ret; 857 | EGLint destroy_ret; 858 | 859 | if 
(!buffer) 860 | return 0; 861 | 862 | if (surface->acquire_fence_fd < 0) 863 | return 0; 864 | 865 | /* We should only get a fence if we support EGLSyncKHR, since 866 | * we don't advertise the explicit sync protocol otherwise. */ 867 | assert(gr->has_native_fence_sync); 868 | /* We should only get a fence for non-SHM buffers, since surface 869 | * commit would have failed otherwise. */ 870 | assert(wl_shm_buffer_get(buffer->resource) == NULL); 871 | 872 | attribs[1] = dup(surface->acquire_fence_fd); 873 | if (attribs[1] == -1) { 874 | linux_explicit_synchronization_send_server_error( 875 | gs->surface->synchronization_resource, 876 | "Failed to dup acquire fence"); 877 | return -1; 878 | } 879 | 880 | sync = gr->create_sync(gr->egl_display, 881 | EGL_SYNC_NATIVE_FENCE_ANDROID, 882 | attribs); 883 | if (sync == EGL_NO_SYNC_KHR) { 884 | linux_explicit_synchronization_send_server_error( 885 | gs->surface->synchronization_resource, 886 | "Failed to create EGLSyncKHR object"); 887 | close(attribs[1]); 888 | return -1; 889 | } 890 | 891 | wait_ret = gr->wait_sync(gr->egl_display, sync, 0); 892 | if (wait_ret == EGL_FALSE) { 893 | linux_explicit_synchronization_send_server_error( 894 | gs->surface->synchronization_resource, 895 | "Failed to wait on EGLSyncKHR object"); 896 | /* Continue to try to destroy the sync object. */ 897 | } 898 | 899 | 900 | destroy_ret = gr->destroy_sync(gr->egl_display, sync); 901 | if (destroy_ret == EGL_FALSE) { 902 | linux_explicit_synchronization_send_server_error( 903 | gs->surface->synchronization_resource, 904 | "Failed to destroy on EGLSyncKHR object"); 905 | } 906 | 907 | return (wait_ret == EGL_TRUE && destroy_ret == EGL_TRUE) ? 0 : -1; 908 | } 909 | 910 | 911 | /* Checks if a view needs to be censored on an output 912 | * Checks for 2 types of censor requirements 913 | * - recording_censor: Censor protected view when a 914 | * protected view is captured. 915 | * - unprotected_censor: Censor regions of protected views 916 | * when displayed on an output which has lower protection capability. 917 | * If censoring is needed, smashes the GL shader config. 
918 | */ 919 | static void 920 | maybe_censor_override(struct gl_shader_config *sconf, 921 | struct weston_output *output, 922 | struct weston_view *ev) 923 | { 924 | const struct gl_shader_config alt = { 925 | .req = { 926 | .variant = SHADER_VARIANT_SOLID, 927 | }, 928 | .projection = sconf->projection, 929 | .view_alpha = sconf->view_alpha, 930 | .unicolor = { 0.40, 0.0, 0.0, 1.0 }, 931 | }; 932 | struct gl_surface_state *gs = get_surface_state(ev->surface); 933 | bool recording_censor = 934 | (output->disable_planes > 0) && 935 | (ev->surface->desired_protection > WESTON_HDCP_DISABLE); 936 | 937 | bool unprotected_censor = 938 | (ev->surface->desired_protection > output->current_protection); 939 | 940 | if (gs->direct_display) { 941 | *sconf = alt; 942 | return; 943 | } 944 | 945 | /* When not in enforced mode, the client is notified of the protection */ 946 | /* change, so content censoring is not required */ 947 | if (ev->surface->protection_mode != 948 | WESTON_SURFACE_PROTECTION_MODE_ENFORCED) 949 | return; 950 | 951 | if (recording_censor || unprotected_censor) 952 | *sconf = alt; 953 | } 954 | 955 | static void 956 | gl_shader_config_set_input_textures(struct gl_shader_config *sconf, 957 | struct gl_surface_state *gs) 958 | { 959 | int i; 960 | 961 | sconf->req.variant = gs->shader_variant; 962 | 963 | for (i = 0; i < 4; i++) 964 | sconf->unicolor[i] = gs->color[i]; 965 | 966 | assert(gs->num_textures <= GL_SHADER_INPUT_TEX_MAX); 967 | for (i = 0; i < gs->num_textures; i++) 968 | sconf->input_tex[i] = gs->textures[i]; 969 | for (; i < GL_SHADER_INPUT_TEX_MAX; i++) 970 | sconf->input_tex[i] = 0; 971 | } 972 | 973 | static bool 974 | gl_shader_config_init_for_view(struct gl_shader_config *sconf, 975 | struct weston_view *view, 976 | struct weston_output *output, 977 | GLint filter) 978 | { 979 | struct gl_surface_state *gs = get_surface_state(view->surface); 980 | struct gl_output_state *go = get_output_state(output); 981 | 982 | *sconf = (struct gl_shader_config) { 983 | .projection = go->output_matrix, 984 | .view_alpha = view->alpha, 985 | .input_tex_filter = filter, 986 | }; 987 | 988 | gl_shader_config_set_input_textures(sconf, gs); 989 | 990 | return true; 991 | } 992 | 993 | static void 994 | draw_view(struct weston_view *ev, struct weston_output *output, 995 | pixman_region32_t *damage) /* in global coordinates */ 996 | { 997 | struct weston_compositor *ec = ev->surface->compositor; 998 | struct gl_renderer *gr = get_renderer(ec); 999 | struct gl_surface_state *gs = get_surface_state(ev->surface); 1000 | /* repaint bounding region in global coordinates: */ 1001 | pixman_region32_t repaint; 1002 | /* opaque region in surface coordinates: */ 1003 | pixman_region32_t surface_opaque; 1004 | /* non-opaque region in surface coordinates: */ 1005 | pixman_region32_t surface_blend; 1006 | GLint filter; 1007 | struct gl_shader_config sconf; 1008 | 1009 | /* In case of a runtime switch of renderers, we may not have received 1010 | * an attach for this surface since the switch. In that case we don't 1011 | * have a valid buffer or a proper shader set up so skip rendering. 
*/ 1012 | if (gs->shader_variant == SHADER_VARIANT_NONE && !gs->direct_display) 1013 | return; 1014 | 1015 | pixman_region32_init(&repaint); 1016 | pixman_region32_intersect(&repaint, 1017 | &ev->transform.boundingbox, damage); 1018 | pixman_region32_subtract(&repaint, &repaint, &ev->clip); 1019 | 1020 | if (!pixman_region32_not_empty(&repaint)) 1021 | goto out; 1022 | 1023 | if (ensure_surface_buffer_is_ready(gr, gs) < 0) 1024 | goto out; 1025 | 1026 | glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA); 1027 | 1028 | if (ev->transform.enabled || output->zoom.active || 1029 | output->current_scale != ev->surface->buffer_viewport.buffer.scale) 1030 | filter = GL_LINEAR; 1031 | else 1032 | filter = GL_NEAREST; 1033 | 1034 | if (!gl_shader_config_init_for_view(&sconf, ev, output, filter)) 1035 | goto out; 1036 | 1037 | /* blended region is whole surface minus opaque region: */ 1038 | pixman_region32_init_rect(&surface_blend, 0, 0, 1039 | ev->surface->width, ev->surface->height); 1040 | if (ev->geometry.scissor_enabled) 1041 | pixman_region32_intersect(&surface_blend, &surface_blend, 1042 | &ev->geometry.scissor); 1043 | pixman_region32_subtract(&surface_blend, &surface_blend, 1044 | &ev->surface->opaque); 1045 | 1046 | /* XXX: Should we be using ev->transform.opaque here? */ 1047 | pixman_region32_init(&surface_opaque); 1048 | if (ev->geometry.scissor_enabled) 1049 | pixman_region32_intersect(&surface_opaque, 1050 | &ev->surface->opaque, 1051 | &ev->geometry.scissor); 1052 | else 1053 | pixman_region32_copy(&surface_opaque, &ev->surface->opaque); 1054 | 1055 | maybe_censor_override(&sconf, output, ev); 1056 | 1057 | if (pixman_region32_not_empty(&surface_opaque)) { 1058 | struct gl_shader_config alt = sconf; 1059 | 1060 | if (alt.req.variant == SHADER_VARIANT_RGBA) { 1061 | /* Special case for RGBA textures with possibly 1062 | * bad data in alpha channel: use the shader 1063 | * that forces texture alpha = 1.0. 1064 | * Xwayland surfaces need this. 1065 | */ 1066 | alt.req.variant = SHADER_VARIANT_RGBX; 1067 | } 1068 | 1069 | if (ev->alpha < 1.0) 1070 | glEnable(GL_BLEND); 1071 | else 1072 | glDisable(GL_BLEND); 1073 | 1074 | repaint_region(gr, ev, &repaint, &surface_opaque, &alt); 1075 | gs->used_in_output_repaint = true; 1076 | } 1077 | 1078 | if (pixman_region32_not_empty(&surface_blend)) { 1079 | glEnable(GL_BLEND); 1080 | repaint_region(gr, ev, &repaint, &surface_blend, &sconf); 1081 | gs->used_in_output_repaint = true; 1082 | } 1083 | 1084 | pixman_region32_fini(&surface_blend); 1085 | pixman_region32_fini(&surface_opaque); 1086 | 1087 | out: 1088 | pixman_region32_fini(&repaint); 1089 | } 1090 | 1091 | static void 1092 | repaint_views(struct weston_output *output, pixman_region32_t *damage) 1093 | { 1094 | struct weston_compositor *compositor = output->compositor; 1095 | struct weston_view *view; 1096 | 1097 | wl_list_for_each_reverse(view, &compositor->view_list, link) 1098 | if (view->plane == &compositor->primary_plane) 1099 | draw_view(view, output, damage); 1100 | } 1101 | 1102 | static int 1103 | gl_renderer_create_fence_fd(struct weston_output *output); 1104 | 1105 | /* Updates the release fences of surfaces that were used in the current output 1106 | * repaint. Should only be used from gl_renderer_repaint_output, so that the 1107 | * information in gl_surface_state.used_in_output_repaint is accurate. 
1108 | */ 1109 | static void 1110 | update_buffer_release_fences(struct weston_compositor *compositor, 1111 | struct weston_output *output) 1112 | { 1113 | struct weston_view *view; 1114 | 1115 | wl_list_for_each_reverse(view, &compositor->view_list, link) { 1116 | struct gl_surface_state *gs; 1117 | struct weston_buffer_release *buffer_release; 1118 | int fence_fd; 1119 | 1120 | if (view->plane != &compositor->primary_plane) 1121 | continue; 1122 | 1123 | gs = get_surface_state(view->surface); 1124 | buffer_release = gs->buffer_release_ref.buffer_release; 1125 | 1126 | if (!gs->used_in_output_repaint || !buffer_release) 1127 | continue; 1128 | 1129 | fence_fd = gl_renderer_create_fence_fd(output); 1130 | 1131 | /* If we have a buffer_release then it means we support fences, 1132 | * and we should be able to create the release fence. If we 1133 | * can't, something has gone horribly wrong, so disconnect the 1134 | * client. 1135 | */ 1136 | if (fence_fd == -1) { 1137 | linux_explicit_synchronization_send_server_error( 1138 | buffer_release->resource, 1139 | "Failed to create release fence"); 1140 | fd_clear(&buffer_release->fence_fd); 1141 | continue; 1142 | } 1143 | 1144 | /* At the moment it is safe to just replace the fence_fd, 1145 | * discarding the previous one: 1146 | * 1147 | * 1. If the previous fence fd represents a sync fence from 1148 | * a previous repaint cycle, that fence fd is now not 1149 | * sufficient to provide the release guarantee and should 1150 | * be replaced. 1151 | * 1152 | * 2. If the fence fd represents a sync fence from another 1153 | * output in the same repaint cycle, it's fine to replace 1154 | * it since we are rendering to all outputs using the same 1155 | * EGL context, so a fence issued for a later output rendering 1156 | * is guaranteed to signal after fences for previous output 1157 | * renderings. 1158 | * 1159 | * Note that the above is only valid if the buffer_release 1160 | * fences only originate from the GL renderer, which guarantees 1161 | * a total order of operations and fences. If we introduce 1162 | * fences from other sources (e.g., plane out-fences), we will 1163 | * need to merge fences instead. 
1164 | */ 1165 | fd_update(&buffer_release->fence_fd, fence_fd); 1166 | } 1167 | } 1168 | 1169 | static void 1170 | draw_output_border_texture(struct gl_renderer *gr, 1171 | struct gl_output_state *go, 1172 | struct gl_shader_config *sconf, 1173 | enum gl_renderer_border_side side, 1174 | int32_t x, int32_t y, 1175 | int32_t width, int32_t height) 1176 | { 1177 | struct gl_border_image *img = &go->borders[side]; 1178 | static GLushort indices [] = { 0, 1, 3, 3, 1, 2 }; 1179 | 1180 | if (!img->data) { 1181 | if (img->tex) { 1182 | glDeleteTextures(1, &img->tex); 1183 | img->tex = 0; 1184 | } 1185 | 1186 | return; 1187 | } 1188 | 1189 | if (!img->tex) { 1190 | glGenTextures(1, &img->tex); 1191 | glBindTexture(GL_TEXTURE_2D, img->tex); 1192 | 1193 | glTexParameteri(GL_TEXTURE_2D, 1194 | GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); 1195 | glTexParameteri(GL_TEXTURE_2D, 1196 | GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); 1197 | } else { 1198 | glBindTexture(GL_TEXTURE_2D, img->tex); 1199 | } 1200 | 1201 | if (go->border_status & (1 << side)) { 1202 | glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 0); 1203 | glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0); 1204 | glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0); 1205 | glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, 1206 | img->tex_width, img->height, 0, 1207 | GL_BGRA_EXT, GL_UNSIGNED_BYTE, img->data); 1208 | } 1209 | 1210 | sconf->input_tex_filter = GL_NEAREST; 1211 | sconf->input_tex[0] = img->tex; 1212 | gl_renderer_use_program(gr, sconf); 1213 | 1214 | GLfloat texcoord[] = { 1215 | 0.0f, 0.0f, 1216 | (GLfloat)img->width / (GLfloat)img->tex_width, 0.0f, 1217 | (GLfloat)img->width / (GLfloat)img->tex_width, 1.0f, 1218 | 0.0f, 1.0f, 1219 | }; 1220 | 1221 | GLfloat verts[] = { 1222 | x, y, 1223 | x + width, y, 1224 | x + width, y + height, 1225 | x, y + height 1226 | }; 1227 | 1228 | glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts); 1229 | glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, texcoord); 1230 | glEnableVertexAttribArray(0); 1231 | glEnableVertexAttribArray(1); 1232 | 1233 | glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices); 1234 | 1235 | glDisableVertexAttribArray(1); 1236 | glDisableVertexAttribArray(0); 1237 | } 1238 | 1239 | static int 1240 | output_has_borders(struct weston_output *output) 1241 | { 1242 | struct gl_output_state *go = get_output_state(output); 1243 | 1244 | return go->borders[GL_RENDERER_BORDER_TOP].data || 1245 | go->borders[GL_RENDERER_BORDER_RIGHT].data || 1246 | go->borders[GL_RENDERER_BORDER_BOTTOM].data || 1247 | go->borders[GL_RENDERER_BORDER_LEFT].data; 1248 | } 1249 | 1250 | static void 1251 | draw_output_borders(struct weston_output *output, 1252 | enum gl_border_status border_status) 1253 | { 1254 | struct gl_shader_config sconf = { 1255 | .req = { 1256 | .variant = SHADER_VARIANT_RGBA, 1257 | }, 1258 | .view_alpha = 1.0f, 1259 | }; 1260 | struct gl_output_state *go = get_output_state(output); 1261 | struct gl_renderer *gr = get_renderer(output->compositor); 1262 | struct gl_border_image *top, *bottom, *left, *right; 1263 | int full_width, full_height; 1264 | 1265 | if (border_status == BORDER_STATUS_CLEAN) 1266 | return; /* Clean. Nothing to do. 
*/ 1267 | 1268 | top = &go->borders[GL_RENDERER_BORDER_TOP]; 1269 | bottom = &go->borders[GL_RENDERER_BORDER_BOTTOM]; 1270 | left = &go->borders[GL_RENDERER_BORDER_LEFT]; 1271 | right = &go->borders[GL_RENDERER_BORDER_RIGHT]; 1272 | 1273 | full_width = output->current_mode->width + left->width + right->width; 1274 | full_height = output->current_mode->height + top->height + bottom->height; 1275 | 1276 | glDisable(GL_BLEND); 1277 | glViewport(0, 0, full_width, full_height); 1278 | 1279 | weston_matrix_init(&sconf.projection); 1280 | weston_matrix_translate(&sconf.projection, 1281 | -full_width / 2.0, -full_height / 2.0, 0); 1282 | weston_matrix_scale(&sconf.projection, 1283 | 2.0 / full_width, -2.0 / full_height, 1); 1284 | 1285 | glActiveTexture(GL_TEXTURE0); 1286 | 1287 | if (border_status & BORDER_TOP_DIRTY) 1288 | draw_output_border_texture(gr, go, &sconf, GL_RENDERER_BORDER_TOP, 1289 | 0, 0, 1290 | full_width, top->height); 1291 | if (border_status & BORDER_LEFT_DIRTY) 1292 | draw_output_border_texture(gr, go, &sconf, GL_RENDERER_BORDER_LEFT, 1293 | 0, top->height, 1294 | left->width, output->current_mode->height); 1295 | if (border_status & BORDER_RIGHT_DIRTY) 1296 | draw_output_border_texture(gr, go, &sconf, GL_RENDERER_BORDER_RIGHT, 1297 | full_width - right->width, top->height, 1298 | right->width, output->current_mode->height); 1299 | if (border_status & BORDER_BOTTOM_DIRTY) 1300 | draw_output_border_texture(gr, go, &sconf, GL_RENDERER_BORDER_BOTTOM, 1301 | 0, full_height - bottom->height, 1302 | full_width, bottom->height); 1303 | } 1304 | 1305 | static void 1306 | output_get_border_damage(struct weston_output *output, 1307 | enum gl_border_status border_status, 1308 | pixman_region32_t *damage) 1309 | { 1310 | struct gl_output_state *go = get_output_state(output); 1311 | struct gl_border_image *top, *bottom, *left, *right; 1312 | int full_width, full_height; 1313 | 1314 | if (border_status == BORDER_STATUS_CLEAN) 1315 | return; /* Clean. Nothing to do. 
*/ 1316 | 1317 | top = &go->borders[GL_RENDERER_BORDER_TOP]; 1318 | bottom = &go->borders[GL_RENDERER_BORDER_BOTTOM]; 1319 | left = &go->borders[GL_RENDERER_BORDER_LEFT]; 1320 | right = &go->borders[GL_RENDERER_BORDER_RIGHT]; 1321 | 1322 | full_width = output->current_mode->width + left->width + right->width; 1323 | full_height = output->current_mode->height + top->height + bottom->height; 1324 | if (border_status & BORDER_TOP_DIRTY) 1325 | pixman_region32_union_rect(damage, damage, 1326 | 0, 0, 1327 | full_width, top->height); 1328 | if (border_status & BORDER_LEFT_DIRTY) 1329 | pixman_region32_union_rect(damage, damage, 1330 | 0, top->height, 1331 | left->width, output->current_mode->height); 1332 | if (border_status & BORDER_RIGHT_DIRTY) 1333 | pixman_region32_union_rect(damage, damage, 1334 | full_width - right->width, top->height, 1335 | right->width, output->current_mode->height); 1336 | if (border_status & BORDER_BOTTOM_DIRTY) 1337 | pixman_region32_union_rect(damage, damage, 1338 | 0, full_height - bottom->height, 1339 | full_width, bottom->height); 1340 | } 1341 | 1342 | static void 1343 | output_get_damage(struct weston_output *output, 1344 | pixman_region32_t *buffer_damage, uint32_t *border_damage) 1345 | { 1346 | struct gl_output_state *go = get_output_state(output); 1347 | struct gl_renderer *gr = get_renderer(output->compositor); 1348 | EGLint buffer_age = 0; 1349 | EGLBoolean ret; 1350 | int i; 1351 | 1352 | if (gr->has_egl_buffer_age) { 1353 | ret = eglQuerySurface(gr->egl_display, go->egl_surface, 1354 | EGL_BUFFER_AGE_EXT, &buffer_age); 1355 | if (ret == EGL_FALSE) { 1356 | weston_log("buffer age query failed.\n"); 1357 | gl_renderer_print_egl_error_state(); 1358 | } 1359 | } else if (go->swap_behavior_is_preserved) { 1360 | buffer_age = 1; 1361 | } 1362 | 1363 | if (buffer_age == 0 || buffer_age - 1 > BUFFER_DAMAGE_COUNT) { 1364 | pixman_region32_copy(buffer_damage, &output->region); 1365 | *border_damage = BORDER_ALL_DIRTY; 1366 | } else { 1367 | for (i = 0; i < buffer_age - 1; i++) 1368 | *border_damage |= go->border_damage[(go->buffer_damage_index + i) % BUFFER_DAMAGE_COUNT]; 1369 | 1370 | if (*border_damage & BORDER_SIZE_CHANGED) { 1371 | /* If we've had a resize, we have to do a full 1372 | * repaint. */ 1373 | *border_damage |= BORDER_ALL_DIRTY; 1374 | pixman_region32_copy(buffer_damage, &output->region); 1375 | } else { 1376 | for (i = 0; i < buffer_age - 1; i++) 1377 | pixman_region32_union(buffer_damage, 1378 | buffer_damage, 1379 | &go->buffer_damage[(go->buffer_damage_index + i) % BUFFER_DAMAGE_COUNT]); 1380 | } 1381 | } 1382 | } 1383 | 1384 | static void 1385 | output_rotate_damage(struct weston_output *output, 1386 | pixman_region32_t *output_damage, 1387 | enum gl_border_status border_status) 1388 | { 1389 | struct gl_output_state *go = get_output_state(output); 1390 | struct gl_renderer *gr = get_renderer(output->compositor); 1391 | 1392 | if (!gr->has_egl_buffer_age) 1393 | return; 1394 | 1395 | go->buffer_damage_index += BUFFER_DAMAGE_COUNT - 1; 1396 | go->buffer_damage_index %= BUFFER_DAMAGE_COUNT; 1397 | 1398 | pixman_region32_copy(&go->buffer_damage[go->buffer_damage_index], output_damage); 1399 | go->border_damage[go->buffer_damage_index] = border_status; 1400 | } 1401 | 1402 | /** 1403 | * Given a region in Weston's (top-left-origin) global co-ordinate space, 1404 | * translate it to the co-ordinate space used by GL for our output 1405 | * rendering. 
This requires shifting it into output co-ordinate space: 1406 | * translating for output offset within the global co-ordinate space, 1407 | * multiplying by output scale to get buffer rather than logical size. 1408 | * 1409 | * Finally, if borders are drawn around the output, we translate the area 1410 | * to account for the border region around the outside, and add any 1411 | * damage if the borders have been redrawn. 1412 | * 1413 | * @param output The output whose co-ordinate space we are after 1414 | * @param global_region The affected region in global co-ordinate space 1415 | * @param[out] rects Y-inverted quads in {x,y,w,h} order; caller must free 1416 | * @param[out] nrects Number of quads (4x number of co-ordinates) 1417 | */ 1418 | static void 1419 | pixman_region_to_egl_y_invert(struct weston_output *output, 1420 | struct pixman_region32 *global_region, 1421 | EGLint **rects, 1422 | EGLint *nrects) 1423 | { 1424 | struct gl_output_state *go = get_output_state(output); 1425 | pixman_region32_t transformed; 1426 | struct pixman_box32 *box; 1427 | int buffer_height; 1428 | EGLint *d; 1429 | int i; 1430 | 1431 | /* Translate from global to output co-ordinate space. */ 1432 | pixman_region32_init(&transformed); 1433 | pixman_region32_copy(&transformed, global_region); 1434 | pixman_region32_translate(&transformed, -output->x, -output->y); 1435 | weston_transformed_region(output->width, output->height, 1436 | output->transform, 1437 | output->current_scale, 1438 | &transformed, &transformed); 1439 | 1440 | /* If we have borders drawn around the output, shift our output damage 1441 | * to account for borders being drawn around the outside, adding any 1442 | * damage resulting from borders being redrawn. */ 1443 | if (output_has_borders(output)) { 1444 | pixman_region32_translate(&transformed, 1445 | go->borders[GL_RENDERER_BORDER_LEFT].width, 1446 | go->borders[GL_RENDERER_BORDER_TOP].height); 1447 | output_get_border_damage(output, go->border_status, 1448 | &transformed); 1449 | } 1450 | 1451 | /* Convert from a Pixman region into {x,y,w,h} quads, flipping in the 1452 | * Y axis to account for GL's lower-left-origin co-ordinate space. 
*/ 1453 | box = pixman_region32_rectangles(&transformed, nrects); 1454 | *rects = malloc(*nrects * 4 * sizeof(EGLint)); 1455 | 1456 | buffer_height = go->borders[GL_RENDERER_BORDER_TOP].height + 1457 | output->current_mode->height + 1458 | go->borders[GL_RENDERER_BORDER_BOTTOM].height; 1459 | 1460 | d = *rects; 1461 | for (i = 0; i < *nrects; ++i) { 1462 | *d++ = box[i].x1; 1463 | *d++ = buffer_height - box[i].y2; 1464 | *d++ = box[i].x2 - box[i].x1; 1465 | *d++ = box[i].y2 - box[i].y1; 1466 | } 1467 | 1468 | pixman_region32_fini(&transformed); 1469 | } 1470 | 1471 | static void 1472 | blit_shadow_to_output(struct weston_output *output, 1473 | pixman_region32_t *output_damage) 1474 | { 1475 | struct gl_output_state *go = get_output_state(output); 1476 | const struct gl_shader_config sconf = { 1477 | .req = { 1478 | .variant = SHADER_VARIANT_RGBA, 1479 | }, 1480 | .projection = { 1481 | .d = { /* transpose */ 1482 | 2.0f, 0.0f, 0.0f, 0.0f, 1483 | 0.0f, 2.0f, 0.0f, 0.0f, 1484 | 0.0f, 0.0f, 1.0f, 0.0f, 1485 | -1.0f, -1.0f, 0.0f, 1.0f 1486 | }, 1487 | .type = WESTON_MATRIX_TRANSFORM_SCALE | 1488 | WESTON_MATRIX_TRANSFORM_TRANSLATE, 1489 | }, 1490 | .view_alpha = 1.0f, 1491 | .input_tex_filter = GL_NEAREST, 1492 | .input_tex[0] = go->shadow.tex, 1493 | }; 1494 | struct gl_renderer *gr = get_renderer(output->compositor); 1495 | double width = output->current_mode->width; 1496 | double height = output->current_mode->height; 1497 | pixman_box32_t *rects; 1498 | int n_rects; 1499 | int i; 1500 | pixman_region32_t translated_damage; 1501 | GLfloat verts[4 * 2]; 1502 | 1503 | pixman_region32_init(&translated_damage); 1504 | 1505 | gl_renderer_use_program(gr, &sconf); 1506 | glDisable(GL_BLEND); 1507 | 1508 | /* output_damage is in global coordinates */ 1509 | pixman_region32_intersect(&translated_damage, output_damage, 1510 | &output->region); 1511 | /* Convert to output pixel coordinates in-place */ 1512 | weston_output_region_from_global(output, &translated_damage); 1513 | 1514 | rects = pixman_region32_rectangles(&translated_damage, &n_rects); 1515 | for (i = 0; i < n_rects; i++) { 1516 | 1517 | verts[0] = rects[i].x1 / width; 1518 | verts[1] = (height - rects[i].y1) / height; 1519 | verts[2] = rects[i].x2 / width; 1520 | verts[3] = (height - rects[i].y1) / height; 1521 | 1522 | verts[4] = rects[i].x2 / width; 1523 | verts[5] = (height - rects[i].y2) / height; 1524 | verts[6] = rects[i].x1 / width; 1525 | verts[7] = (height - rects[i].y2) / height; 1526 | 1527 | glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts); 1528 | glEnableVertexAttribArray(0); 1529 | 1530 | /* texcoord: */ 1531 | glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, verts); 1532 | glEnableVertexAttribArray(1); 1533 | 1534 | glDrawArrays(GL_TRIANGLE_FAN, 0, 4); 1535 | } 1536 | 1537 | glBindTexture(GL_TEXTURE_2D, 0); 1538 | pixman_region32_fini(&translated_damage); 1539 | } 1540 | 1541 | /* NOTE: We now allow falling back to ARGB gl visuals when XRGB is 1542 | * unavailable, so we're assuming the background has no transparency 1543 | * and that everything with a blend, like drop shadows, will have something 1544 | * opaque (like the background) drawn underneath it. 1545 | * 1546 | * Depending on the underlying hardware, violating that assumption could 1547 | * result in seeing through to another display plane. 
1548 | */ 1549 | static void 1550 | gl_renderer_repaint_output(struct weston_output *output, 1551 | pixman_region32_t *output_damage) 1552 | { 1553 | struct gl_output_state *go = get_output_state(output); 1554 | struct weston_compositor *compositor = output->compositor; 1555 | struct gl_renderer *gr = get_renderer(compositor); 1556 | EGLBoolean ret; 1557 | static int errored; 1558 | /* areas we've damaged since we last used this buffer */ 1559 | pixman_region32_t previous_damage; 1560 | /* total area we need to repaint this time */ 1561 | pixman_region32_t total_damage; 1562 | enum gl_border_status border_status = BORDER_STATUS_CLEAN; 1563 | struct weston_view *view; 1564 | 1565 | if (use_output(output) < 0) 1566 | return; 1567 | 1568 | /* Clear the used_in_output_repaint flag, so that we can properly track 1569 | * which surfaces were used in this output repaint. */ 1570 | wl_list_for_each_reverse(view, &compositor->view_list, link) { 1571 | if (view->plane == &compositor->primary_plane) { 1572 | struct gl_surface_state *gs = 1573 | get_surface_state(view->surface); 1574 | gs->used_in_output_repaint = false; 1575 | } 1576 | } 1577 | 1578 | if (go->begin_render_sync != EGL_NO_SYNC_KHR) 1579 | gr->destroy_sync(gr->egl_display, go->begin_render_sync); 1580 | if (go->end_render_sync != EGL_NO_SYNC_KHR) 1581 | gr->destroy_sync(gr->egl_display, go->end_render_sync); 1582 | 1583 | go->begin_render_sync = create_render_sync(gr); 1584 | 1585 | /* Calculate the global GL matrix */ 1586 | go->output_matrix = output->matrix; 1587 | weston_matrix_translate(&go->output_matrix, 1588 | -(output->current_mode->width / 2.0), 1589 | -(output->current_mode->height / 2.0), 0); 1590 | weston_matrix_scale(&go->output_matrix, 1591 | 2.0 / output->current_mode->width, 1592 | -2.0 / output->current_mode->height, 1); 1593 | 1594 | /* If using shadow, redirect all drawing to it first. */ 1595 | if (shadow_exists(go)) { 1596 | /* XXX: Shadow code does not support resizing. */ 1597 | assert(output->current_mode->width == go->shadow.width); 1598 | assert(output->current_mode->height == go->shadow.height); 1599 | 1600 | glBindFramebuffer(GL_FRAMEBUFFER, go->shadow.fbo); 1601 | glViewport(0, 0, go->shadow.width, go->shadow.height); 1602 | } else { 1603 | glBindFramebuffer(GL_FRAMEBUFFER, 0); 1604 | glViewport(go->borders[GL_RENDERER_BORDER_LEFT].width, 1605 | go->borders[GL_RENDERER_BORDER_BOTTOM].height, 1606 | output->current_mode->width, 1607 | output->current_mode->height); 1608 | } 1609 | 1610 | /* In fan debug mode, redraw everything to make sure that we clear any 1611 | * fans left over from previous draws on this buffer. 1612 | * This precludes the use of EGL_EXT_swap_buffers_with_damage and 1613 | * EGL_KHR_partial_update, since we damage the whole area. */ 1614 | if (gr->fan_debug) { 1615 | pixman_region32_t undamaged; 1616 | pixman_region32_init(&undamaged); 1617 | pixman_region32_subtract(&undamaged, &output->region, 1618 | output_damage); 1619 | gr->fan_debug = false; 1620 | repaint_views(output, &undamaged); 1621 | gr->fan_debug = true; 1622 | pixman_region32_fini(&undamaged); 1623 | } 1624 | 1625 | /* previous_damage covers regions damaged in previous paints since we 1626 | * last used this buffer */ 1627 | pixman_region32_init(&previous_damage); 1628 | pixman_region32_init(&total_damage); /* total area to redraw */ 1629 | 1630 | /* Update previous_damage using buffer_age (if available), and store 1631 | * current damaged region for future use. 
*/ 1632 | output_get_damage(output, &previous_damage, &border_status); 1633 | output_rotate_damage(output, output_damage, go->border_status); 1634 | 1635 | /* Redraw both areas which have changed since we last used this buffer, 1636 | * as well as the areas we now want to repaint, to make sure the 1637 | * buffer is up to date. */ 1638 | pixman_region32_union(&total_damage, &previous_damage, output_damage); 1639 | border_status |= go->border_status; 1640 | 1641 | if (gr->has_egl_partial_update && !gr->fan_debug) { 1642 | int n_egl_rects; 1643 | EGLint *egl_rects; 1644 | 1645 | /* For partial_update, we need to pass the region which has 1646 | * changed since we last rendered into this specific buffer; 1647 | * this is total_damage. */ 1648 | pixman_region_to_egl_y_invert(output, &total_damage, 1649 | &egl_rects, &n_egl_rects); 1650 | gr->set_damage_region(gr->egl_display, go->egl_surface, 1651 | egl_rects, n_egl_rects); 1652 | free(egl_rects); 1653 | } 1654 | 1655 | if (shadow_exists(go)) { 1656 | /* Repaint into shadow. */ 1657 | if (compositor->test_data.test_quirks.gl_force_full_redraw_of_shadow_fb) 1658 | repaint_views(output, &output->region); 1659 | else 1660 | repaint_views(output, output_damage); 1661 | 1662 | glBindFramebuffer(GL_FRAMEBUFFER, 0); 1663 | glViewport(go->borders[GL_RENDERER_BORDER_LEFT].width, 1664 | go->borders[GL_RENDERER_BORDER_BOTTOM].height, 1665 | output->current_mode->width, 1666 | output->current_mode->height); 1667 | blit_shadow_to_output(output, &total_damage); 1668 | } else { 1669 | repaint_views(output, &total_damage); 1670 | } 1671 | 1672 | pixman_region32_fini(&total_damage); 1673 | pixman_region32_fini(&previous_damage); 1674 | 1675 | draw_output_borders(output, border_status); 1676 | 1677 | wl_signal_emit(&output->frame_signal, output_damage); 1678 | 1679 | go->end_render_sync = create_render_sync(gr); 1680 | 1681 | if (gr->swap_buffers_with_damage && !gr->fan_debug) { 1682 | int n_egl_rects; 1683 | EGLint *egl_rects; 1684 | 1685 | /* For swap_buffers_with_damage, we need to pass the region 1686 | * which has changed since the previous SwapBuffers on this 1687 | * surface - this is output_damage. */ 1688 | pixman_region_to_egl_y_invert(output, output_damage, 1689 | &egl_rects, &n_egl_rects); 1690 | ret = gr->swap_buffers_with_damage(gr->egl_display, 1691 | go->egl_surface, 1692 | egl_rects, n_egl_rects); 1693 | free(egl_rects); 1694 | } else { 1695 | ret = eglSwapBuffers(gr->egl_display, go->egl_surface); 1696 | } 1697 | 1698 | if (ret == EGL_FALSE && !errored) { 1699 | errored = 1; 1700 | weston_log("Failed in eglSwapBuffers.\n"); 1701 | gl_renderer_print_egl_error_state(); 1702 | } 1703 | 1704 | go->border_status = BORDER_STATUS_CLEAN; 1705 | 1706 | /* We have to submit the render sync objects after swap buffers, since 1707 | * the objects get assigned a valid sync file fd only after a gl flush. 
1708 | */ 1709 | timeline_submit_render_sync(gr, output, go->begin_render_sync, 1710 | TIMELINE_RENDER_POINT_TYPE_BEGIN); 1711 | timeline_submit_render_sync(gr, output, go->end_render_sync, 1712 | TIMELINE_RENDER_POINT_TYPE_END); 1713 | 1714 | update_buffer_release_fences(compositor, output); 1715 | 1716 | gl_renderer_garbage_collect_programs(gr); 1717 | } 1718 | 1719 | static int 1720 | gl_renderer_read_pixels(struct weston_output *output, 1721 | pixman_format_code_t format, void *pixels, 1722 | uint32_t x, uint32_t y, 1723 | uint32_t width, uint32_t height) 1724 | { 1725 | GLenum gl_format; 1726 | struct gl_output_state *go = get_output_state(output); 1727 | 1728 | x += go->borders[GL_RENDERER_BORDER_LEFT].width; 1729 | y += go->borders[GL_RENDERER_BORDER_BOTTOM].height; 1730 | 1731 | switch (format) { 1732 | case PIXMAN_a8r8g8b8: 1733 | gl_format = GL_BGRA_EXT; 1734 | break; 1735 | case PIXMAN_a8b8g8r8: 1736 | gl_format = GL_RGBA; 1737 | break; 1738 | default: 1739 | return -1; 1740 | } 1741 | 1742 | if (use_output(output) < 0) 1743 | return -1; 1744 | 1745 | glPixelStorei(GL_PACK_ALIGNMENT, 1); 1746 | glReadPixels(x, y, width, height, gl_format, 1747 | GL_UNSIGNED_BYTE, pixels); 1748 | 1749 | return 0; 1750 | } 1751 | 1752 | static GLenum 1753 | gl_format_from_internal(GLenum internal_format) 1754 | { 1755 | switch (internal_format) { 1756 | case GL_R8_EXT: 1757 | return GL_RED_EXT; 1758 | case GL_RG8_EXT: 1759 | return GL_RG_EXT; 1760 | default: 1761 | return internal_format; 1762 | } 1763 | } 1764 | 1765 | static void 1766 | gl_renderer_flush_damage(struct weston_surface *surface) 1767 | { 1768 | const struct weston_testsuite_quirks *quirks = 1769 | &surface->compositor->test_data.test_quirks; 1770 | struct gl_surface_state *gs = get_surface_state(surface); 1771 | struct weston_buffer *buffer = gs->buffer_ref.buffer; 1772 | struct weston_view *view; 1773 | bool texture_used; 1774 | pixman_box32_t *rectangles; 1775 | uint8_t *data; 1776 | int i, j, n; 1777 | 1778 | pixman_region32_union(&gs->texture_damage, 1779 | &gs->texture_damage, &surface->damage); 1780 | 1781 | if (!buffer) 1782 | return; 1783 | 1784 | /* Avoid upload, if the texture won't be used this time. 1785 | * We still accumulate the damage in texture_damage, and 1786 | * hold the reference to the buffer, in case the surface 1787 | * migrates back to the primary plane. 
1788 | */ 1789 | texture_used = false; 1790 | wl_list_for_each(view, &surface->views, surface_link) { 1791 | if (view->plane == &surface->compositor->primary_plane) { 1792 | texture_used = true; 1793 | break; 1794 | } 1795 | } 1796 | if (!texture_used) 1797 | return; 1798 | 1799 | if (!pixman_region32_not_empty(&gs->texture_damage) && 1800 | !gs->needs_full_upload) 1801 | goto done; 1802 | 1803 | data = wl_shm_buffer_get_data(buffer->shm_buffer); 1804 | 1805 | glActiveTexture(GL_TEXTURE0); 1806 | 1807 | if (gs->needs_full_upload || quirks->gl_force_full_upload) { 1808 | glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0); 1809 | glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0); 1810 | wl_shm_buffer_begin_access(buffer->shm_buffer); 1811 | for (j = 0; j < gs->num_textures; j++) { 1812 | glBindTexture(GL_TEXTURE_2D, gs->textures[j]); 1813 | glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 1814 | gs->pitch / gs->hsub[j]); 1815 | glTexImage2D(GL_TEXTURE_2D, 0, 1816 | gs->gl_format[j], 1817 | gs->pitch / gs->hsub[j], 1818 | buffer->height / gs->vsub[j], 1819 | 0, 1820 | gl_format_from_internal(gs->gl_format[j]), 1821 | gs->gl_pixel_type, 1822 | data + gs->offset[j]); 1823 | } 1824 | wl_shm_buffer_end_access(buffer->shm_buffer); 1825 | goto done; 1826 | } 1827 | 1828 | rectangles = pixman_region32_rectangles(&gs->texture_damage, &n); 1829 | wl_shm_buffer_begin_access(buffer->shm_buffer); 1830 | for (i = 0; i < n; i++) { 1831 | pixman_box32_t r; 1832 | 1833 | r = weston_surface_to_buffer_rect(surface, rectangles[i]); 1834 | 1835 | for (j = 0; j < gs->num_textures; j++) { 1836 | glBindTexture(GL_TEXTURE_2D, gs->textures[j]); 1837 | glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 1838 | gs->pitch / gs->hsub[j]); 1839 | glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 1840 | r.x1 / gs->hsub[j]); 1841 | glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 1842 | r.y1 / gs->hsub[j]); 1843 | glTexSubImage2D(GL_TEXTURE_2D, 0, 1844 | r.x1 / gs->hsub[j], 1845 | r.y1 / gs->vsub[j], 1846 | (r.x2 - r.x1) / gs->hsub[j], 1847 | (r.y2 - r.y1) / gs->vsub[j], 1848 | gl_format_from_internal(gs->gl_format[j]), 1849 | gs->gl_pixel_type, 1850 | data + gs->offset[j]); 1851 | } 1852 | } 1853 | wl_shm_buffer_end_access(buffer->shm_buffer); 1854 | 1855 | done: 1856 | pixman_region32_fini(&gs->texture_damage); 1857 | pixman_region32_init(&gs->texture_damage); 1858 | gs->needs_full_upload = false; 1859 | 1860 | weston_buffer_reference(&gs->buffer_ref, NULL); 1861 | weston_buffer_release_reference(&gs->buffer_release_ref, NULL); 1862 | } 1863 | 1864 | static void 1865 | ensure_textures(struct gl_surface_state *gs, GLenum target, int num_textures) 1866 | { 1867 | int i; 1868 | 1869 | if (num_textures <= gs->num_textures) 1870 | return; 1871 | 1872 | glActiveTexture(GL_TEXTURE0); 1873 | 1874 | for (i = gs->num_textures; i < num_textures; i++) { 1875 | glGenTextures(1, &gs->textures[i]); 1876 | glBindTexture(target, gs->textures[i]); 1877 | glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); 1878 | glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); 1879 | } 1880 | gs->num_textures = num_textures; 1881 | glBindTexture(target, 0); 1882 | } 1883 | 1884 | static void 1885 | gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer, 1886 | struct wl_shm_buffer *shm_buffer) 1887 | { 1888 | struct weston_compositor *ec = es->compositor; 1889 | struct gl_renderer *gr = get_renderer(ec); 1890 | struct gl_surface_state *gs = get_surface_state(es); 1891 | GLenum gl_format[3] = {0, 0, 0}; 1892 | GLenum gl_pixel_type; 1893 | int pitch; 1894 | int 
num_planes; 1895 | 1896 | buffer->shm_buffer = shm_buffer; 1897 | buffer->width = wl_shm_buffer_get_width(shm_buffer); 1898 | buffer->height = wl_shm_buffer_get_height(shm_buffer); 1899 | 1900 | num_planes = 1; 1901 | gs->offset[0] = 0; 1902 | gs->hsub[0] = 1; 1903 | gs->vsub[0] = 1; 1904 | 1905 | switch (wl_shm_buffer_get_format(shm_buffer)) { 1906 | case WL_SHM_FORMAT_XRGB8888: 1907 | gs->shader_variant = SHADER_VARIANT_RGBX; 1908 | pitch = wl_shm_buffer_get_stride(shm_buffer) / 4; 1909 | gl_format[0] = GL_BGRA_EXT; 1910 | gl_pixel_type = GL_UNSIGNED_BYTE; 1911 | es->is_opaque = true; 1912 | break; 1913 | case WL_SHM_FORMAT_ARGB8888: 1914 | gs->shader_variant = SHADER_VARIANT_RGBA; 1915 | pitch = wl_shm_buffer_get_stride(shm_buffer) / 4; 1916 | gl_format[0] = GL_BGRA_EXT; 1917 | gl_pixel_type = GL_UNSIGNED_BYTE; 1918 | es->is_opaque = false; 1919 | break; 1920 | case WL_SHM_FORMAT_RGB565: 1921 | gs->shader_variant = SHADER_VARIANT_RGBX; 1922 | pitch = wl_shm_buffer_get_stride(shm_buffer) / 2; 1923 | gl_format[0] = GL_RGB; 1924 | gl_pixel_type = GL_UNSIGNED_SHORT_5_6_5; 1925 | es->is_opaque = true; 1926 | break; 1927 | case WL_SHM_FORMAT_YUV420: 1928 | gs->shader_variant = SHADER_VARIANT_Y_U_V; 1929 | pitch = wl_shm_buffer_get_stride(shm_buffer); 1930 | gl_pixel_type = GL_UNSIGNED_BYTE; 1931 | num_planes = 3; 1932 | gs->offset[1] = gs->offset[0] + (pitch / gs->hsub[0]) * 1933 | (buffer->height / gs->vsub[0]); 1934 | gs->hsub[1] = 2; 1935 | gs->vsub[1] = 2; 1936 | gs->offset[2] = gs->offset[1] + (pitch / gs->hsub[1]) * 1937 | (buffer->height / gs->vsub[1]); 1938 | gs->hsub[2] = 2; 1939 | gs->vsub[2] = 2; 1940 | if (gr->has_gl_texture_rg) { 1941 | gl_format[0] = GL_R8_EXT; 1942 | gl_format[1] = GL_R8_EXT; 1943 | gl_format[2] = GL_R8_EXT; 1944 | } else { 1945 | gl_format[0] = GL_LUMINANCE; 1946 | gl_format[1] = GL_LUMINANCE; 1947 | gl_format[2] = GL_LUMINANCE; 1948 | } 1949 | es->is_opaque = true; 1950 | break; 1951 | case WL_SHM_FORMAT_NV12: 1952 | pitch = wl_shm_buffer_get_stride(shm_buffer); 1953 | gl_pixel_type = GL_UNSIGNED_BYTE; 1954 | num_planes = 2; 1955 | gs->offset[1] = gs->offset[0] + (pitch / gs->hsub[0]) * 1956 | (buffer->height / gs->vsub[0]); 1957 | gs->hsub[1] = 2; 1958 | gs->vsub[1] = 2; 1959 | if (gr->has_gl_texture_rg) { 1960 | gs->shader_variant = SHADER_VARIANT_Y_UV; 1961 | gl_format[0] = GL_R8_EXT; 1962 | gl_format[1] = GL_RG8_EXT; 1963 | } else { 1964 | gs->shader_variant = SHADER_VARIANT_Y_XUXV; 1965 | gl_format[0] = GL_LUMINANCE; 1966 | gl_format[1] = GL_LUMINANCE_ALPHA; 1967 | } 1968 | es->is_opaque = true; 1969 | break; 1970 | case WL_SHM_FORMAT_YUYV: 1971 | gs->shader_variant = SHADER_VARIANT_Y_XUXV; 1972 | pitch = wl_shm_buffer_get_stride(shm_buffer) / 2; 1973 | gl_pixel_type = GL_UNSIGNED_BYTE; 1974 | num_planes = 2; 1975 | gs->offset[1] = 0; 1976 | gs->hsub[1] = 2; 1977 | gs->vsub[1] = 1; 1978 | if (gr->has_gl_texture_rg) 1979 | gl_format[0] = GL_RG8_EXT; 1980 | else 1981 | gl_format[0] = GL_LUMINANCE_ALPHA; 1982 | gl_format[1] = GL_BGRA_EXT; 1983 | es->is_opaque = true; 1984 | break; 1985 | case WL_SHM_FORMAT_XYUV8888: 1986 | /* 1987 | * [31:0] X:Y:Cb:Cr 8:8:8:8 little endian 1988 | * a:b: g: r in SHADER_VARIANT_XYUV 1989 | */ 1990 | gs->shader_variant = SHADER_VARIANT_XYUV; 1991 | pitch = wl_shm_buffer_get_stride(shm_buffer) / 4; 1992 | gl_format[0] = GL_RGBA; 1993 | gl_pixel_type = GL_UNSIGNED_BYTE; 1994 | es->is_opaque = true; 1995 | break; 1996 | default: 1997 | weston_log("warning: unknown shm buffer format: %08x\n", 1998 | 
wl_shm_buffer_get_format(shm_buffer)); 1999 | return; 2000 | } 2001 | 2002 | /* Only allocate a texture if it doesn't match existing one. 2003 | * If a switch from DRM allocated buffer to a SHM buffer is 2004 | * happening, we need to allocate a new texture buffer. */ 2005 | if (pitch != gs->pitch || 2006 | buffer->height != gs->height || 2007 | gl_format[0] != gs->gl_format[0] || 2008 | gl_format[1] != gs->gl_format[1] || 2009 | gl_format[2] != gs->gl_format[2] || 2010 | gl_pixel_type != gs->gl_pixel_type || 2011 | gs->buffer_type != BUFFER_TYPE_SHM) { 2012 | gs->pitch = pitch; 2013 | gs->height = buffer->height; 2014 | gs->gl_format[0] = gl_format[0]; 2015 | gs->gl_format[1] = gl_format[1]; 2016 | gs->gl_format[2] = gl_format[2]; 2017 | gs->gl_pixel_type = gl_pixel_type; 2018 | gs->buffer_type = BUFFER_TYPE_SHM; 2019 | gs->needs_full_upload = true; 2020 | gs->y_inverted = true; 2021 | gs->direct_display = false; 2022 | 2023 | gs->surface = es; 2024 | 2025 | ensure_textures(gs, GL_TEXTURE_2D, num_planes); 2026 | } 2027 | } 2028 | 2029 | static void 2030 | gl_renderer_attach_egl(struct weston_surface *es, struct weston_buffer *buffer, 2031 | uint32_t format) 2032 | { 2033 | struct weston_compositor *ec = es->compositor; 2034 | struct gl_renderer *gr = get_renderer(ec); 2035 | struct gl_surface_state *gs = get_surface_state(es); 2036 | EGLint attribs[3]; 2037 | GLenum target; 2038 | int i, num_planes; 2039 | 2040 | buffer->legacy_buffer = (struct wl_buffer *)buffer->resource; 2041 | gr->query_buffer(gr->egl_display, buffer->legacy_buffer, 2042 | EGL_WIDTH, &buffer->width); 2043 | gr->query_buffer(gr->egl_display, buffer->legacy_buffer, 2044 | EGL_HEIGHT, &buffer->height); 2045 | gr->query_buffer(gr->egl_display, buffer->legacy_buffer, 2046 | EGL_WAYLAND_Y_INVERTED_WL, &buffer->y_inverted); 2047 | 2048 | for (i = 0; i < gs->num_images; i++) { 2049 | egl_image_unref(gs->images[i]); 2050 | gs->images[i] = NULL; 2051 | } 2052 | gs->num_images = 0; 2053 | es->is_opaque = false; 2054 | switch (format) { 2055 | case EGL_TEXTURE_RGB: 2056 | es->is_opaque = true; 2057 | /* fallthrough */ 2058 | case EGL_TEXTURE_RGBA: 2059 | default: 2060 | num_planes = 1; 2061 | gs->shader_variant = SHADER_VARIANT_RGBA; 2062 | break; 2063 | case EGL_TEXTURE_EXTERNAL_WL: 2064 | num_planes = 1; 2065 | gs->shader_variant = SHADER_VARIANT_EXTERNAL; 2066 | break; 2067 | case EGL_TEXTURE_Y_UV_WL: 2068 | num_planes = 2; 2069 | gs->shader_variant = SHADER_VARIANT_Y_UV; 2070 | es->is_opaque = true; 2071 | break; 2072 | case EGL_TEXTURE_Y_U_V_WL: 2073 | num_planes = 3; 2074 | gs->shader_variant = SHADER_VARIANT_Y_U_V; 2075 | es->is_opaque = true; 2076 | break; 2077 | case EGL_TEXTURE_Y_XUXV_WL: 2078 | num_planes = 2; 2079 | gs->shader_variant = SHADER_VARIANT_Y_XUXV; 2080 | es->is_opaque = true; 2081 | break; 2082 | } 2083 | 2084 | target = gl_shader_texture_variant_get_target(gs->shader_variant); 2085 | ensure_textures(gs, target, num_planes); 2086 | for (i = 0; i < num_planes; i++) { 2087 | attribs[0] = EGL_WAYLAND_PLANE_WL; 2088 | attribs[1] = i; 2089 | attribs[2] = EGL_NONE; 2090 | gs->images[i] = egl_image_create(gr, 2091 | EGL_WAYLAND_BUFFER_WL, 2092 | buffer->legacy_buffer, 2093 | attribs); 2094 | if (!gs->images[i]) { 2095 | weston_log("failed to create img for plane %d\n", i); 2096 | continue; 2097 | } 2098 | gs->num_images++; 2099 | 2100 | glActiveTexture(GL_TEXTURE0 + i); 2101 | glBindTexture(target, gs->textures[i]); 2102 | gr->image_target_texture_2d(target, gs->images[i]->image); 2103 | } 2104 | 2105 | gs->pitch 
= buffer->width; 2106 | gs->height = buffer->height; 2107 | gs->buffer_type = BUFFER_TYPE_EGL; 2108 | gs->y_inverted = buffer->y_inverted; 2109 | } 2110 | 2111 | static void 2112 | gl_renderer_destroy_dmabuf(struct linux_dmabuf_buffer *dmabuf) 2113 | { 2114 | struct dmabuf_image *image = linux_dmabuf_buffer_get_user_data(dmabuf); 2115 | 2116 | dmabuf_image_destroy(image); 2117 | } 2118 | 2119 | static struct egl_image * 2120 | import_simple_dmabuf(struct gl_renderer *gr, 2121 | struct dmabuf_attributes *attributes) 2122 | { 2123 | struct egl_image *image; 2124 | EGLint attribs[50]; 2125 | int atti = 0; 2126 | bool has_modifier; 2127 | 2128 | /* This requires the Mesa commit in 2129 | * Mesa 10.3 (08264e5dad4df448e7718e782ad9077902089a07) or 2130 | * Mesa 10.2.7 (55d28925e6109a4afd61f109e845a8a51bd17652). 2131 | * Otherwise Mesa closes the fd behind our back and re-importing 2132 | * will fail. 2133 | * https://bugs.freedesktop.org/show_bug.cgi?id=76188 2134 | */ 2135 | 2136 | attribs[atti++] = EGL_WIDTH; 2137 | attribs[atti++] = attributes->width; 2138 | attribs[atti++] = EGL_HEIGHT; 2139 | attribs[atti++] = attributes->height; 2140 | attribs[atti++] = EGL_LINUX_DRM_FOURCC_EXT; 2141 | attribs[atti++] = attributes->format; 2142 | 2143 | if (attributes->modifier[0] != DRM_FORMAT_MOD_INVALID) { 2144 | if (!gr->has_dmabuf_import_modifiers) 2145 | return NULL; 2146 | has_modifier = true; 2147 | } else { 2148 | has_modifier = false; 2149 | } 2150 | 2151 | if (attributes->n_planes > 0) { 2152 | attribs[atti++] = EGL_DMA_BUF_PLANE0_FD_EXT; 2153 | attribs[atti++] = attributes->fd[0]; 2154 | attribs[atti++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT; 2155 | attribs[atti++] = attributes->offset[0]; 2156 | attribs[atti++] = EGL_DMA_BUF_PLANE0_PITCH_EXT; 2157 | attribs[atti++] = attributes->stride[0]; 2158 | if (has_modifier) { 2159 | attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT; 2160 | attribs[atti++] = attributes->modifier[0] & 0xFFFFFFFF; 2161 | attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT; 2162 | attribs[atti++] = attributes->modifier[0] >> 32; 2163 | } 2164 | } 2165 | 2166 | if (attributes->n_planes > 1) { 2167 | attribs[atti++] = EGL_DMA_BUF_PLANE1_FD_EXT; 2168 | attribs[atti++] = attributes->fd[1]; 2169 | attribs[atti++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT; 2170 | attribs[atti++] = attributes->offset[1]; 2171 | attribs[atti++] = EGL_DMA_BUF_PLANE1_PITCH_EXT; 2172 | attribs[atti++] = attributes->stride[1]; 2173 | if (has_modifier) { 2174 | attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT; 2175 | attribs[atti++] = attributes->modifier[1] & 0xFFFFFFFF; 2176 | attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT; 2177 | attribs[atti++] = attributes->modifier[1] >> 32; 2178 | } 2179 | } 2180 | 2181 | if (attributes->n_planes > 2) { 2182 | attribs[atti++] = EGL_DMA_BUF_PLANE2_FD_EXT; 2183 | attribs[atti++] = attributes->fd[2]; 2184 | attribs[atti++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT; 2185 | attribs[atti++] = attributes->offset[2]; 2186 | attribs[atti++] = EGL_DMA_BUF_PLANE2_PITCH_EXT; 2187 | attribs[atti++] = attributes->stride[2]; 2188 | if (has_modifier) { 2189 | attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT; 2190 | attribs[atti++] = attributes->modifier[2] & 0xFFFFFFFF; 2191 | attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT; 2192 | attribs[atti++] = attributes->modifier[2] >> 32; 2193 | } 2194 | } 2195 | 2196 | if (gr->has_dmabuf_import_modifiers) { 2197 | if (attributes->n_planes > 3) { 2198 | attribs[atti++] = EGL_DMA_BUF_PLANE3_FD_EXT; 2199 | attribs[atti++] = attributes->fd[3]; 
2200 | attribs[atti++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT; 2201 | attribs[atti++] = attributes->offset[3]; 2202 | attribs[atti++] = EGL_DMA_BUF_PLANE3_PITCH_EXT; 2203 | attribs[atti++] = attributes->stride[3]; 2204 | attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT; 2205 | attribs[atti++] = attributes->modifier[3] & 0xFFFFFFFF; 2206 | attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT; 2207 | attribs[atti++] = attributes->modifier[3] >> 32; 2208 | } 2209 | } 2210 | 2211 | attribs[atti++] = EGL_NONE; 2212 | 2213 | image = egl_image_create(gr, EGL_LINUX_DMA_BUF_EXT, NULL, 2214 | attribs); 2215 | 2216 | return image; 2217 | } 2218 | 2219 | struct yuv_format_descriptor yuv_formats[] = { 2220 | { 2221 | .format = DRM_FORMAT_YUYV, 2222 | .input_planes = 1, 2223 | .output_planes = 2, 2224 | .texture_type = TEXTURE_Y_XUXV_WL, 2225 | {{ 2226 | .width_divisor = 1, 2227 | .height_divisor = 1, 2228 | .format = DRM_FORMAT_GR88, 2229 | .plane_index = 0 2230 | }, { 2231 | .width_divisor = 2, 2232 | .height_divisor = 1, 2233 | .format = DRM_FORMAT_ARGB8888, 2234 | .plane_index = 0 2235 | }} 2236 | }, { 2237 | .format = DRM_FORMAT_NV12, 2238 | .input_planes = 2, 2239 | .output_planes = 2, 2240 | .texture_type = TEXTURE_Y_UV_WL, 2241 | {{ 2242 | .width_divisor = 1, 2243 | .height_divisor = 1, 2244 | .format = DRM_FORMAT_R8, 2245 | .plane_index = 0 2246 | }, { 2247 | .width_divisor = 2, 2248 | .height_divisor = 2, 2249 | .format = DRM_FORMAT_GR88, 2250 | .plane_index = 1 2251 | }} 2252 | }, { 2253 | .format = DRM_FORMAT_YUV420, 2254 | .input_planes = 3, 2255 | .output_planes = 3, 2256 | .texture_type = TEXTURE_Y_U_V_WL, 2257 | {{ 2258 | .width_divisor = 1, 2259 | .height_divisor = 1, 2260 | .format = DRM_FORMAT_R8, 2261 | .plane_index = 0 2262 | }, { 2263 | .width_divisor = 2, 2264 | .height_divisor = 2, 2265 | .format = DRM_FORMAT_R8, 2266 | .plane_index = 1 2267 | }, { 2268 | .width_divisor = 2, 2269 | .height_divisor = 2, 2270 | .format = DRM_FORMAT_R8, 2271 | .plane_index = 2 2272 | }} 2273 | }, { 2274 | .format = DRM_FORMAT_YUV444, 2275 | .input_planes = 3, 2276 | .output_planes = 3, 2277 | .texture_type = TEXTURE_Y_U_V_WL, 2278 | {{ 2279 | .width_divisor = 1, 2280 | .height_divisor = 1, 2281 | .format = DRM_FORMAT_R8, 2282 | .plane_index = 0 2283 | }, { 2284 | .width_divisor = 1, 2285 | .height_divisor = 1, 2286 | .format = DRM_FORMAT_R8, 2287 | .plane_index = 1 2288 | }, { 2289 | .width_divisor = 1, 2290 | .height_divisor = 1, 2291 | .format = DRM_FORMAT_R8, 2292 | .plane_index = 2 2293 | }} 2294 | }, { 2295 | .format = DRM_FORMAT_XYUV8888, 2296 | .input_planes = 1, 2297 | .output_planes = 1, 2298 | .texture_type = TEXTURE_XYUV_WL, 2299 | {{ 2300 | .width_divisor = 1, 2301 | .height_divisor = 1, 2302 | .format = DRM_FORMAT_XBGR8888, 2303 | .plane_index = 0 2304 | }} 2305 | } 2306 | }; 2307 | 2308 | static struct egl_image * 2309 | import_dmabuf_single_plane(struct gl_renderer *gr, 2310 | const struct dmabuf_attributes *attributes, 2311 | struct yuv_plane_descriptor *descriptor) 2312 | { 2313 | struct dmabuf_attributes plane; 2314 | struct egl_image *image; 2315 | char fmt[4]; 2316 | 2317 | plane.width = attributes->width / descriptor->width_divisor; 2318 | plane.height = attributes->height / descriptor->height_divisor; 2319 | plane.format = descriptor->format; 2320 | plane.n_planes = 1; 2321 | plane.fd[0] = attributes->fd[descriptor->plane_index]; 2322 | plane.offset[0] = attributes->offset[descriptor->plane_index]; 2323 | plane.stride[0] = attributes->stride[descriptor->plane_index]; 2324 | 
plane.modifier[0] = attributes->modifier[descriptor->plane_index]; 2325 | 2326 | image = import_simple_dmabuf(gr, &plane); 2327 | if (!image) { 2328 | weston_log("Failed to import plane %d as %.4s\n", 2329 | descriptor->plane_index, 2330 | dump_format(descriptor->format, fmt)); 2331 | return NULL; 2332 | } 2333 | 2334 | return image; 2335 | } 2336 | 2337 | static bool 2338 | import_yuv_dmabuf(struct gl_renderer *gr, 2339 | struct dmabuf_image *image) 2340 | { 2341 | unsigned i; 2342 | int j; 2343 | int ret; 2344 | struct yuv_format_descriptor *format = NULL; 2345 | struct dmabuf_attributes *attributes = &image->dmabuf->attributes; 2346 | char fmt[4]; 2347 | 2348 | for (i = 0; i < ARRAY_LENGTH(yuv_formats); ++i) { 2349 | if (yuv_formats[i].format == attributes->format) { 2350 | format = &yuv_formats[i]; 2351 | break; 2352 | } 2353 | } 2354 | 2355 | if (!format) { 2356 | weston_log("Error during import, and no known conversion for format " 2357 | "%.4s in the renderer\n", 2358 | dump_format(attributes->format, fmt)); 2359 | return false; 2360 | } 2361 | 2362 | if (attributes->n_planes != format->input_planes) { 2363 | weston_log("%.4s dmabuf must contain %d plane%s (%d provided)\n", 2364 | dump_format(format->format, fmt), 2365 | format->input_planes, 2366 | (format->input_planes > 1) ? "s" : "", 2367 | attributes->n_planes); 2368 | return false; 2369 | } 2370 | 2371 | for (j = 0; j < format->output_planes; ++j) { 2372 | image->images[j] = import_dmabuf_single_plane(gr, attributes, 2373 | &format->plane[j]); 2374 | if (!image->images[j]) { 2375 | while (j) { 2376 | ret = egl_image_unref(image->images[--j]); 2377 | assert(ret == 0); 2378 | } 2379 | return false; 2380 | } 2381 | } 2382 | 2383 | image->num_images = format->output_planes; 2384 | 2385 | switch (format->texture_type) { 2386 | case TEXTURE_Y_XUXV_WL: 2387 | image->shader_variant = SHADER_VARIANT_Y_XUXV; 2388 | break; 2389 | case TEXTURE_Y_UV_WL: 2390 | image->shader_variant = SHADER_VARIANT_Y_UV; 2391 | break; 2392 | case TEXTURE_Y_U_V_WL: 2393 | image->shader_variant = SHADER_VARIANT_Y_U_V; 2394 | break; 2395 | case TEXTURE_XYUV_WL: 2396 | image->shader_variant = SHADER_VARIANT_XYUV; 2397 | break; 2398 | default: 2399 | assert(false); 2400 | } 2401 | 2402 | return true; 2403 | } 2404 | 2405 | static void 2406 | gl_renderer_query_dmabuf_modifiers_full(struct gl_renderer *gr, int format, 2407 | uint64_t **modifiers, 2408 | unsigned **external_only, 2409 | int *num_modifiers); 2410 | 2411 | static struct dmabuf_format* 2412 | dmabuf_format_create(struct gl_renderer *gr, uint32_t format) 2413 | { 2414 | struct dmabuf_format *dmabuf_format; 2415 | 2416 | dmabuf_format = calloc(1, sizeof(struct dmabuf_format)); 2417 | if (!dmabuf_format) 2418 | return NULL; 2419 | 2420 | dmabuf_format->format = format; 2421 | 2422 | gl_renderer_query_dmabuf_modifiers_full(gr, format, 2423 | &dmabuf_format->modifiers, 2424 | &dmabuf_format->external_only, 2425 | &dmabuf_format->num_modifiers); 2426 | 2427 | if (dmabuf_format->num_modifiers == 0) { 2428 | free(dmabuf_format); 2429 | return NULL; 2430 | } 2431 | 2432 | wl_list_insert(&gr->dmabuf_formats, &dmabuf_format->link); 2433 | return dmabuf_format; 2434 | } 2435 | 2436 | static void 2437 | dmabuf_format_destroy(struct dmabuf_format *format) 2438 | { 2439 | free(format->modifiers); 2440 | free(format->external_only); 2441 | wl_list_remove(&format->link); 2442 | free(format); 2443 | } 2444 | 2445 | static GLenum 2446 | choose_texture_target(struct gl_renderer *gr, 2447 | struct dmabuf_attributes 
*attributes) 2448 | { 2449 | struct dmabuf_format *tmp, *format = NULL; 2450 | 2451 | wl_list_for_each(tmp, &gr->dmabuf_formats, link) { 2452 | if (tmp->format == attributes->format) { 2453 | format = tmp; 2454 | break; 2455 | } 2456 | } 2457 | 2458 | if (!format) 2459 | format = dmabuf_format_create(gr, attributes->format); 2460 | 2461 | if (format) { 2462 | int i; 2463 | 2464 | for (i = 0; i < format->num_modifiers; ++i) { 2465 | if (format->modifiers[i] == attributes->modifier[0]) { 2466 | if(format->external_only[i]) 2467 | return GL_TEXTURE_EXTERNAL_OES; 2468 | else 2469 | return GL_TEXTURE_2D; 2470 | } 2471 | } 2472 | } 2473 | 2474 | if (attributes->n_planes > 1) 2475 | return GL_TEXTURE_EXTERNAL_OES; 2476 | 2477 | switch (attributes->format & ~DRM_FORMAT_BIG_ENDIAN) { 2478 | case DRM_FORMAT_YUYV: 2479 | case DRM_FORMAT_YVYU: 2480 | case DRM_FORMAT_UYVY: 2481 | case DRM_FORMAT_VYUY: 2482 | case DRM_FORMAT_AYUV: 2483 | case DRM_FORMAT_XYUV8888: 2484 | return GL_TEXTURE_EXTERNAL_OES; 2485 | default: 2486 | return GL_TEXTURE_2D; 2487 | } 2488 | } 2489 | 2490 | static struct dmabuf_image * 2491 | import_dmabuf(struct gl_renderer *gr, 2492 | struct linux_dmabuf_buffer *dmabuf) 2493 | { 2494 | struct egl_image *egl_image; 2495 | struct dmabuf_image *image; 2496 | GLenum target; 2497 | 2498 | image = dmabuf_image_create(); 2499 | image->dmabuf = dmabuf; 2500 | 2501 | egl_image = import_simple_dmabuf(gr, &dmabuf->attributes); 2502 | if (egl_image) { 2503 | image->num_images = 1; 2504 | image->images[0] = egl_image; 2505 | image->import_type = IMPORT_TYPE_DIRECT; 2506 | target = choose_texture_target(gr, &dmabuf->attributes); 2507 | 2508 | switch (target) { 2509 | case GL_TEXTURE_2D: 2510 | image->shader_variant = SHADER_VARIANT_RGBA; 2511 | break; 2512 | default: 2513 | image->shader_variant = SHADER_VARIANT_EXTERNAL; 2514 | } 2515 | } else { 2516 | if (!import_yuv_dmabuf(gr, image)) { 2517 | dmabuf_image_destroy(image); 2518 | return NULL; 2519 | } 2520 | image->import_type = IMPORT_TYPE_GL_CONVERSION; 2521 | } 2522 | 2523 | return image; 2524 | } 2525 | 2526 | static void 2527 | gl_renderer_query_dmabuf_formats(struct weston_compositor *wc, 2528 | int **formats, int *num_formats) 2529 | { 2530 | struct gl_renderer *gr = get_renderer(wc); 2531 | static const int fallback_formats[] = { 2532 | DRM_FORMAT_ARGB8888, 2533 | DRM_FORMAT_XRGB8888, 2534 | DRM_FORMAT_YUYV, 2535 | DRM_FORMAT_NV12, 2536 | DRM_FORMAT_YUV420, 2537 | DRM_FORMAT_YUV444, 2538 | DRM_FORMAT_XYUV8888, 2539 | }; 2540 | bool fallback = false; 2541 | EGLint num; 2542 | 2543 | assert(gr->has_dmabuf_import); 2544 | 2545 | if (!gr->has_dmabuf_import_modifiers || 2546 | !gr->query_dmabuf_formats(gr->egl_display, 0, NULL, &num)) { 2547 | num = gr->has_gl_texture_rg ? 
ARRAY_LENGTH(fallback_formats) : 2; 2548 | fallback = true; 2549 | } 2550 | 2551 | *formats = calloc(num, sizeof(int)); 2552 | if (*formats == NULL) { 2553 | *num_formats = 0; 2554 | return; 2555 | } 2556 | 2557 | if (fallback) { 2558 | memcpy(*formats, fallback_formats, num * sizeof(int)); 2559 | *num_formats = num; 2560 | return; 2561 | } 2562 | 2563 | if (!gr->query_dmabuf_formats(gr->egl_display, num, *formats, &num)) { 2564 | *num_formats = 0; 2565 | free(*formats); 2566 | return; 2567 | } 2568 | 2569 | *num_formats = num; 2570 | } 2571 | 2572 | static void 2573 | gl_renderer_query_dmabuf_modifiers_full(struct gl_renderer *gr, int format, 2574 | uint64_t **modifiers, 2575 | unsigned **external_only, 2576 | int *num_modifiers) 2577 | { 2578 | int num; 2579 | 2580 | assert(gr->has_dmabuf_import); 2581 | 2582 | if (!gr->has_dmabuf_import_modifiers || 2583 | !gr->query_dmabuf_modifiers(gr->egl_display, format, 0, NULL, 2584 | NULL, &num) || 2585 | num == 0) { 2586 | *num_modifiers = 0; 2587 | return; 2588 | } 2589 | 2590 | *modifiers = calloc(num, sizeof(uint64_t)); 2591 | if (*modifiers == NULL) { 2592 | *num_modifiers = 0; 2593 | return; 2594 | } 2595 | if (external_only) { 2596 | *external_only = calloc(num, sizeof(unsigned)); 2597 | if (*external_only == NULL) { 2598 | *num_modifiers = 0; 2599 | free(*modifiers); 2600 | return; 2601 | } 2602 | } 2603 | if (!gr->query_dmabuf_modifiers(gr->egl_display, format, 2604 | num, *modifiers, external_only ? 2605 | *external_only : NULL, &num)) { 2606 | *num_modifiers = 0; 2607 | free(*modifiers); 2608 | if (external_only) 2609 | free(*external_only); 2610 | return; 2611 | } 2612 | 2613 | *num_modifiers = num; 2614 | } 2615 | 2616 | static void 2617 | gl_renderer_query_dmabuf_modifiers(struct weston_compositor *wc, int format, 2618 | uint64_t **modifiers, 2619 | int *num_modifiers) 2620 | { 2621 | struct gl_renderer *gr = get_renderer(wc); 2622 | 2623 | gl_renderer_query_dmabuf_modifiers_full(gr, format, modifiers, NULL, 2624 | num_modifiers); 2625 | } 2626 | 2627 | static bool 2628 | gl_renderer_import_dmabuf(struct weston_compositor *ec, 2629 | struct linux_dmabuf_buffer *dmabuf) 2630 | { 2631 | struct gl_renderer *gr = get_renderer(ec); 2632 | struct dmabuf_image *image; 2633 | int i; 2634 | 2635 | assert(gr->has_dmabuf_import); 2636 | 2637 | for (i = 0; i < dmabuf->attributes.n_planes; i++) { 2638 | /* return if EGL doesn't support import modifiers */ 2639 | if (dmabuf->attributes.modifier[i] != DRM_FORMAT_MOD_INVALID) 2640 | if (!gr->has_dmabuf_import_modifiers) 2641 | return false; 2642 | 2643 | /* return if modifiers passed are unequal */ 2644 | if (dmabuf->attributes.modifier[i] != 2645 | dmabuf->attributes.modifier[0]) 2646 | return false; 2647 | } 2648 | 2649 | /* reject all flags we do not recognize or handle */ 2650 | if (dmabuf->attributes.flags & ~ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT) 2651 | return false; 2652 | 2653 | image = import_dmabuf(gr, dmabuf); 2654 | if (!image) 2655 | return false; 2656 | 2657 | wl_list_insert(&gr->dmabuf_images, &image->link); 2658 | linux_dmabuf_buffer_set_user_data(dmabuf, image, 2659 | gl_renderer_destroy_dmabuf); 2660 | 2661 | return true; 2662 | } 2663 | 2664 | static bool 2665 | import_known_dmabuf(struct gl_renderer *gr, 2666 | struct dmabuf_image *image) 2667 | { 2668 | switch (image->import_type) { 2669 | case IMPORT_TYPE_DIRECT: 2670 | image->images[0] = import_simple_dmabuf(gr, &image->dmabuf->attributes); 2671 | if (!image->images[0]) 2672 | return false; 2673 | image->num_images = 1; 
2674 | break; 2675 | 2676 | case IMPORT_TYPE_GL_CONVERSION: 2677 | if (!import_yuv_dmabuf(gr, image)) 2678 | return false; 2679 | break; 2680 | 2681 | default: 2682 | weston_log("Invalid import type for dmabuf\n"); 2683 | return false; 2684 | } 2685 | 2686 | return true; 2687 | } 2688 | 2689 | static bool 2690 | dmabuf_is_opaque(struct linux_dmabuf_buffer *dmabuf) 2691 | { 2692 | const struct pixel_format_info *info; 2693 | 2694 | info = pixel_format_get_info(dmabuf->attributes.format & 2695 | ~DRM_FORMAT_BIG_ENDIAN); 2696 | if (!info) 2697 | return false; 2698 | 2699 | return pixel_format_is_opaque(info); 2700 | } 2701 | 2702 | static void 2703 | gl_renderer_attach_dmabuf(struct weston_surface *surface, 2704 | struct weston_buffer *buffer, 2705 | struct linux_dmabuf_buffer *dmabuf) 2706 | { 2707 | struct gl_renderer *gr = get_renderer(surface->compositor); 2708 | struct gl_surface_state *gs = get_surface_state(surface); 2709 | struct dmabuf_image *image; 2710 | GLenum target; 2711 | int i; 2712 | 2713 | if (!gr->has_dmabuf_import) { 2714 | linux_dmabuf_buffer_send_server_error(dmabuf, 2715 | "EGL dmabuf import not supported"); 2716 | return; 2717 | } 2718 | 2719 | buffer->width = dmabuf->attributes.width; 2720 | buffer->height = dmabuf->attributes.height; 2721 | 2722 | /* 2723 | * GL-renderer uses the OpenGL convention of texture coordinates, where 2724 | * the origin is at bottom-left. Because dmabuf buffers have the origin 2725 | * at top-left, we must invert the Y_INVERT flag to get the image right. 2726 | */ 2727 | buffer->y_inverted = 2728 | !(dmabuf->attributes.flags & ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT); 2729 | 2730 | for (i = 0; i < gs->num_images; i++) 2731 | egl_image_unref(gs->images[i]); 2732 | gs->num_images = 0; 2733 | 2734 | gs->pitch = buffer->width; 2735 | gs->height = buffer->height; 2736 | gs->buffer_type = BUFFER_TYPE_EGL; 2737 | gs->y_inverted = buffer->y_inverted; 2738 | gs->direct_display = dmabuf->direct_display; 2739 | surface->is_opaque = dmabuf_is_opaque(dmabuf); 2740 | 2741 | /* 2742 | * We try to always hold an imported EGLImage from the dmabuf 2743 | * to prevent the client from preventing re-imports. But, we also 2744 | * need to re-import every time the contents may change because 2745 | * GL driver's caching may need flushing. 2746 | * 2747 | * Here we release the cache reference which has to be final. 
2748 | */ 2749 | if (dmabuf->direct_display) 2750 | return; 2751 | 2752 | image = linux_dmabuf_buffer_get_user_data(dmabuf); 2753 | 2754 | /* The dmabuf_image should have been created during the import */ 2755 | assert(image != NULL); 2756 | 2757 | for (i = 0; i < image->num_images; ++i) 2758 | egl_image_unref(image->images[i]); 2759 | 2760 | if (!import_known_dmabuf(gr, image)) { 2761 | linux_dmabuf_buffer_send_server_error(dmabuf, "EGL dmabuf import failed"); 2762 | return; 2763 | } 2764 | 2765 | gs->num_images = image->num_images; 2766 | for (i = 0; i < gs->num_images; ++i) 2767 | gs->images[i] = egl_image_ref(image->images[i]); 2768 | 2769 | target = gl_shader_texture_variant_get_target(image->shader_variant); 2770 | ensure_textures(gs, target, gs->num_images); 2771 | for (i = 0; i < gs->num_images; ++i) { 2772 | glActiveTexture(GL_TEXTURE0 + i); 2773 | glBindTexture(target, gs->textures[i]); 2774 | gr->image_target_texture_2d(target, gs->images[i]->image); 2775 | } 2776 | 2777 | gs->shader_variant = image->shader_variant; 2778 | } 2779 | 2780 | static void 2781 | gl_renderer_attach(struct weston_surface *es, struct weston_buffer *buffer) 2782 | { 2783 | struct weston_compositor *ec = es->compositor; 2784 | struct gl_renderer *gr = get_renderer(ec); 2785 | struct gl_surface_state *gs = get_surface_state(es); 2786 | struct wl_shm_buffer *shm_buffer; 2787 | struct linux_dmabuf_buffer *dmabuf; 2788 | EGLint format; 2789 | int i; 2790 | 2791 | weston_buffer_reference(&gs->buffer_ref, buffer); 2792 | weston_buffer_release_reference(&gs->buffer_release_ref, 2793 | es->buffer_release_ref.buffer_release); 2794 | 2795 | if (!buffer) { 2796 | for (i = 0; i < gs->num_images; i++) { 2797 | egl_image_unref(gs->images[i]); 2798 | gs->images[i] = NULL; 2799 | } 2800 | gs->num_images = 0; 2801 | glDeleteTextures(gs->num_textures, gs->textures); 2802 | gs->num_textures = 0; 2803 | gs->buffer_type = BUFFER_TYPE_NULL; 2804 | gs->y_inverted = true; 2805 | gs->direct_display = false; 2806 | es->is_opaque = false; 2807 | return; 2808 | } 2809 | 2810 | shm_buffer = wl_shm_buffer_get(buffer->resource); 2811 | 2812 | if (shm_buffer) 2813 | gl_renderer_attach_shm(es, buffer, shm_buffer); 2814 | else if (gr->has_bind_display && 2815 | gr->query_buffer(gr->egl_display, (void *)buffer->resource, 2816 | EGL_TEXTURE_FORMAT, &format)) 2817 | gl_renderer_attach_egl(es, buffer, format); 2818 | else if ((dmabuf = linux_dmabuf_buffer_get(buffer->resource))) 2819 | gl_renderer_attach_dmabuf(es, buffer, dmabuf); 2820 | else { 2821 | weston_log("unhandled buffer type!\n"); 2822 | if (gr->has_bind_display) { 2823 | weston_log("eglQueryWaylandBufferWL failed\n"); 2824 | gl_renderer_print_egl_error_state(); 2825 | } 2826 | weston_buffer_reference(&gs->buffer_ref, NULL); 2827 | weston_buffer_release_reference(&gs->buffer_release_ref, NULL); 2828 | gs->buffer_type = BUFFER_TYPE_NULL; 2829 | gs->y_inverted = true; 2830 | es->is_opaque = false; 2831 | weston_buffer_send_server_error(buffer, 2832 | "disconnecting due to unhandled buffer type"); 2833 | } 2834 | } 2835 | 2836 | static void 2837 | gl_renderer_surface_set_color(struct weston_surface *surface, 2838 | float red, float green, float blue, float alpha) 2839 | { 2840 | struct gl_surface_state *gs = get_surface_state(surface); 2841 | 2842 | gs->color[0] = red; 2843 | gs->color[1] = green; 2844 | gs->color[2] = blue; 2845 | gs->color[3] = alpha; 2846 | gs->buffer_type = BUFFER_TYPE_SOLID; 2847 | gs->pitch = 1; 2848 | gs->height = 1; 2849 | 2850 | gs->shader_variant = 
SHADER_VARIANT_SOLID; 2851 | } 2852 | 2853 | static void 2854 | gl_renderer_surface_get_content_size(struct weston_surface *surface, 2855 | int *width, int *height) 2856 | { 2857 | struct gl_surface_state *gs = get_surface_state(surface); 2858 | 2859 | if (gs->buffer_type == BUFFER_TYPE_NULL) { 2860 | *width = 0; 2861 | *height = 0; 2862 | } else { 2863 | *width = gs->pitch; 2864 | *height = gs->height; 2865 | } 2866 | } 2867 | 2868 | static uint32_t 2869 | pack_color(pixman_format_code_t format, float *c) 2870 | { 2871 | uint8_t r = round(c[0] * 255.0f); 2872 | uint8_t g = round(c[1] * 255.0f); 2873 | uint8_t b = round(c[2] * 255.0f); 2874 | uint8_t a = round(c[3] * 255.0f); 2875 | 2876 | switch (format) { 2877 | case PIXMAN_a8b8g8r8: 2878 | return (a << 24) | (b << 16) | (g << 8) | r; 2879 | default: 2880 | assert(0); 2881 | return 0; 2882 | } 2883 | } 2884 | 2885 | static int 2886 | gl_renderer_surface_copy_content(struct weston_surface *surface, 2887 | void *target, size_t size, 2888 | int src_x, int src_y, 2889 | int width, int height) 2890 | { 2891 | static const GLfloat verts[4 * 2] = { 2892 | 0.0f, 0.0f, 2893 | 1.0f, 0.0f, 2894 | 1.0f, 1.0f, 2895 | 0.0f, 1.0f 2896 | }; 2897 | static const GLfloat projmat_normal[16] = { /* transpose */ 2898 | 2.0f, 0.0f, 0.0f, 0.0f, 2899 | 0.0f, 2.0f, 0.0f, 0.0f, 2900 | 0.0f, 0.0f, 1.0f, 0.0f, 2901 | -1.0f, -1.0f, 0.0f, 1.0f 2902 | }; 2903 | static const GLfloat projmat_yinvert[16] = { /* transpose */ 2904 | 2.0f, 0.0f, 0.0f, 0.0f, 2905 | 0.0f, -2.0f, 0.0f, 0.0f, 2906 | 0.0f, 0.0f, 1.0f, 0.0f, 2907 | -1.0f, 1.0f, 0.0f, 1.0f 2908 | }; 2909 | struct gl_shader_config sconf = { 2910 | .view_alpha = 1.0f, 2911 | .input_tex_filter = GL_NEAREST, 2912 | }; 2913 | const pixman_format_code_t format = PIXMAN_a8b8g8r8; 2914 | const size_t bytespp = 4; /* PIXMAN_a8b8g8r8 */ 2915 | const GLenum gl_format = GL_RGBA; /* PIXMAN_a8b8g8r8 little-endian */ 2916 | struct gl_renderer *gr = get_renderer(surface->compositor); 2917 | struct gl_surface_state *gs = get_surface_state(surface); 2918 | int cw, ch; 2919 | GLuint fbo; 2920 | GLuint tex; 2921 | GLenum status; 2922 | int ret = -1; 2923 | 2924 | gl_renderer_surface_get_content_size(surface, &cw, &ch); 2925 | 2926 | switch (gs->buffer_type) { 2927 | case BUFFER_TYPE_NULL: 2928 | return -1; 2929 | case BUFFER_TYPE_SOLID: 2930 | *(uint32_t *)target = pack_color(format, gs->color); 2931 | return 0; 2932 | case BUFFER_TYPE_SHM: 2933 | gl_renderer_flush_damage(surface); 2934 | /* fall through */ 2935 | case BUFFER_TYPE_EGL: 2936 | break; 2937 | } 2938 | 2939 | gl_shader_config_set_input_textures(&sconf, gs); 2940 | 2941 | glActiveTexture(GL_TEXTURE0); 2942 | glGenTextures(1, &tex); 2943 | glBindTexture(GL_TEXTURE_2D, tex); 2944 | glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, cw, ch, 2945 | 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); 2946 | glBindTexture(GL_TEXTURE_2D, 0); 2947 | 2948 | glGenFramebuffers(1, &fbo); 2949 | glBindFramebuffer(GL_FRAMEBUFFER, fbo); 2950 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, 2951 | GL_TEXTURE_2D, tex, 0); 2952 | 2953 | status = glCheckFramebufferStatus(GL_FRAMEBUFFER); 2954 | if (status != GL_FRAMEBUFFER_COMPLETE) { 2955 | weston_log("%s: fbo error: %#x\n", __func__, status); 2956 | goto out; 2957 | } 2958 | 2959 | glViewport(0, 0, cw, ch); 2960 | glDisable(GL_BLEND); 2961 | if (gs->y_inverted) 2962 | memcpy(sconf.projection.d, projmat_normal, sizeof projmat_normal); 2963 | else 2964 | memcpy(sconf.projection.d, projmat_yinvert, sizeof projmat_yinvert); 2965 | sconf.projection.type = 
WESTON_MATRIX_TRANSFORM_SCALE | 2966 | WESTON_MATRIX_TRANSFORM_TRANSLATE; 2967 | 2968 | if (!gl_renderer_use_program(gr, &sconf)) 2969 | goto out; 2970 | 2971 | /* position: */ 2972 | glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, verts); 2973 | glEnableVertexAttribArray(0); 2974 | 2975 | /* texcoord: */ 2976 | glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, verts); 2977 | glEnableVertexAttribArray(1); 2978 | 2979 | glDrawArrays(GL_TRIANGLE_FAN, 0, 4); 2980 | 2981 | glDisableVertexAttribArray(1); 2982 | glDisableVertexAttribArray(0); 2983 | 2984 | glPixelStorei(GL_PACK_ALIGNMENT, bytespp); 2985 | glReadPixels(src_x, src_y, width, height, gl_format, 2986 | GL_UNSIGNED_BYTE, target); 2987 | ret = 0; 2988 | 2989 | out: 2990 | glDeleteFramebuffers(1, &fbo); 2991 | glDeleteTextures(1, &tex); 2992 | 2993 | return ret; 2994 | } 2995 | 2996 | static void 2997 | surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr) 2998 | { 2999 | int i; 3000 | 3001 | wl_list_remove(&gs->surface_destroy_listener.link); 3002 | wl_list_remove(&gs->renderer_destroy_listener.link); 3003 | 3004 | gs->surface->renderer_state = NULL; 3005 | 3006 | glDeleteTextures(gs->num_textures, gs->textures); 3007 | 3008 | for (i = 0; i < gs->num_images; i++) 3009 | egl_image_unref(gs->images[i]); 3010 | 3011 | weston_buffer_reference(&gs->buffer_ref, NULL); 3012 | weston_buffer_release_reference(&gs->buffer_release_ref, NULL); 3013 | pixman_region32_fini(&gs->texture_damage); 3014 | free(gs); 3015 | } 3016 | 3017 | static void 3018 | surface_state_handle_surface_destroy(struct wl_listener *listener, void *data) 3019 | { 3020 | struct gl_surface_state *gs; 3021 | struct gl_renderer *gr; 3022 | 3023 | gs = container_of(listener, struct gl_surface_state, 3024 | surface_destroy_listener); 3025 | 3026 | gr = get_renderer(gs->surface->compositor); 3027 | 3028 | surface_state_destroy(gs, gr); 3029 | } 3030 | 3031 | static void 3032 | surface_state_handle_renderer_destroy(struct wl_listener *listener, void *data) 3033 | { 3034 | struct gl_surface_state *gs; 3035 | struct gl_renderer *gr; 3036 | 3037 | gr = data; 3038 | 3039 | gs = container_of(listener, struct gl_surface_state, 3040 | renderer_destroy_listener); 3041 | 3042 | surface_state_destroy(gs, gr); 3043 | } 3044 | 3045 | static int 3046 | gl_renderer_create_surface(struct weston_surface *surface) 3047 | { 3048 | struct gl_surface_state *gs; 3049 | struct gl_renderer *gr = get_renderer(surface->compositor); 3050 | 3051 | gs = zalloc(sizeof *gs); 3052 | if (gs == NULL) 3053 | return -1; 3054 | 3055 | /* A buffer is never attached to solid color surfaces, yet 3056 | * they still go through texcoord computations. Do not divide 3057 | * by zero there. 
3058 | */ 3059 | gs->pitch = 1; 3060 | gs->y_inverted = true; 3061 | gs->direct_display = false; 3062 | 3063 | gs->surface = surface; 3064 | 3065 | pixman_region32_init(&gs->texture_damage); 3066 | surface->renderer_state = gs; 3067 | 3068 | gs->surface_destroy_listener.notify = 3069 | surface_state_handle_surface_destroy; 3070 | wl_signal_add(&surface->destroy_signal, 3071 | &gs->surface_destroy_listener); 3072 | 3073 | gs->renderer_destroy_listener.notify = 3074 | surface_state_handle_renderer_destroy; 3075 | wl_signal_add(&gr->destroy_signal, 3076 | &gs->renderer_destroy_listener); 3077 | 3078 | if (surface->buffer_ref.buffer) { 3079 | gl_renderer_attach(surface, surface->buffer_ref.buffer); 3080 | gl_renderer_flush_damage(surface); 3081 | } 3082 | 3083 | return 0; 3084 | } 3085 | 3086 | void 3087 | gl_renderer_log_extensions(const char *name, const char *extensions) 3088 | { 3089 | const char *p, *end; 3090 | int l; 3091 | int len; 3092 | 3093 | l = weston_log("%s:", name); 3094 | p = extensions; 3095 | while (*p) { 3096 | end = strchrnul(p, ' '); 3097 | len = end - p; 3098 | if (l + len > 78) 3099 | l = weston_log_continue("\n" STAMP_SPACE "%.*s", 3100 | len, p); 3101 | else 3102 | l += weston_log_continue(" %.*s", len, p); 3103 | for (p = end; isspace(*p); p++) 3104 | ; 3105 | } 3106 | weston_log_continue("\n"); 3107 | } 3108 | 3109 | static void 3110 | log_egl_info(EGLDisplay egldpy) 3111 | { 3112 | const char *str; 3113 | 3114 | str = eglQueryString(egldpy, EGL_VERSION); 3115 | weston_log("EGL version: %s\n", str ? str : "(null)"); 3116 | 3117 | str = eglQueryString(egldpy, EGL_VENDOR); 3118 | weston_log("EGL vendor: %s\n", str ? str : "(null)"); 3119 | 3120 | str = eglQueryString(egldpy, EGL_CLIENT_APIS); 3121 | weston_log("EGL client APIs: %s\n", str ? str : "(null)"); 3122 | 3123 | str = eglQueryString(egldpy, EGL_EXTENSIONS); 3124 | gl_renderer_log_extensions("EGL extensions", str ? str : "(null)"); 3125 | } 3126 | 3127 | static void 3128 | log_gl_info(void) 3129 | { 3130 | const char *str; 3131 | 3132 | str = (char *)glGetString(GL_VERSION); 3133 | weston_log("GL version: %s\n", str ? str : "(null)"); 3134 | 3135 | str = (char *)glGetString(GL_SHADING_LANGUAGE_VERSION); 3136 | weston_log("GLSL version: %s\n", str ? str : "(null)"); 3137 | 3138 | str = (char *)glGetString(GL_VENDOR); 3139 | weston_log("GL vendor: %s\n", str ? str : "(null)"); 3140 | 3141 | str = (char *)glGetString(GL_RENDERER); 3142 | weston_log("GL renderer: %s\n", str ? str : "(null)"); 3143 | 3144 | str = (char *)glGetString(GL_EXTENSIONS); 3145 | gl_renderer_log_extensions("GL extensions", str ? str : "(null)"); 3146 | } 3147 | 3148 | static void 3149 | gl_renderer_output_set_border(struct weston_output *output, 3150 | enum gl_renderer_border_side side, 3151 | int32_t width, int32_t height, 3152 | int32_t tex_width, unsigned char *data) 3153 | { 3154 | struct gl_output_state *go = get_output_state(output); 3155 | 3156 | if (go->borders[side].width != width || 3157 | go->borders[side].height != height) 3158 | /* In this case, we have to blow everything and do a full 3159 | * repaint. 
*/ 3160 | go->border_status |= BORDER_SIZE_CHANGED | BORDER_ALL_DIRTY; 3161 | 3162 | if (data == NULL) { 3163 | width = 0; 3164 | height = 0; 3165 | } 3166 | 3167 | go->borders[side].width = width; 3168 | go->borders[side].height = height; 3169 | go->borders[side].tex_width = tex_width; 3170 | go->borders[side].data = data; 3171 | go->border_status |= 1 << side; 3172 | } 3173 | 3174 | static int 3175 | gl_renderer_setup(struct weston_compositor *ec, EGLSurface egl_surface); 3176 | 3177 | static EGLSurface 3178 | gl_renderer_create_window_surface(struct gl_renderer *gr, 3179 | EGLNativeWindowType window_for_legacy, 3180 | void *window_for_platform, 3181 | const uint32_t *drm_formats, 3182 | unsigned drm_formats_count) 3183 | { 3184 | EGLSurface egl_surface = EGL_NO_SURFACE; 3185 | EGLConfig egl_config; 3186 | 3187 | egl_config = gl_renderer_get_egl_config(gr, EGL_WINDOW_BIT, 3188 | drm_formats, drm_formats_count); 3189 | if (egl_config == EGL_NO_CONFIG_KHR) 3190 | return EGL_NO_SURFACE; 3191 | 3192 | log_egl_config_info(gr->egl_display, egl_config); 3193 | 3194 | if (gr->create_platform_window) 3195 | egl_surface = gr->create_platform_window(gr->egl_display, 3196 | egl_config, 3197 | window_for_platform, 3198 | NULL); 3199 | else 3200 | egl_surface = eglCreateWindowSurface(gr->egl_display, 3201 | egl_config, 3202 | window_for_legacy, NULL); 3203 | 3204 | return egl_surface; 3205 | } 3206 | 3207 | static int 3208 | gl_renderer_output_create(struct weston_output *output, 3209 | EGLSurface surface) 3210 | { 3211 | struct gl_output_state *go; 3212 | struct gl_renderer *gr = get_renderer(output->compositor); 3213 | const struct weston_testsuite_quirks *quirks; 3214 | GLint internal_format; 3215 | bool ret; 3216 | int i; 3217 | 3218 | quirks = &output->compositor->test_data.test_quirks; 3219 | 3220 | go = zalloc(sizeof *go); 3221 | if (go == NULL) 3222 | return -1; 3223 | 3224 | go->egl_surface = surface; 3225 | 3226 | for (i = 0; i < BUFFER_DAMAGE_COUNT; i++) 3227 | pixman_region32_init(&go->buffer_damage[i]); 3228 | 3229 | wl_list_init(&go->timeline_render_point_list); 3230 | 3231 | go->begin_render_sync = EGL_NO_SYNC_KHR; 3232 | go->end_render_sync = EGL_NO_SYNC_KHR; 3233 | 3234 | if (output->use_renderer_shadow_buffer) { 3235 | assert(gr->gl_half_float_type); 3236 | 3237 | if (gr->gl_half_float_type == GL_HALF_FLOAT_OES) 3238 | internal_format = GL_RGBA; 3239 | else 3240 | internal_format = GL_RGBA16F; 3241 | 3242 | ret = gl_fbo_texture_init(&go->shadow, 3243 | output->current_mode->width, 3244 | output->current_mode->height, 3245 | internal_format, 3246 | GL_RGBA, 3247 | gr->gl_half_float_type); 3248 | if (ret) { 3249 | weston_log("Output %s uses 16F shadow.\n", 3250 | output->name); 3251 | } else { 3252 | weston_log("Output %s failed to create 16F shadow.\n", 3253 | output->name); 3254 | free(go); 3255 | return -1; 3256 | } 3257 | } else if (quirks->gl_force_full_redraw_of_shadow_fb) { 3258 | weston_log("ERROR: gl_force_full_redraw_of_shadow_fb quirk used but shadow fb was not enabled.\n"); 3259 | abort(); 3260 | } 3261 | 3262 | output->renderer_state = go; 3263 | 3264 | return 0; 3265 | } 3266 | 3267 | static int 3268 | gl_renderer_output_window_create(struct weston_output *output, 3269 | const struct gl_renderer_output_options *options) 3270 | { 3271 | struct weston_compositor *ec = output->compositor; 3272 | struct gl_renderer *gr = get_renderer(ec); 3273 | EGLSurface egl_surface = EGL_NO_SURFACE; 3274 | int ret = 0; 3275 | 3276 | egl_surface = gl_renderer_create_window_surface(gr, 3277 
| options->window_for_legacy, 3278 | options->window_for_platform, 3279 | options->drm_formats, 3280 | options->drm_formats_count); 3281 | if (egl_surface == EGL_NO_SURFACE) { 3282 | weston_log("failed to create egl surface\n"); 3283 | return -1; 3284 | } 3285 | 3286 | ret = gl_renderer_output_create(output, egl_surface); 3287 | if (ret < 0) 3288 | weston_platform_destroy_egl_surface(gr->egl_display, egl_surface); 3289 | 3290 | return ret; 3291 | } 3292 | 3293 | static int 3294 | gl_renderer_output_pbuffer_create(struct weston_output *output, 3295 | const struct gl_renderer_pbuffer_options *options) 3296 | { 3297 | struct gl_renderer *gr = get_renderer(output->compositor); 3298 | struct gl_output_state *go; 3299 | EGLConfig pbuffer_config; 3300 | EGLSurface egl_surface; 3301 | EGLint value = 0; 3302 | int ret; 3303 | EGLint pbuffer_attribs[] = { 3304 | EGL_WIDTH, options->width, 3305 | EGL_HEIGHT, options->height, 3306 | EGL_NONE 3307 | }; 3308 | 3309 | pbuffer_config = gl_renderer_get_egl_config(gr, EGL_PBUFFER_BIT, 3310 | options->drm_formats, 3311 | options->drm_formats_count); 3312 | if (pbuffer_config == EGL_NO_CONFIG_KHR) { 3313 | weston_log("failed to choose EGL config for PbufferSurface\n"); 3314 | return -1; 3315 | } 3316 | 3317 | log_egl_config_info(gr->egl_display, pbuffer_config); 3318 | 3319 | egl_surface = eglCreatePbufferSurface(gr->egl_display, pbuffer_config, 3320 | pbuffer_attribs); 3321 | if (egl_surface == EGL_NO_SURFACE) { 3322 | weston_log("failed to create egl surface\n"); 3323 | gl_renderer_print_egl_error_state(); 3324 | return -1; 3325 | } 3326 | 3327 | eglSurfaceAttrib(gr->egl_display, egl_surface, 3328 | EGL_SWAP_BEHAVIOR, EGL_BUFFER_PRESERVED); 3329 | if (!eglQuerySurface(gr->egl_display, egl_surface, 3330 | EGL_SWAP_BEHAVIOR, &value) || 3331 | value != EGL_BUFFER_PRESERVED) { 3332 | weston_log("Error: pbuffer surface does not support EGL_BUFFER_PRESERVED, got 0x%x." 
3333 | " Continuing anyway.\n", value); 3334 | } 3335 | 3336 | ret = gl_renderer_output_create(output, egl_surface); 3337 | if (ret < 0) { 3338 | eglDestroySurface(gr->egl_display, egl_surface); 3339 | } else { 3340 | go = get_output_state(output); 3341 | go->swap_behavior_is_preserved = true; 3342 | } 3343 | 3344 | return ret; 3345 | } 3346 | 3347 | static void 3348 | gl_renderer_output_destroy(struct weston_output *output) 3349 | { 3350 | struct gl_renderer *gr = get_renderer(output->compositor); 3351 | struct gl_output_state *go = get_output_state(output); 3352 | struct timeline_render_point *trp, *tmp; 3353 | int i; 3354 | 3355 | for (i = 0; i < 2; i++) 3356 | pixman_region32_fini(&go->buffer_damage[i]); 3357 | 3358 | if (shadow_exists(go)) 3359 | gl_fbo_texture_fini(&go->shadow); 3360 | 3361 | eglMakeCurrent(gr->egl_display, 3362 | EGL_NO_SURFACE, EGL_NO_SURFACE, 3363 | EGL_NO_CONTEXT); 3364 | 3365 | weston_platform_destroy_egl_surface(gr->egl_display, go->egl_surface); 3366 | 3367 | if (!wl_list_empty(&go->timeline_render_point_list)) 3368 | weston_log("warning: discarding pending timeline render" 3369 | "objects at output destruction"); 3370 | 3371 | wl_list_for_each_safe(trp, tmp, &go->timeline_render_point_list, link) 3372 | timeline_render_point_destroy(trp); 3373 | 3374 | if (go->begin_render_sync != EGL_NO_SYNC_KHR) 3375 | gr->destroy_sync(gr->egl_display, go->begin_render_sync); 3376 | if (go->end_render_sync != EGL_NO_SYNC_KHR) 3377 | gr->destroy_sync(gr->egl_display, go->end_render_sync); 3378 | 3379 | free(go); 3380 | } 3381 | 3382 | static int 3383 | gl_renderer_create_fence_fd(struct weston_output *output) 3384 | { 3385 | struct gl_output_state *go = get_output_state(output); 3386 | struct gl_renderer *gr = get_renderer(output->compositor); 3387 | int fd; 3388 | 3389 | if (go->end_render_sync == EGL_NO_SYNC_KHR) 3390 | return -1; 3391 | 3392 | fd = gr->dup_native_fence_fd(gr->egl_display, go->end_render_sync); 3393 | if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID) 3394 | return -1; 3395 | 3396 | return fd; 3397 | } 3398 | 3399 | static void 3400 | gl_renderer_destroy(struct weston_compositor *ec) 3401 | { 3402 | struct gl_renderer *gr = get_renderer(ec); 3403 | struct dmabuf_image *image, *next; 3404 | struct dmabuf_format *format, *next_format; 3405 | 3406 | wl_signal_emit(&gr->destroy_signal, gr); 3407 | 3408 | if (gr->has_bind_display) 3409 | gr->unbind_display(gr->egl_display, ec->wl_display); 3410 | 3411 | gl_renderer_shader_list_destroy(gr); 3412 | if (gr->fallback_shader) 3413 | gl_shader_destroy(gr, gr->fallback_shader); 3414 | 3415 | /* Work around crash in egl_dri2.c's dri2_make_current() - when does this apply? 
*/ 3416 | eglMakeCurrent(gr->egl_display, 3417 | EGL_NO_SURFACE, EGL_NO_SURFACE, 3418 | EGL_NO_CONTEXT); 3419 | 3420 | wl_list_for_each_safe(image, next, &gr->dmabuf_images, link) 3421 | dmabuf_image_destroy(image); 3422 | 3423 | wl_list_for_each_safe(format, next_format, &gr->dmabuf_formats, link) 3424 | dmabuf_format_destroy(format); 3425 | 3426 | if (gr->dummy_surface != EGL_NO_SURFACE) 3427 | weston_platform_destroy_egl_surface(gr->egl_display, 3428 | gr->dummy_surface); 3429 | 3430 | eglTerminate(gr->egl_display); 3431 | eglReleaseThread(); 3432 | 3433 | wl_list_remove(&gr->output_destroy_listener.link); 3434 | 3435 | wl_array_release(&gr->vertices); 3436 | wl_array_release(&gr->vtxcnt); 3437 | 3438 | if (gr->fragment_binding) 3439 | weston_binding_destroy(gr->fragment_binding); 3440 | if (gr->fan_binding) 3441 | weston_binding_destroy(gr->fan_binding); 3442 | 3443 | weston_log_scope_destroy(gr->shader_scope); 3444 | free(gr); 3445 | } 3446 | 3447 | static void 3448 | output_handle_destroy(struct wl_listener *listener, void *data) 3449 | { 3450 | struct gl_renderer *gr; 3451 | struct weston_output *output = data; 3452 | 3453 | gr = container_of(listener, struct gl_renderer, 3454 | output_destroy_listener); 3455 | 3456 | if (wl_list_empty(&output->compositor->output_list)) 3457 | eglMakeCurrent(gr->egl_display, gr->dummy_surface, 3458 | gr->dummy_surface, gr->egl_context); 3459 | } 3460 | 3461 | static int 3462 | gl_renderer_create_pbuffer_surface(struct gl_renderer *gr) { 3463 | EGLConfig pbuffer_config; 3464 | static const EGLint pbuffer_attribs[] = { 3465 | EGL_WIDTH, 10, 3466 | EGL_HEIGHT, 10, 3467 | EGL_NONE 3468 | }; 3469 | 3470 | pbuffer_config = gr->egl_config; 3471 | if (pbuffer_config == EGL_NO_CONFIG_KHR) { 3472 | pbuffer_config = 3473 | gl_renderer_get_egl_config(gr, EGL_PBUFFER_BIT, 3474 | NULL, 0); 3475 | } 3476 | if (pbuffer_config == EGL_NO_CONFIG_KHR) { 3477 | weston_log("failed to choose EGL config for PbufferSurface\n"); 3478 | return -1; 3479 | } 3480 | 3481 | gr->dummy_surface = eglCreatePbufferSurface(gr->egl_display, 3482 | pbuffer_config, 3483 | pbuffer_attribs); 3484 | 3485 | if (gr->dummy_surface == EGL_NO_SURFACE) { 3486 | weston_log("failed to create PbufferSurface\n"); 3487 | return -1; 3488 | } 3489 | 3490 | return 0; 3491 | } 3492 | 3493 | static int 3494 | gl_renderer_display_create(struct weston_compositor *ec, 3495 | const struct gl_renderer_display_options *options) 3496 | { 3497 | struct gl_renderer *gr; 3498 | 3499 | gr = zalloc(sizeof *gr); 3500 | if (gr == NULL) 3501 | return -1; 3502 | 3503 | gr->compositor = ec; 3504 | wl_list_init(&gr->shader_list); 3505 | gr->platform = options->egl_platform; 3506 | 3507 | gr->shader_scope = gl_shader_scope_create(gr); 3508 | if (!gr->shader_scope) 3509 | goto fail; 3510 | 3511 | if (gl_renderer_setup_egl_client_extensions(gr) < 0) 3512 | goto fail; 3513 | 3514 | gr->base.read_pixels = gl_renderer_read_pixels; 3515 | gr->base.repaint_output = gl_renderer_repaint_output; 3516 | gr->base.flush_damage = gl_renderer_flush_damage; 3517 | gr->base.attach = gl_renderer_attach; 3518 | gr->base.surface_set_color = gl_renderer_surface_set_color; 3519 | gr->base.destroy = gl_renderer_destroy; 3520 | gr->base.surface_get_content_size = 3521 | gl_renderer_surface_get_content_size; 3522 | gr->base.surface_copy_content = gl_renderer_surface_copy_content; 3523 | 3524 | if (gl_renderer_setup_egl_display(gr, options->egl_native_display) < 0) 3525 | goto fail; 3526 | 3527 | log_egl_info(gr->egl_display); 3528 | 3529 | 
ec->renderer = &gr->base; 3530 | 3531 | if (gl_renderer_setup_egl_extensions(ec) < 0) 3532 | goto fail_with_error; 3533 | 3534 | if (!gr->has_configless_context) { 3535 | EGLint egl_surface_type = options->egl_surface_type; 3536 | 3537 | if (!gr->has_surfaceless_context) 3538 | egl_surface_type |= EGL_PBUFFER_BIT; 3539 | 3540 | gr->egl_config = 3541 | gl_renderer_get_egl_config(gr, 3542 | egl_surface_type, 3543 | options->drm_formats, 3544 | options->drm_formats_count); 3545 | if (gr->egl_config == EGL_NO_CONFIG_KHR) { 3546 | weston_log("failed to choose EGL config\n"); 3547 | goto fail_terminate; 3548 | } 3549 | } 3550 | 3551 | ec->capabilities |= WESTON_CAP_ROTATION_ANY; 3552 | ec->capabilities |= WESTON_CAP_CAPTURE_YFLIP; 3553 | ec->capabilities |= WESTON_CAP_VIEW_CLIP_MASK; 3554 | if (gr->has_native_fence_sync && gr->has_wait_sync) 3555 | ec->capabilities |= WESTON_CAP_EXPLICIT_SYNC; 3556 | 3557 | wl_list_init(&gr->dmabuf_images); 3558 | if (gr->has_dmabuf_import) { 3559 | gr->base.import_dmabuf = gl_renderer_import_dmabuf; 3560 | gr->base.query_dmabuf_formats = 3561 | gl_renderer_query_dmabuf_formats; 3562 | gr->base.query_dmabuf_modifiers = 3563 | gl_renderer_query_dmabuf_modifiers; 3564 | } 3565 | wl_list_init(&gr->dmabuf_formats); 3566 | 3567 | if (gr->has_surfaceless_context) { 3568 | weston_log("EGL_KHR_surfaceless_context available\n"); 3569 | gr->dummy_surface = EGL_NO_SURFACE; 3570 | } else { 3571 | weston_log("EGL_KHR_surfaceless_context unavailable. " 3572 | "Trying PbufferSurface\n"); 3573 | 3574 | if (gl_renderer_create_pbuffer_surface(gr) < 0) 3575 | goto fail_with_error; 3576 | } 3577 | 3578 | wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGB565); 3579 | wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUV420); 3580 | wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_NV12); 3581 | wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUYV); 3582 | wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XYUV8888); 3583 | 3584 | wl_signal_init(&gr->destroy_signal); 3585 | 3586 | if (gl_renderer_setup(ec, gr->dummy_surface) < 0) { 3587 | if (gr->dummy_surface != EGL_NO_SURFACE) 3588 | weston_platform_destroy_egl_surface(gr->egl_display, 3589 | gr->dummy_surface); 3590 | goto fail_with_error; 3591 | } 3592 | 3593 | if (gr->gl_half_float_type != 0) 3594 | ec->capabilities |= WESTON_CAP_COLOR_OPS; 3595 | 3596 | return 0; 3597 | 3598 | fail_with_error: 3599 | gl_renderer_print_egl_error_state(); 3600 | fail_terminate: 3601 | eglTerminate(gr->egl_display); 3602 | fail: 3603 | weston_log_scope_destroy(gr->shader_scope); 3604 | free(gr); 3605 | ec->renderer = NULL; 3606 | return -1; 3607 | } 3608 | 3609 | static void 3610 | fragment_debug_binding(struct weston_keyboard *keyboard, 3611 | const struct timespec *time, 3612 | uint32_t key, void *data) 3613 | { 3614 | struct weston_compositor *ec = data; 3615 | struct gl_renderer *gr = get_renderer(ec); 3616 | struct weston_output *output; 3617 | 3618 | gr->fragment_shader_debug = !gr->fragment_shader_debug; 3619 | 3620 | wl_list_for_each(output, &ec->output_list, link) 3621 | weston_output_damage(output); 3622 | } 3623 | 3624 | static void 3625 | fan_debug_repaint_binding(struct weston_keyboard *keyboard, 3626 | const struct timespec *time, 3627 | uint32_t key, void *data) 3628 | { 3629 | struct weston_compositor *compositor = data; 3630 | struct gl_renderer *gr = get_renderer(compositor); 3631 | 3632 | gr->fan_debug = !gr->fan_debug; 3633 | weston_compositor_damage_all(compositor); 3634 | } 3635 | 3636 | 
static uint32_t 3637 | get_gl_version(void) 3638 | { 3639 | const char *version; 3640 | int major, minor; 3641 | 3642 | version = (const char *) glGetString(GL_VERSION); 3643 | if (version && 3644 | (sscanf(version, "%d.%d", &major, &minor) == 2 || 3645 | sscanf(version, "OpenGL ES %d.%d", &major, &minor) == 2) && 3646 | major > 0 && minor >= 0) { 3647 | return gr_gl_version(major, minor); 3648 | } 3649 | 3650 | weston_log("warning: failed to detect GLES version, defaulting to 2.0.\n"); 3651 | return gr_gl_version(2, 0); 3652 | } 3653 | 3654 | static int 3655 | gl_renderer_setup(struct weston_compositor *ec, EGLSurface egl_surface) 3656 | { 3657 | struct gl_renderer *gr = get_renderer(ec); 3658 | const char *extensions; 3659 | EGLBoolean ret; 3660 | 3661 | EGLint context_attribs[16] = { 3662 | EGL_CONTEXT_CLIENT_VERSION, 0, 3663 | }; 3664 | unsigned int nattr = 2; 3665 | 3666 | if (!eglBindAPI(EGL_OPENGL_ES_API)) { 3667 | weston_log("failed to bind EGL_OPENGL_ES_API\n"); 3668 | gl_renderer_print_egl_error_state(); 3669 | return -1; 3670 | } 3671 | 3672 | /* 3673 | * Being the compositor we require minimum output latency, 3674 | * so request a high priority context for ourselves - that should 3675 | * reschedule all of our rendering and its dependencies to be completed 3676 | * first. If the driver doesn't permit us to create a high priority 3677 | * context, it will fallback to the default priority (MEDIUM). 3678 | */ 3679 | if (gr->has_context_priority) { 3680 | context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_LEVEL_IMG; 3681 | context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_HIGH_IMG; 3682 | } 3683 | 3684 | assert(nattr < ARRAY_LENGTH(context_attribs)); 3685 | context_attribs[nattr] = EGL_NONE; 3686 | 3687 | /* try to create an OpenGLES 3 context first */ 3688 | context_attribs[1] = 3; 3689 | gr->egl_context = eglCreateContext(gr->egl_display, gr->egl_config, 3690 | EGL_NO_CONTEXT, context_attribs); 3691 | if (gr->egl_context == NULL) { 3692 | /* and then fallback to OpenGLES 2 */ 3693 | context_attribs[1] = 2; 3694 | gr->egl_context = eglCreateContext(gr->egl_display, 3695 | gr->egl_config, 3696 | EGL_NO_CONTEXT, 3697 | context_attribs); 3698 | if (gr->egl_context == NULL) { 3699 | weston_log("failed to create context\n"); 3700 | gl_renderer_print_egl_error_state(); 3701 | return -1; 3702 | } 3703 | } 3704 | 3705 | if (gr->has_context_priority) { 3706 | EGLint value = EGL_CONTEXT_PRIORITY_MEDIUM_IMG; 3707 | 3708 | eglQueryContext(gr->egl_display, gr->egl_context, 3709 | EGL_CONTEXT_PRIORITY_LEVEL_IMG, &value); 3710 | 3711 | if (value != EGL_CONTEXT_PRIORITY_HIGH_IMG) { 3712 | weston_log("Failed to obtain a high priority context.\n"); 3713 | /* Not an error, continue on as normal */ 3714 | } 3715 | } 3716 | 3717 | ret = eglMakeCurrent(gr->egl_display, egl_surface, 3718 | egl_surface, gr->egl_context); 3719 | if (ret == EGL_FALSE) { 3720 | weston_log("Failed to make EGL context current.\n"); 3721 | gl_renderer_print_egl_error_state(); 3722 | return -1; 3723 | } 3724 | 3725 | gr->gl_version = get_gl_version(); 3726 | log_gl_info(); 3727 | 3728 | gr->image_target_texture_2d = 3729 | (void *) eglGetProcAddress("glEGLImageTargetTexture2DOES"); 3730 | 3731 | extensions = (const char *) glGetString(GL_EXTENSIONS); 3732 | if (!extensions) { 3733 | weston_log("Retrieving GL extension string failed.\n"); 3734 | return -1; 3735 | } 3736 | 3737 | if (!weston_check_egl_extension(extensions, "GL_EXT_texture_format_BGRA8888")) { 3738 | weston_log("GL_EXT_texture_format_BGRA8888 not available\n"); 3739 | 
return -1; 3740 | } 3741 | 3742 | if (weston_check_egl_extension(extensions, "GL_EXT_read_format_bgra")) 3743 | ec->read_format = PIXMAN_a8r8g8b8; 3744 | else 3745 | ec->read_format = PIXMAN_a8b8g8r8; 3746 | 3747 | if (gr->gl_version < gr_gl_version(3, 0) && 3748 | !weston_check_egl_extension(extensions, "GL_EXT_unpack_subimage")) { 3749 | weston_log("GL_EXT_unpack_subimage not available.\n"); 3750 | return -1; 3751 | } 3752 | 3753 | if (gr->gl_version >= gr_gl_version(3, 0) || 3754 | weston_check_egl_extension(extensions, "GL_EXT_texture_rg")) 3755 | gr->has_gl_texture_rg = true; 3756 | 3757 | if (weston_check_egl_extension(extensions, "GL_OES_EGL_image_external")) 3758 | gr->has_egl_image_external = true; 3759 | 3760 | if (weston_check_egl_extension(extensions, "GL_EXT_color_buffer_half_float")) { 3761 | if (gr->gl_version >= gr_gl_version(3, 0)) 3762 | gr->gl_half_float_type = GL_HALF_FLOAT; 3763 | else if (weston_check_egl_extension(extensions, "GL_OES_texture_half_float")) 3764 | gr->gl_half_float_type = GL_HALF_FLOAT_OES; 3765 | } 3766 | 3767 | glActiveTexture(GL_TEXTURE0); 3768 | 3769 | gr->fallback_shader = gl_renderer_create_fallback_shader(gr); 3770 | if (!gr->fallback_shader) { 3771 | weston_log("Error: compiling fallback shader failed.\n"); 3772 | return -1; 3773 | } 3774 | 3775 | gr->fragment_binding = 3776 | weston_compositor_add_debug_binding(ec, KEY_S, 3777 | fragment_debug_binding, 3778 | ec); 3779 | gr->fan_binding = 3780 | weston_compositor_add_debug_binding(ec, KEY_F, 3781 | fan_debug_repaint_binding, 3782 | ec); 3783 | 3784 | gr->output_destroy_listener.notify = output_handle_destroy; 3785 | wl_signal_add(&ec->output_destroyed_signal, 3786 | &gr->output_destroy_listener); 3787 | 3788 | weston_log("GL ES %d.%d - renderer features:\n", 3789 | gr_gl_version_major(gr->gl_version), 3790 | gr_gl_version_minor(gr->gl_version)); 3791 | weston_log_continue(STAMP_SPACE "read-back format: %s\n", 3792 | ec->read_format == PIXMAN_a8r8g8b8 ? "BGRA" : "RGBA"); 3793 | weston_log_continue(STAMP_SPACE "EGL Wayland extension: %s\n", 3794 | gr->has_bind_display ? "yes" : "no"); 3795 | 3796 | return 0; 3797 | } 3798 | 3799 | WL_EXPORT struct gl_renderer_interface gl_renderer_interface = { 3800 | .display_create = gl_renderer_display_create, 3801 | .output_window_create = gl_renderer_output_window_create, 3802 | .output_pbuffer_create = gl_renderer_output_pbuffer_create, 3803 | .output_destroy = gl_renderer_output_destroy, 3804 | .output_set_border = gl_renderer_output_set_border, 3805 | .create_fence_fd = gl_renderer_create_fence_fd, 3806 | }; --------------------------------------------------------------------------------