├── EOSVRConverter.py ├── EOSVRConverter_Red.py ├── MITLicense.md ├── README.md ├── Rotator3D.py ├── VideoCombiner.py ├── enableJpgs.py └── requirements.txt
/EOSVRConverter.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
from subprocess import Popen
from multiprocessing import Pool
from tqdm import tqdm
from os import mkdir, listdir, cpu_count
from os.path import exists, join
from glob import glob

TOPAZ_BIN = r"C:\Program Files\Topaz Labs LLC\Topaz Sharpen AI\Topaz Sharpen AI.exe"


# Code adapted from https://gist.github.com/HViktorTsoi/8e8b0468a9fb07842669aa368382a7df
# Did some changes to speed up by 33% while nearly not changing the result.
def shadowHighlightSaturationAdjustment(
        img,
        shadow_amount_percent, shadow_tone_percent, shadow_radius,
        highlight_amount_percent, highlight_tone_percent, highlight_radius,
        color_percent):
    """
    Image Shadow / Highlight Correction. The same function as in Photoshop / GIMP.

    :param img: input BGR image, numpy array of shape (height, width, 3)
    :param shadow_amount_percent [0.0 ~ 1.0]: how much of a shadow correction to make.
    :param shadow_tone_percent [0.0 ~ 1.0]: range of shadow tones that are modified.
    :param shadow_radius [>0]: size of the local neighborhood around each pixel (shadows).
    :param highlight_amount_percent [0.0 ~ 1.0]: how much of a highlight correction to make.
    :param highlight_tone_percent [0.0 ~ 1.0]: range of highlight tones that are modified.
        NOTE(review): a value of 0 makes highlight_tone == 255 and divides by zero below;
        callers in this file always pass > 0 — confirm before generalizing.
    :param highlight_radius [>0]: size of the local neighborhood around each pixel (highlights).
    :param color_percent [-1.0 ~ 1.0]: saturation adjustment strength.
    :return: adjusted BGR image (uint8).
    """
    shadow_tone = shadow_tone_percent * 255
    highlight_tone = 255 - highlight_tone_percent * 255

    shadow_gain = 1 + shadow_amount_percent * 6
    highlight_gain = 1 + highlight_amount_percent * 6

    # The entire correction process is carried out in YUV space:
    # adjust highlights/shadows in Y (intensity), adjust colors in UV.
    height, width = img.shape[:2]
    imgYUV = cv2.cvtColor(img, cv2.COLOR_BGR2YUV).astype('float32')
    img_Y = imgYUV[..., 0].reshape(-1)
    img_U = imgYUV[..., 1].reshape(-1)
    img_V = imgYUV[..., 2].reshape(-1)
    img_U -= 127  # center chroma around 0; range becomes [-127, 128]
    img_V -= 127

    # Extract shadow / highlight masks (0 outside the affected tone range).
    shadow_map = 255 - img_Y * 255 / shadow_tone
    shadow_map[img_Y >= shadow_tone] = 0
    highlight_map = 255 - (255 - img_Y) * 255 / (255 - highlight_tone)
    highlight_map[img_Y <= highlight_tone] = 0

    # Box blur on the tone maps for smoother transitions (faster than Gaussian,
    # visually near-identical here).
    if shadow_amount_percent * shadow_radius > 0:
        # shadow_map = cv2.GaussianBlur(shadow_map.reshape(height, width), ksize=(shadow_radius, shadow_radius), sigmaX=0).reshape(-1)
        shadow_map = cv2.blur(shadow_map.reshape(height, width), ksize=(shadow_radius, shadow_radius)).reshape(-1)
    if highlight_amount_percent * highlight_radius > 0:
        # highlight_map = cv2.GaussianBlur(highlight_map.reshape(height, width), ksize=(highlight_radius, highlight_radius), sigmaX=0).reshape(-1)
        highlight_map = cv2.blur(highlight_map.reshape(height, width), ksize=(highlight_radius, highlight_radius)).reshape(-1)

    # Tone LUTs (256 entries, indexed by the 8-bit Y value).
    t = np.arange(256)
    LUT_shadow = (1 - np.power(1 - t * (1 / 255), shadow_gain)) * 255
    LUT_shadow = np.maximum(0, np.minimum(255, np.int_(LUT_shadow + .5)))
    LUT_highlight = np.power(t * (1 / 255), highlight_gain) * 255
    LUT_highlight = np.maximum(0, np.minimum(255, np.int_(LUT_highlight + .5)))

    # Adjust tone: blend original Y with the LUT-mapped Y, weighted by the maps.
    shadow_map = shadow_map * (1 / 255)
    highlight_map = highlight_map * (1 / 255)
    iH = (1 - shadow_map) * img_Y + shadow_map * LUT_shadow[np.int_(img_Y)]
    iH = (1 - highlight_map) * iH + highlight_map * LUT_highlight[np.int_(iH)]
    img_Y = iH

    # Adjust color saturation adaptively according to highlights/shadows.
    if color_percent != 0:
        if color_percent > 0:
            LUT = (1 - np.sqrt(np.arange(32768)) * (1 / 128)) * color_percent + 1
        else:
            LUT = np.sqrt(np.arange(32768)) * (1 / 128) * color_percent + 1
        # FIX: U/V lie in [-127, 128], so U^2 + V^2 can reach exactly 32768,
        # one past the last LUT index — clamp to avoid an IndexError on
        # fully-saturated pixels.
        sat_idx = np.minimum(np.int_(img_U ** 2 + img_V ** 2 + .5), LUT.size - 1)
        color_gain = LUT[sat_idx]
        w = 1 - np.minimum(2 - (shadow_map + highlight_map), 1)
        img_U = w * img_U + (1 - w) * img_U * color_gain
        img_V = w * img_V + (1 - w) * img_V * color_gain

    # Reassemble and convert back to BGR.
    img_Y = img_Y.astype('uint8')
    img_U = (img_U + 127).astype('uint8')
    img_V = (img_V + 127).astype('uint8')
    # np.row_stack was removed in NumPy 2.0; np.vstack is the identical replacement.
    imgYUV = np.vstack([img_Y, img_U, img_V]).T.reshape(height, width, 3)
    return cv2.cvtColor(imgYUV, cv2.COLOR_YUV2BGR)


# From https://github.com/kylemcdonald/FisheyeToEquirectangular
class FisheyeToEquirectangular:
    """Maps the dual-fisheye frames of the Canon EOS VR lens to side-by-side
    equirectangular images. The (expensive) remap tables are computed once and
    cached on disk in 'fisheye.npy'."""

    def __init__(self, n=4096, side=3600, blending=0, aperture=1):
        """
        :param n: output equirectangular height in pixels (width is n per eye).
        :param side: diameter in pixels of the source fisheye circle the map samples.
        :param blending: extra columns added on each side for edge blending.
        :param aperture: lens field of view as a fraction of pi (1 -> 180 degrees).
        """
        self.blending = blending
        blending_ratio = blending / n
        if exists('fisheye.npy'):
            # Reuse the cached remap tables; delete fisheye.npy after changing parameters.
            self.x, self.y = np.load('fisheye.npy')
        else:
            x_samples = np.linspace(0 - blending_ratio, 1 + blending_ratio, n + blending * 2)
            y_samples = np.linspace(-1, 1, n)

            # equirectangular grid
            x, y = np.meshgrid(x_samples, y_samples)

            # longitude/latitude
            longitude = x * np.pi
            latitude = y * np.pi / 2

            # 3d unit vector on the sphere
            Px = np.cos(latitude) * np.cos(longitude)
            Py = np.cos(latitude) * np.sin(longitude)
            Pz = np.sin(latitude)

            # project onto the 2d fisheye (equidistant model)
            aperture *= np.pi
            r = 2 * np.arctan2(np.sqrt(Px * Px + Pz * Pz), Py) / aperture
            theta = np.arctan2(Pz, Px)
            theta += np.pi
            x = r * np.cos(theta)
            y = r * np.sin(theta)

            x = np.clip(x, -1, 1)
            y = np.clip(y, -1, 1)

            # map [-1, 1] to pixel coordinates inside the fisheye crop
            x = (-x + 1) * side / 2
            y = (y + 1) * side / 2

            self.x = x.astype(np.float32)
            self.y = y.astype(np.float32)
            np.save('fisheye.npy', [self.x, self.y])

    def unwarp_single(self, img, interpolation=cv2.INTER_LINEAR, border=cv2.BORDER_REFLECT):
        """Remap one fisheye crop to equirectangular using the cached tables."""
        return cv2.remap(
            img, self.x, self.y,
            interpolation=interpolation,
            borderMode=border
        )

    def getLeftRightFisheyeImage(self, imgfn):
        """Load a dual-fisheye frame and crop the two fisheye circles.
        Center offsets (+/-50) are hand-calibrated for this lens/body."""
        img = cv2.imread(imgfn)
        h, w, c = img.shape
        centerLx = w // 4 - 50
        centerRx = w * 3 // 4 + 50
        centerY = h // 2 + 50
        fisheyeR = 1800
        imgL = img[centerY - fisheyeR: centerY + fisheyeR, centerLx - fisheyeR: centerLx + fisheyeR, :]
        imgR = img[centerY - fisheyeR: centerY + fisheyeR, centerRx - fisheyeR: centerRx + fisheyeR, :]
        return imgL, imgR

    def correctForImage(self, imgfn, outfn, shallAdjust=False):
        """Convert one dual-fisheye image file to a side-by-side equirectangular file.
        The left crop becomes the RIGHT eye (and vice versa) after a 180-degree
        rotation — presumably because the lens projects each circle inverted and
        mirrored; kept exactly as originally calibrated."""
        imgL, imgR = self.getLeftRightFisheyeImage(imgfn)
        newimg = self.unwarp_single(imgL)
        newimgR = cv2.rotate(newimg, cv2.ROTATE_180)
        newimg = self.unwarp_single(imgR)
        newimgL = cv2.rotate(newimg, cv2.ROTATE_180)
        newimg = np.hstack((newimgL, newimgR))
        if shallAdjust:
            # Personalized shadow/highlight/saturation tweak
            newimg = shadowHighlightSaturationAdjustment(newimg, 0.05, 0.4, 50, 0.1, 0.4, 50, 0.4)
        cv2.imwrite(outfn, newimg)

    # Correct all jpgs under the current directory in place, then hand them to
    # Topaz Sharpen AI for clarity enhancement.
    def correctAllImages(self, pool):
        fns = glob('*.jpg')
        pool.starmap(self.correctForImage, tqdm([(fn, fn) for fn in fns]))
        command = [TOPAZ_BIN] + fns
        process = Popen(command)
        process.wait()

    # Extract frames from the video using ffmpeg, and then perform correction for
    # each frame (in place). Note the video here could be exported from Premiere or
    # other software, and not necessarily the out-of-body mp4 files. So even RAW
    # could be supported (indirectly). We also give another example of directly
    # reading in the video file (not RAW though, could be ALL-I) before color
    # grading, and invoke ffmpeg to do color grading.
    def correctForVideo(self, videofn, outdir, pool):
        if not exists(outdir):
            mkdir(outdir)
        # Example 1: don't do color grading
        # ffmpegCommand = ['ffmpeg', '-i', videofn, '-qscale:v', '2', join(outdir, "%5d.png")]
        # Example 2: do color grading. Change the cube file path to your case.
        # Cube files can be downloaded from Canon website.
        ffmpegCommand = ['ffmpeg', '-i', videofn, '-qscale:v', '2', '-vf', 'lut3d=BT2020_CanonLog3-to-BT709_WideDR_33_FF_Ver.2.0.cube', join(outdir, "%5d.png")]
        exe = Popen(ffmpegCommand)
        exe.wait()
        # Also extract the audio to be combined into the final video
        ffmpegAudioCommand = ['ffmpeg', '-i', videofn, '-vn', '-acodec', 'copy', outdir + '_audio.aac']
        exe = Popen(ffmpegAudioCommand)
        exe.wait()
        # Perform the mapping and adjustment (slow) in parallel
        fns = [join(outdir, x) for x in listdir(outdir)]
        # pool.starmap(self.correctForImage, tqdm([(fn, fn) for fn in fns]))
        pool.starmap(self.correctForImage, tqdm([(fn, fn, True) for fn in fns]))
        # Get an initial version without sharpening for quick review.
        # (.lower() also lowercases the rest of the name so .MP4 inputs match.)
        command = ['ffmpeg', '-r', '30', '-i', f'{outdir}/%5d.png', '-i', f'{outdir}_audio.aac', '-c:v', 'libx264', '-vf', 'scale=8192x4096', '-preset', 'fast', '-crf', '18', '-x264-params', 'mvrange=511', '-maxrate', '100M', '-bufsize', '25M', '-pix_fmt', 'yuv420p', '-c:a', 'aac', '-b:a', '160k', '-movflags', 'faststart', videofn.lower().replace('.mp4', '_VR.mp4')]
        exe = Popen(command)  # We don't need to wait here

    def correctAllVideos(self, pool=None):
        """Correct every *.mp4 in the current directory.

        FIX: correctForVideo requires a process pool, but the original call here
        omitted it and always raised TypeError. ``pool`` is now an optional
        argument (created on demand) to keep the old zero-arg call working.
        """
        if pool is None:
            pool = Pool(min(56, cpu_count() // 2))
        fns = glob('*.mp4')
        for fn in fns:
            print(f'Processing {fn}...')
            self.correctForVideo(fn, f'{fn.replace(".mp4", "")}_Frames', pool)


if __name__ == '__main__':
    # We don't have a command line interface for now to provide maximum efficiency
    # (e.g. no need to initialize FisheyeToEquirectangular every time).
    # Sample usage:
    procCount = min(56, cpu_count() // 2)
    print(f'Creating a process pool with {procCount} processes...')
    pool = Pool(procCount)
    converter = FisheyeToEquirectangular()
    # converter.correctForImage('./tmpframes2/00001.png', './tmpframes2/00001_corrected.png')
    # converter.correctForVideo('../VRVideoRaw/IMG_3880.MP4', 'tmpframes3880', pool)
    # for i in list(reversed(range(4014, 4019))):
    #     converter.correctForVideo(f'IMG_{i}.MP4', f'IMG_{i}', pool)
    converter.correctAllImages(pool)
    # converter.correctAllVideos(pool)
--------------------------------------------------------------------------------
/EOSVRConverter_Red.py:
--------------------------------------------------------------------------------
"""
This script is specifically designed for the RED V-Raptor VV, which is compatible with the Canon RF mount and can be used with the EOS VR lens. The differences between this script and the original Canon one are as follows:

1. The camera parameters are specifically calibrated for the V-Raptor VV.
2. This version employs Ray for distributed computing, which offers improved robustness compared to the original multiprocessing-based method.
3. The workflow is changed. We expect that color grading will be conducted in DaVinci, not FFmpeg. We've also removed the shadow and highlight adjustments for the same reason.
4. We expect the script's output to be further processed, typically in Topaz Sharpener AI and DaVinci for the final sharpening, rendering and encoding. As a result, we've eliminated the FFmpeg encoding section.
8 | """ 9 | 10 | import numpy as np 11 | import cv2 12 | from subprocess import Popen 13 | from random import randint 14 | from subprocess import Popen 15 | from tqdm import tqdm 16 | from os import getcwd, listdir, cpu_count, mkdir 17 | from os.path import exists, join 18 | from glob import glob 19 | from time import sleep 20 | from Rotator3D import Rotator3D 21 | from shutil import move 22 | import logging 23 | import ray 24 | from ray.util.queue import Queue 25 | 26 | logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) 27 | TOPAZ_BIN = r"C:\Program Files\Topaz Labs LLC\Topaz Sharpen AI\Topaz Sharpen AI.exe" 28 | FILE_EXTNAME = '.tif' 29 | 30 | # From https://github.com/kylemcdonald/FisheyeToEquirectangular 31 | class FisheyeToEquirectangular: 32 | FISHEYE_FILENAME = join(getcwd(), 'fisheye_red.npy') 33 | 34 | def __init__(self, n=4096, side=3200, blending=0, aperture=1): 35 | """ 36 | :param shadow_amount_percent [0.0 ~ 1.0]: Controls (separately for the highlight and shadow values in the image) how much of a correction to make. 37 | :param shadow_tone_percent [0.0 ~ 1.0]: Controls the range of tones in the shadows or highlights that are modified. 38 | :param shadow_radius [>0]: Controls the size of the local neighborhood around each pixel 39 | :param highlight_amount_percent [0.0 ~ 1.0]: Controls (separately for the highlight and shadow values in the image) how much of a correction to make. 40 | :param highlight_tone_percent [0.0 ~ 1.0]: Controls the range of tones in the shadows or highlights that are modified. 
41 | :param highlight_radius [>0]: Controls the size of the local neighborhood around each pixel 42 | :param color_percent [-1.0 ~ 1.0]: 43 | """ 44 | self.blending = blending 45 | blending_ratio = blending / n 46 | if exists(FisheyeToEquirectangular.FISHEYE_FILENAME): 47 | data = np.load(FisheyeToEquirectangular.FISHEYE_FILENAME) 48 | self.x, self.y = data 49 | # print(f'Loaded data: {data}') 50 | else: 51 | x_samples = np.linspace(0-blending_ratio, 1+blending_ratio, n+blending*2) 52 | y_samples = np.linspace(-1, 1, n) 53 | 54 | # equirectangular 55 | x, y = np.meshgrid(x_samples, y_samples) 56 | 57 | # longitude/latitude 58 | longitude = x * np.pi 59 | latitude = y * np.pi / 2 60 | 61 | # 3d vector 62 | Px = np.cos(latitude) * np.cos(longitude) 63 | Py = np.cos(latitude) * np.sin(longitude) 64 | Pz = np.sin(latitude) 65 | 66 | # 2d fisheye 67 | aperture *= np.pi 68 | r = 2 * np.arctan2(np.sqrt(Px*Px + Pz*Pz), Py) / aperture 69 | theta = np.arctan2(Pz, Px) 70 | theta += np.pi 71 | x = r * np.cos(theta) 72 | y = r * np.sin(theta) 73 | 74 | x = np.clip(x, -1, 1) 75 | y = np.clip(y, -1, 1) 76 | 77 | x = (-x + 1) * side / 2 78 | y = (y + 1) * side / 2 79 | 80 | self.x = x.astype(np.float32) 81 | self.y = y.astype(np.float32) 82 | data = [self.x, self.y] 83 | np.save(FisheyeToEquirectangular.FISHEYE_FILENAME, data) 84 | 85 | def unwarp_single(self, img, interpolation=cv2.INTER_LINEAR, border=cv2.BORDER_REFLECT): 86 | return cv2.remap( 87 | img, self.x, self.y, 88 | interpolation=interpolation, 89 | borderMode=border 90 | ) 91 | 92 | def getLeftRightFisheyeImage(self, img): 93 | h, w, c = img.shape 94 | # Use the intrinsic matrix from actual calibration 95 | centerLx = 2248 96 | centerRx = 5967 97 | centerY = 2250 98 | fisheyeR = 1600 99 | imgL = img[centerY - fisheyeR: centerY + fisheyeR, centerLx - fisheyeR: centerLx + fisheyeR, :] 100 | imgR = img[centerY - fisheyeR: centerY + fisheyeR, centerRx - fisheyeR: centerRx + fisheyeR, :] 101 | return imgL, imgR 102 | 103 | 
def correctForImage(self, img, outfn, rotator=None): 104 | if type(img) == str: 105 | imgpath = img 106 | try: 107 | img = cv2.imread(img) 108 | except: 109 | print(f'Failed to load image {imgpath}') 110 | return None 111 | if img is None: 112 | print(f'Failed to load image {imgpath}') 113 | return None 114 | imgL, imgR = self.getLeftRightFisheyeImage(img) 115 | newimg = self.unwarp_single(imgL) 116 | newimgR = cv2.rotate(newimg, cv2.ROTATE_180) 117 | if rotator is None: 118 | newimg = self.unwarp_single(imgR) 119 | newimgL = cv2.rotate(newimg, cv2.ROTATE_180) 120 | newimg = np.hstack((newimgL, newimgR)) 121 | else: 122 | # For the rotating case, we only need one eye 123 | newimg = rotator.transformOneImage(newimgR) 124 | cv2.imwrite(outfn, newimg) 125 | 126 | # Correct all images under the correct directory in place 127 | def correctAllImages(self, pool): 128 | fns = glob('*.jpg') 129 | pool.starmap(self.correctForImage, tqdm([(fn, fn) for fn in fns])) 130 | # Use Topaz Sharpen AI to enhance resolution 131 | command = [TOPAZ_BIN] + fns 132 | process = Popen(command) 133 | process.wait() 134 | 135 | @ray.remote(num_cpus=1, memory=3*1024*1024*1024) 136 | def launchWarpTask(correctorRef, fn): 137 | img = cv2.imread(fn, -1) 138 | return correctorRef.correctForImage(img, fn, None) 139 | 140 | def correctForVideo(outdir): 141 | if not exists(outdir): 142 | mkdir(outdir) 143 | 144 | # Perform the mapping in parallel 145 | fns = glob(f'{outdir}/*{FILE_EXTNAME}') 146 | fns = [join(getcwd(), fn) for fn in fns] 147 | corrector = FisheyeToEquirectangular() 148 | correctorRef = ray.put(corrector) 149 | 150 | tasks = [] 151 | for fn in tqdm(fns, desc='submitting jobs...'): 152 | tasks.append(launchWarpTask.remote(correctorRef, fn)) 153 | tasks = [x for x in tasks if x is not None] 154 | for t in tqdm(tasks): 155 | ray.get(t) 156 | 157 | if __name__ == '__main__': 158 | ray.init() 159 | correctForVideo('RedTest') 160 | 
-------------------------------------------------------------------------------- /MITLicense.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 Kyle McDonald 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EOS VR Converter 2 | 3 | This repo provides an alternative workflow than the EOS VR Utility for Canon's great VR lens. 4 | The workflow aims to handle the current painpoints of the EOS VR Utility: 5 | 6 | * RAW formats are unsupported for both still and video; 7 | * Operation efficiency is not a focus of the software design, which has many blocking long operations. 
The users often have to wait for minutes before being able to begin export;
* There are some artifacts in the parallax correction, sometimes resulting in dizziness or crossed eyes.

The utility allows for the following workflows instead:

### Photos

* [Optional] Perform image adjustments from RAW files in your favorite editing tools. Export the result to JPG files.
* Use `EOSVRConverter.py` to convert the images to equirectangular format in batch, in parallel, in a set-and-go manner.
* [Optional] If resolution is a concern, I usually use Topaz Sharpen AI to boost the clarity.

### Videos

* Perform RAW decoding, video color grading (if you use Canon Log) and other adjustments in your favorite editing tools. Export the result to MP4 files.
* If you use the All-I format (instead of RAW formats), it's possible to skip the previous step and directly begin from the mp4 files right from the body. In this case, use `EOSVRConverter.py` to do color grading, auto adjustment, and the equirectangular transform to png files. It also extracts audio for future use. Otherwise, one can also use `EOSVRConverter.py` for the equirectangular transform only, similarly in a parallel, efficient, and set-and-go manner.
* Use `VideoCombiner.py` to optionally launch Topaz Sharpen AI to boost the clarity. The Python script then combines the frames and the audio into an MP4 file which can be played on VR goggles.

`EOSVRConverter_Red.py` provides support for the RED V-Raptor VV. Check the code for more details.

`enableJpgs.py` is provided for legacy use only.

## Technical Details

This utility converts an image or video from the R5 + EOS VR lens to an equirectangular form, which can be rendered in VR goggles.
I used industry-standard algorithms to develop this tool, so the result might differ from the EOS VR Utility's result.
Currently it doesn't depend on the EXIF data, so it doesn't require the image to be from the body, and thus can support jpg files derived from the RAW file.
However, this also limits its capability to perform automatic horizontal correction.
So there is no guarantee the horizon is correct.
When the photo was taken with too much deviation from the horizontal position (tilt etc.), the photo may bring disorientation and dizziness.
The speed is about 10~20x faster than the EOS VR Utility though.
Similarly, the utility also supports RAW videos from the R5 (indirectly), with multi-core acceleration.

This is a Python script, so one needs a basic understanding of Python in order to use it.
First install Python and the dependencies in the requirements file.
Then modify the code in `EOSVRConverter.py` as you like, especially the `main` section.
It has quite some personalized adjustment embedded in, so I strongly suggest first reading the code instead of executing it blindly.

## [Legacy] EnableJpgs

When using the great EOS VR system, I got two pain points.
The first is that the software doesn't accept RAW files, even for images.
We have to use the output jpg files directly from the body, instead of the more flexible and capable RAW files.
The second is that the entire EOS VR Utility software is implemented with blocking behaviors.
That is, when clicking anything such as selecting a file, the UI will completely freeze while doing (usually time-consuming) computation.
This is really annoying when one has tens or hundreds of photos to process.

This utility doesn't solve the two issues directly, but greatly alleviates them.
For the first one, it allows the EOS VR Utility to recognize the jpgs derived from RAW files.
55 | And for the second one, it enabled "Horizontal Correction" by default, so we don't need to click into each image twice and wait for two minutes to click the "Hozirontal Correction" button. 56 | We still need to click into image to calculate the parallax correction parameter though. 57 | 58 | ### Usage 59 | 60 | This is a python script. 61 | So please first install python 3, and also make sure `exittool` is installed and put into the system PATH. 62 | Assuming the jpg files derived from the RAW files are stored in the folder `./jpgs`, and the original jpg files right from the camera are stored in the folder `./raw_jpgs`, then we can execute the following command: 63 | 64 | ``` 65 | python ./enableJpgs.py --jpgdir ./jpgs --rawdir ./raw_jpgs 66 | ``` 67 | 68 | This will write corresponding xml files to `./jpgs`, and patch the jpg files so the EOS VR Utility could recognize them. -------------------------------------------------------------------------------- /Rotator3D.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | from math import atan2, sqrt, sin, cos, acos, asin 4 | from tqdm import tqdm 5 | from os.path import exists 6 | 7 | # Input is an LR 180 image from Canon VR 8 | # Output is an UD 360 image 9 | class Rotator3D: 10 | def __init__(self): 11 | self.h = 4096 12 | self.w = 4096 13 | self.data = None 14 | self.npfn = 'rotate.npy' 15 | 16 | # Currently hard coded 17 | def constructTransformMatrix(self): 18 | print('Constructing transform matrix...') 19 | h = self.h 20 | w = self.w 21 | xs = np.zeros((h, w * 2), 'float32') 22 | ys = np.zeros((h, w * 2), 'float32') 23 | cx = w // 2 24 | cy = h // 2 25 | rx = w // 2 26 | ry = h // 2 27 | # rotMat = np.asarray([[0, 0, -1], [0, 1, 0], [1, 0, 0]]) 28 | for lat in tqdm(range(h // 2)): 29 | for lon in range(w * 2): 30 | latF = (h - lat) / h * np.pi - np.pi / 2 31 | lonF = lon / w * np.pi 32 | #latF = 45 / 180 * np.pi 33 | #lonF = 180 / 180 
* np.pi 34 | x = cos(latF) * cos(np.pi - lonF) 35 | y = cos(latF) * sin(np.pi - lonF) 36 | z = sin(latF) 37 | #print() 38 | #print(x, y, z) 39 | x, y, z = x, z, -y 40 | #print(x, y, z) 41 | oldLatF = asin(z) 42 | oldLonF = np.pi - atan2(y, x) 43 | #print(latF, lonF, oldLatF, oldLonF) 44 | #import pdb; pdb.set_trace() 45 | oldLat = h - (oldLatF + np.pi / 2) / np.pi * h 46 | oldLon = oldLonF / np.pi * w 47 | xs[lat, lon] = oldLon 48 | ys[lat, lon] = oldLat 49 | self.data = [xs, ys] 50 | np.save(self.npfn, self.data) 51 | 52 | def loadOrConstructTransformMatrix(self, forceReconstruct=False): 53 | if exists(self.npfn) and not forceReconstruct: 54 | self.data = np.load(self.npfn) 55 | else: 56 | self.constructTransformMatrix() 57 | 58 | def transformOneImage(self, img): 59 | h, w, c = img.shape 60 | assert(h == self.h) 61 | assert(w == self.w) 62 | if self.data is None: 63 | self.loadOrConstructTransformMatrix() 64 | img2 = cv2.remap( 65 | img, self.data[0], self.data[1], 66 | interpolation=cv2.INTER_LINEAR, 67 | borderMode=cv2.BORDER_REFLECT 68 | ) 69 | img2[h // 2:, ...] 
= 0 70 | return img2 71 | 72 | def transform(self, img): 73 | #img = cv2.imread(imgfn) 74 | h, w, c = img.shape 75 | assert(h == self.h) 76 | assert(w == self.w * 2) 77 | leftImg = img[:, :w // 2, :] 78 | rightImg = img[:, w // 2:, :] 79 | leftImg = self.transformOneImage(leftImg) 80 | rightImg = self.transformOneImage(rightImg) 81 | newimg = np.vstack((leftImg, rightImg)) 82 | return newimg 83 | 84 | 85 | if __name__ == '__main__': 86 | obj = Rotator() 87 | img = obj.transform('./IMG_9649/00001.png') 88 | cv2.imwrite('tmp.png', img) 89 | -------------------------------------------------------------------------------- /VideoCombiner.py: -------------------------------------------------------------------------------- 1 | from glob import glob 2 | from os.path import join 3 | from subprocess import Popen 4 | from sys import exit 5 | 6 | TOPAZ_BIN = r"C:\Program Files\Topaz Labs LLC\Topaz Sharpen AI\Topaz Sharpen AI.exe" 7 | TOPAZ_INSTANCE = 8 8 | WORK_DIRS = [ 9 | f'./IMG_{i}' 10 | for i in [4055, 4056, 4058, 4059, 4060, 4061, 4065, 4066, 4068, 4069, 4070, 4075] 11 | ] 12 | OUT_FNS = [ 13 | f'{d}_VR_Enlarge.mp4' 14 | for d in WORK_DIRS 15 | ] 16 | 17 | if __name__ == '__main__': 18 | # Invoke Topaz Sharpen AI to enhance clarity 19 | # Use TOPAZ_INSTANCE instances by default 20 | fns = [] 21 | for d in WORK_DIRS: 22 | fns += glob(join(d, '*.png')) 23 | n = len(fns) 24 | batchSize = n // TOPAZ_INSTANCE 25 | fnsForEachInstance = [fns[batchSize * i: batchSize * (i + 1)] for i in range(TOPAZ_INSTANCE)] 26 | if n > batchSize * TOPAZ_INSTANCE - 1: 27 | fnsForEachInstance[-1].extend(fns[batchSize * TOPAZ_INSTANCE:]) 28 | print([len(f) for f in fnsForEachInstance]) 29 | isContinue = input('Continue (y/n)?') 30 | if isContinue != 'y': 31 | exit(1) 32 | processes = [] 33 | for f in fnsForEachInstance: 34 | command = [TOPAZ_BIN] + f 35 | processes.append(Popen(command)) 36 | for p in processes: 37 | p.wait() 38 | 39 | # Invoke ffmpeg to recombine the video 40 | # In this case, 
SharpenAI was launched in plugin mode, and will overwrite the original files 41 | # So we don't need to do any renaming 42 | processes = [] 43 | for workdir, outfn in zip(WORK_DIRS, OUT_FNS): 44 | command = ['ffmpeg', '-r', '30', '-i', f'{workdir}/%5d.png', '-i', f'{workdir}_audio.aac', '-c:v', 'libx264', '-vf', 'scale=8192x4096', '-preset', 'fast', '-crf', '18', '-x264-params', 'mvrange=511', '-maxrate', '100M', '-bufsize', '25M', '-pix_fmt', 'yuv420p', '-c:a', 'aac', '-b:a', '160k', '-movflags', 'faststart', outfn] 45 | p = Popen(command) 46 | processes.append(p) 47 | p.wait() 48 | -------------------------------------------------------------------------------- /enableJpgs.py: -------------------------------------------------------------------------------- 1 | from glob import glob 2 | from os.path import join, basename 3 | from os import system 4 | import argparse 5 | 6 | # Note the horizontal correction is turned on by default. 7 | XML_TEMPLATE = """ 8 | 1.0CanonYOUR_CAMERA_SNCanon EOS R5Firmware Version 1.5.0RF5.2mm F2.8 L DUAL FISHEYEON0ON000""" 9 | 10 | def enableJpgs(jpgDir, rawDir): 11 | jpgFns = glob(f'{jpgDir}/*.jpg') 12 | for fn in jpgFns: 13 | rawJpgFn = join(rawDir, basename(fn)) 14 | cmd = f'exiftool -tagsfromfile {rawJpgFn} {fn}' 15 | system(cmd) 16 | xmlFn = fn + '.xml' 17 | open(xmlFn, 'w').write(XML_TEMPLATE) 18 | 19 | if __name__ == '__main__': 20 | parser = argparse.ArgumentParser(description='A small utility to allow Canon EOS VR Utility to indirectly process RAW files, and turn on horizontal correction by default.') 21 | parser.add_argument('--jpgdir', help='Directory holding the jpg files, which could be derived from RAW.', required=True) 22 | parser.add_argument('--rawdir', help='Directory holding the original jpg files from the camera body. 
We will copy the EXIF info from these files to the corresponding files.', required=True) 23 | args = parser.parse_args() 24 | enableJpgs(args.jpgdir, args.rawdir) 25 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python 2 | tqdm 3 | --------------------------------------------------------------------------------