├── .circleci └── config.yml ├── .gitattributes ├── .gitignore ├── Blender_Imagery ├── CORE3D_Perspective_Imagery.py ├── README.md └── init.py ├── Dockerfile ├── MANIFEST.in ├── README.md ├── aoi-example ├── README.md ├── aoi-example.config └── aoi-example.json ├── core3dmetrics ├── __init__.py ├── __main__.py ├── geometrics │ ├── __init__.py │ ├── ang.py │ ├── config.py │ ├── config_schema.json │ ├── image.py │ ├── image_pair_plot.py │ ├── metrics_util.py │ ├── objectwise_metrics.py │ ├── plot.py │ ├── registration.py │ ├── relative_accuracy_metrics.py │ ├── terrain_accuracy_metrics.py │ ├── threshold_geometry_metrics.py │ └── threshold_material_metrics.py ├── instancemetrics │ ├── Building_Classes.py │ ├── GeospatialConversions.py │ ├── MetricsCalculator.py │ ├── MetricsContainer.py │ ├── TileEvaluator.py │ ├── __init__.py │ └── instance_metrics.py ├── run_geometrics.py └── summarize_metrics.py ├── docker-compose.yml ├── entrypoint.bsh ├── rj_functions.m ├── setup.py ├── test ├── __init__.py └── test_geometry_metrics.py └── utils ├── OrthoImage.py ├── __init__.py └── align3d.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/repo 5 | docker: 6 | - image: docker:18.03.0-ce-git 7 | steps: 8 | 9 | - checkout 10 | 11 | - setup_remote_docker 12 | 13 | - run: 14 | name: Install software 15 | command: | 16 | apk update 17 | apk add py-pip 18 | pip install docker-compose 19 | docker-compose --version 20 | docker ps 21 | 22 | - run: 23 | name: Smuggle repo to remote docker 24 | command: tar zc --exclude .git . 
| docker run -i -v /root/repo:/repo -w /repo alpine:3.6 tar zx 25 | 26 | - run: 27 | name: Build all dockers 28 | command: docker-compose build 29 | 30 | - run: 31 | name: Unit tests 32 | command: docker-compose run --rm geometrics_develop test 33 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | Dockerfile merge=ours 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.csv 2 | .idea/ 3 | __pycache__/ 4 | .venv/ 5 | private_data/ 6 | build/* 7 | *.egg* 8 | *~ 9 | *.pptx 10 | *.ppt 11 | dist/* 12 | /Notes 13 | reporting/ 14 | *.cer 15 | -------------------------------------------------------------------------------- /Blender_Imagery/CORE3D_Perspective_Imagery.py: -------------------------------------------------------------------------------- 1 | ################################################################################# 2 | # IARPA-CORE3D Blender Work 3 | 4 | # This script takes a perspective image of an object, where the camera elevation angle, focal length, and radial distance to the object are controlled by input parameters 5 | # View README for information on command line arguments required 6 | 7 | # Dependency: pip install bpy-cuda && bpy_post_install 8 | 9 | # Author: Erika Rashka, JHU APL, July 2020 10 | # References: 11 | # diffuse_to_emissive() function (and all function dependencies): authored by Robert H. Forsman Jr. 
#   on Blender Stack exchange, source code from:
#   https://blender.stackexchange.com/questions/79595/change-diffuse-shader-to-emission-shader-without-affecting-shader-color
#   Changes made by Erika Rashka and Shea Hagstrom

################################################################################


############################################
# Imports and functions
############################################

import bpy
import os
import math
from math import radians
from mathutils import Vector
import sys, getopt
from math import radians, degrees
from pathlib import Path

import geojson


def render_on_gpu():
    """Switch every scene's Cycles renderer onto CUDA GPU devices.

    Enables all detected compute devices except CPUs, so rendering runs
    exclusively on the GPU(s).
    """
    scene = bpy.data.scenes["Scene"]
    bpy.context.scene.cycles.device = 'GPU'
    for scene in bpy.data.scenes:
        scene.cycles.device = 'GPU'
        scene.render.resolution_percentage = 100
    bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
    for devices in bpy.context.preferences.addons['cycles'].preferences.get_devices():
        for d in devices:
            d.use = True
            if d.type == 'CPU':
                # CPUs are deliberately excluded from the render device pool.
                d.use = False


def replace_with_emission(node, node_tree):
    """Replace *node* with a ShaderNodeEmission wired into the same links.

    The new emission node inherits the replaced node's color input (link or
    default value), its location, and the first link of its first output.
    """
    new_node = node_tree.nodes.new('ShaderNodeEmission')
    connected_sockets_out = []
    sock = node.inputs[0]
    # Remember where the color input came from (None if it was unlinked).
    if len(sock.links) > 0:
        color_link = sock.links[0].from_socket
    else:
        color_link = None
    defaults_in = sock.default_value[:]

    # Record the first link target of each output socket (None if unlinked).
    for sock in node.outputs:
        if len(sock.links) > 0:
            connected_sockets_out.append(sock.links[0].to_socket)
        else:
            connected_sockets_out.append(None)

    new_node.location = (node.location.x, node.location.y)

    # Re-wire the color input, then copy the default color value as well.
    if color_link is not None:
        node_tree.links.new(new_node.inputs[0], color_link)
    new_node.inputs[0].default_value = defaults_in

    # Only the first output connection is restored on the emission node.
    if connected_sockets_out[0] is not None:
        node_tree.links.new(connected_sockets_out[0], new_node.outputs[0])


def material_diffuse_to_emission(mat):
    """Convert every Principled/Diffuse BSDF node of *mat* into an emission node."""
    doomed = []
    if mat.use_nodes:
        for node in mat.node_tree.nodes:
            if (node.type == 'BSDF_PRINCIPLED') or (node.type == 'BSDF_DIFFUSE'):
                replace_with_emission(node, mat.node_tree)
                doomed.append(node)
        # Remove after iterating: deleting nodes mid-loop would invalidate it.
        for node in doomed:
            mat.node_tree.nodes.remove(node)


def replace_on_selected_objects():
    """Run the diffuse->emission conversion on materials of selected objects only."""
    mats = set()
    for obj in bpy.context.scene.objects:
        if obj.select_get():
            for slot in obj.material_slots:
                mats.add(slot.material)
    for mat in mats:
        material_diffuse_to_emission(mat)


def replace_in_all_materials():
    """Run the diffuse->emission conversion on every material in the blend file."""
    for mat in bpy.data.materials:
        material_diffuse_to_emission(mat)


def diffuse_to_emissive():
    """Convert diffuse shading to emissive across the scene.

    The selected-objects variant is kept but permanently disabled; all
    materials are always converted.
    """
    if False:  # flip to True to restrict conversion to the selected objects
        replace_on_selected_objects()
    else:
        replace_in_all_materials()


def rotate_and_render(cam, empty, output_dir, output_file_format, rotation_steps, rotation_angle, elev_ang, radius):
    """Render an overhead (nadir) frame of the scene.

    theta/phi describe the orbital geometry, but the orbital renders below
    are commented out, so only the nadir frame is written to
    output_dir + (output_file_format % rotation_steps).
    """
    theta = (math.pi / 2) - radians(elev_ang)  # degrees off nadir
    phi = radians(rotation_angle) / rotation_steps  # azimuth step (unused while the orbit loop is disabled)

    # #if N = 4, this creates the N S E and W renders
    # for step in range(0, rotation_steps):
    #     azimuth = step*phi
    #     cam.location[0] = empty.location[0] + (radius * math.sin(theta) * math.cos(azimuth))
    #     cam.location[1] = empty.location[1] + (radius * math.sin(theta) * math.sin(azimuth))
    #     cam.location[2] = empty.location[2] + (radius * math.cos(theta))
    #     bpy.context.scene.render.filepath = output_dir + (output_file_format % step)
    #     bpy.ops.render.render(write_still=True, use_viewport=True)

    # Nadir render: camera directly above the tracking empty at the given radius.
    cam.location[0] = empty.location[0]
    cam.location[1] = empty.location[1]
    cam.location[2] = empty.location[2] + radius
    bpy.context.scene.render.filepath = output_dir + (output_file_format % rotation_steps)
    bpy.ops.render.render(write_still=True, use_viewport=True)
def read_in_args(argumentlist):
    """Parse command-line flags for the perspective-imagery renderer.

    Accepts short and long spellings for path, GSD, AOI corners, the +Z-up
    toggle, frame count, elevation angle, focal length, and radius.

    Returns
    -------
    tuple: (path, gsd, x1, y1, x2, y2, z_up, N, elev_ang, f_length,
            radius, test) with numeric entries coerced to float/int.
    """
    # Default values used for any flag that is absent.
    parsed = {
        'path': '', 'gsd': 0.5,
        'x1': 0.0, 'y1': 0.0, 'x2': 0.0, 'y2': 0.0,
        'N': 1, 'elev_ang': 360.0, 'f_length': 0.0, 'radius': 0.0,
        'test': 0.0, 'z_up': True,
    }
    usage = 'test.py -- -p -g -x -y -X -Y -z <+z up?> -N -e -f -r '
    short_opts = "hp:g:x:y:X:Y:z:N:e:f:r:"
    long_opts = ["path=", "gsd=", "x=", "y=", "X=", "Y=", "z=", "N=", "e=", "f=", "r="]
    # Flag spellings -> the key they populate (value kept as the raw string;
    # conversion happens once, in the return statement).
    dest = {
        ('-p', '--path'): 'path',
        ('-g', '--gsd'): 'gsd',
        ('-x', '--x'): 'x1',
        ('-y', '--y'): 'y1',
        ('-X', '--X'): 'x2',
        ('-Y', '--Y'): 'y2',
        ('-N', '--N'): 'N',
        ('-e', '--e'): 'elev_ang',
        ('-r', '--r'): 'radius',
    }

    try:
        opts, _ = getopt.getopt(argumentlist, short_opts, long_opts)
        for flag, value in opts:
            if flag == '-h':
                print(usage)
                sys.exit()
            elif flag in ('-z', '--z'):
                # Becomes True only for the literal string 'true' (any case).
                parsed['z_up'] = value.lower() == 'true'
            elif flag in ('-f', '--f'):
                parsed['f_length'] = float(value)
            else:
                for spellings, key in dest.items():
                    if flag in spellings:
                        parsed[key] = value
                        break
    except getopt.error as err:
        print(usage)
        print('Please add in a filepath directory (string), GSD value (float), coordinates startpoint(x,y) and endpoint (X,Y) (float), a boolean indicator if you want to load file with +Z up loaded, the number of frames (N), the elevation angle (e), the camera focal length in mm(f), and the range(r) from the center of AOI')
        print(str(err))
        sys.exit(2)

    return (str(parsed['path']), float(parsed['gsd']),
            float(parsed['x1']), float(parsed['y1']),
            float(parsed['x2']), float(parsed['y2']),
            bool(parsed['z_up']), int(parsed['N']),
            float(parsed['elev_ang']), float(parsed['f_length']),
            float(parsed['radius']), float(parsed['test']))
def generate_blender_images(path, gsd=1.0, z_up=True, N=0, elev_ang=60.0, f_length=30.0, radius=8000.0, savepath=''):
    """Load every model found under *path* and render perspective imagery.

    Walks *path* for .obj/.glb/.dae models (plus per-tile BoundingBox.geojson
    metadata), places multi-tile OBJ models at their tile centroids, sets up
    an emissive-shaded scene with a tracking perspective camera, and renders
    via rotate_and_render().

    Parameters:
        path (str): directory tree containing the models.
        gsd (float): ground sample distance (meters per pixel).
        z_up (bool): True if models are imported with +Z up.
        N (int): number of orbital viewpoints (only the nadir frame renders).
        elev_ang (float): camera elevation angle in degrees.
        f_length (float): nominal focal length in mm (recorded in file name).
        radius (float): camera distance from the scene center.
        savepath (str): directory in which rendered images are written.

    Returns:
        str: the image path prefix handed to the renderer.
    """
    global cam

    #####
    # Get tile location information
    #####
    tile_num = 0
    tile_centers_x = []
    tile_centers_y = []

    # Each tile is expected to ship a BoundingBox.geojson describing its
    # footprint; the footprint centroid later places the tile in the scene.
    substring = "BoundingBox.geojson"
    for root, dirs, files in os.walk(path):
        for file in files:
            file_path = os.path.join(root, file)
            if substring in file:
                tile_num = tile_num + 1
                with open(file_path) as f:
                    gj = geojson.load(f)
                # Array of [x, y] coordinate pairs of the footprint polygon.
                features = gj['features'][0]['geometry']['coordinates'][0]

                x_max = features[0][0]
                y_max = features[0][1]
                x_min = x_max
                y_min = y_max
                for xy_pair in features:
                    if xy_pair[0] > x_max:
                        x_max = xy_pair[0]
                    elif xy_pair[0] < x_min:
                        x_min = xy_pair[0]
                    if xy_pair[1] > y_max:
                        y_max = xy_pair[1]
                    elif xy_pair[1] < y_min:
                        y_min = xy_pair[1]
                tile_centers_x.append(((x_max - x_min) / 2) + x_min)
                tile_centers_y.append(((y_max - y_min) / 2) + y_min)

    # Start from an empty scene.
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete(use_global=False)

    # Collect every candidate model file in the directory tree.
    # (.obj = OBJ, .glb = glTF 2.0 — the only glTF Blender supports, .dae = Collada)
    file_list = []
    for root, dirs, files in os.walk(path):
        for file in files:
            file_path = os.path.join(root, file)
            if file.endswith((".obj", ".glb", ".dae")):
                file_list.append(file_path)
    num_models = len(file_list)
    print("Number of models found: " + str(num_models))

    if num_models == 0:
        print(
            "ERROR: Missing .obj, .glb, or .dae file. Please add path to one or more of these files. \n")
        sys.exit(1)

    n = 0
    obj_tiles = False

    for file_path in file_list:
        # load models of interest
        if file_path.find('combined.obj') > 0:
            print('loading OBJ model...')
            obj_tiles = True
            if z_up:
                bpy.ops.import_scene.obj(filepath=file_path, axis_forward='Y', axis_up='Z')
            else:
                bpy.ops.import_scene.obj(filepath=file_path)
            n = n + 1
            # The freshly imported object is the current selection.
            model = bpy.context.selected_objects[0]

            # Multi-tile sanity checks: every tile needs its metadata file.
            if tile_num == 0 and num_models > 1:
                print(num_models)
                print(
                    "ERROR: No tile metadata files found, please add the file BoundingBox.geojson to directory path of each tile.\n")
                sys.exit(1)
            if tile_num < n and num_models > 1:
                print(
                    "ERROR: Missing tile metadata for one or more tiles, please add the file BoundingBox.geojson to directory path of each tile. \n")
                sys.exit(1)

            # Move the tile to its geo-referenced center.
            if num_models > 1:
                model.location[0] = tile_centers_x[n - 1]
                model.location[1] = tile_centers_y[n - 1]

        elif file_path.find('.glb') > 0:
            # only supports glTF 2.0
            print('loading glTF 2.0 model...')
            bpy.ops.import_scene.gltf(filepath=file_path)
        elif file_path.find('.dae') > 0:
            print('loading Collada model...')
            bpy.ops.wm.collada_import(filepath=file_path)
            # Option for heavy imports: save a .blend after each tile.
            # bpy.ops.wm.save_as_mainfile(filepath=path + "all_collada_tiles.blend")

    # define the model as the currently selected object (last loaded part)
    model = bpy.context.selected_objects[0]

    # Image size in pixels, scaled so the requested GSD is honored; the
    # tile_num/1.9 factor widens the frame for multi-tile OBJ scenes.
    if obj_tiles and num_models > 1:
        pixels = int(max(model.dimensions * (tile_num / 1.9)) / gsd)
    else:
        pixels = int(max(model.dimensions) / gsd)

    half_width_meters = (pixels / 2) * gsd * 1.1
    lens_factor = radius / half_width_meters
    # force object rotation to zero
    model.rotation_euler[0] = 0
    model.rotation_euler[1] = 0
    model.rotation_euler[2] = 0
    # World-space center of the last model's bounding box. Snippet from:
    # https://blender.stackexchange.com/questions/62040/get-center-of-geometry-of-an-object?noredirect=1&lq=1
    local_bbox_center = 0.125 * sum((Vector(b) for b in model.bound_box), Vector())
    global_bbox_center = model.matrix_world @ local_bbox_center
    print('Bounding box: ', global_bbox_center)

    # Centroid over all mesh objects when several models were loaded.
    global_bbox_center_allmod = Vector((0.0, 0.0, 0.0))
    print(bpy.context.scene.objects)
    if num_models > 1:
        for obj in bpy.context.scene.objects:
            if obj.type == 'MESH':
                print('new object')
                print(obj.name)
                local_center = 0.125 * sum((Vector(b) for b in obj.bound_box), Vector())
                # BUG FIX: this previously used model.matrix_world and
                # local_bbox_center, i.e. it accumulated the *last* model's
                # center once per mesh; use each object's own transform and
                # bounding-box center instead.
                global_center = obj.matrix_world @ local_center
                print(global_center)
                print(obj.dimensions)
                global_bbox_center_allmod[0] += global_center[0]
                global_bbox_center_allmod[1] += global_center[1]
                global_bbox_center_allmod[2] += global_center[2]
        global_bbox_center_allmod = global_bbox_center_allmod / num_models
        print('Bounding box for all models: ', global_bbox_center_allmod)

    # Choose the scene center the camera will target.
    if num_models > 1:
        if obj_tiles:
            # Average the tile centers (offset by the all-model centroid).
            global_x = 0
            global_y = 0
            for i in range(tile_num):
                global_x = global_x + global_bbox_center_allmod[0] + tile_centers_x[i]
                global_y = global_y + global_bbox_center_allmod[1] + tile_centers_y[i]
            center_x = global_x / tile_num
            center_y = global_y / tile_num
            center_z = global_bbox_center_allmod[2]
        else:
            center_x = global_bbox_center_allmod[0]  # centroid of all model centers
            center_y = global_bbox_center_allmod[1]
            center_z = global_bbox_center_allmod[2]
    else:
        center_x = global_bbox_center[0]  # center of the single loaded model
        center_y = global_bbox_center[1]
        center_z = global_bbox_center[2]

    # Camera ground position depends on which axis the import treated as up.
    if z_up:
        print('option 1 - z is up')
        camx = center_x
        camy = center_y
    else:
        print('option 2 - z is down')
        camx = center_x
        camy = center_z
    # add a camera just above the scene; rotate_and_render() sets its final location
    bpy.ops.object.camera_add(align='VIEW', location=(camx, camy, 0))
    cam = bpy.context.selected_objects[0]
    print('CAMX = ', camx)
    print('CAMY = ', camy)
    # Perspective camera: start from a unit lens/sensor, then scale the focal
    # length so the scene footprint fills the frame at the given radius.
    cam.data.type = 'PERSP'
    cam.data.lens_unit = 'MILLIMETERS'
    cam.data.lens = 1  # f_length
    cam.data.sensor_width = 2
    cam.data.sensor_height = 2
    cam.data.lens *= lens_factor
    cam.data.clip_end = radius * 1000
    cam.data.clip_start = radius / 1000

    # define scene for rendering
    scene = bpy.context.scene
    scene.camera = cam
    scene.render.resolution_x = pixels
    scene.render.resolution_y = pixels
    scene.render.resolution_percentage = 100
    print('Resolution of image: (Y,X): ', pixels, pixels)
    # set cycles rendering options
    scene.render.engine = 'CYCLES'
    scene.cycles.samples = 8
    render_on_gpu()
    # Light-blue world background.
    world = bpy.data.worlds['World']
    world.use_nodes = True
    bg = world.node_tree.nodes['Background']
    bg.inputs[0].default_value[:3] = (float(153) / 255, float(204) / 255, float(255) / 255)
    bg.inputs[1].default_value = 1.0
    # Track-to target: an empty at the scene center (zero height) that the
    # camera constraint keeps aimed at.
    # see example of how-to here: https://www.youtube.com/watch?v=ageV_llb0Hk
    bpy.ops.object.add(type='EMPTY')
    empty = bpy.context.selected_objects[0]
    empty.location[0] = center_x
    # When +Z is not up, the model's height axis is Y, so center_z is the
    # ground-plane coordinate.
    empty.location[1] = center_y if z_up else center_z
    empty.location[2] = 0
    bpy.data.objects['Camera'].select_set(True)
    bpy.context.view_layer.objects.active = bpy.data.objects['Camera']
    bpy.ops.object.constraint_add(type='TRACK_TO')
    bpy.context.object.constraints["Track To"].target = empty
    bpy.context.object.constraints["Track To"].track_axis = 'TRACK_NEGATIVE_Z'
    bpy.context.object.constraints["Track To"].up_axis = 'UP_Y'

    # Select just the meshes and convert their materials to emissive so the
    # render needs no scene lighting.
    for obj in bpy.context.scene.objects:
        obj.select_set(obj.type == "MESH")
    diffuse_to_emissive()

    # Write the images
    imgname = ('CHECK_persp_image_' + str(pixels) + '_' + str(pixels) + '_gsd' + str(gsd)
               + '_z_' + str(z_up) + '_N_' + str(N) + '_elev_' + str(elev_ang)
               + '_flen_' + str(f_length) + '_radius_' + str(radius) + '_')
    savepath = str(Path(savepath, imgname).absolute())
    print(savepath)
    rotate_and_render(cam, empty, savepath, 'render%d.png', int(N), 360.0, elev_ang, radius)
    return savepath
    # Note: There is an exception upon termination in engine.free() call in cycles\__init__.py
    # This is a known issue: https://developer.blender.org/T52203


if __name__ == '__main__':
    # Blender passes script arguments after a literal "--".
    argv = sys.argv
    argv = argv[argv.index("--") + 1:]

    # redirect stdout to stderr
    # in command line, pipe stdout to nul: python name.py 1> nul
    # the rendering engine writes a lot of status messages to stdout we don't want to see
    sys.stdout = sys.stderr

    path, gsd, x1, y1, x2, y2, z_up, N, elev_ang, f_length, radius, test = read_in_args(argv)
    if y1 != 0.0 and y2 != 0.0:
        y1 = -y1  # Ensure y inputs can be +
        y2 = -y2
    print(gsd, x1, y1, x2, y2, z_up, N, elev_ang, f_length, radius, test)
    print(path)

    file_dir = os.path.dirname(path)
    savepath = str(Path(file_dir, 'rendered_images').absolute())
    generate_blender_images(path, gsd, z_up, N, elev_ang, f_length, radius, savepath)
f_length, radius, test = read_in_args(argv) 450 | if y1 != 0.0 and y2 != 0.0: 451 | y1 = -y1 #Ensure y inputs can be + 452 | y2 = -y2 453 | print(gsd, x1, y1, x2, y2, z_up, N, elev_ang, f_length, radius, test) 454 | print(path) 455 | 456 | file_dir = os.path.dirname(path) 457 | savepath = str(Path(file_dir, 'rendered_images').absolute()) 458 | generate_blender_images(path, gsd, z_up, N, elev_ang, f_length, radius, savepath) 459 | 460 | -------------------------------------------------------------------------------- /Blender_Imagery/README.md: -------------------------------------------------------------------------------- 1 | # Blender Rendered Images at Unique Perspective Angles 2 | 3 | Saves Blender rendered images of .OBJ files based on user input of desired orthographic or perspective imagery and relevant input parameters. Orthographic overhead images can be specified with unique areas of interest (AOI) in xy coordinates. Perspective images can also be created, and additional command line arguments such as focal length, elevation angle, radial distance, and number of orbital viewpoints can be specified. 4 | 5 | ## Getting Started 6 | 7 | These instructions will show you how to run Blender in the background and have the relevant python scripts run within Blender via the command line. There are examples provided below to demsontrate the proper command line arguments to be passed in and their required data types. 8 | 9 | ### Prerequisites 10 | 11 | You will need Blender 2.79b (or another compatible Blender version). 12 | 13 | Model assumptions: 14 | Typically when loading .OBJ files into Blender, one is required to change from a Y up coordinate system to Z up coordinate system. The .OBJ files typically treat Y as up, therefore the user has the option to have Blender recognize the coordinate system change during import. To do this, change the -z parameter to True. 
If this parameter is set to False, the .OBJ model will not be reoriented such that +Z is considered up, but instead the coordinate system for all translations and calculations is adjusted separately to treat +Z as up and provide reasonable imagery. 15 | For more information on coordinate system requirements, see [Blender Wavefront OBJ](https://docs.blender.org/manual/en/2.80/addons/io_scene_obj.html "Blender Wavefront OBJ"). 16 | 17 | 18 | ## Examples 19 | Input arguments for camera control parameters are specified as follows: 20 | 21 | | Input Details | Data Type | 22 | | ------ | ------ | 23 | | -p: Path to the .obj file location | (float) | 24 | | -g: Desired pixel Ground Sample Distance (GSD) | (float) | 25 | | -x: Coordinate x1 of bounding box around AOI | (float) | 26 | | -y: Coordinate y1 of bounding box around AOI | (float) | 27 | | -X: Coordinate x2 of bounding box around AOI | (float) | 28 | | -Y: Coordinate y2 of bounding box around AOI | (float) | 29 | | -z: Boolean specifier for loading in the .obj file with coordinate system oriented +Z up (True) or no specification (False) | (bool) | 30 | | -N: Number of orbital locations to take images at (360/N is the rotation around the z axis), and therefore the number of images saved. 
Also known as frame # | (int) | 31 | | -e: Camera elevation angle (measured from xy plane) (deg) | (float) | 32 | | -f: Camera focal length (mm) | (float) | 33 | | -r: Camera radial distance from center of object bounding box | (float) | 34 | 35 | Below examples are written for Windows, for other OS please use command line equivalent 36 | 37 | ``` 38 | #From the command line, navigate to the Blender folder on your pc 39 | cd C:\Program Files\Blender Foundation\Blender 40 | ``` 41 | 42 | ``` 43 | #Run CORE3D_Overhead_Imagery.py to save overhead orthographic images 44 | #Note: 45 | #The first directory specified should be the saved location of CORE3D_Overhead_Imagery.py 46 | #The second directory specified should be the path where the .obj file is saved. 47 | #If you use -h for help, Blender -help options will be provided 48 | 49 | blender --background --python C:\Your_Directory\CORE3D_Overhead_Imagery.py -- -p "C:/Users/Your_OBJ_Directory/example.obj" -g 1.0 -x 200.0 -y 200.0 -X 400.0 -Y 400.0 -z False 50 | ``` 51 | 52 | ``` 53 | #Run CORE3D_Perspective_Imagery.py to save perspective images 54 | #Note: 55 | #Perspective imagery code does not allow specification of AOI bounding box, however this can be controlled via camera focal length adjustments 56 | #The first directory specified should be the saved location of CORE3D_Perspective_Imagery.py 57 | #The second directory specified should be the path where the .obj file is saved. 
58 | #If you use -h for help, Blender -help options will be provided 59 | 60 | blender --background --python C:\Your_Directory\CORE3D_Perpsective_Imagery.py -- -p "C:/Users/Your_OBJ_Directory/example.obj" -g 1.0 -z False -N 4 -e 60.0 -f 30.0 -r 1800.0 61 | ``` 62 | 63 | ``` 64 | #General command line formatting 65 | test.py -- -p -g -x -y -X -Y -z <+z up?> -N -e -f -r ' 66 | ``` 67 | -------------------------------------------------------------------------------- /Blender_Imagery/init.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pubgeo/core3d-metrics/fcda0d56869f8b2b3c506a3b9601b2c6ab491617/Blender_Imagery/init.py -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jhuapl/pubgeo:latest 2 | 3 | RUN apt update && apt upgrade -y && \ 4 | DEBIAN_FRONTEND=noninteractive apt install -y --fix-missing --no-install-recommends \ 5 | git \ 6 | build-essential \ 7 | libglib2.0-0 \ 8 | libsm6 \ 9 | python3 \ 10 | python3-dev \ 11 | python3-pip \ 12 | python3-gdal \ 13 | python3-tk \ 14 | python3-scipy 15 | 16 | RUN apt autoremove -y && rm -rf /var/lib/apt/lists/* 17 | RUN pip3 install future-fstrings 18 | RUN pip3 install wheel 19 | RUN pip3 install "matplotlib==3.0.3" laspy setuptools "jsonschema==2.6.0" "numpy==1.16.2" "opencv-python==4.0.0.21" "Pillow" simplekml tqdm "mathutils==2.81.2" 20 | WORKDIR / 21 | 22 | ARG DOCKER_DEPLOY=true 23 | ENV DOCKER_DEPLOY=$DOCKER_DEPLOY 24 | RUN if [ "$DOCKER_DEPLOY" = true ] ; then \ 25 | pip3 install --no-deps git+https://github.com/pubgeo/core3d-metrics; \ 26 | fi 27 | 28 | RUN apt purge -y \ 29 | git 30 | 31 | ADD entrypoint.bsh / 32 | RUN chmod 755 /entrypoint.bsh 33 | ENTRYPOINT ["/entrypoint.bsh"] 34 | 35 | CMD ["echo", "Please run GeoMetrics with a valid AOI configuration", \ 36 | "\ndocker run --rm -v /home/ubuntu/data:/data 
jhuapl/geometrics core3dmetrics -c "] 37 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include core3dmetrics/geometrics/config_schema.json 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## core3d-metrics 2 | JHU/APL supported the IARPA CORE3D program by providing independent test and evaluation of the performer team solutions for building 3D models based on satellite images and other sources. Metric evaluation code was maintained here for transparency and to enable collaboration for improvements with performer teams. Legacy MATLAB code is also now archived here for reference. None of this code is being actively maintained at this time. 3 | 4 | Preliminary metrics are described in the following paper: 5 | 6 | M. Bosch, A. Leichtman, D. Chilcott, H. Goldberg, M. Brown. “Metric Evaluation Pipeline for 3D Modeling of Urban Scenes”, ISPRS Archives, 2017 [pdf](https://www.int-arch-photogramm-remote-sens-spatial-inf-sci.net/XLII-1-W1/239/2017/isprs-archives-XLII-1-W1-239-2017.pdf). 7 | 8 | ### Requirements 9 | The following python3 libraries (and their dependencies) are required: 10 | 11 | * gdal 12 | * laspy 13 | * matplotlib 14 | * numpy 15 | * scipy 16 | * tk 17 | 18 | Alternatively, you can use the provided docker [container](Dockerfile). 
19 | 20 | ### Installation 21 | Recommend: use a [virtual environment](https://docs.python.org/3/tutorial/venv.html) 22 | 23 | python3 setup.py install 24 | python3 setup.py install --prefix=$MY_ROOT 25 | 26 | ### Usage 27 | If installed 28 | 29 | # from command line 30 | core3d-metrics --help 31 | core3d-metrics -c 32 | python3 -m core3dmetrics -c 33 | 34 | # in use code: 35 | import core3dmetrics.geometrics as geo 36 | geo.registration.align3d(reference_filename, test_filename) 37 | core3dmetrics.main(['--help"]) 38 | 39 | If not installed 40 | 41 | cd core3dmetrics 42 | python3 run_geometrics.py -c [-o -r -t ] 43 | 44 | One of the first steps is to align your dataset to the ground truth. This is performed using pubgeo's [ALIGN3D](https://github.com/pubgeo/pubgeo/#align3d) algorithm. 45 | The algorithm then calculates metrics for 2D, 3D, and spectral classification against the ground truth. 46 | 47 | ###### Usage Statement 48 | usage: core3dmetrics [-h] -c [-r] [-t] [-o] [--align | --no-align] [--test-ignore] [--save-plots | --skip-save-plots] [--save-aligned] 49 | core3dmetrics entry point 50 | optional arguments: 51 | -h, --help show this help message and exit 52 | -c , --config Configuration file 53 | -r , --reference Reference data folder 54 | -t , --test Test data folder 55 | -o , --output Output folder 56 | --align Enable alignment (default) 57 | --no-align Disable alignment 58 | --save-aligned Save aligned images as geoTIFF (not enabled by default) 59 | --test-ignore Enable NoDataValue pixels in test CLS image to be 60 | ignored during evaluation 61 | --save-plots Enable saving plots (overrides config file setting) 62 | --skip-save-plots Disable saving plots (overrides config file setting) 63 | 64 | #### Input 65 | _AOI Configuration_ is a configuration file using python's ConfigParser that is further described in [aoi-config.md](aoi-example/aoi-config.md). 
66 | This configuration file defines which files to analyze and what to compare against (ground truth). Additionally the config is used to toggle various software settings. 67 | 68 | #### Example Output 69 | python3 -m core3dmetrics -c aoi.config 70 | This command would perform metric analysis on the test dataset provided by the aoi.config file. This analysis will also generate the following files (in place): 71 | * < test dataset >_metrics.json 72 | 73 | These files contain the determined metrics for completeness, correctness, f-score, Jaccard Index, Branching Factor, and the Align3d offsets. 74 | -------------------------------------------------------------------------------- /aoi-example/README.md: -------------------------------------------------------------------------------- 1 | This document accompanies the [AOI Example Configuration](aoi-example.config) to elaborate on proper configuration setup. 2 | 3 | The structure of the data follows closely to Vricon satellite imagery packages. 4 | 5 | # Reference Inputs 6 | This section is denoted by the \[INPUT.REF\] tag and is used to identify ground truth files to use for metrics analysis. 7 | 8 | #### DSMFilename 9 | Relative or absolute path to associated DSM (Digital Surface Model) GeoTIFF file - a DSM represents the first reflective surface 10 | #### DTMFilename 11 | Relative or absolute path to associated DTM (Digital Terrain Model) GeoTIFF file - a DTM represents the bare earth surface 12 | #### CLSFilename 13 | Relative or absolute path to associated CLS (landcover classification) GeoTIFF file - buildings and other man-made structures are labeled 14 | #### NDXFilename 15 | Relative or absolute path to associated NDX (unique index for each man-made structure) GeoTIFF file 16 | #### MTLFilename 17 | Relative or absolute path to associated MTL (material label) GeoTIFF file. This field is options. If not specified material metrics are not computed. 
18 | #### CLSMatchValue 19 | Classification value for man-made structure type (e.g., building) to evaluate using the metrics. This is specified as an single value or array of arrays. 20 | 21 | # Test Inputs 22 | This section is denoted by the \[INPUT.TEST\] tag and is used to identify the test data set to be compared with the ground truth files. 23 | 24 | #### DSMFilename 25 | Relative or absolute path to associated DSM (Digital Surface Model) GeoTIFF file - a DSM represents the first reflective surface 26 | #### DTMFilename 27 | Relative or absolute path to associated DTM (Digital Terrain Model) GeoTIFF file - a DTM represents the bare earth surface. This field is optional. If not specified, \[INPUT.REF\]\[DTMFilename\] is used in it's place. 28 | #### CLSFilename 29 | Relative or absolute path to associated CLS (landcover classification) GeoTIFF file - buildings and other man-made structures are labeled 30 | #### MTLFilename 31 | Relative or absolute path to associated MTL (material label) GeoTIFF file. This field is options. If not specified material metrics are not computed. 32 | #### CLSMatchValue 33 | Classification value for man-made structure type (e.g., building) to evaluate using the metrics. This field is optional. Value defaults to \[INPUT.REF\]\[CLSMatchValue\]. 34 | 35 | # Options 36 | This section is denoted by the \[OPTIONS\] tag and is used to configure optional parameters for metric analysis. 37 | #### QuantizeHeight 38 | This boolean flag is used to turn height quantization on or off. Suggested default is 'True' 39 | #### TerrainZErrorThreshold 40 | Threshold value used to determine height error in terrain accuracy metrics 41 | # Plots 42 | This section is denoted by the \[PLOTS\] tag and is used to set options for drawing and saving visualization plots. 43 | #### ShowPlots 44 | Boolean flag to enable displaying plots. 45 | #### SavePlots 46 | Boolean flag to enable saving plots to file. ShowPlots does not need to be enabled to save plots. 
47 | 48 | # Registration Executable Path 49 | This optional section is denoted by the \[REGEXEPATH\] tag and is used to locate executable files. By default, the application will search the $PATH variable for an align3d executable. 50 | #### Align3DPath 51 | Relative or absolute path to a custom pubgeo Align3d executable 52 | 53 | # Materials Reference 54 | This section is denoted by the \[MATERIALS.REF\] tag and is used to describe material labels 55 | #### MaterialIndices 56 | A comma separated list of integer indices to use for material labeling 57 | #### MaterialNames 58 | A comma separated list of strings labels associated with the material types 59 | #### MaterialIndicesToIgnore 60 | A comma separated list of integer indices to be ignored in metric analysis 61 | -------------------------------------------------------------------------------- /aoi-example/aoi-example.config: -------------------------------------------------------------------------------- 1 | [INPUT.REF] 2 | DSMFilename = /path_to/ground_truth-DSM.tif 3 | DTMFilename = /path_to/ground_truth-DTM.tif 4 | CLSFilename = /path_to/ground_truth-CLS.tif 5 | NDXFilename = /path_to/ground_truth-NDX.tif 6 | CLSMatchValue = [6,17] 7 | 8 | [INPUT.TEST] 9 | DSMFilename = /path_to/test_model-DSM.tif 10 | DTMFilename = /path_to/test_model-DTM.tif 11 | CLSFilename = /path_to/test_model-CLS.tif 12 | 13 | [OPTIONS] 14 | QuantizeHeight = false 15 | 16 | [PLOTS] 17 | ShowPlots = false 18 | SavePlots = true 19 | 20 | # This default works for docker image 21 | #[REGEXEPATH] 22 | #Align3DPath = "/path/to/custom/align3d" 23 | 24 | [MATERIALS.REF] 25 | MaterialIndices = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 26 | MaterialNames = Unclassified,Asphalt,Concrete/Stone,Glass,Tree,Non-tree vegetation,Metal,Ceramic,Soil,Solar panel,Water,Polymer,Unscored,Indeterminate,Indeterminate asphalt/concrete 27 | MaterialIndicesToIgnore = 0,12,13 28 | -------------------------------------------------------------------------------- 
/aoi-example/aoi-example.json: -------------------------------------------------------------------------------- 1 | { 2 | "INPUT.REF": { 3 | "DSMFilename": "/path_to/ground_truth-DSM.tif", 4 | "DTMFilename": "/path_to/ground_truth-DTM.tif", 5 | "CLSFilename": "/path_to/ground_truth-CLS.tif", 6 | "NDXFilename": "/path_to/ground_truth-NDX.tif", 7 | "CLSMatchValue": [[6,65], [17], [6,17,65]] 8 | }, 9 | "INPUT.TEST": { 10 | "DSMFilename": "/path_to/test_model-DSM.tif", 11 | "DTMFilename": "/path_to/test_model-DTM.tif", 12 | "CLSFilename": "/path_to/test_model-CLS.tif", 13 | "CLSMatchValue": [[6], [17], [6,17]] 14 | }, 15 | "OPTIONS": { 16 | "QuantizeHeight": true 17 | }, 18 | "PLOTS": { 19 | "ShowPlots": false 20 | "SavePlots": true 21 | }, 22 | "REGEXEPATH": { 23 | "Align3DPath": "" 24 | }, 25 | "MATERIALS.REF": { 26 | "MaterialIndices": [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14], 27 | "MaterialNames": ["Unclassified","Asphalt","Concrete/Stone","Glass","Tree","Non-tree vegetation","Metal","Ceramic","Soil","Solar panel","Water","Polymer","Unscored","Indeterminate","Indeterminate asphalt/concrete"], 28 | "MaterialIndicesToIgnore": [0,12,13] 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /core3dmetrics/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from . 
__main__ import main 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /core3dmetrics/__main__.py: -------------------------------------------------------------------------------- 1 | 2 | from core3dmetrics.run_geometrics import main 3 | 4 | if __name__ == "__main__": 5 | main() 6 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from .image import * 4 | from .plot import * 5 | from .config import * 6 | from .metrics_util import * 7 | from .threshold_material_metrics import * 8 | from .threshold_geometry_metrics import * 9 | from .registration import * 10 | from .relative_accuracy_metrics import * 11 | from .terrain_accuracy_metrics import * 12 | from .objectwise_metrics import * 13 | 14 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/ang.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import numpy as np 3 | import time 4 | from osgeo import gdal 5 | import argparse 6 | import glob 7 | import math 8 | import csv 9 | from tqdm import tqdm 10 | 11 | 12 | def saveTiffMultiBand(outputFilename, imageData, outputDataType): 13 | [nBands,nRows,nCols] = imageData.shape 14 | 15 | #start creating output file 16 | outDriver = gdal.GetDriverByName('GTiff') 17 | ds = gdal.GetDriverByName('MEM').Create( '', nCols, nRows, nBands, outputDataType)#intermediary driver needed for some formats 18 | 19 | for i in range(0,nBands):#loop through all bands 20 | #write band data 21 | outband = ds.GetRasterBand(i + 1) 22 | outband.WriteArray(imageData[i,:,:]) 23 | 24 | #create true output driver with createcopy for compatibility with more output formats 25 | options=[] 26 | outdata = outDriver.CreateCopy(outputFilename, ds, 0, options) 27 | 28 | #close down image 
objects 29 | outdata.FlushCache()#save to disk 30 | outdata = None 31 | ds.FlushCache()#save to disk 32 | ds = None 33 | 34 | def saveTiffSimple(outputFilename, imageData, outputDataType): 35 | [nRows,nCols] = imageData.shape 36 | nBands=1 37 | 38 | #start creating output file 39 | outDriver = gdal.GetDriverByName('GTiff') 40 | ds = gdal.GetDriverByName('MEM').Create( '', nCols, nRows, nBands, outputDataType)#intermediary driver needed for some formats 41 | 42 | for i in range(0,nBands):#loop through all bands 43 | #write band data 44 | outband = ds.GetRasterBand(i + 1) 45 | outband.WriteArray(imageData) 46 | 47 | #create true output driver with createcopy for compatibility with more output formats 48 | options=[] 49 | outdata = outDriver.CreateCopy(outputFilename, ds, 0, options) 50 | 51 | #close down image objects 52 | outdata.FlushCache()#save to disk 53 | outdata = None 54 | ds.FlushCache()#save to disk 55 | ds = None 56 | 57 | #return eigenvalues, eigenvectors, and whether all data used for statistics is valid 58 | def getSortedEigens(points, nodataValue): 59 | isValid=True 60 | if nodataValue in points[2]: isValid=False#mark kernels containing nodata values as invalid 61 | if np.isnan(np.sum(points[2])): isValid=False#mark kernels containing NaN values as invalid 62 | 63 | cov=np.cov(points)#get xyz covariance 64 | eigenValues,eigenVectors = np.linalg.eig(cov)#do eigendecomposition 65 | 66 | #sort eigenvalues and eigenvectors by eigenvalues in descending order 67 | # eigenvectors are already normalized 68 | idx = eigenValues.argsort()[::-1] 69 | eigenValues = eigenValues[idx] 70 | eigenVectors = eigenVectors[:,idx] 71 | 72 | return [eigenValues,eigenVectors,isValid] 73 | 74 | #assumes a coordinate system with equal unit vectors is used (e.g., UTM but not Lat/Lon) 75 | #assumes that images are already aligned and resampled to the same resolution and bounds 76 | def computeAngleInfo(kernelRadius, pixelGSD, refDSM, refCLS, testDSM, testCLS, nodataValue, 
outputPath): 77 | start = time.time() 78 | 79 | #create circular kernel pixel mask 80 | irad=math.ceil(kernelRadius) 81 | kernel=np.zeros((irad*2+1,irad*2+1)) 82 | for y in range(-irad,irad+1): 83 | for x in range(-irad,irad+1): 84 | if y*y+x*x <= kernelRadius*kernelRadius: kernel[y+irad,x+irad]=1 85 | print("Kernel mask\n", kernel) 86 | kernel1D=kernel.ravel() 87 | inds=np.where(kernel1D==1)#1D pixel indices in kernel where value==1 88 | xInds=np.tile(np.arange(-irad,irad+1)*pixelGSD,(irad*2+1,1)) 89 | yInds=np.transpose(xInds) 90 | xPositions=xInds.ravel()[inds] 91 | yPositions=yInds.ravel()[inds] 92 | 93 | #storage for interesting features 94 | tmpData=np.zeros((9,refDSM.shape[0],refDSM.shape[1])) 95 | 96 | #find pixel coordinates where building label occurs in reference 97 | prows,pcols = np.where(refCLS==6) 98 | 99 | #computed angle info for pixels where it can be computed 100 | refSurfaceAngles=[] 101 | testSurfaceAngles=[] 102 | angleErrors=[] 103 | refPixelXs=[] 104 | refPixelYs=[] 105 | 106 | #loop through all building-labeled pixels 107 | print("Computing angles") 108 | for i in tqdm(range(0,len(prows))):#loop through all pixels with refCLS==6 109 | #for i in range(0,int(len(prows)*0.033)):#loop through some pixels with refCLS==6 110 | if prows[i]>=irad and prows[i]=irad and pcols[i]0.2:#is reference data good enough to score? 
130 | #compute test model geometry 131 | nums=testDSM[y-irad:y+irad+1,x-irad:x+irad+1].ravel()[inds]#get DSM z-values within kernel around pixel 132 | points=np.stack((xPositions,yPositions,nums))#xyz pixel positions as [[],[],[]] 133 | [testEigenValues, testEigenVectors, isTestValid] = getSortedEigens(points, nodataValue) 134 | testNormal=testEigenVectors[:,2]#last eigenvector is the surface normal 135 | if testNormal[2]<0: testNormal *= -1#flip normal if z-component is not pointing up 136 | testAngleFromUp=math.acos(np.dot(testNormal,[0,0,1]))*(180/math.pi)#roof slope angle in degrees 137 | 138 | #angle in degrees between reference and test normals 139 | # note that this is the angle between normals, not the difference in slopes measured from nadir 140 | angleDiff=math.acos(min(np.dot(testNormal,normal),1))*(180/math.pi) 141 | 142 | #statistic values 143 | refSurfaceAngles.append(refAngleFromUp) 144 | testSurfaceAngles.append(testAngleFromUp) 145 | angleErrors.append(angleDiff) 146 | refPixelXs.append(x) 147 | refPixelYs.append(y) 148 | else:#reference data is not good enough 149 | isRefValid=False 150 | testAngleFromUp=0 151 | angleDiff=0 152 | 153 | #store temp data for records 154 | tmpData[0, y, x]=normal[0] 155 | tmpData[1, y, x]=normal[1] 156 | tmpData[2, y, x]=normal[2] 157 | tmpData[3, y, x]=int(isRefValid==True)#0=invalid, 1=valid 158 | tmpData[4, y, x]=eigCurvature 159 | tmpData[5, y, x]=eigPlanarity 160 | tmpData[6, y, x]=refAngleFromUp 161 | tmpData[7, y, x]=testAngleFromUp 162 | tmpData[8, y, x]=angleDiff 163 | 164 | # report progress every 10000 values 165 | #if i%10000==0:print(i*100/len(prows), "percent complete") 166 | 167 | # write angular statistics data 168 | myfile = open(os.path.join(outputPath,'angleData.csv'), 'w', newline='') 169 | wr = csv.writer(myfile) 170 | wr.writerow(["Pixel x", "Pixel y", "Ref angle from up","Test angle from up", "Angle error"]) 171 | wr.writerows(np.transpose([refPixelXs, refPixelYs, 
np.around(np.array(refSurfaceAngles),5), np.around(np.array(testSurfaceAngles),5), np.around(np.array(angleErrors),5)])) 172 | 173 | # report scores 174 | angleErrors.sort() 175 | print("68% error value", angleErrors[int(len(angleErrors)*0.68)]) 176 | 177 | # report time 178 | end = time.time() 179 | print("time =", "{:.3f}".format(end - start),"sec") 180 | 181 | # save temp data used during angle computations 182 | saveTiffMultiBand(os.path.join(outputPath,'angleTmpData.tif'), tmpData, gdal.GDT_Float32) 183 | 184 | # order RMS, stable slope mask, ref slope, angle error 185 | return [angleErrors[int(len(angleErrors)*0.68)], tmpData[3,:,:], tmpData[6,:,:], tmpData[8,:,:]] 186 | 187 | def computeIOUs(refDSM, refDTM, refCLS, testDSM, testNDSM, testCLS, stableAngleMask, angleError, outputPath): 188 | start = time.time() 189 | 190 | #FP and FN label pixel counts 191 | FPC=0 192 | FNC=0 193 | 194 | #correct/incorrect masks 195 | correctLabel=np.zeros((refDSM.shape[0],refDSM.shape[1])) 196 | correctHeight=np.zeros((refDSM.shape[0],refDSM.shape[1])) 197 | correctAngle=np.zeros((refDSM.shape[0],refDSM.shape[1])) 198 | correctAGL=np.zeros((refDSM.shape[0],refDSM.shape[1])) 199 | 200 | #count TP/FP/FN components 201 | prows,pcols = np.where(refCLS==6) 202 | for y in range(0,refDSM.shape[0]): 203 | for x in range(0,refDSM.shape[1]): 204 | if refCLS[y,x]==6: 205 | #label 206 | if refCLS[y,x]==testCLS[y,x]: correctLabel[y,x]=1 207 | else: FNC=FNC+1 208 | #z error 209 | if abs(refDSM[y,x]-testDSM[y,x])<1: correctHeight[y,x]=1 210 | #angle 211 | if stableAngleMask[y,x]==1: 212 | if abs(angleError[y,x])<5: correctAngle[y,x]=1 213 | else: 214 | correctAngle[y,x]=1#assume angle is correct if no stable angle can be used to test 215 | #AGL 216 | if abs((refDSM[y,x]-refDTM[y,x])-testNDSM[y,x])<1: correctAGL[y,x]=1 217 | else: 218 | if refCLS[y,x]!=65 and testCLS[y,x]==6: FPC=FPC+1#label FP occurred 219 | 220 | saveTiffSimple(os.path.join(outputPath, 'delCCorrectMask.tif'), correctLabel, 
gdal.GDT_Float32) 221 | saveTiffSimple(os.path.join(outputPath, 'delZCorrectMask.tif'), correctHeight, gdal.GDT_Float32) 222 | saveTiffSimple(os.path.join(outputPath, 'delAGLCorrectMask.tif'), correctAGL, gdal.GDT_Float32) 223 | saveTiffSimple(os.path.join(outputPath, 'delANGCorrectMask.tif'), correctAngle, gdal.GDT_Float32) 224 | # Combined Rasters 225 | saveTiffSimple(os.path.join(outputPath, 'Roof_CLS_IOU.tif'), correctLabel, gdal.GDT_Float32) 226 | saveTiffSimple(os.path.join(outputPath, 'Roof_CLS_Z_IOU.tif'), np.multiply(correctLabel,correctHeight), gdal.GDT_Float32) 227 | saveTiffSimple(os.path.join(outputPath, 'Roof_CLS_Z_SLOPE_IOU.tif'), np.multiply(np.multiply(correctLabel, correctHeight), correctAngle), 228 | gdal.GDT_Float32) 229 | saveTiffSimple(os.path.join(outputPath, 'Roof_CLS_AGL_IOU.tif'), 230 | np.multiply(correctLabel, correctAGL), 231 | gdal.GDT_Float32) 232 | TPC = np.sum(correctLabel) 233 | TPZ = np.sum(correctHeight) 234 | TPAGL = np.sum(correctAGL) 235 | TPANG = np.sum(correctAngle) 236 | TPZTot = np.sum(np.multiply(correctLabel, correctHeight)) 237 | # TPAGLTot=np.sum(np.multiply(np.multiply(correctLabel, correctHeight), correctAGL)) 238 | # TPMTot=np.sum(np.multiply(np.multiply(np.multiply(correctLabel, correctHeight), correctAGL), correctAngle)) 239 | TPAGLTot = np.sum(np.multiply(correctLabel, correctAGL)) 240 | TPMZTot = np.sum(np.multiply(np.multiply(correctLabel, correctHeight), correctAngle)) 241 | TPMAGLTot = np.sum(np.multiply(np.multiply(correctLabel, correctAGL), correctAngle)) 242 | 243 | IOUC = TPC / (TPC + FPC + FNC) 244 | IOUZ = TPZTot / (TPC + FPC + FNC) 245 | IOUAGL = TPAGLTot / (TPC + FPC + FNC) 246 | # IOUM=TPMTot/(TPC+FPC+FNC) 247 | IOUMZ = TPMZTot / (TPC + FPC + FNC) 248 | IOUMAGL = TPMAGLTot / (TPC + FPC + FNC) 249 | print("TPC", TPC) 250 | print("TPZ", TPZ) 251 | print("TPAGL", TPAGL) 252 | print("TPA", TPANG) 253 | print("TPZTot", TPZTot) 254 | print("TPAGLTot", TPAGLTot) 255 | # print("TPMTot", TPMTot) 256 | 
print("TPMTot", TPMZTot) 257 | print("TPMTot", TPMAGLTot) 258 | print("FPC", FPC) 259 | print("FNC", FNC) 260 | print("IOUC", IOUC) # label only 261 | print("IOUZ", IOUZ) # label and z-error 262 | print("IOUAGL", IOUAGL) # label and z-error and AGL-error 263 | # print("IOUM", IOUM)#label and z-error and AGL-error and angle-error 264 | print("IOUMZ", IOUMZ) # label and z-error and angle-error 265 | print("IOUMAGL", IOUMAGL) # label and AGL-error and angle-error 266 | end = time.time() 267 | print("time =", "{:.3f}".format(end - start), "sec") 268 | return IOUC, IOUZ, IOUAGL, IOUMZ 269 | 270 | 271 | def calculate_metrics(refDSM, refDTM, refCLS, testDSM, testDTM, testCLS, tform, kernel_radius=3, 272 | output_path='./'): 273 | 274 | testNDSM = testDSM-testDTM 275 | 276 | # read image data 277 | transform = tform 278 | pixelWidth = transform[1] 279 | pixelHeight = -transform[5] 280 | 281 | # run angle finder and make good/bad truth mask 282 | [orderRMS, stableAngleMask, refSlope, angleError] = computeAngleInfo(kernel_radius, pixelWidth, refDSM, refCLS, 283 | testDSM, testCLS, -10000, output_path) 284 | 285 | #compute IOUs 286 | IOUC, IOUZ, IOUAGL, IOUMZ = computeIOUs(refDSM, refDTM, refCLS, testDSM, testNDSM, testCLS, stableAngleMask, angleError, output_path) 287 | return IOUC, IOUZ, IOUAGL, IOUMZ, orderRMS 288 | 289 | if __name__ == "__main__": 290 | print("starting") 291 | 292 | # load ndx/dsm ref files 293 | dataPath = './' 294 | refDSM = gdal.Open(os.path.join(dataPath, 'refDSM.tif')).ReadAsArray() 295 | refDTM = gdal.Open(os.path.join(dataPath, 'refDTM.tif')).ReadAsArray() 296 | refCLS = gdal.Open(os.path.join(dataPath, 'refCLS.tif')).ReadAsArray() 297 | 298 | # load ndx/dsm test files 299 | testDSM = gdal.Open(os.path.join(dataPath, 'testDSM.tif')).ReadAsArray() 300 | testNDSM = gdal.Open(os.path.join(dataPath, 'testNDSM.tif')).ReadAsArray() 301 | testCLS = gdal.Open(os.path.join(dataPath, 'testCLS.tif')).ReadAsArray() 302 | 303 | # read image data 304 | transform 
= gdal.Open(os.path.join(dataPath, 'refDSM.tif')).GetGeoTransform() 305 | pixelWidth = transform[1] 306 | pixelHeight = -transform[5] 307 | 308 | # set angle-finding parameters 309 | kernelRadius = 3 # pixels 310 | # run angle finder and make good/bad truth mask 311 | [orderRMS, stableAngleMask, refSlope, angleError] = computeAngleInfo(kernelRadius, pixelWidth, refDSM, refCLS, testDSM, testCLS, -10000, dataPath) 312 | 313 | # compute IOUs 314 | computeIOUs(refDSM, refDTM, refCLS, testDSM, testNDSM, testCLS, stableAngleMask, angleError, dataPath) 315 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/config.py: -------------------------------------------------------------------------------- 1 | # PROCESS GEOMETRICS CONFIGURATION FILE 2 | 3 | import os 4 | import configparser 5 | import json 6 | import glob 7 | import collections 8 | import jsonschema 9 | import pkg_resources 10 | import ast 11 | 12 | # module/package name 13 | resource_package = __name__ 14 | 15 | 16 | # HELPER: Locate absolute file path in dict via GLOB 17 | def findfiles(data, path=None): 18 | 19 | for key,file in data.items(): 20 | if not key.lower().endswith('filename'): continue 21 | 22 | print('Searching for "{}"'.format(key)) 23 | 24 | if file is None: 25 | print(' No file specified') 26 | continue 27 | 28 | # absolute path to file 29 | if not os.path.isabs(file): 30 | if path: file = os.path.join(path, file) 31 | file = os.path.abspath(file) 32 | 33 | # locate file (use glob to allow wildcards) 34 | files = glob.glob(file) 35 | 36 | if not files: 37 | print(" WARNING: unable to locate file <{}>".format(file)) 38 | file = None 39 | else: 40 | if len(files) > 1: 41 | print(' WARNING: multiple files located for <{}>, using 1st file'.format(file)) 42 | 43 | file = files[0] 44 | print(' File located <{}>'.format(file)) 45 | 46 | # save file to data 47 | data[key] = file 48 | 49 | return data 50 | 51 | 52 | # PARSE CONFIGURATION FILE 53 | 
def parse_config(configfile, refpath=None, testpath=None): 54 | 55 | print('\n=====CONFIGURATION=====') 56 | 57 | # check inputs 58 | if configfile and not os.path.isfile(configfile): 59 | raise IOError("Configuration file does not exist") 60 | 61 | if refpath and not os.path.isdir(refpath): 62 | raise IOError('"refpath" not a valid folder <{}>'.format(refpath)) 63 | 64 | if testpath and not os.path.isdir(testpath): 65 | raise IOError('"testpath" not a valid folder <{}>'.format(testpath)) 66 | 67 | # create schema validator object (& check schema itself) 68 | schema = json.loads(pkg_resources.resource_string( 69 | resource_package, 'config_schema.json').decode('utf-8')) 70 | validator = jsonschema.Draft4Validator(schema) 71 | validator.check_schema(schema) 72 | 73 | # load user configuration 74 | print("\nReading configuration from <{}>".format(configfile)) 75 | 76 | # JSON parsing 77 | if configfile.endswith(('.json', '.JSON')): 78 | 79 | # open & read JSON file 80 | with open(configfile, 'r') as fid: 81 | config = json.load(fid) 82 | 83 | # CONFIG parsing 84 | elif configfile.endswith(('.config','.CONFIG')): 85 | 86 | # setup config parser 87 | parser = configparser.ConfigParser() 88 | parser.optionxform = str # maintain case-sensitive items 89 | 90 | # read entire configuration file into dict 91 | if len(parser.read(configfile)) == 0: 92 | raise IOError("Unable to read selected .config file") 93 | config = {s:dict(parser.items(s)) for s in parser.sections()} 94 | 95 | # special section/item parsing 96 | s = 'INPUT.REF'; i = 'CLSMatchValue'; config[s][i] = ast.literal_eval(config[s][i]) 97 | s = 'INPUT.TEST'; i = 'CLSMatchValue' 98 | if i in config[s]: # Optional Field 99 | config[s][i] = ast.literal_eval(config[s][i]) 100 | else: 101 | config[s][i] = config['INPUT.REF'][i] 102 | 103 | s = 'OBJECTWISE' 104 | if s in config: 105 | i = 'Enable' 106 | if i in config[s]: 107 | config[s][i] = parser.getboolean(s, i) 108 | else: 109 | config[s]['Enable'] = True 110 | i 
= 'MergeRadius' 111 | if i in config[s]: 112 | config[s][i] = parser.getfloat(s, i) 113 | else: 114 | config[s]['MergeRadius'] = 2 # meters 115 | else: 116 | config[s] = {'Enable': True, 117 | 'MergeRadius': 2 118 | } # meters 119 | 120 | # bool(config[s][i]) does not interpret 'true'/'false' strings 121 | s = 'OPTIONS'; i = 'QuantizeHeight'; config[s][i] = parser.getboolean(s,i) 122 | s = 'OPTIONS'; i = 'AlignModel' 123 | if i in config[s]: # Optional Field 124 | config[s][i] = parser.getboolean(s, i) 125 | else: 126 | config[s][i] = True 127 | s = 'OPTIONS'; i = 'UseMultiprocessing' 128 | if i in config[s]: # Optional Field 129 | config[s][i] = parser.getboolean(s, i) 130 | else: 131 | config[s][i] = False 132 | s = 'OPTIONS'; i = 'SaveAligned' 133 | if i in config[s]: # Optional Field 134 | config[s][i] = parser.getboolean(s, i) 135 | else: 136 | config[s][i] = False 137 | s = 'PLOTS'; i = 'ShowPlots'; config[s][i] = parser.getboolean(s, i) 138 | s = 'PLOTS'; i = 'SavePlots'; config[s][i] = parser.getboolean(s, i) 139 | s = 'MATERIALS.REF'; i = 'MaterialNames'; config[s][i] = config[s][i].split(',') 140 | s = 'MATERIALS.REF'; i = 'MaterialIndicesToIgnore'; config[s][i] = [int(v) for v in config[s][i].split(',')] 141 | 142 | # Get BLENDER Options 143 | s = 'BLENDER.TEST' 144 | if s in config: 145 | i = '+Z' 146 | if i in config[s]: 147 | config[s][i] = parser.getboolean(s, i) 148 | else: 149 | config[s][i] = True 150 | i = 'OrbitalLocations' 151 | if i in config[s]: 152 | config[s][i] = parser.getint(s, i) 153 | else: 154 | config[s][i] = 0 155 | i = 'GSD' 156 | if i in config[s]: 157 | config[s][i] = parser.getfloat(s, i) 158 | else: 159 | config[s][i] = 1.0 160 | i = 'bbox' 161 | if i in config[s]: # Optional Field 162 | config[s][i] = config[s][i].split(',') 163 | else: 164 | config[s][i] = [0, 0, 0, 0] 165 | i = 'ElevationAngle' 166 | if i in config[s]: 167 | config[s][i] = parser.getfloat(s, i) 168 | else: 169 | config[s][i] = 60.0 170 | i = 'FocalLength' 
171 | if i in config[s]: 172 | config[s][i] = parser.getfloat(s, i) 173 | else: 174 | config[s][i] = 30.0 175 | i = 'RadialDistance' 176 | if i in config[s]: 177 | config[s][i] = parser.getfloat(s, i) 178 | else: 179 | config[s][i] = 8000.0 180 | else: 181 | config[s] = {'+Z': True, 182 | 'GSD': 1.0, 183 | 'bbox': [0, 0, 0, 0], 184 | 'OrbitalLocations': 0, 185 | 'ElevationAngle': 60.0, 186 | 'FocalLength': 30.0, 187 | 'RadialDistance': 8000.0 188 | } # meters 189 | 190 | # unrecognized config file type 191 | else: 192 | raise IOError('Unrecognized configuration file') 193 | 194 | # locate files for each "xxxFilename" configuration parameter 195 | # this makes use of "refpath" and "testpath" arguments for relative filenames 196 | # we do this before validation to ensure required files are located 197 | for item in [('INPUT.REF', refpath), ('INPUT.TEST', testpath), ('BLENDER.TEST', refpath)]: 198 | sec = item[0] 199 | path = item[1] 200 | print('\nPROCESSING "{}" FILES'.format(sec)) 201 | config[sec] = findfiles(config[sec], path) 202 | 203 | # try: 204 | # for item in [('BLENDER.TEST', refpath)]: 205 | # sec = item[0] 206 | # path = item[1] 207 | # print('\nPROCESSING "{}" FILES'.format(sec)) 208 | # config[sec] = findfiles(config[sec], path) 209 | # except: 210 | # pass 211 | 212 | # validate final configuration against schema 213 | try: 214 | validator.validate(config) 215 | print('\nCONFIGURATION VALIDATED') 216 | 217 | except jsonschema.exceptions.ValidationError: 218 | print('\n*****INVALID CONFIGURATION FILE*****\n') 219 | for error in sorted(validator.iter_errors(config), key=str): 220 | print('ERROR: {}\n'.format(error)) 221 | 222 | raise jsonschema.exceptions.ValidationError('validation error') 223 | 224 | # for easier exploitation, ensure some configuration options are tuple/list 225 | opts = (('INPUT.TEST', 'CLSMatchValue'), ('INPUT.REF', 'CLSMatchValue'), 226 | ('MATERIALS.REF', 'MaterialIndicesToIgnore')) 227 | 228 | for opt in opts: 229 | s = opt[0]; i 
= opt[1]; 230 | try: 231 | _ = (v for v in config[s][i]) 232 | except: 233 | config[s][i] = [config[s][i]] 234 | 235 | 236 | # print final configuration 237 | print('\nFINAL CONFIGURATION') 238 | print(json.dumps(config,indent=2)) 239 | 240 | # cleanup 241 | return config 242 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/config_schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-04/schema#", 3 | "name": "geometrics configuration schema", 4 | "definitions": { 5 | "CLSMatchValue": { 6 | "oneOf": [ 7 | { 8 | "type": "integer", 9 | "minimum": 0, 10 | "maximum": 256 11 | }, 12 | { 13 | "type": "array", 14 | "items": { 15 | "type": "integer", 16 | "minimum": 0, 17 | "maximum": 256 18 | } 19 | } 20 | ] 21 | }, 22 | "CLSMatchValueArrayOf": { 23 | "oneOf": [ 24 | { 25 | "type": "integer", 26 | "minimum": 0, 27 | "maximum": 256 28 | }, 29 | { 30 | "type": "array", 31 | "items": { 32 | "$ref": "#/definitions/CLSMatchValue" 33 | } 34 | } 35 | ] 36 | } 37 | }, 38 | 39 | 40 | "type": "object", 41 | "required": [ 42 | "INPUT.REF", 43 | "INPUT.TEST", 44 | "OPTIONS", 45 | "PLOTS", 46 | "MATERIALS.REF" 47 | ], 48 | "additionalProperties": false, 49 | "properties": { 50 | "INPUT.REF": { 51 | "type": "object", 52 | "required": [ 53 | "DSMFilename", 54 | "DTMFilename", 55 | "CLSFilename", 56 | "CLSMatchValue" 57 | ], 58 | "optional": [ 59 | "MTLFilename", 60 | "NDXFilename" 61 | ], 62 | "additionalProperties": false, 63 | "properties": { 64 | "DSMFilename": { 65 | "type": "string" 66 | }, 67 | "DTMFilename": { 68 | "type": "string" 69 | }, 70 | "CLSFilename": { 71 | "type": "string" 72 | }, 73 | "MTLFilename": { 74 | "oneOf": [ 75 | { 76 | "type": "string" 77 | }, 78 | { 79 | "type": "null" 80 | } 81 | ] 82 | }, 83 | "NDXFilename": { 84 | "oneOf": [ 85 | { 86 | "type": "string" 87 | }, 88 | { 89 | "type": "null" 90 | } 91 | ] 92 | 
}, 93 | "CLSMatchValue": { 94 | "$ref": "#/definitions/CLSMatchValueArrayOf" 95 | } 96 | } 97 | }, 98 | "INPUT.TEST": { 99 | "type": "object", 100 | "required": [ 101 | "DSMFilename", 102 | "CLSFilename", 103 | "CLSMatchValue" 104 | ], 105 | "optional": [ 106 | "ImagePairFilename", 107 | "ImagePairDataFilename", 108 | "FilesChosenFilename" 109 | ], 110 | "additionalProperties": false, 111 | "properties": { 112 | "DSMFilename": { 113 | "type": "string" 114 | }, 115 | "DTMFilename": { 116 | "oneOf": [ 117 | { 118 | "type": "string" 119 | }, 120 | { 121 | "type": "null" 122 | } 123 | ] 124 | }, 125 | "CONFFilename": { 126 | "oneOf": [ 127 | { 128 | "type": "string" 129 | }, 130 | { 131 | "type": "null" 132 | } 133 | ] 134 | }, 135 | "CLSFilename": { 136 | "type": "string" 137 | }, 138 | "ImagePairFilename": { 139 | "oneOf": [ 140 | { 141 | "type": "string" 142 | }, 143 | { 144 | "type": "null" 145 | } 146 | ] 147 | }, 148 | "ImagePairDataFilename": { 149 | "oneOf": [ 150 | { 151 | "type": "string" 152 | }, 153 | { 154 | "type": "null" 155 | } 156 | ] 157 | }, 158 | "FilesChosenFilename": { 159 | "oneOf": [ 160 | { 161 | "type": "string" 162 | }, 163 | { 164 | "type": "null" 165 | } 166 | ] 167 | }, 168 | "MTLFilename": { 169 | "oneOf": [ 170 | { 171 | "type": "string" 172 | }, 173 | { 174 | "type": "null" 175 | } 176 | ] 177 | }, 178 | "CLSMatchValue": { 179 | "$ref": "#/definitions/CLSMatchValueArrayOf" 180 | } 181 | 182 | } 183 | }, 184 | "OPTIONS": { 185 | "type": "object", 186 | "required": [ 187 | "QuantizeHeight" 188 | ], 189 | "optional": [ 190 | "AlignModel", 191 | "SaveAligned", 192 | "UseMultiprocessing" 193 | ], 194 | "additionalProperties": false, 195 | "properties": { 196 | "QuantizeHeight": { 197 | "type": "boolean" 198 | }, 199 | "AlignModel": { 200 | "type": "boolean" 201 | }, 202 | "SaveAligned": { 203 | "type": "boolean" 204 | }, 205 | "UseMultiprocessing": { 206 | "type": "boolean" 207 | }, 208 | "TerrainZErrorThreshold": { 209 | "type": "number" 
210 | }, 211 | "TerrainCLSIgnoreValues": { 212 | "$ref": "#/definitions/CLSMatchValue" 213 | } 214 | } 215 | }, 216 | "OBJECTWISE": { 217 | "type": "object", 218 | "optional": [ 219 | "Enable", 220 | "MergeRadius" 221 | ], 222 | "additionalProperties": false, 223 | "properties": { 224 | "Enable": { 225 | "type": "boolean" 226 | }, 227 | "MergeRadius": { 228 | "type": "number" 229 | } 230 | } 231 | }, 232 | "PLOTS": { 233 | "type": "object", 234 | "required": [ 235 | "ShowPlots", 236 | "SavePlots" 237 | ], 238 | "additionalProperties": false, 239 | "properties": { 240 | "ShowPlots": { 241 | "type": "boolean" 242 | }, 243 | "SavePlots": { 244 | "type": "boolean" 245 | } 246 | } 247 | }, 248 | "MATERIALS.REF": { 249 | "type": "object", 250 | "required": [ 251 | "MaterialNames", 252 | "MaterialIndicesToIgnore" 253 | ], 254 | "additionalProperties": true, 255 | "properties": { 256 | "MaterialNames": { 257 | "type": "array", 258 | "items": { 259 | "type": "string" 260 | } 261 | }, 262 | "MaterialIndicesToIgnore": { 263 | "oneOf": [ 264 | { 265 | "type":"integer", 266 | "minimum": 0 267 | }, 268 | { 269 | "type": "array", 270 | "items": { 271 | "type": "integer", 272 | "minimum": 0 273 | } 274 | } 275 | ] 276 | } 277 | } 278 | }, 279 | "REGEXEPATH": { 280 | "type": "object", 281 | "additionalProperties": false, 282 | "properties": { 283 | "Align3DPath": { 284 | "type": "string" 285 | } 286 | } 287 | }, 288 | "BLENDER.TEST": { 289 | "type": "object", 290 | "additionalProperties": false, 291 | "properties": { 292 | "OBJDirectoryFilename": { 293 | "type": "string" 294 | }, 295 | "GSD": { 296 | "type": "number" 297 | }, 298 | "bbox": { 299 | "type": "array", 300 | "items": { 301 | "type": "number" 302 | } 303 | }, 304 | "+Z": { 305 | "type": "boolean" 306 | }, 307 | "OrbitalLocations": { 308 | "type": "integer", 309 | "minimum": 0 310 | }, 311 | "ElevationAngle": { 312 | "type": "number" 313 | }, 314 | "FocalLength": { 315 | "type": "number" 316 | }, 317 | "RadialDistance": { 
318 | "type": "number" 319 | } 320 | } 321 | } 322 | } 323 | } 324 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/image.py: -------------------------------------------------------------------------------- 1 | import os 2 | import gdal, osr 3 | import numpy as np 4 | 5 | 6 | def imageLoad(filename): 7 | im = gdal.Open(filename, gdal.GA_ReadOnly) 8 | band = im.GetRasterBand(1) 9 | img = band.ReadAsArray(0, 0, im.RasterXSize, im.RasterYSize) 10 | transform = im.GetGeoTransform() 11 | return img, transform 12 | 13 | 14 | def getNoDataValue(filename): 15 | im = gdal.Open(filename, gdal.GA_ReadOnly) 16 | band = im.GetRasterBand(1) 17 | nodata = band.GetNoDataValue() 18 | return nodata 19 | 20 | 21 | def getMetadata(inputinfo): 22 | 23 | # dataset input 24 | if isinstance(inputinfo,gdal.Dataset): 25 | dataset = inputinfo 26 | FLAG_CLOSE = False 27 | 28 | # file input 29 | elif isinstance(inputinfo,str): 30 | filename = inputinfo 31 | if not os.path.isfile(filename): 32 | raise IOError('Cannot locate file <{}>'.format(filename)) 33 | 34 | dataset = gdal.Open(filename, gdal.GA_ReadOnly) 35 | FLAG_CLOSE = True 36 | 37 | # unrecognized input 38 | else: 39 | raise IOError('Unrecognized getMetadata input') 40 | 41 | # read metadata 42 | meta = { 43 | 'RasterXSize': dataset.RasterXSize, 44 | 'RasterYSize': dataset.RasterYSize, 45 | 'RasterCount': dataset.RasterCount, 46 | 'Projection': dataset.GetProjection(), 47 | 'GeoTransform': list(dataset.GetGeoTransform()), 48 | 'BitDepth': dataset.GetRasterBand(1).DataType, 49 | 'EPSG': osr.SpatialReference(wkt=dataset.GetProjection()).GetAttrValue('AUTHORITY',1) 50 | } 51 | 52 | # cleanuo 53 | if FLAG_CLOSE: dataset = None 54 | return meta 55 | 56 | def imageWarpRGB(file_src: str, file_dst: str, offset=None, interp_method: int = gdal.gdalconst.GRA_Bilinear, noDataValue=None): 57 | 58 | # verbose display 59 | print('Loading <{}>'.format(file_src)) 60 | 61 | # destination 
metadata 62 | meta_dst = getMetadata(file_dst) 63 | 64 | # GDAL memory driver 65 | mem_drv = gdal.GetDriverByName('MEM') 66 | 67 | # copy source to memory 68 | tmp = gdal.Open(file_src, gdal.GA_ReadOnly) 69 | dataset_src = mem_drv.CreateCopy('',tmp) 70 | tmp = None 71 | 72 | # source metadata 73 | meta_src = getMetadata(dataset_src) 74 | 75 | # Apply registration offset 76 | if offset is not None: 77 | 78 | # offset error: offset is defined in destination projection space, 79 | # and cannot be applied if source and destination projections differ 80 | if meta_src['Projection'] != meta_dst['Projection']: 81 | print('IMAGE PROJECTION\n{}'.format(meta_src['Projection'])) 82 | print('OFFSET PROJECTION\n{}'.format(meta_dst['Projection'])) 83 | raise ValueError('Image/Offset projection mismatch') 84 | 85 | transform = meta_src['GeoTransform'] 86 | transform[0] += offset[0] 87 | transform[3] += offset[1] 88 | dataset_src.SetGeoTransform(transform) 89 | 90 | 91 | # no reprojection necessary 92 | if meta_src == meta_dst: 93 | print(' No reprojection') 94 | dataset_dst = dataset_src 95 | 96 | # reprojection 97 | else: 98 | keys = [k for k in meta_dst if meta_dst.get(k) != meta_src.get(k)] 99 | print(' REPROJECTION (adjusting {})'.format(', '.join(keys))) 100 | 101 | # file, xsz, ysz, nbands, dtype 102 | dataset_dst = mem_drv.Create('', meta_dst['RasterXSize'], meta_dst['RasterYSize'], 103 | meta_src['RasterCount'], gdal.GDT_Float32) 104 | 105 | dataset_dst.SetProjection(meta_dst['Projection']) 106 | dataset_dst.SetGeoTransform(meta_dst['GeoTransform']) 107 | 108 | # input, output, inputproj, outputproj, interp 109 | gdal.ReprojectImage(dataset_src, dataset_dst, meta_src['Projection'], 110 | meta_dst['Projection'], interp_method) 111 | 112 | # read & return image data 113 | r = dataset_dst.GetRasterBand(1).ReadAsArray() 114 | g = dataset_dst.GetRasterBand(2).ReadAsArray() 115 | b = dataset_dst.GetRasterBand(3).ReadAsArray() 116 | img = np.dstack((r,g,b)) 117 | img = 
np.uint8(img) 118 | return img 119 | 120 | def imageWarp(file_src: str, file_dst: str, offset=None, interp_method: int = gdal.gdalconst.GRA_Bilinear, noDataValue=None): 121 | 122 | # verbose display 123 | print('Loading <{}>'.format(file_src)) 124 | 125 | # destination metadata 126 | meta_dst = getMetadata(file_dst) 127 | 128 | # GDAL memory driver 129 | mem_drv = gdal.GetDriverByName('MEM') 130 | 131 | # copy source to memory 132 | tmp = gdal.Open(file_src, gdal.GA_ReadOnly) 133 | dataset_src = mem_drv.CreateCopy('',tmp) 134 | tmp = None 135 | 136 | # change no data value to new "noDataValue" input if necessary, 137 | # making sure to adjust the underlying pixel values 138 | band = dataset_src.GetRasterBand(1) 139 | NDV = band.GetNoDataValue() 140 | 141 | if noDataValue is not None and noDataValue != NDV: 142 | if NDV is not None: 143 | img = band.ReadAsArray() 144 | img[img==NDV] = noDataValue 145 | band.WriteArray(img) 146 | band.SetNoDataValue(noDataValue) 147 | NDV = noDataValue 148 | 149 | # source metadata 150 | meta_src = getMetadata(dataset_src) 151 | 152 | # Reproject if dst and source do not have matching projections. 
Reproject to dst 153 | if meta_src['Projection'] != meta_dst['Projection'] or meta_src['RasterXSize'] != meta_dst["RasterXSize"]\ 154 | or meta_src["RasterYSize"] != meta_dst["RasterYSize"]: 155 | print('IMAGE PROJECTION\n{}'.format(meta_src['Projection'])) 156 | print('OFFSET PROJECTION\n{}'.format(meta_dst['Projection'])) 157 | # raise ValueError('Image/Offset projection mismatch') 158 | 159 | # Reproject 160 | keys = [k for k in meta_dst if meta_dst.get(k) != meta_src.get(k)] 161 | print(' REPROJECTION (adjusting {})'.format(', '.join(keys))) 162 | 163 | # file, xsz, ysz, nbands, dtype 164 | dataset_dst = mem_drv.Create('', meta_dst['RasterXSize'], meta_dst['RasterYSize'], 165 | meta_src['RasterCount'], gdal.GDT_Float32) 166 | 167 | dataset_dst.SetProjection(meta_dst['Projection']) 168 | dataset_dst.SetGeoTransform(meta_dst['GeoTransform']) 169 | 170 | if NDV is not None: 171 | band = dataset_dst.GetRasterBand(1) 172 | band.SetNoDataValue(NDV) 173 | band.Fill(NDV) 174 | 175 | # input, output, inputproj, outputproj, interp 176 | gdal.ReprojectImage(dataset_src, dataset_dst, meta_src['Projection'], 177 | meta_dst['Projection'], interp_method) 178 | else: 179 | dataset_dst = dataset_src 180 | 181 | # Apply registration offset 182 | if offset is not None: 183 | # offset error: offset is defined in destination projection space, 184 | # and cannot be applied if source and destination projections differ 185 | transform = meta_src['GeoTransform'] 186 | transform[0] += offset[0] 187 | transform[3] += offset[1] 188 | dataset_src.SetGeoTransform(transform) 189 | 190 | # read & return image data 191 | img = dataset_dst.GetRasterBand(1).ReadAsArray() 192 | return img 193 | 194 | 195 | def arrayToGeotiff(image_array, out_file_name, reference_file_name, NODATA_VALUE): 196 | """ Used to save rasterized dsm of point cloud """ 197 | reference_image = gdal.Open(reference_file_name, gdal.GA_ReadOnly) 198 | transform = reference_image.GetGeoTransform() 199 | projection = 
reference_image.GetProjection() 200 | 201 | driver = gdal.GetDriverByName('GTiff') 202 | out_image = driver.Create(out_file_name + '.tif', image_array.shape[1], 203 | image_array.shape[0], 1, gdal.GDT_Float32) 204 | if out_image is None: 205 | print('Could not create output GeoTIFF') 206 | 207 | out_image.SetGeoTransform(transform) 208 | out_image.SetProjection(projection) 209 | 210 | out_band = out_image.GetRasterBand(1) 211 | out_band.SetNoDataValue(NODATA_VALUE) 212 | out_band.WriteArray(image_array, 0, 0) 213 | out_band.FlushCache() 214 | out_image.FlushCache() 215 | # Ignore pep warning here, aids in memory management performance 216 | out_image = None 217 | 218 | return 219 | 220 | def arrayToGeotiffRGB(image_array, out_file_name, reference_file_name, NODATA_VALUE): 221 | """ Used to save rasterized dsm of point cloud """ 222 | reference_image = gdal.Open(reference_file_name, gdal.GA_ReadOnly) 223 | transform = reference_image.GetGeoTransform() 224 | projection = reference_image.GetProjection() 225 | 226 | bands = image_array.shape[2] 227 | driver = gdal.GetDriverByName('GTiff') 228 | options = ['PHOTOMETRIC=RGB', 'PROFILE=GeoTIFF'] 229 | 230 | out_image = driver.Create(out_file_name + '.tif', image_array.shape[1], 231 | image_array.shape[0], bands, gdal.GDT_Byte, options=options) 232 | if out_image is None: 233 | print('Could not create output GeoTIFF') 234 | 235 | out_image.SetGeoTransform(transform) 236 | out_image.SetProjection(projection) 237 | 238 | for band in range(bands): 239 | out_image.GetRasterBand(band+1).WriteArray(image_array[:, :, band]) 240 | 241 | out_image.FlushCache() 242 | # Ignore pep warning here, aids in memory management performance 243 | out_image = None 244 | 245 | return 246 | 247 | # Load LAS file and generate max DSM in memory 248 | def lasToRaster(las_filename, transform, shape_out, NODATA): 249 | # Load LAS file 250 | test_las = File(las_filename, mode='r') 251 | 252 | x = test_las.x 253 | y = test_las.y 254 | z = test_las.z 
255 | 256 | # Project to output image space 257 | # TODO: call map2pix 258 | map_to_pix = gdal.InvGeoTransform(transform) 259 | x0 = np.round(map_to_pix[0] + x * map_to_pix[1] + y * map_to_pix[2]) 260 | y0 = np.round(map_to_pix[3] + x * map_to_pix[4] + y * map_to_pix[5]) 261 | 262 | x0 = x0.astype(int) 263 | y0 = y0.astype(int) 264 | 265 | # Generate MAX value DSM 266 | raster = np.zeros(shape_out, np.float32) + NODATA 267 | for ii in range(0, x0.size): 268 | if (x0[ii] >= 0) & (x0[ii] < raster.shape[1]) & (y0[ii] >= 0) & ( 269 | y0[ii] < raster.shape[0]): 270 | if z[ii] > raster[y0[ii], x0[ii]]: 271 | raster[y0[ii], x0[ii]] = z[ii] 272 | 273 | return raster 274 | 275 | 276 | # refMat is a GDAL GeoTransform format 277 | def map2pix(reference_matrix, points_list): 278 | x_origin = reference_matrix[0] 279 | y_origin = reference_matrix[3] 280 | pixel_width = reference_matrix[1] 281 | pixel_height = -reference_matrix[5] 282 | 283 | xy = np.zeros(shape=(len(points_list), 2)) 284 | 285 | xy[:, 0] = (np.round((points_list[:, 0] - x_origin) / pixel_width)) 286 | xy[:, 1] = (np.round((y_origin - points_list[:, 1]) / pixel_height)) 287 | 288 | return xy 289 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/image_pair_plot.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import csv 4 | from pathlib import Path 5 | from datetime import datetime 6 | 7 | 8 | class ImagePair: 9 | def __init__(self, azimuth_1, azimuth_2, off_nadir_1, off_nadir_2, month_1, month_2, gsd_1, gsd_2): 10 | self.azimuth_1 = azimuth_1 11 | self.azimuth_2 = azimuth_2 12 | self.off_nadir_1 = off_nadir_1 13 | self.off_nadir_2 = off_nadir_2 14 | self.month_1 = month_1 15 | self.month_2 = month_2 16 | self.gsd_1 = gsd_1 17 | self.gsd_2 = gsd_2 18 | 19 | 20 | class ImagePairPlot: 21 | def __init__(self, data_file_path, image_pair_file_path, 
files_chosen_path): 22 | self.data_file_path = data_file_path 23 | self.image_pair_file_path = image_pair_file_path 24 | self.files_chosen_path = files_chosen_path 25 | # sortedbydate file 26 | self.gsd_values = [] 27 | self.month_values = [] 28 | self.off_nadir_values = [] 29 | self.azimuth_values = [] 30 | self.order_id = [] 31 | # PairStats File 32 | self.image1_orderid = [] 33 | self.image2_orderid = [] 34 | self.pair_intersection_angle = [] 35 | self.incidence_angle = [] 36 | self.matching_filename_index_1 = [] 37 | self.matching_filename_index_2 = [] 38 | self.image_pairs = [] 39 | self.use_data_file_for_angles = False 40 | self.image_names = [] 41 | 42 | # Get list of all images used for AOI 43 | with open(self.files_chosen_path, mode='r') as infile: 44 | reader = csv.reader(infile) 45 | column_id = {k: v for v, k in enumerate(next(reader))} 46 | for rows in reader: 47 | image_name = rows[column_id['Image filename']][35:55] 48 | self.image_names.append(image_name) 49 | 50 | # PairStats 51 | with open(self.image_pair_file_path, mode='r') as infile: 52 | reader = csv.reader(infile) 53 | column_id = {k: v for v, k in enumerate(next(reader))} 54 | for rows in reader: 55 | # Ignore discarded rows 56 | try: 57 | if rows[column_id['discarded']] == 'yes': 58 | continue 59 | except KeyError: 60 | pass 61 | 62 | image1_orderID = (rows[column_id['Image 1 filename']][35:55]) 63 | image2_orderID = (rows[column_id[' Image 2 filename']][35:55]) 64 | self.image1_orderid.append(image1_orderID) 65 | self.image2_orderid.append(image2_orderID) 66 | try: 67 | self.pair_intersection_angle.append(float(rows[column_id['intersection_angle']])) 68 | self.incidence_angle.append(np.round(float(rows[column_id['incidence_angle']]), 1)) 69 | except KeyError: 70 | self.use_data_file_for_angles = True 71 | 72 | # sortedbydate 73 | with open(self.data_file_path, mode='r') as infile: 74 | reader = csv.reader(infile) 75 | column_id = {k: v for v, k in enumerate(next(reader))} 76 | for rows in 
reader: 77 | if not (rows[column_id['spectral range']] == 'PAN'): 78 | continue 79 | # Compare used images with data file 80 | if rows[column_id['filename']][35:55] not in self.image_names: 81 | continue 82 | self.gsd_values.append(float(rows[column_id['mean product gsd']])) 83 | self.azimuth_values.append(float(rows[column_id['mean satellite azimuth']])) 84 | self.off_nadir_values.append(90.0-float(rows[column_id['mean satellite elevation']])) 85 | year = str(int(float(rows[column_id['date']])))[0:4] 86 | month = str(int(float(rows[column_id['date']])))[4:6] 87 | self.month_values.append(int(month)) 88 | orderID = rows[column_id['order id']] 89 | try: 90 | indices = [i for i, x in enumerate(self.image1_orderid) if x == orderID] 91 | self.matching_filename_index_1.append(indices) 92 | except ValueError: 93 | self.matching_filename_index_1.append(np.nan) 94 | try: 95 | indices = [i for i, x in enumerate(self.image2_orderid) if x == orderID] 96 | self.matching_filename_index_2.append(indices) 97 | except ValueError: 98 | self.matching_filename_index_2.append(np.nan) 99 | self.order_id.append(rows[column_id['order id']]) 100 | 101 | # Sort into pairs 102 | matching_pairs = [] 103 | for i, image_1_indices in enumerate(self.matching_filename_index_1): 104 | for j, image_2_indices in enumerate(self.matching_filename_index_2): 105 | check = any(item in self.matching_filename_index_1[i] for item in self.matching_filename_index_2[j]) 106 | if check: 107 | matching_pairs.append([i, j]) 108 | 109 | for pair in matching_pairs: 110 | file_1_index = pair[0] 111 | file_2_index = pair[1] 112 | 113 | az_1 = self.azimuth_values[file_1_index] 114 | az_2 = self.azimuth_values[file_2_index] 115 | el_1 = self.off_nadir_values[file_1_index] 116 | el_2 = self.off_nadir_values[file_2_index] 117 | m_1 = self.month_values[file_1_index] 118 | m_2 = self.month_values[file_2_index] 119 | gsd_1 = self.gsd_values[file_1_index] 120 | gsd_2 = self.gsd_values[file_2_index] 121 | image_pair_temp = 
ImagePair(az_1, az_2, el_1, el_2, m_1, m_2, gsd_1, gsd_2) 122 | self.image_pairs.append(image_pair_temp) 123 | 124 | def create_plot(self, figNum): 125 | pair_lines = [] 126 | for i, val in enumerate(self.image_pairs): 127 | pair_lines.append([[-(val.azimuth_1 / (180/np.pi))+np.pi/2, val.off_nadir_1], 128 | [-(val.azimuth_2 / (180/np.pi))+np.pi/2, val.off_nadir_2]]) 129 | 130 | radial_label_angle = 0 131 | gsd = self.gsd_values 132 | month = self.month_values 133 | r = self.off_nadir_values 134 | theta = self.azimuth_values 135 | theta = [-(x / (180/np.pi))+np.pi/2 for x in theta] 136 | 137 | # Calculate DOP 138 | pdop, hdop, vdop = self.calculate_dop() 139 | 140 | # Create scatter with data for Month 141 | plt.figure(figNum, figsize=(17, 5)) 142 | ax = plt.subplot(121, projection='polar') 143 | # Plot connecting lines 144 | for i, points in enumerate(pair_lines): 145 | sc = ax.plot([points[0][0], points[1][0]], [points[0][1], points[1][1]], '-ro', LineWidth=0.3, 146 | MarkerSize=0.1) 147 | # Plot circles 148 | sc = ax.scatter(theta, r, s=70, c=month, alpha=1, edgecolors='black') 149 | 150 | # Format plot 151 | lines, labels = plt.thetagrids(range(0, 360, 30), 152 | ('E', '60°', '30°', 'N', '330°', '300°', 'W', '240°', '210°', 'S', '150°', 153 | '120°')) 154 | for label, angle in zip(labels, range(0, 360, 30)): 155 | label.set_rotation(90 - angle) 156 | # Add axis labels 157 | ax.text((5 * np.pi) / 6, 100, 'Azimuth (deg)', fontsize=10) 158 | ax.text((np.pi) / 11, 70, 'Off-nadir (deg)', fontsize=10) 159 | # Add color bar 160 | cbar = plt.colorbar(sc, pad=0.25) 161 | ax.set_rmax(60) 162 | ax.set_rlabel_position(radial_label_angle) # get radial labels away from plotted line 163 | ax.grid(True) 164 | ax.set_title("Month of Year", y=-0.15, fontsize=18) 165 | 166 | # Create scatter with data for GSD 167 | ax2 = plt.subplot(122, projection='polar') 168 | # Plot connecting lines 169 | for i, points in enumerate(pair_lines): 170 | sc = ax2.plot([points[0][0], points[1][0]], 
[points[0][1], points[1][1]], '-ro', LineWidth=0.3, 171 | MarkerSize=0.1) 172 | # Plot Circles 173 | sc = ax2.scatter(theta, r, s=70, c=gsd, alpha=1, edgecolors='black') 174 | # Format plot 175 | lines, labels = plt.thetagrids(range(0, 360, 30), 176 | ('E', '60°', '30°', 'N', '330°', '300°', 'W', '240°', '210°', 'S', '150°', 177 | '120°')) 178 | for label, angle in zip(labels, range(0, 360, 30)): 179 | label.set_rotation(90 - angle) 180 | # Add axis labels 181 | ax2.text((5 * np.pi) / 6, 100, 'Azimuth (deg)', fontsize=10) 182 | ax2.text(np.pi / 11, 70, 'Off-nadir (deg)', fontsize=10) 183 | # Add color bar 184 | cbar = plt.colorbar(sc, pad=0.25) 185 | ax2.set_rmax(60) 186 | ax2.set_rlabel_position(radial_label_angle) # get radial labels away from plotted line 187 | ax2.grid(True) 188 | ax2.set_title("Ground Sample Distance (GSD)", y=-0.15, fontsize=18) 189 | # Plot DOP calculations 190 | ax.text((7 * np.pi) / 6, 110, 191 | 'PDOP: ' + '%.3f' % pdop + '\n' 192 | + 'HDOP: ' + '%.3f' % hdop + '\n' 193 | + 'VDOP: ' + '%.3f' % vdop, fontsize=10) 194 | 195 | return plt 196 | 197 | def calculate_dop(self, altitude=643738.0, num_satellites=10): 198 | # Define platform coordinates. 
199 | # Approximate with one position per platform for entire block 200 | ground_ranges = altitude * np.tan(np.array(self.off_nadir_values) * np.pi / 180) 201 | cx = np.ones((num_satellites, 1)) 202 | cy = np.ones((num_satellites, 1)) 203 | cz = np.ones((num_satellites, 1)) 204 | for i in range(0, num_satellites): 205 | angle_radians = self.azimuth_values[i] * np.pi/180 206 | rotation = np.array([[np.cos(angle_radians), -np.sin(angle_radians)], [np.sin(angle_radians), 207 | np.cos(angle_radians)]]) 208 | cp = np.matmul(rotation, np.array([ground_ranges[i], 0])) 209 | cx[i] = cp[0] 210 | cy[i] = cp[1] 211 | cz[i] = altitude 212 | # Compute Jacobian 213 | G = np.zeros([num_satellites, 3]) 214 | for p in range(0, num_satellites): 215 | range_n = np.sqrt(cx[p] ** 2 + cy[p] ** 2 + cz[p] ** 2) 216 | G[p, 0] = cx[p]/range_n 217 | G[p, 1] = cy[p]/range_n 218 | G[p, 2] = cz[p]/range_n 219 | # Computer PDOP for this point 220 | Q = np.linalg.inv(np.matmul(np.transpose(G), G)) 221 | pdop = np.sqrt(Q[0, 0] + Q[1, 1] + Q[2, 2]) 222 | hdop = np.sqrt(Q[0, 0] + Q[1, 1]) 223 | vdop = np.sqrt(Q[2, 2]) 224 | 225 | return pdop, hdop, vdop 226 | 227 | 228 | def main(): 229 | print("Debug") 230 | 231 | 232 | if __name__ == "__main__": 233 | main() 234 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/metrics_util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def calcMops(true_positives, false_negatives, false_positives): 4 | 5 | # when user gets nothing correct 6 | if (true_positives == 0): 7 | s = { 8 | 'recall': 0, 9 | 'precision': 0, 10 | 'jaccardIndex': 0, 11 | 'branchingFactor': np.nan, 12 | 'missFactor': np.nan, 13 | 'completeness': 0, 14 | 'correctness': 0, 15 | 'fscore': np.nan 16 | } 17 | 18 | else: 19 | s = { 20 | 'recall': true_positives / (true_positives + false_negatives), 21 | 'precision': true_positives / (true_positives + false_positives), 22 | 
'jaccardIndex': true_positives / (true_positives + false_negatives + false_positives), 23 | 'branchingFactor': false_positives / true_positives, 24 | 'missFactor': false_negatives / true_positives, 25 | } 26 | s['completeness'] = s['recall'] 27 | s['correctness'] = s['precision'] 28 | s['fscore'] = (2 * s['recall'] * s['precision']) / (s['recall'] + s['precision']) 29 | 30 | # append actual TP/FN/FP to report 31 | s['TP'] = float(true_positives) 32 | s['FN'] = float(false_negatives) 33 | s['FP'] = float(false_positives) 34 | 35 | return s 36 | 37 | 38 | def getUnitArea(tform): 39 | return abs(tform[1] * tform[5]) 40 | 41 | 42 | def getUnitHeight(tform): 43 | return (abs(tform[1]) + abs(tform[5])) / 2 44 | 45 | def getUnitWidth(tform): 46 | return (abs(tform[1]) + abs(tform[5])) / 2 47 | 48 | # Checks is match values are present as CLS values, and 49 | # expands any special cases 50 | # 51 | # CLS range from 0 to 255 per ASPRS 52 | # (American Society for Photogrammetry and Remote Sensing) 53 | # LiDAR point cloud classification LAS standard 54 | # 55 | # Any special cases are values outside [0 255] 56 | def validateMatchValues(matchValues, clsValues): 57 | 58 | if not isinstance(matchValues, (list)): matchValues = [matchValues] 59 | 60 | outValues = [] 61 | for v in matchValues: 62 | 63 | if v is 256: 64 | # All non-zero classes 65 | [outValues.append(c) if c!=0 else None for c in clsValues] 66 | else: 67 | # Keep match value only if exists as CLS value 68 | if v in clsValues: outValues.append(v) 69 | 70 | return outValues 71 | 72 | def getMatchValueSets(refCLS_matchSets, testCLS_matchSets, refCLS_classes, testCLS_classes): 73 | 74 | # Classes present in CLS input images 75 | print("Reference classification values: " + str(refCLS_classes)) 76 | print("Test classification values: " + str(testCLS_classes)) 77 | 78 | # Sets of classes specified for evaluation 79 | if len(refCLS_matchSets) != len(testCLS_matchSets): 80 | print("WARNING: Inconsistent number of sets 
specified by CLSMatchValue") 81 | testCLS_matchSets.clear() 82 | refCLS_matchSets.clear() 83 | 84 | refCLS_matchSetsValid = [] 85 | testCLS_matchSetsValid = [] 86 | for index, (refMatchValue, testMatchValue) in enumerate(zip(refCLS_matchSets, testCLS_matchSets)): 87 | refMatchValueValid = validateMatchValues(refMatchValue, refCLS_classes) 88 | testMatchValueValid = validateMatchValues(testMatchValue, testCLS_classes) 89 | 90 | if len(refMatchValueValid) and len(testMatchValueValid): 91 | refCLS_matchSetsValid.append(refMatchValueValid) 92 | testCLS_matchSetsValid.append(testMatchValueValid) 93 | 94 | 95 | return refCLS_matchSetsValid, testCLS_matchSetsValid 96 | 97 | 98 | # Classification Values used are defined by 99 | # ASPRS Standard LIDAR Point Classes 1.4 100 | # http://www.asprs.org/wp-content/uploads/2010/12/LAS_1_4_r13.pdf 101 | def clsDecoderRing(): 102 | 103 | decoderRing = { 104 | 0: "Never classified", 105 | 1: "Unassigned", 106 | 2: "Ground", 107 | 3: "Low Vegetation", 108 | 4: "Medium Vegetation", 109 | 5: "High Vegetation", 110 | 6: "Building", 111 | 7: "Low Point", 112 | 9: "Water", 113 | 10: "Rail", 114 | 11: "Road Surface", 115 | 13: "Wire - Guard(Shield)", 116 | 14: "Wire - Conductor(Phase)", 117 | 15: "Transmission Tower", 118 | 16: "Wire - Structure Connector(Insulator)", 119 | 17: "Bridge Deck", 120 | 18: "High Noise"} 121 | 122 | # 8: Reserved 123 | # 12: Reserved 124 | # 19 - 63: Reserved 125 | # 64 - 255: User Definable 126 | 127 | return decoderRing 128 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/registration.py: -------------------------------------------------------------------------------- 1 | # 2 | # Align two gridded 3D models using align3d executable. 
3 | # 4 | 5 | import os 6 | import platform 7 | import numpy as np 8 | import gdal 9 | from utils.align3d import AlignParameters, AlignTarget2Reference 10 | from datetime import datetime 11 | import sys 12 | from pathlib import Path 13 | 14 | 15 | def align3d_python(reference_filename, target_filename, gsd=1.0, maxt=10.0, maxdz=0.0): 16 | """ 17 | Runs the python port of align3d 18 | :param reference_filename: ground truth reference file 19 | :param target_filename: target/performer file 20 | :param gsd: Ground Sample Distance (GSD) for gridding point cloud (meters); default = 1.0 21 | :param maxt: Maximum horizontal translation in search (meters); default = 10.0 22 | :param maxdz: Max local Z difference (meters) for matching; default = 2*gsd 23 | :return: xyz offsets 24 | """ 25 | 26 | offset= None 27 | params = AlignParameters() 28 | params.gsd = gsd 29 | params.maxt = maxt 30 | params.maxdz = maxdz 31 | # Default MAXDZ = GSD x 2 to ensure reliable performance on steep slopes 32 | if params.maxdz == 0.0: 33 | params.maxdz = params.gsd * 2.0 34 | print("Selected Parameters:") 35 | print(f" ref = {reference_filename}") 36 | print(f" tgt = {target_filename}") 37 | print(f" gsd = {params.gsd}") 38 | print(f" maxdz = {params.maxdz}") 39 | print(f" maxt = {params.maxt}") 40 | 41 | # Intialiize timer 42 | start_time = datetime.now() 43 | try: 44 | AlignTarget2Reference(Path(reference_filename), Path(target_filename), params) 45 | except: 46 | print("Unexpected error:", sys.exc_info()[0]) 47 | raise 48 | end_time = datetime.now() - start_time 49 | print(f" Total time elapsed = {end_time} seconds") 50 | 51 | registered_filename = os.path.join(target_filename[0:-4] + '_aligned.tif') 52 | offset_filename = os.path.join(target_filename[0:-4] + '_offsets.txt') 53 | 54 | # Open permissions on output files 55 | unroot(registered_filename) 56 | unroot(offset_filename) 57 | 58 | # Read XYZ offset from align3d output file. 
59 | offsets = readXYZoffset(offset_filename) 60 | 61 | return offset 62 | 63 | def align3d(reference_filename, test_filename, exec_path=None): 64 | 65 | # align3d executable (typically on the system $PATH) 66 | exec_filename = 'align3d' 67 | if platform.system() == "Windows": 68 | exec_filename = exec_filename + ".exe" 69 | 70 | # locate align3d executable 71 | if exec_path: 72 | exec_filename = os.path.abspath(os.path.join(exec_path,exec_filename)) 73 | if not os.path.isfile(exec_filename): 74 | raise IOError('"align3d" executable not found at <{}>'.format(exec_filename)) 75 | 76 | # In case file names have relative paths, convert to absolute paths. 77 | reference_filename = os.path.abspath(reference_filename) 78 | test_filename = os.path.abspath(test_filename) 79 | 80 | # Run align3d. 81 | command = exec_filename + " " + reference_filename + " " + test_filename + ' maxt=10.0' 82 | print("") 83 | print("Registering test model to reference model to determine XYZ offset.") 84 | print("") 85 | print(command) 86 | print("") 87 | os.system(command) 88 | 89 | # Names of files produced by registration process 90 | registered_filename = os.path.join(test_filename[0:-4] + '_aligned.tif') 91 | offset_filename = os.path.join(test_filename[0:-4] + '_offsets.txt') 92 | 93 | # TODO: This is here for docker scenarios where new files are owned by root 94 | # Open permissions on output files 95 | unroot(registered_filename) 96 | unroot(offset_filename) 97 | # TODO: registration makes more files that may need 'un-rooting' 98 | 99 | # Read XYZ offset from align3d output file. 
100 | offsets = readXYZoffset(offset_filename) 101 | 102 | return offsets 103 | 104 | 105 | def readXYZoffset(filename): 106 | with open(filename, "r") as fid: 107 | offsetstr = fid.readlines() 108 | cc = offsetstr[1].split(' ') 109 | xyzoffset = [float(v) for v in [cc[0],cc[2],cc[4]]] 110 | return xyzoffset 111 | 112 | 113 | def getXYZoffsetFilename(testFilename): 114 | offset_filename = os.path.join(testFilename[0:-4] + '_offsets.txt') 115 | return offset_filename 116 | 117 | 118 | def unroot(filename): 119 | os.chmod(filename, 0o644) 120 | -------------------------------------------------------------------------------- /core3dmetrics/geometrics/relative_accuracy_metrics.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | from scipy.signal import convolve2d 4 | from scipy.spatial import cKDTree 5 | from scipy.stats import skew, kurtosis 6 | 7 | def run_relative_accuracy_metrics(refDSM, testDSM, refMask, testMask, ignoreMask, gsd, for_objectwise=False, plot=None): 8 | 9 | PLOTS_ENABLE = True 10 | if plot is None: PLOTS_ENABLE = False 11 | 12 | # valid mask (opposite of ignore mask) 13 | validMask = ~ignoreMask 14 | 15 | # Compute relative vertical accuracy 16 | # Consider only objects selected in both reference and test masks. 17 | 18 | # Calculate Z percentile errors. 19 | # Z68 approximates ZRMSE assuming normal error distribution. 
20 | delta = testDSM - refDSM 21 | overlap = refMask & testMask & validMask 22 | signed_z_errors = delta[overlap] 23 | try: 24 | zrmse_explicit = np.sqrt(sum(signed_z_errors ** 2)/len(signed_z_errors)) 25 | except ZeroDivisionError: 26 | print("Error") 27 | zrmse_explicit = np.nan 28 | signed_z_errors = np.array([np.nan]) 29 | if np.unique(overlap).size is 1: 30 | z68 = 100 31 | z50 = 100 32 | z90 = 100 33 | else: 34 | z68 = np.percentile(abs(delta[overlap]), 68) 35 | z50 = np.percentile(abs(delta[overlap]), 50) 36 | z90 = np.percentile(abs(delta[overlap]), 90) 37 | 38 | # Generate relative vertical accuracy plots 39 | PLOTS_ENABLE = False 40 | if PLOTS_ENABLE: 41 | errorMap = np.copy(delta) 42 | errorMap[~overlap] = np.nan 43 | plot.make(errorMap, 'Object Height Error', 581, saveName="relVertAcc_hgtErr", colorbar=True) 44 | plot.make(errorMap, 'Object Height Error (Clipped)', 582, saveName="relVertAcc_hgtErr_clipped", colorbar=True, 45 | vmin=-5, vmax=5) 46 | 47 | # Compute relative horizontal accuracy 48 | # Consider only objects selected in reference mask. 
49 | 50 | # Find region edge pixels 51 | if np.histogram(refMask)[0][9] != 1: 52 | kernel = np.ones((3, 3), np.int) 53 | refEdge = convolve2d(refMask.astype(np.int), kernel, mode="same", boundary="symm") 54 | testEdge = convolve2d(testMask.astype(np.int), kernel, mode="same", boundary="symm") 55 | validEdge = convolve2d(validMask.astype(np.int), kernel, mode="same", boundary="symm") 56 | refEdge = (refEdge < 9) & refMask & (validEdge == 9) 57 | testEdge = (testEdge < 9) & testMask & (validEdge == 9) 58 | refPts = refEdge.nonzero() 59 | testPts = testEdge.nonzero() 60 | if (np.unique(testEdge).__len__() == 1 and np.unique(testEdge)[0] == False) or \ 61 | (np.unique(refEdge).__len__() == 1 and np.unique(refEdge)[0] == False): 62 | refPts = np.where(refMask == True) 63 | testPts = np.where(testMask == True) 64 | else: 65 | refPts = np.where(refMask == True) 66 | testPts = np.where(testMask == True) 67 | 68 | # Use KD Tree to find test point nearest each reference point 69 | refPts_transpose = np.transpose(refPts) 70 | testPts_transpose = np.transpose(testPts) 71 | signed_x_errors = [] 72 | signed_y_errors = [] 73 | try: 74 | tree = cKDTree(np.transpose(testPts)) 75 | dist, indexes = tree.query(np.transpose(refPts)) 76 | # Store the x and y distances 77 | for refPt_index, testPt_index in enumerate(indexes): 78 | refPt = refPts_transpose[refPt_index] 79 | testPt = testPts_transpose[testPt_index] 80 | signed_x_errors.append(testPt[1] - refPt[1]) 81 | signed_y_errors.append(testPt[0] - refPt[0]) 82 | signed_x_errors = np.array(signed_x_errors) * gsd 83 | signed_y_errors = np.array(signed_y_errors) * gsd 84 | dist = dist * gsd 85 | # Sanity check 86 | for i, x in enumerate(signed_x_errors): 87 | if np.sqrt(signed_x_errors[i] ** 2 + signed_y_errors[i] ** 2) != dist[i]: 88 | print("Error!") 89 | except ValueError: 90 | dist = np.nan 91 | 92 | # Calculate horizontal percentile errors. 93 | # H63 approximates HRMSE assuming binormal error distribution. 
94 | h63 = np.percentile(abs(dist), 63) 95 | h50 = np.percentile(abs(dist), 50) 96 | h90 = np.percentile(abs(dist), 90) 97 | hrmse_explicit = np.sqrt(sum(dist ** 2)/len(dist)) 98 | 99 | # Get statistics for data 100 | signed_x_errors_skew = skew(signed_x_errors) 101 | signed_y_errors_skew = skew(signed_y_errors) 102 | signed_z_errors_skew = skew(signed_z_errors) 103 | 104 | signed_x_errors_kurtosis = kurtosis(signed_x_errors) 105 | signed_y_errors_kurtosis = kurtosis(signed_y_errors) 106 | signed_z_errors_kurtosis = kurtosis(signed_z_errors) 107 | 108 | signed_x_errors_median = float(np.median(signed_x_errors).item()) 109 | signed_y_errors_median = float(np.median(signed_y_errors).item()) 110 | signed_z_errors_median = float(np.median(signed_z_errors).item()) 111 | 112 | signed_x_errors_var = float(np.var(signed_x_errors).item()) 113 | signed_y_errors_var = float(np.var(signed_y_errors).item()) 114 | signed_z_errors_var = float(np.var(signed_z_errors).item()) 115 | 116 | signed_x_errors_mean = float(np.mean(signed_x_errors).item()) 117 | signed_y_errors_mean = float(np.mean(signed_y_errors).item()) 118 | signed_z_errors_mean = float(np.mean(signed_z_errors).item()) 119 | 120 | bin_range_horz = 1.0 # meters 121 | bin_range_vert = 0.5 # meters 122 | number_of_bins_x = int(np.ceil(abs(signed_x_errors.max() - signed_x_errors.min())/bin_range_horz)) 123 | number_of_bins_y = int(np.ceil(abs(signed_y_errors.max() - signed_y_errors.min()) / bin_range_horz)) 124 | try: 125 | number_of_bins_z = int(np.ceil(abs(signed_z_errors.max() - signed_z_errors.min())/bin_range_vert)) 126 | except ValueError: 127 | number_of_bins_z = np.nan 128 | # Generate histogram 129 | if not for_objectwise: 130 | plot.make_distance_histogram(signed_x_errors, fig=593, plot_title='Signed X Errors', bin_width=bin_range_horz, bins=number_of_bins_x) 131 | plot.make_distance_histogram(signed_y_errors, fig=594, plot_title='Signed Y Errors', bin_width=bin_range_horz, bins=number_of_bins_y) 132 | try: 133 | 
plot.make_distance_histogram(signed_z_errors, fig=595, plot_title='Signed Z Errors', bin_width=bin_range_vert, bins=number_of_bins_z) 134 | except Exception: 135 | print("Couldn't make z error plot. Something went wrong...") 136 | 137 | # Generate relative horizontal accuracy plots 138 | PLOTS_ENABLE = False # Turn off this feature unless otherwise because it takes a lot of time 139 | if PLOTS_ENABLE: 140 | plot.make(refEdge, 'Reference Model Perimeters', 591, 141 | saveName="relHorzAcc_edgeMapRef", cmap='Greys') 142 | plot.make(testEdge, 'Test Model Perimeters', 592, 143 | saveName="relHorzAcc_edgeMapTest", cmap='Greys') 144 | 145 | plt = plot.make(None,'Relative Horizontal Accuracy') 146 | plt.imshow(refMask & validMask, cmap='Greys') 147 | plt.plot(refPts[1], refPts[0], 'r,') 148 | plt.plot(testPts[1], testPts[0], 'b,') 149 | try: 150 | plt.plot((refPts[1], testPts[1][indexes]), (refPts[0], testPts[0][indexes]), 'y', linewidth=0.05) 151 | plot.save("relHorzAcc_nearestPoints") 152 | except NameError: 153 | # Not possible to calculate HRMSE because lack of performer polygon, namely indexes variable 154 | pass 155 | 156 | metrics = { 157 | 'z50': z50, 158 | 'zrmse': z68, 159 | 'z90': z90, 160 | 'h50': h50, 161 | 'hrmse': h63, 162 | 'h90': h90, 163 | 'zrmse_explicit': zrmse_explicit, 164 | 'hrmse_explicit': hrmse_explicit, 165 | 'signed_x_errors_kurtosis': signed_x_errors_kurtosis, 166 | 'signed_y_errors_kurtosis': signed_y_errors_kurtosis, 167 | 'signed_z_errors_kurtosis': signed_z_errors_kurtosis, 168 | 'signed_x_errors_skew': signed_x_errors_skew, 169 | 'signed_y_errors_skew': signed_y_errors_skew, 170 | 'signed_z_errors_skew': signed_z_errors_skew, 171 | 'signed_x_errors_median': signed_x_errors_median, 172 | 'signed_y_errors_median': signed_y_errors_median, 173 | 'signed_z_errors_median': signed_z_errors_median, 174 | 'signed_x_errors_var': signed_x_errors_var, 175 | 'signed_y_errors_var': signed_y_errors_var, 176 | 'signed_z_errors_var': signed_z_errors_var, 
# =====================================================================
# core3dmetrics/geometrics/terrain_accuracy_metrics.py
# Requires: numpy as np. (The original module also imports calcMops from
# .metrics_util, but this function does not use it.)
# =====================================================================

def run_terrain_accuracy_metrics(refDTM, testDTM, refMask, threshold=1, no_data_value=-10000, plot=None):
    """
    Compute terrain model (DTM) accuracy metrics.

    :param refDTM: reference terrain model (2-D elevation array)
    :param testDTM: test terrain model, same shape as refDTM
    :param refMask: mask of reference objects; non-zero cells are excluded
        from the Z-error percentiles because the ground is not observable
        under those objects
    :param threshold: max |error| (meters) for a cell to count as complete
    :param no_data_value: sentinel elevation marking invalid refDTM cells
    :param plot: optional plot helper; None disables plotting
    :return: dict with z50/zrmse/z90 percentile errors and completeness
    """
    PLOTS_ENABLE = plot is not None

    # Compute Z error percentiles.
    # Ignore objects identified by the reference mask because the
    # ground is not expected to be observable under those objects.
    delta = testDTM - refDTM

    # Treat no-data cells as masked. Work on a copy so the caller's
    # refMask is not mutated (the original modified the input in place).
    refMask = np.array(refMask, copy=True)
    refMask[refDTM == no_data_value] = 1

    # Z errors over unmasked (observable-ground) cells only
    delta_minus_mask = delta[refMask == 0]
    z68 = np.percentile(abs(delta_minus_mask), 68)
    z50 = np.percentile(abs(delta_minus_mask), 50)
    z90 = np.percentile(abs(delta_minus_mask), 90)

    # Compute DTM completeness.
    # NOTE(review): completeness is computed over ALL cells, including
    # masked / no-data ones — confirm this is intended.
    match = abs(testDTM - refDTM) < threshold
    completeness = np.sum(match) / np.size(match)

    # This is a hack to avoid water flattening at z = -1 in reference DTM
    # files from 133 US Cities from corrupting the results. The water
    # should be properly labeled instead, but this is unlikely to cause
    # problems for our test areas. Just keep an eye on it.
    distanceFromWater = abs(refDTM + 1.0)
    match = match[distanceFromWater > 0.2]
    completeness_water_removed = np.sum(match) / np.size(match)

    if PLOTS_ENABLE:
        # Use a copy: the original aliased `delta`, so the +/-5 m clipping
        # below also silently clipped `delta` itself before plotting.
        errorMap = np.copy(delta)
        errorMap[refMask != 0] = np.nan
        errorMap[errorMap > 5] = 5
        errorMap[errorMap < -5] = -5
        plot.make(errorMap, 'Terrain Model - Height Error', 481, saveName="terrainAcc_HgtErr", colorbar=True)

    return {
        'z50': z50,
        'zrmse': z68,  # z68 approximates RMSE for normally-distributed error
        'z90': z90,
        'completeness': completeness,
        'completeness_water_removed': completeness_water_removed
    }


# =====================================================================
# core3dmetrics/geometrics/threshold_geometry_metrics.py
# Requires: numpy as np, math, json, scipy.stats.pearsonr, and
# calcMops / getUnitArea from .metrics_util (module-level names).
# =====================================================================

def run_threshold_geometry_metrics(refDSM, refDTM, refMask, testDSM, testDTM, testMask,
                                   tform, ignoreMask, plot=None, for_objectwise=False,
                                   testCONF=None, verbose=True):
    """
    Score a test surface model against reference using threshold geometry
    metrics: 2-D footprint and 3-D volumetric TP/FN/FP.

    :param refDSM: reference digital surface model (2-D elevation array)
    :param refDTM: reference digital terrain model (bare earth)
    :param refMask: boolean mask of reference objects
    :param testDSM: test digital surface model
    :param testDTM: test digital terrain model (not used for heights;
        refDTM is deliberately used for both models for consistency)
    :param testMask: boolean mask of test objects
    :param tform: geo transform used to derive per-pixel area (m^2)
    :param ignoreMask: boolean mask of pixels excluded from scoring
    :param plot: plot helper, or None to disable plotting
    :param for_objectwise: True when called per-object; disables plotting
    :param testCONF: optional test confidence raster; enables the Pearson
        correlation between |height error| and confidence
    :param verbose: print detailed progress and the final metrics report
    :return: (metrics dict, unitArea, stoplight plot filename or None,
        error-height plot filename or None)
    :raises ValueError: if TP+FN / TP+FP totals fail the internal
        consistency checks against ref/test totals
    """
    # INPUT PARSING==========

    # parse plot input: objectwise runs never plot
    if plot is None or for_objectwise is True:
        PLOTS_ENABLE = False
    else:
        PLOTS_ENABLE = True
        PLOTS_SAVE_PREFIX = "thresholdGeometry_"

    # Determine evaluation units (per-pixel area in m^2).
    unitArea = getUnitArea(tform)

    # 2D footprints for evaluation
    ref_footprint = refMask & ~ignoreMask
    test_footprint = testMask & ~ignoreMask

    # building height (DSM-DTM, with zero elevation outside footprint)
    ref_height = refDSM.astype(np.float64) - refDTM.astype(np.float64)
    ref_height[~ref_footprint] = 0

    # refDTM is purposefully used twice for consistency
    test_height = testDSM.astype(np.float64) - refDTM.astype(np.float64)
    test_height[~test_footprint] = 0

    # total 2D area (in pixels)
    ref_total_area = np.sum(ref_footprint, dtype=np.uint64)
    test_total_area = np.sum(test_footprint, dtype=np.uint64)

    # total 3D volume (in meters^3)
    ref_total_volume = np.sum(np.absolute(ref_height)) * unitArea
    test_total_volume = np.sum(np.absolute(test_height)) * unitArea

    # verbose reporting
    if verbose:
        print('REF height range [mn,mx] = [{},{}]'.format(np.amin(ref_height), np.amax(ref_height)))
        print('TEST height range [mn,mx] = [{},{}]'.format(np.amin(test_height), np.amax(test_height)))
        print('REF area (px), volume (m^3) = [{},{}]'.format(ref_total_area, ref_total_volume))
        print('TEST area (px), volume (m^3) = [{},{}]'.format(test_total_area, test_total_volume))

    # plot
    error_height_fn = None
    if PLOTS_ENABLE:
        print('Input plots...')

        plot.make(ref_footprint, 'Reference Object Regions', 211, saveName=PLOTS_SAVE_PREFIX + "refObjMask")
        plot.make(ref_height, 'Reference Object Height', 212, saveName=PLOTS_SAVE_PREFIX + "refObjHgt", colorbar=True)

        plot.make(test_footprint, 'Test Object Regions', 251, saveName=PLOTS_SAVE_PREFIX + "testObjMask")
        plot.make(test_height, 'Test Object Height', 252, saveName=PLOTS_SAVE_PREFIX + "testObjHgt", colorbar=True)

        errorMap = (test_height - ref_height)
        errorMap[~ref_footprint & ~test_footprint] = np.nan
        plot.make(errorMap, 'Height Error', 291, saveName=PLOTS_SAVE_PREFIX + "errHgt", colorbar=True)
        plot.make(errorMap, 'Height Error (clipped)', 292, saveName=PLOTS_SAVE_PREFIX + "errHgtClipped", colorbar=True,
                  vmin=-5, vmax=5)
        error_height_fn = plot.make_error_map(error_map=errorMap, ref=ref_footprint,
                                              saveName=PLOTS_SAVE_PREFIX + "errHgtImageOnly",
                                              ignore=ignoreMask)

    # 2D ANALYSIS==========

    # 2D metric arrays
    tp_2D_array = test_footprint & ref_footprint
    fn_2D_array = ~test_footprint & ref_footprint
    fp_2D_array = test_footprint & ~ref_footprint

    # 2D total area (in pixels)
    tp_total_area = np.sum(tp_2D_array, dtype=np.uint64)
    fn_total_area = np.sum(fn_2D_array, dtype=np.uint64)
    fp_total_area = np.sum(fp_2D_array, dtype=np.uint64)

    # error check (exact, as this is an integer comparison)
    if (tp_total_area + fn_total_area) != ref_total_area:
        raise ValueError('2D TP+FN ({}+{}) does not equal ref area ({})'.format(
            tp_total_area, fn_total_area, ref_total_area))
    elif (tp_total_area + fp_total_area) != test_total_area:
        raise ValueError('2D TP+FP ({}+{}) does not equal test area ({})'.format(
            tp_total_area, fp_total_area, test_total_area))

    # verbose reporting
    if verbose:
        print('2D TP+FN ({}+{}) equals ref area ({})'.format(
            tp_total_area, fn_total_area, ref_total_area))
        print('2D TP+FP ({}+{}) equals test area ({})'.format(
            tp_total_area, fp_total_area, test_total_area))

    # plot
    stoplight_fn = None
    if PLOTS_ENABLE:
        print('2D analysis plots...')
        plot.make(tp_2D_array, 'True Positive Regions', 283, saveName=PLOTS_SAVE_PREFIX + "truePositive")
        plot.make(fn_2D_array, 'False Negative Regions', 281, saveName=PLOTS_SAVE_PREFIX + "falseNegative")
        plot.make(fp_2D_array, 'False Positive Regions', 282, saveName=PLOTS_SAVE_PREFIX + "falsePositive")
        stoplight_fn = plot.make_stoplight_plot(fp_image=fp_2D_array, fn_image=fn_2D_array, ref=ref_footprint,
                                                saveName=PLOTS_SAVE_PREFIX + "stoplight")

        layer = np.zeros_like(ref_footprint).astype(np.uint8) + 3  # initialize as True Negative
        layer[tp_2D_array] = 1  # TP
        layer[fp_2D_array] = 2  # FP
        layer[fn_2D_array] = 4  # FN
        cmap = [[1, 1, 1],   # TP
                [0, 0, 1],   # FP
                [1, 1, 1],   # TN
                [1, 0, 0]]   # FN
        plot.make(layer, 'Object Footprint Errors', 293, saveName=PLOTS_SAVE_PREFIX + "errFootprint",
                  colorbar=False, cmap=cmap)

    # 3D ANALYSIS==========

    # Flip underground reference structures:
    # flip all heights where ref_height is less than zero, allowing subsequent
    # calculations to only consider differences relative to positive/absolute
    # reference structures.
    tf = ref_height < 0
    ref_height[tf] = -ref_height[tf]
    test_height[tf] = -test_height[tf]

    # separate test height into above & below ground sets
    test_above = np.copy(test_height)
    test_above[test_height < 0] = 0

    test_below = np.copy(test_height)
    test_below[test_height > 0] = 0
    test_below = np.absolute(test_below)

    # 3D metric arrays
    tp_3D_array = np.minimum(ref_height, test_above)            # ref/test height overlap
    fn_3D_array = (ref_height - tp_3D_array)                    # test too short
    fp_3D_array = (test_above - tp_3D_array) + test_below       # test too tall OR test below ground

    # 3D metric total volume (in meters^3)
    tp_total_volume = np.sum(tp_3D_array) * unitArea
    fn_total_volume = np.sum(fn_3D_array) * unitArea
    fp_total_volume = np.sum(fp_3D_array) * unitArea

    # error check (floating point comparison via math.isclose)
    if not math.isclose((tp_total_volume + fn_total_volume), ref_total_volume):
        raise ValueError('3D TP+FN ({}+{}) does not equal ref volume ({})'.format(
            tp_total_volume, fn_total_volume, ref_total_volume))
    elif not math.isclose((tp_total_volume + fp_total_volume), test_total_volume):
        raise ValueError('3D TP+FP ({}+{}) does not equal test volume ({})'.format(
            tp_total_volume, fp_total_volume, test_total_volume))

    # verbose reporting
    if verbose:
        print('3D TP+FN ({}+{}) equals ref volume ({})'.format(
            tp_total_volume, fn_total_volume, ref_total_volume))
        print('3D TP+FP ({}+{}) equals test volume ({})'.format(
            tp_total_volume, fp_total_volume, test_total_volume))

    # Confidence Metrics
    if testCONF is not None:
        def nodata_to_nan(img):
            # map all common NODATA sentinels to NaN
            for nodata in (-9999, -10000):
                img[img == nodata] = np.nan
            return img

        # compute differences over pixels valid in all three rasters
        testDSM_filt = nodata_to_nan(testDSM.copy())
        refDSM_filt = nodata_to_nan(refDSM.copy())
        testCONF_filt = nodata_to_nan(testCONF.copy())
        valid_mask = np.logical_not(np.logical_or(np.logical_or(np.isnan(refDSM_filt), np.isnan(testDSM_filt)),
                                                  np.isnan(testCONF_filt)))
        building_mask = np.logical_or(test_footprint, ref_footprint)
        w = np.logical_and(valid_mask, building_mask)

        # since not running registration for now, remove z offset
        # NOTE(review): the aligned tgt_dsm is only used to report the
        # residual dz; abs_delta below still uses the unaligned testDSM —
        # confirm this is intended.
        dz = np.nanmedian(refDSM_filt - testDSM_filt)
        tgt_dsm = testDSM_filt + dz
        dz = np.nanmedian(refDSM_filt - tgt_dsm)
        print('dz after align = ', dz)

        abs_delta = np.abs(testDSM_filt - refDSM_filt)
        abs_delta_minus_mask = abs_delta[w]
        confidence_minus_mask = testCONF_filt[w]
        # Pearson correlation between |height error| and (negated) confidence
        p = pearsonr(abs_delta_minus_mask, -confidence_minus_mask)
        print(p)
    else:
        p = [np.nan, np.nan]

    # CLEANUP==========

    # final metrics
    # (builtin int()/float() instead of np.int/np.float: those aliases were
    # deprecated in NumPy 1.20 and removed in 1.24)
    metrics = {
        '2D': calcMops(tp_total_area, fn_total_area, fp_total_area),
        '3D': calcMops(tp_total_volume, fn_total_volume, fp_total_volume),
        'area': {'reference_area': int(ref_total_area), 'test_area': int(test_total_area)},
        'volume': {'reference_volume': float(ref_total_volume), 'test_volume': float(test_total_volume)},
        'pearson': {'pearson-r': float(p[0]), 'pearson-pvalue': float(p[1])}
    }

    # verbose reporting
    if verbose:
        print('METRICS REPORT:')
        print(json.dumps(metrics, indent=2))

    # return metric dictionary
    return metrics, unitArea, stoplight_fn, error_height_fn


# =====================================================================
# core3dmetrics/geometrics/threshold_material_metrics.py (header)
# =====================================================================

# Define structure information data structure.
class Structure:
    """Per-structure record: footprint pixels plus primary material labels."""

    def __init__(self):
        self.pixels = []               # list of pixel coordinate (x, y) tuples
        self.truthPrimaryMaterial = 0  # index of truth primary structure material
        self.testPrimaryMaterial = 0   # index of test primary structure material
def getStructures(img):
    """
    Build a dictionary of structures keyed by structure index.

    :param img: 2-D array of structure indices (0 = no structure)
    :return: defaultdict mapping index -> Structure whose .pixels is the
        list of (x, y) coordinates belonging to that structure
    """
    structuresDic = defaultdict(Structure)
    for y in range(len(img)):
        for x in range(len(img[y])):
            val = img[y][x]
            if val > 0:
                structuresDic[val].pixels.append((x, y))  # add pixel to list for this structure index
    return structuresDic


def getMaterialFromStructurePixels(img, pixels, materialIndicesToIgnore):
    """
    Determine the most abundant material index within a structure footprint.

    :param img: 2-D material-index array
    :param pixels: iterable of (x, y) pixel coordinates of the footprint
    :param materialIndicesToIgnore: material indices excluded from voting
    :return: most abundant non-ignored material index, or -1 if none present
    """
    # Count pixels of each material
    indexCounts = defaultdict(int)
    for px, py in pixels:
        indexCounts[img[py][px]] += 1
    # Find most abundant material
    maxMaterialCount = -1
    maxMaterialCountIndex = -1
    for material, count in indexCounts.items():
        if count > maxMaterialCount and material not in materialIndicesToIgnore:
            maxMaterialCount = count
            maxMaterialCountIndex = material
    return maxMaterialCountIndex


def mergeConfusionMatrixUncertainAsphaltConcreteCells(confMatrix):
    """
    Move counts from the 'Asph/Con Uncertain' truth row (index 14) into the
    asphalt (1) and concrete (2) diagonal cells, in place.

    NOTE(review): indices 1, 2 and 14 are hard-coded to the material table
    used by material_plot — confirm they match the config file.
    """
    # (removed unused local `sz = confMatrix.shape[0]`)
    confMatrix[1][1] += confMatrix[14][1]
    confMatrix[14][1] = 0
    confMatrix[2][2] += confMatrix[14][2]
    confMatrix[14][2] = 0


def material_plot(refMTL, testMTL, plot):
    """Render the reference and test material maps with a shared legend."""
    PLOTS_SAVE_PREFIX = "thresholdMaterials_"

    # This plot assumes material labels/indices specified in the config file
    # are the same as defined here
    cmap = [
        [0.00, 0.00, 0.00],
        [0.55, 0.55, 0.55],
        [0.20, 0.55, 0.65],
        [1.00, 1.00, 0.11],
        [0.03, 0.40, 0.03],
        [0.47, 0.63, 0.27],
        [0.86, 0.30, 0.10],
        [0.90, 0.00, 0.00],
        [0.31, 0.16, 0.04],
        [0.12, 1.00, 1.78],
        [0.00, 0.00, 1.00],
        [1.00, 1.00, 1.00],
        [1.00, 0.00, 1.00],
        [1.00, 0.39, 1.00],
        [1.00, 0.66, 1.00]
    ]

    labels = [
        "Unclassified",
        "Asphalt",
        "Concrete/Stone",
        "Glass",
        "Tree",
        "Non-tree veg",
        "Metal",
        "Ceramic",
        "Soil",
        "Solar panel",
        "Water",
        "Polymer",
        "Unscored",
        "Indeterminate",
        "Indeterminate Asphalt/Concrete"
    ]

    ticks = list(np.arange(0, len(labels)))

    # NOTE(review): both calls use figure id 340, so the second plot may
    # reuse/overwrite the first figure — confirm whether 341 was intended.
    plot.make(refMTL, 'Reference Materials ', 340, saveName=PLOTS_SAVE_PREFIX + "ref", colorbar=True,
              cmap=cmap, cm_labels=labels, cm_ticks=ticks, vmin=-0.5, vmax=len(labels) - 0.5)

    plot.make(testMTL, 'Test Materials', 340, saveName=PLOTS_SAVE_PREFIX + "test", colorbar=True,
              cmap=cmap, cm_labels=labels, cm_ticks=ticks, vmin=-0.5, vmax=len(labels) - 0.5)


def run_material_metrics(refNDX, refMTL, testMTL, materialNames, materialIndicesToIgnore, plot=None, verbose=True):
    """
    Run material labeling metrics (pixel-wise and per-structure) and report.

    :param refNDX: 2-D array of reference structure indices (0 = none)
    :param refMTL: 2-D reference material-index array
    :param testMTL: 2-D test material-index array
    :param materialNames: list of material names indexed by material id
    :param materialIndicesToIgnore: truth material ids excluded from scoring
    :param plot: plot helper, or None to skip plots
    :param verbose: kept for interface compatibility (not used here)
    :return: dict of structure-wise and pixel-wise metrics
    """
    print("Defined materials:", ', '.join(materialNames))
    print("Ignored materials in truth: ", ', '.join([materialNames[x] for x in materialIndicesToIgnore]))

    print("Building dictionary of reference structure locations and labels...")
    structuresDic = getStructures(refNDX)
    print("There are ", len(structuresDic), "reference structures.")

    print("Selecting the most abundant material for each structure in reference model...")
    for k in structuresDic.keys():
        structuresDic[k].truthPrimaryMaterial = getMaterialFromStructurePixels(
            refMTL, structuresDic[k].pixels, materialIndicesToIgnore)

    print("Selecting the most abundant material for each structure in test model...")
    for k in structuresDic.keys():
        structuresDic[k].testPrimaryMaterial = getMaterialFromStructurePixels(
            testMTL, structuresDic[k].pixels, materialIndicesToIgnore)

    # Create pixel label confusion matrix
    np.set_printoptions(linewidth=120)
    pixelConfMatrix = np.zeros((len(materialNames), len(materialNames)), dtype=np.int32)
    for y in range(len(refMTL)):
        for x in range(len(refMTL[y])):
            if refNDX[y][x] != 0:  # limit evaluation to inside structure outlines
                if refMTL[y][x] not in materialIndicesToIgnore:  # limit evaluation to valid materials
                    pixelConfMatrix[refMTL[y][x]][testMTL[y][x]] += 1

    # Re-classify indeterminate asphalt-concrete in pixel-wise confusion matrix
    mergeConfusionMatrixUncertainAsphaltConcreteCells(pixelConfMatrix)

    # Classes present in the reference model (IOU)
    presentRefClasses = pixelConfMatrix.sum(axis=1) > 0

    # Don't include 'Indeterminate asphalt/concrete' in mean IOU; its counts
    # were re-assigned to asphalt/concrete above.
    if 'Indeterminate asphalt/concrete' in materialNames:
        presentRefClasses[materialNames.index('Indeterminate asphalt/concrete')] = False

    # Compute pixelwise intersection over union (absent classes stay -1)
    pixelIOU = np.divide(np.diag(pixelConfMatrix),
                         (pixelConfMatrix.sum(axis=0) + pixelConfMatrix.sum(axis=1) - np.diag(pixelConfMatrix)),
                         out=-np.ones((1, len(pixelConfMatrix[0])), np.double),
                         where=presentRefClasses != 0)

    # Mean IOU over classes present in the reference
    pixelMeanIOU = np.mean(pixelIOU[0][presentRefClasses])

    # Dictionary of IOU for each reference material, for output
    pixelIOUkvp = dict()
    for classIdx in np.flatnonzero(presentRefClasses):
        pixelIOUkvp[materialNames[classIdx].strip()] = pixelIOU[0][classIdx]

    # parse plot input
    if plot is not None:
        material_plot(refMTL, testMTL, plot)

    # Print pixel statistics
    print()
    scoredPixelsCount = np.sum(pixelConfMatrix)
    correctPixelsCount = np.trace(pixelConfMatrix)
    correctPixelsFraction = correctPixelsCount / scoredPixelsCount
    print("Pixel material confusion matrix:")
    print(pixelConfMatrix)
    print("Pixel material IOU:")
    print(pixelIOU)
    print("Pixel material mIOU:", pixelMeanIOU)
    print('Pixelwise IOU by Class:')
    for name in pixelIOUkvp:
        print('', name, ': ', pixelIOUkvp[name])
    print("Total pixels scored: ", scoredPixelsCount)
    print("Total pixels correctly classified: ", correctPixelsCount)
    print("Percent pixels correctly classified: ", str(correctPixelsFraction * 100) + "%")
    print()

    # Create structure label confusion matrix
    unscoredCount = 0
    structureConfMatrix = np.zeros((len(materialNames), len(materialNames)), dtype=np.int32)
    for k in structuresDic.keys():
        truthMaterial = structuresDic[k].truthPrimaryMaterial
        if truthMaterial not in materialIndicesToIgnore and truthMaterial != -1:
            structureConfMatrix[truthMaterial][structuresDic[k].testPrimaryMaterial] += 1
        else:
            unscoredCount += 1

    # Re-classify indeterminate asphalt-concrete in structure confusion matrix
    mergeConfusionMatrixUncertainAsphaltConcreteCells(structureConfMatrix)

    # Print structure statistics
    scoredStructuresCount = np.sum(structureConfMatrix)
    correctStructuresCount = np.trace(structureConfMatrix)
    correctStructuresFraction = correctStructuresCount / scoredStructuresCount
    print("Primary structure material confusion matrix:")
    print(structureConfMatrix)
    print("Structures marked as non-scored: ", unscoredCount)
    print("Total structures scored: ", scoredStructuresCount)
    print("Total structures correctly classified: ", correctStructuresCount)
    print("Percent structures correctly classified: ", str(correctStructuresFraction * 100) + "%")

    return {
        'scored_structures': int(scoredStructuresCount),
        'fraction_structures_correct': correctStructuresFraction,
        'fraction_pixels_correct': correctPixelsFraction,
        'structurewise_confusion_matrix': str(structureConfMatrix),
        'pixelwise_mIOU': pixelMeanIOU,
        'pixelwise_IOU': pixelIOUkvp,
        'pixelwise_confusion_matrix': str(pixelConfMatrix)
    }


# =====================================================================
# core3dmetrics/instancemetrics/Building_Classes.py
# =====================================================================

class Building:
    """Footprint, bounding box and match bookkeeping for one building instance."""

    def __init__(self, label):
        """
        Constructor for building object.

        :param label: unique building index
        """
        self.label = label
        self.match = False            # True once matched to a counterpart
        self.matched_with = None      # label of the matched counterpart
        self.fp_overlap_with = []     # labels of overlapping false positives
        self.is_uncertain = False
        self.is_ignored = False
        self.has_error = False
        self.iou_score = None
        self.min_x = None             # bounding box in pixel coordinates
        self.min_y = None
        self.max_x = None
        self.max_y = None
        self.points = None            # list of (x, y) pixel tuples
        self.on_boundary = False
        self.area = None              # cached pixel area
        self.perimeter = None

    def calculate_area(self):
        """
        Count footprint pixels; caches the value on the object and returns it.

        :return: pixel area of the building
        """
        self.area = len(self.points)
        return self.area

    def create_individual_building_raster(self):
        """
        Create a minimized 0/1 raster of this building (used for perimeter).

        :return: raster of the building object, shape (height, width)
        """
        canvas = np.zeros((self.max_y - self.min_y + 1,
                           self.max_x - self.min_x + 1))
        for px, py in self.points:
            canvas[py - self.min_y, px - self.min_x] = 1
        return canvas


def create_raster_from_building_objects(building_list, x_res, y_res):
    """
    Burn building labels into a uint16 raster.

    NOTE(review): points are indexed here as canvas[point[0], point[1]],
    the transpose of Building.create_individual_building_raster — confirm
    the intended row/column convention.

    :param building_list: dict of label -> Building
    :param x_res: first raster dimension
    :param y_res: second raster dimension
    :return: uint16 raster of buildings
    """
    canvas = np.zeros((x_res, y_res), dtype=np.uint16)
    for current_building in building_list.values():
        for current_point in current_building.points:
            canvas[current_point[0], current_point[1]] = current_building.label
    return canvas


# =====================================================================
# core3dmetrics/instancemetrics/GeospatialConversions.py
# =====================================================================

_print_once = {}


# Helper function to keep logs minimal
def print_once(string="", dump_override=False):
    """
    Record a log message for de-duplicated output; with dump_override=True,
    print every recorded message with its repeat count instead.
    """
    if dump_override:
        for key, value in _print_once.items():
            if value > 1:
                print("{}: {} times".format(key, value))
            else:
                print("{}".format(key))
    else:
        _print_once[string] = _print_once.get(string, 0) + 1


# example GDAL error handler function
def gdal_error_handler(err_class, err_num, err_msg):
    """Pretty-print a GDAL error callback (class, number, message)."""
    from osgeo import gdal  # local import so this module loads without GDAL installed
    errtype = {
        gdal.CE_None: 'None',
        gdal.CE_Debug: 'Debug',
        gdal.CE_Warning: 'Warning',
        gdal.CE_Failure: 'Failure',
        gdal.CE_Fatal: 'Fatal'
    }
    err_msg = err_msg.replace('\n', ' ')
    err_class = errtype.get(err_class, 'None')
    print('Error Number: %s' % err_num)
    print('Error Type: %s' % err_class)
    print('Error Message: %s' % err_msg)


def ll2utm(lat, lon, datum='wgs84'):
    """
    Convert geodetic latitude/longitude to UTM coordinates.

    :param lat: latitude in degrees
    :param lon: longitude in degrees
    :param datum: one of 'wgs84', 'nad83', 'grs80', 'nad27', 'int24', 'clk66'
    :return: utm-x coordinate, utm-y coordinate, zone number (signed by hemisphere)
    :raises ValueError: for an unknown datum (previously fell through and
        crashed with UnboundLocalError)
    """
    lat = float(lat)
    lon = float(lon)
    # datum -> [semi-major axis a (m), inverse flattening 1/f]
    datums = {
        'wgs84': [6378137.0, 298.257223563],
        'nad83': [6378137.0, 298.257222101],
        'grs80': [6378137.0, 298.257222101],
        'nad27': [6378206.4, 294.978698214],
        'int24': [6378388.0, 297.000000000],
        'clk66': [6378206.4, 294.978698214]
    }
    if datum not in datums:
        raise ValueError("unknown datum: {!r}".format(datum))

    # Constants
    D0 = 180 / np.pi  # conversion rad to deg
    K0 = 0.9996       # UTM scale factor
    X0 = 500000       # UTM false EAST (m)

    # defaults (no caller-facing zone override exists yet)
    zone = None

    # BUG FIX: ellipsoid parameters were always read from 'wgs84'
    # regardless of the requested datum.
    A1, F1 = datums[datum]

    p1 = lat / D0
    l1 = lon / D0

    # UTM zone automatic setting
    if zone is None:
        F0 = np.round((l1 * D0 + 183) / 6)
    else:
        F0 = zone

    B1 = A1 * (1 - (1 / F1))
    E1 = np.sqrt((A1 * A1 - B1 * B1) / (A1 * A1))
    P0 = 0 / D0
    L0 = (6 * F0 - 183) / D0        # UTM origin longitude (rad)
    Y0 = 10000000 * (p1 < 0)        # UTM false northing (m)
    N = K0 * A1

    C = calculate_projection_coefficients(E1, 0)
    B = C[0] * P0 + C[1] * np.sin(2 * P0) + C[2] * np.sin(4 * P0) + C[3] * np.sin(6 * P0) + C[4] * np.sin(8 * P0)

    YS = Y0 - N * B
    C = calculate_projection_coefficients(E1, 2)
    L = np.log(np.tan(np.pi / 4 + p1 / 2) * (((1 - E1 * np.sin(p1)) / (1 + E1 * np.sin(p1))) ** (E1 / 2)))
    # complex intermediate combining the two Mercator components
    z = (np.arctan(np.sinh(L) / np.cos(l1 - L0))) + \
        (1j * np.log(np.tan(np.pi / 4 + np.arcsin(np.sin(l1 - L0) / np.cosh(L)) / 2)))
    Z = N * C[0] * z + N * (C[1] * np.sin(2 * z) + C[2] * np.sin(4 * z) + C[3] * np.sin(6 * z) + C[4] * np.sin(8 * z))
    xs = np.imag(Z) + X0
    ys = np.real(Z) + YS

    f = F0 * np.sign(lat)
    fu = np.unique(f)
    # C's rows are length-1 arrays, so xs/ys are length-1 arrays and this
    # branch is always taken for scalar lat/lon inputs.
    if np.size(fu) == 1 and np.isscalar(fu[0]):
        f = fu[0]
        x = xs[0]
        y = ys[0]

    return x, y, f


def calculate_projection_coefficients(e, m):
    """
    COEF Projection coefficients.

    :param e: first ellipsoid eccentricity
    :param m: m=0 for transverse mercator, m=1 for transverse mercator
        reverse, m=2 for meridian arc
    :return: (5, 1) numpy array of projection coefficients
    :raises ValueError: if m is not 0, 1 or 2 (previously printed an error
        and crashed with NameError)
    """
    if m == 0:
        c0 = np.array([[-175 / 16384, 0, -5 / 256, 0, -3 / 64, 0, -1 / 4, 0, 1],
                       [-105 / 4096, 0, -45 / 1024, 0, -3 / 32, 0, -3 / 8, 0, 0],
                       [525 / 16384, 0, 45 / 1024, 0, 15 / 256, 0, 0, 0, 0],
                       [-175 / 12288, 0, -35 / 3072, 0, 0, 0, 0, 0, 0],
                       [315 / 131072, 0, 0, 0, 0, 0, 0, 0, 0]])
    elif m == 1:
        c0 = np.array([[-175 / 16384, 0, -5 / 256, 0, -3 / 64, 0, -1 / 4, 0, 1],
                       [1 / 61440, 0, 7 / 2048, 0, 1 / 48, 0, 1 / 8, 0, 0],
                       [559 / 368640, 0, 3 / 1280, 0, 1 / 768, 0, 0, 0, 0],
                       [283 / 430080, 0, 17 / 30720, 0, 0, 0, 0, 0, 0],
                       [4397 / 41287680, 0, 0, 0, 0, 0, 0, 0, 0]])
    elif m == 2:
        c0 = np.array([[-175 / 16384, 0, -5 / 256, 0, -3 / 64, 0, -1 / 4, 0, 1],
                       [-901 / 184320, 0, -9 / 1024, 0, -1 / 96, 0, 1 / 8, 0, 0],
                       [-311 / 737280, 0, 17 / 5120, 0, 13 / 768, 0, 0, 0, 0],
                       [899 / 430080, 0, 61 / 15360, 0, 0, 0, 0, 0, 0],
                       [49561 / 41287680, 0, 0, 0, 0, 0, 0, 0, 0]])
    else:
        raise ValueError("m must be 0, 1 or 2, got {!r}".format(m))

    # evaluate each coefficient polynomial at the eccentricity e
    c = np.zeros([np.shape(c0)[0], 1])
    for i in range(np.shape(c0)[0]):
        c[i] = np.polyval(c0[i, :], e)

    return c


if __name__ == "__main__":
    print('Debug')
class MetricsCalculator:
    """Static helpers for instance-level building metrics (IOU, perimeter, area)."""

    def __init__(self):
        # Default IOU threshold used by callers when scoring matches.
        self.iou_threshold = 0.5

    @staticmethod
    def calculate_iou(ground_truth_building, performer_building):
        """
        Compute the intersection over union (IOU) of two Building objects.

        :param ground_truth_building: ground truth building object
        :param performer_building: performer building object
        :return: (IOU, intersection, union); all zeros when the bounding
            boxes cannot overlap or either building has no pixels
        """
        gt = ground_truth_building
        perf = performer_building
        # Cheap bounding-box rejection before the expensive set intersection.
        boxes_disjoint = (gt.min_x > perf.max_x or gt.min_y > perf.max_y or
                          gt.max_x < perf.min_x or gt.max_y < perf.min_y)
        if boxes_disjoint:
            return 0, 0, 0
        if len(gt.points) == 0 or len(perf.points) == 0:
            return 0, 0, 0
        gt_pixels = {tuple(p) for p in gt.points}
        perf_pixels = {tuple(p) for p in perf.points}
        intersection = len(gt_pixels.intersection(perf_pixels))
        union = len(perf.points) + len(gt.points) - intersection
        return intersection / union, intersection, union

    @staticmethod
    def calculate_perimeter(polygon_raster, pixel_size=1):
        """
        Compute the perimeter of a solid raster polygon using the improved
        algorithm described at http://www.geocomputation.org/1999/076/gc_076.htm

        :param polygon_raster: binary raster of the shape to be measured
        :param pixel_size: pixel edge length, default 1; multiplied into the
            pixel perimeter
        :return: perimeter of the shape
        """
        r2 = np.sqrt(2)
        d2 = 2 * np.sqrt(2)
        # Per-pixel perimeter contribution, indexed by the 8-neighbour code
        # (one entry per possible 8-bit neighbourhood pattern).
        value_set = [
            4, 4, 3, r2, 4, 4, r2, d2, 3, r2, 2, 2, 3, r2, r2, r2,
            3, 3, 2, r2, r2, r2, 2, r2, 2, 2, 1, 1, 2, 2, 1, 1,
            4, 4, 3, r2, 4, 4, 3, 3, r2, d2, r2, r2, 3, 3, r2, r2,
            3, 3, 2, 2, 3, 3, 2, 2, 2, 2, 1, r2, 2, 2, 1, 1,
            3, 3, 2, 2, 3, 3, 2, 2, 2, r2, 1, 1, 2, 2, 1, r2,
            2, 2, 1, 1, r2, 2, 1, r2, 1, 1, 0, 0, 1, 2, 0, 0,
            r2, r2, 2, 2, 3, 3, 2, 2, 2, r2, 1, 1, 2, 2, 1, 1,
            r2, 2, 1, 2, r2, r2, 1, 1, 1, r2, 0, 0, 1, 1, 0, 0,
            4, 4, 3, 3, 4, 4, r2, 3, 3, 3, 2, 2, 3, 3, 2, 2,
            r2, 3, r2, r2, d2, 3, r2, r2, 2, 2, 1, 1, 2, 2, r2, 1,
            4, 4, 3, 3, 4, 4, 3, 3, r2, 3, 2, 2, 3, 3, r2, r2,
            r2, 3, 2, r2, 3, 3, 2, r2, 2, 2, 2, 1, 2, r2, 1, 1,
            r2, 3, 2, 2, r2, 3, 2, 2, r2, r2, 1, 1, 2, r2, 2, 1,
            2, 2, 1, 1, r2, 2, 1, 1, 1, 1, 0, 0, r2, 1, 0, 0,
            d2, 3, 2, 2, 3, 3, 2, r2, r2, r2, r2, 1, 2, r2, 1, 1,
            r2, 2, r2, 1, r2, r2, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0,
        ]
        # Lookup table: neighbourhood code -> perimeter contribution.
        pattern_table = dict(enumerate(value_set))
        # Bit weights used to encode the 8-neighbourhood into one byte.
        perimeter_code_window = np.array([[1, 2, 4], [8, 0, 16], [32, 64, 128]])

        # Pad with a border of zeros so every pixel has a full 3x3 window.
        padded = np.pad(polygon_raster, 1, 'constant', constant_values=0)

        rows, cols = np.nonzero(padded)
        contributions = []
        for r, c in zip(rows, cols):
            window = padded[r - 1:r + 2, c - 1:c + 2]
            code = np.sum(np.multiply(window, perimeter_code_window))
            contributions.append(pattern_table[code])
        return np.sum(contributions) * pixel_size

    @staticmethod
    def create_individual_building_raster(building_object):
        """
        Rasterize a single building onto the smallest canvas containing it.

        :param building_object: building object to be rasterized
        :return: binary numpy array covering the building's bounding box
        """
        height = building_object.max_y - building_object.min_y + 1
        width = building_object.max_x - building_object.min_x + 1
        canvas = np.zeros((height, width))
        for point in building_object.points:
            canvas[point[1] - building_object.min_y,
                   point[0] - building_object.min_x] = 1
        return canvas

    @staticmethod
    def calculate_area(building, pixel_size=1):
        """
        Area of a building object in units of pixel_size squared.

        :param building: building object to be evaluated
        :param pixel_size: pixel edge length, default 1 (area in pixels)
        :return: area of the building
        """
        return len(building.points) * (pixel_size ** 2)

    @staticmethod
    def calculate_perimeter_ratio(gt_perimeter, perf_perimeter):
        """
        Ratio of the smaller to the larger of two perimeters.

        :param gt_perimeter: perimeter of ground truth building
        :param perf_perimeter: perimeter of performer building
        :return: perimeter ratio in (0, 1]
        """
        smaller, larger = sorted((gt_perimeter, perf_perimeter))
        return smaller / larger

    @staticmethod
    def calculate_area_ratio(gt_area, perf_area):
        """
        Ratio of the smaller to the larger of two areas.

        :param gt_area: area of ground truth building
        :param perf_area: area of performer building
        :return: area ratio in (0, 1]
        """
        smaller, larger = sorted((gt_area, perf_area))
        return smaller / larger

    @staticmethod
    def calculate_perimeter_diff(gt_perim, perf_perim):
        """
        Absolute perimeter difference, normalized by the ground truth perimeter.

        :param gt_perim: perimeter of ground truth building
        :param perf_perim: perimeter of performer building
        :return: perimeter difference metric
        """
        return abs(gt_perim - perf_perim) / gt_perim

    @staticmethod
    def calculate_area_diff(gt_area, perf_area):
        """
        Absolute area difference, normalized by the ground truth area.

        :param gt_area: area of ground truth building
        :param perf_area: area of performer building
        :return: area difference metric
        """
        return abs(gt_area - perf_area) / gt_area

    @staticmethod
    def calculate_area_from_raster(polygon_raster, pixel_size=1):
        """
        Area of a polygon given as a binary raster.

        :param polygon_raster: binary raster of the shape to be measured
        :param pixel_size: pixel edge length, default 1; squared and
            multiplied into the pixel count
        :return: area of the polygon
        """
        return np.sum(polygon_raster) * (pixel_size ** 2)
class MetricsContainer:
    """Holds the inputs and results of one instance-metrics evaluation run."""

    def __init__(self):
        """
        Constructor for metrics container
        """
        self.name = None
        # Confusion-matrix counts
        self.TP = None
        self.FP = None
        self.FN = None
        # Instances excluded from scoring
        self.ignored_ground_truth_ids = []
        self.ignored_performer_ids = []
        # Summary scores
        self.precision = None
        self.recall = None
        self.f1_score = None
        # Per-instance match bookkeeping
        self.matched_gt_ids = []
        self.unmatched_gt_ids = []
        self.matched_performer_ids = []
        self.unmatched_performer_ids = []
        # Aggregate shape statistics over matched pairs
        self.average_area_difference = None
        self.average_area_ratio = None
        # Visualisation artifacts
        self.stoplight_chart = None
        self.iou_per_gt_building = {}

    def show_stoplight_chart(self):
        """
        Open an OpenCV window showing the stoplight chart; blocks until a
        key is pressed, then closes the window.
        :return:
        """
        if self.stoplight_chart is None:
            print("Stoplight chart does not exist. Please generate the stoplight chart first!")
            return
        cv.namedWindow('stoplight', cv.WINDOW_NORMAL)
        cv.imshow('stoplight', self.stoplight_chart)
        cv.waitKey(0)
        cv.destroyAllWindows()

    def set_values(self, tp, fp, fn, ignored_ground_truth_ids, ignored_performer_ids, precision, recall, f1_score,
                   matched_gt_ids, unmatched_gt_ids, matched_performer_ids, unmatched_performer_ids,
                   average_area_difference, average_area_ratio, stoplight_chart, iou_per_gt_building):
        """
        Set all result fields in a single call instead of individually.

        :param tp: True positive value
        :param fp: False positive value
        :param fn: False negative value
        :param ignored_ground_truth_ids: list of ignored ground truth IDs
        :param ignored_performer_ids: list of ignored performer IDs
        :param precision: TP / (TP + FP)
        :param recall: TP / (TP + FN)
        :param f1_score: 2 * precision * recall / (precision + recall)
        :param matched_gt_ids: list of matched ground truth IDs
        :param unmatched_gt_ids: list of unmatched ground truth IDs (false negatives)
        :param matched_performer_ids: list of matched performer IDs
        :param unmatched_performer_ids: list of unmatched performer IDs (false positives)
        :param average_area_difference: mean of all matched area differences
        :param average_area_ratio: mean of all matched area ratios
        :param stoplight_chart: numpy array of stoplight chart raster
        :param iou_per_gt_building: dict of IOU and centroid per GT building
        :return:
        """
        self.TP = tp
        self.FP = fp
        self.FN = fn
        self.ignored_ground_truth_ids = ignored_ground_truth_ids
        self.ignored_performer_ids = ignored_performer_ids
        self.precision = precision
        self.recall = recall
        self.f1_score = f1_score
        self.matched_gt_ids = matched_gt_ids
        self.unmatched_gt_ids = unmatched_gt_ids
        self.matched_performer_ids = matched_performer_ids
        self.unmatched_performer_ids = unmatched_performer_ids
        self.average_area_difference = average_area_difference
        self.average_area_ratio = average_area_ratio
        self.stoplight_chart = stoplight_chart
        self.iou_per_gt_building = iou_per_gt_building

    def show_metrics(self, suppress_lists=True):
        """
        Print the stored metrics to stdout.
        :param suppress_lists: when False, also print the per-building index lists
        :return:
        """
        print("Printing Results...")
        print(f"TP:{self.TP!r}")
        print(f"FP: {self.FP!r}")
        print(f"FN: {self.FN!r}")
        print(f"Number of ignored GT: {len(self.ignored_ground_truth_ids)!r}")
        print(f"Ignored Perf: {len(self.ignored_performer_ids)!r}")
        print(f"Precision: {self.precision!r}")
        print(f"Recall: {self.recall!r}")
        print(f"F1-Score: {self.f1_score!r}")
        if suppress_lists is False:
            print(f"Ignored GT Indices: {self.ignored_ground_truth_ids!r}")
            print(f"Unmatched GT Indices: {self.unmatched_gt_ids!r}")
            print(f"Ignored Perf Indices: {self.ignored_performer_ids!r}")
            print(f"Matched Perf Indices: {self.matched_performer_ids!r}")
            print(f"Unmatched Perf Indices: {self.unmatched_performer_ids!r}")
        print(f"Average Area Difference: {self.average_area_difference!r}")
        print(f"Average Area Ratio: {self.average_area_ratio!r}")
def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used."""

    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning,
                      stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func(*args, **kwargs)

    return new_func


class TileEvaluator:
    """
    Used to store and compute metric scores
    """
    def __init__(self):
        self.Image_path = None

    @staticmethod
    def read_image(image_path):
        """
        Read an image from disk and return it as a numpy array.
        :param image_path: path to the image file
        :return: numpy array of the image contents
        """
        im = Image.open(image_path, 'r')
        return np.array(im)

    @staticmethod
    def im_show(image_path):
        """
        Read an image, binary-threshold it, and display it in an OpenCV
        window until a key is pressed.
        :param image_path: path to the image file
        """
        img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)
        cv.namedWindow('image', cv.WINDOW_NORMAL)
        ret, threshed = cv.threshold(img, 0, 2 ** 16, cv.THRESH_BINARY)
        print(ret)
        print(threshed.shape, threshed.dtype)
        cv.imshow('image', threshed)
        cv.waitKey(0)
        cv.destroyAllWindows()

    @staticmethod
    def tabulate(image_path):
        """
        Print each building instance label and its pixel count.
        :param image_path: path to the indexed raster image
        """
        img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)
        unique, counts = np.unique(img, return_counts=True)
        for label, count in zip(unique, counts):
            print(label, count)

    @staticmethod
    def get_instance_contours(img, contoured, instance):
        """
        Find the contour of one building instance and draw it (in a random
        color) onto the `contoured` canvas in place.
        :param img: indexed raster of building instances
        :param contoured: 3-channel canvas that is drawn on in place
        :param instance: instance label to outline
        """
        mask = np.zeros(img.shape, dtype=np.uint16)
        mask[img == instance] = 1
        ret, threshed = cv.threshold(mask, 0, 2 ** 16, cv.THRESH_BINARY)
        compressed = threshed.astype(np.uint8)
        contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
        cv.drawContours(contoured, contours, -1, (randint(25, 255), randint(25, 255), randint(25, 255)), 3)

    @staticmethod
    def get_instance_bounding_box(img, bounding_boxes, instance):
        """
        Find the axis-aligned bounding box of one building instance and draw
        it (in a random color) onto the `bounding_boxes` canvas in place.
        :param img: indexed raster of building instances
        :param bounding_boxes: 3-channel canvas that is drawn on in place
        :param instance: instance label to box
        """
        mask = np.zeros(img.shape, dtype=np.uint16)
        mask[img == instance] = 1
        ret, threshed = cv.threshold(mask, 0, 2 ** 16, cv.THRESH_BINARY)
        compressed = threshed.astype(np.uint8)
        contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
        x, y, w, h = cv.boundingRect(contours[0])
        cv.rectangle(bounding_boxes, (x, y), (x + w, y + h), (randint(25, 255), randint(25, 255), randint(25, 255)), 3)

    def draw_contours(self, image_path):
        """
        Draw each building instance's contour in a different color and
        display the result until a key is pressed.
        :param image_path: path to the indexed raster image
        """
        img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)
        contoured = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
        unique, counts = np.unique(img, return_counts=True)
        for uni in unique:
            if uni == 0:  # 0 is background, not a building
                continue
            self.get_instance_contours(img, contoured, uni)

        cv.namedWindow('building contours', cv.WINDOW_NORMAL)
        cv.imshow('building contours', contoured)
        cv.waitKey(0)
        cv.destroyAllWindows()

    def draw_bounding_boxes(self, image_path):
        """
        Draw each building instance's bounding box in a different color and
        display the result until a key is pressed.
        :param image_path: path to the indexed raster image
        """
        img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)
        bboxes = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
        unique, counts = np.unique(img, return_counts=True)
        for uni in unique:
            if uni == 0:  # 0 is background, not a building
                continue
            self.get_instance_bounding_box(img, bboxes, uni)

        cv.namedWindow('building bounding boxes', cv.WINDOW_NORMAL)
        cv.imshow('building bounding boxes', bboxes)
        cv.waitKey(0)
        cv.destroyAllWindows()

    @deprecated
    def merge_ambiguous_buildings(self, raster, kernel_size=4):
        """
        DEPRECATED - Use merge_false_positives in run_metrics.py.
        Naive approach to merging ambiguous buildings. Dilation+erode to remove spaces between buildings
        :param raster: raster (nd.array)
        :param kernel_size: merge kernel size (int)
        :return: merged raster with one unique value per merged region
        """
        # Merge perf buildings that are close together using dilate and erode
        kernel = np.ones((kernel_size, kernel_size), np.uint8)
        dilation = cv.dilate(raster, kernel, iterations=1)
        erosion = cv.erode(dilation, kernel, iterations=1)
        # new building regions have multiple values within
        # turn into binary raster to get new building contours
        erosion[erosion > 0] = 1
        ret, threshed = cv.threshold(erosion, 0, 2 ** 16, cv.THRESH_BINARY)
        compressed = threshed.astype(np.uint8)
        contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
        merged_erosion = np.zeros(erosion.shape, dtype=np.uint16)
        # refill contours with a single value per merged region
        for idx, c in enumerate(contours):
            cv.fillPoly(merged_erosion, [c], color=idx)
        return merged_erosion

    @staticmethod
    def get_num_instances(im, non_building_labels):
        """
        Get the list of unique building instance labels.
        :param im: indexed raster
        :param non_building_labels: labels that do not represent building
            instances (i.e. non-building or void)
        :return: numpy array of building instance labels
        """
        return np.setdiff1d(im, non_building_labels)

    @staticmethod
    def get_current_building_mask(im, instance):
        """
        Build a binary mask for a single building instance.
        :param im: indexed raster
        :param instance: instance label to mask
        :return: (binary mask, pixel area of the instance)
        """
        current_building_mask = np.zeros(im.shape, dtype=np.uint16)
        current_building_mask[im == instance] = 1
        current_building_area = np.sum(current_building_mask)
        return current_building_mask, current_building_area

    def filter_instances_by_size(self, im, unique_instances, min_building_size):
        """
        Filter all instances in a single image by size.
        :param im: indexed raster
        :param unique_instances: array of instance labels to consider
        :param min_building_size: minimum pixel count; 0 disables filtering
        :return: (instances to keep evaluating, instances to ignore)
        :raises ValueError: if min_building_size is negative
        """
        # create array to store building instances to ignore
        ignored_instances = np.array([])
        if min_building_size < 0:
            raise ValueError("Building size filter cannot be a negative number")
        # if min_building_size is 0, keep everything; ignored_instances is empty
        if min_building_size == 0:
            return unique_instances, ignored_instances
        for instance in unique_instances:
            _, current_building_size = self.get_current_building_mask(im, instance)
            if current_building_size < min_building_size:
                # BUGFIX: previously the loop index was appended here instead
                # of the instance label, so np.setdiff1d below removed the
                # wrong instances from the keep list.
                ignored_instances = np.append(ignored_instances, instance)
        return np.setdiff1d(unique_instances, ignored_instances), ignored_instances

    def filter_edge_instances(self, im, current_instance, min_building_size, unique_instances, ignored_instances):
        """
        If the current building touches the image edge AND is smaller than
        min_building_size, move it from the keep list to the ignore list.
        NOTE(review): only the bottom (row == shape[0]-1) and right
        (col == shape[1]-1) edges are checked, not top/left — confirm intent.
        :return: updated (unique_instances, ignored_instances)
        """
        max_x = im.shape[0] - 1
        max_y = im.shape[1] - 1
        current_building_mask, current_building_size = self.get_current_building_mask(im, current_instance)
        # get indices of nonzero elements in current_building_mask
        row, col = np.nonzero(current_building_mask)
        if np.any(row == max_x) or np.any(col == max_y):
            if current_building_size < min_building_size:
                # revise lists if found a new instance to ignore
                ignored_instances = np.append(ignored_instances, current_instance)
                unique_instances = np.setdiff1d(unique_instances, current_instance)
        return unique_instances, ignored_instances

    @staticmethod
    def get_building_contour(current_building_mask):
        """
        Compute the external contour(s) of a binary building mask.
        :param current_building_mask: binary mask of one building
        :return: (contours, hierarchy) as returned by cv.findContours
        """
        ret, threshed = cv.threshold(current_building_mask, 0, 2 ** 16, cv.THRESH_BINARY)
        compressed = threshed.astype(np.uint8)
        current_building_contour, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
        return current_building_contour, hierarchy

    @staticmethod
    def get_bounding_box(current_building_contour):
        """
        Rectangular, non-rotated bounding box of the first contour.
        :param current_building_contour: contour list from get_building_contour
        :return: (x, y, w, h)
        """
        x, y, w, h = cv.boundingRect(current_building_contour[0])
        return x, y, w, h

    @staticmethod
    def crop_bounding_box(im, x, y, w, h):
        """
        Crop an image to a bounding box given in pixel coordinates; used to
        extract the same pixel region from two images being compared.
        :return: cropped view of im
        """
        return im[y:y + h, x:x + w]

    @staticmethod
    def compute_pixel_iou(perf_building_mask, gt_building_mask):
        """
        IoU between performer and ground truth binary building masks.
        NOTE(review): if both masks are empty the union is 0 and this
        divides by zero — callers appear to pass non-empty masks.
        :raises ValueError: if the mask shapes differ
        :return: intersection-over-union as a float
        """
        if perf_building_mask.shape != gt_building_mask.shape:
            raise ValueError("Dimension mismatch")
        intersection = np.sum(perf_building_mask & gt_building_mask)
        union = np.sum(perf_building_mask | gt_building_mask)
        iou = intersection / union
        return iou

    @staticmethod
    def generate_stoplight_chart(gt, perf, tp_indices, fn_indices, fp_indices, ignore_gt_indices,
                                 ignore_perf_indices, uncertain_mask=None):
        """
        Render a BGR stoplight chart: white = true positive, blue = false
        negative, red = false positive, yellow = ignored, with ground truth
        outlines drawn in black over a light gray background.
        :raises ValueError: if gt and perf shapes differ
        :return: numpy uint8 array (H, W, 3), BGR
        """
        if gt.shape != perf.shape:
            raise ValueError("Dimension mismatch")
        # Light gray background (220, 220, 220)
        stoplight_chart = np.multiply(np.ones((gt.shape[0], gt.shape[1], 3), dtype=np.uint8), 220)
        # BGR palette
        red = [0, 0, 255]
        blue = [255, 0, 0]
        yellow = [0, 255, 255]
        white = [255, 255, 255]
        black = [0, 0, 0]
        # true positives
        for i in tp_indices:
            stoplight_chart[perf == i] = white
        # false negatives
        for i in fn_indices:
            stoplight_chart[gt == i] = blue
        # false positives
        for i in fp_indices:
            stoplight_chart[perf == i] = red
        # ignored instances
        for i in ignore_gt_indices:
            stoplight_chart[gt == i] = yellow
        for i in ignore_perf_indices:
            stoplight_chart[perf == i] = yellow
        # uncertain instances
        if uncertain_mask is not None:
            stoplight_chart[uncertain_mask == 1] = white
        # get contours of ground truth buildings
        gt_binary = gt.copy()
        gt_binary[gt != 0] = 1
        gt_binary = gt_binary.astype(np.uint8)
        ret, threshed = cv.threshold(gt_binary, 0, 255, cv.THRESH_BINARY)
        compressed = threshed.astype(np.uint8)
        contours, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)

        cv.drawContours(stoplight_chart, contours, -1, black, 2)
        return stoplight_chart

    def draw_iou_on_stoplight(self, stoplight_chart, iou, position):
        """
        :param stoplight_chart: stoplight chart
        :param iou: list of IoUs
        :param position: list of positions of building centers to draw IoUs on
        :return: (stoplight chart with IoUs drawn, same chart converted to RGB)
        """
        iou = np.around(iou, decimals=3)
        stoplight_with_iou = stoplight_chart.copy()
        for current_iou, current_position in list(zip(iou, position)):
            cv.putText(stoplight_with_iou, str(current_iou),
                       (current_position[0].astype('int'), current_position[1].astype('int')),
                       cv.FONT_HERSHEY_PLAIN, 1, [0, 0, 0])
        stoplight_with_iou_rgb = self.bgr_to_rgb(stoplight_with_iou)
        return stoplight_with_iou, stoplight_with_iou_rgb

    @staticmethod
    def bgr_to_rgb(bgr_image):
        """Reverse the channel order of a BGR image to RGB (zero-copy view)."""
        rgb_image = bgr_image[..., ::-1]
        return rgb_image
def merge_buildings(edge_x, edge_y, gt_buildings, performer_buildings):
    """
    Merge overlapping performer buildings after a metrics pass.

    Produces a new dictionary of performer buildings in which every group of
    performer footprints that partially overlapped the same ground truth
    building (recorded in their fp_overlap_with lists) is fused into one
    building. This makes evaluation fairer when a performer split one true
    building into several pieces that each failed the IOU threshold alone.

    :param edge_x: maximum horizontal resolution
    :param edge_y: maximum vertical resolution
    :param gt_buildings: ground truth building objects already run through metrics
    :param performer_buildings: performer building objects already run against
        the provided ground truth buildings
    :return: dict of merged performer buildings keyed by label
    """
    metrics_calc = MetricsCalculator()
    merged_performer_buildings = {}
    consumed_labels = []
    for _, perf_bldg in performer_buildings.items():
        if perf_bldg.label in consumed_labels:
            continue  # already folded into an earlier merge
        merged_bldg = Building(perf_bldg.label)
        if len(perf_bldg.fp_overlap_with) == 0:
            # No partial overlaps: carry the building over unchanged.
            merged_performer_buildings[merged_bldg.label] = perf_bldg
            continue
        if len(perf_bldg.fp_overlap_with) > 1:
            print('More than one fp overlap, defaulting to first one...')
        overlapped_gt = gt_buildings[perf_bldg.fp_overlap_with[0]]
        # Fuse every performer building that overlapped the same GT building.
        for label in overlapped_gt.fp_overlap_with:
            consumed_labels.append(label)
            pts = performer_buildings[label].points
            if merged_bldg.points is None:
                merged_bldg.points = pts
            else:
                merged_bldg.points = np.concatenate((merged_bldg.points, pts), axis=0)
        # Bounding box of the merged footprint
        merged_bldg.min_x = min(merged_bldg.points[:, 0])
        merged_bldg.min_y = min(merged_bldg.points[:, 1])
        merged_bldg.max_x = max(merged_bldg.points[:, 0])
        merged_bldg.max_y = max(merged_bldg.points[:, 1])
        # TODO: Figure out what to do about perimeter
        merged_bldg.perimeter = 1
        merged_bldg.area = metrics_calc.calculate_area(merged_bldg)
        merged_performer_buildings[merged_bldg.label] = merged_bldg
        # Flag buildings that touch the AOI boundary.
        if merged_bldg.min_x == 0 or merged_bldg.min_y == 0 or \
                merged_bldg.max_x == edge_x or merged_bldg.max_y == edge_y:
            merged_bldg.on_boundary = True
    return merged_performer_buildings


def main():
    print("Debug")


if __name__ == "__main__":
    main()
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | from core3dmetrics.instancemetrics.TileEvaluator import TileEvaluator, merge_buildings 3 | import time as time 4 | from core3dmetrics.instancemetrics.Building_Classes import Building, create_raster_from_building_objects 5 | from core3dmetrics.instancemetrics.MetricsCalculator import MetricsCalculator as MetricsCalc 6 | from core3dmetrics.instancemetrics.MetricsContainer import MetricsContainer 7 | 8 | 9 | def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#'): 10 | """ 11 | Call in a loop to create terminal progress bar 12 | :param iteration: current iteration (Int) 13 | :param total: total iterations (Int) 14 | :param prefix: prefix string (Str) 15 | :param suffix: suffix string (Str) 16 | :param decimals: positive number of decimals in percent complete (Int) 17 | :param length: character length of bar (Int) 18 | :param fill: bar fill character (Str) 19 | :return: 20 | """ 21 | try: 22 | percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) 23 | filled_length = int(length * iteration // total) 24 | bar = fill * filled_length + '-' * (length - filled_length) 25 | print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r') 26 | # Print New Line on Complete 27 | if iteration == total: 28 | print() 29 | except(ZeroDivisionError): 30 | print("Dividing by zero, check loop parameters...") 31 | 32 | 33 | def calculate_metrics_iterator(gt_buildings, gt_indx_raster, ignored_gt, perf_indx_raster, performer_buildings, 34 | iou_threshold, metrics_container): 35 | 36 | """ 37 | Runs metrics on list of ground truth and performer buildings 38 | :param gt_buildings: list of ground truth building objects 39 | :param gt_indx_raster: numpy array of the ground truth indexed raster. 
def calculate_metrics_iterator(gt_buildings, gt_indx_raster, ignored_gt, perf_indx_raster, performer_buildings,
                               iou_threshold, metrics_container):
    """
    Run instance metrics over lists of ground truth and performer buildings.

    :param gt_buildings: dict of ground truth building objects keyed by label
    :param gt_indx_raster: numpy array of the ground truth indexed raster.
        Used for the stoplight chart
    :param ignored_gt: list of ignored ground truth indices
    :param perf_indx_raster: numpy array of the performer indexed raster.
        Used for the stoplight chart
    :param performer_buildings: dict of performer building objects keyed by label
    :param iou_threshold: IOU threshold for a passing match
    :param metrics_container: metrics container object to store the metrics
    :return: metrics container containing all the metrics
    """
    iou_per_gt_building = {}
    matched_performer_indices = []
    ignored_performer = []
    fp_indices = []
    all_perimeter_diff = []
    all_perimeter_ratio = []
    all_area_diff = []
    all_area_ratio = []
    # Fraction of a performer building that must lie inside an ignored GT
    # building for the performer building to be ignored too.
    ignore_threshold = 0.5  # parser arg
    print("Iterating through performer buildings")
    TP = 0
    FP = 0
    print("Total performer buildings:" + repr(performer_buildings.__len__()))
    print("Total ground truth buildings:" + repr(gt_buildings.__len__()))
    iterations = 0
    total_iterations = len(performer_buildings)
    print_progress_bar(0, total_iterations, prefix='Progress:', suffix='Complete', length=50)
    for _, current_perf_building in performer_buildings.items():
        iterations = iterations + 1
        print_progress_bar(iterations, total_iterations, prefix='Progress:', suffix='Complete', length=50)
        if current_perf_building.is_ignored is True:
            continue
        for _, current_gt_building in gt_buildings.items():
            iou, intersection, union = MetricsCalc.calculate_iou(current_gt_building, current_perf_building)
            # A performer building mostly covered by an ignored GT building
            # is itself ignored rather than counted as a false positive.
            if current_gt_building.is_ignored is True:
                if intersection > ignore_threshold * current_perf_building.area:
                    current_perf_building.is_ignored = True
                    current_perf_building.match = True
                    ignored_performer.append(current_perf_building.label)
                    break

            if iou >= iou_threshold:
                # Record IOU (and GT centroid, for chart labelling) of the matched GT building
                iou_per_gt_building[current_gt_building.label] = [iou, [np.average(current_gt_building.points[:, 0]),
                                                                        np.average(current_gt_building.points[:, 1])]]
                # Do not let multiple performer buildings match one GT building
                if current_gt_building.match is True:
                    break
                TP = TP + 1
                matched_performer_indices.append(current_perf_building.label)
                current_gt_building.match = True
                current_gt_building.matched_with = current_perf_building.label
                current_perf_building.match = True
                current_perf_building.matched_with = current_gt_building.label
                all_perimeter_diff.append(MetricsCalc.calculate_perimeter_diff(
                    current_gt_building.perimeter, current_perf_building.perimeter))
                all_perimeter_ratio.append(MetricsCalc.calculate_perimeter_ratio(
                    current_gt_building.perimeter, current_perf_building.perimeter))
                all_area_diff.append(MetricsCalc.calculate_area_diff(
                    current_gt_building.area, current_perf_building.area))
                all_area_ratio.append(MetricsCalc.calculate_area_ratio(
                    current_gt_building.area, current_perf_building.area))
                break
            elif 0 < iou < iou_threshold:
                # Partial overlap below threshold: record for later merging.
                current_gt_building.fp_overlap_with.append(current_perf_building.label)
                current_perf_building.fp_overlap_with.append(current_gt_building.label)

        # If after going through all GT buildings there is no match, it's a FP
        if current_perf_building.match is False:
            fp_indices.append(current_perf_building.label)
            FP = FP + 1

    # All remaining unmatched GT buildings are false negatives
    fn_indices = [idx for idx in gt_buildings if gt_buildings[idx].match is False]
    FN = len(fn_indices)
    # BUGFIX: precision was previously computed unguarded and raised
    # ZeroDivisionError when TP + FP == 0 (recall and f1 were guarded).
    if TP + FP == 0:
        precision = 0
    else:
        precision = TP / (TP + FP)
    if TP + FN == 0:
        recall = 0
    else:
        recall = TP / (TP + FN)
    if precision + recall == 0:
        f1_score = 0
    else:
        f1_score = 2 * precision * recall / (precision + recall)
    print("Generating Stoplight Chart...")
    stoplight_generator = TileEvaluator()
    stoplight = stoplight_generator.generate_stoplight_chart(gt_indx_raster, perf_indx_raster,
                                                             matched_performer_indices, fn_indices, fp_indices,
                                                             ignored_gt, ignored_performer)
    # NOTE(review): np.mean of an empty list (no matches) yields nan with a
    # RuntimeWarning — confirm downstream consumers tolerate nan averages.
    metrics_container.set_values(TP, FP, FN, ignored_gt, ignored_performer, precision, recall, f1_score, None,
                                 fn_indices, matched_performer_indices, fp_indices, np.mean(all_area_diff),
                                 np.mean(all_area_ratio), stoplight, iou_per_gt_building)
    return metrics_container
np.array(list(zip(x_points, y_points))) 167 | building_raster = MetricsCalc.create_individual_building_raster(current_building) 168 | # Calculate Perimeter 169 | current_building.perimeter = MetricsCalc.calculate_perimeter(building_raster) 170 | # Calculate Area 171 | current_building.area = MetricsCalc.calculate_area(current_building) 172 | # Create dictionary entry based on building index 173 | gt_buildings[current_index] = current_building 174 | # Check if building is on the edge of AOI 175 | if current_building.min_x == 0 or current_building.min_y == 0 or current_building.max_x == edge_x \ 176 | or current_building.max_y == edge_y: 177 | current_building.on_boundary = True 178 | # TODO: Filter by area, not only minimum size 179 | if current_building.area < params.MIN_AREA_FILTER: 180 | current_building.is_ignored = True 181 | ignored_gt.append(current_building.label) 182 | ignored_gt = list(np.unique(ignored_gt)) 183 | # Create performer building objects 184 | print("Creating performer building objects...") 185 | performer_buildings = {} 186 | iterations = 0 187 | total_iterations = len(unique_performer_ids) 188 | print_progress_bar(0, total_iterations, prefix='Progress:', suffix='Complete', length=50) 189 | for current_index in unique_performer_ids: 190 | iterations = iterations + 1 191 | print_progress_bar(iterations, total_iterations, prefix='Progress:', suffix='Complete', length=50) 192 | # print(str(iterations) + " out of " + str(total_iterations)) 193 | if current_index == 0: 194 | continue 195 | current_building = Building(current_index) 196 | # Get x,y points of building pixels 197 | x_points, y_points = np.where(perf_indx_raster == current_index) 198 | if len(x_points) == 0 or len(y_points) == 0: 199 | continue 200 | # Get minimum and maximum points 201 | current_building.min_x = x_points.min() 202 | current_building.min_y = y_points.min() 203 | current_building.max_x = x_points.max() 204 | current_building.max_y = y_points.max() 205 | 
current_building.points = np.array(list(zip(x_points, y_points))) 206 | building_raster = MetricsCalc.create_individual_building_raster(current_building) 207 | # Calculate Perimeter 208 | current_building.perimeter = MetricsCalc.calculate_perimeter(building_raster) 209 | # Calculate Area 210 | current_building.area = MetricsCalc.calculate_area(current_building) 211 | # Create dictionary entry based on building index 212 | performer_buildings[current_index] = current_building 213 | # Check if building is on the edge of AOI 214 | if current_building.min_x == 0 or current_building.min_y == 0 or current_building.max_x == edge_x \ 215 | or current_building.max_y == edge_y: 216 | current_building.on_boundary = True 217 | # Create a metrics container 218 | metrics_container_no_merge = MetricsContainer() 219 | metrics_container_no_merge.name = "No Merge" 220 | # Calculate Metrics 221 | print("Calculating metrics without merge...") 222 | metrics_container_no_merge = calculate_metrics_iterator(gt_buildings, gt_indx_raster, ignored_gt, perf_indx_raster, 223 | performer_buildings, params.IOU_THRESHOLD, 224 | metrics_container_no_merge) 225 | metrics_container_no_merge.show_metrics() 226 | 227 | elapsed_time = time.time() - start_time 228 | print("Elapsed time: " + repr(elapsed_time)) 229 | return metrics_container_no_merge 230 | 231 | -------------------------------------------------------------------------------- /core3dmetrics/summarize_metrics.py: -------------------------------------------------------------------------------- 1 | import json 2 | import jsonschema 3 | import numpy as np 4 | import csv 5 | import glob 6 | import os 7 | from pathlib import Path 8 | from MetricContainer import Result 9 | try: 10 | import core3dmetrics.geometrics as geo 11 | except: 12 | import geometrics as geo 13 | 14 | 15 | # BAA Thresholds 16 | class BAAThresholds: 17 | 18 | def __init__(self): 19 | self.geolocation_error = np.array([2, 1.5, 1.5, 1])*3.5 20 | self.spherical_error = 
np.array([.86, .86, .86, .86]) 21 | self.completeness_2d = np.array([0.8, 0.85, 0.9, 0.95]) 22 | self.correctness_2d = np.array([0.8, 0.85, 0.9, 0.95]) 23 | self.completeness_3d = np.array([0.6, 0.7, 0.8, 0.9]) 24 | self.correctness_3d = np.array([0.6, 0.7, 0.8, 0.9]) 25 | self.material_accuracy = np.array([0.85, 0.90, 0.95, 0.98]) 26 | self.model_build_time = np.array([8, 2, 2, 1]) 27 | self.fscore_2d = (2*self.completeness_2d * self.correctness_2d) / (self.completeness_2d + self.correctness_2d) 28 | self.fscore_3d = (2*self.completeness_3d * self.correctness_3d) / (self.completeness_3d + self.correctness_3d) 29 | self.jaccard_index_2d = np.round(self.fscore_2d / (2-self.fscore_2d), decimals=2) 30 | self.jaccard_index_3d = np.round(self.fscore_3d / (2-self.fscore_3d), decimals=2) 31 | 32 | 33 | def summarize_metrics(root_dir, teams, aois, ref_path=None, test_path=None): 34 | # load results 35 | is_config = True 36 | all_results = {} 37 | all_config = {} 38 | # Parse results 39 | for current_team in teams: 40 | for current_aoi in aois: 41 | metrics_json_filepath = None 42 | current_dir = Path(root_dir, current_team, current_aoi) 43 | for file in glob.glob(os.path.join(current_dir, "*.config_metrics.json")): 44 | results_path = file 45 | metrics_json_filepath = Path(results_path) 46 | # metrics_json_filepath = Path(root_dir, current_team, current_aoi, "%s.config_metrics.json" % current_aoi) 47 | if metrics_json_filepath.is_file(): 48 | with open(str(metrics_json_filepath.absolute())) as json_file: 49 | json_data = json.load(json_file) 50 | # Check offset file 51 | current_dir = Path(root_dir, current_team, current_aoi) 52 | offset_file_path = None 53 | for file in glob.glob(os.path.join(current_dir, "*offsets.txt")): 54 | offset_file_path = file 55 | offset_file_path = Path(offset_file_path) 56 | # offset_file_path = Path(root_dir, current_team, "%s.offset.txt" % current_aoi) 57 | if offset_file_path.is_file(): 58 | with open(str(offset_file_path.absolute())) as 
offset_file: 59 | if offset_file_path.suffix is ".json": 60 | offset_data = json.load(offset_file) 61 | else: 62 | offset_data = {} 63 | for last_line in offset_file: 64 | try: 65 | offset_data["offset"] = [float(idx) for idx in last_line.split()] 66 | except ValueError: 67 | continue 68 | n = {} 69 | n["threshold_geometry"] = json_data["threshold_geometry"] 70 | n["relative_accuracy"] = json_data["relative_accuracy"] 71 | n["registration_offset"] = offset_data["offset"] 72 | n["geolocation_error"] = np.linalg.norm(n["registration_offset"], 2) 73 | n["terrain_accuracy"] = None 74 | json_data = n 75 | del n, offset_data 76 | 77 | if "terrain_accuracy" in json_data.keys(): 78 | n = {} 79 | n["threshold_geometry"] = {} 80 | n["relative_accuracy"] = {} 81 | n["objectwise"] = {} 82 | classes_skipped = 0 83 | for cls in range(0, json_data["threshold_geometry"].__len__()): 84 | current_class = json_data["threshold_geometry"][cls]['CLSValue'][0] 85 | if np.isnan(json_data["threshold_geometry"][cls]['2D']['fscore']): 86 | classes_skipped = classes_skipped+1 87 | continue 88 | n["threshold_geometry"].update({current_class: json_data["threshold_geometry"][cls]}) 89 | n["relative_accuracy"].update({current_class: json_data["relative_accuracy"][cls-classes_skipped]}) 90 | try: 91 | n["objectwise"].update({current_class: json_data["objectwise"][cls]}) 92 | except KeyError: 93 | print('No objectwise metrics found...') 94 | except IndexError: 95 | print('Classification doesnt exist...') 96 | n["registration_offset"] = json_data["registration_offset"] 97 | n["geolocation_error"] = json_data["geolocation_error"] 98 | n["terrain_accuracy"] = None 99 | try: 100 | n["instance_f1"] = json_data["objectwise"][cls]["instance_f1"] 101 | n["instance_f1_merge_fp"] = json_data["objectwise"][cls]["instance_f1_merge_fp"] 102 | n["instance_f1_merge_fn"] = json_data["objectwise"][cls]["instance_f1_merge_fn"] 103 | except KeyError: 104 | n["instance_f1"] = np.nan 105 | n["instance_f1_merge_fp"] = 
np.nan 106 | n["instance_f1_merge_fn"] = np.nan 107 | except IndexError: 108 | n["instance_f1"] = np.nan 109 | n["instance_f1_merge_fp"] = np.nan 110 | n["instance_f1_merge_fn"] = np.nan 111 | print('Classification doesnt exist...') 112 | 113 | json_data = n 114 | del n 115 | 116 | container = Result(current_team, current_aoi, json_data) 117 | if current_team not in all_results.keys(): 118 | all_results[current_team] = {} 119 | all_results[current_team].update({current_aoi: container}) 120 | else: 121 | container = Result(current_team, current_aoi, "") 122 | all_results[current_team] = {current_aoi: container} 123 | 124 | # Try to find config file 125 | current_dir = Path(root_dir, current_team, current_aoi) 126 | config_path = None 127 | for file in glob.glob(os.path.join(current_dir,"*.config")): 128 | config_path = file 129 | config_path = Path(config_path) 130 | # config_path = Path(root_dir, current_team, current_aoi, current_aoi + '.config') 131 | if config_path.is_file(): 132 | config = geo.parse_config(str(config_path.absolute()), 133 | refpath=(ref_path or str(config_path.parent)), 134 | testpath=(test_path or str(config_path.parent))) 135 | elif Path(config_path.parent, config_path.stem + ".json").is_file(): 136 | print('Old config file, parsing via json...') 137 | is_config = False 138 | config_path = Path(config_path.parent, config_path.stem + ".json") 139 | with open(str(config_path.absolute())) as config_file_json: 140 | config = json.load(config_file_json) 141 | 142 | # Flatten list in case of json/config discrepencies 143 | if not is_config: 144 | config["INPUT.REF"]["CLSMatchValue"] = [item for sublist in config["INPUT.REF"]["CLSMatchValue"] 145 | for item in sublist] 146 | # Store config for each aoi 147 | if current_team not in all_config.keys(): 148 | all_config[current_team] = {} 149 | if current_aoi not in all_config.keys(): 150 | all_config[current_team][current_aoi] = {} 151 | all_config[current_team][current_aoi] = config 152 | 
all_config[current_team][current_aoi].update({'path': config_path}) 153 | 154 | # compute averaged metrics and write out objectwise metrics 155 | with open('objectwise_metrics.csv', mode='w') as objectwise_file: 156 | csv_writer = csv.writer(objectwise_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) 157 | csv_writer.writerow(['Building Index', '2D IOU', '3D IOU', 'HRMSE', 'ZRMSE']) 158 | averaged_results = {} 159 | for team in all_results: 160 | sum_2d_completeness = {} 161 | sum_2d_correctness = {} 162 | sum_2d_jaccard_index = {} 163 | sum_2d_fscore = {} 164 | sum_3d_completeness = {} 165 | sum_3d_correctness = {} 166 | sum_3d_jaccard_index = {} 167 | sum_3d_fscore = {} 168 | sum_geolocation_error = 0 169 | sum_hrmse = {} 170 | sum_zrmse = {} 171 | averaged_results[team] = {} 172 | evaluated_classes = [] 173 | for aoi in all_results[team]: 174 | sum_geolocation_error = sum_geolocation_error + all_results[team][aoi].results["geolocation_error"] 175 | for cls in all_results[team][aoi].results["threshold_geometry"]: 176 | evaluated_classes.append(cls) 177 | if cls not in sum_2d_completeness.keys(): 178 | sum_2d_completeness[cls] = 0 179 | sum_2d_correctness[cls] = 0 180 | sum_2d_jaccard_index[cls] = 0 181 | sum_2d_fscore[cls] = 0 182 | sum_3d_completeness[cls] = 0 183 | sum_3d_correctness[cls] = 0 184 | sum_3d_jaccard_index[cls] = 0 185 | sum_3d_fscore[cls] = 0 186 | sum_zrmse[cls] = 0 187 | sum_hrmse[cls] = 0 188 | sum_2d_completeness[cls] = sum_2d_completeness[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['2D']['completeness'] 189 | sum_2d_correctness[cls] = sum_2d_correctness[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['2D']['correctness'] 190 | sum_2d_jaccard_index[cls] = sum_2d_jaccard_index[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['2D']['jaccardIndex'] 191 | sum_2d_fscore[cls] = sum_2d_fscore[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['2D']['fscore'] 192 | 
sum_3d_completeness[cls] = sum_3d_completeness[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['3D']['completeness'] 193 | sum_3d_correctness[cls] = sum_3d_correctness[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['3D']['correctness'] 194 | sum_3d_jaccard_index[cls] = sum_3d_jaccard_index[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['3D']['jaccardIndex'] 195 | sum_3d_fscore[cls] = sum_3d_fscore[cls] + all_results[team][aoi].results['threshold_geometry'][cls]['3D']['fscore'] 196 | sum_hrmse[cls] = sum_hrmse[cls] + all_results[team][aoi].results['relative_accuracy'][cls]["hrmse"] 197 | sum_zrmse[cls] = sum_zrmse[cls] + all_results[team][aoi].results['relative_accuracy'][cls]["zrmse"] 198 | # Average results for evaluated classes in config file 199 | averaged_results[team]["geolocation_error"] = np.round( 200 | sum_geolocation_error / all_results[team].__len__(), decimals=2) 201 | # TODO: Need to make config specific to each config file, but for now it doesn't matter 202 | for cls in np.unique(evaluated_classes): 203 | try: 204 | averaged_results[team][cls] = {} 205 | averaged_results[team][cls]["2d_completeness"] = np.round( 206 | sum_2d_completeness[cls] / evaluated_classes.count(cls), decimals=2) 207 | averaged_results[team][cls]["2d_correctness"] = np.round( 208 | sum_2d_correctness[cls] / evaluated_classes.count(cls), decimals=2) 209 | averaged_results[team][cls]["2d_jaccard_index"] = np.round( 210 | sum_2d_jaccard_index[cls] / evaluated_classes.count(cls), decimals=2) 211 | averaged_results[team][cls]["2d_fscore"] = np.round( 212 | sum_2d_fscore[cls] / evaluated_classes.count(cls), decimals=2) 213 | averaged_results[team][cls]["3d_completeness"] = np.round( 214 | sum_3d_completeness[cls] / evaluated_classes.count(cls), decimals=2) 215 | averaged_results[team][cls]["3d_correctness"] = np.round( 216 | sum_3d_correctness[cls] /evaluated_classes.count(cls), decimals=2) 217 | 
averaged_results[team][cls]["3d_jaccard_index"] = np.round( 218 | sum_3d_jaccard_index[cls] / evaluated_classes.count(cls), decimals=2) 219 | averaged_results[team][cls]["fscore"] = np.round( 220 | sum_3d_fscore[cls] / evaluated_classes.count(cls), decimals=2) 221 | averaged_results[team][cls]["hrmse"] = np.round( 222 | sum_hrmse[cls] / evaluated_classes.count(cls), decimals=2) 223 | averaged_results[team][cls]["zrmse"] = np.round( 224 | sum_zrmse[cls] / evaluated_classes.count(cls), decimals=2) 225 | except KeyError: 226 | print('Class not found, skipping...') 227 | continue 228 | 229 | return averaged_results, all_results, all_config 230 | 231 | 232 | def main(): 233 | print("For Debug") 234 | 235 | 236 | if __name__ == "__main__": 237 | main() 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.3" 2 | 3 | services: 4 | 5 | geometrics: 6 | build: 7 | context: . 
8 | dockerfile: Dockerfile 9 | image: jhuapl/geometrics 10 | 11 | geometrics_develop: 12 | extends: 13 | service: geometrics 14 | build: 15 | args: 16 | - DOCKER_DEPLOY=false 17 | image: jhuapl/geometrics-develop 18 | volumes: 19 | - .:/src:ro 20 | working_dir: /src 21 | -------------------------------------------------------------------------------- /entrypoint.bsh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | # install current module in develop mode 6 | if [ "$DOCKER_DEPLOY" = false ] ; then 7 | echo "ENTERING DEVELOP MODE" 8 | echo "Installing local core3dmetrics code via 'pip install -e'" 9 | 10 | # install from linked shadow directory 11 | # this avoids a "*.egg-info" directory being created in the source directory 12 | mkdir -p /src-shadow 13 | ln -s /src/* /src-shadow 14 | pip3 install --no-deps -e /src-shadow 15 | 16 | printf "\n\n" 17 | fi 18 | 19 | # run incoming command 20 | if [ "${@+$1}" == "test" ]; then 21 | python3 -m unittest discover -v 22 | else 23 | exec "${@}" 24 | fi 25 | 26 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Always prefer setuptools over distutils 2 | from setuptools import setup, find_packages 3 | from os import path 4 | 5 | here = path.abspath(path.dirname(__file__)) 6 | 7 | long_description = """JHU/APL is supporting the IARPA CORE3D program by providing independent test and evaluation of the performer team solutions for building 3D models based on satellite images and other sources. This is a repository for the metrics being developed to support the program. Performer teams are working with JHU/APL to improve the metrics software and contribute additional metrics that may be used for the program. 
""" 8 | 9 | setup( 10 | name='core3dmetrics', 11 | version='0.0.0', 12 | description='JHU/APL Metrics code for IARPA/CORE3D', 13 | long_description=long_description, 14 | url='https://github.com/pubgeo/core3d-metric', 15 | author='JHU/APL', 16 | author_email='john.doe@jhuapl.edu', 17 | packages=find_packages(exclude=['aoi-example']), 18 | include_package_data=True, 19 | install_requires=['gdal', 'laspy', 'matplotlib', 'numpy', 'scipy'], 20 | entry_points = {'console_scripts': ['core3d-metrics=core3dmetrics:main']}, 21 | ## entry_points={ # Optional 22 | ## 'console_scripts': [ 23 | ## 'sample=sample:main', 24 | ## ], 25 | ## }, 26 | ) 27 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pubgeo/core3d-metrics/fcda0d56869f8b2b3c506a3b9601b2c6ab491617/test/__init__.py -------------------------------------------------------------------------------- /test/test_geometry_metrics.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | import core3dmetrics.geometrics as geo 5 | 6 | 7 | class TestGeometryMetrics(unittest.TestCase): 8 | 9 | def setUp(self): 10 | pass 11 | 12 | # common test (unittest does not run, as there is no "test" prefix) 13 | def common_test(self,reffac,testfac,testshft,metrics_expected): 14 | 15 | # base inputs 16 | DSM = np.array([[1,1,0,0],[1,1,0,0],[0,0,0,0],[0,0,0,0]],dtype=np.float32) 17 | sh = DSM.shape 18 | DTM = np.zeros(sh,dtype=np.float32) 19 | 20 | tform = [0,.5,0,0,0,.5] 21 | ignore = np.zeros(sh,dtype=np.bool) 22 | 23 | # ref DSM & footprint 24 | refDSM = reffac*DSM 25 | refMSK = (refDSM!=0) 26 | 27 | # test DSM & footprint 28 | testDSM = testfac*DSM 29 | testDSM = np.roll(testDSM,testshft[0],axis=0) 30 | testDSM = np.roll(testDSM,testshft[1],axis=1) 31 | testMSK = (testDSM!=0) 32 | 33 | # 
calculate metrics 34 | metrics = geo.run_threshold_geometry_metrics( 35 | refDSM, DTM, refMSK, testDSM, DTM, testMSK, tform, ignore, 36 | plot=None,verbose=False) 37 | 38 | # compare subset of metrics 39 | for section in metrics_expected: 40 | metrics_subset = {k:metrics[section].get(k) for k in metrics_expected[section]} 41 | self.assertDictEqual(metrics_subset, metrics_expected[section], 42 | '{} metrics are not as expected'.format(section)) 43 | 44 | 45 | # ref/test both above ground 46 | def test_both_positive(self): 47 | metrics_expected = { 48 | "2D": {"TP": 4.0, "FN": 0.0, "FP": 0.0}, 49 | "3D": {"TP": 1.0, "FN": 0.0, "FP": 0.0}, 50 | } 51 | self.common_test(1.0,1.0,(0,0),metrics_expected) 52 | 53 | # ref-test both below ground 54 | def test_both_negative(self): 55 | metrics_expected = { 56 | "2D": {"TP": 4.0, "FN": 0.0, "FP": 0.0}, 57 | "3D": {"TP": 1.0, "FN": 0.0, "FP": 0.0}, 58 | } 59 | self.common_test(-1.0,-1.0,(0,0),metrics_expected) 60 | 61 | # testDSM below ground 62 | def test_negative_testDSM(self): 63 | metrics_expected = { 64 | "2D": {"TP": 4.0, "FN": 0.0, "FP": 0.0}, 65 | "3D": {"TP": 0.0, "FN": 1.0, "FP": 1.0}, 66 | } 67 | self.common_test(1.0,-1.0,(0,0),metrics_expected) 68 | 69 | # refDSM below ground 70 | def test_negative_refDSM(self): 71 | metrics_expected = { 72 | "2D": {"TP": 4.0, "FN": 0.0, "FP": 0.0}, 73 | "3D": {"TP": 0.0, "FN": 1.0, "FP": 1.0}, 74 | } 75 | self.common_test(-1.0,1.0,(0,0),metrics_expected) 76 | 77 | # shift testDSM 78 | def test_shift_testDSM(self): 79 | metrics_expected = { 80 | "2D": {"TP": 1.0, "FN": 3.0, "FP": 3.0}, 81 | "3D": {"TP": 0.25, "FN": 0.75, "FP": 0.75}, 82 | } 83 | self.common_test(1.0,1.0,(1,1),metrics_expected) 84 | 85 | 86 | 87 | 88 | if __name__ == '__main__': 89 | unittest.main() 90 | 91 | -------------------------------------------------------------------------------- /utils/OrthoImage.py: -------------------------------------------------------------------------------- 1 | import gdal, ogr, 
gdalconst, osr
from pathlib import Path
import re
import sys
import numpy as np

MAX_INT = np.iinfo(int).max
MAX_FLOAT = np.finfo(float).max


def gdal_error_handler(err_class, err_num, err_msg):
    """GDAL error callback: print error number, class name and message."""
    errtype = {
        gdal.CE_None: 'None',
        gdal.CE_Debug: 'Debug',
        gdal.CE_Warning: 'Warning',
        gdal.CE_Failure: 'Failure',
        gdal.CE_Fatal: 'Fatal'
    }
    err_msg = err_msg.replace('\n', ' ')
    err_class = errtype.get(err_class, 'None')
    print('Error Number: %s' % err_num)
    print('Error Type: %s' % err_class)
    print('Error Message: %s' % err_msg)


def get_max_value_of_datatype(datatype):
    """Return the maximum representable value for a GDAL type name or numpy dtype, or None if unknown."""
    if datatype == 'Int8' or datatype == np.int8:
        return 127
    elif datatype == 'Uint8' or datatype == np.uint8:
        return 255
    elif datatype == 'UInt16' or datatype == np.ushort:
        return 65535
    elif datatype == 'Int16' or datatype == np.short:
        return 32767
    elif datatype == 'Float32' or datatype == np.float32:
        return 3.402823466 * 10 ** 38
    else:
        return None


def get_gdal_type_from_type(image_type):
    """Map a numpy dtype to the matching GDAL GDT_* data-type code (returns None for unmapped types)."""
    if image_type == np.uint16 or image_type == np.ushort:
        return 2
    if image_type == np.uint8:
        return 1
    if image_type == np.float64:
        return 7
    if image_type == np.float32:
        return 6
    if image_type == np.uint32:
        return 4


class OrthoImage:
    """Container for a georeferenced raster: UTM easting/northing, zone, GSD, scale/offset and pixel data."""

    # Default Constructor
    def __init__(self, TYPE=None, OrthoImage=None, easting=None, northing=None, zone=None, gsd=None):
        # BUGFIX: the template image's metadata was copied and then immediately
        # clobbered by the (usually None) keyword arguments. Explicit arguments
        # now take precedence, with the template filling in the gaps.
        if OrthoImage is not None:
            easting = OrthoImage.easting if easting is None else easting
            northing = OrthoImage.northing if northing is None else northing
            zone = OrthoImage.zone if zone is None else zone
            gsd = OrthoImage.gsd if gsd is None else gsd
        self.TYPE = TYPE
        self.easting = easting
        self.northing = northing
        self.zone = zone
        self.gsd = gsd
        self.width = None
        self.height = None
        self.bands = None
        self.offset = None
        self.scale = None
        self.data = None
        self.projection = None

    # Read any GDAL-supported image
    def read(self, filename: Path):
        """Read a raster file, extract UTM metadata and load scaled pixel data into self.data."""
        # Open the image
        poDataset = gdal.Open(str(filename.absolute()), gdalconst.GA_ReadOnly)
        # Get geospatial metadata
        print(f"Driver: {poDataset.GetDriver().GetDescription()} {poDataset.GetDriver().LongName}")
        self.width = poDataset.RasterXSize
        self.height = poDataset.RasterYSize
        self.bands = poDataset.RasterCount
        print(f"Width = {self.width}\nHeight = {self.height}\nBands = {self.bands}")
        projection = poDataset.GetProjection()
        self.projection = projection
        print(f"Projection is {projection}")
        adfGeoTransform = poDataset.GetGeoTransform()
        print(f"GeoTransform = {adfGeoTransform[0]}, {adfGeoTransform[1]}, {adfGeoTransform[2]}, "
              f"{adfGeoTransform[3]}, {adfGeoTransform[4]}, {adfGeoTransform[5]}")
        xscale = adfGeoTransform[1]
        yscale = -adfGeoTransform[5]
        self.easting = adfGeoTransform[0] - adfGeoTransform[2] * xscale
        self.northing = adfGeoTransform[3] - self.height * yscale
        myOGRS = poDataset.GetProjectionRef()
        srs = osr.SpatialReference(wkt=myOGRS)
        # BUGFIX: IsProjected is a method; without parentheses the bound method
        # is always truthy, so the "not projected" branch could never run.
        if srs.IsProjected():
            projection_string = srs.GetAttrValue('projcs')
            # regex to find numbers followed by a single letter in projcs string
            pattern = re.compile(r'\d+[a-zA-Z]{1}')
            result = re.search(pattern, projection_string)
            result_str = result.group()
            zone_number = int(result_str[:-1])
            zone_hemisphere = result_str[-1]
            # BUGFIX: compare strings with ==, not identity (is); southern
            # hemisphere zones are stored as negative numbers.
            if zone_hemisphere == "S":
                self.zone = zone_number * -1
            elif zone_hemisphere == "N":
                self.zone = zone_number
        else:
            print("Image is not projected. Returning None...")

        self.gsd = (xscale+yscale) / 2
        print(f"UTM Easting = {self.easting}\nUTM Northing = {self.northing}\n"
              f"UTM Zone = {self.zone}\nGSD = {self.gsd}")

        # Get band information
        poBand = poDataset.GetRasterBand(1)
        if poBand is None:
            print("Error opening first band...")
        BandDataType = gdal.GetDataTypeName(poBand.DataType)

        noData = poBand.GetNoDataValue()
        # TODO: Check for floating point precision
        if noData is None:
            if self.TYPE is float:
                # Set noData only for floating point images
                noData = float(-10000)
            else:
                noData = 0

        # Get scale and offset values
        if self.TYPE is float:
            # Do not scale if floating point values
            self.scale = 1
            self.offset = 0
        else:
            # Scan all bands for the global min/max so integer data can be
            # rescaled into the full range of self.TYPE.
            adfMinMax = [0, 0]
            first_pass = True
            minVal = None
            maxVal = None
            for i in range(0, self.bands):
                poBand = poDataset.GetRasterBand(i+1)
                if poBand is None:
                    print(f"Error opening band {i+1}")

                adfMinMax[0] = None
                adfMinMax[1] = None
                if not adfMinMax[0] or not adfMinMax[1]:
                    # avoid shadowing the builtins min/max
                    band_min, band_max = poBand.ComputeRasterMinMax(True)
                    adfMinMax[0] = band_min
                    adfMinMax[1] = band_max
                if first_pass:
                    minVal = adfMinMax[0]
                    maxVal = adfMinMax[1]
                    first_pass = False
                else:
                    if minVal > adfMinMax[0]:
                        minVal = float(adfMinMax[0])
                    if maxVal < adfMinMax[1]:
                        maxVal = float(adfMinMax[1])
            # Reserve zero for noData value
            minVal -= 1
            maxVal += 1
            # TODO: Remove manual BandDataType, acquired above correctly. 
166 | # BandDataType = 'UInt16' 167 | # maxImageVal = float(pow(2.0, int((np.iinfo(self.TYPE).max) * 8)) - 1) 168 | maxImageVal = get_max_value_of_datatype(self.TYPE) 169 | self.offset = minVal 170 | self.scale = (maxVal - minVal) / maxImageVal 171 | print(f"Offset = {self.offset}") 172 | print(f"Scale = {self.scale}") 173 | 174 | # Read the image, one band at a time 175 | offset_function = lambda x: (x-self.offset)/self.scale 176 | vfunc = np.vectorize(offset_function) 177 | for ib in range(0, self.bands): 178 | # Read the next row 179 | poBand = poDataset.GetRasterBand(ib+1) 180 | raster = poBand.ReadAsArray() 181 | shifted_array = vfunc(raster) 182 | self.data = shifted_array 183 | return True 184 | 185 | def fillVoidsPyramid(self, noSmoothing: bool, maxLevel=MAX_INT): 186 | # Check for voids 187 | count = np.count_nonzero(self.data == 0) 188 | if count == 0: 189 | return 190 | 191 | # Create image pyramid 192 | pyramid = [] 193 | pyramid.append(self) 194 | level = 0 195 | while count > 0 and level < maxLevel: 196 | # Create next level 197 | nextWidth = int(pyramid[level].width / 2) 198 | nextHeight = int(pyramid[level].height / 2) 199 | 200 | newImagePtr = OrthoImage(self.TYPE) 201 | newImagePtr.data = np.zeros((nextWidth, nextHeight)).astype(self.TYPE) 202 | 203 | # Fill in non-void values from level below building up the pyramid with a simple running average 204 | for i in range(0, nextHeight): 205 | for j in range(0, nextWidth): 206 | j2 = min(max(0, j * 2 + 1), pyramid[level].height - 1) 207 | i2 = min(max(0, i * 2 + 1), pyramid[level].width - 1) 208 | 209 | # Average neighboring pixels from below 210 | z = 0 211 | ct = 0 212 | neighbors = [] 213 | for jj in range(max(0, j2 - 1), min(j2+1, pyramid[level].height - 1)): 214 | for ii in range(max(0, i2 - 1), min(i2 + 1, pyramid[level].width -1)): 215 | if pyramid[level].data[jj][ii] != 0: 216 | z += pyramid[level].data[jj][ii] 217 | ct += 1 218 | if ct != 0: 219 | z = z / ct 220 | newImagePtr.data[j][i] = 
self.TYPE(z) 221 | pyramid.append(newImagePtr) 222 | level += 1 223 | count = np.count_nonzero(pyramid[level] == 0) 224 | 225 | # Void fill down the pyramid 226 | for k in range(level-1, 0, -1): 227 | ref = OrthoImage(OrthoImage=pyramid[k]) 228 | for j in range(0, pyramid[k].height): 229 | for i in range(0, pyramid[k].width): 230 | if pyramid[k].data[j][i] == 0: 231 | j2 = min(max(0, j / 2), pyramid[k+1].height - 1) 232 | i2 = min(max(0, i / 2), pyramid[k+1].width - 1) 233 | 234 | if noSmoothing: 235 | # Just use the closest pixel from above 236 | pyramid[k].data[j][i] = pyramid[k+1].data[j2][i2] 237 | else: 238 | # Averate neighboring pixels from around and above 239 | wts = 0 240 | ttl = 0 241 | # TODO: wtf... 242 | if j > 0: 243 | for j3 in range(j-1, j+1): 244 | if i > 0: 245 | for i3 in range(i-1, i+1): 246 | z = 0 247 | if j3 >= 0 and i3 >= 0: 248 | if j3 < pyramid[k].height and i3 < pyramid[k].width: 249 | z = ref.data[j3][i3] 250 | if not z and j3/2 < pyramid[k+1].height and i3/2 < pyramid[k+1].width: 251 | z = pyramid[k+1].data[j3/2][i3/2] 252 | if z: 253 | w = 1 + 1 * (i3 == i or j3 == j) 254 | ttl += w*z 255 | wts += w 256 | else: 257 | for i3 in range(0, i+1): 258 | z = 0 259 | if j3 >= 0 and i3 >= 0: 260 | if j3 < pyramid[k].height and i3 < pyramid[k].width: 261 | z = ref.data[j3][i3] 262 | if not z and j3 / 2 < pyramid[k + 1].height and i3 / 2 < pyramid[ 263 | k + 1].width: 264 | z = pyramid[k + 1].data[j3 / 2][i3 / 2] 265 | if z: 266 | w = 1 + 1 * (i3 == i or j3 == j) 267 | ttl += w * z 268 | wts += w 269 | else: 270 | for j3 in range(0, j+1): 271 | if i > 0: 272 | for i3 in range(i-1, i+1): 273 | z = 0 274 | if j3 >= 0 and i3 >= 0: 275 | if j3 < pyramid[k].height and i3 < pyramid[k].width: 276 | z = ref.data[j3][i3] 277 | if not z and j3 / 2 < pyramid[k + 1].height and i3 / 2 < pyramid[ 278 | k + 1].width: 279 | z = pyramid[k + 1].data[j3 / 2][i3 / 2] 280 | if z: 281 | w = 1 + 1 * (i3 == i or j3 == j) 282 | ttl += w * z 283 | wts += w 284 | else: 
285 | for i3 in range(0, i+1): 286 | z = 0 287 | if j3 >= 0 and i3 >= 0: 288 | if j3 < pyramid[k].height and i3 < pyramid[k].width: 289 | z = ref.data[j3][i3] 290 | if not z and j3 / 2 < pyramid[k + 1].height and i3 / 2 < pyramid[ 291 | k + 1].width: 292 | z = pyramid[k + 1].data[j3 / 2][i3 / 2] 293 | if z: 294 | w = 1 + 1 * (i3 == i or j3 == j) 295 | ttl += w * z 296 | wts += w 297 | if wts: 298 | pyramid[k].data[j][i] = ttl / wts 299 | 300 | # Deallocate memory for all but the input dsm 301 | for i in range(1, level): 302 | del pyramid[i] 303 | 304 | def edgeFilter(self, dzScaled): 305 | dzScaled = self.TYPE(dzScaled) 306 | # TODO: Apply filter to the image 307 | return 308 | 309 | def write(self, filename: Path, convertToFloat=False, egm96=False): 310 | if convertToFloat: 311 | target_ds = gdal.GetDriverByName('GTiff').Create( 312 | str(filename.absolute()), self.width, self.height, 1, gdal.GDT_Float64) 313 | else: 314 | target_ds = gdal.GetDriverByName('GTiff').Create( 315 | str(filename.absolute()), self.width, self.height, 1, get_gdal_type_from_type(self.TYPE)) 316 | 317 | adfGeoTransform = [self.easting, self.gsd, 0, self.northing + self.height * self.gsd, 0, -1 * self.gsd] 318 | target_ds.SetGeoTransform(adfGeoTransform) 319 | target_ds.SetProjection(self.projection) 320 | target_ds.GetRasterBand(1).WriteArray(self.data) 321 | 322 | band = target_ds.GetRasterBand(1) 323 | no_data_value = 0 324 | band.SetNoDataValue(no_data_value) 325 | band.FlushCache() 326 | target_ds = None 327 | return True 328 | 329 | 330 | def main(): 331 | filename = Path(r"C:\Users\wangss1\Documents\Data\CORE3D_Phase1B_Extension\Testing_Data\GroundTruth\A-1\A-1_DSM.tif") 332 | test = OrthoImage(TYPE=np.ushort) 333 | test.read(filename) 334 | 335 | 336 | if __name__ == "__main__": 337 | main() 338 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 
import numpy as np
from pathlib import Path
import math
import random
import sys
from tqdm import tqdm

try:
    from OrthoImage import OrthoImage
except ImportError:
    from utils.OrthoImage import OrthoImage


MAX_FLOAT = sys.float_info.max


class AlignResult:
    """Rigid-body alignment result: X/Y/Z translation and robust Z RMS."""

    def __init__(self):
        self.tx = None
        self.ty = None
        self.tz = None
        self.rms = None


class AlignParameters:
    """User parameters: GSD, max Z difference (m), and max translation (m)."""

    def __init__(self):
        self.gsd = None
        self.maxdz = None
        self.maxt = None


class AlignBounds:
    """Axis-aligned overlap rectangle between two DSMs (map coordinates)."""

    def __init__(self):
        self.xmin = None
        self.xmax = None
        self.ymin = None
        self.ymax = None
        self.width = None
        self.height = None


def computeRMS(dx, dy, numSamples, maxSamples, xlist, ylist, referenceDSM: OrthoImage,
               targetDSM: OrthoImage, medianDZ=0, rms=0, ndx=0, completeness=0):
    """Sample Z differences between reference and target under a (dx, dy) shift.

    The trailing ``medianDZ``/``rms``/``ndx``/``completeness`` arguments are
    legacy C++-style out-parameters; Python cannot write through them (the old
    code assigned to them and the caller always saw zeros), so results are
    returned instead.

    Returns:
        ``(ok, medianDZ, rms, ndx, completeness)`` where ``ok`` is False when
        fewer than ``numSamples`` valid overlapping points were found, ``rms``
        is a robust (67th percentile of |deviation|) error estimate, and
        ``completeness`` is the fraction of samples within 1 m of the median.
    """
    count = 0
    ndx = 0
    differences = []
    while count < numSamples and ndx < maxSamples:
        x = xlist[ndx]
        y = ylist[ndx]
        ndx += 1

        # Map the point into the target (shifted by +dx/2, +dy/2).
        # Skip if this isn't a valid point.
        col = int((x - targetDSM.easting + 0.5 * dx) / targetDSM.gsd)
        row = targetDSM.height - 1 - int((y - targetDSM.northing + 0.5 * dy) / targetDSM.gsd)
        if col < 0 or row < 0 or col >= targetDSM.width - 1 or row >= targetDSM.height - 1:
            continue
        if targetDSM.data[row][col] == 0:
            continue
        targetZ = targetDSM.data[row][col] * targetDSM.scale + targetDSM.offset

        # Map the point into the reference with the OPPOSITE half-shift.
        # NOTE(review): the original applied +0.5*dx to both images, which
        # makes the difference independent of the tested shift; the reference
        # side of the split shift must have the opposite sign -- confirm
        # against the upstream pubgeo align3d C++ implementation.
        col = int((x - referenceDSM.easting - 0.5 * dx) / referenceDSM.gsd)
        row = referenceDSM.height - 1 - int((y - referenceDSM.northing - 0.5 * dy) / referenceDSM.gsd)
        if col < 0 or row < 0 or col >= referenceDSM.width - 1 or row >= referenceDSM.height - 1:
            continue
        if referenceDSM.data[row][col] == 0:
            continue
        referenceZ = referenceDSM.data[row][col] * referenceDSM.scale + referenceDSM.offset

        # Keep going until we have enough points.
        differences.append(referenceZ - targetZ)
        count += 1

    # Not enough overlapping samples to form a robust estimate.
    if count < numSamples:
        return False, 0, 0, ndx, 0

    # Median Z offset and a robust RMS estimate (67th percentile of the
    # absolute deviations from the median).
    differences.sort()
    medianDZ = differences[count // 2]
    differences = [abs(d - medianDZ) for d in differences]
    differences.sort()
    rms = differences[int(count * 0.67)]

    # Completeness: fraction of samples within 1 m of the median offset.
    good = sum(1 for d in differences if d < 1.0)
    completeness = good / numSamples

    return True, medianDZ, rms, ndx, completeness


def EstimateRigidBody(referenceDSM: OrthoImage, targetDSM: OrthoImage, maxt, bounds: AlignBounds, result: AlignResult):
    """Brute-force search for the XY translation (plus median Z offset) that
    best aligns the target DSM to the reference, refined with quadratic
    interpolation around the best bin.  Results are written into ``result``.

    Args:
        maxt: Maximum translation to search, in meters (snapped to the step).
        bounds: Overlap rectangle to draw random sample points from.
    """
    step = min(referenceDSM.gsd, targetDSM.gsd)
    numSamples = 10000
    maxSamples = numSamples * 10

    # Snap the search radius to a whole number of steps.
    maxt = step * math.ceil(maxt / step)
    bins = int(maxt / step * 2) + 1

    rmsArray = np.zeros((bins, bins))

    # Random sample locations inside the overlap bounds.  Seed ONCE for
    # reproducibility; the original re-seeded with 0 before every draw,
    # which made every sample point identical.
    random.seed(0)
    xlist = [random.uniform(bounds.xmin, bounds.xmax) for _ in range(maxSamples)]
    ylist = [random.uniform(bounds.ymin, bounds.ymax) for _ in range(maxSamples)]

    # Brute force over the translation grid, sampling points to bound runtime.
    bestDX = 0
    bestDY = 0
    bestDZ = 0
    besti = 0
    bestj = 0
    bestRMS = MAX_FLOAT
    bestCompleteness = 0
    for i in tqdm(range(bins)):
        dx = -maxt + i * step
        for j in range(bins):
            dy = -maxt + j * step
            ok, medianDZ, rms, _, completeness = computeRMS(
                dx, dy, numSamples, maxSamples, xlist, ylist, referenceDSM, targetDSM)
            if not ok:
                continue

            rmsArray[i][j] = rms
            # Prefer lower RMS; break ties with the smaller translation
            # (original tie-break had bestDY+bestDY instead of bestDY**2).
            if rms < bestRMS or (rms == bestRMS and dx * dx + dy * dy < bestDX * bestDX + bestDY * bestDY):
                bestCompleteness = completeness
                bestRMS = rms
                bestDX = dx
                bestDY = dy
                bestDZ = medianDZ
                besti = i
                bestj = j

    # Apply quadratic interpolation to localize the peak to sub-bin precision.
    if 0 < besti < bins - 1 and 0 < bestj < bins - 1:
        dx = (rmsArray[besti + 1][bestj] - rmsArray[besti - 1][bestj]) / 2
        dy = (rmsArray[besti][bestj + 1] - rmsArray[besti][bestj - 1]) / 2
        dxx = rmsArray[besti + 1][bestj] + rmsArray[besti - 1][bestj] - 2 * rmsArray[besti][bestj]
        dyy = rmsArray[besti][bestj + 1] + rmsArray[besti][bestj - 1] - 2 * rmsArray[besti][bestj]
        # Mixed partial from the four corner bins (the original repeated the
        # [besti-1][bestj+1] corner and dropped [besti+1][bestj+1]).
        dxy = (rmsArray[besti + 1][bestj + 1] - rmsArray[besti + 1][bestj - 1]
               - rmsArray[besti - 1][bestj + 1] + rmsArray[besti - 1][bestj - 1]) / 4
        det = dxx * dyy - dxy * dxy
        if det != 0:
            ix = besti - (dyy * dx - dxy * dy) / det
            iy = bestj - (dxx * dy - dxy * dx) / det
            bestDX = -maxt + ix * step
            bestDY = -maxt + iy * step

    # Report the translation that moves the target onto the reference.
    result.rms = bestRMS
    result.tx = -bestDX
    result.ty = -bestDY
    result.tz = bestDZ

    print(f"Percent less than 1m Z difference = {bestCompleteness * 100:6.2f}")
    print(f"X offset = {result.tx} m")
    print(f"Y offset = {result.ty} m")
    print(f"Z offset = {result.tz} m")
    print(f"Z RMS = {result.rms} m")


def load_file(dsm: OrthoImage, inputFileName: Path, params: AlignParameters):
    """Load a DSM raster into ``dsm``, fill small voids, and edge-filter it.

    Only GeoTIFF (.tif) input is supported; point clouds are rejected.

    Returns:
        True on success, False on read failure or unsupported input type.
    """
    ext = inputFileName.suffix
    if ext == ".tif":
        print(f"File Type = {ext}; loading as raster")
        if not dsm.read(inputFileName):
            print(f"Failed to read {str(inputFileName.absolute())}")
            return False
    else:
        # TODO: Point cloud to DSM, but probably unnecessary
        print("Cannot read point cloud...")
        return False

    # Fill small voids (nearest-neighbor fill, up to two pyramid levels).
    dsm.fillVoidsPyramid(True, 2)
    print("Filtering data...")
    # Remove points along edges which are difficult to match.
    dsm.edgeFilter(int(params.maxdz / dsm.scale))
    return True


def AlignTarget2Reference(referenceFilename: Path, targetFilename: Path, params: AlignParameters):
    """Align a target DSM to a reference DSM with a rigid-body translation.

    Reads both DSMs, estimates the aligning translation over their overlap,
    appends the offsets to ``<target>_offsets.txt`` and writes the shifted
    DSM to ``<target>_aligned.tif``.

    Returns:
        True on success, False if either file fails to load or the DSMs do
        not overlap.
    """
    print(f"Reading reference file {str(referenceFilename.absolute())}")
    referenceDSM = OrthoImage(TYPE=np.ushort)
    if not load_file(referenceDSM, referenceFilename, params):
        return False
    if params.gsd != referenceDSM.gsd:
        print(f"Changing gsd to {referenceDSM.gsd} to match reference DSM")
        params.gsd = referenceDSM.gsd

    print(f"Reading target file: {str(targetFilename.absolute())}")
    targetDSM = OrthoImage(TYPE=np.ushort)
    if not load_file(targetDSM, targetFilename, params):
        return False

    # Overlapping bounds of the two rasters (map coordinates).
    bounds = AlignBounds()
    bounds.xmin = max(referenceDSM.easting, targetDSM.easting)
    bounds.ymin = max(referenceDSM.northing, targetDSM.northing)
    bounds.xmax = min(referenceDSM.easting + (referenceDSM.width * referenceDSM.gsd),
                      targetDSM.easting + (targetDSM.width * targetDSM.gsd))
    bounds.ymax = min(referenceDSM.northing + (referenceDSM.height * referenceDSM.gsd),
                      targetDSM.northing + (targetDSM.height * targetDSM.gsd))
    bounds.width = bounds.xmax - bounds.xmin
    bounds.height = bounds.ymax - bounds.ymin
    overlap_km = bounds.width / 1000 * bounds.height / 1000
    print(f"Overlap = {int(bounds.width)} m x {int(bounds.height)} m = {overlap_km} km")
    # <= also catches disjoint rasters (negative width/height).
    if overlap_km <= 0:
        return False

    # Estimate rigid body transform to align target points to reference.
    result = AlignResult()
    print("Estimating rigid body transformation.")
    EstimateRigidBody(referenceDSM, targetDSM, params.maxt, bounds, result)

    # Append offsets to a text file next to the target.
    print("Writing offsets text file.")
    offsets_path = Path(targetFilename.parent, targetFilename.stem + "_offsets.txt")
    with open(offsets_path, 'a') as f1:
        f1.write("X Offset Y Offset Z Offset Z RMS\n")
        f1.write("%08.3f %08.3f %08.3f %08.3f\n" % (result.tx, result.ty, result.tz, result.rms))

    # Write the aligned TIF with the recovered transform applied.
    print("Writing aligned TIF file.")
    outFileName = Path(targetFilename.parent, targetFilename.stem + "_aligned.tif")
    targetDSM.offset += result.tz
    targetDSM.easting += result.tx
    targetDSM.northing += result.ty
    targetDSM.write(outFileName, True)

    # TODO: Write BPF File

    return True


def main():
    # Developer smoke test with hard-coded local paths.
    # NOTE(review): params.gsd/maxdz/maxt are left as None here, which will
    # fail inside load_file/EstimateRigidBody -- supply real values to run.
    referenceDSMfilename = Path(r"C:\Users\wangss1\Documents\Data\CORE3D_Phase1B_Extension\Testing_Data\GroundTruth\A-1\A-1_DSM.tif")
    targetDSMfilename = Path(r"C:\Users\wangss1\Documents\Data\CORE3D_Phase1B_Extension\Testing_Data\VRICON\A-1\a-1_dsm.tif")
    params = AlignParameters()
    AlignTarget2Reference(referenceDSMfilename, targetDSMfilename, params)


if __name__ == "__main__":
    main()