├── .gitignore
├── README.md
├── configs
├── config.toml
├── config_1000x650.toml
├── config_2000x1200.toml
├── config_2500x1500.toml
├── config_3000x2000.toml
└── config_750x500.toml
├── experiments
├── .gitignore
├── experiment_conductor.py
├── hatching
│ ├── convert_DEM_to_STL.py
│ ├── convert_STL_to_DEM.py
│ ├── crop_geotiff.py
│ ├── edge_detection.py
│ ├── flowlines_stoplines.py
│ ├── gebco_to_blender.py
│ ├── imgprocessing.py
│ ├── run.py
│ ├── scales.py
│ ├── smoothing.py
│ └── temp_experiment_flowlines_two_stage.py
├── labelplacement
│ └── parser.py
└── projection_test.py
├── hardware
└── config.yaml
├── lineworld
├── __init__.py
├── core
│ ├── flowlines.py
│ ├── hatching.py
│ ├── layerstack.py
│ ├── map.py
│ └── svgwriter.py
├── layers
│ ├── bathymetry.py
│ ├── bflowlines.py
│ ├── cities.py
│ ├── coastlines.py
│ ├── contour.py
│ ├── contour2.py
│ ├── elevation.py
│ ├── grid.py
│ ├── labels.py
│ ├── layer.py
│ ├── meta.py
│ └── oceancurrents.py
├── main.py
├── run.py
└── util
│ ├── colormaps.py
│ ├── downloader.py
│ ├── export.py
│ ├── fontsizetest.py
│ ├── gebco_grid_to_polygon.py
│ ├── geometrytools.py
│ ├── hersheyfont.py
│ ├── labelplacement.py
│ ├── rastertools.py
│ ├── scales.py
│ └── slope.py
├── media
└── header.png
├── pyproject.toml
├── svgtogcode.py
├── tests
├── conftest.py
├── test_flowlines.py
├── test_gebco_grid_to_polygon.py
├── test_hatching.py
├── test_hersheyfont.py
└── test_lineworld.py
└── uv.lock
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/*
2 | .venv/*
3 |
4 | data/*
5 | _db/*
6 | tmp/*
7 | tests_output/*
8 |
9 | *.pyc
10 | *.nc
11 | *.pstat
12 |
13 | REFERENCE_MAPS/*
14 | experiments/conductor/*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Plotmap: a line-based world map generator
2 |
3 | Create vector line (world) maps suitable for pen plotters. Data sources include [GEBCO](https://www.gebco.net/) elevation data
4 | and [OpenStreetMap coastlines](https://osmdata.openstreetmap.de/data/coastlines.html).
5 |
6 | 
7 |
8 | ### Prerequisites
9 |
10 | * Postgres including postGIS
11 | * Python >= 3.12
12 | * ~30 GB of disk space
13 |
14 | ### Run
15 |
16 | Build a map with a rather small config file:
17 | ```python3 lineworld/run.py```
18 | This may take a while to download the GEBCO elevation data.
19 |
20 | Convert the output SVG to gcode files, layer by layer:
21 | ```python3 svgtogcode.py Lineworld750x500.svg```
22 |
23 | ### Learn more:
24 |
25 | You can find more info in a blog post about how this came to be: [A Giant Map drawn with a Pen](https://volzo.de/posts/plottermap/)
--------------------------------------------------------------------------------
/configs/config.toml:
--------------------------------------------------------------------------------
1 | # basic config
2 |
3 | name = "LineWorldBasicConfig"
4 |
5 | debug = false
6 | projection = "VAN_DER_GRINTEN_I"
7 | wrapover = true
8 |
9 | # units in mm
10 | width = 2000
11 | height = 1500
12 |
13 | offset_x = 0.0
14 | offset_y = 170
15 |
16 | tolerance = 0.1
17 | tolerance_exclusion_zones = 0.5
18 |
19 | crop = true
20 | crop_offset_x = 500
21 | crop_offset_y = 350
22 | crop_width = 1000
23 | crop_height = 500
24 |
25 | inkscape_command="/Applications/Inkscape.app/Contents/MacOS/inkscape"
26 |
27 | [main]
28 | db_connection = "postgresql+psycopg://localhost:5432/lineworld"
29 |
30 | [layer.Contour]
31 | elevation_anchors = [0, 500, 2000, 9000]
32 | num_elevation_lines = 24
33 |
34 | [layer.Bathymetry]
35 | elevation_anchors = [0, -11000]
36 | num_elevation_lines = 15
37 |
38 | [layer.BathymetryFlowlines]
39 | line_distance = [0.4, 2.0]
40 | blur_distance = true
41 | blur_distance_kernel_size = 20
42 | blur_angles = true
43 | blur_angles_kernel_size = 25
44 | blur_length = true
45 | blur_length_kernel_size = 10
46 | line_distance_end_factor = 0.8
47 |
48 | [layer.CitiesLabels]
49 | font_size = 6
50 | circle_radius = 3
51 |
52 | [layer.Grid]
53 | latitude_line_dist = 20
54 | longitude_line_dist = 20
55 | bathymetry_exclude_buffer_distance = 0.8
56 | labels_exclude_buffer_distance = 2.0
57 | font_size = 5
--------------------------------------------------------------------------------
/configs/config_1000x650.toml:
--------------------------------------------------------------------------------
1 | # basic config
2 |
3 | name = "LineWorld1000x650"
4 |
5 | debug = true
6 | projection = "VAN_DER_GRINTEN_I"
7 | wrapover = true
8 |
9 | # units in mm
10 | width = 1000
11 | height = 650
12 |
13 | offset_x = 0.0
14 | offset_y = 60
15 |
16 | tolerance = 0.1
17 | tolerance_exclusion_zones = 0.5
18 |
19 | crop = true
20 | crop_offset_x = 500
21 | crop_offset_y = 350
22 | crop_width = 1000
23 | crop_height = 500
24 |
25 | svg_background_color = "#666666"
26 | inkscape_command = "/Applications/Inkscape.app/Contents/MacOS/inkscape"
27 |
28 | [main]
29 | db_connection = "postgresql+psycopg://localhost:5432/lineworld"
30 |
31 | [layer.Contour]
32 | elevation_anchors = [0, 500, 2000, 9000]
33 | num_elevation_lines = 9
34 |
35 | [layer.Contour2]
36 | elevation_anchors = [0, 500, 2000, 9000]
37 | num_elevation_lines = 15
38 | geotiff_scaling_factor = 0.25
39 | window_size_tpi = 21
40 | window_size_smoothing_low = 21
41 | window_size_smoothing_high = 51
42 | taubin_smoothing_steps = 20
43 | tolerance = 0.05
44 | filter_min_area_map = 2.0
45 |
46 | [layer.Coastlines]
47 | hatching_distance = 1.0
48 |
49 | [layer.Bathymetry]
50 | elevation_anchors = [0, -11000]
51 | num_elevation_lines = 15
52 |
53 | [layer.BathymetryFlowlines]
54 | layer_name = "BathymetryFlowlines"
55 | num_tiles = 4
56 | line_distance = [0.1, 3.0]
57 | blur_angles_kernel_size = 25
58 | blur_inclination_kernel_size = 10
59 | blur_density_kernel_size = 25
60 | line_distance_end_factor = 0.5
61 |
62 | [layer.Grid]
63 | latitude_line_dist = 20
64 | longitude_line_dist = 20
65 | font_size = 6
66 |
67 | [layer.GridBathymetry]
68 | exclude_buffer_distance = 0.5
69 |
70 | [layer.GridLabels]
71 | exclude_buffer_distance = 1.0
72 |
73 | [layer.CitiesLabels]
74 | font_size = 6
75 | circle_radius = 1.0
76 | offset_from_center = 10
77 |
78 | [layer.Labels]
79 | font_size = 6
80 | exclude_buffer_distance = 1.0
--------------------------------------------------------------------------------
/configs/config_2000x1200.toml:
--------------------------------------------------------------------------------
1 | # basic config
2 |
3 | name = "LineWorld2000x1200"
4 |
5 | debug = true
6 | projection = "VAN_DER_GRINTEN_I"
7 | wrapover = true
8 |
9 | # units in mm
10 | width = 2000
11 | height = 1200
12 |
13 | offset_x = 0.0
14 | offset_y = 80
15 |
16 | tolerance = 0.1
17 | tolerance_exclusion_zones = 0.5
18 |
19 | crop = true
20 | crop_offset_x = 500
21 | crop_offset_y = 350
22 | crop_width = 1000
23 | crop_height = 500
24 |
25 | svg_background_color = "#666666"
26 | inkscape_command = "/Applications/Inkscape.app/Contents/MacOS/inkscape"
27 |
28 | [main]
29 | db_connection = "postgresql+psycopg://localhost:5432/lineworld"
30 |
31 | [layer.Contour]
32 | elevation_anchors = [0, 500, 2000, 9000]
33 | num_elevation_lines = 9
34 |
35 | [layer.Contour2]
36 | elevation_anchors = [0, 500, 2000, 9000]
37 | num_elevation_lines = 15
38 | geotiff_scaling_factor = 0.25
39 | window_size_tpi = 21
40 | window_size_smoothing_low = 21
41 | window_size_smoothing_high = 51
42 | taubin_smoothing_steps = 20
43 | tolerance = 0.05
44 | filter_min_area_map = 2.0
45 |
46 | [layer.Coastlines]
47 | hatching_distance = 1.0
48 |
49 | [layer.Bathymetry]
50 | elevation_anchors = [0, -11000]
51 | num_elevation_lines = 15
52 |
53 | [layer.BathymetryFlowlines]
54 | layer_name = "BathymetryFlowlines"
55 | num_tiles = 4
56 | line_distance = [0.4, 3.0]
57 |
58 | blur_angles = true
59 | blur_angles_kernel_size = 10
60 |
61 | blur_distance = true
62 | blur_distance_kernel_size = 10
63 |
64 | blur_length = true
65 | blur_length_kernel_size = 10
66 |
67 | line_distance_end_factor = 0.5
68 |
69 | [layer.Grid]
70 | latitude_line_dist = 20
71 | longitude_line_dist = 20
72 | font_size = 6
73 |
74 | [layer.GridBathymetry]
75 | exclude_buffer_distance = 0.5
76 |
77 | [layer.GridLabels]
78 | exclude_buffer_distance = 1.0
79 |
80 | [layer.CitiesLabels]
81 | font_size = 6
82 | circle_radius = 1.0
83 | offset_from_center = 10
84 |
85 | [layer.Labels]
86 | font_size = 6
87 | exclude_buffer_distance = 1.0
--------------------------------------------------------------------------------
/configs/config_2500x1500.toml:
--------------------------------------------------------------------------------
1 | # basic config
2 |
3 | name = "LineWorld2500x1500"
4 |
5 | debug = true
6 | projection = "VAN_DER_GRINTEN_I"
7 | wrapover = true
8 |
9 | # units in mm
10 | width = 2500
11 | height = 2000  # NOTE(review): config name/file say 2500x1500 but height is 2000 -- confirm intentional
12 |
13 | offset_x = 0.0
14 | offset_y = 100
15 |
16 | tolerance = 0.1
17 | tolerance_exclusion_zones = 0.5
18 |
19 | crop = true
20 | crop_offset_x = 500
21 | crop_offset_y = 350
22 | crop_width = 1000
23 | crop_height = 500
24 |
25 | svg_background_color = "#666666"
26 | inkscape_command = "/Applications/Inkscape.app/Contents/MacOS/inkscape"
27 |
28 | [main]
29 | db_connection = "postgresql+psycopg://localhost:5432/lineworld"
30 |
31 | [layer.Contour]
32 | elevation_anchors = [0, 500, 2000, 9000]
33 | num_elevation_lines = 9
34 |
35 | [layer.Contour2]
36 | elevation_anchors = [0, 500, 2000, 9000]
37 | num_elevation_lines = 15
38 | geotiff_scaling_factor = 0.25
39 | window_size_tpi = 21
40 | window_size_smoothing_low = 21
41 | window_size_smoothing_high = 51
42 | taubin_smoothing_steps = 20
43 | tolerance = 0.05
44 | filter_min_area_map = 2.0
45 |
46 | [layer.Coastlines]
47 | hatching_distance = 1.0
48 |
49 | [layer.Bathymetry]
50 | elevation_anchors = [0, -11000]
51 | num_elevation_lines = 15
52 |
53 | [layer.BathymetryFlowlines]
54 | layer_name = "BathymetryFlowlines"
55 | num_tiles = 4
56 | line_distance = [0.4, 3.0]
57 |
58 | blur_angles = true
59 | blur_angles_kernel_size = 10
60 |
61 | blur_distance = true
62 | blur_distance_kernel_size = 10
63 |
64 | blur_length = true
65 | blur_length_kernel_size = 10
66 |
67 | line_distance_end_factor = 0.5
68 |
69 | [layer.Grid]
70 | latitude_line_dist = 20
71 | longitude_line_dist = 20
72 | font_size = 6
73 |
74 | [layer.GridBathymetry]
75 | exclude_buffer_distance = 0.5
76 |
77 | [layer.GridLabels]
78 | exclude_buffer_distance = 1.0
79 |
80 | [layer.CitiesLabels]
81 | font_size = 6
82 | circle_radius = 1.0
83 | offset_from_center = 10
84 |
85 | [layer.Labels]
86 | font_size = 6
87 | exclude_buffer_distance = 1.0
--------------------------------------------------------------------------------
/configs/config_3000x2000.toml:
--------------------------------------------------------------------------------
1 | # basic config
2 |
3 | name = "LineWorld3000x2000"
4 |
5 | debug = true
6 | projection = "VAN_DER_GRINTEN_I"
7 | wrapover = true
8 |
9 | # units in mm
10 | width = 3000
11 | height = 2000
12 |
13 | offset_x = 0.0
14 | offset_y = 170
15 |
16 | tolerance = 0.1
17 | tolerance_exclusion_zones = 0.5
18 |
19 | svg_background_color = "#666666"
20 | inkscape_command = "/Applications/Inkscape.app/Contents/MacOS/inkscape"
21 |
22 | [main]
23 | db_connection = "postgresql+psycopg://localhost:5432/lineworld"
24 |
25 | [layer.Contour2]
26 | elevation_anchors = [0, 500, 2000, 9000]
27 | num_elevation_lines = 21
28 | geotiff_scaling_factor = 0.25
29 | window_size_tpi = 21
30 | window_size_smoothing_low = 21
31 | window_size_smoothing_high = 51
32 | taubin_smoothing_steps = 20
33 | tolerance = 0.05
34 | filter_min_area_map = 2.0
35 |
36 | [layer.Coastlines]
37 | hatching_distance = 1.5
38 |
39 | [layer.BathymetryFlowlines]
40 | layer_name = "BathymetryFlowlines"
41 | num_tiles = 4
42 | line_distance = [0.1, 3.0]
43 | blur_angles = false
44 | blur_angles_kernel_size = 25
45 | blur_inclination = false
46 | blur_inclination_kernel_size = 10
47 | blur_density_kernel_size = 25
48 | line_distance_end_factor = 0.5
49 |
50 | [layer.Grid]
51 | latitude_line_dist = 20
52 | longitude_line_dist = 20
53 | font_size = 6
54 |
55 | [layer.GridBathymetry]
56 | exclude_buffer_distance = 0.5
57 |
58 | [layer.GridLabels]
59 | exclude_buffer_distance = 1.0
60 |
61 | [layer.CitiesLabels]
62 | font_size = 6
63 | circle_radius = 1.0
64 | offset_from_center = 10
65 | max_iterations = 10000
66 |
67 | [layer.Labels]
68 | font_size = 6
69 | exclude_buffer_distance = 1.0
--------------------------------------------------------------------------------
/configs/config_750x500.toml:
--------------------------------------------------------------------------------
1 | # basic config
2 |
3 | name = "LineWorld750x500"
4 |
5 | # debug = true
6 | projection = "VAN_DER_GRINTEN_I"
7 | wrapover = true
8 |
9 | # units in mm
10 | width = 750
11 | height = 500
12 |
13 | offset_x = 0.0
14 | offset_y = 50
15 |
16 | viewport_padding = [0, 0, 0, 0]
17 |
18 | tolerance = 0.1
19 | tolerance_exclusion_zones = 0.5
20 |
21 | svg_background_color = "#666666"
22 | inkscape_command = "/Applications/Inkscape.app/Contents/MacOS/inkscape"
23 |
24 | [main]
25 | db_connection = "postgresql+psycopg://localhost:5432/lineworld"
26 |
27 | [layer.Contour]
28 | elevation_anchors = [0, 500, 2000, 9000]
29 | num_elevation_lines = 9
30 |
31 | [layer.Contour2]
32 | elevation_anchors = [0, 500, 2000, 9000]
33 | num_elevation_lines = 15
34 | geotiff_scaling_factor = 0.25
35 | window_size_tpi = 21
36 | window_size_smoothing_low = 21
37 | window_size_smoothing_high = 51
38 | taubin_smoothing_steps = 20
39 | tolerance = 0.05
40 | filter_min_area_map = 2.0
41 |
42 | [layer.Coastlines]
43 | hatching_distance = 1.0
44 |
45 | [layer.BathymetryFlowlines]
46 | layer_name = "BathymetryFlowlines"
47 | line_distance = [0.2, 2.0]
48 | line_max_length = [1.5, 15]
49 | blur_distance_kernel_size = 5
50 | blur_angles_kernel_size = 11
51 | blur_length_kernel_size = 5
52 | line_distance_end_factor = 0.75
53 | tolerance = 0.05
54 |
55 | [layer.OceanCurrents]
56 | layer_name = "OceanCurrents"
57 | line_distance = [0.6, 1.4]
58 | line_max_length = [200, 200]
59 | line_distance_end_factor = 0.5
60 | tolerance = 0.05
61 |
62 | [layer.Grid]
63 | latitude_line_dist = 20
64 | longitude_line_dist = 20
65 | font_size = 5
66 |
67 | [layer.GridBathymetry]
68 | exclude_buffer_distance = 1.0
69 |
70 | [layer.GridLabels]
71 | exclude_buffer_distance = 1.5
72 |
73 | [layer.CitiesLabels]
74 | exclude_buffer_distance = 1.5
75 | font_size = 3.5
76 | circle_radius = 1.0
77 | offset_from_center = 12
78 | box_safety_margin = 15
79 | max_iterations = 15000
80 |
81 | [layer.CitiesCircles]
82 | exclude_buffer_distance = 1.1
83 |
84 | [layer.Labels]
85 | font_size = 6
86 | exclude_buffer_distance = 1.5
--------------------------------------------------------------------------------
/experiments/.gitignore:
--------------------------------------------------------------------------------
1 | hatching/data/*
2 | hatching/output/*
--------------------------------------------------------------------------------
/experiments/experiment_conductor.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import os
3 | import re
4 | import subprocess
5 | import tomllib
6 |
7 | import toml
8 | from pathlib import Path
9 |
10 | import cv2
11 | import numpy as np
12 | from loguru import logger
13 |
14 | import lineworld
15 |
16 | # VARIABLE = ["layer", "BathymetryFlowlines", "blur_angles_kernel_size"]
17 | # VARIABLE = ["layer", "BathymetryFlowlines", "blur_density_kernel_size"]
18 | # VARIABLE = ["layer", "BathymetryFlowlines", "line_distance"]
19 | # VARIABLE = ["layer", "BathymetryFlowlines", "scale_adjustment_value"]
20 | VARIABLE = ["layer", "BathymetryFlowlines", "line_distance_end_factor"]
21 | # VARIABLE = ["layer", "BathymetryFlowlines", "line_max_segments"]
22 |
23 | # VARIABLE_STATES = [1, 3, 5, 9, 13, 17, 21, 31, 41, 51, 61, 81, 101]
24 | # VARIABLE_STATES = [
25 | # # [0.5, 3.0],
26 | # # [0.5, 4.0],
27 | # [0.5, 5.0],
28 | # # [0.5, 6.0],
29 | # # [0.5, 7.0],
30 | # # [0.5, 10.0]
31 | # # [3, 6],
32 | # ]
33 | VARIABLE_STATES = [0.2, 0.4, 0.6, 0.8, 1.0]
34 | # VARIABLE_STATES = [5, 10, 20, 30, 40, 50]
35 |
36 | SCRIPT_PATH = "lineworld/run.py"
37 | WORKING_DIR = "."
38 | TEMP_DIR = "temp"
39 | BASE_CONFIG_FILE = Path("configs", "config_750x500.toml")
40 | TMP_CONFIG_FILE = Path(TEMP_DIR, "config_overwrite.toml")
41 | OUTPUT_DIR = "experiments/conductor"
42 |
43 | FFMPEG_TEMP_FILE = Path(OUTPUT_DIR, "ffmpeg_mux_file.txt")
44 | FFMPEG_OUTPUT_FILE = Path(OUTPUT_DIR, f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.mp4")
45 | FFMPEG_DURATION = 1
46 |
47 | INKSCAPE_CONVERSION_SUFFIX = ".png"
48 | INKSCAPE_CONVERSION_WIDTH = 20000
49 | FONT_NAME = cv2.FONT_HERSHEY_SIMPLEX
50 | FONT_SCALE = 1.0
51 | FONT_THICKNESS = 2
52 |
53 | # ----------------------------------------------------------------------------------------------------------------------
54 |
55 | if not os.path.exists(OUTPUT_DIR):
56 | os.makedirs(OUTPUT_DIR)
57 | logger.debug(f"created OUTPUT_DIR {OUTPUT_DIR}")
58 |
59 | total_runtime = 0.0
60 |
61 | overview = []
62 |
63 | for variable_state in VARIABLE_STATES:
64 | timer_start = datetime.datetime.now()
65 |
66 | variable_name = ".".join(VARIABLE)
67 |
68 | # create the temporary config overwrite file
69 | config = {}
70 | with open(BASE_CONFIG_FILE, "rb") as f:
71 | config = tomllib.load(f)
72 |
73 | if isinstance(VARIABLE, list):
74 | tmp_config = config
75 | for i in range(len(VARIABLE)):
76 | if i == len(VARIABLE) - 1:
77 | tmp_config[VARIABLE[i]] = variable_state
78 | else:
79 | if VARIABLE[i] not in tmp_config:
80 | tmp_config[VARIABLE[i]] = {}
81 | tmp_config = tmp_config[VARIABLE[i]]
82 | else:
83 | config[VARIABLE] = variable_state
84 |
85 | with open(TMP_CONFIG_FILE, "w") as f:
86 | toml.dump(config, f)
87 |
88 | variable_state_printable = re.sub("[\[\]]", "", str(variable_state))
89 |
90 | # run the script from the correct working dir
91 | modified_env = os.environ.copy()
92 | modified_env[lineworld.ENV_OVERWRITE_CONFIG] = TMP_CONFIG_FILE
93 | result = subprocess.run(["python", SCRIPT_PATH], cwd=WORKING_DIR, env=modified_env, capture_output=False)
94 |
95 | if result.returncode != 0:
96 | raise Exception(f"non-zero return code. Experiment: {variable_name}={variable_state}")
97 |
98 | runtime = (datetime.datetime.now() - timer_start).total_seconds()
99 | total_runtime += runtime
100 |
101 | # if SVG, convert to image
102 | experiment_output_image_path = config["name"] + ".svg"
103 | if experiment_output_image_path.lower().endswith(".svg"):
104 | converted_image_output_path = Path(
105 | Path(experiment_output_image_path).parent,
106 | Path(experiment_output_image_path).stem + INKSCAPE_CONVERSION_SUFFIX,
107 | )
108 | result = subprocess.run(
109 | [
110 | "/Applications/Inkscape.app/Contents/MacOS/inkscape",
111 | experiment_output_image_path,
112 | f"--export-filename={converted_image_output_path}",
113 | f"--export-width={INKSCAPE_CONVERSION_WIDTH}",
114 | ],
115 | cwd=WORKING_DIR,
116 | check=True,
117 | capture_output=False,
118 | )
119 |
120 | # os.remove(experiment_output_image_path)
121 | experiment_output_image_path = converted_image_output_path
122 |
123 | # insert info text into output image
124 | img = cv2.imread(str(experiment_output_image_path), cv2.IMREAD_COLOR)
125 | img_annotated = np.zeros([img.shape[0] + 100, img.shape[1], 3], dtype=np.uint8)
126 | img_annotated[0 : img.shape[0], 0 : img.shape[1], :] = img
127 |
128 | cv2.putText(
129 | img_annotated,
130 | f"{variable_name}: {str(variable_state_printable):<20}",
131 | (10, img.shape[0] + 40),
132 | FONT_NAME,
133 | FONT_SCALE,
134 | (255, 255, 255),
135 | FONT_THICKNESS,
136 | )
137 |
138 | cv2.putText(
139 | img_annotated,
140 | f"{datetime.datetime.now().strftime('%Y %m %d | %H:%M:%S')}",
141 | (10, img.shape[0] + 80),
142 | FONT_NAME,
143 | FONT_SCALE,
144 | (255, 255, 255),
145 | FONT_THICKNESS,
146 | )
147 |
148 | output_filename = f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_VAR_{variable_name}_{variable_state_printable}{experiment_output_image_path.suffix}"
149 | img_annotated_path = Path(OUTPUT_DIR, output_filename)
150 | cv2.imwrite(str(img_annotated_path), img_annotated)
151 |
152 | # print runtime for each script run
153 | logger.info(f"finished variable: {variable_state_printable:10} | total time: {runtime:>6.2f}s")
154 |
155 | result_dict = {"image": img_annotated_path, "total_time": runtime}
156 | result_dict["variables"] = {}
157 | result_dict["variables"][variable_name] = variable_state
158 | overview.append(result_dict)
159 |
160 | logger.info(f"total experiment runtime: {total_runtime:>6.2f}s")
161 |
162 | # create a slideshow video file with ffmpeg
163 | # write a concat demuxer file
164 |
165 | with open(FFMPEG_TEMP_FILE, "w") as file:
166 | for res in overview:
167 | file.write(f"file '{res['image'].name}'\n")
168 | file.write(f"duration {FFMPEG_DURATION}\n")
169 | file.write(f"file '{overview[-1]['image'].name}'\n")
170 |
171 | # ffmpeg -f concat -i input.txt -vsync vfr -pix_fmt yuv420p output.mp4
172 |
173 | result = subprocess.run(
174 | [
175 | "ffmpeg",
176 | "-y",
177 | "-f",
178 | "concat",
179 | "-i",
180 | str(FFMPEG_TEMP_FILE),
181 | "-vsync",
182 | "vfr",
183 | "-pix_fmt",
184 | "yuv420p",
185 | str(FFMPEG_OUTPUT_FILE),
186 | ],
187 | cwd=WORKING_DIR,
188 | capture_output=True,
189 | check=True,
190 | )
191 |
--------------------------------------------------------------------------------
/experiments/hatching/convert_DEM_to_STL.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import textwrap
3 |
4 | import cv2
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 | from stl import mesh
8 |
9 | SAMPLING_RATE = 10 # per unit
10 | Z_HEIGHT = 1
11 |
12 |
def coord_to_ind(x, y, dim, sampling):
    """Map a 2-D sample-grid coordinate to its flat row-major vertex index.

    The grid is `dim[0] * sampling` points wide, so each full row advances
    the index by that amount.
    """
    row_length = dim[0] * sampling
    return y * row_length + x
15 |
16 |
17 | ap = argparse.ArgumentParser(description="Generate mesh file from DEM/DET images")
18 | ap.add_argument("-i", "--input", help="input file")
19 | ap.add_argument("-x", type=int, default=100, help="width")
20 | ap.add_argument("-y", type=int, default=100, help="depth")
21 | ap.add_argument("-z", type=float, default=Z_HEIGHT, help="height")
22 | ap.add_argument(
23 | "-s",
24 | "--sampling-rate",
25 | type=int,
26 | default=SAMPLING_RATE,
27 | help="number of points per unit (width/height) [int]",
28 | )
29 | ap.add_argument("--blur", type=int, default=0, help="blur input image kernel size")
30 | ap.add_argument("--output-image", default=None, help="output image filename")
31 | ap.add_argument("--output-xyz", default=None, help="output pointcloud filename")
32 | ap.add_argument("--output-stl", default=None, help="output STL filename")
33 | ap.add_argument("--output-ply", default=None, help="output PLY filename")
34 | ap.add_argument("--cutoff", action="store_true", default=False, help="cut off positive values")
35 | ap.add_argument(
36 | "--surface-only",
37 | action="store_true",
38 | default=False,
39 | help="for point cloud coordinates do not extrude the volume",
40 | )
41 |
42 | args = vars(ap.parse_args())
43 |
44 | DIMENSIONS = [args["x"], args["y"]]
45 | BLOCK_HEIGHT = args["z"] * 2
46 |
# Load the DEM raster.
# BUG FIX: the original called cv2.imread without assigning its result, and
# guarded it with `except cv2.error` -- but cv2.imread never raises on an
# unreadable file, it returns None. The rasterio fallback therefore could
# never trigger and `data` was always left as None. Assign the result and
# branch on None instead.
data = cv2.imread(args["input"], cv2.IMREAD_UNCHANGED)
if data is None:
    # OpenCV could not decode the file (e.g. a GeoTIFF with geo metadata);
    # fall back to rasterio and read the first band.
    import rasterio

    with rasterio.open(args["input"]) as dataset:
        data = dataset.read(1)
55 |
56 | # normalize
57 | data = (data - np.min(data)) / np.ptp(data)
58 |
59 | # CROP
60 | CROP_CENTER = [0.4, 0.4]
61 | CROP_SIZE = [15000, 15000]
62 | data = data[
63 | int(CROP_CENTER[1] * data.shape[1] - CROP_SIZE[1] // 2) : int(CROP_CENTER[1] * data.shape[1] + CROP_SIZE[1] // 2),
64 | int(CROP_CENTER[0] * data.shape[0] - CROP_SIZE[0] // 2) : int(CROP_CENTER[0] * data.shape[0] + CROP_SIZE[0] // 2),
65 | ]
66 |
67 | # resize so that 1 pixel equals 1 sampling point
68 | res = cv2.resize(data, [DIMENSIONS[1] * SAMPLING_RATE, DIMENSIONS[0] * SAMPLING_RATE])
69 | if args["blur"] > 0:
70 | res = cv2.blur(res, (args["blur"], args["blur"]))
71 |
72 | res = np.flip(res, axis=0)
73 |
74 | res_min = np.min(res)
75 | res_max = np.max(res)
76 | res = np.multiply(res, args["z"])
77 |
78 | # cut off the hills, keep the valleys
79 | if args["cutoff"]:
80 | res[res[:, :] > 0] = 0
81 |
82 | if args["output_image"]:
83 | plt.imsave(
84 | args["output_image"],
85 | res,
86 | vmin=-1, # -math.sqrt(2),
87 | vmax=1, # +math.sqrt(2),
88 | origin="upper",
89 | )
90 |
91 | if args["output_ply"]:
92 | s = args["sampling_rate"]
93 | num_vertices = (DIMENSIONS[0] * s) * (DIMENSIONS[1] * s)
94 | num_faces = (DIMENSIONS[0] * s - 1) * (DIMENSIONS[1] * s - 1) * 2
95 |
96 | with open(args["output_ply"], "w") as f:
97 | data = """
98 | ply
99 | format ascii 1.0
100 | element vertex {}
101 | property float x
102 | property float y
103 | property float z
104 | property uchar red
105 | property uchar green
106 | property uchar blue
107 | element face {}
108 | property list uchar int vertex_indices
109 | end_header
110 | """
111 |
112 | data = textwrap.dedent(data[1:]) # remove first newline (for dedent to work)
113 | data = data.format(num_vertices, num_faces)
114 |
115 | vertices = []
116 | faces = []
117 |
118 | f.write(data)
119 |
120 | for y in range(0, DIMENSIONS[1] * s):
121 | for x in range(0, DIMENSIONS[0] * s):
122 | pos = (res[y, x] / args["z"] - res_min) / (res_max - res_min)
123 | # c = colormap._viridis_data[int(pos * (len(colormap._viridis_data) - 1))]
124 | c = [pos, pos, pos]
125 | c = [int(x * 255) for x in c]
126 |
127 | f.write("{:.3f} {:.3f} {:.3f} {:d} {:d} {:d}\n".format(x / s, y / s, res[y, x], *c))
128 |
129 | for y in range(0, DIMENSIONS[1] * s - 1):
130 | for x in range(0, DIMENSIONS[0] * s - 1):
131 | f.write(
132 | "3 {} {} {}\n".format(
133 | coord_to_ind(x, y, DIMENSIONS, s),
134 | coord_to_ind(x + 1, y, DIMENSIONS, s),
135 | coord_to_ind(x, y + 1, DIMENSIONS, s),
136 | )
137 | )
138 |
139 | f.write(
140 | "3 {} {} {}\n".format(
141 | coord_to_ind(x + 1, y, DIMENSIONS, s),
142 | coord_to_ind(x + 1, y + 1, DIMENSIONS, s),
143 | coord_to_ind(x, y + 1, DIMENSIONS, s),
144 | )
145 | )
146 |
147 | f.write("\n")
148 |
149 | if args["output_stl"]:
150 | s = args["sampling_rate"]
151 |
152 | num_faces = (
153 | (DIMENSIONS[0] * s - 1) * (DIMENSIONS[1] * s - 1) * 4 + (DIMENSIONS[0] * s) * 4 + (DIMENSIONS[1] * s) * 4
154 | )
155 |
156 | obj = mesh.Mesh(np.zeros(num_faces, dtype=mesh.Mesh.dtype))
157 | count = 0
158 |
159 | for x in range(0, DIMENSIONS[0] * s - 1):
160 | for y in range(0, DIMENSIONS[1] * s - 1):
161 | obj.vectors[count][0] = [x / s, y / s, res[y, x]]
162 | obj.vectors[count][1] = [(x + 1) / s, y / s, res[y, x + 1]]
163 | obj.vectors[count][2] = [x / s, (y + 1) / s, res[y + 1, x]]
164 |
165 | count += 1
166 |
167 | obj.vectors[count][0] = [(x + 1) / s, y / s, res[y, x + 1]]
168 | obj.vectors[count][1] = [(x + 1) / s, (y + 1) / s, res[y + 1, x + 1]]
169 | obj.vectors[count][2] = [x / s, (y + 1) / s, res[y + 1, x]]
170 |
171 | count += 1
172 |
173 | if not args["surface_only"]:
174 | # side T/B
175 |
176 | for y in [0, DIMENSIONS[1] * s - 1]:
177 | for x in range(0, DIMENSIONS[0] * s - 1):
178 | obj.vectors[count][0] = [x / s, y / s, -BLOCK_HEIGHT]
179 | obj.vectors[count][1] = [(x + 1) / s, y / s, -BLOCK_HEIGHT]
180 | obj.vectors[count][2] = [x / s, y / s, res[y, x]]
181 |
182 | count += 1
183 |
184 | obj.vectors[count][0] = [(x + 1) / s, y / s, -BLOCK_HEIGHT]
185 | obj.vectors[count][1] = [(x + 1) / s, y / s, res[y, x + 1]]
186 | obj.vectors[count][2] = [x / s, y / s, res[y, x]]
187 |
188 | count += 1
189 |
190 | # side L/R
191 |
192 | for x in [0, DIMENSIONS[0] * s - 1]:
193 | for y in range(0, DIMENSIONS[0] * s - 1):
194 | obj.vectors[count][0] = [x / s, y / s, -BLOCK_HEIGHT]
195 | obj.vectors[count][1] = [x / s, (y + 1) / s, -BLOCK_HEIGHT]
196 | obj.vectors[count][2] = [x / s, y / s, res[y, x]]
197 |
198 | count += 1
199 |
200 | obj.vectors[count][0] = [x / s, (y + 1) / s, -BLOCK_HEIGHT]
201 | obj.vectors[count][1] = [x / s, (y + 1) / s, res[y + 1, x]]
202 | obj.vectors[count][2] = [x / s, y / s, res[y, x]]
203 |
204 | count += 1
205 |
206 | # bottom
207 |
208 | for x in range(0, DIMENSIONS[0] * s - 1):
209 | for y in range(0, DIMENSIONS[1] * s - 1):
210 | obj.vectors[count][0] = [x / s, y / s, -BLOCK_HEIGHT]
211 | obj.vectors[count][1] = [(x + 1) / s, y / s, -BLOCK_HEIGHT]
212 | obj.vectors[count][2] = [x / s, (y + 1) / s, -BLOCK_HEIGHT]
213 |
214 | count += 1
215 |
216 | obj.vectors[count][0] = [(x + 1) / s, y / s, -BLOCK_HEIGHT]
217 | obj.vectors[count][1] = [(x + 1) / s, (y + 1) / s, -BLOCK_HEIGHT]
218 | obj.vectors[count][2] = [x / s, (y + 1) / s, -BLOCK_HEIGHT]
219 |
220 | count += 1
221 |
222 | obj.save(args["output_stl"])
223 |
224 | if args["output_xyz"]:
225 | with open(args["output_xyz"], "w") as f:
226 | for i in range(0, DIMENSIONS[1] * args["sampling_rate"]):
227 | for j in range(0, DIMENSIONS[0] * args["sampling_rate"]):
228 | f.write("{} {} {}\n".format(j / args["sampling_rate"], i / args["sampling_rate"], res[i, j]))
229 |
230 | if not args["surface_only"]:
231 | ## additional points
232 | # side L/R
233 |
234 | for i in range(0, DIMENSIONS[1] * args["sampling_rate"]):
235 | for j in [0, DIMENSIONS[0] * args["sampling_rate"] - 1]:
236 | zs = np.linspace(
237 | -BLOCK_HEIGHT,
238 | res[i, j] * args["z"],
239 | int(BLOCK_HEIGHT * args["sampling_rate"]),
240 | endpoint=False,
241 | )
242 | for z in zs:
243 | f.write("{} {} {}\n".format(j / args["sampling_rate"], i / args["sampling_rate"], z))
244 |
245 | # side T/B
246 |
247 | for i in [0, DIMENSIONS[1] * args["sampling_rate"] - 1]:
248 | for j in range(0, DIMENSIONS[0] * args["sampling_rate"]):
249 | zs = np.linspace(
250 | -BLOCK_HEIGHT,
251 | res[i, j] * args["z"],
252 | int(BLOCK_HEIGHT * args["sampling_rate"]),
253 | endpoint=False,
254 | )
255 | for z in zs:
256 | f.write("{} {} {}\n".format(j / args["sampling_rate"], i / args["sampling_rate"], z))
257 |
258 | # bottom plane
259 |
260 | for j in linx[1:]:
261 | for i in liny[1:]:
262 | f.write("{} {} {}\n".format(j / args["ppu"], i / args["ppu"], -BLOCK_HEIGHT))
263 |
--------------------------------------------------------------------------------
/experiments/hatching/convert_STL_to_DEM.py:
--------------------------------------------------------------------------------
"""Convert STL meshes back into DEM-style rasters via vertical ray casting.

For each input mesh a regular grid of downward-pointing rays is cast onto
the surface; the z-coordinate of each hit is written into a float32
heightmap which is saved as a 32-bit TIFF plus an 8-bit PNG preview.
"""

from pathlib import Path

import numpy as np
import trimesh
from PIL import Image

STL_PATHS = [
    # "hatching_dem.stl",
    # "slope_test.stl",
    # "slope_test_2.stl",
    # "slope_test_3.stl",
    # "slope_test_4.stl",
    "slope_test_5.stl"
]
INPUT_PATH = "data"
OUTPUT_PATH = "data"

INP_DIMENSIONS = [100, 100]  # sampled mesh extent in model units
OUT_DIMENSIONS = [1000, 1000]  # output raster size in pixels

for stl in STL_PATHS:
    stl_path = Path(INPUT_PATH, stl)
    mesh = trimesh.load_mesh(str(stl_path))

    # model units -> pixel coordinates
    scaler = [
        OUT_DIMENSIONS[0] / INP_DIMENSIONS[0],
        OUT_DIMENSIONS[1] / INP_DIMENSIONS[1],
        1,  # 255 / mesh.bounds[1, 2]
    ]

    xs = np.linspace(0, INP_DIMENSIONS[0], num=OUT_DIMENSIONS[0], endpoint=False)
    ys = np.linspace(0, INP_DIMENSIONS[1], num=OUT_DIMENSIONS[1], endpoint=False)

    xv, yv = np.meshgrid(xs, ys)

    # start all rays slightly above the mesh's highest z value
    zv = np.zeros_like(xv)
    zv.fill(mesh.bounds[1, 2] * 1.10)

    ray_origins = np.dstack((xv, yv, zv)).reshape([xs.shape[0] * ys.shape[0], 3])
    ray_directions = np.tile(np.array([0, 0, -100]), (ray_origins.shape[0], 1))

    locations, index_ray, index_tri = mesh.ray.intersects_location(
        ray_origins=ray_origins, ray_directions=ray_directions
    )

    output = np.zeros([*OUT_DIMENSIONS], dtype=np.float32)

    for loc in locations:
        # switch row col / Y axis flip
        output[(OUT_DIMENSIONS[1] - 1) - int(loc[1] * scaler[1]), int(loc[0] * scaler[0])] = loc[2] * scaler[2]

    output_filename = Path(OUTPUT_PATH, f"{stl_path.stem}.tif")
    im = Image.fromarray(output, mode="F")  # float32
    im.save(output_filename, "TIFF")
    print(f"written output file: {output_filename}")

    output_filename = Path(OUTPUT_PATH, f"{stl_path.stem}.png")

    # Map [z_min, z_max] linearly onto [0, 255] for the 8-bit preview.
    scale = (255 - 0) / (mesh.bounds[1, 2] - mesh.bounds[0, 2])
    offset = 0 - mesh.bounds[0, 2]
    # BUGFIX: shift to zero *before* scaling. The previous order
    # (multiply by scale first, then add the unscaled offset) computed
    # v*scale - z_min instead of (v - z_min)*scale, which is wrong for
    # any mesh whose minimum z is not exactly 0.
    output += offset
    output *= scale

    output = output.astype(np.uint8)
    im = Image.fromarray(output)
    im.save(output_filename, "PNG")
    print(f"written output file: {output_filename}")
--------------------------------------------------------------------------------
/experiments/hatching/crop_geotiff.py:
--------------------------------------------------------------------------------
"""Cut a fixed-size window out of a large GeoTIFF and store it as a plain TIFF."""

import rasterio
from matplotlib import pyplot
import tifffile
import cv2

# GEOTIFF_PATH = "data/GebcoToBlender/gebco_mosaic.tif"
GEOTIFF_PATH = "data/GebcoToBlender/fullsize_reproject.tif"
OUTPUT_PATH = "data/gebco_crop.tif"

# Alternative crop centers (lon/lat), kept for reference:
# CENTER_LON_LAT = [139.839478, 35.652832]
# CENTER_LON_LAT = [0, 0] # Null Island
# CENTER_LON_LAT = [-45, -25] # North Atlantic ridge
# CENTER_LON_LAT = [-20, -40]
# CENTER_LON_LAT = [146.7996121929291, -43.512453249372804]
# CENTER_LON_LAT = [175.9266138226686, 52.354513899345434] # Aleutian trench
# CENTER_LON_LAT = [-14.373269321117954, -7.9386538081877935] # Ascension Island

CROP_SIZE = [10000, 10000]
CENTER = [0.4, 0.4]
# CENTER = [0.6, 0.4]

OUTPUT_SIZE = CROP_SIZE

with rasterio.open(GEOTIFF_PATH) as dataset:
    raster = dataset.read(1)
    # y, x = dataset.index(*CENTER_LON_LAT)

    # crop center in pixel coordinates, given as fractions of the raster extent
    center_x = int(CENTER[0] * raster.shape[1])
    center_y = int(CENTER[1] * raster.shape[0])

    half_w = CROP_SIZE[0] // 2
    half_h = CROP_SIZE[1] // 2

    window = raster[center_y - half_h : center_y + half_h, center_x - half_w : center_x + half_w]

    # resample the crop to the requested output resolution
    window = cv2.resize(window, OUTPUT_SIZE)

    tifffile.imwrite(OUTPUT_PATH, window)
--------------------------------------------------------------------------------
/experiments/hatching/edge_detection.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import cv2
4 | import numpy as np
5 | from matplotlib import pyplot as plt
6 |
7 | from experiments.hatching.slope import get_slope
8 |
INPUT_FILE = Path("shaded_relief3.png")
# INPUT_FILE = Path("shaded_relief4.png")

OUTPUT_PATH = Path("experiments/hatching/output")

# Canny edge detection parameters (used by the commented-out experiments below):
THRESHOLD = 100  # lower hysteresis threshold

ratio = 3  # upper hysteresis threshold = THRESHOLD * ratio
kernel_size = 3  # aperture size
19 |
20 | def _convert_to_uint8(m: np.array) -> np.array:
21 | if np.min(m) < 0:
22 | m += abs(np.min(m))
23 | m *= 255.0 / np.max(m)
24 | return m.astype(np.uint8)
25 |
26 |
# EXPORT IMAGES FOR EXTERNAL PROCESSING

ELEVATION_FILE = Path("experiments/hatching/data/gebco_crop.tif")
SAMPLING_STEP = 1  # passed through to get_slope()
# BLUR = [0, 3, 5, 7, 9, 11, 13, 15, 17]
BLUR = [21, 31, 41, 51]  # box-blur kernel sizes to sweep over

# For every blur kernel size: blur the elevation raster, derive the slope
# fields via get_slope(), and write angles / inclination / elevation as
# colormapped PNGs for inspection in external tools.
for blur_kernel_size in BLUR:
    print(f"BLUR: {blur_kernel_size}")

    elevation = (cv2.imread(str(ELEVATION_FILE), cv2.IMREAD_UNCHANGED)).astype(np.float64)

    if blur_kernel_size > 0:
        elevation = cv2.blur(elevation, (blur_kernel_size, blur_kernel_size))

    X, Y, dX, dY, angles, inclination = get_slope(elevation, SAMPLING_STEP)

    # normalize to [0, 1], apply a matplotlib colormap (RGBA floats), scale to 255
    angles_colormapped = plt.cm.hsv((angles - np.min(angles)) / np.ptp(angles)) * 255
    print(f"angles mapped {angles_colormapped.min()} {angles_colormapped.max()}")
    angles_colormapped = angles_colormapped.astype(np.uint8)[:, :, 0:3]  # drop the alpha channel
    cv2.imwrite(
        Path(OUTPUT_PATH, f"gebco_crop_angles_b{blur_kernel_size}.png"),
        cv2.cvtColor(angles_colormapped, cv2.COLOR_RGB2BGR),
    )

    inclination_colormapped = plt.cm.viridis((inclination - np.min(inclination)) / np.ptp(inclination)) * 255
    print(f"inclination mapped {inclination_colormapped.min()} {inclination_colormapped.max()}")
    inclination_colormapped = inclination_colormapped.astype(np.uint8)[:, :, 0:3]
    cv2.imwrite(
        Path(OUTPUT_PATH, f"gebco_crop_inclination_b{blur_kernel_size}.png"),
        cv2.cvtColor(inclination_colormapped, cv2.COLOR_RGB2BGR),
    )

    elevation_colormapped = plt.cm.viridis((elevation - np.min(elevation)) / np.ptp(elevation)) * 255
    print(f"elevation mapped {elevation_colormapped.min()} {elevation_colormapped.max()}")
    elevation_colormapped = elevation_colormapped.astype(np.uint8)[:, :, 0:3]
    cv2.imwrite(
        Path(OUTPUT_PATH, f"gebco_crop_elevation_b{blur_kernel_size}.png"),
        cv2.cvtColor(elevation_colormapped, cv2.COLOR_RGB2BGR),
    )


# NOTE: everything below this exit() is currently unreachable scratch code.
exit()
70 |
# ---------------------------------------------------------------------------
# Edge-detection experiments on the slope fields of a GEBCO elevation crop.
# Unreachable while the exit() above is active; kept runnable for when the
# export block is disabled. Large commented-out scratch variants removed.
# ---------------------------------------------------------------------------

# ELEVATION_FILE = Path("experiments/hatching/data/GebcoToBlender/reproject.tif")
ELEVATION_FILE = Path("experiments/hatching/data/gebco_crop.tif")
CROP_SIZE = [5000, 5000]
SAMPLING_STEP = 1

elevation = (cv2.imread(str(ELEVATION_FILE), cv2.IMREAD_UNCHANGED)).astype(np.float64)

# clamp land and shallow water so the analysis focuses on the deeper sea floor
elevation[elevation > -500] = -500

TARGET_RESOLUTION = [elevation.shape[1], elevation.shape[0]]

# center crop
elevation = elevation[
    TARGET_RESOLUTION[1] // 2 - CROP_SIZE[1] // 2 : TARGET_RESOLUTION[1] // 2 + CROP_SIZE[1] // 2,
    TARGET_RESOLUTION[0] // 2 - CROP_SIZE[0] // 2 : TARGET_RESOLUTION[0] // 2 + CROP_SIZE[0] // 2,
]

X, Y, dX, dY, angles, inclination = get_slope(elevation, SAMPLING_STEP)

# 2nd derivative (Laplacian) of the slope angles
angles_deriv2 = _convert_to_uint8(np.abs(cv2.Laplacian(angles, cv2.CV_64F)))

# 1st derivative via Sobel in both axes, combined per-pixel
sobelx = cv2.Sobel(angles, cv2.CV_64F, 1, 0, ksize=5)
sobely = cv2.Sobel(angles, cv2.CV_64F, 0, 1, ksize=5)
angles_deriv1 = np.maximum(cv2.convertScaleAbs(sobelx), cv2.convertScaleAbs(sobely))

inclination_deriv2 = _convert_to_uint8(np.abs(cv2.Laplacian(inclination, cv2.CV_64F)))

ang_inc = angles_deriv2 * inclination
# BUGFIX: was logger.debug(...) but `logger` is never imported in this file
# (NameError); print() matches the rest of the script.
print(f"anginc min: {np.min(ang_inc)} | max: {np.max(ang_inc)}")
ang_inc = _convert_to_uint8(ang_inc)

_, angle_inc_thres = cv2.threshold(ang_inc, 10, 255, cv2.THRESH_BINARY)

# 3x4 overview figure
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4)
plt.tight_layout(pad=0.01)

ax1.imshow(cv2.blur(angles, (5, 5)), cmap="hsv")
ax1.set_title("angles")

# 2nd derivative of the angles at three downsampled resolutions
ksize = 3
rsize = 4000
angles_deriv2 = _convert_to_uint8(
    np.abs(cv2.Laplacian(cv2.blur(cv2.resize(angles, [rsize, rsize]), (ksize, ksize)), cv2.CV_64F))
)
ax2.imshow(angles_deriv2)
ax2.set_title(f"angles 2nd deriv / rsize {rsize}")

rsize = 3000
angles_deriv2 = _convert_to_uint8(
    np.abs(cv2.Laplacian(cv2.blur(cv2.resize(angles, [rsize, rsize]), (ksize, ksize)), cv2.CV_64F))
)
ax3.imshow(angles_deriv2)
ax3.set_title(f"angles 2nd deriv / rsize {rsize}")

rsize = 2000
angles_deriv2 = _convert_to_uint8(
    np.abs(cv2.Laplacian(cv2.blur(cv2.resize(angles, [rsize, rsize]), (ksize, ksize)), cv2.CV_64F))
)
ax4.imshow(angles_deriv2)
ax4.set_title(f"angles 2nd deriv / rsize {rsize}")

ax6.imshow(angle_inc_thres)
ax6.set_title("angle_inc_thres")

inclination_clipped = np.clip(inclination, 0, np.max(inclination) * 0.5)
ax7.imshow(inclination_clipped)
ax7.set_title("inclination (clipped)")

ax8.imshow(np.clip(inclination_deriv2, 0, np.max(inclination_deriv2) * 0.25))
ax8.set_title("inclination_deriv2")

ax9.imshow(elevation)
ax9.set_title("elevation")

elevation_deriv2 = _convert_to_uint8(np.abs(cv2.Laplacian(elevation, cv2.CV_64F)))
# BUGFIX: was ax10.imshow(elevation) — plot the derivative the title announces.
# (This panel is immediately overdrawn by the blur-21 variant below, preserved
# from the original code.)
ax10.imshow(elevation_deriv2)
ax10.set_title("elevation 2nd deriv")

elevation_deriv2 = _convert_to_uint8(np.abs(cv2.Laplacian(cv2.blur(elevation, (21, 21)), cv2.CV_64F)))
ax10.imshow(elevation_deriv2)
ax10.set_title("elevation 2nd deriv / blur 21")

elevation_deriv2 = _convert_to_uint8(np.abs(cv2.Laplacian(cv2.blur(elevation, (23, 23)), cv2.CV_64F)))
ax11.imshow(elevation_deriv2)
ax11.set_title("elevation 2nd deriv / blur 23")

elevation_deriv2 = _convert_to_uint8(np.abs(cv2.Laplacian(cv2.blur(elevation, (25, 25)), cv2.CV_64F)))
ax12.imshow(elevation_deriv2)
ax12.set_title("elevation 2nd deriv / blur 25")

fig.set_figheight(3 * 10)
fig.set_figwidth(4 * 10)
ax2.get_yaxis().set_visible(False)
ax3.get_yaxis().set_visible(False)
ax5.get_yaxis().set_visible(False)
ax6.get_yaxis().set_visible(False)
ax7.get_yaxis().set_visible(False)
ax8.get_yaxis().set_visible(False)
ax9.get_yaxis().set_visible(False)
ax10.get_yaxis().set_visible(False)
ax11.get_yaxis().set_visible(False)
ax12.get_yaxis().set_visible(False)

plt.savefig(Path(OUTPUT_PATH, "edge_overview.png"))
--------------------------------------------------------------------------------
/experiments/hatching/flowlines_stoplines.py:
--------------------------------------------------------------------------------
1 | import cProfile as profile
2 | import datetime
3 | import math
4 | from collections import deque
5 | from dataclasses import dataclass
6 | from pathlib import Path
7 |
8 | import cv2
9 | import numpy as np
10 | import shapely
11 | from loguru import logger
12 | from matplotlib import pyplot as plt
13 | from shapely import LineString, Polygon
14 |
15 | from experiments.hatching.slope import get_slope
16 | from lineworld.core.svgwriter import SvgWriter
17 | from lineworld.util.gebco_grid_to_polygon import _extract_polygons, get_elevation_bounds
18 |
19 |
@dataclass
class FlowlineHatcherConfig:
    """Tuning parameters for FlowlineHatcher."""

    LINE_DISTANCE: tuple[float, float] = (2, 40)  # distance between lines (min, max), mapped from the density raster
    LINE_STEP_DISTANCE: float = 1.0  # distance between points constituting a line

    MAX_ANGLE_DISCONTINUITY: float = math.pi / 2  # max difference (in radians) in slope between line points
    MIN_INCLINATION: float = 0.1  # 50.0  # lines stop where abs(inclination) drops below this

    SEEDPOINT_EXTRACTION_SKIP_LINE_SEGMENTS: int = (
        20  # How many line segments should be skipped before the next seedpoint is extracted
    )
    LINE_MAX_SEGMENTS: int = 300  # per-line segment cap; 0 disables the cap

    BLUR_ANGLES: bool = True  # box-blur the angles raster before hatching
    BLUR_DENSITY_MAP: bool = True  # box-blur the density raster before hatching

    COLLISION_APPROXIMATE: bool = True  # raster-based collision test instead of exact point distances
37 |
38 |
class FlowlineHatcher:
    """Grows hatching lines that follow the slope direction of a terrain raster.

    Starting from a grid of seed points, each line is extended point by point
    along the local slope angle in both directions until it hits an existing
    line, leaves the bounding box, reaches near-flat terrain, or encounters an
    angle discontinuity. New seed points are spawned sideways from finished
    lines (offset by the local density value) so the area fills with roughly
    evenly spaced flowlines.
    """

    def __init__(
        self,
        polygon: Polygon,
        elevation: np.ndarray,
        angles: np.ndarray,
        inclination: np.ndarray,
        density: np.ndarray,
        config: FlowlineHatcherConfig,
    ):
        """
        Args:
            polygon: area to hatch, in raster (pixel) coordinates
            elevation: elevation raster (used for the debug visualization)
            angles: slope direction per pixel, in radians (consumed via sin/cos)
            inclination: slope steepness per pixel; compared against MIN_INCLINATION
            density: desired line spacing in pixels per raster cell
            config: tuning parameters
        """
        self.polygon = polygon
        self.config = config

        # pad each raster by one row (bottom) and one column (right) so that
        # integer rounding at the bbox border cannot index out of bounds
        self.elevation = np.pad(elevation, (1, 1), "edge")[1:, 1:]
        self.angles = np.pad(angles, (1, 1), "edge")[1:, 1:]
        self.inclination = np.pad(inclination, (1, 1), "edge")[1:, 1:]
        self.density = np.pad(density, (1, 1), "edge")[1:, 1:]

        # normalize the bounding box to start at the origin
        self.bbox = self.polygon.bounds
        self.bbox = [
            0,
            0,
            math.ceil(self.bbox[2] - self.bbox[0]),
            math.ceil(self.bbox[3] - self.bbox[1]),
        ]  # minx, miny, maxx, maxy

        if self.config.BLUR_ANGLES:
            self.angles = cv2.blur(self.angles, (100, 100))
            # self.angles = cv2.GaussianBlur(self.angles, (11, 11), 0)

        if self.config.BLUR_DENSITY_MAP:
            self.density = cv2.blur(self.density, (60, 60))

        # per-pixel buckets of exact line points, keyed "x,y"
        # (used by the precise collision test)
        self.point_map = {}
        for x in range(self.bbox[2] + 1):
            for y in range(self.bbox[3] + 1):
                self.point_map[f"{x},{y}"] = []

        # boolean occupancy raster (used by the approximate collision test)
        self.point_raster = np.zeros([self.bbox[3] + 1, self.bbox[2] + 1], dtype=bool)

    def _collision_approximate(self, x: float, y: float) -> bool:
        """Return True if any occupied raster cell lies within a density-sized
        square window around (x, y)."""
        x = int(x)
        y = int(y)
        half_d = int(self.density[y, x] / 2)

        return np.any(
            self.point_raster[
                max(y - half_d, 0) : min(y + half_d, self.point_raster.shape[0]),
                max(x - half_d, 0) : min(x + half_d, self.point_raster.shape[1]),
            ]
        )

    def _collision_precise(self, x: float, y: float) -> bool:
        """Return True if any previously stored point lies within the local
        density distance of (x, y), using exact euclidean distances."""
        rx = round(x)
        ry = round(y)
        d = self.density[ry, rx]
        half_d = math.ceil(d / 2)

        x_minmax = [max(rx - half_d, 0), min(rx + half_d, self.point_raster.shape[1])]
        y_minmax = [max(ry - half_d, 0), min(ry + half_d, self.point_raster.shape[0])]

        for ix in range(*x_minmax):
            for iy in range(*y_minmax):
                for p in self.point_map[f"{ix},{iy}"]:
                    if math.sqrt((p[0] - x) ** 2 + (p[1] - y) ** 2) < d:
                        return True

        return False

    def _collision(self, x: float, y: float) -> bool:
        """Dispatch to the approximate or precise collision test per config."""
        if self.config.COLLISION_APPROXIMATE:
            return self._collision_approximate(x, y)
        else:
            return self._collision_precise(x, y)

    def _next_point(self, x1: float, y1: float, forwards: bool) -> tuple[float, float] | None:
        """Compute the next line point one LINE_STEP_DISTANCE away from (x1, y1)
        along the local slope angle (reversed when forwards is False).

        Returns None when the line must terminate: the terrain is too flat,
        the point leaves the bounding box, collides with an existing line, or
        the slope angle changes more than MAX_ANGLE_DISCONTINUITY.
        """
        rx1 = int(x1)
        ry1 = int(y1)

        a1 = self.angles[ry1, rx1]
        inc = self.inclination[ry1, rx1]

        if abs(inc) < self.config.MIN_INCLINATION:
            return None

        dir = 1
        if not forwards:
            dir = -1

        x2 = x1 + self.config.LINE_STEP_DISTANCE * math.cos(a1) * dir
        y2 = y1 + self.config.LINE_STEP_DISTANCE * math.sin(a1) * dir

        # if not self.polygon.contains(Point(x2, y2)):
        #     return None

        if x2 < 0 or x2 > self.bbox[2] or y2 < 0 or y2 > self.bbox[3]:  # TODO
            return None

        if self._collision(x2, y2):
            return None

        if self.config.MAX_ANGLE_DISCONTINUITY > 0:
            a2 = self.angles[int(y2), int(x2)]

            if abs(a2 - a1) > self.config.MAX_ANGLE_DISCONTINUITY:
                return None

        return (x2, y2)

    def _seed_points(self, line_points: list[tuple[float, float]]) -> list[tuple[float, float]]:
        """Derive new seed points from a finished line.

        Every SEEDPOINT_EXTRACTION_SKIP_LINE_SEGMENTS segments, a candidate is
        placed perpendicular to the line (alternating sides), offset by the
        local density value. Candidates outside the bounding box are dropped.
        """
        num_seedpoints = 1
        seed_points = []

        if len(line_points) > self.config.SEEDPOINT_EXTRACTION_SKIP_LINE_SEGMENTS:
            num_seedpoints = (len(line_points) - 1) // self.config.SEEDPOINT_EXTRACTION_SKIP_LINE_SEGMENTS

        for i in range(num_seedpoints):
            x1, y1 = line_points[i * self.config.SEEDPOINT_EXTRACTION_SKIP_LINE_SEGMENTS]
            x2, y2 = line_points[i * self.config.SEEDPOINT_EXTRACTION_SKIP_LINE_SEGMENTS + 1]

            # midpoint
            x3 = x1 + (x2 - x1) / 2.0
            y3 = y1 + (y2 - y1) / 2.0

            a1 = math.atan2(y1 - y3, x1 - x3)

            # rotate 90 degrees, alternating left/right of the line
            a2 = a1
            if i % 2 == 0:
                a2 += math.radians(90)
            else:
                a2 -= math.radians(90)

            # offset vector of length density, rotated by a2
            x4 = self.density[int(y3), int(x3)]
            y4 = 0

            x5 = x4 * math.cos(a2) - y4 * math.sin(a2) + x3
            y5 = x4 * math.sin(a2) + y4 * math.cos(a2) + y3

            # if not self.polygon.contains(Point([x5, y5])):
            #     continue

            if x5 < 0 or x5 > self.bbox[2] or y5 < 0 or y5 > self.bbox[3]:  # TODO
                continue

            seed_points.append([x5, y5])

        return seed_points

    def _debug_viz(self, linestrings: list[LineString]) -> None:
        """Write debug images (rendered lines + input rasters overview) to the
        module-level OUTPUT_PATH directory."""
        output = np.zeros([self.elevation.shape[0], self.elevation.shape[1], 3], dtype=np.uint8)
        for ls in linestrings:
            line_points = ls.coords
            for i in range(len(line_points) - 1):
                start = (round(line_points[i][0]), round(line_points[i][1]))
                end = (round(line_points[i + 1][0]), round(line_points[i + 1][1]))
                cv2.line(output, start, end, (255, 255, 255), 2)
        cv2.imwrite(Path(OUTPUT_PATH, "flowlines.png"), ~output)

        fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3)

        ax1.imshow(self.elevation, cmap="binary")
        ax1.set_title("elevation")

        ax2.imshow(self.angles)
        ax2.set_title("angles")

        ax3.imshow(self.inclination)
        ax3.set_title("inclination")

        ax4.imshow(self.density, cmap="gray")
        ax4.set_title("density")

        ax5.imshow(self.point_raster)
        ax5.set_title("collision map")

        ax6.imshow(~output)
        ax6.set_title("output")

        fig.set_figheight(12)
        fig.set_figwidth(20)
        ax2.get_yaxis().set_visible(False)
        ax3.get_yaxis().set_visible(False)
        ax5.get_yaxis().set_visible(False)
        ax6.get_yaxis().set_visible(False)

        plt.tight_layout = True

        plt.savefig(Path(OUTPUT_PATH, "flowlines_overview.png"))

        # cv2.imwrite(Path(OUTPUT_PATH, "flowlines.png"), output)

        # output_filename = f"flowlines_BLURANGLES-{BLUR_ANGLES}_BLURDENSITY-{BLUR_DENSITY_MAP}.png"
        # cv2.imwrite(Path(OUTPUT_PATH, output_filename), output)

    def hatch(self) -> list[LineString]:
        """Run the hatching algorithm and return the generated lines.

        Seeds a 100x100 grid of starting points, then processes the queue:
        grow a line from each non-colliding seed in both directions, register
        its points for collision testing, and enqueue new sideways seed points
        extracted from the finished line.
        """
        # output = np.zeros([1000, 1000, 3], dtype=np.uint8)

        linestrings = []
        starting_points = deque()

        for i in np.linspace(self.bbox[0] + 1, self.bbox[2] - 1, num=100):
            for j in np.linspace(self.bbox[1] + 1, self.bbox[3] - 1, num=100):
                starting_points.append([i, j])

        while len(starting_points) > 0:
            seed = starting_points.popleft()

            if self._collision(*seed):
                continue

            line_points = deque([seed])

            # follow gradient up
            for i in range(10000):
                if self.config.LINE_MAX_SEGMENTS > 0 and len(line_points) >= self.config.LINE_MAX_SEGMENTS:
                    break

                p = self._next_point(*line_points[-1], True)

                if p is None:
                    break

                line_points.append(p)

            # follow gradient down
            for i in range(10000):
                if self.config.LINE_MAX_SEGMENTS > 0 and len(line_points) >= self.config.LINE_MAX_SEGMENTS:
                    break

                p = self._next_point(*line_points[0], False)

                if p is None:
                    break

                line_points.appendleft(p)

            # a single point is not a line
            if len(line_points) < 2:
                continue

            linestrings.append(LineString(line_points))

            # seed points
            seed_points = self._seed_points(line_points)
            starting_points.extendleft(seed_points)

            # collision checks
            for lp in line_points:
                x = int(lp[0])
                y = int(lp[1])
                if self.config.COLLISION_APPROXIMATE:
                    self.point_raster[y, x] = True
                else:
                    self.point_map[f"{x},{y}"].append(lp)

            # viz
            # cv2.circle(output, (round(seed[0]), round(seed[1])), 2, (255, 0, 0), -1)
            # for i in range(len(line_points) - 1):
            #     start = (round(line_points[i][0]), round(line_points[i][1]))
            #     end = (round(line_points[i + 1][0]), round(line_points[i + 1][1]))
            #     cv2.line(output, start, end, (255, 255, 255), 2)

        return linestrings
301 |
302 |
ELEVATION_FILE = Path("experiments/hatching/data/flowlines_gebco_crop.tif")
DENSITY_FILE = ELEVATION_FILE

# ELEVATION_FILE = Path("experiments/hatching/data/slope_test_5.tif")
# DENSITY_FILE = ELEVATION_FILE
# TARGET_RESOLUTION = [1000, 1000]

OUTPUT_PATH = Path("experiments/hatching/output")

if __name__ == "__main__":
    # sanity checks:

    # if not (LINE_STEP_DISTANCE < LINE_DISTANCE[0]):
    #     raise Exception("distance between points of a line must be smaller than the distance between lines")

    c = FlowlineHatcherConfig()

    # elevation raster, read unchanged (float GeoTIFF)
    data = cv2.imread(str(ELEVATION_FILE), cv2.IMREAD_UNCHANGED)

    logger.debug(f"data {ELEVATION_FILE} min: {np.min(data)} | max: {np.max(data)}")

    # density source: a .tif is read unchanged, anything else as grayscale
    density_data = None
    if DENSITY_FILE.suffix.endswith(".tif"):
        # BUGFIX: read DENSITY_FILE here — previously this branch read
        # ELEVATION_FILE, which only worked because both constants happen
        # to point at the same file right now.
        density_data = cv2.imread(str(DENSITY_FILE), cv2.IMREAD_UNCHANGED)
    else:
        density_data = cv2.imread(str(DENSITY_FILE), cv2.IMREAD_GRAYSCALE)

    # normalize to [0, 1], then map linearly onto [LINE_DISTANCE min, max]
    density_normalized = (density_data - np.min(density_data)) / (np.max(density_data) - np.min(density_data))
    density = np.full(density_data.shape, c.LINE_DISTANCE[0], dtype=float) + (
        density_normalized * (c.LINE_DISTANCE[1] - c.LINE_DISTANCE[0])
    )

    X, Y, dX, dY, angles, inclination = get_slope(data, 10)

    timer = datetime.datetime.now()

    hatcher = FlowlineHatcher(
        shapely.box(0, 0, data.shape[1], data.shape[0]),
        data,
        angles,
        inclination,
        density,
        c,
    )

    # profile the hatching run; inspect with e.g. snakeviz profile.pstat
    pr = profile.Profile()
    pr.enable()

    linestrings = hatcher.hatch()

    pr.disable()
    pr.dump_stats("profile.pstat")

    total_time = (datetime.datetime.now() - timer).total_seconds()
    avg_line_length = sum([x.length for x in linestrings]) / len(linestrings)

    logger.info(f"total time: {total_time:5.2f}s")
    logger.info(f"avg line length: {avg_line_length:5.2f}")

    hatcher._debug_viz(linestrings)

    # export results as SVG: flowlines plus land polygons as a colored backdrop
    svg = SvgWriter(Path(OUTPUT_PATH, "flowlines.svg"), data.shape)

    options = {"fill": "none", "stroke": "black", "stroke-width": "2"}
    svg.add("flowlines", linestrings, options=options)

    land_polys = _extract_polygons(data, *get_elevation_bounds([0, 10_000], 1)[0], True)
    options_land = {"fill": "green", "stroke": "none", "fill-opacity": "0.5"}
    svg.add("land", land_polys, options=options_land)

    svg.write()
--------------------------------------------------------------------------------
/experiments/hatching/gebco_to_blender.py:
--------------------------------------------------------------------------------
1 | import os
2 | from contextlib import ExitStack
3 | from pathlib import Path
4 |
5 | import rasterio
6 | from loguru import logger
7 | from rasterio.merge import merge
8 | from rasterio.warp import calculate_default_transform, reproject, Resampling
9 |
# source of the GEBCO 2024 GeoTIFF tiles
DATA_URL = "https://www.bodc.ac.uk/data/open_download/gebco/gebco_2024/geotiff/"

DATA_DIR = Path("experiments/hatching/data", "GebcoToBlender".lower())
TILES_DIR = Path(DATA_DIR, "tiles")
SCALED_DIR = Path(DATA_DIR, "scaled")
MOSAIC_FILE = Path(DATA_DIR, "gebco_mosaic.tif")
REPROJECT_FILE = Path(DATA_DIR, "blender_reproject.tif")

GEOTIFF_SCALING_FACTOR = 1 / 8  # correct ratio for blender

# NOTE(review): the two assignments below deliberately override the
# Blender-specific settings above, switching the script to full-size output;
# comment them out to restore the 1/8-scale Blender variant.
REPROJECT_FILE = Path(DATA_DIR, "fullsize_reproject.tif")
GEOTIFF_SCALING_FACTOR = 1

OVERWRITE = True  # regenerate existing output files
BATHYMETRY_ONLY = False  # clamp above-sea-level terrain to 0 during reprojection
25 |
26 |
def downscale_and_write(input_path: Path, output_path: Path, scaling_factor: float) -> None:
    """Read a GEBCO GeoTIFF, downscale it by *scaling_factor* with bilinear
    resampling, and write the result (with an adjusted geotransform) to
    *output_path*."""

    with rasterio.open(input_path) as src:
        resampled = src.read(
            out_shape=(
                src.count,
                int(src.height * scaling_factor),
                int(src.width * scaling_factor),
            ),
            resampling=Resampling.bilinear,
        )

        # scale the geotransform to match the new pixel size
        new_transform = src.transform * src.transform.scale(
            (src.width / resampled.shape[-1]), (src.height / resampled.shape[-2])
        )

        profile = {
            "driver": "GTiff",
            "height": resampled.shape[-2],
            "width": resampled.shape[-1],
            "count": 1,
            "dtype": resampled.dtype,
            "crs": src.crs,
            "transform": new_transform,
        }

        with rasterio.open(output_path, "w", **profile) as dst:
            dst.write(resampled)
56 |
57 |
def merge_and_write(geotiff_paths: list[Path], output_path: Path) -> None:
    """Merge several GeoTIFF tiles into one mosaic and write it to *output_path*."""
    with ExitStack() as stack:
        # all tile datasets must stay open simultaneously for merge()
        datasets = []
        for tile_path in geotiff_paths:
            datasets.append(stack.enter_context(rasterio.open(tile_path)))

        mosaic, mosaic_transform = merge(datasets, resampling=Resampling.bilinear)

        profile = {
            "driver": "GTiff",
            "height": mosaic.shape[-2],
            "width": mosaic.shape[-1],
            "count": 1,
            "dtype": mosaic.dtype,
            "crs": datasets[0].crs,
            "transform": mosaic_transform,
        }

        with rasterio.open(output_path, "w", **profile) as dst:
            dst.write(mosaic)
76 |
77 |
def reproject_dataset(src: Path, dst: Path) -> None:
    """Reproject the GeoTIFF at *src* into the ESRI:54029 CRS and write it to *dst*.

    If the module-level BATHYMETRY_ONLY flag is set, all values above sea
    level are clamped to 0 before reprojection.
    """
    dst_crs = "ESRI:54029"

    # local names avoid the original's shadowing of the src/dst parameters
    with rasterio.open(src) as src_ds:
        transform, width, height = calculate_default_transform(
            src_ds.crs, dst_crs, src_ds.width, src_ds.height, *src_ds.bounds
        )
        dst_meta = src_ds.meta.copy()
        dst_meta.update({"crs": dst_crs, "transform": transform, "width": width, "height": height})

        with rasterio.open(dst, "w", **dst_meta) as dst_ds:
            for band_index in range(1, src_ds.count + 1):
                band_arr = src_ds.read(band_index)

                # remove any above-waterlevel terrain
                if BATHYMETRY_ONLY:
                    band_arr[band_arr > 0] = 0

                reproject(
                    source=band_arr,
                    destination=rasterio.band(dst_ds, band_index),
                    src_transform=src_ds.transform,
                    src_crs=src_ds.crs,
                    dst_transform=transform,
                    dst_crs=dst_crs,
                    resampling=Resampling.nearest,
                )
103 |
104 |
if __name__ == "__main__":
    logger.info("extracting elevation data from GeoTiffs")

    # Downscaling: collect the raw GEBCO tiles
    dataset_files = [f for f in TILES_DIR.iterdir() if f.is_file() and f.suffix == ".tif"]
    if len(dataset_files) == 0:
        logger.warning("no GeoTiffs to transform")

    scaled_files = []
    if GEOTIFF_SCALING_FACTOR == 1:
        # no scaling requested: use the raw tiles directly
        scaled_files = dataset_files
    else:
        # idiomatic (and race-free) replacement for os.path.exists + os.makedirs
        SCALED_DIR.mkdir(parents=True, exist_ok=True)
        for dataset_file in dataset_files:
            scaled_path = Path(SCALED_DIR, dataset_file.name)
            scaled_files.append(scaled_path)

            # skip already-scaled tiles unless a rebuild was requested
            if scaled_path.exists() and not OVERWRITE:
                continue

            logger.debug(f"downscaling tile: {dataset_file}")
            downscale_and_write(dataset_file, scaled_path, GEOTIFF_SCALING_FACTOR)

    # Merging tiles into a mosaic
    if not MOSAIC_FILE.exists() or OVERWRITE:
        logger.debug("merging mosaic tiles")
        merge_and_write(scaled_files, MOSAIC_FILE)

    # Reprojecting
    if not REPROJECT_FILE.exists() or OVERWRITE:
        logger.debug("reprojecting mosaic")
        reproject_dataset(MOSAIC_FILE, REPROJECT_FILE)
--------------------------------------------------------------------------------
/experiments/hatching/imgprocessing.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import cv2
4 | import numpy as np
5 | import rasterio
6 |
7 | from experiments.hatching.slope import get_slope
8 |
OUTPUT_PATH = Path("experiments/hatching/output")
ELEVATION_FILE = Path("experiments/hatching/data/GebcoToBlender/fullsize_reproject.tif")

CROP_SIZE = [15000, 15000]  # crop window size in pixels
13 |
14 |
def hstack(*args):
    """Place 2D uint8 arrays side by side on one canvas.

    The canvas is as tall as the tallest input; shorter inputs are
    zero-padded at the bottom.
    """
    heights = [m.shape[0] for m in args]
    widths = [m.shape[1] for m in args]

    canvas = np.zeros([max(heights, default=0), sum(widths)], dtype=np.uint8)

    offset = 0
    for m, width in zip(args, widths):
        canvas[0 : m.shape[0], offset : offset + width] = m
        offset += width

    return canvas
31 |
32 |
def normalize_to_uint8(m):
    """Shift/scale an array into [0, 255] and return it as uint8.

    BUGFIX: this helper was called below but never defined or imported
    anywhere in this file, so the script crashed with a NameError before
    writing any output.
    """
    m = m.astype(np.float64)
    m -= np.min(m)
    peak = np.max(m)
    if peak > 0:  # keep an all-zero array all-zero instead of dividing by zero
        m *= 255.0 / peak
    return m.astype(np.uint8)


data = None
with rasterio.open(str(ELEVATION_FILE)) as dataset:
    data = dataset.read(1)

# crop a CROP_SIZE window around a fractional center position
CROP_CENTER = [0.4, 0.4]
data = data[
    int(CROP_CENTER[1] * data.shape[1] - CROP_SIZE[1] // 2) : int(CROP_CENTER[1] * data.shape[1] + CROP_SIZE[1] // 2),
    int(CROP_CENTER[0] * data.shape[0] - CROP_SIZE[0] // 2) : int(CROP_CENTER[0] * data.shape[0] + CROP_SIZE[0] // 2),
]

for i in [60, 70, 80, 90, 100, 120, 140, 160, 180, 200]:
    # NOTE(review): `data` is reassigned here, so the blurs accumulate across
    # iterations rather than each kernel size being applied to the original
    # crop — confirm this is intended before reusing these outputs.
    data = cv2.blur(data, (i, i))
    _, _, _, _, angles, inclination = get_slope(data, 1)

    # mask: slope direction within angle_width/2 degrees of 45 degrees...
    angle_width = 30
    tanako_ang = cv2.inRange(np.degrees(angles), np.array([45 - angle_width / 2]), np.array([45 + angle_width / 2]))
    # tanako_inc = cv2.inRange(inclination, np.array([500]), np.array([np.max(inclination)]))
    # tanako_inc = cv2.inRange(inclination, np.array([20]), np.array([2000]))
    # ...combined with a minimum-steepness mask
    tanako_inc = inclination > 10

    tanako = (np.logical_and(tanako_ang, tanako_inc) * 255).astype(np.uint8)

    # morphological open removes speckle noise, close fills small holes
    kernel = np.ones((3, 3), np.uint8)
    tanako = cv2.morphologyEx(tanako, cv2.MORPH_OPEN, kernel)
    tanako = cv2.morphologyEx(tanako, cv2.MORPH_CLOSE, kernel)

    cv2.imwrite(
        str(Path(OUTPUT_PATH, f"tanako_base_blur_{i}.png")),
        hstack(normalize_to_uint8(inclination), normalize_to_uint8(tanako_ang), tanako),
    )
64 |
--------------------------------------------------------------------------------
/experiments/hatching/run.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import cv2
4 | import numpy as np
5 | import shapely
6 | from rasterio.features import rasterize
7 | from shapely import LineString, Polygon, MultiPolygon, MultiLineString
8 | from shapelysmooth import taubin_smooth
9 |
10 | from experiments.hatching.flowlines import FlowlineHatcher, FlowlineHatcherConfig
11 | from experiments.hatching.slope import get_slope
12 | from lineworld.core.hatching import HatchingOptions, HatchingDirection, create_hatching
13 | from lineworld.core.maptools import DocumentInfo
14 | from lineworld.core.svgwriter import SvgWriter
15 | from lineworld.util.gebco_grid_to_polygon import _extract_polygons, get_elevation_bounds
16 | from lineworld.util.geometrytools import unpack_multipolygon
17 |
18 | from loguru import logger
19 |
20 | MIN_RING_LENGTH = 50
21 | POST_SMOOTHING_SIMPLIFY_TOLERANCE = 0.5
22 |
23 | # INPUT_FILE = Path("experiments/hatching/data/hatching_dem.tif")
24 | INPUT_FILE = Path("experiments/hatching/data/gebco_crop.tif")
25 | # INPUT_FILE = Path("experiments/hatching/data/slope_test_3.tif")
26 | # INPUT_FILE = Path("experiments/hatching/data/slope_test_5.tif")
27 |
28 | OUTPUT_PATH = Path("experiments/hatching/output")
29 |
30 | LEVELS = 20
31 | DISTANCES = [3.0 + x * 0.9 for x in range(LEVELS)]
32 | # BOUNDS = get_elevation_bounds([0, 20], LEVELS)
33 |
34 |
def read_data(input_path: Path) -> np.ndarray:
    """Load a raster file into a NumPy array, preserving its bit depth
    (IMREAD_UNCHANGED)."""
    raster = cv2.imread(str(input_path), cv2.IMREAD_UNCHANGED)

    # Earlier experiments resized / flipped / rotated the raster here;
    # the data is now returned untouched.
    return raster
45 |
def standard_hatching(data: np.ndarray, **kwargs) -> list[MultiLineString | LineString]:
    """Classic parallel-line hatching: one pass per elevation level, with
    the 45-degree line spacing taken from DISTANCES for that level."""
    bounds = get_elevation_bounds([np.min(data), np.max(data)], LEVELS)
    result = []

    for level in range(LEVELS):
        polygons = []
        for geom in _extract_polygons(data, *bounds[level], False):
            polygons.extend(unpack_multipolygon(geom))

        options = HatchingOptions()
        options.distance = DISTANCES[level]
        options.direction = HatchingDirection.ANGLE_45

        result.extend(create_hatching(poly, None, options) for poly in polygons)

    return result
66 |
67 |
def standard_hatching_concentric(data: np.ndarray, **kwargs) -> list[MultiLineString | LineString]:
    """Concentric hatching: for every level polygon draw its boundary
    rings, then recurse on inward-buffered copies spaced by the level's
    hatching distance until the polygon collapses."""

    def _rings(g: Polygon | MultiPolygon, bbox: list[float], options: HatchingOptions) -> list[LineString]:
        """Collect exterior and hole rings, then recurse on an inward offset."""
        collected: list[LineString] = []

        if g.is_empty:
            return collected

        for poly in unpack_multipolygon(g):
            collected.append(LineString(poly.exterior))
            for hole in poly.interiors:
                collected.append(LineString(hole))
            # inset by the hatching distance and repeat
            collected.extend(_rings(poly.buffer(-options.distance), None, options))

        return collected

    bounds = get_elevation_bounds([np.min(data), np.max(data)], LEVELS)
    result = []

    for level in range(LEVELS):
        polygons = []
        for geom in _extract_polygons(data, *bounds[level], False):
            polygons.extend(unpack_multipolygon(geom))

        options = HatchingOptions()
        options.distance = DISTANCES[level]

        for poly in polygons:
            result.extend(_rings(poly, None, options))

    return result
106 |
107 |
def standard_hatching_slope_orientation(
    data: np.ndarray, angles: np.ndarray, **kwargs
) -> list[MultiLineString | LineString]:
    """Parallel hatching whose angle per polygon follows the mean slope
    direction (taken from the precomputed *angles* raster) inside that
    polygon."""
    bounds = get_elevation_bounds([np.min(data), np.max(data)], LEVELS)
    result = []

    for level in range(LEVELS):
        polygons = []
        for geom in _extract_polygons(data, *bounds[level], False):
            polygons.extend(unpack_multipolygon(geom))

        for poly in polygons:
            # rasterize the polygon as a mask over the angle grid, then
            # average the masked slope directions
            mask = rasterize([poly], out_shape=angles.shape)
            mean_angle = np.degrees(np.mean(angles[mask > 0]))

            options = HatchingOptions()
            options.distance = DISTANCES[level]
            options.angle = mean_angle

            result.append(create_hatching(poly, None, options))

    return result
138 |
139 |
def _cut_linestring(ls: LineString) -> np.ndarray:
    """Split a LineString into its individual segments.

    Returns an (n-1, 4) float array with one row [x1, y1, x2, y2] per
    consecutive coordinate pair of the input.

    Note: the return annotation was previously ``np.array`` (a function,
    not a type); fixed to ``np.ndarray``.
    """
    coords = np.asarray(ls.coords, dtype=float)
    # pair each vertex with its successor: columns 0-1 are the segment
    # start, columns 2-3 the segment end
    return np.hstack([coords[:-1], coords[1:]])
153 |
154 |
def illuminated_contours(data: np.ndarray, **kwargs) -> list[list[MultiLineString | LineString]]:
    """Tanaka-style (illuminated) contours.

    Contour rings are smoothed, cut into individual segments, and each
    segment is classified as bright or dark by its orientation relative
    to a fixed light direction. Returns [bright_segments, dark_segments].

    correct results if (and only if) bounds are supplied in the right order, from lower to higher, ie.
    BOUNDS = get_elevation_bounds([-20, 0], LEVELS)
    """

    angle = 135  # light direction in degrees
    width = 90  # half-width of the bright sector, i.e. bright spans 180 degrees

    all_ls = []  # all contour rings (exteriors and holes) across levels
    output_bright = []
    output_dark = []

    bounds = get_elevation_bounds([np.min(data), np.max(data)], LEVELS)

    for i in range(LEVELS):
        extracted_geometries = _extract_polygons(data, *bounds[i], True)

        polygons = []
        for g in extracted_geometries:
            # densify first so Taubin smoothing has enough vertices
            polygons += unpack_multipolygon(shapely.segmentize(g, 10))

        # smoothing
        polygons = [taubin_smooth(x, steps=10, factor=0.7, mu=-0.2) for x in polygons]
        polygons = [shapely.simplify(x, POST_SMOOTHING_SIMPLIFY_TOLERANCE) for x in polygons]

        for p in polygons:
            # area filtering
            # if p.area < 100.0:
            #     continue

            # drop tiny rings that would only produce visual noise
            if p.exterior.length > MIN_RING_LENGTH:
                all_ls.append(p.exterior)

            for hole in p.interiors:
                if hole.length > MIN_RING_LENGTH:
                    all_ls.append(hole)
            # all_ls += p.interiors

    # cut linestrings to single lines
    for ls in all_ls:
        lines = _cut_linestring(ls)

        # compute orientation of lines
        theta = np.degrees(np.arctan2((lines[:, 3] - lines[:, 1]), (lines[:, 2] - lines[:, 0])))

        # segments facing the light direction (within +/- width) are bright
        bright_mask = (theta > (angle - width)) & (theta < (angle + width))

        for line in lines[bright_mask]:
            output_bright.append(LineString([line[:2], line[2:]]))

        for line in lines[~bright_mask]:
            output_dark.append(LineString([line[:2], line[2:]]))

    # detect if falling or rising slope works implicit due to reversed hole coordinate order

    # reassemble connected lines of same color to linestrings

    return [output_bright, output_dark]
214 |
215 |
def flowline_hatching(data: np.ndarray, **kwargs) -> list[MultiLineString | LineString]:
    """Hatch the raster with flowlines whose local spacing is driven by a
    min/max-normalized copy of the elevation data itself."""
    config = FlowlineHatcherConfig()

    # map the raster linearly onto [LINE_DISTANCE[0], LINE_DISTANCE[1]]
    normalized = (data - np.min(data)) / (np.max(data) - np.min(data))
    spacing = config.LINE_DISTANCE[0] + normalized * (config.LINE_DISTANCE[1] - config.LINE_DISTANCE[0])

    _, _, _, _, angles, inclination = get_slope(data, 10)

    hatcher = FlowlineHatcher(
        shapely.box(0, 0, data.shape[1], data.shape[0]),
        data,
        angles,
        inclination,
        spacing,
        config,
    )

    return hatcher.hatch()
240 |
241 |
if __name__ == "__main__":
    # print(_cut_linestring(LineString([
    #     [0, 1],
    #     [10, 2],
    #     [100, 3]
    # ])))
    #
    # exit()

    data = read_data(INPUT_FILE)

    logger.info(f"data {INPUT_FILE} min: {np.min(data)} / max: {np.max(data)}")

    # slope direction / steepness rasters shared by the experiments below
    X, Y, dX, dY, angles, inclination = get_slope(data, 10)

    # experiment name -> hatching function; each produces one output SVG
    experiments_table = {
        "hatching_a": standard_hatching,
        "hatching_a_concentric": standard_hatching_concentric,
        "hatching_c": standard_hatching_slope_orientation,
        "hatching_tanaka": illuminated_contours,
        "hatching_flowlines": flowline_hatching,
    }

    # land mask (elevation 0..10000) drawn as an overlay on every output
    land_polys = _extract_polygons(data, *get_elevation_bounds([0, 10_000], 1)[0], True)

    for k, v in experiments_table.items():
        logger.info(f"running: {k}")

        hatchings = v(data, angles=angles)

        # document dimensions follow the raster (1 px = 1 unit)
        doc = DocumentInfo()
        doc.width = data.shape[1]
        doc.height = data.shape[0]

        svg = SvgWriter(Path(OUTPUT_PATH, f"{k}.svg"), [doc.width, doc.height])
        svg.debug = True
        svg.background_color = "white"

        if k == "hatching_tanaka":
            # illuminated_contours returns [bright, dark] line lists which
            # are styled separately; a grey background keeps both visible
            svg.background_color = "grey"

            # options_bright = {
            #     "fill": "none",
            #     "stroke": "white",
            #     "stroke-width": "2.0",
            # }
            #
            # options_dark = {
            #     "fill": "none",
            #     "stroke": "black",
            #     "stroke-width": "2.0",
            # }

            options_bright = {
                "fill": "none",
                "stroke": "skyblue",
                "stroke-width": "2.0",
            }

            options_dark = {
                "fill": "none",
                "stroke": "darkblue",
                "stroke-width": "2.0",
            }

            svg.add("contour_bright", hatchings[0], options=options_bright)
            svg.add("contour_dark", hatchings[1], options=options_dark)

        else:
            options = {
                "fill": "none",
                "stroke": "black",
                "stroke-width": "2.0",
            }

            svg.add("contour", hatchings, options=options)

        options_land = {"fill": "green", "stroke": "none", "fill-opacity": "0.5"}

        svg.add("land", land_polys, options=options_land)

        svg.write()
--------------------------------------------------------------------------------
/experiments/hatching/scales.py:
--------------------------------------------------------------------------------
1 | import math
2 | from pathlib import Path
3 | from typing import Callable, Any
4 |
5 | import cv2
6 | import numpy as np
7 | import matplotlib
8 | from matplotlib import pyplot as plt
9 |
# default number of entries in each scale lookup table
LUT_SIZE = 3000


def slope_linear(lut_size: int = LUT_SIZE) -> np.ndarray:
    """Identity scale: a LUT mapping [0, 1] linearly onto itself.

    Note: the return annotation was previously ``float``; every scale
    function here actually returns a 1-D float64 array of length
    *lut_size*.
    """
    return np.linspace(0, 1, num=lut_size)
16 |
17 |
def slope_power(exp: float = 2, lut_size: int = LUT_SIZE) -> np.ndarray:
    """Power-curve scale: a LUT mapping x in [0, 1] to x**exp.

    Returns a 1-D float64 array of length *lut_size* (return annotation
    fixed from ``float``).
    """

    # TODO: numpy-compatible vectorized output for even exponents,
    # i.e. a point-symmetric curve around (0.5, 0.5); kept from the
    # original sketch (was dead code after the return statement):
    # y = (x*2-1) ** exp
    # if exp % 2 == 0:
    #     if x < 0.5:
    #         return (1-y)/2
    #     else:
    #         return y/2 + 0.5
    # else:
    #     return y

    lut = np.linspace(0, 1, num=lut_size)
    return lut**exp
33 |
34 |
def slope_sine(lut_size: int = LUT_SIZE) -> np.ndarray:
    """Cosine-eased scale: a LUT mapping [0, 1] onto [0, 1] with zero
    slope at both ends. Returns a 1-D float64 array of length
    *lut_size* (return annotation fixed from ``float``).
    """
    lut = np.linspace(0, 1, num=lut_size)
    return -(np.cos(math.pi * lut) - 1) / 2
38 |
39 |
def quadratic_bezier(p1=[0.25, 0.25], p2=[0.75, 0.75], lut_size=LUT_SIZE) -> np.ndarray:
    """Bezier-curve scale defined by the two inner control points p1, p2.

    NOTE(review): despite the name, this evaluates a *cubic* Bezier with
    four control points p0=[0,0], p1, p2, p3=[1,1] -- see the (1-t)^3 /
    t^3 terms below.

    The curve is sampled at 3x the LUT resolution and scattered into the
    LUT by its x coordinate; if any LUT slot is never hit, an exception
    is raised.

    NOTE(review): the mutable list defaults are shared across calls; safe
    only as long as callers never mutate them.
    """
    p0 = [0, 0]
    p3 = [1, 1]

    t = np.linspace(0, 1, num=lut_size * 3)

    # cubic Bezier: B(t) = (1-t)^3 p0 + 3(1-t)^2 t p1 + 3(1-t) t^2 p2 + t^3 p3
    x = ((1 - t) ** 3) * p0[0] + 3 * ((1 - t) ** 2) * t * p1[0] + 3 * (1 - t) * (t**2) * p2[0] + (t**3) * p3[0]
    y = ((1 - t) ** 3) * p0[1] + 3 * ((1 - t) ** 2) * t * p1[1] + 3 * (1 - t) * (t**2) * p2[1] + (t**3) * p3[1]

    # -1 marks unfilled slots so gaps can be detected afterwards
    lut = np.full([lut_size], -1, dtype=np.float64)

    for i in range(0, x.shape[0]):
        lut[int(x[i] * (lut_size - 1))] = y[i]

    if np.min(lut) == -1:
        raise Exception(f"unfilled LUT: {np.where(lut == -1)}")

    return lut
58 |
59 |
def sigmoid(lut_size: int = LUT_SIZE) -> np.ndarray:
    """Logistic-curve scale. Returns a 1-D float64 array of length
    *lut_size* (return annotation fixed from ``float``).

    NOTE(review): evaluated only on [0, 1], the logistic function yields
    values in roughly [0.5, 0.73] -- unlike the other scales it does not
    span the full [0, 1] output range. It is unused in the demo below;
    confirm the range is intended before using it.
    """
    lut = np.linspace(0, 1, num=lut_size)
    return 1 / (1 + np.exp(-lut))
63 |
64 |
65 | class Scale:
66 | def __init__(self, func: Callable, params: dict[str, Any], num_output_bins: int | None = None) -> None:
67 | self.func = func
68 | self.params = params
69 | self.num_output_bins = num_output_bins
70 | self.lut = self.func(**self.params)
71 |
72 | def apply(self, values: np.ndarray) -> np.ndarray:
73 | output = values
74 |
75 | if not values.dtype == np.float64:
76 | output = (values / np.iinfo(values.dtype).max).astype(np.float64)
77 |
78 | output = self.lut[(output * (self.lut.shape[0] - 1)).astype(int)] # returns dtype np.float64
79 |
80 | if self.num_output_bins is not None:
81 | output = np.digitize(output, np.linspace(0, 1, num=self.num_output_bins))
82 |
83 | if not values.dtype == np.float64:
84 | output = (output * np.iinfo(values.dtype).max).astype(values.dtype)
85 |
86 | return output
87 |
88 |
if __name__ == "__main__":
    NUM_X_VALUES = 100

    # each entry: [scale function, kwargs]; rendered as one column below
    scales = [
        [slope_linear, {}],
        # [slope_power, {"exp": 2}],
        [slope_power, {"exp": 3}],
        [slope_sine, {}],
        [quadratic_bezier, {"p1": [0, 0.75], "p2": [1, 0.25]}],
        [quadratic_bezier, {"p1": [0, 0.50], "p2": [1, 0.50]}],
        [quadratic_bezier, {}],
        [quadratic_bezier, {"p1": [0.25, 0], "p2": [0.75, 1.0]}],
        [quadratic_bezier, {"p1": [0.50, 0], "p2": [0.50, 1.0]}],
        [quadratic_bezier, {"p1": [0.75, 0], "p2": [0.25, 1.0]}],
        [quadratic_bezier, {"p1": [0.50, 0], "p2": [0.75, 1.0]}],
    ]

    # two rows: the scale curve on top, the scale applied to a sample image below
    fig, axes = plt.subplots(nrows=2, ncols=len(scales))
    fig.set_figheight(2 * 5)
    fig.set_figwidth(6 * 5)

    # sample elevation raster, resized and min/max-normalized to [0, 1]
    IMAGE_FILE = Path("experiments/hatching/data/gebco_crop.tif")
    img = (cv2.imread(str(IMAGE_FILE), cv2.IMREAD_UNCHANGED)).astype(np.float64)
    img = cv2.resize(img, [1000, 1000])
    img = (img - np.min(img)) / (np.max(img) - np.min(img))

    for i in range(len(scales)):
        func = scales[i][0]
        args = scales[i][1]

        xs = np.linspace(0, 1, endpoint=True, num=NUM_X_VALUES)

        # dividing by num_output_bins maps the binned output back to [0, 1]
        scale_obj = Scale(func, args, num_output_bins=20)
        ys = scale_obj.apply(xs) / scale_obj.num_output_bins

        ax = axes[0, i]
        ax.plot(xs, ys)

        ax.set_xlim([0, 1])
        ax.set_ylim([0, 1])

        ax.set_aspect("equal")
        ax.set_title(func.__name__ + "\n" + "".join([f" {k}: {v}" for k, v in args.items()]))

        ax = axes[1, i]
        norm = matplotlib.colors.Normalize(vmin=0, vmax=1)
        lut_image = scale_obj.apply(img) / scale_obj.num_output_bins
        ax.imshow(lut_image, norm=norm)

    plt.savefig(Path("experiments/hatching/output", "scales.png"))
139 |
--------------------------------------------------------------------------------
/experiments/hatching/smoothing.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import cv2
3 | import numpy as np
4 | from lineworld.core.maptools import DocumentInfo
5 | from lineworld.core.svgwriter import SvgWriter
6 | from lineworld.util.gebco_grid_to_polygon import _extract_polygons, get_elevation_bounds
7 | from lineworld.util.geometrytools import unpack_multipolygon
8 |
9 | import shapely
10 |
11 | from shapelysmooth import taubin_smooth
12 |
13 | # INPUT_FILE = Path("data/hatching_dem.tif")
14 | # INPUT_FILE = Path("data/slope_test_2.tif")
15 | # INPUT_FILE = Path("data/slope_test_4.tif")
16 |
17 | INPUT_FILE = Path("experiments/hatching/data/gebco_crop.tif")
18 |
19 | OUTPUT_PATH = Path("experiments/hatching/output")
20 |
21 | LEVELS = 10
22 | DISTANCES = [3.0 + x * 0.9 for x in range(LEVELS)]
23 | BOUNDS = get_elevation_bounds([0, -5700], LEVELS)
24 |
25 | MIN_AREA = 10
26 | SEGMENT_MAX_LENGTH = 10
27 | SIMPLIFY_TOLERANCE = 1.0
28 |
29 |
def _read_data(input_path: Path) -> np.ndarray:
    """Load a raster file into a NumPy array with its original bit depth
    preserved (IMREAD_UNCHANGED).

    Earlier experiments resized / flipped / rotated the data here; it is
    now returned untouched.
    """
    return cv2.imread(str(input_path), cv2.IMREAD_UNCHANGED)
39 |
40 |
def _taubin_layer(polygons, steps: int, factor: float | None = None, mu: float | None = None, simplify_tolerance: float | None = None) -> list:
    """Taubin-smooth every polygon, optionally simplify, and drop results
    smaller than MIN_AREA.

    Shared by the comparison layers below (previously three near-identical
    copy-pasted blocks). *factor*/*mu* are forwarded to taubin_smooth only
    when given, so the library defaults still apply otherwise.
    """
    smoothed = []
    for poly in polygons:
        if factor is not None:
            p = taubin_smooth(poly, steps=steps, factor=factor, mu=mu)
        else:
            p = taubin_smooth(poly, steps=steps)
        if simplify_tolerance is not None:
            # simplify before the area check, matching the original order
            p = shapely.simplify(p, simplify_tolerance)
        if p.area < MIN_AREA:
            continue
        smoothed.append(p)
    return smoothed


def _layer_options(color: str) -> dict[str, str]:
    """SVG styling for one comparison layer; layers differ only in stroke color."""
    return {
        "fill": "none",
        "stroke": color,
        "stroke-width": "2.0",
        "opacity": "0.5",
    }


if __name__ == "__main__":
    data = _read_data(INPUT_FILE)

    print(f"data {INPUT_FILE} min: {np.min(data)} / max: {np.max(data)}")

    # document dimensions follow the raster (1 px = 1 unit)
    doc = DocumentInfo()
    doc.width = data.shape[1]
    doc.height = data.shape[0]

    # extract polygons for all elevation levels
    output = []
    for i in range(LEVELS):
        for g in _extract_polygons(data, *BOUNDS[i], True):
            output += unpack_multipolygon(g)

    # densify rings so the smoothing has enough vertices to work with
    output = [shapely.segmentize(p, SEGMENT_MAX_LENGTH) for p in output]

    svg = SvgWriter(Path(OUTPUT_PATH, "smoothing.svg"), [doc.width, doc.height])
    svg.background_color = "white"

    # unsmoothed reference layer plus three Taubin variants for comparison
    svg.add("original", output, options=_layer_options("black"))
    svg.add(
        "taubin_5",
        _taubin_layer(output, 5, factor=0.7, mu=-0.2, simplify_tolerance=SIMPLIFY_TOLERANCE),
        options=_layer_options("red"),
    )
    svg.add("taubin_30", _taubin_layer(output, 30), options=_layer_options("green"))
    svg.add("taubin_100", _taubin_layer(output, 100), options=_layer_options("blue"))

    # a Chaikin-smoothing variant existed here as commented-out code; see
    # shapelysmooth.chaikin_smooth if it needs to be revived

    svg.write()
151 |
--------------------------------------------------------------------------------
/experiments/labelplacement/parser.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | The main 'geoname' table has the following fields :
4 | ---------------------------------------------------
5 | 0 geonameid : integer id of record in geonames database
6 | 1 name : name of geographical point (utf8) varchar(200)
7 | 2 asciiname : name of geographical point in plain ascii characters, varchar(200)
8 | 3 alternatenames : alternatenames, comma separated, ascii names automatically transliterated, convenience attribute from alternatename table, varchar(10000)
9 | 4 latitude : latitude in decimal degrees (wgs84)
10 | 5 longitude : longitude in decimal degrees (wgs84)
11 | 6 feature class : see http://www.geonames.org/export/codes.html, char(1)
12 | 7 feature code : see http://www.geonames.org/export/codes.html, varchar(10)
13 | 8 country code : ISO-3166 2-letter country code, 2 characters
14 | 9 cc2 : alternate country codes, comma separated, ISO-3166 2-letter country code, 200 characters
15 | 10 admin1 code : fipscode (subject to change to iso code), see exceptions below, see file admin1Codes.txt for display names of this code; varchar(20)
16 | 11 admin2 code : code for the second administrative division, a county in the US, see file admin2Codes.txt; varchar(80)
17 | 12 admin3 code : code for third level administrative division, varchar(20)
18 | 13 admin4 code : code for fourth level administrative division, varchar(20)
19 | 14 population : bigint (8 byte int)
20 | 15 elevation : in meters, integer
21 | 16 dem : digital elevation model, srtm3 or gtopo30, average elevation of 3''x3'' (ca 90mx90m) or 30''x30'' (ca 900mx900m) area in meters, integer. srtm processed by cgiar/ciat.
22 | 17 timezone : the iana timezone id (see file timeZone.txt) varchar(40)
23 | 18 modification date : date of last modification in yyyy-MM-dd format
24 |
25 | """
26 |
27 | import csv
28 | from pathlib import Path
29 |
30 | INPUT_FILE = "cities15000.txt"
31 | INPUT_FILE_ALTNAMES = "alternateNames.txt"
32 | OUTPUT_FILE = "cities.csv"
33 |
34 | MIN_POPULATION = 100_000
35 | ALTNAME_COUNTRYCODE = "de"
36 |
altnames_map = {}

# Build a geonameid -> alternate-name lookup for the configured language.
# encoding fixed to UTF-8: geonames dumps are UTF-8, and relying on the
# platform's locale encoding breaks on non-UTF-8 systems.
with open(Path(INPUT_FILE_ALTNAMES), encoding="utf-8") as file:
    for line in file:
        fields = line.split("\t")

        # field 2 is the language code of the alternate name
        # (the constant is named ..._COUNTRYCODE -- presumably these are
        # iso language codes here; verify against the geonames docs)
        if fields[2].lower() != ALTNAME_COUNTRYCODE:
            continue

        # field 1: geonameid, field 3: the alternate name itself
        altnames_map[fields[1]] = fields[3]


with open(Path(INPUT_FILE), encoding="utf-8") as file_read:
    # newline="" as required by the csv module docs so the writer fully
    # controls line endings (otherwise extra blank lines on Windows)
    with open(Path(OUTPUT_FILE), "w", encoding="utf-8", newline="") as file_write:
        writer = csv.DictWriter(
            file_write,
            delimiter=";",
            fieldnames=["ascii_name", "altname", "population", "lon", "lat"],
        )
        writer.writeheader()
        for line in file_read:
            fields = line.split("\t")

            # field 14: population
            if int(fields[14]) < MIN_POPULATION:
                continue

            row = {
                "ascii_name": fields[2],
                "altname": altnames_map.get(fields[0], ""),
                "population": fields[14],
                "lon": fields[5],
                "lat": fields[4],
            }

            writer.writerow(row)
72 |
--------------------------------------------------------------------------------
/experiments/projection_test.py:
--------------------------------------------------------------------------------
1 | import pyproj
2 | import shapely
3 | from shapely.geometry import Polygon, LineString
4 | from shapely.affinity import affine_transform
5 |
6 | import numpy as np
7 |
8 | from lineworld.core.svgwriter import SvgWriter
9 |
10 |
11 | EQUATOR = 40075016.68557849
12 |
13 | rect = Polygon(
14 | [
15 | [-(180 - 20), +(90 - 20)],
16 | [-(180 - 20), -(90 - 20)],
17 | [+(180 - 20), -(90 - 20)],
18 | [+(180 - 20), +(90 - 20)],
19 | [-(180 - 20), +(90 - 20)],
20 | ]
21 | )
22 |
23 | width: int = 1000
24 | height: int = width
25 |
26 |
def get_transformation_matrix() -> list[float]:
    """Affine coefficients [a, b, d, e, xoff, yoff] (shapely order) that
    scale projected meters to document units, flip the y axis (SVG y
    grows downward), and center the origin on the page."""
    scale = 1 / EQUATOR * width
    return [scale, 0, 0, -scale, width / 2.0, height / 2]
35 |
36 |
def project_func2(coordinates: np.array) -> np.array:
    """Pseudo-sinusoidal projection over an (n, 2) array of (lat, lon)
    rows: scales longitude by cos(latitude) and swaps the axes. The
    input array is left unmodified."""
    projected = coordinates.copy()

    lat = coordinates[:, 0]
    lon = coordinates[:, 1]

    projected[:, 0] = np.degrees(np.radians(lon) * np.cos(np.radians(lat)))
    projected[:, 1] = lat

    return projected
44 |
45 |
LATITUDE_LINES = 11
LONGITUDE_LINES = 11 + 6
LAT_LON_MIN_SEGMENT_LENGTH = 0.1

# source: WGS84 lat/lon; destination: Van der Grinten, with +over allowing
# longitudes beyond +/-180
crs_src = pyproj.CRS("EPSG:4326")
# crs_dst = pyproj.CRS('EPSG:3857')
crs_dst = pyproj.CRS.from_proj4("+proj=vandg +over")

# project_func = pyproj.Transformer.from_crs(crs_src, crs_dst, always_xy=True).transform
# NOTE(review): without always_xy=True the transformer uses the CRS-defined
# axis order (lat, lon for EPSG:4326) -- which matches how the LineStrings
# below are built; confirm
project_func = pyproj.Transformer.from_crs(crs_src, crs_dst).transform

mat = get_transformation_matrix()

lines = []

# graticule extent, extended 90 degrees past the antimeridian on each side
minmax_lat = [-90 - 0, 90 + 0]
minmax_lon = [-180 - 90, 180 + 90]

lats = np.linspace(*minmax_lat, num=LATITUDE_LINES).tolist()  # [1:-1]
# lons = np.linspace(-180, 180, num=LONGITUDE_LINES).tolist() #[1:-1]
lons = np.linspace(*minmax_lon, num=LONGITUDE_LINES).tolist()  # [1:-1]

# parallels: constant latitude, spanning the full longitude range
for lat in lats:
    lines.append(LineString([[lat, minmax_lon[0]], [lat, minmax_lon[1]]]))

# meridians: constant longitude, spanning the full latitude range
for lon in lons:
    lines.append(LineString([[minmax_lat[0], lon], [minmax_lat[1], lon]]))

# a 100x100-degree box as a visual sanity check of the projection
lines += [shapely.box(-50, -50, 50, 50)]

# densify so the projected lines curve smoothly instead of staying straight
lines = shapely.segmentize(lines, LAT_LON_MIN_SEGMENT_LENGTH)  # .tolist()

lines = [shapely.ops.transform(project_func, l) for l in lines]  # TODO: should not work...
# lines = [shapely.transform(l, project_func2) for l in lines]
# map projected meters into document coordinates
lines = [affine_transform(l, mat) for l in lines]

geometries = lines

# for g in geometries:
#     print(g)

svg = SvgWriter("test.svg", [width, height])

options_grid = {
    "fill": "none",
    "stroke": "black",
    "stroke-width": "1.0",
}
svg.add("grid", geometries, options=options_grid)

svg.write()
97 |
--------------------------------------------------------------------------------
/hardware/config.yaml:
--------------------------------------------------------------------------------
1 | name: "SquarePlot2"
2 | board: "Fysetc E4"
3 |
4 | stepping:
5 | engine: RMT
6 | idle_ms: 255
7 | pulse_us: 2
8 | dir_delay_us: 1
9 | disable_delay_us: 0
10 |
11 | kinematics:
12 | corexy:
13 |
14 | uart1:
15 | txd_pin: gpio.22
16 | rxd_pin: gpio.21
17 | rts_pin: NO_PIN
18 | cts_pin: NO_PIN
19 | baud: 115200
20 | mode: 8N1
21 |
22 | axes:
23 | shared_stepper_disable_pin: gpio.25:high
24 |
25 | z:
26 | steps_per_mm: 200.000
27 | max_rate_mm_per_min: 2000.000
28 | acceleration_mm_per_sec2: 50.000
29 | max_travel_mm: 100.000
30 | soft_limits: false
31 | homing:
32 | cycle: 0
33 | positive_direction: false
34 | mpos_mm: 150.000
35 | feed_mm_per_min: 100.000
36 | seek_mm_per_min: 200.000
37 | settle_ms: 500
38 | seek_scaler: 1.100
39 | feed_scaler: 1.100
40 |
41 | motor0:
42 | hard_limits: false
43 | pulloff_mm: 1.000
44 | tmc_2209:
45 | uart_num: 1
46 | addr: 0
47 | r_sense_ohms: 0.110
48 | run_amps: 0.600
49 | hold_amps: 0.400
50 | microsteps: 16
51 | stallguard: 0
52 | stallguard_debug: false
53 | toff_disable: 0
54 | toff_stealthchop: 5
55 | toff_coolstep: 3
56 | run_mode: CoolStep
57 | homing_mode: CoolStep
58 | use_enable: false
59 | step_pin: gpio.14
60 | direction_pin: gpio.12
61 | disable_pin: NO_PIN
62 |
63 |
64 | a:
65 | steps_per_mm: 200.000
66 | max_rate_mm_per_min: 2000.000
67 | acceleration_mm_per_sec2: 200.000
68 | max_travel_mm: 0.000
69 | soft_limits: false
70 | homing:
71 | cycle: 0
72 | positive_direction: false
73 | mpos_mm: 150.000
74 | feed_mm_per_min: 100.000
75 | seek_mm_per_min: 200.000
76 | settle_ms: 500
77 | seek_scaler: 1.100
78 | feed_scaler: 1.100
79 |
80 | motor0:
81 | hard_limits: false
82 | pulloff_mm: 1.000
83 | tmc_2209:
84 | uart_num: 1
85 | addr: 2
86 | r_sense_ohms: 0.110
87 | run_amps: 0.600
88 | hold_amps: 0.400
89 | microsteps: 16
90 | stallguard: 0
91 | stallguard_debug: false
92 | toff_disable: 0
93 | toff_stealthchop: 5
94 | toff_coolstep: 3
95 | run_mode: CoolStep
96 | homing_mode: CoolStep
97 | use_enable: false
98 | step_pin: gpio.16
99 | direction_pin: gpio.17
100 | disable_pin: NO_PIN
101 |
102 | x:
103 | steps_per_mm: 80.000
104 | max_rate_mm_per_min: 10000.000
105 | acceleration_mm_per_sec2: 300.000
106 | max_travel_mm: 700.000
107 | soft_limits: false
108 | homing:
109 | cycle: 1
110 | positive_direction: false
111 | mpos_mm: 150.000
112 | feed_mm_per_min: 100.000
113 | seek_mm_per_min: 200.000
114 | settle_ms: 500
115 | seek_scaler: 1.100
116 | feed_scaler: 1.100
117 |
118 | motor0:
119 | limit_all_pin: gpio.34:low
120 | hard_limits: false
121 | pulloff_mm: 1.000
122 | tmc_2209:
123 | uart_num: 1
124 | addr: 1
125 | r_sense_ohms: 0.110
126 | run_amps: 0.600
127 | hold_amps: 0.400
128 | microsteps: 16
129 | stallguard: 0
130 | stallguard_debug: false
131 | toff_disable: 0
132 | toff_stealthchop: 5
133 | toff_coolstep: 3
134 | run_mode: CoolStep
135 | homing_mode: CoolStep
136 | use_enable: false
137 | step_pin: gpio.27
138 | direction_pin: gpio.26
139 | disable_pin: NO_PIN
140 |
141 | y:
142 | steps_per_mm: 80.000
143 | max_rate_mm_per_min: 10000.000
144 | acceleration_mm_per_sec2: 300.000
145 | max_travel_mm: 700.000
146 | soft_limits: false
147 | homing:
148 | cycle: 2
149 | positive_direction: true
150 | mpos_mm: 150.000
151 | feed_mm_per_min: 100.000
152 | seek_mm_per_min: 200.000
153 | settle_ms: 500
154 | seek_scaler: 1.100
155 | feed_scaler: 1.100
156 |
157 | motor0:
158 | limit_all_pin: gpio.35:low
159 | hard_limits: false
160 | pulloff_mm: 1.000
161 | tmc_2209:
162 | uart_num: 1
163 | addr: 3
164 | r_sense_ohms: 0.110
165 | run_amps: 0.600
166 | hold_amps: 0.400
167 | microsteps: 16
168 | stallguard: 0
169 | stallguard_debug: false
170 | toff_disable: 0
171 | toff_stealthchop: 5
172 | toff_coolstep: 3
173 | run_mode: CoolStep
174 | homing_mode: CoolStep
175 | use_enable: false
176 | step_pin: gpio.33
177 | direction_pin: gpio.32
178 | disable_pin: NO_PIN
179 |
180 |
181 | spi:
182 | miso_pin: gpio.19
183 | mosi_pin: gpio.23
184 | sck_pin: gpio.18
185 |
186 | sdcard:
187 | cs_pin: gpio.5
188 | card_detect_pin: NO_PIN
189 |
190 | start:
191 | must_home: false
192 | deactivate_parking: false
193 | check_limits: false
194 |
--------------------------------------------------------------------------------
/lineworld/__init__.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import os
3 | import tomllib
4 | from pathlib import Path
5 | from typing import Any
6 | from loguru import logger
7 |
8 | CONFIG_FILE = Path("configs", "config.toml")
9 | ENV_OVERWRITE_CONFIG = "LINEWORLD_CONFIG"
10 |
11 |
12 | def _recursive_dict_merge(a: dict, b: dict) -> dict:
13 | a = copy.deepcopy(a)
14 | for key, value in b.items():
15 | if key not in a:
16 | a[key] = value
17 | elif isinstance(a[key], dict) and isinstance(value, dict):
18 | a[key] = _recursive_dict_merge(a[key], b[key])
19 | else:
20 | a[key] = value
21 |
22 | return a
23 |
24 |
def get_config() -> dict[str, Any]:
    """Load a config.toml file from disk and merge it with the overwrite
    config referenced by the LINEWORLD_CONFIG env variable (if set).

    Overwrite values take precedence; nested tables merge recursively via
    _recursive_dict_merge. A missing or unreadable overwrite file is
    logged as a warning and ignored.
    """

    with open(CONFIG_FILE, "rb") as f:
        config = tomllib.load(f)

    overwrite_config = {}

    # direct lookup instead of scanning every environment variable for
    # the single key we care about
    overwrite_path = os.environ.get(ENV_OVERWRITE_CONFIG)
    if overwrite_path is not None:
        logger.info(f"loading overwrite config from {overwrite_path}")
        try:
            with open(overwrite_path, "rb") as f:
                overwrite_config = tomllib.load(f)
        except Exception as e:
            logger.warning(f"loading overwrite config from {overwrite_path} failed: {e}")

    return _recursive_dict_merge(config, overwrite_config)
44 |
45 |
def apply_config_to_object(config: dict[str, Any], obj: Any) -> Any:
    """Return a deep copy of *obj* in which every uppercase, non-callable,
    non-dunder attribute is overwritten by the value of the
    identically-named (lowercase) key in *config*."""

    result = copy.deepcopy(obj)

    settable = {
        attr for attr in dir(result) if not callable(getattr(result, attr)) and not attr.startswith("__")
    }

    for key in config:
        name = key.upper()
        if name in settable:
            setattr(result, name, config[key])

    return result
61 |
--------------------------------------------------------------------------------
/lineworld/core/hatching.py:
--------------------------------------------------------------------------------
1 | import math
2 | from dataclasses import dataclass
3 | from enum import Enum
4 |
5 | import numpy as np
6 | import shapely
7 | from shapely.geometry import MultiLineString, LineString, MultiPoint
8 | from shapely import Geometry, transform, affinity
9 |
10 | from lineworld.util.geometrytools import unpack_multilinestring
11 |
12 |
class HatchingDirection(Enum):
    """Named hatching orientations.

    NOTE(review): values look like rotation angles in degrees, but the
    VERTICAL=0 / HORIZONTAL=90 naming vs. the rotate() convention used in
    _create_hatch_lines() is worth confirming.
    """
    ANGLE_45 = 45
    VERTICAL = 0
    HORIZONTAL = 90
    ANGLE_135 = 135
18 |
19 |
@dataclass
class HatchingOptions:
    """Parameters controlling hatch line generation in create_hatching()."""
    angle: float = 45.0  # rotation of the hatch lines in degrees
    distance: float = 2.0  # axis-aligned spacing between hatch lines (see _create_hatch_lines)
    lift: bool = True  # TODO: currently unimplemented
    wiggle: float = 0.0  # TODO: currently unimplemented
26 |
27 |
def _create_hatch_lines(bbox: list[float], distance: float, angle: float) -> MultiLineString | None:
    """Generate parallel hatch lines covering bbox, rotated by angle degrees.

    Note: distance is measured along an axis, not distance between parallel hatching lines
    (if hatching is done at an angle, for example 45°)
    """

    if distance <= 0:
        return None

    minx, miny, maxx, maxy = bbox

    # snap the bounding box outwards onto a grid of size `distance`
    minx = distance * math.floor(minx / distance)
    miny = distance * math.floor(miny / distance)
    maxx = distance * math.ceil(maxx / distance)
    maxy = distance * math.ceil(maxy / distance)

    # the diagonal is the longest extent that may need covering once rotated
    diag = math.sqrt((maxx - minx) ** 2 + (maxy - miny) ** 2)

    # horizontal lines spanning [-diag, +diag], stacked around the origin
    offsets = [(distance * i) - diag / 2 for i in range(round(diag // distance))]
    mls = MultiLineString([LineString([[-diag, off], [+diag, off]]) for off in offsets])

    # rotate around the geometry center, then move onto the bbox center
    mls = affinity.rotate(mls, angle)
    return affinity.translate(mls, xoff=minx + (maxx - minx) / 2, yoff=miny + (maxy - miny) / 2)
62 |
63 |
def _combine(g: Geometry, hatch_lines: MultiLineString) -> MultiLineString:
    """Clip the hatch lines to the geometry and drop empty fragments."""
    segments = np.array(unpack_multilinestring(g.intersection(hatch_lines)))
    non_empty = segments[~shapely.is_empty(segments)]
    return MultiLineString(non_empty.tolist())
69 |
70 |
def _randomize(g: Geometry) -> Geometry:
    """Jitter every coordinate of g with Gaussian noise (sigma = 0.25)."""

    rng = np.random.default_rng()

    def random_transform(x):
        # fix: the original drew one extra standard_normal sample array here
        # and discarded the result — pure wasted work, removed
        return x + rng.standard_normal(x.shape) / 4

    return transform(g, random_transform)
78 |
79 |
def create_hatching(g: Geometry, bbox: list[float] | None, hatching_options: HatchingOptions) -> MultiLineString | None:
    """Fill geometry g with parallel hatch lines.

    Returns None if the geometry is empty or no hatch lines could be generated.
    """

    # if no bbox is supplied (ie. by using ST_Envelope in PostGIS),
    # we'll compute our own (may be slow)
    if bbox is None:
        bp = MultiPoint(g.exterior.coords).envelope
        bbox = [*bp.exterior.coords[0], *bp.exterior.coords[2]]

    hatch_lines = _create_hatch_lines(bbox, hatching_options.distance, hatching_options.angle)

    sg = g

    if shapely.is_empty(sg):
        return None

    # fix: make_valid() must run on *invalid* geometry — the check was inverted
    # and repair was only applied to geometry that was already valid
    if not shapely.is_valid(sg):
        sg = shapely.make_valid(sg)

    if hatch_lines is None:
        return None

    return _combine(sg, hatch_lines)
104 |
--------------------------------------------------------------------------------
/lineworld/core/layerstack.py:
--------------------------------------------------------------------------------
1 | from lineworld.layers.layer import Layer
2 |
3 |
class LayerStack:
    """Registry of map layers, keyed by their layer name."""

    def __init__(self, layers: list[Layer] | None = None):
        # fix: per-instance dict — the original used a class-level dict that
        # was shared by every LayerStack instance (and a mutable default arg)
        self.stack: dict[str, Layer] = {}
        self.add([] if layers is None else layers)

    def add(self, layers: Layer | list[Layer]) -> None:
        """Add a single layer or a list of layers to the stack."""
        if not isinstance(layers, list):
            # fix: the original assigned the wrapped list to `layer`, so a
            # single Layer argument was then iterated directly and failed
            layers = [layers]

        for layer in layers:
            self.stack[layer.layer_name] = layer

    def get(self, layer_name: str) -> Layer:
        """Return the layer registered under layer_name (KeyError if absent)."""
        return self.stack[layer_name]
19 |
--------------------------------------------------------------------------------
/lineworld/core/map.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from enum import Enum
3 | from typing import Any
4 |
5 | import numpy as np
6 | import pyproj
7 | import shapely
8 | from shapely.geometry import Polygon
9 |
10 |
class Projection(Enum):
    """Supported map projections, stored as (authority, code) CRS pairs."""

    WGS84 = "EPSG", 4326
    WEB_MERCATOR = "EPSG", 3857
    ECKERT_IV = "ESRI", 54012
    VAN_DER_GRINTEN_I = "ESRI", 54029

    def __str__(self):
        # render as an "AUTHORITY:CODE" CRS identifier, e.g. "EPSG:4326"
        authority, code = self.value
        return f"{authority}:{code}"
19 |
20 |
@dataclass
class Pen:
    """Plotter pen description used for styling output."""
    color: list[int]  # color components (presumably RGB — TODO confirm against svgwriter usage)
    stroke_size: float  # stroke width in document units (presumably mm — TODO confirm)
25 |
26 |
class DocumentInfo:
    """Map document configuration (projection, size, offsets) plus the affine
    transformations between world, raster, and map space."""

    # Earth's equatorial circumference in meters
    EQUATOR = 40075016.68557849

    def __init__(self, config: dict[str, Any]):
        self.config = config

        # projection is selected by Projection enum member name, e.g. "VAN_DER_GRINTEN_I"
        self.projection = Projection[config.get("projection", "VAN_DER_GRINTEN_I")]
        # wrapover: extend the projection beyond +/-180° longitude (see get_projection_func)
        self.wrapover = config.get("wrapover", True)

        # document dimensions (presumably mm — TODO confirm)
        self.width = config.get("width", 1000)
        self.height = config.get("height", 1000)

        self.offset_x = config.get("offset_x", 0)
        self.offset_y = config.get("offset_y", 0)

    def get_transformation_matrix(self) -> list[float]:
        """world space to map space"""

        # scale so that one equator length spans the document width
        a = 1 / self.EQUATOR * self.width
        b = 0
        d = 0
        e = 1 / self.EQUATOR * self.width * -1  # vertical flip
        # shift origin to the document center (plus configured offsets)
        xoff = self.width / 2.0 + self.offset_x
        yoff = self.height / 2.0 + self.offset_y
        return [a, b, d, e, xoff, yoff]

    def get_transformation_matrix_raster_to_map(self, raster_width: int, raster_height: int) -> list[float]:
        """raster space to map space"""

        a = (1 / raster_width) * self.width
        b = 0
        d = 0
        # NOTE(review): y is scaled by self.width (not height) — presumably a
        # deliberate width-based uniform scale, but worth confirming
        e = (1 / raster_height) * self.width
        xoff = self.offset_x
        yoff = -(self.width - self.height) / 2 + self.offset_y

        return [a, b, d, e, xoff, yoff]

    def get_transformation_matrix_map_to_raster(self, raster_width: int, raster_height: int) -> list[float]:
        """map space to raster space (inverse of get_transformation_matrix_raster_to_map)"""
        a, b, d, e, xoff, yoff = self.get_transformation_matrix_raster_to_map(raster_width, raster_height)
        # invert the 2x3 affine transform by extending it to homogeneous 3x3 form
        mat = np.matrix([[a, b, xoff], [d, e, yoff], [0, 0, 1]])
        mat_inv = np.linalg.inv(mat)
        # flatten back to shapely's [a, b, d, e, xoff, yoff] affine ordering
        return [
            float(e)
            for e in [
                mat_inv[0, 0],
                mat_inv[0, 1],
                mat_inv[1, 0],
                mat_inv[1, 1],
                mat_inv[0, 2],
                mat_inv[1, 2],
            ]
        ]

    def get_transformation_matrix_font(self, xoff: float, yoff: float) -> list[float]:
        """identity scale plus translation (used for placing font glyphs)"""
        a = 1
        b = 0
        d = 0
        e = 1
        return [a, b, d, e, xoff, yoff]

    def get_document_size(self) -> tuple[int, int]:
        """Return (width, height) of the document."""
        return (self.width, self.height)

    def get_viewport_padding(self) -> list[float | int]:
        """Padding as [top, right, bottom, left] (CSS order, see get_viewport)."""
        return self.config.get("viewport_padding", [0, 0, 0, 0])

    def get_viewport(self) -> Polygon:
        """Return the drawable area: the document rectangle shrunk by the padding."""
        # if self.config.get("debug", False):
        #     return shapely.box(-self.width, -self.height, self.width * 2, self.height * 2)
        # else:

        padding_top, padding_right, padding_bottom, padding_left = self.config.get("viewport_padding", [0, 0, 0, 0])
        return shapely.box(padding_left, padding_top, self.width - padding_right, self.height - padding_bottom)

    def get_projection_func(self, src_projection: Projection) -> Any:
        """Return a pyproj transform callable from src_projection into the document projection."""
        crs_src = pyproj.CRS(f"{src_projection.value[0]}:{src_projection.value[1]}")
        crs_dst = pyproj.CRS(f"{self.projection.value[0]}:{self.projection.value[1]}")

        # NOTE(review): when wrapover is set, the configured destination
        # projection is unconditionally replaced by Van der Grinten with +over —
        # confirm this is intended for projections other than VAN_DER_GRINTEN_I
        if self.wrapover:
            crs_dst = pyproj.CRS.from_proj4("+proj=vandg +over")

        return pyproj.Transformer.from_crs(crs_src, crs_dst, always_xy=True).transform
110 |
--------------------------------------------------------------------------------
/lineworld/core/svgwriter.py:
--------------------------------------------------------------------------------
1 | from io import StringIO, TextIOWrapper
2 | from pathlib import Path
3 | from typing import Any
4 |
5 | from loguru import logger
6 | from shapely import Geometry, MultiLineString, LineString, Polygon
7 |
8 |
9 | class SvgWriter:
10 | type SvgOptions = dict[str, str | int | float]
11 |
12 | layers: dict[str, list[tuple[Geometry, SvgOptions]]]
13 | styles: dict[str, SvgOptions]
14 |
15 | def __init__(self, filename: Path | str, dimensions: tuple[int, int]):
16 | self.filename: Path = Path(filename)
17 | self.dimensions: tuple[int, int] = dimensions
18 | self.image: str | None = None
19 | self.background_color: str | None = None
20 | self.offset: list[int | float] = [0, 0]
21 | self.debug: bool = False
22 |
23 | self.layers = {}
24 | self.styles = {}
25 |
26 | def add_style(self, layer: str, options: SvgOptions):
27 | if layer not in self.styles:
28 | self.styles[layer] = {}
29 |
30 | self.styles[layer] = {**self.styles[layer], **options}
31 |
32 | def add(self, layer: str, geom: Geometry | list[Geometry], options: SvgOptions = {}) -> None:
33 | logger.debug(f"layer {layer}: adding {len(geom) if type(geom) is list else 1} object(s)")
34 |
35 | if layer not in self.layers:
36 | self.layers[layer] = []
37 |
38 | if isinstance(geom, list):
39 | for i in range(len(geom)):
40 | if geom[i].is_empty:
41 | continue
42 | self.layers[layer].append((geom[i], options))
43 | else:
44 | self.layers[layer].append((geom, options))
45 |
    def write_layer(self, out: TextIOWrapper, layer_name: str):
        """Write all geometries of one layer to the output stream.

        NOTE(review): the f'' / "" literals below look like SVG group
        open/close tags whose markup was lost in this copy — verify the
        string contents against version control before editing.
        """
        layer = self.layers[layer_name]
        out.write(f'')
        for geom, options in layer:
            # dispatch on the concrete shapely geometry type
            match geom:
                # case Point():
                #     pass

                case Polygon():
                    self._write_polygon(out, geom, options)

                case LineString():
                    self._write_lineString(out, geom, options)

                case MultiLineString():
                    # a MultiLineString is emitted as one path per member line
                    for ls in geom.geoms:
                        self._write_lineString(out, ls, options)

                case _:
                    logger.warning(f"unknown geometry object in layer {layer_name}: {type(geom)}")

        out.write("")
        out.write("\n")
69 |
70 | def write(self):
71 | with open(self.filename, "w") as out:
72 | out.write('\n')
73 | out.write(
74 | '\n'
75 | )
76 |
77 | if self.dimensions is not None:
78 | out.write(
79 | f'")
118 |
    def _write_polygon(self, out: StringIO, p: Polygon, options: SvgOptions) -> None:
        # exterior ring plus one sub-path per interior ring (hole)
        self._write_path(out, p.exterior.coords, options, holes=[hole.coords for hole in p.interiors])
121 |
    def _write_lineString(self, out: StringIO, l: LineString, options: SvgOptions) -> None:
        # open path: a linestring is never closed back to its start point
        self._write_path(out, l.coords, options, close=False)
124 |
125 | def _write_path(
126 | self,
127 | out: StringIO,
128 | p: Any,
129 | options: SvgOptions,
130 | holes: list[Any] = [],
131 | close: bool = True,
132 | ) -> None:
133 | out.write('")
165 | out.write("\n")
166 |
--------------------------------------------------------------------------------
/lineworld/layers/bathymetry.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import geoalchemy2
4 | import shapely
5 | from core.hatching import HatchingOptions, HatchingDirection, create_hatching
6 | from core.map import DocumentInfo
7 | from geoalchemy2 import WKBElement
8 | from geoalchemy2.shape import to_shape
9 | from layers.elevation import ElevationLayer
10 | from shapely.geometry import Polygon, MultiLineString, MultiPolygon
11 | from sqlalchemy import Table, Column, Integer, Float, ForeignKey
12 | from sqlalchemy import engine, MetaData
13 | from sqlalchemy import select
14 | from sqlalchemy import text
15 |
16 | from lineworld.util.geometrytools import unpack_multipolygon
17 |
18 |
class Bathymetry(ElevationLayer):
    """Elevation layer for below-sea-level terrain, rendered as hatched polygons.

    Maintains three tables: world-space polygons per elevation level, their
    map-space projections, and the hatching lines generated from them.
    """

    def __init__(self, layer_id: str, db: engine.Engine, config: dict[str, Any]) -> None:
        super().__init__(layer_id, db, config)

        metadata = MetaData()

        # world-space polygons, one row per (elevation level, polygon)
        self.world_polygon_table = Table(
            f"{self.config_name}_bathymetry_world_polygons",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("elevation_level", Integer),
            Column("elevation_min", Float),
            Column("elevation_max", Float),
            Column(
                "polygon",
                geoalchemy2.Geography("POLYGON", srid=self.DATA_SRID.value[1]),
                nullable=False,
            ),
        )

        # map-space projection of each world polygon
        self.map_polygon_table = Table(
            f"{self.config_name}_bathymetry_map_polygons",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "world_polygon_id",
                ForeignKey(f"{self.world_polygon_table.fullname}.id"),
            ),
            Column(
                "polygon",
                geoalchemy2.Geometry("POLYGON", srid=self.DATA_SRID.value[1]),
                nullable=False,
            ),
        )

        # hatching lines generated for each map polygon
        self.map_lines_table = Table(
            f"{self.config_name}_bathymetry_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("map_polygon_id", ForeignKey(f"{self.map_polygon_table.fullname}.id")),
            Column("lines", geoalchemy2.Geometry("MULTILINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)

    def _style(
        self,
        p: Polygon,
        elevation_level: int,
        document_info: DocumentInfo,
        bbox: Polygon | None = None,
    ) -> list[MultiLineString]:
        """Generate hatching for polygon p; deeper elevation levels hatch denser."""
        if bbox is not None:
            # order: ((MINX, MINY), (MINX, MAXY), (MAXX, MAXY), (MAXX, MINY), (MINX, MINY))
            # ref: https://postgis.net/docs/ST_Envelope.html
            bbox = [
                *bbox.envelope.exterior.coords[0],
                *bbox.envelope.exterior.coords[2],
            ]

        # spacing shrinks by 0.2 per level: 4.0, 3.8, ... down to 0.2
        elevation_level_hatching_distance = [4.0 - 0.2 * i for i in range(20)]  # TODO: move to configuration object

        hatching_options = HatchingOptions()
        hatching_options.distance = elevation_level_hatching_distance[elevation_level]
        # fix: HatchingOptions has no `direction` field and create_hatching()
        # reads `angle`, so the previous `.direction = ...` assignment was
        # silently ignored and hatching always ran at the default 45°
        hatching_options.angle = HatchingDirection.ANGLE_135.value

        hatch = create_hatching(p, bbox, hatching_options)

        if hatch is not None:
            return [hatch]
        else:
            return []

    def out(
        self,
        exclusion_zones: MultiPolygon,
        document_info: DocumentInfo,
        select_elevation_level: int | None = None,
    ) -> tuple[list[shapely.Geometry], MultiPolygon]:
        """
        Returns (drawing geometries, exclusion polygons)

        If select_elevation_level is given, only lines belonging to that
        elevation level are returned; otherwise all lines.
        """

        drawing_geometries = []
        with self.db.begin() as conn:
            if select_elevation_level is None:
                result = conn.execute(select(self.map_lines_table))
                drawing_geometries = [to_shape(row.lines) for row in result]
            else:
                result = conn.execute(
                    text(f"""
                    SELECT lines
                    FROM
                        {self.map_lines_table} AS ml JOIN
                        {self.map_polygon_table} AS mp ON ml.map_polygon_id = mp.id
                        JOIN {self.world_polygon_table} AS wp ON mp.world_polygon_id = wp.id
                    WHERE
                        wp.elevation_level = :elevation_level
                """),
                    {"elevation_level": select_elevation_level},
                )

                # raw SQL rows carry WKB bytes; wrap them before conversion
                drawing_geometries = [to_shape(WKBElement(row.lines)) for row in result]

        # cut exclusion_zones out of the drawing geometries

        drawing_geometries_cut = []
        stencil = shapely.difference(document_info.get_viewport(), exclusion_zones)
        for g in drawing_geometries:
            drawing_geometries_cut.append(shapely.intersection(g, stencil))

        # and do not add anything to exclusion_zones

        return (drawing_geometries_cut, exclusion_zones)

    def out_polygons(
        self,
        exclusion_zones: list[Polygon],
        document_info: DocumentInfo,
        select_elevation_level: int | None = None,
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """
        Returns (drawing geometries, exclusion polygons)

        Like out(), but returns the map polygons themselves instead of
        their hatching lines.
        """

        stencil = shapely.difference(document_info.get_viewport(), shapely.unary_union(exclusion_zones))

        drawing_geometries = []
        with self.db.begin() as conn:
            if select_elevation_level is None:
                result = conn.execute(select(self.map_polygon_table))
                drawing_geometries = [to_shape(row.polygon) for row in result]
            else:
                result = conn.execute(
                    text(f"""
                    SELECT mp.polygon
                    FROM
                        {self.map_polygon_table} AS mp JOIN
                        {self.world_polygon_table} AS wp ON mp.world_polygon_id = wp.id
                    WHERE
                        wp.elevation_level = :elevation_level
                """),
                    {"elevation_level": select_elevation_level},
                )

                drawing_geometries = [to_shape(WKBElement(row.polygon)) for row in result]

        # clip each polygon to the viewport minus the exclusion zones
        drawing_geometries_cut = []
        for g in drawing_geometries:
            drawing_geometries_cut += unpack_multipolygon(shapely.intersection(g, stencil))

        return (drawing_geometries_cut, exclusion_zones)
186 |
--------------------------------------------------------------------------------
/lineworld/layers/bflowlines.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | from dataclasses import dataclass
4 | from pathlib import Path
5 | from typing import Any
6 |
7 | import cv2
8 | import geoalchemy2
9 | import numpy as np
10 | import rasterio
11 | import shapely
12 | from lineworld.core.map import DocumentInfo, Projection
13 | from geoalchemy2.shape import to_shape, from_shape
14 | from lineworld.layers.layer import Layer
15 | from loguru import logger
16 | from scipy import ndimage
17 | from shapely import Polygon, MultiLineString, STRtree, LineString, GeometryCollection
18 | from shapely.affinity import affine_transform
19 | from sqlalchemy import MetaData
20 | from sqlalchemy import Table, Column, Integer
21 | from sqlalchemy import engine
22 | from sqlalchemy import insert
23 | from sqlalchemy import select
24 | from sqlalchemy import text
25 | from rasterio.warp import (
26 | calculate_default_transform,
27 | reproject,
28 | Resampling,
29 | transform_bounds,
30 | )
31 |
32 | import lineworld
33 | from lineworld.core import flowlines
34 | from lineworld.core.flowlines import FlowlineTiler, FlowlineTilerPoly, Mapping
35 | from lineworld.util.rastertools import normalize_to_uint8
36 | from lineworld.util.slope import get_slope
37 |
38 |
@dataclass
class BathymetryFlowlinesMapLines:
    """Row object for the bathymetryflowlines map-lines table."""
    id: int | None  # database primary key (None before insertion)
    lines: MultiLineString

    def todict(self) -> dict[str, int | float | str | None]:
        # WKB-encode the geometry so it can be passed as an SQL parameter
        return {"lines": str(from_shape(self.lines))}
46 |
47 |
class BathymetryFlowlines(Layer):
    """Renders ocean depth as flow lines following the terrain gradient.

    Pipeline: reproject the GEBCO elevation mosaic into the document
    projection, derive direction/density/length control rasters, then run a
    flowline hatcher (optionally restricted to tile polygons) to produce
    map-space linestrings stored in the database.
    """

    DATA_URL = ""
    DATA_SRID = Projection.WGS84

    DEFAULT_LAYER_NAME = "BathymetryFlowlines"

    def __init__(
        self,
        layer_id: str,
        db: engine.Engine,
        config: dict[str, Any] = {},
        tile_boundaries: list[Polygon] = [],
    ) -> None:
        super().__init__(layer_id, db, config)

        # optional tile polygons (map space); empty list means grid tiling
        self.tile_boundaries = tile_boundaries

        self.data_dir = Path(
            Layer.DATA_DIR_NAME,
            self.config.get("layer_name", self.DEFAULT_LAYER_NAME).lower(),
        )
        self.source_file = Path(
            Layer.DATA_DIR_NAME, "elevation", "gebco_mosaic.tif"
        )  # TODO: hardcoded reference to elevation layer
        self.elevation_file = Path(
            self.data_dir, "flowlines_elevation.tif"
        )  # TODO: hardcoded reference to image file rendered by blender
        # self.highlights_file = Path(
        #     self.data_dir, "flowlines_highlights.png"
        # ) # TODO: hardcoded reference to image file rendered by blender
        self.density_file = Path("blender", "output.png")

        if not self.data_dir.exists():
            os.makedirs(self.data_dir)

        metadata = MetaData()

        self.map_lines_table = Table(
            f"{self.config_name}_bathymetryflowlines_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("lines", geoalchemy2.Geometry("LINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)

    def extract(self) -> None:
        # no download step: the source raster is expected to exist on disk
        pass

    def transform_to_world(self) -> None:
        # no world-space stage: reprojection happens directly in transform_to_map()
        pass

    def transform_to_map(self, document_info: DocumentInfo) -> None:
        """Reproject the GEBCO source GeoTiff into the document projection.

        Output size derives from the document width and the configured
        px_per_mm. Above-sea-level terrain is clamped to 0 before warping.
        The warp is skipped if a correctly-sized output file already exists.
        """
        logger.info("reprojecting GeoTiff")

        with rasterio.open(self.source_file) as src:
            dst_crs = f"{document_info.projection.value[0]}:{document_info.projection.value[1]}"

            # calculate optimal output dimensions to get the width/height-ratio after reprojection
            _, dst_width, dst_height = calculate_default_transform(src.crs, dst_crs, src.width, src.height, *src.bounds)
            ratio = dst_width / dst_height

            px_per_mm = self.config.get("px_per_mm", 10.0)
            dst_width = int(document_info.width * px_per_mm)
            # NOTE(review): height = width * ratio with ratio = w/h looks like
            # it should be a division — confirm the intended aspect handling
            dst_height = int(document_info.width * px_per_mm * ratio)

            skip = False
            if self.elevation_file.exists():
                with rasterio.open(self.elevation_file) as dst:
                    if dst.width == dst_width and dst.height == dst_height:
                        skip = True
                        logger.info("reprojected GeoTiff exists, skipping")

            if not skip:
                xmin, ymin, xmax, ymax = transform_bounds(src.crs, dst_crs, *src.bounds)
                # affine transform mapping output pixel coords to dst_crs coords
                dst_transform = rasterio.transform.Affine(
                    (xmax - xmin) / float(dst_width),
                    0,
                    xmin,
                    0,
                    (ymin - ymax) / float(dst_height),  # negative: row index grows downwards
                    ymax,
                )

                kwargs = src.meta.copy()
                kwargs.update(
                    {
                        "crs": dst_crs,
                        "transform": dst_transform,
                        "width": dst_width,
                        "height": dst_height,
                    }
                )

                with rasterio.open(self.elevation_file, "w", **kwargs) as dst:
                    for i in range(1, src.count + 1):
                        band_arr = src.read(i)

                        # remove any above-waterlevel terrain
                        band_arr[band_arr > 0] = 0

                        reproject(
                            source=band_arr,
                            destination=rasterio.band(dst, i),
                            src_transform=src.transform,
                            src_crs=src.crs,
                            dst_transform=dst_transform,
                            dst_crs=dst_crs,
                            resampling=Resampling.nearest,
                        )

        # NOTE(review): "fo" typo in this log message ("to file"?)
        logger.debug(f"reprojected fo file {self.elevation_file} | {dst_width} x {dst_height}px")

    def transform_to_lines(self, document_info: DocumentInfo) -> list[BathymetryFlowlinesMapLines]:
        """Compute flowline linestrings from the reprojected elevation raster."""
        flow_config = flowlines.FlowlineHatcherConfig()
        # overlay layer config values onto the hatcher defaults
        flow_config = lineworld.apply_config_to_object(self.config, flow_config)

        elevation = None
        with rasterio.open(self.elevation_file) as dataset:
            elevation = dataset.read(1)

        pixel_per_mm = 4
        # NOTE(review): resizes to a square (width used for both axes) — see TODO
        elevation = cv2.resize(
            elevation,
            [int(document_info.width * pixel_per_mm), int(document_info.width * pixel_per_mm)]
        )  # TODO

        density = None
        try:
            # use uint8 for the density map to save some memory and 256 values will be enough precision
            density = cv2.imread(str(self.density_file), cv2.IMREAD_GRAYSCALE)
            density = normalize_to_uint8(density)
            density = cv2.resize(density, [elevation.shape[1], elevation.shape[0]])

            # 50:50 blend of elevation data and externally computed density image
            elevation_normalized = normalize_to_uint8(elevation)
            density = np.mean(np.dstack([density, elevation_normalized]), axis=2).astype(np.uint8)

        except Exception as e:
            # best-effort: if the density image is missing/unreadable, density stays None
            logger.error(e)

        # MAX_TPI = 1_000
        # tpi = _calculate_topographic_position_index(data, 401)
        # tpi = np.clip(np.abs(tpi), 0, MAX_TPI)
        # normalized_tpi = normalize_to_uint8(tpi)

        _, _, _, _, angles, inclination = get_slope(elevation, 1)

        # local elevation variance in a sliding window, inverted and rescaled
        # to uint8: flat areas map high (long lines), rough areas low
        WINDOW_SIZE = 7
        MAX_WIN_VAR = 40000
        win_mean = ndimage.uniform_filter(elevation.astype(float), (WINDOW_SIZE, WINDOW_SIZE))
        win_sqr_mean = ndimage.uniform_filter(elevation.astype(float) ** 2, (WINDOW_SIZE, WINDOW_SIZE))
        win_var = win_sqr_mean - win_mean**2
        win_var = np.clip(win_var, 0, MAX_WIN_VAR)
        win_var = win_var * -1 + MAX_WIN_VAR
        win_var = (np.iinfo(np.uint8).max * ((win_var - np.min(win_var)) / np.ptp(win_var))).astype(np.uint8)

        # cv2.imwrite("win_var.png", win_var)
        # exit()

        # uint8 image must be centered around 128 to deal with negative values
        mapping_angle = ((angles + math.pi) / math.tau * 255.0).astype(np.uint8)

        # mask of near-flat cells (below the configured minimum inclination)
        mapping_flat = np.zeros_like(inclination, dtype=np.uint8)
        mapping_flat[inclination < flow_config.MIN_INCLINATION] = 255  # uint8

        mapping_distance = density  # uint8

        mapping_line_max_length = win_var  # uint8

        # optional smoothing of the control rasters
        if self.config.get("blur_distance", False):
            kernel_size = self.config.get("blur_distance_kernel_size", 10)
            mapping_distance = cv2.blur(mapping_distance, (kernel_size, kernel_size))

        if self.config.get("blur_angles", False):
            kernel_size = self.config.get("blur_angles_kernel_size", 10)
            mapping_angle = cv2.blur(mapping_angle, (kernel_size, kernel_size))

        if self.config.get("blur_length", False):
            kernel_size = self.config.get("blur_length_kernel_size", 10)
            mapping_line_max_length = cv2.blur(mapping_line_max_length, (kernel_size, kernel_size))

        mappings = {
            Mapping.DISTANCE: mapping_distance,
            Mapping.ANGLE: mapping_angle,
            Mapping.MAX_LENGTH: mapping_line_max_length,
            Mapping.FLAT: mapping_flat,
        }

        tiler = None
        if self.tile_boundaries is not None and len(self.tile_boundaries) > 0:
            # convert from map coordinates to raster pixel coordinates
            mat_map_to_raster = document_info.get_transformation_matrix_map_to_raster(
                elevation.shape[1], elevation.shape[0]
            )
            raster_tile_boundaries = [
                affine_transform(boundary, mat_map_to_raster) for boundary in self.tile_boundaries
            ]

            tiler = FlowlineTilerPoly(
                mappings,
                flow_config,
                self.tile_boundaries,  # map space
                raster_tile_boundaries,  # raster space
                use_rust=True
            )
        else:
            # no explicit tile polygons: fall back to a regular n x n grid
            tiler = FlowlineTiler(
                mappings,
                flow_config,
                (self.config.get("num_tiles", 4), self.config.get("num_tiles", 4)),
            )

        linestrings = tiler.hatch()

        # convert from raster pixel coordinates to map coordinates
        # mat_raster_to_map = document_info.get_transformation_matrix_raster_to_map(
        #     elevation.shape[1], elevation.shape[0]
        # )
        # linestrings = [affine_transform(line, mat_raster_to_map) for line in linestrings]
        linestrings = [line.simplify(self.config.get("tolerance", 0.1)) for line in linestrings]

        # TODO: this should be a function in geometrytools
        # keep plain linestrings; unwrap linestrings out of any GeometryCollection
        linestrings_filtered = []
        for g in linestrings:
            match g:
                case LineString():
                    linestrings_filtered.append(g)
                case GeometryCollection():
                    for sg in g.geoms:
                        if type(sg) is LineString:
                            linestrings_filtered.append(sg)
                case _:
                    logger.warning(f"unexpected geometry type during filtering: {type(g)}")

        return [BathymetryFlowlinesMapLines(None, line) for line in linestrings_filtered]

    def load(self, geometries: list[BathymetryFlowlinesMapLines]) -> None:
        """Replace the map-lines table contents with the given geometries."""
        if geometries is None:
            return

        if len(geometries) == 0:
            logger.warning("no geometries to load. abort")
            return
        else:
            logger.info(f"loading geometries: {len(geometries)}")

        with self.db.begin() as conn:
            conn.execute(text(f"TRUNCATE TABLE {self.map_lines_table.fullname} CASCADE"))
            conn.execute(insert(self.map_lines_table), [g.todict() for g in geometries])

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """
        Returns (drawing geometries, exclusion polygons)
        """

        drawing_geometries = []
        with self.db.begin() as conn:
            result = conn.execute(select(self.map_lines_table))
            drawing_geometries = [to_shape(row.lines) for row in result]

        # cut extrusion_zones into drawing_geometries
        # Note: using a STRtree here instead of unary_union() and difference() is a 6x speedup
        drawing_geometries_cut = []
        tree = STRtree(exclusion_zones)

        viewport = document_info.get_viewport()

        for g in drawing_geometries:
            # clip to the viewport first, then subtract overlapping exclusion zones
            g_processed = shapely.intersection(g, viewport)
            if g_processed.is_empty:
                continue

            for i in tree.query(g):
                g_processed = shapely.difference(g_processed, exclusion_zones[i])
                if g_processed.is_empty:
                    break
            else:
                # for-else: keep only lines not fully erased by an exclusion zone
                drawing_geometries_cut.append(g_processed)

        # and do not add anything to exclusion_zones
        return (drawing_geometries_cut, exclusion_zones)
332 |
--------------------------------------------------------------------------------
/lineworld/layers/cities.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import os
3 | from dataclasses import dataclass
4 | from pathlib import Path
5 | from typing import Any
6 |
7 | import geoalchemy2
8 | import numpy as np
9 | import shapely
10 | from lineworld.core.map import DocumentInfo, Projection
11 | from geoalchemy2.shape import from_shape, to_shape
12 | from lineworld.layers.layer import Layer
13 | from loguru import logger
14 | from shapely import Polygon, MultiLineString, LineString, Point
15 | from shapely.affinity import affine_transform
16 | from sqlalchemy import MetaData
17 | from sqlalchemy import Table, Column, Integer
18 | from sqlalchemy import engine
19 | from sqlalchemy import insert
20 | from sqlalchemy import select
21 | from sqlalchemy import text
22 |
23 | from lineworld.util.geometrytools import add_to_exclusion_zones
24 | from lineworld.util.hersheyfont import HersheyFont
25 | from lineworld.util import labelplacement
26 |
27 |
@dataclass
class CitiesLines:
    """Row object for the cities map-lines table: a marker circle plus label strokes."""
    id: int | None  # database primary key (None before insertion)
    circlelines: LineString  # outline of the city marker circle
    labellines: MultiLineString  # pen strokes of the city label text

    def __repr__(self) -> str:
        return f"CitiesLines [{self.id}]"

    def todict(self) -> dict[str, int | float | str | None]:
        # WKB-encode the geometries so they can be passed as SQL parameters
        return {
            "circlelines": str(from_shape(self.circlelines)),
            "labellines": str(from_shape(self.labellines)),
        }
41 | }
42 |
43 |
44 | class Cities(Layer):
45 | DATA_URL = ""
46 | DATA_SRID = Projection.WGS84
47 |
48 | DEFAULT_LAYER_NAME = "Cities"
49 |
50 | DEFAULT_CITIES_FILENAME = "cities.csv"
51 |
52 | # simplification tolerance in WGS84 latlon, resolution: 1°=111.32km (equator worst case)
53 | LAT_LON_PRECISION = 0.01
54 | LAT_LON_MIN_SEGMENT_LENGTH = 0.1
55 |
56 | DEFAULT_EXCLUDE_BUFFER_DISTANCE = 2
57 |
    def __init__(self, layer_id: str, db: engine.Engine, config: dict[str, Any]) -> None:
        """Set up data paths and create the cities map-lines table if needed."""
        super().__init__(layer_id, db, config)

        self.data_dir = Path(
            Layer.DATA_DIR_NAME,
            self.config.get("layer_name", self.DEFAULT_LAYER_NAME).lower(),
        )
        # CSV with the city list; parsed by labelplacement.read_from_file()
        self.cities_file = Path(
            self.data_dir,
            self.config.get("cities_filename", self.DEFAULT_CITIES_FILENAME),
        )
        self.exclude_buffer_distance = self.config.get("exclude_buffer_distance", self.DEFAULT_EXCLUDE_BUFFER_DISTANCE)

        if not self.data_dir.exists():
            os.makedirs(self.data_dir)

        metadata = MetaData()

        # one row per city: marker circle outline plus label strokes
        self.map_lines_table = Table(
            f"{self.config_name}_cities_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("circlelines", geoalchemy2.Geometry("LINESTRING"), nullable=False),
            Column("labellines", geoalchemy2.Geometry("MULTILINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)
85 |
86 | def extract(self) -> None:
87 | pass
88 |
89 | def transform_to_world(self) -> None:
90 | pass
91 |
92 | def transform_to_map(self, document_info: DocumentInfo) -> None:
93 | pass
94 |
95 | def transform_to_lines(self, document_info: DocumentInfo) -> list[CitiesLines]:
96 | lines = []
97 |
98 | cities = labelplacement.read_from_file(self.cities_file, document_info, self.config)
99 | cities = labelplacement.generate_placement(cities, self.config)
100 |
101 | for c in cities:
102 | lines.append(
103 | CitiesLines(
104 | None,
105 | c.circle.boundary,
106 | MultiLineString(c.text[c.placement]),
107 | )
108 | )
109 |
110 | return lines
111 |
112 | def load(self, geometries: list[CitiesLines]) -> None:
113 | if geometries is None:
114 | return
115 |
116 | if len(geometries) == 0:
117 | logger.warning("no geometries to load. abort")
118 | return
119 | else:
120 | logger.info(f"loading geometries: {len(geometries)}")
121 |
122 | with self.db.begin() as conn:
123 | conn.execute(text(f"TRUNCATE TABLE {self.map_lines_table.fullname} CASCADE"))
124 | conn.execute(insert(self.map_lines_table), [g.todict() for g in geometries])
125 |
126 | def _out(
127 | self,
128 | column_name: str,
129 | exclusion_zones: list[Polygon],
130 | document_info: DocumentInfo,
131 | ) -> tuple[list[shapely.Geometry], list[Polygon]]:
132 | """
133 | Returns (drawing geometries, exclusion polygons)
134 | """
135 |
136 | stencil = shapely.difference(document_info.get_viewport(), shapely.unary_union(exclusion_zones))
137 |
138 | drawing_geometries = []
139 | with self.db.begin() as conn:
140 | result = conn.execute(select(self.map_lines_table))
141 | drawing_geometries = [to_shape(row._asdict()[column_name]) for row in result]
142 |
143 | viewport_lines = shapely.intersection(stencil, np.array(drawing_geometries, dtype=MultiLineString))
144 | viewport_lines = viewport_lines[~shapely.is_empty(viewport_lines)]
145 | drawing_geometries = viewport_lines.tolist()
146 |
147 | # and add buffered lines to exclusion_zones
148 | exclusion_zones = add_to_exclusion_zones(
149 | drawing_geometries,
150 | exclusion_zones,
151 | self.exclude_buffer_distance,
152 | self.config.get("tolerance_exclusion_zones", 0.5),
153 | )
154 |
155 | return (drawing_geometries, exclusion_zones)
156 |
157 |
class CitiesLabels(Cities):
    """Drawing pass for the city text labels (the "labellines" column).

    Inherits all behavior from Cities; only selects which geometry column to
    draw. The former explicit __init__ was removed: it only delegated to
    super().__init__ with an identical signature, so inheritance covers it.
    """

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """Returns (drawing geometries, exclusion polygons) for the label linework."""
        return self._out("labellines", exclusion_zones, document_info)
166 |
167 |
class CitiesCircles(Cities):
    """Drawing pass for the city marker circles (the "circlelines" column).

    Inherits all behavior from Cities; only selects which geometry column to
    draw. The former explicit __init__ was removed: it only delegated to
    super().__init__ with an identical signature, so inheritance covers it.
    """

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """Returns (drawing geometries, exclusion polygons) for the circle linework."""
        return self._out("circlelines", exclusion_zones, document_info)
176 |
--------------------------------------------------------------------------------
/lineworld/layers/contour.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | import numpy as np
4 | import shapely
5 | from core.map import DocumentInfo
6 | import geoalchemy2
7 | from geoalchemy2 import WKBElement
8 | from geoalchemy2.shape import to_shape
9 | from layers.elevation import ElevationLayer
10 | from shapely.geometry import Polygon, MultiLineString, LineString, MultiPolygon
11 | from sqlalchemy import Table, Column, Integer, Float, ForeignKey
12 | from sqlalchemy import engine, MetaData
13 | from sqlalchemy import select
14 | from sqlalchemy import text
15 |
16 | from lineworld.layers.elevation import ElevationMapPolygon
17 |
18 |
class Contour(ElevationLayer):
    """Land elevation contour layer.

    Stores elevation-sliced polygons in world and map coordinates (three linked
    tables) and draws the outlines of the map polygons as contour linework.
    """

    def __init__(self, layer_id: str, db: engine.Engine, config: dict[str, Any]) -> None:
        """Create the three per-config tables (world polygons, map polygons,
        map lines) if they do not exist yet."""
        super().__init__(layer_id, db, config)

        metadata = MetaData()

        # polygons in geographic coordinates, one row per elevation slice
        self.world_polygon_table = Table(
            f"{self.config_name}_contour_world_polygons",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("elevation_level", Integer),
            Column("elevation_min", Float),
            Column("elevation_max", Float),
            Column(
                "polygon",
                geoalchemy2.Geography("POLYGON", srid=self.DATA_SRID.value[1]),
                nullable=False,
            ),
        )

        # polygons projected into map/document space, linked back to their world polygon
        self.map_polygon_table = Table(
            f"{self.config_name}_contour_map_polygons",
            metadata,
            Column("id", Integer, primary_key=True),
            Column(
                "world_polygon_id",
                ForeignKey(f"{self.world_polygon_table.fullname}.id"),
            ),
            Column(
                "polygon",
                geoalchemy2.Geometry("POLYGON", srid=self.DATA_SRID.value[1]),
                nullable=False,
            ),
        )

        # final drawable linework derived from the map polygons
        self.map_lines_table = Table(
            f"{self.config_name}_contour_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("map_polygon_id", ForeignKey(f"{self.map_polygon_table.fullname}.id")),
            Column("lines", geoalchemy2.Geometry("MULTILINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)

    def transform_to_map(self, document_info: DocumentInfo) -> list[ElevationMapPolygon]:
        # contour polygons of neighboring elevation levels may overlap by design
        return super().transform_to_map(document_info, allow_overlap=True)

    def _style(
        self,
        p: Polygon,
        elevation_level: int,
        document_info: DocumentInfo,
        bbox: Polygon | None = None,
    ) -> list[MultiLineString]:
        """Outline the polygon: one line for the exterior ring plus one per
        interior ring (hole). elevation_level and bbox are unused here but part
        of the styling interface."""
        lines = [LineString(p.exterior.coords)]
        # MultiLineString accepts raw coordinate sequences, no LineString wrapping needed
        lines += [x.coords for x in p.interiors]
        return [MultiLineString(lines)]

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """
        Returns (drawing geometries, exclusion polygons)

        Clips the stored contour lines to the viewport minus the existing
        exclusion zones. The exclusion zones are passed through unchanged.
        """

        stencil = shapely.difference(document_info.get_viewport(), shapely.unary_union(exclusion_zones))

        drawing_geometries = []
        with self.db.begin() as conn:
            result = conn.execute(select(self.map_lines_table))
            drawing_geometries = [to_shape(row.lines) for row in result]

        # vectorized clipping via shapely 2.x ufuncs, then drop fully-clipped geometries
        viewport_lines = shapely.intersection(stencil, np.array(drawing_geometries, dtype=MultiLineString))
        viewport_lines = viewport_lines[~shapely.is_empty(viewport_lines)]
        drawing_geometries = viewport_lines.tolist()

        # intentionally do not extend the exclusion zones

        return (drawing_geometries, exclusion_zones)

    def out_polygons(
        self,
        exclusion_zones: MultiPolygon,
        document_info: DocumentInfo,
        select_elevation_level: int | None = None,
    ) -> tuple[list[shapely.Geometry], MultiPolygon]:
        """
        Returns (drawing geometries, exclusion polygons)

        Returns all stored map polygons, or only those of a single elevation
        level if select_elevation_level is given. Exclusion zones are passed
        through unchanged.
        """

        drawing_geometries = []
        with self.db.begin() as conn:
            if select_elevation_level is None:
                result = conn.execute(select(self.map_polygon_table))
                drawing_geometries = [to_shape(row.polygon) for row in result]
            else:
                result = conn.execute(
                    text(f"""
                        SELECT mp.polygon
                        FROM
                            {self.map_polygon_table} AS mp JOIN
                            {self.world_polygon_table} AS wp ON mp.world_polygon_id = wp.id
                        WHERE
                            wp.elevation_level = :elevation_level
                    """),
                    {"elevation_level": select_elevation_level},
                )

                # raw SQL rows yield bare WKB values, wrap before converting to shapely
                drawing_geometries = [to_shape(WKBElement(row.polygon)) for row in result]

        return (drawing_geometries, exclusion_zones)
131 |
--------------------------------------------------------------------------------
/lineworld/layers/labels.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from dataclasses import dataclass
4 | from pathlib import Path
5 | from typing import Any
6 |
7 | import geoalchemy2
8 | import numpy as np
9 | import shapely
10 | from core.map import DocumentInfo, Projection
11 | from geoalchemy2.shape import from_shape, to_shape
12 | from layers.layer import Layer
13 | from loguru import logger
14 | from shapely import Polygon, MultiLineString, LineString
15 | from shapely.affinity import affine_transform
16 | from sqlalchemy import MetaData
17 | from sqlalchemy import Table, Column, String, Integer
18 | from sqlalchemy import engine
19 | from sqlalchemy import insert
20 | from sqlalchemy import select
21 | from sqlalchemy import text
22 |
23 | from lineworld.util.geometrytools import add_to_exclusion_zones
24 | from lineworld.util.hersheyfont import HersheyFont
25 |
26 |
@dataclass
class LabelsLines:
    """A single rendered label: its source text and the font linework in
    map/document coordinates."""

    id: int | None  # database row id; None until the record is inserted
    text: str
    lines: MultiLineString

    def __repr__(self) -> str:
        return f"LabelsLines [{self.id}]: {self.text}"

    def todict(self) -> dict[str, int | float | str | None]:
        """Serialize the record (geometry via geoalchemy2 from_shape) for a DB insert."""
        serialized_lines = str(from_shape(self.lines))
        return {"text": self.text, "lines": serialized_lines}
38 |
39 |
class Labels(Layer):
    """Layer rendering free-form text labels from a JSON file.

    Labels are read from labels.json, rendered with a Hershey vector font along
    a projected path, and stored as line geometries in a per-config table.
    """

    DATA_URL = ""
    DATA_SRID = Projection.WGS84

    DEFAULT_LAYER_NAME = "Labels"
    DEFAULT_LABELS_FILENAME = "labels.json"

    # simplification tolerance in WGS84 latlon, resolution: 1°=111.32km (equator worst case)
    LAT_LON_PRECISION = 0.01
    LAT_LON_MIN_SEGMENT_LENGTH = 0.1

    # buffer distance around drawn lines when extending the exclusion zones
    DEFAULT_EXCLUDE_BUFFER_DISTANCE = 2
    DEFAULT_FONT_SIZE = 12

    def __init__(self, layer_id: str, db: engine.Engine, config: dict[str, Any]) -> None:
        """Resolve paths and font size from config, ensure the data directory
        and the per-config map-lines table exist, and load the font."""
        super().__init__(layer_id, db, config)

        self.data_dir = Path(
            Layer.DATA_DIR_NAME,
            self.config.get("layer_name", self.DEFAULT_LAYER_NAME).lower(),
        )
        self.labels_file = Path(
            self.data_dir,
            self.config.get("labels_filename", self.DEFAULT_LABELS_FILENAME),
        )
        self.font_size = self.config.get("font_size", self.DEFAULT_FONT_SIZE)

        if not self.data_dir.exists():
            os.makedirs(self.data_dir)

        metadata = MetaData()

        self.map_lines_table = Table(
            f"{self.config_name}_labels_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("text", String, nullable=False),
            Column("lines", geoalchemy2.Geometry("MULTILINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)

        # self.font = HersheyFont(font_file="fonts/HersheySerifMed.svg")
        self.font = HersheyFont()

    def extract(self) -> None:
        """No-op: label data is expected to exist locally as a JSON file."""
        pass

    def transform_to_world(self) -> None:
        """No-op: no world-space geometry stage for this layer."""
        pass

    def transform_to_map(self, document_info: DocumentInfo) -> None:
        """No-op: projection happens directly in transform_to_lines()."""
        pass

    def transform_to_lines(self, document_info: DocumentInfo) -> list[LabelsLines]:
        """Render each label entry along a projected path into font linework.

        Each entry in the JSON "labels" array holds a coordinate pair and a
        text (possibly multi-line, separated by "\\n"); presumably the pair is
        (lat, lon) since the components are swapped below — TODO confirm.
        Returns an empty list if the labels file is missing.
        """
        if not self.labels_file.exists():
            logger.warning(f"labels file {self.labels_file} not found")
            return []

        project_func = document_info.get_projection_func(self.DATA_SRID)
        mat = document_info.get_transformation_matrix()

        labellines = []

        with open(self.labels_file) as f:
            data = json.load(f)

            for label_data in data["labels"]:
                # baseline path running 50° east from the anchor point,
                # densified so the projection bends it smoothly
                path = LineString(
                    [
                        [label_data[0][1], label_data[0][0]],
                        [label_data[0][1] + 50, label_data[0][0]],
                    ]
                ).segmentize(0.1)
                path = shapely.ops.transform(project_func, path)
                path = affine_transform(path, mat)

                sub_labels = label_data[1].split("\n")

                for i, sub_label in enumerate(sub_labels):
                    lines = MultiLineString(self.font.lines_for_text(sub_label, self.font_size, path=path))

                    center_offset = shapely.envelope(lines).centroid
                    minx, miny, maxx, maxy = lines.bounds

                    # shift left by half the text width to center on the anchor,
                    # shift down one line (factor 1.08 line spacing) per sub-label
                    lines = shapely.affinity.translate(
                        lines,
                        xoff=-(center_offset.x - minx),
                        yoff=+(self.font_size * 1.08 * i),
                    )

                    labellines.append(LabelsLines(None, sub_label, lines))

        return labellines

    def load(self, geometries: list[LabelsLines]) -> None:
        """Replace the table contents with the given geometries.

        None is ignored (other pipeline stages return None); an empty list only
        logs a warning and leaves the table untouched.
        """
        if geometries is None:
            return

        if len(geometries) == 0:
            logger.warning("no geometries to load. abort")
            return
        else:
            logger.info(f"loading geometries: {len(geometries)}")

        with self.db.begin() as conn:
            conn.execute(text(f"TRUNCATE TABLE {self.map_lines_table.fullname} CASCADE"))
            conn.execute(insert(self.map_lines_table), [g.todict() for g in geometries])

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """
        Returns (drawing geometries, exclusion polygons)

        Clips stored label lines to the viewport minus existing exclusion zones
        and extends the exclusion zones with a buffer around the drawn lines.
        """

        stencil = shapely.difference(document_info.get_viewport(), shapely.unary_union(exclusion_zones))

        drawing_geometries = []
        with self.db.begin() as conn:
            result = conn.execute(select(self.map_lines_table))
            drawing_geometries = [to_shape(row.lines) for row in result]

        # vectorized clipping via shapely 2.x ufuncs, then drop fully-clipped geometries
        viewport_lines = shapely.intersection(stencil, np.array(drawing_geometries, dtype=MultiLineString))
        viewport_lines = viewport_lines[~shapely.is_empty(viewport_lines)]
        drawing_geometries = viewport_lines.tolist()

        # and add buffered lines to exclusion_zones
        exclusion_zones = add_to_exclusion_zones(
            drawing_geometries,
            exclusion_zones,
            self.config.get("exclude_buffer_distance", self.DEFAULT_EXCLUDE_BUFFER_DISTANCE),
            self.config.get("tolerance_exclusion_zones", 0.5),
        )

        return (drawing_geometries, exclusion_zones)
176 |
--------------------------------------------------------------------------------
/lineworld/layers/layer.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Any
3 |
4 | from shapely import Polygon, Geometry
5 | from sqlalchemy import engine
6 | from loguru import logger
7 |
8 | from lineworld.core.map import DocumentInfo
9 |
10 |
class Layer:
    """Base class for all map layers.

    Defines the pipeline interface (transform_to_world -> transform_to_map ->
    transform_to_lines -> load -> out) and extracts this layer's section from
    the global configuration dict.
    """

    # root directory for downloaded / cached layer data
    DATA_DIR_NAME = "data"

    def __init__(self, layer_id: str, db: engine.Engine, config: dict[str, Any]):
        self.layer_id = layer_id
        self.db = db

        layer_section = config.get("layer", {})
        if layer_id not in layer_section:
            logger.warning(f"layer {layer_id} has no configuration entry. Using default configuration.")

        # config name reduced to alphanumerics, used as a table-name prefix
        self.config_name = re.sub(r"\W+", "", config.get("name", "Basic").lower())
        self.config = layer_section.get(layer_id, {})

    def transform_to_world(self):
        """Stage 1: produce geometry in world (geographic) coordinates."""
        pass

    def transform_to_map(self, document_info: DocumentInfo) -> list[Any]:
        """Stage 2: project world geometry into map/document coordinates."""
        pass

    def transform_to_lines(self, document_info: DocumentInfo) -> list[Any]:
        """Stage 3: convert map geometry into drawable linework."""
        pass

    def load(self, geometries: list[Any]) -> None:
        """Persist the output of a pipeline stage to the database."""
        pass

    def out(self, exclusion_zones: list[Polygon], document_info: DocumentInfo) -> tuple[list[Geometry], list[Polygon]]:
        """Return (drawing geometries, exclusion polygons) for final output."""
        pass
38 |
--------------------------------------------------------------------------------
/lineworld/layers/meta.py:
--------------------------------------------------------------------------------
1 | import os
2 | from dataclasses import dataclass
3 | from pathlib import Path
4 | from typing import Any
5 |
6 | import geoalchemy2
7 | import shapely
8 | from lineworld.core.map import DocumentInfo, Projection
9 | from geoalchemy2.shape import from_shape, to_shape
10 | from lineworld.layers.layer import Layer
11 | from loguru import logger
12 | from shapely import Polygon, MultiLineString
13 | from sqlalchemy import MetaData
14 | from sqlalchemy import Table, Column, String, Integer
15 | from sqlalchemy import engine
16 | from sqlalchemy import insert
17 | from sqlalchemy import select
18 | from sqlalchemy import text
19 |
20 | from lineworld.util.hersheyfont import HersheyFont
21 |
22 |
@dataclass
class MetaLines:
    """A piece of map metadata text (title, scale, ...) with its rendered
    font linework in map/document coordinates."""

    id: int | None  # database row id; None until the record is inserted
    text: str
    lines: MultiLineString

    def __repr__(self) -> str:
        return f"MetaLines [{self.id}]: {self.text}"

    def todict(self) -> dict[str, int | float | str | None]:
        """Serialize the record (geometry via geoalchemy2 from_shape) for a DB insert."""
        serialized_lines = str(from_shape(self.lines))
        return {"text": self.text, "lines": serialized_lines}
34 |
35 |
class Meta(Layer):
    """Layer rendering map metadata: the map title and a scale indicator.

    The generated Hershey-font line geometries are stored in a per-config
    database table and returned verbatim (unclipped) by out(). This layer has
    no external data source.
    """

    DATA_URL = ""
    DATA_SRID = Projection.WGS84

    DEFAULT_LAYER_NAME = "Meta"

    DEFAULT_FONT_SIZE = 12

    def __init__(self, layer_id: str, db: engine.Engine, config: dict[str, Any]) -> None:
        """Ensure the data directory and the per-config map-lines table exist,
        and load the Hershey font."""
        super().__init__(layer_id, db, config)

        self.data_dir = Path(
            Layer.DATA_DIR_NAME,
            self.config.get("layer_name", self.DEFAULT_LAYER_NAME).lower(),
        )

        if not self.data_dir.exists():
            os.makedirs(self.data_dir)

        metadata = MetaData()

        # NOTE: "text" is declared NOT NULL, so every MetaLines record must
        # carry a non-None text (see transform_to_lines below)
        self.map_lines_table = Table(
            f"{self.config_name}_meta_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("text", String, nullable=False),
            Column("lines", geoalchemy2.Geometry("MULTILINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)

        self.font = HersheyFont()

    def extract(self) -> None:
        """No-op: this layer has no external data to download."""
        pass

    def transform_to_world(self) -> None:
        """No-op: no world-space geometry stage for this layer."""
        pass

    def transform_to_map(self, document_info: DocumentInfo) -> None:
        """No-op: text is generated directly in map/document space."""
        pass

    def transform_to_lines(self, document_info: DocumentInfo) -> list[MetaLines]:
        """Generate the title and the scale annotation as font linework placed
        along the bottom edge of the document.

        Returns a list of MetaLines records with id=None.
        """
        padding = document_info.get_viewport_padding()

        meta_lines = []

        title = "THE WORLD"
        meta_lines.append(
            MetaLines(
                None,
                title,
                shapely.affinity.translate(
                    MultiLineString(self.font.lines_for_text(title, 18)),
                    xoff=padding[3],  # left padding
                    yoff=document_info.height - 4,
                ),
            )
        )

        # Earth's equatorial circumference (40,075 km) in centimeters, divided
        # by the document width. NOTE(review): this assumes document_info.width
        # is in centimeters — confirm the document unit.
        scale = (40_075 * 1000 * 100) / document_info.width
        scale_text = f"Scale: 1:{int(scale)}"

        meta_lines.append(
            MetaLines(
                None,
                # Bugfix: was None, which load() would reject because the
                # "text" column is declared nullable=False.
                scale_text,
                shapely.affinity.translate(
                    MultiLineString(self.font.lines_for_text(scale_text, 9)),
                    xoff=padding[3] + 120,
                    yoff=document_info.height - 4,
                ),
            )
        )

        return meta_lines

    def load(self, geometries: list[MetaLines]) -> None:
        """Replace the table contents with the given geometries.

        None is ignored (other pipeline stages return None); an empty list only
        logs a warning and leaves the table untouched.
        """
        if geometries is None:
            return

        if len(geometries) == 0:
            logger.warning("no geometries to load. abort")
            return
        else:
            logger.info(f"loading geometries: {len(geometries)}")

        with self.db.begin() as conn:
            conn.execute(text(f"TRUNCATE TABLE {self.map_lines_table.fullname} CASCADE"))
            conn.execute(insert(self.map_lines_table), [g.todict() for g in geometries])

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """
        Returns (drawing geometries, exclusion polygons)

        Geometries are returned unclipped; exclusion zones pass through
        unchanged.
        """

        drawing_geometries = []
        with self.db.begin() as conn:
            result = conn.execute(select(self.map_lines_table))
            drawing_geometries = [to_shape(row.lines) for row in result]

        return (drawing_geometries, exclusion_zones)
138 |
--------------------------------------------------------------------------------
/lineworld/layers/oceancurrents.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | from dataclasses import dataclass
4 | from pathlib import Path
5 | from typing import Any
6 |
7 | import cv2
8 | import geoalchemy2
9 | import numpy as np
10 | import rasterio
11 | import shapely
12 |
13 | from lineworld.core.map import DocumentInfo, Projection
14 | from geoalchemy2.shape import to_shape, from_shape
15 | from lineworld.layers.layer import Layer
16 | from loguru import logger
17 | from shapely import Polygon, MultiLineString, STRtree, LineString, GeometryCollection
18 | from shapely.affinity import affine_transform
19 | from sqlalchemy import MetaData
20 | from sqlalchemy import Table, Column, Integer
21 | from sqlalchemy import engine
22 | from sqlalchemy import insert
23 | from sqlalchemy import select
24 | from sqlalchemy import text
25 | from rasterio.warp import (
26 | calculate_default_transform,
27 | reproject,
28 | Resampling,
29 | )
30 | import netCDF4
31 |
32 | import lineworld
33 | from lineworld.core import flowlines
34 | from lineworld.core.flowlines import Mapping, FlowlineTilerPoly
35 | from lineworld.util.rastertools import normalize_to_uint8
36 |
37 |
@dataclass
class OceanCurrentsMapLines:
    """A single ocean-current flowline ready for database insertion."""

    # database row id; None until the record is inserted
    id: int | None
    lines: MultiLineString

    def todict(self) -> dict[str, int | float | str | None]:
        """Serialize the geometry (via geoalchemy2 from_shape) for a DB insert."""
        return {"lines": str(from_shape(self.lines))}
45 |
46 |
class OceanCurrents(Layer):
    """Layer rendering ocean surface currents as flowlines.

    Reads OSCAR u/v current components from a netCDF file, reprojects them into
    a two-band GeoTIFF in the document projection, converts the vector field
    into flowline hatching via FlowlineTilerPoly, and stores the resulting
    linestrings in a per-config database table.
    """

    DATA_URL = ""
    DATA_SRID = Projection.WGS84

    DEFAULT_LAYER_NAME = "OceanCurrents"

    # NOTE(review): mutable default arguments (dict/list) are shared across
    # calls if ever mutated — consider None-defaults; left unchanged here.
    def __init__(
        self,
        layer_id: str,
        db: engine.Engine,
        config: dict[str, Any] = {},
        tile_boundaries: list[Polygon] = [],
    ) -> None:
        """Store the tiling polygons (in map space), resolve data paths, and
        ensure the data directory and the per-config table exist."""
        super().__init__(layer_id, db, config)

        self.tile_boundaries = tile_boundaries

        self.data_dir = Path(
            Layer.DATA_DIR_NAME,
            self.config.get("layer_name", self.DEFAULT_LAYER_NAME).lower(),
        )

        self.source_file = Path(self.data_dir, "oscar_currents_final_20220101.nc") # TODO: hardcoded reference

        self.reprojection_file = Path(self.data_dir, "reproject.tif")

        if not self.data_dir.exists():
            os.makedirs(self.data_dir)

        metadata = MetaData()

        self.map_lines_table = Table(
            f"{self.config_name}_oceancurrents_map_lines",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("lines", geoalchemy2.Geometry("LINESTRING"), nullable=False),
        )

        metadata.create_all(self.db)

    def extract(self) -> None:
        """No-op: the netCDF source file is expected to exist locally."""
        pass

    def transform_to_world(self) -> None:
        """No-op: no world-space geometry stage for this layer."""
        pass

    def transform_to_map(self, document_info: DocumentInfo) -> None:
        """Reproject the u/v current bands from the netCDF source into a
        two-band GeoTIFF (self.reprojection_file) in the document projection."""
        logger.info("reprojecting GeoTiff")

        with netCDF4.Dataset(self.source_file, "r", format="NETCDF4") as data:
            # convert xarray to numpy NdArray, fill empty pixels with zeros
            u = data.variables["u"][:].filled(0)[0, :, :]
            v = data.variables["v"][:].filled(0)[0, :, :]

        # swap lat lon axes
        u = u.T
        v = v.T

        bands = [u, v]
        src_height, src_width = u.shape

        # Resolution: 0.25°, decimal degrees per pixel
        src_resolution = 0.25

        # Target resolution, in units of target coordinate reference system
        dst_resolution = 10_000.0

        dst_crs = str(document_info.projection)

        src_transform = rasterio.Affine.translation(0, -90) * rasterio.Affine.scale(src_resolution, src_resolution)
        src_crs = {"init": str(self.DATA_SRID)} # rasterio-style CRS dict

        # Origin: top-left. Order: left, bottom, right, top
        src_bounds = [-180, -90, 180, 90]
        dst_transform, dst_width, dst_height = calculate_default_transform(
            src_crs, dst_crs, src_width, src_height, *src_bounds, resolution=dst_resolution
        )

        params = {
            "width": dst_width,
            "height": dst_height,
            "count": len(bands),
            "crs": dst_crs,
            "transform": dst_transform,
            "dtype": np.float32,
        }

        with rasterio.open(self.reprojection_file, "w", **params) as dst:
            for i, band in enumerate(bands):
                reproject(
                    source=band,
                    destination=rasterio.band(dst, i + 1),
                    src_transform=src_transform,
                    src_crs=src_crs,
                    dst_transform=dst_transform,
                    dst_crs=dst_crs,
                    resampling=Resampling.nearest,
                )

    def transform_to_lines(self, document_info: DocumentInfo) -> list[OceanCurrentsMapLines]:
        """Convert the reprojected u/v vector field into flowline hatching.

        Builds uint8 mapping rasters (distance, angle, max line length, flat
        mask) for the flowline hatcher, runs the polygon-tiled hatcher, then
        simplifies and filters the resulting linestrings.
        """
        flow_config = flowlines.FlowlineHatcherConfig()
        flow_config = lineworld.apply_config_to_object(self.config, flow_config)

        with rasterio.open(self.reprojection_file) as dataset:
            u = dataset.read(1)
            v = dataset.read(2)

        # increase size for smoothing
        # OSCAR current data has a rather low resolution of 0.25° and
        # the flowlines algorithm does no smoothing, just raster pixel selection
        # by applying int(coordinate * scaling_factor)
        resized_shape = [u.shape[1] * 2, u.shape[0] * 2]
        u = cv2.resize(u, resized_shape)
        v = cv2.resize(v, resized_shape)

        angles = np.arctan2(u, v)
        magnitude = np.hypot(u, v)

        angles = (angles + math.pi / 2) % math.tau
        # center around math.pi (128) so we avoid negative values
        mapping_angle = angles + math.pi
        mapping_angle = ((mapping_angle / math.tau) * 255).astype(np.uint8)

        # flat mask: pixels with negligible current are marked 255 and skipped
        mapping_flat = np.zeros_like(magnitude, dtype=np.uint8)
        mapping_flat[magnitude < flow_config.MIN_INCLINATION] = 255 # uint8

        # stronger currents -> smaller line distance (inverted normalization)
        magnitude = np.clip(magnitude, 0, 1)
        mapping_distance = ~normalize_to_uint8(magnitude) # uint8

        # mapping_line_max_length = np.full_like(mapping_angle, 255)
        mapping_line_max_length = np.copy(mapping_distance)

        mappings = {
            Mapping.DISTANCE: mapping_distance,
            Mapping.ANGLE: mapping_angle,
            Mapping.MAX_LENGTH: mapping_line_max_length,
            Mapping.FLAT: mapping_flat,
        }

        # tile polygons are given in map space; the hatcher also needs them in
        # raster pixel space
        mat_map_to_raster = document_info.get_transformation_matrix_map_to_raster(u.shape[1], u.shape[0])
        raster_tile_boundaries = [
            affine_transform(boundary, mat_map_to_raster) for boundary in self.tile_boundaries
        ]

        tiler = FlowlineTilerPoly(
            mappings,
            flow_config,
            self.tile_boundaries, # map space
            raster_tile_boundaries, # raster space
            use_rust=True
        )
        linestrings = tiler.hatch()

        # no tiling, compute all at once
        # rust_config = flowlines_py.FlowlinesConfig()
        # rust_config = _py_config_to_rust_config(flow_config, rust_config)
        # mappings = [mapping_distance, mapping_angle, mapping_line_max_length, mapping_flat]
        # rust_lines: list[list[tuple[float, float]]] = flowlines_py.hatch([u.shape[1], u.shape[0]], rust_config, *mappings)
        # linestrings = [LineString(l) for l in rust_lines]

        # convert from raster pixel coordinates to map coordinates
        # mat_raster_to_map = document_info.get_transformation_matrix_raster_to_map(u.shape[1], u.shape[0])
        # linestrings = [affine_transform(line, mat_raster_to_map) for line in linestrings]
        linestrings = [line.simplify(self.config.get("tolerance", 0.1)) for line in linestrings]

        # TODO: this should be a function in geometrytools
        # keep plain LineStrings; unpack LineStrings out of GeometryCollections
        # (simplify may have produced them); drop everything else with a warning
        linestrings_filtered = []
        for g in linestrings:
            match g:
                case LineString():
                    linestrings_filtered.append(g)
                case GeometryCollection():
                    for sg in g.geoms:
                        if type(sg) is LineString:
                            linestrings_filtered.append(sg)
                case _:
                    logger.warning(f"unexpected geometry type during filtering: {type(g)}")

        return [OceanCurrentsMapLines(None, line) for line in linestrings_filtered]

    def load(self, geometries: list[OceanCurrentsMapLines]) -> None:
        """Replace the table contents with the given geometries.

        None is ignored (other pipeline stages return None); an empty list only
        logs a warning and leaves the table untouched.
        """
        if geometries is None:
            return

        if len(geometries) == 0:
            logger.warning("no geometries to load. abort")
            return
        else:
            logger.info(f"loading geometries: {len(geometries)}")

        with self.db.begin() as conn:
            conn.execute(text(f"TRUNCATE TABLE {self.map_lines_table.fullname} CASCADE"))
            conn.execute(insert(self.map_lines_table), [g.todict() for g in geometries])

    def out(
        self, exclusion_zones: list[Polygon], document_info: DocumentInfo
    ) -> tuple[list[shapely.Geometry], list[Polygon]]:
        """
        Returns (drawing geometries, exclusion polygons)

        Clips the stored flowlines to the viewport and subtracts any exclusion
        zone they intersect. Exclusion zones are passed through unchanged.
        """

        drawing_geometries = []
        with self.db.begin() as conn:
            result = conn.execute(select(self.map_lines_table))
            drawing_geometries = [to_shape(row.lines) for row in result]

        # cut exclusion_zones into drawing_geometries
        # Note: using a STRtree here instead of unary_union() and difference() is a 6x speedup
        drawing_geometries_cut = []
        tree = STRtree(exclusion_zones)

        viewport = document_info.get_viewport()

        for g in drawing_geometries:
            g_processed = shapely.intersection(g, viewport)
            if g_processed.is_empty:
                continue

            # subtract every exclusion zone whose bbox intersects the line;
            # the for/else keeps the line only if it was not erased completely
            for i in tree.query(g):
                g_processed = shapely.difference(g_processed, exclusion_zones[i])
                if g_processed.is_empty:
                    break
            else:
                drawing_geometries_cut.append(g_processed)

        # and do not add anything to exclusion_zones
        return (drawing_geometries_cut, exclusion_zones)
274 |
--------------------------------------------------------------------------------
/lineworld/main.py:
--------------------------------------------------------------------------------
1 | import uvicorn
2 | from fastapi import FastAPI, Request
3 | from fastapi.responses import HTMLResponse
4 | from fastapi.staticfiles import StaticFiles
5 | from fastapi.templating import Jinja2Templates
6 | from sqlalchemy import create_engine
7 |
8 | from lineworld.core.layerstack import LayerStack
9 | from lineworld.layers import bathymetry, contour, coastlines, grid
10 |
# FastAPI application serving layer information for a browser-based viewer
app = FastAPI()

app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")

# NOTE(review): connection string carries no credentials — assumes local
# trust/peer authentication; echo=True logs every SQL statement
engine = create_engine("postgresql+psycopg://localhost:5432/lineworld", echo=True)

# NOTE(review): these constructor signatures differ from the config-based ones
# used in run.py — this module may be stale; confirm before relying on it
layerstack = LayerStack(
    [
        bathymetry.Bathymetry("Bathymetry", [0, -12_000], 15, engine),
        contour.Contour("Contour", [0, 9_000], 15, engine),
        coastlines.Coastlines("Coastlines", engine),
        grid.Grid("Grid", engine),
    ]
)
26 |
27 |
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Render and return the home page template."""
    rendered = templates.TemplateResponse(request=request, name="home.html")
    return rendered
32 |
@app.get("/layers")
async def get_layer_info():
    """Return the whole layer stack, serialized by FastAPI, for the client UI."""
    # removed dead commented-out code that duplicated this response by hand
    return layerstack
38 |
@app.get("/layer/{layer_name}")
async def get_layer(layer_name: str):
    """Look up and return a single layer from the stack by its name."""
    layer = layerstack.get(layer_name)
    return layer
42 |
43 |
if __name__ == "__main__":
    # start a local ASGI server when the module is executed directly
    uvicorn.run(app, host="0.0.0.0", port=8000)
46 |
--------------------------------------------------------------------------------
/lineworld/run.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import cProfile as profile
3 | import datetime
4 |
5 | import numpy as np
6 | from loguru import logger
7 | from shapely.geometry import MultiPolygon
8 | from sqlalchemy import create_engine
9 |
10 | import lineworld
11 | from core import map
12 | from layers import contour
13 | from lineworld.core.map import DocumentInfo
14 | from lineworld.core.svgwriter import SvgWriter
15 | from lineworld.layers import coastlines, grid, labels, cities, bflowlines, bathymetry, cities, contour2, meta
16 | from lineworld.util.export import convert_svg_to_png
17 |
18 |
def run() -> None:
    """Compute the configured map layers and render them into an SVG (plus a PNG preview).

    Per-layer pipeline (for every layer in `compute_layers`):
    extract -> transform_to_world -> load -> transform_to_map -> load ->
    transform_to_lines -> load. Afterwards the layers in `visible_layers`
    are drawn front-to-back; each layer subtracts the accumulated exclusion
    zones of the layers drawn before it.
    """
    timer_total_runtime = datetime.datetime.now()

    # profiler object kept disabled; re-enable around a suspect section when profiling
    pr = profile.Profile()
    pr.disable()

    # parser = argparse.ArgumentParser(description="...")
    # parser.add_argument("--set", metavar="KEY=VALUE", nargs='+')
    # args = vars(parser.parse_args())
    #
    # print(args["set"])
    # exit()

    config = lineworld.get_config()
    engine = create_engine(config["main"]["db_connection"])  # , echo=True)
    document_info = map.DocumentInfo(config)

    # construct all layer objects; only a subset is computed / drawn below
    layer_grid_bathymetry = grid.GridBathymetry("GridBathymetry", engine, config)
    layer_grid_labels = grid.GridLabels("GridLabels", engine, config)

    layer_bathymetry = bflowlines.BathymetryFlowlines(
        "BathymetryFlowlines", engine, config, tile_boundaries=layer_grid_bathymetry.get_polygons(document_info)
    )
    layer_bathymetry2 = bathymetry.Bathymetry("Bathymetry", engine, config)
    layer_contour = contour.Contour("Contour", engine, config)
    layer_contour2 = contour2.Contour2("Contour2", engine, config)

    layer_coastlines = coastlines.Coastlines("Coastlines", engine, config)

    layer_cities_labels = cities.CitiesLabels("CitiesLabels", engine, config)
    layer_cities_circles = cities.CitiesCircles("CitiesCircles", engine, config)

    layer_labels = labels.Labels("Labels", engine, config)
    layer_meta = meta.Meta("Meta", engine, config)

    # layers whose data is (re)computed in this run; the rest is read from the db as-is
    compute_layers = [
        layer_bathymetry,
        layer_contour2,
        # layer_coastlines,
        # layer_cities_labels,
        # layer_cities_circles,
        # layer_labels,
        # layer_grid_bathymetry,
        # layer_grid_labels,
        # layer_meta,
    ]

    for layer in compute_layers:
        layer.extract()

        timer_start = datetime.datetime.now()
        polygons = layer.transform_to_world()
        logger.debug("transform in {:5.2f}s".format((datetime.datetime.now() - timer_start).total_seconds()))

        timer_start = datetime.datetime.now()
        layer.load(polygons)
        logger.debug("load in {:5.2f}s".format((datetime.datetime.now() - timer_start).total_seconds()))

        timer_start = datetime.datetime.now()
        polygons = layer.transform_to_map(document_info)
        logger.debug("project in {:5.2f}s".format((datetime.datetime.now() - timer_start).total_seconds()))

        timer_start = datetime.datetime.now()
        layer.load(polygons)
        logger.debug("load in {:5.2f}s".format((datetime.datetime.now() - timer_start).total_seconds()))

        timer_start = datetime.datetime.now()
        lines = layer.transform_to_lines(document_info)
        logger.debug("draw in {:5.2f}s".format((datetime.datetime.now() - timer_start).total_seconds()))

        timer_start = datetime.datetime.now()
        layer.load(lines)
        logger.debug("load in {:5.2f}s".format((datetime.datetime.now() - timer_start).total_seconds()))

    # pr.enable()
    # pr.disable()

    # pr.dump_stats('profile.pstat')

    # drawing order matters: earlier layers claim exclusion zones that are
    # subtracted from all later (lower) layers
    visible_layers = [
        # layer_meta,
        layer_cities_labels,
        layer_cities_circles,
        layer_grid_labels,
        layer_labels,
        layer_coastlines,
        layer_contour2,
        # layer_contour,
        layer_grid_bathymetry,
        layer_bathymetry,
        # layer_bathymetry2,
    ]

    exclude = []
    draw_objects = {}

    for layer in visible_layers:
        timer_start = datetime.datetime.now()
        draw, exclude = layer.out(exclude, document_info)
        draw_objects[layer.layer_id] = draw
        logger.debug(
            "{} layer subtraction in {:5.2f}s".format(
                layer.layer_id, (datetime.datetime.now() - timer_start).total_seconds()
            )
        )

    # output file is named after the config's "name" key (fallback: "output.svg")
    svg_filename = config.get("name", "output")
    if not svg_filename.endswith(".svg"):
        svg_filename += ".svg"

    svg = SvgWriter(svg_filename, document_info.get_document_size())
    # svg.background_color = config.get("svg_background_color", "#333333")

    # options_bathymetry = {
    #     "fill": "none",
    #     "stroke": "black",
    #     "stroke-width": "0",
    #     "fill-opacity": "0.9"
    # }
    #
    # scale = Colorscale([0, layer_bathymetry.NUM_ELEVATION_LINES])
    # for i in range(layer_bathymetry.NUM_ELEVATION_LINES):
    #     polys, _ = layer_bathymetry.out_polygons(exclude, document_info, select_elevation_level=i)
    #     color = "rgb({},{},{})".format(*[int(x * 255) for x in scale.get_color(layer_bathymetry.NUM_ELEVATION_LINES-1-i)])
    #     svg.add(f"bathymetry_polys_{i}", polys, options=options_bathymetry | {"fill": color})

    # options_contour = {
    #     "fill": "none",
    #     "stroke": "black",
    #     "stroke-width": "0",
    #     "fill-opacity": "0.5"
    # }
    #
    # scale = Colorscale([0, layer_contour.NUM_ELEVATION_LINES])
    # for i in range(layer_contour.NUM_ELEVATION_LINES):
    #     polys, _ = layer_contour.out_polygons([], document_info, select_elevation_level=i)
    #     color = "rgb({},{},{})".format(*[int(x * 255) for x in scale.get_color(i)])
    #     svg.add(f"contour_polys_{i}", polys, options=options_contour | {"fill": color})

    # per-layer SVG/CSS stroke styles, keyed by layer_id
    layer_styles = {}

    layer_styles[layer_bathymetry.layer_id] = {
        "fill": "none",
        "stroke": "blue",
        "stroke-width": "0.40",
        "fill-opacity": "0.1",
    }

    layer_styles[layer_contour.layer_id] = {
        "fill": "none",
        "stroke": "black",
        "stroke-width": "0.40",
    }

    layer_styles[layer_coastlines.layer_id] = {
        "fill": "none",
        "stroke": "black",
        "stroke-width": "0.5",
    }

    layer_styles[layer_grid_labels.layer_id] = {
        "fill": "none",
        "stroke": "black",
        "stroke-width": "1.0",
    }

    layer_styles[layer_labels.layer_id] = {
        "fill": "none",
        "stroke": "black",
        "stroke-width": "0.4",
    }

    layer_styles[layer_cities_labels.layer_id] = {
        "fill": "none",
        "stroke": "black",
        "stroke-width": "0.5",
    }

    layer_styles[layer_cities_circles.layer_id] = {
        "fill": "none",
        "stroke": "red",
        "stroke-width": "0.5",
    }

    layer_styles[layer_meta.layer_id] = {
        "fill": "none",
        "stroke": "black",
        "stroke-width": "0.5",
    }

    # the *2 variants reuse the styles of their counterparts
    layer_styles[layer_bathymetry2.layer_id] = layer_styles[layer_bathymetry.layer_id]
    layer_styles[layer_contour2.layer_id] = layer_styles[layer_contour.layer_id]

    for k, v in layer_styles.items():
        svg.add_style(k, v)

    for k, v in draw_objects.items():
        svg.add(k, v)  # , options=layer_styles.get(k.lower(), {}))

    # tanaka_style = {
    #     "fill": "none",
    #     "stroke-width": "0.40",
    #     "fill-opacity": "1.0",
    # }
    # svg.add(
    #     "Contours2_High",
    #     layer_contour2.out_tanaka(exclude, document_info, highlights=True)[0],
    #     {**tanaka_style, "stroke": "#999999"},
    # )
    # svg.add(
    #     "Contours2_Low",
    #     layer_contour2.out_tanaka(exclude, document_info, highlights=False)[0],
    #     {**tanaka_style, "stroke": "black"},
    # )

    svg.write()
    # PNG preview is best-effort: a missing Inkscape must not fail the whole run
    try:
        convert_svg_to_png(svg, svg.dimensions[0] * 10)
    except Exception as e:
        logger.warning(f"SVG to PNG conversion failed: {e}")

    logger.info(f"total time: {(datetime.datetime.now() - timer_total_runtime).total_seconds():5.2f}s")
241 |
242 |
if __name__ == "__main__":
    # script entry point: compute and export the configured map
    run()
245 |
--------------------------------------------------------------------------------
/lineworld/util/downloader.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import requests
4 | import shutil
5 |
6 |
def download_file(url: str, filename: Path) -> None:
    """Download *url* and stream it to *filename*.

    Args:
        url: source URL.
        filename: destination path (overwritten if it exists).

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status
            (previously the error page body was silently saved as the file).
    """
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        # raw urllib3 stream does not decode Content-Encoding by default;
        # without this a gzip-compressed response is written compressed
        r.raw.decode_content = True
        with open(filename, "wb") as f:
            shutil.copyfileobj(r.raw, f)
11 |
--------------------------------------------------------------------------------
/lineworld/util/export.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | from pathlib import Path
3 |
4 | import lineworld
5 | from lineworld.core.svgwriter import SvgWriter
6 |
7 |
def convert_svg_to_png(
    svgWriter: SvgWriter,
    image_width: int | None = None,
    working_dir: Path = Path("."),
    inkscape_conversion_suffix: str = ".png",
) -> None:
    """Render an already-written SVG file to a raster image via Inkscape.

    Args:
        svgWriter: writer whose ``.filename`` points at the SVG on disk.
        image_width: export width in pixels; defaults to the SVG document width.
        working_dir: directory in which to invoke Inkscape.
        inkscape_conversion_suffix: extension of the output file next to the SVG.

    Raises:
        subprocess.CalledProcessError: if Inkscape exits with a non-zero status.
        FileNotFoundError: if the Inkscape executable cannot be found.
    """
    svg_filename = svgWriter.filename

    if image_width is None:
        image_width = svgWriter.dimensions[0]

    # output file: same directory and stem as the SVG, different suffix
    converted_image_output_path = Path(Path(svg_filename).parent, Path(svg_filename).stem + inkscape_conversion_suffix)
    background_color = svgWriter.background_color if svgWriter.background_color is not None else "white"
    inkscape_command = lineworld.get_config().get("inkscape_command", "inkscape")

    # check=True raises on failure; Inkscape's own output is deliberately not captured
    subprocess.run(
        [
            inkscape_command,
            svg_filename,
            f"--export-filename={converted_image_output_path}",
            f"--export-width={image_width}",
            f"--export-background={background_color}",
        ],
        cwd=working_dir,
        check=True,
    )
35 |
--------------------------------------------------------------------------------
/lineworld/util/fontsizetest.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import shapely.affinity
4 |
5 | from lineworld.core.svgwriter import SvgWriter
6 | from lineworld.util.hersheyfont import HersheyFont
7 |
TEXT = ["the quick brown fox", "jumps over the lazy hedgehog"]

FONT_SIZES = [3, 4, 5, 6, 8, 10]
CANVAS_DIMENSIONS = [210 - 20, 297 - 20]  # A4 (mm) minus a 10mm margin per side
OUTPUT_PATH = "."

# The font is constructed from the same constant file on every iteration,
# so load it once instead of once per font size.
font = HersheyFont(font_file=Path(HersheyFont.DEFAULT_FONT))

linestrings = []
offset = 10  # vertical cursor (mm) for the next text line
for font_size in FONT_SIZES:
    # heading stating the sample's font size
    lines = font.lines_for_text(f"SIZE: {font_size:5.1f}", font_size)
    linestrings += [shapely.affinity.translate(l, xoff=0, yoff=offset) for l in lines]
    offset += font_size

    # lowercase samples
    for segment in TEXT:
        lines = font.lines_for_text(segment, font_size)
        linestrings += [shapely.affinity.translate(l, xoff=0, yoff=offset) for l in lines]
        offset += font_size + 1

    # uppercase samples
    for segment in TEXT:
        lines = font.lines_for_text(segment.upper(), font_size)
        linestrings += [shapely.affinity.translate(l, xoff=0, yoff=offset) for l in lines]
        offset += font_size + 1

    # gap before the next size block
    offset += font_size + 2

svg_path = Path(OUTPUT_PATH, "fontsizetest.svg")
svg = SvgWriter(svg_path, CANVAS_DIMENSIONS)
options = {"fill": "none", "stroke": "black", "stroke-width": "0.5"}
svg.add("lines", linestrings, options=options)
svg.write()
40 |
--------------------------------------------------------------------------------
/lineworld/util/geometrytools.py:
--------------------------------------------------------------------------------
1 | import itertools
2 |
3 | import numpy as np
4 | import shapely
5 | from loguru import logger
6 | from pyproj import Geod
7 | from shapely import Geometry
8 | from shapely.geometry import (
9 | GeometryCollection,
10 | Polygon,
11 | MultiPolygon,
12 | LineString,
13 | MultiLineString,
14 | )
15 |
16 |
def _unpack_multigeometry[T](g: Geometry | list[Geometry] | np.ndarray, geometry_type: T) -> list[T]:
    """Recursively flatten geometries into a flat list of the requested single-geometry type.

    Accepts a single geometry, a list, or a numpy array of geometries.
    Multi-geometries of the matching kind and GeometryCollections are unpacked;
    None and geometries of any other type are silently dropped.

    Raises:
        Exception: if geometry_type is neither Polygon nor LineString.
    """
    single: Geometry = None
    multi: Geometry = None

    if geometry_type == Polygon:
        single, multi = (Polygon, MultiPolygon)
    elif geometry_type == LineString:
        single, multi = (LineString, MultiLineString)
    else:
        raise Exception(f"unknown geometry_type for _unpack_multigeometry: {geometry_type}")

    unpacked: list[T] = []
    packed = None
    if type(g) is np.ndarray:
        packed = g.tolist()
    elif type(g) is list:
        packed = g
    else:
        packed = [g]

    for e in packed:
        # `single()` / `multi()` are class patterns resolved from the variables
        # assigned above (name followed by parentheses), not capture patterns
        match e:
            case None:
                continue
            case single():
                unpacked.append(e)
            case multi():
                unpacked += e.geoms
            case GeometryCollection():
                # collections may nest arbitrary geometries, recurse
                for gc in e.geoms:
                    unpacked += _unpack_multigeometry(gc, geometry_type)
            case _:
                # logger.warning(f"ignoring geometry: {type(e)}")
                pass

    return unpacked
53 |
54 |
def crop_geometry(main: list[Geometry] | Geometry, tool: list[Geometry]) -> list[Geometry] | Geometry:
    """Crop *main* geometries with *tool* geometries. Not implemented yet (always returns None)."""
    # TODO
    pass
58 |
59 |
def crop_linestrings(linestrings: list[LineString], polygon: Polygon) -> list[LineString]:
    """
    Intersect all linestrings with the given polygon. Return only non-empty LineStrings.
    """
    linestrings_cropped = []
    for ls in linestrings:
        cropped = shapely.intersection(ls, polygon)
        # unpack_multilinestring keeps LineStrings, flattens MultiLineStrings and
        # GeometryCollections and drops point-like results. The previous code only
        # filtered single Points, letting MultiPoints/collections leak into the output.
        linestrings_cropped += unpack_multilinestring(cropped)

    # drop empty geometries produced by non-overlapping inputs
    return list(itertools.filterfalse(shapely.is_empty, linestrings_cropped))
77 |
78 |
79 | def _linestring_to_coordinate_pairs(
80 | linestring: LineString,
81 | ) -> list[list[tuple[float, float]]]:
82 | pairs = []
83 |
84 | for i in range(len(linestring.coords) - 1):
85 | pairs.append([linestring.coords[i], linestring.coords[i + 1]])
86 |
87 | return pairs
88 |
89 |
def unpack_multipolygon(g: Geometry | list[Geometry] | np.ndarray) -> list[Polygon]:
    """Flatten any mix of Polygons, MultiPolygons and GeometryCollections into a flat list of Polygons."""
    return _unpack_multigeometry(g, Polygon)
92 |
93 |
def unpack_multilinestring(
    g: Geometry | list[Geometry] | np.ndarray,
) -> list[LineString]:
    """Flatten any mix of LineStrings, MultiLineStrings and GeometryCollections into a flat list of LineStrings."""
    return _unpack_multigeometry(g, LineString)
98 |
99 |
def calculate_geodesic_area(p: Polygon) -> float:
    """Return the geodesic area of *p* (WGS84 lon/lat coordinates) in m².

    Per pyproj's Geod.geometry_area_perimeter, the sign depends on ring
    orientation (counter-clockwise exterior rings yield a positive area).
    """
    poly_area, _ = Geod(ellps="WGS84").geometry_area_perimeter(p)
    return poly_area
103 |
104 |
def process_polygons(
    polygons: list[Polygon],
    simplify_precision: float | None = None,
    check_valid: bool = False,
    unpack: bool = False,
    check_empty: bool = False,
    min_area_wgs84: float | None = None,
    min_area_mm2: float | None = None,
) -> np.ndarray:
    """Run a configurable cleanup pipeline over *polygons* and return a numpy array of Polygons.

    Optional steps (applied in order): simplify, repair invalid geometries,
    unpack multi-geometries, drop empties, drop polygons below a minimum
    geodesic area (WGS84, m²) and/or below a minimum planar area (mm²).
    Counts per step are logged at debug level.
    """
    stat: dict[str, int] = {
        "input": 0,
        "output": 0,
    }

    if len(polygons) == 0:
        return np.array([], dtype=Polygon)

    polys = np.array(polygons, dtype=Polygon)

    stat["input"] = polys.shape[0]

    if simplify_precision is not None:
        polys = shapely.simplify(polys, simplify_precision)

    if check_valid:
        stat["invalid"] = int(np.count_nonzero(~shapely.is_valid(polys)))
        polys = shapely.make_valid(polys)

    if unpack:
        polys = np.array(unpack_multipolygon(polys))

    if check_empty:
        mask_empty = shapely.is_empty(polys)
        stat["empty"] = int(np.count_nonzero(mask_empty))
        polys = polys[~mask_empty]

    if min_area_wgs84 is not None:
        mask_small = np.vectorize(lambda p: calculate_geodesic_area(p) < min_area_wgs84)(polys)
        # accumulate: both area filters may run, previously the second reset the count
        stat["small"] = stat.get("small", 0) + int(np.count_nonzero(mask_small))
        polys = polys[~mask_small]

    if min_area_mm2 is not None:
        mask_small = shapely.area(polys) < min_area_mm2
        stat["small"] = stat.get("small", 0) + int(np.count_nonzero(mask_small))
        polys = polys[~mask_small]

    stat["output"] = polys.shape[0]

    logger.debug("Filtering:")
    for k, v in stat.items():
        logger.debug(f"{k:10} : {v:10}")

    return polys
162 |
163 |
def add_to_exclusion_zones(
    drawing_geometries: list[Geometry],
    exclusion_zones: list[Polygon],
    exclude_buffer: float,
    simplification_tolerance: float = 0.5,
) -> list[Polygon]:
    """Buffer the drawn geometries and prepend them to the exclusion zone list.

    Geometries are simplified first so the buffering stays cheap.
    Note for .buffer(): reducing the quad segments from 8 (default) to 4 gives a speedup of ~40%
    """
    simplified = shapely.simplify(drawing_geometries, simplification_tolerance)
    buffered = [shapely.buffer(geometry, exclude_buffer, quad_segs=4) for geometry in simplified]
    return buffered + exclusion_zones
175 |
--------------------------------------------------------------------------------
/lineworld/util/labelplacement.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import datetime
3 | import math
4 | import random
5 | from dataclasses import dataclass
6 | from itertools import chain
7 | from pathlib import Path
8 | from typing import Any
9 |
10 | import numpy as np
11 | import shapely
12 | from shapely.ops import transform
13 | from shapely import Point, LineString, MultiLineString, STRtree, Polygon
14 | from shapely.affinity import affine_transform, translate
15 |
16 | import lineworld
17 | from lineworld.core import map
18 | from lineworld.core.map import Projection, DocumentInfo
19 | from lineworld.core.svgwriter import SvgWriter
20 | from lineworld.util.hersheyfont import HersheyFont, Align
21 |
22 | from loguru import logger
23 |
# upper bound of optimization iterations per overlap-region (see _anneal)
DEFAULT_MAX_ITERATIONS = 10_000

# geometry defaults, in map units (mm)
DEFAULT_FONT_SIZE = 5
DEFAULT_OFFSET_FROM_CENTER = 5
DEFAULT_CIRCLE_RADIUS = 1.5
DEFAULT_BOX_SAFETY_MARGIN = 1.0

# cities below this population are ignored entirely
DEFAULT_FILTER_MIN_POPULATION = 1_000_000

# The eight candidate label positions around a city marker.
# "pos" is the anchor angle in degrees with the y-axis pointing down
# (so 315° = up and to the right), "align" is the text alignment relative
# to the anchor, and "error" is the placement penalty (lower = preferred).
positions = {
    "top-right": {"pos": 315, "align": Align.LEFT, "error": 0},
    "top-left": {"pos": 225, "align": Align.RIGHT, "error": 1},
    "bottom-right": {"pos": 45, "align": Align.LEFT, "error": 2},
    "bottom-left": {"pos": 135, "align": Align.RIGHT, "error": 3},
    "center-right": {"pos": 0, "align": Align.LEFT, "error": 4},
    "center-top": {"pos": 270, "align": Align.CENTER, "error": 5},
    "center-left": {"pos": 180, "align": Align.RIGHT, "error": 6},
    "center-bottom": {"pos": 90, "align": Align.CENTER, "error": 7},
}
43 |
44 |
@dataclass
class City:
    """A city label candidate with all placement geometry precomputed in map coordinates."""

    pos: Point  # projected + transformed map position of the city
    label: str  # display name
    population: int
    priority: float  # 0..1 weight derived from population (1 = most populous)
    error: float | None  # residual placement error after annealing, None if not placed
    circle: Polygon  # marker circle around pos
    boxes: list[Polygon]  # one label bounding box per candidate position (see `positions`)
    text: list[MultiLineString]  # rendered label linework, one entry per candidate position
    region: int | None  # index of the disjoint overlap-region this city was assigned to
    placement: int | None  # position index of the best label placement
    debug_paths: list[LineString]  # baseline paths, for the debug SVG only
    debug_boxes: list[Polygon]  # un-enveloped buffered boxes, for the debug SVG only
59 |
60 |
def _anneal(cities: list[City], region: list[int], config: dict[str, Any]):
    """Optimize the label positions of all cities in one overlap-region.

    Randomized local search over the 8 candidate positions per city: each
    iteration mutates one random city's position and keeps the new assignment
    only if the summed error decreases. Results are written back into
    cities[ci].placement / .error for every index in *region*.
    """
    # TODO: we're not exactly doing simulated annealing here because only better states are accepted,
    # not marginally worse ones based on current temperature

    # state[i] = candidate-position index (0..7) of the i-th city in the region
    state = np.zeros([len(region)], dtype=int)
    for i in range(state.shape[0]):
        state[i] = random.randrange(8)

    # per-city error; initialized high so the first evaluated state always wins
    error = np.full([len(region)], 1000, dtype=float)
    new_error = np.full([len(region)], 0, dtype=float)

    tree_circle = STRtree([cities[ci].circle for i, ci in enumerate(region)])

    # penalty per candidate position, in dict order (matches the 0..7 indices)
    position_errors = [positions[key]["error"] for key in positions.keys()]

    for _ in range(config.get("max_iterations", DEFAULT_MAX_ITERATIONS)):
        s = np.copy(state)
        new_error.fill(0)

        # mutate a single randomly chosen city
        s[random.randrange(s.shape[0])] = random.randrange(8)

        tree_box = STRtree([cities[ci].boxes[s[i]] for i, ci in enumerate(region)])

        for i, ci in enumerate(region):
            # label-box overlaps; the query always matches the box itself, hence -1
            overlaps = tree_box.query(cities[ci].boxes[s[i]])
            new_error[i] += (len(overlaps) - 1) * 100

            # overlaps between this label box and any city marker circle
            overlaps = tree_circle.query(cities[ci].boxes[s[i]])
            new_error[i] += len(overlaps) * 100

            new_error[i] += position_errors[s[i]]
            # weight the error by the city's priority
            new_error[i] += new_error[i] * cities[ci].priority

        # greedy acceptance: keep the mutation only if the total error improves
        if np.sum(new_error) < np.sum(error):
            state = s
            error = new_error

        if np.sum(error) < 1:
            break

    logger.debug(f"region (size {len(region):<3}) error {np.sum(error):6.2f} | {[cities[ci].label for ci in region]}")

    for i, ci in enumerate(region):
        cities[ci].error = error[i]
        cities[ci].placement = state[i]
106 |
107 |
def read_from_file(filename: Path, document_info: DocumentInfo, config: dict[str, Any]) -> list[City]:
    """Read cities from a semicolon-separated CSV and precompute all placement geometry.

    Cities below the configured minimum population are skipped. For each
    remaining city the marker circle and, for each of the 8 candidate
    positions, the rendered label text and its buffered bounding box are
    computed in map coordinates.
    """
    cities = []

    project_func = document_info.get_projection_func(Projection.WGS84)
    mat = document_info.get_transformation_matrix()

    font = HersheyFont()
    filter_min_population = config.get("filter_min_population", DEFAULT_FILTER_MIN_POPULATION)

    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter=";")
        for row in reader:
            population = int(row["population"])
            if population < filter_min_population:
                continue

            lon = float(row["lon"])
            lat = float(row["lat"])
            label = row["ascii_name"]
            font_size = config.get("font_size", DEFAULT_FONT_SIZE)

            p = Point([lon, lat])

            # project WGS84 lon/lat into map coordinates
            p = transform(project_func, p)
            p = affine_transform(p, mat)

            circle = p.buffer(config.get("circle_radius", DEFAULT_CIRCLE_RADIUS))

            positions_boxes = []
            positions_text = []

            debug_paths = []
            debug_boxes = []

            for k in positions.keys():
                # unit vector towards the candidate position's anchor angle
                sin = math.sin(math.radians(positions[k]["pos"]))
                cos = math.cos(math.radians(positions[k]["pos"]))

                offset = config.get("offset_from_center", DEFAULT_OFFSET_FROM_CENTER)

                # anchor point: city position shifted by `offset` along the angle
                xnew = offset * cos - 0 * sin + p.x
                ynew = offset * sin + 0 * cos + p.y

                # TODO:
                # complex: backproject xnew, ynew to lat lon so we compute the baseline path for the font
                # simple: compute the baseline path only once for right and left and move it up and down to the different positions

                # baseline path in lon/lat, generously long for any label text
                path_coords = None
                match positions[k]["align"]:
                    case Align.LEFT:
                        path_coords = [[lon, lat], [lon + 50, lat]]
                    case Align.RIGHT:
                        path_coords = [[lon - 50, lat], [lon, lat]]
                    case Align.CENTER:
                        path_coords = [[lon - 25, lat], [lon + 25, lat]]
                    case _:
                        raise Exception(f"unexpected enum state align: {positions[k]['align']}")

                # project the baseline like the point, then shift it onto the anchor
                path = LineString(path_coords).segmentize(0.1)
                path = transform(project_func, path)
                path = affine_transform(path, mat)
                path = translate(path, xoff=xnew - p.x, yoff=ynew - p.y)
                lines = MultiLineString(
                    font.lines_for_text(label, font_size, align=positions[k]["align"], center_vertical=True, path=path)
                )

                positions_text.append(lines)

                # axis-aligned bounding box with a safety margin, used for collision tests
                box = lines.envelope.buffer(config.get("box_safety_margin", DEFAULT_BOX_SAFETY_MARGIN))
                positions_boxes.append(box.envelope)

                debug_paths.append(path)
                debug_boxes += [box.exterior]

            cities.append(
                City(
                    pos=p,
                    label=label,
                    population=population,
                    priority=1.0,  # placeholder; recomputed in generate_placement
                    error=None,
                    circle=circle,
                    boxes=positions_boxes,
                    text=positions_text,
                    region=None,
                    placement=None,
                    debug_paths=debug_paths,
                    debug_boxes=debug_boxes
                )
            )

    return cities
200 |
201 |
def generate_placement(cities: list[City], config: dict[str, Any]) -> list[City]:
    """Choose non-overlapping label placements for *cities*.

    Steps: derive a population-based priority, drop cities too close to a more
    populous one, split the remainder into disjoint regions of mutually
    overlapping label candidates, optimize each region with _anneal(), and
    finally drop any city whose chosen label still collides.
    """
    min_pop = config.get("filter_min_population", DEFAULT_FILTER_MIN_POPULATION)
    max_pop = max([c.population for c in cities])

    # priority in [0, 1]; 1 = most populous city in the set
    for c in cities:
        c.priority = (c.population - min_pop) / (max_pop - min_pop)

    # collision check

    cities_cleaned = []
    # most populous first, so lower indices mean higher priority
    cities = list(reversed(sorted(cities, key=lambda c: c.population)))
    tree = STRtree([c.pos.buffer(config.get("circle_radius", DEFAULT_CIRCLE_RADIUS) * 2.5).envelope for c in cities])
    for i, c in enumerate(cities):
        # the query hits at least the city's own buffer; a smaller index means
        # a more populous city is too close, so this one is dropped
        collisions = tree.query(c.pos)
        if min(collisions) < i:
            continue
        cities_cleaned.append(c)

    logger.info(f"removed during collision checking: {len(cities) - len(cities_cleaned)}")

    cities = cities_cleaned

    # split cities into disjunct sets

    label_polygons = [shapely.ops.unary_union(c.boxes) for c in cities]
    tree = STRtree(label_polygons)

    regions = []

    # NOTE(review): recursion depth grows with the size of an overlap-region;
    # a very large region could hit Python's recursion limit — confirm inputs stay small
    def _rec_propagate(city_index: int, region: int) -> None:
        if cities[city_index].region is not None:
            return

        cities[city_index].region = region
        regions[region].append(city_index)
        for overlap_index in tree.query(label_polygons[city_index]):
            _rec_propagate(int(overlap_index), region)

    for i in range(len(cities)):
        c = cities[i]

        if c.region is not None:
            continue

        region_name = len(regions)
        regions.append([])

        _rec_propagate(i, region_name)

    # annealing

    timer_start = datetime.datetime.now()
    for region in regions:
        _anneal(cities, region, config)

    logger.info(f"anneal total time: {(datetime.datetime.now() - timer_start).total_seconds():5.2f}")

    # remove collisions

    cities_cleaned = []
    for c in cities:
        # greedy pass in population order: compare only against already-accepted cities
        tree_box = STRtree([cc.boxes[cc.placement] for cc in cities_cleaned])
        tree_circle = STRtree([cc.circle for cc in cities_cleaned])

        box = c.boxes[c.placement]
        circle = c.circle
        overlap = tree_box.query(box).tolist() + tree_box.query(circle).tolist() + tree_circle.query(box).tolist()

        if len(overlap) == 0:
            cities_cleaned.append(c)
        else:
            logger.debug(f"drop city: {c.label}")

    cities = cities_cleaned

    ## debug
    # for c in cities:
    # if c["error"] is None:
    # continue
    # print(f"{c["label"]} {c["error"]:>10.2f}")

    return cities
284 |
285 |
if __name__ == "__main__":
    # demo/debug run: place labels for the cities CSV and write two SVGs
    INPUT_FILE = Path("data/cities/cities.csv")
    OUTPUT_PATH = Path("experiments/labelplacement/output")

    config = lineworld.get_config()
    document_info = map.DocumentInfo(config)

    # keep the demo run fast (default would be DEFAULT_MAX_ITERATIONS)
    config["max_iterations"] = 1000

    cities = read_from_file(INPUT_FILE, document_info, config)
    cities = generate_placement(cities, config)

    # main output: marker circles and the placed labels
    svg = SvgWriter(Path(OUTPUT_PATH, "labelplacement.svg"), document_info.get_document_size())
    options = {"fill": "none", "stroke": "black", "stroke-width": "0.2"}
    svg.add(
        "circles", [c.pos.buffer(config.get("circle_radius", DEFAULT_CIRCLE_RADIUS)) for c in cities], options=options
    )

    placed_labels = []
    for i, c in enumerate(cities):
        # cities without an error value were never annealed/placed
        if c.error is None:
            continue
        placed_labels.append(c.text[c.placement])

    svg.add("labels", placed_labels, options=options)
    svg.write()

    # debug

    circles = [c.circle for c in cities]
    boxes = [c.boxes[c.placement] for c in cities]
    debug_boxes = [c.debug_boxes[c.placement] for c in cities]
    debug_paths = list(chain.from_iterable([c.debug_paths for c in cities]))

    # secondary output with all collision geometry overlaid for inspection
    svg = SvgWriter(Path(OUTPUT_PATH, "labelplacement_debug.svg"), document_info.get_document_size())
    options = {"fill": "none", "stroke": "black", "stroke-width": "0.2"}
    svg.add("circles", circles, options=options)
    svg.add("boxes", boxes, options=options)
    svg.add("debug_paths", debug_paths, options=options)
    svg.add("debug_boxes", debug_boxes, options=options)
    svg.add("labels", placed_labels, options=options)
    svg.write()
328 |
--------------------------------------------------------------------------------
/lineworld/util/rastertools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def normalize_to_uint8(data):
    """Linearly rescale *data* so its values span the full uint8 range [0, 255].

    A constant-valued input previously caused a division by zero (NaN cast to
    uint8); it now maps to an all-zero array.
    """
    value_range = np.ptp(data)
    if value_range == 0:
        return np.zeros_like(data, dtype=np.uint8)
    return (np.iinfo(np.uint8).max * ((data - np.min(data)) / value_range)).astype(np.uint8)
6 |
--------------------------------------------------------------------------------
/lineworld/util/scales.py:
--------------------------------------------------------------------------------
1 | from lineworld.util import colormaps
2 | from matplotlib.colors import ListedColormap
3 |
4 |
class Colorscale:
    """Map a scalar from the domain ``d = [min, max]`` onto an RGB color of a named colormap."""

    def __init__(self, d, colormap_name="viridis"):
        # d: two-element domain [min, max] mapped onto the colormap range
        self.d = d

        # raw color data is looked up by naming convention, e.g. colormaps._viridis_data
        values = getattr(colormaps, f"_{colormap_name}_data")
        self.colormap = ListedColormap(values, name=colormap_name)

    def get_color(self, value):
        """Return the (r, g, b) tuple for *value*, clamped to the domain (alpha stripped)."""
        a = value - self.d[0]

        # below the domain: first colormap entry (matplotlib treats an int
        # argument as a list index, a float as a 0..1 fraction)
        if a <= 0:
            return self.colormap(0)[:-1]

        a = a / (self.d[1] - self.d[0])

        # above the domain: last colormap entry
        if a >= 1:
            return self.colormap(1.0)[:-1]

        return self.colormap(a)[:-1]
24 |
--------------------------------------------------------------------------------
/lineworld/util/slope.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import cv2
3 | import numpy as np
4 | from matplotlib import pyplot as plt
5 |
6 |
def _read_data(input_path: Path) -> np.ndarray:
    """Read a raster file as-is (IMREAD_UNCHANGED keeps dtype and channel count).

    NOTE: cv2.imread returns None when the file cannot be read — callers
    receive no explicit error.
    """
    data = cv2.imread(str(input_path), cv2.IMREAD_UNCHANGED)
    # data = cv2.resize(img, [30, 30])

    # data = np.flipud(data)
    # data = (data * 120/20).astype(np.int8)
    # data = np.rot90(data)

    return data
16 |
17 |
18 | def _unlog(x, n: float = 10) -> float:
19 | return ((n + 1) * x) / ((n * x) + 1)
20 |
21 |
def get_slope(
    data: np.ndarray, sampling_step: int
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Computes angle (in rad) and magnitude of the given 2D array of values

    The array is subsampled by *sampling_step* before taking the gradient;
    angle and magnitude are then resized back to the full input resolution.

    Returns:
        (X, Y, dX, dY, angles, magnitude): X/Y are the sampled grid
        coordinates, dX/dY the gradient components at the sampled points.
    """
    test_slice = data[::sampling_step, ::sampling_step]
    r, c = np.shape(data)
    Y, X = np.mgrid[0:r:sampling_step, 0:c:sampling_step]
    dY, dX = np.gradient(test_slice)  # order! Y X

    angles = np.arctan2(dY, dX)
    magnitude = np.hypot(dY, dX)

    if sampling_step > 1:
        # cv2.resize expects dsize as (width, height) — the reverse of numpy's
        # (rows, cols) shape. Passing data.shape directly transposed the output
        # dimensions for non-square inputs.
        angles = cv2.resize(angles, data.shape[::-1])
        magnitude = cv2.resize(magnitude, data.shape[::-1])

    return (X, Y, dX, dY, angles, magnitude)
41 |
42 |
# input raster for the standalone demo run below
# INPUT_FILE = Path("data/hatching_dem.tif")
INPUT_FILE = Path("data/gebco_crop.tif")
# INPUT_FILE = Path("data/slope_test_2.tif")
# INPUT_FILE = Path("data/slope_test_4.tif")

OUTPUT_PATH = Path("output")

# subsampling factor for the gradient computation (see get_slope)
SAMPLING_STEP = 5
51 |
52 |
if __name__ == "__main__":
    # demo: compute slope angle/magnitude of a DEM raster and write diagnostic images
    data = _read_data(INPUT_FILE)

    print(f"data {INPUT_FILE} min: {np.min(data)} / max: {np.max(data)}")

    X, Y, dX, dY, angles, inclination = get_slope(data, SAMPLING_STEP)

    # quiver plot of the gradient field over the raw data
    fig = plt.figure(figsize=[20, 20])
    ax = fig.subplots()
    ax.imshow(data)
    ax.quiver(X, Y, dX, dY, angles="xy", color="r")
    plt.savefig(Path(OUTPUT_PATH, "slope_arrow.png"))

    # largest absolute elevation value, used to normalize the gradient magnitude
    extent = max([abs(np.min(data)), abs(np.max(data))])

    # grayscale slope image; _unlog boosts small slopes for visibility
    slope_img = (_unlog((np.abs(dX) + np.abs(dY)) / extent, n=10) * 255).astype(np.uint8)
    # slope_img = (_unlog(np.abs(dX) / (extent/2), n=10) * 255).astype(np.uint8)
    slope_img = cv2.resize(slope_img, [1000, 1000], interpolation=cv2.INTER_NEAREST)
    # slope_img[data > 0] = 255
    cv2.imwrite(str(Path(OUTPUT_PATH, "slope.png")), slope_img)

    # angles = angles + 1
    # angles[lengths == 0] = 0

    # normalize the three channels so they are comparable side by side
    angles = angles / np.linalg.norm(angles)
    inclination = inclination / np.linalg.norm(inclination)
    elevation = data / np.linalg.norm(data)

    # side-by-side comparison image: angle | inclination | elevation
    comb = np.hstack([angles, inclination, elevation])

    fig = plt.figure(figsize=[20, 20])
    ax = fig.subplots()
    plt.imshow(comb, interpolation="none")
    plt.savefig(Path(OUTPUT_PATH, "slope2.png"))

    # Three Dimensions:
    # 1: angle
    # 2: steepness / inclination / slope
    # 3: depth / elevation

    # slope_img = np.zeros([1000, 1000, 3], dtype=np.uint8)
    # slope_img[:, :, 0] = cv2.resize(np.abs(dX) * 255/extent, [1000, 1000], interpolation=cv2.INTER_NEAREST)
    # slope_img[:, :, 1] = cv2.resize(np.abs(dY) * 255/extent, [1000, 1000], interpolation=cv2.INTER_NEAREST)
    # cv2.imwrite(str(OUTPUT_PNG), slope_img)

    # slope_img = np.zeros([1000, 1000], dtype=np.uint8)
    # slope_img[:, :] = 127
    # slope_img[:, :] += cv2.resize((dX+dY) * 255/extent/2, [1000, 1000], interpolation=cv2.INTER_NEAREST).astype(np.uint8)
    # cv2.imwrite(str(OUTPUT_PNG), slope_img)

    # from matplotlib import cm
    # from matplotlib.colors import LightSource
    #
    # fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))
    # ax.set_zlim(-20000, 20000)
    # # ax.set_zlim(-0, 50)
    #
    # Z = data[::SAMPLING_STEP, ::SAMPLING_STEP]
    #
    # slope_data = np.abs(dX) + np.abs(dY)
    #
    # thres = np.max(slope_data)*0.5
    # slope_data[slope_data > thres] = thres
    #
    # ls = LightSource(270, 45)
    #
    # rgb = ls.shade(slope_data, cmap=cm.cool, vert_exag=0.1, blend_mode='soft')
    # # rgb = ls.shade(Z, cmap=cm.bwr, vert_exag=0.1, blend_mode='soft')
    #
    # surf = ax.plot_surface(X, Y, Z, rcount=100, ccount=100, linewidth=1, facecolors=rgb, antialiased=False) #, cmap=cm.coolwarm)
    #
    # plt.show()

    # img = cv2.resize(img, [30, 30])
    #
    # fig = plt.figure(figsize=[10, 10])
    # ax = fig.subplots()
    #
    # ax.imshow(img)
    #
    # xr = np.arange(0, img.shape[1], 1)
    # yr = np.arange(0, img.shape[0], 1)
    # # xx, yy = np.meshgrid(xr, yr)
    # dy, dx = np.gradient(img, 1)
    # ax.quiver(dx, dy, angles="xy") #, headwidth = 5)
    #
    # plt.savefig("plot.png")
140 |
--------------------------------------------------------------------------------
/media/header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/volzotan/plotmap/525a0f44c316ee254f2b2bdb40477ffbdc2ddf06/media/header.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "lineworld"
3 | version = "0.1.0"
4 | description = "vector world map generator for pen plotters from OSM/shapefile/geotiff data"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "dask>=2025.1.0",
9 | "distributed>=2025.1.0",
10 | "fiona>=1.10.1",
11 | "fire>=0.7.0",
12 | "geoalchemy2>=0.16.0",
13 | "loguru>=0.7.3",
14 | "lxml>=5.3.0",
15 | "matplotlib>=3.10.0",
16 | "netcdf4>=1.7.2",
17 | "numpy>=2.2.1",
18 | "opencv-python>=4.10.0.84",
19 | "psycopg>=3.2.3",
20 | "pyproj>=3.7.0",
21 | "pytest>=8.3.4",
22 | "rasterio>=1.4.3",
23 | "requests>=2.32.3",
24 | "ruff>=0.6.9",
25 | "shapely>=2.0.6",
26 | "shapelysmooth>=0.2.1",
27 | "svgpathtools>=1.6.1",
28 | "tifffile>=2024.12.12",
29 | "toml>=0.10.2",
30 | ]
31 |
32 | [tool.ruff]
33 | line-length = 120
34 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | import pytest
5 |
6 |
@pytest.fixture
def output_path() -> Path:
    """Provide the shared directory for test artifacts, creating it on first use."""
    output_dir = Path("tests_output")
    # mkdir(exist_ok=True) avoids the check-then-create race of exists() + makedirs
    output_dir.mkdir(parents=True, exist_ok=True)
    yield output_dir
13 |
--------------------------------------------------------------------------------
/tests/test_flowlines.py:
--------------------------------------------------------------------------------
1 | import math
2 | from pathlib import Path
3 |
4 | import cv2
5 | import numpy as np
6 | import pytest
7 | import rasterio
8 | from scipy import ndimage
9 | from shapely import Point
10 |
11 | from lineworld.core.flowlines import FlowlineHatcherConfig, FlowlineTiler, FlowlineTilerPoly, Mapping
12 | from lineworld.core.svgwriter import SvgWriter
13 | from lineworld.util.export import convert_svg_to_png
14 | from lineworld.util.rastertools import normalize_to_uint8
15 | from lineworld.util.slope import get_slope
16 |
17 |
@pytest.fixture
def resize_size() -> tuple[int, int]:
    """Target (width, height) in pixels for downscaling the elevation raster (cv2.resize order)."""
    return (500, 500)
21 |
22 |
@pytest.fixture
def elevation(resize_size: tuple[int, int]) -> np.ndarray:
    """Load the first band of the GEBCO test crop and resize it for fast tests."""
    ELEVATION_FILE = Path("experiments/hatching/data/gebco_crop.tif")

    # dataset.read() returns (bands, rows, cols); band 0 holds the elevation grid
    with rasterio.open(str(ELEVATION_FILE)) as dataset:
        data = dataset.read()[0]

    # cv2.resize takes dsize as (width, height)
    return cv2.resize(data, resize_size)
34 |
35 |
@pytest.fixture
def flow_config() -> FlowlineHatcherConfig:
    """A FlowlineHatcherConfig with all default settings."""
    return FlowlineHatcherConfig()
40 |
41 |
@pytest.fixture
def mapping(elevation: np.ndarray, output_path: Path, flow_config: FlowlineHatcherConfig) -> dict[Mapping, np.ndarray]:
    """Derive the four uint8 raster mappings (angle, flat, distance, max length) from elevation data.

    Also writes each mapping as a PNG into output_path for visual inspection.
    """
    elevation[elevation > 0] = 0  # bathymetry data only

    # only gradient direction (angles) and magnitude (inclination) are used here
    _, _, _, _, angles, inclination = get_slope(elevation, 1)

    # uint8 image must be centered around 128 to deal with negative values
    mapping_angle = ((angles + math.pi) / math.tau * 255.0).astype(np.uint8)

    # mark near-flat areas: inclination below the configured minimum
    mapping_flat = np.zeros_like(inclination, dtype=np.uint8)
    mapping_flat[inclination < flow_config.MIN_INCLINATION] = 255  # uint8

    mapping_distance = normalize_to_uint8(elevation)  # uint8

    # constant mid-range max line length over the whole raster
    mapping_max_length = np.full_like(angles, int(255 / 2))

    # blur to avoid abrupt direction/density transitions between neighboring pixels
    mapping_angle = cv2.blur(mapping_angle, (10, 10))
    mapping_distance = cv2.blur(mapping_distance, (10, 10))
    mapping_max_length = cv2.blur(mapping_max_length, (10, 10))

    # debug output for visual inspection of the individual mappings
    cv2.imwrite(str(Path(output_path, "mapping_angle.png")), normalize_to_uint8(mapping_angle / math.tau))
    cv2.imwrite(str(Path(output_path, "mapping_flat.png")), mapping_flat)
    cv2.imwrite(str(Path(output_path, "mapping_distance.png")), mapping_distance)
    cv2.imwrite(str(Path(output_path, "mapping_max_segments.png")), mapping_max_length)

    return {
        Mapping.DISTANCE: mapping_distance,
        Mapping.ANGLE: mapping_angle,
        Mapping.MAX_LENGTH: mapping_max_length,
        Mapping.FLAT: mapping_flat,
    }
73 |
74 |
def test_flowlines_tiler_square(
    mapping: dict[Mapping, np.ndarray],
    output_path: Path,
    resize_size: tuple[int, int],
    flow_config: FlowlineHatcherConfig,
):
    """Render flowlines over a 2x2 square tiling and export SVG + PNG for visual inspection."""
    # approximate collision checks keep the test fast
    flow_config.COLLISION_APPROXIMATE = True

    tiler = FlowlineTiler(mapping, flow_config, (2, 2))
    linestrings = tiler.hatch()

    svg_path = Path(output_path, "test_flowlines_tiler_square.svg")
    svg = SvgWriter(svg_path, resize_size)
    options = {"fill": "none", "stroke": "black", "stroke-width": "1"}
    svg.add("flowlines", linestrings, options=options)
    svg.write()

    # upscale x10 so the thin lines stay visible in the rasterized output
    convert_svg_to_png(svg, svg.dimensions[0] * 10)
93 |
94 |
def test_flowlines_tiler_poly(
    mapping: dict[Mapping, np.ndarray],
    output_path: Path,
    resize_size: tuple[int, int],
    flow_config: FlowlineHatcherConfig,
):
    """Render flowlines clipped to a single circular polygon and export SVG + PNG."""
    # approximate collision checks keep the test fast
    flow_config.COLLISION_APPROXIMATE = True

    # circle centered on the canvas; use resize_size[1] for the y-coordinate so a
    # non-square fixture would still be centered (identical for the 500x500 default)
    center = Point([resize_size[0] // 2, resize_size[1] // 2])
    tiler = FlowlineTilerPoly(mapping, flow_config, [center.buffer(min(resize_size) * 0.49)])
    linestrings = tiler.hatch()

    svg_path = Path(output_path, "test_flowlines_tiler_poly.svg")
    svg = SvgWriter(svg_path, resize_size)
    options = {"fill": "none", "stroke": "black", "stroke-width": "1"}
    svg.add("flowlines", linestrings, options=options)
    svg.write()

    # upscale x10 so the thin lines stay visible in the rasterized output
    convert_svg_to_png(svg, svg.dimensions[0] * 10)
115 |
--------------------------------------------------------------------------------
/tests/test_gebco_grid_to_polygon.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import cv2
4 | import numpy as np
5 | import pytest
6 | from shapely.geometry import Polygon
7 |
8 | from lineworld.util import gebco_grid_to_polygon
9 |
10 |
@pytest.fixture
def single_poly(output_path: Path) -> np.ndarray:
    """A filled rectangle containing a brighter inner rectangle (no zero-valued holes)."""
    band = np.zeros((1000, 2000, 1), np.uint8)
    rectangles = [
        ((100, 100), (1900, 900), 100),
        ((200, 200), (1800, 800), 200),
    ]
    for top_left, bottom_right, value in rectangles:
        cv2.rectangle(band, top_left, bottom_right, (value), -1)
    cv2.imwrite(str(Path(output_path, "test_single_poly.png")), band)
    return band
18 |
19 |
@pytest.mark.parametrize("allow_overlap", [True, False])
def test_convert_single_poly(single_poly: np.ndarray, allow_overlap: bool) -> None:
    """Extracting band [50, 150) from nested rectangles yields exactly one polygon."""
    results = gebco_grid_to_polygon._extract_polygons(single_poly, 50, 150, allow_overlap)

    assert len(results) == 1
    polygon = results[0]
    assert type(polygon) is Polygon

    # with overlap the brighter inner rectangle is subsumed; otherwise it is cut out as a hole
    expected_holes = 0 if allow_overlap else 1
    assert len(polygon.interiors) == expected_holes
31 |
32 |
@pytest.fixture
def poly_with_hole(output_path: Path) -> np.ndarray:
    """Nested rectangles whose innermost region is zero-valued, producing a hole."""
    band = np.zeros((1000, 2000, 1), np.uint8)
    rectangles = [
        ((100, 100), (1900, 900), 100),
        ((200, 200), (1800, 800), 200),
        ((300, 300), (1700, 700), 0),
    ]
    for top_left, bottom_right, value in rectangles:
        cv2.rectangle(band, top_left, bottom_right, (value), -1)
    cv2.imwrite(str(Path(output_path, "test_poly_with_hole.png")), band)
    return band
41 |
42 |
@pytest.mark.parametrize("allow_overlap", [True, False])
def test_convert_poly_with_hole(poly_with_hole: np.ndarray, allow_overlap: bool) -> None:
    """A zero-valued inner rectangle must always appear as a hole in the extracted polygon."""
    results = gebco_grid_to_polygon._extract_polygons(poly_with_hole, 50, 100, allow_overlap)

    assert len(results) == 1
    polygon = results[0]
    assert type(polygon) is Polygon
    assert len(polygon.interiors) == 1
52 |
53 |
@pytest.fixture
def poly_with_hole_and_island(output_path: Path) -> np.ndarray:
    """Nested rectangles with a zero-valued hole that itself contains a bright island."""
    band = np.zeros((1000, 2000, 1), np.uint8)
    rectangles = [
        ((100, 100), (1900, 900), 100),
        ((200, 200), (1800, 800), 200),
        ((300, 300), (1700, 700), 0),
        ((400, 400), (1600, 600), 250),
    ]
    for top_left, bottom_right, value in rectangles:
        cv2.rectangle(band, top_left, bottom_right, (value), -1)
    cv2.imwrite(str(Path(output_path, "test_poly_with_hole_and_island.png")), band)
    return band
63 |
64 |
@pytest.mark.parametrize("allow_overlap", [True, False])
def test_convert_poly_with_hole_and_island(poly_with_hole_and_island: np.ndarray, allow_overlap: bool) -> None:
    """An island inside a hole yields two polygons: one with the hole, one without."""
    results = gebco_grid_to_polygon._extract_polygons(poly_with_hole_and_island, 50, 255, allow_overlap)

    assert len(results) == 2
    assert all(type(geom) is Polygon for geom in results)

    # the order of the two polygons is not guaranteed; normalize so the holed one is first
    if len(results[0].interiors) == 0:
        results = list(reversed(results))

    assert len(results[0].interiors) == 1
    assert len(results[1].interiors) == 0
78 |
79 |
@pytest.fixture
def poly_with_multiple_holes(output_path: Path) -> np.ndarray:
    """One large rectangle containing three separate zero-valued rectangles (three holes)."""
    band = np.zeros((1000, 2000, 1), np.uint8)
    rectangles = [
        ((100, 100), (1900, 900), 100),
        ((200, 200), (300, 300), 0),
        ((200, 400), (300, 500), 0),
        ((200, 600), (300, 700), 0),
    ]
    for top_left, bottom_right, value in rectangles:
        cv2.rectangle(band, top_left, bottom_right, (value), -1)
    cv2.imwrite(str(Path(output_path, "test_poly_with_multiple_holes.png")), band)
    return band
89 |
90 |
@pytest.mark.parametrize("allow_overlap", [True, False])
def test_convert_poly_with_multiple_holes(poly_with_multiple_holes: np.ndarray, allow_overlap: bool) -> None:
    """Three zero-valued rectangles inside the polygon must yield exactly three holes."""
    results = gebco_grid_to_polygon._extract_polygons(poly_with_multiple_holes, 50, 255, allow_overlap)

    assert len(results) == 1
    assert all(type(geom) is Polygon for geom in results)
    assert len(results[0].interiors) == 3
100 |
101 |
@pytest.fixture
def poly_with_multiple_layers(output_path: Path) -> np.ndarray:
    """Four concentric rectangles with increasing values, simulating stacked elevation bands."""
    band = np.zeros((1000, 2000, 1), np.uint8)
    rectangles = [
        ((100, 100), (900, 900), 100),
        ((200, 200), (800, 800), 150),
        ((300, 300), (700, 700), 200),
        ((400, 400), (600, 600), 250),
    ]
    for top_left, bottom_right, value in rectangles:
        cv2.rectangle(band, top_left, bottom_right, (value), -1)
    cv2.imwrite(str(Path(output_path, "poly_with_multiple_layers.png")), band)
    return band
111 |
112 |
def approx_90(actual_value: float, expected_value: float) -> bool:
    """Return True if actual_value is within 10% of expected_value.

    Bug fix: the previous expression ``abs(a / b) - 1 < 0.1`` applied ``abs`` to
    the ratio *before* subtracting 1, so any ratio below 1.1 — including values
    far too small (e.g. 10% of expected) — passed. The tolerance check belongs
    on the deviation of the ratio from 1.
    """
    return abs(actual_value / expected_value - 1) < 0.1
115 |
116 |
@pytest.mark.parametrize("allow_overlap", [True, False])
def test_convert_poly_with_multiple_layers(poly_with_multiple_layers: np.ndarray, allow_overlap: bool) -> None:
    """Extract four stacked value bands and check each polygon's area in both overlap modes."""
    # NOTE(review): mask is passed to the later extractions only — presumably it
    # accumulates area already claimed by previous bands; confirm against
    # gebco_grid_to_polygon._extract_polygons
    mask = np.zeros_like(poly_with_multiple_layers, dtype=np.uint8)

    results0 = gebco_grid_to_polygon._extract_polygons(poly_with_multiple_layers, 50, 125, allow_overlap)
    results1 = gebco_grid_to_polygon._extract_polygons(poly_with_multiple_layers, 125, 175, allow_overlap, mask=mask)
    results2 = gebco_grid_to_polygon._extract_polygons(poly_with_multiple_layers, 175, 225, allow_overlap, mask=mask)
    results3 = gebco_grid_to_polygon._extract_polygons(poly_with_multiple_layers, 225, 255, allow_overlap, mask=mask)

    if allow_overlap:
        # overlapping: each polygon covers its own band plus all brighter bands above it
        assert approx_90(results0[0].area, 800**2)
        assert approx_90(results1[0].area, 600**2)
        assert approx_90(results2[0].area, 400**2)
        assert approx_90(results3[0].area, 200**2)
    else:
        # non-overlapping: each polygon is a ring with the next brighter band cut out
        assert approx_90(results0[0].area, 800**2 - 600**2)
        assert approx_90(results1[0].area, 600**2 - 400**2)
        assert approx_90(results2[0].area, 400**2 - 200**2)
        assert approx_90(results3[0].area, 200**2)
136 |
--------------------------------------------------------------------------------
/tests/test_hatching.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import cv2
4 | import numpy as np
5 | import pytest
6 | from shapely import Point, LineString
7 |
8 | from lineworld.core.hatching import create_hatching, HatchingOptions
9 | from lineworld.util.geometrytools import unpack_multilinestring, _linestring_to_coordinate_pairs
10 |
# canvas shape as (height, width): a wide strip with room for nine test circles in a row
CANVAS_DIMENSIONS = [500, 5000]
12 |
13 |
@pytest.fixture
def canvas() -> np.ndarray:
    """A white 3-channel canvas to draw hatching results onto."""
    yield np.full((*CANVAS_DIMENSIONS, 3), 255, dtype=np.uint8)
17 |
18 |
def _draw_linestrings(canvas: np.ndarray, linestrings: list[LineString]):
    """Draw every linestring onto the canvas as 2px black segments."""
    for linestring in linestrings:
        for start, end in _linestring_to_coordinate_pairs(linestring):
            cv2.line(canvas, [int(c) for c in start], [int(c) for c in end], (0, 0, 0), 2)
25 |
26 |
def test_hatching(canvas: np.ndarray, output_path: Path):
    """Visual check: hatch a row of circles at various angles (including angles > 360 deg)."""
    angles = [0, 22.5, 45, 90, 135, 180, 270, 360, 360 + 45]
    for i, angle in enumerate(angles):
        center = [(i + 1) * CANVAS_DIMENSIONS[0], CANVAS_DIMENSIONS[0] // 2]
        circle = Point(center).buffer(CANVAS_DIMENSIONS[0] * 0.4)

        hatching_options = HatchingOptions()
        hatching_options.angle = angle
        hatching_options.distance = 10

        hatch_lines = create_hatching(circle, None, hatching_options)
        _draw_linestrings(canvas, unpack_multilinestring(hatch_lines))

    cv2.imwrite(str(Path(output_path, "test_hatching.png")), canvas)
40 |
41 |
@pytest.mark.parametrize("distance", [0, 1, 2, 4, 8, 16, 32, 32.5, -1])
def test_hatching_distance(distance: float):
    """Positive distances must produce hatch lines; zero or negative must yield None."""
    circle = Point([100, 100]).buffer(100)

    hatching_options = HatchingOptions()
    hatching_options.angle = 45
    hatching_options.distance = distance

    hatch_lines = create_hatching(circle, None, hatching_options)

    if distance <= 0:
        assert hatch_lines is None
    else:
        assert len(unpack_multilinestring(hatch_lines)) > 0
56 |
57 |
def test_hatching_wiggle(canvas: np.ndarray, output_path: Path):
    """Visual check: hatch a row of circles with increasing wiggle values."""
    wiggles = [0, 1, 2, 3, 4, 5, 6.5, 10, 100]
    for i, wiggle in enumerate(wiggles):
        center = [(i + 1) * CANVAS_DIMENSIONS[0], CANVAS_DIMENSIONS[0] // 2]
        circle = Point(center).buffer(CANVAS_DIMENSIONS[0] * 0.4)

        hatching_options = HatchingOptions()
        hatching_options.wiggle = wiggle
        hatching_options.distance = 10

        hatch_lines = create_hatching(circle, None, hatching_options)
        _draw_linestrings(canvas, unpack_multilinestring(hatch_lines))

    cv2.imwrite(str(Path(output_path, "test_hatching_wiggle.png")), canvas)
71 |
--------------------------------------------------------------------------------
/tests/test_hersheyfont.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import cv2
4 | import numpy as np
5 | import pytest
6 | import shapely
7 | from shapely import LineString, Point
8 |
9 | from lineworld.util.hersheyfont import HersheyFont, Align, _linestring_to_coordinate_pairs
10 |
# pangram: exercises every letter of the alphabet
TEXT = "The quick brown fox jumps over the lazy dog"
FONT_SIZE = 54
# canvas shape as (height, width)
CANVAS_DIMENSIONS = [1200, 1200]
14 |
15 |
@pytest.fixture
def font() -> HersheyFont:
    """The default Hershey font, loaded relative to the repository root."""
    yield HersheyFont(font_file=Path(".") / HersheyFont.DEFAULT_FONT)
19 |
20 |
@pytest.fixture
def canvas() -> np.ndarray:
    """A white 3-channel canvas to draw rendered text onto."""
    yield np.full((*CANVAS_DIMENSIONS, 3), 255, dtype=np.uint8)
24 |
25 |
def _draw_linestrings(canvas: np.ndarray, linestrings: list[LineString]):
    """Draw every linestring onto the canvas as 2px black segments."""
    for linestring in linestrings:
        for start, end in _linestring_to_coordinate_pairs(linestring):
            cv2.line(canvas, [int(c) for c in start], [int(c) for c in end], (0, 0, 0), 2)
32 |
33 |
def test_text_straight(font: HersheyFont, canvas: np.ndarray, output_path: Path):
    """Render the sample text on stacked horizontal paths, one per alignment."""
    for i, alignment in enumerate([Align.LEFT, Align.CENTER, Align.RIGHT]):
        offset_y = int(100 + i * 100)
        # densely segmentized path so glyphs can follow it smoothly
        path = LineString([[100, offset_y], [CANVAS_DIMENSIONS[1] - 100, offset_y]]).segmentize(0.1)
        _draw_linestrings(canvas, font.lines_for_text(TEXT, FONT_SIZE, path=path, align=alignment))

    cv2.imwrite(str(Path(output_path, "test_text_straight.png")), canvas)
42 |
43 |
def test_text_fontsize(font: HersheyFont, canvas: np.ndarray, output_path: Path):
    """Render the sample text at increasing font sizes, stacked vertically."""
    offset_y = 0
    for font_size in [1, 10, 20, 25.5, 50, 100]:
        offset_y += int(100 + font_size)
        path = LineString([[100, offset_y], [CANVAS_DIMENSIONS[1] - 100, offset_y]]).segmentize(0.1)
        lines = font.lines_for_text(TEXT, font_size, path=path, align=Align.LEFT, reverse_path=False)
        _draw_linestrings(canvas, lines)

    cv2.imwrite(str(Path(output_path, "test_text_fontsize.png")), canvas)
53 |
54 |
def test_text_curved(font: HersheyFont, canvas: np.ndarray, output_path: Path):
    """Render the sample text along a circular arc, once per alignment.

    Fix: the circle center was built as ``Point([x], [y])`` — two single-element
    lists where scalar coordinates were intended; pass the scalars directly.
    """
    # arc: circle boundary clipped to the canvas minus a 100px margin
    center = Point(CANVAS_DIMENSIONS[0] / 2, CANVAS_DIMENSIONS[1] * 0.7)
    path = shapely.intersection(
        LineString(list(center.buffer(CANVAS_DIMENSIONS[0] * 0.6).exterior.coords)),
        shapely.box(100, 100, CANVAS_DIMENSIONS[0] - 100, CANVAS_DIMENSIONS[1] - 100),
    )
    path = path.segmentize(1)

    for i, alignment in enumerate([Align.LEFT, Align.CENTER, Align.RIGHT]):
        # shift each rendering down so the three alignments do not overlap
        path = shapely.affinity.translate(path, yoff=200)
        linestrings = font.lines_for_text(TEXT, FONT_SIZE, path=path, align=alignment, reverse_path=True)
        _draw_linestrings(canvas, linestrings)

    cv2.imwrite(str(Path(output_path, "test_text_curved.png")), canvas)
74 |
--------------------------------------------------------------------------------
/tests/test_lineworld.py:
--------------------------------------------------------------------------------
1 | import lineworld
2 |
3 |
def test_recursive_dict_merge():
    """The merge must recurse into nested dicts, with values from the second dict winning."""
    nested_a = {"i": 10, "j": 11}
    nested_b = {"i": 12, "k": 13}

    first = {"a": 0, "b": 1, "c": nested_a, "d": 5}
    second = {"a": -1, "c": nested_b, "e": 6}

    expected = {"a": -1, "b": 1, "c": {"i": 12, "j": 11, "k": 13}, "d": 5, "e": 6}
    assert lineworld._recursive_dict_merge(first, second) == expected
17 |
18 |
def test_apply_config_to_object():
    """
    Test if the recursive dict merging of dicts with dataclass config objects
    (i.e. the FlowlineHatcherConfig) works as expected
    """

    # TODO: placeholder — raises so the missing test surfaces as a failure in every run
    raise NotImplementedError()
26 |
--------------------------------------------------------------------------------