├── .gitignore ├── .cursor └── mcp.json ├── .mcp.json ├── internal ├── conf │ ├── appconfig.go │ ├── config.go │ └── config_test.go ├── data │ ├── testing_utils.go │ ├── tiles_test.go │ ├── catalog.go │ ├── catalog_db_fun.go │ ├── db_sql.go │ ├── catalog_mock.go │ ├── catalog_test.go │ └── tiles.go ├── service │ ├── viewer.go │ ├── layers.go │ ├── cache_auth.go │ ├── cache_admin.go │ ├── health.go │ ├── cache_middleware.go │ ├── handler.go │ ├── tile.go │ ├── handler_test.go │ ├── service.go │ └── util.go ├── ui │ └── ui.go └── cache │ └── lru.go ├── .editorconfig ├── Dockerfile ├── go.mod ├── duckdb-tileserver.go ├── config └── duckdb-tileserver.toml.example ├── testing ├── sample_data.sql └── test_api_endpoints.sh ├── Makefile ├── FEATURES.md ├── .github └── workflows │ └── ci.yml ├── LICENSE.md ├── go.sum └── assets └── index.gohtml /.gitignore: -------------------------------------------------------------------------------- 1 | duckdb-tileserver 2 | duckdb-tileserver.exe 3 | launch.json 4 | docs/ 5 | public/ 6 | config/duckdb-tileserver.toml 7 | *.duckdb 8 | .playwright-mcp/ 9 | -------------------------------------------------------------------------------- /.cursor/mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "duckdb-go": { 4 | "url": "https://gitmcp.io/duckdb/duckdb-go" 5 | }, 6 | "duckdb-spatial": { 7 | "url": "https://gitmcp.io/duckdb/duckdb-spatial" 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- /.mcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "duckdb-go Docs": { 4 | "url": "https://gitmcp.io/duckdb/duckdb-go" 5 | }, 6 | "duckdb-spatial Docs": { 7 | "url": "https://gitmcp.io/duckdb/duckdb-spatial" 8 | }, 9 | "playwright": { 10 | "command": "npx", 11 | "args": [ 12 | "@playwright/mcp@latest" 13 | ] 14 | } 15 | } 16 | } -------------------------------------------------------------------------------- /internal/conf/appconfig.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | var setVersion string = "0.1.4" 4 | 5 | // AppConfiguration is the set of global application configuration constants. 6 | type AppConfiguration struct { 7 | // AppName name of the software 8 | Name string 9 | // AppVersion version number of the software 10 | Version string 11 | EnvPrefix string 12 | } 13 | 14 | var AppConfig = AppConfiguration{ 15 | Name: "duckdb-tileserver", 16 | Version: setVersion, 17 | EnvPrefix: "DUCKDBTS", 18 | } 19 | -------------------------------------------------------------------------------- /internal/data/testing_utils.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | "reflect" 7 | "runtime" 8 | "testing" 9 | ) 10 | 11 | // testEquals fails the test if exp is not equal to act. 
12 | func testEquals(tb testing.TB, exp, act interface{}, msg string) { 13 | if !reflect.DeepEqual(exp, act) { 14 | _, file, line, _ := runtime.Caller(1) 15 | fmt.Printf("%s:%d: %s - expected: %#v; got: %#v\n", filepath.Base(file), line, msg, exp, act) 16 | tb.FailNow() 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # these are the defaults 7 | [*] 8 | charset = utf-8 9 | end_of_line = lf 10 | trim_trailing_whitespace = true 11 | insert_final_newline = true 12 | indent_style = space 13 | indent_size = 4 14 | 15 | # Go files want tab indentation 16 | [*.go] 17 | indent_style = tab 18 | 19 | [Makefile] 20 | indent_style = tab 21 | 22 | # HTML files want narrow space 23 | [*.html] 24 | indent_style = space 25 | indent_size = 2 26 | 27 | # YAML files want narrow space 28 | [*.yml] 29 | indent_style = space 30 | indent_size = 2 31 | 32 | # SQL files want narrow space 33 | [*.sql] 34 | indent_style = space 35 | indent_size = 2 36 | 37 | # .bat files want CRLF 38 | [*.bat] 39 | end_of_line = crlf 40 | -------------------------------------------------------------------------------- /internal/service/viewer.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "net/http" 5 | 6 | log "github.com/sirupsen/logrus" 7 | "github.com/tobilg/duckdb-tileserver/internal/ui" 8 | ) 9 | 10 | // serveMapViewer serves the HTML map viewer page 11 | func serveMapViewer(w http.ResponseWriter, r *http.Request) *appError { 12 | log.Debug("Map viewer request") 13 | 14 | // Load the template 15 | templ, err := ui.LoadTemplate("index.gohtml") 16 | if err != nil { 17 | return appErrorInternal(err, "Error loading map viewer template") 18 | } 19 | 20 | // Execute the standalone template directly (it's a complete HTML page) 21 | w.Header().Set("Content-Type", ContentTypeHTML) 22 | w.WriteHeader(http.StatusOK) 23 | err = templ.Execute(w, nil) 24 | if err != nil { 25 | return appErrorInternal(err, "Error rendering map viewer") 26 | } 27 | 28 | return nil 29 | } 30 | -------------------------------------------------------------------------------- /internal/service/layers.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/tobilg/duckdb-tileserver/internal/data" 9 | ) 10 | 11 | // LayersResponse represents the JSON response for the /layers endpoint 12 | type LayersResponse struct { 13 | Layers []*data.Layer `json:"layers"` 14 | } 15 | 16 | // handleLayers returns a list of all available spatial layers 17 | func handleLayers(w http.ResponseWriter, r *http.Request) *appError { 18 | log.Debug("Layers request") 19 | 20 | // Get catalog instance 21 | catDB, ok := catalogInstance.(*data.CatalogDB) 22 | if !ok { 23 | return appErrorInternal(nil, "Invalid catalog type") 24 | } 25 | 26 | // Get all layers 27 | layers, err := catDB.GetLayers() 28 | if err != nil { 29 | return appErrorInternal(err, fmt.Sprintf("Error retrieving layers: %v", err)) 30 | } 31 | 32 | response := LayersResponse{ 33 | Layers: layers, 34 | } 35 | 36 | return writeJSON(w, ContentTypeJSON, response) 37 | } 38 | --------------------------------------------------------------------------------
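The /layers endpoint above returns a JSON object with a "layers" array, and the tile routes serve Mapbox Vector Tiles with an X-Cache header that reports cache hits and misses. A minimal Go client sketch for exercising these endpoints follows; the port comes from the example config and "buildings" is only an illustrative layer name taken from the sample data, not something the server guarantees:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://localhost:9000" // assumed default HttpPort from the example config

	// Discover available layers via the /layers endpoint.
	resp, err := http.Get(base + "/layers")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var layersDoc map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&layersDoc); err != nil {
		panic(err)
	}
	fmt.Printf("layers response: %v\n", layersDoc["layers"])

	// Fetch one tile; "buildings" is a hypothetical layer name.
	tileResp, err := http.Get(base + "/tiles/buildings/0/0/0.mvt")
	if err != nil {
		panic(err)
	}
	defer tileResp.Body.Close()
	body, _ := io.ReadAll(tileResp.Body)
	fmt.Printf("status=%d cache=%s bytes=%d\n",
		tileResp.StatusCode, tileResp.Header.Get("X-Cache"), len(body))
}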
/internal/ui/ui.go: -------------------------------------------------------------------------------- 1 | package ui 2 | 3 | import ( 4 | "bytes" 5 | "html/template" 6 | 7 | "github.com/tobilg/duckdb-tileserver/internal/conf" 8 | ) 9 | 10 | // HTMLDynamicLoad sets whether HTML templates are loaded every time they are used 11 | // this allows rapid prototyping of changes 12 | var HTMLDynamicLoad bool 13 | 14 | // RenderHTML tbd 15 | func RenderHTML(temp *template.Template, content interface{}, context interface{}) ([]byte, error) { 16 | bodyData := map[string]interface{}{ 17 | "config": conf.Configuration, 18 | "context": context, 19 | "data": content} 20 | contentBytes, err := renderTemplate(temp, bodyData) 21 | if err != nil { 22 | return contentBytes, err 23 | } 24 | return contentBytes, err 25 | } 26 | 27 | func renderTemplate(temp *template.Template, data map[string]interface{}) ([]byte, error) { 28 | var buf bytes.Buffer 29 | 30 | if err := temp.ExecuteTemplate(&buf, "page", data); err != nil { 31 | return buf.Bytes(), err 32 | } 33 | return buf.Bytes(), nil 34 | } 35 | 36 | // LoadTemplate loads a simple template file (for standalone HTML pages) 37 | func LoadTemplate(filename string) (*template.Template, error) { 38 | filePath := conf.Configuration.Server.AssetsPath + "/" + filename 39 | tmpl, err := template.ParseFiles(filePath) 40 | if err != nil { 41 | return nil, err 42 | } 43 | return tmpl, nil 44 | } 45 | -------------------------------------------------------------------------------- /internal/service/cache_auth.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "net/http" 5 | 6 | log "github.com/sirupsen/logrus" 7 | "github.com/tobilg/duckdb-tileserver/internal/conf" 8 | ) 9 | 10 | const ( 11 | headerAPIKey = "X-API-Key" 12 | ) 13 | 14 | // cacheAuthMiddleware validates API key for cache endpoints 15 | func cacheAuthMiddleware(next appHandler) appHandler { 16 | return func(w http.ResponseWriter, r *http.Request) *appError { 17 | // Get configured API key 18 | configuredKey := conf.Configuration.Cache.ApiKey 19 | 20 | // If no API key is configured, allow access (public mode) 21 | if configuredKey == "" { 22 | log.Debug("Cache endpoint accessed without authentication (public mode)") 23 | return next(w, r) 24 | } 25 | 26 | // API key is configured, validate the request 27 | providedKey := r.Header.Get(headerAPIKey) 28 | 29 | // Check if key was provided 30 | if providedKey == "" { 31 | log.Warnf("Cache endpoint accessed without API key from %s", r.RemoteAddr) 32 | return appErrorUnauthorized(nil, "API key required. 
Provide X-API-Key header.") 33 | } 34 | 35 | // Validate the key 36 | if providedKey != configuredKey { 37 | log.Warnf("Cache endpoint accessed with invalid API key from %s", r.RemoteAddr) 38 | return appErrorForbidden(nil, "Invalid API key") 39 | } 40 | 41 | // Authentication successful 42 | log.Debugf("Cache endpoint accessed with valid API key from %s", r.RemoteAddr) 43 | return next(w, r) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /internal/service/cache_admin.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | "github.com/gorilla/mux" 8 | ) 9 | 10 | // handleCacheStats returns cache statistics as JSON 11 | func (s *Service) handleCacheStats(w http.ResponseWriter, r *http.Request) *appError { 12 | if !s.cache.Enabled() { 13 | return writeJSON(w, "application/json", map[string]string{ 14 | "status": "disabled", 15 | }) 16 | } 17 | 18 | stats := s.cache.Stats() 19 | return writeJSON(w, "application/json", stats) 20 | } 21 | 22 | // handleCacheClear clears the entire cache 23 | func (s *Service) handleCacheClear(w http.ResponseWriter, r *http.Request) *appError { 24 | if !s.cache.Enabled() { 25 | return appErrorBadRequest(nil, "Cache is disabled") 26 | } 27 | 28 | s.cache.Clear() 29 | 30 | return writeJSON(w, "application/json", map[string]string{ 31 | "status": "ok", 32 | "message": "Cache cleared", 33 | }) 34 | } 35 | 36 | // handleCacheClearLayer clears all tiles for a specific layer 37 | func (s *Service) handleCacheClearLayer(w http.ResponseWriter, r *http.Request) *appError { 38 | if !s.cache.Enabled() { 39 | return appErrorBadRequest(nil, "Cache is disabled") 40 | } 41 | 42 | vars := mux.Vars(r) 43 | layer := vars["layer"] 44 | 45 | removed := s.cache.ClearLayer(layer) 46 | 47 | return writeJSON(w, "application/json", map[string]interface{}{ 48 | "status": "ok", 49 | "message": fmt.Sprintf("Cleared %d tiles for layer %s", removed, layer), 50 | "removed": removed, 51 | "layer": layer, 52 | }) 53 | } 54 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GOLANG_VERSION=1.24.6 2 | ARG TARGETARCH=amd64 3 | ARG VERSION=latest 4 | ARG PLATFORM=amd64 5 | 6 | FROM docker.io/library/golang:${GOLANG_VERSION}-bookworm AS builder 7 | LABEL stage=tileserverbuilder 8 | 9 | # Install build dependencies for CGO and C++ (glibc) 10 | RUN apt-get update && apt-get install -y \ 11 | build-essential \ 12 | pkg-config \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | ARG TARGETARCH 16 | ARG VERSION 17 | 18 | WORKDIR /app 19 | COPY . 
./ 20 | 21 | # Native build on the target platform (buildx handles emulation) 22 | RUN CGO_ENABLED=1 go build -v -ldflags "-s -w -X github.com/tobilg/duckdb-tileserver/internal/conf.setVersion=${VERSION}" 23 | 24 | FROM --platform=${TARGETARCH} gcr.io/distroless/cc-debian12:nonroot AS multi-stage 25 | 26 | WORKDIR / 27 | COPY --from=builder /app/duckdb-tileserver /duckdb-tileserver 28 | COPY --from=builder /app/assets /assets 29 | 30 | VOLUME ["/config"] 31 | VOLUME ["/assets"] 32 | 33 | EXPOSE 9000 34 | 35 | ENTRYPOINT ["/duckdb-tileserver"] 36 | CMD [] 37 | 38 | # Local stage for running with a locally built binary 39 | FROM --platform=${PLATFORM} gcr.io/distroless/cc-debian12:nonroot AS local 40 | 41 | WORKDIR / 42 | ADD ./duckdb-tileserver /duckdb-tileserver 43 | ADD ./assets /assets 44 | 45 | VOLUME ["/config"] 46 | VOLUME ["/assets"] 47 | 48 | EXPOSE 9000 49 | EXPOSE 9001 50 | 51 | ENTRYPOINT ["/duckdb-tileserver"] 52 | CMD [] 53 | 54 | # To build 55 | # make APPVERSION=1.1 clean build docker 56 | 57 | # To build using binaries from golang docker image 58 | # make APPVERSION=1.1 clean multi-stage-docker 59 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tobilg/duckdb-tileserver 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | github.com/duckdb/duckdb-go/v2 v2.5.4 7 | github.com/gorilla/handlers v1.5.2 8 | github.com/gorilla/mux v1.8.1 9 | github.com/hashicorp/golang-lru/v2 v2.0.7 10 | github.com/pborman/getopt/v2 v2.1.0 11 | github.com/sirupsen/logrus v1.9.3 12 | github.com/spf13/viper v1.21.0 13 | github.com/theckman/httpforwarded v0.4.0 14 | ) 15 | 16 | require ( 17 | github.com/apache/arrow-go/v18 v18.4.1 // indirect 18 | github.com/duckdb/duckdb-go-bindings v0.1.24 // indirect 19 | github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.24 // indirect 20 | github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.24 // indirect 21 | github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.24 // indirect 22 | github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.24 // indirect 23 | github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.24 // indirect 24 | github.com/duckdb/duckdb-go/arrowmapping v0.0.27 // indirect 25 | github.com/duckdb/duckdb-go/mapping v0.0.27 // indirect 26 | github.com/felixge/httpsnoop v1.0.3 // indirect 27 | github.com/fsnotify/fsnotify v1.9.0 // indirect 28 | github.com/go-viper/mapstructure/v2 v2.4.0 // indirect 29 | github.com/goccy/go-json v0.10.5 // indirect 30 | github.com/google/flatbuffers v25.9.23+incompatible // indirect 31 | github.com/google/uuid v1.6.0 // indirect 32 | github.com/klauspost/compress v1.18.2 // indirect 33 | github.com/klauspost/cpuid/v2 v2.3.0 // indirect 34 | github.com/pelletier/go-toml/v2 v2.2.4 // indirect 35 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 36 | github.com/sagikazarmark/locafero v0.11.0 // indirect 37 | github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect 38 | github.com/spf13/afero v1.15.0 // indirect 39 | github.com/spf13/cast v1.10.0 // indirect 40 | github.com/spf13/pflag v1.0.10 // indirect 41 | github.com/subosito/gotenv v1.6.0 // indirect 42 | github.com/zeebo/xxh3 v1.0.2 // indirect 43 | go.yaml.in/yaml/v3 v3.0.4 // indirect 44 | golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 // indirect 45 | golang.org/x/mod v0.31.0 // indirect 46 | golang.org/x/sync v0.19.0 // indirect 47 | golang.org/x/sys v0.39.0 // indirect 48 | golang.org/x/telemetry 
v0.0.0-20251208220230-2638a1023523 // indirect 49 | golang.org/x/text v0.28.0 // indirect 50 | golang.org/x/tools v0.40.0 // indirect 51 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect 52 | ) 53 | -------------------------------------------------------------------------------- /internal/data/tiles_test.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestLayerStruct(t *testing.T) { 8 | layer := &Layer{ 9 | Name: "buildings", 10 | Table: "buildings", 11 | GeometryColumn: "geom", 12 | GeometryType: "POLYGON", 13 | Srid: 3857, 14 | Properties: []string{"id", "name", "height"}, 15 | } 16 | 17 | if layer.Name != "buildings" { 18 | t.Errorf("Expected layer name 'buildings', got '%s'", layer.Name) 19 | } 20 | 21 | if layer.Srid != 3857 { 22 | t.Errorf("Expected SRID 3857, got %d", layer.Srid) 23 | } 24 | 25 | if len(layer.Properties) != 3 { 26 | t.Errorf("Expected 3 properties, got %d", len(layer.Properties)) 27 | } 28 | } 29 | 30 | func TestTileJSONStruct(t *testing.T) { 31 | tj := &TileJSON{ 32 | TileJSON: "3.0.0", 33 | Name: "test", 34 | Tiles: []string{"http://localhost:9000/tiles/test/{z}/{x}/{y}.mvt"}, 35 | MinZoom: 0, 36 | MaxZoom: 22, 37 | Bounds: []float64{-180, -85, 180, 85}, 38 | VectorLayers: []VectorLayer{ 39 | { 40 | ID: "test", 41 | MinZoom: 0, 42 | MaxZoom: 22, 43 | Fields: map[string]string{"id": "string"}, 44 | }, 45 | }, 46 | } 47 | 48 | if tj.TileJSON != "3.0.0" { 49 | t.Errorf("Expected TileJSON version 3.0.0, got %s", tj.TileJSON) 50 | } 51 | 52 | if len(tj.Tiles) != 1 { 53 | t.Errorf("Expected 1 tile URL, got %d", len(tj.Tiles)) 54 | } 55 | 56 | if len(tj.VectorLayers) != 1 { 57 | t.Errorf("Expected 1 vector layer, got %d", len(tj.VectorLayers)) 58 | } 59 | 60 | if tj.VectorLayers[0].ID != "test" { 61 | t.Errorf("Expected vector layer ID 'test', got '%s'", tj.VectorLayers[0].ID) 62 | } 63 | } 64 | 65 | func TestExtentStruct(t *testing.T) { 66 | extent := &Extent{ 67 | Minx: -180, 68 | Miny: -85, 69 | Maxx: 180, 70 | Maxy: 85, 71 | } 72 | 73 | if extent.Minx != -180 { 74 | t.Errorf("Expected Minx -180, got %f", extent.Minx) 75 | } 76 | 77 | if extent.Maxy != 85 { 78 | t.Errorf("Expected Maxy 85, got %f", extent.Maxy) 79 | } 80 | } 81 | 82 | func TestVectorLayerFields(t *testing.T) { 83 | fields := map[string]string{ 84 | "id": "number", 85 | "name": "string", 86 | "active": "boolean", 87 | } 88 | 89 | vl := VectorLayer{ 90 | ID: "test", 91 | Fields: fields, 92 | MinZoom: 0, 93 | MaxZoom: 14, 94 | } 95 | 96 | if len(vl.Fields) != 3 { 97 | t.Errorf("Expected 3 fields, got %d", len(vl.Fields)) 98 | } 99 | 100 | if vl.Fields["name"] != "string" { 101 | t.Errorf("Expected name field to be 'string', got '%s'", vl.Fields["name"]) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /internal/service/health.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "database/sql" 5 | "net/http" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/tobilg/duckdb-tileserver/internal/cache" 9 | "github.com/tobilg/duckdb-tileserver/internal/data" 10 | ) 11 | 12 | // HealthResponse represents the JSON response for the /health endpoint 13 | type HealthResponse struct { 14 | Status string `json:"status"` 15 | Database string `json:"database"` 16 | SpatialExtension string `json:"spatial_extension"` 17 | Cache CacheStatus `json:"cache"` 18 | } 19 | 20 
| // CacheStatus represents cache health information 21 | type CacheStatus struct { 22 | Enabled bool `json:"enabled"` 23 | Stats *cache.Stats `json:"stats,omitempty"` 24 | } 25 | 26 | // handleHealth returns health status of the service 27 | func handleHealth(w http.ResponseWriter, r *http.Request) *appError { 28 | log.Debug("Health check request") 29 | 30 | health := HealthResponse{ 31 | Status: "ok", 32 | Database: "unknown", 33 | SpatialExtension: "unknown", 34 | } 35 | 36 | // Check database connection 37 | catDB, ok := catalogInstance.(*data.CatalogDB) 38 | if !ok { 39 | health.Status = "error" 40 | health.Database = "disconnected" 41 | w.WriteHeader(http.StatusServiceUnavailable) 42 | return writeJSON(w, ContentTypeJSON, health) 43 | } 44 | 45 | // Get the underlying database connection to test it 46 | db := catDB.GetDB() 47 | if db == nil { 48 | health.Status = "error" 49 | health.Database = "disconnected" 50 | w.WriteHeader(http.StatusServiceUnavailable) 51 | return writeJSON(w, ContentTypeJSON, health) 52 | } 53 | 54 | // Ping database 55 | err := db.Ping() 56 | if err != nil { 57 | log.Warnf("Database ping failed: %v", err) 58 | health.Status = "error" 59 | health.Database = "disconnected" 60 | w.WriteHeader(http.StatusServiceUnavailable) 61 | return writeJSON(w, ContentTypeJSON, health) 62 | } 63 | health.Database = "connected" 64 | 65 | // Check if spatial extension is loaded 66 | var result sql.NullString 67 | err = db.QueryRow("SELECT ST_Point(0, 0)").Scan(&result) 68 | if err != nil { 69 | log.Warnf("Spatial extension check failed: %v", err) 70 | health.SpatialExtension = "not loaded" 71 | health.Status = "degraded" 72 | } else { 73 | health.SpatialExtension = "loaded" 74 | } 75 | 76 | // Add cache status 77 | cacheStatus := CacheStatus{ 78 | Enabled: serviceInstance != nil && serviceInstance.cache != nil && serviceInstance.cache.Enabled(), 79 | } 80 | if cacheStatus.Enabled { 81 | stats := serviceInstance.cache.Stats() 82 | cacheStatus.Stats = &stats 83 | } 84 | health.Cache = cacheStatus 85 | 86 | // Set response status code based on health 87 | if health.Status == "ok" { 88 | w.WriteHeader(http.StatusOK) 89 | } else if health.Status == "degraded" { 90 | w.WriteHeader(http.StatusOK) // Still operational but degraded 91 | } else { 92 | w.WriteHeader(http.StatusServiceUnavailable) 93 | } 94 | 95 | return writeJSON(w, ContentTypeJSON, health) 96 | } 97 | -------------------------------------------------------------------------------- /internal/service/cache_middleware.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | "github.com/tobilg/duckdb-tileserver/internal/conf" 10 | ) 11 | 12 | // tileCacheMiddleware wraps the tile handler to check cache first 13 | func (s *Service) tileCacheMiddleware(next appHandler) appHandler { 14 | return func(w http.ResponseWriter, r *http.Request) *appError { 15 | // Skip cache if service or cache is not initialized 16 | if s == nil || s.cache == nil || !s.cache.Enabled() { 17 | return next(w, r) 18 | } 19 | 20 | // Extract tile coordinates from URL 21 | vars := mux.Vars(r) 22 | layer := vars["layer"] 23 | z := vars["z"] 24 | x := vars["x"] 25 | y := vars["y"] 26 | 27 | // Build cache key 28 | cacheKey := fmt.Sprintf("%s:%s:%s:%s", layer, z, x, y) 29 | 30 | // Try cache first 31 | if cachedTile, found := s.cache.Get(r.Context(), cacheKey); found { 32 | // Cache hit - return immediately 33 | 
w.Header().Set("Content-Type", "application/vnd.mapbox-vector-tile") 34 | w.Header().Set("Access-Control-Allow-Origin", "*") 35 | w.Header().Set("X-Cache", "HIT") 36 | // Allow browser caching 37 | maxAge := conf.Configuration.Cache.BrowserCacheMaxAge 38 | w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", maxAge)) 39 | 40 | if len(cachedTile) == 0 { 41 | w.WriteHeader(http.StatusNoContent) 42 | } else { 43 | w.WriteHeader(http.StatusOK) 44 | w.Write(cachedTile) 45 | } 46 | return nil 47 | } 48 | 49 | // Cache miss - set headers before calling next handler 50 | w.Header().Set("X-Cache", "MISS") 51 | // Allow browser caching 52 | maxAge := conf.Configuration.Cache.BrowserCacheMaxAge 53 | w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", maxAge)) 54 | 55 | // Capture the response to store it 56 | recorder := &responseCapturer{ 57 | ResponseWriter: w, 58 | body: &bytes.Buffer{}, 59 | } 60 | 61 | // Call original handler 62 | appErr := next(recorder, r) 63 | 64 | // If successful, store in cache (async to not block response) 65 | if appErr == nil && recorder.statusCode == http.StatusOK { 66 | go s.cache.Set(r.Context(), cacheKey, recorder.body.Bytes()) 67 | } 68 | 69 | // Also cache empty tiles (204 No Content) 70 | if appErr == nil && recorder.statusCode == http.StatusNoContent { 71 | go s.cache.Set(r.Context(), cacheKey, []byte{}) 72 | } 73 | 74 | return appErr 75 | } 76 | } 77 | 78 | // responseCapturer captures the response body to store in cache 79 | type responseCapturer struct { 80 | http.ResponseWriter 81 | body *bytes.Buffer 82 | statusCode int 83 | } 84 | 85 | func (rc *responseCapturer) Write(b []byte) (int, error) { 86 | // If WriteHeader wasn't called explicitly, assume 200 OK 87 | if rc.statusCode == 0 { 88 | rc.statusCode = http.StatusOK 89 | } 90 | 91 | // Capture body 92 | rc.body.Write(b) 93 | 94 | // Write to original response 95 | return rc.ResponseWriter.Write(b) 96 | } 97 | 98 | func (rc *responseCapturer) WriteHeader(statusCode int) { 99 | rc.statusCode = statusCode 100 | rc.ResponseWriter.WriteHeader(statusCode) 101 | } 102 | -------------------------------------------------------------------------------- /internal/service/handler.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | "github.com/gorilla/mux" 8 | log "github.com/sirupsen/logrus" 9 | "github.com/tobilg/duckdb-tileserver/internal/conf" 10 | ) 11 | 12 | const ( 13 | ContentTypeJSON = "application/json" 14 | ContentTypeHTML = "text/html; charset=utf-8" 15 | ContentTypeMVT = "application/vnd.mapbox-vector-tile" 16 | ContentTypeText = "text/plain" 17 | ) 18 | 19 | // initRouter sets up the HTTP routes 20 | func initRouter(basePath string) *mux.Router { 21 | router := mux.NewRouter() 22 | 23 | // Apply base path if specified 24 | var r *mux.Router 25 | if basePath != "" { 26 | log.Infof("Using base path: %s", basePath) 27 | r = router.PathPrefix(basePath).Subrouter() 28 | } else { 29 | r = router 30 | } 31 | 32 | // Root endpoint - HTML map viewer 33 | r.Handle("/", appHandler(handleRoot)).Methods("GET") 34 | r.Handle("/index.html", appHandler(handleRoot)).Methods("GET") 35 | r.Handle("/home.html", appHandler(handleRoot)).Methods("GET") 36 | 37 | // Health check endpoint 38 | r.Handle("/health", appHandler(handleHealth)).Methods("GET") 39 | 40 | // Layers discovery endpoint 41 | r.Handle("/layers", appHandler(handleLayers)).Methods("GET") 42 | r.Handle("/layers.json", 
appHandler(handleLayers)).Methods("GET") 43 | 44 | // TileJSON metadata endpoint 45 | r.Handle("/tiles/{layer}.json", appHandler(handleTileJSON)).Methods("GET") 46 | 47 | // MVT tile endpoint (with cache middleware) 48 | r.Handle("/tiles/{layer}/{z:[0-9]+}/{x:[0-9]+}/{y:[0-9]+}.mvt", serviceInstance.tileCacheMiddleware(appHandler(handleTile))).Methods("GET") 49 | r.Handle("/tiles/{layer}/{z:[0-9]+}/{x:[0-9]+}/{y:[0-9]+}.pbf", serviceInstance.tileCacheMiddleware(appHandler(handleTile))).Methods("GET") 50 | 51 | // Cache management endpoints (conditionally registered) 52 | if !conf.Configuration.Cache.DisableApi { 53 | log.Info("Cache management endpoints enabled") 54 | // Apply authentication middleware if API key is configured 55 | r.Handle("/cache/stats", appHandler(cacheAuthMiddleware(serviceInstance.handleCacheStats))).Methods("GET") 56 | r.Handle("/cache/clear", appHandler(cacheAuthMiddleware(serviceInstance.handleCacheClear))).Methods("DELETE") 57 | r.Handle("/cache/layer/{layer}", appHandler(cacheAuthMiddleware(serviceInstance.handleCacheClearLayer))).Methods("DELETE") 58 | } else { 59 | log.Info("Cache management endpoints disabled") 60 | } 61 | 62 | // Log registered routes 63 | router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { 64 | pathTemplate, err := route.GetPathTemplate() 65 | if err == nil { 66 | log.Debugf("Registered route: %s", pathTemplate) 67 | } 68 | methods, err := route.GetMethods() 69 | if err == nil { 70 | log.Debugf(" Methods: %v", methods) 71 | } 72 | return nil 73 | }) 74 | 75 | return router 76 | } 77 | 78 | // handleRoot serves the main HTML map viewer 79 | func handleRoot(w http.ResponseWriter, r *http.Request) *appError { 80 | return serveMapViewer(w, r) 81 | } 82 | 83 | // getBaseURL constructs the base URL for the service 84 | func getBaseURL(r *http.Request) string { 85 | // Remove trailing slash from serveURLBase 86 | base := serveURLBase(r) 87 | if len(base) > 0 && base[len(base)-1] == '/' { 88 | base = base[:len(base)-1] 89 | } 90 | return base 91 | } 92 | 93 | // formatTileURL formats a tile URL pattern for use in map viewers 94 | func formatTileURL(baseURL string, layer string) string { 95 | return fmt.Sprintf("%s/tiles/%s/{z}/{x}/{y}.mvt", baseURL, layer) 96 | } 97 | -------------------------------------------------------------------------------- /internal/service/tile.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "strconv" 7 | 8 | "github.com/gorilla/mux" 9 | log "github.com/sirupsen/logrus" 10 | "github.com/tobilg/duckdb-tileserver/internal/data" 11 | ) 12 | 13 | // handleTile serves MVT tiles for a given layer and tile coordinates 14 | func handleTile(w http.ResponseWriter, r *http.Request) *appError { 15 | vars := mux.Vars(r) 16 | layer := vars["layer"] 17 | zStr := vars["z"] 18 | xStr := vars["x"] 19 | yStr := vars["y"] 20 | 21 | // Parse tile coordinates 22 | z, err := strconv.Atoi(zStr) 23 | if err != nil { 24 | return appErrorBadRequest(err, fmt.Sprintf("Invalid zoom level: %s", zStr)) 25 | } 26 | 27 | x, err := strconv.Atoi(xStr) 28 | if err != nil { 29 | return appErrorBadRequest(err, fmt.Sprintf("Invalid x coordinate: %s", xStr)) 30 | } 31 | 32 | y, err := strconv.Atoi(yStr) 33 | if err != nil { 34 | return appErrorBadRequest(err, fmt.Sprintf("Invalid y coordinate: %s", yStr)) 35 | } 36 | 37 | // Validate tile coordinates 38 | if z < 0 || z > 30 { 39 | return appErrorBadRequest(nil, fmt.Sprintf("Zoom 
level out of range: %d", z)) 40 | } 41 | 42 | maxCoord := 1 << uint(z) // 2^z 43 | if x < 0 || x >= maxCoord { 44 | return appErrorBadRequest(nil, fmt.Sprintf("X coordinate out of range: %d (max: %d)", x, maxCoord-1)) 45 | } 46 | 47 | if y < 0 || y >= maxCoord { 48 | return appErrorBadRequest(nil, fmt.Sprintf("Y coordinate out of range: %d (max: %d)", y, maxCoord-1)) 49 | } 50 | 51 | log.Debugf("Tile request: layer=%s z=%d x=%d y=%d", layer, z, x, y) 52 | 53 | // Get catalog instance (cast to access tile methods) 54 | catDB, ok := catalogInstance.(*data.CatalogDB) 55 | if !ok { 56 | return appErrorInternal(nil, "Invalid catalog type") 57 | } 58 | 59 | // Generate the tile 60 | tileData, err := catDB.GenerateTile(r.Context(), layer, z, x, y) 61 | if err != nil { 62 | if err.Error() == fmt.Sprintf("layer not found: %s", layer) { 63 | return appErrorNotFound(err, fmt.Sprintf("Layer not found: %s", layer)) 64 | } 65 | return appErrorInternal(err, fmt.Sprintf("Error generating tile: %v", err)) 66 | } 67 | 68 | // Return empty tile as 204 No Content if there's no data 69 | if len(tileData) == 0 { 70 | w.WriteHeader(http.StatusNoContent) 71 | return nil 72 | } 73 | 74 | // Write the tile data 75 | w.Header().Set("Content-Type", ContentTypeMVT) 76 | w.Header().Set("Access-Control-Allow-Origin", "*") 77 | w.WriteHeader(http.StatusOK) 78 | _, err = w.Write(tileData) 79 | if err != nil { 80 | return appErrorInternal(err, "Error writing tile data") 81 | } 82 | 83 | return nil 84 | } 85 | 86 | // handleTileJSON serves TileJSON metadata for a layer 87 | func handleTileJSON(w http.ResponseWriter, r *http.Request) *appError { 88 | vars := mux.Vars(r) 89 | layer := vars["layer"] 90 | 91 | log.Debugf("TileJSON request for layer: %s", layer) 92 | 93 | // Get catalog instance 94 | catDB, ok := catalogInstance.(*data.CatalogDB) 95 | if !ok { 96 | return appErrorInternal(nil, "Invalid catalog type") 97 | } 98 | 99 | // Get base URL for tile URLs 100 | baseURL := getBaseURL(r) 101 | 102 | // Generate TileJSON 103 | tileJSON, err := catDB.GetTileJSON(layer, baseURL) 104 | if err != nil { 105 | if err.Error() == fmt.Sprintf("layer not found: %s", layer) { 106 | return appErrorNotFound(err, fmt.Sprintf("Layer not found: %s", layer)) 107 | } 108 | return appErrorInternal(err, fmt.Sprintf("Error generating TileJSON: %v", err)) 109 | } 110 | 111 | // Return JSON response 112 | return writeJSON(w, ContentTypeJSON, tileJSON) 113 | } 114 | -------------------------------------------------------------------------------- /duckdb-tileserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | /* 4 | # Running 5 | Usage: ./duckdb-tileserver [ -test ] [ --database-path /path/to/database.db ] 6 | 7 | Browser: e.g. 
http://localhost:9000/ 8 | 9 | # Configuration 10 | DuckDB file path in env var `DUCKDBTS_DATABASE_PATH` 11 | Example: `export DUCKDBTS_DATABASE_PATH="/path/to/database.db"` 12 | 13 | Table filtering via env vars `DUCKDBTS_DATABASE_TABLEINCLUDES` and `DUCKDBTS_DATABASE_TABLEEXCLUDES` (optional) 14 | Examples: 15 | `export DUCKDBTS_DATABASE_TABLEINCLUDES="buildings,roads"` 16 | `export DUCKDBTS_DATABASE_TABLEEXCLUDES="temp,staging"` 17 | If not specified, all tables with geometry columns will be served as MVT tile layers 18 | 19 | # Logging 20 | Logging to stdout 21 | */ 22 | 23 | import ( 24 | "fmt" 25 | "os" 26 | 27 | "github.com/tobilg/duckdb-tileserver/internal/conf" 28 | "github.com/tobilg/duckdb-tileserver/internal/data" 29 | "github.com/tobilg/duckdb-tileserver/internal/service" 30 | "github.com/tobilg/duckdb-tileserver/internal/ui" 31 | 32 | "github.com/pborman/getopt/v2" 33 | log "github.com/sirupsen/logrus" 34 | ) 35 | 36 | var flagTestModeOn bool 37 | var flagDebugOn bool 38 | var flagDevModeOn bool 39 | var flagHelp bool 40 | var flagVersion bool 41 | var flagConfigFilename string 42 | var flagDuckDBPath string 43 | 44 | var flagDisableUi bool 45 | 46 | func init() { 47 | initCommandOptions() 48 | } 49 | 50 | func initCommandOptions() { 51 | getopt.FlagLong(&flagHelp, "help", '?', "Show command usage") 52 | getopt.FlagLong(&flagConfigFilename, "config", 'c', "", "config file name") 53 | getopt.FlagLong(&flagDebugOn, "debug", 'd', "Set logging level to TRACE") 54 | getopt.FlagLong(&flagDevModeOn, "devel", 0, "Run in development mode") 55 | getopt.FlagLong(&flagTestModeOn, "test", 't', "Serve mock data for testing") 56 | getopt.FlagLong(&flagVersion, "version", 'v', "Output the version information") 57 | getopt.FlagLong(&flagDuckDBPath, "database-path", 0, "", "Path to DuckDB database file") 58 | getopt.FlagLong(&flagDisableUi, "disable-ui", 0, "Disable HTML UI routes") 59 | } 60 | 61 | func main() { 62 | getopt.Parse() 63 | 64 | if flagHelp { 65 | getopt.Usage() 66 | os.Exit(1) 67 | } 68 | 69 | if flagVersion { 70 | fmt.Printf("%s %s\n", conf.AppConfig.Name, conf.AppConfig.Version) 71 | os.Exit(1) 72 | } 73 | 74 | log.Infof("---- %s - Version %s ----------\n", conf.AppConfig.Name, conf.AppConfig.Version) 75 | 76 | conf.InitConfig(flagConfigFilename, flagDebugOn) 77 | 78 | // Set DuckDB parameters from command line if provided 79 | if flagDuckDBPath != "" { 80 | conf.Configuration.Database.DatabasePath = flagDuckDBPath 81 | } 82 | 83 | // Set UI disable flag from command line 84 | if flagDisableUi { 85 | conf.Configuration.Server.DisableUi = true 86 | } 87 | 88 | if flagTestModeOn || flagDevModeOn { 89 | ui.HTMLDynamicLoad = true 90 | log.Info("Running in development mode") 91 | } 92 | // Command line overrides the config file for debugging 93 | if flagDebugOn || conf.Configuration.Server.Debug { 94 | log.SetLevel(log.TraceLevel) 95 | log.Debugf("Log level = DEBUG\n") 96 | } 97 | conf.DumpConfig() 98 | 99 | //-- Initialize catalog (with DB conn if used) 100 | var catalog data.Catalog 101 | if flagTestModeOn { 102 | catalog = data.CatMockInstance() 103 | } else { 104 | catalog = data.CatDBInstance() 105 | } 106 | includes := conf.Configuration.Database.TableIncludes 107 | excludes := conf.Configuration.Database.TableExcludes 108 | catalog.SetIncludeExclude(includes, excludes) 109 | 110 | //-- Start up service 111 | service.Initialize() 112 | service.Serve(catalog) 113 | } 114 | --------------------------------------------------------------------------------
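The tile routes use standard XYZ (slippy map) addressing, so the z/x/y values in /tiles/{layer}/{z}/{x}/{y}.mvt can be derived from a longitude/latitude with the usual Web Mercator formulas; the bounds check in tile.go (maxCoord = 2^z) follows the same scheme. A standalone Go sketch of that conversion follows; the "buildings" layer name and the New York coordinates from testing/sample_data.sql are only examples, and the math is the general XYZ formula rather than code taken from this repository:

package main

import (
	"fmt"
	"math"
)

// lonLatToTile returns the XYZ tile containing the given WGS84 coordinate
// at zoom z, using the standard slippy-map formulas.
func lonLatToTile(lon, lat float64, z int) (x, y int) {
	n := float64(int(1) << uint(z)) // 2^z tiles per axis, mirroring the range check in tile.go
	x = int((lon + 180.0) / 360.0 * n)
	latRad := lat * math.Pi / 180.0
	y = int((1.0 - math.Log(math.Tan(latRad)+1.0/math.Cos(latRad))/math.Pi) / 2.0 * n)
	return x, y
}

func main() {
	// Example: the New York City area used by testing/sample_data.sql ("buildings" is hypothetical).
	x, y := lonLatToTile(-74.0060, 40.7128, 12)
	fmt.Printf("/tiles/buildings/%d/%d/%d.mvt\n", 12, x, y)
}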
/config/duckdb-tileserver.toml.example: -------------------------------------------------------------------------------- 1 | [Server] 2 | # Accept connections on this subnet (default accepts on all) 3 | HttpHost = "0.0.0.0" 4 | 5 | # IP ports to listen on 6 | HttpPort = 9000 7 | # HttpsPort = 9001 8 | 9 | # HTTPS configuration - TLS server certificate full chain and key 10 | # If these are not specified, the TLS server will not be started 11 | # TlsServerCertificateFile = "" 12 | # TlsServerPrivateKeyFile = "" 13 | 14 | # Advertise URLs relative to this server name and path 15 | # The default is to look this up from incoming request headers 16 | # Note: do not add a trailing slash. 17 | # UrlBase = "http://localhost:9000" 18 | 19 | # Optional path to add to the service base URL 20 | # If set, all routes will be prefixed with this path 21 | # (e.g. "/tiles", "/services/tiles", etc.) 22 | # Note: No trailing slash is necessary, duckdb_tileserver automatically 23 | # adds a trailing slash for you. 24 | # BasePath = "/" 25 | 26 | # String to return for Access-Control-Allow-Origin header 27 | CORSOrigins = "*" 28 | 29 | # set Debug to true to run in debug mode (can also be set on cmd-line) 30 | # Debug = true 31 | 32 | # Read html templates from this directory 33 | AssetsPath = "./assets" 34 | 35 | # Maximum duration for reading entire request (in seconds) 36 | ReadTimeoutSec = 5 37 | 38 | # Maximum duration for writing response (in seconds) 39 | # Also controls maximum time for processing request 40 | WriteTimeoutSec = 30 41 | 42 | # Disable HTML UI routes (default is false) 43 | # DisableUi = false 44 | 45 | [Database] 46 | # DuckDB database file path 47 | # DUCKDBTS_DATABASE_PATH environment variable takes precedence if set. 48 | # DatabasePath = "/path/to/your/database.db" 49 | 50 | # Publish only these tables as tile layers (default is to publish all tables with geometry columns) 51 | # TableIncludes = [ "buildings", "roads", "parcels" ] 52 | 53 | # Do not publish these tables as tile layers 54 | # TableExcludes = [ "temp_table", "staging_data" ] 55 | 56 | # Connection pool settings 57 | # Maximum number of open connections to the database (default: 25) 58 | # Recommended: 2-4x your CPU cores 59 | # MaxOpenConns = 25 60 | 61 | # Maximum number of idle connections in the pool (default: 5) 62 | # Recommended: 20-30% of MaxOpenConns 63 | # MaxIdleConns = 5 64 | 65 | # Maximum lifetime of a connection in seconds (default: 3600 = 1 hour) 66 | # Connections older than this will be closed and recreated 67 | # ConnMaxLifetime = 3600 68 | 69 | # Maximum idle time of a connection in seconds (default: 600 = 10 minutes) 70 | # Idle connections older than this will be closed 71 | # ConnMaxIdleTime = 600 72 | 73 | [Metadata] 74 | # Title for this service 75 | Title = "DuckDB Tileserver" 76 | # Description of this service 77 | Description = "MVT Tile Server powered by DuckDB Spatial" 78 | 79 | [Cache] 80 | # Enable in-memory LRU tile caching 81 | Enabled = true 82 | 83 | # Disable /cache routes (default is false) 84 | # DisableApi = false 85 | 86 | # API Key for /cache routes (disabled by default, /cache routes are public) 87 | # ApiKey = "abcdefg" 88 | 89 | # Maximum number of tiles to cache (server-side) 90 | # Each tile is typically 5-50KB, so: 91 | # 1,000 tiles ≈ 20 MB 92 | # 10,000 tiles ≈ 200 MB 93 | # 50,000 tiles ≈ 1 GB 94 | # 100,000 tiles ≈ 2 GB 95 | MaxItems = 10000 96 | 97 | # Maximum memory usage in MB (approximate limit) 98 | # Cache will evict oldest tiles when this limit is approached 99 
| MaxMemoryMB = 1024 100 | 101 | # Browser cache max-age in seconds (client-side caching) 102 | # How long browsers should cache tiles before revalidating 103 | # Common values: 104 | # 3600 = 1 hour (good for frequently updated data) 105 | # 86400 = 1 day (good for daily updated data) 106 | # 604800 = 1 week (good for rarely updated data) 107 | # 0 = No browser caching (always revalidate) 108 | BrowserCacheMaxAge = 3600 109 | -------------------------------------------------------------------------------- /internal/data/catalog.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | const ( 10 | //errMsgCollectionNotFound = "Collection not found: %v" 11 | //errMsgFeatureNotFound = "Feature not found: %v" 12 | SRID_4326 = 4326 13 | SRID_UNKNOWN = -1 14 | ) 15 | 16 | // Catalog tbd 17 | type Catalog interface { 18 | SetIncludeExclude(includeList []string, excludeList []string) 19 | 20 | Tables() ([]*Table, error) 21 | 22 | // TableByName returns the table with given name. 23 | // It returns nil if the table does not exist 24 | TableByName(name string) (*Table, error) 25 | 26 | // TableReload reloads volatile table data 27 | TableReload(name string) 28 | 29 | // TableFeatures returns an array of the JSON for the features in a table 30 | // It returns nil if the table does not exist 31 | TableFeatures(ctx context.Context, name string, param *QueryParam) ([]string, error) 32 | 33 | // TableFeature returns the JSON text for a table feature with given id 34 | // It returns an empty string if the table or feature does not exist 35 | TableFeature(ctx context.Context, name string, id string, param *QueryParam) (string, error) 36 | 37 | Functions() ([]*Function, error) 38 | 39 | // FunctionByName returns the function with given name. 
40 | // It returns nil if the function does not exist 41 | FunctionByName(name string) (*Function, error) 42 | 43 | FunctionFeatures(ctx context.Context, name string, args map[string]string, param *QueryParam) ([]string, error) 44 | 45 | FunctionData(ctx context.Context, name string, args map[string]string, param *QueryParam) ([]map[string]interface{}, error) 46 | 47 | Close() 48 | } 49 | 50 | // TransformFunction denotes a geometry function with arguments 51 | type TransformFunction struct { 52 | Name string 53 | Arg []string 54 | } 55 | 56 | type Sorting struct { 57 | Name string 58 | IsDesc bool // false = ASC (default), true = DESC 59 | } 60 | 61 | type PropertyFilter struct { 62 | Name string 63 | Value string 64 | } 65 | 66 | // QueryParam holds the optional parameters for a data query 67 | type QueryParam struct { 68 | Crs int 69 | Limit int 70 | Offset int 71 | Bbox *Extent 72 | BboxCrs int 73 | FilterSql string 74 | Filter []*PropertyFilter 75 | // Columns is the list of columns to return 76 | Columns []string 77 | GroupBy []string 78 | SortBy []Sorting 79 | Precision int 80 | TransformFuns []TransformFunction 81 | } 82 | 83 | // Table holds metadata for table/view objects 84 | type Table struct { 85 | ID string 86 | Schema string 87 | Table string 88 | Title string 89 | Description string 90 | GeometryType string 91 | GeometryColumn string 92 | IDColumn string 93 | Srid int 94 | Extent Extent 95 | Columns []string 96 | DbTypes map[string]string 97 | JSONTypes []string 98 | ColDesc []string 99 | } 100 | 101 | // Extent of a table 102 | type Extent struct { 103 | Minx, Miny, Maxx, Maxy float64 104 | } 105 | 106 | // Function tbd 107 | type Function struct { 108 | ID string 109 | Schema string 110 | Name string 111 | Description string 112 | InNames []string 113 | InDbTypes []string 114 | InTypeMap map[string]string 115 | InDefaults []string 116 | NumNoDefault int 117 | OutNames []string 118 | OutDbTypes []string 119 | OutJSONTypes []string 120 | Types map[string]string 121 | GeometryColumn string 122 | IDColumn string 123 | } 124 | 125 | func (fun *Function) IsGeometryFunction() bool { 126 | for _, typ := range fun.OutDbTypes { 127 | if typ == "geometry" { 128 | return true 129 | } 130 | } 131 | return false 132 | } 133 | 134 | func (fun *TransformFunction) apply(expr string) string { 135 | if fun.Name == "" { 136 | return expr 137 | } 138 | if len(fun.Arg) == 0 { 139 | return fmt.Sprintf("%v( %v )", fun.Name, expr) 140 | } 141 | args := strings.Join(fun.Arg, ",") 142 | return fmt.Sprintf("%v( %v, %v )", fun.Name, expr, args) 143 | } 144 | 145 | // Creates a fully qualified function id. 146 | // adds default postgisftw schema if name arg has no schema 147 | func FunctionQualifiedId(name string) string { 148 | if strings.Contains(name, ".") { 149 | return name 150 | } 151 | return SchemaPostGISFTW + "." 
+ name 152 | } 153 | -------------------------------------------------------------------------------- /testing/sample_data.sql: -------------------------------------------------------------------------------- 1 | -- Sample test data for DuckDB Tileserver 2 | -- Creates sample tables with spatial data in different SRIDs 3 | 4 | -- Install and load spatial extension 5 | INSTALL spatial; 6 | LOAD spatial; 7 | 8 | -- Create a sample buildings table in Web Mercator (EPSG:3857) 9 | CREATE TABLE buildings AS 10 | SELECT 11 | row_number() OVER () as id, 12 | 'Building ' || row_number() OVER () as name, 13 | random() * 50 + 25 as height, 14 | ST_Transform( 15 | ST_GeomFromText( 16 | 'POLYGON((' || 17 | (lon - 0.001) || ' ' || (lat - 0.001) || ',' || 18 | (lon + 0.001) || ' ' || (lat - 0.001) || ',' || 19 | (lon + 0.001) || ' ' || (lat + 0.001) || ',' || 20 | (lon - 0.001) || ' ' || (lat + 0.001) || ',' || 21 | (lon - 0.001) || ' ' || (lat - 0.001) || 22 | '))', 23 | 4326 24 | ), 25 | 3857 26 | ) as geometry 27 | FROM ( 28 | SELECT 29 | -74.0060 + (random() - 0.5) * 0.1 as lon, 30 | 40.7128 + (random() - 0.5) * 0.1 as lat 31 | FROM range(1000) 32 | ); 33 | 34 | -- Create a sample roads table in EPSG:4326 (WGS84) 35 | CREATE TABLE roads AS 36 | SELECT 37 | row_number() OVER () as id, 38 | 'Road ' || row_number() OVER () as name, 39 | CASE 40 | WHEN random() < 0.3 THEN 'highway' 41 | WHEN random() < 0.6 THEN 'primary' 42 | WHEN random() < 0.8 THEN 'secondary' 43 | ELSE 'residential' 44 | END as road_type, 45 | ST_GeomFromText( 46 | 'LINESTRING(' || 47 | lon_start || ' ' || lat_start || ',' || 48 | lon_end || ' ' || lat_end || 49 | ')', 50 | 4326 51 | ) as geometry 52 | FROM ( 53 | SELECT 54 | -74.0060 + (random() - 0.5) * 0.1 as lon_start, 55 | 40.7128 + (random() - 0.5) * 0.1 as lat_start, 56 | -74.0060 + (random() - 0.5) * 0.1 as lon_end, 57 | 40.7128 + (random() - 0.5) * 0.1 as lat_end 58 | FROM range(500) 59 | ); 60 | 61 | -- Create a sample points of interest table 62 | CREATE TABLE poi AS 63 | SELECT 64 | row_number() OVER () as id, 65 | poi_name as name, 66 | poi_type as type, 67 | ST_Transform( 68 | ST_Point(lon, lat, 4326), 69 | 3857 70 | ) as geometry 71 | FROM ( 72 | SELECT 73 | -74.0060 + (random() - 0.5) * 0.1 as lon, 74 | 40.7128 + (random() - 0.5) * 0.1 as lat, 75 | CASE 76 | WHEN random() < 0.2 THEN 'restaurant' 77 | WHEN random() < 0.4 THEN 'cafe' 78 | WHEN random() < 0.6 THEN 'park' 79 | WHEN random() < 0.8 THEN 'museum' 80 | ELSE 'shop' 81 | END as poi_type, 82 | 'POI ' || row_number() OVER () as poi_name 83 | FROM range(200) 84 | ); 85 | 86 | -- Create a sample parcels table with more complex polygons 87 | CREATE TABLE parcels AS 88 | SELECT 89 | row_number() OVER () as id, 90 | 'Parcel ' || row_number() OVER () as parcel_id, 91 | random() * 10000 + 1000 as area_sqm, 92 | CASE 93 | WHEN random() < 0.3 THEN 'residential' 94 | WHEN random() < 0.6 THEN 'commercial' 95 | WHEN random() < 0.8 THEN 'industrial' 96 | ELSE 'mixed' 97 | END as zoning, 98 | ST_Transform( 99 | ST_Buffer(ST_Point(lon, lat, 4326), 0.002), 100 | 3857 101 | ) as geometry 102 | FROM ( 103 | SELECT 104 | -74.0060 + (random() - 0.5) * 0.1 as lon, 105 | 40.7128 + (random() - 0.5) * 0.1 as lat 106 | FROM range(300) 107 | ); 108 | 109 | -- Display summary 110 | SELECT 'buildings' as table_name, COUNT(*) as count, ST_GeometryType(geometry) as geom_type, ST_SRID(geometry) as srid FROM buildings 111 | UNION ALL 112 | SELECT 'roads', COUNT(*), ST_GeometryType(geometry), ST_SRID(geometry) FROM roads 113 | UNION ALL 114 | 
SELECT 'poi', COUNT(*), ST_GeometryType(geometry), ST_SRID(geometry) FROM poi 115 | UNION ALL 116 | SELECT 'parcels', COUNT(*), ST_GeometryType(geometry), ST_SRID(geometry) FROM parcels; 117 | 118 | -- Create spatial indexes for better performance 119 | -- Note: DuckDB spatial extension automatically creates R-Tree indexes for geometry columns 120 | ANALYZE; 121 | 122 | -- Show extents 123 | SELECT 'buildings' as layer, ST_AsText(ST_Extent(ST_Transform(geometry, 4326))) as extent_wgs84 FROM buildings 124 | UNION ALL 125 | SELECT 'roads', ST_AsText(ST_Extent(geometry)) FROM roads 126 | UNION ALL 127 | SELECT 'poi', ST_AsText(ST_Extent(ST_Transform(geometry, 4326))) FROM poi 128 | UNION ALL 129 | SELECT 'parcels', ST_AsText(ST_Extent(ST_Transform(geometry, 4326))) FROM parcels; 130 | -------------------------------------------------------------------------------- /internal/cache/lru.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "sync/atomic" 8 | 9 | lru "github.com/hashicorp/golang-lru/v2" 10 | log "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // TileCache provides thread-safe LRU caching for MVT tiles 14 | type TileCache struct { 15 | cache *lru.Cache[string, []byte] 16 | enabled bool 17 | maxMemoryMB int64 18 | 19 | // Metrics (atomic counters for thread-safety) 20 | hits atomic.Int64 21 | misses atomic.Int64 22 | evictions atomic.Int64 23 | currentSize atomic.Int64 24 | currentBytes atomic.Int64 25 | } 26 | 27 | // Stats represents cache statistics 28 | type Stats struct { 29 | Hits int64 `json:"hits"` 30 | Misses int64 `json:"misses"` 31 | Evictions int64 `json:"evictions"` 32 | Size int `json:"size"` // Number of items 33 | MemoryBytes int64 `json:"memory_bytes"` 34 | HitRate float64 `json:"hit_rate"` // Percentage 35 | } 36 | 37 | // NewTileCache creates a new LRU tile cache 38 | func NewTileCache(maxItems int, maxMemoryMB int) (*TileCache, error) { 39 | if maxItems <= 0 { 40 | return nil, fmt.Errorf("maxItems must be positive, got %d", maxItems) 41 | } 42 | 43 | tc := &TileCache{ 44 | enabled: true, 45 | maxMemoryMB: int64(maxMemoryMB), 46 | } 47 | 48 | // Create LRU cache with eviction callback 49 | cache, err := lru.NewWithEvict(maxItems, tc.onEvict) 50 | if err != nil { 51 | return nil, err 52 | } 53 | tc.cache = cache 54 | 55 | log.Infof("Initialized tile cache: max_items=%d max_memory=%dMB", maxItems, maxMemoryMB) 56 | return tc, nil 57 | } 58 | 59 | // NewDisabledCache returns a cache that's disabled (always misses) 60 | func NewDisabledCache() *TileCache { 61 | return &TileCache{enabled: false} 62 | } 63 | 64 | // Get retrieves a tile from cache 65 | func (tc *TileCache) Get(ctx context.Context, key string) ([]byte, bool) { 66 | if !tc.enabled { 67 | return nil, false 68 | } 69 | 70 | tile, ok := tc.cache.Get(key) 71 | if ok { 72 | tc.hits.Add(1) 73 | log.Debugf("Cache HIT: %s", key) 74 | return tile, true 75 | } 76 | 77 | tc.misses.Add(1) 78 | log.Debugf("Cache MISS: %s", key) 79 | return nil, false 80 | } 81 | 82 | // Set stores a tile in cache 83 | func (tc *TileCache) Set(ctx context.Context, key string, data []byte) error { 84 | if !tc.enabled || len(data) == 0 { 85 | return nil 86 | } 87 | 88 | tileSize := int64(len(data)) 89 | 90 | // Check memory limit before adding 91 | if tc.maxMemoryMB > 0 { 92 | currentMB := tc.currentBytes.Load() / 1024 / 1024 93 | tileMB := tileSize / 1024 / 1024 94 | 95 | if currentMB+tileMB > tc.maxMemoryMB { 96 | log.Debugf("Cache 
memory limit reached, evicting to make space") 97 | // LRU will automatically evict oldest items 98 | } 99 | } 100 | 101 | // Make a copy to avoid referencing request data 102 | tileCopy := make([]byte, len(data)) 103 | copy(tileCopy, data) 104 | 105 | tc.cache.Add(key, tileCopy) 106 | tc.currentBytes.Add(tileSize) 107 | tc.currentSize.Add(1) 108 | 109 | log.Debugf("Cache SET: %s (%d bytes)", key, tileSize) 110 | return nil 111 | } 112 | 113 | // onEvict is called when an item is evicted from the LRU cache 114 | func (tc *TileCache) onEvict(key string, value []byte) { 115 | tc.evictions.Add(1) 116 | tc.currentSize.Add(-1) 117 | tc.currentBytes.Add(-int64(len(value))) 118 | log.Debugf("Cache EVICT: %s", key) 119 | } 120 | 121 | // Clear removes all items from cache 122 | func (tc *TileCache) Clear() { 123 | if !tc.enabled { 124 | return 125 | } 126 | 127 | tc.cache.Purge() 128 | tc.currentSize.Store(0) 129 | tc.currentBytes.Store(0) 130 | log.Info("Cache cleared") 131 | } 132 | 133 | // ClearLayer removes all tiles for a specific layer 134 | func (tc *TileCache) ClearLayer(layerName string) int { 135 | if !tc.enabled { 136 | return 0 137 | } 138 | 139 | removed := 0 140 | keys := tc.cache.Keys() 141 | 142 | prefix := layerName + ":" 143 | for _, key := range keys { 144 | if strings.HasPrefix(key, prefix) { 145 | tc.cache.Remove(key) 146 | removed++ 147 | } 148 | } 149 | 150 | log.Infof("Cleared %d tiles for layer %s", removed, layerName) 151 | return removed 152 | } 153 | 154 | // Stats returns current cache statistics 155 | func (tc *TileCache) Stats() Stats { 156 | if !tc.enabled { 157 | return Stats{} 158 | } 159 | 160 | hits := tc.hits.Load() 161 | misses := tc.misses.Load() 162 | total := hits + misses 163 | 164 | hitRate := 0.0 165 | if total > 0 { 166 | hitRate = float64(hits) / float64(total) * 100.0 167 | } 168 | 169 | return Stats{ 170 | Hits: hits, 171 | Misses: misses, 172 | Evictions: tc.evictions.Load(), 173 | Size: tc.cache.Len(), 174 | MemoryBytes: tc.currentBytes.Load(), 175 | HitRate: hitRate, 176 | } 177 | } 178 | 179 | // Enabled returns whether the cache is enabled 180 | func (tc *TileCache) Enabled() bool { 181 | return tc.enabled 182 | } 183 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ##AVAILABLE BUILD OPTIONS - 2 | ## APPVERSION - Variable to set the version label 3 | ## GOVERSION - Defaults to 1.21.6 but can be overriden, uses alpine go container as base 4 | ## PROGRAM - Name of binary, duckdb-tileserver 5 | ## CONTAINER - prefix and name of the generated container 6 | ## DATE - Date String used as alternate tag for generated containers 7 | ## BASE_REGISTRY - This is the registry to pull the base image from 8 | ## BASE_IMAGE - The base image to use for the final container 9 | ## TARGETARCH - The architecture the resulting image is based on and the binary is compiled for 10 | ## IMAGE_TAG - The tag to be applied to the container 11 | 12 | APPVERSION ?= latest 13 | GOVERSION ?= 1.24.6 14 | PROGRAM ?= duckdb-tileserver 15 | CONTAINER ?= tobilg/$(PROGRAM) 16 | DATE ?= $(shell date +%Y%m%d) 17 | BASE_REGISTRY ?= registry.access.redhat.com 18 | BASE_IMAGE ?= ubi8-micro 19 | SYSTEMARCH = $(shell uname -m) 20 | 21 | ifeq ($(SYSTEMARCH), x86_64) 22 | TARGETARCH ?= amd64 23 | PLATFORM=amd64 24 | else 25 | TARGETARCH ?= arm64 26 | PLATFORM=arm64 27 | endif 28 | 29 | IMAGE_TAG ?= $(APPVERSION)-$(TARGETARCH) 30 | DATE_TAG ?= $(DATE)-$(TARGETARCH) 
31 | 32 | RM = /bin/rm 33 | CP = /bin/cp 34 | MKDIR = /bin/mkdir 35 | SED = /usr/bin/sed 36 | 37 | .PHONY: build bin-for-docker build-common build-in-docker check clean docker install multi-stage-docker release set-local set-multi-stage test uninstall 38 | 39 | .DEFAULT_GOAL := help 40 | 41 | GOFILES := $(shell find . -type f -name '*.go') 42 | 43 | check: ## This checks the current version of Go installed locally 44 | go version 45 | 46 | clean: ## This will clean all local build artifacts 47 | $(info Cleaning project...) 48 | @rm -f $(PROGRAM) 49 | @docker image inspect $(CONTAINER):$(IMAGE_TAG) >/dev/null 2>&1 && docker rmi -f $(shell docker images --filter label=release=latest --filter=reference="*$(PROGRAM):*" -q) || echo -n "" 50 | 51 | build: $(PROGRAM) ## Build a local binary using APPVERSION parameter or CI as default 52 | 53 | $(PROGRAM): $(GOFILES) 54 | go build -v -ldflags "-s -w -X github.com/tobilg/duckdb-tileserver/internal/conf.setVersion=$(APPVERSION)" 55 | 56 | bin-for-docker: $(GOFILES) ## Build a local binary using APPVERSION parameter or CI as default (to be used in docker image) 57 | # to be used in docker the built binary needs the CGO_ENABLED=0 option 58 | CGO_ENABLED=0 go build -v -ldflags "-s -w -X github.com/tobilg/duckdb-tileserver/internal/conf.setVersion=$(APPVERSION)" 59 | 60 | build-in-docker: $(GOFILES) ## Build a local binary based on a golang base docker image without the need of a local go environment 61 | docker run --rm -v "$(PWD)":/usr/src/myapp:z -w /usr/src/myapp golang:$(GOVERSION) make APPVERSION=$(APPVERSION) $(PROGRAM) 62 | 63 | build-common: Dockerfile 64 | docker buildx build -f Dockerfile \ 65 | --platform linux/$(TARGETARCH) \ 66 | --target $(BUILDTYPE) \ 67 | --build-arg VERSION=$(APPVERSION) \ 68 | --build-arg GOLANG_VERSION=$(GOVERSION) \ 69 | --build-arg TARGETARCH=$(TARGETARCH) \ 70 | --build-arg PLATFORM=$(PLATFORM) \ 71 | --build-arg BASE_REGISTRY=$(BASE_REGISTRY) \ 72 | --build-arg BASE_IMAGE=$(BASE_IMAGE) \ 73 | --label release="$(APPVERSION)" \ 74 | --label os.version="7.7" \ 75 | --load \ 76 | -t $(CONTAINER):$(IMAGE_TAG) -t $(CONTAINER):$(DATE_TAG) .
77 | docker image prune --filter label=stage=featureservbuilder -f 78 | 79 | set-local: 80 | $(eval BUILDTYPE = local) 81 | 82 | set-multi-stage: 83 | $(eval BUILDTYPE = multi-stage) 84 | 85 | docker: bin-for-docker Dockerfile set-local build-common ## Generate a BASE_IMAGE container with APPVERSION tag, using a locally built binary 86 | 87 | multi-stage-docker: Dockerfile set-multi-stage build-common ## Generate a BASE_IMAGE container with APPVERSION tag, using a binary built in an alpine golang build container 88 | 89 | release: clean docker ## Generate a local build, and then uses the local build to generate BASE_IMAGE container 90 | 91 | test: ## Run the tests locally 92 | go test -v ./internal/conf ./internal/data ./internal/service 93 | 94 | install: $(PROGRAM) ## This will install the program locally 95 | $(MKDIR) -p $(DESTDIR)/usr/bin 96 | $(MKDIR) -p $(DESTDIR)/usr/share/$(PROGRAM) 97 | $(MKDIR) -p $(DESTDIR)/etc 98 | $(CP) $(PROGRAM) $(DESTDIR)/usr/bin/$(PROGRAM) 99 | $(SED) 's,./assets,/usr/share/$(PROGRAM)/assets,' config/$(PROGRAM).toml.example > $(DESTDIR)/etc/$(PROGRAM).toml 100 | $(CP) -r assets $(DESTDIR)/usr/share/$(PROGRAM)/assets 101 | 102 | uninstall: ## This will uninstall the program from your local system 103 | $(RM) $(DESTDIR)/usr/bin/$(PROGRAM) 104 | $(RM) $(DESTDIR)/etc/$(PROGRAM).toml 105 | $(RM) -r $(DESTDIR)/usr/share/$(PROGRAM) 106 | 107 | help: ## Prints this help message 108 | @echo "" 109 | @echo "" 110 | @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | fgrep -v : | sed -e 's/\\$$//' | sed -e 's/.*##//' 111 | @echo "" 112 | @echo "BUILD TARGETS:" 113 | @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | fgrep : | sed -e 's/\\$$//' | sed -e 's/:.*##/:/' 114 | @echo "" 115 | @echo "" 116 | -------------------------------------------------------------------------------- /internal/service/handler_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "crypto/tls" 5 | "encoding/json" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | 10 | "github.com/gorilla/mux" 11 | "github.com/tobilg/duckdb-tileserver/internal/cache" 12 | "github.com/tobilg/duckdb-tileserver/internal/conf" 13 | "github.com/tobilg/duckdb-tileserver/internal/data" 14 | ) 15 | 16 | func init() { 17 | // Initialize minimal config for testing 18 | conf.Configuration.Server.AssetsPath = "../../assets" 19 | conf.Configuration.Metadata.Title = "Test Tileserver" 20 | conf.Configuration.Metadata.Description = "Test Description" 21 | conf.Configuration.Cache.Enabled = false // Disable cache for tests 22 | } 23 | 24 | func setupTestCatalog() { 25 | catalogInstance = data.CatMockInstance() 26 | 27 | // Initialize a test service instance with disabled cache 28 | serviceInstance = &Service{ 29 | cache: cache.NewDisabledCache(), 30 | } 31 | } 32 | 33 | func TestHandleHealth(t *testing.T) { 34 | setupTestCatalog() 35 | 36 | req, err := http.NewRequest("GET", "/health", nil) 37 | if err != nil { 38 | t.Fatal(err) 39 | } 40 | 41 | rr := httptest.NewRecorder() 42 | handler := appHandler(handleHealth) 43 | handler.ServeHTTP(rr, req) 44 | 45 | var response HealthResponse 46 | err = json.Unmarshal(rr.Body.Bytes(), &response) 47 | if err != nil { 48 | t.Errorf("Failed to parse health response: %v", err) 49 | } 50 | 51 | // Mock catalog returns error status (503) because it doesn't have a real DB connection 52 | if status := rr.Code; status != http.StatusServiceUnavailable { 53 | t.Errorf("handler returned wrong status code: got %v 
want %v", status, http.StatusServiceUnavailable) 54 | } 55 | 56 | if response.Status != "error" { 57 | t.Errorf("Expected status 'error' for mock catalog, got '%s'", response.Status) 58 | } 59 | } 60 | 61 | func TestHandleRoot(t *testing.T) { 62 | setupTestCatalog() 63 | 64 | req, err := http.NewRequest("GET", "/", nil) 65 | if err != nil { 66 | t.Fatal(err) 67 | } 68 | 69 | rr := httptest.NewRecorder() 70 | handler := appHandler(handleRoot) 71 | handler.ServeHTTP(rr, req) 72 | 73 | if status := rr.Code; status != http.StatusOK { 74 | t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusOK) 75 | } 76 | 77 | contentType := rr.Header().Get("Content-Type") 78 | if contentType != ContentTypeHTML { 79 | t.Errorf("Expected Content-Type %s, got %s", ContentTypeHTML, contentType) 80 | } 81 | } 82 | 83 | func TestHandleTileInvalidCoordinates(t *testing.T) { 84 | setupTestCatalog() 85 | 86 | tests := []struct { 87 | name string 88 | url string 89 | code int 90 | }{ 91 | {"Invalid zoom", "/tiles/test/99/0/0.mvt", http.StatusBadRequest}, 92 | {"Negative zoom", "/tiles/test/-1/0/0.mvt", http.StatusNotFound}, // Regex pattern doesn't match negative numbers 93 | {"Invalid x", "/tiles/test/10/9999/0.mvt", http.StatusBadRequest}, 94 | {"Invalid y", "/tiles/test/10/0/9999.mvt", http.StatusBadRequest}, 95 | {"Negative x", "/tiles/test/10/-1/0.mvt", http.StatusNotFound}, // Regex pattern doesn't match negative numbers 96 | {"Negative y", "/tiles/test/10/0/-1.mvt", http.StatusNotFound}, // Regex pattern doesn't match negative numbers 97 | } 98 | 99 | for _, tt := range tests { 100 | t.Run(tt.name, func(t *testing.T) { 101 | req, err := http.NewRequest("GET", tt.url, nil) 102 | if err != nil { 103 | t.Fatal(err) 104 | } 105 | 106 | rr := httptest.NewRecorder() 107 | router := initRouter("") 108 | router.ServeHTTP(rr, req) 109 | 110 | if status := rr.Code; status != tt.code { 111 | t.Errorf("handler returned wrong status code: got %v want %v", status, tt.code) 112 | } 113 | }) 114 | } 115 | } 116 | 117 | func TestRouter(t *testing.T) { 118 | router := initRouter("") 119 | 120 | tests := []struct { 121 | method string 122 | path string 123 | match bool 124 | }{ 125 | {"GET", "/", true}, 126 | {"GET", "/index.html", true}, 127 | {"GET", "/health", true}, 128 | {"GET", "/layers", true}, 129 | {"GET", "/tiles/buildings.json", true}, 130 | {"GET", "/tiles/buildings/10/512/384.mvt", true}, 131 | {"GET", "/tiles/buildings/10/512/384.pbf", true}, 132 | {"POST", "/", false}, 133 | {"GET", "/invalid", false}, 134 | } 135 | 136 | for _, tt := range tests { 137 | t.Run(tt.method+" "+tt.path, func(t *testing.T) { 138 | req, err := http.NewRequest(tt.method, tt.path, nil) 139 | if err != nil { 140 | t.Fatal(err) 141 | } 142 | 143 | var match bool 144 | router.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { 145 | if route.Match(req, &mux.RouteMatch{}) { 146 | match = true 147 | } 148 | return nil 149 | }) 150 | 151 | if match != tt.match { 152 | t.Errorf("Expected route match %v for %s %s, got %v", tt.match, tt.method, tt.path, match) 153 | } 154 | }) 155 | } 156 | } 157 | 158 | func TestGetBaseURL(t *testing.T) { 159 | tests := []struct { 160 | name string 161 | host string 162 | scheme string 163 | expected string 164 | }{ 165 | { 166 | name: "Simple HTTP", 167 | host: "localhost:9000", 168 | scheme: "http", 169 | expected: "http://localhost:9000", 170 | }, 171 | { 172 | name: "HTTPS", 173 | host: "example.com", 174 | scheme: "https", 175 | expected: 
"https://example.com", 176 | }, 177 | } 178 | 179 | for _, tt := range tests { 180 | t.Run(tt.name, func(t *testing.T) { 181 | req := httptest.NewRequest("GET", "/", nil) 182 | req.Host = tt.host 183 | if tt.scheme == "https" { 184 | req.TLS = &tls.ConnectionState{} 185 | } 186 | 187 | baseURL := getBaseURL(req) 188 | if baseURL != tt.expected { 189 | t.Errorf("Expected base URL %s, got %s", tt.expected, baseURL) 190 | } 191 | }) 192 | } 193 | } 194 | 195 | func TestFormatTileURL(t *testing.T) { 196 | tests := []struct { 197 | baseURL string 198 | layer string 199 | expected string 200 | }{ 201 | { 202 | baseURL: "http://localhost:9000", 203 | layer: "buildings", 204 | expected: "http://localhost:9000/tiles/buildings/{z}/{x}/{y}.mvt", 205 | }, 206 | { 207 | baseURL: "https://example.com", 208 | layer: "roads", 209 | expected: "https://example.com/tiles/roads/{z}/{x}/{y}.mvt", 210 | }, 211 | } 212 | 213 | for _, tt := range tests { 214 | t.Run(tt.layer, func(t *testing.T) { 215 | result := formatTileURL(tt.baseURL, tt.layer) 216 | if result != tt.expected { 217 | t.Errorf("Expected %s, got %s", tt.expected, result) 218 | } 219 | }) 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /FEATURES.md: -------------------------------------------------------------------------------- 1 | # DuckDB Tileserver Features 2 | 3 | A lightweight MVT (Mapbox Vector Tile) server for DuckDB with spatial extension support. 4 | 5 | ## API 6 | 7 | - [x] Determine response format from request headers `Content-Type`, `Accept` 8 | - [x] CORS support (configurable origins) 9 | - [x] GZIP encoding (via compress handler) 10 | - [x] HTTPS support (optional TLS configuration) 11 | - [x] Proxy support via configurable base URL and base path 12 | 13 | ## Endpoints 14 | 15 | ### Core Endpoints 16 | - [x] `/` - Interactive map viewer (HTML landing page) 17 | - [x] `/index.html`, `/home.html` - Alternative routes to landing page 18 | - [x] `/health` - Health check endpoint 19 | - [x] `/layers` - List all available spatial layers (JSON) 20 | - [x] `/layers.json` - Alternative route to layers endpoint 21 | 22 | ### Tile Endpoints 23 | - [x] `/tiles/{layer}.json` - TileJSON metadata endpoint 24 | - [x] `/tiles/{layer}/{z}/{x}/{y}.mvt` - MVT tile endpoint 25 | - [x] `/tiles/{layer}/{z}/{x}/{y}.pbf` - MVT tile endpoint (alternative extension) 26 | 27 | ### Cache Management Endpoints 28 | - [x] `/cache/stats` - GET cache statistics (hits, misses, hit rate, size, memory, evictions) 29 | - [x] `/cache/clear` - DELETE entire cache 30 | - [x] `/cache/layer/{layer}` - DELETE layer-specific tiles 31 | - [x] Optional API key authentication via `X-API-Key` header 32 | - [x] Configurable enable/disable of cache management endpoints 33 | 34 | ## Tile Features 35 | 36 | - [x] MVT (Mapbox Vector Tile) generation using DuckDB's `ST_AsMVT` function 37 | - [x] Auto-discovery of all tables with geometry columns 38 | - [x] Multi-SRID support with automatic transformation to Web Mercator (EPSG:3857) 39 | - [x] Validation of tile coordinates (z, x, y ranges) 40 | - [x] Empty tile handling (returns 204 No Content when no features in tile) 41 | - [x] TileJSON 2.2.0 specification support 42 | - [x] Geometry type detection and metadata 43 | - [x] Table schema and column metadata in TileJSON 44 | 45 | ## Cache Features 46 | 47 | - [x] LRU tile cache with configurable size and memory limits 48 | - [x] Cache statistics tracking (hits, misses, hit rate, evictions) 49 | - [x] Periodic cache statistics 
logging (every 5 minutes) 50 | - [x] Cache middleware for tile endpoints 51 | - [x] Browser cache control via `Cache-Control` headers 52 | - [x] Configurable cache enable/disable 53 | - [x] Memory-based eviction when cache exceeds limits 54 | - [x] Layer-specific cache clearing 55 | - [x] Full cache clearing 56 | - [x] Cache management API authentication with configurable API key 57 | - [x] Public and authenticated modes for cache endpoints 58 | 59 | ## Configuration 60 | 61 | - [x] Read config from TOML file 62 | - [x] Configuration file search paths (`/etc`, `./config`, `/config`) 63 | - [x] Environment variable configuration with `DUCKDBTS_` prefix 64 | - [x] Database connection path configuration 65 | - [x] Table include/exclude filters 66 | - [x] HTTP/HTTPS server settings (host, ports) 67 | - [x] TLS certificate and key file paths 68 | - [x] URL base and base path configuration 69 | - [x] CORS origins configuration 70 | - [x] Debug mode 71 | - [x] Assets path for HTML templates 72 | - [x] Request/write timeout configuration 73 | - [x] UI enable/disable toggle 74 | - [x] Cache configuration (enabled, max items, max memory, browser cache max-age) 75 | - [x] Cache endpoint security (disable routes, API key authentication) 76 | - [x] Metadata configuration (title, description) 77 | - [x] Website basemap URL configuration 78 | 79 | ## Operational 80 | 81 | - [x] Graceful shutdown with signal handling 82 | - [x] Request timeouts with context cancellation 83 | - [x] Database connection pooling 84 | - [x] Concurrent HTTP and HTTPS servers 85 | - [x] Timeout handler for long-running requests (returns 503 on timeout) 86 | - [x] Abort timeout on shutdown to prevent hanging 87 | 88 | ## Data Types 89 | 90 | - [x] All geometry types via DuckDB Spatial (POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION) 91 | - [x] Common scalar types: text, int, float, numeric 92 | - [x] Automatic detection of geometry columns 93 | - [x] First geometry column used when multiple exist (with warning) 94 | - [x] Support for tables with and without primary keys 95 | 96 | ## Tables / Views 97 | 98 | - [x] Table column schema discovery 99 | - [x] Support tables with geometry columns 100 | - [x] Support views with geometry columns 101 | - [x] Include/exclude published tables via configuration 102 | - [x] Automatic SRID detection 103 | - [x] Multi-SRID table support with transformation 104 | 105 | ## User Interface (HTML) 106 | 107 | - [x] Interactive HTML landing page with map viewer 108 | - [x] MapLibre GL JS-based map display 109 | - [x] Automatic loading of all available layers 110 | - [x] Layer visibility toggles 111 | - [x] Feature attribute display on click 112 | - [x] Layer list with geometry type indicators 113 | - [x] Configurable basemap URL 114 | - [x] Responsive design 115 | - [x] Development mode for template reloading 116 | 117 | ## Architecture 118 | 119 | ``` 120 | ┌─────────────────┐ 121 | │ HTTP Client │ 122 | │ (Browser/QGIS) │ 123 | └────────┬────────┘ 124 | │ 125 | ↓ 126 | ┌─────────────────┐ 127 | │ HTTP Handlers │ 128 | │ (Gorilla Mux) │ 129 | │ - CORS │ 130 | │ - GZIP │ 131 | │ - Timeout │ 132 | └────────┬────────┘ 133 | │ 134 | ↓ 135 | ┌─────────────────┐ 136 | │ Cache Layer │ 137 | │ (LRU Cache) │ 138 | └────────┬────────┘ 139 | │ 140 | ↓ 141 | ┌─────────────────┐ 142 | │ Tile Generator │ 143 | │ (ST_AsMVT) │ 144 | └────────┬────────┘ 145 | │ 146 | ↓ 147 | ┌─────────────────┐ 148 | │ DuckDB + SQL │ 149 | │ Spatial Ext. 
│ 150 | └─────────────────┘ 151 | ``` 152 | 153 | ## Performance Features 154 | 155 | 1. **Tile Caching**: LRU cache with configurable size and memory limits 156 | 2. **Spatial Indexing**: DuckDB Spatial automatically creates R-Tree indexes 157 | 3. **Connection Pooling**: Go database/sql package handles connection pooling 158 | 4. **Efficient Tile Generation**: Uses DuckDB's native `ST_AsMVT` function 159 | 5. **GZIP Compression**: Automatic response compression 160 | 6. **Request Timeouts**: Prevents long-running queries from blocking 161 | 7. **Browser Caching**: Configurable `Cache-Control` headers 162 | 163 | ## Technology Stack 164 | 165 | - **Language**: Go 1.24+ 166 | - **Database**: DuckDB with Spatial extension v1.4+ 167 | - **HTTP Framework**: Gorilla Mux 168 | - **Map Viewer**: MapLibre GL JS 169 | - **Tile Format**: Mapbox Vector Tiles (MVT/PBF) 170 | - **Configuration**: Viper (TOML + environment variables) 171 | - **Logging**: Logrus 172 | -------------------------------------------------------------------------------- /internal/conf/config.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | log "github.com/sirupsen/logrus" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | // Configuration for system 13 | var Configuration Config 14 | 15 | func setDefaultConfig() { 16 | viper.SetDefault("Server.HttpHost", "0.0.0.0") 17 | viper.SetDefault("Server.HttpPort", 9000) 18 | viper.SetDefault("Server.HttpsPort", 9001) 19 | viper.SetDefault("Server.TlsServerCertificateFile", "") 20 | viper.SetDefault("Server.TlsServerPrivateKeyFile", "") 21 | viper.SetDefault("Server.UrlBase", "") 22 | viper.SetDefault("Server.BasePath", "") 23 | viper.SetDefault("Server.CORSOrigins", "*") 24 | viper.SetDefault("Server.Debug", false) 25 | viper.SetDefault("Server.AssetsPath", "./assets") 26 | viper.SetDefault("Server.ReadTimeoutSec", 5) 27 | viper.SetDefault("Server.WriteTimeoutSec", 30) 28 | viper.SetDefault("Server.DisableUi", false) 29 | 30 | viper.SetDefault("Database.TableIncludes", []string{}) 31 | viper.SetDefault("Database.TableExcludes", []string{}) 32 | viper.SetDefault("Database.FunctionIncludes", []string{"postgisftw"}) 33 | viper.SetDefault("Database.MaxOpenConns", 25) 34 | viper.SetDefault("Database.MaxIdleConns", 5) 35 | viper.SetDefault("Database.ConnMaxLifetime", 3600) // 1 hour in seconds 36 | viper.SetDefault("Database.ConnMaxIdleTime", 600) // 10 minutes in seconds 37 | 38 | viper.SetDefault("Paging.LimitDefault", 10) 39 | viper.SetDefault("Paging.LimitMax", 1000) 40 | 41 | viper.SetDefault("Metadata.Title", "duckdb-tileserver") 42 | viper.SetDefault("Metadata.Description", "DuckDB Feature Server with Spatial Extension") 43 | 44 | viper.SetDefault("Website.BasemapUrl", "") 45 | 46 | viper.SetDefault("Cache.Enabled", true) 47 | viper.SetDefault("Cache.MaxItems", 10000) 48 | viper.SetDefault("Cache.MaxMemoryMB", 1024) 49 | viper.SetDefault("Cache.BrowserCacheMaxAge", 3600) // 1 hour in seconds 50 | viper.SetDefault("Cache.DisableApi", false) 51 | viper.SetDefault("Cache.ApiKey", "") 52 | } 53 | 54 | // Config for system 55 | type Config struct { 56 | Server Server 57 | Paging Paging 58 | Metadata Metadata 59 | Database Database 60 | Website Website 61 | Cache Cache 62 | } 63 | 64 | // Server config 65 | type Server struct { 66 | HttpHost string 67 | HttpPort int 68 | HttpsPort int 69 | TlsServerCertificateFile string 70 | TlsServerPrivateKeyFile string 71 | UrlBase string 72 | 
BasePath string 73 | CORSOrigins string 74 | Debug bool 75 | AssetsPath string 76 | ReadTimeoutSec int 77 | WriteTimeoutSec int 78 | DisableUi bool 79 | TransformFunctions []string 80 | } 81 | 82 | // Paging config 83 | type Paging struct { 84 | LimitDefault int 85 | LimitMax int 86 | } 87 | 88 | // Database config 89 | type Database struct { 90 | DatabasePath string 91 | TableIncludes []string 92 | TableExcludes []string 93 | FunctionIncludes []string 94 | MaxOpenConns int // Maximum number of open connections to the database 95 | MaxIdleConns int // Maximum number of idle connections in the pool 96 | ConnMaxLifetime int // Maximum lifetime of a connection in seconds 97 | ConnMaxIdleTime int // Maximum idle time of a connection in seconds 98 | } 99 | 100 | // Metadata config 101 | type Metadata struct { 102 | Title string //`mapstructure:"METADATA_TITLE"` 103 | Description string 104 | } 105 | 106 | type Website struct { 107 | BasemapUrl string 108 | } 109 | 110 | // Cache config 111 | type Cache struct { 112 | Enabled bool 113 | MaxItems int 114 | MaxMemoryMB int 115 | BrowserCacheMaxAge int // Browser cache max-age in seconds 116 | DisableApi bool // Disable cache management API endpoints 117 | ApiKey string // API key for cache management endpoints 118 | } 119 | 120 | // IsTLSEnabled tests whether TLS (HTTPS) is enabled 121 | func (conf *Config) IsTLSEnabled() bool { 122 | return conf.Server.TlsServerCertificateFile != "" && conf.Server.TlsServerPrivateKeyFile != "" 123 | } 124 | 125 | // InitConfig initializes the configuration from the config file 126 | func InitConfig(configFilename string, isDebug bool) { 127 | // --- defaults 128 | setDefaultConfig() 129 | 130 | if isDebug { 131 | viper.Set("Debug", true) 132 | } 133 | 134 | viper.SetEnvPrefix(AppConfig.EnvPrefix) 135 | viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) 136 | viper.AutomaticEnv() 137 | 138 | isExplicitConfigFile := configFilename != "" 139 | confFile := AppConfig.Name + ".toml" 140 | if configFilename != "" { 141 | viper.SetConfigFile(configFilename) 142 | } else { 143 | viper.SetConfigName(confFile) 144 | viper.SetConfigType("toml") 145 | viper.AddConfigPath("./config") 146 | viper.AddConfigPath("/config") 147 | viper.AddConfigPath("/etc") 148 | } 149 | err := viper.ReadInConfig() // Find and read the config file 150 | if err != nil { 151 | _, isConfigFileNotFound := err.(viper.ConfigFileNotFoundError) 152 | errConfRead := fmt.Errorf("fatal error reading config file: %s", err) 153 | isUseDefaultConfig := isConfigFileNotFound && !isExplicitConfigFile 154 | if isUseDefaultConfig { 155 | log.Debug(errConfRead) 156 | } else { 157 | log.Fatal(errConfRead) 158 | } 159 | } 160 | 161 | log.Infof("Using config file: %s", viper.ConfigFileUsed()) 162 | errUnM := viper.Unmarshal(&Configuration) 163 | if errUnM != nil { 164 | log.Fatal(fmt.Errorf("fatal error decoding config file: %v", errUnM)) 165 | } 166 | 167 | // Read environment variable database configuration 168 | // It takes precedence over config file (if any) 169 | // A blank value is ignored 170 | dbconnSrc := "config file" 171 | if dbPath := os.Getenv("DUCKDBTS_DATABASE_PATH"); dbPath != "" { 172 | Configuration.Database.DatabasePath = dbPath 173 | dbconnSrc = "environment variable DUCKDBTS_DATABASE_PATH" 174 | } else if dbPath := os.Getenv("DUCKDB_PATH"); dbPath != "" { 175 | // Keep backward compatibility 176 | log.Warn("DUCKDB_PATH environment variable is deprecated, use DUCKDBTS_DATABASE_PATH instead") 177 | Configuration.Database.DatabasePath = dbPath 178 |
dbconnSrc = "environment variable DUCKDB_PATH (deprecated)" 179 | } 180 | 181 | log.Infof("Using database connection info from %v", dbconnSrc) 182 | 183 | // sanitize the configuration 184 | Configuration.Server.BasePath = strings.TrimRight(Configuration.Server.BasePath, "/") 185 | } 186 | 187 | func DumpConfig() { 188 | log.Debugf("--- Configuration ---") 189 | //fmt.Printf("Viper: %v\n", viper.AllSettings()) 190 | //fmt.Printf("Config: %v\n", Configuration) 191 | var basemapURL = Configuration.Website.BasemapUrl 192 | if basemapURL == "" { 193 | basemapURL = "*** NO URL PROVIDED ***" 194 | } 195 | log.Debugf(" BasemapUrl = %v", basemapURL) 196 | log.Debugf(" TableIncludes = %v", Configuration.Database.TableIncludes) 197 | log.Debugf(" TableExcludes = %v", Configuration.Database.TableExcludes) 198 | log.Debugf(" FunctionIncludes = %v", Configuration.Database.FunctionIncludes) 199 | log.Debugf(" TransformFunctions = %v", Configuration.Server.TransformFunctions) 200 | } 201 | -------------------------------------------------------------------------------- /internal/service/service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "fmt" 7 | "net/http" 8 | "os" 9 | "os/signal" 10 | "time" 11 | 12 | "github.com/gorilla/handlers" 13 | "github.com/gorilla/mux" 14 | log "github.com/sirupsen/logrus" 15 | "github.com/tobilg/duckdb-tileserver/internal/cache" 16 | "github.com/tobilg/duckdb-tileserver/internal/conf" 17 | "github.com/tobilg/duckdb-tileserver/internal/data" 18 | ) 19 | 20 | var catalogInstance data.Catalog 21 | var router *mux.Router 22 | var server *http.Server 23 | var isTLSEnabled bool 24 | var serverTLS *http.Server 25 | var serviceInstance *Service 26 | 27 | // Service holds references to persistent objects 28 | type Service struct { 29 | cache *cache.TileCache 30 | } 31 | 32 | // logCacheStats periodically logs cache statistics 33 | func (s *Service) logCacheStats() { 34 | ticker := time.NewTicker(5 * time.Minute) 35 | defer ticker.Stop() 36 | 37 | for range ticker.C { 38 | if !s.cache.Enabled() { 39 | return 40 | } 41 | 42 | stats := s.cache.Stats() 43 | log.Infof("Cache stats: hits=%d misses=%d hit_rate=%.1f%% items=%d memory=%dMB evictions=%d", 44 | stats.Hits, 45 | stats.Misses, 46 | stats.HitRate, 47 | stats.Size, 48 | stats.MemoryBytes/1024/1024, 49 | stats.Evictions, 50 | ) 51 | } 52 | } 53 | 54 | // Initialize sets the service state from configuration 55 | func Initialize() { 56 | // No initialization needed for tileserver 57 | } 58 | 59 | func createServers() { 60 | confServ := conf.Configuration.Server 61 | 62 | bindAddress := fmt.Sprintf("%v:%v", confServ.HttpHost, confServ.HttpPort) 63 | bindAddressTLS := fmt.Sprintf("%v:%v", confServ.HttpHost, confServ.HttpsPort) 64 | // Use HTTPS only if server certificate and private key files specified 65 | isTLSEnabled = conf.Configuration.IsTLSEnabled() 66 | 67 | log.Infof("Serving HTTP at %s", formatBaseURL("http://", bindAddress, confServ.BasePath)) 68 | if isTLSEnabled { 69 | log.Infof("Serving HTTPS at %s", formatBaseURL("https://", bindAddressTLS, confServ.BasePath)) 70 | } 71 | log.Infof("CORS Allowed Origins: %v\n", conf.Configuration.Server.CORSOrigins) 72 | 73 | router = initRouter(confServ.BasePath) 74 | 75 | // writeTimeout is slightly longer than request timeout to allow writing error response 76 | timeoutSecRequest := conf.Configuration.Server.WriteTimeoutSec 77 | timeoutSecWrite := timeoutSecRequest + 1 78 | 79
| // ---- Handler chain -------- 80 | // set CORS handling according to config 81 | corsOpt := handlers.AllowedOrigins([]string{conf.Configuration.Server.CORSOrigins}) 82 | corsHeaders := handlers.AllowedHeaders([]string{"Content-Type", "Accept", "Authorization", "X-Requested-With", "X-API-Key"}) 83 | corsMethods := handlers.AllowedMethods([]string{"GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD"}) 84 | corsHandler := handlers.CORS(corsOpt, corsHeaders, corsMethods)(router) 85 | compressHandler := handlers.CompressHandler(corsHandler) 86 | 87 | // Use a TimeoutHandler to ensure a request does not run past the WriteTimeout duration. 88 | // This provides a context that allows cancellation to be propagated 89 | // down to the database driver. 90 | // If timeout expires, service returns 503 and a text message 91 | timeoutHandler := http.TimeoutHandler(compressHandler, 92 | time.Duration(timeoutSecRequest)*time.Second, 93 | "Request timeout") 94 | 95 | // more "production friendly" timeouts 96 | // https://blog.simon-frey.eu/go-as-in-golang-standard-net-http-config-will-break-your-production/#You_should_at_least_do_this_The_easy_path 97 | server = &http.Server{ 98 | ReadTimeout: time.Duration(conf.Configuration.Server.ReadTimeoutSec) * time.Second, 99 | WriteTimeout: time.Duration(timeoutSecWrite) * time.Second, 100 | Addr: bindAddress, 101 | Handler: timeoutHandler, 102 | } 103 | 104 | if isTLSEnabled { 105 | serverTLS = &http.Server{ 106 | ReadTimeout: time.Duration(conf.Configuration.Server.ReadTimeoutSec) * time.Second, 107 | WriteTimeout: time.Duration(timeoutSecWrite) * time.Second, 108 | Addr: bindAddressTLS, 109 | Handler: timeoutHandler, 110 | TLSConfig: &tls.Config{ 111 | MinVersion: tls.VersionTLS12, // Secure TLS versions only 112 | }, 113 | } 114 | } 115 | } 116 | 117 | // Serve starts the web service 118 | func Serve(catalog data.Catalog) { 119 | confServ := conf.Configuration.Server 120 | catalogInstance = catalog 121 | 122 | // Initialize tile cache 123 | var tileCache *cache.TileCache 124 | if conf.Configuration.Cache.Enabled { 125 | var err error 126 | tileCache, err = cache.NewTileCache( 127 | conf.Configuration.Cache.MaxItems, 128 | conf.Configuration.Cache.MaxMemoryMB, 129 | ) 130 | if err != nil { 131 | log.Warnf("Failed to initialize cache: %v (continuing without cache)", err) 132 | tileCache = cache.NewDisabledCache() 133 | } 134 | } else { 135 | log.Info("Tile cache disabled by configuration") 136 | tileCache = cache.NewDisabledCache() 137 | } 138 | 139 | // Create service instance 140 | svc := &Service{ 141 | cache: tileCache, 142 | } 143 | 144 | // Start periodic stats logging 145 | if tileCache.Enabled() { 146 | go svc.logCacheStats() 147 | } 148 | 149 | // Store service instance globally for handlers to access 150 | serviceInstance = svc 151 | 152 | createServers() 153 | 154 | log.Infof("==== Service: %s ====\n", conf.Configuration.Metadata.Title) 155 | 156 | // start http service 157 | go func() { 158 | // ListenAndServe returns http.ErrServerClosed when the server receives 159 | // a call to Shutdown(). Other errors are unexpected. 160 | if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { 161 | log.Fatal(err) 162 | } 163 | }() 164 | 165 | // start https service 166 | if isTLSEnabled { 167 | go func() { 168 | // ListenAndServe returns http.ErrServerClosed when the server receives 169 | // a call to Shutdown(). Other errors are unexpected. 
170 | if err := serverTLS.ListenAndServeTLS(confServ.TlsServerCertificateFile, confServ.TlsServerPrivateKeyFile); err != nil && err != http.ErrServerClosed { 171 | log.Fatal(err) 172 | } 173 | }() 174 | } 175 | 176 | // wait here for interrupt signal (^C) 177 | sig := make(chan os.Signal, 1) 178 | signal.Notify(sig, os.Interrupt) 179 | <-sig 180 | 181 | // Interrupt signal received: Start shutting down 182 | log.Infoln("Shutting down...") 183 | 184 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 185 | defer cancel() 186 | errConn := server.Shutdown(ctx) 187 | if errConn != nil { 188 | log.Warnf("Server connection failed to shutdown: %v", errConn.Error()) 189 | } 190 | if isTLSEnabled { 191 | errConnTls := serverTLS.Shutdown(ctx) 192 | if errConnTls != nil { 193 | log.Warnf("Server TLS connection failed to shutdown: %v", errConnTls.Error()) 194 | } 195 | } 196 | 197 | // abort after waiting long enough for service to shutdown gracefully 198 | // this terminates long-running DB queries, which otherwise block shutdown 199 | abortTimeoutSec := conf.Configuration.Server.WriteTimeoutSec + 10 200 | chanCancelFatal := FatalAfter(abortTimeoutSec, "Timeout on shutdown - aborting.") 201 | 202 | log.Debugln("Closing DB connections") 203 | catalogInstance.Close() 204 | 205 | log.Infoln("Service stopped.") 206 | // cancel the abort since it is not needed 207 | close(chanCancelFatal) 208 | } 209 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags: 8 | - 'v*' 9 | pull_request: 10 | branches: 11 | - main 12 | 13 | permissions: 14 | contents: write 15 | 16 | jobs: 17 | test: 18 | name: Test 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v5 23 | 24 | - name: Install Go 25 | uses: actions/setup-go@v6 26 | with: 27 | go-version: 1.24.x 28 | 29 | - name: Test 30 | run: make test 31 | 32 | build: 33 | name: Build 34 | needs: test 35 | strategy: 36 | matrix: 37 | include: 38 | - os: ubuntu-latest 39 | goos: linux 40 | goarch: amd64 41 | artifact_name: duckdb-tileserver 42 | asset_name: duckdb-tileserver-linux-amd64 43 | - os: macos-latest 44 | goos: darwin 45 | goarch: arm64 46 | artifact_name: duckdb-tileserver 47 | asset_name: duckdb-tileserver-darwin-arm64 48 | - os: windows-latest 49 | goos: windows 50 | goarch: amd64 51 | artifact_name: duckdb-tileserver.exe 52 | asset_name: duckdb-tileserver-windows-amd64.exe 53 | runs-on: ${{ matrix.os }} 54 | steps: 55 | - name: Checkout 56 | uses: actions/checkout@v5 57 | 58 | - name: Install Go 59 | uses: actions/setup-go@v6 60 | with: 61 | go-version: 1.24.x 62 | 63 | - name: Get version 64 | id: get_version 65 | shell: bash 66 | run: | 67 | if [[ $GITHUB_REF == refs/tags/* ]]; then 68 | VERSION=${GITHUB_REF#refs/tags/} 69 | else 70 | VERSION=${GITHUB_SHA::7} 71 | fi 72 | echo "version=$VERSION" >> $GITHUB_OUTPUT 73 | 74 | - name: Build 75 | env: 76 | GOOS: ${{ matrix.goos }} 77 | GOARCH: ${{ matrix.goarch }} 78 | run: make build 79 | 80 | - name: Create archive 81 | if: github.event_name == 'push' 82 | shell: bash 83 | run: | 84 | mkdir -p dist 85 | VERSION="${{ steps.get_version.outputs.version }}" 86 | if [ "${{ runner.os }}" = "Windows" ]; then 87 | FILENAME="dist/${{ matrix.asset_name }}-${VERSION}.zip" 88 | 7z a -tzip "$FILENAME" "${{ matrix.artifact_name }}" assets/ 
config/duckdb-tileserver.toml.example 89 | else 90 | FILENAME="dist/${{ matrix.asset_name }}-${VERSION}.tar.gz" 91 | tar -czf "$FILENAME" "${{ matrix.artifact_name }}" assets/ config/duckdb-tileserver.toml.example 92 | fi 93 | 94 | - name: Upload artifact 95 | if: github.event_name == 'push' 96 | uses: actions/upload-artifact@v5 97 | with: 98 | name: ${{ matrix.asset_name }} 99 | path: dist/* 100 | retention-days: 1 101 | 102 | release: 103 | name: Create Release 104 | needs: build 105 | runs-on: ubuntu-latest 106 | if: startsWith(github.ref, 'refs/tags/v') 107 | steps: 108 | - name: Checkout 109 | uses: actions/checkout@v5 110 | 111 | - name: Download all artifacts 112 | uses: actions/download-artifact@v4 113 | with: 114 | path: dist/ 115 | 116 | - name: Display structure 117 | run: ls -R dist/ 118 | 119 | - name: Create Release 120 | uses: softprops/action-gh-release@v2 121 | with: 122 | draft: false 123 | prerelease: false 124 | generate_release_notes: true 125 | files: | 126 | dist/**/*.zip 127 | dist/**/*.tar.gz 128 | env: 129 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 130 | 131 | docker: 132 | name: Build Docker Image 133 | needs: test 134 | runs-on: ubuntu-latest 135 | if: github.event_name == 'push' && github.repository == 'tobilg/duckdb-tileserver' 136 | strategy: 137 | matrix: 138 | arch: [amd64, arm64] 139 | steps: 140 | - name: Checkout 141 | uses: actions/checkout@v5 142 | 143 | - name: Set up Docker Buildx 144 | uses: docker/setup-buildx-action@v3 145 | 146 | - name: Derive version 147 | id: version 148 | run: | 149 | if [[ "${{ github.ref }}" == refs/tags/v* ]]; then 150 | VERSION=${GITHUB_REF#refs/tags/v} 151 | echo "version=$VERSION" >> $GITHUB_OUTPUT 152 | echo "is_release=true" >> $GITHUB_OUTPUT 153 | else 154 | echo "version=latest" >> $GITHUB_OUTPUT 155 | echo "is_release=false" >> $GITHUB_OUTPUT 156 | fi 157 | 158 | - name: Build ${{ matrix.arch }} 159 | run: make APPVERSION=${{ steps.version.outputs.version }} TARGETARCH=${{ matrix.arch }} multi-stage-docker 160 | 161 | - name: Login to DockerHub 162 | uses: docker/login-action@v3 163 | with: 164 | username: ${{ secrets.DOCKERHUB_USERNAME }} 165 | password: ${{ secrets.DOCKERHUB_TOKEN }} 166 | 167 | - name: Push ${{ matrix.arch }} image 168 | env: 169 | DOCKER_REPO: tobilg/duckdb-tileserver 170 | run: docker push --all-tags $DOCKER_REPO 171 | 172 | docker-manifest: 173 | name: Create Docker Manifests 174 | needs: docker 175 | runs-on: ubuntu-latest 176 | if: github.event_name == 'push' && github.repository == 'tobilg/duckdb-tileserver' 177 | steps: 178 | - name: Checkout 179 | uses: actions/checkout@v5 180 | 181 | - name: Derive version 182 | id: version 183 | run: | 184 | if [[ "${{ github.ref }}" == refs/tags/v* ]]; then 185 | VERSION=${GITHUB_REF#refs/tags/v} 186 | echo "version=$VERSION" >> $GITHUB_OUTPUT 187 | echo "is_release=true" >> $GITHUB_OUTPUT 188 | else 189 | echo "version=latest" >> $GITHUB_OUTPUT 190 | echo "is_release=false" >> $GITHUB_OUTPUT 191 | fi 192 | 193 | - name: Login to DockerHub 194 | uses: docker/login-action@v3 195 | with: 196 | username: ${{ secrets.DOCKERHUB_USERNAME }} 197 | password: ${{ secrets.DOCKERHUB_TOKEN }} 198 | 199 | - name: Create and push manifests 200 | env: 201 | DOCKER_REPO: tobilg/duckdb-tileserver 202 | VERSION: ${{ steps.version.outputs.version }} 203 | IS_RELEASE: ${{ steps.version.outputs.is_release }} 204 | run: | 205 | if [[ "$IS_RELEASE" == "true" ]]; then 206 | echo "Creating manifests for release version: $VERSION" 207 | 208 | # Create and push 
version-specific manifest 209 | docker manifest create $DOCKER_REPO:$VERSION \ 210 | $DOCKER_REPO:$VERSION-amd64 \ 211 | $DOCKER_REPO:$VERSION-arm64 212 | docker manifest push $DOCKER_REPO:$VERSION 213 | 214 | # Also update latest 215 | docker manifest create $DOCKER_REPO:latest \ 216 | $DOCKER_REPO:$VERSION-amd64 \ 217 | $DOCKER_REPO:$VERSION-arm64 218 | docker manifest push $DOCKER_REPO:latest 219 | else 220 | echo "Creating manifests for main branch" 221 | 222 | # Create and push latest manifest 223 | docker manifest create $DOCKER_REPO:latest \ 224 | $DOCKER_REPO:latest-amd64 \ 225 | $DOCKER_REPO:latest-arm64 226 | docker manifest push $DOCKER_REPO:latest 227 | fi 228 | -------------------------------------------------------------------------------- /internal/conf/config_test.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "reflect" 8 | "runtime" 9 | "testing" 10 | 11 | "github.com/spf13/viper" 12 | ) 13 | 14 | // TestTableIncludesEnvironmentVariable tests that TableIncludes can be set via environment variable 15 | func TestTableIncludesEnvironmentVariable(t *testing.T) { 16 | // Clear any existing environment variables 17 | defer clearConfigEnvVars() 18 | 19 | tests := []struct { 20 | name string 21 | envValue string 22 | expected []string 23 | }{ 24 | { 25 | name: "Single table", 26 | envValue: "public.table1", 27 | expected: []string{"public.table1"}, 28 | }, 29 | { 30 | name: "Multiple tables", 31 | envValue: "public,schema1.table1,table2", 32 | expected: []string{"public", "schema1.table1", "table2"}, 33 | }, 34 | { 35 | name: "Empty value", 36 | envValue: "", 37 | expected: []string{}, 38 | }, 39 | { 40 | name: "Schema only", 41 | envValue: "public", 42 | expected: []string{"public"}, 43 | }, 44 | { 45 | name: "Complex table names", 46 | envValue: "my_schema.my_table,another_schema.complex_table_name", 47 | expected: []string{"my_schema.my_table", "another_schema.complex_table_name"}, 48 | }, 49 | } 50 | 51 | for _, tt := range tests { 52 | t.Run(tt.name, func(t *testing.T) { 53 | // Clear all environment variables first 54 | clearConfigEnvVars() 55 | 56 | // Set environment variable 57 | if tt.envValue != "" { 58 | os.Setenv("DUCKDBTS_DATABASE_TABLEINCLUDES", tt.envValue) 59 | } 60 | 61 | // Reset viper for clean state 62 | viper.Reset() 63 | 64 | // Initialize config 65 | InitConfig("", false) 66 | 67 | // Check result 68 | equals(t, tt.expected, Configuration.Database.TableIncludes, "TableIncludes") 69 | 70 | // Clean up 71 | clearConfigEnvVars() 72 | }) 73 | } 74 | } 75 | 76 | // TestTableExcludesEnvironmentVariable tests that TableExcludes can be set via environment variable 77 | func TestTableExcludesEnvironmentVariable(t *testing.T) { 78 | // Clear any existing environment variables 79 | defer clearConfigEnvVars() 80 | 81 | tests := []struct { 82 | name string 83 | envValue string 84 | expected []string 85 | }{ 86 | { 87 | name: "Single table exclusion", 88 | envValue: "private.secrets", 89 | expected: []string{"private.secrets"}, 90 | }, 91 | { 92 | name: "Multiple table exclusions", 93 | envValue: "private,temp,logs.debug", 94 | expected: []string{"private", "temp", "logs.debug"}, 95 | }, 96 | { 97 | name: "Empty value", 98 | envValue: "", 99 | expected: []string{}, 100 | }, 101 | { 102 | name: "Schema exclusion", 103 | envValue: "temp", 104 | expected: []string{"temp"}, 105 | }, 106 | { 107 | name: "Complex exclusion patterns", 108 | envValue: 
"temp_schema,staging.test_data,logs.debug_logs", 109 | expected: []string{"temp_schema", "staging.test_data", "logs.debug_logs"}, 110 | }, 111 | { 112 | name: "System and private exclusions", 113 | envValue: "system,private,internal.migrations", 114 | expected: []string{"system", "private", "internal.migrations"}, 115 | }, 116 | } 117 | 118 | for _, tt := range tests { 119 | t.Run(tt.name, func(t *testing.T) { 120 | // Clear all environment variables first 121 | clearConfigEnvVars() 122 | 123 | // Set environment variable 124 | if tt.envValue != "" { 125 | os.Setenv("DUCKDBTS_DATABASE_TABLEEXCLUDES", tt.envValue) 126 | } 127 | 128 | // Reset viper for clean state 129 | viper.Reset() 130 | 131 | // Initialize config 132 | InitConfig("", false) 133 | 134 | // Check result 135 | equals(t, tt.expected, Configuration.Database.TableExcludes, "TableExcludes") 136 | 137 | // Clean up 138 | clearConfigEnvVars() 139 | }) 140 | } 141 | } 142 | 143 | // TestConfigFileOverriddenByEnvironment tests that environment variables take precedence over config file 144 | func TestConfigFileOverriddenByEnvironment(t *testing.T) { 145 | clearConfigEnvVars() 146 | defer clearConfigEnvVars() 147 | 148 | // Create a temporary config file 149 | configContent := ` 150 | [Database] 151 | TableIncludes = ["file_table1", "file_table2"] 152 | TableExcludes = ["file_exclude"] 153 | ` 154 | 155 | tempDir, err := os.MkdirTemp("", "duckdb-tileserver_test") 156 | if err != nil { 157 | t.Fatal(err) 158 | } 159 | defer os.RemoveAll(tempDir) 160 | 161 | configFile := filepath.Join(tempDir, "test_config.toml") 162 | err = os.WriteFile(configFile, []byte(configContent), 0644) 163 | if err != nil { 164 | t.Fatal(err) 165 | } 166 | 167 | // Set environment variables that should override config file 168 | os.Setenv("DUCKDBTS_DATABASE_TABLEINCLUDES", "env_table1,env_table2") 169 | os.Setenv("DUCKDBTS_DATABASE_TABLEEXCLUDES", "env_exclude") 170 | defer func() { 171 | os.Unsetenv("DUCKDBTS_DATABASE_TABLEINCLUDES") 172 | os.Unsetenv("DUCKDBTS_DATABASE_TABLEEXCLUDES") 173 | }() 174 | 175 | viper.Reset() 176 | InitConfig(configFile, false) 177 | 178 | // Environment variables should take precedence 179 | expectedIncludes := []string{"env_table1", "env_table2"} 180 | expectedExcludes := []string{"env_exclude"} 181 | 182 | equals(t, expectedIncludes, Configuration.Database.TableIncludes, "TableIncludes from env") 183 | equals(t, expectedExcludes, Configuration.Database.TableExcludes, "TableExcludes from env") 184 | } 185 | 186 | // TestConfigFileOnly tests that config file values are used when no environment variables are set 187 | func TestConfigFileOnly(t *testing.T) { 188 | clearConfigEnvVars() 189 | defer clearConfigEnvVars() 190 | 191 | configContent := ` 192 | [Database] 193 | TableIncludes = ["config_table1", "config_table2"] 194 | TableExcludes = ["config_exclude"] 195 | ` 196 | 197 | tempDir, err := os.MkdirTemp("", "duckdb-tileserver_test") 198 | if err != nil { 199 | t.Fatal(err) 200 | } 201 | defer os.RemoveAll(tempDir) 202 | 203 | configFile := filepath.Join(tempDir, "test_config.toml") 204 | err = os.WriteFile(configFile, []byte(configContent), 0644) 205 | if err != nil { 206 | t.Fatal(err) 207 | } 208 | 209 | viper.Reset() 210 | InitConfig(configFile, false) 211 | 212 | expectedIncludes := []string{"config_table1", "config_table2"} 213 | expectedExcludes := []string{"config_exclude"} 214 | 215 | equals(t, expectedIncludes, Configuration.Database.TableIncludes, "TableIncludes from config") 216 | equals(t, expectedExcludes, 
Configuration.Database.TableExcludes, "TableExcludes from config") 217 | } 218 | 219 | // TestDefaultValues tests that default values are used when no config file or environment variables are set 220 | func TestDefaultValues(t *testing.T) { 221 | clearConfigEnvVars() 222 | defer clearConfigEnvVars() 223 | 224 | viper.Reset() 225 | InitConfig("", false) 226 | 227 | // Should have empty slices as defaults 228 | equals(t, []string{}, Configuration.Database.TableIncludes, "Default TableIncludes") 229 | equals(t, []string{}, Configuration.Database.TableExcludes, "Default TableExcludes") 230 | } 231 | 232 | // TestEnvironmentVariableFormat tests various formats for the environment variable 233 | func TestEnvironmentVariableFormat(t *testing.T) { 234 | clearConfigEnvVars() 235 | defer clearConfigEnvVars() 236 | 237 | tests := []struct { 238 | name string 239 | envValue string 240 | expected []string 241 | }{ 242 | { 243 | name: "No spaces", 244 | envValue: "table1,table2,table3", 245 | expected: []string{"table1", "table2", "table3"}, 246 | }, 247 | { 248 | name: "With spaces (Viper doesn't trim)", 249 | envValue: "table1, table2 , table3", 250 | expected: []string{"table1", " table2 ", " table3"}, 251 | }, 252 | { 253 | name: "Single item", 254 | envValue: "single_table", 255 | expected: []string{"single_table"}, 256 | }, 257 | { 258 | name: "Mixed schema.table and table only", 259 | envValue: "schema1.table1,table2,schema2.table3", 260 | expected: []string{"schema1.table1", "table2", "schema2.table3"}, 261 | }, 262 | } 263 | 264 | for _, tt := range tests { 265 | t.Run(tt.name, func(t *testing.T) { 266 | clearConfigEnvVars() 267 | os.Setenv("DUCKDBTS_DATABASE_TABLEINCLUDES", tt.envValue) 268 | 269 | viper.Reset() 270 | InitConfig("", false) 271 | 272 | // Check that configuration matches expected values 273 | equals(t, tt.expected, Configuration.Database.TableIncludes, "TableIncludes") 274 | }) 275 | } 276 | } 277 | 278 | // Helper function to clear all configuration-related environment variables 279 | func clearConfigEnvVars() { 280 | envVars := []string{ 281 | "DUCKDBTS_DATABASE_TABLEINCLUDES", 282 | "DUCKDBTS_DATABASE_TABLEEXCLUDES", 283 | "DUCKDBTS_DATABASE_PATH", 284 | "DUCKDBTS_DATABASE_TABLENAME", 285 | "DUCKDBTS_SERVER_HTTPPORT", 286 | "DUCKDBTS_SERVER_DEBUG", 287 | } 288 | 289 | for _, envVar := range envVars { 290 | os.Unsetenv(envVar) 291 | } 292 | 293 | // Also clear the global Configuration variable 294 | Configuration = Config{} 295 | } 296 | 297 | // equals fails the test if exp is not equal to act. 
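// It prints the calling test's file and line (via runtime.Caller) together with
// the expected and actual values, then stops the test immediately with FailNow.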
298 | func equals(tb testing.TB, exp, act interface{}, msg string) { 299 | if !reflect.DeepEqual(exp, act) { 300 | _, file, line, _ := runtime.Caller(1) 301 | fmt.Printf("%s:%d: %s - expected: %#v; got: %#v\n", filepath.Base(file), line, msg, exp, act) 302 | tb.FailNow() 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /internal/data/catalog_db_fun.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "encoding/json" 7 | "fmt" 8 | "time" 9 | 10 | log "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // FunctionIDColumnName is the name for a function-supplied ID 14 | const FunctionIDColumnName = "id" 15 | 16 | const SchemaPostGISFTW = "postgisftw" 17 | 18 | func (cat *CatalogDB) Functions() ([]*Function, error) { 19 | cat.refreshFunctions(true) 20 | return cat.functions, nil 21 | } 22 | 23 | func (cat *CatalogDB) FunctionByName(name string) (*Function, error) { 24 | cat.refreshFunctions(false) 25 | fn, ok := cat.functionMap[name] 26 | if !ok { 27 | return nil, nil 28 | } 29 | return fn, nil 30 | } 31 | 32 | func (cat *CatalogDB) refreshFunctions(force bool) { 33 | // TODO: refresh on timed basis? 34 | if force || !isFunctionsLoaded { 35 | cat.loadFunctions() 36 | } 37 | isFunctionsLoaded = true 38 | } 39 | 40 | func (cat *CatalogDB) loadFunctions() { 41 | cat.functions, cat.functionMap = readFunctionDefs(cat.dbconn) 42 | } 43 | 44 | func readFunctionDefs(db *sql.DB) ([]*Function, map[string]*Function) { 45 | sql := sqlFunctions() 46 | log.Debugf("Load function catalog:\n%v", sql) 47 | rows, err := db.Query(sql) 48 | if err != nil { 49 | log.Fatal(err) 50 | } 51 | var functions []*Function 52 | functionMap := make(map[string]*Function) 53 | for rows.Next() { 54 | fn := scanFunctionDef(rows) 55 | // TODO: for now only show geometry functions 56 | //if fn.IsGeometryFunction() { 57 | functions = append(functions, fn) 58 | functionMap[fn.ID] = fn 59 | //} 60 | } 61 | // Check for errors from iterating over rows. 
62 | if err := rows.Err(); err != nil { 63 | log.Fatal(err) 64 | } 65 | rows.Close() 66 | return functions, functionMap 67 | } 68 | 69 | func scanFunctionDef(rows *sql.Rows) *Function { 70 | var ( 71 | id, schema, name, description string 72 | inNamesStr, inTypesStr, inDefaultsStr, outNamesStr, outTypesStr string 73 | ) 74 | 75 | err := rows.Scan(&id, &schema, &name, &description, 76 | &inNamesStr, &inTypesStr, &inDefaultsStr, &outNamesStr, &outTypesStr) 77 | if err != nil { 78 | log.Fatalf("Error reading function catalog: %v", err) 79 | } 80 | 81 | // Parse JSON arrays from strings 82 | inNames := parseJSONStringArray(inNamesStr) 83 | inTypes := parseJSONStringArray(inTypesStr) 84 | inDefaultsDB := parseJSONStringArray(inDefaultsStr) 85 | numNoDefault := len(inNames) - len(inDefaultsDB) 86 | inDefaults := extendLeft(inDefaultsDB, len(inNames)) 87 | outNames := parseJSONStringArray(outNamesStr) 88 | outTypes := parseJSONStringArray(outTypesStr) 89 | outJSONTypes := toJSONTypeFromDuckDBArray(outTypes) 90 | 91 | inTypeMap := make(map[string]string) 92 | addTypes(inTypeMap, inNames, inTypes) 93 | 94 | datatypes := make(map[string]string) 95 | addTypes(datatypes, inNames, inTypes) 96 | addTypes(datatypes, outNames, outTypes) 97 | 98 | // synthesize a description if none provided 99 | if description == "" { 100 | description = fmt.Sprintf("The function %v", id) 101 | } 102 | 103 | geomCol := geometryColumn(outNames, datatypes) 104 | 105 | funDef := Function{ 106 | ID: id, 107 | Schema: schema, 108 | Name: name, 109 | Description: description, 110 | InNames: inNames, 111 | InDbTypes: inTypes, 112 | InTypeMap: inTypeMap, 113 | InDefaults: inDefaults, 114 | NumNoDefault: numNoDefault, 115 | OutNames: outNames, 116 | OutDbTypes: outTypes, 117 | OutJSONTypes: outJSONTypes, 118 | Types: datatypes, 119 | GeometryColumn: geomCol, 120 | } 121 | //fmt.Printf("DEBUG: Function definitions: %v\n", funDef) 122 | return &funDef 123 | } 124 | 125 | // parseJSONStringArray parses a JSON string array like '["a","b","c"]' into []string 126 | func parseJSONStringArray(jsonStr string) []string { 127 | if jsonStr == "" || jsonStr == "[]" { 128 | return []string{} 129 | } 130 | var result []string 131 | err := json.Unmarshal([]byte(jsonStr), &result) 132 | if err != nil { 133 | log.Warnf("Error parsing JSON array '%s': %v", jsonStr, err) 134 | return []string{} 135 | } 136 | return result 137 | } 138 | 139 | func addTypes(typeMap map[string]string, names []string, types []string) { 140 | for i, name := range names { 141 | typeMap[name] = types[i] 142 | } 143 | } 144 | func geometryColumn(names []string, types map[string]string) string { 145 | // TODO: extract from outNames, outTypes 146 | for _, name := range names { 147 | if types[name] == DuckDBTypeGeometry { 148 | return name 149 | } 150 | } 151 | return "" 152 | } 153 | 154 | // toArray is no longer needed - replaced by parseJSONStringArray 155 | 156 | // extendLeft extends an array to have given size, with original contents right-aligned 157 | func extendLeft(arr []string, size int) []string { 158 | if size <= len(arr) { 159 | return arr 160 | } 161 | // create array of requested size and right-justify input 162 | arr2 := make([]string, size) 163 | offset := size - len(arr) 164 | for i := 0; i < len(arr); i++ { 165 | arr2[i+offset] = arr[i] 166 | } 167 | return arr2 168 | } 169 | 170 | func (cat *CatalogDB) FunctionFeatures(ctx context.Context, name string, args map[string]string, param *QueryParam) ([]string, error) { 171 | fn, err := cat.FunctionByName(name) 
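// FunctionByName returns (nil, nil) when the name is not present in the catalog,
// so a nil fn with a nil error is treated as "function not found" below.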
172 | if err != nil || fn == nil { 173 | return nil, err 174 | } 175 | errArg := checkArgsValid(fn, args) 176 | if errArg != nil { 177 | log.Debug("ERROR: " + errArg.Error()) 178 | return nil, errArg 179 | } 180 | propCols := removeNames(param.Columns, fn.GeometryColumn, "") 181 | idColIndex := indexOfName(propCols, FunctionIDColumnName) 182 | sql, argValues := sqlGeomFunction(fn, args, propCols, param) 183 | log.Debugf("Function features query: %v", sql) 184 | log.Debugf("Function %v Args: %v", name, argValues) 185 | features, err := readFeaturesWithArgs(ctx, cat.dbconn, sql, argValues, idColIndex, propCols) 186 | return features, err 187 | } 188 | 189 | func (cat *CatalogDB) FunctionData(ctx context.Context, name string, args map[string]string, param *QueryParam) ([]map[string]interface{}, error) { 190 | fn, err := cat.FunctionByName(name) 191 | if err != nil || fn == nil { 192 | return nil, err 193 | } 194 | errArg := checkArgsValid(fn, args) 195 | if errArg != nil { 196 | return nil, errArg 197 | } 198 | propCols := param.Columns 199 | sql, argValues := sqlFunction(fn, args, propCols, param) 200 | log.Debugf("Function data query: %v", sql) 201 | log.Debugf("Function %v Args: %v", name, argValues) 202 | data, err := readDataWithArgs(ctx, cat.dbconn, propCols, sql, argValues) 203 | return data, err 204 | } 205 | 206 | // checkArgsValid verifies that the arguments supplied in the query parameters are declared parameters of the function. 207 | // Argument values are stored as strings, and rely on DuckDB to convert 208 | // them to the actual types required by the function 209 | func checkArgsValid(fn *Function, args map[string]string) error { 210 | for argName := range args { 211 | if _, ok := fn.InTypeMap[argName]; !ok { 212 | return fmt.Errorf("'%v' is not a parameter of function '%v'", argName, fn.ID) 213 | } 214 | } 215 | return nil 216 | } 217 | 218 | func removeNames(names []string, ex1 string, ex2 string) []string { 219 | var newNames []string 220 | for _, name := range names { 221 | if name != ex1 && name != ex2 { 222 | newNames = append(newNames, name) 223 | } 224 | } 225 | return newNames 226 | } 227 | 228 | func readDataWithArgs(ctx context.Context, db *sql.DB, propCols []string, sql string, args []interface{}) ([]map[string]interface{}, error) { 229 | start := time.Now() 230 | rows, err := db.QueryContext(ctx, sql, args...) 231 | if err != nil { 232 | log.Warnf("Error running Data query: %v", err) 233 | return nil, err 234 | } 235 | defer rows.Close() 236 | data := scanData(ctx, rows, propCols) 237 | log.Debugf(fmtQueryStats, len(data), time.Since(start)) 238 | return data, nil 239 | } 240 | 241 | func scanData(ctx context.Context, rows *sql.Rows, propCols []string) []map[string]interface{} { 242 | // init data array to empty (not nil) 243 | var data []map[string]interface{} = []map[string]interface{}{} 244 | for rows.Next() { 245 | obj := scanDataRow(rows, propCols) 246 | //log.Println(feature) 247 | data = append(data, obj) 248 | } 249 | // context check done outside rows loop, 250 | // because a long-running function might not produce any rows before timeout 251 | if err := ctx.Err(); err != nil { 252 | //log.Debugf("Context error scanning Features: %v", err) 253 | return data 254 | } 255 | // Check for errors from iterating over rows. 256 | if err := rows.Err(); err != nil { 257 | log.Warnf("Error scanning Data rows: %v", err) 258 | // TODO: return nil here ?
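// (for now the rows scanned so far are still returned below, even if iteration failed)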
259 | } 260 | return data 261 | } 262 | 263 | func scanDataRow(rows *sql.Rows, propNames []string) map[string]interface{} { 264 | // Get column names to dynamically scan 265 | columns, err := rows.Columns() 266 | if err != nil { 267 | log.Warnf("Error getting columns: %v", err) 268 | return nil 269 | } 270 | 271 | // Create a slice to hold values 272 | values := make([]interface{}, len(columns)) 273 | valuePtrs := make([]interface{}, len(columns)) 274 | for i := range values { 275 | valuePtrs[i] = &values[i] 276 | } 277 | 278 | err = rows.Scan(valuePtrs...) 279 | if err != nil { 280 | log.Warnf("Error getting Data row values: %v", err) 281 | return nil 282 | } 283 | 284 | //fmt.Println(geom) 285 | props := extractProperties(values, 0, propNames) 286 | return props 287 | } 288 | -------------------------------------------------------------------------------- /testing/test_api_endpoints.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Comprehensive API endpoint testing for duckdb-tileserver 4 | # This script tests various API endpoints based on the test cases from pgfs_test.md 5 | 6 | set -e 7 | 8 | echo "🧪 DuckDB FeatureServ API Testing Suite" 9 | echo "=======================================" 10 | 11 | # Configuration 12 | HOST="localhost:9000" 13 | BASE_URL="http://$HOST" 14 | TEST_DB="test_spatial.duckdb" 15 | 16 | # Colors for output 17 | RED='\033[0;31m' 18 | GREEN='\033[0;32m' 19 | YELLOW='\033[1;33m' 20 | BLUE='\033[0;34m' 21 | NC='\033[0m' # No Color 22 | 23 | # Test counter 24 | TESTS_PASSED=0 25 | TESTS_FAILED=0 26 | 27 | # Helper function to run a test 28 | run_test() { 29 | local test_name="$1" 30 | local url="$2" 31 | local expected_pattern="$3" 32 | 33 | echo -e "${BLUE}Testing:${NC} $test_name" 34 | echo -e "${YELLOW}URL:${NC} $url" 35 | 36 | response=$(curl -s "$url" 2>/dev/null) 37 | status=$? 38 | 39 | if [ $status -eq 0 ]; then 40 | if [ -n "$expected_pattern" ]; then 41 | if echo "$response" | grep -q "$expected_pattern"; then 42 | echo -e "${GREEN}✓ PASS${NC} - Response contains expected pattern" 43 | TESTS_PASSED=$((TESTS_PASSED + 1)) 44 | else 45 | echo -e "${RED}✗ FAIL${NC} - Response doesn't contain expected pattern: $expected_pattern" 46 | echo "Response: $(echo "$response" | head -c 200)..." 47 | TESTS_FAILED=$((TESTS_FAILED + 1)) 48 | fi 49 | else 50 | echo -e "${GREEN}✓ PASS${NC} - Request successful" 51 | TESTS_PASSED=$((TESTS_PASSED + 1)) 52 | fi 53 | else 54 | echo -e "${RED}✗ FAIL${NC} - Request failed" 55 | TESTS_FAILED=$((TESTS_FAILED + 1)) 56 | fi 57 | echo "" 58 | } 59 | 60 | # Check if server is running 61 | echo "📡 Checking if server is running..." 62 | if ! 
curl -s "$BASE_URL/collections" >/dev/null 2>&1; then 63 | echo "❌ Server not running at $BASE_URL" 64 | echo "Please start the server with: ./duckdb-tileserver --database-path $TEST_DB" 65 | exit 1 66 | fi 67 | echo "✅ Server is running" 68 | echo "" 69 | 70 | # Test Categories 71 | 72 | echo "🏠 === BASIC ENDPOINT TESTS ===" 73 | run_test "Service landing page" "$BASE_URL/" "duckdb-featureserv" 74 | run_test "Collections endpoint" "$BASE_URL/collections" "test_" 75 | run_test "Collections JSON format" "$BASE_URL/collections.json" "collections" 76 | run_test "API documentation" "$BASE_URL/api" "openapi" 77 | 78 | echo "📋 === COLLECTION METADATA TESTS ===" 79 | run_test "Individual collection metadata" "$BASE_URL/collections/test_geom" "test_geom" 80 | run_test "Collection with CRS data" "$BASE_URL/collections/test_crs" "test_crs" 81 | run_test "Collection with JSON data" "$BASE_URL/collections/test_json" "test_json" 82 | 83 | echo "🗺️ === FEATURE RETRIEVAL TESTS ===" 84 | run_test "All features from test_geom" "$BASE_URL/collections/test_geom/items" "FeatureCollection" 85 | run_test "Limited features (5)" "$BASE_URL/collections/test_crs/items?limit=5" "features" 86 | run_test "Features with offset" "$BASE_URL/collections/test_crs/items?limit=5&offset=10" "features" 87 | run_test "JSON format features" "$BASE_URL/collections/test_json/items.json" "Point" 88 | run_test "HTML format features" "$BASE_URL/collections/test_geom/items.html" "html" 89 | run_test "JSON format with properties" "$BASE_URL/collections/test_json/items.json?properties=id,val_json" "val_json" 90 | run_test "HTML format with properties" "$BASE_URL/collections/test_crs/items.html?properties=id,name&limit=5" "html" 91 | 92 | echo "🎯 === PROPERTY SELECTION TESTS ===" 93 | run_test "Select specific properties" "$BASE_URL/collections/test_names/items?properties=id,colCamelCase" "colCamelCase" 94 | run_test "Select geometry and ID only" "$BASE_URL/collections/test_geom/items?properties=id" "geometry" 95 | 96 | echo "📦 === BOUNDING BOX TESTS ===" 97 | run_test "Simple bbox filter" "$BASE_URL/collections/test_crs/items?bbox=1000000,400000,1010000,410000" "features" 98 | run_test "Larger bbox filter" "$BASE_URL/collections/test_crs/items?bbox=1000000,400000,1030000,430000" "features" 99 | run_test "Point data bbox" "$BASE_URL/collections/test_json/items?bbox=0.5,0.5,2.5,2.5" "Point" 100 | run_test "Empty bbox result" "$BASE_URL/collections/test_json/items?bbox=100,100,101,101" "FeatureCollection" 101 | 102 | echo "🔍 === CQL FILTER TESTS ===" 103 | run_test "ID range filter" "$BASE_URL/collections/test_crs/items?filter=id%20BETWEEN%201%20AND%205" "features" 104 | run_test "String LIKE filter" "$BASE_URL/collections/test_crs/items?filter=name%20LIKE%20%271_%25%27" "features" 105 | run_test "NOT IN filter" "$BASE_URL/collections/test_crs/items?filter=NOT%20id%20IN%20(1,2,3)" "features" 106 | run_test "NOT id In filter (original syntax)" "$BASE_URL/collections/test_crs/items?filter=NOT%20id%20In%20(1,2,3)" "features" 107 | run_test "Equality filter" "$BASE_URL/collections/test_geom/items?filter=data%20=%20%27aaa%27" "Point" 108 | run_test "String BETWEEN filter" "$BASE_URL/collections/test_crs/items?filter=name%20BETWEEN%20%270_0%27%20AND%20%272_2%27" "features" 109 | run_test "String LIKE with wildcards" "$BASE_URL/collections/test_crs/items?filter=name%20LIKE%20%27%25_0%27" "features" 110 | 111 | echo "🌍 === SPATIAL FILTER TESTS ===" 112 | run_test "Point intersection" 
"$BASE_URL/collections/test_geom/items?filter=INTERSECTS(geom,%20POINT(1%201))" "features" 113 | run_test "LineString intersection" "$BASE_URL/collections/test_geom/items?filter=INTERSECTS(geom,%20LINESTRING(1%201,%202%202))" "features" 114 | run_test "Geometry within distance" "$BASE_URL/collections/test_json/items?filter=DWITHIN(geom,%20POINT(1.5%201.5),%201)" "features" 115 | run_test "DWITHIN with Point coordinates" "$BASE_URL/collections/test_crs/items?filter=DWITHIN(geom,POINT(1005000%20405000),50000)" "features" 116 | run_test "Large distance DWITHIN" "$BASE_URL/collections/test_crs/items?filter=DWITHIN(geom,POINT(1000000%20400000),60000)&limit=100" "features" 117 | run_test "CROSSES spatial operator" "$BASE_URL/collections/test_crs/items?filter=CROSSES(geom,%20LINESTRING(1000000%20400000,1100000%20500000))" "FeatureCollection" 118 | run_test "ENVELOPE intersection" "$BASE_URL/collections/test_crs/items?filter=INTERSECTS(geom,%20ENVELOPE(1000000,400000,1100000,500000))&limit=100" "features" 119 | run_test "DWITHIN Point with encoding" "$BASE_URL/collections/test_json/items?filter=DWITHIN(geom,POINT(0%252%201),10)" "FeatureCollection" 120 | run_test "DWITHIN LineString geometry" "$BASE_URL/collections/test_crs/items?filter=DWITHIN(geom,LINESTRING(1000000%20400000,1010000%20410000),10000)&limit=1000" "FeatureCollection" 121 | 122 | echo "🔢 === ARRAY DATA TESTS ===" 123 | run_test "Array properties" "$BASE_URL/collections/test_arr/items?properties=id,val_int,val_txt" "val_int" 124 | run_test "All array data" "$BASE_URL/collections/test_arr/items" "val_bool" 125 | 126 | echo "📄 === JSON DATA TESTS ===" 127 | run_test "JSON properties" "$BASE_URL/collections/test_json/items?properties=id,val_json" "val_json" 128 | run_test "JSON with geometry" "$BASE_URL/collections/test_json/items" "coordinates" 129 | 130 | echo "🔀 === MIXED GEOMETRY TESTS ===" 131 | run_test "Point and LineString" "$BASE_URL/collections/test_geom/items" "Point.*LineString" 132 | 133 | echo "📐 === COORDINATE SYSTEM TESTS ===" 134 | run_test "Default CRS features" "$BASE_URL/collections/test_json/items" "coordinates" 135 | run_test "Custom CRS data" "$BASE_URL/collections/test_crs/items?limit=1" "1000000" 136 | run_test "SRID 0 features" "$BASE_URL/collections/test_srid0/items?limit=3" "coordinates" 137 | run_test "CRS parameter test" "$BASE_URL/collections/test_crs/items.json?crs=3005&limit=5" "features" 138 | run_test "CRS with bbox-crs small area" "$BASE_URL/collections/test_crs/items.json?crs=3005&bbox-crs=3005&bbox=1000000,400000,1010000,410000" "FeatureCollection" 139 | run_test "CRS with bbox-crs larger area" "$BASE_URL/collections/test_crs/items.json?crs=3005&bbox-crs=3005&bbox=1000000,400000,1030000,430000" "features" 140 | 141 | echo "🗂️ === FILTER-CRS TESTS ===" 142 | run_test "Filter-CRS with DWITHIN geo coords" "$BASE_URL/collections/test_crs/items.html?filter=DWITHIN(geom,POINT(-124.6%2049.3),40000)&limit=100" "html" 143 | run_test "Filter-CRS with specific CRS" "$BASE_URL/collections/test_crs/items.html?filter=DWITHIN(geom,POINT(1000000%20400000),60000)&filter-crs=3005&limit=100" "html" 144 | run_test "Filter-CRS with ENVELOPE" "$BASE_URL/collections/test_crs/items.html?filter=INTERSECTS(geom,%20ENVELOPE(1000000,400000,1100000,500000))&filter-crs=3005&limit=100" "html" 145 | 146 | echo "📏 === COLUMN NAME TESTS ===" 147 | run_test "Quoted column names" "$BASE_URL/collections/test_names/items?properties=id,\"colCamelCase\"" "colCamelCase" 148 | run_test "Case sensitive filter" 
"$BASE_URL/collections/test_names/items?filter=\"colCamelCase\"%20=%201" "features" 149 | run_test "Column names from original test" "$BASE_URL/collections/test_names/items.json?properties=id,colCamelCase" "colCamelCase" 150 | 151 | echo "⚠️ === ERROR HANDLING TESTS ===" 152 | run_test "Non-existent collection" "$BASE_URL/collections/nonexistent/items" "Collection not found" 153 | run_test "Invalid property" "$BASE_URL/collections/test_geom/items?properties=invalid_column" "FeatureCollection" 154 | run_test "Malformed filter" "$BASE_URL/collections/test_geom/items?filter=invalid%20syntax" "CQL syntax error" 155 | 156 | echo "⚙️ === FUNCTIONS TESTS ===" 157 | run_test "Functions endpoint" "$BASE_URL/functions" "functions" 158 | run_test "Non-existent function" "$BASE_URL/functions/nonexistent_function/items.json" "Function not found" 159 | 160 | echo "🚀 === PERFORMANCE TESTS ===" 161 | run_test "Large result set" "$BASE_URL/collections/test_crs/items" "features" 162 | run_test "Combined filter and pagination" "$BASE_URL/collections/test_crs/items?filter=id%20%3E%2050&limit=10" "features" 163 | run_test "Large limit test" "$BASE_URL/collections/test_crs/items?limit=1000" "features" 164 | 165 | echo "" 166 | echo "📊 === TEST SUMMARY ===" 167 | echo -e "${GREEN}Tests Passed: $TESTS_PASSED${NC}" 168 | echo -e "${RED}Tests Failed: $TESTS_FAILED${NC}" 169 | echo "Total Tests: $((TESTS_PASSED + TESTS_FAILED))" 170 | 171 | if [ $TESTS_FAILED -eq 0 ]; then 172 | echo -e "${GREEN}🎉 All tests passed!${NC}" 173 | exit 0 174 | else 175 | echo -e "${RED}❌ Some tests failed.${NC}" 176 | exit 1 177 | fi 178 | -------------------------------------------------------------------------------- /internal/service/util.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "html/template" 8 | "net/http" 9 | "net/url" 10 | "strings" 11 | "time" 12 | 13 | "github.com/gorilla/mux" 14 | log "github.com/sirupsen/logrus" 15 | "github.com/theckman/httpforwarded" 16 | "github.com/tobilg/duckdb-tileserver/internal/conf" 17 | "github.com/tobilg/duckdb-tileserver/internal/ui" 18 | ) 19 | 20 | const ( 21 | schemeHTTP = "http" 22 | schemeHTTPS = "https" 23 | ) 24 | 25 | //--- simple request handler error framework 26 | // see https://blog.golang.org/error-handling-and-go 27 | // Allows handlers to return structure error messages, 28 | // and centralizes common logic in the ServeHTTP method 29 | 30 | type appError struct { 31 | Error error 32 | Message string 33 | Code int 34 | } 35 | 36 | // appHandler is a named function type which is augmented 37 | // with a ServeHTTP method 38 | // This allows it to be used as an http.Handler. 39 | // When the ServeHTTP method is called 40 | // the bound function is called, and 41 | // can return a structured error message. 42 | type appHandler func(http.ResponseWriter, *http.Request) *appError 43 | 44 | // ServeHTTP is the base Handler for routed requests. 
45 | // Common handling logic is placed here 46 | // See also https://golang.org/pkg/net/http/#Handler 47 | func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 48 | // --- log the request 49 | log.Printf("%v %v %v\n", r.RemoteAddr, r.Method, r.URL) 50 | 51 | // signal for normal completion of handler 52 | handlerDone := make(chan struct{}) 53 | start := time.Now() 54 | 55 | // monitor context status and log anything abnormal 56 | go func() { 57 | select { 58 | case <-handlerDone: 59 | log.Debugf("---- Request complete in %v", time.Since(start)) 60 | case <-r.Context().Done(): 61 | // log cancellations 62 | switch r.Context().Err() { 63 | case context.DeadlineExceeded: 64 | log.Warnf("---- Request processing terminated by write timeout after %v", time.Since(start)) 65 | case context.Canceled: 66 | log.Debugf("---- Request cancelled by client after %v", time.Since(start)) 67 | } 68 | } 69 | }() 70 | 71 | // execute the handler 72 | e := fn(w, r) 73 | 74 | if e != nil { // e is *appError, not os.Error. 75 | // TODO: is this the desired behaviour? 76 | // perhaps detect format and emit accordingly? 77 | // log error here? 78 | // should log attached error? 79 | // panic on severe error? 80 | log.Debugf("Request processing error: %v (%v)\n", e.Message, e.Code) 81 | if e.Message == "" { 82 | // Return just the status code without a response body 83 | w.WriteHeader(e.Code) 84 | } else { 85 | http.Error(w, e.Message, e.Code) 86 | } 87 | } 88 | close(handlerDone) 89 | } 90 | 91 | // FatalAfter aborts by logging a fatal message, after a time delay. 92 | // The abort can be cancelled by closing the returned channel 93 | func FatalAfter(delaySec int, msg string) chan struct{} { 94 | chanCancel := make(chan struct{}) 95 | go func() { 96 | select { 97 | case <-chanCancel: 98 | // do nothing if cancelled 99 | return 100 | case <-time.After(time.Duration(delaySec) * time.Second): 101 | // terminate with extreme prejudice 102 | log.Fatalln(msg) 103 | } 104 | }() 105 | return chanCancel 106 | } 107 | 108 | func appErrorMsg(err error, msg string, code int) *appError { 109 | return &appError{err, msg, code} 110 | } 111 | 112 | func appErrorInternal(err error, msg string) *appError { 113 | return &appError{err, msg, http.StatusInternalServerError} 114 | } 115 | 116 | func appErrorBadRequest(err error, msg string) *appError { 117 | return &appError{err, msg, http.StatusBadRequest} 118 | } 119 | 120 | func appErrorInternalFmt(err error, format string, v ...interface{}) *appError { 121 | msg := fmt.Sprintf(format, v...)
122 | return &appError{err, msg, http.StatusInternalServerError} 123 | } 124 | 125 | func appErrorNotFoundFmt(err error, format string, v string) *appError { 126 | msg := fmt.Sprintf(format, v) 127 | return &appError{err, msg, http.StatusNotFound} 128 | } 129 | 130 | func appErrorNotFound(err error, msg string) *appError { 131 | return &appError{err, msg, http.StatusNotFound} 132 | } 133 | 134 | func appErrorUnauthorized(err error, msg string) *appError { 135 | return &appError{err, msg, http.StatusUnauthorized} 136 | } 137 | 138 | func appErrorForbidden(err error, msg string) *appError { 139 | return &appError{err, msg, http.StatusForbidden} 140 | } 141 | 142 | //======================== 143 | 144 | func serveURLBase(r *http.Request) string { 145 | // Use configuration file settings if we have them 146 | configURL := conf.Configuration.Server.UrlBase 147 | 148 | if configURL != "" { 149 | return configURL + "/" 150 | } 151 | // Preferred scheme 152 | ps := schemeHTTP 153 | // check if HTTPS (TLS) is being used 154 | if r.TLS != nil { 155 | ps = schemeHTTPS 156 | } 157 | // Preferred host:port 158 | ph := strings.TrimRight(r.Host, "/") 159 | 160 | // Check IETF standard "Forwarded" header 161 | // for reverse proxy information 162 | xf := http.CanonicalHeaderKey("Forwarded") 163 | if f, ok := r.Header[xf]; ok { 164 | if fm, err := httpforwarded.Parse(f); err == nil { 165 | if len(fm["host"]) > 0 && len(fm["proto"]) > 0 { 166 | ph = fm["host"][0] 167 | ps = fm["proto"][0] 168 | return fmt.Sprintf("%v://%v/", ps, ph) 169 | } 170 | } 171 | } 172 | 173 | // Check X-Forwarded-Host and X-Forwarded-Proto headers 174 | xfh := http.CanonicalHeaderKey("X-Forwarded-Host") 175 | if fh, ok := r.Header[xfh]; ok { 176 | ph = fh[0] 177 | } 178 | xfp := http.CanonicalHeaderKey("X-Forwarded-Proto") 179 | if fp, ok := r.Header[xfp]; ok { 180 | ps = fp[0] 181 | } 182 | 183 | path := conf.Configuration.Server.BasePath 184 | return fmt.Sprintf("%v://%v%v/", ps, ph, path) 185 | } 186 | 187 | func getRequestVar(varname string, r *http.Request) string { 188 | vars := mux.Vars(r) 189 | name := vars[varname] 190 | return name 191 | } 192 | 193 | // urlPathFormat provides a URL for the given base and path 194 | func urlPath(urlBase string, path string) string { 195 | url := fmt.Sprintf("%v%v", urlBase, path) 196 | return url 197 | } 198 | 199 | // urlPathFormat provides a URL for the given base, path and format 200 | func urlPathFormat(urlBase string, path string, format string) string { 201 | var pathFormat string 202 | if path == "" { 203 | pathFormat = "" 204 | if format == "html" { 205 | pathFormat = "index.html" 206 | } 207 | } else { 208 | pathFormat = path + "." + format 209 | } 210 | url := fmt.Sprintf("%v%v", urlBase, pathFormat) 211 | return url 212 | } 213 | 214 | func urlPathFormatQuery(urlBase string, path string, format string, query string) string { 215 | url := urlPathFormat(urlBase, path, format) 216 | if query != "" { 217 | url = fmt.Sprintf("%v?%v", url, query) 218 | } 219 | return url 220 | } 221 | 222 | // formatBaseURL takes a hostname (baseHost) and a base path 223 | // and joins them. Both are parsed as URLs (using net/url) and 224 | // then joined to ensure a properly formed URL. 225 | // net/url does not support parsing hostnames without a scheme 226 | // (e.g. example.com is invalid; http://example.com is valid). 227 | // serverURLHost ensures a scheme is added. 
228 | func formatBaseURL(protocol string, baseHost string, basePath string) string { 229 | urlHost, err := url.Parse(protocol + baseHost) 230 | if err != nil { 231 | log.Fatal(err) 232 | } 233 | urlPath, err := url.Parse(basePath) 234 | if err != nil { 235 | log.Fatal(err) 236 | } 237 | return strings.TrimRight(urlHost.ResolveReference(urlPath).String(), "/") 238 | } 239 | 240 | // restrict creates a map containing only entries in names 241 | func restrict(inMap map[string]string, names []string) map[string]string { 242 | outMap := make(map[string]string) 243 | for _, name := range names { 244 | //log.Debugf("testing request param %v", name) 245 | if val, ok := inMap[name]; ok { 246 | outMap[name] = val 247 | //log.Debugf("copying request param %v = %v ", name, val) 248 | } 249 | } 250 | return outMap 251 | } 252 | 253 | // removeNames removes a list of names from a map (map is modified) 254 | // 255 | //nolint:unused 256 | func removeNames(inMap map[string]string, names []string) { 257 | for _, name := range names { 258 | delete(inMap, name) 259 | } 260 | } 261 | 262 | func writeJSON(w http.ResponseWriter, contype string, content interface{}) *appError { 263 | encodedContent, err := json.Marshal(content) 264 | if err != nil { 265 | log.Printf("JSON encoding error: %v", err.Error()) 266 | return appErrorInternal(err, "Error encoding JSON") 267 | } 268 | //fmt.Println(string(encodedContent)) 269 | writeResponse(w, contype, encodedContent) 270 | return nil 271 | } 272 | 273 | func writeText(w http.ResponseWriter, contype string, encodedContent []byte) *appError { 274 | //fmt.Println(string(encodedContent)) 275 | writeResponse(w, contype, encodedContent) 276 | return nil 277 | } 278 | 279 | func writeHTML(w http.ResponseWriter, content interface{}, context interface{}, templ *template.Template) *appError { 280 | encodedContent, err := ui.RenderHTML(templ, content, context) 281 | if err != nil { 282 | log.Printf("HTML encoding error: %v", err.Error()) 283 | return appErrorInternal(err, "Error encoding HTML") 284 | } 285 | return writeResponse(w, "text/html; charset=utf-8", encodedContent) 286 | } 287 | 288 | // checkUIDisabled returns an error if HTML UI is disabled 289 | func checkUIDisabled(format string) *appError { 290 | if format == "html" && conf.Configuration.Server.DisableUi { 291 | return appErrorNotFound(nil, "") 292 | } 293 | return nil 294 | } 295 | 296 | func writeResponse(w http.ResponseWriter, contype string, encodedContent []byte) *appError { 297 | w.Header().Set("Content-Type", contype) 298 | w.WriteHeader(http.StatusOK) 299 | _, err := w.Write(encodedContent) 300 | if err != nil { 301 | return appErrorInternal(err, "Error writing response") 302 | } 303 | return nil 304 | } 305 | 306 | // Sets response 'status', and writes a json-encoded object with property "description" having value "msg". 
307 | // 308 | //nolint:all 309 | func writeError(w http.ResponseWriter, code string, msg string, status int) { 310 | w.WriteHeader(status) 311 | 312 | result, err := json.Marshal(struct { 313 | Code string `json:"code"` 314 | Description string `json:"description"` 315 | }{ 316 | Code: code, 317 | Description: msg, 318 | }) 319 | 320 | if err != nil { 321 | w.Write([]byte(fmt.Sprintf("problem marshaling error: %v", msg))) 322 | } else { 323 | w.Write(result) 324 | } 325 | } 326 | -------------------------------------------------------------------------------- /internal/data/db_sql.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | const forceTextTSVECTOR = "tsvector" 12 | 13 | const sqlTables = ` 14 | SELECT 15 | table_name AS id, 16 | 'main' AS schema, 17 | table_name AS table, 18 | '' AS description, 19 | column_name AS geometry_column, 20 | 4326 AS srid, 21 | 'GEOMETRY' AS geometry_type, 22 | '' AS id_column, 23 | '[]' AS props 24 | FROM information_schema.columns 25 | WHERE data_type = 'GEOMETRY' 26 | ORDER BY table_name 27 | ` 28 | 29 | const sqlFunctionsTemplate = ` 30 | SELECT 31 | 'spatial.ST_Area' AS id, 32 | 'spatial' AS schema, 33 | 'ST_Area' AS function, 34 | 'Calculate area of geometry' AS description, 35 | '["geom"]' AS input_names, 36 | '["GEOMETRY"]' AS input_types, 37 | '[]' AS argdefaults, 38 | '["area"]' AS output_names, 39 | '["DOUBLE"]' AS output_types 40 | UNION ALL 41 | SELECT 42 | 'spatial.ST_AsGeoJSON' AS id, 43 | 'spatial' AS schema, 44 | 'ST_AsGeoJSON' AS function, 45 | 'Convert geometry to GeoJSON' AS description, 46 | '["geom"]' AS input_names, 47 | '["GEOMETRY"]' AS input_types, 48 | '[]' AS argdefaults, 49 | '["geojson"]' AS output_names, 50 | '["VARCHAR"]' AS output_types 51 | ` 52 | 53 | func sqlFunctions() string { 54 | return sqlFunctionsTemplate 55 | } 56 | 57 | func quotedList(names []string) string { 58 | itemsJoin := strings.Join(names, "','") 59 | return "'" + itemsJoin + "'" 60 | } 61 | 62 | const sqlFmtExtentEst = `SELECT ST_XMin(ext.geom) AS xmin, ST_YMin(ext.geom) AS ymin, ST_XMax(ext.geom) AS xmax, ST_YMax(ext.geom) AS ymax 63 | FROM ( SELECT ST_Envelope_Agg("%s") AS geom FROM "%s" ) AS ext;` 64 | 65 | func sqlExtentEstimated(tbl *Table) string { 66 | return fmt.Sprintf(sqlFmtExtentEst, tbl.GeometryColumn, tbl.Table) 67 | } 68 | 69 | const sqlFmtExtentExact = `SELECT ST_XMin(ext.geom) AS xmin, ST_YMin(ext.geom) AS ymin, ST_XMax(ext.geom) AS xmax, ST_YMax(ext.geom) AS ymax 70 | FROM (SELECT COALESCE(ST_Envelope_Agg("%s"), ST_GeomFromText('POLYGON((-180 -90, 180 -90, 180 90, -180 90, -180 -90))', 4326)) AS geom FROM "%s" ) AS ext;` 71 | 72 | func sqlExtentExact(tbl *Table) string { 73 | return fmt.Sprintf(sqlFmtExtentExact, tbl.GeometryColumn, tbl.Table) 74 | } 75 | 76 | const sqlFmtFeatures = "SELECT %v %v FROM \"%s\" %v %v %v %s;" 77 | 78 | func sqlFeatures(tbl *Table, param *QueryParam) (string, []interface{}) { 79 | geomCol := sqlGeomCol(tbl.GeometryColumn, tbl.Srid, param) 80 | propCols := sqlColList(param.Columns, tbl.DbTypes, true) 81 | bboxFilter := sqlBBoxFilter(tbl.GeometryColumn, param.Bbox, param.BboxCrs) 82 | attrFilter, attrVals := sqlAttrFilter(param.Filter) 83 | cqlFilter := sqlCqlFilter(param.FilterSql) 84 | sqlWhere := sqlWhere(bboxFilter, attrFilter, cqlFilter) 85 | sqlGroupBy := sqlGroupBy(param.GroupBy) 86 | sqlOrderBy := sqlOrderBy(param.SortBy) 87 | sqlLimitOffset 
:= sqlLimitOffset(param.Limit, param.Offset) 88 | sql := fmt.Sprintf(sqlFmtFeatures, geomCol, propCols, tbl.Table, sqlWhere, sqlGroupBy, sqlOrderBy, sqlLimitOffset) 89 | return sql, attrVals 90 | } 91 | 92 | // sqlColList creates a comma-separated column list, or blank if no columns 93 | // If addLeadingComma is true, a leading comma is added, for use when the target SQL has columns defined before 94 | func sqlColList(names []string, dbtypes map[string]string, addLeadingComma bool) string { 95 | if len(names) == 0 { 96 | return "" 97 | } 98 | 99 | var cols []string 100 | for _, col := range names { 101 | colExpr := sqlColExpr(col, dbtypes[col]) 102 | cols = append(cols, colExpr) 103 | } 104 | colsStr := strings.Join(cols, ",") 105 | if addLeadingComma { 106 | return ", " + colsStr 107 | } 108 | return colsStr 109 | } 110 | 111 | // makeSQLColExpr casts a column to text if type is unknown to PGX 112 | func sqlColExpr(name string, dbtype string) string { 113 | 114 | name = strconv.Quote(name) 115 | 116 | // TODO: make this more data-driven / configurable 117 | switch dbtype { 118 | case forceTextTSVECTOR: 119 | return fmt.Sprintf("%s::text", name) 120 | } 121 | 122 | // for properties that will be treated as a string in the JSON response, 123 | // cast to text. This allows displaying data types that DuckDB 124 | // does not support out of the box, as long as it can be cast to text. 125 | if toJSONTypeFromDuckDB(dbtype) == JSONTypeString { 126 | return fmt.Sprintf("%s::VARCHAR", name) 127 | } 128 | 129 | return name 130 | } 131 | 132 | const sqlFmtFeature = "SELECT %v %v FROM \"%s\" WHERE \"%v\" = $1 LIMIT 1" 133 | 134 | func sqlFeature(tbl *Table, param *QueryParam) string { 135 | geomCol := sqlGeomCol(tbl.GeometryColumn, tbl.Srid, param) 136 | propCols := sqlColList(param.Columns, tbl.DbTypes, true) 137 | sql := fmt.Sprintf(sqlFmtFeature, geomCol, propCols, tbl.Table, tbl.IDColumn) 138 | return sql 139 | } 140 | 141 | func sqlCqlFilter(sql string) string { 142 | //log.Debug("SQL = " + sql) 143 | if len(sql) == 0 { 144 | return "" 145 | } 146 | return "(" + sql + ")" 147 | } 148 | 149 | func sqlWhere(cond1 string, cond2 string, cond3 string) string { 150 | var condList []string 151 | if len(cond1) > 0 { 152 | condList = append(condList, cond1) 153 | } 154 | if len(cond2) > 0 { 155 | condList = append(condList, cond2) 156 | } 157 | if len(cond3) > 0 { 158 | condList = append(condList, cond3) 159 | } 160 | where := strings.Join(condList, " AND ") 161 | if len(where) > 0 { 162 | where = " WHERE " + where 163 | } 164 | return where 165 | } 166 | 167 | func sqlAttrFilter(filterConds []*PropertyFilter) (string, []interface{}) { 168 | var vals []interface{} 169 | var exprItems []string 170 | for i, cond := range filterConds { 171 | sqlCond := fmt.Sprintf("\"%v\" = $%v", cond.Name, i+1) 172 | exprItems = append(exprItems, sqlCond) 173 | vals = append(vals, cond.Value) 174 | } 175 | sql := strings.Join(exprItems, " AND ") 176 | return sql, vals 177 | } 178 | 179 | // DuckDB spatial doesn't support SRID parameter in ST_GeomFromText 180 | const sqlFmtBBoxGeoFilter = ` ST_Intersects("%v", ST_GeomFromText('POLYGON((%v %v, %v %v, %v %v, %v %v, %v %v))')) ` 181 | 182 | func sqlBBoxFilter(geomCol string, bbox *Extent, bboxSRID int) string { 183 | if bbox == nil { 184 | return "" 185 | } 186 | // For DuckDB, use ST_GeomFromText without SRID parameter 187 | return fmt.Sprintf(sqlFmtBBoxGeoFilter, geomCol, 188 | bbox.Minx, bbox.Miny, bbox.Maxx, bbox.Miny, bbox.Maxx, bbox.Maxy, bbox.Minx, bbox.Maxy, bbox.Minx, 
bbox.Miny) 189 | } 190 | 191 | const sqlFmtGeomCol = `ST_AsGeoJSON( %v %v ) AS _geojson` 192 | 193 | func sqlGeomCol(geomCol string, sourceSRID int, param *QueryParam) string { 194 | geomColSafe := strconv.Quote(geomCol) 195 | geomExpr := applyTransform(param.TransformFuns, geomColSafe) 196 | geomOutExpr := transformToOutCrs(geomExpr, sourceSRID, param.Crs) 197 | sql := fmt.Sprintf(sqlFmtGeomCol, geomOutExpr, sqlPrecisionArg(param.Precision)) 198 | return sql 199 | } 200 | 201 | func transformToOutCrs(geomExpr string, sourceSRID, outSRID int) string { 202 | if sourceSRID == outSRID { 203 | return geomExpr 204 | } 205 | // For DuckDB spatial, we'll return the original for now since transform support is limited 206 | return geomExpr 207 | } 208 | 209 | func sqlPrecisionArg(precision int) string { 210 | if precision < 0 { 211 | return "" 212 | } 213 | sqlPrecision := fmt.Sprintf(",%v", precision) 214 | return sqlPrecision 215 | } 216 | 217 | const sqlFmtOrderBy = `ORDER BY "%v" %v` 218 | 219 | func sqlOrderBy(ordering []Sorting) string { 220 | if len(ordering) <= 0 { 221 | return "" 222 | } 223 | // TODO: support more than one ordering 224 | col := ordering[0].Name 225 | dir := "" 226 | if ordering[0].IsDesc { 227 | dir = "DESC" 228 | } 229 | sql := fmt.Sprintf(sqlFmtOrderBy, col, dir) 230 | return sql 231 | } 232 | 233 | const sqlFmtGroupBy = `GROUP BY "%v"` 234 | 235 | func sqlGroupBy(groupBy []string) string { 236 | if len(groupBy) <= 0 { 237 | return "" 238 | } 239 | // TODO: support more than one grouping 240 | col := groupBy[0] 241 | sql := fmt.Sprintf(sqlFmtGroupBy, col) 242 | log.Debugf("group by: %s", sql) 243 | return sql 244 | } 245 | 246 | func sqlLimitOffset(limit int, offset int) string { 247 | sqlLim := "" 248 | if limit >= 0 { 249 | sqlLim = fmt.Sprintf(" LIMIT %d", limit) 250 | } 251 | sqlOff := "" 252 | if offset > 0 { 253 | sqlOff = fmt.Sprintf(" OFFSET %d", offset) 254 | } 255 | return sqlLim + sqlOff 256 | } 257 | 258 | func applyTransform(funs []TransformFunction, expr string) string { 259 | if funs == nil { 260 | return expr 261 | } 262 | for _, fun := range funs { 263 | expr = fun.apply(expr) 264 | } 265 | return expr 266 | } 267 | 268 | const sqlFmtGeomFunction = "SELECT %s %s FROM \"%s\"( %v ) %v %v %s;" 269 | 270 | func sqlGeomFunction(fn *Function, args map[string]string, propCols []string, param *QueryParam) (string, []interface{}) { 271 | sqlArgs, argVals := sqlFunctionArgs(args) 272 | sqlGeomCol := sqlGeomCol(fn.GeometryColumn, SRID_UNKNOWN, param) 273 | sqlPropCols := sqlColList(propCols, fn.Types, true) 274 | //-- SRS of function output is unknown, so have to assume 4326 275 | bboxFilter := sqlBBoxFilter(fn.GeometryColumn, param.Bbox, param.BboxCrs) 276 | cqlFilter := sqlCqlFilter(param.FilterSql) 277 | sqlWhere := sqlWhere(bboxFilter, cqlFilter, "") 278 | sqlOrderBy := sqlOrderBy(param.SortBy) 279 | sqlLimitOffset := sqlLimitOffset(param.Limit, param.Offset) 280 | sql := fmt.Sprintf(sqlFmtGeomFunction, sqlGeomCol, sqlPropCols, fn.Name, sqlArgs, sqlWhere, sqlOrderBy, sqlLimitOffset) 281 | return sql, argVals 282 | } 283 | 284 | const sqlFmtFunction = "SELECT %v FROM \"%s\"( %v ) %v %v %s;" 285 | 286 | func sqlFunction(fn *Function, args map[string]string, propCols []string, param *QueryParam) (string, []interface{}) { 287 | sqlArgs, argVals := sqlFunctionArgs(args) 288 | sqlPropCols := sqlColList(propCols, fn.Types, false) 289 | cqlFilter := sqlCqlFilter(param.FilterSql) 290 | sqlWhere := sqlWhere(cqlFilter, "", "") 291 | sqlOrderBy := sqlOrderBy(param.SortBy) 
292 | sqlLimitOffset := sqlLimitOffset(param.Limit, param.Offset) 293 | sql := fmt.Sprintf(sqlFmtFunction, sqlPropCols, fn.Name, sqlArgs, sqlWhere, sqlOrderBy, sqlLimitOffset) 294 | return sql, argVals 295 | } 296 | 297 | func sqlFunctionArgs(argValues map[string]string) (string, []interface{}) { 298 | var vals []interface{} 299 | var argItems []string 300 | i := 1 301 | for argName := range argValues { 302 | argItem := fmt.Sprintf("%v => $%v", argName, i) 303 | argItems = append(argItems, argItem) 304 | i++ 305 | vals = append(vals, argValues[argName]) 306 | } 307 | sql := strings.Join(argItems, ",") 308 | return sql, vals 309 | } 310 | -------------------------------------------------------------------------------- /internal/data/catalog_mock.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | type CatalogMock struct { 12 | TableDefs []*Table 13 | tableData map[string][]*featureMock 14 | FunctionDefs []*Function 15 | } 16 | 17 | var instance CatalogMock 18 | 19 | // CatMockInstance tbd 20 | func CatMockInstance() *CatalogMock { 21 | log.Printf("Using Test Catalog data") 22 | // TODO: make a singleton 23 | instance = newCatalogMock() 24 | return &instance 25 | } 26 | 27 | func newCatalogMock() CatalogMock { 28 | // must be in synch with featureMock type 29 | propNames := []string{"prop_a", "prop_b", "prop_c", "prop_d"} 30 | types := map[string]string{ 31 | "prop_a": "text", 32 | "prop_b": "int", 33 | "prop_c": "text", 34 | "prop_d": "int", 35 | } 36 | jtypes := []string{"string", "number", "string", "number"} 37 | colDesc := []string{"Property A", "Property B", "Property C", "Property D"} 38 | 39 | layerA := &Table{ 40 | ID: "mock_a", 41 | Title: "Mock A", 42 | Description: "This dataset contains mock data about A (9 points)", 43 | Extent: Extent{Minx: -120, Miny: 40, Maxx: -74, Maxy: 50}, 44 | Srid: 4326, 45 | Columns: propNames, 46 | DbTypes: types, 47 | JSONTypes: jtypes, 48 | ColDesc: colDesc, 49 | } 50 | 51 | layerB := &Table{ 52 | ID: "mock_b", 53 | Title: "Mock B", 54 | Description: "This dataset contains mock data about B (100 points)", 55 | Extent: Extent{Minx: -75, Miny: 45, Maxx: -74, Maxy: 46}, 56 | Srid: 4326, 57 | Columns: propNames, 58 | DbTypes: types, 59 | JSONTypes: jtypes, 60 | ColDesc: colDesc, 61 | } 62 | 63 | layerC := &Table{ 64 | ID: "mock_c", 65 | Title: "Mock C", 66 | Description: "This dataset contains mock data about C (10000 points)", 67 | Extent: Extent{Minx: -120, Miny: 40, Maxx: -74, Maxy: 60}, 68 | Srid: 4326, 69 | Columns: propNames, 70 | DbTypes: types, 71 | JSONTypes: jtypes, 72 | ColDesc: colDesc, 73 | } 74 | 75 | tableData := map[string][]*featureMock{} 76 | tableData["mock_a"] = makePointFeatures(layerA.Extent, 3, 3) 77 | tableData["mock_b"] = makePointFeatures(layerB.Extent, 10, 10) 78 | tableData["mock_c"] = makePointFeatures(layerC.Extent, 100, 100) 79 | 80 | var tables []*Table 81 | tables = append(tables, layerA) 82 | tables = append(tables, layerB) 83 | tables = append(tables, layerC) 84 | 85 | funA := &Function{ 86 | ID: "fun_a", 87 | Schema: "postgisftw", 88 | Name: "fun_a", 89 | Description: "Function A", 90 | InNames: []string{"in_param1"}, 91 | InDbTypes: []string{"text"}, 92 | InTypeMap: map[string]string{ 93 | "in_param1": "text", 94 | }, 95 | InDefaults: []string{"aa"}, 96 | NumNoDefault: 0, 97 | OutNames: []string{"out_param1"}, 98 | OutDbTypes: []string{"text"}, 99 | 
OutJSONTypes: []string{"string"}, 100 | Types: map[string]string{ 101 | "in_param1": "text", 102 | }, 103 | GeometryColumn: "", 104 | IDColumn: "", 105 | } 106 | funB := &Function{ 107 | ID: "fun_b", 108 | Schema: "postgisftw", 109 | Name: "fun_b", 110 | Description: "Function B", 111 | InNames: []string{"in_param1"}, 112 | InDbTypes: []string{"int"}, 113 | InTypeMap: map[string]string{ 114 | "in_param1": "int", 115 | }, 116 | InDefaults: []string{"999"}, 117 | NumNoDefault: 0, 118 | OutNames: []string{"out_geom", "out_id", "out_param1"}, 119 | OutDbTypes: []string{"geometry", "int", "text"}, 120 | OutJSONTypes: []string{"geometry", "int", "string"}, 121 | Types: map[string]string{ 122 | "in_param1": "int", 123 | "out_geom": "geometry", 124 | "out_id": "int", 125 | "out_param1": "text", 126 | }, 127 | GeometryColumn: "", 128 | IDColumn: "", 129 | } 130 | funNoParam := &Function{ 131 | ID: "fun_noparam", 132 | Schema: "postgisftw", 133 | Name: "fun_noparam", 134 | Description: "Function with no parameters", 135 | InNames: []string{}, 136 | InDbTypes: []string{}, 137 | InTypeMap: map[string]string{}, 138 | InDefaults: []string{}, 139 | NumNoDefault: 0, 140 | OutNames: []string{"out_geom", "out_id", "out_param1"}, 141 | OutDbTypes: []string{"geometry", "int", "text"}, 142 | OutJSONTypes: []string{"geometry", "int", "string"}, 143 | Types: map[string]string{ 144 | "in_param1": "int", 145 | "out_geom": "geometry", 146 | "out_id": "int", 147 | "out_param1": "text", 148 | }, 149 | GeometryColumn: "", 150 | IDColumn: "", 151 | } 152 | funDefs := []*Function{ 153 | funA, 154 | funB, 155 | funNoParam, 156 | } 157 | catMock := CatalogMock{ 158 | TableDefs: tables, 159 | tableData: tableData, 160 | FunctionDefs: funDefs, 161 | } 162 | 163 | return catMock 164 | } 165 | 166 | func (cat *CatalogMock) SetIncludeExclude(includeList []string, excludeList []string) { 167 | } 168 | 169 | func (cat *CatalogMock) Close() { 170 | // this is a no-op 171 | } 172 | 173 | func (cat *CatalogMock) Tables() ([]*Table, error) { 174 | return cat.TableDefs, nil 175 | } 176 | 177 | func (cat *CatalogMock) TableReload(name string) { 178 | // no-op for mock data 179 | } 180 | 181 | func (cat *CatalogMock) TableByName(name string) (*Table, error) { 182 | for _, lyr := range cat.TableDefs { 183 | if lyr.ID == name { 184 | return lyr, nil 185 | } 186 | } 187 | // not found - indicated by nil value returned 188 | return nil, nil 189 | } 190 | 191 | func (cat *CatalogMock) TableFeatures(ctx context.Context, name string, param *QueryParam) ([]string, error) { 192 | features, ok := cat.tableData[name] 193 | if !ok { 194 | // table not found - indicated by nil value returned 195 | return nil, nil 196 | } 197 | featFilt := doFilter(features, param.Filter) 198 | featuresLim := doLimit(featFilt, param.Limit, param.Offset) 199 | // handle empty property list 200 | propNames := cat.TableDefs[0].Columns 201 | if len(param.Columns) > 0 { 202 | propNames = param.Columns 203 | } 204 | return featuresToJSON(featuresLim, propNames), nil 205 | } 206 | 207 | func (cat *CatalogMock) TableFeature(ctx context.Context, name string, id string, param *QueryParam) (string, error) { 208 | features, ok := cat.tableData[name] 209 | if !ok { 210 | // table not found - indicated by empty value returned 211 | return "", nil 212 | } 213 | index, err := strconv.Atoi(id) 214 | if err != nil { 215 | // a malformed int is treated as feature not found 216 | return "", nil 217 | } 218 | 219 | // TODO: return not found if index out of range 220 | if index < 0 || 
index >= len(features) { 221 | return "", nil 222 | } 223 | // handle empty property list 224 | propNames := cat.TableDefs[0].Columns 225 | if len(param.Columns) > 0 { 226 | propNames = param.Columns 227 | } 228 | 229 | return features[index].toJSON(propNames), nil 230 | } 231 | 232 | func (cat *CatalogMock) Functions() ([]*Function, error) { 233 | return cat.FunctionDefs, nil 234 | } 235 | 236 | func (cat *CatalogMock) FunctionByName(name string) (*Function, error) { 237 | for _, fn := range cat.FunctionDefs { 238 | if fn.Schema+"."+fn.ID == name { 239 | return fn, nil 240 | } 241 | } 242 | // not found - indicated by nil value returned 243 | return nil, nil 244 | } 245 | 246 | func (cat *CatalogMock) FunctionFeatures(ctx context.Context, name string, args map[string]string, param *QueryParam) ([]string, error) { 247 | // TODO: 248 | return nil, nil 249 | } 250 | 251 | func (cat *CatalogMock) FunctionData(ctx context.Context, name string, args map[string]string, param *QueryParam) ([]map[string]any, error) { 252 | // TODO: 253 | return nil, nil 254 | } 255 | 256 | func makePointFeatures(extent Extent, nx int, ny int) []*featureMock { 257 | basex := extent.Minx 258 | basey := extent.Miny 259 | dx := (extent.Maxx - extent.Minx) / float64(nx) 260 | dy := (extent.Maxy - extent.Miny) / float64(ny) 261 | 262 | n := nx * ny 263 | features := make([]*featureMock, n) 264 | index := 0 265 | for ix := 0; ix < nx; ix++ { 266 | for iy := 0; iy < ny; iy++ { 267 | id := index + 1 268 | x := basex + dx*float64(ix) 269 | y := basey + dy*float64(iy) 270 | features[index] = makeFeatureMockPoint(id, x, y) 271 | //fmt.Println(features[index]) 272 | 273 | index++ 274 | } 275 | } 276 | return features 277 | } 278 | 279 | type featureMock struct { 280 | ID string 281 | Geom string 282 | PropA string 283 | PropB int 284 | PropC string 285 | PropD int 286 | } 287 | 288 | func makeFeatureMockPoint(id int, x float64, y float64) *featureMock { 289 | geomFmt := `{"type": "Point","coordinates": [ %v, %v ] }` 290 | geomStr := fmt.Sprintf(geomFmt, x, y) 291 | 292 | idstr := strconv.Itoa(id) 293 | feat := featureMock{idstr, geomStr, "propA", id, "propC", id % 10} 294 | return &feat 295 | } 296 | 297 | func (fm *featureMock) toJSON(propNames []string) string { 298 | props := fm.extractProperties(propNames) 299 | return makeFeatureJSON(fm.ID, fm.Geom, props) 300 | } 301 | 302 | func (fm *featureMock) extractProperties(propNames []string) map[string]any { 303 | props := make(map[string]any) 304 | for _, name := range propNames { 305 | val, err := fm.getProperty(name) 306 | if err != nil { 307 | // panic to avoid having to return error 308 | panic(fmt.Errorf("unknown property: %v", name)) 309 | } 310 | props[name] = val 311 | } 312 | return props 313 | } 314 | 315 | func (fm *featureMock) getProperty(name string) (any, error) { 316 | if name == "prop_a" { 317 | return fm.PropA, nil 318 | } 319 | if name == "prop_b" { 320 | return fm.PropB, nil 321 | } 322 | if name == "prop_c" { 323 | return fm.PropC, nil 324 | } 325 | if name == "prop_d" { 326 | return fm.PropD, nil 327 | } 328 | return nil, fmt.Errorf("unknown property: %v", name) 329 | } 330 | 331 | func doFilter(features []*featureMock, filter []*PropertyFilter) []*featureMock { 332 | var result []*featureMock 333 | for _, feat := range features { 334 | if isFilterMatches(feat, filter) { 335 | result = append(result, feat) 336 | } 337 | } 338 | return result 339 | } 340 | 341 | func isFilterMatches(feature *featureMock, filter []*PropertyFilter) bool { 342 | for _, cond 
:= range filter { 343 | val, _ := feature.getProperty(cond.Name) 344 | valStr := fmt.Sprintf("%v", val) 345 | if cond.Value != valStr { 346 | return false 347 | } 348 | } 349 | return true 350 | } 351 | 352 | func doLimit(features []*featureMock, limit int, offset int) []*featureMock { 353 | start := 0 354 | end := len(features) 355 | // handle limit/offset (offset is only respected if limit present) 356 | if limit < len(features) { 357 | start = offset 358 | end = offset + limit 359 | if end >= len(features) { 360 | end = len(features) 361 | } 362 | } 363 | return features[start:end] 364 | } 365 | 366 | func featuresToJSON(features []*featureMock, propNames []string) []string { 367 | n := len(features) 368 | featJSON := make([]string, n) 369 | for i := 0; i < n; i++ { 370 | featJSON[i] = features[i].toJSON(propNames) 371 | } 372 | return featJSON 373 | } 374 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2025 TobiLG for duckdb-tileserver 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 
192 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= 2 | github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= 3 | github.com/apache/arrow-go/v18 v18.4.1 h1:q/jVkBWCJOB9reDgaIZIdruLQUb1kbkvOnOFezVH1C4= 4 | github.com/apache/arrow-go/v18 v18.4.1/go.mod h1:tLyFubsAl17bvFdUAy24bsSvA/6ww95Iqi67fTpGu3E= 5 | github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc= 6 | github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g= 7 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 10 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/duckdb/duckdb-go-bindings v0.1.24 h1:p1v3GruGHGcZD69cWauH6QrOX32oooqdUAxrWK3Fo6o= 12 | github.com/duckdb/duckdb-go-bindings v0.1.24/go.mod h1:WA7U/o+b37MK2kiOPPueVZ+FIxt5AZFCjszi8hHeH18= 13 | github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.24 h1:XhqMj+bvpTIm+hMeps1Kk94r2eclAswk2ISFs4jMm+g= 14 | github.com/duckdb/duckdb-go-bindings/darwin-amd64 v0.1.24/go.mod h1:jfbOHwGZqNCpMAxV4g4g5jmWr0gKdMvh2fGusPubxC4= 15 | github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.24 h1:OyHr5PykY5FG81jchpRoESMDQX1HK66PdNsfxoHxbwM= 16 | github.com/duckdb/duckdb-go-bindings/darwin-arm64 v0.1.24/go.mod h1:zLVtv1a7TBuTPvuAi32AIbnuw7jjaX5JElZ+urv1ydc= 17 | github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.24 h1:6Y4VarmcT7Oe8stwta4dOLlUX8aG4ciG9VhFKnp91a4= 18 | github.com/duckdb/duckdb-go-bindings/linux-amd64 v0.1.24/go.mod h1:GCaBoYnuLZEva7BXzdXehTbqh9VSvpLB80xcmxGBGs8= 19 | github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.24 h1:NCAGH7o1RsJv631EQGOqs94ABtmYZO6JjMHkv7GIgG8= 20 | github.com/duckdb/duckdb-go-bindings/linux-arm64 v0.1.24/go.mod h1:kpQSpJmDSSZQ3ikbZR1/8UqecqMeUkWFjFX2xZxlCuI= 21 | github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.24 h1:JOupXaHMMu8zLgq7v9uxPjl1CXSJHlISCxopMiqtkzU= 22 | github.com/duckdb/duckdb-go-bindings/windows-amd64 v0.1.24/go.mod h1:wa+egSGXTPS16NPADFCK1yFyt3VSXxUS6Pt2fLnvRPM= 23 | github.com/duckdb/duckdb-go/arrowmapping v0.0.27 h1:w0XKX+EJpAN4XOQlKxSxSKZq/tCVbRfTRBp98jA0q8M= 24 | github.com/duckdb/duckdb-go/arrowmapping v0.0.27/go.mod h1:VkFx49Icor1bbxOPxAU8jRzwL0nTXICOthxVq4KqOqQ= 25 | github.com/duckdb/duckdb-go/mapping v0.0.27 h1:QEta+qPEKmfhd89U8vnm4MVslj1UscmkyJwu8x+OtME= 26 | github.com/duckdb/duckdb-go/mapping v0.0.27/go.mod h1:7C4QWJWG6UOV9b0iWanfF5ML1ivJPX45Kz+VmlvRlTA= 27 | github.com/duckdb/duckdb-go/v2 v2.5.4 h1:+ip+wPCwf7Eu/dXxp19aLCxwpLUaeOy2UV/peBphXK0= 28 | github.com/duckdb/duckdb-go/v2 v2.5.4/go.mod h1:CeobOFmWpf7MTDb+MW08/zIWP8TQ2jbPbMgGo5761tY= 29 | github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= 30 | github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 31 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 32 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 33 | github.com/fsnotify/fsnotify v1.9.0 
h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= 34 | github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 35 | github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= 36 | github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= 37 | github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= 38 | github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= 39 | github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= 40 | github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 41 | github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU= 42 | github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= 43 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 44 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 45 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 46 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 47 | github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= 48 | github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= 49 | github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= 50 | github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= 51 | github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= 52 | github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= 53 | github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= 54 | github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= 55 | github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= 56 | github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= 57 | github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= 58 | github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 59 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 60 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 61 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 62 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 63 | github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= 64 | github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= 65 | github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= 66 | github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= 67 | github.com/pborman/getopt/v2 v2.1.0 h1:eNfR+r+dWLdWmV8g5OlpyrTYHkhVNxHBdN2cCrJmOEA= 68 | github.com/pborman/getopt/v2 v2.1.0/go.mod h1:4NtW75ny4eBw9fO1bhtNdYTlZKYX5/tBLtsOpwKIKd0= 69 | github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= 70 | github.com/pelletier/go-toml/v2 v2.2.4/go.mod 
h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= 71 | github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= 72 | github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 73 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 74 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 75 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 76 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= 77 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 78 | github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= 79 | github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= 80 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 81 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 82 | github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= 83 | github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= 84 | github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= 85 | github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= 86 | github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= 87 | github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= 88 | github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= 89 | github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 90 | github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= 91 | github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= 92 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 93 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 94 | github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 95 | github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 96 | github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 97 | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 98 | github.com/theckman/httpforwarded v0.4.0 h1:N55vGJT+6ojTnLY3LQCNliJC4TW0P0Pkeys1G1WpX2w= 99 | github.com/theckman/httpforwarded v0.4.0/go.mod h1:GVkFynv6FJreNbgH/bpOU9ITDZ7a5WuzdNCtIMI1pVI= 100 | github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= 101 | github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= 102 | github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= 103 | github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= 104 | go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= 105 | go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= 106 | golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 h1:MDfG8Cvcqlt9XXrmEiD4epKn7VJHZO84hejP9Jmp0MM= 107 | golang.org/x/exp v0.0.0-20251209150349-8475f28825e9/go.mod 
h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= 108 | golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= 109 | golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= 110 | golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= 111 | golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 112 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 113 | golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= 114 | golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 115 | golang.org/x/telemetry v0.0.0-20251208220230-2638a1023523 h1:H52Mhyrc44wBgLTGzq6+0cmuVuF3LURCSXsLMOqfFos= 116 | golang.org/x/telemetry v0.0.0-20251208220230-2638a1023523/go.mod h1:ArQvPJS723nJQietgilmZA+shuB3CZxH1n2iXq9VSfs= 117 | golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= 118 | golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= 119 | golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= 120 | golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= 121 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= 122 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= 123 | gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 124 | gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= 125 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 126 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 127 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 128 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 129 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 130 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 131 | -------------------------------------------------------------------------------- /assets/index.gohtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | DuckDB Tileserver 6 | 7 | 8 | 9 | 95 | 96 | 97 |
[assets/index.gohtml continues for roughly 387 lines, but its markup, inline CSS, and script were stripped when this listing was rendered; only the visible text survives: the page title "DuckDB Tileserver", a "Loading layers..." placeholder for the layer list, and the footer "MVT tiles powered by DuckDB Spatial".]
107 | 385 | 386 | 387 | -------------------------------------------------------------------------------- /internal/data/catalog_test.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | // TestTableIncludeExcludeLogic tests the table filtering logic 9 | func TestTableIncludeExcludeLogic(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | table *Table 13 | includes []string 14 | excludes []string 15 | shouldMatch bool 16 | }{ 17 | { 18 | name: "No includes, no excludes - should include", 19 | table: &Table{ 20 | ID: "public.users", 21 | Schema: "public", 22 | Table: "users", 23 | }, 24 | includes: []string{}, 25 | excludes: []string{}, 26 | shouldMatch: true, 27 | }, 28 | { 29 | name: "Schema in includes - should include", 30 | table: &Table{ 31 | ID: "public.users", 32 | Schema: "public", 33 | Table: "users", 34 | }, 35 | includes: []string{"public"}, 36 | excludes: []string{}, 37 | shouldMatch: true, 38 | }, 39 | { 40 | name: "Table ID in includes - should include", 41 | table: &Table{ 42 | ID: "public.users", 43 | Schema: "public", 44 | Table: "users", 45 | }, 46 | includes: []string{"public.users"}, 47 | excludes: []string{}, 48 | shouldMatch: true, 49 | }, 50 | { 51 | name: "Neither schema nor table ID in includes - should exclude", 52 | table: &Table{ 53 | ID: "public.users", 54 | Schema: "public", 55 | Table: "users", 56 | }, 57 | includes: []string{"private"}, 58 | excludes: []string{}, 59 | shouldMatch: false, 60 | }, 61 | { 62 | name: "Schema in excludes - should exclude", 63 | table: &Table{ 64 | ID: "private.secrets", 65 | Schema: "private", 66 | Table: "secrets", 67 | }, 68 | includes: []string{}, 69 | excludes: []string{"private"}, 70 | shouldMatch: false, 71 | }, 72 | { 73 | name: "Table ID in excludes - should exclude", 74 | table: &Table{ 75 | ID: "public.temp_table", 76 | Schema: "public", 77 | Table: "temp_table", 78 | }, 79 | includes: []string{}, 80 | excludes: []string{"public.temp_table"}, 81 | shouldMatch: false, 82 | }, 83 | { 84 | name: "In includes but also in excludes - should exclude (excludes take precedence)", 85 | table: &Table{ 86 | ID: "public.users", 87 | Schema: "public", 88 | Table: "users", 89 | }, 90 | includes: []string{"public"}, 91 | excludes: []string{"public.users"}, 92 | shouldMatch: false, 93 | }, 94 | { 95 | name: "Case insensitive matching - schema", 96 | table: &Table{ 97 | ID: "Public.Users", 98 | Schema: "Public", 99 | Table: "Users", 100 | }, 101 | includes: []string{"public"}, 102 | excludes: []string{}, 103 | shouldMatch: true, 104 | }, 105 | { 106 | name: "Case insensitive matching - table ID", 107 | table: &Table{ 108 | ID: "Public.Users", 109 | Schema: "Public", 110 | Table: "Users", 111 | }, 112 | includes: []string{"public.users"}, 113 | excludes: []string{}, 114 | shouldMatch: true, 115 | }, 116 | { 117 | name: "Multiple includes, matches one", 118 | table: &Table{ 119 | ID: "schema1.table1", 120 | Schema: "schema1", 121 | Table: "table1", 122 | }, 123 | includes: []string{"schema2", "schema1.table1", "schema3"}, 124 | excludes: []string{}, 125 | shouldMatch: true, 126 | }, 127 | { 128 | name: "Multiple includes, matches none", 129 | table: &Table{ 130 | ID: "schema1.table1", 131 | Schema: "schema1", 132 | Table: "table1", 133 | }, 134 | includes: []string{"schema2", "schema3.table1", "schema4"}, 135 | excludes: []string{}, 136 | shouldMatch: false, 137 | }, 138 | { 139 | name: "Table name without schema in includes", 
140 | table: &Table{ 141 | ID: "users", 142 | Schema: "main", 143 | Table: "users", 144 | }, 145 | includes: []string{"users"}, 146 | excludes: []string{}, 147 | shouldMatch: true, 148 | }, 149 | { 150 | name: "Schema-qualified table in includes, simple table name in table", 151 | table: &Table{ 152 | ID: "users", 153 | Schema: "main", 154 | Table: "users", 155 | }, 156 | includes: []string{"main.users"}, 157 | excludes: []string{}, 158 | shouldMatch: false, // Should not match because table ID is "users", not "main.users" 159 | }, 160 | } 161 | 162 | for _, tt := range tests { 163 | t.Run(tt.name, func(t *testing.T) { 164 | catalog := &CatalogDB{} 165 | catalog.SetIncludeExclude(tt.includes, tt.excludes) 166 | 167 | result := catalog.isIncluded(tt.table) 168 | testEquals(t, tt.shouldMatch, result, fmt.Sprintf("Table filtering for %s", tt.table.ID)) 169 | }) 170 | } 171 | } 172 | 173 | // TestSetIncludeExclude tests the SetIncludeExclude method 174 | func TestSetIncludeExclude(t *testing.T) { 175 | catalog := &CatalogDB{} 176 | 177 | includes := []string{"Public", "Schema1.Table1", "mixed_Case"} 178 | excludes := []string{"Private", "Schema2.TABLE2"} 179 | 180 | catalog.SetIncludeExclude(includes, excludes) 181 | 182 | // Check that all entries are stored in lowercase 183 | expectedIncludes := map[string]string{ 184 | "public": "public", 185 | "schema1.table1": "schema1.table1", 186 | "mixed_case": "mixed_case", 187 | } 188 | 189 | expectedExcludes := map[string]string{ 190 | "private": "private", 191 | "schema2.table2": "schema2.table2", 192 | } 193 | 194 | testEquals(t, expectedIncludes, catalog.tableIncludes, "Table includes") 195 | testEquals(t, expectedExcludes, catalog.tableExcludes, "Table excludes") 196 | } 197 | 198 | // TestIsMatchSchemaTable tests the isMatchSchemaTable function directly 199 | func TestIsMatchSchemaTable(t *testing.T) { 200 | tests := []struct { 201 | name string 202 | table *Table 203 | list map[string]string 204 | shouldMatch bool 205 | }{ 206 | { 207 | name: "Schema matches", 208 | table: &Table{ 209 | ID: "public.users", 210 | Schema: "public", 211 | Table: "users", 212 | }, 213 | list: map[string]string{ 214 | "public": "public", 215 | }, 216 | shouldMatch: true, 217 | }, 218 | { 219 | name: "Table ID matches", 220 | table: &Table{ 221 | ID: "public.users", 222 | Schema: "public", 223 | Table: "users", 224 | }, 225 | list: map[string]string{ 226 | "public.users": "public.users", 227 | }, 228 | shouldMatch: true, 229 | }, 230 | { 231 | name: "No match", 232 | table: &Table{ 233 | ID: "public.users", 234 | Schema: "public", 235 | Table: "users", 236 | }, 237 | list: map[string]string{ 238 | "private": "private", 239 | "public.posts": "public.posts", 240 | }, 241 | shouldMatch: false, 242 | }, 243 | { 244 | name: "Case insensitive schema match", 245 | table: &Table{ 246 | ID: "Public.Users", 247 | Schema: "Public", 248 | Table: "Users", 249 | }, 250 | list: map[string]string{ 251 | "public": "public", 252 | }, 253 | shouldMatch: true, 254 | }, 255 | { 256 | name: "Case insensitive table ID match", 257 | table: &Table{ 258 | ID: "Public.Users", 259 | Schema: "Public", 260 | Table: "Users", 261 | }, 262 | list: map[string]string{ 263 | "public.users": "public.users", 264 | }, 265 | shouldMatch: true, 266 | }, 267 | } 268 | 269 | for _, tt := range tests { 270 | t.Run(tt.name, func(t *testing.T) { 271 | result := isMatchSchemaTable(tt.table, tt.list) 272 | testEquals(t, tt.shouldMatch, result, fmt.Sprintf("Schema/table matching for %s", tt.table.ID)) 273 | }) 274 
| } 275 | } 276 | 277 | // TestEmptyIncludesExcludes tests behavior with empty includes/excludes 278 | func TestEmptyIncludesExcludes(t *testing.T) { 279 | catalog := &CatalogDB{} 280 | 281 | // Test with nil slices 282 | catalog.SetIncludeExclude(nil, nil) 283 | 284 | table := &Table{ 285 | ID: "any.table", 286 | Schema: "any", 287 | Table: "table", 288 | } 289 | 290 | // Should include when no restrictions 291 | result := catalog.isIncluded(table) 292 | testEquals(t, true, result, "Should include when no restrictions") 293 | 294 | // Test with empty slices 295 | catalog.SetIncludeExclude([]string{}, []string{}) 296 | result = catalog.isIncluded(table) 297 | testEquals(t, true, result, "Should include with empty slices") 298 | } 299 | 300 | // TestComplexIncludeExcludeScenarios tests complex real-world scenarios 301 | func TestComplexIncludeExcludeScenarios(t *testing.T) { 302 | tests := []struct { 303 | name string 304 | includes []string 305 | excludes []string 306 | tables []struct { 307 | table *Table 308 | expected bool 309 | } 310 | }{ 311 | { 312 | name: "Include specific schema, exclude specific table", 313 | includes: []string{"public"}, 314 | excludes: []string{"public.temp_logs"}, 315 | tables: []struct { 316 | table *Table 317 | expected bool 318 | }{ 319 | { 320 | table: &Table{ 321 | ID: "public.users", 322 | Schema: "public", 323 | Table: "users", 324 | }, 325 | expected: true, 326 | }, 327 | { 328 | table: &Table{ 329 | ID: "public.temp_logs", 330 | Schema: "public", 331 | Table: "temp_logs", 332 | }, 333 | expected: false, // Excluded 334 | }, 335 | { 336 | table: &Table{ 337 | ID: "private.secrets", 338 | Schema: "private", 339 | Table: "secrets", 340 | }, 341 | expected: false, // Not in includes 342 | }, 343 | }, 344 | }, 345 | { 346 | name: "Multiple schemas, multiple excludes", 347 | includes: []string{"public", "reports"}, 348 | excludes: []string{"public.temp", "reports.legacy"}, 349 | tables: []struct { 350 | table *Table 351 | expected bool 352 | }{ 353 | { 354 | table: &Table{ 355 | ID: "public.users", 356 | Schema: "public", 357 | Table: "users", 358 | }, 359 | expected: true, 360 | }, 361 | { 362 | table: &Table{ 363 | ID: "reports.monthly", 364 | Schema: "reports", 365 | Table: "monthly", 366 | }, 367 | expected: true, 368 | }, 369 | { 370 | table: &Table{ 371 | ID: "public.temp", 372 | Schema: "public", 373 | Table: "temp", 374 | }, 375 | expected: false, // Excluded 376 | }, 377 | { 378 | table: &Table{ 379 | ID: "reports.legacy", 380 | Schema: "reports", 381 | Table: "legacy", 382 | }, 383 | expected: false, // Excluded 384 | }, 385 | }, 386 | }, 387 | } 388 | 389 | for _, tt := range tests { 390 | t.Run(tt.name, func(t *testing.T) { 391 | catalog := &CatalogDB{} 392 | catalog.SetIncludeExclude(tt.includes, tt.excludes) 393 | 394 | for i, tableTest := range tt.tables { 395 | result := catalog.isIncluded(tableTest.table) 396 | testEquals(t, tableTest.expected, result, 397 | fmt.Sprintf("Table %d (%s) filtering", i, tableTest.table.ID)) 398 | } 399 | }) 400 | } 401 | } 402 | 403 | // TestTableExcludesOnlyScenarios tests scenarios where only excludes are specified (no includes) 404 | func TestTableExcludesOnlyScenarios(t *testing.T) { 405 | tests := []struct { 406 | name string 407 | excludes []string 408 | tables []struct { 409 | table *Table 410 | expected bool 411 | } 412 | }{ 413 | { 414 | name: "Exclude sensitive schemas", 415 | excludes: []string{"private", "system", "internal"}, 416 | tables: []struct { 417 | table *Table 418 | expected bool 419 | }{ 
420 | { 421 | table: &Table{ 422 | ID: "public.users", 423 | Schema: "public", 424 | Table: "users", 425 | }, 426 | expected: true, // Not excluded 427 | }, 428 | { 429 | table: &Table{ 430 | ID: "private.secrets", 431 | Schema: "private", 432 | Table: "secrets", 433 | }, 434 | expected: false, // Excluded by schema 435 | }, 436 | { 437 | table: &Table{ 438 | ID: "system.config", 439 | Schema: "system", 440 | Table: "config", 441 | }, 442 | expected: false, // Excluded by schema 443 | }, 444 | { 445 | table: &Table{ 446 | ID: "internal.migrations", 447 | Schema: "internal", 448 | Table: "migrations", 449 | }, 450 | expected: false, // Excluded by schema 451 | }, 452 | { 453 | table: &Table{ 454 | ID: "reports.monthly", 455 | Schema: "reports", 456 | Table: "monthly", 457 | }, 458 | expected: true, // Not excluded 459 | }, 460 | }, 461 | }, 462 | { 463 | name: "Exclude specific problematic tables", 464 | excludes: []string{"public.temp_cache", "logs.debug_info", "staging.test_data"}, 465 | tables: []struct { 466 | table *Table 467 | expected bool 468 | }{ 469 | { 470 | table: &Table{ 471 | ID: "public.users", 472 | Schema: "public", 473 | Table: "users", 474 | }, 475 | expected: true, // Not excluded 476 | }, 477 | { 478 | table: &Table{ 479 | ID: "public.temp_cache", 480 | Schema: "public", 481 | Table: "temp_cache", 482 | }, 483 | expected: false, // Excluded by table ID 484 | }, 485 | { 486 | table: &Table{ 487 | ID: "logs.access", 488 | Schema: "logs", 489 | Table: "access", 490 | }, 491 | expected: true, // Not excluded 492 | }, 493 | { 494 | table: &Table{ 495 | ID: "logs.debug_info", 496 | Schema: "logs", 497 | Table: "debug_info", 498 | }, 499 | expected: false, // Excluded by table ID 500 | }, 501 | { 502 | table: &Table{ 503 | ID: "staging.test_data", 504 | Schema: "staging", 505 | Table: "test_data", 506 | }, 507 | expected: false, // Excluded by table ID 508 | }, 509 | { 510 | table: &Table{ 511 | ID: "staging.reports", 512 | Schema: "staging", 513 | Table: "reports", 514 | }, 515 | expected: true, // Not excluded 516 | }, 517 | }, 518 | }, 519 | { 520 | name: "Mixed schema and table exclusions", 521 | excludes: []string{"temp", "debug.traces", "private.keys", "logs"}, 522 | tables: []struct { 523 | table *Table 524 | expected bool 525 | }{ 526 | { 527 | table: &Table{ 528 | ID: "public.users", 529 | Schema: "public", 530 | Table: "users", 531 | }, 532 | expected: true, // Not excluded 533 | }, 534 | { 535 | table: &Table{ 536 | ID: "temp.calculations", 537 | Schema: "temp", 538 | Table: "calculations", 539 | }, 540 | expected: false, // Excluded by schema 541 | }, 542 | { 543 | table: &Table{ 544 | ID: "debug.traces", 545 | Schema: "debug", 546 | Table: "traces", 547 | }, 548 | expected: false, // Excluded by table ID 549 | }, 550 | { 551 | table: &Table{ 552 | ID: "debug.performance", 553 | Schema: "debug", 554 | Table: "performance", 555 | }, 556 | expected: true, // Not excluded (only debug.traces is excluded) 557 | }, 558 | { 559 | table: &Table{ 560 | ID: "private.keys", 561 | Schema: "private", 562 | Table: "keys", 563 | }, 564 | expected: false, // Excluded by table ID 565 | }, 566 | { 567 | table: &Table{ 568 | ID: "private.settings", 569 | Schema: "private", 570 | Table: "settings", 571 | }, 572 | expected: true, // Not excluded (only private.keys is excluded) 573 | }, 574 | { 575 | table: &Table{ 576 | ID: "logs.access", 577 | Schema: "logs", 578 | Table: "access", 579 | }, 580 | expected: false, // Excluded by schema 581 | }, 582 | }, 583 | }, 584 | } 585 | 586 | for 
_, tt := range tests { 587 | t.Run(tt.name, func(t *testing.T) { 588 | catalog := &CatalogDB{} 589 | catalog.SetIncludeExclude([]string{}, tt.excludes) // No includes, only excludes 590 | 591 | for i, tableTest := range tt.tables { 592 | result := catalog.isIncluded(tableTest.table) 593 | testEquals(t, tableTest.expected, result, 594 | fmt.Sprintf("Table %d (%s) exclusion filtering", i, tableTest.table.ID)) 595 | } 596 | }) 597 | } 598 | } 599 | -------------------------------------------------------------------------------- /internal/data/tiles.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | "math" 8 | 9 | log "github.com/sirupsen/logrus" 10 | ) 11 | 12 | const ( 13 | SRID_3857 = 3857 // Web Mercator 14 | ) 15 | 16 | // Layer represents a spatial layer that can serve MVT tiles 17 | type Layer struct { 18 | Name string `json:"name"` 19 | Table string `json:"table"` 20 | GeometryColumn string `json:"geometry_column"` 21 | GeometryType string `json:"geometry_type"` 22 | Srid int `json:"srid"` // SRID of bounds (always 3857 for API responses) 23 | SourceSrid int `json:"-"` // SRID of source data (not exposed in API) 24 | Bounds *Extent `json:"bounds,omitempty"` 25 | Properties []string `json:"properties,omitempty"` 26 | PropertyTypes map[string]string `json:"-"` // Column name -> data type mapping (not exposed in API) 27 | } 28 | 29 | // TileJSON represents the TileJSON specification metadata 30 | type TileJSON struct { 31 | TileJSON string `json:"tilejson"` 32 | Name string `json:"name,omitempty"` 33 | Description string `json:"description,omitempty"` 34 | Version string `json:"version,omitempty"` 35 | Scheme string `json:"scheme,omitempty"` 36 | Tiles []string `json:"tiles"` 37 | MinZoom int `json:"minzoom,omitempty"` 38 | MaxZoom int `json:"maxzoom,omitempty"` 39 | Bounds []float64 `json:"bounds,omitempty"` 40 | Center []float64 `json:"center,omitempty"` 41 | VectorLayers []VectorLayer `json:"vector_layers,omitempty"` 42 | } 43 | 44 | // VectorLayer represents a layer in the TileJSON spec 45 | type VectorLayer struct { 46 | ID string `json:"id"` 47 | Description string `json:"description,omitempty"` 48 | MinZoom int `json:"minzoom,omitempty"` 49 | MaxZoom int `json:"maxzoom,omitempty"` 50 | Fields map[string]string `json:"fields,omitempty"` 51 | } 52 | 53 | // GetLayers returns all tables with geometry columns 54 | func (cat *CatalogDB) GetLayers() ([]*Layer, error) { 55 | query := ` 56 | SELECT 57 | table_name, 58 | column_name as geometry_column 59 | FROM duckdb_columns 60 | WHERE data_type = 'GEOMETRY' 61 | ORDER BY table_name 62 | ` 63 | 64 | rows, err := cat.dbconn.Query(query) 65 | if err != nil { 66 | return nil, fmt.Errorf("error querying layers: %w", err) 67 | } 68 | defer rows.Close() 69 | 70 | var layers []*Layer 71 | seenTables := make(map[string]bool) 72 | 73 | for rows.Next() { 74 | var tableName, geomColumn string 75 | if err := rows.Scan(&tableName, &geomColumn); err != nil { 76 | log.Warnf("Error scanning layer row: %v", err) 77 | continue 78 | } 79 | 80 | // Skip if we've already seen this table (first geometry column wins) 81 | if seenTables[tableName] { 82 | log.Warnf("Table %s has multiple geometry columns, using first one: %s", tableName, geomColumn) 83 | continue 84 | } 85 | seenTables[tableName] = true 86 | 87 | // Apply include/exclude filters 88 | if !cat.isTableIncluded(tableName) { 89 | continue 90 | } 91 | 92 | layer := &Layer{ 93 | Name: 
tableName, 94 | Table: tableName, 95 | GeometryColumn: geomColumn, 96 | } 97 | 98 | // Get full metadata including bounds 99 | if err := cat.enrichLayerMetadata(layer); err != nil { 100 | log.Warnf("Error enriching layer %s metadata: %v", tableName, err) 101 | // Continue anyway with basic info 102 | } 103 | 104 | layers = append(layers, layer) 105 | } 106 | 107 | if err := rows.Err(); err != nil { 108 | return nil, fmt.Errorf("error iterating layers: %w", err) 109 | } 110 | 111 | log.Infof("Found %d layers with geometry columns", len(layers)) 112 | return layers, nil 113 | } 114 | 115 | // enrichLayerMetadataLightweight adds only geometry type and properties (skips expensive bounds calculation) 116 | func (cat *CatalogDB) enrichLayerMetadataLightweight(layer *Layer) error { 117 | // Get geometry type 118 | query := fmt.Sprintf(` 119 | SELECT ST_GeometryType(%s) as geom_type 120 | FROM %s 121 | WHERE %s IS NOT NULL 122 | LIMIT 1 123 | `, layer.GeometryColumn, layer.Table, layer.GeometryColumn) 124 | 125 | var geomType sql.NullString 126 | err := cat.dbconn.QueryRow(query).Scan(&geomType) 127 | if err != nil && err != sql.ErrNoRows { 128 | return fmt.Errorf("error getting geometry metadata: %w", err) 129 | } 130 | 131 | if geomType.Valid { 132 | layer.GeometryType = geomType.String 133 | } 134 | 135 | // Get property columns (non-geometry columns) 136 | propsQuery := fmt.Sprintf(` 137 | SELECT column_name 138 | FROM duckdb_columns 139 | WHERE table_name = '%s' AND data_type != 'GEOMETRY' 140 | ORDER BY column_name 141 | `, layer.Table) 142 | 143 | rows, err := cat.dbconn.Query(propsQuery) 144 | if err != nil { 145 | return fmt.Errorf("error getting properties: %w", err) 146 | } 147 | defer rows.Close() 148 | 149 | var properties []string 150 | for rows.Next() { 151 | var col string 152 | if err := rows.Scan(&col); err != nil { 153 | continue 154 | } 155 | properties = append(properties, col) 156 | } 157 | layer.Properties = properties 158 | 159 | return nil 160 | } 161 | 162 | // enrichLayerMetadata adds geometry type, SRID, bounds, and property list to a layer 163 | func (cat *CatalogDB) enrichLayerMetadata(layer *Layer) error { 164 | // Get geometry type 165 | // Note: DuckDB Spatial doesn't support ST_SRID() - SRID is not stored per-geometry 166 | query := fmt.Sprintf(` 167 | SELECT ST_GeometryType(%s) as geom_type 168 | FROM %s 169 | WHERE %s IS NOT NULL 170 | LIMIT 1 171 | `, layer.GeometryColumn, layer.Table, layer.GeometryColumn) 172 | 173 | var geomType sql.NullString 174 | err := cat.dbconn.QueryRow(query).Scan(&geomType) 175 | if err != nil && err != sql.ErrNoRows { 176 | return fmt.Errorf("error getting geometry metadata: %w", err) 177 | } 178 | 179 | if geomType.Valid { 180 | layer.GeometryType = geomType.String 181 | } 182 | 183 | // Get native bounds - calculate extent ONCE to avoid expensive double table scan 184 | // DuckDB Spatial doesn't store SRID per-geometry, so we detect it from coordinate ranges 185 | // Use ST_Extent_Agg to aggregate all geometries into a single bounding box 186 | nativeBoundsQuery := fmt.Sprintf(` 187 | WITH extent_calc AS ( 188 | SELECT ST_Extent_Agg(%s) as extent 189 | FROM %s 190 | WHERE %s IS NOT NULL 191 | ) 192 | SELECT 193 | ST_XMin(extent) as minx, 194 | ST_YMin(extent) as miny, 195 | ST_XMax(extent) as maxx, 196 | ST_YMax(extent) as maxy, 197 | -- Also get transformed bounds in one query to avoid double table scan 198 | -- Note: ST_Transform swaps X and Y! 
So ST_XMin gives us the minimum Y value (latitude) 199 | -- We swap them back so our API returns Minx=longitude, Miny=latitude 200 | ST_YMin(ST_Transform(extent, 'EPSG:4326', 'EPSG:3857')) as minx_3857, 201 | ST_XMin(ST_Transform(extent, 'EPSG:4326', 'EPSG:3857')) as miny_3857, 202 | ST_YMax(ST_Transform(extent, 'EPSG:4326', 'EPSG:3857')) as maxx_3857, 203 | ST_XMax(ST_Transform(extent, 'EPSG:4326', 'EPSG:3857')) as maxy_3857 204 | FROM extent_calc 205 | `, layer.GeometryColumn, layer.Table, layer.GeometryColumn) 206 | 207 | var nativeMinx, nativeMiny, nativeMaxx, nativeMaxy sql.NullFloat64 208 | var minx3857, miny3857, maxx3857, maxy3857 sql.NullFloat64 209 | err = cat.dbconn.QueryRow(nativeBoundsQuery).Scan( 210 | &nativeMinx, &nativeMiny, &nativeMaxx, &nativeMaxy, 211 | &minx3857, &miny3857, &maxx3857, &maxy3857) 212 | if err != nil && err != sql.ErrNoRows { 213 | log.Warnf("Error getting bounds for layer %s: %v", layer.Name, err) 214 | return err 215 | } 216 | 217 | // Detect coordinate system from bounds 218 | // EPSG:3857 (Web Mercator) has values roughly in range [-20037508, 20037508] 219 | // EPSG:4326 (WGS84) has values in range [-180, 180] for lon, [-90, 90] for lat 220 | sourceSrid := SRID_4326 // Default assumption 221 | if nativeMinx.Valid && nativeMaxx.Valid { 222 | maxAbsX := math.Max(math.Abs(nativeMinx.Float64), math.Abs(nativeMaxx.Float64)) 223 | if maxAbsX > 360 { 224 | // Likely already in Web Mercator (EPSG:3857) 225 | sourceSrid = SRID_3857 226 | } 227 | } 228 | 229 | // Use appropriate bounds for EPSG:3857 output 230 | // If data is already in 3857, use native bounds; otherwise use transformed bounds 231 | var minx, miny, maxx, maxy sql.NullFloat64 232 | if sourceSrid == SRID_3857 { 233 | // Already in Web Mercator - DuckDB ST_Extent returns swapped coordinates for EPSG:3857 234 | // ST_XMin/ST_YMin are swapped, so we swap them to get correct longitude/latitude order 235 | minx, miny, maxx, maxy = nativeMiny, nativeMinx, nativeMaxy, nativeMaxx 236 | } else { 237 | // Data is in EPSG:4326, use transformed bounds 238 | // The SQL query swapped them (lines 199-202), so we need to swap back 239 | minx, miny, maxx, maxy = miny3857, minx3857, maxy3857, maxx3857 240 | } 241 | 242 | if minx.Valid && miny.Valid && maxx.Valid && maxy.Valid { 243 | layer.Bounds = &Extent{ 244 | Minx: minx.Float64, 245 | Miny: miny.Float64, 246 | Maxx: maxx.Float64, 247 | Maxy: maxy.Float64, 248 | } 249 | // Store source SRID for tile generation, set API SRID to 3857 since bounds are in Web Mercator 250 | layer.SourceSrid = sourceSrid 251 | layer.Srid = SRID_3857 252 | } 253 | 254 | // Get property columns (non-geometry columns) 255 | propsQuery := fmt.Sprintf(` 256 | SELECT column_name 257 | FROM duckdb_columns 258 | WHERE table_name = '%s' AND data_type != 'GEOMETRY' 259 | ORDER BY column_name 260 | `, layer.Table) 261 | 262 | rows, err := cat.dbconn.Query(propsQuery) 263 | if err != nil { 264 | return fmt.Errorf("error getting properties: %w", err) 265 | } 266 | defer rows.Close() 267 | 268 | var properties []string 269 | for rows.Next() { 270 | var col string 271 | if err := rows.Scan(&col); err != nil { 272 | continue 273 | } 274 | properties = append(properties, col) 275 | } 276 | layer.Properties = properties 277 | 278 | return nil 279 | } 280 | 281 | // isTableIncluded checks if a table should be included based on include/exclude lists 282 | func (cat *CatalogDB) isTableIncluded(tableName string) bool { 283 | // If includes list is specified and table not in it, exclude 284 | if 
len(cat.tableIncludes) > 0 { 285 | if _, ok := cat.tableIncludes[tableName]; !ok { 286 | return false 287 | } 288 | } 289 | 290 | // If table is in excludes list, exclude 291 | if len(cat.tableExcludes) > 0 { 292 | if _, ok := cat.tableExcludes[tableName]; ok { 293 | return false 294 | } 295 | } 296 | 297 | return true 298 | } 299 | 300 | // GetLayerByName returns a single layer by name with lightweight metadata for tile generation 301 | // Uses an in-memory cache to avoid repeated metadata queries 302 | func (cat *CatalogDB) GetLayerByName(name string) (*Layer, error) { 303 | // Try cache first (fast path with read lock) 304 | cat.layerCacheMutex.RLock() 305 | cached, ok := cat.layerMetadataCache[name] 306 | cat.layerCacheMutex.RUnlock() 307 | 308 | if ok { 309 | log.Debugf("Layer metadata cache HIT: %s", name) 310 | return cached, nil 311 | } 312 | 313 | log.Debugf("Layer metadata cache MISS: %s", name) 314 | 315 | // Cache miss - query database 316 | layer, err := cat.queryLayerMetadata(name) 317 | if err != nil { 318 | return nil, err 319 | } 320 | 321 | // Store in cache (write lock) 322 | cat.layerCacheMutex.Lock() 323 | cat.layerMetadataCache[name] = layer 324 | cat.layerCacheMutex.Unlock() 325 | 326 | log.Debugf("Layer metadata cached: %s", name) 327 | 328 | return layer, nil 329 | } 330 | 331 | // queryLayerMetadata queries the database for layer metadata (not cached) 332 | func (cat *CatalogDB) queryLayerMetadata(name string) (*Layer, error) { 333 | // Query for just this specific table's geometry column 334 | query := ` 335 | SELECT column_name as geometry_column 336 | FROM duckdb_columns 337 | WHERE table_name = $1 AND data_type = 'GEOMETRY' 338 | LIMIT 1 339 | ` 340 | 341 | var geomColumn string 342 | err := cat.dbconn.QueryRow(query, name).Scan(&geomColumn) 343 | if err != nil { 344 | if err == sql.ErrNoRows { 345 | return nil, fmt.Errorf("layer not found: %s", name) 346 | } 347 | return nil, fmt.Errorf("error querying layer %s: %w", name, err) 348 | } 349 | 350 | // Check if table is included 351 | if !cat.isTableIncluded(name) { 352 | return nil, fmt.Errorf("layer not included: %s", name) 353 | } 354 | 355 | layer := &Layer{ 356 | Name: name, 357 | Table: name, 358 | GeometryColumn: geomColumn, 359 | } 360 | 361 | // Detect source SRID without calculating full bounds (lightweight check) 362 | // Sample one geometry to check coordinate range 363 | sridQuery := fmt.Sprintf(` 364 | SELECT ST_X(ST_Centroid(%s)) as x 365 | FROM %s 366 | WHERE %s IS NOT NULL 367 | LIMIT 1 368 | `, geomColumn, name, geomColumn) 369 | 370 | var sampleX sql.NullFloat64 371 | err = cat.dbconn.QueryRow(sridQuery).Scan(&sampleX) 372 | if err == nil && sampleX.Valid { 373 | // Detect based on coordinate range 374 | if math.Abs(sampleX.Float64) > 360 { 375 | layer.SourceSrid = SRID_3857 376 | } else { 377 | layer.SourceSrid = SRID_4326 378 | } 379 | } else { 380 | // Default to 4326 if we can't detect 381 | layer.SourceSrid = SRID_4326 382 | } 383 | 384 | // Get property columns (non-geometry columns) for MVT generation 385 | // This is lightweight and necessary to include properties in tiles 386 | // We also need data types to handle casting of unsupported types 387 | propsQuery := fmt.Sprintf(` 388 | SELECT column_name, data_type 389 | FROM duckdb_columns 390 | WHERE table_name = '%s' AND data_type != 'GEOMETRY' 391 | ORDER BY column_name 392 | `, name) 393 | 394 | rows, err := cat.dbconn.Query(propsQuery) 395 | if err != nil { 396 | return nil, fmt.Errorf("error getting properties: %w", err) 397 | } 
398 | defer rows.Close() 399 | 400 | var properties []string 401 | propertyTypes := make(map[string]string) 402 | for rows.Next() { 403 | var col, dataType string 404 | if err := rows.Scan(&col, &dataType); err != nil { 405 | continue 406 | } 407 | properties = append(properties, col) 408 | propertyTypes[col] = dataType 409 | } 410 | layer.Properties = properties 411 | layer.PropertyTypes = propertyTypes 412 | 413 | return layer, nil 414 | } 415 | 416 | // GenerateTile generates an MVT tile for the given layer and tile coordinates 417 | // Uses the shared connection pool for efficient resource management 418 | func (cat *CatalogDB) GenerateTile(ctx context.Context, layerName string, z, x, y int) ([]byte, error) { 419 | layer, err := cat.GetLayerByName(layerName) 420 | if err != nil { 421 | return nil, err 422 | } 423 | 424 | // Use the shared connection pool (connection is automatically acquired and released) 425 | db := cat.dbconn 426 | 427 | // Build the SQL query using ST_AsMVT following the Python reference implementation 428 | // https://github.com/bmcandr/fast-geoparquet-features/blob/main/app/main.py#L352-L418 429 | 430 | // Transform geometry to Web Mercator (EPSG:3857) for tiles if needed 431 | // DuckDB Spatial requires string CRS identifiers: ST_Transform(geom, 'source_crs', 'dest_crs', always_xy := true) 432 | geomExpr := layer.GeometryColumn 433 | if layer.SourceSrid != SRID_3857 && layer.SourceSrid != 0 { 434 | geomExpr = fmt.Sprintf("ST_Transform(%s, 'EPSG:4326', 'EPSG:3857', always_xy := true)", layer.GeometryColumn) 435 | } 436 | 437 | // Build column list for properties (all non-geometry columns) 438 | // We must not include the original geometry column since ST_AsMVT only allows one geometry column 439 | // Cast unsupported types to supported ones for MVT encoding 440 | // ST_AsMVT supports: VARCHAR, FLOAT, DOUBLE, INTEGER, BIGINT, BOOLEAN 441 | propertyColumns := "" 442 | if len(layer.Properties) > 0 { 443 | for i, prop := range layer.Properties { 444 | if i > 0 { 445 | propertyColumns += ", " 446 | } 447 | 448 | // Check if type needs casting for MVT compatibility 449 | // MVT spec only supports: VARCHAR, BOOLEAN, INTEGER, BIGINT, FLOAT, DOUBLE 450 | dataType := layer.PropertyTypes[prop] 451 | needsCast := false 452 | castToDouble := false 453 | 454 | // Check for complex/composite types (need VARCHAR cast) 455 | // Use strings.HasPrefix to properly detect type prefixes 456 | if len(dataType) >= 3 { 457 | if dataType[:3] == "MAP" || dataType[:3] == "map" { 458 | needsCast = true 459 | } else if len(dataType) >= 4 && (dataType[:4] == "LIST" || dataType[:4] == "list") { 460 | needsCast = true 461 | } else if len(dataType) >= 5 && (dataType[:5] == "ARRAY" || dataType[:5] == "array") { 462 | needsCast = true 463 | } else if len(dataType) >= 5 && (dataType[:5] == "UNION" || dataType[:5] == "union") { 464 | needsCast = true 465 | } else if len(dataType) >= 6 && (dataType[:6] == "STRUCT" || dataType[:6] == "struct") { 466 | needsCast = true 467 | } 468 | } 469 | 470 | // Check for DECIMAL/NUMERIC (need DOUBLE cast for precision) 471 | if len(dataType) >= 7 && (dataType[:7] == "DECIMAL" || dataType[:7] == "NUMERIC") { 472 | needsCast = true 473 | castToDouble = true 474 | } 475 | 476 | // Check specific unsupported types 477 | switch dataType { 478 | // Temporal types 479 | case "DATE", "TIME", "TIMESTAMP", "TIMESTAMP WITH TIME ZONE", "TIMESTAMPTZ", "INTERVAL": 480 | needsCast = true 481 | // Binary/Special types 482 | case "BLOB", "BIT", "UUID", "JSON": 483 | needsCast = true 
484 | // Large numeric types 485 | case "HUGEINT": 486 | needsCast = true 487 | } 488 | 489 | if needsCast { 490 | // Cast to DOUBLE for DECIMAL/NUMERIC, VARCHAR for all others 491 | if castToDouble { 492 | propertyColumns += fmt.Sprintf("CAST(%s AS DOUBLE) as %s", prop, prop) 493 | } else { 494 | propertyColumns += fmt.Sprintf("CAST(%s AS VARCHAR) as %s", prop, prop) 495 | } 496 | } else { 497 | // Type is MVT-compatible, use as-is 498 | propertyColumns += prop 499 | } 500 | } 501 | propertyColumns += ", " 502 | } 503 | 504 | // The MVT generation follows this pattern: 505 | // 1. Filter features that intersect the tile envelope 506 | // 2. Transform geometries to EPSG:3857 if needed 507 | // 3. Clip geometries to tile extent using ST_AsMVTGeom 508 | // 4. Aggregate into MVT format using ST_AsMVT 509 | query := fmt.Sprintf(` 510 | WITH tile_bounds AS ( 511 | SELECT ST_TileEnvelope($1::INTEGER, $2::INTEGER, $3::INTEGER) as envelope, 512 | ST_Extent(ST_TileEnvelope($1::INTEGER, $2::INTEGER, $3::INTEGER)) as extent 513 | ), 514 | features AS ( 515 | SELECT 516 | %sST_AsMVTGeom( 517 | %s, 518 | (SELECT extent FROM tile_bounds) 519 | ) as geom 520 | FROM %s, tile_bounds 521 | WHERE ST_Intersects(%s, tile_bounds.envelope) 522 | ) 523 | SELECT ST_AsMVT(features, '%s') 524 | FROM features 525 | WHERE geom IS NOT NULL 526 | `, propertyColumns, geomExpr, layer.Table, geomExpr, layerName) 527 | 528 | log.Debugf("Generating tile for layer=%s z=%d x=%d y=%d", layerName, z, x, y) 529 | 530 | var tileData []byte 531 | err = db.QueryRowContext(ctx, query, z, x, y).Scan(&tileData) 532 | if err != nil { 533 | return nil, fmt.Errorf("error generating tile: %w", err) 534 | } 535 | 536 | // Return empty tile if no data 537 | // ST_AsMVT returns NULL or minimal MVT when there are no features 538 | if len(tileData) == 0 { 539 | return []byte{}, nil 540 | } 541 | 542 | // Some MVT clients can't parse very small MVT blobs (< 10 bytes) 543 | // These are typically empty layers with minimal structure 544 | // Return empty to trigger 204 No Content response 545 | if len(tileData) < 10 { 546 | log.Debugf("Tile data too small (%d bytes), treating as empty", len(tileData)) 547 | return []byte{}, nil 548 | } 549 | 550 | log.Debugf("Generated tile with %d bytes", len(tileData)) 551 | return tileData, nil 552 | } 553 | 554 | // GetTileJSON returns TileJSON metadata for a layer 555 | func (cat *CatalogDB) GetTileJSON(layerName string, baseURL string) (*TileJSON, error) { 556 | layer, err := cat.GetLayerByName(layerName) 557 | if err != nil { 558 | return nil, err 559 | } 560 | 561 | tileURL := fmt.Sprintf("%s/tiles/%s/{z}/{x}/{y}.mvt", baseURL, layerName) 562 | 563 | tj := &TileJSON{ 564 | TileJSON: "3.0.0", 565 | Name: layer.Name, 566 | Version: "1.0.0", 567 | Scheme: "xyz", 568 | Tiles: []string{tileURL}, 569 | MinZoom: 0, 570 | MaxZoom: 22, 571 | } 572 | 573 | // Add bounds if available 574 | if layer.Bounds != nil { 575 | tj.Bounds = []float64{ 576 | layer.Bounds.Minx, 577 | layer.Bounds.Miny, 578 | layer.Bounds.Maxx, 579 | layer.Bounds.Maxy, 580 | } 581 | 582 | // Calculate center point 583 | centerX := (layer.Bounds.Minx + layer.Bounds.Maxx) / 2 584 | centerY := (layer.Bounds.Miny + layer.Bounds.Maxy) / 2 585 | tj.Center = []float64{centerX, centerY, 10} // default zoom 10 586 | } 587 | 588 | // Add vector layer metadata 589 | fields := make(map[string]string) 590 | for _, prop := range layer.Properties { 591 | fields[prop] = "string" // simplified - could determine actual type 592 | } 593 | 594 | tj.VectorLayers = 
[]VectorLayer{ 595 | { 596 | ID: layerName, 597 | MinZoom: 0, 598 | MaxZoom: 22, 599 | Fields: fields, 600 | }, 601 | } 602 | 603 | return tj, nil 604 | } 605 | --------------------------------------------------------------------------------
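As a usage illustration (not a file from the repository), the following is a minimal sketch of how the catalog API in internal/data/tiles.go might be driven directly from Go. It assumes the snippet lives inside the duckdb-tileserver module (so the internal import is permitted), that a *data.CatalogDB has already been constructed and configured elsewhere, and that the base URL and layer choice are arbitrary; GetLayers, GetTileJSON, and GenerateTile are the methods shown above.

// Package example is an illustrative sketch only; it is not part of the repository.
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/tobilg/duckdb-tileserver/internal/data"
)

// DumpLayerInfo lists the layers the catalog exposes, prints the TileJSON tile
// URL template for the first one, and renders a single tile for it.
// cat is assumed to be a ready-to-use *data.CatalogDB built elsewhere.
func DumpLayerInfo(cat *data.CatalogDB) error {
	layers, err := cat.GetLayers()
	if err != nil {
		return fmt.Errorf("listing layers: %w", err)
	}
	if len(layers) == 0 {
		return fmt.Errorf("no layers with geometry columns found")
	}

	// The base URL is an arbitrary placeholder; the server's real address and
	// port come from its configuration.
	name := layers[0].Name
	tj, err := cat.GetTileJSON(name, "http://localhost:7800")
	if err != nil {
		return fmt.Errorf("tilejson for %s: %w", name, err)
	}
	fmt.Printf("layer %q: tiles at %s\n", name, tj.Tiles[0])

	// GenerateTile takes zoom/x/y in the usual XYZ scheme; z=0, x=0, y=0 is the
	// whole-world tile. An empty byte slice means the tile contains no features.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	tile, err := cat.GenerateTile(ctx, name, 0, 0, 0)
	if err != nil {
		return fmt.Errorf("generating tile for %s: %w", name, err)
	}
	fmt.Printf("z0/0/0 tile for %q: %d bytes\n", name, len(tile))
	return nil
}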