├── .github └── workflows │ ├── publish.yml │ └── test.yml ├── .gitignore ├── .goreleaser.yml ├── Dockerfile ├── LICENSE ├── README.md ├── caddy ├── Caddyfile └── pmtiles_proxy.go ├── examples └── minimal.go ├── go.mod ├── go.sum ├── main.go └── pmtiles ├── bitmap.go ├── bitmap_test.go ├── bucket.go ├── bucket_test.go ├── cluster.go ├── cluster_test.go ├── convert.go ├── convert_test.go ├── directory.go ├── directory_test.go ├── edit.go ├── edit_test.go ├── extract.go ├── extract_test.go ├── fixtures ├── test_fixture_1.pmtiles └── unclustered.pmtiles ├── makesync.go ├── makesync_test.go ├── region.go ├── region_test.go ├── server.go ├── server_metrics.go ├── server_test.go ├── show.go ├── show_test.go ├── sync.go ├── sync_test.go ├── tile_id.go ├── tile_id_test.go ├── tilejson.go ├── tilejson_test.go ├── upload.go ├── upload_test.go └── verify.go /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: publish 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - 'main' 8 | tags: 9 | - 'v*' 10 | pull_request: 11 | branches: 12 | - 'main' 13 | 14 | jobs: 15 | docker-build: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v4 20 | - name: Set up QEMU 21 | uses: docker/setup-qemu-action@v3 22 | - name: Set up Docker Buildx 23 | uses: docker/setup-buildx-action@v3 24 | - uses: docker/metadata-action@v5 25 | id: meta 26 | with: 27 | images: | 28 | protomaps/go-pmtiles 29 | ghcr.io/${{ github.repository }} 30 | - name: Login to Docker Hub 31 | uses: docker/login-action@v3 32 | with: 33 | username: protomaps 34 | password: ${{ secrets.DOCKER_PASSWORD }} 35 | - name: Login to Github Container Registry 36 | uses: docker/login-action@v3 37 | with: 38 | registry: ghcr.io 39 | username: ${{ github.actor }} 40 | password: ${{ secrets.GITHUB_TOKEN }} 41 | - name: Build and push 42 | uses: docker/build-push-action@v5 43 | with: 44 | context: . 
45 | platforms: linux/amd64,linux/arm64 46 | push: ${{ github.event_name != 'pull_request' }} 47 | provenance: false 48 | tags: ${{ steps.meta.outputs.tags }} 49 | labels: ${{ steps.meta.outputs.labels }} 50 | outputs: type=image,name=target,annotation-index.org.opencontainers.image.description=See https://github.com/protomaps/go-pmtiles for more info. 51 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - "main" 7 | 8 | jobs: 9 | test: 10 | runs-on: ${{ matrix.os }} 11 | strategy: 12 | matrix: 13 | os: [ubuntu-latest, windows-latest] 14 | steps: 15 | - uses: actions/checkout@v3 16 | - uses: actions/setup-go@v3 17 | with: 18 | go-version: "^1.22.7" 19 | - run: go test ./pmtiles 20 | fmt_vet_lint: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v3 24 | - uses: actions/setup-go@v3 25 | with: 26 | go-version: "^1.22.7" 27 | - run: if [ "$(gofmt -s -l . 
| wc -l)" -gt 0 ]; then exit 1; fi 28 | - run: go vet caddy/pmtiles_proxy.go 29 | - run: go vet main.go 30 | - run: go vet ./pmtiles 31 | - name: Run Revive Action by pulling pre-built image 32 | uses: docker://morphy/revive-action:v2 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dist/ 2 | go-pmtiles 3 | /*.pmtiles 4 | /*.pmtiles.sync 5 | /*.mbtiles 6 | *.json 7 | *.geojson 8 | *.tsv.gz 9 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # AC_USERNAME= AC_PASSWORD= goreleaser release --rm-dist --parallelism 1 2 | before: 3 | hooks: 4 | - go mod tidy 5 | builds: 6 | - binary: pmtiles 7 | id: go-pmtiles 8 | env: 9 | - CGO_ENABLED=0 10 | flags: 11 | - -trimpath 12 | goos: 13 | - linux 14 | - windows 15 | goarch: 16 | - amd64 17 | - arm64 18 | - binary: pmtiles 19 | id: pmtiles-macos-amd64 20 | env: 21 | - CGO_ENABLED=0 22 | flags: 23 | - -trimpath 24 | goos: 25 | - darwin 26 | goarch: 27 | - amd64 28 | hooks: 29 | post: 30 | - cmd: 'codesign -s "Developer ID Application: Brandon Liu (WNSC27EEHU)" -v {{ .Path }} --options=runtime' 31 | output: true 32 | - cmd: zip -j "{{ .Path }}_signed.zip" {{ .Path }} 33 | - cmd: xcrun notarytool submit "{{ .Path }}_signed.zip" --apple-id {{ .Env.AC_USERNAME }} --password {{ .Env.AC_PASSWORD }} --team-id WNSC27EEHU --wait 34 | - mv {{ .Path }}_signed.zip dist/go-pmtiles-{{ .Version }}_Darwin_x86_64.zip 35 | - binary: pmtiles 36 | id: pmtiles-macos-arm64 37 | env: 38 | - CGO_ENABLED=0 39 | flags: 40 | - -trimpath 41 | goos: 42 | - darwin 43 | goarch: 44 | - arm64 45 | hooks: 46 | post: 47 | - cmd: 'codesign -s "Developer ID Application: Brandon Liu (WNSC27EEHU)" -v {{ .Path }} --options=runtime' 48 | output: true 49 | - cmd: zip -j "{{ .Path }}_signed.zip" {{ .Path 
}} 50 | - cmd: xcrun notarytool submit "{{ .Path }}_signed.zip" --apple-id {{ .Env.AC_USERNAME }} --password {{ .Env.AC_PASSWORD }} --team-id WNSC27EEHU --wait 51 | - mv {{ .Path }}_signed.zip dist/go-pmtiles-{{ .Version }}_Darwin_arm64.zip 52 | archives: 53 | - id: default 54 | builds: 55 | - go-pmtiles 56 | replacements: 57 | linux: Linux 58 | windows: Windows 59 | amd64: x86_64 60 | format_overrides: 61 | - goos: windows 62 | format: zip 63 | 64 | checksum: 65 | disable: true 66 | 67 | snapshot: 68 | name_template: "{{ .Tag }}-next" 69 | 70 | changelog: 71 | sort: asc 72 | filters: 73 | exclude: 74 | - '^docs:' 75 | - '^test:' 76 | 77 | release: 78 | extra_files: 79 | - glob: ./dist/*_Darwin_arm64.zip 80 | - glob: ./dist/*_Darwin_x86_64.zip 81 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.22.7-alpine3.19 AS builder 2 | COPY . /workspace 3 | WORKDIR /workspace 4 | ENV CGO_ENABLED=0 5 | RUN go build -o /workspace/go-pmtiles 6 | FROM gcr.io/distroless/static 7 | COPY --from=builder /workspace/go-pmtiles /go-pmtiles 8 | EXPOSE 8080 9 | ENTRYPOINT ["/go-pmtiles"] 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2021 Protomaps LLC 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-pmtiles 2 | 3 | The single-file utility for creating and working with [PMTiles](https://github.com/protomaps/PMTiles) archives. 4 | 5 | ## Installation 6 | 7 | See [Releases](https://github.com/protomaps/go-pmtiles/releases) for your OS and architecture. 8 | 9 | ## Docs 10 | 11 | See [docs.protomaps.com/pmtiles/cli](https://docs.protomaps.com/pmtiles/cli) for usage. 12 | 13 | See [Go package docs](https://pkg.go.dev/github.com/protomaps/go-pmtiles/pmtiles) for API usage. 
14 | 15 | ## Development 16 | 17 | Run the program in development: 18 | 19 | ```sh 20 | go run main.go 21 | ``` 22 | 23 | Run the test suite: 24 | 25 | ```sh 26 | go test ./pmtiles 27 | ``` 28 | -------------------------------------------------------------------------------- /caddy/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | # to use handle_path for prefixes, pmtiles_proxy must have a defined 3 | # position in the ordering of handlers. 4 | order pmtiles_proxy before reverse_proxy 5 | } 6 | 7 | localhost:2019 { 8 | handle_path /tiles/* { 9 | pmtiles_proxy { 10 | 11 | # replace with your bucket URL or http path 12 | bucket https://example.com 13 | 14 | cache_size 256 15 | 16 | # used to embed a tiles URL in TileJSON. 17 | public_url https://localhost:2019/tiles 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /caddy/pmtiles_proxy.go: -------------------------------------------------------------------------------- 1 | package caddy 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "log" 7 | "net/http" 8 | "strconv" 9 | "time" 10 | 11 | "github.com/caddyserver/caddy/v2" 12 | "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" 13 | "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" 14 | "github.com/caddyserver/caddy/v2/modules/caddyhttp" 15 | "github.com/protomaps/go-pmtiles/pmtiles" 16 | "go.uber.org/zap" 17 | _ "gocloud.dev/blob/azureblob" 18 | _ "gocloud.dev/blob/fileblob" 19 | _ "gocloud.dev/blob/gcsblob" 20 | _ "gocloud.dev/blob/s3blob" 21 | ) 22 | 23 | func init() { 24 | caddy.RegisterModule(Middleware{}) 25 | httpcaddyfile.RegisterHandlerDirective("pmtiles_proxy", parseCaddyfile) 26 | } 27 | 28 | // Middleware creates a Z/X/Y tileserver backed by a local or remote bucket of PMTiles archives. 
29 | type Middleware struct { 30 | Bucket string `json:"bucket"` 31 | CacheSize int `json:"cache_size"` 32 | PublicURL string `json:"public_url"` 33 | logger *zap.Logger 34 | server *pmtiles.Server 35 | } 36 | 37 | // CaddyModule returns the Caddy module information. 38 | func (Middleware) CaddyModule() caddy.ModuleInfo { 39 | return caddy.ModuleInfo{ 40 | ID: "http.handlers.pmtiles_proxy", 41 | New: func() caddy.Module { return new(Middleware) }, 42 | } 43 | } 44 | 45 | func (m *Middleware) Provision(ctx caddy.Context) error { 46 | m.logger = ctx.Logger() 47 | logger := log.New(io.Discard, "", log.Ldate) 48 | prefix := "." // serve only the root of the bucket for now, at the root route of Caddyfile 49 | server, err := pmtiles.NewServer(m.Bucket, prefix, logger, m.CacheSize, m.PublicURL) 50 | if err != nil { 51 | return err 52 | } 53 | m.server = server 54 | server.Start() 55 | return nil 56 | } 57 | 58 | func (m *Middleware) Validate() error { 59 | if m.Bucket == "" { 60 | return fmt.Errorf("no bucket") 61 | } 62 | if m.CacheSize <= 0 { 63 | m.CacheSize = 64 64 | } 65 | return nil 66 | } 67 | 68 | func (m Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { 69 | start := time.Now() 70 | statusCode := m.server.ServeHTTP(w, r) 71 | m.logger.Info("response", zap.Int("status", statusCode), zap.String("path", r.URL.Path), zap.Duration("duration", time.Since(start))) 72 | 73 | return next.ServeHTTP(w, r) 74 | } 75 | 76 | func (m *Middleware) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { 77 | for d.Next() { 78 | for nesting := d.Nesting(); d.NextBlock(nesting); { 79 | switch d.Val() { 80 | case "bucket": 81 | if !d.Args(&m.Bucket) { 82 | return d.ArgErr() 83 | } 84 | case "cache_size": 85 | var cacheSize string 86 | if !d.Args(&cacheSize) { 87 | return d.ArgErr() 88 | } 89 | num, err := strconv.Atoi(cacheSize) 90 | if err != nil { 91 | return d.ArgErr() 92 | } 93 | m.CacheSize = num 94 | case "public_url": 95 | if 
!d.Args(&m.PublicURL) { 96 | return d.ArgErr() 97 | } 98 | } 99 | } 100 | } 101 | return nil 102 | } 103 | 104 | func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { 105 | var m Middleware 106 | err := m.UnmarshalCaddyfile(h.Dispenser) 107 | return m, err 108 | } 109 | 110 | var ( 111 | _ caddy.Provisioner = (*Middleware)(nil) 112 | _ caddy.Validator = (*Middleware)(nil) 113 | _ caddyhttp.MiddlewareHandler = (*Middleware)(nil) 114 | _ caddyfile.Unmarshaler = (*Middleware)(nil) 115 | ) 116 | -------------------------------------------------------------------------------- /examples/minimal.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/base64" 5 | "github.com/protomaps/go-pmtiles/pmtiles" 6 | "os" 7 | ) 8 | 9 | // A program that creates the smallest meaningful PMTiles archive, 10 | // consisting of a purple square at tile 0,0,0 (the entire earth) 11 | // Uses only two library functions, SerializeHeader and SerializeEntries 12 | func main() { 13 | // A solid purple PNG with 50% opacity. 14 | png, _ := base64.StdEncoding.DecodeString("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mM0NLxTDwADmAG/Djok1gAAAABJRU5ErkJggg==") 15 | 16 | // Create an entry with TileID=0 (z=0, x=0, y=0), Offset=0, Length=len(png), and RunLength=1 17 | entries := []pmtiles.EntryV3{{0, 0, uint32(len(png)), 1}} 18 | 19 | // Create the bytes of the root directory 20 | directory := pmtiles.SerializeEntries(entries, pmtiles.NoCompression) 21 | 22 | // the JSON metadata is the empty object 23 | metadata := []byte("{}") 24 | 25 | // here we set the data of the header (the first 127 bytes) 26 | var h pmtiles.HeaderV3 27 | h.SpecVersion = 3 28 | 29 | // the root directory follows the header 30 | h.RootOffset = pmtiles.HeaderV3LenBytes 31 | h.RootLength = uint64(len(directory)) 32 | 33 | // the JSON metadata follows the root directory. 
34 | h.MetadataOffset = h.RootOffset + uint64(len(directory)) 35 | h.MetadataLength = uint64(len(metadata)) 36 | 37 | // there are no leaves, but set the offset to the right place and length=0 38 | h.LeafDirectoryOffset = h.MetadataOffset + h.MetadataLength 39 | h.LeafDirectoryLength = 0 40 | 41 | // the tile data follows the JSON metadata 42 | h.TileDataOffset = h.LeafDirectoryOffset 43 | h.TileDataLength = uint64(len(png)) 44 | 45 | // set statistics 46 | h.AddressedTilesCount = 1 47 | h.TileEntriesCount = 1 48 | h.TileContentsCount = 1 49 | 50 | // since we store a PNG, the tile data should not be interpreted as compressed 51 | h.TileCompression = pmtiles.NoCompression 52 | h.TileType = pmtiles.Png 53 | h.InternalCompression = pmtiles.NoCompression 54 | 55 | // set the zoom and geographic bounds 56 | h.MinZoom = 0 57 | h.CenterZoom = 0 58 | h.MaxZoom = 0 59 | h.MinLatE7 = -85.051129 * 10000000 60 | h.MaxLatE7 = 85.051129 * 10000000 61 | h.MinLonE7 = -180 * 10000000 62 | h.MaxLonE7 = 180 * 10000000 63 | 64 | outfile, _ := os.Create("minimal.pmtiles") 65 | defer outfile.Close() 66 | outfile.Write(pmtiles.SerializeHeader(h)) 67 | outfile.Write(directory) 68 | outfile.Write(metadata) 69 | outfile.Write(png) 70 | } 71 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/protomaps/go-pmtiles 2 | 3 | go 1.22.7 4 | 5 | require ( 6 | cloud.google.com/go/storage v1.43.0 7 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 8 | github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 9 | github.com/RoaringBitmap/roaring v1.5.0 10 | github.com/alecthomas/kong v0.8.0 11 | github.com/aws/aws-sdk-go-v2 v1.30.3 12 | github.com/aws/aws-sdk-go-v2/service/s3 v1.58.3 13 | github.com/aws/smithy-go v1.20.3 14 | github.com/caddyserver/caddy/v2 v2.8.4 15 | github.com/cespare/xxhash/v2 v2.3.0 16 | github.com/dustin/go-humanize v1.0.1 17 | 
github.com/paulmach/orb v0.10.0 18 | github.com/prometheus/client_golang v1.19.1 19 | github.com/rs/cors v1.11.1 20 | github.com/schollz/progressbar/v3 v3.13.1 21 | github.com/stretchr/testify v1.9.0 22 | go.uber.org/zap v1.27.0 23 | gocloud.dev v0.40.0 24 | golang.org/x/sync v0.10.0 25 | google.golang.org/api v0.191.0 26 | zombiezen.com/go/sqlite v1.1.2 27 | ) 28 | 29 | require ( 30 | cloud.google.com/go v0.115.0 // indirect 31 | cloud.google.com/go/auth v0.8.1 // indirect 32 | cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect 33 | cloud.google.com/go/compute/metadata v0.5.0 // indirect 34 | cloud.google.com/go/iam v1.1.13 // indirect 35 | filippo.io/edwards25519 v1.1.0 // indirect 36 | github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect 37 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect 38 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect 39 | github.com/Azure/go-autorest v14.2.0+incompatible // indirect 40 | github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect 41 | github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect 42 | github.com/Masterminds/goutils v1.1.1 // indirect 43 | github.com/Masterminds/semver/v3 v3.2.0 // indirect 44 | github.com/Masterminds/sprig/v3 v3.2.3 // indirect 45 | github.com/Microsoft/go-winio v0.6.0 // indirect 46 | github.com/antlr4-go/antlr/v4 v4.13.0 // indirect 47 | github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b // indirect 48 | github.com/aws/aws-sdk-go v1.55.5 // indirect 49 | github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect 50 | github.com/aws/aws-sdk-go-v2/config v1.27.27 // indirect 51 | github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect 52 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect 53 | github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.10 // indirect 54 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect 55 | 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect 56 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect 57 | github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect 58 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect 59 | github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect 60 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect 61 | github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect 62 | github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect 63 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect 64 | github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect 65 | github.com/beorn7/perks v1.0.1 // indirect 66 | github.com/bits-and-blooms/bitset v1.2.0 // indirect 67 | github.com/caddyserver/certmagic v0.21.3 // indirect 68 | github.com/caddyserver/zerossl v0.1.3 // indirect 69 | github.com/cespare/xxhash v1.1.0 // indirect 70 | github.com/chzyer/readline v1.5.1 // indirect 71 | github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect 72 | github.com/davecgh/go-spew v1.1.1 // indirect 73 | github.com/dgraph-io/badger v1.6.2 // indirect 74 | github.com/dgraph-io/badger/v2 v2.2007.4 // indirect 75 | github.com/dgraph-io/ristretto v0.1.0 // indirect 76 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect 77 | github.com/felixge/httpsnoop v1.0.4 // indirect 78 | github.com/go-jose/go-jose/v3 v3.0.4 // indirect 79 | github.com/go-kit/kit v0.13.0 // indirect 80 | github.com/go-kit/log v0.2.1 // indirect 81 | github.com/go-logfmt/logfmt v0.6.0 // indirect 82 | github.com/go-logr/logr v1.4.2 // indirect 83 | github.com/go-logr/stdr v1.2.2 // indirect 84 | github.com/go-sql-driver/mysql v1.8.1 // indirect 85 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect 86 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect 87 | github.com/golang/glog v1.2.1 // indirect 88 
| github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 89 | github.com/golang/protobuf v1.5.4 // indirect 90 | github.com/golang/snappy v0.0.4 // indirect 91 | github.com/google/cel-go v0.20.1 // indirect 92 | github.com/google/pprof v0.0.0-20231212022811-ec68065c825e // indirect 93 | github.com/google/s2a-go v0.1.8 // indirect 94 | github.com/google/uuid v1.6.0 // indirect 95 | github.com/google/wire v0.6.0 // indirect 96 | github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect 97 | github.com/googleapis/gax-go/v2 v2.13.0 // indirect 98 | github.com/huandu/xstrings v1.3.3 // indirect 99 | github.com/imdario/mergo v0.3.12 // indirect 100 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 101 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 102 | github.com/jackc/pgconn v1.14.3 // indirect 103 | github.com/jackc/pgio v1.0.0 // indirect 104 | github.com/jackc/pgpassfile v1.0.0 // indirect 105 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect 106 | github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect 107 | github.com/jackc/pgtype v1.14.0 // indirect 108 | github.com/jackc/pgx/v4 v4.18.3 // indirect 109 | github.com/jmespath/go-jmespath v0.4.0 // indirect 110 | github.com/klauspost/compress v1.17.8 // indirect 111 | github.com/klauspost/cpuid/v2 v2.2.7 // indirect 112 | github.com/kylelemons/godebug v1.1.0 // indirect 113 | github.com/libdns/libdns v0.2.2 // indirect 114 | github.com/manifoldco/promptui v0.9.0 // indirect 115 | github.com/mattn/go-colorable v0.1.13 // indirect 116 | github.com/mattn/go-isatty v0.0.20 // indirect 117 | github.com/mattn/go-runewidth v0.0.14 // indirect 118 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect 119 | github.com/mholt/acmez/v2 v2.0.1 // indirect 120 | github.com/miekg/dns v1.1.59 // indirect 121 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect 122 | github.com/mitchellh/copystructure v1.2.0 // indirect 
123 | github.com/mitchellh/go-ps v1.0.0 // indirect 124 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 125 | github.com/mschoch/smat v0.2.0 // indirect 126 | github.com/ncruces/go-strftime v0.1.9 // indirect 127 | github.com/onsi/ginkgo/v2 v2.13.2 // indirect 128 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 129 | github.com/pkg/errors v0.9.1 // indirect 130 | github.com/pmezard/go-difflib v1.0.0 // indirect 131 | github.com/prometheus/client_model v0.5.0 // indirect 132 | github.com/prometheus/common v0.48.0 // indirect 133 | github.com/prometheus/procfs v0.12.0 // indirect 134 | github.com/quic-go/qpack v0.5.1 // indirect 135 | github.com/quic-go/quic-go v0.48.2 // indirect 136 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect 137 | github.com/rivo/uniseg v0.2.0 // indirect 138 | github.com/rs/xid v1.5.0 // indirect 139 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 140 | github.com/shopspring/decimal v1.2.0 // indirect 141 | github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect 142 | github.com/slackhq/nebula v1.6.1 // indirect 143 | github.com/smallstep/certificates v0.26.1 // indirect 144 | github.com/smallstep/nosql v0.6.1 // indirect 145 | github.com/smallstep/pkcs7 v0.0.0-20231024181729-3b98ecc1ca81 // indirect 146 | github.com/smallstep/scep v0.0.0-20231024192529-aee96d7ad34d // indirect 147 | github.com/smallstep/truststore v0.13.0 // indirect 148 | github.com/spf13/cast v1.4.1 // indirect 149 | github.com/spf13/cobra v1.8.0 // indirect 150 | github.com/spf13/pflag v1.0.5 // indirect 151 | github.com/stoewer/go-strcase v1.2.0 // indirect 152 | github.com/tailscale/tscert v0.0.0-20240517230440-bbccfbf48933 // indirect 153 | github.com/urfave/cli v1.22.14 // indirect 154 | github.com/zeebo/blake3 v0.2.3 // indirect 155 | go.etcd.io/bbolt v1.3.9 // indirect 156 | go.mongodb.org/mongo-driver v1.11.4 // indirect 157 | go.opencensus.io v0.24.0 // indirect 158 | 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect 159 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect 160 | go.opentelemetry.io/otel v1.28.0 // indirect 161 | go.opentelemetry.io/otel/metric v1.28.0 // indirect 162 | go.opentelemetry.io/otel/sdk v1.28.0 // indirect 163 | go.opentelemetry.io/otel/trace v1.28.0 // indirect 164 | go.step.sm/cli-utils v0.9.0 // indirect 165 | go.step.sm/crypto v0.45.0 // indirect 166 | go.step.sm/linkedca v0.20.1 // indirect 167 | go.uber.org/automaxprocs v1.5.3 // indirect 168 | go.uber.org/mock v0.4.0 // indirect 169 | go.uber.org/multierr v1.11.0 // indirect 170 | go.uber.org/zap/exp v0.2.0 // indirect 171 | golang.org/x/crypto v0.31.0 // indirect 172 | golang.org/x/crypto/x509roots/fallback v0.0.0-20240507223354-67b13616a595 // indirect 173 | golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect 174 | golang.org/x/mod v0.17.0 // indirect 175 | golang.org/x/net v0.28.0 // indirect 176 | golang.org/x/oauth2 v0.22.0 // indirect 177 | golang.org/x/sys v0.28.0 // indirect 178 | golang.org/x/term v0.27.0 // indirect 179 | golang.org/x/text v0.21.0 // indirect 180 | golang.org/x/time v0.6.0 // indirect 181 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect 182 | golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect 183 | google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 // indirect 184 | google.golang.org/genproto/googleapis/api v0.0.0-20240812133136-8ffd90a71988 // indirect 185 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240812133136-8ffd90a71988 // indirect 186 | google.golang.org/grpc v1.65.0 // indirect 187 | google.golang.org/protobuf v1.34.2 // indirect 188 | gopkg.in/yaml.v3 v3.0.1 // indirect 189 | howett.net/plist v1.0.0 // indirect 190 | modernc.org/libc v1.41.0 // indirect 191 | modernc.org/mathutil v1.6.0 // indirect 192 | modernc.org/memory v1.7.2 // indirect 193 | 
modernc.org/sqlite v1.29.1 // indirect 194 | ) 195 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net/http" 7 | "net/url" 8 | "os" 9 | "path/filepath" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/alecthomas/kong" 14 | "github.com/prometheus/client_golang/prometheus/promhttp" 15 | 16 | "github.com/protomaps/go-pmtiles/pmtiles" 17 | _ "gocloud.dev/blob/azureblob" 18 | _ "gocloud.dev/blob/fileblob" 19 | _ "gocloud.dev/blob/gcsblob" 20 | _ "gocloud.dev/blob/s3blob" 21 | ) 22 | 23 | var ( 24 | version = "dev" 25 | commit = "none" 26 | date = "unknown" 27 | ) 28 | 29 | var cli struct { 30 | Show struct { 31 | Path string `arg:""` 32 | Bucket string `help:"Remote bucket"` 33 | Metadata bool `help:"Print only the JSON metadata"` 34 | HeaderJson bool `help:"Print a JSON representation of part of the header information"` 35 | Tilejson bool `help:"Print the TileJSON"` 36 | PublicURL string `help:"Public base URL of tile endpoint for TileJSON e.g. 
https://example.com/tiles"` 37 | } `cmd:"" help:"Inspect a local or remote archive"` 38 | 39 | Tile struct { 40 | Path string `arg:""` 41 | Z int `arg:""` 42 | X int `arg:""` 43 | Y int `arg:""` 44 | Bucket string `help:"Remote bucket"` 45 | } `cmd:"" help:"Fetch one tile from a local or remote archive and output on stdout"` 46 | 47 | Cluster struct { 48 | Input string `arg:"" help:"Input archive" type:"existingfile"` 49 | NoDeduplication bool `help:"Don't attempt to deduplicate tiles"` 50 | } `cmd:"" help:"Cluster an unclustered local archive, optimizing the size and layout"` 51 | 52 | Edit struct { 53 | Input string `arg:"" help:"Input archive" type:"existingfile"` 54 | HeaderJson string `help:"Input header JSON file (written by show --header-json)" type:"existingfile"` 55 | Metadata string `help:"Input metadata JSON (written by show --metadata)" type:"existingfile"` 56 | } `cmd:"" help:"Edit JSON metadata or parts of the header"` 57 | 58 | Extract struct { 59 | Input string `arg:"" help:"Input local or remote archive"` 60 | Output string `arg:"" help:"Output archive" type:"path"` 61 | Bucket string `help:"Remote bucket of input archive"` 62 | Region string `help:"local GeoJSON Polygon or MultiPolygon file for area of interest" type:"existingfile"` 63 | Bbox string `help:"bbox area of interest: min_lon,min_lat,max_lon,max_lat" type:"string"` 64 | Minzoom int8 `default:"-1" help:"Minimum zoom level, inclusive"` 65 | Maxzoom int8 `default:"-1" help:"Maximum zoom level, inclusive"` 66 | DownloadThreads int `default:"4" help:"Number of download threads"` 67 | DryRun bool `help:"Calculate tiles to extract, but don't download them"` 68 | Overfetch float32 `default:"0.05" help:"What ratio of extra data to download to minimize # requests; 0.2 is 20%"` 69 | } `cmd:"" help:"Create an archive from a larger archive for a subset of zoom levels or geographic region"` 70 | 71 | Merge struct { 72 | Output string `arg:"" help:"Output archive" type:"path"` 73 | Input []string 
`arg:"" help:"Input archives"` 74 | } `cmd:"" help:"Merge multiple archives into a single archive" hidden:""` 75 | 76 | Convert struct { 77 | Input string `arg:"" help:"Input archive" type:"existingfile"` 78 | Output string `arg:"" help:"Output archive" type:"path"` 79 | Force bool `help:"Force removal"` 80 | NoDeduplication bool `help:"Don't attempt to deduplicate tiles"` 81 | Tmpdir string `help:"An optional path to a folder for temporary files" type:"existingdir"` 82 | } `cmd:"" help:"Convert an MBTiles database to PMTiles"` 83 | 84 | Verify struct { 85 | Input string `arg:"" help:"Input archive" type:"existingfile"` 86 | } `cmd:"" help:"Verify the correctness of an archive structure, without verifying individual tile contents"` 87 | 88 | Makesync struct { 89 | Input string `arg:"" type:"existingfile"` 90 | BlockSizeKb int `default:"20" help:"The approximate block size, in kilobytes; 0 means 1 tile = 1 block"` 91 | } `cmd:"" help:"" hidden:""` 92 | 93 | Sync struct { 94 | Existing string `arg:"" type:"existingfile"` 95 | New string `arg:"" help:"Local or remote archive, with .sync sidecar file"` 96 | DryRun bool `help:"Calculate new parts to download, but don't download them"` 97 | RangesPerRequest int `default:"100" help:"Number of ranges in a single HTTP request (limit depends on server)"` 98 | } `cmd:"" help:"Sync a local file with a remote one by only downloading changed parts" hidden:""` 99 | 100 | Serve struct { 101 | Path string `arg:"" help:"Local path or bucket prefix"` 102 | Interface string `default:"0.0.0.0"` 103 | Port int `default:"8080"` 104 | AdminPort int `default:"-1"` 105 | Cors string `help:"Comma-separated list of of allowed HTTP CORS origins"` 106 | CacheSize int `default:"64" help:"Size of cache in megabytes"` 107 | Bucket string `help:"Remote bucket"` 108 | PublicURL string `help:"Public base URL of tile endpoint for TileJSON e.g. 
https://example.com/tiles/"` 109 | } `cmd:"" help:"Run an HTTP proxy server for Z/X/Y tiles"` 110 | 111 | Upload struct { 112 | InputPmtiles string `arg:"" type:"existingfile" help:"The local PMTiles file"` 113 | RemotePmtiles string `arg:"" help:"The name for the remote PMTiles source"` 114 | MaxConcurrency int `default:"2" help:"# of upload threads"` 115 | Bucket string `required:"" help:"Bucket to upload to"` 116 | } `cmd:"" help:"Upload a local archive to remote storage"` 117 | 118 | Version struct { 119 | } `cmd:"" help:"Show the program version"` 120 | } 121 | 122 | func main() { 123 | if len(os.Args) < 2 { 124 | os.Args = append(os.Args, "--help") 125 | } 126 | 127 | logger := log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile) 128 | ctx := kong.Parse(&cli) 129 | 130 | switch ctx.Command() { 131 | case "show ": 132 | err := pmtiles.Show(logger, os.Stdout, cli.Show.Bucket, cli.Show.Path, cli.Show.HeaderJson, cli.Show.Metadata, cli.Show.Tilejson, cli.Show.PublicURL, false, 0, 0, 0) 133 | if err != nil { 134 | logger.Fatalf("Failed to show archive, %v", err) 135 | } 136 | case "tile ": 137 | err := pmtiles.Show(logger, os.Stdout, cli.Tile.Bucket, cli.Tile.Path, false, false, false, "", true, cli.Tile.Z, cli.Tile.X, cli.Tile.Y) 138 | if err != nil { 139 | logger.Fatalf("Failed to show tile, %v", err) 140 | } 141 | case "serve ": 142 | server, err := pmtiles.NewServer(cli.Serve.Bucket, cli.Serve.Path, logger, cli.Serve.CacheSize, cli.Serve.PublicURL) 143 | 144 | if err != nil { 145 | logger.Fatalf("Failed to create new server, %v", err) 146 | } 147 | 148 | pmtiles.SetBuildInfo(version, commit, date) 149 | server.Start() 150 | 151 | mux := http.NewServeMux() 152 | 153 | mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 154 | start := time.Now() 155 | statusCode := server.ServeHTTP(w, r) 156 | logger.Printf("served %d %s in %s", statusCode, url.PathEscape(r.URL.Path), time.Since(start)) 157 | }) 158 | 159 | logger.Printf("Serving %s %s on 
port %d and interface %s with Access-Control-Allow-Origin: %s\n", cli.Serve.Bucket, cli.Serve.Path, cli.Serve.Port, cli.Serve.Interface, cli.Serve.Cors) 160 | if cli.Serve.AdminPort > 0 { 161 | go func() { 162 | adminPort := strconv.Itoa(cli.Serve.AdminPort) 163 | logger.Printf("Serving /metrics on port %s and interface %s\n", adminPort, cli.Serve.Interface) 164 | adminMux := http.NewServeMux() 165 | adminMux.Handle("/metrics", promhttp.Handler()) 166 | logger.Fatal(startHTTPServer(cli.Serve.Interface+":"+adminPort, adminMux)) 167 | }() 168 | } 169 | 170 | if cli.Serve.Cors != "" { 171 | muxWithCors := pmtiles.NewCors(cli.Serve.Cors).Handler(mux) 172 | logger.Fatal(startHTTPServer(cli.Serve.Interface+":"+strconv.Itoa(cli.Serve.Port), muxWithCors)) 173 | } else { 174 | logger.Fatal(startHTTPServer(cli.Serve.Interface+":"+strconv.Itoa(cli.Serve.Port), mux)) 175 | } 176 | case "extract ": 177 | err := pmtiles.Extract(logger, cli.Extract.Bucket, cli.Extract.Input, cli.Extract.Minzoom, cli.Extract.Maxzoom, cli.Extract.Region, cli.Extract.Bbox, cli.Extract.Output, cli.Extract.DownloadThreads, cli.Extract.Overfetch, cli.Extract.DryRun) 178 | if err != nil { 179 | logger.Fatalf("Failed to extract, %v", err) 180 | } 181 | case "cluster ": 182 | err := pmtiles.Cluster(logger, cli.Cluster.Input, !cli.Cluster.NoDeduplication) 183 | if err != nil { 184 | logger.Fatalf("Failed to cluster, %v", err) 185 | } 186 | case "convert ": 187 | path := cli.Convert.Input 188 | output := cli.Convert.Output 189 | 190 | var tmpfile *os.File 191 | 192 | if cli.Convert.Tmpdir == "" { 193 | var err error 194 | tmpfile, err = os.CreateTemp("", "pmtiles") 195 | 196 | if err != nil { 197 | logger.Fatalf("Failed to create temp file, %v", err) 198 | } 199 | } else { 200 | absTemproot, err := filepath.Abs(cli.Convert.Tmpdir) 201 | 202 | if err != nil { 203 | logger.Fatalf("Failed to derive absolute path for %s, %v", cli.Convert.Tmpdir, err) 204 | } 205 | 206 | tmpfile, err = os.CreateTemp(absTemproot, 
"pmtiles") 207 | 208 | if err != nil { 209 | logger.Fatalf("Failed to create temp file, %v", err) 210 | } 211 | } 212 | 213 | defer os.Remove(tmpfile.Name()) 214 | err := pmtiles.Convert(logger, path, output, !cli.Convert.NoDeduplication, tmpfile) 215 | 216 | if err != nil { 217 | logger.Fatalf("Failed to convert %s, %v", path, err) 218 | } 219 | case "upload ": 220 | err := pmtiles.Upload(logger, cli.Upload.InputPmtiles, cli.Upload.Bucket, cli.Upload.RemotePmtiles, cli.Upload.MaxConcurrency) 221 | 222 | if err != nil { 223 | logger.Fatalf("Failed to upload file, %v", err) 224 | } 225 | case "verify ": 226 | err := pmtiles.Verify(logger, cli.Verify.Input) 227 | if err != nil { 228 | logger.Fatalf("Failed to verify archive, %v", err) 229 | } 230 | case "edit ": 231 | err := pmtiles.Edit(logger, cli.Edit.Input, cli.Edit.HeaderJson, cli.Edit.Metadata) 232 | if err != nil { 233 | logger.Fatalf("Failed to edit archive, %v", err) 234 | } 235 | case "makesync ": 236 | err := pmtiles.Makesync(logger, version, cli.Makesync.Input, cli.Makesync.BlockSizeKb) 237 | if err != nil { 238 | logger.Fatalf("Failed to makesync archive, %v", err) 239 | } 240 | case "sync ": 241 | err := pmtiles.Sync(logger, cli.Sync.Existing, cli.Sync.New, cli.Sync.DryRun) 242 | if err != nil { 243 | logger.Fatalf("Failed to sync archive, %v", err) 244 | } 245 | case "version": 246 | fmt.Printf("pmtiles %s, commit %s, built at %s\n", version, commit, date) 247 | default: 248 | panic(ctx.Command()) 249 | } 250 | 251 | } 252 | func startHTTPServer(addr string, handler http.Handler) error { 253 | server := &http.Server{ 254 | ReadTimeout: 10 * time.Second, 255 | ReadHeaderTimeout: 10 * time.Second, 256 | WriteTimeout: 10 * time.Second, 257 | IdleTimeout: 30 * time.Second, 258 | Addr: addr, 259 | Handler: handler, 260 | } 261 | return server.ListenAndServe() 262 | } 263 | -------------------------------------------------------------------------------- /pmtiles/bitmap.go: 
-------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/RoaringBitmap/roaring/roaring64" 5 | "github.com/paulmach/orb" 6 | "github.com/paulmach/orb/maptile" 7 | "github.com/paulmach/orb/maptile/tilecover" 8 | "github.com/paulmach/orb/planar" 9 | "github.com/paulmach/orb/project" 10 | ) 11 | 12 | func bitmapMultiPolygon(zoom uint8, multipolygon orb.MultiPolygon) (*roaring64.Bitmap, *roaring64.Bitmap) { 13 | boundarySet := roaring64.New() 14 | 15 | for _, polygon := range multipolygon { 16 | for _, ring := range polygon { 17 | boundaryTiles, _ := tilecover.Geometry(orb.LineString(ring), maptile.Zoom(zoom)) // TODO is this buffer-aware? 18 | for tile := range boundaryTiles { 19 | boundarySet.Add(ZxyToID(uint8(tile.Z), tile.X, tile.Y)) 20 | } 21 | } 22 | } 23 | 24 | multipolygonProjected := project.MultiPolygon(multipolygon.Clone(), project.WGS84.ToMercator) 25 | 26 | interiorSet := roaring64.New() 27 | i := boundarySet.Iterator() 28 | for i.HasNext() { 29 | id := i.Next() 30 | if !boundarySet.Contains(id+1) && i.HasNext() { 31 | z, x, y := IDToZxy(id + 1) 32 | tile := maptile.New(x, y, maptile.Zoom(z)) 33 | if planar.MultiPolygonContains(multipolygonProjected, project.Point(tile.Center(), project.WGS84.ToMercator)) { 34 | interiorSet.AddRange(id+1, i.PeekNext()) 35 | } 36 | } 37 | } 38 | 39 | return boundarySet, interiorSet 40 | } 41 | 42 | func generalizeOr(r *roaring64.Bitmap, minzoom uint8) { 43 | if r.GetCardinality() == 0 { 44 | return 45 | } 46 | maxZ, _, _ := IDToZxy(r.ReverseIterator().Next()) 47 | 48 | var temp *roaring64.Bitmap 49 | var toIterate *roaring64.Bitmap 50 | 51 | temp = roaring64.New() 52 | toIterate = r 53 | 54 | for currentZ := int(maxZ); currentZ > int(minzoom); currentZ-- { 55 | iter := toIterate.Iterator() 56 | for iter.HasNext() { 57 | parentID := ParentID(iter.Next()) 58 | temp.Add(parentID) 59 | } 60 | toIterate = temp 61 | r.Or(temp) 62 | temp = roaring64.New() 
63 | } 64 | } 65 | 66 | func generalizeAnd(r *roaring64.Bitmap) { 67 | if r.GetCardinality() == 0 { 68 | return 69 | } 70 | maxZ, _, _ := IDToZxy(r.ReverseIterator().Next()) 71 | 72 | var temp *roaring64.Bitmap 73 | var toIterate *roaring64.Bitmap 74 | 75 | temp = roaring64.New() 76 | toIterate = r 77 | 78 | for currentZ := int(maxZ); currentZ > 0; currentZ-- { 79 | iter := toIterate.Iterator() 80 | filled := 0 81 | current := uint64(0) // check me... 82 | for iter.HasNext() { 83 | id := iter.Next() 84 | parentID := ParentID(id) 85 | if parentID == current { 86 | filled++ 87 | if filled == 4 { 88 | temp.Add(parentID) 89 | } 90 | } else { 91 | current = parentID 92 | filled = 1 93 | } 94 | } 95 | toIterate = temp 96 | r.Or(temp) 97 | temp = roaring64.New() 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /pmtiles/bitmap_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/RoaringBitmap/roaring/roaring64" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestGeneralizeAnd(t *testing.T) { 10 | b := roaring64.New() 11 | generalizeAnd(b) 12 | assert.Equal(t, uint64(0), b.GetCardinality()) 13 | b = roaring64.New() 14 | b.Add(ZxyToID(3, 0, 0)) 15 | generalizeAnd(b) 16 | assert.Equal(t, uint64(1), b.GetCardinality()) 17 | b = roaring64.New() 18 | b.Add(ZxyToID(3, 0, 0)) 19 | b.Add(ZxyToID(3, 0, 1)) 20 | b.Add(ZxyToID(3, 1, 0)) 21 | b.Add(ZxyToID(3, 1, 1)) 22 | generalizeAnd(b) 23 | assert.Equal(t, uint64(5), b.GetCardinality()) 24 | assert.True(t, b.Contains(ZxyToID(2, 0, 0))) 25 | } 26 | 27 | func TestGeneralizeOr(t *testing.T) { 28 | b := roaring64.New() 29 | generalizeOr(b, 0) 30 | assert.Equal(t, uint64(0), b.GetCardinality()) 31 | b = roaring64.New() 32 | b.Add(ZxyToID(3, 0, 0)) 33 | generalizeOr(b, 0) 34 | assert.Equal(t, uint64(4), b.GetCardinality()) 35 | assert.True(t, b.Contains(ZxyToID(2, 0, 0))) 36 
| assert.True(t, b.Contains(ZxyToID(1, 0, 0))) 37 | assert.True(t, b.Contains(ZxyToID(0, 0, 0))) 38 | } 39 | 40 | func TestGeneralizeOrMinZoom(t *testing.T) { 41 | b := roaring64.New() 42 | b.Add(ZxyToID(3, 0, 0)) 43 | generalizeOr(b, 2) 44 | assert.Equal(t, uint64(2), b.GetCardinality()) 45 | assert.True(t, b.Contains(ZxyToID(2, 0, 0))) 46 | assert.False(t, b.Contains(ZxyToID(1, 0, 0))) 47 | } 48 | -------------------------------------------------------------------------------- /pmtiles/bucket.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "encoding/hex" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "net/http" 12 | "net/url" 13 | "os" 14 | "path" 15 | "path/filepath" 16 | "strconv" 17 | "strings" 18 | 19 | "cloud.google.com/go/storage" 20 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 21 | "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" 22 | "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" 23 | "github.com/aws/aws-sdk-go-v2/aws" 24 | "github.com/aws/aws-sdk-go-v2/service/s3" 25 | smithyHttp "github.com/aws/smithy-go/transport/http" 26 | "github.com/cespare/xxhash/v2" 27 | "gocloud.dev/blob" 28 | "google.golang.org/api/googleapi" 29 | ) 30 | 31 | // Bucket is an abstration over a gocloud or plain HTTP bucket. 
32 | type Bucket interface { 33 | Close() error 34 | NewRangeReader(ctx context.Context, key string, offset int64, length int64) (io.ReadCloser, error) 35 | NewRangeReaderEtag(ctx context.Context, key string, offset int64, length int64, etag string) (io.ReadCloser, string, int, error) 36 | } 37 | 38 | // RefreshRequiredError is an error that indicates the etag has chanced on the remote file 39 | type RefreshRequiredError struct { 40 | StatusCode int 41 | } 42 | 43 | func (m *RefreshRequiredError) Error() string { 44 | return fmt.Sprintf("HTTP error indicates file has changed: %d", m.StatusCode) 45 | } 46 | 47 | type mockBucket struct { 48 | items map[string][]byte 49 | } 50 | 51 | func (m mockBucket) Close() error { 52 | return nil 53 | } 54 | 55 | func (m mockBucket) NewRangeReader(ctx context.Context, key string, offset int64, length int64) (io.ReadCloser, error) { 56 | body, _, _, err := m.NewRangeReaderEtag(ctx, key, offset, length, "") 57 | return body, err 58 | 59 | } 60 | func (m mockBucket) NewRangeReaderEtag(_ context.Context, key string, offset int64, length int64, etag string) (io.ReadCloser, string, int, error) { 61 | bs, ok := m.items[key] 62 | if !ok { 63 | return nil, "", 404, fmt.Errorf("Not found %s", key) 64 | } 65 | 66 | resultEtag := generateEtag(bs) 67 | if len(etag) > 0 && resultEtag != etag { 68 | return nil, "", 412, &RefreshRequiredError{} 69 | } 70 | if offset >= int64(len(bs)) { 71 | return nil, "", 416, &RefreshRequiredError{416} 72 | } 73 | 74 | end := offset + length 75 | if end > int64(len(bs)) { 76 | end = int64(len(bs)) 77 | } 78 | return io.NopCloser(bytes.NewReader(bs[offset:end])), resultEtag, 206, nil 79 | } 80 | 81 | // FileBucket is a bucket backed by a directory on disk 82 | type FileBucket struct { 83 | path string 84 | } 85 | 86 | // NewFileBucket initializes a FileBucket and returns a new instance 87 | func NewFileBucket(path string) *FileBucket { 88 | return &FileBucket{path: path} 89 | } 90 | 91 | func (b FileBucket) 
NewRangeReader(ctx context.Context, key string, offset, length int64) (io.ReadCloser, error) { 92 | body, _, _, err := b.NewRangeReaderEtag(ctx, key, offset, length, "") 93 | return body, err 94 | } 95 | 96 | func uintToBytes(n uint64) []byte { 97 | bs := make([]byte, 8) 98 | binary.LittleEndian.PutUint64(bs, n) 99 | return bs 100 | } 101 | 102 | func hasherToEtag(hasher *xxhash.Digest) string { 103 | sum := uintToBytes(hasher.Sum64()) 104 | return fmt.Sprintf(`"%s"`, hex.EncodeToString(sum)) 105 | } 106 | 107 | func generateEtag(data []byte) string { 108 | hasher := xxhash.New() 109 | hasher.Write(data) 110 | return hasherToEtag(hasher) 111 | } 112 | 113 | func generateEtagFromInts(ns ...int64) string { 114 | hasher := xxhash.New() 115 | for _, n := range ns { 116 | hasher.Write(uintToBytes(uint64(n))) 117 | } 118 | return hasherToEtag(hasher) 119 | } 120 | 121 | func (b FileBucket) NewRangeReaderEtag(_ context.Context, key string, offset, length int64, etag string) (io.ReadCloser, string, int, error) { 122 | name := filepath.Join(b.path, key) 123 | file, err := os.Open(name) 124 | defer file.Close() 125 | if err != nil { 126 | return nil, "", 404, err 127 | } 128 | info, err := file.Stat() 129 | if err != nil { 130 | return nil, "", 404, err 131 | } 132 | newEtag := generateEtagFromInts(info.ModTime().UnixNano(), info.Size()) 133 | if len(etag) > 0 && etag != newEtag { 134 | return nil, "", 412, &RefreshRequiredError{} 135 | } 136 | result := make([]byte, length) 137 | read, err := file.ReadAt(result, offset) 138 | 139 | if err == io.EOF { 140 | part := result[0:read] 141 | return io.NopCloser(bytes.NewReader(part)), newEtag, 206, nil 142 | } 143 | 144 | if err != nil { 145 | return nil, "", 500, err 146 | } 147 | if read != int(length) { 148 | return nil, "", 416, fmt.Errorf("Expected to read %d bytes but only read %d", length, read) 149 | } 150 | 151 | return io.NopCloser(bytes.NewReader(result)), newEtag, 206, nil 152 | } 153 | 154 | func (b FileBucket) 
Close() error { 155 | return nil 156 | } 157 | 158 | // HTTPClient is an interface that lets you swap out the default client with a mock one in tests 159 | type HTTPClient interface { 160 | Do(req *http.Request) (*http.Response, error) 161 | } 162 | 163 | type HTTPBucket struct { 164 | baseURL string 165 | client HTTPClient 166 | } 167 | 168 | func (b HTTPBucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (io.ReadCloser, error) { 169 | body, _, _, err := b.NewRangeReaderEtag(ctx, key, offset, length, "") 170 | return body, err 171 | } 172 | 173 | func (b HTTPBucket) NewRangeReaderEtag(ctx context.Context, key string, offset, length int64, etag string) (io.ReadCloser, string, int, error) { 174 | reqURL := b.baseURL + "/" + key 175 | 176 | req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil) 177 | if err != nil { 178 | return nil, "", 500, err 179 | } 180 | 181 | req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) 182 | if len(etag) > 0 { 183 | req.Header.Set("If-Match", etag) 184 | } 185 | 186 | resp, err := b.client.Do(req) 187 | if err != nil { 188 | return nil, "", resp.StatusCode, err 189 | } 190 | 191 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { 192 | resp.Body.Close() 193 | if isRefreshRequiredCode(resp.StatusCode) { 194 | err = &RefreshRequiredError{resp.StatusCode} 195 | } else { 196 | err = fmt.Errorf("HTTP error: %d", resp.StatusCode) 197 | } 198 | return nil, "", resp.StatusCode, err 199 | } 200 | 201 | return resp.Body, resp.Header.Get("ETag"), resp.StatusCode, nil 202 | } 203 | 204 | func (b HTTPBucket) Close() error { 205 | return nil 206 | } 207 | 208 | func isRefreshRequiredCode(code int) bool { 209 | return code == http.StatusPreconditionFailed || code == http.StatusRequestedRangeNotSatisfiable 210 | } 211 | 212 | type BucketAdapter struct { 213 | Bucket *blob.Bucket 214 | } 215 | 216 | func (ba BucketAdapter) NewRangeReader(ctx context.Context, 
key string, offset, length int64) (io.ReadCloser, error) { 217 | body, _, _, err := ba.NewRangeReaderEtag(ctx, key, offset, length, "") 218 | return body, err 219 | } 220 | 221 | func etagToGeneration(etag string) int64 { 222 | i, _ := strconv.ParseInt(etag, 10, 64) 223 | return i 224 | } 225 | 226 | func generationToEtag(generation int64) string { 227 | return strconv.FormatInt(generation, 10) 228 | } 229 | 230 | func setProviderEtag(asFunc func(interface{}) bool, etag string) { 231 | var awsV2Req *s3.GetObjectInput 232 | var azblobReq *azblob.DownloadStreamOptions 233 | var gcsHandle **storage.ObjectHandle 234 | if asFunc(&awsV2Req) { 235 | awsV2Req.IfMatch = aws.String(etag) 236 | } else if asFunc(&azblobReq) { 237 | azEtag := azcore.ETag(etag) 238 | azblobReq.AccessConditions = &azblob.AccessConditions{ 239 | ModifiedAccessConditions: &container.ModifiedAccessConditions{ 240 | IfMatch: &azEtag, 241 | }, 242 | } 243 | } else if asFunc(&gcsHandle) { 244 | *gcsHandle = (*gcsHandle).If(storage.Conditions{ 245 | GenerationMatch: etagToGeneration(etag), 246 | }) 247 | } 248 | } 249 | 250 | func getProviderErrorStatusCode(err error) int { 251 | var awsV2Err *smithyHttp.ResponseError 252 | var azureErr *azcore.ResponseError 253 | var gcpErr *googleapi.Error 254 | 255 | if errors.As(err, &awsV2Err); awsV2Err != nil { 256 | return awsV2Err.HTTPStatusCode() 257 | } else if errors.As(err, &azureErr); azureErr != nil { 258 | return azureErr.StatusCode 259 | } else if errors.As(err, &gcpErr); gcpErr != nil { 260 | return gcpErr.Code 261 | } 262 | return 404 263 | } 264 | 265 | func getProviderEtag(reader *blob.Reader) string { 266 | var awsV2Resp s3.GetObjectOutput 267 | var azureResp azblob.DownloadStreamResponse 268 | var gcpResp *storage.Reader 269 | 270 | if reader.As(&awsV2Resp) { 271 | return *awsV2Resp.ETag 272 | } else if reader.As(&azureResp) { 273 | return string(*azureResp.ETag) 274 | } else if reader.As(&gcpResp) { 275 | return 
generationToEtag(gcpResp.Attrs.Generation) 276 | } 277 | 278 | return "" 279 | } 280 | 281 | func (ba BucketAdapter) NewRangeReaderEtag(ctx context.Context, key string, offset, length int64, etag string) (io.ReadCloser, string, int, error) { 282 | reader, err := ba.Bucket.NewRangeReader(ctx, key, offset, length, &blob.ReaderOptions{ 283 | BeforeRead: func(asFunc func(interface{}) bool) error { 284 | if len(etag) > 0 { 285 | setProviderEtag(asFunc, etag) 286 | } 287 | return nil 288 | }, 289 | }) 290 | status := 206 291 | if err != nil { 292 | status = getProviderErrorStatusCode(err) 293 | if isRefreshRequiredCode(status) { 294 | return nil, "", status, &RefreshRequiredError{status} 295 | } 296 | 297 | return nil, "", status, err 298 | } 299 | 300 | return reader, getProviderEtag(reader), status, nil 301 | } 302 | 303 | func (ba BucketAdapter) Close() error { 304 | return ba.Bucket.Close() 305 | } 306 | 307 | func NormalizeBucketKey(bucket string, prefix string, key string) (string, string, error) { 308 | if bucket == "" { 309 | if strings.HasPrefix(key, "http") { 310 | u, err := url.Parse(key) 311 | if err != nil { 312 | return "", "", err 313 | } 314 | dir, file := path.Split(u.Path) 315 | if strings.HasSuffix(dir, "/") { 316 | dir = dir[:len(dir)-1] 317 | } 318 | return u.Scheme + "://" + u.Host + dir, file, nil 319 | } 320 | fileprotocol := "file://" 321 | if string(os.PathSeparator) != "/" { 322 | fileprotocol += "/" 323 | } 324 | if prefix != "" { 325 | abs, err := filepath.Abs(prefix) 326 | if err != nil { 327 | return "", "", err 328 | } 329 | return fileprotocol + filepath.ToSlash(abs), key, nil 330 | } 331 | abs, err := filepath.Abs(key) 332 | if err != nil { 333 | return "", "", err 334 | } 335 | return fileprotocol + filepath.ToSlash(filepath.Dir(abs)), filepath.Base(abs), nil 336 | } 337 | return bucket, key, nil 338 | } 339 | 340 | func OpenBucket(ctx context.Context, bucketURL string, bucketPrefix string) (Bucket, error) { 341 | if 
strings.HasPrefix(bucketURL, "http") { 342 | bucket := HTTPBucket{bucketURL, http.DefaultClient} 343 | return bucket, nil 344 | } 345 | if strings.HasPrefix(bucketURL, "file") { 346 | fileprotocol := "file://" 347 | if string(os.PathSeparator) != "/" { 348 | fileprotocol += "/" 349 | } 350 | path := strings.Replace(bucketURL, fileprotocol, "", 1) 351 | bucket := NewFileBucket(filepath.FromSlash(path)) 352 | return bucket, nil 353 | } 354 | bucket, err := blob.OpenBucket(ctx, bucketURL) 355 | if err != nil { 356 | return nil, err 357 | } 358 | if bucketPrefix != "" && bucketPrefix != "/" && bucketPrefix != "." { 359 | bucket = blob.PrefixedBucket(bucket, path.Clean(bucketPrefix)+string(os.PathSeparator)) 360 | } 361 | wrappedBucket := BucketAdapter{bucket} 362 | return wrappedBucket, err 363 | } 364 | -------------------------------------------------------------------------------- /pmtiles/bucket_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net/http" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | "testing" 12 | 13 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 14 | "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" 15 | 16 | "github.com/aws/aws-sdk-go-v2/aws" 17 | "github.com/aws/aws-sdk-go-v2/service/s3" 18 | smithyHttp "github.com/aws/smithy-go/transport/http" 19 | 20 | "github.com/stretchr/testify/assert" 21 | _ "gocloud.dev/blob/fileblob" 22 | "google.golang.org/api/googleapi" 23 | ) 24 | 25 | func TestNormalizeLocalFile(t *testing.T) { 26 | bucket, key, _ := NormalizeBucketKey("", "", "../foo/bar.pmtiles") 27 | assert.Equal(t, "bar.pmtiles", key) 28 | assert.True(t, strings.HasSuffix(bucket, "/foo")) 29 | assert.True(t, strings.HasPrefix(bucket, "file://")) 30 | } 31 | 32 | func TestNormalizeLocalFileWindows(t *testing.T) { 33 | if string(os.PathSeparator) != "/" { 34 | bucket, key, _ := NormalizeBucketKey("", "", 
"\\foo\\bar.pmtiles") 35 | assert.Equal(t, "bar.pmtiles", key) 36 | assert.True(t, strings.HasSuffix(bucket, "/foo")) 37 | assert.True(t, strings.HasPrefix(bucket, "file://")) 38 | } 39 | } 40 | 41 | func TestNormalizeHttp(t *testing.T) { 42 | bucket, key, _ := NormalizeBucketKey("", "", "http://example.com/foo/bar.pmtiles") 43 | assert.Equal(t, "bar.pmtiles", key) 44 | assert.Equal(t, "http://example.com/foo", bucket) 45 | } 46 | 47 | func TestNormalizePathPrefixServer(t *testing.T) { 48 | bucket, key, _ := NormalizeBucketKey("", "../foo", "") 49 | assert.Equal(t, "", key) 50 | assert.True(t, strings.HasSuffix(bucket, "/foo")) 51 | assert.True(t, strings.HasPrefix(bucket, "file://")) 52 | } 53 | 54 | type ClientMock struct { 55 | request *http.Request 56 | response *http.Response 57 | } 58 | 59 | func (c *ClientMock) Do(req *http.Request) (*http.Response, error) { 60 | c.request = req 61 | return c.response, nil 62 | } 63 | 64 | func TestHttpBucketRequestNormal(t *testing.T) { 65 | mock := ClientMock{} 66 | header := http.Header{} 67 | header.Add("ETag", "etag") 68 | bucket := HTTPBucket{"http://tiles.example.com/tiles", &mock} 69 | mock.response = &http.Response{ 70 | StatusCode: 200, 71 | Body: io.NopCloser(strings.NewReader("abc")), 72 | Header: header, 73 | } 74 | data, etag, status, err := bucket.NewRangeReaderEtag(context.Background(), "a/b/c", 100, 3, "") 75 | assert.Equal(t, "", mock.request.Header.Get("If-Match")) 76 | assert.Equal(t, "bytes=100-102", mock.request.Header.Get("Range")) 77 | assert.Equal(t, "http://tiles.example.com/tiles/a/b/c", mock.request.URL.String()) 78 | assert.Equal(t, 200, status) 79 | assert.Nil(t, err) 80 | b, err := io.ReadAll(data) 81 | assert.Nil(t, err) 82 | assert.Equal(t, "abc", string(b)) 83 | assert.Equal(t, "etag", etag) 84 | assert.Nil(t, err) 85 | } 86 | 87 | func TestHttpBucketRequestRequestEtag(t *testing.T) { 88 | mock := ClientMock{} 89 | header := http.Header{} 90 | header.Add("ETag", "etag2") 91 | bucket := 
HTTPBucket{"http://tiles.example.com/tiles", &mock} 92 | mock.response = &http.Response{ 93 | StatusCode: 200, 94 | Body: io.NopCloser(strings.NewReader("abc")), 95 | Header: header, 96 | } 97 | data, etag, status, err := bucket.NewRangeReaderEtag(context.Background(), "a/b/c", 0, 3, "etag1") 98 | assert.Equal(t, "etag1", mock.request.Header.Get("If-Match")) 99 | assert.Equal(t, 200, status) 100 | assert.Nil(t, err) 101 | b, err := io.ReadAll(data) 102 | assert.Nil(t, err) 103 | assert.Equal(t, "abc", string(b)) 104 | assert.Equal(t, "etag2", etag) 105 | assert.Nil(t, err) 106 | } 107 | 108 | func TestHttpBucketRequestRequestEtagFailed(t *testing.T) { 109 | mock := ClientMock{} 110 | header := http.Header{} 111 | header.Add("ETag", "etag2") 112 | bucket := HTTPBucket{"http://tiles.example.com/tiles", &mock} 113 | mock.response = &http.Response{ 114 | StatusCode: 412, 115 | Body: io.NopCloser(strings.NewReader("abc")), 116 | Header: header, 117 | } 118 | _, _, status, err := bucket.NewRangeReaderEtag(context.Background(), "a/b/c", 0, 3, "etag1") 119 | assert.Equal(t, "etag1", mock.request.Header.Get("If-Match")) 120 | assert.Equal(t, 412, status) 121 | assert.True(t, isRefreshRequiredError(err)) 122 | 123 | mock.response.StatusCode = 416 124 | _, _, status, err = bucket.NewRangeReaderEtag(context.Background(), "a/b/c", 0, 3, "etag1") 125 | assert.Equal(t, 416, status) 126 | assert.True(t, isRefreshRequiredError(err)) 127 | 128 | mock.response.StatusCode = 404 129 | _, _, status, err = bucket.NewRangeReaderEtag(context.Background(), "a/b/c", 0, 3, "etag1") 130 | assert.False(t, isRefreshRequiredError(err)) 131 | assert.Equal(t, 404, status) 132 | } 133 | 134 | func TestFileBucketReplace(t *testing.T) { 135 | tmp := t.TempDir() 136 | bucketURL, _, err := NormalizeBucketKey("", tmp, "") 137 | assert.Nil(t, err) 138 | bucket, err := OpenBucket(context.Background(), bucketURL, "") 139 | assert.Nil(t, err) 140 | assert.NotNil(t, bucket) 141 | assert.Nil(t, 
os.WriteFile(filepath.Join(tmp, "archive.pmtiles"), []byte{1, 2, 3}, 0666)) 142 | 143 | // first read from file 144 | reader, etag1, status, err := bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 1, 1, "") 145 | assert.Equal(t, 206, status) 146 | assert.Nil(t, err) 147 | data, err := io.ReadAll(reader) 148 | assert.Nil(t, err) 149 | assert.Equal(t, []byte{2}, data) 150 | 151 | // change file, verify etag changes 152 | assert.Nil(t, os.WriteFile(filepath.Join(tmp, "archive.pmtiles"), []byte{4, 5, 6, 7}, 0666)) 153 | reader, etag2, status, err := bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 1, 1, "") 154 | assert.Equal(t, 206, status) 155 | assert.Nil(t, err) 156 | data, err = io.ReadAll(reader) 157 | assert.Nil(t, err) 158 | assert.NotEqual(t, etag1, etag2) 159 | assert.Equal(t, []byte{5}, data) 160 | 161 | // and requesting with old etag fails with refresh required error 162 | _, _, status, err = bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 1, 1, etag1) 163 | assert.Equal(t, 412, status) 164 | assert.True(t, isRefreshRequiredError(err)) 165 | } 166 | 167 | func TestFileBucketRename(t *testing.T) { 168 | tmp := t.TempDir() 169 | assert.Nil(t, os.WriteFile(filepath.Join(tmp, "archive.pmtiles"), []byte{1, 2, 3}, 0666)) 170 | assert.Nil(t, os.WriteFile(filepath.Join(tmp, "archive2.pmtiles"), []byte{4, 5, 6, 7}, 0666)) 171 | 172 | bucketURL, _, err := NormalizeBucketKey("", tmp, "") 173 | assert.Nil(t, err) 174 | bucket, err := OpenBucket(context.Background(), bucketURL, "") 175 | assert.Nil(t, err) 176 | assert.NotNil(t, bucket) 177 | assert.Nil(t, os.WriteFile(filepath.Join(tmp, "archive.pmtiles"), []byte{1, 2, 3}, 0666)) 178 | 179 | // first read from file 180 | reader, etag1, status, err := bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 1, 1, "") 181 | assert.Equal(t, 206, status) 182 | assert.Nil(t, err) 183 | data, err := io.ReadAll(reader) 184 | assert.Nil(t, err) 185 | 
assert.Equal(t, []byte{2}, data) 186 | 187 | // change file, verify etag changes 188 | os.Rename(filepath.Join(tmp, "archive.pmtiles"), filepath.Join(tmp, "archive3.pmtiles")) 189 | os.Rename(filepath.Join(tmp, "archive2.pmtiles"), filepath.Join(tmp, "archive.pmtiles")) 190 | reader, etag2, status, err := bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 1, 1, "") 191 | assert.Equal(t, 206, status) 192 | assert.Nil(t, err) 193 | data, err = io.ReadAll(reader) 194 | assert.Nil(t, err) 195 | assert.NotEqual(t, etag1, etag2) 196 | assert.Equal(t, []byte{5}, data) 197 | 198 | // and requesting with old etag fails with refresh required error 199 | _, _, status, err = bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 1, 1, etag1) 200 | assert.Equal(t, 412, status) 201 | assert.True(t, isRefreshRequiredError(err)) 202 | } 203 | 204 | func TestFileShorterThan16K(t *testing.T) { 205 | tmp := t.TempDir() 206 | assert.Nil(t, os.WriteFile(filepath.Join(tmp, "archive.pmtiles"), []byte{1, 2, 3}, 0666)) 207 | 208 | bucketURL, _, err := NormalizeBucketKey("", tmp, "") 209 | bucket, err := OpenBucket(context.Background(), bucketURL, "") 210 | 211 | reader, _, status, err := bucket.NewRangeReaderEtag(context.Background(), "archive.pmtiles", 0, 16384, "") 212 | assert.Equal(t, 206, status) 213 | assert.Nil(t, err) 214 | data, err := io.ReadAll(reader) 215 | assert.Nil(t, err) 216 | assert.Equal(t, 3, len(data)) 217 | } 218 | 219 | func TestSetProviderEtagAwsV2(t *testing.T) { 220 | var awsV2Req s3.GetObjectInput 221 | assert.Nil(t, awsV2Req.IfMatch) 222 | asFunc := func(i interface{}) bool { 223 | v, ok := i.(**s3.GetObjectInput) 224 | if ok { 225 | *v = &awsV2Req 226 | } 227 | return true 228 | } 229 | setProviderEtag(asFunc, "123") 230 | assert.Equal(t, aws.String("123"), awsV2Req.IfMatch) 231 | } 232 | 233 | func TestSetProviderEtagAzure(t *testing.T) { 234 | var azOptions azblob.DownloadStreamOptions 235 | assert.Nil(t, 
azOptions.AccessConditions) 236 | asFunc := func(i interface{}) bool { 237 | v, ok := i.(**azblob.DownloadStreamOptions) 238 | if ok { 239 | *v = &azOptions 240 | } 241 | return ok 242 | } 243 | setProviderEtag(asFunc, "123") 244 | assert.Equal(t, azcore.ETag("123"), *azOptions.AccessConditions.ModifiedAccessConditions.IfMatch) 245 | } 246 | 247 | func TestGetProviderErrorStatusCode(t *testing.T) { 248 | awsV2Err := &smithyHttp.ResponseError{Response: &smithyHttp.Response{Response: &http.Response{ 249 | StatusCode: 500, 250 | Header: http.Header{}, 251 | }}} 252 | statusCode := getProviderErrorStatusCode(awsV2Err) 253 | assert.Equal(t, 500, statusCode) 254 | 255 | azureErr := &azcore.ResponseError{StatusCode: 500} 256 | statusCode = getProviderErrorStatusCode(azureErr) 257 | assert.Equal(t, 500, statusCode) 258 | 259 | gcpErr := &googleapi.Error{Code: 500} 260 | statusCode = getProviderErrorStatusCode(gcpErr) 261 | assert.Equal(t, 500, statusCode) 262 | 263 | err := errors.New("generic error") 264 | statusCode = getProviderErrorStatusCode(err) 265 | assert.Equal(t, 404, statusCode) 266 | } 267 | 268 | func TestGenerationEtag(t *testing.T) { 269 | assert.Equal(t, int64(123), etagToGeneration("123")) 270 | assert.Equal(t, "123", generationToEtag(int64(123))) 271 | } 272 | -------------------------------------------------------------------------------- /pmtiles/cluster.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "fmt" 5 | "github.com/schollz/progressbar/v3" 6 | "io" 7 | "log" 8 | "os" 9 | ) 10 | 11 | func Cluster(logger *log.Logger, InputPMTiles string, deduplicate bool) error { 12 | file, err := os.OpenFile(InputPMTiles, os.O_RDONLY, 0666) 13 | if err != nil { 14 | return err 15 | } 16 | 17 | buf := make([]byte, 127) 18 | _, err = file.Read(buf) 19 | if err != nil { 20 | return err 21 | } 22 | 23 | header, err := DeserializeHeader(buf) 24 | if err != nil { 25 | return err 26 | } 27 | 28 
| if header.Clustered { 29 | return fmt.Errorf("archive is already clustered") 30 | } 31 | 32 | fmt.Println("total directory size", header.RootLength+header.LeafDirectoryLength) 33 | 34 | metadataReader := io.NewSectionReader(file, int64(header.MetadataOffset), int64(header.MetadataLength)) 35 | 36 | metadata, err := DeserializeMetadata(metadataReader, header.InternalCompression) 37 | 38 | resolver := newResolver(deduplicate, false) 39 | tmpfile, err := os.CreateTemp("", "pmtiles") 40 | if err != nil { 41 | return err 42 | } 43 | 44 | bar := progressbar.Default(int64(header.TileEntriesCount)) 45 | 46 | err = IterateEntries(header, 47 | func(offset uint64, length uint64) ([]byte, error) { 48 | return io.ReadAll(io.NewSectionReader(file, int64(offset), int64(length))) 49 | }, 50 | func(e EntryV3) { 51 | data, _ := io.ReadAll(io.NewSectionReader(file, int64(header.TileDataOffset+e.Offset), int64(e.Length))) 52 | if isNew, newData := resolver.AddTileIsNew(e.TileID, data, e.RunLength); isNew { 53 | tmpfile.Write(newData) 54 | } 55 | bar.Add(1) 56 | }) 57 | 58 | if err != nil { 59 | return err 60 | } 61 | 62 | file.Close() 63 | 64 | header.Clustered = true 65 | newHeader, err := finalize(logger, resolver, header, tmpfile, InputPMTiles, metadata) 66 | if err != nil { 67 | return err 68 | } 69 | fmt.Printf("total directory size %d (%f%% of original)\n", newHeader.RootLength+newHeader.LeafDirectoryLength, float64(newHeader.RootLength+newHeader.LeafDirectoryLength)/float64(header.RootLength+header.LeafDirectoryLength)*100) 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /pmtiles/cluster_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "os" 6 | "testing" 7 | ) 8 | 9 | func TestCluster(t *testing.T) { 10 | fileToCluster := makeFixtureCopy(t, "unclustered", "cluster") 11 | 12 | err := 
Cluster(logger, fileToCluster, true) 13 | assert.Nil(t, err) 14 | 15 | file, err := os.OpenFile(fileToCluster, os.O_RDONLY, 0666) 16 | defer file.Close() 17 | assert.Nil(t, err) 18 | 19 | buf := make([]byte, 127) 20 | _, _ = file.Read(buf) 21 | header, _ := DeserializeHeader(buf) 22 | assert.True(t, header.Clustered) 23 | } 24 | -------------------------------------------------------------------------------- /pmtiles/convert.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "encoding/json" 7 | "fmt" 8 | "hash" 9 | "hash/fnv" 10 | "io" 11 | "log" 12 | "math" 13 | "os" 14 | "strconv" 15 | "strings" 16 | "time" 17 | 18 | "github.com/RoaringBitmap/roaring/roaring64" 19 | "github.com/schollz/progressbar/v3" 20 | "zombiezen.com/go/sqlite" 21 | ) 22 | 23 | type offsetLen struct { 24 | Offset uint64 25 | Length uint32 26 | } 27 | 28 | type resolver struct { 29 | deduplicate bool 30 | compress bool 31 | Entries []EntryV3 32 | Offset uint64 33 | OffsetMap map[string]offsetLen 34 | AddressedTiles uint64 // none of them can be empty 35 | compressor *gzip.Writer 36 | compressTmp *bytes.Buffer 37 | hashfunc hash.Hash 38 | } 39 | 40 | func (r *resolver) NumContents() uint64 { 41 | if r.deduplicate { 42 | return uint64(len(r.OffsetMap)) 43 | } 44 | return r.AddressedTiles 45 | } 46 | 47 | // must be called in increasing tile_id order, uniquely 48 | func (r *resolver) AddTileIsNew(tileID uint64, data []byte, runLength uint32) (bool, []byte) { 49 | r.AddressedTiles++ 50 | var found offsetLen 51 | var ok bool 52 | var sumString string 53 | if r.deduplicate { 54 | r.hashfunc.Reset() 55 | r.hashfunc.Write(data) 56 | var tmp []byte 57 | sumString = string(r.hashfunc.Sum(tmp)) 58 | found, ok = r.OffsetMap[sumString] 59 | } 60 | 61 | if r.deduplicate && ok { 62 | lastEntry := r.Entries[len(r.Entries)-1] 63 | if tileID == lastEntry.TileID+uint64(lastEntry.RunLength) && 
lastEntry.Offset == found.Offset { 64 | // RLE 65 | if lastEntry.RunLength+runLength > math.MaxUint32 { 66 | panic("Maximum 32-bit run length exceeded") 67 | } 68 | r.Entries[len(r.Entries)-1].RunLength += runLength 69 | } else { 70 | r.Entries = append(r.Entries, EntryV3{tileID, found.Offset, found.Length, runLength}) 71 | } 72 | 73 | return false, nil 74 | } 75 | var newData []byte 76 | if !r.compress || (len(data) >= 2 && data[0] == 31 && data[1] == 139) { 77 | // the tile is already compressed 78 | newData = data 79 | } else { 80 | r.compressTmp.Reset() 81 | r.compressor.Reset(r.compressTmp) 82 | r.compressor.Write(data) 83 | r.compressor.Close() 84 | newData = r.compressTmp.Bytes() 85 | } 86 | 87 | if r.deduplicate { 88 | r.OffsetMap[sumString] = offsetLen{r.Offset, uint32(len(newData))} 89 | } 90 | r.Entries = append(r.Entries, EntryV3{tileID, r.Offset, uint32(len(newData)), runLength}) 91 | r.Offset += uint64(len(newData)) 92 | return true, newData 93 | } 94 | 95 | func newResolver(deduplicate bool, compress bool) *resolver { 96 | b := new(bytes.Buffer) 97 | compressor, _ := gzip.NewWriterLevel(b, gzip.BestCompression) 98 | r := resolver{deduplicate, compress, make([]EntryV3, 0), 0, make(map[string]offsetLen), 0, compressor, b, fnv.New128a()} 99 | return &r 100 | } 101 | 102 | // Convert an existing archive on disk to a new PMTiles specification version 3 archive. 
103 | func Convert(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error { 104 | return convertMbtiles(logger, input, output, deduplicate, tmpfile) 105 | } 106 | 107 | func setZoomCenterDefaults(header *HeaderV3, entries []EntryV3) { 108 | minZ, _, _ := IDToZxy(entries[0].TileID) 109 | header.MinZoom = minZ 110 | maxZ, _, _ := IDToZxy(entries[len(entries)-1].TileID) 111 | header.MaxZoom = maxZ 112 | 113 | if header.CenterZoom == 0 && header.CenterLonE7 == 0 && header.CenterLatE7 == 0 { 114 | header.CenterZoom = header.MinZoom 115 | header.CenterLonE7 = (header.MinLonE7 + header.MaxLonE7) / 2 116 | header.CenterLatE7 = (header.MinLatE7 + header.MaxLatE7) / 2 117 | } 118 | } 119 | 120 | func convertMbtiles(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error { 121 | start := time.Now() 122 | conn, err := sqlite.OpenConn(input, sqlite.OpenReadOnly) 123 | if err != nil { 124 | return fmt.Errorf("Failed to create database connection, %w", err) 125 | } 126 | defer conn.Close() 127 | 128 | mbtilesMetadata := make([]string, 0) 129 | { 130 | stmt, _, err := conn.PrepareTransient("SELECT name, value FROM metadata") 131 | if err != nil { 132 | return fmt.Errorf("Failed to create SQL statement, %w", err) 133 | } 134 | defer stmt.Finalize() 135 | 136 | for { 137 | row, err := stmt.Step() 138 | if err != nil { 139 | return fmt.Errorf("Failed to step statement, %w", err) 140 | } 141 | if !row { 142 | break 143 | } 144 | mbtilesMetadata = append(mbtilesMetadata, stmt.ColumnText(0)) 145 | mbtilesMetadata = append(mbtilesMetadata, stmt.ColumnText(1)) 146 | } 147 | } 148 | 149 | if !mbtilesMetadataHasFormat(mbtilesMetadata) { 150 | logger.Println("WARNING: MBTiles metadata is missing format information. 
Update this with: INSERT INTO metadata (name, value) VALUES ('format', 'png')") 151 | } 152 | 153 | header, jsonMetadata, err := mbtilesToHeaderJSON(mbtilesMetadata) 154 | 155 | if err != nil { 156 | return fmt.Errorf("Failed to convert MBTiles to header JSON, %w", err) 157 | } 158 | 159 | logger.Println("Pass 1: Assembling TileID set") 160 | // assemble a sorted set of all TileIds 161 | tileset := roaring64.New() 162 | { 163 | stmt, _, err := conn.PrepareTransient("SELECT zoom_level, tile_column, tile_row FROM tiles") 164 | if err != nil { 165 | return fmt.Errorf("Failed to create statement, %w", err) 166 | } 167 | defer stmt.Finalize() 168 | 169 | for { 170 | row, err := stmt.Step() 171 | if err != nil { 172 | return fmt.Errorf("Failed to step statement, %w", err) 173 | } 174 | if !row { 175 | break 176 | } 177 | z := uint8(stmt.ColumnInt64(0)) 178 | x := uint32(stmt.ColumnInt64(1)) 179 | y := uint32(stmt.ColumnInt64(2)) 180 | flippedY := (1 << z) - 1 - y 181 | id := ZxyToID(z, x, flippedY) 182 | tileset.Add(id) 183 | } 184 | } 185 | 186 | if tileset.GetCardinality() == 0 { 187 | return fmt.Errorf("no tiles in MBTiles archive") 188 | } 189 | 190 | logger.Println("Pass 2: writing tiles") 191 | resolve := newResolver(deduplicate, header.TileType == Mvt) 192 | { 193 | bar := progressbar.Default(int64(tileset.GetCardinality())) 194 | i := tileset.Iterator() 195 | stmt := conn.Prep("SELECT tile_data FROM tiles WHERE zoom_level = ? AND tile_column = ? 
AND tile_row = ?") 196 | 197 | var rawTileTmp bytes.Buffer 198 | 199 | for i.HasNext() { 200 | id := i.Next() 201 | z, x, y := IDToZxy(id) 202 | flippedY := (1 << z) - 1 - y 203 | 204 | stmt.BindInt64(1, int64(z)) 205 | stmt.BindInt64(2, int64(x)) 206 | stmt.BindInt64(3, int64(flippedY)) 207 | 208 | hasRow, err := stmt.Step() 209 | if err != nil { 210 | return fmt.Errorf("Failed to step statement, %w", err) 211 | } 212 | if !hasRow { 213 | return fmt.Errorf("Missing row") 214 | } 215 | 216 | reader := stmt.ColumnReader(0) 217 | rawTileTmp.Reset() 218 | rawTileTmp.ReadFrom(reader) 219 | data := rawTileTmp.Bytes() 220 | 221 | if len(data) > 0 { 222 | if isNew, newData := resolve.AddTileIsNew(id, data, 1); isNew { 223 | _, err := tmpfile.Write(newData) 224 | if err != nil { 225 | return fmt.Errorf("Failed to write to tempfile: %s", err) 226 | } 227 | } 228 | } 229 | 230 | stmt.ClearBindings() 231 | stmt.Reset() 232 | bar.Add(1) 233 | } 234 | } 235 | _, err = finalize(logger, resolve, header, tmpfile, output, jsonMetadata) 236 | if err != nil { 237 | return err 238 | } 239 | logger.Println("Finished in ", time.Since(start)) 240 | return nil 241 | } 242 | 243 | func finalize(logger *log.Logger, resolve *resolver, header HeaderV3, tmpfile *os.File, output string, jsonMetadata map[string]interface{}) (HeaderV3, error) { 244 | logger.Println("# of addressed tiles: ", resolve.AddressedTiles) 245 | logger.Println("# of tile entries (after RLE): ", len(resolve.Entries)) 246 | logger.Println("# of tile contents: ", resolve.NumContents()) 247 | 248 | header.AddressedTilesCount = resolve.AddressedTiles 249 | header.TileEntriesCount = uint64(len(resolve.Entries)) 250 | header.TileContentsCount = resolve.NumContents() 251 | 252 | // assemble the final file 253 | outfile, err := os.Create(output) 254 | if err != nil { 255 | return header, fmt.Errorf("Failed to create %s, %w", output, err) 256 | } 257 | defer outfile.Close() 258 | 259 | rootBytes, leavesBytes, numLeaves := 
optimizeDirectories(resolve.Entries, 16384-HeaderV3LenBytes, Gzip) 260 | 261 | if numLeaves > 0 { 262 | logger.Println("Root dir bytes: ", len(rootBytes)) 263 | logger.Println("Leaves dir bytes: ", len(leavesBytes)) 264 | logger.Println("Num leaf dirs: ", numLeaves) 265 | logger.Println("Total dir bytes: ", len(rootBytes)+len(leavesBytes)) 266 | logger.Println("Average leaf dir bytes: ", len(leavesBytes)/numLeaves) 267 | logger.Printf("Average bytes per addressed tile: %.2f\n", float64(len(rootBytes)+len(leavesBytes))/float64(resolve.AddressedTiles)) 268 | } else { 269 | logger.Println("Total dir bytes: ", len(rootBytes)) 270 | logger.Printf("Average bytes per addressed tile: %.2f\n", float64(len(rootBytes))/float64(resolve.AddressedTiles)) 271 | } 272 | 273 | metadataBytes, err := SerializeMetadata(jsonMetadata, Gzip) 274 | 275 | if err != nil { 276 | return header, fmt.Errorf("Failed to marshal metadata, %w", err) 277 | } 278 | 279 | setZoomCenterDefaults(&header, resolve.Entries) 280 | 281 | header.Clustered = true 282 | header.InternalCompression = Gzip 283 | if header.TileType == Mvt { 284 | header.TileCompression = Gzip 285 | } 286 | 287 | header.RootOffset = HeaderV3LenBytes 288 | header.RootLength = uint64(len(rootBytes)) 289 | header.MetadataOffset = header.RootOffset + header.RootLength 290 | header.MetadataLength = uint64(len(metadataBytes)) 291 | header.LeafDirectoryOffset = header.MetadataOffset + header.MetadataLength 292 | header.LeafDirectoryLength = uint64(len(leavesBytes)) 293 | header.TileDataOffset = header.LeafDirectoryOffset + header.LeafDirectoryLength 294 | header.TileDataLength = resolve.Offset 295 | 296 | headerBytes := SerializeHeader(header) 297 | 298 | _, err = outfile.Write(headerBytes) 299 | if err != nil { 300 | return header, fmt.Errorf("Failed to write header to outfile, %w", err) 301 | } 302 | _, err = outfile.Write(rootBytes) 303 | if err != nil { 304 | return header, fmt.Errorf("Failed to write header to outfile, %w", err) 305 | 
} 306 | _, err = outfile.Write(metadataBytes) 307 | if err != nil { 308 | return header, fmt.Errorf("Failed to write header to outfile, %w", err) 309 | } 310 | _, err = outfile.Write(leavesBytes) 311 | if err != nil { 312 | return header, fmt.Errorf("Failed to write header to outfile, %w", err) 313 | } 314 | _, err = tmpfile.Seek(0, 0) 315 | if err != nil { 316 | return header, fmt.Errorf("Failed to seek to start of tempfile, %w", err) 317 | } 318 | _, err = io.Copy(outfile, tmpfile) 319 | if err != nil { 320 | return header, fmt.Errorf("Failed to copy data to outfile, %w", err) 321 | } 322 | 323 | return header, nil 324 | } 325 | 326 | func parseBounds(bounds string) (int32, int32, int32, int32, error) { 327 | parts := strings.Split(bounds, ",") 328 | E7 := 10000000.0 329 | minLon, err := strconv.ParseFloat(strings.TrimSpace(parts[0]), 64) 330 | if err != nil { 331 | return 0, 0, 0, 0, err 332 | } 333 | minLat, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64) 334 | if err != nil { 335 | return 0, 0, 0, 0, err 336 | } 337 | maxLon, err := strconv.ParseFloat(strings.TrimSpace(parts[2]), 64) 338 | if err != nil { 339 | return 0, 0, 0, 0, err 340 | } 341 | maxLat, err := strconv.ParseFloat(strings.TrimSpace(parts[3]), 64) 342 | if err != nil { 343 | return 0, 0, 0, 0, err 344 | } 345 | return int32(minLon * E7), int32(minLat * E7), int32(maxLon * E7), int32(maxLat * E7), nil 346 | } 347 | 348 | func parseCenter(center string) (int32, int32, uint8, error) { 349 | parts := strings.Split(center, ",") 350 | E7 := 10000000.0 351 | centerLon, err := strconv.ParseFloat(strings.TrimSpace(parts[0]), 64) 352 | if err != nil { 353 | return 0, 0, 0, err 354 | } 355 | centerLat, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64) 356 | if err != nil { 357 | return 0, 0, 0, err 358 | } 359 | centerZoom, err := strconv.ParseInt(strings.TrimSpace(parts[2]), 10, 8) 360 | if err != nil { 361 | return 0, 0, 0, err 362 | } 363 | return int32(centerLon * E7), 
int32(centerLat * E7), uint8(centerZoom), nil 364 | } 365 | 366 | func mbtilesMetadataHasFormat(mbtilesMetadata []string) bool { 367 | for i := 0; i < len(mbtilesMetadata); i += 2 { 368 | if mbtilesMetadata[i] == "format" { 369 | return true 370 | } 371 | } 372 | return false 373 | } 374 | 375 | func mbtilesToHeaderJSON(mbtilesMetadata []string) (HeaderV3, map[string]interface{}, error) { 376 | header := HeaderV3{} 377 | jsonResult := make(map[string]interface{}) 378 | boundsSet := false 379 | for i := 0; i < len(mbtilesMetadata); i += 2 { 380 | value := mbtilesMetadata[i+1] 381 | switch key := mbtilesMetadata[i]; key { 382 | case "format": 383 | switch value { 384 | case "pbf": 385 | header.TileType = Mvt 386 | case "png": 387 | header.TileType = Png 388 | header.TileCompression = NoCompression 389 | case "jpg": 390 | header.TileType = Jpeg 391 | header.TileCompression = NoCompression 392 | case "webp": 393 | header.TileType = Webp 394 | header.TileCompression = NoCompression 395 | case "avif": 396 | header.TileType = Avif 397 | header.TileCompression = NoCompression 398 | } 399 | jsonResult["format"] = value 400 | case "bounds": 401 | minLon, minLat, maxLon, maxLat, err := parseBounds(value) 402 | if err != nil { 403 | return header, jsonResult, err 404 | } 405 | 406 | if minLon >= maxLon || minLat >= maxLat { 407 | return header, jsonResult, fmt.Errorf("zero-area bounds in mbtiles metadata") 408 | } 409 | header.MinLonE7 = minLon 410 | header.MinLatE7 = minLat 411 | header.MaxLonE7 = maxLon 412 | header.MaxLatE7 = maxLat 413 | boundsSet = true 414 | case "center": 415 | centerLon, centerLat, centerZoom, err := parseCenter(value) 416 | if err != nil { 417 | return header, jsonResult, err 418 | } 419 | header.CenterLonE7 = centerLon 420 | header.CenterLatE7 = centerLat 421 | header.CenterZoom = centerZoom 422 | case "json": 423 | var mbtilesJSON map[string]interface{} 424 | json.Unmarshal([]byte(value), &mbtilesJSON) 425 | for k, v := range mbtilesJSON { 426 | 
jsonResult[k] = v 427 | } 428 | case "compression": 429 | switch value { 430 | case "gzip": 431 | if header.TileType == Mvt { 432 | header.TileCompression = Gzip 433 | } else { 434 | header.TileCompression = NoCompression 435 | } 436 | } 437 | jsonResult["compression"] = value 438 | // name, attribution, description, type, version 439 | case "scheme": 440 | // do nothing - the scheme "TMS" in TileJSON is meaningless in PMTiles 441 | // remove it from the stored metadata 442 | default: 443 | jsonResult[key] = value 444 | } 445 | } 446 | 447 | E7 := 10000000.0 448 | if !boundsSet { 449 | header.MinLonE7 = int32(-180 * E7) 450 | header.MinLatE7 = int32(-85 * E7) 451 | header.MaxLonE7 = int32(180 * E7) 452 | header.MaxLatE7 = int32(85 * E7) 453 | } 454 | 455 | return header, jsonResult, nil 456 | } 457 | -------------------------------------------------------------------------------- /pmtiles/convert_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestResolver(t *testing.T) { 9 | resolver := newResolver(true, true) 10 | resolver.AddTileIsNew(1, []byte{0x1, 0x2}, 1) 11 | assert.Equal(t, 1, len(resolver.Entries)) 12 | resolver.AddTileIsNew(2, []byte{0x1, 0x3}, 1) 13 | assert.Equal(t, uint64(52), resolver.Offset) 14 | isNew, _ := resolver.AddTileIsNew(3, []byte{0x1, 0x2}, 1) 15 | assert.False(t, isNew) 16 | assert.Equal(t, uint64(52), resolver.Offset) 17 | resolver.AddTileIsNew(4, []byte{0x1, 0x2}, 1) 18 | assert.Equal(t, 3, len(resolver.Entries)) 19 | resolver.AddTileIsNew(6, []byte{0x1, 0x2}, 1) 20 | assert.Equal(t, 4, len(resolver.Entries)) 21 | } 22 | 23 | func TestResolverRunLength(t *testing.T) { 24 | resolver := newResolver(true, true) 25 | resolver.AddTileIsNew(1, []byte{0x1, 0x2}, 2) 26 | assert.Equal(t, uint32(2), resolver.Entries[0].RunLength) 27 | resolver.AddTileIsNew(3, []byte{0x1, 0x2}, 2) 28 | 
assert.Equal(t, 1, len(resolver.Entries)) 29 | assert.Equal(t, uint32(4), resolver.Entries[0].RunLength) 30 | } 31 | 32 | func TestResolverRunLengthNoDeduplicate(t *testing.T) { 33 | resolver := newResolver(false, true) 34 | resolver.AddTileIsNew(1, []byte{0x1, 0x2}, 2) 35 | assert.Equal(t, uint32(2), resolver.Entries[0].RunLength) 36 | } 37 | 38 | func TestZoomCenterDefaults(t *testing.T) { 39 | // with no center set 40 | header := HeaderV3{} 41 | header.MinLonE7 = -45 * 10000000 42 | header.MaxLonE7 = -43 * 10000000 43 | header.MinLatE7 = 21 * 10000000 44 | header.MaxLatE7 = 23 * 10000000 45 | entries := make([]EntryV3, 0) 46 | entries = append(entries, EntryV3{ZxyToID(3, 0, 0), 0, 0, 0}) 47 | entries = append(entries, EntryV3{ZxyToID(4, 0, 0), 1, 1, 1}) 48 | setZoomCenterDefaults(&header, entries) 49 | assert.Equal(t, uint8(3), header.MinZoom) 50 | assert.Equal(t, uint8(4), header.MaxZoom) 51 | assert.Equal(t, uint8(3), header.CenterZoom) 52 | assert.Equal(t, int32(-44*10000000), header.CenterLonE7) 53 | assert.Equal(t, int32(22*10000000), header.CenterLatE7) 54 | 55 | // with a center set 56 | header = HeaderV3{} 57 | header.MinLonE7 = -45 * 10000000 58 | header.MaxLonE7 = -43 * 10000000 59 | header.MinLatE7 = 21 * 10000000 60 | header.MaxLatE7 = 23 * 10000000 61 | header.CenterLonE7 = header.MinLonE7 62 | header.CenterLatE7 = header.MinLatE7 63 | header.CenterZoom = 4 64 | setZoomCenterDefaults(&header, entries) 65 | assert.Equal(t, uint8(4), header.CenterZoom) 66 | assert.Equal(t, int32(-45*10000000), header.CenterLonE7) 67 | assert.Equal(t, int32(21*10000000), header.CenterLatE7) 68 | } 69 | 70 | func TestMbtiles(t *testing.T) { 71 | header, jsonMetadata, err := mbtilesToHeaderJSON([]string{ 72 | "name", "test_name", 73 | "format", "pbf", 74 | "bounds", "-180.0,-85,180,85", 75 | "center", "-122.1906,37.7599,11", 76 | "attribution", "
abc
", 77 | "description", "a description", 78 | "type", "overlay", 79 | "version", "1", 80 | "json", "{\"vector_layers\":[{\"abc\":123}],\"tilestats\":{\"def\":456}}", 81 | "compression", "gzip", 82 | "scheme", "tms", 83 | }) 84 | assert.Nil(t, err) 85 | assert.Equal(t, int32(-180*10000000), header.MinLonE7) 86 | assert.Equal(t, int32(-85*10000000), header.MinLatE7) 87 | assert.Equal(t, int32(180*10000000), header.MaxLonE7) 88 | assert.Equal(t, int32(85*10000000), header.MaxLatE7) 89 | assert.Equal(t, Mvt, int(header.TileType)) 90 | assert.Equal(t, int32(-122.1906*10000000), header.CenterLonE7) 91 | assert.Equal(t, int32(37.7599*10000000), header.CenterLatE7) 92 | assert.Equal(t, 11, int(header.CenterZoom)) 93 | assert.Equal(t, Gzip, int(header.TileCompression)) 94 | 95 | // assert removal of redundant fields 96 | _, ok := jsonMetadata["center"] 97 | assert.False(t, ok) 98 | _, ok = jsonMetadata["bounds"] 99 | assert.False(t, ok) 100 | 101 | // assert removal of problematic fields. 102 | _, ok = jsonMetadata["scheme"] 103 | assert.False(t, ok) 104 | 105 | // assert preservation of metadata fields for roundtrip 106 | _, ok = jsonMetadata["name"] 107 | assert.True(t, ok) 108 | 109 | _, ok = jsonMetadata["format"] 110 | assert.True(t, ok) 111 | 112 | _, ok = jsonMetadata["attribution"] 113 | assert.True(t, ok) 114 | 115 | _, ok = jsonMetadata["description"] 116 | assert.True(t, ok) 117 | 118 | _, ok = jsonMetadata["type"] 119 | assert.True(t, ok) 120 | 121 | _, ok = jsonMetadata["version"] 122 | assert.True(t, ok) 123 | 124 | _, ok = jsonMetadata["compression"] 125 | assert.True(t, ok) 126 | 127 | // assert well-known json fields at top level 128 | _, ok = jsonMetadata["vector_layers"] 129 | assert.True(t, ok) 130 | 131 | _, ok = jsonMetadata["tilestats"] 132 | assert.True(t, ok) 133 | } 134 | 135 | func TestMbtilesMissingFormat(t *testing.T) { 136 | assert.False(t, mbtilesMetadataHasFormat([]string{"version", "1.0"})) 137 | assert.True(t, 
mbtilesMetadataHasFormat([]string{"format", "png"})) 138 | } 139 | 140 | func TestMbtilesMissingBoundsCenter(t *testing.T) { 141 | header, _, err := mbtilesToHeaderJSON([]string{ 142 | "name", "test_name", 143 | "format", "pbf", 144 | "attribution", "
abc
", 145 | "description", "a description", 146 | "type", "overlay", 147 | "version", "1", 148 | "json", "{\"vector_layers\":[{\"abc\":123}],\"tilestats\":{\"def\":456}}", 149 | "compression", "gzip", 150 | }) 151 | assert.Nil(t, err) 152 | assert.Equal(t, int32(-180*10000000), header.MinLonE7) 153 | assert.Equal(t, int32(-85*10000000), header.MinLatE7) 154 | assert.Equal(t, int32(180*10000000), header.MaxLonE7) 155 | assert.Equal(t, int32(85*10000000), header.MaxLatE7) 156 | assert.Equal(t, int32(0), header.CenterLonE7) 157 | assert.Equal(t, int32(0), header.CenterLatE7) 158 | } 159 | 160 | func TestMbtilesDegenerateBounds(t *testing.T) { 161 | _, _, err := mbtilesToHeaderJSON([]string{ 162 | "name", "test_name", 163 | "format", "pbf", 164 | "bounds", "0,0,0,0", 165 | "attribution", "
abc
", 166 | "description", "a description", 167 | "type", "overlay", 168 | "version", "1", 169 | "json", "{\"vector_layers\":[{\"abc\":123}],\"tilestats\":{\"def\":456}}", 170 | "compression", "gzip", 171 | }) 172 | assert.NotNil(t, err) 173 | } 174 | 175 | func TestMbtilesCoordinatesHasSpace(t *testing.T) { 176 | header, _, err := mbtilesToHeaderJSON([]string{ 177 | "name", "test_name", 178 | "format", "pbf", 179 | "bounds", " -180.0, -85, 180, 85", 180 | "center", " -122.1906, 37.7599, 11", 181 | "attribution", "
abc
", 182 | "description", "a description", 183 | "type", "overlay", 184 | "version", "1", 185 | "compression", "gzip", 186 | }) 187 | assert.Nil(t, err) 188 | assert.Equal(t, int32(-180*10000000), header.MinLonE7) 189 | assert.Equal(t, int32(-85*10000000), header.MinLatE7) 190 | assert.Equal(t, int32(180*10000000), header.MaxLonE7) 191 | assert.Equal(t, int32(85*10000000), header.MaxLatE7) 192 | assert.Equal(t, int32(-122.1906*10000000), header.CenterLonE7) 193 | assert.Equal(t, int32(37.7599*10000000), header.CenterLatE7) 194 | } 195 | -------------------------------------------------------------------------------- /pmtiles/directory.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "compress/gzip" 7 | "encoding/binary" 8 | "encoding/json" 9 | "errors" 10 | "fmt" 11 | "io" 12 | ) 13 | 14 | type Zxy struct { 15 | Z uint8 16 | X uint32 17 | Y uint32 18 | } 19 | 20 | // Compression is the compression algorithm applied to individual tiles (or none) 21 | type Compression uint8 22 | 23 | const ( 24 | UnknownCompression Compression = 0 25 | NoCompression = 1 26 | Gzip = 2 27 | Brotli = 3 28 | Zstd = 4 29 | ) 30 | 31 | // TileType is the format of individual tile contents in the archive. 32 | type TileType uint8 33 | 34 | const ( 35 | UnknownTileType TileType = 0 36 | Mvt = 1 37 | Png = 2 38 | Jpeg = 3 39 | Webp = 4 40 | Avif = 5 41 | ) 42 | 43 | // HeaderV3LenBytes is the fixed-size binary header size. 44 | const HeaderV3LenBytes = 127 45 | 46 | // HeaderV3 is a binary header for PMTiles specification version 3. 
47 | type HeaderV3 struct { 48 | SpecVersion uint8 49 | RootOffset uint64 50 | RootLength uint64 51 | MetadataOffset uint64 52 | MetadataLength uint64 53 | LeafDirectoryOffset uint64 54 | LeafDirectoryLength uint64 55 | TileDataOffset uint64 56 | TileDataLength uint64 57 | AddressedTilesCount uint64 58 | TileEntriesCount uint64 59 | TileContentsCount uint64 60 | Clustered bool 61 | InternalCompression Compression 62 | TileCompression Compression 63 | TileType TileType 64 | MinZoom uint8 65 | MaxZoom uint8 66 | MinLonE7 int32 67 | MinLatE7 int32 68 | MaxLonE7 int32 69 | MaxLatE7 int32 70 | CenterZoom uint8 71 | CenterLonE7 int32 72 | CenterLatE7 int32 73 | } 74 | 75 | // HeaderJson is a human-readable representation of parts of the binary header 76 | // that may need to be manually edited. 77 | // Omitted parts are the responsibility of the generator program and not editable. 78 | // The formatting is aligned with the TileJSON / MBTiles specification. 79 | type HeaderJson struct { 80 | TileCompression string `json:"tile_compression"` 81 | TileType string `json:"tile_type"` 82 | MinZoom int `json:"minzoom"` 83 | MaxZoom int `json:"maxzoom"` 84 | Bounds []float64 `json:"bounds"` 85 | Center []float64 `json:"center"` 86 | } 87 | 88 | func headerContentType(header HeaderV3) (string, bool) { 89 | switch header.TileType { 90 | case Mvt: 91 | return "application/x-protobuf", true 92 | case Png: 93 | return "image/png", true 94 | case Jpeg: 95 | return "image/jpeg", true 96 | case Webp: 97 | return "image/webp", true 98 | case Avif: 99 | return "image/avif", true 100 | default: 101 | return "", false 102 | } 103 | } 104 | 105 | func tileTypeToString(t TileType) string { 106 | switch t { 107 | case Mvt: 108 | return "mvt" 109 | case Png: 110 | return "png" 111 | case Jpeg: 112 | return "jpg" 113 | case Webp: 114 | return "webp" 115 | case Avif: 116 | return "avif" 117 | default: 118 | return "" 119 | } 120 | } 121 | 122 | func stringToTileType(t string) TileType { 123 | 
switch t { 124 | case "mvt": 125 | return Mvt 126 | case "png": 127 | return Png 128 | case "jpg": 129 | return Jpeg 130 | case "webp": 131 | return Webp 132 | case "avif": 133 | return Avif 134 | default: 135 | return UnknownTileType 136 | } 137 | } 138 | 139 | func headerExt(header HeaderV3) string { 140 | base := tileTypeToString(header.TileType) 141 | if base == "" { 142 | return "" 143 | } 144 | return "." + base 145 | } 146 | 147 | func compressionToString(compression Compression) (string, bool) { 148 | switch compression { 149 | case NoCompression: 150 | return "none", false 151 | case Gzip: 152 | return "gzip", true 153 | case Brotli: 154 | return "br", true 155 | case Zstd: 156 | return "zstd", true 157 | default: 158 | return "unknown", false 159 | } 160 | } 161 | 162 | func stringToCompression(s string) Compression { 163 | switch s { 164 | case "none": 165 | return NoCompression 166 | case "gzip": 167 | return Gzip 168 | case "br": 169 | return Brotli 170 | case "zstd": 171 | return Zstd 172 | default: 173 | return UnknownCompression 174 | } 175 | } 176 | 177 | func headerToJson(header HeaderV3) HeaderJson { 178 | compressionString, _ := compressionToString(header.TileCompression) 179 | return HeaderJson{ 180 | TileCompression: compressionString, 181 | TileType: tileTypeToString(header.TileType), 182 | MinZoom: int(header.MinZoom), 183 | MaxZoom: int(header.MaxZoom), 184 | Bounds: []float64{float64(header.MinLonE7) / 10000000, float64(header.MinLatE7) / 10000000, float64(header.MaxLonE7) / 10000000, float64(header.MaxLatE7) / 10000000}, 185 | Center: []float64{float64(header.CenterLonE7) / 10000000, float64(header.CenterLatE7) / 10000000, float64(header.CenterZoom)}, 186 | } 187 | } 188 | 189 | func headerToStringifiedJson(header HeaderV3) string { 190 | s, _ := json.MarshalIndent(headerToJson(header), "", " ") 191 | return string(s) 192 | } 193 | 194 | // EntryV3 is an entry in a PMTiles spec version 3 directory. 
195 | type EntryV3 struct { 196 | TileID uint64 197 | Offset uint64 198 | Length uint32 199 | RunLength uint32 200 | } 201 | 202 | type nopWriteCloser struct { 203 | *bytes.Buffer 204 | } 205 | 206 | func (w *nopWriteCloser) Close() error { return nil } 207 | 208 | func SerializeMetadata(metadata map[string]interface{}, compression Compression) ([]byte, error) { 209 | jsonBytes, err := json.Marshal(metadata) 210 | if err != nil { 211 | return nil, err 212 | } 213 | 214 | if compression == NoCompression { 215 | return jsonBytes, nil 216 | } else if compression == Gzip { 217 | var b bytes.Buffer 218 | w, err := gzip.NewWriterLevel(&b, gzip.BestCompression) 219 | if err != nil { 220 | return nil, err 221 | } 222 | w.Write(jsonBytes) 223 | w.Close() 224 | return b.Bytes(), nil 225 | } else { 226 | return nil, errors.New("compression not supported") 227 | } 228 | } 229 | 230 | func DeserializeMetadataBytes(reader io.Reader, compression Compression) ([]byte, error) { 231 | var jsonBytes []byte 232 | var err error 233 | 234 | if compression == NoCompression { 235 | jsonBytes, err = io.ReadAll(reader) 236 | if err != nil { 237 | return nil, err 238 | } 239 | } else if compression == Gzip { 240 | gzipReader, err := gzip.NewReader(reader) 241 | if err != nil { 242 | return nil, err 243 | } 244 | jsonBytes, err = io.ReadAll(gzipReader) 245 | if err != nil { 246 | return nil, err 247 | } 248 | gzipReader.Close() 249 | } else { 250 | return nil, errors.New("compression not supported") 251 | } 252 | 253 | return jsonBytes, nil 254 | } 255 | 256 | func DeserializeMetadata(reader io.Reader, compression Compression) (map[string]interface{}, error) { 257 | jsonBytes, err := DeserializeMetadataBytes(reader, compression) 258 | var metadata map[string]interface{} 259 | err = json.Unmarshal(jsonBytes, &metadata) 260 | 261 | if err != nil { 262 | return nil, err 263 | } 264 | 265 | return metadata, nil 266 | } 267 | 268 | func SerializeEntries(entries []EntryV3, compression Compression) 
[]byte {
	// Serialized directory layout (PMTiles v3 spec): entry count, then four
	// column-oriented varint sections: delta-encoded tile IDs, run lengths,
	// lengths, and offsets (0 meaning "contiguous with previous entry").
	var b bytes.Buffer
	var w io.WriteCloser

	tmp := make([]byte, binary.MaxVarintLen64)
	if compression == NoCompression {
		w = &nopWriteCloser{&b}
	} else if compression == Gzip {
		w, _ = gzip.NewWriterLevel(&b, gzip.BestCompression)
	} else {
		panic("Compression not supported")
	}

	var n int
	n = binary.PutUvarint(tmp, uint64(len(entries)))
	w.Write(tmp[:n])

	// column 1: tile IDs, delta-encoded against the previous entry
	lastID := uint64(0)
	for _, entry := range entries {
		n = binary.PutUvarint(tmp, uint64(entry.TileID)-lastID)
		w.Write(tmp[:n])

		lastID = uint64(entry.TileID)
	}

	// column 2: run lengths
	for _, entry := range entries {
		n := binary.PutUvarint(tmp, uint64(entry.RunLength))
		w.Write(tmp[:n])
	}

	// column 3: tile data lengths
	for _, entry := range entries {
		n := binary.PutUvarint(tmp, uint64(entry.Length))
		w.Write(tmp[:n])
	}

	// column 4: offsets; 0 encodes "immediately follows the previous entry"
	for i, entry := range entries {
		var n int
		if i > 0 && entry.Offset == entries[i-1].Offset+uint64(entries[i-1].Length) {
			n = binary.PutUvarint(tmp, 0)
		} else {
			n = binary.PutUvarint(tmp, uint64(entry.Offset+1)) // add 1 to not conflict with 0
		}
		w.Write(tmp[:n])
	}

	w.Close()

	return b.Bytes()
}

// DeserializeEntries decodes a serialized directory (see SerializeEntries)
// into a slice of entries.
// NOTE(review): errors from gzip.NewReader and binary.ReadUvarint are
// discarded; malformed or truncated input yields zero-valued entries rather
// than an error — confirm callers only pass trusted archive bytes.
func DeserializeEntries(data *bytes.Buffer, compression Compression) []EntryV3 {
	entries := make([]EntryV3, 0)

	var reader io.Reader

	if compression == NoCompression {
		reader = data
	} else if compression == Gzip {
		reader, _ = gzip.NewReader(data)
	} else {
		panic("Compression not supported")
	}
	byteReader := bufio.NewReader(reader)

	numEntries, _ := binary.ReadUvarint(byteReader)

	// column 1: tile IDs are delta-encoded relative to the previous entry
	lastID := uint64(0)
	for i := uint64(0); i < numEntries; i++ {
		tmp, _ := binary.ReadUvarint(byteReader)
		entries = append(entries, EntryV3{lastID + tmp, 0, 0, 0})
		lastID = lastID + tmp
	}

	// column 2: run lengths
	for i := uint64(0); i < numEntries; i++ {
		runLength, _ := binary.ReadUvarint(byteReader)
		entries[i].RunLength = uint32(runLength)
	}

	// column 3: lengths
	for i := uint64(0); i < numEntries; i++ {
		length, _ := binary.ReadUvarint(byteReader)
		entries[i].Length = uint32(length)
	}

	// column 4: offsets; 0 (except for the first entry) means contiguous
	// with the previous entry, otherwise the stored value is offset+1
	for i := uint64(0); i < numEntries; i++ {
		tmp, _ := binary.ReadUvarint(byteReader)
		if i > 0 && tmp == 0 {
			entries[i].Offset = entries[i-1].Offset + uint64(entries[i-1].Length)
		} else {
			entries[i].Offset = tmp - 1
		}
	}

	return entries
}

// FindTile binary-searches a sorted directory for tileID. A match can be an
// exact entry, a position inside a run (RunLength > 1), or a leaf-directory
// pointer (RunLength == 0), in which case the caller must descend into it.
func FindTile(entries []EntryV3, tileID uint64) (EntryV3, bool) {
	m := 0
	n := len(entries) - 1
	for m <= n {
		k := (n + m) >> 1
		cmp := int64(tileID) - int64(entries[k].TileID)
		if cmp > 0 {
			m = k + 1
		} else if cmp < 0 {
			n = k - 1
		} else {
			return entries[k], true
		}
	}

	// at this point, m > n: entries[n] is the greatest entry <= tileID (if any)
	if n >= 0 {
		if entries[n].RunLength == 0 {
			// leaf directory pointer; tileID may resolve inside the leaf
			return entries[n], true
		}
		if tileID-entries[n].TileID < uint64(entries[n].RunLength) {
			// tileID falls within this entry's run
			return entries[n], true
		}
	}
	return EntryV3{}, false
}

// SerializeHeader encodes header into the fixed-size little-endian
// PMTiles v3 binary header layout (HeaderV3LenBytes = 127 bytes).
func SerializeHeader(header HeaderV3) []byte {
	b := make([]byte, HeaderV3LenBytes)
	copy(b[0:7], "PMTiles") // magic number

	b[7] = 3 // spec version
	binary.LittleEndian.PutUint64(b[8:8+8], header.RootOffset)
	binary.LittleEndian.PutUint64(b[16:16+8], header.RootLength)
	binary.LittleEndian.PutUint64(b[24:24+8], header.MetadataOffset)
	binary.LittleEndian.PutUint64(b[32:32+8], header.MetadataLength)
	binary.LittleEndian.PutUint64(b[40:40+8], header.LeafDirectoryOffset)
	binary.LittleEndian.PutUint64(b[48:48+8], header.LeafDirectoryLength)
	binary.LittleEndian.PutUint64(b[56:56+8], header.TileDataOffset)
	binary.LittleEndian.PutUint64(b[64:64+8], header.TileDataLength)
	binary.LittleEndian.PutUint64(b[72:72+8], header.AddressedTilesCount)
	binary.LittleEndian.PutUint64(b[80:80+8], header.TileEntriesCount)
	binary.LittleEndian.PutUint64(b[88:88+8], header.TileContentsCount)
	if header.Clustered {
		b[96] = 0x1
	}
	b[97] = uint8(header.InternalCompression)
	b[98] = uint8(header.TileCompression)
	b[99] = uint8(header.TileType)
	b[100] = header.MinZoom
	b[101] = header.MaxZoom
	// bounds/center are stored as E7 fixed-point int32 values
	binary.LittleEndian.PutUint32(b[102:102+4], uint32(header.MinLonE7))
	binary.LittleEndian.PutUint32(b[106:106+4], uint32(header.MinLatE7))
	binary.LittleEndian.PutUint32(b[110:110+4], uint32(header.MaxLonE7))
	binary.LittleEndian.PutUint32(b[114:114+4], uint32(header.MaxLatE7))
	b[118] = header.CenterZoom
	binary.LittleEndian.PutUint32(b[119:119+4], uint32(header.CenterLonE7))
	binary.LittleEndian.PutUint32(b[123:123+4], uint32(header.CenterLatE7))
	return b
}

// DeserializeHeader decodes a serialized HeaderV3, returning an error if the
// magic number is missing or the spec version is unsupported.
// NOTE(review): d is indexed up to byte 126 without a length check; callers
// appear to always pass >= HeaderV3LenBytes — confirm, else this panics.
func DeserializeHeader(d []byte) (HeaderV3, error) {
	h := HeaderV3{}
	magicNumber := d[0:7]
	if string(magicNumber) != "PMTiles" {
		return h, fmt.Errorf("magic number not detected. confirm this is a PMTiles archive")
	}

	specVersion := d[7]
	if specVersion > uint8(3) {
		return h, fmt.Errorf("archive is spec version %d, but this program only supports version 3: upgrade your pmtiles program", specVersion)
	}

	h.SpecVersion = specVersion
	h.RootOffset = binary.LittleEndian.Uint64(d[8 : 8+8])
	h.RootLength = binary.LittleEndian.Uint64(d[16 : 16+8])
	h.MetadataOffset = binary.LittleEndian.Uint64(d[24 : 24+8])
	h.MetadataLength = binary.LittleEndian.Uint64(d[32 : 32+8])
	h.LeafDirectoryOffset = binary.LittleEndian.Uint64(d[40 : 40+8])
	h.LeafDirectoryLength = binary.LittleEndian.Uint64(d[48 : 48+8])
	h.TileDataOffset = binary.LittleEndian.Uint64(d[56 : 56+8])
	h.TileDataLength = binary.LittleEndian.Uint64(d[64 : 64+8])
	h.AddressedTilesCount = binary.LittleEndian.Uint64(d[72 : 72+8])
	h.TileEntriesCount = binary.LittleEndian.Uint64(d[80 : 80+8])
	h.TileContentsCount = binary.LittleEndian.Uint64(d[88 : 88+8])
	h.Clustered = (d[96] == 0x1)
	h.InternalCompression = Compression(d[97])
	h.TileCompression = Compression(d[98])
	h.TileType = TileType(d[99])
	h.MinZoom = d[100]
	h.MaxZoom = d[101]
	h.MinLonE7 = int32(binary.LittleEndian.Uint32(d[102 : 102+4]))
	h.MinLatE7 = int32(binary.LittleEndian.Uint32(d[106 : 106+4]))
	h.MaxLonE7 = int32(binary.LittleEndian.Uint32(d[110 : 110+4]))
	h.MaxLatE7 = int32(binary.LittleEndian.Uint32(d[114 : 114+4]))
	h.CenterZoom = d[118]
	h.CenterLonE7 = int32(binary.LittleEndian.Uint32(d[119 : 119+4]))
	h.CenterLatE7 = int32(binary.LittleEndian.Uint32(d[123 : 123+4]))

	return h, nil
}

// buildRootsLeaves splits entries into fixed-size leaf directories, returning
// the serialized root directory (leaf pointers only, RunLength 0), the
// concatenated serialized leaves, and the number of leaves.
func buildRootsLeaves(entries []EntryV3, leafSize int, compression Compression) ([]byte, []byte, int) {
	rootEntries := make([]EntryV3, 0)
	leavesBytes := make([]byte, 0)
	numLeaves := 0

	for idx := 0; idx < len(entries); idx += leafSize {
		numLeaves++
		end := idx + leafSize
		if idx+leafSize > len(entries) {
			end = len(entries)
		}
		serialized := SerializeEntries(entries[idx:end], compression)

		// root entry Offset is relative to the start of the leaves section
		rootEntries = append(rootEntries, EntryV3{entries[idx].TileID, uint64(len(leavesBytes)), uint32(len(serialized)), 0})
		leavesBytes = append(leavesBytes, serialized...)
	}

	rootBytes := SerializeEntries(rootEntries, compression)
	return rootBytes, leavesBytes, numLeaves
}

// optimizeDirectories chooses a root/leaf directory split so the serialized
// root fits in targetRootLen bytes; returns root bytes, leaf bytes and the
// number of leaves (0 when everything fits in the root).
func optimizeDirectories(entries []EntryV3, targetRootLen int, compression Compression) ([]byte, []byte, int) {
	if len(entries) < 16384 {
		testRootBytes := SerializeEntries(entries, compression)
		// Case1: the entire directory fits into the target len
		if len(testRootBytes) <= targetRootLen {
			return testRootBytes, make([]byte, 0), 0
		}
	}

	// TODO: case 2: mixed tile entries/directory entries in root

	// case 3: root directory is leaf pointers only
	// use an iterative method, increasing the size of the leaf directory until the root fits

	var leafSize float32
	leafSize = float32(len(entries)) / 3500

	if leafSize < 4096 {
		leafSize = 4096
	}

	// grow the leaf size by 20% per iteration until the root fits
	for {
		rootBytes, leavesBytes, numLeaves := buildRootsLeaves(entries, int(leafSize), compression)
		if len(rootBytes) <= targetRootLen {
			return rootBytes, leavesBytes, numLeaves
		}
		leafSize *= 1.2
	}
}

// IterateEntries walks the full directory tree (root plus leaves), invoking
// operation on every tile entry. fetch reads (offset, length) byte ranges
// from the archive; leaf directories are fetched recursively.
func IterateEntries(header HeaderV3, fetch func(uint64, uint64) ([]byte, error), operation func(EntryV3)) error {
	var CollectEntries func(uint64, uint64) error

	CollectEntries = func(dir_offset uint64, dir_length uint64) error {
		data, err := fetch(dir_offset, dir_length)
		if err != nil {
			return err
		}

		directory := DeserializeEntries(bytes.NewBuffer(data), header.InternalCompression)
		for _, entry :=
range directory {
			if entry.RunLength > 0 {
				// tile entry: report it
				operation(entry)
			} else {
				// leaf pointer: recurse into the leaf directory
				CollectEntries(header.LeafDirectoryOffset+entry.Offset, uint64(entry.Length))
			}
		}
		return nil
	}

	return CollectEntries(header.RootOffset, header.RootLength)
}
--------------------------------------------------------------------------------
/pmtiles/directory_test.go:
--------------------------------------------------------------------------------
package pmtiles

import (
	"bytes"
	"github.com/stretchr/testify/assert"
	"math/rand"
	"testing"
)

// Round-trips a small directory through gzip serialization.
func TestDirectoryRoundtrip(t *testing.T) {
	entries := make([]EntryV3, 0)
	entries = append(entries, EntryV3{0, 0, 0, 0})
	entries = append(entries, EntryV3{1, 1, 1, 1})
	entries = append(entries, EntryV3{2, 2, 2, 2})

	serialized := SerializeEntries(entries, Gzip)
	result := DeserializeEntries(bytes.NewBuffer(serialized), Gzip)
	assert.Equal(t, 3, len(result))
	assert.Equal(t, uint64(0), result[0].TileID)
	assert.Equal(t, uint64(0), result[0].Offset)
	assert.Equal(t, uint32(0), result[0].Length)
	assert.Equal(t, uint32(0), result[0].RunLength)
	assert.Equal(t, uint64(1), result[1].TileID)
	assert.Equal(t, uint64(1), result[1].Offset)
	assert.Equal(t, uint32(1), result[1].Length)
	assert.Equal(t, uint32(1), result[1].RunLength)
	assert.Equal(t, uint64(2), result[2].TileID)
	assert.Equal(t, uint64(2), result[2].Offset)
	assert.Equal(t, uint32(2), result[2].Length)
	assert.Equal(t, uint32(2), result[2].RunLength)
}

// Same round-trip as above, without compression.
func TestDirectoryRoundtripNoCompress(t *testing.T) {
	entries := make([]EntryV3, 0)
	entries = append(entries, EntryV3{0, 0, 0, 0})
	entries = append(entries, EntryV3{1, 1, 1, 1})
	entries = append(entries, EntryV3{2, 2, 2, 2})

	serialized := SerializeEntries(entries, NoCompression)
	result := DeserializeEntries(bytes.NewBuffer(serialized), NoCompression)
	assert.Equal(t, 3, len(result))
	assert.Equal(t, uint64(0), result[0].TileID)
	assert.Equal(t, uint64(0), result[0].Offset)
	assert.Equal(t, uint32(0), result[0].Length)
	assert.Equal(t, uint32(0), result[0].RunLength)
	assert.Equal(t, uint64(1), result[1].TileID)
	assert.Equal(t, uint64(1), result[1].Offset)
	assert.Equal(t, uint32(1), result[1].Length)
	assert.Equal(t, uint32(1), result[1].RunLength)
	assert.Equal(t, uint64(2), result[2].TileID)
	assert.Equal(t, uint64(2), result[2].Offset)
	assert.Equal(t, uint32(2), result[2].Length)
	assert.Equal(t, uint32(2), result[2].RunLength)
}

// Serializes and deserializes every header field.
func TestHeaderRoundtrip(t *testing.T) {
	header := HeaderV3{}
	header.RootOffset = 1
	header.RootLength = 2
	header.MetadataOffset = 3
	header.MetadataLength = 4
	header.LeafDirectoryOffset = 5
	header.LeafDirectoryLength = 6
	header.TileDataOffset = 7
	header.TileDataLength = 8
	header.AddressedTilesCount = 9
	header.TileEntriesCount = 10
	header.TileContentsCount = 11
	header.Clustered = true
	header.InternalCompression = Gzip
	header.TileCompression = Brotli
	header.TileType = Mvt
	header.MinZoom = 1
	header.MaxZoom = 2
	header.MinLonE7 = 1.1 * 10000000
	header.MinLatE7 = 2.1 * 10000000
	header.MaxLonE7 = 1.2 * 10000000
	header.MaxLatE7 = 2.2 * 10000000
	header.CenterZoom = 3
	header.CenterLonE7 = 3.1 * 10000000
	header.CenterLatE7 = 3.2 * 10000000
	b := SerializeHeader(header)
	result, _ := DeserializeHeader(b)
	assert.Equal(t, uint64(1), result.RootOffset)
	assert.Equal(t, uint64(2), result.RootLength)
	assert.Equal(t, uint64(3), result.MetadataOffset)
	assert.Equal(t, uint64(4), result.MetadataLength)
	assert.Equal(t, uint64(5), result.LeafDirectoryOffset)
	assert.Equal(t, uint64(6), result.LeafDirectoryLength)
	assert.Equal(t, uint64(7), result.TileDataOffset)
	assert.Equal(t, uint64(8), result.TileDataLength)
	assert.Equal(t, uint64(9), result.AddressedTilesCount)
	assert.Equal(t, uint64(10), result.TileEntriesCount)
	assert.Equal(t, uint64(11), result.TileContentsCount)
	assert.Equal(t, true, result.Clustered)
	assert.Equal(t, Gzip, int(result.InternalCompression))
	assert.Equal(t, Brotli, int(result.TileCompression))
	assert.Equal(t, Mvt, int(result.TileType))
	assert.Equal(t, uint8(1), result.MinZoom)
	assert.Equal(t, uint8(2), result.MaxZoom)
	assert.Equal(t, int32(11000000), result.MinLonE7)
	assert.Equal(t, int32(21000000), result.MinLatE7)
	assert.Equal(t, int32(12000000), result.MaxLonE7)
	assert.Equal(t, int32(22000000), result.MaxLatE7)
	assert.Equal(t, uint8(3), result.CenterZoom)
	assert.Equal(t, int32(31000000), result.CenterLonE7)
	assert.Equal(t, int32(32000000), result.CenterLatE7)
}

// Verifies E7 fields are converted back to float degrees in the JSON form.
func TestHeaderJsonRoundtrip(t *testing.T) {
	header := HeaderV3{}
	header.TileCompression = Brotli
	header.TileType = Mvt
	header.MinZoom = 1
	header.MaxZoom = 3
	header.MinLonE7 = 1.1 * 10000000
	header.MinLatE7 = 2.1 * 10000000
	header.MaxLonE7 = 1.2 * 10000000
	header.MaxLatE7 = 2.2 * 10000000
	header.CenterZoom = 2
	header.CenterLonE7 = 3.1 * 10000000
	header.CenterLatE7 = 3.2 * 10000000
	j := headerToJson(header)
	assert.Equal(t, "br", j.TileCompression)
	assert.Equal(t, "mvt", j.TileType)
	assert.Equal(t, 1, j.MinZoom)
	assert.Equal(t, 3, j.MaxZoom)
	assert.Equal(t, []float64{1.1, 2.1, 1.2, 2.2}, j.Bounds)
	assert.Equal(t, []float64{3.1, 3.2, 2}, j.Center)
}

// Small directories should fit in the root; large random ones must spill
// into leaf directories.
func TestOptimizeDirectories(t *testing.T) {
	rand.Seed(3857)
	entries := make([]EntryV3, 0)
	entries = append(entries, EntryV3{0, 0, 100, 1})
	_, leavesBytes, numLeaves := optimizeDirectories(entries, 100, Gzip)
	assert.False(t, len(leavesBytes) > 0)
	assert.Equal(t, 0, numLeaves)

	entries = make([]EntryV3, 0)
	var i uint64
	var offset uint64
	for ; i < 1000; i++ {
		randtilesize := rand.Intn(1000000)
		entries = append(entries, EntryV3{i, offset, uint32(randtilesize), 1})
		offset += uint64(randtilesize)
	}

	rootBytes, leavesBytes, numLeaves := optimizeDirectories(entries, 1024, Gzip)

	assert.False(t, len(rootBytes) > 1024)

	assert.False(t, numLeaves == 0)
	assert.False(t, len(leavesBytes) == 0)
}

func TestFindTileMissing(t *testing.T) {
	entries := make([]EntryV3, 0)
	_, ok := FindTile(entries, 0)
	assert.False(t, ok)
}

func TestFindTileFirstEntry(t *testing.T) {
	entries := []EntryV3{{TileID: 100, Offset: 1, Length: 1, RunLength: 1}}
	entry, ok := FindTile(entries, 100)
	assert.Equal(t, true, ok)
	assert.Equal(t, uint64(1), entry.Offset)
	assert.Equal(t, uint32(1), entry.Length)
	_, ok = FindTile(entries, 101)
	assert.Equal(t, false, ok)
}

func TestFindTileMultipleEntries(t *testing.T) {
	entries := []EntryV3{
		{TileID: 100, Offset: 1, Length: 1, RunLength: 2},
	}
	entry, ok := FindTile(entries, 101)
	assert.Equal(t, true, ok)
	assert.Equal(t, uint64(1), entry.Offset)
	assert.Equal(t, uint32(1), entry.Length)

	entries = []EntryV3{
		{TileID: 100, Offset: 1, Length: 1, RunLength: 1},
		{TileID: 150, Offset: 2, Length: 2, RunLength: 2},
	}
	entry, ok = FindTile(entries, 151)
	assert.Equal(t, true, ok)
	assert.Equal(t, uint64(2), entry.Offset)
	assert.Equal(t, uint32(2), entry.Length)

	entries = []EntryV3{
		{TileID: 50, Offset: 1, Length: 1, RunLength: 2},
		{TileID: 100, Offset: 2, Length: 2, RunLength: 1},
		{TileID: 150, Offset: 3, Length: 3, RunLength: 1},
	}
	entry, ok = FindTile(entries, 51)
	assert.Equal(t, true, ok)
	assert.Equal(t, uint64(1), entry.Offset)
	assert.Equal(t, uint32(1), entry.Length)
}

// A RunLength==0 (leaf) entry matches any tileID at or past its start.
func TestFindTileLeafSearch(t *testing.T) {
	entries := []EntryV3{
		{TileID: 100, Offset: 1, Length: 1, RunLength: 0},
	}
	entry, ok := FindTile(entries, 150)
	assert.Equal(t, true, ok)
	assert.Equal(t, uint64(1), entry.Offset)
	assert.Equal(t, uint32(1), entry.Length)
}

func TestBuildRootsLeaves(t *testing.T) {
	entries := []EntryV3{
		{TileID: 100, Offset: 1, Length: 1, RunLength: 0},
	}
	_, _, numLeaves := buildRootsLeaves(entries, 1, Gzip)
	assert.Equal(t, 1, numLeaves)
}

func TestStringifiedExtension(t *testing.T) {
	assert.Equal(t, "", headerExt(HeaderV3{}))
	assert.Equal(t, ".mvt", headerExt(HeaderV3{TileType: Mvt}))
	assert.Equal(t, ".png", headerExt(HeaderV3{TileType: Png}))
	assert.Equal(t, ".jpg", headerExt(HeaderV3{TileType: Jpeg}))
	assert.Equal(t, ".webp", headerExt(HeaderV3{TileType: Webp}))
	assert.Equal(t, ".avif", headerExt(HeaderV3{TileType: Avif}))
}

func TestStringToTileType(t *testing.T) {
	assert.Equal(t, "mvt", tileTypeToString(stringToTileType("mvt")))
	assert.Equal(t, "png", tileTypeToString(stringToTileType("png")))
	assert.Equal(t, "jpg", tileTypeToString(stringToTileType("jpg")))
	assert.Equal(t, "webp", tileTypeToString(stringToTileType("webp")))
	assert.Equal(t, "avif", tileTypeToString(stringToTileType("avif")))
	assert.Equal(t, "", tileTypeToString(stringToTileType("")))
}

func TestStringToCompression(t *testing.T) {
	s, has := compressionToString(stringToCompression("gzip"))
	assert.True(t, has)
	assert.Equal(t, "gzip", s)
	s, has = compressionToString(stringToCompression("br"))
	assert.True(t, has)
	assert.Equal(t, "br", s)
	s, has = compressionToString(stringToCompression("zstd"))
	assert.True(t, has)
	assert.Equal(t, "zstd", s)
	s, has = compressionToString(stringToCompression("none"))
	assert.False(t, has)
	assert.Equal(t, "none", s)
	s, has = compressionToString(stringToCompression("unknown"))
	assert.False(t, has)
	assert.Equal(t, "unknown", s)
}

// Round-trips JSON metadata with and without internal compression.
func TestMetadataRoundtrip(t *testing.T) {
	data := map[string]interface{}{
		"foo": "bar",
	}
	b, err := SerializeMetadata(data, NoCompression)
	assert.Nil(t, err)
	newData, err := DeserializeMetadata(bytes.NewReader(b), NoCompression)
	assert.Nil(t, err)
	assert.Equal(t, "bar", newData["foo"])

	b, err = SerializeMetadata(data, Gzip)
	assert.Nil(t, err)
	newData, err = DeserializeMetadata(bytes.NewReader(b), Gzip)
	assert.Nil(t, err)
	assert.Equal(t, "bar", newData["foo"])
}
--------------------------------------------------------------------------------
/pmtiles/edit.go:
--------------------------------------------------------------------------------
package pmtiles

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/schollz/progressbar/v3"
	"io"
	"io/ioutil"
	"log"
	"os"
)

// Edit parts of the header or metadata.
// works in-place if only the header is modified.
16 | func Edit(_ *log.Logger, inputArchive string, newHeaderJSONFile string, newMetadataFile string) error { 17 | if newHeaderJSONFile == "" && newMetadataFile == "" { 18 | return fmt.Errorf("must supply --header-json and/or --metadata to edit") 19 | } 20 | 21 | file, err := os.OpenFile(inputArchive, os.O_RDWR, 0666) 22 | defer file.Close() 23 | if err != nil { 24 | return err 25 | } 26 | 27 | buf := make([]byte, 127) 28 | _, err = file.Read(buf) 29 | if err != nil { 30 | return err 31 | } 32 | oldHeader, err := DeserializeHeader(buf) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | newHeader := oldHeader 38 | 39 | if newHeaderJSONFile != "" { 40 | newHeaderData := HeaderJson{} 41 | data, err := ioutil.ReadFile(newHeaderJSONFile) 42 | if err != nil { 43 | return err 44 | } 45 | err = json.Unmarshal(data, &newHeaderData) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | if len(newHeaderData.Bounds) != 4 { 51 | return fmt.Errorf("header len(bounds) must == 4") 52 | } 53 | if len(newHeaderData.Center) != 3 { 54 | return fmt.Errorf("header len(center) must == 3") 55 | } 56 | 57 | newHeader.TileType = stringToTileType(newHeaderData.TileType) 58 | newHeader.TileCompression = stringToCompression(newHeaderData.TileCompression) 59 | newHeader.MinZoom = uint8(newHeaderData.MinZoom) 60 | newHeader.MaxZoom = uint8(newHeaderData.MaxZoom) 61 | newHeader.MinLonE7 = int32(newHeaderData.Bounds[0] * 10000000) 62 | newHeader.MinLatE7 = int32(newHeaderData.Bounds[1] * 10000000) 63 | newHeader.MaxLonE7 = int32(newHeaderData.Bounds[2] * 10000000) 64 | newHeader.MaxLatE7 = int32(newHeaderData.Bounds[3] * 10000000) 65 | newHeader.CenterLonE7 = int32(newHeaderData.Center[0] * 10000000) 66 | newHeader.CenterLatE7 = int32(newHeaderData.Center[1] * 10000000) 67 | newHeader.CenterZoom = uint8(newHeaderData.Center[2]) 68 | } 69 | 70 | if newMetadataFile == "" { 71 | buf = SerializeHeader(newHeader) 72 | _, err = file.WriteAt(buf, 0) 73 | if err != nil { 74 | return err 75 | } 76 | 
file.Close() 77 | return nil 78 | } 79 | 80 | metadataReader, err := os.Open(newMetadataFile) 81 | if err != nil { 82 | return err 83 | } 84 | defer metadataReader.Close() 85 | 86 | parsedMetadata, err := DeserializeMetadata(metadataReader, NoCompression) 87 | if err != nil { 88 | return err 89 | } 90 | 91 | metadataBytes, err := SerializeMetadata(parsedMetadata, oldHeader.InternalCompression) 92 | if err != nil { 93 | return err 94 | } 95 | 96 | tempFilePath := inputArchive + ".tmp" 97 | 98 | if _, err = os.Stat(tempFilePath); err == nil { 99 | return fmt.Errorf("A file with the same name already exists") 100 | } 101 | 102 | outfile, err := os.Create(tempFilePath) 103 | if err != nil { 104 | return err 105 | } 106 | defer outfile.Close() 107 | 108 | newHeader.MetadataOffset = newHeader.RootOffset + newHeader.RootLength 109 | newHeader.MetadataLength = uint64(len(metadataBytes)) 110 | newHeader.LeafDirectoryOffset = newHeader.MetadataOffset + newHeader.MetadataLength 111 | newHeader.TileDataOffset = newHeader.LeafDirectoryOffset + newHeader.LeafDirectoryLength 112 | 113 | bar := progressbar.DefaultBytes( 114 | int64(HeaderV3LenBytes+newHeader.RootLength+uint64(len(metadataBytes))+newHeader.LeafDirectoryLength+newHeader.TileDataLength), 115 | "writing file", 116 | ) 117 | 118 | buf = SerializeHeader(newHeader) 119 | io.Copy(io.MultiWriter(outfile, bar), bytes.NewReader(buf)) 120 | 121 | rootSection := io.NewSectionReader(file, int64(oldHeader.RootOffset), int64(oldHeader.RootLength)) 122 | if _, err := io.Copy(io.MultiWriter(outfile, bar), rootSection); err != nil { 123 | return err 124 | } 125 | 126 | if _, err := io.Copy(io.MultiWriter(outfile, bar), bytes.NewReader(metadataBytes)); err != nil { 127 | return err 128 | } 129 | 130 | leafSection := io.NewSectionReader(file, int64(oldHeader.LeafDirectoryOffset), int64(oldHeader.LeafDirectoryLength)) 131 | if _, err := io.Copy(io.MultiWriter(outfile, bar), leafSection); err != nil { 132 | return err 133 | } 134 | 135 
| tileSection := io.NewSectionReader(file, int64(oldHeader.TileDataOffset), int64(oldHeader.TileDataLength)) 136 | if _, err := io.Copy(io.MultiWriter(outfile, bar), tileSection); err != nil { 137 | return err 138 | } 139 | 140 | // explicitly close in order to rename 141 | file.Close() 142 | outfile.Close() 143 | if err := os.Rename(tempFilePath, inputArchive); err != nil { 144 | return err 145 | } 146 | return nil 147 | } 148 | -------------------------------------------------------------------------------- /pmtiles/edit_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "github.com/stretchr/testify/assert" 8 | "io" 9 | "log" 10 | "os" 11 | "path/filepath" 12 | "testing" 13 | ) 14 | 15 | var logger = log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile) 16 | 17 | func makeFixtureCopy(t *testing.T, srcName string, dstName string) string { 18 | src, _ := os.OpenFile("fixtures/"+srcName+".pmtiles", os.O_RDONLY, 0666) 19 | defer src.Close() 20 | fname := filepath.Join(t.TempDir(), dstName+".pmtiles") 21 | dest, _ := os.Create(fname) 22 | _, _ = io.Copy(dest, src) 23 | dest.Close() 24 | return fname 25 | } 26 | 27 | func TestEditHeader(t *testing.T) { 28 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_header") 29 | 30 | headerPath := filepath.Join(t.TempDir(), "header.json") 31 | headerFile, _ := os.Create(headerPath) 32 | fmt.Fprint(headerFile, `{"tile_type":"png","tile_compression":"br","bounds":[-1,1,-1,1],"center":[0,0,0]}`) 33 | headerFile.Close() 34 | 35 | err := Edit(logger, fileToEdit, headerPath, "") 36 | assert.Nil(t, err) 37 | 38 | var b bytes.Buffer 39 | err = Show(logger, &b, "", fileToEdit, true, false, false, "", false, 0, 0, 0) 40 | assert.Nil(t, err) 41 | 42 | var input map[string]interface{} 43 | json.Unmarshal(b.Bytes(), &input) 44 | assert.Equal(t, "png", input["tile_type"]) 45 | assert.Equal(t, "br", 
input["tile_compression"]) 46 | } 47 | 48 | func TestEditMetadata(t *testing.T) { 49 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_metadata") 50 | 51 | metadataPath := filepath.Join(t.TempDir(), "metadata.json") 52 | metadataFile, _ := os.Create(metadataPath) 53 | fmt.Fprint(metadataFile, `{"foo":"bar"}`) 54 | metadataFile.Close() 55 | 56 | err := Edit(logger, fileToEdit, "", metadataPath) 57 | assert.Nil(t, err) 58 | 59 | var b bytes.Buffer 60 | err = Show(logger, &b, "", fileToEdit, false, true, false, "", false, 0, 0, 0) 61 | assert.Nil(t, err) 62 | 63 | var input map[string]interface{} 64 | json.Unmarshal(b.Bytes(), &input) 65 | assert.Equal(t, "bar", input["foo"]) 66 | } 67 | 68 | func TestMalformedHeader(t *testing.T) { 69 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_header_malformed") 70 | 71 | headerPath := filepath.Join(t.TempDir(), "header_malformed.json") 72 | headerFile, _ := os.Create(headerPath) 73 | fmt.Fprint(headerFile, `{`) 74 | headerFile.Close() 75 | 76 | err := Edit(logger, fileToEdit, headerPath, "") 77 | assert.Error(t, err) 78 | } 79 | 80 | func TestMalformedHeaderBounds(t *testing.T) { 81 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_header_bad_bounds") 82 | 83 | headerPath := filepath.Join(t.TempDir(), "header_bad_bounds.json") 84 | headerFile, _ := os.Create(headerPath) 85 | fmt.Fprint(headerFile, `{"tile_type":"png","tile_compression":"br","bounds":[],"center":[0,0,0]}`) 86 | headerFile.Close() 87 | 88 | err := Edit(logger, fileToEdit, headerPath, "") 89 | assert.Error(t, err) 90 | } 91 | 92 | func TestHeaderUnknownEnum(t *testing.T) { 93 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_header_unknown") 94 | 95 | headerPath := filepath.Join(t.TempDir(), "header_unknown.json") 96 | headerFile, _ := os.Create(headerPath) 97 | fmt.Fprint(headerFile, `{"tile_type":"foo","tile_compression":"foo","bounds":[-1,1,-1,1],"center":[0,0,0]}`) 98 | headerFile.Close() 99 | 100 | err := Edit(logger, 
fileToEdit, headerPath, "") 101 | assert.Nil(t, err) 102 | 103 | var b bytes.Buffer 104 | err = Show(logger, &b, "", fileToEdit, true, false, false, "", false, 0, 0, 0) 105 | assert.Nil(t, err) 106 | 107 | var input map[string]interface{} 108 | json.Unmarshal(b.Bytes(), &input) 109 | assert.Equal(t, "", input["tile_type"]) 110 | assert.Equal(t, "unknown", input["tile_compression"]) 111 | } 112 | 113 | func TestMalformedHeaderCenter(t *testing.T) { 114 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_header_bad_center") 115 | 116 | headerPath := filepath.Join(t.TempDir(), "header_bad_center.json") 117 | headerFile, _ := os.Create(headerPath) 118 | fmt.Fprint(headerFile, `{"tile_type":"png","tile_compression":"br","bounds":[-1,1,-1,1],"center":[0,0]}`) 119 | headerFile.Close() 120 | 121 | err := Edit(logger, fileToEdit, headerPath, "") 122 | assert.Error(t, err) 123 | } 124 | 125 | func TestMalformedMetadata(t *testing.T) { 126 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_metadata_malformed") 127 | 128 | metadataPath := filepath.Join(t.TempDir(), "metadata_malformed.json") 129 | metadataFile, _ := os.Create(metadataPath) 130 | fmt.Fprint(metadataFile, `{`) 131 | metadataFile.Close() 132 | 133 | err := Edit(logger, fileToEdit, "", metadataPath) 134 | assert.Error(t, err) 135 | } 136 | 137 | func TestTempfileExists(t *testing.T) { 138 | fileToEdit := makeFixtureCopy(t, "test_fixture_1", "edit_existing_tempfile") 139 | 140 | tmp, _ := os.Create(fileToEdit + ".tmp") 141 | defer tmp.Close() 142 | 143 | metadataPath := filepath.Join(t.TempDir(), "metadata.json") 144 | metadataFile, _ := os.Create(metadataPath) 145 | fmt.Fprint(metadataFile, `{"foo":"bar"}`) 146 | metadataFile.Close() 147 | 148 | err := Edit(logger, fileToEdit, "", metadataPath) 149 | assert.Error(t, err) 150 | } 151 | -------------------------------------------------------------------------------- /pmtiles/extract.go: 
-------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bytes" 5 | "container/list" 6 | "context" 7 | "fmt" 8 | "github.com/RoaringBitmap/roaring/roaring64" 9 | "github.com/dustin/go-humanize" 10 | "github.com/paulmach/orb" 11 | "github.com/schollz/progressbar/v3" 12 | "golang.org/x/sync/errgroup" 13 | "io" 14 | "io/ioutil" 15 | "log" 16 | "math" 17 | "os" 18 | "sort" 19 | "sync" 20 | "time" 21 | ) 22 | 23 | type srcDstRange struct { 24 | SrcOffset uint64 25 | DstOffset uint64 26 | Length uint64 27 | } 28 | 29 | // RelevantEntries finds the intersection of a bitmap and a directory 30 | // return sorted slice of entries, and slice of all leaf entries 31 | // any runlengths > 1 will be "trimmed" to the relevance bitmap 32 | func RelevantEntries(bitmap *roaring64.Bitmap, maxzoom uint8, dir []EntryV3) ([]EntryV3, []EntryV3) { 33 | lastTile := ZxyToID(maxzoom+1, 0, 0) 34 | leaves := make([]EntryV3, 0) 35 | tiles := make([]EntryV3, 0) 36 | for idx, entry := range dir { 37 | if entry.RunLength == 0 { 38 | tmp := roaring64.New() 39 | 40 | // if this is the last thing in the directory, it needs to be bounded 41 | if idx == len(dir)-1 { 42 | tmp.AddRange(entry.TileID, lastTile) 43 | } else { 44 | tmp.AddRange(entry.TileID, dir[idx+1].TileID) 45 | } 46 | 47 | if bitmap.Intersects(tmp) { 48 | leaves = append(leaves, entry) 49 | } 50 | } else if entry.RunLength == 1 { 51 | if bitmap.Contains(entry.TileID) { 52 | tiles = append(tiles, entry) 53 | } 54 | } else { 55 | // runlength > 1 56 | currentID := entry.TileID 57 | currentRunLength := uint32(0) 58 | for y := entry.TileID; y < entry.TileID+uint64(entry.RunLength); y++ { 59 | if bitmap.Contains(y) { 60 | if currentRunLength == 0 { 61 | currentRunLength = 1 62 | currentID = y 63 | } else { 64 | currentRunLength++ 65 | } 66 | } else { 67 | if currentRunLength > 0 { 68 | tiles = append(tiles, EntryV3{currentID, entry.Offset, entry.Length, currentRunLength}) 69 | } 
70 | currentRunLength = 0 71 | } 72 | } 73 | if currentRunLength > 0 { 74 | tiles = append(tiles, EntryV3{currentID, entry.Offset, entry.Length, currentRunLength}) 75 | } 76 | } 77 | } 78 | return tiles, leaves 79 | } 80 | 81 | // Given a tile entries for a Source archive, sorted in TileID order, 82 | // return: 83 | // * Re-encoded tile-entries, with their offsets changed to contiguous (clustered) order in a new archive. 84 | // * SrcDstRange: slice of offsets in the source archive, offset in the new archive, and length. 85 | // - Each range is one or more tiles 86 | // - the output must not have contiguous entries 87 | // - It is sorted by new offsets, but not necessarily by source offsets 88 | // 89 | // * The total size of the tile section in the new archive 90 | // * The # of addressed tiles (sum over RunLength) 91 | // * # the number of unique offsets ("tile contents") 92 | // - this might not be the last SrcDstRange new_offset + length, it's the highest offset (can be in the middle) 93 | func reencodeEntries(dir []EntryV3) ([]EntryV3, []srcDstRange, uint64, uint64, uint64) { 94 | reencoded := make([]EntryV3, 0, len(dir)) 95 | seenOffsets := make(map[uint64]uint64) 96 | ranges := make([]srcDstRange, 0) 97 | addressedTiles := uint64(0) 98 | 99 | dstOffset := uint64(0) 100 | for _, entry := range dir { 101 | if val, ok := seenOffsets[entry.Offset]; ok { 102 | reencoded = append(reencoded, EntryV3{entry.TileID, val, entry.Length, entry.RunLength}) 103 | } else { 104 | if len(ranges) > 0 { 105 | lastRange := ranges[len(ranges)-1] 106 | if lastRange.SrcOffset+lastRange.Length == entry.Offset { 107 | ranges[len(ranges)-1].Length += uint64(entry.Length) 108 | } else { 109 | ranges = append(ranges, srcDstRange{entry.Offset, dstOffset, uint64(entry.Length)}) 110 | } 111 | } else { 112 | ranges = append(ranges, srcDstRange{entry.Offset, dstOffset, uint64(entry.Length)}) 113 | } 114 | 115 | reencoded = append(reencoded, EntryV3{entry.TileID, dstOffset, entry.Length, 
entry.RunLength}) 116 | seenOffsets[entry.Offset] = dstOffset 117 | dstOffset += uint64(entry.Length) 118 | } 119 | 120 | addressedTiles += uint64(entry.RunLength) 121 | } 122 | return reencoded, ranges, dstOffset, addressedTiles, uint64(len(seenOffsets)) 123 | } 124 | 125 | // "want the next N bytes, then discard N bytes" 126 | type copyDiscard struct { 127 | Wanted uint64 128 | Discard uint64 129 | } 130 | 131 | type overfetchRange struct { 132 | Rng srcDstRange 133 | CopyDiscards []copyDiscard 134 | } 135 | 136 | // A single request, where only some of the bytes 137 | // in the requested range we want 138 | type overfetchListItem struct { 139 | Rng srcDstRange 140 | CopyDiscards []copyDiscard 141 | BytesToNext uint64 // the "priority" 142 | prev *overfetchListItem 143 | next *overfetchListItem 144 | index int 145 | } 146 | 147 | // MergeRanges takes a slice of SrcDstRanges, that: 148 | // * is non-contiguous, and is sorted by DstOffset 149 | // * an Overfetch parameter 150 | // - overfetch = 0.2 means we can request an extra 20% 151 | // - overfetch = 1.00 means we can double our total transfer size 152 | // 153 | // Return a list of OverfetchRanges 154 | // 155 | // Each OverfetchRange is one or more input ranges 156 | // input ranges are merged in order of smallest byte distance to next range 157 | // until the overfetch budget is consumed. 
// The list is sorted by Length.
func MergeRanges(ranges []srcDstRange, overfetch float32) (*list.List, uint64) {
	totalSize := 0

	shortest := make([]*overfetchListItem, len(ranges))

	// create the list items, computing each range's gap to its successor
	for i, rng := range ranges {
		var bytesToNext int64
		if i == len(ranges)-1 {
			// last range: no successor, so it can never be merged forward
			bytesToNext = math.MaxInt64
		} else {
			bytesToNext = int64(ranges[i+1].SrcOffset) - (int64(rng.SrcOffset) + int64(rng.Length))
			if bytesToNext < 0 {
				// source offsets out of order: treat the gap as infinite (unmergeable)
				bytesToNext = math.MaxInt64
			}
		}

		shortest[i] = &overfetchListItem{
			Rng:          rng,
			BytesToNext:  uint64(bytesToNext),
			CopyDiscards: []copyDiscard{{uint64(rng.Length), 0}},
		}
		totalSize += int(rng.Length)
	}

	// make the list doubly-linked
	for i, item := range shortest {
		if i > 0 {
			item.prev = shortest[i-1]
		}
		if i < len(shortest)-1 {
			item.next = shortest[i+1]
		}
	}

	overfetchBudget := int(float32(totalSize) * overfetch)

	// sort by ascending distance to next range
	sort.Slice(shortest, func(i, j int) bool {
		return shortest[i].BytesToNext < shortest[j].BytesToNext
	})

	// while we haven't consumed the budget, merge the cheapest item into its
	// successor. The successor's end offset — and therefore its own
	// BytesToNext — is unchanged by the merge, so the remaining sort order
	// stays valid and we only ever pop the front.
	for (len(shortest) > 1) && (overfetchBudget-int(shortest[0].BytesToNext) >= 0) {
		item := shortest[0]

		// merge this item into item.next
		newLength := item.Rng.Length + item.BytesToNext + item.next.Rng.Length
		item.next.Rng = srcDstRange{item.Rng.SrcOffset, item.Rng.DstOffset, newLength}
		item.next.prev = item.prev
		if item.prev != nil {
			item.prev.next = item.next
		}
		// the merged item's last wanted chunk is now followed by the gap
		// bytes, which the consumer must read and throw away
		item.CopyDiscards[len(item.CopyDiscards)-1].Discard = item.BytesToNext
		item.next.CopyDiscards = append(item.CopyDiscards, item.next.CopyDiscards...)

		shortest = shortest[1:]

		overfetchBudget -= int(item.BytesToNext)
	}

	// emit largest requests first
	sort.Slice(shortest, func(i, j int) bool {
		return shortest[i].Rng.Length > shortest[j].Rng.Length
	})

	totalBytes := uint64(0)
	result := list.New()
	for _, x := range shortest {
		result.PushBack(overfetchRange{
			Rng:          x.Rng,
			CopyDiscards: x.CopyDiscards,
		})
		totalBytes += x.Rng.Length
	}

	return result, totalBytes
}

// Extract a smaller archive from local or remote archive.
// 1. Get the root directory (check that it is clustered)
// 2. Turn the input geometry into a relevance bitmap (using min(maxzoom, headermaxzoom))
// 3. Get all relevant level 1 directories (if any)
// 4. Get all relevant level 2 directories (usually none)
// 5. With the existing directory + relevance bitmap, construct
//   - a new total directory (root + leaf directories)
//   - a sorted slice of byte ranges in the old file required
//
// 6. Merge requested ranges using an overfetch parameter
// 7. write the modified header
// 8. write the root directory.
// 9. get and write the metadata.
// 10. write the leaf directories (if any)
// 11. Get all tiles, and write directly to the output.
func Extract(_ *log.Logger, bucketURL string, key string, minzoom int8, maxzoom int8, regionFile string, bbox string, output string, downloadThreads int, overfetch float32, dryRun bool) error {
	// 1.
	// fetch the header
	start := time.Now()
	ctx := context.Background()

	bucketURL, key, err := NormalizeBucketKey(bucketURL, "", key)

	if err != nil {
		return err
	}

	bucket, err := OpenBucket(ctx, bucketURL, "")

	if err != nil {
		return err
	}

	// NOTE(review): this second err check is dead code — err was already
	// checked (and is nil) just above; the wrapped message is never produced.
	if err != nil {
		return fmt.Errorf("Failed to open bucket for %s, %w", bucketURL, err)
	}
	defer bucket.Close()

	r, err := bucket.NewRangeReader(ctx, key, 0, HeaderV3LenBytes)

	if err != nil {
		return fmt.Errorf("Failed to create range reader for %s, %w", key, err)
	}
	b, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	r.Close()

	// NOTE(review): err from DeserializeHeader is not checked; a corrupt
	// header would be used as-is. Consider returning err here.
	header, err := DeserializeHeader(b[0:HeaderV3LenBytes])

	if !header.Clustered {
		return fmt.Errorf("source archive must be clustered for extracts")
	}

	// remember source offsets before the header is rewritten below
	sourceMetadataOffset := header.MetadataOffset
	sourceTileDataOffset := header.TileDataOffset

	// clamp requested zooms to what the archive actually contains
	// (-1 means "not specified")
	if minzoom == -1 || int8(header.MinZoom) > minzoom {
		minzoom = int8(header.MinZoom)
	}

	if maxzoom == -1 || int8(header.MaxZoom) < maxzoom {
		maxzoom = int8(header.MaxZoom)
	}

	if minzoom > maxzoom {
		return fmt.Errorf("minzoom cannot be greater than maxzoom")
	}

	var relevantSet *roaring64.Bitmap
	if regionFile != "" || bbox != "" {
		if regionFile != "" && bbox != "" {
			return fmt.Errorf("only one of region and bbox can be specified")
		}

		var multipolygon orb.MultiPolygon

		if regionFile != "" {
			// NOTE(review): ReadFile error is discarded; a missing region
			// file surfaces only as an unmarshal error on nil bytes.
			dat, _ := ioutil.ReadFile(regionFile)
			multipolygon, err = UnmarshalRegion(dat)

			if err != nil {
				return err
			}
		} else {
			multipolygon, err = BboxRegion(bbox)
			if err != nil {
				return err
			}
		}

		// 2. construct a relevance bitmap: boundary + interior tiles at
		// maxzoom, then propagate up to minzoom

		bound := multipolygon.Bound()

		boundarySet, interiorSet := bitmapMultiPolygon(uint8(maxzoom), multipolygon)
		relevantSet = boundarySet
		relevantSet.Or(interiorSet)
		generalizeOr(relevantSet, uint8(minzoom))

		// store the region bounds in the new header (E7 = degrees * 1e7)
		header.MinLonE7 = int32(bound.Left() * 10000000)
		header.MinLatE7 = int32(bound.Bottom() * 10000000)
		header.MaxLonE7 = int32(bound.Right() * 10000000)
		header.MaxLatE7 = int32(bound.Top() * 10000000)
		header.CenterLonE7 = int32(bound.Center().X() * 10000000)
		header.CenterLatE7 = int32(bound.Center().Y() * 10000000)
	} else {
		// no region: everything between minzoom and maxzoom is relevant
		relevantSet = roaring64.New()
		relevantSet.AddRange(ZxyToID(uint8(minzoom), 0, 0), ZxyToID(uint8(maxzoom)+1, 0, 0))
	}

	// 3. get relevant entries from root
	dirOffset := header.RootOffset
	dirLength := header.RootLength

	rootReader, err := bucket.NewRangeReader(ctx, key, int64(dirOffset), int64(dirLength))
	if err != nil {
		return err
	}
	defer rootReader.Close()
	rootBytes, err := io.ReadAll(rootReader)
	if err != nil {
		return err
	}

	rootDir := DeserializeEntries(bytes.NewBuffer(rootBytes), header.InternalCompression)

	tileEntries, leaves := RelevantEntries(relevantSet, uint8(maxzoom), rootDir)

	// 4. get all relevant leaf entries

	leafRanges := make([]srcDstRange, 0)
	for _, leaf := range leaves {
		leafRanges = append(leafRanges, srcDstRange{header.LeafDirectoryOffset + leaf.Offset, 0, uint64(leaf.Length)})
	}

	overfetchLeaves, _ := MergeRanges(leafRanges, overfetch)
	numOverfetchLeaves := overfetchLeaves.Len()
	fmt.Printf("fetching %d dirs, %d chunks, %d requests\n", len(leaves), len(leafRanges), overfetchLeaves.Len())

	for {
		if overfetchLeaves.Len() == 0 {
			break
		}
		or := overfetchLeaves.Remove(overfetchLeaves.Front()).(overfetchRange)

		chunkReader, err := bucket.NewRangeReader(ctx, key, int64(or.Rng.SrcOffset), int64(or.Rng.Length))
		if err != nil {
			return err
		}

		// walk the merged request: read each wanted leaf directory, then
		// skip the overfetched gap bytes
		for _, cd := range or.CopyDiscards {

			leafBytes := make([]byte, cd.Wanted)
			_, err := io.ReadFull(chunkReader, leafBytes)
			if err != nil {
				return err
			}
			leafdir := DeserializeEntries(bytes.NewBuffer(leafBytes), header.InternalCompression)
			newEntries, newLeaves := RelevantEntries(relevantSet, uint8(maxzoom), leafdir)

			if len(newLeaves) > 0 {
				panic("This doesn't support leaf level 2+.")
			}
			tileEntries = append(tileEntries, newEntries...)

			_, err = io.CopyN(io.Discard, chunkReader, int64(cd.Discard))
			if err != nil {
				return err
			}
		}
		chunkReader.Close()
	}

	// root and leaf entries were gathered independently; restore TileID order
	sort.Slice(tileEntries, func(i, j int) bool {
		return tileEntries[i].TileID < tileEntries[j].TileID
	})

	fmt.Printf("Region tiles %d, result tile entries %d\n", relevantSet.GetCardinality(), len(tileEntries))

	// 6. create the new header and chunk list
	// we now need to re-encode this entry list using cumulative offsets
	reencoded, tileParts, tiledataLength, addressedTiles, tileContents := reencodeEntries(tileEntries)

	overfetchRanges, totalBytes := MergeRanges(tileParts, overfetch)

	numOverfetchRanges := overfetchRanges.Len()
	fmt.Printf("fetching %d tiles, %d chunks, %d requests\n", len(reencoded), len(tileParts), overfetchRanges.Len())

	// TODO: takes up too much RAM
	// construct the directories
	newRootBytes, newLeavesBytes, _ := optimizeDirectories(reencoded, 16384-HeaderV3LenBytes, Gzip)

	// 7. write the modified header
	header.RootOffset = HeaderV3LenBytes
	header.RootLength = uint64(len(newRootBytes))
	header.MetadataOffset = header.RootOffset + header.RootLength
	header.LeafDirectoryOffset = header.MetadataOffset + header.MetadataLength
	header.LeafDirectoryLength = uint64(len(newLeavesBytes))
	header.TileDataOffset = header.LeafDirectoryOffset + header.LeafDirectoryLength

	header.TileDataLength = tiledataLength
	header.AddressedTilesCount = addressedTiles
	header.TileEntriesCount = uint64(len(tileEntries))
	header.TileContentsCount = tileContents

	header.MaxZoom = uint8(maxzoom)
	header.MinZoom = uint8(minzoom)

	headerBytes := SerializeHeader(header)

	totalActualBytes := uint64(0)
	for _, x := range tileParts {
		totalActualBytes += x.Length
	}

	if !dryRun {

		// NOTE(review): outfile.Close is deferred before the error check;
		// harmless (Close on nil *os.File returns an error, not a panic),
		// but conventionally the check comes first.
		outfile, err := os.Create(output)
		defer outfile.Close()

		if err != nil {
			return err
		}

		// pre-size the file so concurrent OffsetWriters below can write
		// anywhere in the tile data section.
		// NOTE(review): Truncate's error is ignored.
		outfile.Truncate(127 + int64(len(newRootBytes)) + int64(header.MetadataLength) + int64(len(newLeavesBytes)) + int64(totalActualBytes))

		_, err = outfile.Write(headerBytes)
		if err != nil {
			return err
		}

		// 8. write the root directory
		_, err = outfile.Write(newRootBytes)
		if err != nil {
			return err
		}

		// 9. get and write the metadata
		metadataReader, err := bucket.NewRangeReader(ctx, key, int64(sourceMetadataOffset), int64(header.MetadataLength))
		if err != nil {
			return err
		}
		metadataBytes, err := io.ReadAll(metadataReader)
		defer metadataReader.Close()
		if err != nil {
			return err
		}

		// NOTE(review): this Write's error is ignored, unlike its neighbors.
		outfile.Write(metadataBytes)

		// 10. write the leaf directories
		_, err = outfile.Write(newLeavesBytes)
		if err != nil {
			return err
		}

		bar := progressbar.DefaultBytes(
			int64(totalBytes),
			"fetching chunks",
		)

		// mu guards the shared overfetchRanges work list across download goroutines
		var mu sync.Mutex

		// downloadPart fetches one merged range and writes the wanted bytes
		// to the output at their final destination offsets
		downloadPart := func(or overfetchRange) error {
			tileReader, err := bucket.NewRangeReader(ctx, key, int64(sourceTileDataOffset+or.Rng.SrcOffset), int64(or.Rng.Length))
			if err != nil {
				return err
			}
			offsetWriter := io.NewOffsetWriter(outfile, int64(header.TileDataOffset)+int64(or.Rng.DstOffset))

			for _, cd := range or.CopyDiscards {

				// wanted bytes go to the file; both wanted and discarded
				// bytes advance the progress bar
				_, err := io.CopyN(io.MultiWriter(offsetWriter, bar), tileReader, int64(cd.Wanted))
				if err != nil {
					return err
				}

				_, err = io.CopyN(bar, tileReader, int64(cd.Discard))
				if err != nil {
					return err
				}
			}
			tileReader.Close()
			return nil
		}

		errs, _ := errgroup.WithContext(ctx)

		for i := 0; i < downloadThreads; i++ {
			// the first worker drains the list from the back (smallest
			// requests) while the others take the largest from the front
			workBack := (i == 0 && downloadThreads > 1)
			errs.Go(func() error {
				done := false
				var or overfetchRange
				for {
					mu.Lock()
					if overfetchRanges.Len() == 0 {
						done = true
					} else {
						if workBack {
							or = overfetchRanges.Remove(overfetchRanges.Back()).(overfetchRange)
						} else {
							or = overfetchRanges.Remove(overfetchRanges.Front()).(overfetchRange)
						}
					}
| } 542 | mu.Unlock() 543 | if done { 544 | return nil 545 | } 546 | err := downloadPart(or) 547 | if err != nil { 548 | return err 549 | } 550 | } 551 | }) 552 | } 553 | 554 | err = errs.Wait() 555 | if err != nil { 556 | return err 557 | } 558 | } 559 | 560 | fmt.Printf("Completed in %v with %v download threads (%v tiles/s).\n", time.Since(start), downloadThreads, float64(len(reencoded))/float64(time.Since(start).Seconds())) 561 | totalRequests := 2 // header + root 562 | totalRequests += numOverfetchLeaves // leaves 563 | totalRequests++ // metadata 564 | totalRequests += numOverfetchRanges 565 | fmt.Printf("Extract required %d total requests.\n", totalRequests) 566 | fmt.Printf("Extract transferred %s (overfetch %v) for an archive size of %s\n", humanize.Bytes(totalBytes), overfetch, humanize.Bytes(totalActualBytes)) 567 | 568 | return nil 569 | } 570 | -------------------------------------------------------------------------------- /pmtiles/extract_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/RoaringBitmap/roaring/roaring64" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestRelevantEntries(t *testing.T) { 10 | entries := make([]EntryV3, 0) 11 | entries = append(entries, EntryV3{0, 0, 0, 1}) 12 | 13 | bitmap := roaring64.New() 14 | bitmap.Add(0) 15 | 16 | tiles, leaves := RelevantEntries(bitmap, 4, entries) 17 | 18 | assert.Equal(t, len(tiles), 1) 19 | assert.Equal(t, len(leaves), 0) 20 | } 21 | 22 | func TestRelevantEntriesRunLength(t *testing.T) { 23 | entries := make([]EntryV3, 0) 24 | entries = append(entries, EntryV3{0, 0, 0, 5}) 25 | 26 | bitmap := roaring64.New() 27 | bitmap.Add(1) 28 | bitmap.Add(2) 29 | bitmap.Add(4) 30 | 31 | tiles, leaves := RelevantEntries(bitmap, 4, entries) 32 | 33 | assert.Equal(t, len(tiles), 2) 34 | assert.Equal(t, tiles[0].RunLength, uint32(2)) 35 | assert.Equal(t, tiles[1].RunLength, uint32(1)) 36 | 
assert.Equal(t, len(leaves), 0) 37 | } 38 | 39 | func TestRelevantEntriesLeaf(t *testing.T) { 40 | entries := make([]EntryV3, 0) 41 | entries = append(entries, EntryV3{0, 0, 0, 0}) 42 | 43 | bitmap := roaring64.New() 44 | bitmap.Add(1) 45 | 46 | tiles, leaves := RelevantEntries(bitmap, 4, entries) 47 | 48 | assert.Equal(t, len(tiles), 0) 49 | assert.Equal(t, len(leaves), 1) 50 | } 51 | 52 | func TestRelevantEntriesNotLeaf(t *testing.T) { 53 | entries := make([]EntryV3, 0) 54 | entries = append(entries, EntryV3{0, 0, 0, 0}) 55 | entries = append(entries, EntryV3{2, 0, 0, 1}) 56 | entries = append(entries, EntryV3{4, 0, 0, 0}) 57 | 58 | bitmap := roaring64.New() 59 | bitmap.Add(3) 60 | 61 | tiles, leaves := RelevantEntries(bitmap, 4, entries) 62 | 63 | assert.Equal(t, len(tiles), 0) 64 | assert.Equal(t, len(leaves), 0) 65 | } 66 | 67 | func TestRelevantEntriesMaxZoom(t *testing.T) { 68 | entries := make([]EntryV3, 0) 69 | entries = append(entries, EntryV3{0, 0, 0, 0}) 70 | 71 | bitmap := roaring64.New() 72 | bitmap.Add(6) 73 | _, leaves := RelevantEntries(bitmap, 1, entries) 74 | assert.Equal(t, len(leaves), 0) 75 | 76 | _, leaves = RelevantEntries(bitmap, 2, entries) 77 | assert.Equal(t, len(leaves), 1) 78 | } 79 | 80 | func TestReencodeEntries(t *testing.T) { 81 | entries := make([]EntryV3, 0) 82 | entries = append(entries, EntryV3{0, 400, 10, 1}) 83 | entries = append(entries, EntryV3{1, 500, 20, 2}) 84 | 85 | reencoded, result, datalen, addressed, contents := reencodeEntries(entries) 86 | 87 | assert.Equal(t, 2, len(result)) 88 | assert.Equal(t, result[0].SrcOffset, uint64(400)) 89 | assert.Equal(t, result[0].Length, uint64(10)) 90 | assert.Equal(t, result[1].SrcOffset, uint64(500)) 91 | assert.Equal(t, result[1].Length, uint64(20)) 92 | 93 | assert.Equal(t, 2, len(reencoded)) 94 | assert.Equal(t, reencoded[0].Offset, uint64(0)) 95 | assert.Equal(t, reencoded[1].Offset, uint64(10)) 96 | 97 | assert.Equal(t, uint64(30), datalen) 98 | assert.Equal(t, uint64(3), 
addressed) 99 | assert.Equal(t, uint64(2), contents) 100 | } 101 | 102 | func TestReencodeEntriesDuplicate(t *testing.T) { 103 | entries := make([]EntryV3, 0) 104 | entries = append(entries, EntryV3{0, 400, 10, 1}) 105 | entries = append(entries, EntryV3{1, 500, 20, 1}) 106 | entries = append(entries, EntryV3{2, 400, 10, 1}) 107 | 108 | reencoded, result, datalen, addressed, contents := reencodeEntries(entries) 109 | 110 | assert.Equal(t, 2, len(result)) 111 | assert.Equal(t, result[0].SrcOffset, uint64(400)) 112 | assert.Equal(t, result[0].Length, uint64(10)) 113 | assert.Equal(t, result[1].SrcOffset, uint64(500)) 114 | assert.Equal(t, result[1].Length, uint64(20)) 115 | 116 | assert.Equal(t, len(reencoded), 3) 117 | assert.Equal(t, reencoded[0].Offset, uint64(0)) 118 | assert.Equal(t, reencoded[1].Offset, uint64(10)) 119 | assert.Equal(t, reencoded[2].Offset, uint64(0)) 120 | 121 | assert.Equal(t, uint64(30), datalen) 122 | assert.Equal(t, uint64(3), addressed) 123 | assert.Equal(t, uint64(2), contents) 124 | } 125 | 126 | func TestReencodeContiguous(t *testing.T) { 127 | entries := make([]EntryV3, 0) 128 | entries = append(entries, EntryV3{0, 400, 10, 0}) 129 | entries = append(entries, EntryV3{1, 410, 20, 0}) 130 | 131 | _, result, _, _, _ := reencodeEntries(entries) 132 | 133 | assert.Equal(t, len(result), 1) 134 | assert.Equal(t, result[0].SrcOffset, uint64(400)) 135 | assert.Equal(t, result[0].Length, uint64(30)) 136 | } 137 | 138 | func TestMergeRanges(t *testing.T) { 139 | ranges := make([]srcDstRange, 0) 140 | ranges = append(ranges, srcDstRange{0, 0, 50}) 141 | ranges = append(ranges, srcDstRange{60, 60, 60}) 142 | 143 | result, totalTransferBytes := MergeRanges(ranges, 0.1) 144 | 145 | assert.Equal(t, 1, result.Len()) 146 | assert.Equal(t, uint64(120), totalTransferBytes) 147 | front := result.Front().Value.(overfetchRange) 148 | assert.Equal(t, srcDstRange{0, 0, 120}, front.Rng) 149 | assert.Equal(t, 2, len(front.CopyDiscards)) 150 | assert.Equal(t, 
copyDiscard{50, 10}, front.CopyDiscards[0]) 151 | assert.Equal(t, copyDiscard{60, 0}, front.CopyDiscards[1]) 152 | } 153 | 154 | func TestMergeRangesMultiple(t *testing.T) { 155 | ranges := make([]srcDstRange, 0) 156 | ranges = append(ranges, srcDstRange{0, 0, 50}) 157 | ranges = append(ranges, srcDstRange{60, 60, 10}) 158 | ranges = append(ranges, srcDstRange{80, 80, 10}) 159 | 160 | result, totalTransferBytes := MergeRanges(ranges, 0.3) 161 | front := result.Front().Value.(overfetchRange) 162 | assert.Equal(t, uint64(90), totalTransferBytes) 163 | assert.Equal(t, 1, result.Len()) 164 | assert.Equal(t, srcDstRange{0, 0, 90}, front.Rng) 165 | assert.Equal(t, 3, len(front.CopyDiscards)) 166 | } 167 | 168 | func TestMergeRangesNonSrcOrdered(t *testing.T) { 169 | ranges := make([]srcDstRange, 0) 170 | ranges = append(ranges, srcDstRange{20, 0, 50}) 171 | ranges = append(ranges, srcDstRange{0, 60, 50}) 172 | 173 | result, _ := MergeRanges(ranges, 0.1) 174 | assert.Equal(t, 2, result.Len()) 175 | } 176 | -------------------------------------------------------------------------------- /pmtiles/fixtures/test_fixture_1.pmtiles: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/protomaps/go-pmtiles/f290198372de4fce850304a4250d7d1b83e26fe9/pmtiles/fixtures/test_fixture_1.pmtiles -------------------------------------------------------------------------------- /pmtiles/fixtures/unclustered.pmtiles: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/protomaps/go-pmtiles/f290198372de4fce850304a4250d7d1b83e26fe9/pmtiles/fixtures/unclustered.pmtiles -------------------------------------------------------------------------------- /pmtiles/makesync.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bufio" 5 | "encoding/binary" 6 | "encoding/json" 7 | "fmt" 8 | 
"github.com/cespare/xxhash/v2" 9 | "github.com/schollz/progressbar/v3" 10 | "golang.org/x/sync/errgroup" 11 | "io" 12 | "log" 13 | "os" 14 | "runtime" 15 | "sort" 16 | "sync" 17 | ) 18 | 19 | type syncBlock struct { 20 | Start uint64 // the start tileID of the block 21 | Offset uint64 // the offset in the source archive. This is not serialized, only used in makesync. 22 | Length uint64 // the length of the block 23 | Hash uint64 // the hash of the block 24 | } 25 | 26 | type syncHeader struct { 27 | Version string `json:"version"` 28 | BlockSize uint64 `json:"block_size"` 29 | HashType string `json:"hash_type"` 30 | HashSize uint8 `json:"hash_size"` 31 | NumBlocks int `json:"num_blocks"` 32 | } 33 | 34 | type syncTask struct { 35 | NewBlock syncBlock 36 | OldOffset uint64 37 | } 38 | 39 | func serializeSyncBlocks(output io.Writer, blocks []syncBlock) { 40 | tmp := make([]byte, binary.MaxVarintLen64) 41 | var n int 42 | 43 | lastStartID := uint64(0) 44 | for _, block := range blocks { 45 | n = binary.PutUvarint(tmp, uint64(block.Start-lastStartID)) 46 | output.Write(tmp[:n]) 47 | n = binary.PutUvarint(tmp, uint64(block.Length)) 48 | output.Write(tmp[:n]) 49 | binary.LittleEndian.PutUint64(tmp, block.Hash) 50 | output.Write(tmp[0:8]) 51 | 52 | lastStartID = block.Start 53 | } 54 | } 55 | 56 | func deserializeSyncBlocks(numBlocks int, reader *bufio.Reader) []syncBlock { 57 | blocks := make([]syncBlock, 0) 58 | 59 | lastStartID := uint64(0) 60 | offset := uint64(0) 61 | buf := make([]byte, 8) 62 | 63 | for i := 0; i < numBlocks; i++ { 64 | start, _ := binary.ReadUvarint(reader) 65 | length, _ := binary.ReadUvarint(reader) 66 | _, _ = io.ReadFull(reader, buf) 67 | blocks = append(blocks, syncBlock{Start: lastStartID + start, Offset: offset, Length: length, Hash: binary.LittleEndian.Uint64(buf)}) 68 | 69 | lastStartID = lastStartID + start 70 | offset = offset + length 71 | } 72 | 73 | return blocks 74 | } 75 | 76 | func Makesync(logger *log.Logger, cliVersion string, 
fileName string, blockSizeKb int) error { 77 | fmt.Println("WARNING: This is an experimental feature. Do not rely on this in production!") 78 | blockSizeBytes := uint64(1000 * blockSizeKb) 79 | 80 | file, err := os.OpenFile(fileName, os.O_RDONLY, 0666) 81 | 82 | if err != nil { 83 | return err 84 | } 85 | 86 | buf := make([]byte, 127) 87 | _, err = file.Read(buf) 88 | if err != nil { 89 | return err 90 | } 91 | 92 | header, err := DeserializeHeader(buf) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | if !header.Clustered { 98 | return fmt.Errorf("archive must be clustered for makesync") 99 | } 100 | 101 | output, err := os.Create(fileName + ".sync") 102 | 103 | if err != nil { 104 | return err 105 | } 106 | 107 | defer output.Close() 108 | 109 | bar := progressbar.Default( 110 | int64(header.TileEntriesCount), 111 | "writing syncfile", 112 | ) 113 | 114 | var current syncBlock 115 | 116 | tasks := make(chan syncBlock, 1000) 117 | 118 | var wg sync.WaitGroup 119 | var mu sync.Mutex 120 | 121 | blocks := make([]syncBlock, 0) 122 | 123 | errs := new(errgroup.Group) 124 | 125 | for i := 0; i < runtime.GOMAXPROCS(0); i++ { 126 | errs.Go(func() error { 127 | wg.Add(1) 128 | hasher := xxhash.New() 129 | for block := range tasks { 130 | r := io.NewSectionReader(file, int64(header.TileDataOffset+block.Offset), int64(block.Length)) 131 | 132 | if _, err := io.Copy(hasher, r); err != nil { 133 | log.Fatal(err) 134 | } 135 | 136 | block.Hash = hasher.Sum64() 137 | 138 | mu.Lock() 139 | blocks = append(blocks, block) 140 | mu.Unlock() 141 | 142 | hasher.Reset() 143 | } 144 | wg.Done() 145 | return nil 146 | }) 147 | } 148 | 149 | err = IterateEntries(header, 150 | func(offset uint64, length uint64) ([]byte, error) { 151 | return io.ReadAll(io.NewSectionReader(file, int64(offset), int64(length))) 152 | }, 153 | func(e EntryV3) { 154 | bar.Add(1) 155 | if current.Length == 0 { 156 | current.Start = e.TileID 157 | current.Offset = e.Offset 158 | current.Length = 
uint64(e.Length) 159 | } else if e.Offset > current.Offset+uint64(current.Length) { 160 | panic("Invalid clustering of archive detected - check with verify") 161 | } else if e.Offset == current.Offset+uint64(current.Length) { 162 | if current.Length+uint64(e.Length) > blockSizeBytes { 163 | tasks <- syncBlock{current.Start, current.Offset, current.Length, 0} 164 | 165 | current.Start = e.TileID 166 | current.Offset = e.Offset 167 | current.Length = uint64(e.Length) 168 | } else { 169 | current.Length += uint64(e.Length) 170 | } 171 | } 172 | }) 173 | 174 | if err != nil { 175 | return err 176 | } 177 | 178 | tasks <- syncBlock{current.Start, current.Offset, current.Length, 0} 179 | close(tasks) 180 | 181 | wg.Wait() 182 | 183 | sort.Slice(blocks, func(i, j int) bool { return blocks[i].Start < blocks[j].Start }) 184 | 185 | sh := syncHeader{ 186 | Version: cliVersion, 187 | HashSize: 8, 188 | BlockSize: blockSizeBytes, 189 | HashType: "xxh64", 190 | NumBlocks: len(blocks), 191 | } 192 | 193 | syncHeaderBytes, err := json.Marshal(sh) 194 | 195 | output.Write(syncHeaderBytes) 196 | output.Write([]byte{'\n'}) 197 | 198 | serializeSyncBlocks(output, blocks) 199 | 200 | fmt.Printf("Created syncfile with %d blocks.\n", len(blocks)) 201 | return nil 202 | } 203 | -------------------------------------------------------------------------------- /pmtiles/makesync_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "github.com/stretchr/testify/assert" 7 | "testing" 8 | ) 9 | 10 | func TestSyncBlockRoundtrip(t *testing.T) { 11 | var blocks []syncBlock 12 | blocks = append(blocks, syncBlock{Start: 1, Offset: 2, Length: 3, Hash: 4}) 13 | var b bytes.Buffer 14 | serializeSyncBlocks(&b, blocks) 15 | list := deserializeSyncBlocks(1, bufio.NewReader(&b)) 16 | 17 | assert.Equal(t, 1, len(list)) 18 | assert.Equal(t, uint64(1), list[0].Start) 19 | assert.Equal(t, uint64(3), 
list[0].Length)
	assert.Equal(t, uint64(4), list[0].Hash)
}
-------------------------------------------------------------------------------- /pmtiles/region.go: --------------------------------------------------------------------------------
package pmtiles

import (
	"fmt"
	"github.com/paulmach/orb"
	"github.com/paulmach/orb/geojson"
	"strconv"
	"strings"
)

// BboxRegion parses a bbox string into an orb.MultiPolygon region.
// Expected input format: "minLon,minLat,maxLon,maxLat".
// NOTE(review): there is no len(parts) check — a bbox string with fewer than
// four comma-separated fields panics with an index-out-of-range instead of
// returning an error. Confirm callers pre-validate the flag value.
func BboxRegion(bbox string) (orb.MultiPolygon, error) {
	parts := strings.Split(bbox, ",")
	minLon, err := strconv.ParseFloat(parts[0], 64)
	if err != nil {
		return nil, err
	}
	minLat, err := strconv.ParseFloat(parts[1], 64)
	if err != nil {
		return nil, err
	}
	maxLon, err := strconv.ParseFloat(parts[2], 64)
	if err != nil {
		return nil, err
	}
	maxLat, err := strconv.ParseFloat(parts[3], 64)
	if err != nil {
		return nil, err
	}
	// single closed ring, counter-clockwise from the top-left corner
	return orb.MultiPolygon{{{{minLon, maxLat}, {maxLon, maxLat}, {maxLon, minLat}, {minLon, minLat}, {minLon, maxLat}}}}, nil
}

// UnmarshalRegion parses JSON bytes into an orb.MultiPolygon region.
// It tries, in order: a FeatureCollection, a single Feature, and a bare
// Geometry; only Polygon/MultiPolygon geometries are accepted.
func UnmarshalRegion(data []byte) (orb.MultiPolygon, error) {
	fc, err := geojson.UnmarshalFeatureCollection(data)

	if err == nil {
		retval := make([]orb.Polygon, 0)
		for _, f := range fc.Features {
			switch v := f.Geometry.(type) {
			case orb.Polygon:
				retval = append(retval, v)
			case orb.MultiPolygon:
				retval = append(retval, v...)
			}
		}
		// an empty FeatureCollection falls through to the Feature /
		// Geometry attempts below and ultimately errors
		if len(retval) > 0 {
			return retval, nil
		}
	}

	f, err := geojson.UnmarshalFeature(data)

	if err == nil {
		switch v := f.Geometry.(type) {
		case orb.Polygon:
			return []orb.Polygon{v}, nil
		case orb.MultiPolygon:
			return v, nil
		}
	}

	g, err := geojson.UnmarshalGeometry(data)

	if err != nil {
		return nil, err
	}

	switch v := g.Geometry().(type) {
	case orb.Polygon:
		return []orb.Polygon{v}, nil
	case orb.MultiPolygon:
		return v, nil
	}

	// valid GeoJSON but not a (Multi)Polygon
	return nil, fmt.Errorf("No geometry")
}
-------------------------------------------------------------------------------- /pmtiles/region_test.go: --------------------------------------------------------------------------------
package pmtiles

import (
	"github.com/stretchr/testify/assert"
	"testing"
)

func TestBboxRegion(t *testing.T) {
	result, err := BboxRegion("-1.906033,50.680367,1.097501,52.304934")
	assert.Nil(t, err)
	assert.Equal(t, -1.906033, result[0][0][0][0])
	assert.Equal(t, 52.304934, result[0][0][0][1])
	assert.Equal(t, 1.097501, result[0][0][2][0])
	assert.Equal(t, 50.680367, result[0][0][2][1])
}

func TestRawPolygonRegion(t *testing.T) {
	result, err := UnmarshalRegion([]byte(`{
		"type": "Polygon",
		"coordinates": [[[0, 0],[0,1],[1,1],[0,0]]]
	}`))
	assert.Nil(t, err)
	assert.Equal(t, 1, len(result))
}

func TestRawMultiPolygonRegion(t *testing.T) {
	result, err := UnmarshalRegion([]byte(`{
		"type": "MultiPolygon",
		"coordinates": [[[[0, 0],[0,1],[1,1],[0,0]]]]
	}`))
	assert.Nil(t, err)
	assert.Equal(t, 1, len(result))
}

func TestRawPolygonFeatureRegion(t *testing.T) {
	result, err := UnmarshalRegion([]byte(`{
		"type": "Feature",
		"geometry": {
			"type": "Polygon",
			"coordinates": [[[0,
0],[0,1],[1,1],[0,0]]] 41 | } 42 | }`)) 43 | assert.Nil(t, err) 44 | assert.Equal(t, 1, len(result)) 45 | } 46 | 47 | func TestRawMultiPolygonFeatureRegion(t *testing.T) { 48 | result, err := UnmarshalRegion([]byte(`{ 49 | "type": "Feature", 50 | "geometry": { 51 | "type": "MultiPolygon", 52 | "coordinates": [[[[0, 0],[0,1],[1,1],[0,0]]]] 53 | } 54 | }`)) 55 | assert.Nil(t, err) 56 | assert.Equal(t, 1, len(result)) 57 | } 58 | 59 | func TestFeatureCollectionRegion(t *testing.T) { 60 | result, err := UnmarshalRegion([]byte(`{ 61 | "type": "FeatureCollection", 62 | "features": [ 63 | { 64 | "type": "Feature", 65 | "geometry": { 66 | "type": "MultiPolygon", 67 | "coordinates": [[[[0, 0],[0,1],[1,1],[0,0]]]] 68 | } 69 | }, 70 | { 71 | "type": "Feature", 72 | "geometry": { 73 | "type": "Polygon", 74 | "coordinates": [[[1, 1],[1,2],[2,2],[1,1]]] 75 | } 76 | } 77 | ] 78 | }`)) 79 | assert.Nil(t, err) 80 | assert.Equal(t, 2, len(result)) 81 | } 82 | 83 | func TestEmptyFeatureCollectionRegion(t *testing.T) { 84 | _, err := UnmarshalRegion([]byte(`{ 85 | "type": "FeatureCollection", 86 | "features": [ 87 | ] 88 | }`)) 89 | assert.NotNil(t, err) 90 | } 91 | -------------------------------------------------------------------------------- /pmtiles/server_metrics.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/prometheus/client_golang/prometheus" 11 | ) 12 | 13 | var buildInfoMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 14 | Namespace: "pmtiles", 15 | Name: "buildinfo", 16 | }, []string{"version", "revision"}) 17 | 18 | var buildTimeMetric = prometheus.NewGauge(prometheus.GaugeOpts{ 19 | Namespace: "pmtiles", 20 | Name: "buildtime", 21 | }) 22 | 23 | func init() { 24 | err := prometheus.Register(buildInfoMetric) 25 | if err != nil { 26 | fmt.Println("Error registering metric", err) 27 | } 28 | err = 
prometheus.Register(buildTimeMetric) 29 | if err != nil { 30 | fmt.Println("Error registering metric", err) 31 | } 32 | } 33 | 34 | // SetBuildInfo initializes static metrics with pmtiles version, git hash, and build time 35 | func SetBuildInfo(version, commit, date string) { 36 | buildInfoMetric.WithLabelValues(version, commit).Set(1) 37 | time, err := time.Parse(time.RFC3339, date) 38 | if err == nil { 39 | buildTimeMetric.Set(float64(time.Unix())) 40 | } else { 41 | buildTimeMetric.Set(0) 42 | } 43 | } 44 | 45 | type metrics struct { 46 | // overall requests: # requests, request duration, response size by archive/status code 47 | requests *prometheus.CounterVec 48 | responseSize *prometheus.HistogramVec 49 | requestDuration *prometheus.HistogramVec 50 | // dir cache: # requests, hits, cache entries, cache bytes, cache bytes limit 51 | dirCacheEntries prometheus.Gauge 52 | dirCacheSizeBytes prometheus.Gauge 53 | dirCacheLimitBytes prometheus.Gauge 54 | dirCacheRequests *prometheus.CounterVec 55 | // requests to bucket: # total, response duration by archive/status code 56 | bucketRequests *prometheus.CounterVec 57 | bucketRequestDuration *prometheus.HistogramVec 58 | // misc 59 | reloads *prometheus.CounterVec 60 | } 61 | 62 | // utility to time an overall tile request 63 | type requestTracker struct { 64 | finished bool 65 | start time.Time 66 | metrics *metrics 67 | } 68 | 69 | func (m *metrics) startRequest() *requestTracker { 70 | return &requestTracker{start: time.Now(), metrics: m} 71 | } 72 | 73 | func (r *requestTracker) finish(ctx context.Context, archive, handler string, status, responseSize int, logDetails bool) { 74 | if !r.finished { 75 | r.finished = true 76 | // exclude archive path from "not found" metrics to limit cardinality on requests for nonexistant archives 77 | statusString := strconv.Itoa(status) 78 | if status == 404 { 79 | archive = "" 80 | } else if isCanceled(ctx) { 81 | statusString = "canceled" 82 | } 83 | 84 | labels := 
[]string{archive, handler, statusString}
85 | 		r.metrics.requests.WithLabelValues(labels...).Inc()
86 | 		if logDetails {
87 | 			r.metrics.responseSize.WithLabelValues(labels...).Observe(float64(responseSize))
88 | 			r.metrics.requestDuration.WithLabelValues(labels...).Observe(time.Since(r.start).Seconds())
89 | 		}
90 | 	}
91 | }
92 | 
93 | // utility to time an individual request to the underlying bucket
94 | type bucketRequestTracker struct {
95 | 	finished bool
96 | 	start    time.Time
97 | 	metrics  *metrics
98 | 	archive  string
99 | 	kind     string
100 | }
101 | 
102 | func (m *metrics) startBucketRequest(archive, kind string) *bucketRequestTracker {
103 | 	return &bucketRequestTracker{start: time.Now(), metrics: m, archive: archive, kind: kind}
104 | }
105 | 
106 | func (r *bucketRequestTracker) finish(ctx context.Context, status string) {
107 | 	if !r.finished {
108 | 		r.finished = true
109 | 		// exclude archive path from "not found" metrics to limit cardinality on requests for nonexistent archives
110 | 		if status == "404" || status == "403" {
111 | 			r.archive = ""
112 | 		} else if isCanceled(ctx) {
113 | 			status = "canceled"
114 | 		}
115 | 		r.metrics.bucketRequests.WithLabelValues(r.archive, r.kind, status).Inc()
116 | 		r.metrics.bucketRequestDuration.WithLabelValues(r.archive, status).Observe(time.Since(r.start).Seconds())
117 | 	}
118 | }
119 | 
120 | // misc helpers
121 | 
122 | func (m *metrics) reloadFile(name string) { // count a reload of the named archive (etag changed)
123 | 	m.reloads.WithLabelValues(name).Inc()
124 | }
125 | 
126 | func (m *metrics) initCacheStats(limitBytes int) { // publish the configured cache limit and zero the usage gauges
127 | 	m.dirCacheLimitBytes.Set(float64(limitBytes))
128 | 	m.updateCacheStats(0, 0)
129 | }
130 | 
131 | func (m *metrics) updateCacheStats(sizeBytes, entries int) { // refresh the current dir-cache usage gauges
132 | 	m.dirCacheEntries.Set(float64(entries))
133 | 	m.dirCacheSizeBytes.Set(float64(sizeBytes))
134 | }
135 | 
136 | func (m *metrics) cacheRequest(archive, kind, status string) { // count a dir-cache lookup; status is hit/miss
137 | 	m.dirCacheRequests.WithLabelValues(archive, kind, status).Inc()
138 | }
139 | 
140 | func register[K 
prometheus.Collector](logger *log.Logger, metric K) K { 141 | if err := prometheus.Register(metric); err != nil { 142 | logger.Println(err) 143 | } 144 | return metric 145 | } 146 | 147 | func createMetrics(scope string, logger *log.Logger) *metrics { 148 | namespace := "pmtiles" 149 | durationBuckets := prometheus.DefBuckets 150 | kib := 1024.0 151 | mib := kib * kib 152 | sizeBuckets := []float64{1.0 * kib, 5.0 * kib, 10.0 * kib, 25.0 * kib, 50.0 * kib, 100 * kib, 250 * kib, 500 * kib, 1.0 * mib} 153 | 154 | return &metrics{ 155 | // overall requests 156 | requests: register(logger, prometheus.NewCounterVec(prometheus.CounterOpts{ 157 | Namespace: namespace, 158 | Subsystem: scope, 159 | Name: "requests_total", 160 | Help: "Overall number of requests to the service", 161 | }, []string{"archive", "handler", "status"})), 162 | responseSize: register(logger, prometheus.NewHistogramVec(prometheus.HistogramOpts{ 163 | Namespace: namespace, 164 | Subsystem: scope, 165 | Name: "response_size_bytes", 166 | Help: "Overall response size in bytes", 167 | Buckets: sizeBuckets, 168 | }, []string{"archive", "handler", "status"})), 169 | requestDuration: register(logger, prometheus.NewHistogramVec(prometheus.HistogramOpts{ 170 | Namespace: namespace, 171 | Subsystem: scope, 172 | Name: "request_duration_seconds", 173 | Help: "Overall request duration in seconds", 174 | Buckets: durationBuckets, 175 | }, []string{"archive", "handler", "status"})), 176 | 177 | // dir cache 178 | dirCacheEntries: register(logger, prometheus.NewGauge(prometheus.GaugeOpts{ 179 | Namespace: namespace, 180 | Subsystem: scope, 181 | Name: "dir_cache_entries", 182 | Help: "Number of directories in the cache", 183 | })), 184 | dirCacheSizeBytes: register(logger, prometheus.NewGauge(prometheus.GaugeOpts{ 185 | Namespace: namespace, 186 | Subsystem: scope, 187 | Name: "dir_cache_size_bytes", 188 | Help: "Current directory cache usage in bytes", 189 | })), 190 | dirCacheLimitBytes: register(logger, 
prometheus.NewGauge(prometheus.GaugeOpts{ 191 | Namespace: namespace, 192 | Subsystem: scope, 193 | Name: "dir_cache_limit_bytes", 194 | Help: "Maximum directory cache size limit in bytes", 195 | })), 196 | dirCacheRequests: register(logger, prometheus.NewCounterVec(prometheus.CounterOpts{ 197 | Namespace: namespace, 198 | Subsystem: scope, 199 | Name: "dir_cache_requests", 200 | Help: "Requests to the directory cache by archive and status (hit/miss)", 201 | }, []string{"archive", "kind", "status"})), 202 | 203 | // requests to bucket 204 | bucketRequests: register(logger, prometheus.NewCounterVec(prometheus.CounterOpts{ 205 | Namespace: namespace, 206 | Subsystem: scope, 207 | Name: "bucket_requests_total", 208 | Help: "Requests to the underlying bucket", 209 | }, []string{"archive", "kind", "status"})), 210 | bucketRequestDuration: register(logger, prometheus.NewHistogramVec(prometheus.HistogramOpts{ 211 | Namespace: namespace, 212 | Subsystem: scope, 213 | Name: "bucket_request_duration_seconds", 214 | Help: "Request duration in seconds for individual requests to the underlying bucket", 215 | Buckets: durationBuckets, 216 | }, []string{"archive", "status"})), 217 | 218 | // misc 219 | reloads: register(logger, prometheus.NewCounterVec(prometheus.CounterOpts{ 220 | Namespace: namespace, 221 | Subsystem: scope, 222 | Name: "bucket_reloads", 223 | Help: "Number of times an archive was reloaded due to the etag changing", 224 | }, []string{"archive"})), 225 | } 226 | } 227 | -------------------------------------------------------------------------------- /pmtiles/server_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "net/http" 7 | "net/http/httptest" 8 | "sort" 9 | "testing" 10 | 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | var testResponse = []byte("bar") 16 | var testHandler = 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 17 | _, _ = w.Write(testResponse) 18 | }) 19 | 20 | func TestRegex(t *testing.T) { 21 | ok, key, z, x, y, ext := parseTilePath("/foo/0/0/0") 22 | assert.False(t, ok) 23 | ok, key, z, x, y, ext = parseTilePath("/foo/0/0/0.mvt") 24 | assert.True(t, ok) 25 | assert.Equal(t, key, "foo") 26 | assert.Equal(t, z, uint8(0)) 27 | assert.Equal(t, x, uint32(0)) 28 | assert.Equal(t, y, uint32(0)) 29 | assert.Equal(t, ext, "mvt") 30 | ok, key, z, x, y, ext = parseTilePath("/foo/bar/0/0/0.mvt") 31 | assert.True(t, ok) 32 | assert.Equal(t, key, "foo/bar") 33 | assert.Equal(t, z, uint8(0)) 34 | assert.Equal(t, x, uint32(0)) 35 | assert.Equal(t, y, uint32(0)) 36 | assert.Equal(t, ext, "mvt") 37 | // https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html 38 | ok, key, z, x, y, ext = parseTilePath("/!-_.*'()/0/0/0.mvt") 39 | assert.True(t, ok) 40 | assert.Equal(t, key, "!-_.*'()") 41 | assert.Equal(t, z, uint8(0)) 42 | assert.Equal(t, x, uint32(0)) 43 | assert.Equal(t, y, uint32(0)) 44 | assert.Equal(t, ext, "mvt") 45 | ok, key = parseMetadataPath("/!-_.*'()/metadata") 46 | assert.True(t, ok) 47 | assert.Equal(t, key, "!-_.*'()") 48 | ok, key = parseTilejsonPath("/!-_.*'().json") 49 | assert.True(t, ok) 50 | assert.Equal(t, key, "!-_.*'()") 51 | } 52 | 53 | func fakeArchive(t *testing.T, header HeaderV3, metadata map[string]interface{}, tiles map[Zxy][]byte, leaves bool, internalCompression Compression) []byte { 54 | byTileID := make(map[uint64][]byte) 55 | keys := make([]uint64, 0, len(tiles)) 56 | for zxy, bytes := range tiles { 57 | header.MaxZoom = max(header.MaxZoom, zxy.Z) 58 | id := ZxyToID(zxy.Z, zxy.X, zxy.Y) 59 | byTileID[id] = bytes 60 | keys = append(keys, id) 61 | } 62 | sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) 63 | resolver := newResolver(false, false) 64 | tileDataBytes := make([]byte, 0) 65 | for _, id := range keys { 66 | tileBytes := byTileID[id] 67 | 
resolver.AddTileIsNew(id, tileBytes, 1) 68 | tileDataBytes = append(tileDataBytes, tileBytes...) 69 | } 70 | 71 | metadataBytes, _ := SerializeMetadata(metadata, internalCompression) 72 | var rootBytes []byte 73 | var leavesBytes []byte 74 | if leaves { 75 | rootBytes, leavesBytes, _ = buildRootsLeaves(resolver.Entries, 1, internalCompression) 76 | } else { 77 | rootBytes = SerializeEntries(resolver.Entries, internalCompression) 78 | leavesBytes = make([]byte, 0) 79 | } 80 | 81 | header.InternalCompression = internalCompression 82 | if header.TileType == Mvt { 83 | header.TileCompression = Gzip 84 | } 85 | 86 | header.RootOffset = HeaderV3LenBytes 87 | header.RootLength = uint64(len(rootBytes)) 88 | header.MetadataOffset = header.RootOffset + header.RootLength 89 | header.MetadataLength = uint64(len(metadataBytes)) 90 | header.LeafDirectoryOffset = header.MetadataOffset + header.MetadataLength 91 | header.LeafDirectoryLength = uint64(len(leavesBytes)) 92 | header.TileDataOffset = header.LeafDirectoryOffset + header.LeafDirectoryLength 93 | header.TileDataLength = resolver.Offset 94 | 95 | archiveBytes := SerializeHeader(header) 96 | archiveBytes = append(archiveBytes, rootBytes...) 97 | archiveBytes = append(archiveBytes, metadataBytes...) 98 | archiveBytes = append(archiveBytes, leavesBytes...) 99 | archiveBytes = append(archiveBytes, tileDataBytes...) 
100 | return archiveBytes 101 | } 102 | 103 | func newServer(t *testing.T) (mockBucket, *Server) { 104 | prometheus.DefaultRegisterer = prometheus.NewRegistry() 105 | bucket := mockBucket{make(map[string][]byte)} 106 | server, err := NewServerWithBucket(bucket, "", log.Default(), 10, "tiles.example.com") 107 | assert.Nil(t, err) 108 | server.Start() 109 | return bucket, server 110 | } 111 | 112 | func TestPostReturns405(t *testing.T) { 113 | _, server := newServer(t) 114 | res := httptest.NewRecorder() 115 | req, _ := http.NewRequest("POST", "/", nil) 116 | server.ServeHTTP(res, req) 117 | assert.Equal(t, 405, res.Code) 118 | } 119 | 120 | func TestMissingFileReturns404(t *testing.T) { 121 | _, server := newServer(t) 122 | statusCode, _, _ := server.Get(context.Background(), "/") 123 | assert.Equal(t, 204, statusCode) 124 | statusCode, _, _ = server.Get(context.Background(), "/archive.json") 125 | assert.Equal(t, 404, statusCode) 126 | statusCode, _, _ = server.Get(context.Background(), "/archive/metadata") 127 | assert.Equal(t, 404, statusCode) 128 | statusCode, _, _ = server.Get(context.Background(), "/archive/0/0/0.mvt") 129 | assert.Equal(t, 404, statusCode) 130 | } 131 | 132 | func TestMvtEmptyArchiveReads(t *testing.T) { 133 | mockBucket, server := newServer(t) 134 | header := HeaderV3{ 135 | TileType: Mvt, 136 | } 137 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{}, false, Gzip) 138 | 139 | statusCode, _, _ := server.Get(context.Background(), "/") 140 | assert.Equal(t, 204, statusCode) 141 | statusCode, _, data := server.Get(context.Background(), "/archive.json") 142 | assert.JSONEq(t, `{ 143 | "bounds": [0,0,0,0], 144 | "center": [0,0,0], 145 | "maxzoom": 0, 146 | "minzoom": 0, 147 | "scheme": "xyz", 148 | "tilejson": "3.0.0", 149 | "tiles": ["tiles.example.com/archive/{z}/{x}/{y}.mvt"], 150 | "vector_layers": null 151 | }`, string(data)) 152 | assert.Equal(t, 200, statusCode) 153 | statusCode, _, 
data = server.Get(context.Background(), "/archive/metadata") 154 | assert.JSONEq(t, `{}`, string(data)) 155 | assert.Equal(t, 200, statusCode) 156 | statusCode, _, _ = server.Get(context.Background(), "/archive/0/0/0.mvt") 157 | assert.Equal(t, 204, statusCode) 158 | } 159 | 160 | func TestReadMetadata(t *testing.T) { 161 | mockBucket, server := newServer(t) 162 | header := HeaderV3{ 163 | TileType: Mvt, 164 | } 165 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{ 166 | "vector_layers": []map[string]string{{"id": "layer1"}}, 167 | "attribution": "Attribution", 168 | "description": "Description", 169 | "name": "Name", 170 | "version": "1.0", 171 | }, map[Zxy][]byte{}, false, Gzip) 172 | 173 | statusCode, _, _ := server.Get(context.Background(), "/") 174 | assert.Equal(t, 204, statusCode) 175 | statusCode, _, data := server.Get(context.Background(), "/archive.json") 176 | assert.JSONEq(t, `{ 177 | "attribution": "Attribution", 178 | "description": "Description", 179 | "name": "Name", 180 | "version": "1.0", 181 | "bounds": [0,0,0,0], 182 | "center": [0,0,0], 183 | "maxzoom": 0, 184 | "minzoom": 0, 185 | "scheme": "xyz", 186 | "tilejson": "3.0.0", 187 | "tiles": ["tiles.example.com/archive/{z}/{x}/{y}.mvt"], 188 | "vector_layers": [ 189 | {"id": "layer1"} 190 | ] 191 | }`, string(data)) 192 | assert.Equal(t, 200, statusCode) 193 | statusCode, _, data = server.Get(context.Background(), "/archive/metadata") 194 | assert.JSONEq(t, `{ 195 | "attribution": "Attribution", 196 | "description": "Description", 197 | "name": "Name", 198 | "version": "1.0", 199 | "vector_layers": [ 200 | {"id": "layer1"} 201 | ] 202 | }`, string(data)) 203 | } 204 | 205 | func TestReadMetadataNoCompression(t *testing.T) { 206 | mockBucket, server := newServer(t) 207 | header := HeaderV3{ 208 | TileType: Mvt, 209 | } 210 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{ 211 | "vector_layers": []map[string]string{{"id": 
"layer1"}}, 212 | }, map[Zxy][]byte{}, false, NoCompression) 213 | 214 | statusCode, _, data := server.Get(context.Background(), "/archive/metadata") 215 | assert.Equal(t, 200, statusCode) 216 | assert.JSONEq(t, `{ 217 | "vector_layers": [ 218 | {"id": "layer1"} 219 | ] 220 | }`, string(data)) 221 | } 222 | 223 | func TestReadTiles(t *testing.T) { 224 | mockBucket, server := newServer(t) 225 | header := HeaderV3{ 226 | TileType: Mvt, 227 | } 228 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 229 | {0, 0, 0}: {0, 1, 2, 3}, 230 | {4, 1, 2}: {1, 2, 3}, 231 | }, false, Gzip) 232 | 233 | statusCode, _, _ := server.Get(context.Background(), "/") 234 | assert.Equal(t, 204, statusCode) 235 | statusCode, _, _ = server.Get(context.Background(), "/archive.json") 236 | assert.Equal(t, 200, statusCode) 237 | statusCode, _, _ = server.Get(context.Background(), "/archive/metadata") 238 | assert.Equal(t, 200, statusCode) 239 | statusCode, _, data := server.Get(context.Background(), "/archive/0/0/0.mvt") 240 | assert.Equal(t, 200, statusCode) 241 | assert.Equal(t, []byte{0, 1, 2, 3}, data) 242 | statusCode, _, data = server.Get(context.Background(), "/archive/4/1/2.mvt") 243 | assert.Equal(t, 200, statusCode) 244 | assert.Equal(t, []byte{1, 2, 3}, data) 245 | statusCode, _, _ = server.Get(context.Background(), "/archive/3/1/2.mvt") 246 | assert.Equal(t, 204, statusCode) 247 | } 248 | 249 | func TestReadTilesFromLeaves(t *testing.T) { 250 | mockBucket, server := newServer(t) 251 | header := HeaderV3{ 252 | TileType: Mvt, 253 | } 254 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 255 | {0, 0, 0}: {0, 1, 2, 3}, 256 | {4, 1, 2}: {1, 2, 3}, 257 | }, true, Gzip) 258 | 259 | statusCode, _, data := server.Get(context.Background(), "/archive/0/0/0.mvt") 260 | assert.Equal(t, 200, statusCode) 261 | assert.Equal(t, []byte{0, 1, 2, 3}, data) 262 | statusCode, _, data = 
server.Get(context.Background(), "/archive/4/1/2.mvt") 263 | assert.Equal(t, 200, statusCode) 264 | assert.Equal(t, []byte{1, 2, 3}, data) 265 | statusCode, _, _ = server.Get(context.Background(), "/archive/3/1/2.mvt") 266 | assert.Equal(t, 204, statusCode) 267 | } 268 | 269 | func TestReadTilesFromLeavesNoCompression(t *testing.T) { 270 | mockBucket, server := newServer(t) 271 | header := HeaderV3{ 272 | TileType: Mvt, 273 | } 274 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 275 | {0, 0, 0}: {0, 1, 2, 3}, 276 | {4, 1, 2}: {1, 2, 3}, 277 | }, true, NoCompression) 278 | 279 | statusCode, _, data := server.Get(context.Background(), "/archive/4/1/2.mvt") 280 | assert.Equal(t, 200, statusCode) 281 | assert.Equal(t, []byte{1, 2, 3}, data) 282 | } 283 | 284 | func TestInvalidateCacheOnTileRequest(t *testing.T) { 285 | mockBucket, server := newServer(t) 286 | header := HeaderV3{ 287 | TileType: Mvt, 288 | } 289 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 290 | {0, 0, 0}: {0, 1, 2, 3}, 291 | }, false, Gzip) 292 | 293 | statusCode, _, data := server.Get(context.Background(), "/archive/0/0/0.mvt") 294 | assert.Equal(t, 200, statusCode) 295 | assert.Equal(t, []byte{0, 1, 2, 3}, data) 296 | 297 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 298 | {0, 0, 0}: {4, 5, 6, 7}, 299 | }, false, Gzip) 300 | 301 | statusCode, _, data = server.Get(context.Background(), "/archive/0/0/0.mvt") 302 | assert.Equal(t, 200, statusCode) 303 | assert.Equal(t, []byte{4, 5, 6, 7}, data) 304 | } 305 | 306 | func TestInvalidateCacheOnDirRequest(t *testing.T) { 307 | mockBucket, server := newServer(t) 308 | header := HeaderV3{ 309 | TileType: Mvt, 310 | } 311 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 312 | {0, 0, 0}: {0, 1}, 313 | {1, 1, 1}: {2, 3}, 314 | }, true, 
Gzip) 315 | 316 | // cache first leaf dir 317 | statusCode, _, data := server.Get(context.Background(), "/archive/0/0/0.mvt") 318 | assert.Equal(t, 200, statusCode) 319 | assert.Equal(t, []byte{0, 1}, data) 320 | 321 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 322 | {0, 0, 0}: {4, 5}, 323 | {1, 1, 1}: {6, 7}, 324 | }, false, Gzip) 325 | 326 | // get etag mismatch on second leaf dir request 327 | statusCode, _, data = server.Get(context.Background(), "/archive/1/1/1.mvt") 328 | assert.Equal(t, 200, statusCode) 329 | assert.Equal(t, []byte{6, 7}, data) 330 | statusCode, _, data = server.Get(context.Background(), "/archive/0/0/0.mvt") 331 | assert.Equal(t, 200, statusCode) 332 | assert.Equal(t, []byte{4, 5}, data) 333 | } 334 | 335 | func TestInvalidateCacheOnTileJSONRequest(t *testing.T) { 336 | mockBucket, server := newServer(t) 337 | header := HeaderV3{ 338 | TileType: Mvt, 339 | } 340 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 341 | {0, 0, 0}: {0, 1}, 342 | {1, 1, 1}: {2, 3}, 343 | }, false, Gzip) 344 | statusCode, _, data := server.Get(context.Background(), "/archive.json") 345 | assert.Equal(t, 200, statusCode) 346 | assert.JSONEq(t, `{ 347 | "bounds": [0,0,0,0], 348 | "center": [0,0,0], 349 | "maxzoom": 1, 350 | "minzoom": 0, 351 | "scheme": "xyz", 352 | "tilejson": "3.0.0", 353 | "tiles": ["tiles.example.com/archive/{z}/{x}/{y}.mvt"], 354 | "vector_layers": null 355 | }`, string(data)) 356 | 357 | header = HeaderV3{ 358 | TileType: Mvt, 359 | CenterZoom: 4, 360 | } 361 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 362 | {0, 0, 0}: {0, 1}, 363 | {1, 1, 1}: {2, 3}, 364 | }, false, Gzip) 365 | statusCode, _, data = server.Get(context.Background(), "/archive.json") 366 | assert.Equal(t, 200, statusCode) 367 | assert.JSONEq(t, `{ 368 | "bounds": [0,0,0,0], 369 | "center": [0,0,4], 370 | 
"maxzoom": 1, 371 | "minzoom": 0, 372 | "scheme": "xyz", 373 | "tilejson": "3.0.0", 374 | "tiles": ["tiles.example.com/archive/{z}/{x}/{y}.mvt"], 375 | "vector_layers": null 376 | }`, string(data)) 377 | } 378 | 379 | func TestInvalidateCacheOnMetadataRequest(t *testing.T) { 380 | mockBucket, server := newServer(t) 381 | header := HeaderV3{ 382 | TileType: Mvt, 383 | } 384 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{ 385 | "meta": "data", 386 | }, map[Zxy][]byte{ 387 | {0, 0, 0}: {0, 1}, 388 | {1, 1, 1}: {2, 3}, 389 | }, false, Gzip) 390 | statusCode, _, data := server.Get(context.Background(), "/archive/metadata") 391 | assert.Equal(t, 200, statusCode) 392 | assert.JSONEq(t, `{ 393 | "meta": "data" 394 | }`, string(data)) 395 | 396 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{ 397 | "meta": "data2", 398 | }, map[Zxy][]byte{ 399 | {0, 0, 0}: {0, 1}, 400 | {1, 1, 1}: {2, 3}, 401 | }, false, Gzip) 402 | statusCode, _, data = server.Get(context.Background(), "/archive/metadata") 403 | assert.Equal(t, 200, statusCode) 404 | assert.JSONEq(t, `{ 405 | "meta": "data2" 406 | }`, string(data)) 407 | } 408 | 409 | func TestEtagResponsesFromTile(t *testing.T) { 410 | mockBucket, server := newServer(t) 411 | header := HeaderV3{ 412 | TileType: Mvt, 413 | } 414 | mockBucket.items["archive.pmtiles"] = fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 415 | {0, 0, 0}: {0, 1, 2, 3}, 416 | {4, 1, 2}: {1, 2, 3}, 417 | }, false, Gzip) 418 | 419 | statusCode, headers000v1, _ := server.Get(context.Background(), "/archive/0/0/0.mvt") 420 | assert.Equal(t, 200, statusCode) 421 | statusCode, headers412v1, _ := server.Get(context.Background(), "/archive/4/1/2.mvt") 422 | assert.Equal(t, 200, statusCode) 423 | statusCode, headers311v1, _ := server.Get(context.Background(), "/archive/3/1/1.mvt") 424 | assert.Equal(t, 204, statusCode) 425 | 426 | mockBucket.items["archive.pmtiles"] = 
fakeArchive(t, header, map[string]interface{}{}, map[Zxy][]byte{ 427 | {0, 0, 0}: {0, 1, 2, 3}, 428 | {4, 1, 2}: {1, 2, 3, 4}, // different 429 | }, false, Gzip) 430 | 431 | statusCode, headers000v2, _ := server.Get(context.Background(), "/archive/0/0/0.mvt") 432 | assert.Equal(t, 200, statusCode) 433 | statusCode, headers412v2, _ := server.Get(context.Background(), "/archive/4/1/2.mvt") 434 | assert.Equal(t, 200, statusCode) 435 | statusCode, headers311v2, _ := server.Get(context.Background(), "/archive/3/1/1.mvt") 436 | assert.Equal(t, 204, statusCode) 437 | 438 | // 204's have no etag 439 | assert.Equal(t, "", headers311v1["ETag"]) 440 | assert.Equal(t, "", headers311v2["ETag"]) 441 | 442 | // 000 and 311 didn't change 443 | assert.Equal(t, headers000v1["ETag"], headers000v2["ETag"]) 444 | 445 | // 412 did change 446 | assert.NotEqual(t, headers412v1["ETag"], headers412v2["ETag"]) 447 | 448 | // all are different 449 | assert.NotEqual(t, headers000v1["ETag"], headers311v1["ETag"]) 450 | assert.NotEqual(t, headers000v1["ETag"], headers412v1["ETag"]) 451 | } 452 | 453 | func TestSingleCorsOrigin(t *testing.T) { 454 | res := httptest.NewRecorder() 455 | req, _ := http.NewRequest("GET", "http://example.com/foo", nil) 456 | req.Header.Add("Origin", "http://example.com") 457 | c := NewCors("http://example.com") 458 | c.Handler(testHandler).ServeHTTP(res, req) 459 | assert.Equal(t, 200, res.Code) 460 | assert.Equal(t, "http://example.com", res.Header().Get("Access-Control-Allow-Origin")) 461 | } 462 | 463 | func TestMultiCorsOrigin(t *testing.T) { 464 | res := httptest.NewRecorder() 465 | req, _ := http.NewRequest("GET", "http://example2.com/foo", nil) 466 | req.Header.Add("Origin", "http://example2.com") 467 | c := NewCors("http://example.com,http://example2.com") 468 | c.Handler(testHandler).ServeHTTP(res, req) 469 | assert.Equal(t, 200, res.Code) 470 | assert.Equal(t, "http://example2.com", res.Header().Get("Access-Control-Allow-Origin")) 471 | } 472 | 473 | func 
TestWildcardCors(t *testing.T) { 474 | res := httptest.NewRecorder() 475 | req, _ := http.NewRequest("GET", "http://example.com/foo", nil) 476 | req.Header.Add("Origin", "http://example.com") 477 | c := NewCors("*") 478 | c.Handler(testHandler).ServeHTTP(res, req) 479 | assert.Equal(t, 200, res.Code) 480 | assert.Equal(t, "*", res.Header().Get("Access-Control-Allow-Origin")) 481 | } 482 | 483 | func TestCorsOptions(t *testing.T) { 484 | res := httptest.NewRecorder() 485 | req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) 486 | req.Header.Add("Origin", "http://example.com") 487 | req.Header.Add("Access-Control-Request-Method", "GET") 488 | c := NewCors("*") 489 | c.Handler(testHandler).ServeHTTP(res, req) 490 | assert.Equal(t, 204, res.Code) 491 | assert.Equal(t, "*", res.Header().Get("Access-Control-Allow-Origin")) 492 | } 493 | -------------------------------------------------------------------------------- /pmtiles/show.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "net/url" 9 | "strings" 10 | 11 | // "github.com/dustin/go-humanize" 12 | "io" 13 | "log" 14 | "os" 15 | ) 16 | 17 | // Show prints detailed information about an archive. 
18 | func Show(_ *log.Logger, output io.Writer, bucketURL string, key string, showHeaderJsonOnly bool, showMetadataOnly bool, showTilejson bool, publicURL string, showTile bool, z int, x int, y int) error { 19 | ctx := context.Background() 20 | 21 | bucketURL, key, err := NormalizeBucketKey(bucketURL, "", key) 22 | 23 | if err != nil { 24 | return err 25 | } 26 | 27 | bucket, err := OpenBucket(ctx, bucketURL, "") 28 | 29 | if err != nil { 30 | return fmt.Errorf("Failed to open bucket for %s, %w", bucketURL, err) 31 | } 32 | defer bucket.Close() 33 | 34 | r, err := bucket.NewRangeReader(ctx, key, 0, 16384) 35 | 36 | if err != nil { 37 | return fmt.Errorf("Failed to create range reader for %s, %w", key, err) 38 | } 39 | b, err := io.ReadAll(r) 40 | if err != nil { 41 | return fmt.Errorf("Failed to read %s, %w", key, err) 42 | } 43 | r.Close() 44 | 45 | header, err := DeserializeHeader(b[0:HeaderV3LenBytes]) 46 | if err != nil { 47 | // check to see if it's a V2 file 48 | if string(b[0:2]) == "PM" { 49 | specVersion := b[2] 50 | return fmt.Errorf("PMTiles version %d detected; please use 'pmtiles convert' to upgrade to version 3", specVersion) 51 | } 52 | 53 | return fmt.Errorf("Failed to read %s, %w", key, err) 54 | } 55 | 56 | if !showTile { 57 | metadataReader, err := bucket.NewRangeReader(ctx, key, int64(header.MetadataOffset), int64(header.MetadataLength)) 58 | if err != nil { 59 | return fmt.Errorf("Failed to create range reader for %s, %w", key, err) 60 | } 61 | 62 | metadataBytes, err := DeserializeMetadataBytes(metadataReader, header.InternalCompression) 63 | if err != nil { 64 | return fmt.Errorf("Failed to read %s, %w", key, err) 65 | } 66 | 67 | if showMetadataOnly && showTilejson { 68 | return fmt.Errorf("cannot use more than one of --header-json, --metadata, and --tilejson together") 69 | } 70 | 71 | if showHeaderJsonOnly { 72 | fmt.Fprintln(output, headerToStringifiedJson(header)) 73 | } else if showMetadataOnly { 74 | fmt.Fprintln(output, 
string(metadataBytes)) 75 | } else if showTilejson { 76 | if publicURL == "" { 77 | // Using Fprintf instead of logger here, as this message should be written to Stderr in case 78 | // Stdout is being redirected. 79 | fmt.Fprintln(os.Stderr, "no --public-url specified; using placeholder tiles URL") 80 | } 81 | tilejsonBytes, err := CreateTileJSON(header, metadataBytes, publicURL) 82 | if err != nil { 83 | return fmt.Errorf("Failed to create tilejson for %s, %w", key, err) 84 | } 85 | fmt.Fprintln(output, string(tilejsonBytes)) 86 | } else { 87 | fmt.Printf("pmtiles spec version: %d\n", header.SpecVersion) 88 | // fmt.Printf("total size: %s\n", humanize.Bytes(uint64(r.Size()))) 89 | fmt.Printf("tile type: %s\n", tileTypeToString(header.TileType)) 90 | fmt.Printf("bounds: (long: %f, lat: %f) (long: %f, lat: %f)\n", float64(header.MinLonE7)/10000000, float64(header.MinLatE7)/10000000, float64(header.MaxLonE7)/10000000, float64(header.MaxLatE7)/10000000) 91 | fmt.Printf("min zoom: %d\n", header.MinZoom) 92 | fmt.Printf("max zoom: %d\n", header.MaxZoom) 93 | fmt.Printf("center: (long: %f, lat: %f)\n", float64(header.CenterLonE7)/10000000, float64(header.CenterLatE7)/10000000) 94 | fmt.Printf("center zoom: %d\n", header.CenterZoom) 95 | fmt.Printf("addressed tiles count: %d\n", header.AddressedTilesCount) 96 | fmt.Printf("tile entries count: %d\n", header.TileEntriesCount) 97 | fmt.Printf("tile contents count: %d\n", header.TileContentsCount) 98 | fmt.Printf("clustered: %t\n", header.Clustered) 99 | internalCompression, _ := compressionToString(header.InternalCompression) 100 | fmt.Printf("internal compression: %s\n", internalCompression) 101 | tileCompression, _ := compressionToString(header.TileCompression) 102 | fmt.Printf("tile compression: %s\n", tileCompression) 103 | 104 | var metadataMap map[string]interface{} 105 | json.Unmarshal(metadataBytes, &metadataMap) 106 | for k, v := range metadataMap { 107 | switch v := v.(type) { 108 | case string: 109 | 
fmt.Println(k, v) 110 | default: 111 | fmt.Println(k, "") 112 | } 113 | } 114 | 115 | if strings.HasPrefix(bucketURL, "https://") { 116 | fmt.Println("web viewer: https://pmtiles.io/#url=" + url.QueryEscape(bucketURL+"/"+key)) 117 | } 118 | } 119 | } else { 120 | // write the tile to stdout 121 | 122 | tileID := ZxyToID(uint8(z), uint32(x), uint32(y)) 123 | 124 | dirOffset := header.RootOffset 125 | dirLength := header.RootLength 126 | 127 | for depth := 0; depth <= 3; depth++ { 128 | r, err := bucket.NewRangeReader(ctx, key, int64(dirOffset), int64(dirLength)) 129 | if err != nil { 130 | return fmt.Errorf("Network error") 131 | } 132 | defer r.Close() 133 | b, err := io.ReadAll(r) 134 | if err != nil { 135 | return fmt.Errorf("I/O Error") 136 | } 137 | directory := DeserializeEntries(bytes.NewBuffer(b), header.InternalCompression) 138 | entry, ok := FindTile(directory, tileID) 139 | if ok { 140 | if entry.RunLength > 0 { 141 | tileReader, err := bucket.NewRangeReader(ctx, key, int64(header.TileDataOffset+entry.Offset), int64(entry.Length)) 142 | if err != nil { 143 | return fmt.Errorf("Network error") 144 | } 145 | defer tileReader.Close() 146 | tileBytes, err := io.ReadAll(tileReader) 147 | if err != nil { 148 | return fmt.Errorf("I/O Error") 149 | } 150 | output.Write(tileBytes) 151 | break 152 | } 153 | dirOffset = header.LeafDirectoryOffset + entry.Offset 154 | dirLength = uint64(entry.Length) 155 | } else { 156 | fmt.Println("Tile not found in archive.") 157 | return nil 158 | } 159 | } 160 | } 161 | return nil 162 | } 163 | -------------------------------------------------------------------------------- /pmtiles/show_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "github.com/stretchr/testify/assert" 7 | "log" 8 | "os" 9 | "testing" 10 | ) 11 | 12 | func TestShowHeader(t *testing.T) { 13 | var b bytes.Buffer 14 | logger := log.New(os.Stdout, "", 
log.Ldate|log.Ltime|log.Lshortfile) 15 | err := Show(logger, &b, "", "fixtures/test_fixture_1.pmtiles", true, false, false, "", false, 0, 0, 0) 16 | assert.Nil(t, err) 17 | 18 | var input map[string]interface{} 19 | json.Unmarshal(b.Bytes(), &input) 20 | assert.Equal(t, "mvt", input["tile_type"]) 21 | assert.Equal(t, "gzip", input["tile_compression"]) 22 | } 23 | 24 | func TestShowMetadata(t *testing.T) { 25 | var b bytes.Buffer 26 | logger := log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile) 27 | err := Show(logger, &b, "", "fixtures/test_fixture_1.pmtiles", false, true, false, "", false, 0, 0, 0) 28 | assert.Nil(t, err) 29 | 30 | var input map[string]interface{} 31 | json.Unmarshal(b.Bytes(), &input) 32 | assert.Equal(t, "tippecanoe v2.5.0", input["generator"]) 33 | } 34 | -------------------------------------------------------------------------------- /pmtiles/sync.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "github.com/cespare/xxhash/v2" 9 | "github.com/dustin/go-humanize" 10 | "github.com/schollz/progressbar/v3" 11 | "golang.org/x/sync/errgroup" 12 | "io" 13 | "log" 14 | "mime" 15 | "mime/multipart" 16 | "net/http" 17 | "os" 18 | "runtime" 19 | "sort" 20 | "strconv" 21 | "strings" 22 | "sync" 23 | "time" 24 | ) 25 | 26 | type multiRange struct { 27 | str string 28 | ranges []srcDstRange 29 | } 30 | 31 | func makeMultiRanges(ranges []srcDstRange, baseOffset int64, maxHeaderBytes int) []multiRange { 32 | var result []multiRange 33 | var b strings.Builder 34 | var currentRanges []srcDstRange 35 | 36 | for _, r := range ranges { 37 | rangeStr := fmt.Sprintf("%d-%d", 38 | baseOffset+int64(r.SrcOffset), 39 | baseOffset+int64(r.SrcOffset)+int64(r.Length)-1, 40 | ) 41 | 42 | if b.Len()+len(rangeStr)+1 > maxHeaderBytes { 43 | if b.Len() > 0 { 44 | result = append(result, multiRange{ 45 | str: b.String(), 46 | ranges: 
currentRanges, 47 | }) 48 | b.Reset() 49 | currentRanges = nil 50 | } 51 | } 52 | 53 | if b.Len() > 0 { 54 | b.WriteString(",") 55 | } 56 | b.WriteString(rangeStr) 57 | currentRanges = append(currentRanges, r) 58 | } 59 | 60 | if b.Len() > 0 { 61 | result = append(result, multiRange{ 62 | str: b.String(), 63 | ranges: currentRanges, 64 | }) 65 | } 66 | 67 | return result 68 | } 69 | 70 | func Sync(logger *log.Logger, oldVersion string, newVersion string, dryRun bool) error { 71 | fmt.Println("WARNING: This is an experimental feature. Do not rely on this in production!") 72 | start := time.Now() 73 | 74 | client := &http.Client{} 75 | 76 | var bufferedReader *bufio.Reader 77 | req, err := http.NewRequest("GET", newVersion+".sync", nil) 78 | if err != nil { 79 | return err 80 | } 81 | resp, err := client.Do(req) 82 | if resp.StatusCode != http.StatusOK { 83 | return fmt.Errorf(".sync file not found") 84 | } 85 | if err != nil { 86 | return err 87 | } 88 | bar := progressbar.DefaultBytes( 89 | resp.ContentLength, 90 | "downloading syncfile", 91 | ) 92 | bufferedReader = bufio.NewReader(io.TeeReader(resp.Body, bar)) 93 | 94 | var syncHeader syncHeader 95 | jsonBytes, _ := bufferedReader.ReadSlice('\n') 96 | 97 | json.Unmarshal(jsonBytes, &syncHeader) 98 | 99 | blocks := deserializeSyncBlocks(syncHeader.NumBlocks, bufferedReader) 100 | bar.Close() 101 | 102 | ctx := context.Background() 103 | 104 | oldFile, err := os.OpenFile(oldVersion, os.O_RDONLY, 0666) 105 | defer oldFile.Close() 106 | 107 | if err != nil { 108 | return err 109 | } 110 | 111 | buf := make([]byte, HeaderV3LenBytes) 112 | _, err = oldFile.Read(buf) 113 | if err != nil { 114 | return err 115 | } 116 | oldHeader, err := DeserializeHeader(buf) 117 | if err != nil { 118 | return err 119 | } 120 | 121 | if !oldHeader.Clustered { 122 | return fmt.Errorf("archive must be clustered for sync") 123 | } 124 | 125 | bar = progressbar.Default( 126 | int64(len(blocks)), 127 | "calculating diff", 128 | ) 129 | 130 | 
wanted := make([]syncBlock, 0) 131 | have := make([]srcDstRange, 0) 132 | 133 | idx := 0 134 | 135 | tasks := make(chan syncTask, 1000) 136 | var wg sync.WaitGroup 137 | var mu sync.Mutex 138 | 139 | errs, _ := errgroup.WithContext(ctx) 140 | 141 | for i := 0; i < runtime.GOMAXPROCS(0); i++ { 142 | errs.Go(func() error { 143 | wg.Add(1) 144 | for task := range tasks { 145 | hasher := xxhash.New() 146 | r := io.NewSectionReader(oldFile, int64(oldHeader.TileDataOffset+task.OldOffset), int64(task.NewBlock.Length)) 147 | 148 | if _, err := io.Copy(hasher, r); err != nil { 149 | log.Fatal(err) 150 | } 151 | 152 | mu.Lock() 153 | if task.NewBlock.Hash == hasher.Sum64() { 154 | have = append(have, srcDstRange{SrcOffset: task.OldOffset, DstOffset: task.NewBlock.Offset, Length: task.NewBlock.Length}) 155 | } else { 156 | wanted = append(wanted, task.NewBlock) 157 | } 158 | mu.Unlock() 159 | } 160 | wg.Done() 161 | return nil 162 | }) 163 | } 164 | 165 | err = IterateEntries(oldHeader, 166 | func(offset uint64, length uint64) ([]byte, error) { 167 | return io.ReadAll(io.NewSectionReader(oldFile, int64(offset), int64(length))) 168 | }, 169 | func(e EntryV3) { 170 | if idx < len(blocks) { 171 | for e.TileID > blocks[idx].Start { 172 | mu.Lock() 173 | wanted = append(wanted, blocks[idx]) 174 | mu.Unlock() 175 | bar.Add(1) 176 | idx = idx + 1 177 | } 178 | 179 | if e.TileID == blocks[idx].Start { 180 | tasks <- syncTask{NewBlock: blocks[idx], OldOffset: e.Offset} 181 | bar.Add(1) 182 | idx = idx + 1 183 | } 184 | } 185 | }) 186 | 187 | if err != nil { 188 | return err 189 | } 190 | 191 | // we may not've consumed until the end 192 | for idx < len(blocks) { 193 | mu.Lock() 194 | wanted = append(wanted, blocks[idx]) 195 | mu.Unlock() 196 | bar.Add(1) 197 | idx = idx + 1 198 | } 199 | 200 | close(tasks) 201 | wg.Wait() 202 | 203 | sort.Slice(wanted, func(i, j int) bool { return wanted[i].Start < wanted[j].Start }) 204 | sort.Slice(have, func(i, j int) bool { return 
have[i].SrcOffset < have[j].SrcOffset }) 205 | 206 | toTransfer := uint64(0) 207 | totalRemoteBytes := uint64(0) 208 | for _, v := range wanted { 209 | toTransfer += v.Length 210 | totalRemoteBytes += v.Length 211 | } 212 | 213 | for _, v := range have { 214 | totalRemoteBytes += v.Length 215 | } 216 | 217 | blocksMatched := float64(len(have)) / float64(len(blocks)) * 100 218 | pct := float64(toTransfer) / float64(totalRemoteBytes) * 100 219 | 220 | fmt.Printf("%d/%d blocks matched (%.1f%%), need to transfer %s/%s (%.1f%%).\n", len(have), len(blocks), blocksMatched, humanize.Bytes(toTransfer), humanize.Bytes(totalRemoteBytes), pct) 221 | 222 | ranges := make([]srcDstRange, 0) 223 | for _, v := range wanted { 224 | l := len(ranges) 225 | // combine contiguous ranges 226 | if l > 0 && (ranges[l-1].SrcOffset+ranges[l-1].Length) == v.Offset { 227 | ranges[l-1].Length = ranges[l-1].Length + v.Length 228 | } else { 229 | ranges = append(ranges, srcDstRange{SrcOffset: v.Offset, DstOffset: v.Offset, Length: v.Length}) 230 | } 231 | } 232 | 233 | haveRanges := make([]srcDstRange, 0) 234 | for _, v := range have { 235 | l := len(haveRanges) 236 | // combine contiguous ranges 237 | if l > 0 && (haveRanges[l-1].SrcOffset+haveRanges[l-1].Length) == v.SrcOffset { 238 | haveRanges[l-1].Length = haveRanges[l-1].Length + v.Length 239 | } else { 240 | haveRanges = append(haveRanges, v) 241 | } 242 | } 243 | 244 | fmt.Printf("need %d chunks\n", len(ranges)) 245 | 246 | if !dryRun { 247 | req, err := http.NewRequest("HEAD", newVersion, nil) 248 | resp, err := client.Do(req) 249 | targetLength, _ := strconv.Atoi(resp.Header.Get("Content-Length")) 250 | 251 | tmpFilename := oldVersion + ".tmp" 252 | outfile, err := os.Create(tmpFilename) 253 | outfile.Truncate(int64(targetLength)) 254 | 255 | // write the first 16 kb to the new file 256 | req, err = http.NewRequest("GET", newVersion, nil) 257 | req.Header.Set("Range", "bytes=0-16383") 258 | resp, err = client.Do(req) 259 | 
bufferedReader = bufio.NewReader(io.TeeReader(resp.Body, outfile)) 260 | if err != nil { 261 | return err 262 | } 263 | bytesData, err := io.ReadAll(bufferedReader) 264 | if err != nil { 265 | return err 266 | } 267 | newHeader, err := DeserializeHeader(bytesData[0:HeaderV3LenBytes]) 268 | if err != nil { 269 | return err 270 | } 271 | 272 | // write the metadata section to the new file 273 | metadataWriter := io.NewOffsetWriter(outfile, int64(newHeader.MetadataOffset)) 274 | req, err = http.NewRequest("GET", newVersion, nil) 275 | req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", newHeader.MetadataOffset, newHeader.MetadataOffset+newHeader.MetadataLength-1)) 276 | resp, err = client.Do(req) 277 | io.Copy(metadataWriter, resp.Body) 278 | 279 | // write the leaf directories, if any, to the new file (show progress) 280 | leafWriter := io.NewOffsetWriter(outfile, int64(newHeader.LeafDirectoryOffset)) 281 | req, err = http.NewRequest("GET", newVersion, nil) 282 | req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", newHeader.LeafDirectoryOffset, newHeader.LeafDirectoryOffset+newHeader.LeafDirectoryLength-1)) 283 | resp, err = client.Do(req) 284 | 285 | leafBar := progressbar.DefaultBytes( 286 | int64(newHeader.LeafDirectoryLength), 287 | "downloading leaf directories", 288 | ) 289 | io.Copy(leafWriter, io.TeeReader(resp.Body, leafBar)) 290 | leafBar.Close() 291 | 292 | fmt.Println(len(have), "local chunks") 293 | bar := progressbar.DefaultBytes( 294 | int64(totalRemoteBytes-toTransfer), 295 | "copying local chunks", 296 | ) 297 | 298 | // write the tile data (from local) 299 | for _, h := range haveRanges { 300 | chunkWriter := io.NewOffsetWriter(outfile, int64(newHeader.TileDataOffset+h.DstOffset)) 301 | r := io.NewSectionReader(oldFile, int64(oldHeader.TileDataOffset+h.SrcOffset), int64(h.Length)) 302 | io.Copy(io.MultiWriter(chunkWriter, bar), r) 303 | } 304 | 305 | oldFile.Close() 306 | 307 | // write the tile data (from remote) 308 | multiRanges := 
makeMultiRanges(ranges, int64(newHeader.TileDataOffset), 1048576-200) 309 | fmt.Println("Batched into http requests", len(multiRanges)) 310 | 311 | bar = progressbar.DefaultBytes( 312 | int64(toTransfer), 313 | "fetching remote chunks", 314 | ) 315 | 316 | downloadPart := func(task multiRange) error { 317 | req, err := http.NewRequest("GET", newVersion, nil) 318 | req.Header.Set("Range", fmt.Sprintf("bytes=%s", task.str)) 319 | resp, err := client.Do(req) 320 | if resp.StatusCode != http.StatusPartialContent { 321 | return fmt.Errorf("non-OK multirange request") 322 | } 323 | 324 | _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) 325 | if err != nil { 326 | return err 327 | } 328 | 329 | mr := multipart.NewReader(resp.Body, params["boundary"]) 330 | 331 | for _, r := range task.ranges { 332 | part, _ := mr.NextPart() 333 | _ = part.Header.Get("Content-Range") 334 | chunkWriter := io.NewOffsetWriter(outfile, int64(newHeader.TileDataOffset+r.DstOffset)) 335 | io.Copy(io.MultiWriter(chunkWriter, bar), part) 336 | } 337 | return nil 338 | } 339 | 340 | var mu sync.Mutex 341 | downloadThreads := 4 342 | 343 | errs, _ := errgroup.WithContext(ctx) 344 | 345 | for i := 0; i < downloadThreads; i++ { 346 | errs.Go(func() error { 347 | done := false 348 | var head multiRange 349 | for { 350 | mu.Lock() 351 | if len(multiRanges) == 0 { 352 | done = true 353 | } else { 354 | head, multiRanges = multiRanges[0], multiRanges[1:] 355 | } 356 | mu.Unlock() 357 | if done { 358 | return nil 359 | } 360 | err := downloadPart(head) 361 | if err != nil { 362 | return err 363 | } 364 | } 365 | }) 366 | } 367 | 368 | err = errs.Wait() 369 | if err != nil { 370 | return err 371 | } 372 | 373 | // atomically rename the old file to the new file. 
374 | outfile.Close() 375 | err = os.Rename(tmpFilename, oldVersion) 376 | if err != nil { 377 | return err 378 | } 379 | } 380 | 381 | fmt.Printf("Completed sync in %v.\n", time.Since(start)) 382 | return nil 383 | } 384 | -------------------------------------------------------------------------------- /pmtiles/sync_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestMakeMultiRanges(t *testing.T) { 9 | ranges := make([]srcDstRange, 0) 10 | ranges = append(ranges, srcDstRange{0, 0, 50}) 11 | ranges = append(ranges, srcDstRange{60, 60, 60}) 12 | ranges = append(ranges, srcDstRange{200, 200, 50}) 13 | result := makeMultiRanges(ranges, 10, 15) 14 | assert.Equal(t, 2, len(result)) 15 | assert.Equal(t, result[0].str, "10-59,70-129") 16 | assert.Equal(t, result[1].str, "210-259") 17 | } 18 | -------------------------------------------------------------------------------- /pmtiles/tile_id.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "math/bits" 5 | ) 6 | 7 | func rotate(n uint32, x uint32, y uint32, rx uint32, ry uint32) (uint32, uint32) { 8 | if ry == 0 { 9 | if rx != 0 { 10 | x = n - 1 - x 11 | y = n - 1 - y 12 | } 13 | return y, x 14 | } 15 | return x, y 16 | } 17 | 18 | // ZxyToID converts (Z,X,Y) tile coordinates to a Hilbert TileID. 19 | func ZxyToID(z uint8, x uint32, y uint32) uint64 { 20 | var acc uint64 = (1<<(z*2) - 1) / 3 21 | n := uint32(z - 1) 22 | for s := uint32(1 << n); s > 0; s >>= 1 { 23 | var rx = s & x 24 | var ry = s & y 25 | acc += uint64((3*rx)^ry) << n 26 | x, y = rotate(s, x, y, rx, ry) 27 | n-- 28 | } 29 | return acc 30 | } 31 | 32 | // IDToZxy converts a Hilbert TileID to (Z,X,Y) tile coordinates. 
// rotate flips/transposes an (x, y) pair within an n-by-n quadrant,
// implementing the per-step transform of the Hilbert curve.
func rotate(n uint32, x uint32, y uint32, rx uint32, ry uint32) (uint32, uint32) {
	if ry != 0 {
		return x, y
	}
	if rx != 0 {
		x = n - 1 - x
		y = n - 1 - y
	}
	return y, x
}

// ZxyToID converts (Z,X,Y) tile coordinates to a Hilbert TileID.
func ZxyToID(z uint8, x uint32, y uint32) uint64 {
	// ID of the first tile at zoom z: the count of all tiles above it.
	var id uint64 = (1<<(z*2) - 1) / 3
	shift := uint32(z - 1)
	for step := uint32(1 << shift); step > 0; step >>= 1 {
		rx := step & x
		ry := step & y
		id += uint64((3*rx)^ry) << shift
		x, y = rotate(step, x, y, rx, ry)
		shift--
	}
	return id
}

// IDToZxy converts a Hilbert TileID to (Z,X,Y) tile coordinates.
func IDToZxy(i uint64) (uint8, uint32, uint32) {
	z := uint8(bits.Len64(3*i+1)-1) / 2
	var base uint64 = (1<<(z*2) - 1) / 3
	pos := i - base
	var x, y uint32
	for a := uint8(0); a < z; a++ {
		s := uint32(1) << a
		rx := 1 & (uint32(pos) >> 1)
		ry := 1 & (uint32(pos) ^ rx)
		x, y = rotate(s, x, y, rx, ry)
		x += rx << a
		y += ry << a
		pos >>= 2
	}
	return z, x, y
}

// ParentID efficiently finds a parent Hilbert TileID without converting to (Z,X,Y).
func ParentID(i uint64) uint64 {
	z := uint8(64-bits.LeadingZeros64(3*i+1)-1) / 2
	var childBase uint64 = (1<<(z*2) - 1) / 3
	var parentBase uint64 = (1<<((z-1)*2) - 1) / 3
	return parentBase + (i-childBase)/4
}
8 | func CreateTileJSON(header HeaderV3, metadataBytes []byte, tileURL string) ([]byte, error) { 9 | var metadataMap map[string]interface{} 10 | json.Unmarshal(metadataBytes, &metadataMap) 11 | 12 | tilejson := make(map[string]interface{}) 13 | 14 | tilejson["tilejson"] = "3.0.0" 15 | tilejson["scheme"] = "xyz" 16 | 17 | if tileURL == "" { 18 | tileURL = "https://example.com" 19 | } 20 | 21 | tilejson["tiles"] = []string{tileURL + "/{z}/{x}/{y}" + headerExt(header)} 22 | if ok := header.TileType == Mvt; ok { 23 | tilejson["vector_layers"] = metadataMap["vector_layers"] 24 | } 25 | 26 | if val, ok := metadataMap["attribution"]; ok { 27 | tilejson["attribution"] = val 28 | } 29 | 30 | if val, ok := metadataMap["description"]; ok { 31 | tilejson["description"] = val 32 | } 33 | 34 | if val, ok := metadataMap["name"]; ok { 35 | tilejson["name"] = val 36 | } 37 | 38 | if val, ok := metadataMap["version"]; ok { 39 | tilejson["version"] = val 40 | } 41 | 42 | E7 := 10000000.0 43 | tilejson["bounds"] = []float64{float64(header.MinLonE7) / E7, float64(header.MinLatE7) / E7, float64(header.MaxLonE7) / E7, float64(header.MaxLatE7) / E7} 44 | tilejson["center"] = []interface{}{float64(header.CenterLonE7) / E7, float64(header.CenterLatE7) / E7, header.CenterZoom} 45 | tilejson["minzoom"] = header.MinZoom 46 | tilejson["maxzoom"] = header.MaxZoom 47 | 48 | return json.MarshalIndent(tilejson, "", "\t") 49 | } 50 | -------------------------------------------------------------------------------- /pmtiles/tilejson_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestCreateTilejson(t *testing.T) { 11 | // Define test inputs 12 | header := HeaderV3{ 13 | MinZoom: 0.0, 14 | MaxZoom: 14.0, 15 | MinLonE7: -1144000000, 16 | MinLatE7: 479000000, 17 | MaxLonE7: -1139000000, 18 | MaxLatE7: 483000000, 19 | CenterLonE7: 
-1141500000, 20 | CenterLatE7: 481000000, 21 | TileType: Mvt, 22 | } 23 | metadataBytes := []byte(` 24 | { 25 | "vector_layers": [{"id": "layer1"}], 26 | "attribution": "Attribution", 27 | "description": "Description", 28 | "name": "Name", 29 | "version": "1.0" 30 | }`) 31 | tileURL := "https://example.com/foo" 32 | 33 | // Call the function 34 | tilejsonBytes, err := CreateTileJSON(header, metadataBytes, tileURL) 35 | 36 | // Check for errors 37 | if err != nil { 38 | t.Errorf("CreateTilejson returned an error: %v", err) 39 | } 40 | 41 | // Parse the tilejsonBytes to check the output 42 | var tilejson map[string]interface{} 43 | err = json.Unmarshal(tilejsonBytes, &tilejson) 44 | if err != nil { 45 | t.Errorf("Failed to parse the generated TileJSON: %v", err) 46 | } 47 | 48 | assert.Equal(t, "3.0.0", tilejson["tilejson"]) 49 | assert.Equal(t, "xyz", tilejson["scheme"]) 50 | assert.Equal(t, []interface{}{"https://example.com/foo/{z}/{x}/{y}.mvt"}, tilejson["tiles"]) 51 | assert.Equal(t, []interface{}{map[string]interface{}{"id": "layer1"}}, tilejson["vector_layers"]) 52 | assert.Equal(t, "Attribution", tilejson["attribution"]) 53 | assert.Equal(t, "Description", tilejson["description"]) 54 | assert.Equal(t, "Name", tilejson["name"]) 55 | assert.Equal(t, "1.0", tilejson["version"]) 56 | 57 | assert.Equal(t, []interface{}{-114.400000, 47.900000, -113.900000, 48.300000}, tilejson["bounds"]) 58 | assert.Equal(t, []interface{}{-114.150000, 48.100000, 0.0}, tilejson["center"]) 59 | assert.Equal(t, 0.0, tilejson["minzoom"]) 60 | assert.Equal(t, 14.0, tilejson["maxzoom"]) 61 | } 62 | 63 | func TestCreateTilejsonOptionalFields(t *testing.T) { 64 | header := HeaderV3{ 65 | MinZoom: 0.0, 66 | MaxZoom: 14.0, 67 | MinLonE7: -1144000000, 68 | MinLatE7: 479000000, 69 | MaxLonE7: -1139000000, 70 | MaxLatE7: 483000000, 71 | CenterLonE7: -1141500000, 72 | CenterLatE7: 481000000, 73 | TileType: Png, 74 | } 75 | metadataBytes := []byte(` 76 | { 77 | }`) 78 | 79 | tilejsonBytes, err := 
CreateTileJSON(header, metadataBytes, "") 80 | 81 | // Check for errors 82 | if err != nil { 83 | t.Errorf("CreateTilejson returned an error: %v", err) 84 | } 85 | 86 | var tilejson map[string]interface{} 87 | err = json.Unmarshal(tilejsonBytes, &tilejson) 88 | if err != nil { 89 | t.Errorf("Failed to parse the generated TileJSON: %v", err) 90 | } 91 | 92 | assert.Equal(t, "3.0.0", tilejson["tilejson"]) 93 | assert.Equal(t, "xyz", tilejson["scheme"]) 94 | assert.Equal(t, []interface{}{"https://example.com/{z}/{x}/{y}.png"}, tilejson["tiles"]) 95 | assert.NotContains(t, tilejson, "attribution") 96 | assert.NotContains(t, tilejson, "description") 97 | assert.NotContains(t, tilejson, "name") 98 | assert.NotContains(t, tilejson, "version") 99 | } 100 | -------------------------------------------------------------------------------- /pmtiles/upload.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/schollz/progressbar/v3" 7 | "gocloud.dev/blob" 8 | "io" 9 | "log" 10 | "os" 11 | ) 12 | 13 | // Determine the multipart block size based on the total file size. 14 | func partSizeBytes(totalSize int64) int { 15 | if totalSize/(5*1024*1024) >= 10_000 { 16 | return int(totalSize/10_000 + 1) 17 | } 18 | return 5 * 1024 * 1024 19 | } 20 | 21 | // Upload a pmtiles archive to a bucket. 
22 | func Upload(_ *log.Logger, InputPMTiles string, bucket string, RemotePMTiles string, maxConcurrency int) error { 23 | ctx := context.Background() 24 | 25 | b, err := blob.OpenBucket(ctx, bucket) 26 | if err != nil { 27 | return fmt.Errorf("Failed to setup bucket: %w", err) 28 | } 29 | defer b.Close() 30 | 31 | f, err := os.Open(InputPMTiles) 32 | if err != nil { 33 | return fmt.Errorf("Failed to open file: %w", err) 34 | } 35 | defer f.Close() 36 | 37 | filestat, err := f.Stat() 38 | if err != nil { 39 | return fmt.Errorf("Failed to stat file: %w", err) 40 | } 41 | 42 | opts := &blob.WriterOptions{ 43 | BufferSize: partSizeBytes(filestat.Size()), 44 | MaxConcurrency: maxConcurrency, 45 | } 46 | 47 | w, err := b.NewWriter(ctx, RemotePMTiles, opts) 48 | if err != nil { 49 | return fmt.Errorf("Failed to obtain writer: %w", err) 50 | } 51 | 52 | bar := progressbar.Default(filestat.Size()) 53 | io.Copy(io.MultiWriter(w, bar), f) 54 | 55 | if err := w.Close(); err != nil { 56 | return fmt.Errorf("Failed to complete upload: %w", err) 57 | } 58 | 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- /pmtiles/upload_test.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestPartSizeBytes(t *testing.T) { 9 | assert.Equal(t, 5*1024*1024, partSizeBytes(100)) 10 | assert.Equal(t, 6442451, partSizeBytes(60*1024*1024*1024)) 11 | } 12 | -------------------------------------------------------------------------------- /pmtiles/verify.go: -------------------------------------------------------------------------------- 1 | package pmtiles 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/RoaringBitmap/roaring/roaring64" 7 | "io" 8 | "log" 9 | "math" 10 | "os" 11 | "time" 12 | ) 13 | 14 | // Verify that an archive's header statistics are correct, 15 | // and that tiles are 
propertly ordered if clustered=true. 16 | func Verify(_ *log.Logger, file string) error { 17 | start := time.Now() 18 | ctx := context.Background() 19 | 20 | bucketURL, key, err := NormalizeBucketKey("", "", file) 21 | 22 | if err != nil { 23 | return err 24 | } 25 | 26 | bucket, err := OpenBucket(ctx, bucketURL, "") 27 | 28 | if err != nil { 29 | return fmt.Errorf("failed to open bucket for %s, %w", bucketURL, err) 30 | } 31 | defer bucket.Close() 32 | 33 | r, err := bucket.NewRangeReader(ctx, key, 0, 16384) 34 | 35 | if err != nil { 36 | return fmt.Errorf("failed to create range reader for %s, %w", key, err) 37 | } 38 | b, err := io.ReadAll(r) 39 | if err != nil { 40 | return fmt.Errorf("failed to read %s, %w", key, err) 41 | } 42 | r.Close() 43 | 44 | header, err := DeserializeHeader(b[0:HeaderV3LenBytes]) 45 | 46 | if err != nil { 47 | return fmt.Errorf("failed to read %s, %w", key, err) 48 | } 49 | 50 | if header.RootOffset == 0 { 51 | return fmt.Errorf("Root directory offset=%v must not be 0", header.RootOffset) 52 | } 53 | 54 | if header.MetadataOffset == 0 { 55 | return fmt.Errorf("Metadata offset=%v must not be 0", header.MetadataOffset) 56 | } 57 | 58 | if header.LeafDirectoryOffset == 0 { 59 | return fmt.Errorf("Leaf directories offset=%v must not be 0", header.LeafDirectoryOffset) 60 | } 61 | 62 | if header.TileDataOffset == 0 { 63 | return fmt.Errorf("Tile data offset=%v must not be 0", header.TileDataOffset) 64 | } 65 | 66 | fileInfo, _ := os.Stat(file) 67 | 68 | if header.RootLength > uint64(fileInfo.Size()) { 69 | return fmt.Errorf("Root directory offset=%v length=%v out of bounds", header.RootOffset, header.RootLength) 70 | } 71 | 72 | if header.MetadataLength > uint64(fileInfo.Size()) { 73 | return fmt.Errorf("Metadata offset=%v length=%v out of bounds", header.MetadataOffset, header.MetadataLength) 74 | } 75 | 76 | if header.LeafDirectoryLength > uint64(fileInfo.Size()) { 77 | return fmt.Errorf("Leaf directories offset=%v length=%v out of 
bounds", header.LeafDirectoryOffset, header.LeafDirectoryLength) 78 | } 79 | 80 | if header.TileDataLength > uint64(fileInfo.Size()) { 81 | return fmt.Errorf("Tile data offset=%v length=%v out of bounds", header.TileDataOffset, header.TileDataLength) 82 | } 83 | 84 | lengthFromHeader := int64(HeaderV3LenBytes + header.RootLength + header.MetadataLength + header.LeafDirectoryLength + header.TileDataLength) 85 | lengthFromHeaderWithPadding := int64(16384 + header.MetadataLength + header.LeafDirectoryLength + header.TileDataLength) 86 | 87 | if !(fileInfo.Size() == lengthFromHeader || fileInfo.Size() == lengthFromHeaderWithPadding) { 88 | return fmt.Errorf("total length of archive %v does not match header %v or %v (padded)", fileInfo.Size(), lengthFromHeader, lengthFromHeaderWithPadding) 89 | } 90 | 91 | var minTileID uint64 92 | var maxTileID uint64 93 | minTileID = math.MaxUint64 94 | maxTileID = 0 95 | 96 | addressedTiles := 0 97 | tileEntries := 0 98 | offsets := roaring64.New() 99 | var currentOffset uint64 100 | 101 | err = IterateEntries(header, 102 | func(offset uint64, length uint64) ([]byte, error) { 103 | reader, err := bucket.NewRangeReader(ctx, key, int64(offset), int64(length)) 104 | if err != nil { 105 | return nil, err 106 | } 107 | defer reader.Close() 108 | return io.ReadAll(reader) 109 | }, 110 | func(e EntryV3) { 111 | offsets.Add(e.Offset) 112 | addressedTiles += int(e.RunLength) 113 | tileEntries++ 114 | 115 | if e.TileID < minTileID { 116 | minTileID = e.TileID 117 | } 118 | if e.TileID > maxTileID { 119 | maxTileID = e.TileID 120 | } 121 | 122 | if e.Offset+uint64(e.Length) > header.TileDataLength { 123 | fmt.Printf("Invalid: %v outside of tile data section", e) 124 | } 125 | 126 | if header.Clustered { 127 | if !offsets.Contains(e.Offset) { 128 | if e.Offset != currentOffset { 129 | fmt.Printf("Invalid: out-of-order entry %v in clustered archive", e) 130 | } 131 | currentOffset += uint64(e.Length) 132 | } 133 | } 134 | }) 135 | 136 | if err != 
nil { 137 | return err 138 | } 139 | 140 | if uint64(addressedTiles) != header.AddressedTilesCount { 141 | return fmt.Errorf("invalid: header AddressedTilesCount=%v but %v tiles addressed", header.AddressedTilesCount, addressedTiles) 142 | } 143 | 144 | if uint64(tileEntries) != header.TileEntriesCount { 145 | return fmt.Errorf("invalid: header TileEntriesCount=%v but %v tile entries", header.TileEntriesCount, tileEntries) 146 | } 147 | 148 | if offsets.GetCardinality() != header.TileContentsCount { 149 | return fmt.Errorf("invalid: header TileContentsCount=%v but %v tile contents", header.TileContentsCount, offsets.GetCardinality()) 150 | } 151 | 152 | if z, _, _ := IDToZxy(minTileID); z != header.MinZoom { 153 | return fmt.Errorf("invalid: header MinZoom=%v does not match min tile z %v", header.MinZoom, z) 154 | } 155 | 156 | if z, _, _ := IDToZxy(maxTileID); z != header.MaxZoom { 157 | return fmt.Errorf("invalid: header MaxZoom=%v does not match max tile z %v", header.MaxZoom, z) 158 | } 159 | 160 | if !(header.CenterZoom >= header.MinZoom && header.CenterZoom <= header.MaxZoom) { 161 | return fmt.Errorf("invalid: header CenterZoom=%v not within MinZoom/MaxZoom", header.CenterZoom) 162 | } 163 | 164 | if header.MinLonE7 >= header.MaxLonE7 || header.MinLatE7 >= header.MaxLatE7 { 165 | return fmt.Errorf("Invalid: bounds has area <= 0: clients may not display tiles correctly") 166 | } 167 | 168 | fmt.Printf("Completed verify in %v.\n", time.Since(start)) 169 | return nil 170 | } 171 | --------------------------------------------------------------------------------