├── Version.config
├── lib
├── cunit
│ ├── .gitignore
│ ├── cu_tester.h
│ ├── Makefile
│ ├── data
│ │ ├── simple-schema-no-intensity.xml
│ │ ├── simple-schema-xy.xml
│ │ ├── simple-schema-empty-description.xml
│ │ ├── simple-schema-laz.xml
│ │ ├── simple-schema-laz-multiple-dim.xml
│ │ ├── simple-schema-xym.xml
│ │ ├── simple-schema-xyz.xml
│ │ ├── simple-schema.xml
│ │ ├── simple-schema-fine.xml
│ │ ├── simple-schema-missing-dimension.xml
│ │ ├── simple-schema-no-name.xml
│ │ ├── simple-schema-empty-name.xml
│ │ └── simple-schema-xyzm.xml
│ └── cu_pc_util.c
├── pc_config.h.in
├── Makefile
├── lazperf_adapter.h
├── TODO.md
├── sort_r
│ └── README.md
├── lazperf_adapter.hpp
├── stringbuffer.h
├── pc_pointlist.c
├── pc_mem.c
├── pc_dimstats.c
├── pc_stats.c
├── pc_val.c
├── .clang-format
├── doc
├── _static
│ ├── logo
│ │ └── favicon.ico
│ ├── breathe.css
│ └── sphinx.css
├── copyright.rst
├── functions
│ ├── index.rst
│ ├── schema.rst
│ ├── wkb.rst
│ ├── points.rst
│ ├── utils.rst
│ └── postgis.rst
├── tutorials
│ ├── index.rst
│ ├── compression.rst
│ └── storing.rst
├── community.rst
├── concepts
│ ├── index.rst
│ ├── tables.rst
│ ├── objects.rst
│ ├── compressions.rst
│ ├── schemas.rst
│ └── binary.rst
├── _templates
│ └── footer.html
├── update.rst
├── faq.rst
├── index.rst
├── install.rst
├── Makefile
├── embed.py
└── quickstart.rst
├── pgsql
├── .gitignore
├── sql
│ ├── schema.sql
│ └── pointcloud_columns.sql
├── sqldefines.h.in
├── expected
│ ├── schema.out
│ └── pointcloud_columns.out
├── pointcloud.control.in
├── META.json
├── Makefile.in
├── pc_editor.c
└── pc_pgsql.h
├── tools
├── valgrind.sh
├── code_layout.sh
├── install_lazperf.sh
├── build_install.sh
└── benchmark_compression
│ ├── getsize.sql
│ ├── compression_benchmark.sh
│ ├── pointcloud-laz.sql
│ ├── pointcloud.sql
│ └── pointcloud-dim.sql
├── .gitattributes
├── .github
├── scripts
│ ├── pgpointcloud.sh
│ ├── test_dump_restore.sh
│ ├── postgresql_postgis.sh
│ └── test_dump_restore.sql
└── workflows
│ ├── dockerhub.yml
│ ├── jammy_postgres13_postgis33.yml
│ ├── jammy_postgres14_postgis33.yml
│ ├── jammy_postgres15_postgis33.yml
│ ├── noble_postgres13_postgis33.yml
│ ├── noble_postgres14_postgis33.yml
│ ├── noble_postgres15_postgis33.yml
│ ├── jammy_postgres16_postgis33.yml
│ ├── jammy_postgres17_postgis33.yml
│ ├── noble_postgres16_postgis33.yml
│ ├── noble_postgres17_postgis33.yml
│ ├── website.yml
│ └── code_layout.yml
├── pgsql_postgis
├── pointcloud_postgis.control.in
├── META.json
├── Makefile
└── pointcloud_postgis.sql.in
├── util
└── proc_upgrade.pl
├── Makefile
├── .gitignore
├── config.mk.in
├── docker
├── initdb-pgpointcloud.sh
└── Dockerfile
├── macros
└── ac_proj4_version.m4
├── .cirrus.yml
├── autogen.sh
├── COPYRIGHT
├── README.md
└── NEWS
/Version.config:
--------------------------------------------------------------------------------
1 | 1.2.5
2 |
--------------------------------------------------------------------------------
/lib/cunit/.gitignore:
--------------------------------------------------------------------------------
1 | cu_tester
2 |
--------------------------------------------------------------------------------
/.clang-format:
--------------------------------------------------------------------------------
1 | BasedOnStyle: llvm
2 | BreakBeforeBraces: Allman
3 |
--------------------------------------------------------------------------------
/doc/_static/logo/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pgpointcloud/pointcloud/HEAD/doc/_static/logo/favicon.ico
--------------------------------------------------------------------------------
/pgsql/.gitignore:
--------------------------------------------------------------------------------
1 | regression.out
2 | regression.diffs
3 | results/
4 | tmp_check/
5 | log/
6 | Makefile
7 | sqldefines.h
8 |
--------------------------------------------------------------------------------
/tools/valgrind.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | valgrind --leak-check=full --error-exitcode=1 lib/cunit/cu_tester
6 |
--------------------------------------------------------------------------------
/tools/code_layout.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | clang-format -i -style=file pgsql/*.c pgsql/*.h lib/*.c lib/*.h lib/cunit/*.c lib/*.cpp lib/*.hpp lib/cunit/*.h
4 |
--------------------------------------------------------------------------------
/pgsql/sql/schema.sql:
--------------------------------------------------------------------------------
1 | -- See https://github.com/pgpointcloud/pointcloud/issues/71
2 | set client_min_messages to ERROR;
3 | select '#71', PC_SchemaIsValid(''::xml::text);
4 |
--------------------------------------------------------------------------------
/tools/install_lazperf.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -ex
3 | git clone https://github.com/hobuinc/laz-perf.git
4 | cd laz-perf; git checkout 1.5.0; cmake -DWITH_TESTS=FALSE .; make; sudo make install
5 |
--------------------------------------------------------------------------------
/lib/pc_config.h.in:
--------------------------------------------------------------------------------
1 | #undef LIBXML2_VERSION
2 |
3 | #undef PGSQL_VERSION
4 |
5 | #undef HAVE_LAZPERF
6 |
7 | #undef HAVE_CUNIT
8 |
9 | #undef PROJECT_SOURCE_DIR
10 |
11 | #undef POINTCLOUD_VERSION
12 |
--------------------------------------------------------------------------------
/pgsql/sqldefines.h.in:
--------------------------------------------------------------------------------
1 | #define PGSQL_VERSION @PGSQL_VERSION@
2 | #define POINTCLOUD_VERSION @POINTCLOUD_VERSION@
3 |
4 | #if PGSQL_VERSION >= 96
5 | #define _PARALLEL PARALLEL SAFE
6 | #else
7 | #define _PARALLEL
8 | #endif
9 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.vcproj eol=crlf
2 | *.bat eol=crlf
3 | *.sh eol=lf
4 | *.ac eol=lf
5 | *.h.in eol=lf
6 | *.h eol=lf
7 | GNUmakefile.in eol=lf
8 | *_expected eol=lf
9 | *.dmp eol=lf
10 |
--------------------------------------------------------------------------------
/tools/build_install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if [ -f config.mk ]; then
6 | make clean maintainer-clean
7 | fi
8 |
9 | ./autogen.sh
10 | ./configure CFLAGS="-Wall -Werror -O2 -g" $@
11 | make
12 | sudo make install
13 |
--------------------------------------------------------------------------------
/doc/copyright.rst:
--------------------------------------------------------------------------------
1 | .. _license:
2 |
3 | ******************************************************************************
4 | License
5 | ******************************************************************************
6 |
7 | .. include:: ../COPYRIGHT
8 |
--------------------------------------------------------------------------------
/.github/scripts/pgpointcloud.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | sh ./tools/build_install.sh
6 | sh ./tools/install_lazperf.sh
7 | sh ./tools/build_install.sh --with-lazperf=/usr/local
8 | make check
9 | sh ./tools/valgrind.sh
10 | make installcheck
11 |
--------------------------------------------------------------------------------
/pgsql/expected/schema.out:
--------------------------------------------------------------------------------
1 | -- See https://github.com/pgpointcloud/pointcloud/issues/71
2 | set client_min_messages to ERROR;
3 | select '#71', PC_SchemaIsValid(''::xml::text);
4 | ?column? | pc_schemaisvalid
5 | ----------+------------------
6 | #71 | f
7 | (1 row)
8 |
9 |
--------------------------------------------------------------------------------
/.github/scripts/test_dump_restore.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 |
3 | set -e
4 |
5 | createdb test
6 | psql test < .github/scripts/test_dump_restore.sql
7 | pg_dump test -Fp > dump.sql
8 | cat dump.sql
9 | createdb test_restore
10 | psql -v ON_ERROR_STOP=1 test_restore < dump.sql
11 |
--------------------------------------------------------------------------------
/pgsql/pointcloud.control.in:
--------------------------------------------------------------------------------
1 | # pointcloud extension
2 | comment = 'data type for lidar point clouds'
3 | default_version = '#POINTCLOUD_VERSION#'
4 | module_pathname = '$libdir/pointcloud-#POINTCLOUD_VERSION_MAJOR#'
5 | relocatable = false
6 | superuser = true
7 | #requires = 'postgis'
8 |
--------------------------------------------------------------------------------
/pgsql_postgis/pointcloud_postgis.control.in:
--------------------------------------------------------------------------------
1 | # pointcloud postgis integration extension
2 | comment = 'integration for pointcloud LIDAR data and PostGIS geometry data'
3 | default_version = '@POINTCLOUD_VERSION@'
4 | relocatable = false
5 | superuser = false
6 | requires = 'postgis, pointcloud'
7 |
--------------------------------------------------------------------------------
/pgsql/sql/pointcloud_columns.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO pointcloud_formats (pcid, srid) VALUES (777, 666);
2 | CREATE TABLE pc1(p pcpoint);
3 | CREATE TABLE pc2(p pcpoint);
4 | DELETE FROM pointcloud_formats WHERE pcid = 777;
5 | SELECT * from pointcloud_columns ORDER BY 1,2,3,4;
6 | DROP TABLE pc1;
7 | DROP TABLE pc2;
8 |
--------------------------------------------------------------------------------
/tools/benchmark_compression/getsize.sql:
--------------------------------------------------------------------------------
1 | SELECT
2 | relname as "Table",
3 | pg_size_pretty(pg_total_relation_size(relid)) As "Size",
4 | pg_size_pretty(pg_total_relation_size(relid) - pg_relation_size(relid)) as "External Size"
5 | FROM pg_catalog.pg_statio_user_tables ORDER BY pg_total_relation_size(relid) DESC;
6 |
--------------------------------------------------------------------------------
/util/proc_upgrade.pl:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2015 Boundless
2 |
3 | #!/usr/bin/perl -w
4 |
5 | eval "exec perl -w $0 $@"
6 | if (0);
7 |
8 | local $/;
9 | local $sql = ;
10 | $sql =~ s/\nCREATE TYPE[^;]*;//gs;
11 | $sql =~ s/\nCREATE AGGREGATE[^;]*;//gs;
12 | $sql =~ s/\nCREATE CAST[^;]*;//gs;
13 |
14 | print $sql;
15 |
--------------------------------------------------------------------------------
/tools/benchmark_compression/compression_benchmark.sh:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 |
3 | DB=compression_benchmark
4 |
5 | createdb $DB
6 |
7 | psql -d $DB -f pointcloud.sql > /dev/null 2>&1
8 | psql -d $DB -f pointcloud-laz.sql > /dev/null 2>&1
9 | psql -d $DB -f pointcloud-dim.sql > /dev/null 2>&1
10 | psql -d $DB -f getsize.sql
11 |
12 | dropdb $DB
13 |
--------------------------------------------------------------------------------
/doc/functions/index.rst:
--------------------------------------------------------------------------------
1 | .. _functions:
2 |
3 | ******************************************************************************
4 | Functions
5 | ******************************************************************************
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 |
10 | schema
11 | points
12 | patchs
13 | wkb
14 | postgis
15 | utils
16 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 |
2 | all install uninstall noop clean distclean:
3 | $(MAKE) -C lib $@
4 | $(MAKE) -C pgsql $@
5 | $(MAKE) -C pgsql_postgis $@
6 |
7 | check:
8 | $(MAKE) -C lib $@
9 |
10 | installcheck:
11 | $(MAKE) -C pgsql $@
12 |
13 | maintainer-clean: clean
14 | rm -f config.log config.mk config.status lib/pc_config.h configure
15 | rm -rf autom4te.cache build
16 |
--------------------------------------------------------------------------------
/doc/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | .. _tutorials:
2 |
3 | ******************************************************************************
4 | Tutorials
5 | ******************************************************************************
6 |
7 | This chapter provides some basic tutorials on how to deploy and use
8 | pgPointcloud.
9 |
10 | .. toctree::
11 | :maxdepth: 1
12 |
13 | storing
14 | compression
15 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.o
2 | *.a
3 | *.so
4 | autom4te.cache
5 | configure
6 | config.log
7 | confdefs.h
8 | aclocal.m4
9 | config.status
10 | config.mk
11 | lib/pc_config.h
12 |
13 | Makefile
14 | */Makefile
15 | */*/Makefile
16 | pgsql/pointcloud.control
17 | pgsql/pointcloud--*
18 | pgsql_postgis/pointcloud_postgis.control
19 | pgsql_postgis/pointcloud_postgis--*
20 |
21 | pgsql/pc_access.bc
22 | pgsql/pc_editor.bc
23 | pgsql/pc_inout.bc
24 | pgsql/pc_pgsql.bc
25 |
26 | venv
27 | doc/build
28 | doc/__pycache__/
29 |
30 | **/.DS_Store
31 |
--------------------------------------------------------------------------------
/pgsql/expected/pointcloud_columns.out:
--------------------------------------------------------------------------------
1 | INSERT INTO pointcloud_formats (pcid, srid) VALUES (777, 666);
2 | CREATE TABLE pc1(p pcpoint);
3 | CREATE TABLE pc2(p pcpoint);
4 | DELETE FROM pointcloud_formats WHERE pcid = 777;
5 | SELECT * from pointcloud_columns ORDER BY 1,2,3,4;
6 | schema | table | column | pcid | srid | type
7 | --------+-------+--------+------+------+---------
8 | public | pc1 | p | | | pcpoint
9 | public | pc2 | p | | | pcpoint
10 | (2 rows)
11 |
12 | DROP TABLE pc1;
13 | DROP TABLE pc2;
14 |
--------------------------------------------------------------------------------
/config.mk.in:
--------------------------------------------------------------------------------
1 | CC = @CC@
2 | CXX = @CXX@
3 | CFLAGS += @CFLAGS@
4 | CXXFLAGS += -fPIC -std=c++11 @CXXFLAGS@
5 | SQLPP = @SQLPP@
6 |
7 | XML2_CPPFLAGS = @XML2_CPPFLAGS@
8 | XML2_LDFLAGS = @XML2_LDFLAGS@
9 |
10 | ZLIB_CPPFLAGS = @ZLIB_CPPFLAGS@
11 | ZLIB_LDFLAGS = @ZLIB_LDFLAGS@
12 |
13 | CUNIT_CPPFLAGS = @CUNIT_CPPFLAGS@
14 | CUNIT_LDFLAGS = @CUNIT_LDFLAGS@
15 |
16 | PG_CONFIG = @PG_CONFIG@
17 | PGXS = @PGXS@
18 |
19 | LIB_A = libpc.a
20 | LIB_A_LAZPERF = liblazperf.a
21 |
22 | LAZPERF_STATUS = @LAZPERF_STATUS@
23 | LAZPERF_CPPFLAGS = @LAZPERF_CPPFLAGS@
24 |
25 | PGSQL_MAJOR_VERSION = @PGSQL_MAJOR_VERSION@
26 |
--------------------------------------------------------------------------------
/doc/community.rst:
--------------------------------------------------------------------------------
1 | .. _community:
2 |
3 | ******************************************************************************
4 | Community
5 | ******************************************************************************
6 |
7 | pgPointcloud's community interacts through `Mailing List`_ and `GitHub`_.
8 | The `Mailing List`_ communication channel is for general questions and
9 | feedback. The `GitHub`_ communication channel is for development activities,
10 | bug reports, and testing.
11 |
12 | .. _`Mailing List`: http://lists.osgeo.org/mailman/listinfo/pgpointcloud
13 | .. _`GitHub`: https://github.com/pgpointcloud/pointcloud
14 |
--------------------------------------------------------------------------------
/doc/concepts/index.rst:
--------------------------------------------------------------------------------
1 | .. _concepts:
2 |
3 | ******************************************************************************
4 | Concepts
5 | ******************************************************************************
6 |
7 | LIDAR sensors quickly produce millions of points with large numbers of
8 | variables measured on each point. The challenge for a point cloud database
9 | extension is efficiently storing this data while allowing high fidelity access
10 | to the many variables stored.
11 |
12 | .. toctree::
13 | :maxdepth: 2
14 |
15 | schemas
16 | objects
17 | tables
18 | compressions
19 | binary
20 |
--------------------------------------------------------------------------------
/doc/_templates/footer.html:
--------------------------------------------------------------------------------
1 | {% extends "!footer.html" %}
2 |
3 | {% block extrafooter %}
4 |
15 | {{ super() }}
16 | {% endblock %}
17 |
--------------------------------------------------------------------------------
/doc/_static/breathe.css:
--------------------------------------------------------------------------------
1 |
2 | /* -- breathe specific styles ----------------------------------------------- */
3 |
4 | /* So enum value descriptions are displayed inline to the item */
5 | .breatheenumvalues li tt + p {
6 | display: inline;
7 | }
8 |
9 | /* So parameter descriptions are displayed inline to the item */
10 | .breatheparameterlist li tt + p {
11 | display: inline;
12 | }
13 |
14 |
15 |
16 | .plugin .admonition-title{
17 | background: #b92938;
18 | }
19 |
20 | .plugin .admonition-title::before{
21 | content: '\f1e6';
22 | }
23 |
24 | .embed .admonition-title::before{
25 | content: '\f00c';
26 | }
27 |
28 | .streamable .admonition-title::before{
29 | content: '\f141';
30 | }
31 |
--------------------------------------------------------------------------------
/.github/workflows/dockerhub.yml:
--------------------------------------------------------------------------------
1 | name: "Docker image latest"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | jobs:
9 | build:
10 | name: Docker image latest
11 | runs-on: ubuntu-22.04
12 | steps:
13 | - name: Check out repository code
14 | uses: actions/checkout@v4
15 |
16 | - name: Set up Docker Buildx
17 | uses: docker/setup-buildx-action@v3
18 |
19 | - name: Log in to Docker Hub
20 | uses: docker/login-action@v3
21 | with:
22 | username: ${{ secrets.DOCKERHUB_USERNAME }}
23 | password: ${{ secrets.DOCKERHUB_PASSWORD }}
24 |
25 | - name: Build and push Docker image
26 | uses: docker/build-push-action@v5
27 | with:
28 | context: ./docker
29 | push: true
30 | tags: pgpointcloud/pointcloud:latest
31 |
--------------------------------------------------------------------------------
/.github/workflows/jammy_postgres13_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-22.04] PostgreSQL 13 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | name: Building and testing
12 | runs-on: ubuntu-22.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install PostgreSQL and PostGIS
17 | env:
18 | POSTGRESQL_VERSION: 13
19 | POSTGIS_VERSION: 3
20 | run: .github/scripts/postgresql_postgis.sh
21 | - name: Install and check PgPointCloud
22 | run: .github/scripts/pgpointcloud.sh
23 | - name: Error
24 | if: ${{ failure() }}
25 | run: cat pgsql/regression.diffs
26 | - name: Dump and restore tests
27 | run: .github/scripts/test_dump_restore.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/jammy_postgres14_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-22.04] PostgreSQL 14 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | name: Building and testing
12 | runs-on: ubuntu-22.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install PostgreSQL and PostGIS
17 | env:
18 | POSTGRESQL_VERSION: 14
19 | POSTGIS_VERSION: 3
20 | run: .github/scripts/postgresql_postgis.sh
21 | - name: Install and check PgPointCloud
22 | run: .github/scripts/pgpointcloud.sh
23 | - name: Error
24 | if: ${{ failure() }}
25 | run: cat pgsql/regression.diffs
26 | - name: Dump and restore tests
27 | run: .github/scripts/test_dump_restore.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/jammy_postgres15_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-22.04] PostgreSQL 15 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | name: Building and testing
12 | runs-on: ubuntu-22.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install PostgreSQL and PostGIS
17 | env:
18 | POSTGRESQL_VERSION: 15
19 | POSTGIS_VERSION: 3
20 | run: .github/scripts/postgresql_postgis.sh
21 | - name: Install and check PgPointCloud
22 | run: .github/scripts/pgpointcloud.sh
23 | - name: Error
24 | if: ${{ failure() }}
25 | run: cat pgsql/regression.diffs
26 | - name: Dump and restore tests
27 | run: .github/scripts/test_dump_restore.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/noble_postgres13_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-24.04] PostgreSQL 13 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | name: Building and testing
12 | runs-on: ubuntu-24.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install PostgreSQL and PostGIS
17 | env:
18 | POSTGRESQL_VERSION: 13
19 | POSTGIS_VERSION: 3
20 | run: .github/scripts/postgresql_postgis.sh
21 | - name: Install and check PgPointCloud
22 | run: .github/scripts/pgpointcloud.sh
23 | - name: Error
24 | if: ${{ failure() }}
25 | run: cat pgsql/regression.diffs
26 | - name: Dump and restore tests
27 | run: .github/scripts/test_dump_restore.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/noble_postgres14_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-24.04] PostgreSQL 14 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | name: Building and testing
12 | runs-on: ubuntu-24.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install PostgreSQL and PostGIS
17 | env:
18 | POSTGRESQL_VERSION: 14
19 | POSTGIS_VERSION: 3
20 | run: .github/scripts/postgresql_postgis.sh
21 | - name: Install and check PgPointCloud
22 | run: .github/scripts/pgpointcloud.sh
23 | - name: Error
24 | if: ${{ failure() }}
25 | run: cat pgsql/regression.diffs
26 | - name: Dump and restore tests
27 | run: .github/scripts/test_dump_restore.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/noble_postgres15_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-24.04] PostgreSQL 15 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | build:
11 | name: Building and testing
12 | runs-on: ubuntu-24.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install PostgreSQL and PostGIS
17 | env:
18 | POSTGRESQL_VERSION: 15
19 | POSTGIS_VERSION: 3
20 | run: .github/scripts/postgresql_postgis.sh
21 | - name: Install and check PgPointCloud
22 | run: .github/scripts/pgpointcloud.sh
23 | - name: Error
24 | if: ${{ failure() }}
25 | run: cat pgsql/regression.diffs
26 | - name: Dump and restore tests
27 | run: .github/scripts/test_dump_restore.sh
28 |
--------------------------------------------------------------------------------
/.github/workflows/jammy_postgres16_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-22.04] PostgreSQL 16 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | pull_request:
9 |
10 | jobs:
11 | build:
12 | name: Building and testing
13 | runs-on: ubuntu-22.04
14 | steps:
15 | - name: Check out repository code
16 | uses: actions/checkout@v2
17 | - name: Install PostgreSQL and PostGIS
18 | env:
19 | POSTGRESQL_VERSION: 16
20 | POSTGIS_VERSION: 3
21 | run: .github/scripts/postgresql_postgis.sh
22 | - name: Install and check PgPointCloud
23 | run: .github/scripts/pgpointcloud.sh
24 | - name: Error
25 | if: ${{ failure() }}
26 | run: cat pgsql/regression.diffs
27 | - name: Dump and restore tests
28 | run: .github/scripts/test_dump_restore.sh
29 |
--------------------------------------------------------------------------------
/.github/workflows/jammy_postgres17_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-22.04] PostgreSQL 17 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | pull_request:
9 |
10 | jobs:
11 | build:
12 | name: Building and testing
13 | runs-on: ubuntu-22.04
14 | steps:
15 | - name: Check out repository code
16 | uses: actions/checkout@v2
17 | - name: Install PostgreSQL and PostGIS
18 | env:
19 | POSTGRESQL_VERSION: 17
20 | POSTGIS_VERSION: 3
21 | run: .github/scripts/postgresql_postgis.sh
22 | - name: Install and check PgPointCloud
23 | run: .github/scripts/pgpointcloud.sh
24 | - name: Error
25 | if: ${{ failure() }}
26 | run: cat pgsql/regression.diffs
27 | - name: Dump and restore tests
28 | run: .github/scripts/test_dump_restore.sh
29 |
--------------------------------------------------------------------------------
/.github/workflows/noble_postgres16_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-24.04] PostgreSQL 16 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | pull_request:
9 |
10 | jobs:
11 | build:
12 | name: Building and testing
13 | runs-on: ubuntu-24.04
14 | steps:
15 | - name: Check out repository code
16 | uses: actions/checkout@v2
17 | - name: Install PostgreSQL and PostGIS
18 | env:
19 | POSTGRESQL_VERSION: 16
20 | POSTGIS_VERSION: 3
21 | run: .github/scripts/postgresql_postgis.sh
22 | - name: Install and check PgPointCloud
23 | run: .github/scripts/pgpointcloud.sh
24 | - name: Error
25 | if: ${{ failure() }}
26 | run: cat pgsql/regression.diffs
27 | - name: Dump and restore tests
28 | run: .github/scripts/test_dump_restore.sh
29 |
--------------------------------------------------------------------------------
/.github/workflows/noble_postgres17_postgis33.yml:
--------------------------------------------------------------------------------
1 | name: "[ubuntu-24.04] PostgreSQL 17 and PostGIS 3.3"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | pull_request:
9 |
10 | jobs:
11 | build:
12 | name: Building and testing
13 | runs-on: ubuntu-24.04
14 | steps:
15 | - name: Check out repository code
16 | uses: actions/checkout@v2
17 | - name: Install PostgreSQL and PostGIS
18 | env:
19 | POSTGRESQL_VERSION: 17
20 | POSTGIS_VERSION: 3
21 | run: .github/scripts/postgresql_postgis.sh
22 | - name: Install and check PgPointCloud
23 | run: .github/scripts/pgpointcloud.sh
24 | - name: Error
25 | if: ${{ failure() }}
26 | run: cat pgsql/regression.diffs
27 | - name: Dump and restore tests
28 | run: .github/scripts/test_dump_restore.sh
29 |
--------------------------------------------------------------------------------
/.github/workflows/website.yml:
--------------------------------------------------------------------------------
1 | name: "Website"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | code:
11 | name: Website
12 | runs-on: ubuntu-22.04
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v2
16 | - name: Install sphinx
17 | run: sudo apt-get install python3-virtualenv virtualenv
18 | - name: Build html documentation
19 | run: virtualenv -p /usr/bin/python3 venv && source venv/bin/activate && pip install sphinx sphinx_rtd_theme && cd doc && make html && cd -
20 | - name: Deploy
21 | uses: peaceiris/actions-gh-pages@v3
22 | if: ${{ github.ref == 'refs/heads/master' }}
23 | with:
24 | deploy_key: ${{ secrets.ACTIONS_DEPLOY_KEY }}
25 | publish_branch: gh-pages
26 | publish_dir: ./doc/build/html/
27 |
--------------------------------------------------------------------------------
/docker/initdb-pgpointcloud.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ##### Script based on initialization script from postgis-docker #####
4 |
5 | set -e
6 |
7 | # Perform all actions as $POSTGRES_USER
8 | export PGUSER="$POSTGRES_USER"
9 |
10 | # Create the 'template_postgis' template db
11 | "${psql[@]}" <<- 'EOSQL'
12 | CREATE DATABASE template_postgis IS_TEMPLATE true;
13 | EOSQL
14 |
15 | # Load PostGIS into both template_database and $POSTGRES_DB
16 | for DB in template_postgis "$POSTGRES_DB"; do
17 | echo "Loading PostGIS and POINTCLOUD extensions into $DB"
18 | "${psql[@]}" --dbname="$DB" <<-'EOSQL'
19 | CREATE EXTENSION IF NOT EXISTS postgis;
20 | CREATE EXTENSION IF NOT EXISTS postgis_topology;
21 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;
22 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;
23 | CREATE EXTENSION IF NOT EXISTS pointcloud;
24 | CREATE EXTENSION IF NOT EXISTS pointcloud_postgis;
25 | EOSQL
26 | done
--------------------------------------------------------------------------------
/lib/Makefile:
--------------------------------------------------------------------------------
1 |
2 | include ../config.mk
3 |
4 | CPPFLAGS = $(XML2_CPPFLAGS) $(ZLIB_CPPFLAGS) $(LAZPERF_CPPFLAGS)
5 | LDFLAGS = $(XML2_LDFLAGS) $(ZLIB_LDFLAGS)
6 | CFLAGS += -fPIC
7 |
8 | OBJS = \
9 | pc_bytes.o \
10 | pc_dimstats.o \
11 | pc_filter.o \
12 | pc_mem.o \
13 | pc_patch.o \
14 | pc_patch_dimensional.o \
15 | pc_patch_uncompressed.o \
16 | pc_point.o \
17 | pc_pointlist.o \
18 | pc_schema.o \
19 | pc_sort.o \
20 | pc_stats.o \
21 | pc_util.o \
22 | pc_val.o \
23 | stringbuffer.o \
24 | hashtable.o \
25 | pc_patch_lazperf.o
26 |
27 | OBJS_LAZPERF = \
28 | lazperf_adapter.o
29 |
30 | all: $(LIB_A) $(LIB_A_LAZPERF)
31 | $(MAKE) -C cunit $@
32 |
33 | $(LIB_A): $(OBJS)
34 | ar rs $@ $^
35 |
36 | $(LIB_A_LAZPERF): $(OBJS_LAZPERF)
37 | ar rs $@ $^
38 |
39 | clean:
40 | @rm -f $(OBJS) $(LIB_A) $(OBJS_LAZPERF) $(LIB_A_LAZPERF)
41 | $(MAKE) -C cunit $@
42 |
43 | install uninstall:
44 | @echo "No install target in lib"
45 |
46 | check:
47 | $(MAKE) -C cunit $@
48 |
--------------------------------------------------------------------------------
/macros/ac_proj4_version.m4:
--------------------------------------------------------------------------------
1 | dnl **********************************************************************
2 | dnl * ac_proj4_version.m4
3 | dnl *
4 | dnl * Copyright 2008 Mark Cave-Ayland
5 | dnl *
6 | dnl **********************************************************************
7 |
8 | dnl
9 | dnl Return the PROJ.4 version number
10 | dnl
11 |
12 | AC_DEFUN([AC_PROJ_VERSION], [
13 | AC_RUN_IFELSE(
14 | [AC_LANG_PROGRAM([
15 | #ifdef HAVE_STDINT_H
16 | #include <stdint.h>
17 | #endif
18 | #include "proj_api.h"
19 | ],
20 | [
21 | FILE *fp;
22 |
23 | fp = fopen("conftest.out", "w");
24 | fprintf(fp, "%d\n", PJ_VERSION);
25 | fclose(fp)])
26 | ],
27 | [
28 | dnl The program ran successfully, so return the version number in the form MAJORMINOR
29 | $1=`cat conftest.out | sed 's/\([[0-9]]\)\([[0-9]]\)\([[0-9]]\)/\1\2/'`
30 | ],
31 | [
32 | dnl The program failed so return an empty variable
33 | $1=""
34 | ]
35 | )
36 | ])
37 |
38 |
--------------------------------------------------------------------------------
/lib/cunit/cu_tester.h:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * cu_tester.h
3 | *
4 | * Testing harness for PgSQL PointClouds header
5 | *
6 | * Portions Copyright (c) 2012, OpenGeo
7 | *
8 | ***********************************************************************/
9 |
10 | #ifndef _CU_TESTER_H
11 | #define _CU_TESTER_H
12 |
13 | #include "pc_api_internal.h"
14 |
15 | #define PC_TEST(test_func) \
16 | { \
17 | #test_func, test_func \
18 | }
19 | #define MAX_CUNIT_MSG_LENGTH 512
20 | #define CU_ASSERT_SUCCESS(rv) CU_ASSERT((rv) == PC_SUCCESS)
21 | #define CU_ASSERT_FAILURE(rv) CU_ASSERT((rv) == PC_FAILURE)
22 |
23 | /* Read a file (XML) into a cstring */
24 | char *file_to_str(const char *fname);
25 |
26 | /* Resets cu_error_msg back to blank. */
27 | void cu_error_msg_reset(void);
28 |
29 | #endif
30 |
--------------------------------------------------------------------------------
/lib/lazperf_adapter.h:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * lazperf_adapter.h
3 | *
4 | * PgSQL Pointcloud is free and open source software provided
5 | * by the Government of Canada
6 | *
7 | * Copyright (c) 2013 Natural Resources Canada
8 | * Copyright (c) 2013 OpenGeo
9 | * Copyright (c) 2017 Oslandia
10 | *
11 | ***********************************************************************/
12 |
13 | #ifndef _LAZPERF_ADAPTER_H
14 | #define _LAZPERF_ADAPTER_H
15 |
 16 | #include <stddef.h>
 17 | #include <stdint.h>
18 |
19 | #include "pc_api_internal.h"
20 |
21 | #ifdef __cplusplus
22 | extern "C"
23 | {
24 | #endif
25 | size_t lazperf_compress_from_uncompressed(const PCPATCH_UNCOMPRESSED *pa,
26 | uint8_t **compressed);
27 | size_t lazperf_uncompress_from_compressed(const PCPATCH_LAZPERF *pa,
28 | uint8_t **decompressed);
29 | #ifdef __cplusplus
30 | }
31 | #endif
32 |
33 | #endif /* _LAZPERF_ADAPTER_H */
34 |
--------------------------------------------------------------------------------
/doc/functions/schema.rst:
--------------------------------------------------------------------------------
1 | .. _schema:
2 |
3 | ********************************************************************************
4 | Schema
5 | ********************************************************************************
6 |
7 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 | PC_SchemaGetNDims
9 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | :PC_SchemaGetNDims(pcid integer) returns integer:
12 |
13 | Return the number of dimensions in the corresponding schema.
14 |
15 | .. code-block::
16 |
17 | SELECT PC_SchemaGetNDims(1);
18 |
19 | 18
20 |
21 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 | PC_SchemaIsValid
23 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 |
25 | :PC_SchemaIsValid(xml text) returns boolean:
26 |
27 | Return `true` if the pointcloud schema is valid.
28 |
29 | .. code-block::
30 |
31 | SELECT PC_SchemaIsValid(schema) FROM pointcloud_formats LIMIT 1;
32 |
33 | t
34 |
--------------------------------------------------------------------------------
/.cirrus.yml:
--------------------------------------------------------------------------------
1 | freebsd_instance:
2 | image: freebsd-13-0-release-amd64
3 | cpu: 8
4 | memory: 16G
5 |
6 | task:
 7 | only_if: $CIRRUS_BRANCH != 'main'
8 | install_script:
9 | - sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf
10 | - ASSUME_ALWAYS_YES=yes pkg bootstrap -f
11 | - pkg install -y postgresql13-server postgresql13-client gmake autoconf automake pkgconf iconv postgis31 cunit
12 |
13 | configure_script:
14 | - ./autogen.sh
15 | - ./configure --without-lazperf
16 | - service postgresql oneinitdb
17 | - service postgresql onestart
18 | - psql -U postgres -c "CREATE ROLE runner SUPERUSER LOGIN CREATEDB;"
19 | build_script:
20 | - gmake -j8
21 | check_script:
22 | - gmake -j8 check
23 | - gmake -j8 install
24 | matrix:
25 | - name: freebsd12-amd64
26 | freebsd_instance:
27 | image: freebsd-12-2-release-amd64
28 | - name: freebsd13-amd64
29 | freebsd_instance:
30 | image: freebsd-13-0-release-amd64
31 |
--------------------------------------------------------------------------------
/pgsql/META.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "pointcloud",
3 | "abstract": "LIDAR data support type and functions",
4 | "description": "Point and Patch types for LIDAR storage. Aggregate and access functions. JSON and binary serializations. Compression.",
5 | "version": "1.0.0",
6 | "release_status": "unstable",
7 | "maintainer": "Paul Ramsey",
8 | "license": "bsd",
9 | "provides": {
10 | "pointcloud": {
11 | "abstract": "LIDAR point and patch types and functions",
12 | "version": "1.0.0",
13 | "file": "",
14 | "docfile": ""
15 | }
 16 | },
17 | "prereqs": {
18 | "runtime": {
19 | "requires": {
20 | }
21 | }
22 | },
23 | "generated_by": "Paul Ramsey",
24 | "resources": {
25 | "bugtracker": {
26 | "web": "https://github.com/pgpointcloud/pointcloud"
27 | },
28 | "repository": {
29 | "url": "",
30 | "web": "https://github.com/pgpointcloud/pointcloud",
31 | "type": "git"
32 | }
33 | },
34 | "meta-spec": {
35 | "version": "1.0.0",
36 | "url": "http://pgxn.org/meta/spec.txt"
37 | },
38 | "tags": [
39 | "gis", "lidar"
40 | ]
41 | }
42 |
--------------------------------------------------------------------------------
/pgsql_postgis/META.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "pointcloud_postgis",
3 | "abstract": "PostGIS integration functions for Pointcloud",
 4 | "description": "Provides GIS overlay and vector/raster hooks for point cloud data.",
5 | "version": "1.0.0",
6 | "release_status": "unstable",
7 | "maintainer": "Paul Ramsey",
8 | "license": "bsd",
9 | "provides": {
10 | "pointcloud_postgis": {
11 | "abstract": "PostGIS integration for Pointcloud",
12 | "version": "1.0.0",
13 | "file": "",
14 | "docfile": ""
15 | }
 16 | },
17 | "prereqs": {
18 | "runtime": {
19 | "requires": {
20 | "postgis": "2.0.0",
21 | "pointcloud": "1.0.0"
22 | }
23 | }
24 | },
25 | "generated_by": "Paul Ramsey",
26 | "resources": {
27 | "bugtracker": {
28 | "web": "https://github.com/pgpointcloud/pointcloud"
29 | },
30 | "repository": {
31 | "url": "",
32 | "web": "https://github.com/pgpointcloud/pointcloud",
33 | "type": "git"
34 | }
35 | },
36 | "meta-spec": {
37 | "version": "1.0.0",
38 | "url": "http://pgxn.org/meta/spec.txt"
39 | },
40 | "tags": [
41 | "gis", "lidar", "spatial", "geometry"
42 | ]
43 | }
44 |
--------------------------------------------------------------------------------
/lib/cunit/Makefile:
--------------------------------------------------------------------------------
1 |
2 | include ../../config.mk
3 |
4 | CPPFLAGS = $(XML2_CPPFLAGS) $(CUNIT_CPPFLAGS) $(ZLIB_CPPFLAGS) -I..
5 | LDFLAGS = $(XML2_LDFLAGS) $(CUNIT_LDFLAGS) $(ZLIB_LDFLAGS)
6 |
7 | EXE = cu_tester
8 |
9 | # ADD YOUR NEW TEST FILE HERE (1/1)
10 | OBJS = \
11 | cu_tester.o \
12 | cu_pc_bytes.o \
13 | cu_pc_schema.o \
14 | cu_pc_point.o \
15 | cu_pc_patch.o \
16 | cu_pc_patch_lazperf.o \
17 | cu_pc_sort.o \
18 | cu_pc_util.o
19 |
20 | ifeq ($(CUNIT_LDFLAGS),)
21 | # No cunit? Emit message and continue
22 |
23 | all:
24 | @echo "CUnit not found, skipping build"
25 |
26 | check:
27 | @echo "CUnit not found, skipping tests"
28 |
29 | else
30 | # Yes cunit? Build tests and run
31 |
32 | # Build the unit tester
33 | all: $(EXE)
34 |
35 | # Build and run the unit tester
36 | check: $(EXE)
37 | @./$(EXE)
38 |
39 | endif
40 |
41 | # Build the main unit test executable
42 | $(EXE): $(OBJS) ../$(LIB_A) ../$(LIB_A_LAZPERF)
43 | $(CC) -o $@ $^ $(LDFLAGS) -lm -lstdc++
44 |
45 | ../$(LIB_A):
46 | $(MAKE) -C .. $(LIB_A)
47 |
48 | ../$(LIB_A_LAZPERF):
49 | $(MAKE) -C .. $(LIB_A_LAZPERF)
50 |
51 | # Clean target
52 | clean:
53 | @rm -f $(OBJS)
54 | @rm -f $(EXE)
55 |
56 |
--------------------------------------------------------------------------------
/.github/workflows/code_layout.yml:
--------------------------------------------------------------------------------
1 | name: "Code layout"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 |
9 | jobs:
10 | c_code_layout:
11 | name: C linter
12 | runs-on: ubuntu-22.04
13 | steps:
14 | - name: Check out repository code
 15 | uses: actions/checkout@v3
16 | - name: Install clang-format
17 | run: sudo apt-get install clang-format
18 | - name: Clang-format check
19 | run: clang-format --dry-run --Werror -style=file pgsql/*.c pgsql/*.h lib/*.c lib/*.cpp lib/*.hpp lib/*.h lib/cunit/*.c lib/cunit/*.h
20 | sql_code_layout:
21 | name: SQL linter
22 | runs-on: ubuntu-22.04
23 | steps:
24 | - name: Checkout code
25 | uses: actions/checkout@v3
26 | - name: Set up Python 3.10
27 | uses: actions/setup-python@v4
28 | with:
29 | python-version: '3.10'
30 | - name: Install SQLFluff
31 | run: |
32 | pip install sqlfluff
33 | - name: Run SQLFluff lint
34 | run: |
35 | sqlfluff lint --dialect postgres ./pgsql/pointcloud.sql.in
36 | sqlfluff lint --dialect postgres ./pgsql_postgis/pointcloud_postgis.sql.in
37 |
--------------------------------------------------------------------------------
/autogen.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | giveup()
4 | {
5 | echo
6 | echo " Something went wrong, giving up!"
7 | echo
8 | exit 1
9 | }
10 |
11 | OSTYPE=`uname -s`
12 |
13 | AUTOCONF=`which autoconf 2>/dev/null`
14 | if [ ! ${AUTOCONF} ]; then
15 | echo "Missing autoconf!"
16 | exit
17 | fi
18 | AUTOCONF_VER=`${AUTOCONF} --version | grep -E "^.*[0-9]$" | sed 's/^.* //'`
19 |
20 | for aclocal in aclocal aclocal-1.10 aclocal-1.9; do
21 | ACLOCAL=`which $aclocal 2>/dev/null`
22 | if test -x "${ACLOCAL}"; then
23 | break;
24 | fi
25 | done
26 | if [ ! ${ACLOCAL} ]; then
27 | echo "Missing aclocal!"
28 | exit
29 | fi
30 | ACLOCAL_VER=`${ACLOCAL} --version | grep -E "^.*[0-9]$" | sed 's/^.* //'`
31 |
32 | echo "* Running $ACLOCAL (${ACLOCAL_VER})"
33 | ${ACLOCAL} -I macros || giveup
34 |
35 | echo "* Running ${AUTOCONF} (${AUTOCONF_VER})"
36 | ${AUTOCONF} || giveup
37 |
38 | if test -f "${PWD}/configure"; then
39 | echo "======================================"
40 | echo "Now you are ready to run './configure'"
41 | echo "======================================"
42 | else
43 | echo " Failed to generate ./configure script!"
44 | giveup
45 | fi
46 |
--------------------------------------------------------------------------------
/.github/scripts/postgresql_postgis.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | set -e
4 |
5 | curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null
6 | echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main $POSTGRESQL_VERSION" |sudo tee /etc/apt/sources.list.d/pgdg.list
7 |
8 | # RAISE priority of pgdg
9 | cat << EOF >> ./pgdg.pref
10 | Package: *
11 | Pin: release o=apt.postgresql.org
12 | Pin-Priority: 600
13 | EOF
14 | sudo mv ./pgdg.pref /etc/apt/preferences.d/
 15 | # Refresh package lists once (the former duplicate "apt update" was redundant)
 16 | sudo apt-get update
17 | sudo apt-get purge postgresql-*
18 | sudo apt-get install -q postgresql-$POSTGRESQL_VERSION postgresql-server-dev-$POSTGRESQL_VERSION postgresql-client-$POSTGRESQL_VERSION libcunit1-dev valgrind g++
19 |
20 | if [ -z "$POSTGIS_VERSION" ]
21 | then
22 | echo "No PostGIS version specified, skipping install of PostGIS"
23 | else
24 | sudo apt-get install postgresql-$POSTGRESQL_VERSION-postgis-$POSTGIS_VERSION
25 | fi
26 |
27 | sudo pg_dropcluster --stop $POSTGRESQL_VERSION main
28 | sudo rm -rf /etc/postgresql/$POSTGRESQL_VERSION /var/lib/postgresql/$POSTGRESQL_VERSION
29 | sudo pg_createcluster -u postgres $POSTGRESQL_VERSION main --start -- --auth-local trust --auth-host trust
30 | sudo /etc/init.d/postgresql start $POSTGRESQL_VERSION || sudo journalctl -xe
31 | sudo -iu postgres psql -c 'CREATE ROLE runner SUPERUSER LOGIN CREATEDB;'
32 |
--------------------------------------------------------------------------------
/pgsql_postgis/Makefile:
--------------------------------------------------------------------------------
1 | # pointcloud
2 |
3 | include ../config.mk
4 |
5 | #MODULE_big = pointcloud_postgis
6 | #OBJS =
7 | SED = sed
8 | EXTENSION = pointcloud_postgis
9 | EXTVERSION=$(shell cat ../Version.config)
10 |
11 | UPGRADABLE = 1.1.0 1.1.1 1.2.0 1.2.1 1.2.2 1.2.3 1.2.4
12 | UPGRADES = \
13 | $(shell echo $(UPGRADABLE) | \
14 | $(SED) 's/^/$(EXTENSION)--/' | \
15 | $(SED) 's/$$/--$(EXTVERSION).sql/' | \
16 | $(SED) 's/ /--$(EXTVERSION).sql $(EXTENSION)--/g') \
17 | $(EXTENSION)--$(EXTVERSION)--$(EXTVERSION)next.sql \
18 | $(EXTENSION)--$(EXTVERSION)next--$(EXTVERSION).sql
19 |
20 | DATA_built = \
21 | $(EXTENSION).control \
22 | $(EXTENSION)--$(EXTVERSION).sql \
23 | $(UPGRADES)
24 | #REGRESS = pointcloud
25 |
26 | # Add in build/link flags for lib
27 | #PG_CPPFLAGS += -I../lib
28 | #SHLIB_LINK += $(filter -lm, $(LIBS)) $(XML2_LDFLAGS) $(ZLIB_LDFLAGS) ../lib/$(LIB_A)
29 |
30 | # We are going to use PGXS for sure
31 | include $(PGXS)
32 |
33 | $(EXTENSION).control: $(EXTENSION).control.in Makefile
34 | $(SED) -e 's/@POINTCLOUD_VERSION@/$(EXTVERSION)/' $< > $@
35 |
36 | $(EXTENSION)--$(EXTVERSION).sql: $(EXTENSION).sql.in Makefile
37 | $(SED) -e 's/@POINTCLOUD_VERSION@/$(EXTVERSION)/' $< > $@
38 |
39 | # NOTE: relies on PERL being defined by PGXS
40 | $(EXTENSION)--%--$(EXTVERSION).sql: $(EXTENSION)--$(EXTVERSION).sql ../util/proc_upgrade.pl
41 | cat $< | ../util/proc_upgrade.pl > $@
42 |
43 | $(EXTENSION)--%--$(EXTVERSION)next.sql: $(EXTENSION)--$(EXTVERSION)next--$(EXTVERSION).sql
44 | ln -f $< $@
45 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-no-intensity.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 0.01
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 0.01
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 0.01
26 |
27 |
28 | dimensional
29 | 4326
30 |
31 |
32 |
--------------------------------------------------------------------------------
/doc/update.rst:
--------------------------------------------------------------------------------
1 | .. _update:
2 |
3 | ******************************************************************************
4 | Update
5 | ******************************************************************************
6 |
7 | pgPointcloud extension
8 | ------------------------------------------------------------------------------
9 |
10 | Once a new version of pgPointcloud installed, you may want to update your
11 | databases where the extension is already in use. The first thing to compare is
12 | the version currently used with versions actually available on your system:
13 |
14 | .. code-block:: sql
15 |
16 | SELECT pc_version();
17 | --> 1.1.1
18 |
19 | SELECT version FROM pg_available_extension_versions WHERE name ='pointcloud';
20 | --> 1.1.1
21 | --> 1.2.1
22 |
23 |
24 | Then you can update to the latest version:
25 |
26 | .. code-block:: sql
27 |
 28 | ALTER EXTENSION pointcloud UPDATE;
 29 | ALTER EXTENSION pointcloud_postgis UPDATE;
30 |
31 | or target a specific version:
32 |
33 | .. code-block:: sql
34 |
35 | ALTER EXTENSION pointcloud UPDATE TO '1.2.1';
36 |
37 | SELECT pc_version();
38 | --> 1.2.1
39 |
40 |
41 | .. warning::
42 |
43 | The GHT compression has been removed in the 1.2.0 version. Unfortunately,
44 | you have to remove the compression on your tables before updating the
 45 | extension from 1.1.x to a higher version. Some information is available in
46 | the `Schema and compression`_ tutorial.
47 |
 48 | .. _`Schema and compression`: https://pgpointcloud.github.io/pointcloud/tutorials/compression.html#schema-and-compression
49 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-xy.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 1
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 2
18 |
19 |
20 | 3
21 | 2
22 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
23 | Intensity
24 | uint16_t
25 | 3
26 |
27 |
28 | dimensional
29 | 4326
30 |
31 |
32 |
--------------------------------------------------------------------------------
/COPYRIGHT:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013 Natural Resources Canada
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are
6 | met:
7 |
8 | * Redistributions of source code must retain the above copyright notice,
9 | this list of conditions and the following disclaimer.
10 |
11 | * Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | * Neither the name of the Natural Resources Canada, the Government of
16 | Canada nor the names of its contributors may be used to endorse or
17 | promote products derived from this software without specific prior
18 | written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | POSSIBILITY OF SUCH DAMAGE.
31 |
32 |
--------------------------------------------------------------------------------
/doc/concepts/tables.rst:
--------------------------------------------------------------------------------
1 | .. _tables:
2 |
3 | ********************************************************************************
4 | Tables
5 | ********************************************************************************
6 |
7 | Usually you will only be creating tables for storing ``PcPatch`` objects, and
8 | using ``PcPoint`` objects as transitional objects for filtering, but it is
9 | possible to create tables of both types. ``PcPatch`` and ``PcPoint`` columns
 10 | both require an argument that indicates the pcid that will be used to interpret
11 | the column.
12 |
13 | .. code-block:: sql
14 |
15 | -- This example requires the schema entry from the previous
16 | -- section to be loaded so that pcid==1 exists.
17 |
18 | -- A table of points
19 | CREATE TABLE points (
20 | id SERIAL PRIMARY KEY,
21 | pt PCPOINT(1)
22 | );
23 |
24 | -- A table of patches
25 | CREATE TABLE patches (
26 | id SERIAL PRIMARY KEY,
27 | pa PCPATCH(1)
28 | );
29 |
30 | In addition to any tables you create, you will find two other system-provided
31 | point cloud tables:
32 |
33 | - the ``pointcloud_formats`` table that holds all the pcid entries and
34 | schema documents
35 | - the ``pointcloud_columns`` view, that displays all the columns in your
36 | database that contain point cloud objects
37 |
38 | Now that you have created two tables, you'll see entries for them in the
39 | ``pointcloud_columns`` view:
40 |
41 | .. code-block:: sql
42 |
43 | SELECT * FROM pointcloud_columns;
44 |
45 | schema | table | column | pcid | srid | type
46 | --------+-------------+--------+------+------+---------
47 | public | points | pt | 1 | 4326 | pcpoint
48 | public | patches | pa | 1 | 4326 | pcpatch
49 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM postgres:14
 2 | LABEL maintainer="Paul Blottiere"
3 |
4 | ENV POSTGRES_VERSION 14
5 | ENV POSTGIS_VERSION 3
6 | ENV LAZPERF_VERSION 1.5.0
7 |
8 | RUN apt-get update \
9 | && apt-get install -y --no-install-recommends \
10 | postgis \
11 | postgresql-${POSTGRES_VERSION}-postgis-${POSTGIS_VERSION} \
12 | postgresql-${POSTGRES_VERSION}-postgis-${POSTGIS_VERSION}-scripts
13 |
14 | RUN apt-get update \
15 | && apt-get install -y --no-install-recommends \
16 | git \
17 | ca-certificates \
18 | build-essential \
19 | autoconf \
20 | automake \
21 | cmake \
22 | zlib1g-dev \
23 | postgresql-server-dev-all \
24 | libxml2-dev \
25 | && rm -rf /var/lib/apt/lists/* \
26 | && git clone https://github.com/hobuinc/laz-perf.git \
27 | && cd laz-perf \
28 | && git checkout ${LAZPERF_VERSION} \
29 | && cmake -DWITH_TESTS=FALSE . \
30 | && make \
31 | && make install \
32 | && cd .. \
33 | && rm -r laz-perf \
34 | && git clone https://github.com/pgpointcloud/pointcloud \
35 | && cd pointcloud \
36 | && ./autogen.sh \
37 | && ./configure --with-lazperf=/usr/local --with-pgconfig=/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_config CFLAGS="-Wall -Werror -O2 -g" \
38 | && make \
39 | && make install \
40 | && apt-get purge -y --auto-remove \
41 | git \
42 | ca-certificates \
43 | build-essential \
44 | autoconf \
45 | automake \
46 | cmake \
47 | zlib1g-dev \
48 | postgresql-server-dev-all \
49 | libxml2-dev
50 |
51 | RUN mkdir -p /docker-entrypoint-initdb.d
52 | COPY ./initdb-pgpointcloud.sh /docker-entrypoint-initdb.d/10_pgpointcloud.sh
53 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-empty-description.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 |
7 | X
8 | int32_t
9 | 0.01
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 0.01
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 0.01
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 1
34 |
35 |
36 | dimensional
37 | 4326
38 |
39 |
40 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-laz.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 0.01
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 0.01
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 0.01
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 1
34 |
35 |
36 | laz
37 |
38 |
39 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-laz-multiple-dim.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int64_t
9 | 0.01
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | double
17 | 0.01
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | float
25 | 0.01
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint64_t
33 | 1
34 |
35 |
36 | laz
37 |
38 |
39 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-xym.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 1
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 2
18 |
19 |
20 | 3
21 | 4
22 | M coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | M
24 | int32_t
25 | 4
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 3
34 |
35 |
36 | dimensional
37 | 4326
38 |
39 |
40 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-xyz.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 1
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 2
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 4
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 3
34 |
35 |
36 | dimensional
37 | 4326
38 |
39 |
40 |
--------------------------------------------------------------------------------
/lib/TODO.md:
--------------------------------------------------------------------------------
1 | To Do
2 | =====
3 |
4 | - Add in dimensional stats caching to speed up dimensional compression in batch cases
5 |
6 | - (?) convert PCBYTES to use PCDIMENSION* instead of holding all values as dupes
7 | - (??) convert PCBYTES handling to pass-by-reference instead of pass-by-value
8 | - implement PC\_PatchAvg/PC\_PatchMin/PC\_PatchMax as C functions against patches with dimensional and uncompressed implementations
9 | - TESTS for pc\_patch\_dimensional\_from\_uncompressed() and pc\_patch\_dimensional\_compress()
10 |
11 | - Update pc\_patch\_from\_patchlist() to merge GHT patches without decompression
12 | - Update pc\_patch\_from\_patchlist() to merge dimensional patches directly
13 |
14 | - Before doing dimensional compression, sort by geohash (actually by a localized geohash based on the patch bounds). This will (?) enhance the autocorrelation of values and improve run-length encoding in particular
15 |
16 | - Compute PCSTATS in WKB reading code for all patch variants, not just uncompressed
17 | - compute stats in libght
18 | - compute stats of dimensional
19 | - Remove extents in favour of PCSTATS
20 | - Make PCSTATS a static member of the PCPATCH, not a pointer
21 |
22 | - PC\_Filter[GreaterThan|LessThan|Between](patch, dimension, value) should note the relevant stats and short-circuit return either all or none of the patch, as necessary
23 |
24 | Use Cases to Support
25 | --------------------
26 |
27 | - frustrum filtering
28 | - raster overlaying
29 | - filtering on attribute values
30 | - filtering on spatial polygons (in *and* out)
31 | - find the k nearest points to point P
32 | - gridding/binning ("avg intensity per cell", "max z per cell", "agv red per cell", "rgb into grid/picture")
33 | - reprojection / re-schema
34 |
35 | More Functions
36 | --------------
37 |
38 | - PC\_FilterPolygon(patch, wkb) returns patch
39 | - PC\_Filter(patch, dimension, expression) returns patch
40 | - PC\_Get(pcpatch, dimname) returns Array(numeric)
41 |
42 | - PC\_Transform(pcpatch, newpcid)
43 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 0.01
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 0.01
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 0.01
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 1
34 |
35 |
36 | dimensional
37 | 4326
38 |
39 |
40 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-fine.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 0.000001
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 0.000001
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 0.01
26 |
27 |
28 | 4
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 1
34 |
35 |
36 | dimensional
37 | 4326
38 |
39 |
40 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-missing-dimension.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 0.01
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 0.01
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 0.01
26 |
27 |
28 | 5
29 | 2
30 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
31 | Intensity
32 | uint16_t
33 | 1
34 |
35 |
36 | dimensional
37 | 4326
38 |
39 |
40 |
--------------------------------------------------------------------------------
/lib/sort_r/README.md:
--------------------------------------------------------------------------------
1 | The following README.md and sort_r.h files have been imported from the [noporpoise/sort_r Github project](https://github.com/noporpoise/sort_r) in order to provide a portable reentrant version of qsort.
2 |
3 |
4 | sort_r
5 | ======
6 |
7 | Isaac Turner 2013
8 | Portable qsort_r / qsort_s
9 | Discussion here: http://stackoverflow.com/questions/4300896/how-portable-is-the-re-entrant-qsort-r-function-compared-to-qsort
10 | License: Public Domain - use as you wish, no warranty
11 |
12 | [![Build Status](https://travis-ci.org/noporpoise/sort_r.png?branch=master)](https://travis-ci.org/noporpoise/sort_r)
13 |
14 | About
15 | -----
16 |
17 | If you want to qsort() an array with a comparison operator that takes parameters
18 | you need to use global variables to pass those parameters (not possible when
19 | writing multithreaded code), or use qsort_r/qsort_s which are not portable
20 | (there are separate GNU/BSD/Windows versions and they all take different arguments).
21 |
22 | So I wrote a portable qsort_r/qsort_s called sort_r():
23 |
24 | void sort_r(void *base, size_t nel, size_t width,
25 | int (*compar)(const void *a1, const void *a2, void *aarg),
26 | void *arg);
27 |
28 | `base` is the array to be sorted
29 | `nel` is the number of elements in the array
30 | `width` is the size in bytes of each element of the array
31 | `compar` is the comparison function
32 | `arg` is a pointer to be passed to the comparison function
33 |
34 | Using sort_r
35 | ------------
36 |
37 | Add `#include "sort_r.h"` to the top of your code. Then copy sort_r.h into your
38 | working directory, or add -I path/to/sort_r to your compile arguments.
39 |
40 | Build Example
41 | -------------
42 |
43 | Compile example code (`example.c`) with:
44 |
45 | make
46 |
47 | To build using nested functions and qsort instead of qsort_r use
48 |
49 | make NESTED_QSORT=1
50 |
51 | Nested functions are not permitted under ISO C, they are a GCC extension.
52 |
53 | License
54 | -------
55 |
56 | Public Domain. Use as you wish. No warranty. There may be bugs.
57 |
58 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-no-name.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 2
6 | uint16_t
7 | 1
8 |
9 |
10 | 2
11 | 4
12 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
13 | X
14 | int32_t
15 | 0.01
16 |
17 |
18 | 3
19 | 4
20 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
21 | Y
22 | int32_t
23 | 0.01
24 |
25 |
26 | 4
27 | 4
28 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
29 | Z
30 | int32_t
31 | 0.01
32 |
33 |
34 | 5
35 | 2
36 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
37 | Intensity
38 | uint16_t
39 | 1
40 |
41 |
42 | dimensional
43 | 4326
44 |
45 |
46 |
--------------------------------------------------------------------------------
/pgsql/Makefile.in:
--------------------------------------------------------------------------------
# pointcloud
#
# PGXS Makefile for the pointcloud PostgreSQL extension: builds the
# versioned shared module and generates the install/upgrade SQL scripts.

include ../config.mk

# SQL preprocessor discovered by configure; expands the .sql.in inputs.
SQLPP = @SQLPP@

OBJS = \
	pc_inout.o \
	pc_access.o \
	pc_editor.o \
	pc_pgsql.o

SED = sed
EXTENSION = pointcloud
# Full version (e.g. 1.2.5) and major version (e.g. 1.2), read from Version.config.
EXTVERSION=$(shell cat ../Version.config)
EXTVERSION_MAJOR=$(shell cut -d. -f1,2 ../Version.config)
# Shared library name carries the major version (e.g. pointcloud-1.2).
MODULE_big = $(EXTENSION)-$(EXTVERSION_MAJOR)
# Older releases from which a direct upgrade path to EXTVERSION is generated.
UPGRADABLE = 1.1.0 1.1.1 1.2.0 1.2.1 1.2.2 1.2.3 1.2.4

# Expand UPGRADABLE into "pointcloud--<old>--$(EXTVERSION).sql" names (one per
# old version), plus the "<version>next" round-trip scripts.
UPGRADES = \
	$(shell echo $(UPGRADABLE) | \
	$(SED) 's/^/$(EXTENSION)--/' | \
	$(SED) 's/$$/--$(EXTVERSION).sql/' | \
	$(SED) 's/ /--$(EXTVERSION).sql $(EXTENSION)--/g') \
	$(EXTENSION)--$(EXTVERSION)--$(EXTVERSION)next.sql \
	$(EXTENSION)--$(EXTVERSION)next--$(EXTVERSION).sql

DATA_built = \
	$(EXTENSION).control \
	$(EXTENSION)--$(EXTVERSION).sql \
	$(UPGRADES)

REGRESS =

# PostgreSQL 9.x uses variant expected output for the base regression test.
ifeq ("$(PGSQL_MAJOR_VERSION)", "9")
REGRESS += pointcloud_pg9
else
REGRESS += pointcloud
endif

REGRESS += pointcloud_columns schema

# LAZ regression tests only run when laz-perf support is enabled.
ifeq ("$(PGSQL_MAJOR_VERSION)", "9")
ifneq ("$(LAZPERF_STATUS)", "disabled")
REGRESS += pointcloud_laz_pg9
endif
else
ifneq ("$(LAZPERF_STATUS)", "disabled")
REGRESS += pointcloud_laz
endif
endif

# Add in build/link flags for lib
PG_CPPFLAGS += -I../lib
SHLIB_LINK += ../lib/$(LIB_A) ../lib/$(LIB_A_LAZPERF) -lstdc++ $(filter -lm, $(LIBS)) $(XML2_LDFLAGS) $(ZLIB_LDFLAGS)

# We are going to use PGXS for sure
include $(PGXS)

# Substitute version placeholders into the extension control file.
$(EXTENSION).control: $(EXTENSION).control.in Makefile
	$(SED) -e 's/#POINTCLOUD_VERSION#/$(EXTVERSION)/' \
		-e 's/#POINTCLOUD_VERSION_MAJOR#/$(EXTVERSION_MAJOR)/' $< > $@

# Preprocess the master SQL script and substitute the version placeholder.
$(EXTENSION)--$(EXTVERSION).sql: $(EXTENSION).sql.in Makefile
	$(SQLPP) -I. $< | $(SED) -e 's/#POINTCLOUD_VERSION#/$(EXTVERSION)/' > $@

# Derive each upgrade script from the full install script.
# NOTE: relies on PERL being defined by PGXS
$(EXTENSION)--%--$(EXTVERSION).sql: $(EXTENSION)--$(EXTVERSION).sql ../util/proc_upgrade.pl
	cat $< | ../util/proc_upgrade.pl > $@

# The "next" upgrade script is a hard link to its reverse counterpart.
$(EXTENSION)--%--$(EXTVERSION)next.sql: $(EXTENSION)--$(EXTVERSION)next--$(EXTVERSION).sql
	ln -f $< $@
73 |
--------------------------------------------------------------------------------
/doc/concepts/objects.rst:
--------------------------------------------------------------------------------
1 | .. _objects:
2 |
3 | ********************************************************************************
4 | Point Cloud Objects
5 | ********************************************************************************
6 |
7 | --------------------------------------------------------------------------------
8 | PcPoint
9 | --------------------------------------------------------------------------------
10 |
11 | The basic point cloud type is a ``PcPoint``. Every point has a (large?) number of
12 | dimensions, but at a minimum an X and Y coordinate that place it in space.
13 |
14 | Points can be rendered in a human-readable JSON form using the
15 | ``PC_AsText(pcpoint)`` function. The "pcid" is the foreign key reference to the
16 | ``pointcloud_formats`` table, where the meaning of each dimension in the "pt"
17 | array of doubles is explained. The underlying storage of the data might not be
18 | double, but by the time it has been extracted, scaled and offset, it is
19 | representable as doubles.
20 |
21 | .. code-block:: json
22 |
23 | {
24 | "pcid" : 1,
25 | "pt" : [0.01, 0.02, 0.03, 4]
26 | }
27 |
28 | --------------------------------------------------------------------------------
29 | PcPatch
30 | --------------------------------------------------------------------------------
31 |
32 | The structure of database storage is such that storing billions of points as
33 | individual records in a table is not an efficient use of resources. Instead, we
34 | collect a group of ``PcPoint`` into a ``PcPatch``. Each patch should hopefully
35 | contain points that are near together.
36 |
37 | Instead of a table of billions of single ``PcPoint`` records, a collection of
38 | LIDAR data can be represented in the database as a much smaller collection (10s
39 | of millions) of ``PcPatch`` records.
40 |
41 | Patches can be rendered into a human-readable JSON form using the
42 | ``PC_AsText(pcpatch)`` function. The "pcid" is the foreign key reference to the
43 | ``pointcloud_formats`` table.
44 |
45 | .. code-block:: json
46 |
47 | {
48 | "pcid" : 1,
49 | "pts" : [
50 | [0.02, 0.03, 0.05, 6],
51 | [0.02, 0.03, 0.05, 8]
52 | ]
53 | }
54 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-empty-name.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 2
6 |
7 | uint16_t
8 | 1
9 |
10 |
11 | 2
12 | 4
13 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
14 | X
15 | int32_t
16 | 0.01
17 |
18 |
19 | 3
20 | 4
21 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
22 | Y
23 | int32_t
24 | 0.01
25 |
26 |
27 | 4
28 | 4
29 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
30 | Z
31 | int32_t
32 | 0.01
33 |
34 |
35 | 5
36 | 2
37 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
38 | Intensity
39 | uint16_t
40 | 1
41 |
42 |
43 | dimensional
44 | 4326
45 |
46 |
47 |
--------------------------------------------------------------------------------
/doc/functions/wkb.rst:
--------------------------------------------------------------------------------
1 | .. _wkb:
2 |
3 | ********************************************************************************
4 | WKB
5 | ********************************************************************************
6 |
7 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 | PC_AsBinary
9 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | :PC_AsBinary(p pcpoint) returns bytea:
12 |
13 | Return the OGC "well-known binary" format for the point.
14 |
15 | .. code-block::
16 |
17 | SELECT PC_AsBinary('010100000064CEFFFF94110000703000000400'::pcpoint);
18 |
19 | \x01010000800000000000c05fc000000000008046400000000000005f40
20 |
21 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 | PC_BoundingDiagonalAsBinary
23 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 |
25 | :PC_BoundingDiagonalAsBinary(p pcpatch) returns bytea:
26 |
27 | Return the OGC "well-known binary" format for the bounding diagonal of the
28 | patch.
29 |
30 | .. code-block::
31 |
32 | SELECT PC_BoundingDiagonalAsBinary( PC_Patch(ARRAY[ PC_MakePoint(1, ARRAY[0.,0.,0.,10.]), PC_MakePoint(1, ARRAY[1.,1.,1.,10.]), PC_MakePoint(1, ARRAY[10.,10.,10.,10.])]));
33 |
34 | \x01020000a0e610000002000000000000000000000000000000000000000000000000000000000000000000244000000000000024400000000000002440
35 |
36 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
37 | PC_EnvelopeAsBinary
38 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
39 |
40 | :PC_EnvelopeAsBinary(p pcpatch) returns bytea:
41 |
42 | Return the OGC "well-known binary" format for the 2D bounds of the patch.
43 | Useful for performing 2D intersection tests with geometries.
44 |
45 | .. code-block::
46 |
47 | SELECT PC_EnvelopeAsBinary(pa) FROM patches LIMIT 1;
48 |
49 | \x0103000000010000000500000090c2f5285cbf5fc0e17a
50 | 14ae4781464090c2f5285cbf5fc0ec51b81e858b46400ad7
51 | a3703dba5fc0ec51b81e858b46400ad7a3703dba5fc0e17a
52 | 14ae4781464090c2f5285cbf5fc0e17a14ae47814640
53 |
54 | ``PC_Envelope`` is an alias to ``PC_EnvelopeAsBinary``. But ``PC_Envelope`` is
55 | deprecated and will be removed in a future version (2.0) of the extension.
56 | ``PC_EnvelopeAsBinary`` is to be used instead.
57 |
--------------------------------------------------------------------------------
/lib/cunit/data/simple-schema-xyzm.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | 1
5 | 4
6 | X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
7 | X
8 | int32_t
9 | 1
10 |
11 |
12 | 2
13 | 4
14 | Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
15 | Y
16 | int32_t
17 | 2
18 |
19 |
20 | 3
21 | 4
22 | Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
23 | Z
24 | int32_t
25 | 4
26 |
27 |
28 | 4
29 | 4
30 | M coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
31 | M
32 | int32_t
33 | 8
34 |
35 |
36 | 5
37 | 2
38 | The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
39 | Intensity
40 | uint16_t
41 | 1
42 |
43 |
44 | dimensional
45 | 4326
46 |
47 |
48 |
--------------------------------------------------------------------------------
/doc/faq.rst:
--------------------------------------------------------------------------------
1 | .. _faq:
2 |
3 | ******************************************************************************
4 | FAQ
5 | ******************************************************************************
6 |
7 | **What can be done with pgPointcloud?**
8 |
9 | - pgPointcloud offers a way to efficiently store points in a PostgreSQL
10 | database. In fact, pgPointcloud stores groups of points (``pcPoints``)
11 | called ``pcPatch``. Storing groups of points allows efficient compression of
12 | the data. Each patch has a bounding box which can be used by PostGIS spatial
13 | features to greatly accelerate queries. pgPointcloud therefore allows
14 | efficient querying of very big point clouds. Querying can be done using spatial
15 | criteria (which points are in this area), as well as point attribute
16 | criteria (which points have a value of this attribute between ... and ...
17 | ?).
18 |
19 | - Since the point clouds are stored on a server, they can be used for
20 | processing or visualization, or streamed.
21 |
22 | |
23 |
24 | **Why use pgPointcloud to store Lidar points into postgreSQL and not in point cloud files?**
25 |
26 | The traditional way of storing point clouds is to use several files, each
27 | containing a part of the point cloud. This has some severe limitations:
28 |
29 | - Inefficient data querying (to get a few points, you need to read the whole file)
30 | - No concurrency (only one user can modify points at a time/read points at
31 | a time)
32 | - Files tend to get duplicated a lot (each worker has its own private
33 | version); no security of data (a file could be corrupted by a processing
34 | step, and it is hard to manage who accesses what)
35 | - Hard to use several different point clouds at the same time
36 | - Hard to use point clouds with other spatial data (vector, raster, images)
37 |
38 | pgPointcloud solves all of these problems, at a very low cost: you have to use a
39 | DBMS.
40 |
41 | |
42 |
43 | **Does pgPointcloud scale?**
44 |
45 | pgPointcloud is a young project, yet it has been proven to work fast (1ms query
46 | time) with a point cloud of 5 billion points.
47 |
48 | Currently pgPointcloud is fast for:
49 |
50 | - Load data into DB
51 | - Automatically compress data
52 | - Query patches based on spatial or other attributes
53 |
54 | It is slow for:
55 |
56 | - Data output (100k pts/sec)
57 | - In base conversion (no functions)
58 |
--------------------------------------------------------------------------------
/tools/benchmark_compression/pointcloud-laz.sql:
--------------------------------------------------------------------------------
-- Compression benchmark fixture: LAZ-compressed patches.
-- Registers a pointcloud schema (pcid 5) and fills a table with synthetic
-- patches so on-disk size can be compared across compression schemes
-- (see getsize.sql / compression_benchmark.sh).
create EXTENSION if not exists pointcloud;

-- Schema 5: X/Y/Z int32 with 0.01 scale, uint16 Intensity, "laz" compression.
-- NOTE(review): the XML markup of this schema document was stripped by text
-- extraction; the <pc:...> tags must be restored for this INSERT to register
-- a valid schema.
INSERT INTO pointcloud_formats (pcid, srid, schema)
VALUES (5, 0,
'

 
1
4
X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
X
int32_t
0.01


2
4
Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Y
int32_t
0.01


3
4
Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Z
int32_t
0.01


4
2
The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
Intensity
uint16_t
1


laz

'
);


-- Table of patches using schema 5 (LAZ compression).
CREATE TABLE IF NOT EXISTS pa_compression_laz (
pa PCPATCH(5)
);
-- psql meta-command: display the table definition.
\d pa_compression_laz

-- 100k synthetic points, grouped by a/400 into patches of ~400 points each.
INSERT INTO pa_compression_laz (pa)
SELECT PC_Patch(PC_MakePoint(5, ARRAY[x,y,z,intensity]))
FROM (
SELECT
-127+a/100.0 AS x,
45+a/100.0 AS y,
1.0*a AS z,
a/10 AS intensity,
a/400 AS gid
FROM generate_series(1,100000) AS a
) AS values GROUP BY gid;

-- Remove the format registration so the script can be re-run cleanly.
TRUNCATE pointcloud_formats;
64 |
--------------------------------------------------------------------------------
/tools/benchmark_compression/pointcloud.sql:
--------------------------------------------------------------------------------
-- Compression benchmark fixture: uncompressed ("none") patches.
-- Registers a pointcloud schema (pcid 5) and fills a table with synthetic
-- patches so on-disk size can be compared across compression schemes
-- (see getsize.sql / compression_benchmark.sh).
create EXTENSION if not exists pointcloud;

-- Schema 5: X/Y/Z int32 with 0.01 scale, uint16 Intensity, "none" compression.
-- NOTE(review): the XML markup of this schema document was stripped by text
-- extraction; the <pc:...> tags must be restored for this INSERT to register
-- a valid schema.
INSERT INTO pointcloud_formats (pcid, srid, schema)
VALUES (5, 0,
'


1
4
X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
X
int32_t
0.01


2
4
Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Y
int32_t
0.01


3
4
Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Z
int32_t
0.01


4
2
The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
Intensity
uint16_t
1


none

'
);


-- Table of patches using schema 5 (no compression).
CREATE TABLE IF NOT EXISTS pa_compression_none (
pa PCPATCH(5)
);
-- psql meta-command: display the table definition.
\d pa_compression_none

-- 100k synthetic points, grouped by a/400 into patches of ~400 points each.
INSERT INTO pa_compression_none (pa)
SELECT PC_Patch(PC_MakePoint(5, ARRAY[x,y,z,intensity]))
FROM (
SELECT
-127+a/100.0 AS x,
45+a/100.0 AS y,
1.0*a AS z,
a/10 AS intensity,
a/400 AS gid
FROM generate_series(1,100000) AS a
) AS values GROUP BY gid;

-- Remove the format registration so the script can be re-run cleanly.
TRUNCATE pointcloud_formats;
64 |
--------------------------------------------------------------------------------
/tools/benchmark_compression/pointcloud-dim.sql:
--------------------------------------------------------------------------------
-- Compression benchmark fixture: dimensionally-compressed patches.
-- Registers a pointcloud schema (pcid 5) and fills a table with synthetic
-- patches so on-disk size can be compared across compression schemes
-- (see getsize.sql / compression_benchmark.sh).
create EXTENSION if not exists pointcloud;

-- Schema 5: X/Y/Z int32 with 0.01 scale, uint16 Intensity, "dimensional"
-- compression. NOTE(review): the XML markup of this schema document was
-- stripped by text extraction; the <pc:...> tags must be restored for this
-- INSERT to register a valid schema.
INSERT INTO pointcloud_formats (pcid, srid, schema)
VALUES (5, 0,
'


1
4
X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
X
int32_t
0.01


2
4
Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Y
int32_t
0.01


3
4
Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Z
int32_t
0.01


4
2
The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
Intensity
uint16_t
1


dimensional

'
);


-- Table of patches using schema 5 (dimensional compression).
CREATE TABLE IF NOT EXISTS pa_compression_dimensional (
pa PCPATCH(5)
);
-- psql meta-command: display the table definition.
\d pa_compression_dimensional

-- 100k synthetic points, grouped by a/400 into patches of ~400 points each.
INSERT INTO pa_compression_dimensional (pa)
SELECT PC_Patch(PC_MakePoint(5, ARRAY[x,y,z,intensity]))
FROM (
SELECT
-127+a/100.0 AS x,
45+a/100.0 AS y,
1.0*a AS z,
a/10 AS intensity,
a/400 AS gid
FROM generate_series(1,100000) AS a
) AS values GROUP BY gid;

-- Remove the format registration so the script can be re-run cleanly.
TRUNCATE pointcloud_formats;
64 |
--------------------------------------------------------------------------------
/pgsql_postgis/pointcloud_postgis.sql.in:
--------------------------------------------------------------------------------
-----------------------------------------------------------------------------
-- PC_Intersection(pcpatch, geometry) -> pcpatch
-- Build a new patch from only those points of the input patch that
-- intersect the given geometry (per-point ST_Intersects filter).
-- NOTE(review): every call is qualified with @extschema@, which assumes the
-- postgis and pointcloud extensions are installed in the same schema as
-- this extension -- confirm against the install documentation.
--
CREATE OR REPLACE FUNCTION PC_Intersection(pcpatch, geometry)
RETURNS pcpatch AS
$$
WITH
pts AS (SELECT @extschema@.PC_Explode($1) AS pt),
pgpts AS (SELECT @extschema@.ST_GeomFromEWKB(@extschema@.PC_AsBinary(pt)) AS pgpt, pt FROM pts),
ipts AS (SELECT pt FROM pgpts WHERE @extschema@.ST_Intersects(pgpt, $2)),
ipch AS (SELECT @extschema@.PC_Patch(pt) AS pch FROM ipts)
SELECT pch FROM ipch;
$$
LANGUAGE 'sql';

-----------------------------------------------------------------------------
-- PC_EnvelopeGeometry(pcpatch) -> geometry
-- The patch's 2D envelope as a PostGIS geometry, plus a cast from pcpatch
-- to geometry using it.
--
CREATE OR REPLACE FUNCTION PC_EnvelopeGeometry(pcpatch)
RETURNS geometry AS
$$
SELECT @extschema@.ST_GeomFromEWKB(@extschema@.PC_EnvelopeAsBinary($1))
$$
LANGUAGE 'sql';

-- Conversion function backing the pcpatch::geometry cast.
CREATE OR REPLACE FUNCTION geometry(pcpatch)
RETURNS geometry AS
$$
SELECT @extschema@.PC_EnvelopeGeometry($1)
$$
LANGUAGE 'sql';

CREATE CAST (pcpatch AS geometry) WITH FUNCTION PC_EnvelopeGeometry(pcpatch);


-----------------------------------------------------------------------------
-- Cast from pcpoint to geometry (via the point's OGC WKB representation).
--
CREATE OR REPLACE FUNCTION geometry(pcpoint)
RETURNS geometry AS
$$
SELECT @extschema@.ST_GeomFromEWKB(@extschema@.PC_AsBinary($1))
$$
LANGUAGE 'sql';

CREATE CAST (pcpoint AS geometry) WITH FUNCTION geometry(pcpoint);


-----------------------------------------------------------------------------
-- PC_Intersects(pcpatch, geometry) -> boolean
-- True when the geometry intersects the patch's 2D envelope. This is an
-- envelope-only test, not a per-point one (use PC_Intersection for that).
--
CREATE OR REPLACE FUNCTION PC_Intersects(pcpatch, geometry)
RETURNS boolean AS
$$
SELECT @extschema@.ST_Intersects($2, @extschema@.PC_EnvelopeGeometry($1))
$$
LANGUAGE 'sql';

-- Commutative variant: delegates to PC_Intersects(pcpatch, geometry).
CREATE OR REPLACE FUNCTION PC_Intersects(geometry, pcpatch)
RETURNS boolean AS
$$
SELECT @extschema@.PC_Intersects($2, $1)
$$
LANGUAGE 'sql';

-----------------------------------------------------------------------------
-- PC_BoundingDiagonalGeometry(pcpatch) -> geometry
-- The patch's bounding diagonal (from PC_BoundingDiagonalAsBinary) as a
-- PostGIS geometry.
--
CREATE OR REPLACE FUNCTION PC_BoundingDiagonalGeometry(pcpatch)
RETURNS geometry AS
$$
SELECT @extschema@.ST_GeomFromEWKB(@extschema@.PC_BoundingDiagonalAsBinary($1))
$$
LANGUAGE 'sql';

-----------------------------------------------------------------------------
-- PC_PostGIS_Version() -> text
-- Version of the pointcloud_postgis extension (substituted at build time).
--
CREATE OR REPLACE FUNCTION PC_PostGIS_Version()
RETURNS text AS $$ SELECT '@POINTCLOUD_VERSION@'::text $$
LANGUAGE 'sql' IMMUTABLE STRICT;
82 |
--------------------------------------------------------------------------------
/.github/scripts/test_dump_restore.sql:
--------------------------------------------------------------------------------
-- Fixture for the dump/restore CI test: create the extensions, register a
-- pointcloud schema, and load a few patches that pg_dump/pg_restore must
-- round-trip (driven by test_dump_restore.sh).
CREATE EXTENSION postgis;
CREATE EXTENSION pointcloud;
CREATE EXTENSION pointcloud_postgis;

-- Schema pcid=3: X/Y/Z int32 with 0.01 scale plus uint16 Intensity,
-- dimensional compression. NOTE(review): the XML markup of this schema
-- document was stripped by text extraction; the <pc:...> tags must be
-- restored for this INSERT to register a valid schema.
INSERT INTO pointcloud_formats (pcid, srid, schema)
VALUES (3, 0,
'


1
4
X coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
X
int32_t
0.01


2
4
Y coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Y
int32_t
0.01


3
4
Z coordinate as a long integer. You must use the scale and offset information of the header to determine the double value.
Z
int32_t
0.01


4
2
The intensity value is the integer representation of the pulse return magnitude. This value is optional and system specific. However, it should always be included if available.
Intensity
uint16_t
1


dimensional
4326

'
);

CREATE TABLE IF NOT EXISTS probes (
pa PCPATCH(3)
);

-- Functional GiST index on the patch envelope; exercises the
-- pointcloud_postgis dependency during dump/restore.
CREATE INDEX ON probes USING gist(pc_envelopegeometry(pa));

-- Sample patches supplied in hex-encoded PCPATCH wire format (pcid 3).
INSERT INTO probes(pa) VALUES ('0000000003000000000000000200000002000000030000000500060000000200000003000000050008');
INSERT INTO probes(pa) VALUES ('000000000300000000000000020000000600000007000000050006000000090000000A00000005000A');
INSERT INTO probes(pa) VALUES ('0000000003000000000000000200000002000000030000000500060000000200000003000000050003');
INSERT INTO probes(pa) VALUES ('0000000003000000000000000200000002000000030000000500060000000200000003000000050001');
58 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. _home:
2 |
3 | ******************************************************************************
4 | pgPointcloud - A PostgreSQL extension for storing point cloud (LIDAR) data.
5 | ******************************************************************************
6 |
7 | LIDAR_ `point cloud`_ data is becoming more and more available. Devices are easy to
8 | get, not too expensive, and provide very accurate 3D points. pgPointcloud is an
9 | open source PostgreSQL_ extension for storing point cloud data and using it with
10 | PostGIS_. It is very easy to use, robust and efficient.
11 |
12 | By storing LIDAR points in a PostgreSQL database, pgPointcloud eases many
13 | problems and allows a good integration with other geo-spatial data
14 | (vector, raster) into one common framework : PostGIS.
15 |
16 | News
17 | --------------------------------------------------------------------------------
18 |
19 | **19-09-2023**
20 | ................................................................................
21 |
22 | pgPointcloud 1.2.5 has been released.
23 |
24 |
25 | Concepts
26 | --------------------------------------------------------------------------------
27 |
28 | .. toctree::
29 | :maxdepth: 2
30 |
31 | concepts/index
32 |
33 | Install
34 | --------------------------------------------------------------------------------
35 |
36 | .. toctree::
37 | :maxdepth: 2
38 |
39 | install
40 |
41 | Getting Started
42 | --------------------------------------------------------------------------------
43 |
44 | .. toctree::
45 | :maxdepth: 2
46 |
47 | quickstart
48 |
49 | Functions
50 | --------------------------------------------------------------------------------
51 |
52 | .. toctree::
53 | :maxdepth: 2
54 |
55 | functions/index
56 |
57 | Update
58 | --------------------------------------------------------------------------------
59 |
60 | .. toctree::
61 | :maxdepth: 2
62 |
63 | update
64 |
65 | Tutorials
66 | --------------------------------------------------------------------------------
67 |
68 | .. toctree::
69 | :maxdepth: 2
70 |
71 | tutorials/index
72 |
73 | Development
74 | --------------------------------------------------------------------------------
75 |
76 | .. toctree::
77 | :maxdepth: 2
78 |
79 | development
80 |
81 | Project
82 | --------------------------------------------------------------------------------
83 |
84 | .. toctree::
85 | :maxdepth: 2
86 |
87 | community
88 | faq
89 | copyright
90 |
91 |
92 | Indices and tables
93 | --------------------------------------------------------------------------------
94 |
95 | * :ref:`genindex`
96 | * :ref:`search`
97 |
98 |
99 | .. _`point cloud`: http://en.wikipedia.org/wiki/Point_cloud
100 | .. _`LIDAR`: http://en.wikipedia.org/wiki/LIDAR
101 | .. _`PostgreSQL`: https://www.postgresql.org/
102 | .. _`PostGIS`: https://postgis.net/
103 |
--------------------------------------------------------------------------------
/doc/concepts/compressions.rst:
--------------------------------------------------------------------------------
1 | .. _compressions:
2 |
3 | ********************************************************************************
4 | Compressions
5 | ********************************************************************************
6 |
7 | One of the issues with LIDAR data is that there is a lot of it. To deal with
8 | data volumes, PostgreSQL Pointcloud allows schemas to declare their preferred
9 | compression method in the ``<pc:metadata>`` block of the schema document. In
10 | the example schema, we declared our compression as follows:
11 |
12 | .. code-block:: xml
13 |
14 | <pc:metadata>
15 | <Metadata name="compression">dimensional</Metadata>
16 | </pc:metadata>
17 |
18 | There are currently three supported compressions:
19 |
20 | - **None**, which stores points and patches as byte arrays using the type and
21 | formats described in the schema document.
22 | - **Dimensional**, which stores points the same as 'none' but stores patches as
23 | collections of dimensional data arrays, with an "appropriate" compression
24 | applied. Dimensional compression makes the most sense for smaller patch
25 | sizes, since small patches will tend to have more homogeneous dimensions.
26 | - **LAZ** or "LASZip". You must build Pointcloud with laz-perf support to make
27 | use of the LAZ compression. If no compression is declared in
28 | ``<pc:metadata>``, then a compression of "none" is assumed.
29 |
30 | -------------------------------------------------------------------------------
31 | Dimensional Compression
32 | -------------------------------------------------------------------------------
33 |
34 | Dimensional compression first flips the patch representation from a list of N
35 | points containing M dimension values to a list of M dimensions each containing
36 | N values.
37 |
38 | .. code-block:: json
39 |
40 | {"pcid":1,"pts":[
41 | [-126.99,45.01,1,0],[-126.98,45.02,2,0],[-126.97,45.03,3,0],
42 | [-126.96,45.04,4,0],[-126.95,45.05,5,0],[-126.94,45.06,6,0]
43 | ]}
44 |
45 | Becomes, notionally:
46 |
47 | .. code-block:: json
48 |
49 | {"pcid":1,"dims":[
50 | [-126.99,-126.98,-126.97,-126.96,-126.95,-126.94],
51 | [45.01,45.02,45.03,45.04,45.05,45.06],
52 | [1,2,3,4,5,6],
53 | [0,0,0,0,0,0]
54 | ]}
55 |
56 | The potential benefit for compression is that each dimension has quite
57 | different distribution characteristics, and is amenable to different
58 | approaches. In this example, the fourth dimension (intensity) can be very
59 | highly compressed with run-length encoding (one run of six zeros). The first
60 | and second dimensions have relatively low variability relative to their
61 | magnitude and can be compressed by removing the repeated bits.
62 |
63 | Dimensional compression currently uses only three compression schemes:
64 |
65 | - run-length encoding, for dimensions with low variability
66 | - common bits removal, for dimensions with variability in a narrow bit range
67 | - raw deflate compression using zlib, for dimensions that aren't amenable to
68 | the other schemes
69 |
70 | For LIDAR data organized into patches of points that sample similar areas, the
71 | dimensional scheme compresses at between 3:1 and 5:1 efficiency.
72 |
--------------------------------------------------------------------------------
/doc/functions/points.rst:
--------------------------------------------------------------------------------
1 | .. _points:
2 |
3 | ********************************************************************************
4 | PcPoint
5 | ********************************************************************************
6 |
7 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 | PC_AsText
9 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | :PC_AsText(p pcpoint) returns text:
12 |
13 | Returns a JSON version of the data in that point.
14 |
15 | .. code-block::
16 |
17 | SELECT PC_AsText('010100000064CEFFFF94110000703000000400'::pcpoint);
18 |
19 | {"pcid":1,"pt":[-127,45,124,4]}
20 |
21 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 | PC_Get
23 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 |
25 | :PC_Get(pt pcpoint) returns float8[]:
26 |
27 | Returns values of all dimensions in an array.
28 |
29 | .. code-block::
30 |
31 | SELECT PC_Get('010100000064CEFFFF94110000703000000400'::pcpoint);
32 |
33 | {-127,45,124,4}
34 |
35 | :PC_Get(pt pcpoint, dimname text) returns numeric:
36 |
37 | Returns the numeric value of the named dimension. The dimension name must exist
38 | in the schema.
39 |
40 | .. code-block::
41 |
42 | SELECT PC_Get('010100000064CEFFFF94110000703000000400'::pcpoint, 'Intensity');
43 |
44 | 4
45 |
46 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
47 | PC_MakePoint
48 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
49 |
50 | :PC_MakePoint(pcid integer, vals float8[]) returns pcpoint:
51 |
52 | Given a valid pcid schema number and an array of doubles that matches the
53 | schema, construct a new pcpoint.
54 |
55 | .. code-block::
56 |
57 | SELECT PC_MakePoint(1, ARRAY[-127, 45, 124.0, 4.0]);
58 |
59 | 010100000064CEFFFF94110000703000000400
60 |
61 | Insert some test values into the points table:
62 |
63 | .. code-block::
64 |
65 | INSERT INTO points (pt)
66 | SELECT PC_MakePoint(1, ARRAY[x,y,z,intensity])
67 | FROM (
68 | SELECT
69 | -127+a/100.0 AS x,
70 | 45+a/100.0 AS y,
71 | 1.0*a AS z,
72 | a/10 AS intensity
73 | FROM generate_series(1,100) AS a
74 | ) AS values;
75 |
76 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
77 | PC_MemSize
78 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
79 |
80 | :PC_MemSize(pt pcpoint) returns int4:
81 |
82 | Returns the memory size of a pcpoint.
83 |
84 | .. code-block::
85 |
86 | SELECT PC_MemSize(PC_MakePoint(1, ARRAY[-127, 45, 124.0, 4.0]));
87 |
88 | 25
89 |
90 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
91 | PC_PCId
92 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
93 |
94 | :PC_PCId(p pcpoint) returns integer (from 1.1.0):
95 |
96 | Returns the pcid schema number of this point.
97 |
98 | .. code-block::
99 |
100 | SELECT PC_PCId('010100000064CEFFFF94110000703000000400'::pcpoint);
101 |
102 | 1
103 |
--------------------------------------------------------------------------------
/lib/cunit/cu_pc_util.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * cu_pc_util.c
3 | *
4 | * Testing for the util functions
5 | *
6 | * Portions Copyright (c) 2017, Oslandia
7 | *
8 | ***********************************************************************/
9 |
10 | #include "CUnit/Basic.h"
11 | #include "cu_tester.h"
12 |
/* GLOBALS ************************************************************/

/* Schema shared by every test in this suite; built once in init_suite. */
static PCSCHEMA *schema = NULL;
/* Schema document path, relative to the cunit working directory. */
static const char *xmlfile = "data/pdal-schema.xml";

/* Setup/teardown for this suite */

/* Load and parse the XML schema into the suite-global `schema`.
 * Returns 0 on success, 1 to tell CUnit the suite cannot run. */
static int init_suite(void)
{
  char *xmlstr = file_to_str(xmlfile);
  schema = pc_schema_from_xml(xmlstr);
  /* the raw XML text is no longer needed once parsed */
  pcfree(xmlstr);
  if (!schema)
    return 1;

  return 0;
}

/* Free the suite-global schema. Always reports success to CUnit. */
static int clean_suite(void)
{
  pc_schema_free(schema);
  return 0;
}
35 |
36 | /* TESTS **************************************************************/
37 |
38 | static void test_bounding_diagonal_wkb_from_bounds()
39 | {
40 | PCBOUNDS bounds;
41 | size_t wkbsize;
42 | uint8_t *wkb;
43 | char *wkbhex;
44 |
45 | bounds.xmin = -10;
46 | bounds.xmax = 10;
47 | bounds.ymin = -10;
48 | bounds.ymax = 10;
49 |
50 | wkb = pc_bounding_diagonal_wkb_from_bounds(&bounds, schema, &wkbsize);
51 | CU_ASSERT(wkb != NULL);
52 | CU_ASSERT(wkbsize == 41);
53 |
54 | wkbhex = pc_hexbytes_from_bytes(wkb, wkbsize);
55 | CU_ASSERT(wkbhex != NULL);
56 | CU_ASSERT_STRING_EQUAL(wkbhex, "01020000000200000000000000000024C000000000000"
57 | "024C000000000000024400000000000002440");
58 |
59 | pcfree(wkb);
60 | pcfree(wkbhex);
61 | }
62 |
63 | static void test_bounding_diagonal_wkb_from_stats()
64 | {
65 | PCSTATS *stats;
66 | size_t wkbsize;
67 | uint8_t *wkb;
68 | char *wkbhex;
69 |
70 | stats = pc_stats_new(schema);
71 |
72 | pc_point_set_x(&stats->min, -10);
73 | pc_point_set_x(&stats->max, 10);
74 | pc_point_set_y(&stats->min, -10);
75 | pc_point_set_y(&stats->max, 10);
76 | pc_point_set_z(&stats->min, -10);
77 | pc_point_set_z(&stats->max, 10);
78 |
79 | wkb = pc_bounding_diagonal_wkb_from_stats(stats, &wkbsize);
80 | CU_ASSERT(wkb != NULL);
81 | CU_ASSERT(wkbsize == 73);
82 |
83 | wkbhex = pc_hexbytes_from_bytes(wkb, wkbsize);
84 | CU_ASSERT(wkbhex != NULL);
85 | CU_ASSERT_STRING_EQUAL(wkbhex,
86 | "01020000C00200000000000000000024C000000000000024C0000"
87 | "00000000024C00000000000000000000000000000244000000000"
88 | "0000244000000000000024400000000000000000");
89 |
90 | pc_stats_free(stats);
91 | pcfree(wkb);
92 | pcfree(wkbhex);
93 | }
94 |
/* REGISTER ***********************************************************/

/* Test list for this suite; must be terminated by CU_TEST_INFO_NULL. */
CU_TestInfo util_tests[] = {PC_TEST(test_bounding_diagonal_wkb_from_bounds),
                            PC_TEST(test_bounding_diagonal_wkb_from_stats),
                            CU_TEST_INFO_NULL};

/* Suite descriptor picked up by the cu_tester driver. */
CU_SuiteInfo util_suite = {.pName = "util",
                           .pInitFunc = init_suite,
                           .pCleanupFunc = clean_suite,
                           .pTests = util_tests};
105 |
--------------------------------------------------------------------------------
/lib/lazperf_adapter.hpp:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * lazperf_adapter.hpp
3 | *
4 | * LazPerf compression/decompression
5 | *
6 | * Copyright (c) 2016 Paul Blottiere, Oslandia
7 | *
8 | ***********************************************************************/
9 |
10 | #pragma once
11 |
12 | #include "pc_api_internal.h"
13 |
14 | #ifdef HAVE_LAZPERF
15 | #include <laz-perf/common/common.hpp>
16 | #include <laz-perf/compressor.hpp>
17 | #include <laz-perf/decompressor.hpp>
18 | #include <laz-perf/encoder.hpp>
19 | #include <laz-perf/decoder.hpp>
20 | #include <laz-perf/formats.hpp>
21 | #include <laz-perf/las.hpp>
22 |
23 | /**********************************************************************
24 | * C API
25 | */
26 | #include "lazperf_adapter.h"
27 |
28 | /**********************************************************************
29 | * INTERNAL CPP
30 | */
// utility functions: debugging dumps of raw buffers and patches
void lazperf_dump(uint8_t *data, const size_t size);
void lazperf_dump(const PCPATCH_UNCOMPRESSED *p);
void lazperf_dump(const PCPATCH_LAZPERF *p);
35 |
// Byte buffer bridging pgpointcloud and the laz-perf streaming
// interfaces: the compressor appends via putByte/putBytes, the
// decompressor consumes via getByte/getBytes.
// (Fix: the stripped template/cast arguments — `reinterpret_cast(...)`
// and `std::vector buf` — were invalid C++; restored.)
struct LazPerfBuf
{
  LazPerfBuf() : buf(), idx(0) {}

  // Raw pointer to the accumulated bytes (compressed stream so far).
  const uint8_t *data()
  {
    return reinterpret_cast<const uint8_t *>(buf.data());
  }

  // Append `len` bytes starting at `b`.
  void putBytes(const unsigned char *b, size_t len)
  {
    while (len--)
    {
      buf.push_back(*b++);
    }
  }

  void putByte(const unsigned char b) { buf.push_back(b); }

  // Sequential read; advances the cursor. No bounds checking: callers
  // must not read past what has been written.
  unsigned char getByte() { return buf[idx++]; }

  void getBytes(unsigned char *b, int len)
  {
    for (int i = 0; i < len; i++)
    {
      b[i] = getByte();
    }
  }

  std::vector<unsigned char> buf; // accumulated bytes
  size_t idx;                     // read cursor used by getByte/getBytes
};
69 |
70 | // some typedef
71 | typedef laszip::encoders::arithmetic Encoder;
72 | typedef laszip::decoders::arithmetic Decoder;
73 |
74 | typedef laszip::formats::dynamic_field_compressor::ptr Compressor;
75 | typedef laszip::formats::dynamic_field_decompressor::ptr Decompressor;
76 |
77 | // LazPerf class
78 | template class LazPerf
79 | {
80 |
81 | public:
82 | LazPerf(const PCSCHEMA *pcschema, LazPerfBuf &buf);
83 | ~LazPerf();
84 |
85 | size_t pointsize() const { return _pointsize; }
86 |
87 | protected:
88 | void initSchema();
89 | bool addField(const PCDIMENSION *dim);
90 |
91 | const PCSCHEMA *_pcschema;
92 | LazPerfCoder _coder;
93 | LazPerfEngine _engine;
94 | size_t _pointsize;
95 | };
96 |
97 | // compressor
98 | class LazPerfCompressor : public LazPerf
99 | {
100 |
101 | public:
102 | LazPerfCompressor(const PCSCHEMA *pcschema, LazPerfBuf &output);
103 | ~LazPerfCompressor();
104 |
105 | size_t compress(const uint8_t *input, const size_t inputsize);
106 | };
107 |
108 | // decompressor
109 | class LazPerfDecompressor : public LazPerf
110 | {
111 |
112 | public:
113 | LazPerfDecompressor(const PCSCHEMA *pcschema, LazPerfBuf &input);
114 | ~LazPerfDecompressor();
115 |
116 | size_t decompress(uint8_t *data, const size_t datasize);
117 | };
118 | #endif // HAVE_LAZPERF
119 |
--------------------------------------------------------------------------------
/lib/stringbuffer.h:
--------------------------------------------------------------------------------
1 | /**********************************************************************
2 | * stringbuffer.h
3 | *
4 | * Copyright 2002 Thamer Alharbash
5 | * Copyright 2009 Paul Ramsey
6 | * Copyright 2015 Sandro Santilli
7 | *
8 | * Redistribution and use in source and binary forms, with or
9 | * without modification, are permitted provided that the following
10 | * conditions are met:
11 | *
12 | * Redistributions of source code must retain the above copyright
13 | * notice, this list of conditions and the following disclaimer.
14 | *
15 | * Redistributions in binary form must reproduce the above
16 | * copyright notice, this list of conditions and the following
17 | * disclaimer in the documentation and/or other materials provided
18 | * with the distribution.
19 | *
20 | * The name of the author may not be used to endorse or promote
21 | * products derived from this software without specific prior
22 | * written permission.
23 | *
24 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY
25 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
27 | * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
28 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
30 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
31 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
32 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
34 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
35 | * THE POSSIBILITY OF SUCH DAMAGE.
36 | *
37 | **********************************************************************/
38 |
39 | #ifndef _STRINGBUFFER_H
40 | #define _STRINGBUFFER_H 1
41 |
42 | #include <stdio.h>
43 | #include <stdlib.h>
44 | #include <string.h>
45 | #include <stdarg.h>
46 |
/* Use appropriate allocators for this deployment */
/* NOTE(review): these macros rewrite every later use of malloc/free/
 * realloc in the including translation unit to the pointcloud
 * allocators — include this header after any system headers that use
 * those names. */
#define malloc pcalloc
#define free pcfree
#define realloc pcrealloc

#include "pc_api_internal.h"

/* Initial allocation for a default-constructed buffer, in bytes. */
#define STRINGBUFFER_STARTSIZE 128
55 |
/* Dynamically grown string accumulator. */
typedef struct
{
  size_t capacity;  /* bytes allocated at str_start */
  char *str_end;    /* current end of content; NOTE(review): assumed to
                       point at the NUL terminator — confirm in
                       stringbuffer.c */
  char *str_start;  /* start of the character buffer */
} stringbuffer_t;
62 |
63 | extern stringbuffer_t *stringbuffer_create_with_size(size_t size);
64 | extern stringbuffer_t *stringbuffer_create(void);
65 | extern void stringbuffer_destroy(stringbuffer_t *sb);
66 | extern void stringbuffer_clear(stringbuffer_t *sb);
67 | void stringbuffer_set(stringbuffer_t *sb, const char *s);
68 | void stringbuffer_copy(stringbuffer_t *sb, stringbuffer_t *src);
69 | extern void stringbuffer_append(stringbuffer_t *sb, const char *s);
70 | extern int stringbuffer_aprintf(stringbuffer_t *sb, const char *fmt, ...);
71 | extern const char *stringbuffer_getstring(stringbuffer_t *sb);
72 | extern char *stringbuffer_getstringcopy(stringbuffer_t *sb);
73 | extern char *stringbuffer_release_string(stringbuffer_t *sb);
74 | extern int stringbuffer_getlength(stringbuffer_t *sb);
75 | extern char stringbuffer_lastchar(stringbuffer_t *s);
76 | extern int stringbuffer_trim_trailing_white(stringbuffer_t *s);
77 | extern int stringbuffer_trim_trailing_zeroes(stringbuffer_t *s);
78 |
79 | #endif /* _STRINGBUFFER_H */
80 |
--------------------------------------------------------------------------------
/pgsql/pc_editor.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_editor.c
3 | *
4 | * Editor functions for points and patches in PgSQL.
5 | *
6 | * Copyright (c) 2017 Oslandia
7 | *
8 | ***********************************************************************/
9 |
10 | #include "pc_pgsql.h" /* Common PgSQL support for our type */
11 |
/* Forward declarations of the PgSQL-callable functions. */
Datum pcpatch_setpcid(PG_FUNCTION_ARGS);
Datum pcpatch_transform(PG_FUNCTION_ARGS);

/* Re-tag or rewrite a serialized patch so that it conforms to nschema.
 *
 * serpa:   serialized patch to convert
 * oschema: schema the patch currently uses
 * nschema: target schema
 * def:     fill value for dimensions present in nschema but not oschema
 *
 * Returns a newly allocated serialized patch, or NULL on failure.
 */
static SERIALIZED_PATCH *pcpatch_set_schema(SERIALIZED_PATCH *serpa,
                                            PCSCHEMA *oschema,
                                            PCSCHEMA *nschema, float8 def)
{
  SERIALIZED_PATCH *serpatch;
  PCPATCH *paout;

  if (pc_schema_same_dimensions(oschema, nschema))
  {
    // oschema and nschema have the same dimensions at the same
    // positions, so we can take a fast path and avoid the
    // point-by-point dimension-by-dimension copying

    if (oschema->compression == nschema->compression)
    {
      // no need to deserialize the patch: the byte layout is
      // identical, only the pcid tag changes
      serpatch = palloc(serpa->size);
      if (!serpatch)
        return NULL;
      memcpy(serpatch, serpa, serpa->size);
      serpatch->pcid = nschema->pcid;
      return serpatch;
    }
    else
    {
      // same dimensions but different compression: deserialize and
      // point the patch at the new schema so the re-serialization
      // below applies the new compression
      paout = pc_patch_deserialize(serpa, oschema);
      if (!paout)
        return NULL;
      paout->schema = nschema;
    }
  }
  else
  {
    PCPATCH *patch;

    // slow path: dimensions differ, copy dimension by dimension,
    // filling dimensions missing from oschema with `def`
    patch = pc_patch_deserialize(serpa, oschema);
    if (!patch)
      return NULL;

    paout = pc_patch_set_schema(patch, nschema, def);

    // pc_patch_set_schema may return its input; free only when distinct
    if (patch != paout)
      pc_patch_free(patch);

    if (!paout)
      return NULL;
  }

  serpatch = pc_patch_serialize(paout, NULL);
  pc_patch_free(paout);

  return serpatch;
}
68 |
69 | PG_FUNCTION_INFO_V1(pcpatch_setpcid);
70 | Datum pcpatch_setpcid(PG_FUNCTION_ARGS)
71 | {
72 | SERIALIZED_PATCH *serpatch;
73 | SERIALIZED_PATCH *serpa = PG_GETARG_SERPATCH_P(0);
74 | int32 pcid = PG_GETARG_INT32(1);
75 | float8 def = PG_GETARG_FLOAT8(2);
76 | PCSCHEMA *oschema = pc_schema_from_pcid(serpa->pcid, fcinfo);
77 | PCSCHEMA *nschema = pc_schema_from_pcid(pcid, fcinfo);
78 |
79 | serpatch = pcpatch_set_schema(serpa, oschema, nschema, def);
80 | if (!serpatch)
81 | PG_RETURN_NULL();
82 | PG_RETURN_POINTER(serpatch);
83 | }
84 |
85 | PG_FUNCTION_INFO_V1(pcpatch_transform);
86 | Datum pcpatch_transform(PG_FUNCTION_ARGS)
87 | {
88 | PCPATCH *patch, *paout;
89 | SERIALIZED_PATCH *serpatch;
90 | SERIALIZED_PATCH *serpa = PG_GETARG_SERPATCH_P(0);
91 | int32 pcid = PG_GETARG_INT32(1);
92 | float8 def = PG_GETARG_FLOAT8(2);
93 | PCSCHEMA *oschema = pc_schema_from_pcid(serpa->pcid, fcinfo);
94 | PCSCHEMA *nschema = pc_schema_from_pcid(pcid, fcinfo);
95 |
96 | patch = pc_patch_deserialize(serpa, oschema);
97 | if (!patch)
98 | PG_RETURN_NULL();
99 |
100 | paout = pc_patch_transform(patch, nschema, def);
101 |
102 | pc_patch_free(patch);
103 |
104 | if (!paout)
105 | PG_RETURN_NULL();
106 |
107 | serpatch = pc_patch_serialize(paout, NULL);
108 | pc_patch_free(paout);
109 |
110 | PG_RETURN_POINTER(serpatch);
111 | }
112 |
--------------------------------------------------------------------------------
/doc/functions/utils.rst:
--------------------------------------------------------------------------------
1 | .. _utils:
2 |
3 | ********************************************************************************
4 | Utils
5 | ********************************************************************************
6 |
7 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 | PC_Full_Version
9 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | :PC_Full_Version() returns text:
12 |
13 | Return a composite version string summarizing pointcloud system components.
14 | Includes library, SQL, libxml2, PostgreSQL interface versions, and LAZperf
15 | support flag. Useful for debugging or verifying runtime compatibility of the
16 | pointcloud extension.
17 |
18 | .. code-block::
19 |
20 | SELECT PC_Full_Version();
21 |
22 |   POINTCLOUD="1.2.5 2346cc2" PGSQL="170" LIBXML2="2.14.3" LAZPERF enabled=false
23 |
24 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 | PC_Lazperf_Enabled
26 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 |
28 | :PC_Lazperf_Enabled() returns boolean:
29 |
30 | Return `true` if the pointcloud extension includes LAZperf compression support.
31 |
32 | .. code-block::
33 |
34 | SELECT PC_Lazperf_Enabled();
35 |
36 | t
37 |
38 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
39 | PC_LibXML2_Version
40 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
41 |
42 | :PC_LibXML2_Version() returns text:
43 |
44 | Return the `libxml2` version number.
45 |
46 | .. code-block::
47 |
48 | SELECT PC_LibXML2_Version();
49 |
50 | 2.14.3
51 |
52 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
53 | PC_Lib_Version
54 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
55 |
56 | :PC_Lib_Version() returns text:
57 |
58 | Return the library version number.
59 |
60 | .. code-block::
61 |
62 | SELECT PC_Lib_Version();
63 |
64 | 1.2.5 2346cc2
65 |
66 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
67 | PC_PGSQL_Version
68 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
69 |
70 | :PC_PGSQL_Version() returns text:
71 |
72 | Return the `pgsql` version number.
73 |
74 | .. code-block::
75 |
76 | SELECT PC_PGSQL_Version();
77 |
78 | 170
79 |
80 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
81 | PC_PostGIS_Version
82 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
83 |
84 | :PC_PostGIS_Version() returns text:
85 |
86 | Return the PostGIS extension version number.
87 |
88 | .. code-block::
89 |
90 | SELECT PC_PostGIS_Version();
91 |
92 | 1.2.5
93 |
94 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
95 | PC_Script_Version
96 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
97 |
98 | :PC_Script_Version() returns text:
99 |
100 | Return the script version number.
101 |
102 | .. code-block::
103 |
104 | SELECT PC_Script_Version();
105 |
106 | 1.2.5
107 |
108 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
109 | PC_Version
110 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
111 |
112 | :PC_Version() returns text:
113 |
114 | Return the extension version number.
115 |
116 | .. code-block::
117 |
118 | SELECT PC_Version();
119 |
120 | 1.2.5
121 |
--------------------------------------------------------------------------------
/doc/tutorials/compression.rst:
--------------------------------------------------------------------------------
1 | ******************************************************************************
2 | Schema and compression
3 | ******************************************************************************
4 |
5 | This tutorial is an introduction for investigating XML schemas and playing with
6 | compression of patches.
7 |
8 | ------------------------------------------------------------------------------
9 | Compression type
10 | ------------------------------------------------------------------------------
11 |
12 | The compression of a patch may be retrieved through its XML schema but it's
13 | also stored in the patch itself. Of course, both need to be consistent, so
14 | updating an existing schema is highly discouraged and may lead to errors.
15 |
16 | In the first case, the XML schema needs to be parsed with ``xpath`` function to
17 | retrieve the ``pc:metadata`` tag of a specific patch:
18 |
19 | .. code-block:: sql
20 |
21 | WITH tmp AS (
22 | SELECT pc_pcid(pa)
23 | AS _pcid
24 | FROM airport
25 | LIMIT 1
26 | )
27 | SELECT unnest(
28 | xpath(
29 | '/pc:PointCloudSchema/pc:metadata/Metadata/text()',
30 | schema::xml,
31 | array[
32 | ['pc', 'http://pointcloud.org/schemas/PC/'],
33 | ['xsi', 'http://www.w3.org/2001/XMLSchema-instance']
34 | ]
35 | )
36 | )
37 | FROM tmp,pointcloud_formats
38 | WHERE pcid=tmp._pcid;
39 | --> dimensional
40 |
41 |
42 | A much easier way to retrieve the compression type is to take a look at the
43 | JSON summary of the patch:
44 |
45 | .. code-block:: sql
46 |
47 | SELECT pc_summary(pa)::json->'compr' FROM airport LIMIT 1;
48 | --> dimensional
49 |
50 | ------------------------------------------------------------------------------
51 | Create a new schema
52 | ------------------------------------------------------------------------------
53 |
54 | A schema is just a XML document and may be manually inserted into the
55 | ``pointcloud_formats`` table directly from a file. We can also duplicate an
56 | existing schema and tweak some parameters.
57 |
58 | For example, we can create a new schema without compression and based on the
59 | schema ``pcid=1``:
60 |
61 | .. code-block:: sql
62 |
63 | INSERT INTO pointcloud_formats (pcid, srid, schema)
64 | SELECT 2, srid, regexp_replace(schema, 'dimensional', 'none', 'g')
65 | FROM pointcloud_formats
66 | WHERE pcid=1;
67 |
68 | ------------------------------------------------------------------------------
69 | Transform a patch
70 | ------------------------------------------------------------------------------
71 |
72 | Thanks to the ``pc_transform`` function, we can transform the underlying data
73 | of a patch to match a specific schema. So if we want to remove the dimensional
74 | compression from an existing patch, we can use the schema with ``pcid=2``
75 | previously created.
76 |
77 | In this particular case, the transformed patch doesn't have compression
78 | anymore:
79 |
80 | .. code-block:: sql
81 |
82 | SELECT pc_summary(pc_transform(pa, 2))::json->'compr' FROM airport LIMIT 1;
83 | --> none
84 |
85 | So a new table of uncompressed patches may be easily created:
86 |
87 | .. code-block:: sql
88 |
89 | CREATE TABLE airport_uncompressed AS SELECT pc_transform(pa, 2) AS pa FROM airport;
90 |
91 | SELECT pc_summary(pa)::json->'compr' FROM airport_uncompressed LIMIT 1;
92 | --> none
93 |
94 | SELECT pc_astext(pc_patchavg(pa)) FROM airport LIMIT 1;
95 | --> {"pcid":1,"pt":[65535,0,0,0,0,0,0,0,0,30744,25999,17189,728265,4.67644e+06,299.08]}
96 |
97 | SELECT pc_astext(pc_patchavg(pa)) FROM airport_uncompressed LIMIT 1;
98 | --> {"pcid":2,"pt":[65535,0,0,0,0,0,0,0,0,30744,25999,17189,728265,4.67644e+06,299.08]}
99 |
--------------------------------------------------------------------------------
/lib/pc_pointlist.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_pointlist.c
3 | *
4 | * Point list handling. Create, get and set values from the
5 | * basic PCPOINTLIST structure.
6 | *
7 | * PgSQL Pointcloud is free and open source software provided
8 | * by the Government of Canada
9 | * Copyright (c) 2013 Natural Resources Canada
10 | *
11 | ***********************************************************************/
12 |
13 | #include "pc_api_internal.h"
14 | #include
15 |
16 | PCPOINTLIST *pc_pointlist_make(uint32_t npoints)
17 | {
18 | PCPOINTLIST *pl = pcalloc(sizeof(PCPOINTLIST));
19 | pl->points = pcalloc(sizeof(PCPOINT *) * npoints);
20 | pl->maxpoints = npoints;
21 | pl->npoints = 0;
22 | pl->mem = NULL;
23 | return pl;
24 | }
25 |
26 | void pc_pointlist_free(PCPOINTLIST *pl)
27 | {
28 | int i;
29 | for (i = 0; i < pl->npoints; i++)
30 | {
31 | pc_point_free(pl->points[i]);
32 | }
33 | if (pl->mem)
34 | pcfree(pl->mem);
35 | pcfree(pl->points);
36 | pcfree(pl);
37 | return;
38 | }
39 |
40 | void pc_pointlist_add_point(PCPOINTLIST *pl, PCPOINT *pt)
41 | {
42 | if (pl->npoints >= pl->maxpoints)
43 | {
44 | if (pl->maxpoints < 1)
45 | pl->maxpoints = 1;
46 | pl->maxpoints *= 2;
47 | pl->points = pcrealloc(pl->points, pl->maxpoints * sizeof(PCPOINT *));
48 | }
49 |
50 | pl->points[pl->npoints] = pt;
51 | pl->npoints += 1;
52 | return;
53 | }
54 |
/* Return the i-th point. No bounds checking: the caller must ensure
 * 0 <= i < pl->npoints. The list retains ownership of the point. */
PCPOINT *pc_pointlist_get_point(const PCPOINTLIST *pl, int i)
{
  return pl->points[i];
}
59 |
60 | PCPOINTLIST *pc_pointlist_from_dimensional(const PCPATCH_DIMENSIONAL *pdl)
61 | {
62 | PCPOINTLIST *pl;
63 | PCPATCH_DIMENSIONAL *pdl_uncompressed;
64 | const PCSCHEMA *schema = pdl->schema;
65 | int i, j, ndims, npoints;
66 | uint8_t *data;
67 | assert(pdl);
68 |
69 | pdl_uncompressed = pc_patch_dimensional_decompress(pdl);
70 |
71 | ndims = schema->ndims;
72 | npoints = pdl->npoints;
73 | pl = pc_pointlist_make(npoints);
74 | pl->mem = data = pcalloc(npoints * schema->size);
75 |
76 | for (i = 0; i < npoints; i++)
77 | {
78 | PCPOINT *pt = pc_point_from_data(schema, data);
79 | for (j = 0; j < ndims; j++)
80 | {
81 | PCDIMENSION *dim = pc_schema_get_dimension(schema, j);
82 |
83 | uint8_t *in = pdl_uncompressed->bytes[j].bytes + dim->size * i;
84 | uint8_t *out = data + dim->byteoffset;
85 | memcpy(out, in, dim->size);
86 | }
87 | pc_pointlist_add_point(pl, pt);
88 | data += schema->size;
89 | }
90 | pc_patch_dimensional_free(pdl_uncompressed);
91 |
92 | return pl;
93 | }
94 |
95 | PCPOINTLIST *pc_pointlist_from_uncompressed(const PCPATCH_UNCOMPRESSED *patch)
96 | {
97 | int i;
98 | PCPOINTLIST *pl;
99 | size_t pt_size = patch->schema->size;
100 | uint32_t npoints = patch->npoints;
101 |
102 | pl = pc_pointlist_make(npoints);
103 | for (i = 0; i < npoints; i++)
104 | {
105 | pc_pointlist_add_point(
106 | pl, pc_point_from_data(patch->schema, patch->data + i * pt_size));
107 | }
108 | return pl;
109 | }
110 |
111 | PCPOINTLIST *pc_pointlist_from_patch(const PCPATCH *patch)
112 | {
113 | switch (patch->type)
114 | {
115 | case PC_NONE:
116 | {
117 | return pc_pointlist_from_uncompressed((PCPATCH_UNCOMPRESSED *)patch);
118 | }
119 | case PC_DIMENSIONAL:
120 | {
121 | return pc_pointlist_from_dimensional((PCPATCH_DIMENSIONAL *)patch);
122 | }
123 | case PC_LAZPERF:
124 | {
125 | return pc_pointlist_from_lazperf((PCPATCH_LAZPERF *)patch);
126 | }
127 | }
128 |
129 | /* Don't get here */
130 | pcerror("pc_pointlist_from_patch: unsupported compression type %d",
131 | patch->type);
132 | return NULL;
133 | }
134 |
--------------------------------------------------------------------------------
/lib/pc_mem.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_mem.c
3 | *
4 | * Memory and message management routines.
5 | * Allow this library to be used both inside and outside a
6 | * PgSQL backend.
7 | *
8 | * PgSQL Pointcloud is free and open source software provided
9 | * by the Government of Canada
10 | * Copyright (c) 2013 Natural Resources Canada
11 | *
12 | ***********************************************************************/
13 |
14 | #include "pc_api_internal.h"
15 | #include
16 |
/* Pluggable allocator and message-handler table; lets the library run
 * both standalone (stdlib defaults) and inside a PgSQL backend, which
 * installs its own handlers via pc_set_handlers. */
struct pc_context_t
{
  pc_allocator alloc;      /* malloc-style allocator */
  pc_reallocator realloc;  /* realloc-style reallocator */
  pc_deallocator free;     /* free-style deallocator */
  pc_message_handler err;  /* fatal error reporting */
  pc_message_handler warn; /* non-fatal warnings */
  pc_message_handler info; /* informational messages */
};

/* Single process-wide context; mutated only through the setup calls
 * below, with no locking. */
static struct pc_context_t pc_context;
28 |
/*
 * Default allocators
 *
 * We include some default allocators that use malloc/free/realloc
 * along with stdout/stderr since this is the most common use case
 *
 */

/* Thin wrapper over malloc matching the pc_allocator signature. */
static void *default_allocator(size_t size) { return malloc(size); }

/* Thin wrapper over free matching the pc_deallocator signature. */
static void default_freeor(void *mem) { free(mem); }

/* Thin wrapper over realloc matching the pc_reallocator signature. */
static void *default_reallocator(void *mem, size_t size)
{
  return realloc(mem, size);
}
45 |
/* Prefix `fmt` with `label`, append a newline, and print the formatted
 * message to stdout. The combined format string is truncated at 1023
 * characters. */
static void default_msg_handler(const char *label, const char *fmt, va_list ap)
{
  char newfmt[1024] = {0};
  snprintf(newfmt, 1024, "%s%s\n", label, fmt);
  newfmt[1023] = '\0'; /* defensive; snprintf already NUL-terminates */
  vprintf(newfmt, ap);
}

static void default_info_handler(const char *fmt, va_list ap)
{
  default_msg_handler("INFO: ", fmt, ap);
}

static void default_warn_handler(const char *fmt, va_list ap)
{
  default_msg_handler("WARNING: ", fmt, ap);
}

/* Print the error and terminate the process. Since exit() never
 * returns, va_end is invoked here; the caller's own va_end (in
 * pcerror) is unreachable on this path. */
static void default_error_handler(const char *fmt, va_list ap)
{
  default_msg_handler("ERROR: ", fmt, ap);
  va_end(ap);
  exit(1);
}
70 |
/* Reset the context to the stdlib/stdout defaults above. Call once at
 * startup before any allocation or message reporting. */
void pc_install_default_handlers(void)
{
  pc_context.alloc = default_allocator;
  pc_context.realloc = default_reallocator;
  pc_context.free = default_freeor;
  pc_context.err = default_error_handler;
  pc_context.info = default_info_handler;
  pc_context.warn = default_warn_handler;
}
80 |
81 | void pc_set_handlers(pc_allocator allocator, pc_reallocator reallocator,
82 | pc_deallocator deallocator,
83 | pc_message_handler error_handler,
84 | pc_message_handler info_handler,
85 | pc_message_handler warn_handler)
86 | {
87 | if (!allocator)
88 | allocator = pc_context.alloc;
89 | if (!reallocator)
90 | reallocator = pc_context.realloc;
91 | if (!deallocator)
92 | deallocator = pc_context.free;
93 | if (!error_handler)
94 | error_handler = pc_context.err;
95 | if (!warn_handler)
96 | warn_handler = pc_context.warn;
97 | if (!info_handler)
98 | info_handler = pc_context.info;
99 |
100 | pc_context.alloc = allocator;
101 | pc_context.realloc = reallocator;
102 | pc_context.free = deallocator;
103 | pc_context.err = error_handler;
104 | pc_context.warn = warn_handler;
105 | pc_context.info = info_handler;
106 | return;
107 | }
108 |
/* Allocate `size` zero-filled bytes through the installed allocator.
 * Returns NULL for a zero-size request. NOTE(review): the allocator's
 * result is not checked, so an out-of-memory condition crashes in
 * memset rather than being reported. */
void *pcalloc(size_t size)
{
  void *mem;
  if (!size)
    return NULL;
  mem = pc_context.alloc(size);
  memset(mem, 0, size); /* Always clean memory */
  return mem;
}
118 |
/* Duplicate a NUL-terminated string using the installed allocator. */
char *pcstrdup(const char *str)
{
  size_t bytes = strlen(str) + 1; /* include the terminator */
  char *copy = pcalloc(bytes);
  memcpy(copy, str, bytes);
  return copy;
}
126 |
/* Resize `mem` to `size` bytes via the configured reallocator.
 * NOTE(review): assumes realloc semantics (grown bytes are NOT zeroed,
 * unlike pcalloc) — confirm against the installed handler. */
void *pcrealloc(void *mem, size_t size)
{
	return pc_context.realloc(mem, size);
}
131 |
/* Release memory obtained from pcalloc/pcrealloc via the configured deallocator. */
void pcfree(void *mem) { pc_context.free(mem); }
133 |
134 | void pcerror(const char *fmt, ...)
135 | {
136 | va_list ap;
137 | va_start(ap, fmt);
138 | (*pc_context.err)(fmt, ap);
139 | va_end(ap);
140 | }
141 |
142 | void pcinfo(const char *fmt, ...)
143 | {
144 | va_list ap;
145 | va_start(ap, fmt);
146 | (*pc_context.info)(fmt, ap);
147 | va_end(ap);
148 | }
149 |
150 | void pcwarn(const char *fmt, ...)
151 | {
152 | va_list ap;
153 | va_start(ap, fmt);
154 | (*pc_context.warn)(fmt, ap);
155 | va_end(ap);
156 | }
157 |
--------------------------------------------------------------------------------
/doc/install.rst:
--------------------------------------------------------------------------------
1 | .. _install:
2 |
3 | ******************************************************************************
4 | Install
5 | ******************************************************************************
6 |
7 |
8 | .. contents::
9 | :depth: 3
10 | :backlinks: none
11 |
12 |
13 | ------------------------------------------------------------------------------
14 | Docker image
 15 | ------------------------------------------------------------------------------
16 |
17 | An official Docker image is available on `Docker Hub`_. To retrieve the
18 | development version:
19 |
20 | .. code-block:: console
21 |
22 | $ docker pull pgpointcloud/pointcloud
23 |
24 | .. _`Docker Hub`: https://hub.docker.com/r/pgpointcloud/pointcloud
25 |
26 |
27 | ------------------------------------------------------------------------------
28 | GNU/Linux
29 | ------------------------------------------------------------------------------
30 |
31 | **Debian**
32 |
33 | Debian packages are now `available`_ on:
34 |
35 | - Ubuntu 22.04
36 | - Debian Testing
37 | - Debian Unstable
38 | - Raspbian Testing
39 |
40 | .. _`available`: https://tracker.debian.org/pkg/pgpointcloud
41 |
42 |
43 | **AUR**
44 |
45 | pgPointcloud is available on Arch Linux through the `user repository`_.
46 |
47 | .. _`user repository`: https://aur.archlinux.org/packages/pgpointcloud
48 |
49 |
50 | ------------------------------------------------------------------------------
51 | Windows
52 | ------------------------------------------------------------------------------
53 |
54 | pgPointcloud is directly included in the `PostGIS`_ bundle.
55 |
56 | .. _`PostGIS`: https://postgis.net/windows_downloads/
57 |
58 |
59 | ------------------------------------------------------------------------------
60 | MacOS
61 | ------------------------------------------------------------------------------
62 |
63 | pgpointcloud is available on macOS via `MacPorts`_.
64 |
65 | .. _`MacPorts`: https://ports.macports.org/port/pgpointcloud/
66 |
67 |
68 | ------------------------------------------------------------------------------
 69 | Release tarballs
70 | ------------------------------------------------------------------------------
71 |
72 | **Current Release**
73 |
74 | * **19-09-2023** `pointcloud-1.2.5.tar.gz`_ (`Release Notes`_)
75 |
76 | .. _`Release Notes`: https://github.com/pgpointcloud/pointcloud/blob/v1.2.5/NEWS
77 |
78 |
79 | **Past Releases**
80 |
81 | * **26-09-2022** `pointcloud-1.2.4.tar.gz`_
82 | * **12-09-2022** `pointcloud-1.2.3.tar.gz`_
83 | * **10-05-2022** `pointcloud-1.2.2.tar.gz`_
84 | * **01-07-2020** `pointcloud-1.2.1.tar.gz`_
85 | * **22-08-2018** `pointcloud-1.2.0.tar.gz`_
86 | * **18-06-2018** `pointcloud-1.1.1.tar.gz`_
87 | * **30-04-2018** `pointcloud-1.1.0.tar.gz`_
88 | * **30-04-2018** `pointcloud-1.0.1.tar.gz`_
89 | * **23-10-2013** `pointcloud-0.1.0.tar.gz`_
90 |
91 | .. _`pointcloud-1.2.5.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.2.5.tar.gz
92 | .. _`pointcloud-1.2.4.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.2.4.tar.gz
93 | .. _`pointcloud-1.2.3.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.2.3.tar.gz
94 | .. _`pointcloud-1.2.2.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.2.2.tar.gz
95 | .. _`pointcloud-1.2.1.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.2.1.tar.gz
96 | .. _`pointcloud-1.2.0.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.2.0.tar.gz
97 | .. _`pointcloud-1.1.1.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.1.1.tar.gz
98 | .. _`pointcloud-1.1.0.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.1.0.tar.gz
99 | .. _`pointcloud-1.0.1.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v1.0.1.tar.gz
100 | .. _`pointcloud-0.1.0.tar.gz`: https://github.com/pgpointcloud/pointcloud/archive/v0.1.0.tar.gz
101 |
102 |
103 | .. _source:
104 |
105 | ------------------------------------------------------------------------------
106 | Sources
107 | ------------------------------------------------------------------------------
108 |
109 | The source code repository for pgPointcloud is on `GitHub`_. You can retrieve
110 | the development version with ``git``. See :ref:`build_sources` for
111 | instructions.
112 |
113 | .. code-block:: console
114 |
115 | $ git clone https://github.com/pgpointcloud/pointcloud/
116 |
117 | .. _`github`: https://github.com/pgpointcloud/pointcloud/
118 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pointcloud
2 |
3 | [![Release][release-image]][releases] [![Dockerhub][dockerhub-image]][dockerhub]
4 |
5 |    
6 |
7 | [release-image]: https://img.shields.io/badge/release-1.2.5-green.svg?style=plastic
8 | [releases]: https://github.com/pgpointcloud/pointcloud/releases
9 |
10 | [dockerhub-image]: https://img.shields.io/docker/pulls/pgpointcloud/pointcloud?logo=docker&label=pulls
11 | [dockerhub]: https://hub.docker.com/r/pgpointcloud/pointcloud
12 |
13 | A PostgreSQL extension for storing point cloud (LIDAR) data. See
14 | https://pgpointcloud.github.io/pointcloud/ for more information.
15 |
16 | ## Continuous integration
17 |
18 | | | w/o PostGIS | PostGIS 3.3 |
19 | | ------------------ |:-----------:|:-----------:|
20 | | PostgreSQL 13 | 
 | 
 |
21 | | PostgreSQL 14 | 
 | 
 |
22 | | PostgreSQL 15 | 
 | 
 |
23 | | PostgreSQL 16 | 
 | 
 |
24 | | PostgreSQL 17 | 
 | 
 |
25 |
--------------------------------------------------------------------------------
/NEWS:
--------------------------------------------------------------------------------
1 | 1.2.5, 2023-09-19
2 | -----------------
3 |
4 | - Bug fixes
5 | - Fix compilation with PostgreSQL 16 (#340)
6 |
7 | 1.2.4, 2022-09-26
8 | -----------------
9 |
10 | - Bug fixes
11 | - Add upgrade paths for pointcloud_postgis (#327)
12 |
13 | - Enhancements
14 | - Add support for PostgreSQL 15 (#318)
15 |
16 | 1.2.3, 2022-09-12
17 | -----------------
18 |
19 | - Bug fixes
20 | - Fix segmentation fault in pcpatch_transform (#322)
21 | - Fixes uninitialised schema cache during first call (#317)
22 | - Don't call SPI_finish() when erroring out (#312)
23 | - No crash when an invalid dimensional patch is compressed (#320)
24 |
25 | - Enhancements
26 | - Add documentation for debugging with Valgrind (#316)
27 | - Add documentation for debugging with GDB (#321)
28 |
29 | 1.2.2, 2022-05-10
30 | -----------------
31 |
32 | - Breaking changes
33 | - Drop support for PG 9.6
34 |
35 | - Bug fixes
36 | - Fix dump and restore #301 #306
37 | - Fix compilation with GCC 10.1 #273
38 | - Fix compilation with PostgreSQL 13 beta 1 #270
39 | - Fix Debian regression tests on 32-bits architecture #260
40 | - Fix CXXFLAGS #257
41 |
42 | - Enhancements
43 | - Add documentation for NESTED_QSORT #293
44 | - Add tutorial for schema and compression #299
45 | - Add Dockerfile with PostgreSQL 12 and PostGIS 3 #265
46 |
 47 | 1.2.1, 2020-07-01
48 | -----------------
49 |
50 | - Bug fixes
51 | - Fix compilation with PostgreSQL 11 #237
52 | - Fix compilation with PostgreSQL 12 #243
53 |
54 | - Enhancements
55 | - Improve documentation for PC_MemSize #232
56 | - Fix unit tests with PostgreSQL >= 10 #253
57 | - Fix typo in README #238
58 |
59 | 1.2.0, 2018-08-22
60 | -----------------
61 |
62 | - New features
63 | - Add PC_MakePatch(pcid integer, values float8[]) #220
64 | - Mark functions as PARALLEL SAFE #227
65 |
66 | - Bug fixes
67 | - patch fails to compress as GHT #35
68 | - libght breaks schema parsing #196
69 | - Fix memcpy size in pc_patch_wkb_set_int32 #226
70 |
71 | - Enhancements
72 | - Improve doc for PC_FilterBetween #217
73 | - Remove GHT support #214
74 | - Fix installcheck when lazperf is disabled #222
75 | - Fix compilation and installcheck when lazperf is disabled #224
76 |
77 | 1.1.1, 2018-06-12
78 | -----------------
79 |
80 | - Bug fixes
81 | - Fix bug in pc_patch_uncompressed_from_lazperf (#215)
82 |
83 | - Enhancements
84 | - Document creating a PC_EnvelopeGeometry-based index (#208)
85 | - Use PostgreSQL 9.6 with PostGIS 2.4 on Travis (#210)
86 | - Add missing description in AC_DEFINE (#213)
87 |
88 | 1.1.0, 2018-04-30
89 | -----------------
90 |
91 | - New functions
92 | - PC_PCId(pcpoint|pcpatch)
93 | - PC_Get(pcpoint) returns float8[]
94 | - PC_Summary(pcpatch)
95 | - PC_Patch{Min,Max,Avg}(pcpatch) returns pcpoint (#77)
96 | - PC_Upgrade([])
97 | - PC_Lib_Version(), PC_Script_Version() (#40)
98 | - PC_Sort(pcpatch,text[]) (#106)
99 | - PC_IsSorted(pcpatch,text[],boolean) (#106)
100 | - PC_Range(pcpatch, int, int) returns pcpatch (#152)
101 | - PC_BoundingDiagonalAsBinary(pcpatch) and PC_BoundingDiagonalGeometry(pcpach) (#158)
102 | - PC_SetPCId(pcpatch, int, float8 default 0.0) (#163)
103 | - PC_Transform(pcpatch, int, float8 default 0.0) (#165)
104 | - PC_Full_Version() (#201)
105 |
106 | - Enhancements
107 | - Support sigbits encoding for 64bit integers (#61)
108 | - Warn about truncated values (#68)
109 | - Warn about script/lib version mismatch (#40)
110 | - Compatibility with PostgreSQL 9.5 (#90)
111 | - Support LAZ compression for PcPatch (#105)
112 |
113 | 1.0.1, 2015-08-09
114 | -----------------
115 |
116 | - Bug fixes
117 | - #79, Fix PC_Filter stats computation with dimensionally-compressed
118 | patches
119 | - #78, Fix PC_Filter stats computation with scaled dimensions
120 | - #71, Fix crash in pc_schema_from_xml
121 | - #66, Fix crash in pc_schema_clone (unexploitable via SQL)
122 | - #37, Fix access to uninitialized variables
123 | - #31, Fix crash in pc_patch
124 | - #28, Fix uninterruptible loop from pc_schemaisvalid
125 | - #27, Fix pointcloud_columns view definition to handle
126 | null-typmoded cols
127 | - #23, Fix computation of dimensional patch extent
128 | - #26, Fix cmake build with GCC 4.8 (-fPIC)
129 |
130 | - Enhancements
131 | - #39, Provide upgrade scripts
132 |
133 | 1.0.0
134 | -----
135 |
136 | Initial release, also known as:
137 | - 0.1.0 (git tag v0.1.0)
138 | - 1.0 (extension script filename)
139 |
140 |
--------------------------------------------------------------------------------
/doc/concepts/schemas.rst:
--------------------------------------------------------------------------------
1 | .. _schemas:
2 |
3 | ********************************************************************************
4 | Schemas
5 | ********************************************************************************
6 |
7 | Much of the complexity in handling LIDAR comes from the need to deal with
8 | multiple variables per point. The variables captured by LIDAR sensors varies by
9 | sensor and capture process. Some data sets might contain only X/Y/Z values.
10 | Others will contain dozens of variables: X, Y, Z; intensity and return number;
11 | red, green, and blue values; return times; and many more. There is no
12 | consistency in how variables are stored: intensity might be stored in a 4-byte
13 | integer, or in a single byte; X/Y/Z might be doubles, or they might be scaled
14 | 4-byte integers.
15 |
16 | PostgreSQL Pointcloud deals with all this variability by using a "schema
17 | document" to describe the contents of any particular LIDAR point. Each point
18 | contains a number of dimensions, and each dimension can be of any data type,
19 | with scaling and/or offsets applied to move between the actual value and the
20 | value stored in the database. The schema document format used by PostgreSQL
21 | Pointcloud is the same one used by the PDAL_ library.
22 |
23 | Here is a simple 4-dimensional schema document you can insert into
24 | ``pointcloud_formats`` to work with the examples below:
25 |
.. code-block:: sql

    INSERT INTO pointcloud_formats (pcid, srid, schema) VALUES (1, 4326,
    '<?xml version="1.0" encoding="UTF-8"?>
    <pc:PointCloudSchema xmlns:pc="http://pointcloud.org/schemas/PC/1.1"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
      <pc:dimension>
        <pc:position>1</pc:position>
        <pc:size>4</pc:size>
        <pc:description>X coordinate as a long integer. You must use the
          scale and offset information of the header to
          determine the double value.</pc:description>
        <pc:name>X</pc:name>
        <pc:interpretation>int32_t</pc:interpretation>
        <pc:scale>0.01</pc:scale>
      </pc:dimension>
      <pc:dimension>
        <pc:position>2</pc:position>
        <pc:size>4</pc:size>
        <pc:description>Y coordinate as a long integer. You must use the
          scale and offset information of the header to
          determine the double value.</pc:description>
        <pc:name>Y</pc:name>
        <pc:interpretation>int32_t</pc:interpretation>
        <pc:scale>0.01</pc:scale>
      </pc:dimension>
      <pc:dimension>
        <pc:position>3</pc:position>
        <pc:size>4</pc:size>
        <pc:description>Z coordinate as a long integer. You must use the
          scale and offset information of the header to
          determine the double value.</pc:description>
        <pc:name>Z</pc:name>
        <pc:interpretation>int32_t</pc:interpretation>
        <pc:scale>0.01</pc:scale>
      </pc:dimension>
      <pc:dimension>
        <pc:position>4</pc:position>
        <pc:size>2</pc:size>
        <pc:description>The intensity value is the integer representation
          of the pulse return magnitude. This value is optional
          and system specific. However, it should always be
          included if available.</pc:description>
        <pc:name>Intensity</pc:name>
        <pc:interpretation>uint16_t</pc:interpretation>
        <pc:scale>1</pc:scale>
      </pc:dimension>
      <pc:metadata>
        <Metadata name="compression">dimensional</Metadata>
      </pc:metadata>
    </pc:PointCloudSchema>
    ');
77 |
78 |
79 | Schema documents are stored in the ``pointcloud_formats`` table, along with a
80 | ``pcid`` or "pointcloud identifier". Rather than store the whole schema
81 | information with each database object, each object just has a ``pcid``, which
82 | serves as a key to find the schema in ``pointcloud_formats``. This is similar
83 | to the way the ``srid`` is resolved for spatial reference system support in
84 | PostGIS_.
85 |
86 | The central role of the schema document in interpreting the contents of a point
87 | cloud object means that care must be taken to ensure that the right ``pcid``
88 | reference is being used in objects, and that it references a valid schema
89 | document in the ``pointcloud_formats`` table.
90 |
91 | .. _PDAL: https://pdal.io/
92 | .. _PostGIS: http://postgis.net/
93 |
--------------------------------------------------------------------------------
/lib/pc_dimstats.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_dimstats.c
3 | *
4 | * Support for "dimensional compression", which is a catch-all
5 | * term for applying compression separately on each dimension
6 | * of a PCPATCH collection of PCPOINTS.
7 | *
8 | * Depending on the character of the data, one of these schemes
9 | * will be used:
10 | *
11 | * - run-length encoding
12 | * - significant-bit removal
13 | * - deflate
14 | *
15 | * PgSQL Pointcloud is free and open source software provided
16 | * by the Government of Canada
17 | * Copyright (c) 2013 Natural Resources Canada
18 | *
19 | ***********************************************************************/
20 |
21 | #include "pc_api_internal.h"
22 | #include "stringbuffer.h"
23 | #include
24 | #include
25 |
26 | PCDIMSTATS *pc_dimstats_make(const PCSCHEMA *schema)
27 | {
28 | PCDIMSTATS *pds;
29 | pds = pcalloc(sizeof(PCDIMSTATS));
30 | pds->ndims = schema->ndims;
31 | pds->stats = pcalloc(pds->ndims * sizeof(PCDIMSTAT));
32 | return pds;
33 | }
34 |
35 | void pc_dimstats_free(PCDIMSTATS *pds)
36 | {
37 | if (pds->stats)
38 | pcfree(pds->stats);
39 | pcfree(pds);
40 | }
41 | /*
42 | typedef struct
43 | {
44 | uint32_t total_runs;
45 | uint32_t total_commonbits;
46 | uint32_t recommended_compression;
47 | } PCDIMSTAT;
48 |
49 | typedef struct
50 | {
51 | int32_t ndims;
52 | uint32_t total_points;
53 | uint32_t total_patches;
54 | PCDIMSTAT *stats;
55 | } PCDIMSTATS;
56 | */
57 |
58 | char *pc_dimstats_to_string(const PCDIMSTATS *pds)
59 | {
60 | int i;
61 | stringbuffer_t *sb = stringbuffer_create();
62 | char *str;
63 |
64 | stringbuffer_aprintf(
65 | sb, "{\"ndims\":%d,\"total_points\":%d,\"total_patches\":%d,\"dims\":[",
66 | pds->ndims, pds->total_points, pds->total_patches);
67 |
68 | for (i = 0; i < pds->ndims; i++)
69 | {
70 | if (i)
71 | stringbuffer_append(sb, ",");
72 | stringbuffer_aprintf(sb,
73 | "{\"total_runs\":%d,\"total_commonbits\":%d,"
74 | "\"recommended_compression\":%d}",
75 | pds->stats[i].total_runs,
76 | pds->stats[i].total_commonbits,
77 | pds->stats[i].recommended_compression);
78 | }
79 | stringbuffer_append(sb, "]}");
80 |
81 | str = stringbuffer_getstringcopy(sb);
82 | stringbuffer_destroy(sb);
83 | return str;
84 | }
85 |
86 | int pc_dimstats_update(PCDIMSTATS *pds, const PCPATCH_DIMENSIONAL *pdl)
87 | {
88 | int i;
89 | const PCSCHEMA *schema = pdl->schema;
90 |
91 | /* Update global stats */
92 | pds->total_points += pdl->npoints;
93 | pds->total_patches += 1;
94 |
95 | /* Update dimensional stats */
96 | for (i = 0; i < pds->ndims; i++)
97 | {
98 | PCBYTES pcb = pdl->bytes[i];
99 | pds->stats[i].total_runs += pc_bytes_run_count(&pcb);
100 | pds->stats[i].total_commonbits += pc_bytes_sigbits_count(&pcb);
101 | }
102 |
103 | /* Update recommended compression schema */
104 | for (i = 0; i < pds->ndims; i++)
105 | {
106 | PCDIMENSION *dim = pc_schema_get_dimension(schema, i);
107 | /* Uncompressed size, foreach point, one value entry */
108 | double raw_size = pds->total_points * dim->size;
109 | /* RLE size, for each run, one count byte and one value entry */
110 | double rle_size = pds->stats[i].total_runs * (dim->size + 1);
111 | /* Sigbits size, for each patch, one header and n bits for each entry */
112 | double avg_commonbits_per_patch =
113 | pds->stats[i].total_commonbits / pds->total_patches;
114 | double avg_uniquebits_per_patch = 8 * dim->size - avg_commonbits_per_patch;
115 | double sigbits_size = pds->total_patches * 2 * dim->size +
116 | pds->total_points * avg_uniquebits_per_patch / 8;
117 | /* Default to ZLib */
118 | pds->stats[i].recommended_compression = PC_DIM_ZLIB;
119 | /* Only use rle and sigbits compression on integer values */
120 | /* If we can do better than 4:1 we might beat zlib */
121 | if (dim->interpretation != PC_DOUBLE)
122 | {
123 | /* If sigbits is better than 4:1, use that */
124 | if (raw_size / sigbits_size > 1.6)
125 | {
126 | pds->stats[i].recommended_compression = PC_DIM_SIGBITS;
127 | }
128 | /* If RLE size is even better, use that. */
129 | if (raw_size / rle_size > 4.0)
130 | {
131 | pds->stats[i].recommended_compression = PC_DIM_RLE;
132 | }
133 | }
134 | }
135 | return PC_SUCCESS;
136 | }
137 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build
DATE=$(shell date +'%y.%m.%d %H:%M:%S')

# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

# Fix: the `pdf` target (defined below) was missing from .PHONY, so a stray
# file named "pdf" would silently shadow it.
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest doxygen pdf
17 |
# List the available build targets. Fix: the "<target>" placeholders were
# lost from the first line (it read "make ' where is one of"); restored and
# the target column re-aligned.
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  pdf        to make PDF files"
	@echo "  man        to make manual pages"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
37 |
# Remove all build output, including generated Doxygen XML/HTML.
clean:
	-rm -rf $(BUILDDIR)/*
	-rm -rf doxygen/xml*;
	-rm -rf doxygen/html*;

# Standalone HTML build; .nojekyll is touched so underscore-prefixed dirs
# survive publication (presumably GitHub Pages — confirm).
html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@touch $(BUILDDIR)/html/.nojekyll
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	".hhp project file in $(BUILDDIR)/htmlhelp."

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	"(use \`make latexpdf' here to do that automatically)."

# LaTeX build followed by a pdflatex pass inside the latex build dir.
latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	"or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	"results in $(BUILDDIR)/doctest/output.txt."

# Direct PDF build (requires a Sphinx pdf builder such as rst2pdf).
pdf:
	$(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) $(BUILDDIR)/pdf
	@echo
	@echo "Build finished. The PDF files are in _build/pdf."
123 |
--------------------------------------------------------------------------------
/doc/functions/postgis.rst:
--------------------------------------------------------------------------------
1 | .. _postgis:
2 |
3 | ********************************************************************************
4 | PostGIS
5 | ********************************************************************************
6 |
7 | The ``pointcloud_postgis`` extension adds functions that allow you to use
8 | PostgreSQL Pointcloud with PostGIS, converting ``PcPoint`` and ``PcPatch`` to
9 | Geometry and doing spatial filtering on point cloud data. The
10 | ``pointcloud_postgis`` extension depends on both the ``postgis`` and
``pointcloud`` extensions, so they must be installed first:
12 |
13 | .. code-block:: sql
14 |
15 | CREATE EXTENSION postgis;
16 | CREATE EXTENSION pointcloud;
17 | CREATE EXTENSION pointcloud_postgis;
18 |
19 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 | Geometry
21 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 |
23 | :Geometry(pcpoint) returns geometry:
24 |
25 | :pcpoint::geometry returns geometry:
26 |
27 | Casts ``PcPoint`` to the PostGIS geometry equivalent, placing the x/y/z/m of the
28 | ``PcPoint`` into the x/y/z/m of the PostGIS point.
29 |
30 | .. code-block::
31 |
32 | SELECT ST_AsText(PC_MakePoint(1, ARRAY[-127, 45, 124.0, 4.0])::geometry);
33 |
34 | POINT Z (-127 45 124)
35 |
36 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
37 | PC_BoundingDiagonalGeometry
38 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
39 |
40 | :PC_BoundingDiagonalGeometry(pcpatch) returns geometry:
41 |
42 | Returns the bounding diagonal of a patch. This is a LineString (2D), a
43 | LineString Z or a LineString M or a LineString ZM, based on the existence of
44 | the Z and M dimensions in the patch. This function is useful for creating an
45 | index on a patch column.
46 |
47 | .. code-block::
48 |
49 | SELECT ST_AsText(PC_BoundingDiagonalGeometry(pa)) FROM patches;
50 | st_astext
51 | ------------------------------------------------
52 | LINESTRING Z (-126.99 45.01 1,-126.91 45.09 9)
53 | LINESTRING Z (-126 46 100,-126 46 100)
54 | LINESTRING Z (-126.2 45.8 80,-126.11 45.89 89)
55 | LINESTRING Z (-126.4 45.6 60,-126.31 45.69 69)
56 | LINESTRING Z (-126.3 45.7 70,-126.21 45.79 79)
57 | LINESTRING Z (-126.8 45.2 20,-126.71 45.29 29)
58 | LINESTRING Z (-126.5 45.5 50,-126.41 45.59 59)
59 | LINESTRING Z (-126.6 45.4 40,-126.51 45.49 49)
60 | LINESTRING Z (-126.9 45.1 10,-126.81 45.19 19)
61 | LINESTRING Z (-126.7 45.3 30,-126.61 45.39 39)
62 | LINESTRING Z (-126.1 45.9 90,-126.01 45.99 99)
63 |
64 | For example, this is how one may want to create an index:
65 |
66 | .. code-block::
67 |
68 | CREATE INDEX ON patches USING GIST(PC_BoundingDiagonalGeometry(patch) gist_geometry_ops_nd);
69 |
70 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
71 | PC_EnvelopeGeometry
72 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
73 |
74 | :PC_EnvelopeGeometry(pcpatch) returns geometry:
75 |
76 | Returns the 2D bounds of the patch as a PostGIS Polygon 2D. Useful for
77 | performing 2D intersection tests with PostGIS geometries.
78 |
79 | .. code-block::
80 |
81 | SELECT ST_AsText(PC_EnvelopeGeometry(pa)) FROM patches LIMIT 1;
82 |
83 | POLYGON((-126.99 45.01,-126.99 45.09,-126.91 45.09,-126.91 45.01,-126.99 45.01))
84 |
85 | For example, this is how one may want to create an index:
86 |
87 | .. code-block::
88 |
89 | CREATE INDEX ON patches USING GIST(PC_EnvelopeGeometry(patch));
90 |
91 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
92 | PC_Intersection
93 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
94 |
95 | :PC_Intersection(pcpatch, geometry) returns pcpatch:
96 |
97 | Returns a PcPatch which only contains points that intersected the geometry.
98 |
99 | .. code-block::
100 |
101 | SELECT PC_AsText(PC_Explode(PC_Intersection(
102 | pa,
103 | 'SRID=4326;POLYGON((-126.451 45.552, -126.42 47.55, -126.40 45.552, -126.451 45.552))'::geometry
104 | )))
105 | FROM patches WHERE id = 7;
106 |
107 | pc_astext
108 | --------------------------------------
109 | {"pcid":1,"pt":[-126.44,45.56,56,5]}
110 | {"pcid":1,"pt":[-126.43,45.57,57,5]}
111 | {"pcid":1,"pt":[-126.42,45.58,58,5]}
112 | {"pcid":1,"pt":[-126.41,45.59,59,5]}
113 |
114 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
115 | PC_Intersects
116 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
117 |
118 | :PC_Intersects(p pcpatch, g geometry) returns boolean:
119 |
120 | :PC_Intersects(g geometry, p pcpatch) returns boolean:
121 |
122 | Returns true if the bounds of the patch intersect the geometry.
123 |
124 | .. code-block::
125 |
126 | SELECT PC_Intersects('SRID=4326;POINT(-126.451 45.552)'::geometry, pa)
127 | FROM patches WHERE id = 7;
128 |
129 | t
130 |
--------------------------------------------------------------------------------
/doc/_static/sphinx.css:
--------------------------------------------------------------------------------
/**
 * Sphinx stylesheet -- default theme
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

@import url("basic.css");

/* -- page layout ----------------------------------------------------------- */

body {
    font-family: Helvetica, Arial, sans-serif;
    font-size: 100%;
    background-color: #FFF;
    color: #555;
    margin: 0;
    padding: 0;
}

div.documentwrapper {
    float: left;
    width: 100%;
}

/* Left margin reserves room for the fixed-width sidebar. */
div.bodywrapper {
    margin: 0 0 0 230px;
}

hr{
    border: 1px solid #B1B4B6;
}

div.document {
    background-color: #eee;
}

div.body {
    background-color: #ffffff;
    color: #3E4349;
    padding: 0 30px 30px 30px;
    font-size: 0.8em;
}

/* Footer and "related" bar share the site's blue (#2c7fb8). */
div.footer {
    color: #fff;
    width: 100%;
    padding: 13px 0;
    text-align: center;
    font-size: 75%;
    background-color: #2c7fb8;
}

div.footer a {
    color: #eee;
}

div.footer a.hover {
    color: #eee;
    text-decoration: underline;
}

div.related {
    background-color: #2c7fb8;
    line-height: 32px;
    color: #fff;
    font-size: 0.80em;
}

div.related a {
    color: #fff;
}

/* -- sidebar --------------------------------------------------------------- */

div.sphinxsidebar {
    font-size: 0.75em;
    line-height: 1.5em;
    z-index: 1000;
}

div.sphinxsidebarwrapper{
    padding: 20px 0;
    background-color: #b6f2fb;
}

div.sphinxsidebar h3,
div.sphinxsidebar h4 {
    font-family: Arial, sans-serif;
    color: #222;
    font-size: 1.2em;
    font-weight: normal;
    margin: 0;
    padding: 5px 10px;
    background-color: #b6f2fb;

}

div.sphinxsidebar h4{
    font-size: 1.1em;
}

div.sphinxsidebar h3 a {
    color: #444;
}


div.sphinxsidebar p {
    color: #888;
    padding: 5px 20px;
}

div.sphinxsidebar p.topless {
}

div.sphinxsidebar ul {
    margin: 10px 20px;
    padding: 0;
    color: #000;
}

div.sphinxsidebar a {
    color: #444;
}

div.sphinxsidebar input {
    border: 1px solid #ccc;
    font-family: sans-serif;
    font-size: 1em;
}

div.sphinxsidebar input[type=text]{
    margin-left: 20px;
}

/* -- body styles ----------------------------------------------------------- */

a {
    color: #1A1A56;
    text-decoration: underline;
}

a:hover {
    color: #A33333;
    text-decoration: underline;
}

/* All heading levels share a grey band with a subtle white text shadow. */
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
    font-family: Helvetica, Arial, sans-serif;
    background-color: #ccc;
    font-weight: normal;
    color: #000;
    margin: 30px 0px 10px 0px;
    padding: 5px 0 5px 10px;
    text-shadow: 0px 1px 0 white
}

div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; }
div.body h2 { font-size: 150%; background-color: #ccc; }
div.body h3 { font-size: 120%; background-color: #ccc; }
div.body h4 { font-size: 110%; background-color: #ccc; }
div.body h5 { font-size: 100%; background-color: #ccc; }
div.body h6 { font-size: 100%; background-color: #ccc; }

h1 a,
h2 a,
h3 a,
h1 a:hover,
h2 a:hover,
h3 a:hover {
    text-decoration: none;
}

/* Sphinx "¶" permalink anchors next to headings. */
a.headerlink {
    color: #c60f0f;
    font-size: 0.8em;
    padding: 0 4px 0 4px;
    text-decoration: none;
}

a.headerlink:hover {
    background-color: #c60f0f;
    color: white;
}

div.body p, div.body dd, div.body li {
    line-height: 1.5em;
}

div.admonition p.admonition-title + p {
    display: inline;
}

div.highlight{
    background-color: white;
}

div.note {
    background-color: #eee;
    border: 1px solid #ccc;
}

div.seealso {
    background-color: #eee;
    border: 1px solid #ccc;
}

div.topic {
    background-color: #eee;
}

div.warning {
    background-color: #ffe4e4;
    border: 1px solid #f66;
}

div.admonition#embed{
    background-color: #ffe4e4;
    border: 1px solid #f66;
}

p.admonition-title {
    display: inline;
}

p.admonition-title:after {
    content: ":";
}

/* Code blocks. */
pre {
    padding: 10px;
    background-color: #eee;
    color: #222;
    line-height: 1.2em;
    border: 1px solid #C6C9CB;
    font-size: 1.2em;
    margin: 1.5em 0 1.5em 0;
    -webkit-box-shadow: 1px 1px 1px #d8d8d8;
    -moz-box-shadow: 1px 1px 1px #d8d8d8;
}

/* Inline literals. */
tt {
    background-color: #ecf0f3;
    color: #222;
    padding: 1px 2px;
    font-size: 1.2em;
    font-family: monospace;
}
--------------------------------------------------------------------------------
/doc/tutorials/storing.rst:
--------------------------------------------------------------------------------
1 | ******************************************************************************
2 | Storing points
3 | ******************************************************************************
4 |
5 | This tutorial is a basic introduction to pgPointcloud to store points in a
6 | PostgreSQL database hosted on a Docker container.
7 |
8 | ------------------------------------------------------------------------------
9 | Start Docker container
10 | ------------------------------------------------------------------------------
11 |
First we download the latest tag of the pgPointcloud Docker image:
13 |
14 | .. code-block:: bash
15 |
16 | $ docker pull pgpointcloud/pointcloud
17 |
18 | This Docker image is based on the official PostgreSQL image and the full
19 | documentation is available `here`_.
20 |
21 | For a basic usage, we have to define two environment variables:
22 |
23 | + the PostgreSQL database: ``POSTGRES_DB``
24 | + the PostgreSQL password: ``POSTGRES_PASSWORD``
25 |
26 | Then we can start a new container:
27 |
28 | .. code-block:: bash
29 |
30 | $ docker run --name pgpointcloud -e POSTGRES_DB=pointclouds -e POSTGRES_PASSWORD=mysecretpassword -d pgpointcloud/pointcloud
31 |
32 | Extensions are automatically created in the new database named ``pointclouds``:
33 |
34 | .. code-block:: bash
35 |
36 | $ docker exec -it pgpointcloud psql -U postgres -d pointclouds -c "\dx"
37 | List of installed extensions
38 | Name | Version | Schema | Description
39 | ------------------------+---------+------------+---------------------------------------------------------------------
40 | fuzzystrmatch | 1.1 | public | determine similarities and distance between strings
41 | plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language
42 | pointcloud | 1.2.1 | public | data type for lidar point clouds
43 | pointcloud_postgis | 1.2.1 | public | integration for pointcloud LIDAR data and PostGIS geometry data
44 | postgis | 3.0.1 | public | PostGIS geometry, geography, and raster spatial types and functions
45 | postgis_tiger_geocoder | 3.0.1 | tiger | PostGIS tiger geocoder and reverse geocoder
46 | postgis_topology | 3.0.1 | topology | PostGIS topology spatial types and functions
47 | (7 rows)
48 |
49 | .. _`here`: https://hub.docker.com/_/postgres
50 |
51 | ------------------------------------------------------------------------------
52 | Run PDAL pipeline
53 | ------------------------------------------------------------------------------
54 |
55 | For the need of the tutorial, we can download sample data from the `PDAL`_
56 | organization:
57 |
58 | .. code-block:: bash
59 |
    $ wget https://github.com/PDAL/data/raw/main/liblas/LAS12_Sample_withRGB_Quick_Terrain_Modeler_fixed.laz -P /tmp
61 |
62 | Thanks to the ``pdal info`` command, we can obtain some information on the dataset:
63 |
64 | + Number of points: 3811489
65 | + Spatial reference: EPSG:32616
66 |
67 | To configure the json PDAL pipeline, we need to set up the ``connection``
68 | parameter for the ``pgpointcloud`` writer. To do that, the Docker container IP
address on which the PostgreSQL database is running is necessary:
70 |
71 | .. code-block:: bash
72 |
73 | $ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' pgpointcloud
74 | 172.17.0.2
75 |
76 |
77 | So the ``pipeline.json`` file looks like:
78 |
79 | .. code-block:: json
80 |
81 | {
82 | "pipeline":[
83 | {
84 | "type":"readers.las",
85 | "filename":"/tmp/LAS12_Sample_withRGB_Quick_Terrain_Modeler_fixed.laz"
86 | },
87 | {
88 | "type":"filters.chipper",
89 | "capacity":"400"
90 | },
91 | {
92 | "type":"writers.pgpointcloud",
93 | "connection":"host='172.17.0.2' dbname='pointclouds' user='postgres' password='mysecretpassword' port='5432'",
94 | "table":"airport",
95 | "compression":"dimensional",
96 | "srid":"32616"
97 | }
98 | ]
99 | }
100 |
The PDAL pipeline can finally be executed with ``pdal pipeline pipeline.json``
102 | and an ``airport`` table is created.
103 |
104 |
105 | .. _`PDAL`: https://github.com/PDAL
106 |
107 | ------------------------------------------------------------------------------
108 | Configure connection service file
109 | ------------------------------------------------------------------------------
110 |
111 | To facilitate the access to the database hosted on the Docker container, we can
112 | configure the PostgreSQL connection service file:
113 |
114 | .. code-block:: bash
115 |
116 | [pgpointcloud]
117 | host=172.17.0.2
118 | port=5432
119 | dbname=pointclouds
120 | user=postgres
121 | password=mysecretpassword
122 |
123 | Then we can explore the content of the new ``airport`` table:
124 |
125 | .. code-block:: bash
126 |
127 | $ psql service=pgpointcloud
128 | psql (12.3)
129 | Type "help" for help.
130 |
131 | pointclouds=# SELECT COUNT(*) FROM airport;
132 | count
133 | -------
134 | 9529
135 | (1 row)
136 |
In this case, we have ``9529`` patches containing ``400`` points (the capacity
138 | the chipper filter), meaning about ``3811600`` points. So the last patch isn't
139 | fully filled.
140 |
--------------------------------------------------------------------------------
/doc/concepts/binary.rst:
--------------------------------------------------------------------------------
1 | .. _binary_formats:
2 |
3 | ********************************************************************************
4 | Binary Formats
5 | ********************************************************************************
6 |
7 | In order to preserve some compactness in dump files and network transmissions,
8 | the binary formats need to retain their native compression. All binary formats
9 | are hex-encoded before output.
10 |
11 | The point and patch binary formats start with a common header, which provides:
12 |
13 | - endianness flag, to allow portability between architectures
14 | - pcid number, to look up the schema information in the ``pointcloud_formats`` table
15 |
16 | The patch binary formats have additional standard header information:
17 |
18 | - the compression number, which indicates how to interpret the data
19 | - the number of points in the patch
20 |
21 | --------------------------------------------------------------------------------
22 | Point Binary
23 | --------------------------------------------------------------------------------
24 |
25 | .. code-block::
26 |
27 | byte: endianness (1 = NDR, 0 = XDR)
28 | uint32: pcid (key to POINTCLOUD_SCHEMAS)
29 | uchar[]: pointdata (interpret relative to pcid)
30 |
31 | --------------------------------------------------------------------------------
32 | Patch Binary
33 | --------------------------------------------------------------------------------
34 |
35 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
36 | Uncompressed
37 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
38 |
39 | .. code-block::
40 |
41 | byte: endianness (1 = NDR, 0 = XDR)
42 | uint32: pcid (key to POINTCLOUD_SCHEMAS)
43 | uint32: 0 = no compression
44 | uint32: npoints
45 | pointdata[]: interpret relative to pcid
46 |
47 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
48 | Dimensional
49 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50 |
51 | .. code-block::
52 |
53 | byte: endianness (1 = NDR, 0 = XDR)
54 | uint32: pcid (key to POINTCLOUD_SCHEMAS)
55 | uint32: 2 = dimensional compression
56 | uint32: npoints
57 | dimensions[]: dimensionally compressed data for each dimension
58 |
59 | Each compressed dimension starts with a byte, that gives the compression type,
60 | and then a uint32 that gives the size of the segment in bytes.
61 |
62 | .. code-block::
63 |
64 | byte: dimensional compression type (0-3)
65 | uint32: size of the compressed dimension in bytes
66 | data[]: the compressed dimensional values
67 |
68 | There are four possible compression types used in dimensional compression:
69 |
70 | - no compression = 0,
71 | - run-length compression = 1,
72 | - significant bits removal = 2,
73 | - deflate = 3
74 |
**No dimension compression**
76 |
77 | For dimensional compression 0 (no compression) the values just appear in order.
78 | The length of words in this dimension must be determined from the schema
79 | document.
80 |
81 | .. code-block::
82 |
83 | word[]:
84 |
**Run-length compressed dimension**
86 |
87 | For run-length compression, the data stream consists of a set of pairs: a byte
88 | value indicating the length of the run, and a data value indicating the value
89 | that is repeated.
90 |
91 | .. code-block::
92 |
93 | byte: number of times the word repeats
94 | word: value of the word being repeated
95 | .... repeated for the number of runs
96 |
97 | The length of words in this dimension must be determined from the schema document.
98 |
99 | **Significant bits removal on dimension**
100 |
101 | Significant bits removal starts with two words. The first word just gives the
102 | number of bits that are "significant", that is the number of bits left after
103 | the common bits are removed from any given word. The second word is a bitmask
104 | of the common bits, with the final, variable bits zeroed out.
105 |
106 | .. code-block::
107 |
108 | word1: number of variable bits in this dimension
109 | word2: the bits that are shared by every word in this dimension
110 | data[]: variable bits packed into a data buffer
111 |
112 | **Deflate dimension**
113 |
114 | Where simple compression schemes fail, general purpose compression is applied
115 | to the dimension using zlib. The data area is a raw zlib buffer suitable for
116 | passing directly to the inflate() function. The size of the input buffer is
117 | given in the common dimension header. The size of the output buffer can be
118 | derived from the patch metadata by multiplying the dimension word size by the
119 | number of points in the patch.
120 |
121 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
122 | LAZ
123 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
124 |
125 | .. code-block::
126 |
127 | byte: endianness (1 = NDR, 0 = XDR)
128 | uint32: pcid (key to POINTCLOUD_SCHEMAS)
    uint32: 3 = LAZ compression
130 | uint32: npoints
131 | uint32: LAZ data size
132 | data[]: LAZ data
133 |
134 | Use laz-perf_ library to read the LAZ data buffer out into a LAZ buffer.
135 |
136 | .. _`laz-perf`: https://github.com/hobu/laz-perf
137 |
--------------------------------------------------------------------------------
/lib/pc_stats.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_stats.c
3 | *
 * Pointcloud patch statistics generation.
5 | *
6 | * Copyright (c) 2013 OpenGeo
7 | *
8 | ***********************************************************************/
9 |
10 | #include "pc_api_internal.h"
#include <float.h>
12 |
13 | /*
14 | * Instantiate a new PCDOUBLESTATS for calculation, and set up
15 | * initial values for min/max/sum
16 | */
17 | static PCDOUBLESTATS *pc_dstats_new(int ndims)
18 | {
19 | int i;
20 | PCDOUBLESTATS *stats = pcalloc(sizeof(PCDOUBLESTATS));
21 | stats->dims = pcalloc(sizeof(PCDOUBLESTAT) * ndims);
22 | for (i = 0; i < ndims; i++)
23 | {
24 | stats->dims[i].min = DBL_MAX;
25 | stats->dims[i].max = -1 * DBL_MAX;
26 | stats->dims[i].sum = 0;
27 | }
28 | stats->npoints = 0;
29 | return stats;
30 | }
31 |
32 | static void pc_dstats_free(PCDOUBLESTATS *stats)
33 | {
34 | if (!stats)
35 | return;
36 | if (stats->dims)
37 | pcfree(stats->dims);
38 | pcfree(stats);
39 | return;
40 | }
41 |
42 | /**
43 | * Free the standard stats object for in memory patches
44 | */
45 | void pc_stats_free(PCSTATS *stats)
46 | {
47 | if (stats->min.readonly != PC_TRUE)
48 | pcfree(stats->min.data);
49 |
50 | if (stats->max.readonly != PC_TRUE)
51 | pcfree(stats->max.data);
52 |
53 | if (stats->avg.readonly != PC_TRUE)
54 | pcfree(stats->avg.data);
55 |
56 | pcfree(stats);
57 | return;
58 | }
59 |
60 | /**
61 | * Build a standard stats object on top of a serialization, allocate just the
62 | * point shells and set the pointers to look into the data area of the
63 | * serialization.
64 | */
65 | PCSTATS *pc_stats_new_from_data(const PCSCHEMA *schema, const uint8_t *mindata,
66 | const uint8_t *maxdata, const uint8_t *avgdata)
67 | {
68 | /*size_t sz = schema->size;*/
69 | PCSTATS *stats = pcalloc(sizeof(PCSTATS));
70 | /* All share the schema with the patch */
71 | stats->min.schema = schema;
72 | stats->max.schema = schema;
73 | stats->avg.schema = schema;
74 | /* Data points into serialization */
75 | stats->min.data = (uint8_t *)mindata;
76 | stats->max.data = (uint8_t *)maxdata;
77 | stats->avg.data = (uint8_t *)avgdata;
78 | /* Can't modify external data */
79 | stats->min.readonly = PC_TRUE;
80 | stats->max.readonly = PC_TRUE;
81 | stats->avg.readonly = PC_TRUE;
82 | /* Done */
83 | return stats;
84 | }
85 |
86 | /**
87 | * Build a standard stats object with read/write memory, allocate the
88 | * point shells and the data areas underneath. Used for initial calcution
89 | * of patch stats, when objects first created.
90 | */
91 | PCSTATS *pc_stats_new(const PCSCHEMA *schema)
92 | {
93 | /*size_t sz = schema->size;*/
94 | PCSTATS *stats = pcalloc(sizeof(PCSTATS));
95 | stats->min.schema = schema;
96 | stats->max.schema = schema;
97 | stats->avg.schema = schema;
98 | stats->min.readonly = PC_FALSE;
99 | stats->max.readonly = PC_FALSE;
100 | stats->avg.readonly = PC_FALSE;
101 | stats->min.data = pcalloc(schema->size);
102 | stats->max.data = pcalloc(schema->size);
103 | stats->avg.data = pcalloc(schema->size);
104 | return stats;
105 | }
106 |
107 | /**
108 | * Allocate and populate a new PCSTATS from the raw data in
109 | * a PCDOUBLESTATS
110 | */
111 | static PCSTATS *pc_stats_new_from_dstats(const PCSCHEMA *schema,
112 | const PCDOUBLESTATS *dstats)
113 | {
114 | int i;
115 | PCSTATS *stats = pc_stats_new(schema);
116 |
117 | for (i = 0; i < schema->ndims; i++)
118 | {
119 | pc_point_set_double(&(stats->min), schema->dims[i], dstats->dims[i].min);
120 | pc_point_set_double(&(stats->max), schema->dims[i], dstats->dims[i].max);
121 | pc_point_set_double(&(stats->avg), schema->dims[i],
122 | dstats->dims[i].sum / dstats->npoints);
123 | }
124 | return stats;
125 | }
126 |
127 | PCSTATS *pc_stats_clone(const PCSTATS *stats)
128 | {
129 | PCSTATS *s;
130 | if (!stats)
131 | return NULL;
132 | s = pcalloc(sizeof(PCSTATS));
133 | s->min.readonly = s->max.readonly = s->avg.readonly = PC_FALSE;
134 | s->min.schema = stats->min.schema;
135 | s->max.schema = stats->max.schema;
136 | s->avg.schema = stats->avg.schema;
137 | s->min.data = pcalloc(stats->min.schema->size);
138 | s->max.data = pcalloc(stats->max.schema->size);
139 | s->avg.data = pcalloc(stats->avg.schema->size);
140 | memcpy(s->min.data, stats->min.data, stats->min.schema->size);
141 | memcpy(s->max.data, stats->max.data, stats->max.schema->size);
142 | memcpy(s->avg.data, stats->avg.data, stats->avg.schema->size);
143 | return s;
144 | }
145 |
146 | int pc_patch_uncompressed_compute_stats(PCPATCH_UNCOMPRESSED *pa)
147 | {
148 | int i, j;
149 | const PCSCHEMA *schema = pa->schema;
150 | double val;
151 | PCDOUBLESTATS *dstats = pc_dstats_new(pa->schema->ndims);
152 |
153 | if (pa->stats)
154 | pc_stats_free(pa->stats);
155 |
156 | /* Point on stack for fast access to values in patch */
157 | PCPOINT pt;
158 | pt.readonly = PC_TRUE;
159 | pt.schema = schema;
160 | pt.data = pa->data;
161 |
162 | /* We know npoints right away */
163 | dstats->npoints = pa->npoints;
164 |
165 | for (i = 0; i < pa->npoints; i++)
166 | {
167 | for (j = 0; j < schema->ndims; j++)
168 | {
169 | pc_point_get_double(&pt, schema->dims[j], &val);
170 | /* Check minimum */
171 | if (val < dstats->dims[j].min)
172 | dstats->dims[j].min = val;
173 | /* Check maximum */
174 | if (val > dstats->dims[j].max)
175 | dstats->dims[j].max = val;
176 | /* Add to sum */
177 | dstats->dims[j].sum += val;
178 | }
179 | /* Advance to next point */
180 | pt.data += schema->size;
181 | }
182 |
183 | pa->stats = pc_stats_new_from_dstats(pa->schema, dstats);
184 | pc_dstats_free(dstats);
185 | return PC_SUCCESS;
186 | }
187 |
188 | size_t pc_stats_size(const PCSCHEMA *schema) { return 3 * schema->size; }
189 |
--------------------------------------------------------------------------------
/lib/pc_val.c:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_val.c
3 | *
 * Pointcloud value handling. Create, get and set values.
5 | *
6 | * PgSQL Pointcloud is free and open source software provided
7 | * by the Government of Canada
8 | * Copyright (c) 2013 Natural Resources Canada
9 | *
10 | ***********************************************************************/
11 |
12 | #include "pc_api_internal.h"
#include <math.h>
#include <string.h>
15 |
16 | double pc_value_unscale_unoffset(double val, const PCDIMENSION *dim)
17 | {
18 | /* Offset value */
19 | if (dim->offset)
20 | val -= dim->offset;
21 |
22 | /* Scale value */
23 | if (dim->scale != 1)
24 | val /= dim->scale;
25 |
26 | return val;
27 | }
28 |
29 | double pc_value_scale_offset(double val, const PCDIMENSION *dim)
30 | {
31 | /* Scale value */
32 | if (dim->scale != 1)
33 | val *= dim->scale;
34 |
35 | /* Offset value */
36 | if (dim->offset)
37 | val += dim->offset;
38 |
39 | return val;
40 | }
41 |
42 | double pc_value_from_ptr(const uint8_t *ptr, const PCDIMENSION *dim)
43 | {
44 | double val = pc_double_from_ptr(ptr, dim->interpretation);
45 | return pc_value_scale_offset(val, dim);
46 | }
47 |
48 | double pc_double_from_ptr(const uint8_t *ptr, uint32_t interpretation)
49 | {
50 | switch (interpretation)
51 | {
52 | case PC_UINT8:
53 | {
54 | uint8_t v;
55 | memcpy(&(v), ptr, sizeof(uint8_t));
56 | return (double)v;
57 | }
58 | case PC_UINT16:
59 | {
60 | uint16_t v;
61 | memcpy(&(v), ptr, sizeof(uint16_t));
62 | return (double)v;
63 | }
64 | case PC_UINT32:
65 | {
66 | uint32_t v;
67 | memcpy(&(v), ptr, sizeof(uint32_t));
68 | return (double)v;
69 | }
70 | case PC_UINT64:
71 | {
72 | uint64_t v;
73 | memcpy(&(v), ptr, sizeof(uint64_t));
74 | return (double)v;
75 | }
76 | case PC_INT8:
77 | {
78 | int8_t v;
79 | memcpy(&(v), ptr, sizeof(int8_t));
80 | return (double)v;
81 | }
82 | case PC_INT16:
83 | {
84 | int16_t v;
85 | memcpy(&(v), ptr, sizeof(int16_t));
86 | return (double)v;
87 | }
88 | case PC_INT32:
89 | {
90 | int32_t v;
91 | memcpy(&(v), ptr, sizeof(int32_t));
92 | return (double)v;
93 | }
94 | case PC_INT64:
95 | {
96 | int64_t v;
97 | memcpy(&(v), ptr, sizeof(int64_t));
98 | return (double)v;
99 | }
100 | case PC_FLOAT:
101 | {
102 | float v;
103 | memcpy(&(v), ptr, sizeof(float));
104 | return (double)v;
105 | }
106 | case PC_DOUBLE:
107 | {
108 | double v;
109 | memcpy(&(v), ptr, sizeof(double));
110 | return v;
111 | }
112 | default:
113 | {
114 | pcerror("unknown interpretation type %d encountered in pc_double_from_ptr",
115 | interpretation);
116 | }
117 | }
118 | return 0.0;
119 | }
120 |
/*
 * Clamp a double into [min, max] before it is narrowed to an integral
 * storage type, warning when truncation occurs.
 *
 * Fix: the bound is now printed as a double ("%g") instead of through
 * the caller-supplied integer format. Several call sites pass 64-bit
 * limits (e.g. UINT64_MAX, INT64_MIN) with "%u"/"%d", which is
 * undefined behavior in a varargs call. The "format" parameter is kept
 * (and ignored) so existing call sites remain source-compatible.
 */
#define CLAMP(v, min, max, t, format)                                          \
  do                                                                           \
  {                                                                            \
    if (v > max)                                                               \
    {                                                                          \
      pcwarn("Value %g truncated to %g to fit in " t, v, (double)(max));       \
      v = max;                                                                 \
    }                                                                          \
    else if (v < min)                                                          \
    {                                                                          \
      pcwarn("Value %g truncated to %g to fit in " t, v, (double)(min));       \
      v = min;                                                                 \
    }                                                                          \
  } while (0)
135 |
136 | int pc_double_to_ptr(uint8_t *ptr, uint32_t interpretation, double val)
137 | {
138 | switch (interpretation)
139 | {
140 | case PC_UINT8:
141 | {
142 | uint8_t v;
143 | CLAMP(val, 0, UINT8_MAX, "uint8_t", "%u");
144 | v = (uint8_t)lround(val);
145 | memcpy(ptr, &(v), sizeof(uint8_t));
146 | break;
147 | }
148 | case PC_UINT16:
149 | {
150 | uint16_t v;
151 | CLAMP(val, 0, UINT16_MAX, "uint16_t", "%u");
152 | v = (uint16_t)lround(val);
153 | memcpy(ptr, &(v), sizeof(uint16_t));
154 | break;
155 | }
156 | case PC_UINT32:
157 | {
158 | uint32_t v;
159 | CLAMP(val, 0, UINT32_MAX, "uint32", "%u");
160 | v = (uint32_t)lround(val);
161 | memcpy(ptr, &(v), sizeof(uint32_t));
162 | break;
163 | }
164 | case PC_UINT64:
165 | {
166 | uint64_t v;
167 | CLAMP(val, 0, UINT64_MAX, "uint64", "%u");
168 | v = (uint64_t)lround(val);
169 | memcpy(ptr, &(v), sizeof(uint64_t));
170 | break;
171 | }
172 | case PC_INT8:
173 | {
174 | int8_t v;
175 | CLAMP(val, INT8_MIN, INT8_MAX, "int8", "%d");
176 | v = (int8_t)lround(val);
177 | memcpy(ptr, &(v), sizeof(int8_t));
178 | break;
179 | }
180 | case PC_INT16:
181 | {
182 | int16_t v;
183 | CLAMP(val, INT16_MIN, INT16_MAX, "int16", "%d");
184 | v = (int16_t)lround(val);
185 | memcpy(ptr, &(v), sizeof(int16_t));
186 | break;
187 | }
188 | case PC_INT32:
189 | {
190 | int32_t v;
191 | CLAMP(val, INT32_MIN, INT32_MAX, "int32", "%d");
192 | v = (int32_t)lround(val);
193 | memcpy(ptr, &(v), sizeof(int32_t));
194 | break;
195 | }
196 | case PC_INT64:
197 | {
198 | int64_t v;
199 | CLAMP(val, INT64_MIN, INT64_MAX, "int64", "%d");
200 | v = (int64_t)lround(val);
201 | memcpy(ptr, &(v), sizeof(int64_t));
202 | break;
203 | }
204 | case PC_FLOAT:
205 | {
206 | float v = (float)val;
207 | memcpy(ptr, &(v), sizeof(float));
208 | break;
209 | }
210 | case PC_DOUBLE:
211 | {
212 | double v = val;
213 | memcpy(ptr, &(v), sizeof(double));
214 | break;
215 | }
216 | default:
217 | {
218 | pcerror("unknown interpretation type %d encountered in pc_double_to_ptr",
219 | interpretation);
220 | return PC_FAILURE;
221 | }
222 | }
223 |
224 | return PC_SUCCESS;
225 | }
226 |
--------------------------------------------------------------------------------
/pgsql/pc_pgsql.h:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * pc_pgsql.h
3 | *
4 | * Common header file for all PgSQL pointcloud functions.
5 | *
6 | * PgSQL Pointcloud is free and open source software provided
7 | * by the Government of Canada
8 | * Copyright (c) 2013 Natural Resources Canada
9 | *
10 | ***********************************************************************/
11 |
12 | #include "pc_api.h"
13 |
14 | #include "postgres.h"
15 | #include "utils/elog.h"
16 |
17 | /* Try to move these down */
18 | #include "catalog/pg_type.h" /* for CSTRINGOID */
19 | #include "lib/stringinfo.h" /* For binary input */
20 | #include "utils/array.h"
21 | #include "utils/builtins.h" /* for pg_atoi */
22 |
/* Fully detoast a point/patch argument into memory. */
#define PG_GETARG_SERPOINT_P(argnum) \
  (SERIALIZED_POINT *)PG_DETOAST_DATUM(PG_GETARG_DATUM(argnum))
#define PG_GETARG_SERPATCH_P(argnum) \
  (SERIALIZED_PATCH *)PG_DETOAST_DATUM(PG_GETARG_DATUM(argnum))

/* Detoast only the fixed-size header of a patch argument — avoids
 * pulling the (potentially large) point data out of TOAST storage. */
#define PG_GETHEADER_SERPATCH_P(argnum) \
  (SERIALIZED_PATCH *)PG_DETOAST_DATUM_SLICE(PG_GETARG_DATUM(argnum), 0, \
                                             sizeof(SERIALIZED_PATCH))

/* As above, but also detoast "extra" bytes beyond the header. */
#define PG_GETHEADERX_SERPATCH_P(argnum, extra) \
  (SERIALIZED_PATCH *)PG_DETOAST_DATUM_SLICE(PG_GETARG_DATUM(argnum), 0, \
                                             sizeof(SERIALIZED_PATCH) + extra)

/* Detoast header plus "statsize" bytes and return a pointer to the data
 * area, where the serialized stats live (see pc_patch_stats_deserialize). */
#define PG_GETHEADER_STATS_P(argnum, statsize) \
  (uint8_t *)(((SERIALIZED_PATCH *)PG_DETOAST_DATUM_SLICE( \
                   PG_GETARG_DATUM(argnum), 0, \
                   sizeof(SERIALIZED_PATCH) + statsize)) \
                  ->data)

/* Whether serialization should pick a compression automatically. */
#define AUTOCOMPRESS_NO 0
#define AUTOCOMPRESS_YES 1
44 |
/**
 * Cached catalog constants, presumably resolved once per backend by
 * pointcloud_init_constants_cache() — verify against pc_pgsql.c.
 */
typedef struct
{
  char *schema;         /* schema the extension is installed in (assumed) */
  char *formats;        /* name of the formats table (assumed) */
  char *formats_srid;   /* srid column reference (assumed) */
  char *formats_schema; /* schema/XML column reference (assumed) */
} PC_CONSTANTS;
52 |
/**
 * Serialized point type for clouds. Variable length, because there can be
 * an arbitrary number of dimensions. The pcid is a foreign key
 * reference to the POINTCLOUD_SCHEMAS table, where
 * the underlying structure of the data is described in XML,
 * the spatial reference system is indicated, and the data
 * packing scheme is indicated.
 */
typedef struct
{
  uint32_t size;   /* varlena size header (total bytes, self included) */
  uint32_t pcid;   /* key into POINTCLOUD_SCHEMAS */
  uint8_t data[1]; /* packed point data; layout given by pcid's schema */
} SERIALIZED_POINT;
67 |
/**
 * PgSQL patch type (collection of points) for clouds.
 * Variable length, because there can be
 * an arbitrary number of points encoded within.
 * The pcid is a foreign key reference to the
 * POINTCLOUD_SCHEMAS table, where
 * the underlying structure of the data is described in XML,
 * the spatial reference system is indicated, and the data
 * packing scheme is indicated.
 */
typedef struct
{
  uint32_t size;        /* varlena size header (total bytes, self included) */
  uint32_t pcid;        /* key into POINTCLOUD_SCHEMAS */
  uint32_t compression; /* how the data[] area is compressed */
  uint32_t npoints;     /* number of points encoded in the patch */
  PCBOUNDS bounds;      /* precomputed patch bounds */
  uint8_t data[1];      /* compressed/packed point data */
} SERIALIZED_PATCH;
87 |
88 | /* PGSQL / POINTCLOUD UTILITY FUNCTIONS */
89 | uint32 pcid_from_typmod(const int32 typmod);
90 |
91 | /** Look-up the PCID in the POINTCLOUD_FORMATS table, and construct a PC_SCHEMA
92 | * from the XML therein */
93 | #if PGSQL_VERSION < 120
94 | PCSCHEMA *pc_schema_from_pcid(uint32_t pcid, FunctionCallInfoData *fcinfo);
95 | #else
96 | PCSCHEMA *pc_schema_from_pcid(uint32_t pcid, FunctionCallInfo fcinfo);
97 | #endif
98 |
99 | /** Look-up the PCID in the POINTCLOUD_FORMATS table, and construct a PC_SCHEMA
100 | * from the XML therein */
101 | PCSCHEMA *pc_schema_from_pcid_uncached(uint32 pcid);
102 |
103 | /** Turn a PCPOINT into a byte buffer suitable for saving in PgSQL */
104 | SERIALIZED_POINT *pc_point_serialize(const PCPOINT *pcpt);
105 |
106 | /** Turn a byte buffer into a PCPOINT for processing */
107 | PCPOINT *pc_point_deserialize(const SERIALIZED_POINT *serpt,
108 | const PCSCHEMA *schema);
109 |
110 | /** Create a new readwrite PCPOINT from a hex string */
111 | #if PGSQL_VERSION < 120
112 | PCPOINT *pc_point_from_hexwkb(const char *hexwkb, size_t hexlen,
113 | FunctionCallInfoData *fcinfo);
114 | #else
115 | PCPOINT *pc_point_from_hexwkb(const char *hexwkb, size_t hexlen,
116 | FunctionCallInfo fcinfo);
117 | #endif
118 | /** Create a hex representation of a PCPOINT */
119 | char *pc_point_to_hexwkb(const PCPOINT *pt);
120 |
121 | /** How big will this thing be on disk? */
122 | size_t pc_patch_serialized_size(const PCPATCH *patch);
123 |
124 | /** Turn a PCPATCH into a byte buffer suitable for saving in PgSQL */
125 | SERIALIZED_PATCH *pc_patch_serialize(const PCPATCH *patch, void *userdata);
126 |
127 | /** Turn a PCPATCH into an uncompressed byte buffer */
128 | SERIALIZED_PATCH *pc_patch_serialize_to_uncompressed(const PCPATCH *patch);
129 |
130 | /** Turn a byte buffer into a PCPATCH for processing */
131 | PCPATCH *pc_patch_deserialize(const SERIALIZED_PATCH *serpatch,
132 | const PCSCHEMA *schema);
133 |
134 | /** Create a new readwrite PCPATCH from a hex string */
135 | #if PGSQL_VERSION < 120
136 | PCPATCH *pc_patch_from_hexwkb(const char *hexwkb, size_t hexlen,
137 | FunctionCallInfoData *fcinfo);
138 | #else
139 | PCPATCH *pc_patch_from_hexwkb(const char *hexwkb, size_t hexlen,
140 | FunctionCallInfo fcinfo);
141 | #endif
142 |
143 | /** Create a hex representation of a PCPOINT */
144 | char *pc_patch_to_hexwkb(const PCPATCH *patch);
145 |
146 | /** Returns OGC WKB for envelope of PCPATCH */
147 | uint8_t *pc_patch_to_geometry_wkb_envelope(const SERIALIZED_PATCH *pa,
148 | const PCSCHEMA *schema,
149 | size_t *wkbsize);
150 |
151 | /** Read the first few bytes off an object to get the datum */
152 | uint32 pcid_from_datum(Datum d);
153 |
154 | PCSTATS *pc_patch_stats_deserialize(const PCSCHEMA *schema, const uint8_t *buf);
155 |
156 | void pointcloud_init_constants_cache(void);
157 |
--------------------------------------------------------------------------------
/doc/embed.py:
--------------------------------------------------------------------------------
1 | import sphinx.locale
2 | import docutils.statemachine
3 | sphinx.locale.admonitionlabels['embed'] = u'' #u'Default Embedded Stage'
4 | sphinx.locale.admonitionlabels['plugin'] = u''# u'Non-default Dynamic Plugin Stage'
5 | sphinx.locale.admonitionlabels['streamable'] = u''# u'Streamable Stage'
6 |
def setup(app):
    """Sphinx extension entry point: register the custom admonition
    nodes, their directives, and the env-purge handler."""
    # Each custom node renders as a styled <div> in HTML and falls back
    # to a plain admonition for the latex and text builders.
    for node_class, html_visitor in ((embed, visit_embed_node),
                                     (plugin, visit_plugin_node),
                                     (streamable, visit_streamable_node)):
        app.add_node(node_class,
                     html=(html_visitor, depart_node),
                     latex=(visit_admonition, depart_node),
                     text=(visit_admonition, depart_node))

    for name, directive in (('embed', EmbedDirective),
                            ('plugin', PluginDirective),
                            ('streamable', StreamableDirective)):
        app.add_directive(name, directive)

    app.connect('env-purge-doc', purge_embeds)
    return {'version': '0.1'}  # identifies the version of our extension
26 |
27 | from docutils import nodes
28 |
class embed(nodes.Admonition, nodes.Element):
    """Docutils node produced by the ``embed`` directive."""
    pass
31 |
class plugin(nodes.Admonition, nodes.Element):
    """Docutils node produced by the ``plugin`` directive."""
    pass
34 |
class streamable(nodes.Admonition, nodes.Element):
    """Docutils node produced by the ``streamable`` directive."""
    pass
37 |
def visit_admonition(self, node):
    # Fallback visitor for latex/text builders. ``self`` here is the
    # builder's translator instance, so this delegates to the
    # translator's own visit_admonition — it is not self-recursion.
    self.visit_admonition(node)
40 |
def visit_embed_node(self, node):
    # HTML visitor: open a <div> styled as an "embed" admonition.
    # ``self`` is the HTML translator; depart_node emits the close tag.
    opening = self.starttag(node, 'div', CLASS='admonition embed')
    self.body.append(opening)
45 |
def visit_plugin_node(self, node):
    # HTML visitor: open a <div> styled as a "plugin" admonition.
    # ``self`` is the HTML translator; depart_node emits the close tag.
    opening = self.starttag(node, 'div', CLASS='admonition plugin')
    self.body.append(opening)
50 |
def visit_streamable_node(self, node):
    # HTML visitor: open a <div> styled as a "streamable" admonition.
    # ``self`` is the HTML translator; depart_node emits the close tag.
    opening = self.starttag(node, 'div', CLASS='admonition streamable')
    self.body.append(opening)
55 |
def depart_node(self, node):
    # Shared depart handler for all three custom nodes: defer to the
    # translator's standard admonition-closing logic.
    self.depart_admonition(node)
58 |
59 |
60 | from docutils.parsers.rst import Directive
61 |
62 |
63 | from sphinx.locale import _
64 |
class EmbedDirective(Directive):
    """``.. embed::`` — renders a note that the stage is enabled by
    default, and records every occurrence in the build environment."""

    # allow body content in the directive (run() replaces it anyway)
    has_content = True

    def run(self):
        env = self.state.document.settings.env

        anchor_id = "embed-%d" % env.new_serialno('embed')
        anchor = nodes.target('', '', ids=[anchor_id])

        # The rendered body is always this canned sentence, regardless
        # of any author-supplied content.
        self.content = docutils.statemachine.StringList(
            ['This stage is enabled by default'])
        embed_node = embed('\n'.join(self.content))
        embed_node += nodes.title(_('Default Embedded Stage'),
                                  _('Default Embedded Stage '))
        self.state.nested_parse(self.content, self.content_offset, embed_node)

        # Track each embed so other build steps can collect them.
        if not hasattr(env, 'embed_all_embeds'):
            env.embed_all_embeds = []
        env.embed_all_embeds.append({
            'docname': env.docname,
            'lineno': self.lineno,
            'embed': embed_node.deepcopy(),
            'target': anchor,
        })

        return [anchor, embed_node]
92 |
class PluginDirective(Directive):
    """``.. plugin::`` directive.

    Renders a titled "plugin" admonition whose body is replaced with fixed
    boilerplate text, and records the occurrence in the build environment
    so purge_embeds() can invalidate it on document re-read.
    """

    # The directive accepts body content (which run() then overwrites
    # with the boilerplate below).
    has_content = True

    def run(self):
        env = self.state.document.settings.env

        # Unique anchor id so each occurrence of the directive can be
        # linked to.
        targetid = "plugin-%d" % env.new_serialno('plugin')
        targetnode = nodes.target('', '', ids=[targetid])

        # Import locally: the visible module-level imports only bind
        # ``nodes`` and ``Directive``, so the bare name ``docutils`` may
        # not be resolvable here (would raise NameError otherwise).
        from docutils import statemachine
        self.content = statemachine.StringList(
            ['This stage requires a dynamic plugin to operate'])

        plugin_node = plugin('\n'.join(self.content))
        plugin_node += nodes.title(_('Dynamic Plugin'), _('Dynamic Plugin'))
        self.state.nested_parse(self.content, self.content_offset,
                                plugin_node)

        # Record per-document entries so purge_embeds() can drop stale
        # ones during incremental rebuilds.
        if not hasattr(env, 'plugin_all_plugins'):
            env.plugin_all_plugins = []
        env.plugin_all_plugins.append({
            'docname': env.docname,
            'lineno': self.lineno,
            'plugin': plugin_node.deepcopy(),
            'target': targetnode,
        })

        return [targetnode, plugin_node]
121 |
class StreamableDirective(Directive):
    """``.. streamable::`` directive.

    Renders a titled "streamable" admonition whose body is replaced with
    fixed boilerplate text, and records the occurrence in the build
    environment so purge_embeds() can invalidate it on document re-read.
    """

    # The directive accepts body content (which run() then overwrites
    # with the boilerplate below).
    has_content = True

    def run(self):
        env = self.state.document.settings.env

        # Unique anchor id so each occurrence of the directive can be
        # linked to.
        targetid = "streamable-%d" % env.new_serialno('streamable')
        targetnode = nodes.target('', '', ids=[targetid])

        # Import locally: the visible module-level imports only bind
        # ``nodes`` and ``Directive``, so the bare name ``docutils`` may
        # not be resolvable here (would raise NameError otherwise).
        from docutils import statemachine
        self.content = statemachine.StringList(
            ['This stage supports streaming operations'])
        streamable_node = streamable('\n'.join(self.content))
        streamable_node += nodes.title(_('Streamable Stage'),
                                       _('Streamable Stage'))
        self.state.nested_parse(self.content, self.content_offset,
                                streamable_node)

        # Record per-document entries so purge_embeds() can drop stale
        # ones during incremental rebuilds.  Key fixed from 'plugin'
        # (copy-paste from PluginDirective) to 'streamable'.
        if not hasattr(env, 'streamable_all_streamable'):
            env.streamable_all_streamable = []
        env.streamable_all_streamable.append({
            'docname': env.docname,
            'lineno': self.lineno,
            'streamable': streamable_node.deepcopy(),
            'target': targetnode,
        })

        return [targetnode, streamable_node]
149 |
def purge_embeds(app, env, docname):
    """Handler for the ``env-purge-doc`` event.

    Drops every cached embed/plugin/streamable entry that originated from
    ``docname`` so the document can be re-read cleanly during an
    incremental build.

    Bug fix: the original early-returned when an attribute was missing,
    which skipped purging the remaining lists (e.g. a missing
    ``embed_all_embeds`` left stale plugin and streamable entries behind).
    Each list is now purged independently.
    """
    if hasattr(env, 'embed_all_embeds'):
        env.embed_all_embeds = [entry for entry in env.embed_all_embeds
                                if entry['docname'] != docname]

    if hasattr(env, 'plugin_all_plugins'):
        env.plugin_all_plugins = [entry for entry in env.plugin_all_plugins
                                  if entry['docname'] != docname]

    if hasattr(env, 'streamable_all_streamable'):
        env.streamable_all_streamable = [
            entry for entry in env.streamable_all_streamable
            if entry['docname'] != docname]
165 |
--------------------------------------------------------------------------------
/doc/quickstart.rst:
--------------------------------------------------------------------------------
1 | .. _quickstart:
2 |
3 | ******************************************************************************
4 | Getting Started
5 | ******************************************************************************
6 |
7 | Introduction
8 | ------------------------------------------------------------------------------
9 |
Once pgPointcloud is installed, the first step is to load a point cloud into
PostgreSQL to start playing with ``PcPatch`` and ``PcPoint``.
12 |
13 | To do that you can write your own loader, using the uncompressed WKB format, or
14 | more simply you can load existing LIDAR files using the `PDAL`_ processing and
15 | format conversion library.
16 |
17 |
18 | Install PDAL
19 | ------------------------------------------------------------------------------
20 |
21 | To install PDAL check out the PDAL `development documentation`_.
22 |
23 | With PDAL installed you're ready to run a PDAL import into PostgreSQL
24 | PointCloud thanks to the dedicated `pgPointcloud writer`_.
25 |
26 |
27 | Running a pipeline
28 | ------------------------------------------------------------------------------
29 |
PDAL includes a `command line program`_ that allows both simple format
translations and more complex "pipelines" of transformation. The ``pdal
translate`` command performs simple format translations. In order to load data
into Pointcloud we use a "PDAL pipeline", invoked with ``pdal pipeline``. A
pipeline combines a format reader and a format writer with filters that can
alter or group the points together.
36 |
37 | PDAL pipelines are JSON files, which declare readers, filters, and writers
38 | forming a processing chain that will be applied to the LIDAR data.
39 |
40 | To execute a pipeline file, run it through the pdal pipeline command:
41 |
42 | .. code-block:: bash
43 |
44 | $ pdal pipeline --input pipelinefile.json
45 |
46 | Here is a simple example pipeline that reads a LAS file and writes into a
47 | PostgreSQL Pointcloud database.
48 |
49 | .. code-block:: json
50 |
51 | {
52 | "pipeline":[
53 | {
54 | "type":"readers.las",
55 | "filename":"/home/lidar/st-helens-small.las",
56 | "spatialreference":"EPSG:26910"
57 | },
58 | {
59 | "type":"filters.chipper",
60 | "capacity":400
61 | },
62 | {
63 | "type":"writers.pgpointcloud",
64 | "connection":"host='localhost' dbname='pc' user='lidar' password='lidar' port='5432'",
65 | "table":"sthsm",
66 | "compression":"dimensional",
67 | "srid":"26910"
68 | }
69 | ]
70 | }
71 |
72 | PostgreSQL Pointcloud storage of LIDAR works best when each "patch" of points
73 | consists of points that are close together, and when most patches do not
74 | overlap. In order to convert unordered data from a LIDAR file into
75 | patch-organized data in the database, we need to pass it through a filter to
76 | "chip" the data into compact patches. The "chipper" is one of the filters we
77 | need to apply to the data while loading.
78 |
79 | Similarly, reading data from a PostgreSQL Pointcloud uses a Pointcloud reader
80 | and a file writer of some sort. This example reads from the database and writes
81 | to a CSV text file:
82 |
83 | .. code-block:: json
84 |
85 | {
86 | "pipeline":[
87 | {
88 | "type":"readers.pgpointcloud",
89 | "connection":"host='localhost' dbname='pc' user='lidar' password='lidar' port='5432'",
90 | "table":"sthsm",
91 | "column":"pa",
92 | "spatialreference":"EPSG:26910"
93 | },
94 | {
95 | "type":"writers.text",
96 | "filename":"/home/lidar/st-helens-small-out.txt"
97 | }
98 | ]
99 | }
100 |
101 | Note that we do not need to chip the data stream when reading from the
102 | database, as the writer does not care if the points are blocked into patches or
103 | not.
104 |
105 | You can use the "where" option to restrict a read to just an envelope, allowing
106 | partial extracts from a table:
107 |
108 | .. code-block:: json
109 |
110 | {
111 | "pipeline":[
112 | {
113 | "type":"readers.pgpointcloud",
114 | "connection":"host='localhost' dbname='pc' user='lidar' password='lidar' port='5432'",
115 | "table":"sthsm",
116 | "column":"pa",
117 | "spatialreference":"EPSG:26910",
      "where":"PC_Intersects(pa, ST_MakeEnvelope(560037.36, 5114846.45, 562667.31, 5118943.24, 26910))"
119 | },
120 | {
121 | "type":"writers.text",
122 | "filename":"/home/lidar/st-helens-small-out.txt"
123 | }
124 | ]
125 | }
126 |
127 |
128 | pgpointcloud reader/writer
129 | ------------------------------------------------------------------------------
130 |
131 | The PDAL `writers.pgpointcloud`_ for PostgreSQL Pointcloud takes the following
132 | options:
133 |
134 | - **connection**: The PostgreSQL database connection string. E.g. ``host=localhost user=username password=pw db=dbname port=5432``
- **table**: The database table to create and write the patches to.
136 | - **schema**: The schema to create the table in. [Optional]
137 | - **column**: The column name to use in the patch table. [Optional: "pa"]
138 | - **compression**: The patch compression format to use [Optional: "dimensional"]
139 | - **overwrite**: Replace any existing table [Optional: true]
140 | - **srid**: The spatial reference id to store data in [Optional: 4326]
141 | - **pcid**: An existing PCID to use for the point cloud schema [Optional]
142 | - **pre_sql**: Before the pipeline runs, read and execute this SQL file or command [Optional]
143 | - **post_sql**: After the pipeline runs, read and execute this SQL file or command [Optional]
144 |
145 | The PDAL `readers.pgpointcloud`_ for PostgreSQL Pointcloud takes the following
146 | options:
147 |
148 | - **connection**: The PostgreSQL database connection string. E.g. ``host=localhost user=username password=pw db=dbname port=5432``
149 | - **table**: The database table to read the patches from.
150 | - **schema**: The schema to read the table from. [Optional]
151 | - **column**: The column name in the patch table to read from. [Optional: "pa"]
152 | - **where**: SQL where clause to constrain the query [Optional]
153 | - **spatialreference**: Overrides the database declared SRID [Optional]
154 |
155 |
156 | .. _`PDAL`: https://pdal.io/
157 | .. _`development documentation`: https://pdal.io/development/
158 | .. _`pgPointcloud writer`: https://pdal.io/stages/writers.pgpointcloud.html#writers-pgpointcloud
159 | .. _`command line program`: https://pdal.io/apps/index.html
160 | .. _`writers.pgpointcloud`: https://pdal.io/stages/writers.pgpointcloud.html
161 | .. _`readers.pgpointcloud`: https://pdal.io/stages/readers.pgpointcloud.html
162 |
--------------------------------------------------------------------------------