├── .github
├── dependabot.yml
└── workflows
│ ├── buildimage.yml
│ ├── publish.yml
│ ├── stale.yml
│ └── test.yml
├── .gitignore
├── .golangci.yml
├── .idea
├── .gitignore
├── modules.xml
├── steampipe-postgres-fdw.iml
└── vcs.xml
├── CHANGELOG.md
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── conversion_infos.go
├── errors.go
├── exec.go
├── explain.go
├── fdw.go
├── fdw
├── .DS_Store
├── Makefile
├── README.md
├── common.h
├── datum.c
├── fdw.c
├── fdw_handlers.h
├── fdw_helpers.h
├── logging.c
├── query.c
├── steampipe_postgres_fdw--1.0.sql
└── steampipe_postgres_fdw.control
├── generate
└── generator.go
├── go.mod
├── go.sum
├── helpers.go
├── hub
├── connection_factory.go
├── constants.go
├── hub.go
├── hub_base.go
├── hub_create.go
├── hub_local.go
├── hub_local_plugin.go
├── hub_quals.go
├── hub_remote.go
├── in_memory_iterator.go
├── interface.go
├── query_result.go
├── query_status.go
├── query_timing_metadata.go
├── scan_iterator.go
├── scan_iterator_base.go
└── scan_iterator_local.go
├── out
└── README.md
├── prebuild.tmpl
├── quals.go
├── schema.go
├── scripts
├── README.md
├── build-linux-arm-pg14.sh
├── build-linux-arm-pg15.sh
├── install.sh
├── script_to_build.sh
├── steampipe_postgres_installer.sh
└── upload_arm_asset.sh
├── settings
├── keys.go
├── setter_func.go
└── settings.go
├── sql
├── sql.go
└── sql_test.go
├── standalone_setup
├── install_standalone.sh
└── setup.sql
├── templates
├── fdw
│ ├── fdw_handlers.h.tmpl
│ ├── steampipe_postgres_fdw--1.0.sql.tmpl
│ └── steampipe_postgres_fdw.control.tmpl
├── hub
│ ├── hub_create.go.tmpl
│ └── hub_local_plugin.go.tmpl
└── scripts
│ ├── README.md.tmpl
│ └── install.sh.tmpl
├── types
├── pathkeys.go
├── pathkeys_test.go
└── types.go
└── version
└── version.go
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # See https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates#package-ecosystem
2 | version: 2
3 | updates:
4 | # Maintain dependencies for GitHub Actions
5 | - package-ecosystem: "github-actions"
6 | directory: "/"
7 | schedule:
8 | interval: "weekly"
9 |
10 | - package-ecosystem: "gomod"
11 | directory: "/"
12 | schedule:
13 | # check every week
14 | interval: "weekly"
15 | # on monday
16 | day: "monday"
17 | # at 2:01 am
18 | time: "02:01"
19 | commit-message:
20 | prefix: "[Dependabot]"
21 | include: "scope"
22 | pull-request-branch-name:
23 | separator: "-"
24 | assignees:
25 | - "binaek"
26 | - "kaidaguerre"
27 | - "pskrbasu"
28 | labels:
29 | - "dependencies"
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish FDW Image
2 | on:
3 | workflow_dispatch:
4 | inputs:
5 | release:
6 | description: "The published release to package as an image (must be prefixed with 'v')"
7 | required: true
8 |
9 | env:
10 | PROJECT_ID: steampipe
11 | IMAGE_NAME: fdw
12 | CORE_REPO: ghcr.io/turbot/steampipe
13 | ORG: turbot
14 | CONFIG_SCHEMA_VERSION: "2020-11-18"
15 | VERSION: ${{ github.event.inputs.release }}
16 |
17 | jobs:
18 | publish:
19 | name: Publish
20 | runs-on: ubuntu-22.04
21 | steps:
22 | - name: Get Release assets
23 | env:
24 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
25 | run: |-
26 | gh release download ${{ github.event.inputs.release }} --dir . --repo ${{ github.repository }}
27 |
28 | - name: Release assets downloaded
29 | run: |-
30 | ls -la .
31 |
32 | - name: Sanitize Version
33 | run: |-
34 | echo $VERSION
35 | trim=${VERSION#"v"}
36 | echo $trim
37 | echo "VERSION=${trim}" >> $GITHUB_ENV
38 |
39 | - name: Validate Version
40 | run: |-
41 | if [[ $VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+(-.+)?$ ]]; then
42 | echo "Version OK: $VERSION"
43 | else
44 | echo "Invalid version: $VERSION"
45 | exit 1
46 | fi
47 |
48 | # Login to GHCR
49 | - name: Log in to the Container registry
50 | uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0
51 | with:
52 | registry: ghcr.io
53 | username: ${{ github.repository_owner }}
54 | password: ${{ secrets.GH_PUBLISH_ACCESS_TOKEN }}
55 |
56 | - name: Verify ORAS installation
57 | run: oras version
58 |
59 | - name: Create Config JSON
60 | run: |-
61 | JSON_STRING=$( jq -n \
62 | --arg name "$IMAGE_NAME" \
63 | --arg organization "$ORG" \
64 | --arg version "$VERSION" \
65 | --arg schemaVersion "$CONFIG_SCHEMA_VERSION" \
66 | '{schemaVersion: $schemaVersion, fdw: { name: $name, organization: $organization, version: $version} }' )
67 | echo $JSON_STRING > config.json
68 | cat config.json
69 |
70 | - name: Create Annotations JSON
71 | run: |-
72 | JSON_STRING=$( jq -n \
73 | --arg title "$IMAGE_NAME" \
74 | --arg desc "$ORG" \
75 | --arg version "$VERSION" \
76 | --arg timestamp "$(date +%FT%TZ)" \
77 | --arg vendor "Turbot HQ, Inc." \
78 | '{
79 | "$manifest": {
80 | "org.opencontainers.image.title": $title,
81 | "org.opencontainers.image.description": $desc,
82 | "org.opencontainers.image.version": $version,
83 | "org.opencontainers.image.created": $timestamp,
84 | "org.opencontainers.image.vendor": $vendor
85 | }
86 | }' )
87 | echo $JSON_STRING > annotations.json
88 | cat annotations.json
89 |
90 | # Push to GHCR
91 | - name: Push to registry
92 | run: |-
93 | REF="$CORE_REPO/$IMAGE_NAME:$VERSION"
94 | oras push $REF \
95 | --config config.json:application/vnd.turbot.steampipe.config.v1+json \
96 | --annotation-file annotations.json \
97 | steampipe_postgres_fdw.so.darwin_amd64.gz:application/vnd.turbot.steampipe.fdw.darwin-amd64.layer.v1+gzip \
98 | steampipe_postgres_fdw.so.linux_amd64.gz:application/vnd.turbot.steampipe.fdw.linux-amd64.layer.v1+gzip \
99 | steampipe_postgres_fdw.so.darwin_arm64.gz:application/vnd.turbot.steampipe.fdw.darwin-arm64.layer.v1+gzip \
100 | steampipe_postgres_fdw.so.linux_arm64.gz:application/vnd.turbot.steampipe.fdw.linux-arm64.layer.v1+gzip \
101 | steampipe_postgres_fdw.control:application/vnd.turbot.steampipe.fdw.control.layer.v1+text \
102 | steampipe_postgres_fdw--1.0.sql:application/vnd.turbot.steampipe.fdw.sql.layer.v1+text
103 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Stale Issues and PRs
2 | on:
3 | schedule:
4 | - cron: "0 8 * * *"
5 | workflow_dispatch:
6 | inputs:
7 | dryRun:
8 | description: Set to true for a dry run
9 | required: false
10 | default: "false"
11 | type: string
12 |
13 | jobs:
14 | stale:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Stale issues and PRs
18 | id: stale-issues-and-prs
19 | uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
20 | with:
21 | close-issue-message: |
22 | This issue was closed because it has been stalled for 90 days with no activity.
23 | close-issue-reason: 'not_planned'
24 | close-pr-message: |
25 | This PR was closed because it has been stalled for 90 days with no activity.
26 | # Set days-before-close to 30 because we want to close the issue/PR after 90 days total, since days-before-stale is set to 60
27 | days-before-close: 30
28 | days-before-stale: 60
29 | debug-only: ${{ inputs.dryRun }}
30 | exempt-issue-labels: 'good first issue,help wanted'
31 | repo-token: ${{ secrets.GITHUB_TOKEN }}
32 | stale-issue-label: 'stale'
33 | stale-issue-message: |
34 | This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 30 days.
35 | stale-pr-label: 'stale'
36 | stale-pr-message: |
37 | This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 30 days.
38 | start-date: "2021-02-09"
39 | operations-per-run: 1000
40 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: FDW Acceptance Tests
2 | on:
3 | pull_request:
4 |
5 | jobs:
6 | golangci_lint:
7 | name: golangci-lint
8 | runs-on: ubuntu-22.04
9 | steps:
10 | - name: Checkout
11 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
12 |
13 | - name: Set up Go
14 | uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
15 | with:
16 | go-version: 1.22
17 |
18 | - name: golangci-lint
19 | uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6.5.2
20 | continue-on-error: true # we don't want to enforce just yet
21 | with:
22 | version: v1.52.2
23 | args: --timeout=15m --config=.golangci.yml
24 |
25 | build_and_test:
26 | name: Build and run tests
27 | needs: golangci_lint
28 | runs-on: ubuntu-22.04
29 | steps:
30 | - name: Set up Go
31 | uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
32 | with:
33 | go-version: 1.22
34 |
35 | - name: Checkout Steampipe
36 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
37 | with:
38 | submodules: true
39 | repository: turbot/steampipe
40 | path: steampipe
41 |
42 | - name: Fetching Go Cache Paths
43 | id: go-cache-paths
44 | run: |
45 | echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
46 | echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
47 |
48 | - name: Go Build Cache
49 | id: build-cache
50 | uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
51 | with:
52 | path: ${{ steps.go-cache-paths.outputs.go-build }}
53 | key: ${{ runner.os }}-go-build-${{ hashFiles('**/go.sum') }}
54 |
55 | - name: Go Mod Cache
56 | id: mod-cache
57 | uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
58 | with:
59 | path: ${{ steps.go-cache-paths.outputs.go-mod }}
60 | key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
61 |
62 | - name: Build Steampipe
63 | run: |
64 | echo "PATH=$PATH:$HOME/build:/home/runner" >> $GITHUB_ENV
65 | ls /home/runner/work/steampipe-postgres-fdw/steampipe-postgres-fdw
66 | cd /home/runner/work/steampipe-postgres-fdw/steampipe-postgres-fdw/steampipe
67 | go get
68 | go build -o /home/runner/steampipe
69 |
70 | - name: Run steampipe
71 | run: |
72 | steampipe query "select 1 as col"
73 |
74 | - name: Checkout FDW
75 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
76 | with:
77 | repository: turbot/steampipe-postgres-fdw
78 | path: steampipe-postgres-fdw
79 |
80 | - name: Setup apt-get
81 | run: |-
82 | sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
83 | wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
84 | sudo env ACCEPT_EULA=Y apt-get update
85 | sudo env ACCEPT_EULA=Y apt-get upgrade
86 |
87 | - name: Install PostgreSQL14 Dev
88 | run: |-
89 | sudo apt-get -y install postgresql-server-dev-14
90 |
91 | - name: Find stuff and set env
92 | run: |-
93 |
94 | which pg_config
95 | pg_config --version
96 |
97 | export PATH=$(pg_config --bindir):$PATH
98 | export PGXS=$(pg_config --pgxs)
99 |
100 | export SERVER_LIB=$(pg_config --includedir)/14/server
101 | export INTERNAL_LIB=$(pg_config --includedir)/internal
102 |
103 | export CFLAGS="$(pg_config --cflags) -I${SERVER_LIB} -I${INTERNAL_LIB} -g"
104 | export PG_CFLAGS="$(pg_config --cflags) -I${SERVER_LIB} -I${INTERNAL_LIB} -g"
105 |
106 | export LDFLAGS=$(pg_config --ldflags)
107 | export PG_LDFLAGS=$(pg_config --ldflags)
108 |
109 | ls -la $SERVER_LIB
110 | ls -la $INTERNAL_LIB
111 |
112 | - name: Build FDW
113 | run: |
114 | cd ~/work/steampipe-postgres-fdw/steampipe-postgres-fdw/steampipe-postgres-fdw/
115 | make install
116 |
117 | - name: GZip
118 | run: |
119 | cd ~/work/steampipe-postgres-fdw/steampipe-postgres-fdw/steampipe-postgres-fdw
120 | tar -czvf ../build.tar.gz ./build-$(uname)
121 |
122 | - name: Upload FDW Build
123 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
124 | with:
125 | name: fdw-build
126 | path: build.tar.gz
127 |
128 | - name: Setup BATS
129 | uses: mig4/setup-bats@af9a00deb21b5d795cabfeaa8d9060410377686d # v1.2.0
130 | with:
131 | bats-version: 1.2.1
132 |
133 | - name: Install Chaos plugin from registry
134 | run: steampipe plugin install chaos
135 |
136 | - name: Go install jd
137 | run: |
138 | go install github.com/josephburnett/jd@latest
139 |
140 | - name: Run tests
141 | timeout-minutes: 6
142 | run: |
143 | cd /home/runner/work/steampipe-postgres-fdw/steampipe-postgres-fdw/steampipe
144 | chmod +x tests/acceptance/run.sh
145 | ./tests/acceptance/run.sh chaos_and_query.bats
146 |
147 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # intermediate files
2 | *.a
3 | *.o
4 | # zipped files
5 | *.gz
6 | # generated header
7 | steampipe_postgres_fdw.h
8 | # generated C imports
9 | 0_prebuild.go
10 |
11 | # Binaries for programs and plugins
12 | *.exe
13 | *.exe~
14 | *.dll
15 | *.so
16 | *.dylib
17 |
18 | # Test binary, built with `go test -c`
19 | *.test
20 |
21 | # Output of the go coverage tool, specifically when used with LiteIDE
22 | *.out
23 |
24 | # Dependency directories (remove the comment below to include it)
25 | # vendor/
26 | build-*/
27 | # intermediate files from clang
28 | *.bc
29 | # work directory created by the standalone fdw building
30 | /work
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | linters:
2 | disable-all: true
3 | enable:
4 | # default rules
5 | - errcheck
6 | - gosimple
7 | - govet
8 | - ineffassign
9 | - staticcheck
10 | - typecheck
11 | - unused
12 | # other rules
13 | - asasalint
14 | - asciicheck
15 | - bidichk
16 | - durationcheck
17 | - exportloopref
18 | - forbidigo
19 | - gocritic
20 | - gocheckcompilerdirectives
21 | - gosec
22 | - makezero
23 | - nilerr
24 | - nolintlint
25 | - reassign
26 | - sqlclosecheck
27 | - unconvert
28 |
29 | linters-settings:
30 | nolintlint:
31 | require-explanation: true
32 | require-specific: true
33 |
34 | gocritic:
35 | disabled-checks:
36 | - ifElseChain # style
37 | - singleCaseSwitch # style & it's actually not a bad idea to use single case switch in some cases
38 | - assignOp # style
39 | - commentFormatting # style
40 |
41 | run:
42 | timeout: 5m
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/steampipe-postgres-fdw.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | #####
2 | #
3 | # Dockerfile for cross-compiling for Linux on MacOS
4 | # Build the image with:
5 | # docker build --pull -f Dockerfile -t steampipe_fdw_builder:15 --build-arg="pg_version=15" .
6 | #
7 | # Run with:
8 | # docker run -it --rm --name sp_fdw_builder -v $(pwd):/tmp/ext steampipe_fdw_builder:15
9 | #
10 | #####
11 |
12 | FROM ubuntu:focal
13 |
14 | # We know that the FDW does not compile with PG12.
15 | # Use this so that the build fails if an ARG is not passed in.
16 | # This is useful since we can use the same container definition for the SQLite builder as well
17 | ARG pg_version=12
18 | ARG go_repo="deb http://ppa.launchpad.net/longsleep/golang-backports/ubuntu bionic main"
19 | ARG pg_repo="deb http://apt.postgresql.org/pub/repos/apt/ focal-pgdg main"
20 |
21 | ENV PG_VERS=$pg_version
22 | ENV GO_VERS=1.21
23 |
24 | ## for apt to be noninteractive
25 | ARG DEBIAN_FRONTEND=noninteractive
26 | ARG DEBCONF_NONINTERACTIVE_SEEN=true
27 |
28 | RUN apt-get update
29 | RUN apt-get install -y --no-install-recommends apt-transport-https
30 | RUN apt-get install -y --no-install-recommends dirmngr
31 | RUN apt-get install -y --no-install-recommends gnupg
32 | RUN apt-get install -y --no-install-recommends curl
33 | RUN apt-get install -y --no-install-recommends ca-certificates
34 |
35 | RUN mkdir -p /etc/apt/sources.list.d \
36 | && apt-key adv --keyserver keyserver.ubuntu.com --recv 56A3D45E \
37 | && apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
38 | && echo $go_repo > /etc/apt/sources.list.d/golang.list \
39 | && echo $pg_repo > /etc/apt/sources.list.d/pgdb.list \
40 | && curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
41 |
42 | RUN apt-get update
43 | RUN env DEBIAN_FRONTEND=noninteractive \
44 | apt-get install -y --no-install-recommends golang-${GO_VERS} \
45 | postgresql-${PG_VERS} postgresql-server-dev-${PG_VERS} libpq-dev wget build-essential \
46 | libgcc-7-dev \
47 | locales \
48 | tzdata \
49 | git \
50 | && rm -rf \
51 | /var/lib/apt/lists/* \
52 | /var/cache/debconf \
53 | /tmp/* \
54 | && apt-get clean
55 |
56 | RUN ln -s /usr/lib/go-${GO_VERS}/bin/go /usr/bin/go
57 | RUN locale-gen en_US.UTF-8
58 | ENV LANG en_US.UTF-8
59 | ENV LANGUAGE en_US:en
60 | ENV LC_ALL en_US.UTF-8
61 |
62 | WORKDIR /tmp/ext
63 | COPY . /tmp/ext
64 |
65 | RUN chown -R postgres:postgres /tmp/ext
66 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile
2 | default: build
3 |
4 | STEAMPIPE_INSTALL_DIR ?= ~/.steampipe
5 |
6 | PLATFORM=$(shell uname)
7 | GETTEXT_INCLUDE=$(shell dirname $(shell dirname $(shell readlink -f $(shell which gettext))))/include
8 |
9 | install: build
10 | if test -d ~/.steampipe/db/14.2.0; then \
11 | cp ./build-$(PLATFORM)/steampipe_postgres_fdw--1.0.sql $(STEAMPIPE_INSTALL_DIR)/db/14.2.0/postgres/share/postgresql/extension/; \
12 | cp ./build-$(PLATFORM)/steampipe_postgres_fdw.control $(STEAMPIPE_INSTALL_DIR)/db/14.2.0/postgres/share/postgresql/extension/; \
13 | cp ./build-$(PLATFORM)/steampipe_postgres_fdw.so $(STEAMPIPE_INSTALL_DIR)/db/14.2.0/postgres/lib/postgresql/; \
14 | fi
15 |
16 | # build standalone
17 | standalone: validate_plugin validate_version prebuild.go
18 | @echo "Building standalone FDW for plugin: $(plugin)"
19 |
20 | # Remove existing work dir and create a new directory for the render process
21 | rm -rf work && \
22 | mkdir -p work
23 |
24 | # Copy the entire source tree, excluding .git directory, into the new directory
25 | rsync -a --exclude='.git' . work/ >/dev/null 2>&1
26 |
27 | # Change to the new directory to perform operations
28 | cd work && \
29 | go run generate/generator.go templates . $(plugin) $(plugin_version) $(plugin_github_url) && \
30 | if [ ! -z "$(plugin_version)" ]; then \
31 | echo "go get $(plugin_github_url)@$(plugin_version)" && \
32 | go get $(plugin_github_url)@$(plugin_version); \
33 | fi && \
34 | go mod tidy && \
35 | $(MAKE) -C ./fdw clean && \
36 | $(MAKE) -C ./fdw go && \
37 | $(MAKE) -C ./fdw && \
38 | $(MAKE) -C ./fdw standalone
39 |
40 | # Delete existing build-${PLATFORM} and copy the binaries to the actual
41 | # build-${PLATFORM} folder
42 | rm -rf build-${PLATFORM} && \
43 | mkdir -p build-${PLATFORM} && \
44 | cp -a work/build-${PLATFORM}/* build-${PLATFORM}/
45 |
46 | # Note: The work directory will contain the full code tree with changes,
47 | # binaries will be copied to build-${PLATFORM} folder
48 |
# render target
#
# Performs only the code-generation half of the 'standalone' target:
# copies the tree into ./work and runs the template generator for the
# given plugin, leaving compilation to 'build_from_work'.
render: validate_plugin validate_version prebuild.go
	@echo "Rendering code for plugin: $(plugin)"

# Remove existing work dir and create a new directory for the render process
	rm -rf work && \
	mkdir -p work

# Copy the entire source tree, excluding .git directory, into the new directory
	rsync -a --exclude='.git' . work/ >/dev/null 2>&1

# Change to the new directory to perform operations
	cd work && \
	go run generate/generator.go templates . $(plugin) $(plugin_version) $(plugin_github_url) && \
	if [ ! -z "$(plugin_version)" ]; then \
		echo "go get $(plugin_github_url)@$(plugin_version)" && \
		go get $(plugin_github_url)@$(plugin_version); \
	fi && \
	go mod tidy

# Note: The work directory will contain the full code tree with rendered changes
70 |
# build_from_work target
#
# Compiles a previously rendered ./work tree (see the 'render' target)
# and copies the produced binaries into build-${PLATFORM}. Fails with a
# clear message when 'render' has not been run first.
build_from_work:
	@if [ ! -d "work" ]; then \
		echo "Error: 'work' directory does not exist. Please run the render target first." >&2; \
		exit 1; \
	fi
	@echo "Building from work directory for plugin: $(plugin)"

# Change to the work directory to perform build operations
	cd work && \
	$(MAKE) -C ./fdw clean && \
	$(MAKE) -C ./fdw go && \
	$(MAKE) -C ./fdw && \
	$(MAKE) -C ./fdw standalone

# Delete existing build-${PLATFORM} and copy the binaries to the actual
# build-${PLATFORM} folder
	rm -rf build-${PLATFORM} && \
	mkdir -p build-${PLATFORM} && \
	cp -a work/build-${PLATFORM}/* build-${PLATFORM}/

# Note: This target builds from the 'work' directory and copies binaries to the build-${PLATFORM} folder
93 |
# Fail fast when the required 'plugin' variable has not been supplied.
# The usage text spells out the expected variable values; previously the
# placeholders were empty (e.g. "plugin="), which made the message useless.
validate_plugin:
ifndef plugin
	$(error "The 'plugin' variable is missing. Usage: make build plugin=<plugin alias> [plugin_version=<version>] [plugin_github_url=<url>]")
endif

# Check if plugin_github_url is provided when plugin_version is specified
validate_version:
ifdef plugin_version
ifndef plugin_github_url
	$(error "The 'plugin_github_url' variable is required when 'plugin_version' is specified")
endif
endif
106 |
# Default developer build: compile the FDW against the locally installed
# Postgres and install it into the Steampipe directory via fdw's 'inst'
# target, then remove the generated prebuild.go (it must not be committed).
build: prebuild.go
	$(MAKE) -C ./fdw clean
	$(MAKE) -C ./fdw go
	$(MAKE) -C ./fdw
	$(MAKE) -C ./fdw inst

	rm -f prebuild.go
114 |
# make target to generate a go file containing the C includes containing bindings to the
# postgres functions
prebuild.go:
# copy the template which contains the C includes
# this is used to import the postgres bindings by the underlying C compiler
	cp prebuild.tmpl prebuild.go

# set the GOOS in the template
	sed -i.bak 's|OS_PLACEHOLDER|$(shell go env GOOS)|' prebuild.go

# replace known placeholders with values from 'pg_config'
# (-i.bak keeps the sed invocation portable between GNU and BSD sed;
# the .bak file is removed at the end)
	sed -i.bak 's|INTERNAL_INCLUDE_PLACEHOLDER|$(shell pg_config --includedir)|' prebuild.go
	sed -i.bak 's|SERVER_INCLUDE_PLACEHOLDER|$(shell pg_config --includedir-server)|' prebuild.go
	sed -i.bak 's|DISCLAIMER|This is generated. Do not check this in to Git|' prebuild.go
	sed -i.bak 's|LIB_INTL_PLACEHOLDER|$(GETTEXT_INCLUDE)|' prebuild.go
	rm -f prebuild.go.bak
131 |
# Remove all build artifacts, the generated prebuild.go and the ./work tree.
clean:
	$(MAKE) -C ./fdw clean
	rm -f prebuild.go
	rm -f steampipe_postgres_fdw.a
	rm -f steampipe_postgres_fdw.h
	rm -rf work
138 |
# Used to build the Darwin ARM binaries and upload to the github draft release.
# Usage: make release input="v1.7.2"
release:
	./scripts/upload_arm_asset.sh $(input)
143 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Steampipe Postgres FDW
2 |
3 | ## Overview
4 |
5 | The Steampipe Postgres Foreign Data Wrapper (FDW) is a Postgres extension that translates APIs to foreign tables. It does not directly interface with external systems, but instead relies on plugins to implement API- or provider-specific code that returns data in a standard format via gRPC. See the [Writing Plugins](https://steampipe.io/docs/develop/writing-plugins) guide to get started writing Steampipe plugins.
6 |
7 | The FDW is part of the [Steampipe project](https://github.com/turbot/steampipe). Bundled with the Steampipe CLI, it works with one or more of the [plugins](https://hub.steampipe.io/plugins) you install in Steampipe. You can also [install](https://steampipe.io/docs/steampipe_postgres/install) one or more plugin-specific extensions in your own instance of Postgres.
8 |
9 | ## Getting Started
10 |
11 | To use the FDW with Steampipe, [download Steampipe](https://steampipe.io/downloads) and use it to install one or more plugins.
12 |
13 | You can also use a standalone installer that enables you to choose a plugin and download the FDW for that plugin.
14 |
**[Installation guide →](https://steampipe.io/docs/steampipe_postgres/install)**
16 |
17 | ## Developing
18 |
19 | ### Building the FDW for Steampipe
20 |
21 | Make sure that you have the following installed in your system:
22 | 1. `Postgresql v14`
23 | 1. `go`
24 | 1. `gcc` for Linux
25 |
26 | > For instructions on how to install PostgreSQL, please visit: https://www.postgresql.org/download/
27 | >
> For instructions on how to install `golang`, please visit: https://go.dev/dl/
29 |
30 | Steps:
31 | 1. Clone this repository onto your system
32 | 1. Change to the cloned directory
33 | 1. Run the following commands:
34 | ```
35 | $ make
36 | ```
37 |
38 | This will compile the FDW (`steampipe_postgres_fdw.so`) along with the `control` and `sql` file in the `build-$PLATFORM` directory. This will install the compiled FDW into the default Steampipe installation directory (`~/.steampipe`) - if it exists.
39 |
40 | ### Building the FDW as a standalone extension
41 |
42 | To build the FDW for one particular plugin, and run it as a standalone extension in any PostgreSQL database without relying on Steampipe:
43 |
44 | Make sure that you have the following installed in your system:
45 | 1. `Postgresql v14`
46 | 1. `go`
47 | 1. `gcc` for Linux
48 |
49 | Steps:
50 | 1. Clone this repository onto your system
51 | 1. Change to the cloned directory
52 | 1. Run the following commands:
53 | ```
$ make standalone plugin="<plugin alias>"
55 | ```
56 | Replace plugin alias with the alias or short name of your plugin.
57 |
58 | This command will compile the FDW specifically for the chosen plugin, and the resulting binary, control file, and SQL files will be generated.
59 |
60 | #### Example
61 |
62 | Suppose you want to build the FDW for a plugin with an alias `aws` from a GitHub repository located at https://github.com/turbot/steampipe-plugin-aws. You would run the following command:
63 | ```
64 | $ make standalone plugin="aws"
65 | ```
66 |
#### To build a local plugin or an external plugin (not maintained by Turbot)
68 |
Suppose you want to build the FDW for your own plugin (not maintained by Turbot) located at https://github.com/francois2metz/steampipe-plugin-scalingo. You would need to build the FDW by running the following command:
70 | ```
71 | $ make standalone plugin="scalingo" plugin_github_url="github.com/francois2metz/steampipe-plugin-scalingo"
72 | ```
73 |
74 | #### Installing the built standalone FDW
75 |
Once you have built the standalone FDW, the binaries will be available in a folder `build-Darwin` or `build-Linux` depending on your OS. Run the `install.sh` script available in that directory. This will detect the installed PostgreSQL version and location and copy the binaries there.
77 |
78 | ```
79 | ➜ steampipe-postgres-fdw ✗ cd build-Darwin
80 | ➜ build-Darwin ✗ ./install.sh
81 |
82 | Discovered:
83 | - PostgreSQL version: 14
84 | - PostgreSQL location: /opt/homebrew/Cellar/postgresql@14/14.13_1
85 |
86 | Install Steampipe PostgreSQL FDW for version 14 in /opt/homebrew/Cellar/postgresql@14/14.13_1? (Y/n):
87 |
88 | Installing...
89 |
90 | Successfully installed steampipe_postgres_scalingo extension!
91 |
92 | Files have been copied to:
93 | - Library directory: /opt/homebrew/lib/postgresql@14
94 | - Extension directory: /opt/homebrew/share/postgresql@14/extension/
95 | ```
96 |
97 | ## Open Source & Contributing
98 |
99 | This repository is published under the [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) license. Please see our [code of conduct](https://github.com/turbot/.github/blob/main/CODE_OF_CONDUCT.md). We look forward to collaborating with you!
100 |
101 | [Steampipe](https://steampipe.io) is a product produced exclusively by [Turbot HQ, Inc](https://turbot.com). It is distributed under our commercial terms. Others are allowed to make their own distribution of the software, but cannot use any of the Turbot trademarks, cloud services, etc. You can learn more in our [Open Source FAQ](https://turbot.com/open-source).
102 |
103 |
104 |
--------------------------------------------------------------------------------
/conversion_infos.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #cgo linux LDFLAGS: -Wl,-unresolved-symbols=ignore-all
5 | #cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup
6 | #include "postgres.h"
7 | #include "common.h"
8 | #include "fdw_helpers.h"
9 | */
10 | import "C"
11 |
12 | // safe wrapper for **C.ConversionInfo with array bounds checking
13 | type conversionInfos struct {
14 | numAttrs int
15 | cinfos **C.ConversionInfo
16 | }
17 |
18 | func newConversionInfos(execState *C.FdwExecState) *conversionInfos {
19 | return &conversionInfos{cinfos: execState.cinfos, numAttrs: int(execState.numattrs)}
20 | }
21 | func (c *conversionInfos) get(idx int) *C.ConversionInfo {
22 | if idx < c.numAttrs {
23 | return C.getConversionInfo(c.cinfos, C.int(idx))
24 | }
25 | return nil
26 | }
27 |
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #cgo linux LDFLAGS: -Wl,-unresolved-symbols=ignore-all
5 | #cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup
6 | #include "fdw_helpers.h"
7 | */
8 | import "C"
9 | import "unsafe"
10 |
11 | func FdwError(e error) {
12 | cmsg := C.CString(e.Error())
13 | defer C.free(unsafe.Pointer(cmsg))
14 | C.fdw_errorReport(C.ERROR, C.ERRCODE_FDW_ERROR, cmsg)
15 | }
16 |
17 | func FdwErrorReport(level int, code int, msg string, hint string) {
18 | cmsg := C.CString(msg)
19 | defer func() { C.free(unsafe.Pointer(cmsg)) }()
20 | chint := C.CString(hint)
21 | defer C.free(unsafe.Pointer(chint))
22 | C.fdw_errorReportWithHint(C.ERROR, C.ERRCODE_FDW_INVALID_ATTRIBUTE_VALUE, cmsg, chint)
23 | }
24 |
--------------------------------------------------------------------------------
/exec.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #include "postgres.h"
5 | #include "common.h"
6 |
7 | typedef struct GoFdwExecutionState
8 | {
9 | uint tok;
10 | } GoFdwExecutionState;
11 |
12 | static inline GoFdwExecutionState* makeState(){
13 | GoFdwExecutionState *s = (GoFdwExecutionState *) malloc(sizeof(GoFdwExecutionState));
14 | return s;
15 | }
16 |
17 | static inline void freeState(GoFdwExecutionState * s){ if (s) free(s); }
18 | */
19 | import "C"
20 |
21 | import (
22 | "sync"
23 | "unsafe"
24 |
25 | "github.com/turbot/steampipe-postgres-fdw/hub"
26 | "github.com/turbot/steampipe-postgres-fdw/types"
27 | )
28 |
// ExecState holds the per-scan execution state for one foreign scan.
type ExecState struct {
	Rel   *types.Relation   // the foreign relation being scanned
	Opts  map[string]string // FDW options attached to the table/server
	Iter  hub.Iterator      // iterator supplying the scan's rows
	State *C.FdwExecState   // corresponding C-side execution state
}

// Session registry: C memory must not hold Go pointers, so each
// ExecState is kept here under a numeric token that is round-tripped
// through the C GoFdwExecutionState struct.
var (
	mu   sync.RWMutex // guards si and sess
	si   uint64       // last issued session token
	sess = make(map[uint64]*ExecState)
)
41 |
// SaveExecState registers s in the session map under a fresh token and
// returns a C-allocated GoFdwExecutionState carrying that token, suitable
// for stashing in Postgres scan state (C code may not hold Go pointers).
// NOTE(review): the token is a uint64 on the Go side but is stored in a
// C uint — values past 2^32 would truncate; presumably a single backend
// never issues that many scans, but confirm.
func SaveExecState(s *ExecState) unsafe.Pointer {
	mu.Lock()
	si++
	i := si
	sess[i] = s
	mu.Unlock()
	cs := C.makeState()
	cs.tok = C.uint(i)
	return unsafe.Pointer(cs)
}
52 |
53 | func ClearExecState(p unsafe.Pointer) {
54 | if p == nil {
55 | return
56 | }
57 | cs := (*C.GoFdwExecutionState)(p)
58 | i := uint64(cs.tok)
59 | mu.Lock()
60 | delete(sess, i)
61 | mu.Unlock()
62 | C.freeState(cs)
63 | }
64 |
65 | func GetExecState(p unsafe.Pointer) *ExecState {
66 | if p == nil {
67 | return nil
68 | }
69 | cs := (*C.GoFdwExecutionState)(p)
70 | i := uint64(cs.tok)
71 | mu.RLock()
72 | s := sess[i]
73 | mu.RUnlock()
74 | return s
75 | }
76 |
--------------------------------------------------------------------------------
/explain.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #cgo linux LDFLAGS: -Wl,-unresolved-symbols=ignore-all
5 | #cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup
6 | #include "fdw_helpers.h"
7 | */
import "C"
import "unsafe"
9 |
// Explainable is an optional interface for Iterator that can explain its
// execution plan.
type Explainable interface {
	// Explain is called during an EXPLAIN query.
	Explain(e Explainer)
}

// Explainer is a helper to build an EXPLAIN response.
type Explainer struct {
	ES *C.ExplainState // the Postgres explain state being populated
}
20 |
21 | // Property adds a key-value property to results of EXPLAIN query.
22 | func (e Explainer) Property(k, v string) {
23 | C.ExplainPropertyText(C.CString(k), C.CString(v), e.ES)
24 | }
25 |
--------------------------------------------------------------------------------
/fdw/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/turbot/steampipe-postgres-fdw/53e4f3c10f2078206888b5ad2e5321b87c66d8b2/fdw/.DS_Store
--------------------------------------------------------------------------------
/fdw/Makefile:
--------------------------------------------------------------------------------
# fdw/Makefile
#
# PGXS-based build of the C half of the FDW, linked against the Go
# archive (steampipe_postgres_fdw.a) produced by the 'go' target below.

MODULE_big = steampipe_postgres_fdw
OBJS = datum.o query.o fdw.o logging.o

SHLIB_LINK = steampipe_postgres_fdw.a

PLATFORM=$(shell uname)

ifeq ($(shell uname), Darwin)
PG_LDFLAGS=-framework Foundation -framework AppKit -framework Security
endif

EXTENSION = steampipe_postgres_fdw
DATA = steampipe_postgres_fdw--1.0.sql

REGRESS = steampipe_postgres-fdw

EXTRA_CLEAN = steampipe_postgres_fdw.a fdw.h

PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
SERVER_LIB = $(shell $(PG_CONFIG) --includedir)/server
INTERNAL_LIB = $(shell $(PG_CONFIG) --includedir)/internal

# macOS package layouts nest the headers one level deeper
ifeq ($(shell uname), Darwin)
SERVER_LIB = $(shell $(PG_CONFIG) --includedir)/postgresql/server
INTERNAL_LIB = $(shell $(PG_CONFIG) --includedir)/postgresql/internal
endif

PG_CFLAGS = -I${SERVER_LIB} -I${INTERNAL_LIB} -g

include $(PGXS)

# Determine the operating system
OS := $(shell uname)

# Always enable netgo for the build
BUILD_TAGS = netgo

ifeq ($(shell uname -s),Darwin)
export CGO_LDFLAGS = -Wl,-undefined,dynamic_lookup
endif

# if we are building against Postgres 14, add the pg14 build tag
ifeq ($(shell $(PG_CONFIG) --version | cut -d' ' -f2 | cut -d'.' -f1), 14)
BUILD_TAGS := $(BUILD_TAGS),pg14
endif
49 |
# Compile the Go sources into a C archive that the PGXS link step
# consumes (see SHLIB_LINK above).
go: ../fdw.go
	@echo $(BUILD_TAGS)
# we are building with the net package from go
# this has the caveat that, since we are not binding to libresolv, DNS resolution may
# have some subtle differences from system DNS resolution
	CGO_ENABLED=1 go build -v -o steampipe_postgres_fdw.a -tags "$(BUILD_TAGS)" -buildmode=c-archive ../*.go
56 |
# Copy the built extension artifacts into ../build-${PLATFORM} under
# their default (Steampipe) names, then remove the intermediates.
inst:
	mkdir -p ../build-${PLATFORM}
	rm -f ../build-${PLATFORM}/*

	cp steampipe_postgres_fdw.so ../build-${PLATFORM}
	cp steampipe_postgres_fdw.control ../build-${PLATFORM}
	cp steampipe_postgres_fdw--1.0.sql ../build-${PLATFORM}

	rm steampipe_postgres_fdw.so
	rm steampipe_postgres_fdw.a
	rm steampipe_postgres_fdw.h

	rm ./*.o
70 |
# Like 'inst', but renames the artifacts per-plugin
# (steampipe_postgres_$(plugin).*) and bundles the installer script and
# README for standalone distribution.
standalone:
	mkdir -p ../build-${PLATFORM}
	rm -f ../build-${PLATFORM}/*

	cp steampipe_postgres_fdw.so ../build-${PLATFORM}/steampipe_postgres_$(plugin).so
	cp steampipe_postgres_fdw.control ../build-${PLATFORM}/steampipe_postgres_$(plugin).control
	cp steampipe_postgres_fdw--1.0.sql ../build-${PLATFORM}/steampipe_postgres_$(plugin)--1.0.sql
	cp ../scripts/README.md ../build-${PLATFORM}/README.md
	cp ../scripts/install.sh ../build-${PLATFORM}/install.sh

	rm steampipe_postgres_fdw.so
	rm steampipe_postgres_fdw.a
	rm steampipe_postgres_fdw.h

	rm ./*.o
--------------------------------------------------------------------------------
/fdw/README.md:
--------------------------------------------------------------------------------
1 | # Fdw
2 |
3 | Fdw is a Postgres Foreign Data Wrapper interface written in Go.
4 | Dynamic Foreign Tables are defined through gRPC plugins, making them
5 | safe, performant and easy to build.
6 |
7 |
8 | ## Loading the fdw server and tables - Option 1
9 |
10 | Each provider acts as a separate fdw server.
11 |
12 | ```
13 | create server
14 | fdw_aws
15 | foreign data wrapper
16 | fdw
17 | options (
18 | wrapper 'aws'
19 | );
20 | ```
21 |
22 | ```
23 | create foreign table
24 | aws_acm_certificate (
25 | arn text,
26 | domain_name text
27 | )
28 | server
29 | "fdw_aws"
30 | options (
31 | table 'aws_acm_certificate'
32 | );
33 | ```
34 |
35 |
36 | ## Loading the fdw server and tables - Option 2
37 |
A single fdw server loads various providers, and allows tables within them
to be loaded individually.
40 |
41 | ```
42 | create server
43 | fdw
44 | foreign data wrapper
45 | fdw
46 | ;
47 | ```
48 |
49 | ```
50 | create foreign table
51 | aws_acm_certificate (
52 | arn text,
53 | domain_name text
54 | )
55 | server
56 | "fdw"
57 | options (
58 | table 'aws.aws_acm_certificate'
59 | )
60 | ```
61 |
62 | ## Architecture
63 |
64 | Fdw is a postgres foreign data wrapper. It is implemented in Go,
65 | but tightly coupled with the Postgres C code.
66 |
67 | Fdw then acts as a gRPC client, allowing tables to be plugged in
68 | as extensions (servers). This interface deliberately hides the Postgres
69 | internals, making it much faster and easier to implement tables. (Heavily
70 | optimized implementations needing more access to Postgres internals should
71 | consider forking Fdw.)
72 |
73 | ```
74 | +-----------+
75 | +-->| aws_* |
76 | | +-----------+
77 | |
78 | +----------+ +-----------+ gRPC | +-----------+
79 | | Postgres |=====| Fdw |--------+-->| google_* |
80 | +----------+ +-----------+ | +-----------+
81 | |
82 | | +-----------+
83 | +-->| ... |
84 | +-----------+
85 | ```
86 |
87 |
88 | ## Phases
89 |
90 | 1. Wrapper and plugin registration
91 | 2. Schema import
92 | 3. Query planning
93 | 4. Query execution
94 |
95 |
96 |
97 | ## Related
98 |
99 | * Multicorn offers a python based FDW.
100 | * [oracle_fdw](https://github.com/laurenz/oracle_fdw) is C-based.
101 |
102 |
103 | * [Query planning in Foreign Data Wrappers](https://www.postgresql.org/docs/14/fdw-planning.html)
104 | * [Parallel safety](https://www.postgresql.org/docs/14/parallel-safety.html)
105 | * [FDW Routines for Parallel Execution](https://www.postgresql.org/docs/14/fdw-callbacks.html#FDW-CALLBACKS-PARALLEL)
106 |
--------------------------------------------------------------------------------
/fdw/common.h:
--------------------------------------------------------------------------------
1 | #include "postgres.h"
2 | #include "access/attnum.h"
3 | #include "access/relscan.h"
4 | #include "catalog/pg_foreign_server.h"
5 | #include "catalog/pg_foreign_table.h"
6 | #include "catalog/pg_type.h"
7 | #include "commands/defrem.h"
8 | #include "commands/explain.h"
9 | #include "foreign/fdwapi.h"
10 | #include "foreign/foreign.h"
11 | #include "funcapi.h"
12 | #include "lib/stringinfo.h"
13 | #include "nodes/bitmapset.h"
14 | #include "nodes/makefuncs.h"
15 | #include "nodes/pg_list.h"
16 | #include "utils/builtins.h"
17 | #include "utils/inet.h"
18 | #include "utils/jsonb.h"
19 | #include "utils/rel.h"
20 | #include "utils/syscache.h"
21 | #if PG_VERSION_NUM < 120000
22 | #include "nodes/relation.h"
23 | #endif
24 |
25 | #ifndef FDW_COMMON_H
26 | #define FDW_COMMON_H
27 |
/*
 * ConversionInfo caches per-attribute metadata used to convert values
 * between their Postgres Datum form and the form exchanged with the Go
 * side (see datum.c and initConversioninfo in query.c).
 */
typedef struct ConversionInfo
{
	char *attrname;       /* attribute (column) name */
	FmgrInfo *attinfunc;  /* type input function */
	FmgrInfo *attoutfunc; /* type output function */
	Oid atttypoid;        /* attribute type OID */
	Oid attioparam;       /* I/O parameter for the in/out functions */
	int32 atttypmod;      /* type modifier (e.g. varchar length) */
	int attnum;           /* attribute number within the relation */
	bool is_array;        /* true when the attribute is an array type */
	int attndims;         /* number of array dimensions */
	bool need_quote;      /* whether the value needs quoting when deparsed */
} ConversionInfo;
41 |
/*
 * FdwPathData is attached to candidate foreign paths; it carries the
 * deparsed sort keys and whether every sort field can be pushed down.
 */
typedef struct FdwPathData {
	List *deparsed_pathkeys;       /* list of FdwDeparsedSortGroup */
	bool canPushdownAllSortFields; /* all ORDER BY fields push down? */
} FdwPathData;
46 |
/*
 * FdwPlanState is the planner-phase state threaded through
 * GetForeignRelSize / GetForeignPaths / GetForeignPlan.
 */
typedef struct FdwPlanState
{
	Oid foreigntableid;      /* OID of the foreign table being planned */
	AttrNumber numattrs;     /* number of attributes in the relation */
	int fdw_instance;
	List *target_list;       /* columns required by the query */
	int startupCost;
	ConversionInfo **cinfos; /* per-attribute conversion info */
	List *pathkeys; /* list of FdwDeparsedSortGroup) */
	// can all sort fields be pushed down?
	// this is true if there are NO sort fields, or if ALL sort fields can be pushed down
	// this is used by goFdwBeginForeignScan to decide whether to push down the limit
	bool canPushdownAllSortFields;
	/* For some reason, `baserel->reltarget->width` gets changed
	 * outside of our control somewhere between GetForeignPaths and
	 * GetForeignPlan, which breaks tests.
	 *
	 * XXX: This is very crude hack to transfer width, calculated by
	 * getRelSize to GetForeignPlan.
	 */
	int width;
	// the number of rows to return (limit+offset). -1 means no limit
	int limit;

} FdwPlanState;
72 |
/*
 * FdwExecState is the executor-phase state for one foreign scan,
 * initialized from the serialized plan (see initializeExecState).
 */
typedef struct FdwExecState
{
	/* Information carried from the plan phase. */
	List *target_list;       /* columns required by the query */
	Datum *values;           /* per-attribute output values for a row */
	bool *nulls;             /* per-attribute null flags for a row */
	int numattrs;            /* number of attributes in the relation */
	ConversionInfo **cinfos; /* per-attribute conversion info */
	/* Common buffer to avoid repeated allocations */
	StringInfo buffer;
	AttrNumber rowidAttno;
	char *rowidAttrName;
	List *pathkeys; /* list of FdwDeparsedSortGroup) */
	// the number of rows to return (limit+offset). -1 means no limit
	int limit;
	// can all sort fields be pushed down?
	// this is true if there are NO sort fields, or if ALL sort fields can be pushed down
	// this is used by goFdwBeginForeignScan to decide whether to push down the limit
	bool canPushdownAllSortFields;
} FdwExecState;
93 |
/*
 * FdwDeparsedSortGroup describes one deparsed ORDER BY / pathkey entry.
 */
typedef struct FdwDeparsedSortGroup
{
	Name attname;     /* name of the sorted attribute */
	int attnum;       /* attribute number of the sorted attribute */
	bool reversed;    /* true for descending sort order */
	bool nulls_first; /* true when NULLS FIRST applies */
	Name collate;     /* collation name, if any */
	PathKey *key;     /* the originating planner PathKey */
} FdwDeparsedSortGroup;

/* List accessor: cast a ListCell's pointer to FdwDeparsedSortGroup. */
static inline FdwDeparsedSortGroup *cellGetFdwDeparsedSortGroup(ListCell *n) { return (FdwDeparsedSortGroup *)n->ptr_value; }
105 |
// datum.c — Datum-to-C-value extraction helpers used by the Go side
char *datumString(Datum datum, ConversionInfo *cinfo);
int64 datumInt16(Datum datum, ConversionInfo *cinfo);
int64 datumInt32(Datum datum, ConversionInfo *cinfo);
int64 datumInt64(Datum datum, ConversionInfo *cinfo);
inet *datumInet(Datum datum, ConversionInfo *cinfo);
inet *datumCIDR(Datum datum, ConversionInfo *cinfo);
double datumFloat4(Datum datum, ConversionInfo *cinfo);
double datumFloat8(Datum datum, ConversionInfo *cinfo);
bool datumBool(Datum datum, ConversionInfo *cinfo);
Jsonb *datumJsonb(Datum datum, ConversionInfo *cinfo);
Timestamp datumDate(Datum datum, ConversionInfo *cinfo);
Timestamp datumTimestamp(Datum datum, ConversionInfo *cinfo);

// query.c — planning/deparsing helpers (column extraction, sort-group
// (de)serialization, qual canonicalization)
List *extractColumns(List *reltargetlist, List *restrictinfolist);
void initConversioninfo(ConversionInfo **cinfo, AttInMetadata *attinmeta);
#if PG_VERSION_NUM >= 150000
String *colnameFromVar(Var *var, PlannerInfo *root, FdwPlanState *state);
#else
Value *colnameFromVar(Var *var, PlannerInfo *root, FdwPlanState *state);
#endif
bool computeDeparsedSortGroup(List *deparsed, FdwPlanState *planstate, List **apply_pathkeys, List **deparsed_pathkeys);
List *findPaths(PlannerInfo *root, RelOptInfo *baserel, List *possiblePaths, int startupCost, FdwPlanState *state, List *apply_pathkeys, List *deparsed_pathkeys);
List *deparse_sortgroup(PlannerInfo *root, Oid foreigntableid, RelOptInfo *rel);
List *serializeDeparsedSortGroup(List *pathkeys);
List *deserializeDeparsedSortGroup(List *items);
OpExpr *canonicalOpExpr(OpExpr *opExpr, Relids base_relids);
ScalarArrayOpExpr *canonicalScalarArrayOpExpr(ScalarArrayOpExpr *opExpr, Relids base_relids);
char *getOperatorString(Oid opoid);
#endif // FDW_COMMON_H
--------------------------------------------------------------------------------
/fdw/datum.c:
--------------------------------------------------------------------------------
1 |
2 | #include "postgres.h"
3 | #include "common.h"
4 | #include "utils/inet.h"
5 | #include "utils/timestamp.h"
6 |
7 | char *datumString(Datum datum, ConversionInfo *cinfo) {
8 | if (datum == 0) {
9 | return "?";
10 | }
11 | return TextDatumGetCString(datum);
12 | }
13 |
14 | inet *datumInet(Datum datum, ConversionInfo *cinfo) {
15 | if (datum == 0) {
16 | return (inet *)0;
17 | }
18 | return DatumGetInetPP(datum);
19 | }
20 |
21 |
/* datumInt16 extracts an int16 Datum, widened to int64. */
int64 datumInt16(Datum datum, ConversionInfo *cinfo) {
    return DatumGetInt16(datum);
}

/* datumInt32 extracts an int32 Datum, widened to int64. */
int64 datumInt32(Datum datum, ConversionInfo *cinfo) {
    return DatumGetInt32(datum);
}

/* datumInt64 extracts an int64 Datum. */
int64 datumInt64(Datum datum, ConversionInfo *cinfo) {
    return DatumGetInt64(datum);
}

/* datumFloat4 extracts a float4 Datum, widened to double. */
double datumFloat4(Datum datum, ConversionInfo *cinfo) {
    return DatumGetFloat4(datum);
}

/* datumFloat8 extracts a float8 Datum. */
double datumFloat8(Datum datum, ConversionInfo *cinfo) {
    return DatumGetFloat8(datum);
}

/* datumBool extracts a boolean Datum. */
bool datumBool(Datum datum, ConversionInfo *cinfo) {
    return DatumGetBool(datum);
}

/* datumJsonb detoasts a jsonb Datum. */
Jsonb * datumJsonb(Datum datum, ConversionInfo *cinfo) {
    return DatumGetJsonbP(datum);
}

/* datumDate converts a date Datum to a Timestamp by calling the
 * built-in date_timestamp function, then reads the int64 result. */
Timestamp datumDate(Datum datum, ConversionInfo *cinfo) {
    datum = DirectFunctionCall1(date_timestamp, datum);
    return DatumGetInt64(datum);
}

/* datumTimestamp extracts a timestamp Datum. */
Timestamp datumTimestamp(Datum datum, ConversionInfo *cinfo) {
    return DatumGetTimestamp(datum);
}
--------------------------------------------------------------------------------
/fdw/fdw_handlers.h:
--------------------------------------------------------------------------------
1 | // Generated by cgo from fdw.go. Included here so our functions are
2 | // defined and available.
3 | #include "fmgr.h"
4 |
5 | static bool fdwIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte);
6 | static void fdwGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid);
7 | static void fdwGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid);
8 | static ForeignScan *fdwGetForeignPlan(
9 | PlannerInfo *root,
10 | RelOptInfo *baserel,
11 | Oid foreigntableid,
12 | ForeignPath *best_path,
13 | List *tlist,
14 | List *scan_clauses,
15 | Plan *outer_plan
16 | );
17 |
18 | // Define our handling functions with Postgres, following the V1 protocol.
19 | PG_FUNCTION_INFO_V1(fdw_handler);
20 | PG_FUNCTION_INFO_V1(fdw_validator);
21 |
22 |
/*
 * fdw_handler builds and returns the FdwRoutine that wires Postgres'
 * FDW callback slots to this extension: the fdw* planning hooks declared
 * above and the goFdw* scan/schema/insert functions exported from Go.
 */
Datum fdw_handler(PG_FUNCTION_ARGS) {
  FdwRoutine *fdw_routine = makeNode(FdwRoutine);
  fdw_routine->IsForeignScanParallelSafe = fdwIsForeignScanParallelSafe;
  fdw_routine->GetForeignRelSize = fdwGetForeignRelSize;
  fdw_routine->GetForeignPaths = fdwGetForeignPaths;
  fdw_routine->GetForeignPlan = fdwGetForeignPlan;
  fdw_routine->ExplainForeignScan = goFdwExplainForeignScan;
  fdw_routine->BeginForeignScan = goFdwBeginForeignScan;
  fdw_routine->IterateForeignScan = goFdwIterateForeignScan;
  fdw_routine->ReScanForeignScan = goFdwReScanForeignScan;
  fdw_routine->EndForeignScan = goFdwEndForeignScan;
  fdw_routine->ImportForeignSchema = goFdwImportForeignSchema;
  fdw_routine->ExecForeignInsert = goFdwExecForeignInsert;

  PG_RETURN_POINTER(fdw_routine);
}
39 |
// TODO - Use this to validate the arguments passed to the FDW
// https://github.com/laurenz/oracle_fdw/blob/9d7b5c331b0c8851c71f410f77b41c1a83c89ece/oracle_fdw.c#L420
/*
 * fdw_validator is invoked by Postgres to validate the options supplied
 * to CREATE SERVER / CREATE FOREIGN TABLE etc. for the catalog object
 * identified by argument 1; it delegates to goFdwValidate on the Go side.
 */
Datum fdw_validator(PG_FUNCTION_ARGS) {
  Oid catalog = PG_GETARG_OID(1);
  List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
  goFdwValidate(catalog, options_list);
  PG_RETURN_VOID();
}
--------------------------------------------------------------------------------
/fdw/fdw_helpers.h:
--------------------------------------------------------------------------------
1 |
2 | #include "postgres.h"
3 | #include "common.h"
4 | #include "access/reloptions.h"
5 | #include "catalog/pg_foreign_server.h"
6 | #include "catalog/pg_foreign_table.h"
7 | #include "commands/defrem.h"
8 | #include "foreign/fdwapi.h"
9 | #include "foreign/foreign.h"
10 | #include "funcapi.h"
11 | #include "nodes/extensible.h"
12 | #include "nodes/pg_list.h"
13 | #include "optimizer/optimizer.h"
14 | #include "optimizer/pathnode.h"
15 | #include "optimizer/planmain.h"
16 | #include "optimizer/restrictinfo.h"
17 | #include "storage/ipc.h"
18 | #include "utils/inet.h"
19 | #include "utils/rel.h"
20 | #include "utils/syscache.h"
21 | #include "utils/lsyscache.h"
22 | #include "netinet/in.h"
23 |
extern char **environ;

// Macro expansions — thin wrappers so Go (cgo) can call what are
// macros on the C side.
static inline FormData_pg_attribute *fdw_tupleDescAttr(TupleDesc tupdesc, int i) { return TupleDescAttr(tupdesc, i); }
static inline TupleDescData *fdw_relationGetDescr(Relation relation) { return RelationGetDescr(relation); }
static inline Oid fdw_relationGetNamespace(Relation relation) { return RelationGetNamespace(relation); }

// Error reporting wrappers around ereport (with and without a hint).
static inline void fdw_errorReport(int level, int code, char *msg) { ereport(level, (errcode(code), errmsg("%s", msg))); }
static inline void fdw_errorReportWithHint(int level, int code, char *msg, char *hint) { ereport(level, (errcode(code), errmsg("%s", msg), errhint("%s", hint))); }

// Syscache / heap tuple access wrappers.
static inline HeapTuple fdw_searchSysCache1Oid(Datum key1) { return SearchSysCache1(TYPEOID, key1); }
static inline HeapTuple fdw_searchSysCache1(Oid id, Datum key1) { return SearchSysCache1(id, key1); }
static inline Datum fdw_objectIdGetDatum(Oid id) { return ObjectIdGetDatum(id); }
static inline bool fdw_heapTupleIsValid(HeapTuple tuple) { return HeapTupleIsValid(tuple); }
static inline void *fdw_getStruct(HeapTuple tuple) { return GETSTRUCT(tuple); }

static inline NodeTag fdw_nodeTag(Expr *node) { return nodeTag(node); }

// Go-value → Datum conversion wrappers; the PG16+ variants account for
// API changes in the Datum-returning macros.
#if PG_VERSION_NUM >= 160000
static inline Datum fdw_cStringGetDatum(const char *str) { PG_RETURN_DATUM(CStringGetTextDatum((char *)str)); }
static inline Datum fdw_jsonbGetDatum(const char *str) { PG_RETURN_JSONB_P((char *)DirectFunctionCall1(jsonb_in, CStringGetDatum(str))); }
#else
static inline Datum fdw_cStringGetDatum(const char *str) { PG_RETURN_TEXT_P(CStringGetTextDatum(str)); }
static inline Datum fdw_jsonbGetDatum(const char *str) { PG_RETURN_JSONB_P(DirectFunctionCall1(jsonb_in, CStringGetDatum(str))); }
#endif

static inline Datum fdw_boolGetDatum(bool b) { PG_RETURN_BOOL(b); }
static inline Datum fdw_numericGetDatum(int64_t num) { PG_RETURN_INT64(Int64GetDatum(num)); }
static inline Datum fdw_floatGetDatum(double num) { PG_RETURN_FLOAT8(Float8GetDatum(num)); }
static inline Datum fdw_pointerGetDatum(void *num) { PG_RETURN_DATUM(PointerGetDatum(num)); }

// Form a heap tuple from the value/null arrays and store it in the
// scan's tuple slot.
static inline void fdw_saveTuple(Datum *data, bool *isnull, ScanState *state)
{
	HeapTuple tuple = heap_form_tuple(state->ss_currentRelation->rd_att, data, isnull);
	ExecStoreHeapTuple(tuple, state->ss_ScanTupleSlot, false);
}
static inline ArrayType *fdw_datumGetArrayTypeP(Datum datum) { return ((ArrayType *)PG_DETOAST_DATUM(datum)); }
static inline char *fdw_datumGetString(Datum datum) { return text_to_cstring((text *)DatumGetPointer(datum)); }

// Helpers
List *extractColumns(List *reltargetlist, List *restrictinfolist);
FdwExecState *initializeExecState(void *internalstate);

static inline ConversionInfo *getConversionInfo(ConversionInfo **cinfos, int i) { return cinfos[i]; }
// Value was replaced by String in PG15; valueString abstracts the two.
#if PG_VERSION_NUM >= 150000
static inline char *valueString(String *v) { return strVal(v); }
#else
static inline char *valueString(Value *v) { return (((Value *)(v))->val.str); }
#endif

// appendBinaryStringInfo's data parameter changed type in PG16.
#if PG_VERSION_NUM >= 160000
static inline void fdw_appendBinaryStringInfo(StringInfo str, const char *data, int datalen) { appendBinaryStringInfo(str,(void *)data,datalen); }
#else
static inline void fdw_appendBinaryStringInfo(StringInfo str, const char *data, int datalen) { appendBinaryStringInfo(str,data,datalen); }
#endif

// Pointer arithmetic and inet accessors exposed to Go.
static inline char **incStringPointer(char **ptr) { return ++ptr; }
static inline unsigned char *incUcharPointer(unsigned char *ptr) { return ++ptr; }
static inline unsigned char *ipAddr(inet *i) { return ip_addr(i); }
static inline unsigned char netmaskBits(inet *i) { return ip_bits(i); }
static inline bool isIpV6(inet *i) { return ip_family(i) == PGSQL_AF_INET6; }

// Loop helpers — typed accessors for ListCell contents.
static inline RangeVar *cellGetRangeVar(ListCell *n) { return (RangeVar *)n->ptr_value; }
static inline DefElem *cellGetDef(ListCell *n) { return (DefElem *)n->ptr_value; }
static inline Expr *cellGetExpr(ListCell *n) { return (Expr *)n->ptr_value; }
static inline Node *cellGetNode(ListCell *n) { return (Node *)n->ptr_value; }
#if PG_VERSION_NUM >= 150000
static inline String *cellGetString(ListCell *n) { return (String *)n->ptr_value; }
#else
static inline Value *cellGetString(ListCell *n) { return (Value *)n->ptr_value; }
#endif
static inline Var *cellGetVar(ListCell *n) { return (Var *)n->ptr_value; }
static inline OpExpr *cellGetOpExpr(ListCell *n) { return (OpExpr *)n->ptr_value; }
static inline ScalarArrayOpExpr *cellGetScalarArrayOpExpr(ListCell *n) { return (ScalarArrayOpExpr *)n->ptr_value; }
static inline NullTest *cellGetNullTest(ListCell *n) { return (NullTest *)n->ptr_value; }
static inline BooleanTest *cellGetBooleanTest(ListCell *n) { return (BooleanTest *)n->ptr_value; }
101 | static inline BoolExpr *cellGetBoolExpr(ListCell *n) { return (BoolExpr *)n->ptr_value; }
102 |
103 | static inline RestrictInfo *cellGetRestrictInfo(ListCell *n) { return (RestrictInfo *)n->ptr_value; }
104 | static inline char *nameStr(Name n) { return NameStr(*n); }
105 |
106 |
107 | // logging
108 | char *tagTypeToString(NodeTag type);
--------------------------------------------------------------------------------
/fdw/logging.c:
--------------------------------------------------------------------------------
1 |
2 | #include "common.h"
3 | #include "fdw_helpers.h"
4 |
5 | // convert NodeTag to string. At the moment it only handles primitive types, bu twoul dbe easy to add all if neeeded
6 | char* tagTypeToString(NodeTag type)
7 | {
8 | char *tagNames[] ={
9 | "T_Alias",
10 | "T_RangeVar",
11 | "T_TableFunc",
12 | "T_Expr",
13 | "T_Var",
14 | "T_Const",
15 | "T_Param",
16 | "T_Aggref",
17 | "T_GroupingFunc",
18 | "T_WindowFunc",
19 | "T_SubscriptingRef",
20 | "T_FuncExpr",
21 | "T_NamedArgExpr",
22 | "T_OpExpr",
23 | "T_DistinctExpr",
24 | "T_NullIfExpr",
25 | "T_ScalarArrayOpExpr",
26 | "T_BoolExpr",
27 | "T_SubLink",
28 | "T_SubPlan",
29 | "T_AlternativeSubPlan",
30 | "T_FieldSelect",
31 | "T_FieldStore",
32 | "T_RelabelType",
33 | "T_CoerceViaIO",
34 | "T_ArrayCoerceExpr",
35 | "T_ConvertRowtypeExpr",
36 | "T_CollateExpr",
37 | "T_CaseExpr",
38 | "T_CaseWhen",
39 | "T_CaseTestExpr",
40 | "T_ArrayExpr",
41 | "T_RowExpr",
42 | "T_RowCompareExpr",
43 | "T_CoalesceExpr",
44 | "T_MinMaxExpr",
45 | "T_SQLValueFunction",
46 | "T_XmlExpr",
47 | "T_NullTest",
48 | "T_BooleanTest",
49 | "T_CoerceToDomain",
50 | "T_CoerceToDomainValue",
51 | "T_SetToDefault",
52 | "T_CurrentOfExpr",
53 | "T_NextValueExpr",
54 | "T_InferenceElem",
55 | "T_TargetEntry",
56 | "T_RangeTblRef",
57 | "T_JoinExpr",
58 | "T_FromExpr",
59 | "T_OnConflictExpr",
60 | "T_IntoClause"
61 | };
62 | int idx = (int)type - (int)T_Alias;
63 | if (idx < sizeof(tagNames) / sizeof(tagNames[0])){
64 | return tagNames[idx];
65 | }
66 | return "";
67 |
68 | }
--------------------------------------------------------------------------------
/fdw/steampipe_postgres_fdw--1.0.sql:
--------------------------------------------------------------------------------
/* fdw-c/steampipe_postgres_fdw--1.0.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
-- (fix: the message previously told users to CREATE EXTENSION "fdw",
-- which is not this extension's name)
\echo Use "CREATE EXTENSION steampipe_postgres_fdw" to load this extension. \quit

-- handler: returns the FdwRoutine implemented in the C module
CREATE FUNCTION fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- validator: checks options supplied to the wrapper / server / foreign tables
CREATE FUNCTION fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

CREATE FOREIGN DATA WRAPPER steampipe_postgres_fdw
HANDLER fdw_handler
VALIDATOR fdw_validator;
--------------------------------------------------------------------------------
/fdw/steampipe_postgres_fdw.control:
--------------------------------------------------------------------------------
1 | # fdw extension
2 | comment = 'Steampipe Foreign Data Wrapper'
3 | default_version = '1.0'
4 | module_pathname = '$libdir/steampipe_postgres_fdw.so'
5 | relocatable = true
6 |
--------------------------------------------------------------------------------
/generate/generator.go:
--------------------------------------------------------------------------------
1 | //go:build tool
2 |
3 | package main
4 |
5 | import (
6 | "fmt"
7 | "log"
8 | "os"
9 | "os/exec"
10 | "path"
11 | "path/filepath"
12 | "strings"
13 | "text/template"
14 | )
15 |
16 | const templateExt = ".tmpl"
17 |
18 | func RenderDir(templatePath, root, plugin, pluginGithubUrl, pluginVersion, pgVersion string) {
19 | var targetFilePath string
20 | err := filepath.Walk(templatePath, func(filePath string, info os.FileInfo, err error) error {
21 | if err != nil {
22 | fmt.Printf("Error accessing path %s: %v\n", filePath, err)
23 | return nil
24 | }
25 |
26 | fmt.Println("filePath:", filePath)
27 | if info.IsDir() {
28 | fmt.Println("not a file, continuing...\n")
29 | return nil
30 | }
31 |
32 | relativeFilePath := strings.TrimPrefix(filePath, root)
33 | // fmt.Println("relative path:", relativeFilePath)
34 | ext := filepath.Ext(filePath)
35 | // fmt.Println("extension:", ext)
36 |
37 | if ext != templateExt {
38 | fmt.Println("not tmpl, continuing...\n")
39 | return nil
40 | }
41 |
42 | templateFileName := strings.TrimPrefix(relativeFilePath, "/templates/")
43 | // fmt.Println("template fileName:", templateFileName)
44 | fileName := strings.TrimSuffix(templateFileName, ext)
45 | // fmt.Println("actual fileName:", fileName)
46 |
47 | targetFilePath = path.Join(root, fileName)
48 | // fmt.Println("targetFilePath:", targetFilePath)
49 |
50 | // read template file
51 | templateContent, err := os.ReadFile(filePath)
52 | if err != nil {
53 | fmt.Printf("Error reading template file: %v\n", err)
54 | return err
55 | }
56 |
57 | // create a new template and parse the content
58 | tmpl := template.Must(template.New(targetFilePath).Parse(string(templateContent)))
59 |
60 | // create a buffer to render the template
61 | var renderedContent strings.Builder
62 |
63 | // define the data to be used in the template
64 | data := struct {
65 | Plugin string
66 | PluginGithubUrl string
67 | PluginVersion string
68 | PgVersion string
69 | }{
70 | plugin,
71 | pluginGithubUrl,
72 | pluginVersion,
73 | pgVersion,
74 | }
75 |
76 | // execute the template with the data
77 | if err := tmpl.Execute(&renderedContent, data); err != nil {
78 | fmt.Printf("Error rendering template: %v\n", err)
79 | return err
80 | }
81 |
82 | // write the rendered content to the target file
83 | if err := os.WriteFile(targetFilePath, []byte(renderedContent.String()), 0644); err != nil {
84 | fmt.Printf("Error writing to target file: %v\n", err)
85 | return err
86 | }
87 |
88 | return nil
89 | })
90 |
91 | if err != nil {
92 | fmt.Println(err)
93 | return
94 | }
95 | }
96 |
97 | func main() {
98 | // Check if the correct number of command-line arguments are provided
99 | if len(os.Args) < 4 {
100 | fmt.Println("Usage: go run generator.go [plugin_version] [pluginGithubUrl]")
101 | return
102 | }
103 |
104 | templatePath := os.Args[1]
105 | root := os.Args[2]
106 | plugin := os.Args[3]
107 | var pluginVersion string
108 | var pluginGithubUrl string
109 |
110 | // Check if pluginVersion is provided as a command-line argument
111 | if len(os.Args) >= 5 {
112 | pluginVersion = os.Args[4]
113 | }
114 |
115 | // Check if PluginGithubUrl is provided as a command-line argument
116 | if len(os.Args) >= 6 {
117 | pluginGithubUrl = os.Args[5]
118 | } else {
119 | // If PluginGithubUrl is not provided, generate it based on PluginAlias
120 | pluginGithubUrl = "github.com/turbot/steampipe-plugin-" + plugin
121 | }
122 |
123 | // If pluginVersion is provided but pluginGithubUrl is not, error out
124 | if pluginVersion != "" && pluginGithubUrl == "" {
125 | fmt.Println("Error: plugin_github_url is required when plugin_version is specified")
126 | return
127 | }
128 |
129 | // Convert relative paths to absolute paths
130 | absTemplatePath, err := filepath.Abs(templatePath)
131 | if err != nil {
132 | fmt.Printf("Error converting templatePath to absolute path: %v\n", err)
133 | return
134 | }
135 |
136 | absRoot, err := filepath.Abs(root)
137 | if err != nil {
138 | fmt.Printf("Error converting root to absolute path: %v\n", err)
139 | return
140 | }
141 |
142 | // get the postgres version used
143 | pgVersion := getPostgreSQLVersion()
144 |
145 | RenderDir(absTemplatePath, absRoot, plugin, pluginGithubUrl, pluginVersion, pgVersion)
146 | }
147 |
// getPostgreSQLVersion returns the version string reported by
// `pg_config --version` (e.g. "PostgreSQL 14.10"). The process exits if
// pg_config cannot be executed.
func getPostgreSQLVersion() string {
	cmd := exec.Command("pg_config", "--version")
	out, err := cmd.Output()
	if err != nil {
		log.Fatalf("Failed to execute pg_config command: %s", err)
	}
	// pg_config terminates its output with a newline; trim surrounding
	// whitespace so the version embeds cleanly in rendered templates
	return strings.TrimSpace(string(out))
}
156 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/turbot/steampipe-postgres-fdw
2 |
3 | go 1.23.1
4 |
5 | toolchain go1.23.3
6 |
7 | require (
8 | github.com/dgraph-io/ristretto v0.2.0 // indirect
9 | github.com/golang/protobuf v1.5.4
10 | github.com/hashicorp/go-hclog v1.6.3
11 | github.com/hashicorp/go-version v1.7.0 // indirect
12 | github.com/turbot/go-kit v1.0.0
13 | github.com/turbot/steampipe v1.7.0-rc.0.0.20250210104953-c81e20d80731
14 | github.com/turbot/steampipe-plugin-sdk/v5 v5.11.3
15 | go.opentelemetry.io/otel v1.26.0
16 | google.golang.org/protobuf v1.35.2
17 | )
18 |
19 | require (
20 | github.com/Masterminds/semver/v3 v3.2.1
21 | github.com/turbot/pipe-fittings/v2 v2.1.1
22 | go.opentelemetry.io/otel/metric v1.26.0
23 | )
24 |
25 | require (
26 | cloud.google.com/go v0.112.1 // indirect
27 | cloud.google.com/go/compute/metadata v0.3.0 // indirect
28 | cloud.google.com/go/iam v1.1.6 // indirect
29 | cloud.google.com/go/storage v1.38.0 // indirect
30 | github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
31 | github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
32 | github.com/aws/aws-sdk-go v1.44.189 // indirect
33 | github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
34 | github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect
35 | github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect
36 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
37 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
38 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
39 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
40 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
41 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
42 | github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect
43 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
44 | github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
45 | github.com/aws/smithy-go v1.20.2 // indirect
46 | github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
47 | github.com/briandowns/spinner v1.23.0 // indirect
48 | github.com/containerd/errdefs v0.3.0 // indirect
49 | github.com/containerd/log v0.1.0 // indirect
50 | github.com/containerd/platforms v0.2.1 // indirect
51 | github.com/cyphar/filepath-securejoin v0.2.5 // indirect
52 | github.com/eko/gocache/lib/v4 v4.1.6 // indirect
53 | github.com/eko/gocache/store/bigcache/v4 v4.2.1 // indirect
54 | github.com/eko/gocache/store/ristretto/v4 v4.2.1 // indirect
55 | github.com/felixge/httpsnoop v1.0.4 // indirect
56 | github.com/gabriel-vasile/mimetype v1.4.3 // indirect
57 | github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
58 | github.com/go-git/go-billy/v5 v5.6.0 // indirect
59 | github.com/go-git/go-git/v5 v5.13.0 // indirect
60 | github.com/go-playground/locales v0.14.1 // indirect
61 | github.com/go-playground/universal-translator v0.18.1 // indirect
62 | github.com/go-playground/validator/v10 v10.20.0 // indirect
63 | github.com/goccy/go-yaml v1.11.2 // indirect
64 | github.com/golang/mock v1.6.0 // indirect
65 | github.com/google/s2a-go v0.1.7 // indirect
66 | github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
67 | github.com/googleapis/gax-go/v2 v2.12.3 // indirect
68 | github.com/hashicorp/go-getter v1.7.5 // indirect
69 | github.com/hashicorp/go-safetemp v1.0.0 // indirect
70 | github.com/hashicorp/terraform-registry-address v0.2.1 // indirect
71 | github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f // indirect
72 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
73 | github.com/jackc/pgx/v5 v5.7.1 // indirect
74 | github.com/jackc/puddle/v2 v2.2.2 // indirect
75 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
76 | github.com/jmespath/go-jmespath v0.4.0 // indirect
77 | github.com/karrick/gows v0.3.0 // indirect
78 | github.com/klauspost/compress v1.17.2 // indirect
79 | github.com/leodido/go-urn v1.4.0 // indirect
80 | github.com/moby/locker v1.0.1 // indirect
81 | github.com/pjbgf/sha1cd v0.3.0 // indirect
82 | github.com/rs/xid v1.5.0 // indirect
83 | github.com/sagikazarmark/locafero v0.4.0 // indirect
84 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect
85 | github.com/sourcegraph/conc v0.3.0 // indirect
86 | github.com/spf13/cobra v1.8.1 // indirect
87 | github.com/thediveo/enumflag/v2 v2.0.5 // indirect
88 | github.com/turbot/pipes-sdk-go v0.12.0 // indirect
89 | github.com/turbot/steampipe-plugin-code v1.0.1-alpha.1 // indirect
90 | github.com/turbot/terraform-components v0.0.0-20231213122222-1f3526cab7a7 // indirect
91 | github.com/ulikunitz/xz v0.5.10 // indirect
92 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
93 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
94 | go.uber.org/atomic v1.9.0 // indirect
95 | go.uber.org/multierr v1.9.0 // indirect
96 | golang.org/x/mod v0.19.0 // indirect
97 | golang.org/x/term v0.30.0 // indirect
98 | golang.org/x/time v0.5.0 // indirect
99 | golang.org/x/tools v0.23.0 // indirect
100 | golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
101 | google.golang.org/api v0.171.0 // indirect
102 | google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
103 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
104 | gopkg.in/warnings.v0 v0.1.2 // indirect
105 | oras.land/oras-go/v2 v2.5.0 // indirect
106 | sigs.k8s.io/yaml v1.4.0 // indirect
107 | )
108 |
109 | require (
110 | github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
111 | github.com/agext/levenshtein v1.2.3 // indirect
112 | github.com/allegro/bigcache/v3 v3.1.0 // indirect
113 | github.com/apparentlymart/go-cidr v1.1.0 // indirect
114 | github.com/beorn7/perks v1.0.1 // indirect
115 | github.com/bgentry/speakeasy v0.2.0 // indirect
116 | github.com/bmatcuk/doublestar v1.3.4 // indirect
117 | github.com/btubbs/datetime v0.1.1 // indirect
118 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect
119 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
120 | github.com/containerd/containerd v1.7.27 // indirect
121 | github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect
122 | github.com/dustin/go-humanize v1.0.1 // indirect
123 | github.com/fatih/color v1.17.0 // indirect
124 | github.com/fsnotify/fsnotify v1.7.0 // indirect
125 | github.com/gertd/go-pluralize v0.2.1
126 | github.com/ghodss/yaml v1.0.0 // indirect
127 | github.com/go-logr/logr v1.4.2 // indirect
128 | github.com/go-logr/stdr v1.2.2 // indirect
129 | github.com/go-ole/go-ole v1.2.6 // indirect
130 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
131 | github.com/google/go-cmp v0.6.0 // indirect
132 | github.com/google/uuid v1.6.0 // indirect
133 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
134 | github.com/hashicorp/errwrap v1.1.0 // indirect
135 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
136 | github.com/hashicorp/go-multierror v1.1.1 // indirect
137 | github.com/hashicorp/go-plugin v1.6.1 // indirect
138 | github.com/hashicorp/go-uuid v1.0.3 // indirect
139 | github.com/hashicorp/hcl v1.0.0 // indirect
140 | github.com/hashicorp/hcl/v2 v2.22.0 // indirect
141 | github.com/hashicorp/terraform-svchost v0.1.1 // indirect
142 | github.com/hashicorp/yamux v0.1.1 // indirect
143 | github.com/iancoleman/strcase v0.3.0 // indirect
144 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect
145 | github.com/jackc/pgconn v1.14.3 // indirect
146 | github.com/jackc/pgio v1.0.0 // indirect
147 | github.com/jackc/pgpassfile v1.0.0 // indirect
148 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect
149 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
150 | github.com/logrusorgru/aurora v2.0.3+incompatible // indirect
151 | github.com/magiconair/properties v1.8.7 // indirect
152 | github.com/mattn/go-colorable v0.1.13 // indirect
153 | github.com/mattn/go-isatty v0.0.20 // indirect
154 | github.com/mattn/go-runewidth v0.0.15 // indirect
155 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
156 | github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
157 | github.com/mitchellh/go-homedir v1.1.0 // indirect
158 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect
159 | github.com/mitchellh/go-wordwrap v1.0.0 // indirect
160 | github.com/mitchellh/mapstructure v1.5.0 // indirect
161 | github.com/oklog/run v1.0.0 // indirect
162 | github.com/olekukonko/tablewriter v0.0.5 // indirect
163 | github.com/opencontainers/go-digest v1.0.0 // indirect
164 | github.com/opencontainers/image-spec v1.1.0 // indirect
165 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect
166 | github.com/pkg/errors v0.9.1 // indirect
167 | github.com/prometheus/client_golang v1.16.0 // indirect
168 | github.com/prometheus/client_model v0.3.0 // indirect
169 | github.com/prometheus/common v0.42.0 // indirect
170 | github.com/prometheus/procfs v0.10.1 // indirect
171 | github.com/rivo/uniseg v0.2.0 // indirect
172 | github.com/sethvargo/go-retry v0.3.0 // indirect
173 | github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
174 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect
175 | github.com/sirupsen/logrus v1.9.3 // indirect
176 | github.com/spf13/afero v1.11.0 // indirect
177 | github.com/spf13/cast v1.6.0 // indirect
178 | github.com/spf13/pflag v1.0.5 // indirect
179 | github.com/spf13/viper v1.19.0 // indirect
180 | github.com/stevenle/topsort v0.2.0 // indirect
181 | github.com/subosito/gotenv v1.6.0 // indirect
182 | github.com/tklauser/go-sysconf v0.3.9 // indirect
183 | github.com/tklauser/numcpus v0.3.0 // indirect
184 | github.com/tkrajina/go-reflector v0.5.6 // indirect
185 | github.com/xlab/treeprint v1.2.0 // indirect
186 | github.com/yusufpapurcu/wmi v1.2.2 // indirect
187 | github.com/zclconf/go-cty v1.14.4 // indirect
188 | github.com/zclconf/go-cty-yaml v1.0.3 // indirect
189 | go.opencensus.io v0.24.0 // indirect
190 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 // indirect
191 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
192 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 // indirect
193 | go.opentelemetry.io/otel/sdk v1.26.0 // indirect
194 | go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect
195 | go.opentelemetry.io/otel/trace v1.26.0 // indirect
196 | go.opentelemetry.io/proto/otlp v1.2.0 // indirect
197 | golang.org/x/crypto v0.36.0 // indirect
198 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
199 | golang.org/x/net v0.38.0 // indirect
200 | golang.org/x/oauth2 v0.21.0 // indirect
201 | golang.org/x/sync v0.12.0 // indirect
202 | golang.org/x/sys v0.31.0 // indirect
203 | golang.org/x/text v0.23.0 // indirect
204 | google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
205 | google.golang.org/grpc v1.66.0 // indirect
206 | gopkg.in/ini.v1 v1.67.0 // indirect
207 | gopkg.in/yaml.v2 v2.4.0 // indirect
208 | gopkg.in/yaml.v3 v3.0.1 // indirect
209 | )
210 |
211 | replace (
212 | github.com/chronark/vercel-go => github.com/judell/vercel-go v0.1.4
213 | github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible
214 | github.com/duosecurity/duo_api_golang => github.com/e-gineer/duo_api_golang v0.0.0-20220501141413-213eea3b2b7b
215 | github.com/hashicorp/consul => github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089
216 | github.com/m-mizutani/urlscan-go => github.com/e-gineer/urlscan-go v1.0.1-0.20210701205010-6cf8288d0d10
217 | github.com/mattn/go-mastodon => github.com/turbot/go-mastodon v0.0.1
218 | github.com/piquette/edgr => github.com/e-gineer/edgr v0.0.2-0.20210901021602-7664639af765
219 | github.com/vartanbeno/go-reddit/v2 => github.com/abhiturbot/go-reddit/v2 v2.0.0-20220917030010-f0fe7d8ac15c
220 | oras.land/oras-go => oras.land/oras-go v1.1.0
221 | )
222 |
--------------------------------------------------------------------------------
/helpers.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #cgo linux LDFLAGS: -Wl,-unresolved-symbols=ignore-all
5 | #cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup
6 | #include "fdw_helpers.h"
7 | */
8 | import "C"
9 |
10 | import (
11 | "bytes"
12 | "encoding/json"
13 | "fmt"
14 | "log"
15 | "os"
16 | "regexp"
17 | "strings"
18 | "time"
19 | "unsafe"
20 |
21 | "github.com/golang/protobuf/ptypes"
22 | "github.com/golang/protobuf/ptypes/timestamp"
23 | "github.com/turbot/go-kit/helpers"
24 | typeHelpers "github.com/turbot/go-kit/types"
25 | "github.com/turbot/steampipe-postgres-fdw/types"
26 | "golang.org/x/exp/maps"
27 | )
28 |
// CStringListToGoArray converts a C string list into a go array.
// Values are deduplicated via an intermediate set, so the result order is
// nondeterministic (Go map iteration order).
func CStringListToGoArray(values *C.List) []string {
	valueMap := map[string]struct{}{}
	// walk the Postgres List cell by cell
	for it := C.list_head(values); it != nil; it = C.lnext(values, it) {
		val := C.cellGetString(it)
		s := C.GoString(C.valueString(val))
		valueMap[s] = struct{}{}

	}
	return maps.Keys(valueMap)
}
40 |
41 | // HACK: env vars do not all get copied into the Go env vars so explicitly copy them
42 | func SetEnvVars() {
43 | var penv **C.char = C.environ
44 | s := C.GoString(*C.environ)
45 |
46 | for s != "" {
47 | idx := strings.Index(s, "=")
48 | key := s[:idx]
49 | value := s[idx+1:]
50 | os.Setenv(key, value)
51 | penv = C.incStringPointer(penv)
52 | s = C.GoString(*penv)
53 | }
54 | }
55 |
56 | func GetFTableOptions(id types.Oid) types.Options {
57 | // TODO - We need a sanitized form of the table name, e.g. all lowercase
58 | f := C.GetForeignTable(C.Oid(id))
59 |
60 | tmp := getOptions(f.options)
61 | return tmp
62 | }
63 |
// GetSchemaNameFromForeignTableId returns the Postgres schema (namespace)
// name of the foreign table with the given oid.
func GetSchemaNameFromForeignTableId(id types.Oid) string {
	ftable := C.GetForeignTable(C.Oid(id))
	rel := C.RelationIdGetRelation(ftable.relid)
	// release the relcache reference taken by RelationIdGetRelation
	defer C.RelationClose(rel)
	return getNamespace(rel)
}
70 |
71 | func GetForeignServerOptionsFromFTableId(id types.Oid) types.Options {
72 | serverId := C.GetForeignServerIdByRelId(C.Oid(id))
73 | f := C.GetForeignServer(serverId)
74 | tmp := getOptions(f.options)
75 | return tmp
76 | }
77 |
// GetForeignServerOptions returns the options of the given foreign server.
func GetForeignServerOptions(server *C.ForeignServer) types.Options {
	return getOptions(server.options)
}
81 |
// getOptions converts a Postgres DefElem list of FDW options into a
// name -> value map.
func getOptions(opts *C.List) types.Options {
	m := make(types.Options)
	for it := C.list_head(opts); it != nil; it = C.lnext(opts, it) {
		el := C.cellGetDef(it)
		name := C.GoString(el.defname)
		val := C.GoString(C.defGetString(el))
		m[name] = val
	}
	return m
}
92 |
// BuildRelation converts a C Relation into the Go types.Relation
// representation: oid, validity flag, tuple descriptor and schema name.
func BuildRelation(rel C.Relation) *types.Relation {
	r := &types.Relation{
		ID: types.Oid(rel.rd_id),
		IsValid: fdwBool(rel.rd_isvalid),
		Attr: buildTupleDesc(rel.rd_att),
		Namespace: getNamespace(rel),
	}
	return r
}
102 |
// getNamespace returns the name of the relation's namespace (schema).
func getNamespace(rel C.Relation) string {
	schema := C.get_namespace_name(C.fdw_relationGetNamespace(rel))
	return C.GoString(schema)
}
107 |
// fdwBool converts a C bool to a Go bool.
func fdwBool(b C.bool) bool {
	return bool(b)
}
111 |
// fdwString copies n bytes from C memory at p and returns them as a Go
// string, truncated at the first NUL byte (C string semantics for
// fixed-width fields such as the attname read by buildAttr).
func fdwString(p unsafe.Pointer, n int) string {
	b := C.GoBytes(p, C.int(n))
	i := bytes.IndexByte(b, 0)
	if i < 0 {
		i = len(b)
	}
	return string(b[:i])
}
120 |
// buildTupleDesc converts a C TupleDesc into the Go types.TupleDesc
// representation; returns nil for a nil descriptor.
func buildTupleDesc(desc C.TupleDesc) *types.TupleDesc {
	if desc == nil {
		return nil
	}
	d := &types.TupleDesc{
		TypeID: types.Oid(desc.tdtypeid),
		TypeMod: int(desc.tdtypmod),
		//HasOid: fdwBool(desc.tdhasoid),
		Attrs: make([]types.Attr, 0, int(desc.natts)),
	}
	// cap(d.Attrs) == natts: convert each attribute in order
	for i := 0; i < cap(d.Attrs); i++ {
		p := C.fdw_tupleDescAttr(desc, C.int(i))
		d.Attrs = append(d.Attrs, buildAttr(p))
	}
	return d
}
137 |
// nameLen is the fixed width of a Postgres NameData field (NAMEDATALEN bytes).
const nameLen = C.NAMEDATALEN

// buildAttr converts a C pg_attribute row into the Go types.Attr
// representation (name, type oid, array dimensions, nullability, dropped flag).
func buildAttr(attr *C.FormData_pg_attribute) (out types.Attr) {
	out.Name = fdwString(unsafe.Pointer(&attr.attname.data[0]), nameLen)
	out.Type = types.Oid(attr.atttypid)
	out.Dimensions = int(attr.attndims)
	out.NotNull = fdwBool(attr.attnotnull)
	out.Dropped = fdwBool(attr.attisdropped)
	return
}
148 |
// ValToDatum converts a Go value into a C Datum for the column described by
// cinfo, using buffer as scratch space for the value's text representation.
// Any panic during conversion is recovered and returned as an error.
func ValToDatum(val interface{}, cinfo *C.ConversionInfo, buffer C.StringInfo) (res C.Datum, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = helpers.ToError(r)
		}
	}()
	// init an empty return result
	// NOTE(review): the C.CString("") here is malloc'd and never freed -
	// looks like a small per-call leak; confirm and free if so
	datum := C.fdw_cStringGetDatum(C.CString(""))

	// write value into C buffer
	if err := valToBuffer(val, cinfo.atttypoid, buffer); err != nil {
		return datum, err
	}

	// buffer.len appears to never be negative after a successful write,
	// so this branch effectively always runs; kept as in the original
	if buffer.len >= 0 {
		if cinfo.atttypoid == C.BYTEAOID ||
			cinfo.atttypoid == C.TEXTOID ||
			cinfo.atttypoid == C.VARCHAROID {
			// Special case, since the value is already a byte string.
			datum = C.fdw_pointerGetDatum(unsafe.Pointer(C.cstring_to_text_with_len(buffer.data, buffer.len)))
		} else {
			// let Postgres parse the text representation via the column
			// type's input function
			datum = C.InputFunctionCall(cinfo.attinfunc,
				buffer.data,
				cinfo.attioparam,
				cinfo.atttypmod)
		}
	}
	return datum, nil
}
179 |
180 | // write the value into the C StringInfo buffer
181 | func valToBuffer(val interface{}, oid C.Oid, buffer C.StringInfo) (err error) {
182 | defer func() {
183 | if r := recover(); r != nil {
184 | err = fmt.Errorf("%v", r)
185 | }
186 | }()
187 |
188 | var valueString string
189 | // handle json explicitly
190 | if oid == C.JSONBOID {
191 | valueString, err = jsonValueString(val)
192 | if err != nil {
193 | return err
194 | }
195 | } else {
196 | valueString = typeHelpers.ToString(val)
197 | }
198 |
199 | C.resetStringInfo(buffer)
200 | C.fdw_appendBinaryStringInfo(buffer, C.CString(valueString), C.int(len(valueString)))
201 | return
202 | }
203 |
// nullUnicodeRegex matches one or more unescaped `\u0000` escape sequences:
// group 1 captures the preceding character (or start of string) plus any
// even run of backslashes, so an escaped `\\u0000` is left untouched.
// Compiled once at package level rather than on every call (the original
// recompiled it per invocation, which is needlessly expensive on a path
// that runs once per JSONB value).
var nullUnicodeRegex = regexp.MustCompile(`((?:^|[^\\])(?:\\\\)*)(?:\\u0000)+`)

// jsonValueString marshals val to its JSON text, removing any unescaped
// unicode null escape ("\u0000") while preserving escaped occurrences
// ("\\u0000").
func jsonValueString(val interface{}) (string, error) {
	jsonBytes, err := json.Marshal(val)
	if err != nil {
		return "", err
	}
	valueString := string(jsonBytes)

	// remove unicode null char "\u0000", UNLESS escaped, i.e."\\u0000"
	if strings.Contains(valueString, `\u0000`) {
		log.Printf("[TRACE] null unicode character detected in JSON value - removing if not escaped")
		valueString = nullUnicodeRegex.ReplaceAllString(valueString, "$1")
	}

	return valueString, nil
}
220 |
221 | func TimeToPgTime(t time.Time) int64 {
222 | // Postgres stores dates as microseconds since Jan 1, 2000
223 | // https://www.postgresql.org/docs/9.1/datatype-datetime.html
224 | ts := t.UTC()
225 | epoch := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
226 | its := ts.Sub(epoch) / 1000
227 | return int64(its)
228 | }
229 |
230 | func PgTimeToTimestamp(t int64) (*timestamp.Timestamp, error) {
231 | // Postgres stores dates as microseconds since Jan 1, 2000
232 | // https://www.postgresql.org/docs/9.1/datatype-datetime.html
233 | // convert to go time
234 | epoch := time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
235 | time := epoch.Add(time.Duration(t*1000) * time.Nanosecond)
236 |
237 | // now convert to protoibuf timestamp
238 | return ptypes.TimestampProto(time)
239 | }
240 |
--------------------------------------------------------------------------------
/hub/connection_factory.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "strings"
7 | "sync"
8 |
9 | "github.com/turbot/pipe-fittings/v2/utils"
10 | "github.com/turbot/steampipe/pkg/pluginmanager"
11 |
12 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
13 | "github.com/turbot/steampipe/pkg/steampipeconfig"
14 | )
15 |
// keySeparator joins the plugin FQN and connection name in connectionPlugin
// map keys (raw string literal: two literal backslash characters)
const keySeparator = `\\`

// connectionFactory is responsible for creating and storing connectionPlugins
type connectionFactory struct {
	connectionPlugins map[string]*steampipeconfig.ConnectionPlugin // keyed by connectionPluginKey()
	hub               *RemoteHub
	connectionLock    sync.Mutex // guards connectionPlugins
}

// newConnectionFactory returns a factory with an empty plugin map, bound to
// the given hub.
func newConnectionFactory(hub *RemoteHub) *connectionFactory {
	return &connectionFactory{
		connectionPlugins: make(map[string]*steampipeconfig.ConnectionPlugin),
		hub:               hub,
	}
}
31 |
32 | // extract the plugin FQN and connection name from a map key
33 | func (f *connectionFactory) parsePluginKey(key string) (pluginFQN, connectionName string) {
34 | split := strings.Split(key, keySeparator)
35 | pluginFQN = split[0]
36 | connectionName = split[1]
37 | return
38 | }
39 |
40 | // if a connection plugin for the plugin and connection, return it. If it does not, create it, store in map and return it
41 | // NOTE: there is special case logic got aggregate connections
42 | func (f *connectionFactory) get(pluginFQN, connectionName string) (*steampipeconfig.ConnectionPlugin, error) {
43 | log.Printf("[TRACE] connectionFactory get plugin: %s connection %s", pluginFQN, connectionName)
44 |
45 | f.connectionLock.Lock()
46 | defer f.connectionLock.Unlock()
47 |
48 | // build a map key for the plugin
49 | key := f.connectionPluginKey(pluginFQN, connectionName)
50 |
51 | c, gotConnectionPlugin := f.connectionPlugins[key]
52 | if gotConnectionPlugin && !c.PluginClient.Exited() {
53 | return c, nil
54 | }
55 |
56 | // so either we have not yet instantiated the connection plugin, or it has exited
57 | if !gotConnectionPlugin {
58 | log.Printf("[TRACE] no connectionPlugin loaded with key %s (len %d)", key, len(f.connectionPlugins))
59 | for k := range f.connectionPlugins {
60 | log.Printf("[TRACE] key: %s", k)
61 | }
62 | } else {
63 | log.Printf("[TRACE] connectionPluginwith key %s has exited - reloading", key)
64 | }
65 |
66 | log.Printf("[TRACE] failed to get plugin: %s connection %s", pluginFQN, connectionName)
67 | return nil, nil
68 | }
69 |
70 | func (f *connectionFactory) connectionPluginKey(pluginFQN string, connectionName string) string {
71 | return fmt.Sprintf("%s%s%s", pluginFQN, keySeparator, connectionName)
72 | }
73 |
74 | func (f *connectionFactory) getOrCreate(pluginFQN, connectionName string) (*steampipeconfig.ConnectionPlugin, error) {
75 | log.Printf("[TRACE] connectionFactory getOrCreate %s %s", pluginFQN, connectionName)
76 | c, err := f.get(pluginFQN, connectionName)
77 | if err != nil {
78 | return nil, err
79 | }
80 | if c != nil {
81 | return c, nil
82 | }
83 |
84 | // otherwise create the connection plugin, setting connection config
85 | return f.createConnectionPlugin(pluginFQN, connectionName)
86 | }
87 |
// createConnectionPlugin loads the config for the given connection (reloading the
// global connection config if it is missing - e.g. a newly added connection),
// starts the plugin via the plugin manager and caches the resulting connection
// plugin in the map (under a key for every connection it supports - see add).
// NOTE: holds connectionLock for the duration, including the plugin-manager call.
func (f *connectionFactory) createConnectionPlugin(pluginFQN string, connectionName string) (*steampipeconfig.ConnectionPlugin, error) {
	f.connectionLock.Lock()
	defer f.connectionLock.Unlock()
	log.Printf("[TRACE] connectionFactory.createConnectionPlugin create connection %s", connectionName)

	// load the config for this connection
	connection, ok := steampipeconfig.GlobalConfig.Connections[connectionName]
	if !ok {
		log.Printf("[INFO] connectionFactory.createConnectionPlugin create connection %s - no config found so reloading config!", connectionName)

		// ask hub to reload config - it's possible we are being asked to import a newly added connection
		// TODO remove need for hub to load config at all
		if _, err := f.hub.LoadConnectionConfig(); err != nil {
			log.Printf("[ERROR] LoadConnectionConfig failed %v ", err)
			return nil, err
		}
		// now try to get config again
		connection, ok = steampipeconfig.GlobalConfig.Connections[connectionName]
		if !ok {
			log.Printf("[WARN] no config found for connection %s", connectionName)
			return nil, fmt.Errorf("no config found for connection %s", connectionName)
		}
	}

	log.Printf("[TRACE] createConnectionPlugin plugin %s, connection %s, config: %s\n", utils.PluginFQNToSchemaName(pluginFQN), connectionName, connection.Config)

	// get plugin manager
	pluginManager, err := pluginmanager.GetPluginManager()
	if err != nil {
		return nil, err
	}

	// start (or attach to) the plugin process for this connection
	connectionPlugins, res := steampipeconfig.CreateConnectionPlugins(pluginManager, []string{connection.Name})
	if res.Error != nil {
		return nil, res.Error
	}
	// a missing map entry with no error is still a failure - surface any warnings
	if connectionPlugins[connection.Name] == nil {
		if len(res.Warnings) > 0 {
			return nil, fmt.Errorf("%s", strings.Join(res.Warnings, ","))
		}
		return nil, fmt.Errorf("CreateConnectionPlugins did not return error but '%s' not found in connection map", connection.Name)
	}

	// cache the plugin under a key for every connection it supports
	connectionPlugin := connectionPlugins[connection.Name]
	f.add(connectionPlugin, connectionName)

	return connectionPlugin, nil
}
136 |
// add stores the given connection plugin in the map under a key for EVERY
// connection the plugin instance supports (not just connectionName).
// NOTE: the only visible caller (createConnectionPlugin) holds connectionLock.
func (f *connectionFactory) add(connectionPlugin *steampipeconfig.ConnectionPlugin, connectionName string) {
	log.Printf("[TRACE] connectionFactory add %s - adding all connections supported by plugin", connectionName)

	// add a map entry for all connections supported by the plugin
	for c := range connectionPlugin.ConnectionMap {
		connectionPluginKey := f.connectionPluginKey(connectionPlugin.PluginName, c)
		log.Printf("[TRACE] add %s (%s)", c, connectionPluginKey)
		// NOTE: there may already be map entries for some connections
		// - this could occur if the filewatcher detects a connection added for a plugin
		if _, ok := f.connectionPlugins[connectionPluginKey]; !ok {
			f.connectionPlugins[connectionPluginKey] = connectionPlugin
		}
	}
}
151 |
152 | func (f *connectionFactory) getSchema(pluginFQN, connectionName string) (*proto.Schema, error) {
153 | log.Printf("[TRACE] connectionFactory getSchema %s %s", pluginFQN, connectionName)
154 | // do we have this connection already loaded
155 | c, err := f.get(pluginFQN, connectionName)
156 | if err != nil {
157 | return nil, err
158 | }
159 | if c != nil {
160 | // we already have a connection plugin - refetch the schema
161 | log.Printf("[TRACE] already loaded %s %s: ", pluginFQN, connectionName)
162 |
163 | return c.GetSchema(connectionName)
164 | }
165 |
166 | // create the connection
167 | log.Printf("[TRACE] creating connection plugin to get schema")
168 | c, err = f.createConnectionPlugin(pluginFQN, connectionName)
169 | if err != nil {
170 | return nil, err
171 | }
172 | return c.ConnectionMap[connectionName].Schema, nil
173 | }
174 |
--------------------------------------------------------------------------------
/hub/constants.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
const (
	// AppName is the application name passed to shared steampipe code
	AppName = "steampipe"
	// FdwName is the name of this foreign data wrapper
	FdwName = "steampipe-postgres-fdw"
)
7 |
--------------------------------------------------------------------------------
/hub/hub.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "time"
5 |
6 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
7 | "github.com/turbot/steampipe-postgres-fdw/types"
8 | )
9 |
// TODO check which can be non-imported

// Hub is implemented by both RemoteHub and HubLocal and provides every
// operation the FDW requires: connection config management, schema
// retrieval, scan execution, planner support, settings and cache control.
type Hub interface {
	// connection config
	GetConnectionConfigByName(string) *proto.ConnectionConfig
	LoadConnectionConfig() (bool, error)
	// schema retrieval
	GetSchema(remoteSchema string, localSchema string) (*proto.Schema, error)
	// scan execution
	GetIterator(columns []string, quals *proto.Quals, unhandledRestrictions int, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, opts types.Options) (Iterator, error)
	// planner support
	GetRelSize(columns []string, quals []*proto.Qual, opts types.Options) (types.RelSize, error)
	GetPathKeys(opts types.Options) ([]types.PathKey, error)
	Explain(columns []string, quals []*proto.Qual, sortKeys []string, verbose bool, opts types.Options) ([]string, error)
	// settings
	ApplySetting(key string, value string) error
	GetSettingsSchema() map[string]*proto.TableSchema
	GetLegacySettingsSchema() map[string]*proto.TableSchema
	// scan lifecycle
	StartScan(i Iterator) error
	RemoveIterator(iterator Iterator)
	EndScan(iter Iterator, limit int64)
	AddScanMetadata(iter Iterator)
	Abort()
	Close()
	ProcessImportForeignSchemaOptions(opts types.Options, connectionName string) error
	// cache control (cacheTTL/cacheEnabled are unexported - only usable within this package)
	HandleLegacyCacheCommand(command string) error
	ValidateCacheCommand(command string) error
	cacheTTL(name string) time.Duration
	cacheEnabled(name string) bool
	GetSortableFields(table, connection string) map[string]proto.SortOrder
}
35 |
--------------------------------------------------------------------------------
/hub/hub_create.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "github.com/turbot/steampipe-plugin-sdk/v5/logging"
5 | "sync"
6 | )
7 |
// hubSingleton is the global hub instance, created by CreateHub and returned by GetHub
var hubSingleton Hub

// hubMux protects creation of, and access to, the hub singleton
var hubMux sync.Mutex
13 |
14 | // GetHub returns a hub singleton
15 | func GetHub() Hub {
16 | // lock access to singleton
17 | hubMux.Lock()
18 | defer hubMux.Unlock()
19 | return hubSingleton
20 | }
21 |
22 | // CreateHub creates the hub
23 | func CreateHub() error {
24 | logging.LogTime("GetHub start")
25 |
26 | // lock access to singleton
27 | hubMux.Lock()
28 | defer hubMux.Unlock()
29 |
30 | var err error
31 | hubSingleton, err = newRemoteHub()
32 |
33 | if err != nil {
34 | return err
35 | }
36 | logging.LogTime("GetHub end")
37 | return err
38 | }
39 |
--------------------------------------------------------------------------------
/hub/hub_local.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "log"
5 | "os"
6 | "time"
7 |
8 | typehelpers "github.com/turbot/go-kit/types"
9 | "github.com/turbot/pipe-fittings/v2/ociinstaller"
10 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc"
11 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
12 | "github.com/turbot/steampipe-plugin-sdk/v5/logging"
13 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
14 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
15 | "github.com/turbot/steampipe-postgres-fdw/settings"
16 | "github.com/turbot/steampipe-postgres-fdw/types"
17 | "github.com/turbot/steampipe/pkg/constants"
18 | "golang.org/x/exp/maps"
19 | )
20 |
// HubLocal is the hub implementation used by the standalone FDW build, which
// hosts a single plugin in-process (no plugin manager / GRPC connection).
type HubLocal struct {
	hubBase
	// the in-process plugin server
	plugin *grpc.PluginServer
	// full display image ref of the plugin (derived from pluginAlias)
	pluginName string
	// short alias of the plugin (injected by the templated standalone build)
	pluginAlias string
	// connection configs keyed by connection name
	connections map[string]*proto.ConnectionConfig
}
28 |
// newLocalHub creates a HubLocal hosting a single in-process plugin server.
// pluginAlias and getPluginFunc are supplied by the templated standalone build
// (see hub_local_plugin.go).
func newLocalHub() (*HubLocal, error) {
	// derive the full display image ref from the plugin alias
	imageRef := ociinstaller.NewImageRef(pluginAlias).DisplayImageRef()

	hub := &HubLocal{
		hubBase: newHubBase(false),
		// start the plugin server in-process rather than via the plugin manager
		plugin: plugin.Server(&plugin.ServeOpts{
			PluginFunc: getPluginFunc(),
		}),
		pluginName:  imageRef,
		pluginAlias: pluginAlias,
		connections: make(map[string]*proto.ConnectionConfig),
	}
	hub.cacheSettings = settings.NewCacheSettings(hub.clearConnectionCache, hub.getServerCacheEnabled())

	// TODO CHECK TELEMETRY ENABLED?
	if err := hub.initialiseTelemetry(); err != nil {
		return nil, err
	}

	return hub, nil
}
50 |
51 | func (l *HubLocal) SetConnectionConfig(connectionName, configString string) error {
52 | log.Printf("[INFO] HubLocal SetConnectionConfig: connection: %s, config: %s", connectionName, configString)
53 |
54 | l.connections[connectionName] =
55 | &proto.ConnectionConfig{
56 | Connection: connectionName,
57 | Plugin: l.pluginName,
58 | PluginShortName: l.pluginAlias,
59 | Config: configString,
60 | PluginInstance: l.pluginName,
61 | }
62 |
63 | _, err := l.plugin.SetAllConnectionConfigs(&proto.SetAllConnectionConfigsRequest{
64 | Configs: maps.Values(l.connections),
65 | })
66 | return err
67 | }
68 |
69 | func (l *HubLocal) UpdateConnectionConfig(connectionName, configString string) error {
70 | log.Printf("[INFO] HubLocal UpdateConnectionConfig: connection: %s, config: %s", connectionName, configString)
71 |
72 | // if the connection already exists and the config is the same, do nothing
73 | // this situation could arise when a session is restarted, the server options are loaded again, and
74 | // the connection config is set again which results in the cache getting cleared
75 | if conn, ok := l.connections[connectionName]; ok && conn.Config == configString {
76 | return nil
77 | }
78 |
79 | l.connections[connectionName] =
80 | &proto.ConnectionConfig{
81 | Connection: connectionName,
82 | Plugin: l.pluginName,
83 | PluginShortName: l.pluginAlias,
84 | Config: configString,
85 | PluginInstance: l.pluginName,
86 | }
87 |
88 | _, err := l.plugin.UpdateConnectionConfigs(&proto.UpdateConnectionConfigsRequest{
89 | Changed: []*proto.ConnectionConfig{l.connections[connectionName]},
90 | })
91 | return err
92 | }
93 |
94 | func (l *HubLocal) LoadConnectionConfig() (bool, error) {
95 | // do nothing
96 | return false, nil
97 | }
98 |
// GetSchema returns the plugin schema for the given connection.
// The first (remote schema) parameter is unused by the local hub.
// If the first attempt fails, an empty connection config is set and the call
// is retried once - covering the case where no config has been set yet.
func (l *HubLocal) GetSchema(_, connectionName string) (*proto.Schema, error) {
	log.Printf("[INFO] GetSchema")
	res, err := l.plugin.GetSchema(&proto.GetSchemaRequest{Connection: connectionName})

	if err != nil {
		log.Printf("[INFO] GetSchema retry")
		// TODO tactical - if no connection config has been set for this connection, set now
		if err := l.SetConnectionConfig(connectionName, ""); err != nil {

			return nil, err
		}
		// retry the schema fetch with the (empty) config in place
		res, err = l.plugin.GetSchema(&proto.GetSchemaRequest{Connection: connectionName})
		if err != nil {
			log.Printf("[INFO] GetSchema retry failed")
			return nil, err
		}
	}
	return res.GetSchema(), nil
}
118 |
119 | func (l *HubLocal) GetIterator(columns []string, quals *proto.Quals, unhandledRestrictions int, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, opts types.Options) (Iterator, error) {
120 | logging.LogTime("GetIterator start")
121 | qualMap, err := buildQualMap(quals)
122 | connectionName := opts["connection"]
123 | table := opts["table"]
124 | log.Printf("[TRACE] RemoteHub GetIterator() table '%s'", table)
125 |
126 | if connectionName == constants.InternalSchema || connectionName == constants.LegacyCommandSchema {
127 | return l.executeCommandScan(connectionName, table, queryTimestamp)
128 | }
129 |
130 | // create a span for this scan
131 | scanTraceCtx := l.traceContextForScan(table, columns, limit, qualMap, connectionName)
132 | iterator, err := l.startScanForConnection(connectionName, table, qualMap, unhandledRestrictions, columns, limit, sortOrder, queryTimestamp, scanTraceCtx)
133 |
134 | if err != nil {
135 | log.Printf("[TRACE] RemoteHub GetIterator() failed :( %s", err)
136 | return nil, err
137 | }
138 | log.Printf("[TRACE] RemoteHub GetIterator() created iterator (%p)", iterator)
139 |
140 | return iterator, nil
141 | }
142 |
143 | func (l *HubLocal) GetPathKeys(opts types.Options) ([]types.PathKey, error) {
144 | connectionName := opts["connection"]
145 |
146 | connectionSchema, err := l.GetSchema("", connectionName)
147 | if err != nil {
148 | return nil, err
149 | }
150 |
151 | return l.getPathKeys(connectionSchema, opts)
152 |
153 | }
154 |
155 | func (l *HubLocal) GetConnectionConfigByName(name string) *proto.ConnectionConfig {
156 | return l.connections[name]
157 | }
158 |
159 | func (l *HubLocal) ProcessImportForeignSchemaOptions(opts types.Options, connection string) error {
160 | // NOTE: if no connection config is passed, set an empty connection config
161 | config, _ := opts["config"]
162 |
163 | // do we already have this connection
164 | connectionConfig, ok := l.connections[connection]
165 | if ok {
166 | log.Println("[INFO] connection already exists, updating ")
167 | // we have already set the config - update it
168 | connectionConfig.Config = config
169 | return l.UpdateConnectionConfig(connection, config)
170 | }
171 |
172 | // we have not yet set the config - set it
173 | return l.SetConnectionConfig(connection, config)
174 | }
175 |
// startScanForConnection starts a scan for a single connection, using a scanIterator or a legacyScanIterator
// It decides whether the limit can be pushed down, then builds a local scan
// iterator; the trace span is closed here only on error (otherwise the
// iterator owns it).
func (l *HubLocal) startScanForConnection(connectionName string, table string, qualMap map[string]*proto.Quals, unhandledRestrictions int, columns []string, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, scanTraceCtx *telemetry.TraceCtx) (_ Iterator, err error) {
	defer func() {
		if err != nil {
			// close the span in case of error
			scanTraceCtx.Span.End()
		}
	}()

	// ok so this is a multi connection plugin, build list of connections,
	// if this connection is NOT an aggregator, only execute for the named connection

	//// get connection config
	//connectionConfig, ok := l.getConnectionconfig(ConnectionName)
	//if !ok {
	//	return nil, fmt.Errorf("no connection config loaded for connection '%s'", ConnectionName)
	//}

	// determine whether to pushdown the limit
	connectionLimitMap, err := l.buildConnectionLimitMap(connectionName, table, qualMap, unhandledRestrictions, limit)
	if err != nil {
		return nil, err
	}

	if len(qualMap) > 0 {
		log.Printf("[INFO] connection '%s', table '%s', quals %s", connectionName, table, grpc.QualMapToString(qualMap, true))
	} else {
		log.Println("[INFO] --------")
		log.Println("[INFO] no quals")
		log.Println("[INFO] --------")
	}

	log.Printf("[TRACE] startScanForConnection creating a new scan iterator")
	iterator := newScanIteratorLocal(l, connectionName, table, l.pluginName, connectionLimitMap, qualMap, columns, limit, sortOrder, queryTimestamp, scanTraceCtx)
	return iterator, nil
}
212 |
// buildConnectionLimitMap determines whether the limit can be pushed down to
// the plugin for the (single) local connection, returning a map of connection
// name -> limit, where -1 means "do not push the limit down".
func (l *HubLocal) buildConnectionLimitMap(connection, table string, qualMap map[string]*proto.Quals, unhandledRestrictions int, limit int64) (map[string]int64, error) {
	connectionSchema, err := l.GetSchema("", connection)
	if err != nil {
		return nil, err
	}
	schemaMode := connectionSchema.Mode

	// pushing the limit down or not is dependent on the schema.
	// for a static schema, the limit will be the same for all connections (i.e. we either pushdown for all or none)
	// check once whether we should push down
	if limit != -1 && schemaMode == plugin.SchemaModeStatic {
		log.Printf("[TRACE] static schema - using same limit for all connections")
		if !l.shouldPushdownLimit(table, qualMap, unhandledRestrictions, connectionSchema) {
			limit = -1
		}
	}

	// set the limit for the one and only connection
	var connectionLimitMap = make(map[string]int64)
	connectionLimit := limit
	// if schema mode is dynamic, check whether we should push down for each connection
	if schemaMode == plugin.SchemaModeDynamic && !l.shouldPushdownLimit(table, qualMap, unhandledRestrictions, connectionSchema) {
		log.Printf("[INFO] not pushing limit down for connection %s", connection)
		connectionLimit = -1
	}
	connectionLimitMap[connection] = connectionLimit

	return connectionLimitMap, nil
}
242 |
243 | func (l *HubLocal) clearConnectionCache(connection string) error {
244 |
245 | _, err := l.plugin.SetConnectionCacheOptions(&proto.SetConnectionCacheOptionsRequest{ClearCacheForConnection: connection})
246 | if err != nil {
247 | log.Printf("[WARN] clearConnectionCache failed for connection %s: SetConnectionCacheOptions returned %s", connection, err)
248 | }
249 | log.Printf("[INFO] clear connection cache succeeded")
250 | return err
251 | }
252 |
253 | func (l *HubLocal) cacheEnabled(s string) bool {
254 | // if the caching is disabled for the server, just return false
255 | if !l.cacheSettings.ServerCacheEnabled {
256 | return false
257 | }
258 |
259 | if l.cacheSettings.ClientCacheEnabled != nil {
260 | return *l.cacheSettings.ClientCacheEnabled
261 | }
262 |
263 | if envStr, ok := os.LookupEnv(constants.EnvCacheEnabled); ok {
264 | // set this so that we don't keep looking up the env var
265 | l.cacheSettings.SetEnabled(envStr)
266 | return l.cacheEnabled(s)
267 | }
268 | return true
269 | }
270 |
271 | func (l *HubLocal) cacheTTL(s string) time.Duration {
272 | log.Printf("[INFO] cacheTTL 1")
273 | // if the cache ttl has been overridden, then enforce the value
274 | if l.cacheSettings.Ttl != nil {
275 | return *l.cacheSettings.Ttl
276 | }
277 | if envStr, ok := os.LookupEnv(constants.EnvCacheMaxTTL); ok {
278 | // set this so that we don't keep looking up the env var
279 | l.cacheSettings.SetTtl(envStr)
280 | return l.cacheTTL(s)
281 | }
282 | return 10 * time.Hour
283 | }
284 |
285 | // resolve the server cache enabled property
286 | func (l *HubLocal) getServerCacheEnabled() bool {
287 | var res = true
288 | if val, ok := os.LookupEnv(constants.EnvCacheEnabled); ok {
289 | if boolVal, err := typehelpers.ToBool(val); err == nil {
290 | res = boolVal
291 | }
292 | }
293 |
294 | log.Printf("[INFO] Hub.getServerCacheEnabled returning %v", res)
295 |
296 | return res
297 | }
298 |
--------------------------------------------------------------------------------
/hub/hub_local_plugin.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
5 | )
6 |
// this is provided to ensure GRPC FDW version builds - it will be replaced by template for standalone version
var pluginAlias string

// getPluginFunc returns the plugin constructor. This stub exists only so the
// GRPC build compiles; the templated standalone build replaces it, and calling
// this version is a programming error - hence the panic.
func getPluginFunc() plugin.PluginFunc {
	panic("this function version should not be called - the templated version should be used")
}
13 |
--------------------------------------------------------------------------------
/hub/hub_quals.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
5 | )
6 |
7 | func buildQualMap(quals *proto.Quals) (map[string]*proto.Quals, error) {
8 | qualMap := make(map[string]*proto.Quals)
9 |
10 | for _, qual := range quals.Quals {
11 | if qual == nil {
12 | continue
13 | }
14 |
15 | columnQuals, ok := qualMap[qual.FieldName]
16 | if ok {
17 | columnQuals.Append(qual)
18 | } else {
19 | columnQuals = &proto.Quals{Quals: []*proto.Qual{qual}}
20 | }
21 | qualMap[qual.FieldName] = columnQuals
22 | }
23 | return qualMap, nil
24 | }
25 |
--------------------------------------------------------------------------------
/hub/hub_remote.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "log"
8 | "os"
9 | "path"
10 | "time"
11 |
12 | typehelpers "github.com/turbot/go-kit/types"
13 | "github.com/turbot/pipe-fittings/v2/app_specific"
14 | "github.com/turbot/pipe-fittings/v2/modconfig"
15 | "github.com/turbot/pipe-fittings/v2/utils"
16 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc"
17 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
18 | "github.com/turbot/steampipe-plugin-sdk/v5/logging"
19 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
20 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
21 | "github.com/turbot/steampipe-postgres-fdw/settings"
22 | "github.com/turbot/steampipe-postgres-fdw/types"
23 | "github.com/turbot/steampipe/pkg/constants"
24 | "github.com/turbot/steampipe/pkg/steampipeconfig"
25 | )
26 |
const (
	// rowBufferSize is the row buffer size - presumably used when buffering
	// scan results (no usage is visible in this file chunk; confirm at call site)
	rowBufferSize = 100
)
30 |
// RemoteHub is a structure representing plugin hub
// It is the standard hub implementation, connecting to plugins via the
// steampipe plugin manager (in contrast to HubLocal, which hosts a single
// in-process plugin).
type RemoteHub struct {
	hubBase
	// factory which creates and caches connection plugins
	connections *connectionFactory
}
36 |
37 | //// lifecycle ////
38 |
// newRemoteHub creates and initialises a RemoteHub: it initialises telemetry,
// sets the steampipe install directory, loads the connection config and
// creates the cache settings.
func newRemoteHub() (*RemoteHub, error) {
	hub := &RemoteHub{
		hubBase: newHubBase(true),
	}
	hub.connections = newConnectionFactory(hub)

	// TODO CHECK TELEMETRY ENABLED?
	if err := hub.initialiseTelemetry(); err != nil {
		return nil, err
	}

	// NOTE: Steampipe determines its install directory from the input arguments (with a default)
	// as we are using shared Steampipe code we must set the install directory.
	// we can derive it from the working directory (which is underneath the install directory)
	steampipeDir, err := getInstallDirectory()
	if err != nil {
		return nil, err
	}
	app_specific.InstallDir = steampipeDir

	log.Printf("[INFO] newRemoteHub RemoteHub.LoadConnectionConfig ")
	if _, err := hub.LoadConnectionConfig(); err != nil {
		return nil, err
	}

	hub.cacheSettings = settings.NewCacheSettings(hub.clearConnectionCache, hub.getServerCacheEnabled())

	return hub, nil
}
68 |
// get the install folder - derive from our working folder
// (three levels up from the working directory, per the "../../.." below)
func getInstallDirectory() (string, error) {
	// we need to do this as we are sharing steampipe code to read the config
	// and steampipe may set the install folder from a cmd line arg, so it cannot be hard coded
	// NOTE(review): path.Join is slash-separated; filepath.Join would be the
	// portable choice - confirm whether non-unix platforms need to be supported
	wd, err := os.Getwd()
	if err != nil {
		return "", err
	}
	return path.Join(wd, "../../.."), nil
}
79 |
80 | //// public fdw functions ////
81 |
82 | // GetSchema returns the schema for a name. Load the plugin for the connection if needed
83 | func (h *RemoteHub) GetSchema(remoteSchema string, localSchema string) (*proto.Schema, error) {
84 | log.Printf("[TRACE] RemoteHub GetSchema %s %s", remoteSchema, localSchema)
85 | pluginFQN := remoteSchema
86 | connectionName := localSchema
87 | log.Printf("[TRACE] getSchema remoteSchema: %s, name %s\n", remoteSchema, connectionName)
88 |
89 | return h.connections.getSchema(pluginFQN, connectionName)
90 | }
91 |
92 | // GetIterator creates and returns an iterator
93 | func (h *RemoteHub) GetIterator(columns []string, quals *proto.Quals, unhandledRestrictions int, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, opts types.Options) (Iterator, error) {
94 | logging.LogTime("GetIterator start")
95 | qualMap, err := buildQualMap(quals)
96 | connectionName := opts["connection"]
97 | table := opts["table"]
98 | log.Printf("[TRACE] RemoteHub GetIterator() table '%s'", table)
99 |
100 | if connectionName == constants.InternalSchema || connectionName == constants.LegacyCommandSchema {
101 | return h.executeCommandScan(connectionName, table, queryTimestamp)
102 | }
103 |
104 | // create a span for this scan
105 | scanTraceCtx := h.traceContextForScan(table, columns, limit, qualMap, connectionName)
106 | iterator, err := h.startScanForConnection(connectionName, table, qualMap, unhandledRestrictions, columns, limit, sortOrder, queryTimestamp, scanTraceCtx)
107 |
108 | if err != nil {
109 | log.Printf("[TRACE] RemoteHub GetIterator() failed :( %s", err)
110 | return nil, err
111 | }
112 | log.Printf("[TRACE] RemoteHub GetIterator() created iterator (%p)", iterator)
113 |
114 | return iterator, nil
115 | }
116 |
117 | // LoadConnectionConfig loads the connection config and returns whether it has changed
118 | func (h *RemoteHub) LoadConnectionConfig() (bool, error) {
119 | log.Printf("[INFO] RemoteHub.LoadConnectionConfig ")
120 | // load connection conFig
121 | connectionConfig, errorsAndWarnings := steampipeconfig.LoadConnectionConfig(context.Background())
122 | if errorsAndWarnings.GetError() != nil {
123 | log.Printf("[WARN] LoadConnectionConfig failed %v ", errorsAndWarnings)
124 | return false, errorsAndWarnings.GetError()
125 | }
126 |
127 | configChanged := steampipeconfig.GlobalConfig == connectionConfig
128 | steampipeconfig.GlobalConfig = connectionConfig
129 |
130 | return configChanged, nil
131 | }
132 |
133 | // GetPathKeys Is a method called from the planner to add additional Path to the planner.
134 | //
135 | // fetch schema and call base implementation
136 | func (h *RemoteHub) GetPathKeys(opts types.Options) ([]types.PathKey, error) {
137 | connectionName := opts["connection"]
138 | table := opts["table"]
139 |
140 | log.Printf("[TRACE] hub.GetPathKeys for connection '%s`, table `%s`", connectionName, table)
141 |
142 | // get the schema for this connection
143 | connectionPlugin, err := h.getConnectionPlugin(connectionName)
144 | if err != nil {
145 | return nil, err
146 | }
147 |
148 | connectionSchema, err := connectionPlugin.GetSchema(connectionName)
149 | if err != nil {
150 | return nil, err
151 | }
152 |
153 | return h.getPathKeys(connectionSchema, opts)
154 | }
155 |
156 | //// internal implementation ////
157 |
// startScanForConnection starts a scan for a single connection, using a scanIterator or a legacyScanIterator
// For aggregator connections the scan covers all resolved child connections.
// The trace span is closed here only on error (otherwise the iterator owns it).
func (h *RemoteHub) startScanForConnection(connectionName string, table string, qualMap map[string]*proto.Quals, unhandledRestrictions int, columns []string, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, scanTraceCtx *telemetry.TraceCtx) (_ Iterator, err error) {
	defer func() {
		if err != nil {
			// close the span in case of error
			scanTraceCtx.Span.End()
		}
	}()

	log.Printf("[INFO] RemoteHub startScanForConnection '%s' limit %d", connectionName, limit)
	// get connection plugin for this connection
	connectionPlugin, err := h.getConnectionPlugin(connectionName)
	if err != nil {
		log.Printf("[TRACE] getConnectionPlugin failed: %s", err.Error())
		return nil, err
	}

	// ok so this is a multi connection plugin, build list of connections,
	// if this connection is NOT an aggregator, only execute for the named connection

	// get connection config
	connectionConfig, ok := steampipeconfig.GlobalConfig.Connections[connectionName]
	if !ok {
		return nil, fmt.Errorf("no connection config loaded for connection '%s'", connectionName)
	}

	var connectionNames = []string{connectionName}
	if connectionConfig.Type == modconfig.ConnectionTypeAggregator {
		connectionNames = connectionConfig.GetResolveConnectionNames()
		// if there are no connections, do not proceed
		if len(connectionNames) == 0 {
			return nil, errors.New(connectionConfig.GetEmptyAggregatorError())
		}
	}

	// for each connection, determine whether to pushdown the limit
	connectionLimitMap, err := h.buildConnectionLimitMap(table, qualMap, unhandledRestrictions, connectionNames, limit, connectionPlugin)
	if err != nil {
		return nil, err
	}

	if len(qualMap) > 0 {
		log.Printf("[INFO] connection '%s', table '%s', quals %s", connectionName, table, grpc.QualMapToString(qualMap, true))
	} else {
		log.Println("[INFO] --------")
		log.Println("[INFO] no quals")
		log.Println("[INFO] --------")
	}

	log.Printf("[TRACE] startScanForConnection creating a new scan iterator")
	iterator := newScanIterator(h, connectionPlugin, connectionName, table, connectionLimitMap, qualMap, columns, limit, sortOrder, queryTimestamp, scanTraceCtx)
	return iterator, nil
}
211 |
// buildConnectionLimitMap determines, for each connection in connectionNames,
// whether the limit can be pushed down to the plugin, returning a map of
// connection name -> limit, where -1 means "do not push the limit down".
func (h *RemoteHub) buildConnectionLimitMap(table string, qualMap map[string]*proto.Quals, unhandledRestrictions int, connectionNames []string, limit int64, connectionPlugin *steampipeconfig.ConnectionPlugin) (map[string]int64, error) {
	log.Printf("[INFO] buildConnectionLimitMap, table: '%s', %d %s, limit: %d", table, len(connectionNames), utils.Pluralize("connection", len(connectionNames)), limit)

	// use the first connection's schema to determine the schema mode
	connectionSchema, err := connectionPlugin.GetSchema(connectionNames[0])
	if err != nil {
		return nil, err
	}
	schemaMode := connectionSchema.Mode

	// pushing the limit down or not is dependent on the schema.
	// for a static schema, the limit will be the same for all connections (i.e. we either pushdown for all or none)
	// check once whether we should push down
	if limit != -1 && schemaMode == plugin.SchemaModeStatic {
		log.Printf("[INFO] static schema - using same limit for all connections")
		if !h.shouldPushdownLimit(table, qualMap, unhandledRestrictions, connectionSchema) {
			limit = -1
		}
	}

	// set the limit for each connection
	var connectionLimitMap = make(map[string]int64)
	for _, c := range connectionNames {
		connectionLimit := limit
		// if schema mode is dynamic, check whether we should push down for each connection
		if schemaMode == plugin.SchemaModeDynamic && !h.shouldPushdownLimit(table, qualMap, unhandledRestrictions, connectionSchema) {
			log.Printf("[INFO] not pushing limit down for connection %s", c)
			connectionLimit = -1
		}
		connectionLimitMap[c] = connectionLimit
	}

	return connectionLimitMap, nil
}
245 |
246 | // getConnectionPlugin returns the connectionPlugin for the provided connection
247 | // it also makes sure that the plugin is up and running.
248 | // if the plugin is not running, it attempts to restart the plugin - errors if unable
249 | func (h *RemoteHub) getConnectionPlugin(connectionName string) (*steampipeconfig.ConnectionPlugin, error) {
250 | log.Printf("[TRACE] hub.getConnectionPlugin for connection '%s`", connectionName)
251 |
252 | // get the plugin FQN
253 | connectionConfig, ok := steampipeconfig.GlobalConfig.Connections[connectionName]
254 | if !ok {
255 | log.Printf("[WARN] no connection config loaded for connection '%s'", connectionName)
256 | return nil, fmt.Errorf("no connection config loaded for connection '%s'", connectionName)
257 | }
258 | pluginFQN := connectionConfig.Plugin
259 |
260 | // ask connection map to get or create this connection
261 | c, err := h.connections.getOrCreate(pluginFQN, connectionName)
262 | if err != nil {
263 | log.Printf("[TRACE] getConnectionPlugin getConnectionPlugin failed: %s", err.Error())
264 | return nil, err
265 | }
266 |
267 | return c, nil
268 | }
269 |
270 | func (h *RemoteHub) clearConnectionCache(connection string) error {
271 | log.Printf("[INFO] clear connection cache for connection '%s'", connection)
272 | connectionPlugin, err := h.getConnectionPlugin(connection)
273 | if err != nil {
274 | log.Printf("[WARN] clearConnectionCache failed for connection %s: %s", connection, err)
275 | return err
276 | }
277 |
278 | _, err = connectionPlugin.PluginClient.SetConnectionCacheOptions(&proto.SetConnectionCacheOptionsRequest{ClearCacheForConnection: connection})
279 | if err != nil {
280 | log.Printf("[WARN] clearConnectionCache failed for connection %s: SetConnectionCacheOptions returned %s", connection, err)
281 | }
282 | log.Printf("[INFO] clear connection cache succeeded")
283 | return err
284 | }
285 |
286 | func (h *RemoteHub) cacheEnabled(connectionName string) bool {
287 | // if the caching is disabled for the server, just return false
288 | if !h.cacheSettings.ServerCacheEnabled {
289 | log.Printf("[INFO] cacheEnabled returning false since server cache is disabled")
290 | return false
291 | }
292 |
293 | if h.cacheSettings.ClientCacheEnabled != nil {
294 | log.Printf("[INFO] cacheEnabled returning %v since client cache is enabled", *h.cacheSettings.ClientCacheEnabled)
295 | return *h.cacheSettings.ClientCacheEnabled
296 | }
297 | log.Printf("[INFO] default cacheEnabled returning true")
298 |
299 | return true
300 | }
301 |
302 | func (h *RemoteHub) cacheTTL(connectionName string) time.Duration {
303 | // initialise to default
304 | ttl := 300 * time.Second
305 | // if the cache ttl has been overridden, then enforce the value
306 | if h.cacheSettings.Ttl != nil {
307 | ttl = *h.cacheSettings.Ttl
308 | }
309 | // would this give data earlier than the cacheClearTime
310 | now := time.Now()
311 | if now.Add(-ttl).Before(h.cacheSettings.ClearTime) {
312 | ttl = now.Sub(h.cacheSettings.ClearTime)
313 | }
314 |
315 | return ttl
316 | }
317 |
318 | // resolve the server cache enabled property
319 | func (h *RemoteHub) getServerCacheEnabled() bool {
320 | var res = true
321 | if val, ok := os.LookupEnv(constants.EnvCacheEnabled); ok {
322 | if boolVal, err := typehelpers.ToBool(val); err == nil {
323 | res = boolVal
324 | }
325 | }
326 |
327 | if steampipeconfig.GlobalConfig.DatabaseOptions != nil && steampipeconfig.GlobalConfig.DatabaseOptions.Cache != nil {
328 | res = *steampipeconfig.GlobalConfig.DatabaseOptions.Cache
329 | }
330 |
331 | log.Printf("[INFO] Hub.getServerCacheEnabled returning %v", res)
332 |
333 | return res
334 | }
335 |
336 | // GetSortableFields returns a slice of fields which are defined as sortable bythe plugin schema,
337 | // as well as the sort order(s) supported
338 | func (h *RemoteHub) GetSortableFields(tableName, connectionName string) map[string]proto.SortOrder {
339 | connectionPlugin, err := h.getConnectionPlugin(connectionName)
340 | if err != nil {
341 | log.Printf("[WARN] GetSortableFields getConnectionPlugin failed for connection %s: %s", connectionName, err.Error())
342 | return nil
343 | }
344 |
345 | schema, err := connectionPlugin.GetSchema(connectionName)
346 | if err != nil {
347 | log.Printf("[WARN] GetSortableFields GetSchema failed for connection %s: %s", connectionName, err.Error())
348 | return nil
349 | }
350 |
351 | tableSchema, ok := schema.Schema[tableName]
352 | if !ok {
353 | log.Printf("[WARN] GetSortableFields table schema not found for connection %s, table %s", connectionName, tableName)
354 | return nil
355 | }
356 |
357 | // build map of sortable fields
358 | var sortableFields = make(map[string]proto.SortOrder)
359 | for _, column := range tableSchema.Columns {
360 | sortableFields[column.Name] = column.SortOrder
361 | }
362 |
363 | if len(sortableFields) > 0 {
364 | log.Printf("[INFO] GetSortableFields for connection '%s`, table `%s`: %v", connectionName, tableName, sortableFields)
365 | }
366 | return sortableFields
367 | }
368 |
--------------------------------------------------------------------------------
/hub/in_memory_iterator.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "context"
5 | "github.com/turbot/steampipe/pkg/query/queryresult"
6 | "log"
7 |
8 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
9 | )
10 |
// inMemoryIterator implements Iterator over a set of rows already held in memory.
type inMemoryIterator struct {
	name string
	// the buffered result rows
	rows []map[string]interface{}
	// index of the next row to be returned by Next()
	index int
	status queryStatus
	// timestamp identifying the parent query
	queryTimestamp int64
}
18 |
19 | func newInMemoryIterator(name string, result *QueryResult, queryTimestamp int64) *inMemoryIterator {
20 | return &inMemoryIterator{
21 | name: name,
22 | rows: result.Rows,
23 | status: QueryStatusStarted, // set as started
24 | queryTimestamp: queryTimestamp,
25 | }
26 | }
27 |
// GetConnectionName implements Iterator
// it returns the name this iterator was created with.
func (i *inMemoryIterator) GetConnectionName() string {
	return i.name
}
32 |
// GetPluginName implements Iterator
// an in-memory iterator is not backed by a plugin, so this returns the empty string.
func (i *inMemoryIterator) GetPluginName() string {
	return ""
}
37 |
// Status implements Iterator - it returns the current lifecycle state.
func (i *inMemoryIterator) Status() queryStatus {
	return i.status
}
41 |
// Error implements Iterator - an in-memory iterator cannot fail, so this is always nil.
func (i *inMemoryIterator) Error() error {
	return nil
}
45 |
46 | // Next implements Iterator
47 | // return next row (tuple). Nil slice means there is no more rows to scan.
48 | func (i *inMemoryIterator) Next() (map[string]interface{}, error) {
49 | if idx := i.index; idx < len(i.rows) {
50 | i.index++
51 | return i.rows[idx], nil
52 | }
53 | log.Printf("[TRACE] inMemoryIterator Next() complete (%p)", i)
54 | i.status = QueryStatusComplete
55 | return nil, nil
56 | }
57 |
58 | // Close implements Iterator
59 | // clear the rows and the index
60 | func (i *inMemoryIterator) Close() {
61 | log.Printf("[TRACE] inMemoryIterator Close() (%p)", i)
62 | i.index = 0
63 | i.rows = nil
64 | i.status = QueryStatusReady
65 | }
66 |
67 | func (i *inMemoryIterator) CanIterate() bool {
68 | switch i.status {
69 | case QueryStatusError, QueryStatusComplete:
70 | return false
71 | default:
72 | return true
73 | }
74 | }
75 |
// GetScanMetadata returns nil - in-memory iterators collect no scan metadata.
func (i *inMemoryIterator) GetScanMetadata() []queryresult.ScanMetadataRow {
	return nil
}
// GetTraceContext returns a trace context wrapping a background context -
// there is no real telemetry span for an in-memory scan.
func (i *inMemoryIterator) GetTraceContext() *telemetry.TraceCtx {
	return &telemetry.TraceCtx{Ctx: context.Background()}
}
82 |
// GetQueryTimestamp returns the timestamp identifying the parent query.
func (i *inMemoryIterator) GetQueryTimestamp() int64 {
	return i.queryTimestamp
}
86 |
--------------------------------------------------------------------------------
/hub/interface.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "context"
5 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
6 | "github.com/turbot/steampipe-plugin-sdk/v5/row_stream"
7 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
8 | "github.com/turbot/steampipe/pkg/query/queryresult"
9 | )
10 |
// Iterator is an interface for table scanner implementations.
type Iterator interface {
	// GetConnectionName returns the connection name that this iterator uses.
	GetConnectionName() string
	// GetPluginName returns the name of the plugin serving this scan.
	GetPluginName() string
	// Next returns a row. Nil slice means there is no more rows to scan.
	Next() (map[string]interface{}, error)
	// Close stops an iteration and frees any resources.
	Close()
	// Status reports the iterator lifecycle state (ready/started/error/complete).
	Status() queryStatus
	// Error returns the error which caused the iterator to fail, if any.
	Error() error
	// CanIterate reports whether the iterator can still produce rows.
	CanIterate() bool
	// GetQueryTimestamp returns the timestamp identifying the parent query.
	GetQueryTimestamp() int64
	// GetTraceContext returns the telemetry trace context for this scan.
	GetTraceContext() *telemetry.TraceCtx
}
26 |
// pluginExecutor abstracts starting a plugin scan: it returns the row stream,
// the context governing the scan and its cancellation function.
type pluginExecutor interface {
	execute(req *proto.ExecuteRequest) (str row_stream.Receiver, ctx context.Context, cancel context.CancelFunc, err error)
}
30 |
// pluginIterator extends Iterator with the operations needed by plugin-backed
// scans (remote and local).
type pluginIterator interface {
	Iterator
	// GetQueryContext returns the quals/columns/limit/sort for this scan.
	GetQueryContext() *proto.QueryContext
	// GetCallId returns the unique id of this scan call.
	GetCallId() string
	// GetConnectionLimitMap returns the per-connection limit map (-1 = no limit).
	GetConnectionLimitMap() map[string]int64
	// SetError stores an error on the iterator.
	SetError(err error)
	// GetTable returns the table being scanned.
	GetTable() string
	// GetScanMetadata returns the per-connection scan metadata collected so far.
	GetScanMetadata() []queryresult.ScanMetadataRow
	// Start begins the scan using the given executor.
	Start(pluginExecutor) error
}
41 |
--------------------------------------------------------------------------------
/hub/query_result.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
// QueryResult accumulates the rows produced by a query.
type QueryResult struct {
	// Rows holds one map per result row, keyed by column name.
	Rows []map[string]interface{}
}

// Append adds the given row to the result set.
func (r *QueryResult) Append(row map[string]interface{}) {
	r.Rows = append(r.Rows, row)
}
10 |
--------------------------------------------------------------------------------
/hub/query_status.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
// queryStatus describes the lifecycle state of an iterator/query.
type queryStatus string

const (
	// QueryStatusReady - the iterator has been created (or reset) but not started.
	QueryStatusReady    queryStatus = "ready"
	// QueryStatusStarted - the iterator is actively producing rows.
	QueryStatusStarted  queryStatus = "started"
	// QueryStatusError - the iterator has failed.
	QueryStatusError    queryStatus = "error"
	// QueryStatusComplete - all rows have been returned.
	QueryStatusComplete queryStatus = "complete"
)
11 |
--------------------------------------------------------------------------------
/hub/query_timing_metadata.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "github.com/turbot/steampipe/pkg/query/queryresult"
5 | "log"
6 | "sort"
7 | "sync"
8 | )
9 |
10 | const (
11 | scanMetadataBufferSize = 500
12 | )
13 |
// queryTimingMetadata stores per-query scan timing information.
type queryTimingMetadata struct {
	// map of scan metadata, keyed by query timestamp
	// we append to this every time a scan completes (either due to end of data, or Postgres terminating)
	// each array is ordered slowest to fastest
	scanMetadata map[int64][]queryresult.ScanMetadataRow
	// map of scan totals, keyed by query timestamp (which is a unique query identifier)
	queryRowSummary map[int64]*queryresult.QueryRowSummary

	// guards scanMetadata and queryRowSummary
	scanMetadataLock sync.RWMutex
}
24 |
// addScanMetadata records the metadata for a completed scan against its query
// timestamp. At most scanMetadataBufferSize entries are kept per query,
// ordered slowest-first; once the buffer is full a new scan only displaces an
// existing entry if it is at least as slow as the current fastest.
// NOTE(review): this method reads and writes m.scanMetadata without taking
// scanMetadataLock (unlike clearSummary/clearScanMetadata) - presumably the
// caller is expected to hold the lock; verify against callers.
func (m *queryTimingMetadata) addScanMetadata(queryTimestamp int64, scanMetadata queryresult.ScanMetadataRow) {
	metadataForQuery := m.scanMetadata[queryTimestamp]

	// below capacity - just append and re-sort by decreasing duration
	if len(metadataForQuery) < scanMetadataBufferSize {
		metadataForQuery = append(metadataForQuery, scanMetadata)
		m.scanMetadata[queryTimestamp] = metadataForQuery
		// sort the metadata by decreasing time
		sort.Slice(metadataForQuery, func(i, j int) bool {
			return metadataForQuery[i].DurationMs > metadataForQuery[j].DurationMs
		})
		return
	}

	// so we have the maximum number of scan metadata items - if this scan is faster than the slowest item, ignore it
	if scanMetadata.DurationMs < metadataForQuery[len(metadataForQuery)-1].DurationMs {
		return
	}

	// add the scan metadata to the list, resort and trim to the max number of items
	metadataForQuery = append(metadataForQuery, scanMetadata)
	sort.Slice(metadataForQuery, func(i, j int) bool {
		return metadataForQuery[i].DurationMs > metadataForQuery[j].DurationMs

	})
	m.scanMetadata[queryTimestamp] = metadataForQuery[:scanMetadataBufferSize]
}
51 |
52 | func newQueryTimingMetadata() *queryTimingMetadata {
53 | return &queryTimingMetadata{
54 | scanMetadata: make(map[int64][]queryresult.ScanMetadataRow),
55 | queryRowSummary: make(map[int64]*queryresult.QueryRowSummary),
56 | }
57 | }
58 |
59 | func (m *queryTimingMetadata) removeStaleScanMetadata(currentTimestamp int64) {
60 | log.Printf("[INFO] removeStaleScanMetadata for current query timestamp %d", currentTimestamp)
61 |
62 | // clear all query metadata for previous queries
63 | for existingTimestamp := range m.scanMetadata {
64 | if existingTimestamp != currentTimestamp {
65 | log.Printf("[INFO] REMOVING timestamp %d", existingTimestamp)
66 | delete(m.scanMetadata, existingTimestamp)
67 | delete(m.queryRowSummary, existingTimestamp)
68 | }
69 | }
70 | }
71 |
// clearSummary discards all query row summaries, under the write lock.
func (m *queryTimingMetadata) clearSummary() {
	m.scanMetadataLock.Lock()
	defer m.scanMetadataLock.Unlock()
	m.queryRowSummary = make(map[int64]*queryresult.QueryRowSummary)
}
77 |
// clearScanMetadata discards all collected scan metadata, under the write lock.
func (m *queryTimingMetadata) clearScanMetadata() {
	m.scanMetadataLock.Lock()
	defer m.scanMetadataLock.Unlock()
	m.scanMetadata = make(map[int64][]queryresult.ScanMetadataRow)
}
83 |
--------------------------------------------------------------------------------
/hub/scan_iterator.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "context"
5 | "log"
6 |
7 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc"
8 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
9 | "github.com/turbot/steampipe-plugin-sdk/v5/row_stream"
10 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
11 | "github.com/turbot/steampipe/pkg/query/queryresult"
12 | "github.com/turbot/steampipe/pkg/steampipeconfig"
13 | "golang.org/x/exp/maps"
14 | )
15 |
16 | // TODO think about when we reset status from complete to ready
17 |
// scanIterator is the remote-hub scan iterator - it streams rows from a
// plugin process over GRPC via the connection plugin's client.
type scanIterator struct {
	scanIteratorBase
	connectionPlugin *steampipeconfig.ConnectionPlugin
	// NOTE(review): this field is not set by newScanIterator (the hub is stored
	// on scanIteratorBase) - confirm whether it is assigned elsewhere or unused
	hub *RemoteHub
}
23 |
24 | func newScanIterator(hub Hub, connectionPlugin *steampipeconfig.ConnectionPlugin, connectionName, table string, connectionLimitMap map[string]int64, qualMap map[string]*proto.Quals, columns []string, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, traceCtx *telemetry.TraceCtx) *scanIterator {
25 | return &scanIterator{
26 | scanIteratorBase: newBaseScanIterator(hub, connectionName, table, connectionLimitMap, qualMap, columns, limit, sortOrder, queryTimestamp, traceCtx),
27 | connectionPlugin: connectionPlugin,
28 | }
29 | }
30 |
// GetPluginName implements Iterator
// it returns the name of the plugin backing this connection.
func (i *scanIterator) GetPluginName() string {
	return i.connectionPlugin.PluginName
}
35 |
36 | // execute implements executor
37 | func (i *scanIterator) execute(req *proto.ExecuteRequest) (row_stream.Receiver, context.Context, context.CancelFunc, error) {
38 | log.Printf("[INFO] StartScan for table: %s, cache enabled: %v, iterator %p, %d quals (%s)", i.table, req.CacheEnabled, i, len(i.queryContext.Quals), i.callId)
39 | stream, ctx, cancel, err := i.connectionPlugin.PluginClient.Execute(req)
40 | // format GRPC errors
41 | err = grpc.HandleGrpcError(err, i.connectionPlugin.PluginName, "Execute")
42 | if err != nil {
43 | return nil, nil, nil, err
44 | }
45 | return stream, ctx, cancel, nil
46 | }
47 |
48 | // GetScanMetadata returns the scan metadata for this iterator
49 | // note: if this is an aggregator query, we will have a scan metadata for each connection
50 | // we need to combine them into a single scan metadata object
51 | func (i *scanIterator) GetScanMetadata() []queryresult.ScanMetadataRow {
52 | // if we have scan metadata, return it
53 | if len(i.scanMetadata) > 0 {
54 | return maps.Values(i.scanMetadata)
55 | }
56 |
57 | // if there is no scan metadata, add an empty one
58 | var res []queryresult.ScanMetadataRow
59 | for connection := range i.connectionLimitMap {
60 | res = append(res, i.newScanMetadata(connection, nil))
61 | }
62 | return res
63 | }
64 |
--------------------------------------------------------------------------------
/hub/scan_iterator_base.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "github.com/golang/protobuf/ptypes"
8 | "github.com/turbot/go-kit/helpers"
9 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc"
10 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
11 | "github.com/turbot/steampipe-plugin-sdk/v5/logging"
12 | "github.com/turbot/steampipe-plugin-sdk/v5/row_stream"
13 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
14 | "github.com/turbot/steampipe-postgres-fdw/types"
15 | "github.com/turbot/steampipe/pkg/query/queryresult"
16 | "google.golang.org/protobuf/reflect/protoreflect"
17 | "log"
18 | "time"
19 | )
20 |
// scanIteratorBase holds the state shared by the remote and local scan iterators.
type scanIteratorBase struct {
	status queryStatus
	err    error
	// channel over which the read goroutine delivers rows to Next()
	rows chan *proto.Row
	// per-connection scan metadata, updated as rows arrive
	scanMetadata map[string]queryresult.ScanMetadataRow
	// the stream of rows from the plugin
	pluginRowStream row_stream.Receiver
	rel             *types.Relation
	hub             Hub
	table           string
	connectionName  string
	// per-connection row limits (-1 means no limit)
	connectionLimitMap map[string]int64
	// cancels the scan context; nil until Start succeeds
	cancel   context.CancelFunc
	traceCtx *telemetry.TraceCtx
	queryContext *proto.QueryContext
	// the query timestamp is used to uniquely identify the parent query
	// NOTE: all scans for the query will have the same timestamp
	queryTimestamp int64

	startTime time.Time
	callId    string
}
42 |
43 | func newBaseScanIterator(hub Hub, connectionName, table string, connectionLimitMap map[string]int64, qualMap map[string]*proto.Quals, columns []string, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, traceCtx *telemetry.TraceCtx) scanIteratorBase {
44 | return scanIteratorBase{
45 | status: QueryStatusReady,
46 | rows: make(chan *proto.Row, rowBufferSize),
47 | scanMetadata: make(map[string]queryresult.ScanMetadataRow),
48 | hub: hub,
49 | table: table,
50 | connectionName: connectionName,
51 | connectionLimitMap: connectionLimitMap,
52 | traceCtx: traceCtx,
53 | startTime: time.Now(),
54 | queryContext: proto.NewQueryContext(columns, qualMap, limit, sortOrder),
55 | callId: grpc.BuildCallId(),
56 | queryTimestamp: queryTimestamp,
57 | }
58 | }
59 |
60 | // access functions
61 |
// GetConnectionName returns the connection this iterator is scanning.
func (i *scanIteratorBase) GetConnectionName() string {
	return i.connectionName
}
65 |
// Status returns the current iterator lifecycle state.
func (i *scanIteratorBase) Status() queryStatus {
	return i.status
}
69 |
// Error returns the error which put the iterator into the error state, if any.
func (i *scanIteratorBase) Error() error {
	return i.err
}
73 |
// Next implements Iterator
// return the next row. Nil row means there are no more rows to scan.
// It blocks on the row channel which is fed by the readThread goroutine;
// a nil row (closed channel) signals end-of-scan.
func (i *scanIteratorBase) Next() (map[string]interface{}, error) {
	// check the iterator state - has an error occurred
	if i.status == QueryStatusError {
		return nil, i.err
	}

	if !i.CanIterate() {
		// this is a bug
		log.Printf("[WARN] scanIteratorBase cannot iterate: connection %s, status: %s", i.GetConnectionName(), i.Status())
		return nil, fmt.Errorf("scanIteratorBase cannot iterate: connection %s, status: %s", i.GetConnectionName(), i.Status())
	}

	// block until the read goroutine delivers a row (or closes the channel)
	row := <-i.rows

	// if the row channel closed, complete the iterator state
	var res map[string]interface{}
	if row == nil {
		// close the span and set the status
		i.Close()
		// remove from hub running iterators
		i.hub.RemoveIterator(i)

		// if iterator is in error, return the error
		if i.Status() == QueryStatusError {
			// return error
			return nil, i.err
		}
		// otherwise mark iterator complete, caching result
	} else {
		// so we got a row
		var err error
		// convert the protobuf row into a map of column name -> value
		res, err = i.populateRow(row)
		if err != nil {
			return nil, err
		}
	}
	return res, nil
}
114 |
// closeSpan ends the telemetry span associated with this scan.
func (i *scanIteratorBase) closeSpan() {
	i.traceCtx.Span.End()
}
118 |
119 | func (i *scanIteratorBase) Close() {
120 | // call the context cancellation function
121 | i.cancel()
122 |
123 | // set status to complete
124 | if i.status != QueryStatusError {
125 | i.status = QueryStatusComplete
126 | }
127 |
128 | i.closeSpan()
129 | }
130 |
131 | // CanIterate returns true if this iterator has results available to iterate
132 | func (i *scanIteratorBase) CanIterate() bool {
133 | switch i.status {
134 | case QueryStatusError, QueryStatusReady, QueryStatusComplete:
135 | // scan iterator must be explicitly started - so we cannot iterate is in ready state
136 | return false
137 | default:
138 | return true
139 | }
140 |
141 | }
142 |
// GetTraceContext returns the telemetry trace context for this scan.
func (i *scanIteratorBase) GetTraceContext() *telemetry.TraceCtx {
	return i.traceCtx
}
146 |
// GetQueryContext returns the quals/columns/limit/sort for this scan.
func (i *scanIteratorBase) GetQueryContext() *proto.QueryContext {
	return i.queryContext
}
150 |
// GetCallId returns the unique id of this scan call.
func (i *scanIteratorBase) GetCallId() string {
	return i.callId
}
154 |
// GetConnectionLimitMap returns the per-connection limit map (-1 means no limit).
func (i *scanIteratorBase) GetConnectionLimitMap() map[string]int64 {
	return i.connectionLimitMap
}
158 |
// SetError stores an error on the iterator.
// NOTE(review): unlike the private setError, this does NOT set the status to
// QueryStatusError and does not filter EOF - confirm this asymmetry is intended.
func (i *scanIteratorBase) SetError(err error) {
	i.err = err
}
162 |
// GetTable returns the name of the table being scanned.
func (i *scanIteratorBase) GetTable() string {
	return i.table
}
166 |
167 | func (i *scanIteratorBase) Start(executor pluginExecutor) error {
168 | req := i.newExecuteRequest()
169 |
170 | // create context anc cancel function
171 |
172 | stream, ctx, cancel, err := executor.execute(req)
173 | if err != nil {
174 | return err
175 | }
176 |
177 | logging.LogTime("[hub] start")
178 | i.status = QueryStatusStarted
179 | i.pluginRowStream = stream
180 | i.cancel = cancel
181 |
182 | // read the results - this will loop until it hits an error or the stream is closed
183 | go i.readThread(ctx)
184 | return nil
185 | }
186 |
// GetPluginName implements Iterator (this should be implemented by the concrete iterator)
// calling it on the base is a programming error, hence the panic.
func (i *scanIteratorBase) GetPluginName() string {
	panic("method GetPluginName not implemented")
}
191 |
// GetQueryTimestamp returns the timestamp identifying the parent query.
func (i *scanIteratorBase) GetQueryTimestamp() int64 {
	return i.queryTimestamp
}
195 |
// GetScanMetadata returns nil on the base - concrete iterators override this.
func (i *scanIteratorBase) GetScanMetadata() []queryresult.ScanMetadataRow {
	return nil
}
199 |
200 | func (i *scanIteratorBase) newExecuteRequest() *proto.ExecuteRequest {
201 | req := &proto.ExecuteRequest{
202 | Table: i.table,
203 | QueryContext: i.queryContext,
204 | CallId: i.callId,
205 | // pass connection name - used for aggregators
206 | Connection: i.connectionName,
207 | TraceContext: grpc.CreateCarrierFromContext(i.traceCtx.Ctx),
208 | ExecuteConnectionData: make(map[string]*proto.ExecuteConnectionData),
209 | }
210 |
211 | log.Printf("[INFO] build executeConnectionData map: hub: %p, i.connectionLimitMap: %v", i.hub, i.connectionLimitMap)
212 | // build executeConnectionData map
213 | for connectionName, limit := range i.connectionLimitMap {
214 | data := &proto.ExecuteConnectionData{}
215 | if limit != -1 {
216 | data.Limit = &proto.NullableInt{Value: limit}
217 | }
218 | data.CacheTtl = int64(i.hub.cacheTTL(connectionName).Seconds())
219 | data.CacheEnabled = i.hub.cacheEnabled(connectionName)
220 | req.ExecuteConnectionData[connectionName] = data
221 |
222 | }
223 | log.Printf("[INFO] build executeConnectionData map returning %v", req)
224 |
225 | return req
226 | }
// populateRow converts a protobuf row into a map of column name -> Go value.
// JSON-encoded columns are unmarshalled, timestamps are rendered as RFC 3339
// strings, and all other column types are extracted via protobuf reflection.
// On a JSON unmarshal failure the iterator is put into the error state.
func (i *scanIteratorBase) populateRow(row *proto.Row) (map[string]interface{}, error) {
	res := make(map[string]interface{}, len(row.Columns))
	for columnName, column := range row.Columns {
		// extract column value as interface from protobuf message
		var val interface{}
		if bytes := column.GetJsonValue(); bytes != nil {
			if err := json.Unmarshal(bytes, &val); err != nil {
				err = fmt.Errorf("failed to populate column '%s': %v", columnName, err)
				i.setError(err)
				return nil, err
			}
		} else if timestamp := column.GetTimestampValue(); timestamp != nil {
			// convert from protobuf timestamp to a RFC 3339 time string
			val = ptypes.TimestampString(timestamp)
		} else {
			// get the first field descriptor and value (we only expect column message to contain a single field)
			column.ProtoReflect().Range(func(descriptor protoreflect.FieldDescriptor, v protoreflect.Value) bool {
				// is this value null?
				if descriptor.JSONName() == "nullValue" {
					val = nil
				} else {
					val = v.Interface()
				}
				// returning false stops after the first (only) populated field
				return false
			})
		}
		res[columnName] = val
	}
	return res, nil
}
257 |
// readThread is run in a goroutine and reads results from the GRPC stream until either:
// - the stream is complete
// - the stream returns an error
// - there is a signal on the cancel channel
// It always closes the row channel on exit, which unblocks Next().
func (i *scanIteratorBase) readThread(ctx context.Context) {
	// if the iterator is not in a started state, skip
	// (this can happen if postgres cancels the scan before receiving any results)
	if i.status == QueryStatusStarted {
		// keep calling readPluginResult until it returns false
		for i.readPluginResult(ctx) {
		}
	}

	// now we are done
	close(i.rows)
}
274 |
275 | func (i *scanIteratorBase) readPluginResult(ctx context.Context) bool {
276 | continueReading := true
277 | var rcvChan = make(chan *proto.ExecuteResponse)
278 | var errChan = make(chan error)
279 |
280 | // put the stream receive code into a goroutine to ensure cancellation is possible in case of a plugin hang
281 | go func() {
282 | defer func() {
283 | if r := recover(); r != nil {
284 | errChan <- helpers.ToError(r)
285 | }
286 | }()
287 | rowResult, err := i.pluginRowStream.Recv()
288 |
289 | if err != nil {
290 | errChan <- err
291 | } else {
292 | rcvChan <- rowResult
293 | }
294 | }()
295 |
296 | select {
297 | // check for cancellation first - this takes precedence over reading the grpc stream
298 | case <-ctx.Done():
299 | log.Printf("[TRACE] readPluginResult context is cancelled (%p)", i)
300 | continueReading = false
301 | case rowResult := <-rcvChan:
302 | if rowResult == nil {
303 | log.Printf("[TRACE] readPluginResult nil row received - stop reading (%p) (%s)", i, i.callId)
304 | // stop reading
305 | continueReading = false
306 | } else {
307 | // update the scan metadata for this connection (this will overwrite any existing from the previous row)
308 | i.scanMetadata[rowResult.Connection] = i.newScanMetadata(rowResult.Connection, rowResult.Metadata)
309 |
310 | // so we have a row
311 | i.rows <- rowResult.Row
312 | }
313 | case err := <-errChan:
314 | if err.Error() == "EOF" {
315 | log.Printf("[TRACE] readPluginResult EOF error received - stop reading (%p) (%s)", i, i.callId)
316 | } else {
317 | log.Printf("[WARN] stream receive error %v (%p)\n", err, i)
318 | i.setError(err)
319 | }
320 | // stop reading
321 | continueReading = false
322 | }
323 | return continueReading
324 | }
325 |
// newScanMetadata builds a ScanMetadataRow for the given connection, stamped
// with this scan's table, query context, start time and elapsed duration.
// m may be nil (used to synthesise empty metadata entries).
func (i *scanIteratorBase) newScanMetadata(connection string, m *proto.QueryMetadata) queryresult.ScanMetadataRow {
	return queryresult.NewScanMetadataRow(connection, i.table, i.queryContext.Columns, i.queryContext.Quals, i.startTime, time.Since(i.startTime), i.connectionLimitMap[connection], m)
}
329 |
// if there is an error other than EOF, save error and set state to QueryStatusError
// (EOF simply signals the normal end of the stream, so it is not treated as a failure)
func (i *scanIteratorBase) setError(err error) {
	if err != nil && err.Error() != "EOF" {
		i.status = QueryStatusError
		i.err = err
	}
}
337 |
--------------------------------------------------------------------------------
/hub/scan_iterator_local.go:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "context"
5 | "log"
6 |
7 | "github.com/turbot/steampipe-plugin-sdk/v5/anywhere"
8 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
9 | "github.com/turbot/steampipe-plugin-sdk/v5/row_stream"
10 | "github.com/turbot/steampipe-plugin-sdk/v5/telemetry"
11 | )
12 |
// scanIteratorLocal is the scan iterator used by the local (in-process) hub -
// it executes the scan against a locally loaded plugin rather than over GRPC.
type scanIteratorLocal struct {
	scanIteratorBase
	// name of the locally loaded plugin serving this scan
	pluginName string
}
17 |
18 | func newScanIteratorLocal(hub Hub, connectionName, table, pluginName string, connectionLimitMap map[string]int64, qualMap map[string]*proto.Quals, columns []string, limit int64, sortOrder []*proto.SortColumn, queryTimestamp int64, traceCtx *telemetry.TraceCtx) *scanIteratorLocal {
19 | return &scanIteratorLocal{
20 | scanIteratorBase: newBaseScanIterator(hub, connectionName, table, connectionLimitMap, qualMap, columns, limit, sortOrder, queryTimestamp, traceCtx),
21 | pluginName: pluginName,
22 | }
23 | }
24 |
// GetPluginName implements Iterator
// it returns the name of the locally loaded plugin.
func (i *scanIteratorLocal) GetPluginName() string {
	return i.pluginName
}
29 |
30 | // execute implements executor
31 | func (i *scanIteratorLocal) execute(req *proto.ExecuteRequest) (row_stream.Receiver, context.Context, context.CancelFunc, error) {
32 | ctx, cancel := context.WithCancel(context.Background())
33 | // create a local stream
34 | stream := anywhere.NewLocalPluginStream(ctx)
35 |
36 | plugin := i.hub.(*HubLocal).plugin
37 | log.Printf("[INFO] StartScan for table: %s, cache enabled: %v, iterator %p, %d quals (%s)", i.table, req.CacheEnabled, i, len(i.queryContext.Quals), i.callId)
38 | plugin.CallExecuteAsync(req, stream)
39 |
40 | return stream, ctx, cancel, nil
41 | }
42 |
--------------------------------------------------------------------------------
/out/README.md:
--------------------------------------------------------------------------------
1 | # Steampipe Postgres FDW AWS
2 |
3 | This Postgres Foreign Data Wrapper (FDW) allows you to query AWS data using the [Steampipe Plugin](https://github.com/turbot/steampipe-plugin-aws).
4 |
5 | ## Prerequisites
6 | Before you can use this Postgres FDW, make sure you have the following prerequisites in place:
7 |
8 | - You need to have a Postgres database installed and running. You can download and install PostgreSQL from the [official PostgreSQL website](https://www.postgresql.org/download/).
9 |
10 | ## Configuration
11 | To set up the configuration for this PostgreSQL FDW, follow these steps:
12 |
- Download the binary (`.so`), SQL, and Control extension files from the latest release of this FDW.
14 |
15 | - Copy the downloaded SQL and Control files into your Postgres extensions directory and the downloaded binary file into your PostgreSQL lib directory:
16 | ```bash
17 | cp steampipe_postgres_fdw_aws--1.0.sql /path/to/your/extensions/directory
18 | cp steampipe_postgres_fdw_aws.control /path/to/your/extensions/directory
19 | cp steampipe_postgres_fdw_aws.so /path/to/your/lib/directory
20 | ```
21 |
22 | - Run the following SQL commands to create extensions and servers:
23 | ```sql
24 | DROP EXTENSION IF EXISTS steampipe_postgres_fdw_aws CASCADE;
25 | CREATE EXTENSION IF NOT EXISTS steampipe_postgres_fdw_aws;
26 | DROP SERVER IF EXISTS steampipe_aws;
27 | CREATE SERVER steampipe_aws FOREIGN DATA WRAPPER steampipe_postgres_fdw_aws;
28 | DROP SCHEMA IF EXISTS aws CASCADE;
29 | CREATE SCHEMA aws;
30 | IMPORT FOREIGN SCHEMA aws FROM SERVER steampipe_aws INTO aws;
31 | ```
32 | Once you have completed these steps, your PostgreSQL environment will be configured to work with the FDW. You can then proceed to use the FDW to query AWS data.
33 |
34 | ## Usage
35 | Please refer to the [Table Documentation](https://hub.steampipe.io/plugins/turbot/aws/tables).
36 |
--------------------------------------------------------------------------------
/prebuild.tmpl:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #cgo OS_PLACEHOLDER CFLAGS: -ILIB_INTL_PLACEHOLDER -g
5 | #cgo OS_PLACEHOLDER CFLAGS: -Ifdw -ISERVER_INCLUDE_PLACEHOLDER -IINTERNAL_INCLUDE_PLACEHOLDER -g
6 | #include "postgres.h"
7 | #include "common.h"
8 | #include "fdw_helpers.h"
9 | */
10 | import "C"
11 |
12 | /*
13 | DISCLAIMER
14 | */
--------------------------------------------------------------------------------
/schema.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | /*
4 | #cgo linux LDFLAGS: -Wl,-unresolved-symbols=ignore-all
5 | #cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup
6 | #include "fdw_helpers.h"
7 | */
8 | import "C"
9 |
10 | import (
11 | "log"
12 | "slices"
13 | "unsafe"
14 |
15 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
16 | "github.com/turbot/steampipe-postgres-fdw/sql"
17 | )
18 |
19 | func SchemaToSql(schema map[string]*proto.TableSchema, stmt *C.ImportForeignSchemaStmt, serverOid C.Oid) *C.List {
20 | var commands *C.List
21 |
22 | server := C.GetForeignServer(serverOid)
23 | if server == nil {
24 | return nil
25 | }
26 | serverName := C.GoString(server.servername)
27 | localSchema := C.GoString(stmt.local_schema)
28 |
29 | // first figure out which tables we want
30 | var tables []string
31 | // iterate over table list
32 | if stmt.table_list != nil {
33 | for it := C.list_head(stmt.table_list); it != nil; it = C.lnext(stmt.table_list, it) {
34 | var rv *C.RangeVar = C.cellGetRangeVar(it)
35 | t := C.GoString(rv.relname)
36 | tables = append(tables, t)
37 | }
38 | }
39 | log.Printf("[TRACE] SchemaToSql: tables %v\n", tables)
40 |
41 | // TODO we do not handle any options currently
42 |
43 | for table, tableSchema := range schema {
44 | if stmt.list_type == C.FDW_IMPORT_SCHEMA_LIMIT_TO {
45 | log.Printf("[TRACE] list_type is FDW_IMPORT_SCHEMA_LIMIT_TO: %v", tables)
46 |
47 | if !slices.Contains(tables, table) {
48 | log.Printf("[TRACE] Skipping table %s", table)
49 |
50 | continue
51 | }
52 | } else if stmt.list_type == C.FDW_IMPORT_SCHEMA_EXCEPT {
53 | log.Printf("[TRACE] list_type is FDW_IMPORT_SCHEMA_EXCEPT: %v", tables)
54 |
55 | if slices.Contains(tables, table) {
56 | log.Printf("[TRACE] Skipping table %s", table)
57 | continue
58 | }
59 | }
60 | log.Printf("[TRACE] Import table %s", table)
61 |
62 | sql, err := sql.GetSQLForTable(table, tableSchema, localSchema, serverName)
63 | if err != nil {
64 | FdwError(err)
65 | return nil
66 | }
67 |
68 | log.Printf("[TRACE] SQL %s", sql)
69 | commands = C.lappend(commands, unsafe.Pointer(C.CString(sql)))
70 | }
71 |
72 | return commands
73 | }
74 |
--------------------------------------------------------------------------------
/scripts/README.md:
--------------------------------------------------------------------------------
1 | This file will be created when you run make standalone.
--------------------------------------------------------------------------------
/scripts/build-linux-arm-pg14.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Builds the Postgres 14 FDW for a single plugin (named via $1) inside the
# steampipe_fdw_builder:14 Docker image, then packages the result as a
# linux_arm64 tarball.
#
# Fix: this script uses bash arrays and [[ ]], which are not POSIX - the
# original "#!/bin/sh" shebang breaks where /bin/sh is dash.

set -e

# plugins to build - currently a single plugin, passed as the first argument
list=(
  "steampipe-plugin-$1"
)

# resolve the absolute directory this script lives in (unless already set)
if [[ ! ${MY_PATH} ]];
then
  MY_PATH="`dirname \"$0\"`"              # relative
  MY_PATH="`( cd \"$MY_PATH\" && pwd )`"  # absolutized and normalized
fi

echo "$MY_PATH"

# pre-create cache dirs; ignore failure if they already exist
mkdir "$MY_PATH/go-cache" || true
mkdir "$MY_PATH/go-mod-cache" || true

# NOTE(review): GOCACHE/MODCACHE are captured but never used below - kept
# for parity with the original; confirm whether they can be removed.
GOCACHE=$(docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder steampipe_fdw_builder:14 go env GOCACHE)
MODCACHE=$(docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder steampipe_fdw_builder:14 go env GOMODCACHE)

for item in "${list[@]}"; do
  # plugin name is the suffix after the last '-'
  plugin_name=${item##*-}

  echo "Processing plugin: ${plugin_name}"

  # Step 1: Switch to steampipe-postgres-fdw directory
  cd "$GITHUB_WORKSPACE" || exit 1

  # Step 2: Run Docker commands for Postgres FDW Builder v14
  echo "Building Postgres FDW 14 for plugin: ${plugin_name}"
  docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder -v "$(pwd)":/tmp/ext steampipe_fdw_builder:14 make clean
  docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder -v "$(pwd)":/tmp/ext steampipe_fdw_builder:14 make standalone plugin="${plugin_name}"

  # Step 3: Check if build-Linux directory is created
  if [ ! -d "build-Linux" ] || [ ! -f "build-Linux/steampipe_postgres_${plugin_name}.so" ]; then
    echo "Error: build-Linux directory or steampipe_postgres_${plugin_name}.so not found."
    exit 1
  fi

  # Step 4: Move and tar for Postgres v14
  echo "Move and tar for Postgres v14 for plugin: ${plugin_name}"
  mv build-Linux steampipe_postgres_${plugin_name}.pg14.linux_arm64
  tar -czvf steampipe_postgres_${plugin_name}.pg14.linux_arm64.tar.gz steampipe_postgres_${plugin_name}.pg14.linux_arm64

  # Step 5: Check if tar.gz file is created for v14
  if [ ! -f "steampipe_postgres_${plugin_name}.pg14.linux_arm64.tar.gz" ]; then
    echo "Error: Tar file for Postgres v14 not created."
    exit 1
  fi

  echo "Processing completed for plugin: ${plugin_name}"
done

echo "All plugins processed successfully."
57 |
--------------------------------------------------------------------------------
/scripts/build-linux-arm-pg15.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Builds the Postgres 15 FDW for a single plugin (named via $1) inside the
# steampipe_fdw_builder:15 Docker image, then packages the result as a
# linux_arm64 tarball.
#
# Fix: this script uses bash arrays and [[ ]], which are not POSIX - the
# original "#!/bin/sh" shebang breaks where /bin/sh is dash.

set -e

# plugins to build - currently a single plugin, passed as the first argument
list=(
  "steampipe-plugin-$1"
)

# resolve the absolute directory this script lives in (unless already set)
if [[ ! ${MY_PATH} ]];
then
  MY_PATH="`dirname \"$0\"`"              # relative
  MY_PATH="`( cd \"$MY_PATH\" && pwd )`"  # absolutized and normalized
fi

echo "$MY_PATH"

# pre-create cache dirs; ignore failure if they already exist
mkdir "$MY_PATH/go-cache" || true
mkdir "$MY_PATH/go-mod-cache" || true

# NOTE(review): GOCACHE/MODCACHE are captured but never used below - kept
# for parity with the original; confirm whether they can be removed.
GOCACHE=$(docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder steampipe_fdw_builder:15 go env GOCACHE)
MODCACHE=$(docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder steampipe_fdw_builder:15 go env GOMODCACHE)

for item in "${list[@]}"; do
  # plugin name is the suffix after the last '-'
  plugin_name=${item##*-}

  echo "Processing plugin: ${plugin_name}"

  # Step 1: Switch to steampipe-postgres-fdw directory
  cd "$GITHUB_WORKSPACE" || exit 1

  # Step 2: Run Docker commands for Postgres FDW Builder v15
  echo "Building Postgres FDW 15 for plugin: ${plugin_name}"
  docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder -v "$(pwd)":/tmp/ext steampipe_fdw_builder:15 make clean
  docker run --memory="10g" --memory-swap="16g" --rm --name sp_fdw_builder -v "$(pwd)":/tmp/ext steampipe_fdw_builder:15 make standalone plugin="${plugin_name}"

  # Step 3: Check if build-Linux directory is created
  if [ ! -d "build-Linux" ] || [ ! -f "build-Linux/steampipe_postgres_${plugin_name}.so" ]; then
    echo "Error: build-Linux directory or steampipe_postgres_${plugin_name}.so not found."
    exit 1
  fi

  # Step 4: Move and tar for Postgres v15
  echo "Move and tar for Postgres v15 for plugin: ${plugin_name}"
  mv build-Linux steampipe_postgres_${plugin_name}.pg15.linux_arm64
  tar -czvf steampipe_postgres_${plugin_name}.pg15.linux_arm64.tar.gz steampipe_postgres_${plugin_name}.pg15.linux_arm64

  # Step 5: Check if tar.gz file is created for v15
  if [ ! -f "steampipe_postgres_${plugin_name}.pg15.linux_arm64.tar.gz" ]; then
    echo "Error: Tar file for Postgres v15 not created."
    exit 1
  fi

  echo "Processing completed for plugin: ${plugin_name}"
done

echo "All plugins processed successfully."
57 |
--------------------------------------------------------------------------------
/scripts/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This sql script will be created when you run make standalone.
--------------------------------------------------------------------------------
/scripts/script_to_build.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# This script is used to build FDW binaries for Linux ARM. This script is used in
# the build-linux-arm job in Build Draft Release workflow to ssh into the ec2
# instance and build the binaries.

#function that makes the script exit, if any command fails
# (must be called immediately after the command to check - $? at function
# entry is the exit status of the most recently executed command)
exit_if_failed () {
  if [ $? -ne 0 ]
  then
    exit 1
  fi
}

echo "Check arch and export GOROOT & GOPATH"
uname -m
export GOROOT=/usr/local/go
# NOTE(review): GOPATH is referenced but never set by this script -
# presumably exported by the ec2 instance's environment; confirm.
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
echo ""

# sanity check that the expected working files exist on the instance
echo "Try an existing file"
cat test.txt
exit_if_failed
echo ""

echo "Check go version"
go version
exit_if_failed
echo ""

echo "Checkout to cloned fdw repo"
cd steampipe-postgres-fdw
pwd
echo ""

echo "git reset"
git reset
exit_if_failed
echo ""

echo "git restore all changed files(if any)"
git restore .
exit_if_failed
echo ""

echo "git fetch"
git fetch
exit_if_failed
echo ""

echo "git pull origin main"
git checkout main
git pull origin main
exit_if_failed
echo ""


# check out the ref to build, passed as the first argument (branch or tag)
echo "git checkout "
input=$1
echo $input
git checkout $input
exit_if_failed
git branch --list
exit_if_failed
echo ""

echo "Remove existing build-Linux dir"
rm -rf build-Linux
exit_if_failed
echo ""

echo "Run build"
make
exit_if_failed
echo ""

echo "Check if binary is created"
file build-Linux/steampipe_postgres_fdw.so
exit_if_failed
echo ""

echo "Hallelujah!"
exit 0
--------------------------------------------------------------------------------
/scripts/steampipe_postgres_installer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
# main: interactively installs the steampipe postgres FDW for a plugin -
# detects OS/arch, locates pg_config, downloads the matching release asset
# from GitHub and copies the extension files into the postgres installation.
main() {
    # ANSI escape code variables
    BOLD=$(tput bold)
    NORMAL=$(tput sgr0)

    if ! command -v tar >/dev/null; then
        echo "Error: 'tar' is required." 1>&2
        exit 1
    fi

    # map OS/architecture to the release-asset filename suffix
    OS=$(uname -s)
    if [ "$OS" = "Windows_NT" ]; then
        echo "Error: Windows is not supported yet." 1>&2
        exit 1
    else
        UNAME_SM=$(uname -sm)
        case "$UNAME_SM" in
        "Darwin x86_64") target="darwin_amd64.tar.gz" ;;
        "Darwin arm64") target="darwin_arm64.tar.gz" ;;
        "Linux x86_64") target="linux_amd64.tar.gz" ;;
        "Linux aarch64") target="linux_arm64.tar.gz" ;;
        *) echo "Error: '$UNAME_SM' is not supported yet." 1>&2; exit 1 ;;
        esac
    fi

    # Check if plugin is provided as an argument
    if [ $# -eq 0 ] || [ -z "$1" ]; then
        printf "Enter the plugin name: "
        read plugin
    else
        plugin=$1
    fi

    # Check if version is provided as an argument
    if [ $# -lt 2 ] || [ -z "$2" ]; then
        printf "Enter the version (latest): "
        read version
        version=${version:-latest} # Default to 'latest' if input is empty
    else
        version=$2
    fi

    # Locate the PostgreSQL installation directory and version
    if command -v pg_config >/dev/null; then
        PG_CONFIG=$(command -v pg_config)
    fi
    if [ -z "$PG_CONFIG" ]; then
        echo "Warning: 'pg_config' was not found in your PATH."
        printf "Please enter the full path to your PostgreSQL installation directory (e.g., /usr/lib/postgresql/14): "
        read PG_DIR
        PG_CONFIG="${PG_DIR%/}/bin/pg_config"

        if [ ! -x "$PG_CONFIG" ]; then
            echo "Error: 'pg_config' could not be found in the provided directory." 1>&2
            exit 1
        fi
    fi

    # Extract the major version number from the PostgreSQL version and PG_DIR
    get_postgresql_details $plugin
    echo ""

    # Prompt the user to confirm the installation of the FDW for the detected version
    echo "Proceed with installing Steampipe PostgreSQL FDW for version $PG_VERSION at $PG_DIR?"
    echo "- Press 'y' to continue with the current version."
    printf -- "- Press 'n' to customize your PostgreSQL installation directory and select a different version. (Y/n): "
    read REPLY

    echo
    if [ "$REPLY" = "n" ] || [ "$REPLY" = "N" ]; then
        echo ""
        printf "Please enter the full path to your PostgreSQL installation directory (e.g., /usr/lib/postgresql/14): "
        read PG_DIR

        # Check if the input is empty
        if [ -z "$PG_DIR" ]; then
            echo ""
            echo "Error: No path entered. Exiting script." 1>&2
            exit 1
        fi

        PG_CONFIG="${PG_DIR%/}/bin/pg_config"

        if [ ! -x "$PG_CONFIG" ]; then
            echo ""
            echo "Error: 'pg_config' could not be found in the provided directory." 1>&2
            exit 1
        fi

        # Extract the major version number from the PostgreSQL version and PG_DIR
        get_postgresql_details $plugin
    fi

    asset_name="steampipe_postgres_${plugin}.pg${PG_VERSION}.${target}"

    # Generate the URI for the FDW
    if [ "$version" = "latest" ]; then
        uri="https://github.com/turbot/steampipe-plugin-${plugin}/releases/latest/download/${asset_name}"
    else
        uri="https://github.com/turbot/steampipe-plugin-${plugin}/releases/download/${version}/${asset_name}"
    fi

    echo ""
    echo "Downloading ${asset_name}..."
    if ! curl --fail --location --progress-bar --output ${asset_name} "$uri"; then
        echo "Could not find version $version"
        exit 1
    fi

    # If the .gz file is expected to contain a tar archive then extract it
    tar -xzvf $asset_name

    # Remove the downloaded tar.gz file
    rm -f $asset_name

    echo ""
    echo "Download and extraction completed."
    echo ""
    echo "Installing steampipe_postgres_${plugin} in ${BOLD}$PG_DIR${NORMAL}..."
    echo ""
    # Get the name of the extracted directory
    ext_dir=$(echo $asset_name | sed 's/\.tar\.gz$//')
    cd $ext_dir

    # Get directories from pg_config
    LIBDIR=$($PG_CONFIG --pkglibdir)
    EXTDIR=$($PG_CONFIG --sharedir)/extension/

    # Copy the files to the PostgreSQL installation directory
    cp steampipe_postgres_${plugin}.so "$LIBDIR"
    cp steampipe_postgres_${plugin}.control "$EXTDIR"
    cp steampipe_postgres_${plugin}--1.0.sql "$EXTDIR"

    # Check if the files were copied correctly
    # NOTE(review): under `set -e` a failed cp exits the script before this
    # check runs, so the else branch below looks unreachable - confirm.
    if [ $? -eq 0 ]; then
        echo "Successfully installed steampipe_postgres_${plugin} extension!"
        echo ""
        echo "Files have been copied to:"
        echo "- Library directory: ${LIBDIR}"
        echo "- Extension directory: ${EXTDIR}"
        cd ../
        rm -rf $ext_dir
    else
        echo "Failed to install steampipe_postgres_${plugin} extension. Please check permissions and try again."
        exit 1
    fi
}
152 |
# Extract the PostgreSQL version and directory
# Sets PG_VERSION (major version), PG_DIR (installation root) and PLUGIN
# (from $1), prints the discovered details and validates the version.
get_postgresql_details() {
    PG_VERSION_FULL=$($PG_CONFIG --version)
    # e.g. "PostgreSQL 14.10" -> "14" (second word, before the first '.')
    PG_VERSION=$(echo $PG_VERSION_FULL | awk '{print $2}' | awk -F '.' '{print $1}')
    PG_DIR=$($PG_CONFIG --bindir)
    # strip trailing /bin to get the installation root
    PG_DIR=${PG_DIR%/bin}
    PLUGIN=$1

    echo ""
    echo "Discovered:"
    echo "- PostgreSQL version: ${BOLD}$PG_VERSION${NORMAL}"
    echo "- PostgreSQL location: ${BOLD}$PG_DIR${NORMAL}"
    echo "- Operating system: ${BOLD}$(uname -s)${NORMAL}"
    echo "- System architecture: ${BOLD}$(uname -m)${NORMAL}"
    echo ""
    check_postgresql_version
    echo "Based on the above, ${BOLD}steampipe_postgres_${PLUGIN}.pg${PG_VERSION}.${target}${NORMAL} will be downloaded, extracted and installed at: ${BOLD}$PG_DIR${NORMAL}"
}
171 |
# Validates that PG_VERSION is a supported major version (14 or 15).
# NOTE(review): with `set -e` in effect, the `return 1` below terminates
# the whole script (the caller does not guard this call) - confirm intended.
check_postgresql_version(){
    # Check if the PostgreSQL version is supported(either 14 or 15)
    if [ "$PG_VERSION" != "14" ] && [ "$PG_VERSION" != "15" ]; then
        echo "Error: PostgreSQL version $PG_VERSION is not supported. Only versions 14 and 15 are supported." >&2
        return 1
    fi
}
179 |
180 | # Call the main function to run the script
181 | main "$@"
182 |
--------------------------------------------------------------------------------
/scripts/upload_arm_asset.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# This script uploads the created binary to the draft release candidate.
# This is called from make release.

ARCH=$(uname -m)
# exit if the architecture is not arm64(darwin) or aarch64(linux)
if [[ "$ARCH" != "arm64" ]] && [[ "$ARCH" != "aarch64" ]]; then
  echo "Not an ARM64 system"
  exit
fi

# Must have these commands for the script to run
declare -a required_commands=("gh" "gzip" "postgres")

for required_command in "${required_commands[@]}"
do
  # command -v succeeds only when the command exists - simpler and clearer
  # than the original `| head -c1 | wc -c` character-count test
  if ! command -v "$required_command" >/dev/null; then
    echo "$required_command is required for this script to run."
    # "exit -1" is out of range for POSIX exit and non-portable; use 1
    exit 1
  fi
done

# Zip, rename and upload the binary; $1 is the release tag to upload to
gzip build-Darwin/steampipe_postgres_fdw.so
mv build-Darwin/steampipe_postgres_fdw.so.gz build-Darwin/steampipe_postgres_fdw.so.darwin_arm64.gz
gh release upload "$1" build-Darwin/steampipe_postgres_fdw.so.darwin_arm64.gz
28 |
--------------------------------------------------------------------------------
/settings/keys.go:
--------------------------------------------------------------------------------
1 | package settings
2 |
// HubSettingKey identifies a hub cache setting which can be applied at runtime.
type HubSettingKey string

const (
	// SettingKeyCacheEnabled enables/disables the client-side cache.
	SettingKeyCacheEnabled HubSettingKey = "cache"
	// SettingKeyCacheTtlOverride overrides the cache TTL.
	SettingKeyCacheTtlOverride HubSettingKey = "cache_ttl"
	// SettingKeyCacheClearTimeOverride records a cache clear time.
	SettingKeyCacheClearTimeOverride HubSettingKey = "cache_clear_time"
	// SettingKeyConnectionCacheClear clears the cache for a single connection.
	SettingKeyConnectionCacheClear HubSettingKey = "connection_cache_clear"
)
11 |
--------------------------------------------------------------------------------
/settings/setter_func.go:
--------------------------------------------------------------------------------
1 | package settings
2 |
3 | import (
4 | "encoding/json"
5 | "log"
6 | "time"
7 | )
8 |
9 | func (s *HubCacheSettings) SetEnabled(jsonValue string) error {
10 | log.Printf("[TRACE] SetEnabled %s", jsonValue)
11 | var enable bool
12 | if err := json.Unmarshal([]byte(jsonValue), &enable); err != nil {
13 | return err
14 | }
15 | s.ClientCacheEnabled = &enable
16 | return nil
17 | }
18 |
19 | func (s *HubCacheSettings) SetTtl(jsonValue string) error {
20 | log.Printf("[TRACE] SetTtl %s", jsonValue)
21 | var enable int
22 | if err := json.Unmarshal([]byte(jsonValue), &enable); err != nil {
23 | return err
24 | }
25 | ttl := time.Duration(enable) * time.Second
26 | s.Ttl = &ttl
27 | return nil
28 | }
29 |
// SetClearTime records the current time as the cache clear time.
// The JSON setting value is ignored.
func (s *HubCacheSettings) SetClearTime(_ string) error {
	log.Printf("[TRACE] SetClearTime")
	s.ClearTime = time.Now()
	return nil
}
35 |
--------------------------------------------------------------------------------
/settings/settings.go:
--------------------------------------------------------------------------------
1 | package settings
2 |
3 | import (
4 | "log"
5 | "time"
6 | )
7 |
// setterFunc applies a single JSON-encoded setting value.
type setterFunc func(string) error

// HubCacheSettings holds the hub's runtime cache configuration,
// applied via Apply from key/value settings.
type HubCacheSettings struct {
	// whether caching is enabled on the server side
	ServerCacheEnabled bool
	// optional client-side enabled override (nil = no override)
	ClientCacheEnabled *bool
	// optional TTL override (nil = no override)
	Ttl *time.Duration
	// time of the most recent cache clear request
	ClearTime time.Time

	// a map of handler function which map settings key to setter functions
	// for individual properties
	setters map[HubSettingKey]setterFunc
}
20 |
21 | func NewCacheSettings(clearConnectionCache func(string) error, serverCacheEnabled bool) *HubCacheSettings {
22 | hs := &HubCacheSettings{
23 | ServerCacheEnabled: serverCacheEnabled,
24 | }
25 | hs.setters = map[HubSettingKey]setterFunc{
26 | SettingKeyCacheEnabled: hs.SetEnabled,
27 | SettingKeyCacheTtlOverride: hs.SetTtl,
28 | SettingKeyCacheClearTimeOverride: hs.SetClearTime,
29 | SettingKeyConnectionCacheClear: clearConnectionCache,
30 | }
31 | return hs
32 | }
33 |
34 | func (s *HubCacheSettings) Apply(key string, jsonValue string) error {
35 | if applySetting, found := s.setters[HubSettingKey(key)]; found {
36 | return applySetting(jsonValue)
37 | }
38 | log.Println("[WARN] trying to apply unknown setting:", key, "=>", jsonValue)
39 | return nil
40 | }
41 |
--------------------------------------------------------------------------------
/sql/sql.go:
--------------------------------------------------------------------------------
1 | package sql
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
8 | "github.com/turbot/steampipe/pkg/db/db_common"
9 | )
10 |
11 | func GetSQLForTable(table string, tableSchema *proto.TableSchema, localSchema string, serverName string) (string, error) {
12 | // escape everything
13 | serverName = db_common.PgEscapeName(serverName)
14 | localSchema = db_common.PgEscapeName(localSchema)
15 | escapedTableName := db_common.PgEscapeName(table)
16 | // we must escape differently for the option
17 | escapedTableString := db_common.PgEscapeString(table)
18 |
19 | var columnsString []string
20 | for i, c := range tableSchema.Columns {
21 | column := db_common.PgEscapeName(c.Name)
22 | t, err := sqlTypeForColumnType(c.Type)
23 | if err != nil {
24 | return "", err
25 | }
26 | trailing := ","
27 | if i+1 == len(tableSchema.Columns) {
28 | trailing = ""
29 | }
30 |
31 | columnsString = append(columnsString, fmt.Sprintf("%s %s%s", column, t, trailing))
32 | }
33 |
34 | sql := fmt.Sprintf(`CREATE FOREIGN TABLE IF NOT EXISTS %s.%s
35 | (
36 | %s
37 | )
38 | SERVER %s OPTIONS (table %s)`,
39 | localSchema,
40 | escapedTableName,
41 | strings.Join(columnsString, "\n "),
42 | serverName,
43 | escapedTableString)
44 |
45 | return sql, nil
46 | }
47 |
48 | func sqlTypeForColumnType(columnType proto.ColumnType) (string, error) {
49 | switch columnType {
50 | case proto.ColumnType_BOOL:
51 | return "bool", nil
52 | case proto.ColumnType_INT:
53 | return "bigint", nil
54 | case proto.ColumnType_DOUBLE:
55 | return "double precision", nil
56 | case proto.ColumnType_STRING:
57 | return "text", nil
58 | case proto.ColumnType_IPADDR, proto.ColumnType_INET:
59 | return "inet", nil
60 | case proto.ColumnType_CIDR:
61 | return "cidr", nil
62 | case proto.ColumnType_JSON:
63 | return "jsonb", nil
64 | case proto.ColumnType_DATETIME, proto.ColumnType_TIMESTAMP:
65 | return "timestamptz", nil
66 | case proto.ColumnType_LTREE:
67 | return "ltree", nil
68 | }
69 | return "", fmt.Errorf("unsupported column type %v", columnType)
70 |
71 | }
72 |
--------------------------------------------------------------------------------
/sql/sql_test.go:
--------------------------------------------------------------------------------
1 | package sql
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
7 | )
8 |
// getSQLForTableTest describes one GetSQLForTable test case; an expected
// value of "ERROR" means an error is expected.
type getSQLForTableTest struct {
	table       string
	tableSchema *proto.TableSchema
	localSchema string
	serverName  string
	expected    string
}

// NOTE(review): these expected strings appear stale - GetSQLForTable emits
// upper-case `CREATE FOREIGN TABLE IF NOT EXISTS ...` / `SERVER ...`,
// whereas the expectations below use lower-case `create foreign table` /
// `server` with no IF NOT EXISTS. Confirm and regenerate the expectations.
var testCasesgetSQLForTable = map[string]getSQLForTableTest{
	"no descriptions": {
		table: "t1",
		tableSchema: &proto.TableSchema{
			Columns: []*proto.ColumnDefinition{
				{
					Name: "c1",
					Type: proto.ColumnType_STRING,
				},
				{
					Name: "c2",
					Type: proto.ColumnType_STRING,
				},
			},
		},
		localSchema: "aws",
		serverName:  "steampipe",
		expected: `create foreign table "aws"."t1"
(
  "c1" text,
  "c2" text
)
server "steampipe" OPTIONS (table $steampipe_escape$t1$steampipe_escape$)`},
	"quotes in names": {
		table: "t1",
		tableSchema: &proto.TableSchema{
			Columns: []*proto.ColumnDefinition{
				{
					Name: `"c1"`,
					Type: proto.ColumnType_STRING,
				},
				{
					Name: `c2 "is" partially quoted`,
					Type: proto.ColumnType_STRING,
				},
			},
		},
		localSchema: "aws",
		serverName:  "steampipe",
		expected: `create foreign table "aws"."t1"
(
  """c1""" text,
  "c2 ""is"" partially quoted" text
)
server "steampipe" OPTIONS (table $steampipe_escape$t1$steampipe_escape$)`},
}
63 |
64 | func TestGetSQLForTable(t *testing.T) {
65 | for name, test := range testCasesgetSQLForTable {
66 |
67 | result, err := GetSQLForTable(test.table, test.tableSchema, test.localSchema, test.serverName)
68 | if err != nil {
69 | if test.expected != "ERROR" {
70 | t.Errorf(`Test: '%s'' FAILED : unexpected error %v`, name, err)
71 | }
72 | continue
73 | }
74 |
75 | if test.expected != result {
76 | t.Errorf("Test: '%s' FAILED : expected \n%s\ngot\n%s", name, test.expected, result)
77 | }
78 | }
79 | }
80 |
81 | /*
82 | test cases
83 | invalid table
84 | invalid columns
85 | list error
86 | hydrate error
87 |
88 | */
89 |
--------------------------------------------------------------------------------
/standalone_setup/install_standalone.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Dev helper: copies the locally built aws FDW artifacts into the local
# steampipe-embedded postgres installation.
# NOTE(review): paths are hard-coded to one dev machine and postgres
# version - parameterize before wider use.
# (fixed: added missing shebang; the first message said "copied" before
# any copy had happened)
echo copying 3 files
cp /Users/kai/Dev/github/turbot/steampipe-postgres-fdw/build-Darwin/steampipe_postgres_fdw_aws--1.0.sql ~/.steampipe/db/14.2.0/postgres/share/postgresql/extension/; \
cp /Users/kai/Dev/github/turbot/steampipe-postgres-fdw/build-Darwin/steampipe_postgres_fdw_aws.control ~/.steampipe/db/14.2.0/postgres/share/postgresql/extension/; \
cp /Users/kai/Dev/github/turbot/steampipe-postgres-fdw/build-Darwin/steampipe_postgres_fdw_aws.so ~/.steampipe/db/14.2.0/postgres/lib/postgresql/; \
echo copied 3 files
--------------------------------------------------------------------------------
/standalone_setup/setup.sql:
--------------------------------------------------------------------------------
-- Recreates the aws FDW extension, foreign server and schema from scratch,
-- then imports the foreign schema with a sample plugin config.
DROP EXTENSION IF EXISTS steampipe_postgres_fdw_aws CASCADE;
CREATE EXTENSION IF NOT EXISTS steampipe_postgres_fdw_aws;
DROP SERVER IF EXISTS steampipe_aws;
-- fixed: the statement terminator was missing here, which made the
-- following DROP SCHEMA part of the CREATE SERVER statement and fail
CREATE SERVER steampipe_aws FOREIGN DATA WRAPPER steampipe_postgres_fdw_aws;
DROP SCHEMA IF EXISTS aws CASCADE;
CREATE SCHEMA aws;
COMMENT ON SCHEMA aws IS 'steampipe aws fdw';
GRANT USAGE ON SCHEMA aws TO steampipe_users;
GRANT SELECT ON ALL TABLES IN SCHEMA aws TO steampipe_users;
IMPORT FOREIGN SCHEMA aws FROM SERVER steampipe_aws INTO aws OPTIONS(config 'profile="morales-aaa"');
11 |
--------------------------------------------------------------------------------
/templates/fdw/fdw_handlers.h.tmpl:
--------------------------------------------------------------------------------
// Generated by cgo from fdw.go. Included here so our functions are
// defined and available.
#include "fmgr.h"

// Planner callbacks; declared static so they are local to the extension's
// translation unit. The goFdw* functions referenced below come from the
// cgo-exported Go implementations in fdw.go.
static void fdwGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid);
static void fdwGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid);
static ForeignScan *fdwGetForeignPlan(
  PlannerInfo *root,
  RelOptInfo *baserel,
  Oid foreigntableid,
  ForeignPath *best_path,
  List *tlist,
  List *scan_clauses,
  Plan *outer_plan
);

// Define our handling functions with Postgres, following the V1 protocol.
PG_FUNCTION_INFO_V1(steampipe_{{.Plugin}}_fdw_handler);
PG_FUNCTION_INFO_V1(steampipe_{{.Plugin}}_fdw_validator);


// Handler: builds and returns the FdwRoutine, wiring the planner, scan,
// schema-import and insert callbacks to their implementations.
Datum steampipe_{{.Plugin}}_fdw_handler(PG_FUNCTION_ARGS) {
  FdwRoutine *fdw_routine = makeNode(FdwRoutine);
  fdw_routine->GetForeignRelSize = fdwGetForeignRelSize;
  fdw_routine->GetForeignPaths = fdwGetForeignPaths;
  fdw_routine->GetForeignPlan = fdwGetForeignPlan;
  fdw_routine->ExplainForeignScan = goFdwExplainForeignScan;
  fdw_routine->BeginForeignScan = goFdwBeginForeignScan;
  fdw_routine->IterateForeignScan = goFdwIterateForeignScan;
  fdw_routine->ReScanForeignScan = goFdwReScanForeignScan;
  fdw_routine->EndForeignScan = goFdwEndForeignScan;
  fdw_routine->ImportForeignSchema = goFdwImportForeignSchema;
  fdw_routine->ExecForeignInsert = goFdwExecForeignInsert;

  PG_RETURN_POINTER(fdw_routine);
}

// TODO - Use this to validate the arguments passed to the FDW
// https://github.com/laurenz/oracle_fdw/blob/9d7b5c331b0c8851c71f410f77b41c1a83c89ece/oracle_fdw.c#L420
Datum steampipe_{{.Plugin}}_fdw_validator(PG_FUNCTION_ARGS) {
  Oid catalog = PG_GETARG_OID(1);
  List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
  goFdwValidate(catalog, options_list);
  PG_RETURN_VOID();
}
--------------------------------------------------------------------------------
/templates/fdw/steampipe_postgres_fdw--1.0.sql.tmpl:
--------------------------------------------------------------------------------
/* fdw-c/steampipe_postgres_fdw--1.0.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION fdw" to load this extension. \quit

-- handler: returns the FdwRoutine implemented by the extension module
CREATE FUNCTION steampipe_{{.Plugin}}_fdw_handler()
RETURNS fdw_handler
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- validator: checks the options supplied to the FDW's objects
CREATE FUNCTION steampipe_{{.Plugin}}_fdw_validator(text[], oid)
RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;

-- the wrapper object tying handler and validator together
CREATE FOREIGN DATA WRAPPER steampipe_postgres_{{.Plugin}}
HANDLER steampipe_{{.Plugin}}_fdw_handler
VALIDATOR steampipe_{{.Plugin}}_fdw_validator;
19 |
--------------------------------------------------------------------------------
/templates/fdw/steampipe_postgres_fdw.control.tmpl:
--------------------------------------------------------------------------------
1 | # fdw extension
2 | comment = 'Steampipe {{.Plugin}} Foreign Data Wrapper'
3 | default_version = '1.0'
4 | module_pathname = '$libdir/steampipe_postgres_{{.Plugin}}.so'
5 | relocatable = true
6 |
--------------------------------------------------------------------------------
/templates/hub/hub_create.go.tmpl:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | "github.com/turbot/steampipe-plugin-sdk/v5/logging"
5 | "sync"
6 | )
7 |
8 | // global hub instance
9 | var hubSingleton Hub
10 |
11 | // mutex protecting hub creation
12 | var hubMux sync.Mutex
13 |
14 | // GetHub returns a hub singleton
15 | func GetHub() Hub {
16 | // lock access to singleton
17 | hubMux.Lock()
18 | defer hubMux.Unlock()
19 | return hubSingleton
20 |
21 | }
22 |
23 | // create the hub
24 | func CreateHub() error {
25 | logging.LogTime("GetHub start")
26 |
27 | // lock access to singleton
28 | hubMux.Lock()
29 | defer hubMux.Unlock()
30 |
31 | var err error
32 | // TODO configure build to select between local and remote hub
33 | // TODO get connection config from import foreign schema options
34 |
35 | hubSingleton, err = newLocalHub()
36 | if err != nil {
37 | return err
38 | }
39 | logging.LogTime("GetHub end")
40 | return err
41 | }
42 |
--------------------------------------------------------------------------------
/templates/hub/hub_local_plugin.go.tmpl:
--------------------------------------------------------------------------------
1 | package hub
2 |
3 | import (
4 | {{.Plugin}} "{{.PluginGithubUrl}}/{{.Plugin}}"
5 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
6 | )
7 |
// pluginAlias is the name of the embedded plugin - injected by the
// code generator from the template's .Plugin value.
var pluginAlias = "{{.Plugin}}"

// getPluginFunc returns the embedded plugin's entry-point function.
func getPluginFunc() plugin.PluginFunc {
	return {{.Plugin}}.Plugin
}
--------------------------------------------------------------------------------
/templates/scripts/README.md.tmpl:
--------------------------------------------------------------------------------
1 | # Installation Guide for Steampipe Postgres {{.Plugin}} FDW
2 |
3 | This README provides instructions on how to set up the Steampipe Postgres {{.Plugin}} Foreign Data Wrapper (FDW) extension.
4 |
5 | ## Prerequisites
6 |
7 | Before proceeding with the installation, ensure that you have:
8 |
9 | - Installed PostgreSQL on your system.
10 | - Obtained the necessary permissions to create extensions, servers, and schemas in your PostgreSQL database.
11 |
12 | ## Installation Steps
13 |
14 | 1. Run the `install.sh` script to copy the binary files (`.so`, `.control`, and `.sql`) into your PostgreSQL installation directories.
15 |
16 | ```bash
17 | ./install.sh
18 | ```
19 |
20 | 2. Connect to your PostgreSQL database using your preferred method (e.g., psql command line tool).
21 |
22 | 3. Execute the following SQL commands to set up the extension:
23 |
24 | ```sql
25 | -- Drop the extension if it already exists
26 | DROP EXTENSION IF EXISTS steampipe_postgres_{{.Plugin}} CASCADE;
27 |
28 | -- Create the extension
29 | CREATE EXTENSION IF NOT EXISTS steampipe_postgres_{{.Plugin}};
30 |
31 | -- Drop the server if it already exists
32 | DROP SERVER IF EXISTS steampipe_{{.Plugin}};
33 |
34 | -- Create the foreign server
-- To pass configuration, set it as an OPTION. eg: CREATE SERVER steampipe_{{.Plugin}} FOREIGN DATA WRAPPER steampipe_postgres_{{.Plugin}} OPTIONS (config 'your_config_here');
36 | CREATE SERVER steampipe_{{.Plugin}} FOREIGN DATA WRAPPER steampipe_postgres_{{.Plugin}};
37 |
38 | -- Drop the schema if it already exists
39 | DROP SCHEMA IF EXISTS {{.Plugin}} CASCADE;
40 |
41 | -- Create the schema
42 | CREATE SCHEMA {{.Plugin}};
43 |
44 | -- Add a comment to the schema
45 | COMMENT ON SCHEMA {{.Plugin}} IS 'steampipe {{.Plugin}} fdw';
46 |
47 | -- Import the foreign schema
48 | IMPORT FOREIGN SCHEMA {{.Plugin}} FROM SERVER steampipe_{{.Plugin}} INTO {{.Plugin}};
49 | ```
50 |
51 | ## Post-Installation
52 |
53 | After the installation, you should be able to use the Steampipe Postgres {{.Plugin}} FDW to query {{.Plugin}} data directly from your PostgreSQL database.
54 |
For more information on using the FDW, refer to the Steampipe Hub documentation: https://hub.steampipe.io/plugins/turbot/{{.Plugin}}.
56 |
--------------------------------------------------------------------------------
/templates/scripts/install.sh.tmpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # Desired PostgreSQL version
6 | PG_VERSION="{{.PgVersion}}"
7 | desired_pg_version=$(echo "$PG_VERSION" | sed 's/[^0-9]*\([0-9]\{1,\}\.[0-9]\{1,\}\).*/\1/' | cut -d'.' -f1)
8 |
# main: locate a pg_config matching the desired major version, show what was
# found, and hand off to confirm_and_install. Loops until the user installs
# (confirm_and_install exits 0) or a second version mismatch aborts (exit 1).
# NOTE(review): declining the install prompt re-enters the loop and prompts
# again - there is no "quit without installing" path; confirm this is intended.
main() {
  # ANSI escape code variables (used by display_discovered_details)
  BOLD=$(tput bold)
  NORMAL=$(tput sgr0)

  attempt_count=0
  while true; do
    PG_CONFIG=$(command -v pg_config)

    # Ask for an explicit path when pg_config is not on PATH, or on any
    # retry after a version mismatch.
    if [ -z "$PG_CONFIG" ] || [ $attempt_count -gt 0 ]; then
      request_pg_config_path
    fi

    # Sets PG_VERSION, PG_VERSION_FULL and PG_DIR from $PG_CONFIG.
    get_postgresql_details

    if [ "$PG_VERSION" != "$desired_pg_version" ]; then
      echo "Warning: Your pg_config points to version $PG_VERSION, but the desired version is $desired_pg_version." >&2
      display_discovered_details

      # A second mismatch is fatal: the FDW binary is built for exactly one
      # PostgreSQL major version.
      if [ $attempt_count -ge 1 ]; then
        echo "Error: The downloaded/built Postgres FDW is built for version $desired_pg_version. Your pg_config points to version $PG_VERSION."
        exit 1
      fi

      attempt_count=$((attempt_count + 1))
      continue
    fi

    display_discovered_details
    confirm_and_install
  done
}
41 |
# request_pg_config_path: prompt for the PostgreSQL installation root and
# derive PG_CONFIG from it; exits with an error if pg_config is not there.
request_pg_config_path() {
  echo "Please enter the full path to your PostgreSQL $desired_pg_version installation directory (e.g., /usr/lib/postgresql/$desired_pg_version): "
  read PG_DIR

  # Strip a trailing slash (if any) before appending bin/pg_config.
  PG_CONFIG="${PG_DIR%/}/bin/pg_config"

  if ! [ -x "$PG_CONFIG" ]; then
    echo "Error: 'pg_config' could not be found in the provided directory." >&2
    exit 1
  fi
}
52 |
# get_postgresql_details: populate PG_VERSION_FULL, PG_VERSION (major number
# only) and PG_DIR (installation root) from the current $PG_CONFIG.
get_postgresql_details() {
  # Full banner, e.g. "PostgreSQL 15.4".
  PG_VERSION_FULL=$("$PG_CONFIG" --version)
  # Reduce the banner to the major version number.
  PG_VERSION=$(printf '%s\n' "$PG_VERSION_FULL" | sed 's/[^0-9]*\([0-9]\{1,\}\.[0-9]\{1,\}\).*/\1/' | cut -d'.' -f1)
  # Installation root = --bindir with the trailing /bin removed.
  PG_DIR=$("$PG_CONFIG" --bindir)
  PG_DIR="${PG_DIR%/bin}"
}
59 |
# display_discovered_details: summarize the detected PostgreSQL version and
# location so the user can review them before confirming the install.
display_discovered_details() {
  printf '\nDiscovered:\n'
  printf -- '- PostgreSQL version: %s\n' "${BOLD}${PG_VERSION}${NORMAL}"
  printf -- '- PostgreSQL location: %s\n' "${BOLD}${PG_DIR}${NORMAL}"
  printf '\n'
}
67 |
# confirm_and_install: ask for confirmation, then copy the FDW artifacts into
# the PostgreSQL library and extension directories. Exits 0 on success, 1 on
# copy failure; returns (without exiting) if the user declines.
confirm_and_install() {
  printf "Install Steampipe PostgreSQL FDW for version $PG_VERSION in $PG_DIR? (Y/n): "
  read REPLY
  echo

  # Default (empty reply) is "yes".
  if [ "$REPLY" = "y" ] || [ "$REPLY" = "Y" ] || [ -z "$REPLY" ]; then
    echo "Installing..."

    # Get directories from pg_config
    LIBDIR=$("$PG_DIR/bin/pg_config" --pkglibdir)
    EXTDIR=$("$PG_DIR/bin/pg_config" --sharedir)/extension/

    # Run the copies inside the `if` condition so a failure reaches the error
    # branch: with `set -e` a bare failing `cp` would abort the script before
    # the old `[ $? -eq 0 ]` check (which also only tested the last cp).
    if cp steampipe_postgres_{{.Plugin}}.so "$LIBDIR" &&
       cp steampipe_postgres_{{.Plugin}}.control "$EXTDIR" &&
       cp steampipe_postgres_{{.Plugin}}--1.0.sql "$EXTDIR"; then
      echo ""
      echo "Successfully installed steampipe_postgres_{{.Plugin}} extension!"
      echo ""
      echo "Files have been copied to:"
      echo "- Library directory: ${LIBDIR}"
      echo "- Extension directory: ${EXTDIR}"
    else
      echo "Failed to install steampipe_postgres_{{.Plugin}} extension. Please check permissions and try again."
      exit 1
    fi
    exit 0
  else
    echo ""
  fi
}
102 |
103 | # Call the main function
104 | main "$@"
--------------------------------------------------------------------------------
/types/pathkeys.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "log"
5 | "slices"
6 | "sort"
7 | "strings"
8 |
9 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
10 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
11 | )
12 |
// Base costs assigned to generated path keys. Required/any-of key columns get
// a very cheap cost so the planner strongly prefers paths that supply those
// quals; optional key columns get a higher (but still attractive) cost.
const requiredKeyColumnBaseCost = 1
const optionalKeyColumnBaseCost = 100

// keyColumnOnlyCostMultiplier scales the cost of a path that contains only
// the key columns (paths that include additional columns are cheaper still).
const keyColumnOnlyCostMultiplier = 2

// PathKey is a set of columns which, when provided together as quals, enable
// a cheaper scan, along with the estimated row cost for that path.
type PathKey struct {
	ColumnNames []string
	Rows        Cost
}
21 |
22 | func (p *PathKey) Equals(other PathKey) bool {
23 | sort.Strings(p.ColumnNames)
24 | sort.Strings(other.ColumnNames)
25 | return strings.Join(p.ColumnNames, ",") == strings.Join(other.ColumnNames, ",") &&
26 | p.Rows == other.Rows
27 | }
28 |
29 | func KeyColumnsToPathKeys(keyColumns []*proto.KeyColumn, allColumns []string) []PathKey {
30 | // get the possible paths and cost for the key columns
31 | columnPaths, baseCost := keyColumnsToColumnPath(keyColumns)
32 | // remove key columns from allColumns
33 | allColumnsExceptKeyColumns := removeKeyColumnsFromAllColumns(keyColumns, allColumns)
34 | // now convert the paths to PathKeys
35 | return columnPathsToPathKeys(columnPaths, allColumnsExceptKeyColumns, baseCost)
36 | }
37 |
38 | func removeKeyColumnsFromAllColumns(keyColumns []*proto.KeyColumn, allColumns []string) []string {
39 | var allColumnsExceptKeyColumns = make([]string, len(allColumns)-len(keyColumns))
40 | idx := 0
41 | for _, c := range allColumns {
42 | if !keyColumnArrayContainsColumn(keyColumns, c) {
43 | allColumnsExceptKeyColumns[idx] = c
44 | idx++
45 | }
46 | }
47 | return allColumnsExceptKeyColumns
48 | }
49 |
50 | func keyColumnArrayContainsColumn(keyColumns []*proto.KeyColumn, c string) bool {
51 | for _, k := range keyColumns {
52 | if k.Name == c {
53 | return true
54 | }
55 | }
56 | return false
57 | }
58 |
// keyColumnsToColumnPath returns a list of all the column sets to use in path
// keys, together with the base cost to assign to those paths.
//
// Paths are built as follows:
//   - all Required columns form a single combined path
//   - each AnyOf column forms its own single-column path
//   - each Optional column is combined with the required path and with each
//     any-of path; when there are neither required nor any-of columns, each
//     optional column also forms a path on its own
//
// The base cost is cheap (requiredKeyColumnBaseCost) when required or any-of
// columns exist, encouraging the planner to supply those quals.
func keyColumnsToColumnPath(keyColumns []*proto.KeyColumn) (columnPaths [][]string, baseCost Cost) {
	if len(keyColumns) == 0 {
		return
	}

	// collect required and optional columns - we build a single path for all of them
	var requiredKeys, optionalKeys []string
	// an array of paths for any-of keys - each path will have a single element (the any-of key)
	var anyOfKeys [][]string
	for _, c := range keyColumns {
		if c.Require == plugin.Required {
			requiredKeys = append(requiredKeys, c.Name)
		} else if c.Require == plugin.Optional {
			optionalKeys = append(optionalKeys, c.Name)
		} else if c.Require == plugin.AnyOf {
			anyOfKeys = append(anyOfKeys, []string{c.Name})
		}
	}

	if len(requiredKeys) > 0 {
		// add required keys as a single path
		columnPaths = append(columnPaths, requiredKeys)
	}
	if len(anyOfKeys) > 0 {
		// add a separate path for _each_ any-of key
		columnPaths = append(columnPaths, anyOfKeys...)
	}
	// if we have any column paths (meaning we have either required or any-of columns),
	// we have required keys so make the base cost CHEAP
	if len(columnPaths) > 0 {
		baseCost = requiredKeyColumnBaseCost
	} else {
		baseCost = optionalKeyColumnBaseCost
	}
	// if there are optional keys, add:
	// - a path with required keys and each optional key
	// - a path with each any-of key and each optional key
	for _, optional := range optionalKeys {
		// NOTE: append onto optional, NOT requiredKeys - otherwise we end up reusing the underlying array
		// and mutating values in columnPaths

		// if there are any required keys, build path from optional AND required
		if len(requiredKeys) > 0 {
			p := append([]string{optional}, requiredKeys...)
			columnPaths = append(columnPaths, p)
		}
		// if there are any anyOf keys, build path from optional AND each any-of key
		for _, a := range anyOfKeys {
			p := append([]string{optional}, a...)
			columnPaths = append(columnPaths, p)
		}
		// if there are no required keys or anyof keys, just create a path from the optional
		// (baseCost is only optionalKeyColumnBaseCost when columnPaths started empty)
		if baseCost == optionalKeyColumnBaseCost {
			columnPaths = append(columnPaths, []string{optional})
		}
	}

	return
}
119 |
120 | func LegacyKeyColumnsToPathKeys(requiredColumns, optionalColumns *proto.KeyColumnsSet, allColumns []string) []PathKey {
121 | requiredColumnSets := LegacyKeyColumnsToColumnPaths(requiredColumns)
122 | optionalColumnSets := LegacyKeyColumnsToColumnPaths(optionalColumns)
123 |
124 | if len(requiredColumnSets)+len(optionalColumnSets) == 0 {
125 | return nil
126 | }
127 |
128 | // if there are only optional, build paths based on those
129 | if len(requiredColumnSets) == 0 {
130 | return columnPathsToPathKeys(optionalColumnSets, allColumns, 1)
131 | }
132 |
133 | // otherwise build paths based just on required columns
134 | return columnPathsToPathKeys(requiredColumnSets, allColumns, 1)
135 | }
136 |
137 | // LegacyKeyColumnsToColumnPaths returns a list of all the column sets to use in path keys
138 | func LegacyKeyColumnsToColumnPaths(k *proto.KeyColumnsSet) [][]string {
139 | var res [][]string
140 | if k == nil {
141 | return res
142 | }
143 |
144 | // if a single key column is specified add it
145 | if k.Single != "" {
146 | res = append(res, []string{k.Single})
147 | }
148 | // if 'Any' key columns are specified, add them all separately
149 | for _, c := range k.Any {
150 | res = append(res, []string{c})
151 | }
152 | // if 'All' key columns are specified, add them as a single path
153 | if k.All != nil {
154 | res = append(res, k.All)
155 | }
156 | return res
157 | }
158 | func columnPathsToPathKeys(columnPaths [][]string, allColumns []string, baseCost Cost) []PathKey {
159 |
160 | var res []PathKey
161 |
162 | // generate path keys each column set
163 | for _, s := range columnPaths {
164 | // create a path for just the column path
165 | res = append(res, PathKey{
166 | ColumnNames: s,
167 | // make this cheap so the planner prefers to give us the qual
168 | Rows: baseCost * keyColumnOnlyCostMultiplier,
169 | })
170 | // also create paths for the columns path WITH each other column
171 | for _, c := range allColumns {
172 | if !slices.Contains(s, c) {
173 | // NOTE: create a new slice rather than appending onto s - to avoid clash between loop iterations
174 | columnNames := append([]string{c}, s...)
175 |
176 | res = append(res, PathKey{
177 | ColumnNames: columnNames,
178 | // make this even cheaper - prefer to include all quals which were provided
179 | Rows: baseCost,
180 | })
181 | }
182 | }
183 | }
184 |
185 | log.Printf("[TRACE] columnPathsToPathKeys %d column paths %d all columns, %d pathkeys", len(columnPaths), len(allColumns), len(res))
186 |
187 | return res
188 | }
189 |
--------------------------------------------------------------------------------
/types/pathkeys_test.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
7 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
8 | )
9 |
// keyColumnsToPathKeysTest is a single fixture for KeyColumnsToPathKeys:
// the key column definitions, the table's full column list, and the path
// keys expected back (compared index-by-index, so order matters).
type keyColumnsToPathKeysTest struct {
	keyColumns []*proto.KeyColumn
	allColumns []string
	expected   []PathKey
}

// testCasesKeyColumnsToPathKeys covers the combinations of Required,
// Optional and AnyOf key columns. Expected path keys follow the
// deterministic emission order of keyColumnsToColumnPath /
// columnPathsToPathKeys.
var testCasesKeyColumnsToPathKeys = map[string]keyColumnsToPathKeysTest{
	"single required": {
		allColumns: []string{"id", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
	"multiple required": {
		allColumns: []string{"id", "req1", "req2", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "req1",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "req2",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
	"multiple any of": {
		allColumns: []string{"anyof1", "anyof2", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "anyof1",
				Operators: []string{"="},
				Require:   plugin.AnyOf,
			},
			{
				Name:      "anyof2",
				Operators: []string{"="},
				Require:   plugin.AnyOf,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"anyof1"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"anyof2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
	"multiple any of and single optional": {
		allColumns: []string{"anyof1", "anyof2", "opt", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "anyof1",
				Operators: []string{"="},
				Require:   plugin.AnyOf,
			},
			{
				Name:      "anyof2",
				Operators: []string{"="},
				Require:   plugin.AnyOf,
			},
			{
				Name:      "opt",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"anyof1"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"anyof2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt", "anyof1"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt", "anyof2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
	"multiple any of and multiple optional": {
		allColumns: []string{"anyof1", "anyof2", "opt1", "opt2", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "anyof1",
				Operators: []string{"="},
				Require:   plugin.AnyOf,
			},
			{
				Name:      "anyof2",
				Operators: []string{"="},
				Require:   plugin.AnyOf,
			},
			{
				Name:      "opt1",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
			{
				Name:      "opt2",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"anyof1"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"anyof2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt1", "anyof1"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt1", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt1", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt1", "anyof2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt1", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt1", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt2", "anyof1"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt2", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt2", "anyof1"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt2", "anyof2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt2", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt2", "anyof2"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
	"single optional": {
		allColumns: []string{"id", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id"},
				Rows:        optionalKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id"},
				Rows:        optionalKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id"},
				Rows:        optionalKeyColumnBaseCost,
			},
		},
	},
	"multiple optional": {
		allColumns: []string{"id", "opt1", "opt2", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
			{
				Name:      "opt1",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
			{
				Name:      "opt2",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id"},
				Rows:        optionalKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id"},
				Rows:        optionalKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id"},
				Rows:        optionalKeyColumnBaseCost,
			}, {
				ColumnNames: []string{"opt1"},
				Rows:        optionalKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt1"},
				Rows:        optionalKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt1"},
				Rows:        optionalKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt2"},
				Rows:        optionalKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt2"},
				Rows:        optionalKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt2"},
				Rows:        optionalKeyColumnBaseCost,
			},
		},
	},
	"required and optional": {
		allColumns: []string{"id", "opt", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "opt",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id"},
				Rows:        requiredKeyColumnBaseCost,
			}, {
				ColumnNames: []string{"opt", "id"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt", "id"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt", "id"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
	"multiple required and single optional": {
		allColumns: []string{"id", "req1", "req2", "opt", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "req1",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "req2",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "opt",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},

	"multiple required and multiple optional": {
		allColumns: []string{"id", "req1", "req2", "opt1", "opt2", "opt3", "c1", "c2"},
		keyColumns: []*proto.KeyColumn{
			{
				Name:      "id",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "req1",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "req2",
				Operators: []string{"="},
				Require:   plugin.Required,
			},
			{
				Name:      "opt1",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
			{
				Name:      "opt2",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
			{
				Name:      "opt3",
				Operators: []string{"="},
				Require:   plugin.Optional,
			},
		},
		expected: []PathKey{
			{
				ColumnNames: []string{"id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt1", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt1", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt1", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt2", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt2", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt2", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"opt3", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost * keyColumnOnlyCostMultiplier,
			},
			{
				ColumnNames: []string{"c1", "opt3", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
			{
				ColumnNames: []string{"c2", "opt3", "id", "req1", "req2"},
				Rows:        requiredKeyColumnBaseCost,
			},
		},
	},
}
544 |
545 | func TestKeyColumnsToPathKeysTest(t *testing.T) {
546 | for name, test := range testCasesKeyColumnsToPathKeys {
547 | result := KeyColumnsToPathKeys(test.keyColumns, test.allColumns)
548 | if !pathKeyArraysEqual(result, test.expected) {
549 | t.Errorf("Test: '%s'' FAILED : expected \n%v\ngot \n%v", name, test.expected, result)
550 | }
551 | }
552 | }
553 |
554 | func pathKeyArraysEqual(l []PathKey, r []PathKey) bool {
555 | if len(l) != len(r) {
556 | return false
557 | }
558 | // check in both directions - inefficient but it's only a test
559 | for i, lkey := range l {
560 | eq := lkey.Equals(r[i])
561 | if !eq {
562 | return false
563 | }
564 | }
565 |
566 | return true
567 | }
568 |
--------------------------------------------------------------------------------
/types/types.go:
--------------------------------------------------------------------------------
1 | package types
2 |
// Options is a set of FDW options provided by user during table creation.
type Options map[string]string

// Relation describes a Postgres relation (foreign table) as seen by the FDW.
type Relation struct {
	ID        Oid        // Postgres object ID of the relation
	IsValid   bool       // whether this relation handle is valid
	Attr      *TupleDesc // tuple (row) descriptor for the relation
	Namespace string     // schema name the relation belongs to
}

// TupleDesc describes the shape of a tuple (row): its type and its columns.
type TupleDesc struct {
	TypeID  Oid // composite type OID of the tuple
	TypeMod int // Postgres type modifier
	//HasOid bool
	Attrs []Attr // columns
}

// Attr describes a single column of a relation.
type Attr struct {
	Name       string // column name
	Type       Oid    // column type OID
	Dimensions int    // number of array dimensions
	NotNull    bool   // column carries a NOT NULL constraint
	Dropped    bool   // column has been dropped
}

// Cost is an approximate cost of an operation. See Postgres docs for details.
type Cost float64

// Oid is a Postgres internal object ID.
type Oid uint

// RelSize holds planner size estimates for a relation.
type RelSize struct {
	Rows   int // estimated row count
	Width  int // estimated row width — presumably bytes; confirm against caller
	Tuples int // estimated tuple count
}
39 |
--------------------------------------------------------------------------------
/version/version.go:
--------------------------------------------------------------------------------
1 | // Package version :: The version package provides a location to set the release versions for all
2 | // packages to consume, without creating import cycles.
3 | //
4 | // This package should not import any other steampipe packages.
5 | package version
6 |
7 | import (
8 | "fmt"
9 |
10 | "github.com/Masterminds/semver/v3"
11 | )
12 |
// fdwVersion is the main version number that is being run at the moment.
var fdwVersion = "1.12.7"

// prerelease is a pre-release marker for the version. If this is "" (empty
// string) then it means that it is a final release. Otherwise, this is a
// pre-release such as "dev" (in development), "beta", "rc1", etc.
var prerelease = ""

// FdwVersion is an instance of semver.Version. This has the secondary
// benefit of verifying during tests and init time that our version is a
// proper semantic version, which should always be the case.
var FdwVersion *semver.Version

// VersionString is the full version string, including any prerelease suffix
// (populated in init).
var VersionString string
27 |
28 | func init() {
29 | VersionString = fdwVersion
30 | if prerelease != "" {
31 | VersionString = fmt.Sprintf("%s-%s", fdwVersion, prerelease)
32 | }
33 | FdwVersion = semver.MustParse(VersionString)
34 | }
35 |
--------------------------------------------------------------------------------