├── .github └── workflows │ ├── project_auto_add_issue.yml │ └── publish_docker.yml ├── Dockerfile ├── Jenkinsfile ├── LICENSE ├── Makefile ├── README.md ├── compose └── psconfig │ ├── archives.d │ └── .gitkeep │ ├── pscheduler-agent-logger.conf │ ├── pscheduler-agent.json │ ├── pscheduler.d │ └── .gitkeep │ └── transforms.d │ └── .gitkeep ├── docker-compose.systemd.yml ├── docker-compose.yml ├── postgresql ├── pg_hba.conf ├── postgresql.conf └── pscheduler-build-database ├── rsyslog ├── listen.conf ├── owamp-syslog.conf ├── python-pscheduler.conf ├── rsyslog └── rsyslog.conf ├── supervisord.conf ├── systemd └── Dockerfile └── utils └── psdocker /.github/workflows/project_auto_add_issue.yml: -------------------------------------------------------------------------------- 1 | name: Adds all new issues to project board 2 | 3 | on: 4 | issues: 5 | types: 6 | - opened 7 | 8 | jobs: 9 | add-to-project: 10 | name: Add issue to perfSONAR project 11 | runs-on: ubuntu-latest 12 | steps: 13 | # NOTE: "uses" cannot be a variable so name and version hard-coded here 14 | - uses: actions/add-to-project@v0.5.0 15 | with: 16 | project-url: ${{ vars.PROJECT_PS_URL }} 17 | github-token: ${{ secrets.PAT_PROJECT_PS_AUTO_ADD }} -------------------------------------------------------------------------------- /.github/workflows/publish_docker.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Multi-platform Docker Image Push 3 | 4 | ### 5 | # Description: 6 | # This workflow builds and pushes Docker images for multiple architectures using GitHub Actions. It builds both the supervisord and systemd images. There are quite a few non-obvious steps in this workflow, so here is a brief description of some of the important pieces: 7 | # - One way to build multi-arch images is to use QEMU, which emulates the target architecture on a host machine of a different architecture. This does not work consistently in GitHub Actions, so we use a different approach: we build the images on the target architecture using a different runner for each architecture. This is done using the 'runs-on' property in the matrix and a runner image of the target architecture. 8 | # - Because we build each image on a different runner, we cannot push them all to the same tag, as they would overwrite each other. Instead, on the initial build we have to include the architecture in the tag name. 9 | # - We ultimately want a single tag that users can pull independent of architecture. The way Docker publishes an image with multiple architectures is to create a manifest file. This is a special type of image that contains references to other images and information about which platform each referenced image supports. The manifest is created in a separate job after the images are built and pushed, using the docker manifest command. 10 | # 11 | # See the comments in the remainder of the file for more details on each step.
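# - As a concrete illustration (the tag names here are only an example, derived from the registry/image settings below): a push to
#   the master branch has the build job push ghcr.io/perfsonar/testpoint:latest-amd64 and ghcr.io/perfsonar/testpoint:latest-arm64,
#   and the manifest job then combines them into a single multi-arch tag with roughly:
#       docker manifest create ghcr.io/perfsonar/testpoint:latest ghcr.io/perfsonar/testpoint:latest-amd64 ghcr.io/perfsonar/testpoint:latest-arm64
#       docker manifest push ghcr.io/perfsonar/testpoint:latest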
12 | ### 13 | 14 | ## 15 | # Controls when the workflow will run 16 | on: 17 | push: 18 | # Run on pushes to any branch 19 | branches: [ '*' ] 20 | # Run when a tag starting with 'v' is pushed 21 | tags: [ 'v*.*.*' ] 22 | pull_request: 23 | # Run on pull requests to the master branch 24 | branches: [ 'master' ] 25 | # Allows you to run this workflow manually from the Actions tab 26 | workflow_dispatch: 27 | 28 | ## 29 | # Global variables 30 | env: 31 | REGISTRY: ghcr.io 32 | # github.repository as / 33 | IMAGE_BASE: 'perfsonar/testpoint' 34 | BUILD_DIR: . 35 | 36 | ## 37 | # The jobs to run that build and push the Docker images 38 | jobs: 39 | # build: This job builds the supervisord and systemd images for multiple architectures. 40 | # It pushes the images to the registry with separate tags for each architecture. 41 | build: 42 | # A single dimensional matrix that loops through the different platforms. 43 | strategy: 44 | matrix: 45 | platform: [ linux/amd64, linux/arm64 ] 46 | # This 'include' section keeps the matrix single dimensional, but allows us to set additional properties for each item 47 | include: 48 | - platform: linux/amd64 49 | runs_on: ubuntu-latest 50 | suffix_tag: amd64 51 | - platform: linux/arm64 52 | runs_on: ubuntu-24.04-arm 53 | suffix_tag: arm64 54 | # Runs-on set using a matrix variable since each architecture needs to run on an image using the same architecture as the target platform 55 | runs-on: ${{ matrix.runs_on }} 56 | 57 | # Steps represent a sequence of tasks that will be executed as part of the job 58 | steps: 59 | # Checkout the code from this repository 60 | - uses: actions/checkout@v3 61 | # Setup docker buildx in this container 62 | - name: Setup Docker Buildx 63 | uses: docker/setup-buildx-action@v3 64 | 65 | # Login against a Docker registry except on PR 66 | - name: Log into registry ${{ env.REGISTRY }} 67 | if: github.event_name != 'pull_request' 68 | uses: docker/login-action@v2 69 | with: 70 | registry: ${{ env.REGISTRY }} 71 | username: ${{ github.actor }} 72 | password: ${{ secrets.GITHUB_TOKEN }} 73 | 74 | # Extract Docker metadata to be used in supervisord image. This primarily calculates the image tags and labels based on branch name, tag name, etc 75 | # Note that the tag at this phase includes the architecture name so that different architectures don't collide. 76 | - name: Extract Docker metadata 77 | id: meta 78 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 79 | with: 80 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_BASE }} 81 | flavor: | 82 | latest=false 83 | tags: | 84 | type=schedule,suffix=-${{ matrix.suffix_tag }} 85 | type=ref,event=branch,suffix=-${{ matrix.suffix_tag }} 86 | type=ref,event=tag,suffix=-${{ matrix.suffix_tag }} 87 | type=ref,event=pr,suffix=-${{ matrix.suffix_tag }} 88 | type=raw,value=latest-${{ matrix.suffix_tag }},enable=${{ github.ref == format('refs/heads/{0}', 'master') }} 89 | 90 | # Extract Docker metadata to be used in systemd image. This primarily calculates the image tags and labels based on branch name, tag name, etc 91 | # Note that the tag at this phase includes the architecture name so that different architectures don't collide. 
92 | - name: Extract Systemd Docker metadata 93 | id: metasysd 94 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 95 | with: 96 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_BASE }} 97 | flavor: | 98 | latest=false 99 | tags: | 100 | type=schedule,suffix=-systemd-${{ matrix.suffix_tag }} 101 | type=ref,event=branch,suffix=-systemd-${{ matrix.suffix_tag }} 102 | type=ref,event=tag,suffix=-systemd-${{ matrix.suffix_tag }} 103 | type=ref,event=pr,suffix=-systemd-${{ matrix.suffix_tag }} 104 | type=raw,value=systemd-${{ matrix.suffix_tag }},enable=${{ github.ref == format('refs/heads/{0}', 'master') }} 105 | 106 | # Build and push Docker image with Buildx (don't push on PR) 107 | # Note that the 'provenance: false' option is set to prevent attestations from being created. Attestations lead to a manifest with some unhelpful features in our context: 108 | # - The manifest created references the single architecture built and then has another reference to an "unknown/unknown" architecture. It is not clear what that does, but at the very least it causes confusion. 109 | # - A much bigger reason is that it prevents our ability to create a multi-arch manifest in later steps, since you can't create a manifest from another manifest. 110 | # https://github.com/docker/build-push-action 111 | - name: Build and push Docker images 112 | id: build-and-push 113 | uses: docker/build-push-action@v6 114 | with: 115 | context: ${{ env.BUILD_DIR }} 116 | file: ${{ env.BUILD_DIR }}/Dockerfile 117 | push: ${{ github.event_name != 'pull_request' }} 118 | tags: ${{ steps.meta.outputs.tags }} 119 | labels: ${{ steps.meta.outputs.labels }} 120 | provenance: false 121 | platforms: ${{ matrix.platform }} 122 | 123 | # Build and push systemd images. The same notes apply as for the supervisord image above with regard to the multi-arch push. 124 | - name: Build and push Systemd Docker images 125 | id: build-and-push-systemd 126 | uses: docker/build-push-action@v6 127 | with: 128 | context: ${{ env.BUILD_DIR }} 129 | file: ${{ env.BUILD_DIR }}/systemd/Dockerfile 130 | push: ${{ github.event_name != 'pull_request' }} 131 | tags: ${{ steps.metasysd.outputs.tags }} 132 | labels: ${{ steps.metasysd.outputs.labels }} 133 | provenance: false 134 | platforms: ${{ matrix.platform }} 135 | 136 | ## 137 | # manifest_metadata: This job calculates the tags and labels used to generate the final multi-arch manifest. These are basically the same tags as used in the build step, but without the arch in the name. 138 | manifest_metadata: 139 | runs-on: ubuntu-latest 140 | # Outputs define the variables that can be used in other jobs. These are strings, so JSON needs to be parsed with fromJson when read by other jobs. 141 | # The 'manifest' output is the metadata for the supervisord image and 'manifest_sysd' is the metadata for the systemd image.
142 | outputs: 143 | manifest: ${{ steps.manifest_meta.outputs.json }} 144 | manifest_sysd: ${{ steps.manifest_sysd_meta.outputs.json }} 145 | steps: 146 | - name: Extract Docker metadata for manifest 147 | id: manifest_meta 148 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 149 | with: 150 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_BASE }} 151 | flavor: | 152 | latest=false 153 | tags: | 154 | type=schedule 155 | type=ref,event=branch 156 | type=ref,event=tag 157 | type=ref,event=pr 158 | type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'master') }} 159 | - name: Extract Docker metadata for systemd manifest 160 | id: manifest_sysd_meta 161 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 162 | with: 163 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_BASE }} 164 | flavor: | 165 | latest=false 166 | tags: | 167 | type=schedule,suffix=-systemd 168 | type=ref,event=branch,suffix=-systemd 169 | type=ref,event=tag,suffix=-systemd 170 | type=ref,event=pr,suffix=-systemd 171 | type=raw,value=systemd,enable=${{ github.ref == format('refs/heads/{0}', 'master') }} 172 | ## 173 | # create_manifest: This job creates the multi-arch manifest for the supervisord image. It uses the metadata from the manifest_metadata job to create the manifest. 174 | create_manifest: 175 | needs: 176 | - build 177 | - manifest_metadata 178 | # Loop through each tag in the metadata created in the manifest_metadata job 179 | strategy: 180 | matrix: 181 | tags: ${{ fromJson(needs.manifest_metadata.outputs.manifest).tags }} 182 | runs-on: ubuntu-latest 183 | steps: 184 | # Login to registry 185 | - name: Log into registry ${{ env.REGISTRY }} 186 | if: github.event_name != 'pull_request' 187 | uses: docker/login-action@v2 188 | with: 189 | registry: ${{ env.REGISTRY }} 190 | username: ${{ github.actor }} 191 | password: ${{ secrets.GITHUB_TOKEN }} 192 | # create and push the manifest 193 | - name: Create manifest 194 | run: | 195 | docker manifest create ${{ matrix.tags }} \ 196 | ${{ matrix.tags }}-amd64 \ 197 | ${{ matrix.tags }}-arm64 198 | docker manifest push ${{ matrix.tags }} 199 | 200 | ## 201 | # create_sysd_manifest: This job creates the multi-arch manifest for the systemd image. It uses the metadata from the manifest_metadata job to create the manifest. 
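# (For example, and mirroring the supervisord manifest job above: on the master branch this combines ghcr.io/perfsonar/testpoint:systemd-amd64 and ghcr.io/perfsonar/testpoint:systemd-arm64 into the single multi-arch tag ghcr.io/perfsonar/testpoint:systemd.)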
202 | create_sysd_manifest: 203 | needs: 204 | - build 205 | - manifest_metadata 206 | # Loop through each tag in the metadata created in the manifest_metadata job 207 | strategy: 208 | matrix: 209 | tags: ${{ fromJson(needs.manifest_metadata.outputs.manifest_sysd).tags }} 210 | runs-on: ubuntu-latest 211 | steps: 212 | # Login to registry 213 | - name: Log into registry ${{ env.REGISTRY }} 214 | if: github.event_name != 'pull_request' 215 | uses: docker/login-action@v2 216 | with: 217 | registry: ${{ env.REGISTRY }} 218 | username: ${{ github.actor }} 219 | password: ${{ secrets.GITHUB_TOKEN }} 220 | # create and push the manifest 221 | - name: Create manifest 222 | run: | 223 | docker manifest create ${{ matrix.tags }} \ 224 | ${{ matrix.tags }}-amd64 \ 225 | ${{ matrix.tags }}-arm64 226 | docker manifest push ${{ matrix.tags }} -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # perfSONAR Testpoint 2 | 3 | FROM ubuntu:22.04 4 | 5 | ENV container docker 6 | ENV LC_ALL C 7 | ENV DEBIAN_FRONTEND noninteractive 8 | 9 | RUN apt-get update \ 10 | && apt-get install -y vim curl gnupg rsyslog net-tools sysstat iproute2 dnsutils tcpdump software-properties-common supervisor \ 11 | && apt-get clean \ 12 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 13 | 14 | # ----------------------------------------------------------------------- 15 | 16 | # 17 | # PostgreSQL Server 18 | # 19 | 20 | ENV PG_VERSION=14 \ 21 | PG_USER=postgres 22 | 23 | ENV PG_HOME=/etc/postgresql/$PG_VERSION/main \ 24 | PG_BINDIR=/usr/lib/postgresql/$PG_VERSION/bin \ 25 | PGDATA=/var/lib/postgresql/$PG_VERSION/main 26 | 27 | RUN apt-get update \ 28 | && apt-get install -y postgresql-$PG_VERSION postgresql-client-$PG_VERSION \ 29 | && rm -rf $PGDATA 30 | 31 | RUN su - $PG_USER -c "$PG_BINDIR/pg_ctl init -D $PGDATA" 32 | 33 | COPY --chown=$PG_USER:$PG_USER postgresql/postgresql.conf $PG_HOME/postgresql.conf 34 | COPY --chown=$PG_USER:$PG_USER postgresql/pg_hba.conf $PG_HOME/pg_hba.conf 35 | 36 | RUN su - $PG_USER -c "$PG_BINDIR/pg_ctl start -w -t 60 -D $PGDATA" 37 | 38 | # ----------------------------------------------------------------------- 39 | 40 | # Rsyslog 41 | 42 | COPY rsyslog/rsyslog /etc/init.d/rsyslog 43 | COPY rsyslog/rsyslog.conf /etc/rsyslog.conf 44 | COPY rsyslog/listen.conf /etc/rsyslog.d/listen.conf 45 | COPY rsyslog/python-pscheduler.conf /etc/rsyslog.d/python-pscheduler.conf 46 | COPY rsyslog/owamp-syslog.conf /etc/rsyslog.d/owamp-syslog.conf 47 | 48 | # ----------------------------------------------------------------------------- 49 | 50 | RUN curl -o /etc/apt/sources.list.d/perfsonar-minor-staging.list http://downloads.perfsonar.net/debian/perfsonar-minor-staging.list \ 51 | && curl http://downloads.perfsonar.net/debian/perfsonar-staging.gpg.key | apt-key add - \ 52 | && add-apt-repository universe 53 | 54 | RUN apt-get update \ 55 | && apt-get install -y perfsonar-testpoint \ 56 | && apt-get clean \ 57 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 58 | 59 | # ----------------------------------------------------------------------------- 60 | 61 | RUN mkdir -p /var/log/supervisor 62 | ADD supervisord.conf /etc/supervisord.conf 63 | 64 | # The following ports are used: 65 | # pScheduler: 443 66 | # owamp:861, 8760-9960 (tcp and udp) 67 | # twamp: 862, 18760-19960 (tcp and udp) 68 | # simplestream: 5890-5900 69 | # nuttcp: 5000, 5101 70 | # iperf2: 5001 71 | # iperf3: 5201 72 | 
# ntp: 123 (udp) 73 | EXPOSE 123/udp 443 861 862 5000 5001 5101 5201 5890-5900 8760-9960/tcp 8760-9960/udp 18760-19960/tcp 18760-19960/udp 74 | 75 | # add pid directory, logging, and postgres directory 76 | VOLUME ["/var/run", "/var/lib/pgsql", "/var/log", "/etc/rsyslog.d" ] 77 | 78 | CMD /usr/bin/supervisord -c /etc/supervisord.conf 79 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | environment { 4 | DOCKERHUB_CREDENTIALS = credentials('cs1867') 5 | } 6 | stages { 7 | stage('SCM Checkout') { 8 | steps{ 9 | git 'https://github.com/perfsonar/perfsonar-testpoint-docker.git' 10 | } 11 | } 12 | 13 | stage('Build docker image') { 14 | steps { 15 | sh 'docker build -t cs1867/perfsonar-testpoint:latest .' 16 | } 17 | } 18 | stage('login to dockerhub') { 19 | steps{ 20 | sh 'echo $DOCKERHUB_CREDENTIALS_PSW | docker login -u $DOCKERHUB_CREDENTIALS_USR --password-stdin' 21 | } 22 | } 23 | stage('push image') { 24 | steps{ 25 | sh 'docker push cs1867/perfsonar-testpoint:latest' 26 | } 27 | } 28 | } 29 | post { 30 | always { 31 | sh 'docker logout' 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # Makefile for perfSONAR Testpoint Docker Container 3 | # 4 | 5 | NAME=perfsonar/testpoint-docker 6 | 7 | GETID=$$(docker ps --format "{{.ID}}" --filter "ancestor=$(NAME)") 8 | 9 | default: 10 | @echo Nothing to do by default 11 | 12 | 13 | # Build the container from scratch 14 | build: 15 | docker images -q "$(NAME)" | xargs -r docker rmi -f 16 | docker build \ 17 | --no-cache --rm=true \ 18 | -t "$(NAME)" \ 19 | . 20 | 21 | 22 | # Start the container 23 | start: 24 | docker run -d -P --net=host $(NAME) 25 | 26 | 27 | # Start a shell on the container 28 | login: 29 | docker exec -it $(GETID) bash 30 | 31 | 32 | # Stop the container 33 | stop: 34 | docker kill $(GETID) 35 | 36 | 37 | # Get rid of the images and containers 38 | remove: 39 | docker ps -a -q --filter "ancestor=$(NAME)" | xargs -r docker stop 40 | docker ps -a -q --filter "ancestor=$(NAME)" | xargs -r docker rm 41 | docker images -q "$(NAME)" | xargs -r docker rmi 42 | 43 | 44 | clean: 45 | rm -rf $(TO_CLEAN) 46 | find . -name "*~" | xargs rm -rf 47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # perfSONAR 5.x Testpoint Docker container 2 | 3 | Please submit problems to: 4 | https://github.com/perfsonar/perfsonar-testpoint-docker/issues 5 | 6 | The Docker container runs all perfSONAR 5.x services in the "Testpoint" bundle, as described at: 7 | http://docs.perfsonar.net/install_options.html 8 | 9 | This can be used to run perfSONAR 5.x Testpoint services on any OS that supports Docker. 10 | 11 | ## Running the Docker Container 12 | 13 | ### Systemd-based Version (Recommended) 14 | 15 | We recommend using the systemd-based version of the Docker container due to its better stability. However, as this version requires the host to support [cgroups v2](https://docs.kernel.org/admin-guide/cgroup-v2.html), a supervisord-based version is also provided. 16 | 17 | To run the systemd-based version, follow these steps: 18 | 19 | Requires Docker version >= 20.0.0 20 | ```bash 21 | docker pull perfsonar/testpoint:systemd 22 | docker run -td --name perfsonar-testpoint --net=host --tmpfs /run --tmpfs /run/lock --tmpfs /tmp -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v ./compose/psconfig:/etc/perfsonar/psconfig --cgroupns host perfsonar/testpoint:systemd 23 | ``` 24 | 25 | Or, build and run it using [docker compose](https://docs.docker.com/compose/) >= 2.16.0: 26 | ```bash 27 | docker compose -f docker-compose.systemd.yml build 28 | docker compose -f docker-compose.systemd.yml up -d 29 | ``` 30 | 31 | ### Supervisord-based Version 32 | 33 | If you prefer to use the supervisord-based version of the Docker container, you can follow these steps: 34 | 35 | ```bash 36 | docker pull perfsonar/testpoint 37 | docker run -d --name perfsonar-testpoint --net=host -v ./compose/psconfig:/etc/perfsonar/psconfig perfsonar/testpoint 38 | ``` 39 | 40 | Or, build and run it using docker compose: 41 | ```bash 42 | docker compose -f docker-compose.yml build 43 | docker compose -f docker-compose.yml up -d 44 | ``` 45 | 46 | ## Lookup Service Registration 47 | 48 | To register your perfSONAR testpoint, start a container shell, and edit the file 49 | `/etc/perfsonar/lsregistrationdaemon.conf` with the location and administrator information for your site.
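If you would rather edit the file from the host than inside a container shell, one option (a sketch, assuming the container name `perfsonar-testpoint` used in the run examples above and whatever editor you prefer on the host) is to copy the file out, edit it locally, and copy it back:

```bash
# Copy the config out of the running container, edit it on the host, then put it back.
docker cp perfsonar-testpoint:/etc/perfsonar/lsregistrationdaemon.conf lsregistrationdaemon.conf
$EDITOR lsregistrationdaemon.conf    # use any editor you like
docker cp lsregistrationdaemon.conf perfsonar-testpoint:/etc/perfsonar/lsregistrationdaemon.conf
docker restart perfsonar-testpoint   # restart so the registration daemon picks up the change
```

Otherwise, use the container shell approach described below.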
50 | 51 | If this host will be part of a centrally configured mesh, also edit the file 52 | `/etc/perfsonar/meshconfig-agent.conf`, and update the **configuration_url**. 53 | 54 | ```bash 55 | docker exec -it perfsonar-testpoint bash 56 | ``` 57 | 58 | After editing the configuration files, exit the container and restart it. 59 | ```bash 60 | docker restart perfsonar-testpoint 61 | ``` 62 | 63 | If you want to persist these settings even after the container is removed, you can commit the running container. 64 | ```bash 65 | docker commit -m "added config settings" CONTAINER_ID perfsonar/testpoint 66 | ``` 67 | 68 | ## Testing 69 | 70 | Test the perfSONAR tools from another host with pscheduler and owamp installed: 71 | ```bash 72 | owping hostname 73 | 74 | pscheduler task clock --source hostname --dest localhost 75 | pscheduler task throughput --dest hostname 76 | ``` 77 | 78 | ## Troubleshooting 79 | 80 | To get a shell in the Docker container on your host, run `docker ps -a` to get your container ID, 81 | and then run: 82 | ```bash 83 | docker exec -it CONTAINER_ID bash 84 | ``` 85 | 86 | ## Notes: 87 | The perfSONAR hostname/IP is assumed to be the same as the base host. To use a different 88 | name/IP for the perfSONAR container, see: https://docs.docker.com/articles/networking/ 89 | It also assumes the base host is running NTP, and not running httpd, postgres, or anything else 90 | listening on the list of ports below. 91 | 92 | ## Security: 93 | Make sure the following ports are allowed by the base host: 94 | 95 | pScheduler: 443 96 | owamp:861, 8760-9960 (tcp and udp) 97 | twamp: 862, 18760-19960 (tcp and udp) 98 | simplestream: 5890-5900 99 | nuttcp: 5000, 5101 100 | iperf2: 5001 101 | iperf3: 5201 102 | ntp: 123 (udp) 103 | 104 | See: http://www.perfsonar.net/deploy/security-considerations/ 105 | 106 | 107 | -------------------------------------------------------------------------------- /compose/psconfig/archives.d/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/perfsonar/perfsonar-testpoint-docker/970ffd40a2a04f29d3807f429868d5bed947345d/compose/psconfig/archives.d/.gitkeep -------------------------------------------------------------------------------- /compose/psconfig/pscheduler-agent-logger.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,TaskLogger,TransactionLogger,psconfig 3 | 4 | [formatters] 5 | keys=patternLayout,rootPatternLayout 6 | 7 | [handlers] 8 | keys=fileRotateTask,fileRotateTransaction,nullRoot,fileRotatePSConfig 9 | 10 | [formatter_patternLayout] 11 | format=%(asctime)s %(levelname)s %(message)s 12 | datefmt=%Y-%m-%d %H:%M:%S 13 | 14 | [formatter_rootPatternLayout] 15 | format=%(asctime)s %(levelname)s pid=%(process)d prog=%(funcName)s line=%(lineno)d %(message)s 16 | datefmt=%Y-%m-%d %H:%M:%S 17 | 18 | [logger_root] 19 | level=NOTSET 20 | handlers=nullRoot 21 | 22 | [logger_psconfig] 23 | level=NOTSET 24 | qualname=psconfig 25 | handlers=fileRotatePSConfig 26 | 27 | [logger_TaskLogger] 28 | level=INFO 29 | qualname=TaskLogger 30 | handlers=fileRotateTask 31 | 32 | [logger_TransactionLogger] 33 | level=INFO 34 | qualname=TransactionLogger 35 | handlers=fileRotateTransaction 36 | 37 | [handler_fileRotateTask] 38 | class=handlers.RotatingFileHandler 39 | level=INFO 40 | formatter=patternLayout 41 | maxBytes=16777216 42 | backupCount=7 43 | args=('/var/log/perfsonar/psconfig-pscheduler-agent-tasks.log', 'a') 44 | 45 | 
[handler_nullRoot] 46 | class=logging.NullHandler 47 | 48 | [handler_fileRotatePSConfig] 49 | class=handlers.RotatingFileHandler 50 | level=INFO 51 | formatter=rootPatternLayout 52 | maxBytes=16777216 53 | backupCount=7 54 | args=('/var/log/perfsonar/psconfig-pscheduler-agent.log', 'a') 55 | 56 | [handler_fileRotateTransaction] 57 | class=handlers.RotatingFileHandler 58 | level=DEBUG 59 | formatter=patternLayout 60 | maxBytes=16777216 61 | backupCount=7 62 | args=('/var/log/perfsonar/psconfig-pscheduler-agent-transactions.log', 'a') 63 | -------------------------------------------------------------------------------- /compose/psconfig/pscheduler-agent.json: -------------------------------------------------------------------------------- 1 | { 2 | "remotes": [] 3 | } -------------------------------------------------------------------------------- /compose/psconfig/pscheduler.d/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/perfsonar/perfsonar-testpoint-docker/970ffd40a2a04f29d3807f429868d5bed947345d/compose/psconfig/pscheduler.d/.gitkeep -------------------------------------------------------------------------------- /compose/psconfig/transforms.d/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/perfsonar/perfsonar-testpoint-docker/970ffd40a2a04f29d3807f429868d5bed947345d/compose/psconfig/transforms.d/.gitkeep -------------------------------------------------------------------------------- /docker-compose.systemd.yml: -------------------------------------------------------------------------------- 1 | services: 2 | testpoint: 3 | container_name: perfsonar-testpoint 4 | image: perfsonar/testpoint:systemd 5 | build: 6 | context: . 7 | dockerfile: systemd/Dockerfile 8 | cgroup: host 9 | environment: 10 | - TZ=UTC 11 | network_mode: "host" 12 | restart: on-failure 13 | tmpfs: 14 | - /run 15 | - /run/lock 16 | - /tmp 17 | volumes: 18 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 19 | - ./compose/psconfig:/etc/perfsonar/psconfig 20 | tty: true -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | testpoint: 4 | container_name: perfsonar-testpoint 5 | image: perfsonar/testpoint:latest 6 | build: 7 | context: . 8 | dockerfile: Dockerfile 9 | network_mode: "host" 10 | restart: on-failure 11 | volumes: 12 | - ./compose/psconfig:/etc/perfsonar/psconfig 13 | -------------------------------------------------------------------------------- /postgresql/pg_hba.conf: -------------------------------------------------------------------------------- 1 | # PostgreSQL Client Authentication Configuration File 2 | # =================================================== 3 | # 4 | # Refer to the "Client Authentication" section in the PostgreSQL 5 | # documentation for a complete description of this file. A short 6 | # synopsis follows. 7 | # 8 | # This file controls: which hosts are allowed to connect, how clients 9 | # are authenticated, which PostgreSQL user names they can use, which 10 | # databases they can access. 
Records take one of these forms: 11 | # 12 | # local DATABASE USER METHOD [OPTIONS] 13 | # host DATABASE USER ADDRESS METHOD [OPTIONS] 14 | # hostssl DATABASE USER ADDRESS METHOD [OPTIONS] 15 | # hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] 16 | # hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] 17 | # hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] 18 | # 19 | # (The uppercase items must be replaced by actual values.) 20 | # 21 | # The first field is the connection type: 22 | # - "local" is a Unix-domain socket 23 | # - "host" is a TCP/IP socket (encrypted or not) 24 | # - "hostssl" is a TCP/IP socket that is SSL-encrypted 25 | # - "hostnossl" is a TCP/IP socket that is not SSL-encrypted 26 | # - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted 27 | # - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted 28 | # 29 | # DATABASE can be "all", "sameuser", "samerole", "replication", a 30 | # database name, or a comma-separated list thereof. The "all" 31 | # keyword does not match "replication". Access to replication 32 | # must be enabled in a separate record (see example below). 33 | # 34 | # USER can be "all", a user name, a group name prefixed with "+", or a 35 | # comma-separated list thereof. In both the DATABASE and USER fields 36 | # you can also write a file name prefixed with "@" to include names 37 | # from a separate file. 38 | # 39 | # ADDRESS specifies the set of hosts the record matches. It can be a 40 | # host name, or it is made up of an IP address and a CIDR mask that is 41 | # an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that 42 | # specifies the number of significant bits in the mask. A host name 43 | # that starts with a dot (.) matches a suffix of the actual host name. 44 | # Alternatively, you can write an IP address and netmask in separate 45 | # columns to specify the set of hosts. Instead of a CIDR-address, you 46 | # can write "samehost" to match any of the server's own IP addresses, 47 | # or "samenet" to match any address in any subnet that the server is 48 | # directly connected to. 49 | # 50 | # METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", 51 | # "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". 52 | # Note that "password" sends passwords in clear text; "md5" or 53 | # "scram-sha-256" are preferred since they send encrypted passwords. 54 | # 55 | # OPTIONS are a set of options for the authentication in the format 56 | # NAME=VALUE. The available options depend on the different 57 | # authentication methods -- refer to the "Client Authentication" 58 | # section in the documentation for a list of which options are 59 | # available for which authentication methods. 60 | # 61 | # Database and user names containing spaces, commas, quotes and other 62 | # special characters must be quoted. Quoting one of the keywords 63 | # "all", "sameuser", "samerole" or "replication" makes the name lose 64 | # its special character, and just match a database or username with 65 | # that name. 66 | # 67 | # This file is read on server startup and when the server receives a 68 | # SIGHUP signal. If you edit the file on a running system, you have to 69 | # SIGHUP the server for the changes to take effect, run "pg_ctl reload", 70 | # or execute "SELECT pg_reload_conf()". 
71 | # 72 | # Put your actual configuration here 73 | # ---------------------------------- 74 | 75 | #BEGIN-pscheduler-server 76 | # 77 | # pScheduler 78 | # 79 | # This user should never need to access the database from anywhere 80 | # other than locally. 81 | # 82 | local pscheduler pscheduler scram-sha-256 83 | host pscheduler pscheduler 127.0.0.1/32 scram-sha-256 84 | host pscheduler pscheduler ::1/128 scram-sha-256 85 | #END-pscheduler-server 86 | 87 | # 88 | # If you want to allow non-local connections, you need to add more 89 | # "host" records. In that case you will also need to make PostgreSQL 90 | # listen on a non-local interface via the listen_addresses 91 | # configuration parameter, or via the -i or -h command line switches. 92 | 93 | 94 | 95 | 96 | # DO NOT DISABLE! 97 | # If you change this first entry you will need to make sure that the 98 | # database superuser can access the database using some other method. 99 | # Noninteractive access to all databases is required during automatic 100 | # maintenance (custom daily cronjobs, replication, and similar tasks). 101 | # 102 | # Database administrative login by Unix domain socket 103 | local all postgres peer 104 | 105 | # TYPE DATABASE USER ADDRESS METHOD 106 | 107 | # "local" is for Unix domain socket connections only 108 | local all all peer 109 | # IPv4 local connections: 110 | host all all 127.0.0.1/32 scram-sha-256 111 | # IPv6 local connections: 112 | host all all ::1/128 scram-sha-256 113 | # Allow replication connections from localhost, by a user with the 114 | # replication privilege. 115 | local replication all peer 116 | host replication all 127.0.0.1/32 scram-sha-256 117 | host replication all ::1/128 scram-sha-256 -------------------------------------------------------------------------------- /postgresql/postgresql.conf: -------------------------------------------------------------------------------- 1 | # ----------------------------- 2 | # PostgreSQL configuration file 3 | # ----------------------------- 4 | # 5 | # This file consists of lines of the form: 6 | # 7 | # name = value 8 | # 9 | # (The "=" is optional.) Whitespace may be used. Comments are introduced with 10 | # "#" anywhere on a line. The complete list of parameter names and allowed 11 | # values can be found in the PostgreSQL documentation. 12 | # 13 | # The commented-out settings shown in this file represent the default values. 14 | # Re-commenting a setting is NOT sufficient to revert it to the default value; 15 | # you need to reload the server. 16 | # 17 | # This file is read on server startup and when the server receives a SIGHUP 18 | # signal. If you edit the file on a running system, you have to SIGHUP the 19 | # server for the changes to take effect, run "pg_ctl reload", or execute 20 | # "SELECT pg_reload_conf()". Some parameters, which are marked below, 21 | # require a server shutdown and restart to take effect. 22 | # 23 | # Any parameter can also be given as a command-line option to the server, e.g., 24 | # "postgres -c log_connections=on". Some parameters can be changed at run time 25 | # with the "SET" SQL command. 
26 | # 27 | # Memory units: B = bytes Time units: us = microseconds 28 | # kB = kilobytes ms = milliseconds 29 | # MB = megabytes s = seconds 30 | # GB = gigabytes min = minutes 31 | # TB = terabytes h = hours 32 | # d = days 33 | 34 | 35 | #------------------------------------------------------------------------------ 36 | # FILE LOCATIONS 37 | #------------------------------------------------------------------------------ 38 | 39 | # The default values of these variables are driven from the -D command-line 40 | # option or PGDATA environment variable, represented here as ConfigDir. 41 | 42 | data_directory = '/var/lib/postgresql/14/main' # use data in another directory 43 | # (change requires restart) 44 | hba_file = '/etc/postgresql/14/main/pg_hba.conf' # host-based authentication file 45 | # (change requires restart) 46 | ident_file = '/etc/postgresql/14/main/pg_ident.conf' # ident configuration file 47 | # (change requires restart) 48 | 49 | # If external_pid_file is not explicitly set, no extra PID file is written. 50 | external_pid_file = '/var/run/postgresql/14-main.pid' # write an extra PID file 51 | # (change requires restart) 52 | 53 | 54 | #------------------------------------------------------------------------------ 55 | # CONNECTIONS AND AUTHENTICATION 56 | #------------------------------------------------------------------------------ 57 | 58 | # - Connection Settings - 59 | 60 | #listen_addresses = 'localhost' # what IP address(es) to listen on; 61 | # comma-separated list of addresses; 62 | # defaults to 'localhost'; use '*' for all 63 | # (change requires restart) 64 | port = 5432 # (change requires restart) 65 | max_connections = 500 # (change requires restart) 66 | #superuser_reserved_connections = 3 # (change requires restart) 67 | unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories 68 | # (change requires restart) 69 | #unix_socket_group = '' # (change requires restart) 70 | #unix_socket_permissions = 0777 # begin with 0 to use octal notation 71 | # (change requires restart) 72 | #bonjour = off # advertise server via Bonjour 73 | # (change requires restart) 74 | #bonjour_name = '' # defaults to the computer name 75 | # (change requires restart) 76 | 77 | # - TCP settings - 78 | # see "man tcp" for details 79 | 80 | #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; 81 | # 0 selects the system default 82 | #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; 83 | # 0 selects the system default 84 | #tcp_keepalives_count = 0 # TCP_KEEPCNT; 85 | # 0 selects the system default 86 | #tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; 87 | # 0 selects the system default 88 | 89 | #client_connection_check_interval = 0 # time between checks for client 90 | # disconnection while running queries; 91 | # 0 for never 92 | 93 | # - Authentication - 94 | 95 | #authentication_timeout = 1min # 1s-600s 96 | #password_encryption = scram-sha-256 # scram-sha-256 or md5 97 | #db_user_namespace = off 98 | 99 | # GSSAPI using Kerberos 100 | #krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' 101 | #krb_caseins_users = off 102 | 103 | # - SSL - 104 | 105 | ssl = on 106 | #ssl_ca_file = '' 107 | ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' 108 | #ssl_crl_file = '' 109 | #ssl_crl_dir = '' 110 | ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' 111 | #ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers 112 | #ssl_prefer_server_ciphers = on 113 | #ssl_ecdh_curve = 'prime256v1' 114 | #ssl_min_protocol_version = 
'TLSv1.2' 115 | #ssl_max_protocol_version = '' 116 | #ssl_dh_params_file = '' 117 | #ssl_passphrase_command = '' 118 | #ssl_passphrase_command_supports_reload = off 119 | 120 | 121 | #------------------------------------------------------------------------------ 122 | # RESOURCE USAGE (except WAL) 123 | #------------------------------------------------------------------------------ 124 | 125 | # - Memory - 126 | 127 | shared_buffers = 128MB # min 128kB 128 | # (change requires restart) 129 | #huge_pages = try # on, off, or try 130 | # (change requires restart) 131 | #huge_page_size = 0 # zero for system default 132 | # (change requires restart) 133 | #temp_buffers = 8MB # min 800kB 134 | #max_prepared_transactions = 0 # zero disables the feature 135 | # (change requires restart) 136 | # Caution: it is not advisable to set max_prepared_transactions nonzero unless 137 | # you actively intend to use prepared transactions. 138 | #work_mem = 4MB # min 64kB 139 | #hash_mem_multiplier = 1.0 # 1-1000.0 multiplier on hash table work_mem 140 | #maintenance_work_mem = 64MB # min 1MB 141 | #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem 142 | #logical_decoding_work_mem = 64MB # min 64kB 143 | #max_stack_depth = 2MB # min 100kB 144 | #shared_memory_type = mmap # the default is the first option 145 | # supported by the operating system: 146 | # mmap 147 | # sysv 148 | # windows 149 | # (change requires restart) 150 | dynamic_shared_memory_type = posix # the default is the first option 151 | # supported by the operating system: 152 | # posix 153 | # sysv 154 | # windows 155 | # mmap 156 | # (change requires restart) 157 | #min_dynamic_shared_memory = 0MB # (change requires restart) 158 | 159 | # - Disk - 160 | 161 | #temp_file_limit = -1 # limits per-process temp file space 162 | # in kilobytes, or -1 for no limit 163 | 164 | # - Kernel Resources - 165 | 166 | #max_files_per_process = 1000 # min 64 167 | # (change requires restart) 168 | 169 | # - Cost-Based Vacuum Delay - 170 | 171 | #vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) 172 | #vacuum_cost_page_hit = 1 # 0-10000 credits 173 | #vacuum_cost_page_miss = 2 # 0-10000 credits 174 | #vacuum_cost_page_dirty = 20 # 0-10000 credits 175 | #vacuum_cost_limit = 200 # 1-10000 credits 176 | 177 | # - Background Writer - 178 | 179 | #bgwriter_delay = 200ms # 10-10000ms between rounds 180 | #bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables 181 | #bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round 182 | #bgwriter_flush_after = 512kB # measured in pages, 0 disables 183 | 184 | # - Asynchronous Behavior - 185 | 186 | #backend_flush_after = 0 # measured in pages, 0 disables 187 | #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching 188 | #maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching 189 | #max_worker_processes = 8 # (change requires restart) 190 | #max_parallel_workers_per_gather = 2 # taken from max_parallel_workers 191 | #max_parallel_maintenance_workers = 2 # taken from max_parallel_workers 192 | #max_parallel_workers = 8 # maximum number of max_worker_processes that 193 | # can be used in parallel operations 194 | #parallel_leader_participation = on 195 | #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate 196 | # (change requires restart) 197 | 198 | 199 | #------------------------------------------------------------------------------ 200 | # WRITE-AHEAD LOG 201 | 
#------------------------------------------------------------------------------ 202 | 203 | # - Settings - 204 | 205 | #wal_level = replica # minimal, replica, or logical 206 | # (change requires restart) 207 | #fsync = on # flush data to disk for crash safety 208 | # (turning this off can cause 209 | # unrecoverable data corruption) 210 | #synchronous_commit = on # synchronization level; 211 | # off, local, remote_write, remote_apply, or on 212 | #wal_sync_method = fsync # the default is the first option 213 | # supported by the operating system: 214 | # open_datasync 215 | # fdatasync (default on Linux and FreeBSD) 216 | # fsync 217 | # fsync_writethrough 218 | # open_sync 219 | #full_page_writes = on # recover from partial page writes 220 | #wal_log_hints = off # also do full page writes of non-critical updates 221 | # (change requires restart) 222 | #wal_compression = off # enable compression of full-page writes 223 | #wal_init_zero = on # zero-fill new WAL files 224 | #wal_recycle = on # recycle WAL files 225 | #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers 226 | # (change requires restart) 227 | #wal_writer_delay = 200ms # 1-10000 milliseconds 228 | #wal_writer_flush_after = 1MB # measured in pages, 0 disables 229 | #wal_skip_threshold = 2MB 230 | 231 | #commit_delay = 0 # range 0-100000, in microseconds 232 | #commit_siblings = 5 # range 1-1000 233 | 234 | # - Checkpoints - 235 | 236 | #checkpoint_timeout = 5min # range 30s-1d 237 | #checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 238 | #checkpoint_flush_after = 256kB # measured in pages, 0 disables 239 | #checkpoint_warning = 30s # 0 disables 240 | max_wal_size = 1GB 241 | min_wal_size = 80MB 242 | 243 | # - Archiving - 244 | 245 | #archive_mode = off # enables archiving; off, on, or always 246 | # (change requires restart) 247 | #archive_command = '' # command to use to archive a logfile segment 248 | # placeholders: %p = path of file to archive 249 | # %f = file name only 250 | # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' 251 | #archive_timeout = 0 # force a logfile segment switch after this 252 | # number of seconds; 0 disables 253 | 254 | # - Archive Recovery - 255 | 256 | # These are only used in recovery mode. 257 | 258 | #restore_command = '' # command to use to restore an archived logfile segment 259 | # placeholders: %p = path of file to restore 260 | # %f = file name only 261 | # e.g. 'cp /mnt/server/archivedir/%f %p' 262 | #archive_cleanup_command = '' # command to execute at every restartpoint 263 | #recovery_end_command = '' # command to execute at completion of recovery 264 | 265 | # - Recovery Target - 266 | 267 | # Set these only when performing a targeted recovery. 
268 | 269 | #recovery_target = '' # 'immediate' to end recovery as soon as a 270 | # consistent state is reached 271 | # (change requires restart) 272 | #recovery_target_name = '' # the named restore point to which recovery will proceed 273 | # (change requires restart) 274 | #recovery_target_time = '' # the time stamp up to which recovery will proceed 275 | # (change requires restart) 276 | #recovery_target_xid = '' # the transaction ID up to which recovery will proceed 277 | # (change requires restart) 278 | #recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed 279 | # (change requires restart) 280 | #recovery_target_inclusive = on # Specifies whether to stop: 281 | # just after the specified recovery target (on) 282 | # just before the recovery target (off) 283 | # (change requires restart) 284 | #recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID 285 | # (change requires restart) 286 | #recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' 287 | # (change requires restart) 288 | 289 | 290 | #------------------------------------------------------------------------------ 291 | # REPLICATION 292 | #------------------------------------------------------------------------------ 293 | 294 | # - Sending Servers - 295 | 296 | # Set these on the primary and on any standby that will send replication data. 297 | 298 | #max_wal_senders = 10 # max number of walsender processes 299 | # (change requires restart) 300 | #max_replication_slots = 10 # max number of replication slots 301 | # (change requires restart) 302 | #wal_keep_size = 0 # in megabytes; 0 disables 303 | #max_slot_wal_keep_size = -1 # in megabytes; -1 disables 304 | #wal_sender_timeout = 60s # in milliseconds; 0 disables 305 | #track_commit_timestamp = off # collect timestamp of transaction commit 306 | # (change requires restart) 307 | 308 | # - Primary Server - 309 | 310 | # These settings are ignored on a standby server. 311 | 312 | #synchronous_standby_names = '' # standby servers that provide sync rep 313 | # method to choose sync standbys, number of sync standbys, 314 | # and comma-separated list of application_name 315 | # from standby(s); '*' = all 316 | #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed 317 | 318 | # - Standby Servers - 319 | 320 | # These settings are ignored on a primary server. 
321 | 322 | #primary_conninfo = '' # connection string to sending server 323 | #primary_slot_name = '' # replication slot on sending server 324 | #promote_trigger_file = '' # file name whose presence ends recovery 325 | #hot_standby = on # "off" disallows queries during recovery 326 | # (change requires restart) 327 | #max_standby_archive_delay = 30s # max delay before canceling queries 328 | # when reading WAL from archive; 329 | # -1 allows indefinite delay 330 | #max_standby_streaming_delay = 30s # max delay before canceling queries 331 | # when reading streaming WAL; 332 | # -1 allows indefinite delay 333 | #wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name 334 | # is not set 335 | #wal_receiver_status_interval = 10s # send replies at least this often 336 | # 0 disables 337 | #hot_standby_feedback = off # send info from standby to prevent 338 | # query conflicts 339 | #wal_receiver_timeout = 60s # time that receiver waits for 340 | # communication from primary 341 | # in milliseconds; 0 disables 342 | #wal_retrieve_retry_interval = 5s # time to wait before retrying to 343 | # retrieve WAL after a failed attempt 344 | #recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery 345 | 346 | # - Subscribers - 347 | 348 | # These settings are ignored on a publisher. 349 | 350 | #max_logical_replication_workers = 4 # taken from max_worker_processes 351 | # (change requires restart) 352 | #max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers 353 | 354 | 355 | #------------------------------------------------------------------------------ 356 | # QUERY TUNING 357 | #------------------------------------------------------------------------------ 358 | 359 | # - Planner Method Configuration - 360 | 361 | #enable_async_append = on 362 | #enable_bitmapscan = on 363 | #enable_gathermerge = on 364 | #enable_hashagg = on 365 | #enable_hashjoin = on 366 | #enable_incremental_sort = on 367 | #enable_indexscan = on 368 | #enable_indexonlyscan = on 369 | #enable_material = on 370 | #enable_memoize = on 371 | #enable_mergejoin = on 372 | #enable_nestloop = on 373 | #enable_parallel_append = on 374 | #enable_parallel_hash = on 375 | #enable_partition_pruning = on 376 | #enable_partitionwise_join = off 377 | #enable_partitionwise_aggregate = off 378 | #enable_seqscan = on 379 | #enable_sort = on 380 | #enable_tidscan = on 381 | 382 | # - Planner Cost Constants - 383 | 384 | #seq_page_cost = 1.0 # measured on an arbitrary scale 385 | #random_page_cost = 4.0 # same scale as above 386 | #cpu_tuple_cost = 0.01 # same scale as above 387 | #cpu_index_tuple_cost = 0.005 # same scale as above 388 | #cpu_operator_cost = 0.0025 # same scale as above 389 | #parallel_setup_cost = 1000.0 # same scale as above 390 | #parallel_tuple_cost = 0.1 # same scale as above 391 | #min_parallel_table_scan_size = 8MB 392 | #min_parallel_index_scan_size = 512kB 393 | #effective_cache_size = 4GB 394 | 395 | #jit_above_cost = 100000 # perform JIT compilation if available 396 | # and query more expensive than this; 397 | # -1 disables 398 | #jit_inline_above_cost = 500000 # inline small functions if query is 399 | # more expensive than this; -1 disables 400 | #jit_optimize_above_cost = 500000 # use expensive JIT optimizations if 401 | # query is more expensive than this; 402 | # -1 disables 403 | 404 | # - Genetic Query Optimizer - 405 | 406 | #geqo = on 407 | #geqo_threshold = 12 408 | #geqo_effort = 5 # range 1-10 409 | #geqo_pool_size = 0 # selects default 
based on effort 410 | #geqo_generations = 0 # selects default based on effort 411 | #geqo_selection_bias = 2.0 # range 1.5-2.0 412 | #geqo_seed = 0.0 # range 0.0-1.0 413 | 414 | # - Other Planner Options - 415 | 416 | #default_statistics_target = 100 # range 1-10000 417 | #constraint_exclusion = partition # on, off, or partition 418 | #cursor_tuple_fraction = 0.1 # range 0.0-1.0 419 | #from_collapse_limit = 8 420 | #jit = on # allow JIT compilation 421 | #join_collapse_limit = 8 # 1 disables collapsing of explicit 422 | # JOIN clauses 423 | #plan_cache_mode = auto # auto, force_generic_plan or 424 | # force_custom_plan 425 | 426 | 427 | #------------------------------------------------------------------------------ 428 | # REPORTING AND LOGGING 429 | #------------------------------------------------------------------------------ 430 | 431 | # - Where to Log - 432 | 433 | #log_destination = 'stderr' # Valid values are combinations of 434 | # stderr, csvlog, syslog, and eventlog, 435 | # depending on platform. csvlog 436 | # requires logging_collector to be on. 437 | 438 | # This is used when logging to stderr: 439 | #logging_collector = off # Enable capturing of stderr and csvlog 440 | # into log files. Required to be on for 441 | # csvlogs. 442 | # (change requires restart) 443 | 444 | # These are only used if logging_collector is on: 445 | #log_directory = 'log' # directory where log files are written, 446 | # can be absolute or relative to PGDATA 447 | #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, 448 | # can include strftime() escapes 449 | #log_file_mode = 0600 # creation mode for log files, 450 | # begin with 0 to use octal notation 451 | #log_rotation_age = 1d # Automatic rotation of logfiles will 452 | # happen after that time. 0 disables. 453 | #log_rotation_size = 10MB # Automatic rotation of logfiles will 454 | # happen after that much log output. 455 | # 0 disables. 456 | #log_truncate_on_rotation = off # If on, an existing log file with the 457 | # same name as the new log file will be 458 | # truncated rather than appended to. 459 | # But such truncation only occurs on 460 | # time-driven rotation, not on restarts 461 | # or size-driven rotation. Default is 462 | # off, meaning append to existing files 463 | # in all cases. 
464 | 465 | # These are relevant when logging to syslog: 466 | #syslog_facility = 'LOCAL0' 467 | #syslog_ident = 'postgres' 468 | #syslog_sequence_numbers = on 469 | #syslog_split_messages = on 470 | 471 | # This is only relevant when logging to eventlog (Windows): 472 | # (change requires restart) 473 | #event_source = 'PostgreSQL' 474 | 475 | # - When to Log - 476 | 477 | #log_min_messages = warning # values in order of decreasing detail: 478 | # debug5 479 | # debug4 480 | # debug3 481 | # debug2 482 | # debug1 483 | # info 484 | # notice 485 | # warning 486 | # error 487 | # log 488 | # fatal 489 | # panic 490 | 491 | #log_min_error_statement = error # values in order of decreasing detail: 492 | # debug5 493 | # debug4 494 | # debug3 495 | # debug2 496 | # debug1 497 | # info 498 | # notice 499 | # warning 500 | # error 501 | # log 502 | # fatal 503 | # panic (effectively off) 504 | 505 | #log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements 506 | # and their durations, > 0 logs only 507 | # statements running at least this number 508 | # of milliseconds 509 | 510 | #log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements 511 | # and their durations, > 0 logs only a sample of 512 | # statements running at least this number 513 | # of milliseconds; 514 | # sample fraction is determined by log_statement_sample_rate 515 | 516 | #log_statement_sample_rate = 1.0 # fraction of logged statements exceeding 517 | # log_min_duration_sample to be logged; 518 | # 1.0 logs all such statements, 0.0 never logs 519 | 520 | 521 | #log_transaction_sample_rate = 0.0 # fraction of transactions whose statements 522 | # are logged regardless of their duration; 1.0 logs all 523 | # statements from all transactions, 0.0 never logs 524 | 525 | # - What to Log - 526 | 527 | #debug_print_parse = off 528 | #debug_print_rewritten = off 529 | #debug_print_plan = off 530 | #debug_pretty_print = on 531 | #log_autovacuum_min_duration = -1 # log autovacuum activity; 532 | # -1 disables, 0 logs all actions and 533 | # their durations, > 0 logs only 534 | # actions running at least this number 535 | # of milliseconds. 536 | #log_checkpoints = off 537 | #log_connections = off 538 | #log_disconnections = off 539 | #log_duration = off 540 | #log_error_verbosity = default # terse, default, or verbose messages 541 | #log_hostname = off 542 | log_line_prefix = '%m [%p] %q%u@%d ' # special values: 543 | # %a = application name 544 | # %u = user name 545 | # %d = database name 546 | # %r = remote host and port 547 | # %h = remote host 548 | # %b = backend type 549 | # %p = process ID 550 | # %P = process ID of parallel group leader 551 | # %t = timestamp without milliseconds 552 | # %m = timestamp with milliseconds 553 | # %n = timestamp with milliseconds (as a Unix epoch) 554 | # %Q = query ID (0 if none or not computed) 555 | # %i = command tag 556 | # %e = SQL state 557 | # %c = session ID 558 | # %l = session line number 559 | # %s = session start timestamp 560 | # %v = virtual transaction ID 561 | # %x = transaction ID (0 if none) 562 | # %q = stop here in non-session 563 | # processes 564 | # %% = '%' 565 | # e.g. 
'<%u%%%d> ' 566 | #log_lock_waits = off # log lock waits >= deadlock_timeout 567 | #log_recovery_conflict_waits = off # log standby recovery conflict waits 568 | # >= deadlock_timeout 569 | #log_parameter_max_length = -1 # when logging statements, limit logged 570 | # bind-parameter values to N bytes; 571 | # -1 means print in full, 0 disables 572 | #log_parameter_max_length_on_error = 0 # when logging an error, limit logged 573 | # bind-parameter values to N bytes; 574 | # -1 means print in full, 0 disables 575 | #log_statement = 'none' # none, ddl, mod, all 576 | #log_replication_commands = off 577 | #log_temp_files = -1 # log temporary files equal or larger 578 | # than the specified size in kilobytes; 579 | # -1 disables, 0 logs all temp files 580 | log_timezone = 'Etc/UTC' 581 | 582 | 583 | #------------------------------------------------------------------------------ 584 | # PROCESS TITLE 585 | #------------------------------------------------------------------------------ 586 | 587 | cluster_name = '14/main' # added to process titles if nonempty 588 | # (change requires restart) 589 | #update_process_title = on 590 | 591 | 592 | #------------------------------------------------------------------------------ 593 | # STATISTICS 594 | #------------------------------------------------------------------------------ 595 | 596 | # - Query and Index Statistics Collector - 597 | 598 | #track_activities = on 599 | #track_activity_query_size = 1024 # (change requires restart) 600 | #track_counts = on 601 | #track_io_timing = off 602 | #track_wal_io_timing = off 603 | #track_functions = none # none, pl, all 604 | stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp' 605 | 606 | 607 | # - Monitoring - 608 | 609 | #compute_query_id = auto 610 | #log_statement_stats = off 611 | #log_parser_stats = off 612 | #log_planner_stats = off 613 | #log_executor_stats = off 614 | 615 | 616 | #------------------------------------------------------------------------------ 617 | # AUTOVACUUM 618 | #------------------------------------------------------------------------------ 619 | 620 | #autovacuum = on # Enable autovacuum subprocess? 'on' 621 | # requires track_counts to also be on. 
622 | #autovacuum_max_workers = 3 # max number of autovacuum subprocesses 623 | # (change requires restart) 624 | #autovacuum_naptime = 1min # time between autovacuum runs 625 | #autovacuum_vacuum_threshold = 50 # min number of row updates before 626 | # vacuum 627 | #autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts 628 | # before vacuum; -1 disables insert 629 | # vacuums 630 | #autovacuum_analyze_threshold = 50 # min number of row updates before 631 | # analyze 632 | #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum 633 | #autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table 634 | # size before insert vacuum 635 | #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze 636 | #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum 637 | # (change requires restart) 638 | #autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age 639 | # before forced vacuum 640 | # (change requires restart) 641 | #autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for 642 | # autovacuum, in milliseconds; 643 | # -1 means use vacuum_cost_delay 644 | #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for 645 | # autovacuum, -1 means use 646 | # vacuum_cost_limit 647 | 648 | 649 | #------------------------------------------------------------------------------ 650 | # CLIENT CONNECTION DEFAULTS 651 | #------------------------------------------------------------------------------ 652 | 653 | # - Statement Behavior - 654 | 655 | #client_min_messages = notice # values in order of decreasing detail: 656 | # debug5 657 | # debug4 658 | # debug3 659 | # debug2 660 | # debug1 661 | # log 662 | # notice 663 | # warning 664 | # error 665 | #search_path = '"$user", public' # schema names 666 | #row_security = on 667 | #default_table_access_method = 'heap' 668 | #default_tablespace = '' # a tablespace name, '' uses the default 669 | #default_toast_compression = 'pglz' # 'pglz' or 'lz4' 670 | #temp_tablespaces = '' # a list of tablespace names, '' uses 671 | # only default tablespace 672 | #check_function_bodies = on 673 | #default_transaction_isolation = 'read committed' 674 | #default_transaction_read_only = off 675 | #default_transaction_deferrable = off 676 | #session_replication_role = 'origin' 677 | #statement_timeout = 0 # in milliseconds, 0 is disabled 678 | #lock_timeout = 0 # in milliseconds, 0 is disabled 679 | #idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled 680 | #idle_session_timeout = 0 # in milliseconds, 0 is disabled 681 | #vacuum_freeze_table_age = 150000000 682 | #vacuum_freeze_min_age = 50000000 683 | #vacuum_failsafe_age = 1600000000 684 | #vacuum_multixact_freeze_table_age = 150000000 685 | #vacuum_multixact_freeze_min_age = 5000000 686 | #vacuum_multixact_failsafe_age = 1600000000 687 | #bytea_output = 'hex' # hex, escape 688 | #xmlbinary = 'base64' 689 | #xmloption = 'content' 690 | #gin_pending_list_limit = 4MB 691 | 692 | # - Locale and Formatting - 693 | 694 | datestyle = 'iso, mdy' 695 | #intervalstyle = 'postgres' 696 | timezone = 'Etc/UTC' 697 | #timezone_abbreviations = 'Default' # Select the set of available time zone 698 | # abbreviations. Currently, there are 699 | # Default 700 | # Australia (historical usage) 701 | # India 702 | # You can create your own file in 703 | # share/timezonesets/. 
704 | #extra_float_digits = 1 # min -15, max 3; any value >0 actually 705 | # selects precise output mode 706 | #client_encoding = sql_ascii # actually, defaults to database 707 | # encoding 708 | 709 | # These settings are initialized by initdb, but they can be changed. 710 | lc_messages = 'C' # locale for system error message 711 | # strings 712 | lc_monetary = 'C' # locale for monetary formatting 713 | lc_numeric = 'C' # locale for number formatting 714 | lc_time = 'C' # locale for time formatting 715 | 716 | # default configuration for text search 717 | default_text_search_config = 'pg_catalog.english' 718 | 719 | # - Shared Library Preloading - 720 | 721 | #local_preload_libraries = '' 722 | #session_preload_libraries = '' 723 | #shared_preload_libraries = '' # (change requires restart) 724 | #jit_provider = 'llvmjit' # JIT library to use 725 | 726 | # - Other Defaults - 727 | 728 | #dynamic_library_path = '$libdir' 729 | #extension_destdir = '' # prepend path when loading extensions 730 | # and shared objects (added by Debian) 731 | #gin_fuzzy_search_limit = 0 732 | 733 | 734 | #------------------------------------------------------------------------------ 735 | # LOCK MANAGEMENT 736 | #------------------------------------------------------------------------------ 737 | 738 | #deadlock_timeout = 1s 739 | #max_locks_per_transaction = 64 # min 10 740 | # (change requires restart) 741 | #max_pred_locks_per_transaction = 64 # min 10 742 | # (change requires restart) 743 | #max_pred_locks_per_relation = -2 # negative values mean 744 | # (max_pred_locks_per_transaction 745 | # / -max_pred_locks_per_relation) - 1 746 | #max_pred_locks_per_page = 2 # min 0 747 | 748 | 749 | #------------------------------------------------------------------------------ 750 | # VERSION AND PLATFORM COMPATIBILITY 751 | #------------------------------------------------------------------------------ 752 | 753 | # - Previous PostgreSQL Versions - 754 | 755 | #array_nulls = on 756 | #backslash_quote = safe_encoding # on, off, or safe_encoding 757 | #escape_string_warning = on 758 | #lo_compat_privileges = off 759 | #quote_all_identifiers = off 760 | #standard_conforming_strings = on 761 | #synchronize_seqscans = on 762 | 763 | # - Other Platforms and Clients - 764 | 765 | #transform_null_equals = off 766 | 767 | 768 | #------------------------------------------------------------------------------ 769 | # ERROR HANDLING 770 | #------------------------------------------------------------------------------ 771 | 772 | #exit_on_error = off # terminate session on any error? 773 | #restart_after_crash = on # reinitialize after backend crash? 774 | #data_sync_retry = off # retry or panic on failure to fsync 775 | # data? 776 | # (change requires restart) 777 | #recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) 778 | 779 | 780 | #------------------------------------------------------------------------------ 781 | # CONFIG FILE INCLUDES 782 | #------------------------------------------------------------------------------ 783 | 784 | # These options allow settings to be loaded from files other than the 785 | # default postgresql.conf. Note that these are directives, not variable 786 | # assignments, so they can usefully be given more than once. 787 | 788 | include_dir = 'conf.d' # include files ending in '.conf' from 789 | # a directory, e.g., 'conf.d' 790 | #include_if_exists = '...' # include file only if it exists 791 | #include = '...' 
# include file 792 | 793 | 794 | #------------------------------------------------------------------------------ 795 | # CUSTOMIZED OPTIONS 796 | #------------------------------------------------------------------------------ 797 | 798 | # Add settings for extensions here -------------------------------------------------------------------------------- /postgresql/pscheduler-build-database: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | # 3 | # Build the pScheduler Database 4 | # 5 | 6 | 7 | # Start the server 8 | su - postgres -c "/usr/pgsql-10/bin/pg_ctl start -w -t 60" 9 | echo "Server started" 10 | 11 | ROLE="pscheduler" 12 | 13 | # Generate the password file 14 | random-string --safe --length 60 --randlength > '/etc/pscheduler/database/database-password' 15 | echo "Password generated" 16 | 17 | # Generate the DSN file 18 | awk -v "ROLE=${ROLE}" '{ printf "host=localhost dbname=pscheduler user=%s password=%s\n", ROLE, $1 }' \ 19 | "/etc/pscheduler/database/database-password" \ 20 | > "/etc/pscheduler/database/database-dsn" 21 | echo "DSN generated" 22 | 23 | # Generate a PostgreSQL password file 24 | # Format is hostname:port:database:username:password 25 | awk -v "ROLE=${ROLE}" '{ printf "*:*:pscheduler:%s:%s\n", ROLE, $1 }' \ 26 | "/etc/pscheduler/database/database-password" \ 27 | > "/etc/pscheduler/database/pgpassfile" 28 | chmod 400 "/etc/pscheduler/database/pgpassfile" 29 | echo "Pgpassword generated" 30 | 31 | 32 | # Hot patch the database loader so it doesn't barf when not 33 | # interactive. 34 | # TODO: Remove this after pS 4.0.0.3. Probably harmless if left here. 35 | sed -i -e 's/^\(\$INTERACTIVE.*\)$/\1 || true/g' \ 36 | /usr/libexec/pscheduler/internals/db-update 37 | echo "Loader patched" 38 | 39 | # Build the database 40 | pscheduler internal db-update 41 | echo "Database loaded" 42 | 43 | # Set the password in the pScheduler database to match what's on the 44 | # disk. 45 | ( \ 46 | printf "ALTER ROLE pscheduler WITH PASSWORD '" \ 47 | && tr -d "\n" < "/etc/pscheduler/database/database-password" \ 48 | && printf "';\n" \ 49 | ) | postgresql-load 50 | echo "Password set" 51 | 52 | # Shut down 53 | su - postgres -c "/usr/pgsql-10/bin/pg_ctl stop -w -t 60" 54 | echo "Server stopped" 55 | 56 | exit 0 57 | -------------------------------------------------------------------------------- /rsyslog/listen.conf: -------------------------------------------------------------------------------- 1 | # comment out to work with docker and CentOS7 2 | #$SystemLogSocketName /run/systemd/journal/syslog 3 | -------------------------------------------------------------------------------- /rsyslog/owamp-syslog.conf: -------------------------------------------------------------------------------- 1 | # Save owamp messages to /var/log/perfsonar/owamp.log 2 | local5.* -/var/log/perfsonar/owamp.log 3 | -------------------------------------------------------------------------------- /rsyslog/python-pscheduler.conf: -------------------------------------------------------------------------------- 1 | # Syslog configuration for pScheduler 2 | 3 | local4.* /var/log/pscheduler/pscheduler.log 4 | -------------------------------------------------------------------------------- /rsyslog/rsyslog: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | ### BEGIN INIT INFO 3 | # Provides: rsyslog 4 | # Required-Start: $remote_fs $time 5 | # Required-Stop: umountnfs $time 6 | # X-Stop-After: sendsigs 7 | # Default-Start: 2 3 4 5 8 | # Default-Stop: 0 1 6 9 | # Short-Description: enhanced syslogd 10 | # Description: Rsyslog is an enhanced multi-threaded syslogd. 11 | # It is quite compatible to stock sysklogd and can be 12 | # used as a drop-in replacement. 13 | ### END INIT INFO 14 | 15 | # 16 | # Author: Michael Biebl 17 | # 18 | 19 | # PATH should only include /usr/* if it runs after the mountnfs.sh script 20 | PATH=/sbin:/usr/sbin:/bin:/usr/bin 21 | DESC="enhanced syslogd" 22 | NAME=rsyslog 23 | 24 | RSYSLOGD=rsyslogd 25 | DAEMON=/usr/sbin/rsyslogd 26 | PIDFILE=/run/rsyslogd.pid 27 | 28 | SCRIPTNAME=/etc/init.d/$NAME 29 | 30 | # Exit if the package is not installed 31 | [ -x "$DAEMON" ] || exit 0 32 | 33 | # Read configuration variable file if it is present 34 | [ -r /etc/default/$NAME ] && . /etc/default/$NAME 35 | 36 | # Define LSB log_* functions. 37 | . /lib/lsb/init-functions 38 | 39 | do_start() 40 | { 41 | # Return 42 | # 0 if daemon has been started 43 | # 1 if daemon was already running 44 | # other if daemon could not be started or a failure occured 45 | start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- $RSYSLOGD_OPTIONS 46 | } 47 | 48 | do_stop() 49 | { 50 | # Return 51 | # 0 if daemon has been stopped 52 | # 1 if daemon was already stopped 53 | # other if daemon could not be stopped or a failure occurred 54 | start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --exec $DAEMON 55 | } 56 | 57 | # 58 | # Tell rsyslogd to close all open files 59 | # 60 | do_rotate() { 61 | start-stop-daemon --stop --signal HUP --quiet --pidfile $PIDFILE --exec $DAEMON 62 | } 63 | 64 | create_xconsole() { 65 | XCONSOLE=/dev/xconsole 66 | if [ "$(uname -s)" != "Linux" ]; then 67 | XCONSOLE=/run/xconsole 68 | ln -sf $XCONSOLE /dev/xconsole 69 | fi 70 | if [ ! -e $XCONSOLE ]; then 71 | mknod -m 640 $XCONSOLE p 72 | chown root:adm $XCONSOLE 73 | [ -x /sbin/restorecon ] && /sbin/restorecon $XCONSOLE 74 | fi 75 | } 76 | 77 | sendsigs_omit() { 78 | OMITDIR=/run/sendsigs.omit.d 79 | mkdir -p $OMITDIR 80 | ln -sf $PIDFILE $OMITDIR/rsyslog 81 | } 82 | 83 | case "$1" in 84 | start) 85 | log_daemon_msg "Starting $DESC" "$RSYSLOGD" 86 | create_xconsole 87 | do_start 88 | case "$?" in 89 | 0) sendsigs_omit 90 | log_end_msg 0 ;; 91 | 1) log_progress_msg "already started" 92 | log_end_msg 0 ;; 93 | *) log_end_msg 1 ;; 94 | esac 95 | 96 | ;; 97 | stop) 98 | log_daemon_msg "Stopping $DESC" "$RSYSLOGD" 99 | do_stop 100 | case "$?" in 101 | 0) log_end_msg 0 ;; 102 | 1) log_progress_msg "already stopped" 103 | log_end_msg 0 ;; 104 | *) log_end_msg 1 ;; 105 | esac 106 | 107 | ;; 108 | rotate) 109 | log_daemon_msg "Closing open files" "$RSYSLOGD" 110 | do_rotate 111 | log_end_msg $? 112 | ;; 113 | restart|force-reload) 114 | $0 stop 115 | $0 start 116 | ;; 117 | try-restart) 118 | $0 status >/dev/null 2>&1 && $0 restart 119 | ;; 120 | status) 121 | status_of_proc -p $PIDFILE $DAEMON $RSYSLOGD && exit 0 || exit $? 
122 | ;;
123 | *)
124 | echo "Usage: $SCRIPTNAME {start|stop|rotate|restart|force-reload|try-restart|status}" >&2
125 | exit 3
126 | ;;
127 | esac
128 | 
129 | :
130 | 
--------------------------------------------------------------------------------
/rsyslog/rsyslog.conf:
--------------------------------------------------------------------------------
1 | # rsyslog configuration file
2 | #
3 | # NOTE: this file modified to work with Docker, as described here:
4 | # http://www.projectatomic.io/blog/2014/09/running-syslog-within-a-docker-container/
5 | 
6 | # For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
7 | # If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
8 | 
9 | #### MODULES ####
10 | 
11 | # The imuxsock module below is used as the message source here; imjournal is disabled for Docker.
12 | $ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
13 | #$ModLoad imjournal # provides access to the systemd journal
14 | #$ModLoad imklog # reads kernel messages (the same are read from journald)
15 | #$ModLoad immark # provides --MARK-- message capability
16 | 
17 | # Provides UDP syslog reception
18 | #$ModLoad imudp
19 | #$UDPServerRun 514
20 | 
21 | # Provides TCP syslog reception
22 | #$ModLoad imtcp
23 | #$InputTCPServerRun 514
24 | 
25 | 
26 | 
27 | #### GLOBAL DIRECTIVES ####
28 | 
29 | # Where to place auxiliary files
30 | $WorkDirectory /var/lib/rsyslog
31 | 
32 | # Use default timestamp format
33 | $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
34 | 
35 | # File syncing capability is disabled by default. This feature is usually not required,
36 | # not useful and an extreme performance hit
37 | #$ActionFileEnableSync on
38 | 
39 | # Include all config files in /etc/rsyslog.d/
40 | $IncludeConfig /etc/rsyslog.d/*.conf
41 | 
42 | # The stock config turns off message reception via the local log socket
43 | # because local messages are retrieved through imjournal.
44 | #$OmitLocalLogging on
45 | # For Docker, keep local log socket reception enabled:
46 | $OmitLocalLogging off
47 | 
48 | # File to store the position in the journal
49 | #$IMJournalStateFile imjournal.state
50 | 
51 | 
52 | #### RULES ####
53 | 
54 | # Log all kernel messages to the console.
55 | # Logging much else clutters up the screen.
56 | #kern.* /dev/console
57 | 
58 | # Log anything (except mail) of level info or higher.
59 | # Don't log private authentication messages!
60 | *.info;mail.none;authpriv.none;cron.none /var/log/messages
61 | 
62 | # The authpriv file has restricted access.
63 | authpriv.* /var/log/secure
64 | 
65 | # Log all the mail messages in one place.
66 | mail.* -/var/log/maillog
67 | 
68 | 
69 | # Log cron stuff
70 | cron.* /var/log/cron
71 | 
72 | # Everybody gets emergency messages
73 | *.emerg :omusrmsg:*
74 | 
75 | # Save news errors of level crit and higher in a special file.
76 | uucp,news.crit /var/log/spooler
77 | 
78 | # Save boot messages also to boot.log
79 | local7.* /var/log/boot.log
80 | 
81 | 
82 | 
83 | # ### begin forwarding rule ###
84 | # The statements between the begin ... end define a SINGLE forwarding
85 | # rule. They belong together, do NOT split them. If you create multiple
86 | # forwarding rules, duplicate the whole block!
87 | # Remote Logging (we use TCP for reliable delivery)
88 | #
89 | # An on-disk queue is created for this action. If the remote host is
90 | # down, messages are spooled to disk and sent when it is up again.
91 | #$ActionQueueFileName fwdRule1 # unique name prefix for spool files
92 | #$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
93 | #$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
94 | #$ActionQueueType LinkedList # run asynchronously
95 | #$ActionResumeRetryCount -1 # infinite retries if host is down
96 | # remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
97 | #*.* @@remote-host:514
98 | # ### end of the forwarding rule ###
99 | 
100 | 
101 | 
--------------------------------------------------------------------------------
/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | nodaemon=true
3 | 
4 | [program:rsyslog]
5 | command=rsyslogd -n
6 | redirect_stderr=true
7 | 
8 | [program:owamp-server]
9 | chown=owamp:owamp
10 | command=/usr/sbin/owampd -c /etc/owamp-server -R /var/run
11 | 
12 | [program:twamp-server]
13 | chown=twamp:twamp
14 | command=/usr/sbin/twampd -c /etc/twamp-server -R /var/run
15 | 
16 | [program:perfsonar-lsregistrationdaemon]
17 | chown=perfsonar:perfsonar
18 | command=/usr/lib/perfsonar/bin/lsregistrationdaemon.pl --config=/etc/perfsonar/lsregistrationdaemon.conf --pidfile=/var/run/lsregistrationdaemon.pid --logger=/etc/perfsonar/lsregistrationdaemon-logger.conf --user=perfsonar --group=perfsonar
19 | 
20 | [program:postgresql] ; postgres is needed by pscheduler
21 | command=/usr/lib/postgresql/14/bin/postgres -D /var/lib/postgresql/14/main -c config_file=/etc/postgresql/14/main/postgresql.conf
22 | user=postgres
23 | 
24 | [program:apache2] ; apache is needed by pscheduler
25 | command=/usr/bin/pidproxy /var/run/apache2/apache2.pid /bin/bash -c "/usr/sbin/apache2ctl -DFOREGROUND -k start"
26 | 
27 | [program:pscheduler-ticker]
28 | chown=pscheduler:pscheduler
29 | command=/usr/bin/python3 /usr/lib/pscheduler/daemons/ticker --dsn @/etc/pscheduler/database/database-dsn
30 | redirect_stderr=true
31 | stdout_logfile = /dev/stdout
32 | stdout_logfile_maxbytes = 0
33 | 
34 | [program:pscheduler-archiver]
35 | chown=pscheduler:pscheduler
36 | command=/usr/bin/python3 /usr/lib/pscheduler/daemons/archiver --dsn @/etc/pscheduler/database/database-dsn
37 | redirect_stderr=true
38 | stdout_logfile = /dev/stdout
39 | stdout_logfile_maxbytes = 0
40 | 
41 | [program:pscheduler-scheduler]
42 | chown=pscheduler:pscheduler
43 | command=/usr/bin/python3 /usr/lib/pscheduler/daemons/scheduler --dsn @/etc/pscheduler/database/database-dsn
44 | redirect_stderr=true
45 | stdout_logfile = /dev/stdout
46 | stdout_logfile_maxbytes = 0
47 | 
48 | [program:pscheduler-runner]
49 | chown=pscheduler:pscheduler
50 | command=/usr/bin/python3 /usr/lib/pscheduler/daemons/runner --dsn @/etc/pscheduler/database/database-dsn
51 | redirect_stderr=true
52 | stdout_logfile = /dev/stdout
53 | stdout_logfile_maxbytes = 0
54 | 
55 | [program:psconfig-pscheduler-agent]
56 | chown=perfsonar:perfsonar
57 | command=/usr/bin/python3 /usr/lib/perfsonar/psconfig/bin/psconfig_pscheduler_agent --config=/etc/perfsonar/psconfig/pscheduler-agent.json --logger=/etc/perfsonar/psconfig/pscheduler-agent-logger.conf
58 | 
59 | [program:perfsonar-host-exporter]
60 | chown=perfsonar:perfsonar
61 | command=/usr/bin/python3 /usr/lib/perfsonar/host_metrics/perfsonar_host_exporter
--------------------------------------------------------------------------------
/systemd/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | 
3 | ENV container docker
4 | ENV
LC_ALL C 5 | ENV DEBIAN_FRONTEND noninteractive 6 | 7 | RUN sed -i 's/# deb/deb/g' /etc/apt/sources.list 8 | 9 | RUN apt-get update \ 10 | && apt-get install -y systemd systemd-sysv software-properties-common \ 11 | && apt-get clean \ 12 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 13 | 14 | RUN cd /lib/systemd/system/sysinit.target.wants/ \ 15 | && ls | grep -v systemd-tmpfiles-setup | xargs rm -f $1 16 | 17 | RUN rm -f /lib/systemd/system/multi-user.target.wants/* \ 18 | /etc/systemd/system/*.wants/* \ 19 | /lib/systemd/system/local-fs.target.wants/* \ 20 | /lib/systemd/system/sockets.target.wants/*udev* \ 21 | /lib/systemd/system/sockets.target.wants/*initctl* \ 22 | /lib/systemd/system/basic.target.wants/* \ 23 | /lib/systemd/system/anaconda.target.wants/* \ 24 | /lib/systemd/system/plymouth* \ 25 | /lib/systemd/system/systemd-update-utmp* 26 | 27 | # ----------------------------------------------------------------------- 28 | 29 | RUN apt-get update \ 30 | && apt-get install -y vim curl gnupg rsyslog net-tools sysstat iproute2 dnsutils tcpdump \ 31 | && apt-get clean \ 32 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 33 | 34 | # ----------------------------------------------------------------------- 35 | 36 | # 37 | # PostgreSQL Server 38 | # 39 | 40 | ENV PG_VERSION=14 \ 41 | PG_USER=postgres 42 | 43 | ENV PG_HOME=/etc/postgresql/$PG_VERSION/main \ 44 | PG_BINDIR=/usr/lib/postgresql/$PG_VERSION/bin \ 45 | PGDATA=/var/lib/postgresql/$PG_VERSION/main 46 | 47 | RUN apt-get update \ 48 | && apt-get install -y postgresql-$PG_VERSION postgresql-client-$PG_VERSION \ 49 | && rm -rf $PGDATA 50 | 51 | RUN su - $PG_USER -c "$PG_BINDIR/pg_ctl init -D $PGDATA" 52 | 53 | COPY --chown=$PG_USER:$PG_USER postgresql/postgresql.conf $PG_HOME/postgresql.conf 54 | COPY --chown=$PG_USER:$PG_USER postgresql/pg_hba.conf $PG_HOME/pg_hba.conf 55 | 56 | RUN su - $PG_USER -c "$PG_BINDIR/pg_ctl start -w -t 60 -D $PGDATA" 57 | 58 | # ----------------------------------------------------------------------------- 59 | 60 | # Rsyslog 61 | 62 | COPY rsyslog/rsyslog /etc/init.d/rsyslog 63 | COPY rsyslog/rsyslog.conf /etc/rsyslog.conf 64 | COPY rsyslog/listen.conf /etc/rsyslog.d/listen.conf 65 | COPY rsyslog/python-pscheduler.conf /etc/rsyslog.d/python-pscheduler.conf 66 | COPY rsyslog/owamp-syslog.conf /etc/rsyslog.d/owamp-syslog.conf 67 | 68 | # ----------------------------------------------------------------------- 69 | 70 | RUN curl -o /etc/apt/sources.list.d/perfsonar-minor-staging.list http://downloads.perfsonar.net/debian/perfsonar-minor-staging.list \ 71 | && curl http://downloads.perfsonar.net/debian/perfsonar-staging.gpg.key | apt-key add - \ 72 | && add-apt-repository universe 73 | 74 | RUN apt-get update \ 75 | && apt-get install -y perfsonar-testpoint \ 76 | && apt-get clean \ 77 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 78 | 79 | # ----------------------------------------------------------------------------- 80 | 81 | # The following ports are used: 82 | # pScheduler: 443 83 | # owamp:861, 8760-9960 (tcp and udp) 84 | # twamp: 862, 18760-19960 (tcp and udp) 85 | # simplestream: 5890-5900 86 | # nuttcp: 5000, 5101 87 | # iperf2: 5001 88 | # iperf3: 5201 89 | # ntp: 123 (udp) 90 | EXPOSE 123/udp 443 861 862 5000 5001 5101 5201 5890-5900 8760-9960/tcp 8760-9960/udp 18760-19960/tcp 18760-19960/udp 91 | 92 | # add cgroups, logging, and postgres directory 93 | VOLUME [ "/var/lib/postgresql", "/var/log", "/sys/fs/cgroup" ] 94 | 95 | CMD [ "/lib/systemd/systemd" ] 
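#
# Example (sketch only, not part of the image build): one way to build and run
# this systemd-based image from the repository root, so the COPY paths above
# resolve. The image/container name and the use of --privileged and --net=host
# are illustrative assumptions; the compose files in this repository may use
# different run settings.
#
#   docker build -t perfsonar-testpoint-systemd -f systemd/Dockerfile .
#   docker run -d --privileged --net=host --name perfsonar-testpoint-systemd \
#       perfsonar-testpoint-systemd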
--------------------------------------------------------------------------------
/utils/psdocker:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | 
3 | ###############################################################################
4 | # NAME: psdocker
5 | #
6 | # USAGE: psdocker [[start|stop|status] [tag]]
7 | #
8 | # DESCRIPTION:
9 | # A script to easily start and stop the perfsonar/testpoint container,
10 | # especially on Mac OS X where the container does not always behave as
11 | # expected with default settings. If given no parameters this script will start
12 | # the latest testpoint bundle and log in with a bash shell. If the container is
13 | # already running it will just log in. The stop command can be used to stop the
14 | # container and status to obtain the container ID (if running). It takes an
15 | # optional "tag" to get other versions of the docker container. See the
16 | # following URL for a list of valid tags:
17 | # * https://hub.docker.com/r/perfsonar/testpoint/tags
18 | #
19 | ###############################################################################
20 | 
21 | #get action of start (default), stop or status
22 | action=$1
23 | if [ -z "$action" ]; then
24 | action="start"
25 | fi
26 | #get tag or default to latest
27 | tag=$2
28 | if [ -z "$tag" ]; then
29 | tag="latest"
30 | fi
31 | #set bundle
32 | bundle="testpoint:${tag}"
33 | 
34 | #Set docker hostname
35 | hostname="perfsonar-docker"
36 | 
37 | if [ "$action" == "start" ]; then
38 | #check for container and start if not running
39 | container_id=$(docker ps | grep "$bundle" | awk '{print $1}')
40 | if [ -z "$container_id" ]; then
41 | echo "Setting-up new container..."
42 | docker pull perfsonar/${bundle}
43 | docker run -h $hostname -d --net=host perfsonar/${bundle}
44 | container_id=$(docker ps | grep "$bundle" | awk '{print $1}')
45 | sleep 3
46 | ip_address=$(docker exec -it $container_id ifconfig eth0 | sed -En 's/localhost//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p')
47 | docker exec -it $container_id bash -c "echo \"$ip_address $hostname\" >> /etc/hosts"
48 | else
49 | echo "Using existing container $container_id"
50 | fi
51 | docker exec -it $container_id bash
52 | elif [ "$action" == "stop" ]; then
53 | container_id=$(docker ps | grep "$bundle" | awk '{print $1}')
54 | if [ -z "$container_id" ]; then
55 | >&2 echo "Unable to find container running perfsonar/${bundle}"
56 | exit 1
57 | fi
58 | docker stop $container_id
59 | docker rm $container_id
60 | elif [ "$action" == "status" ]; then
61 | container_id=$(docker ps | grep "$bundle" | awk '{print $1}')
62 | if [ -z "$container_id" ]; then
63 | >&2 echo "Unable to find container running perfsonar/${bundle}"
64 | exit 1
65 | else
66 | echo "Container running with id $container_id"
67 | exit 0
68 | fi
69 | else
70 | echo ""
71 | echo "Usage: $0 [[start|stop|status] [tag]]"
72 | echo ""
73 | if [ "$action" == "help" ] || [ "$action" == "--help" ] || [ "$action" == "-h" ]; then
74 | exit 0
75 | else
76 | >&2 echo "Unknown action $action"
77 | echo ""
78 | exit 1
79 | fi
80 | fi
81 | 
--------------------------------------------------------------------------------
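As a quick illustration of the psdocker usage described above (a sketch, assuming the script is executable; these invocations are examples, not output captured from this repository, and the "5.1.0" tag is hypothetical):

    utils/psdocker              # same as "start": pull/run perfsonar/testpoint:latest and open a shell
    utils/psdocker status       # print the ID of the running container, if any
    utils/psdocker stop         # stop and remove the running container
    utils/psdocker start 5.1.0  # start from a specific tag instead of "latest"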