├── .github
│   ├── bin
│   │   └── free-disk-space.sh
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── LICENSE.txt
├── Makefile
├── README.md
├── archived
│   ├── accumulo
│   │   ├── Dockerfile
│   │   └── files
│   │       └── etc
│   │           ├── accumulo.properties
│   │           ├── supervisord.conf
│   │           └── supervisord.d
│   │               ├── accumulo-gc.conf
│   │               ├── accumulo-manager.conf
│   │               ├── accumulo-monitor.conf
│   │               ├── accumulo-tserver.conf
│   │               └── zookeeper.conf
│   ├── centos6-oj8
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   └── yum.repos.d
│   │       │       └── CentOS-Base.repo
│   │       ├── opt
│   │       │   └── trinodev
│   │       │       └── site-override.xslt
│   │       └── usr
│   │           └── local
│   │               └── bin
│   │                   ├── apply-all-site-xml-overrides
│   │                   └── apply-site-xml-override
│   ├── centos7-oj8
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── opt
│   │       │   └── trinodev
│   │       │       └── site-override.xslt
│   │       └── usr
│   │           └── local
│   │               └── bin
│   │                   ├── apply-all-site-xml-overrides
│   │                   └── apply-site-xml-override
│   ├── gpdb-6
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   └── files
│   │       ├── etc
│   │       │   ├── security
│   │       │   │   └── limits.conf
│   │       │   └── sysctl.conf
│   │       ├── home
│   │       │   └── gpadmin
│   │       │       ├── .bash_profile
│   │       │       ├── gpdb-hosts
│   │       │       └── gpinitsystem_singlenode
│   │       └── usr
│   │           └── local
│   │               └── bin
│   │                   ├── configure_gpdb.sh
│   │                   ├── entrypoint.sh
│   │                   └── psql
│   ├── hdp3.1-hive
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── hadoop
│   │       │   │   └── conf
│   │       │   │       ├── capacity-scheduler.xml
│   │       │   │       ├── core-site.xml
│   │       │   │       ├── hadoop-env.sh
│   │       │   │       ├── hdfs-site.xml
│   │       │   │       ├── log4j.properties
│   │       │   │       ├── mapred-site.xml
│   │       │   │       └── yarn-site.xml
│   │       │   ├── hive
│   │       │   │   └── conf
│   │       │   │       ├── beeline-site.xml
│   │       │   │       ├── hive-site.xml
│   │       │   │       └── hiveserver2-site.xml
│   │       │   ├── supervisord.conf
│   │       │   ├── supervisord.d
│   │       │   │   ├── hdfs-datanode.conf
│   │       │   │   ├── hdfs-namenode.conf
│   │       │   │   ├── hive-metastore.conf
│   │       │   │   ├── hive-server2.conf
│   │       │   │   ├── mysql-metastore.conf
│   │       │   │   ├── socks-proxy.conf
│   │       │   │   ├── sshd.conf
│   │       │   │   ├── yarn-nodemanager.conf
│   │       │   │   └── yarn-resourcemanager.conf
│   │       │   └── tez
│   │       │       └── conf
│   │       │           └── tez-site.xml
│   │       └── root
│   │           └── setup.sh
│   └── phoenix5
│       ├── Dockerfile
│       └── files
│           └── etc
│               ├── hbase
│               │   └── hbase-site.xml
│               ├── supervisord.conf
│               └── supervisord.d
│                   ├── hbase-master.conf
│                   ├── hbase-regionserver.conf
│                   └── zookeeper.conf
├── bin
│   ├── build.sh
│   ├── depend.sh
│   ├── flag.sh
│   ├── lib.sh
│   ├── push.sh
│   ├── tag.sh
│   └── test.sh
├── build
│   └── sphinx
│       ├── Dockerfile
│       ├── requirements.in
│       └── requirements.txt
├── etc
│   └── compose
│       ├── hdp3.1-hive
│       │   └── docker-compose.yml
│       ├── hive3.1-hive
│       │   └── docker-compose.yml
│       ├── hive4.0-hive
│       │   └── docker-compose.yml
│       ├── kerberos
│       │   └── docker-compose.yml
│       ├── openldap-active-directory
│       │   └── docker-compose.yml
│       ├── openldap
│       │   └── docker-compose.yml
│       ├── polaris-catalog
│       │   └── docker-compose.yml
│       ├── spark3-delta
│       │   └── docker-compose.yml
│       ├── spark3-hudi
│       │   └── docker-compose.yml
│       ├── spark3-iceberg
│       │   └── docker-compose.yml
│       └── unity-catalog
│           └── docker-compose.yml
├── testing
│   ├── almalinux9-oj11
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── opt
│   │       │   └── trinodev
│   │       │       └── site-override.xslt
│   │       └── usr
│   │           └── local
│   │               └── bin
│   │                   ├── apply-all-site-xml-overrides
│   │                   └── apply-site-xml-override
│   ├── almalinux9-oj17-openldap-active-directory
│   │   ├── Dockerfile
│   │   └── files
│   │       └── etc
│   │           └── openldap
│   │               └── setup
│   │                   └── msuser.ldif
│   ├── almalinux9-oj17-openldap-base
│   │   ├── Dockerfile
│   │   ├── files
│   │   │   ├── etc
│   │   │   │   ├── openldap
│   │   │   │   │   ├── certs
│   │   │   │   │   │   ├── active-directory-certificate.crt
│   │   │   │   │   │   ├── openldap-certificate.pem
│   │   │   │   │   │   ├── private.pem
│   │   │   │   │   │   ├── serial.txt
│   │   │   │   │   │   └── trino-coordinator-for-ldap.pem
│   │   │   │   │   └── setup
│   │   │   │   │       ├── createOU.ldif
│   │   │   │   │       ├── ldap_disable_bind_anon.ldif
│   │   │   │   │       └── modify_server.ldif
│   │   │   │   ├── supervisord.conf
│   │   │   │   └── supervisord.d
│   │   │   │       └── slapd.conf
│   │   │   └── usr
│   │   │       └── bin
│   │   │           └── wait-for-slapd.sh
│   │   └── generate-certificates.md
│   ├── almalinux9-oj17-openldap-referrals
│   │   ├── Dockerfile
│   │   └── files
│   │       └── etc
│   │           └── openldap
│   │               └── setup
│   │                   └── createReferrals.ldif
│   ├── almalinux9-oj17-openldap
│   │   ├── Dockerfile
│   │   └── files
│   │       └── etc
│   │           └── openldap
│   │               └── setup
│   │                   ├── memberof.ldif
│   │                   └── refint.ldif
│   ├── almalinux9-oj17
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── opt
│   │       │   └── trinodev
│   │       │       └── site-override.xslt
│   │       └── usr
│   │           └── local
│   │               └── bin
│   │                   ├── apply-all-site-xml-overrides
│   │                   └── apply-site-xml-override
│   ├── hdp3.1-hive-kerberized-2
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── hive
│   │       │   │   └── conf
│   │       │   │       └── hiveserver2-site.xml
│   │       │   ├── krb5.conf
│   │       │   └── supervisord.d
│   │       │       └── kdc.conf
│   │       ├── overrides
│   │       │   └── etc
│   │       │       ├── hadoop
│   │       │       │   └── conf
│   │       │       │       ├── core-site.xml
│   │       │       │       ├── hdfs-site.xml
│   │       │       │       ├── mapred-site.xml
│   │       │       │       └── yarn-site.xml
│   │       │       └── hive
│   │       │           └── conf
│   │       │               └── hive-site.xml
│   │       └── var
│   │           └── kerberos
│   │               └── krb5kdc
│   │                   ├── kadm5-other.acl
│   │                   ├── kadm5.acl
│   │                   └── kdc.conf
│   ├── hdp3.1-hive-kerberized-kms
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── hadoop-kms
│   │       │   │   └── conf
│   │       │   │       ├── core-site.xml
│   │       │   │       ├── kms-acls.xml
│   │       │   │       ├── kms-site.xml
│   │       │   │       └── passwordfile
│   │       │   ├── hadoop
│   │       │   │   └── conf
│   │       │   │       ├── core-site.xml
│   │       │   │       ├── hdfs-site.xml
│   │       │   │       └── taskcontroller.cfg
│   │       │   ├── hive
│   │       │   │   └── conf
│   │       │   │       ├── hive-site.xml
│   │       │   │       └── hiveserver2-site.xml
│   │       │   └── supervisord.d
│   │       │       └── kms.conf
│   │       └── root
│   │           └── setup_kms.sh
│   ├── hdp3.1-hive-kerberized
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── hive
│   │       │   │   └── conf
│   │       │   │       └── hiveserver2-site.xml
│   │       │   ├── krb5.conf
│   │       │   └── supervisord.d
│   │       │       └── kdc.conf
│   │       ├── overrides
│   │       │   └── etc
│   │       │       ├── hadoop
│   │       │       │   └── conf
│   │       │       │       ├── core-site.xml
│   │       │       │       ├── hdfs-site.xml
│   │       │       │       ├── mapred-site.xml
│   │       │       │       └── yarn-site.xml
│   │       │       └── hive
│   │       │           └── conf
│   │       │               └── hive-site.xml
│   │       └── var
│   │           └── kerberos
│   │               └── krb5kdc
│   │                   ├── kadm5-other.acl
│   │                   ├── kadm5.acl
│   │                   └── kdc.conf
│   ├── hdp3.1-hive
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── hadoop
│   │       │   │   └── conf
│   │       │   │       ├── capacity-scheduler.xml
│   │       │   │       ├── core-site.xml
│   │       │   │       ├── hadoop-env.sh
│   │       │   │       ├── hdfs-site.xml
│   │       │   │       ├── log4j.properties
│   │       │   │       ├── mapred-site.xml
│   │       │   │       └── yarn-site.xml
│   │       │   ├── hive
│   │       │   │   └── conf
│   │       │   │       ├── beeline-site.xml
│   │       │   │       ├── hive-site.xml
│   │       │   │       └── hiveserver2-site.xml
│   │       │   ├── supervisord.conf
│   │       │   ├── supervisord.d
│   │       │   │   ├── hdfs-datanode.conf
│   │       │   │   ├── hdfs-namenode.conf
│   │       │   │   ├── hive-metastore.conf
│   │       │   │   ├── hive-server2.conf
│   │       │   │   ├── mysql-metastore.conf
│   │       │   │   ├── socks-proxy.conf
│   │       │   │   ├── sshd.conf
│   │       │   │   ├── yarn-nodemanager.conf
│   │       │   │   └── yarn-resourcemanager.conf
│   │       │   └── tez
│   │       │       └── conf
│   │       │           └── tez-site.xml
│   │       └── root
│   │           └── setup.sh
│   ├── hive3.1-hive
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── hadoop-init.d
│   │       │   │   ├── init-hdfs.sh
│   │       │   │   └── set-aws-creds.sh
│   │       │   ├── supervisord.conf
│   │       │   └── supervisord.d
│   │       │       ├── hdfs-datanode.conf
│   │       │       ├── hdfs-namenode.conf
│   │       │       ├── hive-metastore.conf
│   │       │       ├── hive-server2.conf
│   │       │       ├── mysql-metastore.conf
│   │       │       ├── socks-proxy.conf
│   │       │       └── sshd.conf
│   │       ├── opt
│   │       │   ├── hadoop
│   │       │   │   └── etc
│   │       │   │       └── hadoop
│   │       │   │           ├── core-site.xml
│   │       │   │           ├── hadoop-env.sh
│   │       │   │           └── hdfs-site.xml
│   │       │   └── hive
│   │       │       └── conf
│   │       │           ├── hive-env.sh
│   │       │           └── hive-site.xml
│   │       └── root
│   │           ├── entrypoint.sh
│   │           └── setup.sh
│   ├── hive4.0-hive
│   │   └── Dockerfile
│   ├── kerberos
│   │   ├── Dockerfile
│   │   └── files
│   │       ├── etc
│   │       │   ├── krb5.conf
│   │       │   ├── supervisord.conf
│   │       │   └── supervisord.d
│   │       │       └── kdc.conf
│   │       ├── opt
│   │       │   └── entrypoint.sh
│   │       ├── usr
│   │       │   └── local
│   │       │       └── bin
│   │       │           └── create_principal
│   │       └── var
│   │           └── kerberos
│   │               └── krb5kdc
│   │                   ├── kadm5-other.acl
│   │                   ├── kadm5.acl
│   │                   └── kdc.conf
│   ├── polaris-catalog
│   │   └── Dockerfile
│   ├── spark3-delta
│   │   └── Dockerfile
│   ├── spark3-hudi
│   │   └── Dockerfile
│   ├── spark3-iceberg
│   │   └── Dockerfile
│   └── unity-catalog
│       └── Dockerfile
└── version
/.github/bin/free-disk-space.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 |
4 | function list_installed_packages()
5 | {
6 | apt list --installed "$1" 2>/dev/null | awk -F'/' 'NR>1{print $1}'
7 | }
8 |
9 | function free_up_disk_space_ubuntu()
10 | {
11 | local packages=(
12 | 'azure-cli'
13 | 'aspnetcore-*'
14 | 'firefox*'
15 | 'google-chrome-*'
16 | 'google-cloud-*'
17 | 'libmono-*'
18 | 'llvm-*'
19 | 'mysql-server-core-*'
20 | 'powershell*'
21 | 'microsoft-edge*')
22 |
23 | for package in "${packages[@]}"; do
24 | mapfile -t installed_packages < <(list_installed_packages "${package}")
25 | if [ ${#installed_packages[@]} -eq 0 ]; then
26 | echo "No packages matched by pattern ${package}"
27 | else
28 | echo "Removing packages by pattern ${package}: ${installed_packages[*]}"
29 | sudo apt-get --auto-remove -y purge "${installed_packages[@]}"
30 | fi
31 | done
32 |
33 | echo "Autoremoving packages"
34 | sudo apt-get autoremove -y
35 |
36 | echo "Autocleaning"
37 | sudo apt-get autoclean -y
38 |
39 | echo "Removing toolchains"
40 | sudo rm -rf \
41 | /usr/local/graalvm \
42 | /usr/local/lib/android/ \
43 | /usr/share/dotnet/ \
44 | /opt/ghc/ \
45 | /usr/local/share/boost/ \
46 | "${AGENT_TOOLSDIRECTORY}"
47 |
48 | echo "Prune docker images"
49 | sudo docker system prune --all -f
50 | }
51 |
52 | echo "Disk space usage before cleaning:"
53 | df -k .
54 |
55 | echo "::group::Clearing up disk usage"
56 | free_up_disk_space_ubuntu
57 | echo "::endgroup::"
58 |
59 | echo "Disk space usage after cleaning:"
60 | df -k .
61 |
62 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | build-images:
7 | runs-on: ubuntu-latest
8 | timeout-minutes: 60
9 | strategy:
10 | fail-fast: false
11 | matrix:
12 | include:
13 | - image: testing/almalinux9-oj17
14 | platforms: linux/amd64,linux/arm64
15 | - image: testing/almalinux9-oj17-openldap-referrals
16 | platforms: linux/amd64,linux/arm64
17 | test: openldap
18 | - image: testing/almalinux9-oj17-openldap-active-directory
19 | platforms: linux/amd64,linux/arm64
20 | test: openldap-active-directory
21 | - image: testing/polaris-catalog
22 | test: polaris-catalog
23 | - image: testing/spark3-iceberg
24 | platforms: linux/amd64,linux/arm64
25 | test: spark3-iceberg
26 | - image: testing/spark3-delta
27 | platforms: linux/amd64,linux/arm64
28 | test: spark3-delta
29 | - image: testing/spark3-hudi
30 | platforms: linux/amd64,linux/arm64
31 | test: spark3-hudi
32 | - image: testing/kerberos
33 | platforms: linux/amd64,linux/arm64
34 | test: kerberos
35 | - image: testing/hive3.1-hive
36 | platforms: linux/amd64,linux/arm64
37 | test: hive3.1-hive
38 | - image: testing/hive4.0-hive
39 | # Haven't added `linux/arm64` platform as test image fails with `The requested image's platform (linux/arm64) does not match the detected host platform (linux/amd64/v3) and no specific platform was requested`
40 | platforms: linux/amd64
41 | test: hive4.0-hive
42 | - image: testing/hdp3.1-hive-kerberized
43 | test: hdp3.1-hive
44 | - image: testing/unity-catalog
45 | test: unity-catalog
46 | - image: build/sphinx
47 | platforms: linux/amd64,linux/arm64
48 | steps:
49 | - uses: actions/checkout@v3
50 | with:
51 | fetch-depth: 0 # checkout tags so version in Manifest is set properly
52 | - name: Set up QEMU
53 | uses: docker/setup-qemu-action@v2
54 | with:
55 | ## Temporary due to bug in qemu: https://github.com/docker/setup-qemu-action/issues/198
56 | image: tonistiigi/binfmt:qemu-v7.0.0-28
57 | - name: Build ${{ matrix.image }}
58 | env:
59 | PLATFORMS: ${{ matrix.platforms }}
60 | run: make "${{ matrix.image }}"
61 | - name: Test ${{ matrix.test }}
62 | env:
63 | PLATFORMS: ${{ matrix.platforms }}
64 | if: ${{ matrix.test != '' }}
65 | shell: 'script -q -e -c "bash {0}"'
66 | run: make test IMAGE_TO_TEST="${{ matrix.test }}"
67 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | *.ipr
3 | *.iws
4 | .DS_Store
5 | .idea
6 | depends
7 | tmp
8 | flags
9 | graphviz
10 | dependency_graph.svg
11 | *.swp
12 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/accumulo.properties:
--------------------------------------------------------------------------------
1 | instance.volumes=file:///tmp/accumulo
2 | instance.zookeeper.host=localhost:2181
3 | instance.secret=accumulofortrino
4 | tserver.memory.maps.native.enabled=true
5 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | nodaemon = true
8 | directory = /tmp
9 | strip_ansi = false
10 | user=root
11 |
12 | [unix_http_server]
13 | file = /tmp/supervisor.sock
14 |
15 | [rpcinterface:supervisor]
16 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
17 |
18 | [supervisorctl]
19 | serverurl = unix:///tmp/supervisor.sock
20 |
21 | [include]
22 | files = /etc/supervisord.d/*.conf
23 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/supervisord.d/accumulo-gc.conf:
--------------------------------------------------------------------------------
1 | [program:accumulo-gc]
2 | command=/usr/local/lib/accumulo/bin/accumulo gc
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/accumulo/gc.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/supervisord.d/accumulo-manager.conf:
--------------------------------------------------------------------------------
1 | [program:accumulo-master]
2 | command=/usr/local/lib/accumulo/bin/accumulo manager
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/accumulo/manager.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/supervisord.d/accumulo-monitor.conf:
--------------------------------------------------------------------------------
1 | [program:accumulo-monitor]
2 | command=/usr/local/lib/accumulo/bin/accumulo monitor
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/accumulo/monitor.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/supervisord.d/accumulo-tserver.conf:
--------------------------------------------------------------------------------
1 | [program:accumulo-tserver]
2 | command=/usr/local/lib/accumulo/bin/accumulo tserver
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/accumulo/tserver.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/accumulo/files/etc/supervisord.d/zookeeper.conf:
--------------------------------------------------------------------------------
1 | [program:zookeeper]
2 | command=/usr/local/lib/zookeeper/bin/zkServer.sh start-foreground
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/zookeeper/zookeeper.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/centos6-oj8/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM library/centos:6.10
14 |
15 | COPY ./files /
16 |
 17 | # Install Java and presto-admin dependencies (already has python 2.6)
18 | RUN \
19 | set -xeu && \
20 | yum install -y \
21 | java-1.8.0-openjdk-devel \
22 | nc \
23 | wget \
24 | && \
25 | \
26 | # install Zulu JDK (will not be the default `java`)
27 | rpm -i https://cdn.azul.com/zulu/bin/zulu11.39.15-ca-jdk11.0.7-linux.x86_64.rpm && \
28 | \
29 | # install supervisor
30 | yum install -y setuptools epel-release && \
31 | yum install -y python-pip && \
32 | pip install --upgrade pip==9.0.3 `# latest version for Python 2.6` && \
33 | pip install --upgrade setuptools==36.8.0 `# latest version for Python 2.6` && \
34 | pip install supervisor==3.4.0 && \
35 | \
36 | # install commonly needed packages
37 | yum install -y \
38 | net-tools `# netstat` \
39 | sudo \
40 | libxslt \
41 | telnet `# helpful when troubleshooting product tests` \
42 | vim `# helpful when troubleshooting product tests` \
43 | && \
44 | # cleanup
45 | yum -y clean all && rm -rf /tmp/* /var/tmp/*
46 |
47 | ENV PATH="/usr/local/bin:${PATH}"
48 | ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk.x86_64
49 | ENV LANG=en_US.UTF-8
50 |
--------------------------------------------------------------------------------
/archived/centos6-oj8/files/etc/yum.repos.d/CentOS-Base.repo:
--------------------------------------------------------------------------------
1 | [C6.10-base]
2 | name=CentOS-6.10 - Base
3 | baseurl=http://vault.centos.org/6.10/os/$basearch/
4 | gpgcheck=1
5 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
6 | enabled=1
7 | metadata_expire=never
8 |
9 | [C6.10-updates]
10 | name=CentOS-6.10 - Updates
11 | baseurl=http://vault.centos.org/6.10/updates/$basearch/
12 | gpgcheck=1
13 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
14 | enabled=1
15 | metadata_expire=never
16 |
17 | [C6.10-extras]
18 | name=CentOS-6.10 - Extras
19 | baseurl=http://vault.centos.org/6.10/extras/$basearch/
20 | gpgcheck=1
21 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
22 | enabled=1
23 | metadata_expire=never
24 |
25 | [C6.10-contrib]
26 | name=CentOS-6.10 - Contrib
27 | baseurl=http://vault.centos.org/6.10/contrib/$basearch/
28 | gpgcheck=1
29 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
30 | enabled=0
31 | metadata_expire=never
32 |
33 | [C6.10-centosplus]
34 | name=CentOS-6.10 - CentOSPlus
35 | baseurl=http://vault.centos.org/6.10/centosplus/$basearch/
36 | gpgcheck=1
37 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
38 | enabled=0
39 | metadata_expire=never
40 |
--------------------------------------------------------------------------------
/archived/centos6-oj8/files/opt/trinodev/site-override.xslt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/archived/centos6-oj8/files/usr/local/bin/apply-all-site-xml-overrides:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 1 ]; then
11 | fail "Usage: $0 " >&2
12 | fi
13 |
14 | overrides_dir="$1"
15 |
16 | for file in $(find $overrides_dir -name '*.xml'); do
17 | target_filename="${file#"$overrides_dir"}"
18 | echo "Applying configuration override from $file to $target_filename"
19 | apply-site-xml-override "$target_filename" "$file"
20 | done
21 |
--------------------------------------------------------------------------------
/archived/centos6-oj8/files/usr/local/bin/apply-site-xml-override:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 2 ]; then
11 | fail "Usage: $0 " >&2
12 | fi
13 |
14 | site_xml="$1"
15 | overrides="$2"
16 | site_xml_new="$1.new"
17 |
18 | test -f "${site_xml}" || fail "${site_xml} does not exist or is not a file"
19 | test -f "${overrides}" || fail "${overrides} does not exist or is not a file"
20 | test ! -e "${site_xml_new}" || fail "${site_xml_new} already exists"
21 |
22 | xsltproc --param override-path "'${overrides}'" "/opt/trinodev/site-override.xslt" "${site_xml}" > "${site_xml_new}"
23 | cat "${site_xml_new}" > "${site_xml}" # Preserve file owner & permissions
24 | rm "${site_xml_new}"
25 |
--------------------------------------------------------------------------------
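Together, the two helpers above implement the site-XML override mechanism used by the Hadoop-based images: `apply-all-site-xml-overrides` walks a directory tree of override files and, for each one, calls `apply-site-xml-override`, which merges the override into the matching site file by running the XSLT at /opt/trinodev/site-override.xslt over it. A minimal usage sketch (the /overrides path and file names are illustrative, not taken from this repository):

```bash
# Merge a single override file into the live Hadoop configuration ...
apply-site-xml-override /etc/hadoop/conf/core-site.xml /overrides/etc/hadoop/conf/core-site.xml

# ... or apply every *.xml override found under /overrides in one pass.
apply-all-site-xml-overrides /overrides
```
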
/archived/centos7-oj8/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM library/centos:7
14 |
15 | COPY ./files /
16 |
 17 | # Install Java and presto-admin dependencies
18 | RUN \
19 | set -xeu && \
20 | yum install -y \
21 | java-1.8.0-openjdk-devel \
22 | nc \
23 | wget \
24 | && \
25 | \
26 | # install Zulu JDK (will not be the default `java`)
27 | rpm -i https://cdn.azul.com/zulu/bin/zulu11.48.21-ca-jdk11.0.11-linux.x86_64.rpm && \
28 | \
29 | # install supervisor
30 | yum --enablerepo=extras install -y setuptools epel-release && \
31 | yum install -y python-pip && \
32 | pip install supervisor && \
33 | \
34 | # install commonly needed packages
35 | yum install -y \
36 | less `# helpful when troubleshooting product tests` \
37 | net-tools `# netstat is required by run_on_docker.sh` \
38 | sudo \
39 | telnet `# helpful when troubleshooting product tests` \
40 | vim `# helpful when troubleshooting product tests` \
41 | && \
42 | # cleanup
43 | yum -y clean all && rm -rf /tmp/* /var/tmp/*
44 |
45 | ENV PATH="/usr/local/bin:${PATH}"
46 | ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk
47 | ENV LANG=en_US.UTF-8
48 |
--------------------------------------------------------------------------------
/archived/centos7-oj8/files/opt/trinodev/site-override.xslt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/archived/centos7-oj8/files/usr/local/bin/apply-all-site-xml-overrides:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 1 ]; then
11 | fail "Usage: $0 " >&2
12 | fi
13 |
14 | overrides_dir="$1"
15 |
16 | for file in $(find $overrides_dir -name '*.xml'); do
17 | target_filename="${file#"$overrides_dir"}"
18 | echo "Applying configuration override from $file to $target_filename"
19 | apply-site-xml-override "$target_filename" "$file"
20 | done
21 |
--------------------------------------------------------------------------------
/archived/centos7-oj8/files/usr/local/bin/apply-site-xml-override:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 2 ]; then
11 | fail "Usage: $0 " >&2
12 | fi
13 |
14 | site_xml="$1"
15 | overrides="$2"
16 | site_xml_new="$1.new"
17 |
18 | test -f "${site_xml}" || fail "${site_xml} does not exist or is not a file"
19 | test -f "${overrides}" || fail "${overrides} does not exist or is not a file"
20 | test ! -e "${site_xml_new}" || fail "${site_xml_new} already exists"
21 |
22 | xsltproc --param override-path "'${overrides}'" "/opt/trinodev/site-override.xslt" "${site_xml}" > "${site_xml_new}"
23 | cat "${site_xml_new}" > "${site_xml}" # Preserve file owner & permissions
24 | rm "${site_xml_new}"
25 |
--------------------------------------------------------------------------------
/archived/gpdb-6/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM ubuntu:18.04
14 |
15 | SHELL ["/bin/bash", "-c"]
16 |
 17 | # Version arguments and install dir; GPHOME is a symlink
18 | ARG GPDB_VERSION=6.11.1
19 | ARG GPHOME=/usr/gpdb
20 | ARG GPDB_DIR=/usr/local/greenplum-db-$GPDB_VERSION
21 |
22 | ENV GPHOME=/usr/gpdb
23 | ENV DATABASE gpadmin
24 |
25 | COPY ./files /
26 |
27 | # Install dependencies and GPDB
28 | RUN apt-get update -y && \
29 | apt-get install -y \
30 | iputils-ping \
31 | locales \
32 | locales-all \
33 | openssh-client \
34 | openssh-server && \
35 | apt-get clean
36 |
37 | # Install GPDB
38 | RUN wget https://github.com/greenplum-db/gpdb/releases/download/$GPDB_VERSION/greenplum-db-$GPDB_VERSION-ubuntu18.04-amd64.deb && \
39 | apt install -y ./greenplum-db-$GPDB_VERSION-ubuntu18.04-amd64.deb && \
40 | rm greenplum-db-$GPDB_VERSION-ubuntu18.04-amd64.deb && \
41 | ln -s $GPDB_DIR $GPHOME && \
42 | apt-get clean
43 |
44 | # Create gpadmin user
45 | RUN adduser --home /home/gpadmin gpadmin --disabled-password --gecos GECOS && \
46 | usermod --password gpadmin gpadmin && \
47 | chown -R gpadmin: /home/gpadmin && \
48 | # Create data directories and set ownership
49 | mkdir -p /gpmaster /gpdata1 /gpdata2 && chown gpadmin: /gpmaster /gpdata1 /gpdata2 && \
50 | # Start SSH service and initialize GPDB
51 | service ssh start && \
52 | su gpadmin -l -c configure_gpdb.sh && \
53 | hostname > ~/original_hostname && \
54 | # Allow client access from any host
55 | echo "host all all 0.0.0.0/0 md5" >> /gpmaster/gpsne-1/pg_hba.conf
56 |
57 | # Expose client port
58 | EXPOSE 5432
59 |
 60 | # Start SSH service, start GPDB, then tail the GPDB log to keep the container running
61 | CMD entrypoint.sh
62 |
63 | HEALTHCHECK --start-period=5m \
64 | CMD su gpadmin -l -c "pg_isready"
65 |
--------------------------------------------------------------------------------
/archived/gpdb-6/README.md:
--------------------------------------------------------------------------------
1 | ## Running the Docker Container
2 |
 3 | The container exposes port 5432 to allow external connections to the Greenplum database.
4 |
5 | ```
6 | docker run --name gpdb -p 5432:5432 -d testing/gpdb-6:latest
7 | ```
8 |
9 | The container can take anywhere from 30 seconds to a few minutes to start up, depending on the host machine.
 10 | If you have difficulty connecting, use `docker logs gpdb` to see what isn't going to plan.
11 | You'll see a message `Database successfully started` in the logs when it is ready to accept connections.
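
If you need to block until the database is up (for example in a test script), you can poll the logs for that message; a minimal sketch, assuming the container is named `gpdb` as in the examples above:

```bash
# Wait until GPDB reports that it is ready to accept connections.
until docker logs gpdb 2>&1 | grep -q "Database successfully started"; do
  sleep 5
done
```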
12 |
 13 | The default database is called `gpadmin`, but you can choose a different one by setting the `DATABASE` environment variable
14 | when starting your container, e.g.:
15 |
16 | ```
17 | docker run --name gpdb -p 5432:5432 -e DATABASE=tpch -d testing/gpdb-6:latest
18 | ```
19 |
20 | ## Usage
21 |
22 | `gpadmin` is both the user name and password.
23 |
 24 | You can log in to GPDB using the `psql` client inside the container, or from the host if you have `psql` installed.
25 |
26 | In the container:
27 |
28 | ```bash
29 | $ docker exec -it gpdb psql
30 | psql (9.4.24)
31 | Type "help" for help.
32 |
33 | gpadmin=# CREATE TABLE foo (a BIGINT) DISTRIBUTED RANDOMLY;
34 | CREATE TABLE
35 | gpadmin=# insert into foo values (1), (2), (3);
36 | INSERT 0 3
37 | gpadmin=# SELECT * FROM foo;
38 | 2
39 | 3
40 | 1
41 | ```
42 |
43 | On the host, if you have `psql`:
44 |
45 | ```bash
46 | psql -h localhost -U gpadmin
47 | Password for user gpadmin:
48 | psql (12.3, server 9.4.24)
49 | Type "help" for help.
50 |
51 | gpadmin=#
52 | ```
53 |
54 | If you changed the database name, provide it as an extra argument to `psql` (not necessary when using `docker exec`):
55 |
56 | ```bash
57 | psql -h localhost -U gpadmin tpch
58 | ```
59 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/etc/security/limits.conf:
--------------------------------------------------------------------------------
1 | * soft nofile 65536
2 | * hard nofile 65536
3 | * soft nproc 131072
4 | * hard nproc 131072
5 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/etc/sysctl.conf:
--------------------------------------------------------------------------------
1 | kernel.shmmax = 1000000000
2 | kernel.shmmni = 4096
3 | kernel.shmall = 4000000000
4 | kernel.sem = 250 512000 100 2048
5 | kernel.sysrq = 1
6 | kernel.core_uses_pid = 1
7 | kernel.msgmnb = 65536
8 | kernel.msgmax = 65536
9 | kernel.msgmni = 2048
10 | net.ipv4.tcp_syncookies = 0
11 | net.ipv4.ip_forward = 0
12 | net.ipv4.conf.default.accept_source_route = 0
13 | net.ipv4.tcp_tw_recycle = 1
14 | net.ipv4.tcp_max_syn_backlog = 200000
15 | net.ipv4.conf.all.arp_filter = 1
16 | net.ipv4.ip_local_port_range = 1025 65535
17 | net.core.netdev_max_backlog = 200000
18 | fs.nr_open = 3000000
19 | kernel.threads-max = 798720
20 | kernel.pid_max = 798720
21 | net.core.rmem_max=2097152
22 | net.core.wmem_max=2097152
23 | vm.overcommit_memory=2
24 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/home/gpadmin/.bash_profile:
--------------------------------------------------------------------------------
1 | export MASTER_DATA_DIRECTORY=/gpmaster/gpsne-1
2 | source /usr/gpdb/greenplum_path.sh
3 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/home/gpadmin/gpdb-hosts:
--------------------------------------------------------------------------------
1 | localhost
2 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/usr/local/bin/configure_gpdb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | source ${GPHOME}/greenplum_path.sh
4 | export MASTER_DATA_DIRECTORY=/gpmaster/gpsne-1
5 |
 6 | # SSH is still required by the GPDB initialization and start scripts even though everything is installed on a single host
7 |
8 | # Create and exchange keys
9 | ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
10 | cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys && chmod 644 ~/.ssh/authorized_keys
11 | ssh-keyscan -H localhost >> ~/.ssh/known_hosts
12 | gpssh-exkeys -f /home/gpadmin/gpdb-hosts
13 |
14 | # Initialize GPDB
15 | gpinitsystem -a -c /home/gpadmin/gpinitsystem_singlenode -h /home/gpadmin/gpdb-hosts
16 |
17 | # Set the password
18 | psql -d template1 -c "alter user gpadmin password 'gpadmin'"
19 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/usr/local/bin/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "127.0.0.1 $(cat ~/original_hostname)" >> /etc/hosts
4 |
5 | service ssh start
6 | su gpadmin -l -c "export MASTER_DATA_DIRECTORY=/gpmaster/gpsne-1 ; source ${GPHOME}/greenplum_path.sh ; gpstart -a ; createdb ${DATABASE}"
7 |
8 | if test -d /docker/gpdb-init.d; then
9 | for init_script in /docker/gpdb-init.d/*; do
10 | "${init_script}"
11 | done
12 | fi
13 |
14 | tail -f /gpmaster/gpsne-1/pg_log/gpdb-*.csv
15 |
--------------------------------------------------------------------------------
/archived/gpdb-6/files/usr/local/bin/psql:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Wrapper script to access GPDB via psql
4 |
5 | su gpadmin -c "source ${GPHOME}/greenplum_path.sh; psql ${DATABASE}"
6 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM testing/centos7-oj8:unlabelled
14 |
15 | # Change default timezone
16 | RUN ln -snf "/usr/share/zoneinfo/Asia/Kathmandu" /etc/localtime && echo "Asia/Kathmandu" > /etc/timezone
17 |
18 | # Install HDP repo
19 | RUN set -xeu; \
20 | wget -nv http://public-repo-1.hortonworks.com/HDP/centos7/3.x/updates/3.1.0.0/hdp.repo -P /etc/yum.repos.d; \
21 | wget -nv http://public-repo-1.hortonworks.com/HDP-GPL/centos7/3.x/updates/3.1.0.0/hdp.gpl.repo -P /etc/yum.repos.d; \
22 | echo OK
23 |
24 | # Install Hadoop, Hive
25 | RUN yum install -y \
26 | hadoop-hdfs-namenode \
27 | hadoop-hdfs-secondarynamenode \
28 | hadoop-hdfs-datanode \
29 |
30 | hadoop-yarn-resourcemanager \
31 | hadoop-yarn-nodemanager \
32 |
33 | hive \
34 | hive-metastore \
35 | hive-server2 \
36 | tez \
37 |
38 | hadooplzo \
39 | hadooplzo-native \
40 | lzo \
41 | lzo-devel \
42 | lzop \
43 |
44 | # Mysql is not present in Alma Linux 9 repositories, use mariadb as a replacement
45 | mariadb-server \
46 | mysql-connector-java \
47 |
48 | # Cleanup
49 | && yum -y clean all && rm -rf /tmp/* /var/tmp/* \
50 | && ln -s /usr/share/java/mysql-connector-java.jar /usr/hdp/current/hive-metastore/lib/mysql-connector-java.jar
51 |
52 | # Delete original configuration
53 | RUN rm -r /etc/hadoop/conf/* \
54 | && rm -r /etc/hive/conf/*
55 |
56 | # Copy configuration files
57 | COPY ./files /
58 |
59 | # Setup sock proxy
60 | RUN yum install -y openssh openssh-clients openssh-server && yum -y clean all
61 | RUN ssh-keygen -t rsa -b 4096 -C "automation@trino.io" -N "" -f /root/.ssh/id_rsa \
62 | && ssh-keygen -t rsa -b 4096 -N "" -f /etc/ssh/ssh_host_rsa_key \
63 | && ssh-keygen -t dsa -b 1024 -N "" -f /etc/ssh/ssh_host_dsa_key \
64 | && cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
65 | RUN chmod 755 /root && chmod 700 /root/.ssh
66 | RUN passwd --unlock root
67 |
68 | # Run setup script
69 | RUN /root/setup.sh \
70 | && rm -rf /tmp/* /var/tmp/*
71 |
72 | # Provide convenience bash history
73 | RUN set -xeu; \
74 | echo "supervisorctl restart all" >> ~root/.bash_history; \
75 | for user in root hive hdfs; do \
76 | sudo -u "${user}" bash -c ' echo "netstat -ltnp" >> ~/.bash_history '; \
77 | sudo -u "${user}" bash -c ' echo "beeline -n hive" >> ~/.bash_history '; \
78 | sudo -u "${user}" bash -c ' echo "hdfs dfs -ls -R /user/hive/warehouse" >> ~/.bash_history '; \
79 | sudo -u "${user}" bash -c ' mkdir -p ~/.beeline '; \
80 | sudo -u "${user}" bash -c ' echo "SELECT current_user();" >> ~/.beeline/history '; \
81 | done
82 |
83 | # HDFS ports
84 | EXPOSE 1004 1006 8020 9866 9867 9870 9864 50470
85 |
86 | # YARN ports
87 | EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
88 |
89 | # HIVE ports
90 | EXPOSE 9083 10000
91 |
92 | # SOCKS port
93 | EXPOSE 1180
94 |
95 | CMD supervisord -c /etc/supervisord.conf
96 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hadoop/conf/capacity-scheduler.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 15 |
 16 | <configuration>
 17 |   <property>
 18 |     <name>yarn.scheduler.capacity.maximum-applications</name>
 19 |     <value>10000</value>
 20 |     <description>
 21 |       Maximum number of applications that can be pending and running.
 22 |     </description>
 23 |   </property>
 24 |
 25 |   <property>
 26 |     <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
 27 |     <value>1</value>
 28 |     <description>
 29 |       Maximum percent of resources in the cluster which can be used to run
 30 |       application masters i.e. controls number of concurrent running
 31 |       applications.
 32 |     </description>
 33 |   </property>
 34 |
 35 |   <property>
 36 |     <name>yarn.scheduler.capacity.root.queues</name>
 37 |     <value>default</value>
 38 |     <description>
 39 |       The queues at the this level (root is the root queue).
 40 |     </description>
 41 |   </property>
 42 |
 43 |   <property>
 44 |     <name>yarn.scheduler.capacity.root.default.capacity</name>
 45 |     <value>100</value>
 46 |     <description>Default queue target capacity.</description>
 47 |   </property>
 48 |
 49 |   <property>
 50 |     <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
 51 |     <value>100</value>
 52 |     <description>
 53 |       The maximum capacity of the default queue.
 54 |     </description>
 55 |   </property>
 56 |
 57 |   <property>
 58 |     <name>yarn.scheduler.capacity.root.default.state</name>
 59 |     <value>RUNNING</value>
 60 |     <description>
 61 |       The state of the default queue. State can be one of RUNNING or STOPPED.
 62 |     </description>
 63 |   </property>
 64 |
 65 |   <property>
 66 |     <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
 67 |     <value>*</value>
 68 |     <description>
 69 |       The ACL of who can submit jobs to the default queue.
 70 |     </description>
 71 |   </property>
 72 |
 73 |   <property>
 74 |     <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
 75 |     <value>1</value>
 76 |     <description>
 77 |       Default queue user limit a percentage from 0.0 to 1.0.
 78 |     </description>
 79 |   </property>
 80 |
 81 |   <property>
 82 |     <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
 83 |     <value>*</value>
 84 |     <description>
 85 |       The ACL of who can administer jobs on the default queue.
 86 |     </description>
 87 |   </property>
 88 |
 89 |   <property>
 90 |     <name>yarn.scheduler.capacity.node-locality-delay</name>
 91 |     <value>-1</value>
 92 |     <description>
 93 |       Number of missed scheduling opportunities after which the CapacityScheduler
 94 |       attempts to schedule rack-local containers.
 95 |       Typically this should be set to number of racks in the cluster, this
 96 |       feature is disabled by default, set to -1.
 97 |     </description>
 98 |   </property>
 99 |
100 | </configuration>
101 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hadoop/conf/core-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 18 |
 19 | <configuration>
 20 |   <property>
 21 |     <name>fs.defaultFS</name>
 22 |     <value>hdfs://hadoop-master:9000</value>
 23 |   </property>
 24 |
 25 |
 26 |   <property>
 27 |     <name>hadoop.proxyuser.oozie.hosts</name>
 28 |     <value>*</value>
 29 |   </property>
 30 |   <property>
 31 |     <name>hadoop.proxyuser.oozie.groups</name>
 32 |     <value>*</value>
 33 |   </property>
 34 |
 35 |
 36 |   <property>
 37 |     <name>hadoop.proxyuser.httpfs.hosts</name>
 38 |     <value>*</value>
 39 |   </property>
 40 |   <property>
 41 |     <name>hadoop.proxyuser.httpfs.groups</name>
 42 |     <value>*</value>
 43 |   </property>
 44 |
 45 |
 46 |   <property>
 47 |     <name>hadoop.proxyuser.llama.hosts</name>
 48 |     <value>*</value>
 49 |   </property>
 50 |   <property>
 51 |     <name>hadoop.proxyuser.llama.groups</name>
 52 |     <value>*</value>
 53 |   </property>
 54 |
 55 |
 56 |   <property>
 57 |     <name>hadoop.proxyuser.hue.hosts</name>
 58 |     <value>*</value>
 59 |   </property>
 60 |   <property>
 61 |     <name>hadoop.proxyuser.hue.groups</name>
 62 |     <value>*</value>
 63 |   </property>
 64 |
 65 |
 66 |   <property>
 67 |     <name>hadoop.proxyuser.mapred.hosts</name>
 68 |     <value>*</value>
 69 |   </property>
 70 |   <property>
 71 |     <name>hadoop.proxyuser.mapred.groups</name>
 72 |     <value>*</value>
 73 |   </property>
 74 |
 75 |
 76 |   <property>
 77 |     <name>hadoop.proxyuser.hive.hosts</name>
 78 |     <value>*</value>
 79 |   </property>
 80 |
 81 |   <property>
 82 |     <name>hadoop.proxyuser.hive.groups</name>
 83 |     <value>*</value>
 84 |   </property>
 85 |
 86 |
 87 |   <property>
 88 |     <name>hadoop.proxyuser.hdfs.groups</name>
 89 |     <value>*</value>
 90 |   </property>
 91 |
 92 |   <property>
 93 |     <name>hadoop.proxyuser.hdfs.hosts</name>
 94 |     <value>*</value>
 95 |   </property>
 96 |
 97 | </configuration>
 98 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hadoop/conf/hadoop-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # Set Hadoop-specific environment variables here.
 17 | # Forcing YARN-based mapreduce implementation.
18 | # Make sure to comment out if you want to go back to the default or
19 | # if you want this to be tweakable on a per-user basis
20 | # export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
21 |
22 | # The maximum amount of heap to use, in MB. Default is 1000.
23 | export HADOOP_HEAPSIZE=256
24 |
25 | # Extra Java runtime options. Empty by default.
26 | export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
27 | export YARN_OPTS="$YARN_OPTS -Xmx256m"
28 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hadoop/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 16 | <configuration>
 17 |
 18 |   <property>
 19 |     <name>dfs.namenode.name.dir</name>
 20 |     <value>/var/lib/hadoop-hdfs/cache/name/</value>
 21 |   </property>
 22 |
 23 |   <property>
 24 |     <name>dfs.datanode.data.dir</name>
 25 |     <value>/var/lib/hadoop-hdfs/cache/data/</value>
 26 |   </property>
 27 |
 28 |   <property>
 29 |     <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
 30 |     <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
 31 |   </property>
 32 |
 33 |
 34 |   <property>
 35 |     <name>dfs.safemode.threshold.pct</name>
 36 |     <value>0</value>
 37 |   </property>
 38 |
 39 | </configuration>
 40 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hadoop/conf/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO,CONSOLE
2 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
3 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
4 | log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
5 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hadoop/conf/mapred-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 18 |
 19 | <configuration>
 20 |   <property>
 21 |     <name>mapred.job.tracker</name>
 22 |     <value>hadoop-master:8021</value>
 23 |   </property>
 24 |
 25 |   <property>
 26 |     <name>mapreduce.framework.name</name>
 27 |     <value>yarn</value>
 28 |   </property>
 29 |
 30 |   <property>
 31 |     <name>mapreduce.jobhistory.address</name>
 32 |     <value>hadoop-master:10020</value>
 33 |   </property>
 34 |
 35 |   <property>
 36 |     <name>mapreduce.jobhistory.webapp.address</name>
 37 |     <value>hadoop-master:19888</value>
 38 |   </property>
 39 |
 40 |   <property>
 41 |     <description>To set the value of tmp directory for map and reduce tasks.</description>
 42 |     <name>mapreduce.task.tmp.dir</name>
 43 |     <value>/var/lib/hadoop-mapreduce/cache/${user.name}/tasks</value>
 44 |   </property>
 45 |
 46 | </configuration>
 47 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hive/conf/beeline-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 | <configuration>
 3 |   <property>
 4 |     <name>beeline.hs2.jdbc.url.tcpUrl</name>
 5 |     <value>jdbc:hive2://localhost:10000/default;user=hdfs;password=hive</value>
 6 |   </property>
 7 |
 8 |   <property>
 9 |     <name>beeline.hs2.jdbc.url.httpUrl</name>
 10 |     <value>jdbc:hive2://localhost:10000/default;user=hdfs;password=hive;transportMode=http;httpPath=cliservice</value>
 11 |   </property>
 12 |
 13 |   <property>
 14 |     <name>beeline.hs2.jdbc.url.default</name>
 15 |     <value>tcpUrl</value>
 16 |   </property>
 17 | </configuration>
 18 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hive/conf/hive-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 18 |
 19 | <configuration>
 20 |   <property>
 21 |     <name>javax.jdo.option.ConnectionURL</name>
 22 |     <value>jdbc:mysql://localhost/metastore</value>
 23 |     <description>the URL of the MySQL database</description>
 24 |   </property>
 25 |
 26 |   <property>
 27 |     <name>javax.jdo.option.ConnectionDriverName</name>
 28 |     <value>com.mysql.jdbc.Driver</value>
 29 |   </property>
 30 |
 31 |   <property>
 32 |     <name>javax.jdo.option.ConnectionUserName</name>
 33 |     <value>root</value>
 34 |   </property>
 35 |
 36 |   <property>
 37 |     <name>javax.jdo.option.ConnectionPassword</name>
 38 |     <value>root</value>
 39 |   </property>
 40 |
 41 |   <property>
 42 |     <name>datanucleus.autoCreateSchema</name>
 43 |     <value>false</value>
 44 |   </property>
 45 |
 46 |   <property>
 47 |     <name>datanucleus.fixedDatastore</name>
 48 |     <value>true</value>
 49 |   </property>
 50 |
 51 |   <property>
 52 |     <name>datanucleus.autoStartMechanism</name>
 53 |     <value>SchemaTable</value>
 54 |   </property>
 55 |
 56 |   <property>
 57 |     <name>hive.security.authorization.createtable.owner.grants</name>
 58 |     <value>ALL</value>
 59 |     <description>The set of privileges automatically granted to the owner whenever a table gets created.</description>
 60 |   </property>
 61 |
 62 |   <property>
 63 |     <name>hive.users.in.admin.role</name>
 64 |     <value>hdfs,hive</value>
 65 |   </property>
 66 |
 67 |
 68 |   <property>
 69 |     <name>metastore.storage.schema.reader.impl</name>
 70 |     <value>org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader</value>
 71 |   </property>
 72 |
 73 |   <property>
 74 |     <name>hive.support.concurrency</name>
 75 |     <value>true</value>
 76 |   </property>
 77 |
 78 |   <property>
 79 |     <name>hive.compactor.initiator.on</name>
 80 |     <value>true</value>
 81 |   </property>
 82 |
 83 |   <property>
 84 |     <name>hive.compactor.worker.threads</name>
 85 |     <value>1</value>
 86 |   </property>
 87 |
 88 |   <property>
 89 |     <name>hive.txn.manager</name>
 90 |     <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
 91 |   </property>
 92 |
 93 | </configuration>
 94 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/hive/conf/hiveserver2-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 3 |
 4 | <configuration>
 5 |   <property>
 6 |     <name>hive.metastore.uris</name>
 7 |     <value>thrift://localhost:9083</value>
 8 |   </property>
 9 | </configuration>
 10 |
 11 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | nodaemon = true
8 | directory = /tmp
9 | strip_ansi = false
10 |
11 | [unix_http_server]
12 | file = /tmp/supervisor.sock
13 |
14 | [rpcinterface:supervisor]
15 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
16 |
17 | [supervisorctl]
18 | serverurl = unix:///tmp/supervisor.sock
19 |
20 | [include]
21 | files = /etc/supervisord.d/*.conf
22 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/hdfs-datanode.conf:
--------------------------------------------------------------------------------
1 | [program:hdfs-datanode]
2 | command=hdfs datanode
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=hdfs
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/hdfs-namenode.conf:
--------------------------------------------------------------------------------
1 | [program:hdfs-namenode]
2 | command=hdfs namenode
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=hdfs
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/hive-metastore.conf:
--------------------------------------------------------------------------------
1 | [program:hive-metastore]
2 | # Add `--debug:port=5006` for debugging
3 | command=hive --service metastore
4 | startsecs=2
5 | stopwaitsecs=10
6 | user=hive
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/hive/hive-metastore.log
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/hive-server2.conf:
--------------------------------------------------------------------------------
1 | [program:hive-server2]
2 | command=hive --service hiveserver2
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=hive
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hive/hive-server2.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/mysql-metastore.conf:
--------------------------------------------------------------------------------
1 | [program:mysql-metastore]
2 | command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=mysql
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/mysql/mysql.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/socks-proxy.conf:
--------------------------------------------------------------------------------
1 | [program:socks-proxy]
2 | command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/socks-proxy
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/sshd.conf:
--------------------------------------------------------------------------------
1 | [program:sshd]
2 | command=/usr/sbin/sshd -D -e
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/sshd
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/yarn-nodemanager.conf:
--------------------------------------------------------------------------------
1 | [program:yarn-nodemanager]
2 | command=yarn nodemanager
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=yarn
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/supervisord.d/yarn-resourcemanager.conf:
--------------------------------------------------------------------------------
1 | [program:yarn-resourcemanager]
2 | command=yarn resourcemanager
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=yarn
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/etc/tez/conf/tez-site.xml:
--------------------------------------------------------------------------------
 1 |
 2 |
 3 |
 14 |
 15 | <configuration>
 16 |   <property>
 17 |     <name>tez.lib.uris.ignore</name>
 18 |     <value>false</value>
 19 |   </property>
 20 |   <property>
 21 |     <name>tez.lib.uris</name>
 22 |     <value>file:///usr/hdp/current/tez-client/lib/tez.tar.gz</value>
 23 |   </property>
 24 |   <property>
 25 |     <name>tez.am.mode.session</name>
 26 |     <value>false</value>
 27 |   </property>
 28 |   <property>
 29 |     <name>tez.am.acl.enabled</name>
 30 |     <value>false</value>
 31 |   </property>
 32 |   <property>
 33 |     <name>tez.am.log.level</name>
 34 |     <value>WARN</value>
 35 |   </property>
 36 |   <property>
 37 |     <name>tez.task.log.level</name>
 38 |     <value>WARN</value>
 39 |   </property>
 40 |   <property>
 41 |     <name>tez.runtime.io.sort.mb</name>
 42 |     <value>8</value>
 43 |   </property>
 44 |   <property>
 45 |     <name>tez.am.max.app.attempts</name>
 46 |     <value>1</value>
 47 |   </property>
 48 |   <property>
 49 |     <name>tez.am.task.max.failed.attempts</name>
 50 |     <value>1</value>
 51 |   </property>
 52 |   <property>
 53 |     <name>tez.shuffle-vertex-manager.min-src-fraction</name>
 54 |     <value>0.10</value>
 55 |   </property>
 56 |   <property>
 57 |     <name>tez.shuffle-vertex-manager.max-src-fraction</name>
 58 |     <value>1.00</value>
 59 |   </property>
 60 |   <property>
 61 |     <name>tez.am.launch.cmd-opts</name>
 62 |     <value>-server -Djava.net.preferIPv4Stack=true -XX:+UseParallelGC -Dhadoop.metrics.log.level=WARN</value>
 63 |   </property>
 64 |   <property>
 65 |     <name>tez.am.resource.memory.mb</name>
 66 |     <value>128</value>
 67 |   </property>
 68 |   <property>
 69 |     <name>tez.task.launch.cmd-opts</name>
 70 |     <value>-server -Djava.net.preferIPv4Stack=true -XX:+UseParallelGC -Dhadoop.metrics.log.level=WARN</value>
 71 |   </property>
 72 |   <property>
 73 |     <name>tez.task.resource.memory.mb</name>
 74 |     <value>128</value>
 75 |   </property>
 76 |   <property>
 77 |     <name>tez.task.resource.cpu.vcores</name>
 78 |     <value>1</value>
 79 |   </property>
 80 |   <property>
 81 |     <name>tez.runtime.sort.threads</name>
 82 |     <value>1</value>
 83 |   </property>
 84 |   <property>
 85 |     <name>tez.runtime.io.sort.factor</name>
 86 |     <value>100</value>
 87 |   </property>
 88 |   <property>
 89 |     <name>tez.runtime.shuffle.memory-to-memory.enable</name>
 90 |     <value>false</value>
 91 |   </property>
 92 |   <property>
 93 |     <name>tez.runtime.optimize.local.fetch</name>
 94 |     <value>true</value>
 95 |   </property>
 96 |   <property>
 97 |     <name>hive.tez.container.size</name>
 98 |     <value>1024</value>
 99 |   </property>
100 | </configuration>
101 |
--------------------------------------------------------------------------------
/archived/hdp3.1-hive/files/root/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -ex
2 |
3 | # make file system hostname resolvable
4 | echo "127.0.0.1 hadoop-master" >> /etc/hosts
5 |
6 | # format namenode
7 | chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
8 |
9 | mkdir /usr/hdp/current/hadoop-client/logs /var/log/hadoop-hdfs /var/log/hadoop-yarn
10 | chgrp -R hadoop /usr/hdp/current/hadoop-client/logs /var/log/hadoop-hdfs /var/log/hadoop-yarn
11 | chmod -R 770 /usr/hdp/current/hadoop-client/logs /var/log/hadoop-hdfs /var/log/hadoop-yarn
12 |
13 | # workaround for 'could not open session' bug as suggested here:
14 | # https://github.com/docker/docker/issues/7056#issuecomment-49371610
15 | rm -f /etc/security/limits.d/hdfs.conf
16 | su -c "echo 'N' | hdfs namenode -format" hdfs
17 |
18 | # start hdfs
19 | su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
20 |
21 | # wait for process starting
22 | sleep 15
23 |
24 | # init basic hdfs directories
25 | /usr/hdp/current/hadoop-client/libexec/init-hdfs.sh
26 |
27 | # 4.1 Create an hdfs home directory for the yarn user. For some reason, init-hdfs doesn't do so.
28 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/yarn && /usr/bin/hadoop fs -chown yarn:yarn /user/yarn'
29 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn'
30 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging'
31 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging/history'
32 |
33 | # init hive directories
34 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
35 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
36 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
37 |
38 | # stop hdfs
39 | killall java
40 |
41 | # setup metastore
42 | ln -s /usr/bin/resolveip /usr/libexec # mariadb-server installs resolveip in /usr/bin but mysql_install_db expects it in /usr/libexec
43 | mysql_install_db
44 |
45 | chown -R mysql:mysql /var/lib/mysql
46 |
47 | /usr/bin/mysqld_safe &
48 | sleep 10s
49 |
50 | echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
51 | echo "CREATE DATABASE metastore;" | mysql
52 | /usr/bin/mysqladmin -u root password 'root'
53 | /usr/hdp/current/hive-client/bin/schematool -dbType mysql -initSchema
54 |
55 | killall mysqld
56 | sleep 10s
57 | mkdir /var/log/mysql/
58 | chown -R mysql:mysql /var/log/mysql/
59 |
60 | # Create `information_schema` and `sys` schemas in Hive
61 | supervisord -c /etc/supervisord.conf &
62 | while ! beeline -n hive -e "SELECT 1"; do
63 | echo "Waiting for HiveServer2 ..."
64 | sleep 10s
65 | done
66 | /usr/hdp/current/hive-client/bin/schematool -userName hive -metaDbType mysql -dbType hive -initSchema \
67 | -url jdbc:hive2://localhost:10000/default -driver org.apache.hive.jdbc.HiveDriver
68 | supervisorctl stop all
69 |
70 | # Additional libs
71 | cp -av /usr/hdp/current/hadoop-client/lib/native/Linux-amd64-64/* /usr/lib64/
72 | mkdir -v /usr/hdp/current/hive-client/auxlib || test -d /usr/hdp/current/hive-client/auxlib
73 | ln -vs /usr/hdp/current/hadoop-client/lib/hadoop-lzo-*.jar /usr/hdp/current/hive-client/auxlib
74 |
--------------------------------------------------------------------------------
/archived/phoenix5/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj11:unlabelled$ARCH
15 |
16 | ARG HBASE_VERSION=2.5.10
17 | ARG ZOOKEEPER_VERSION=3.9.3
18 | ARG PHOENIX_VERSION=5.2.1
19 | ARG PHOENIX_HBASE_VERSION=2.5-$PHOENIX_VERSION
20 | ARG IMAGE_ROOT_INSTALL=/usr/local/lib
21 |
22 | ENV HBASE_HOME=$IMAGE_ROOT_INSTALL/hbase
23 | ENV ZOOKEEPER_HOME=$IMAGE_ROOT_INSTALL/zookeeper
24 | ENV PHOENIX_HOME=$IMAGE_ROOT_INSTALL/phoenix
25 |
26 | # TODO Apache Archive is rate limited -- these should probably go in S3
27 | ARG BASE_URL=https://dlcdn.apache.org
28 | ARG ZOOKEEPER_BINARY_PATH=$BASE_URL/zookeeper/zookeeper-$ZOOKEEPER_VERSION/apache-zookeeper-$ZOOKEEPER_VERSION-bin.tar.gz
29 | ARG HBASE_BINARY_PATH=$BASE_URL/hbase/$HBASE_VERSION/hbase-$HBASE_VERSION-bin.tar.gz
30 | ARG PHOENIX_BINARY_PATH=$BASE_URL/phoenix/phoenix-$PHOENIX_VERSION/phoenix-hbase-$PHOENIX_HBASE_VERSION-bin.tar.gz
31 |
32 | RUN mkdir -p $IMAGE_ROOT_INSTALL
33 |
34 | # Download ZooKeeper
35 | RUN wget --quiet $ZOOKEEPER_BINARY_PATH && \
36 | tar -xf apache-zookeeper-$ZOOKEEPER_VERSION-bin.tar.gz -C $IMAGE_ROOT_INSTALL && \
37 | rm apache-zookeeper-$ZOOKEEPER_VERSION-bin.tar.gz && \
38 | ln -s $IMAGE_ROOT_INSTALL/apache-zookeeper-$ZOOKEEPER_VERSION-bin $ZOOKEEPER_HOME && \
39 | cp $ZOOKEEPER_HOME/conf/zoo_sample.cfg $ZOOKEEPER_HOME/conf/zoo.cfg
40 |
41 | # Download HBase
42 | RUN wget --quiet $HBASE_BINARY_PATH && \
43 | tar -xf hbase-$HBASE_VERSION-bin.tar.gz -C $IMAGE_ROOT_INSTALL && \
44 | rm hbase-$HBASE_VERSION-bin.tar.gz && \
45 | ln -s $IMAGE_ROOT_INSTALL/hbase-$HBASE_VERSION $HBASE_HOME
46 |
47 | # Download Phoenix
48 | RUN wget --quiet $PHOENIX_BINARY_PATH && \
49 | tar -xf phoenix-hbase-$PHOENIX_HBASE_VERSION-bin.tar.gz -C $IMAGE_ROOT_INSTALL && \
50 | rm phoenix-hbase-$PHOENIX_HBASE_VERSION-bin.tar.gz && \
51 | ln -s $IMAGE_ROOT_INSTALL/phoenix-hbase-$PHOENIX_HBASE_VERSION-bin $PHOENIX_HOME
52 |
53 | RUN ln -s $PHOENIX_HOME/phoenix-server-hbase-$PHOENIX_HBASE_VERSION.jar $HBASE_HOME/lib/phoenix.jar
54 |
55 | RUN mkdir -p /var/log/zookeeper /var/log/hbase
56 |
57 | COPY ./files /
58 |
59 | RUN cp /etc/hbase/hbase-site.xml $HBASE_HOME/conf/hbase-site.xml
60 |
61 | # ZooKeeper
62 | EXPOSE 2181
63 | # HBase Master
64 | EXPOSE 16000
65 | # HBase Master WebUI
66 | EXPOSE 16010
67 | # HBase RegionServer
68 | EXPOSE 16020
69 | # HBase RegionServer UI
70 | EXPOSE 16030
71 |
72 | ENTRYPOINT supervisord -c /etc/supervisord.conf
73 |
--------------------------------------------------------------------------------
/archived/phoenix5/files/etc/hbase/hbase-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
21 |
22 | <configuration>
27 | <property>
28 | <name>hbase.cluster.distributed</name>
29 | <value>true</value>
30 | </property>
31 | <property>
32 | <name>hbase.rootdir</name>
33 | <value>/var/local/lib/hbase/data/</value>
34 | </property>
35 | <property>
36 | <name>hbase.regionserver.wal.codec</name>
37 | <value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
38 | </property>
39 | <property>
40 | <name>hbase.unsafe.stream.capability.enforce</name>
41 | <value>false</value>
42 | </property>
43 | <property>
44 | <name>phoenix.schema.isNamespaceMappingEnabled</name>
45 | <value>true</value>
46 | </property>
47 | </configuration>
48 |
--------------------------------------------------------------------------------
/archived/phoenix5/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | nodaemon = true
8 | directory = /tmp
9 | strip_ansi = false
10 |
11 | [unix_http_server]
12 | file = /tmp/supervisor.sock
13 |
14 | [rpcinterface:supervisor]
15 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
16 |
17 | [supervisorctl]
18 | serverurl = unix:///tmp/supervisor.sock
19 |
20 | [include]
21 | files = /etc/supervisord.d/*.conf
22 |
--------------------------------------------------------------------------------
/archived/phoenix5/files/etc/supervisord.d/hbase-master.conf:
--------------------------------------------------------------------------------
1 | [program:hbase-master]
2 | command=/usr/local/lib/hbase/bin/hbase-daemons.sh --config /usr/local/lib/hbase/conf foreground_start master
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hbase/hbase-master.log
8 | autostart=true
9 | priority=998
10 |
--------------------------------------------------------------------------------
/archived/phoenix5/files/etc/supervisord.d/hbase-regionserver.conf:
--------------------------------------------------------------------------------
1 | [program:hbase-regionserver]
2 | command=/usr/local/lib/hbase/bin/hbase-daemons.sh --config /usr/local/lib/hbase/conf foreground_start regionserver
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hbase/hbase-regionserver.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/archived/phoenix5/files/etc/supervisord.d/zookeeper.conf:
--------------------------------------------------------------------------------
1 | [program:zookeeper]
2 | command=/usr/local/lib/zookeeper/bin/zkServer.sh start-foreground
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/zookeeper/zookeeper.log
8 | autostart=true
9 | priority=997
10 |
--------------------------------------------------------------------------------
/bin/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xeuo pipefail
4 |
5 | usage() {
6 | echo "$0 {image} [args]" >&2
7 | }
8 |
9 | if [ $# -lt 1 ]; then
10 | usage
11 | exit 1
12 | fi
13 |
14 | image=$1
15 | shift
16 |
17 | if [ -z "${PLATFORMS:-}" ]; then
18 | docker buildx build \
19 | --compress \
20 | --progress=plain \
21 | --add-host hadoop-master:127.0.0.2 \
22 | -t "$image" \
23 | --load \
24 | "$@" \
25 | .
26 | exit 0
27 | fi
28 |
29 | IFS=, read -ra platforms <<<"$PLATFORMS"
30 | export ARCH
31 | for platform in "${platforms[@]}"; do
32 | IFS=: read -r name tag <<<"$image"
33 | ARCH="-${platform//\//-}"
34 | docker buildx build \
35 | --platform "$platform" \
36 | --compress \
37 | --progress=plain \
38 | --add-host hadoop-master:127.0.0.2 \
39 | -t "${name}:${tag}${ARCH}" \
40 | --load \
41 | "$@" \
42 | .
43 | done
44 |
--------------------------------------------------------------------------------
/bin/flag.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | usage() {
4 | echo "$0 {target image}" >&2
5 | }
6 |
7 | find_args() {
8 | local target_image=$(dirname "$target_dockerfile")
9 | awk -v image="$target_image" '
10 | BEGIN {
11 | ARG_PATTERN = "^\\s*ARG";
12 | print "DBFLAGS_" image " :=";
13 | }
14 |
15 | $1 == "ARG" {
16 | n = split($2, arr, "=")
17 | if (n >= 2) {
18 | # the argument has a default value in the Dockerfile; parse out the argument name
19 | key = arr[1];
20 | } else {
21 | key = $2;
22 | }
23 | print "DBFLAGS_" image " += --build-arg " key;
24 | }' "$1"
25 | }
26 |
27 | if [ $# -lt 1 ]; then
28 | usage
29 | exit 1
30 | fi
31 |
32 | target_dockerfile=$1
33 |
34 | find_args "$target_dockerfile"
35 |
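36 | # Example (illustrative, not part of the original script): given a Dockerfile such as
37 | # archived/phoenix5/Dockerfile containing "ARG ARCH" and "ARG HBASE_VERSION=2.5.10",
38 | # this script prints make-style variable assignments like:
39 | #   DBFLAGS_archived/phoenix5 :=
40 | #   DBFLAGS_archived/phoenix5 += --build-arg ARCH
41 | #   DBFLAGS_archived/phoenix5 += --build-arg HBASE_VERSION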
--------------------------------------------------------------------------------
/bin/lib.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xeuo pipefail
4 |
5 | function expand_multiarch_tags() {
6 | local platforms
7 | local name
8 | local tag=$1
9 | shift
10 |
11 | if [ -z "${PLATFORMS:-}" ]; then
12 | echo "$tag"
13 | return
14 | fi
15 |
16 | IFS=, read -ra platforms <<<"$PLATFORMS"
17 | IFS=: read -r name tag <<<"$tag"
18 |
19 | for platform in "${platforms[@]}"; do
20 | echo "${name}:${tag}-${platform//\//-}"
21 | done
22 | }
23 |
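24 | # Example (illustrative, not part of the original script): with
25 | # PLATFORMS=linux/amd64,linux/arm64, expand_multiarch_tags "testing/spark3-iceberg:latest" prints
26 | #   testing/spark3-iceberg:latest-linux-amd64
27 | #   testing/spark3-iceberg:latest-linux-arm64
28 | # With PLATFORMS unset or empty, the tag is echoed back unchanged.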
--------------------------------------------------------------------------------
/bin/push.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xeuo pipefail
4 |
5 | function push_retry() {
6 | local image=$1
7 |
8 | while ! docker push "$image"; do
9 | echo "Failed to push $image, retrying in 30s..."
10 | sleep 30
11 | done
12 | }
13 |
14 | if [ -z "${PLATFORMS:-}" ]; then
15 | for image in "$@"; do
16 | push_retry "$image"
17 | done
18 | exit 0
19 | fi
20 |
21 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
22 | # shellcheck source=bin/lib.sh
23 | source "$SCRIPT_DIR/lib.sh"
24 |
25 | for image in "$@"; do
26 | mapfile -t expanded_names < <(expand_multiarch_tags "$image")
27 | for name in "${expanded_names[@]}"; do
28 | push_retry "$name"
29 | done
30 | docker manifest create "$image" "${expanded_names[@]}"
31 | docker manifest push "$image"
32 | done
33 |
--------------------------------------------------------------------------------
/bin/tag.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xeuo pipefail
4 |
5 | usage() {
6 | echo "$0 {src} {dst}" >&2
7 | }
8 |
9 | if [ $# -lt 2 ]; then
10 | usage
11 | exit 1
12 | fi
13 |
14 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
15 | # shellcheck source=bin/lib.sh
16 | source "$SCRIPT_DIR/lib.sh"
17 |
18 | mapfile -t src_tags < <(expand_multiarch_tags "$1")
19 | mapfile -t dst_tags < <(expand_multiarch_tags "$2")
20 |
21 | for i in "${!src_tags[@]}"; do
22 | src=${src_tags[$i]}
23 | dst=${dst_tags[$i]}
24 | docker tag "$src" "$dst"
25 | done
26 |
--------------------------------------------------------------------------------
/build/sphinx/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM python:3.12-slim
14 |
15 | # Required by pillow
16 | RUN apt update -y && apt install -y zlib1g-dev libjpeg-dev libxml2-dev libxslt-dev build-essential xsltproc
17 |
18 | WORKDIR /docs
19 | ADD requirements.txt /docs
20 | RUN pip3 install -r requirements.txt
21 |
--------------------------------------------------------------------------------
/build/sphinx/requirements.in:
--------------------------------------------------------------------------------
1 | markupsafe==3.0.2
2 | myst-parser==4.0.1
3 | pillow
4 | sphinx==8.2.3
5 | sphinx-immaterial
6 | sphinx-copybutton
7 | sphinxcontrib-jquery==4.1
8 |
--------------------------------------------------------------------------------
/build/sphinx/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.12
3 | # by the following command:
4 | #
5 | # pip-compile --output-file=requirements.txt requirements.in
6 | #
7 | alabaster==0.7.14
8 | # via sphinx
9 | annotated-types==0.7.0
10 | # via pydantic
11 | appdirs==1.4.4
12 | # via sphinx-immaterial
13 | babel==2.17.0
14 | # via sphinx
15 | certifi==2024.7.4
16 | # via requests
17 | charset-normalizer==3.4.2
18 | # via requests
19 | docutils==0.21.2
20 | # via
21 | # myst-parser
22 | # sphinx
23 | idna==3.7
24 | # via requests
25 | imagesize==1.4.1
26 | # via sphinx
27 | jinja2==3.1.6
28 | # via
29 | # myst-parser
30 | # sphinx
31 | markdown-it-py==3.0.0
32 | # via
33 | # mdit-py-plugins
34 | # myst-parser
35 | markupsafe==3.0.2
36 | # via
37 | # -r requirements.in
38 | # jinja2
39 | # sphinx-immaterial
40 | mdit-py-plugins==0.4.2
41 | # via myst-parser
42 | mdurl==0.1.1
43 | # via markdown-it-py
44 | myst-parser==4.0.1
45 | # via -r requirements.in
46 | packaging==25.0
47 | # via sphinx
48 | pillow==10.3.0
49 | # via -r requirements.in
50 | pydantic==2.11.4
51 | # via
52 | # pydantic-extra-types
53 | # sphinx-immaterial
54 | pydantic-core==2.33.2
55 | # via pydantic
56 | pydantic-extra-types==2.10.4
57 | # via sphinx-immaterial
58 | pygments==2.17.2
59 | # via sphinx
60 | pyyaml==6.0.1
61 | # via myst-parser
62 | requests==2.32.2
63 | # version 2.32.2 replaces 2.32.0, which was yanked from PyPI
64 | # due to a conflict with a CVE mitigation
65 | # via
66 | # sphinx
67 | # sphinx-immaterial
68 | roman-numerals-py==3.1.0
69 | # via sphinx
70 | snowballstemmer==2.2.0
71 | # via sphinx
72 | sphinx==8.2.3
73 | # via
74 | # -r requirements.in
75 | # myst-parser
76 | # sphinx-copybutton
77 | # sphinx-immaterial
78 | # sphinxcontrib-jquery
79 | # sphinxcontrib-serializinghtml
80 | sphinx-copybutton==0.5.2
81 | # via -r requirements.in
82 | sphinx-immaterial==0.13.5
83 | # via -r requirements.in
84 | sphinxcontrib-applehelp==1.0.8
85 | # via sphinx
86 | sphinxcontrib-devhelp==1.0.6
87 | # via sphinx
88 | sphinxcontrib-htmlhelp==2.1.0
89 | # via sphinx
90 | sphinxcontrib-jquery==4.1
91 | # via -r requirements.in
92 | sphinxcontrib-jsmath==1.0.1
93 | # via sphinx
94 | sphinxcontrib-qthelp==2.0.0
95 | # via sphinx
96 | sphinxcontrib-serializinghtml==1.1.9
97 | # via sphinx
98 | typing-extensions==4.13.2
99 | # via
100 | # pydantic
101 | # pydantic-core
102 | # pydantic-extra-types
103 | # sphinx-immaterial
104 | # typing-inspection
105 | typing-inspection==0.4.0
106 | # via pydantic
107 | urllib3==2.2.2
108 | # via requests
109 |
--------------------------------------------------------------------------------
/etc/compose/hdp3.1-hive/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | hadoop-master:
4 | hostname: hadoop-master
5 | image: testing/hdp3.1-hive:latest
6 |
--------------------------------------------------------------------------------
/etc/compose/hive3.1-hive/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | hadoop-master:
4 | hostname: hadoop-master
5 | image: testing/hive3.1-hive:latest$ARCH
6 |
--------------------------------------------------------------------------------
/etc/compose/hive4.0-hive/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | hiveserver2:
4 | hostname: hiveserver2
5 | image: testing/hive4.0-hive:latest$ARCH
6 | environment:
7 | - SERVICE_NAME=hiveserver2
8 |
--------------------------------------------------------------------------------
/etc/compose/kerberos/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | kerberos:
4 | image: testing/kerberos:latest$ARCH
5 | environment:
6 | - TRINODEV_POST_BOOTSTRAP_COMMAND=create_principal -p ala -k ala.keytab
7 |
--------------------------------------------------------------------------------
/etc/compose/openldap-active-directory/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | openldap:
4 | image: testing/almalinux9-oj17-openldap-active-directory:latest$ARCH
5 |
--------------------------------------------------------------------------------
/etc/compose/openldap/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | openldap:
4 | image: testing/almalinux9-oj17-openldap:latest$ARCH
5 |
--------------------------------------------------------------------------------
/etc/compose/polaris-catalog/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | polaris:
4 | image: testing/polaris-catalog:latest
5 |
--------------------------------------------------------------------------------
/etc/compose/spark3-delta/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | spark:
4 | image: testing/spark3-delta:latest$ARCH
5 |
--------------------------------------------------------------------------------
/etc/compose/spark3-hudi/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | spark:
4 | image: testing/spark3-hudi:latest$ARCH
5 |
--------------------------------------------------------------------------------
/etc/compose/spark3-iceberg/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | spark:
4 | image: testing/spark3-iceberg:latest$ARCH
5 |
--------------------------------------------------------------------------------
/etc/compose/unity-catalog/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.0'
2 | services:
3 | polaris:
4 | image: testing/unity-catalog:latest
5 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj11/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM library/almalinux:9
14 |
15 | COPY ./files /
16 |
17 | # Install Java and presto-admin dependencies
18 | RUN \
19 | set -xeu && \
20 | yum update -y && \
21 | yum install -y \
22 | python \
23 | nc \
24 | wget \
25 | && \
26 | \
27 | # Install Zulu JDK 11.0.17 \
28 | rpm -i https://cdn.azul.com/zulu$([ "$(arch)" != "aarch64" ] || echo "-embedded")/bin/zulu11.60.19-ca-jdk11.0.17-linux."$(arch)".rpm && \
29 | # Set JDK 11 as the default one
30 | alternatives --set java /usr/lib/jvm/zulu-11/bin/java && \
31 | alternatives --set javac /usr/lib/jvm/zulu-11/bin/javac && \
32 | \
33 | # install supervisor
34 | yum --enablerepo=extras install -y epel-release && \
35 | yum install -y supervisor && \
36 | \
37 | # install commonly needed packages
38 | yum install -y \
39 | less `# helpful when troubleshooting product tests` \
40 | net-tools `# netstat is required by run_on_docker.sh` \
41 | sudo \
42 | telnet `# helpful when troubleshooting product tests` \
43 | vim `# helpful when troubleshooting product tests` \
44 | jq `# helpful json processing tool` \
45 | procps \
46 | && \
47 | # cleanup
48 | yum -y clean all && rm -rf /tmp/* /var/tmp/*
49 |
50 | ENV PATH="/usr/local/bin:${PATH}"
51 | ENV JAVA_HOME=/usr/lib/jvm/zulu-11
52 | ENV LANG=en_US.UTF-8
53 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj11/files/opt/trinodev/site-override.xslt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj11/files/usr/local/bin/apply-all-site-xml-overrides:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 1 ]; then
11 | fail "Usage: $0 <overrides dir>" >&2
12 | fi
13 |
14 | overrides_dir="$1"
15 |
16 | for file in $(find $overrides_dir -name '*.xml'); do
17 | target_filename="${file#"$overrides_dir"}"
18 | echo "Applying configuration override from $file to $target_filename"
19 | apply-site-xml-override "$target_filename" "$file"
20 | done
21 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj11/files/usr/local/bin/apply-site-xml-override:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 2 ]; then
11 | fail "Usage: $0 <site.xml> <overrides.xml>" >&2
12 | fi
13 |
14 | site_xml="$1"
15 | overrides="$2"
16 | site_xml_new="$1.new"
17 |
18 | test -f "${site_xml}" || fail "${site_xml} does not exist or is not a file"
19 | test -f "${overrides}" || fail "${overrides} does not exist or is not a file"
20 | test ! -e "${site_xml_new}" || fail "${site_xml_new} already exists"
21 |
22 | xsltproc --param override-path "'${overrides}'" "/opt/trinodev/site-override.xslt" "${site_xml}" > "${site_xml_new}"
23 | cat "${site_xml_new}" > "${site_xml}" # Preserve file owner & permissions
24 | rm "${site_xml_new}"
25 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-active-directory/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj17-openldap-base:unlabelled$ARCH
15 |
16 | # COPY CONFIGURATION
17 | COPY ./files /
18 |
19 | # CONFIGURE OPENLDAP SERVER
20 | RUN supervisord -c /etc/supervisord.conf && \
21 | /usr/bin/wait-for-slapd.sh && \
22 | # active dictory schema from https://git.openldap.org/openldap/openldap/-/raw/master/servers/slapd/schema/msuser.ldif
23 | ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/setup/msuser.ldif && \
24 | ldapadd -f /etc/openldap/setup/createOU.ldif -D cn=admin,dc=trino,dc=testldap,dc=com -w admin
25 |
26 | CMD supervisord -n -c /etc/supervisord.conf
27 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj17:unlabelled$ARCH
15 |
16 | # INSTALL OPENLDAP
17 | RUN yum -y install openssl openldap openldap-clients openldap-servers \
18 | # Cleanup
19 | && yum -y clean all && rm -rf /tmp/* /var/tmp/*
20 |
21 | # COPY CONFIGURATION
22 | COPY ./files /
23 |
24 | # Run supervisord in background
25 | RUN supervisord -c /etc/supervisord.conf && \
26 | /usr/bin/wait-for-slapd.sh && \
27 | ldapmodify -Y EXTERNAL -H ldapi:/// -f /etc/openldap/setup/modify_server.ldif && \
28 | ldapmodify -Y EXTERNAL -H ldapi:/// -f /etc/openldap/setup/ldap_disable_bind_anon.ldif && \
29 | # ldif are required in order to have inetOrgPerson object class
30 | ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif && \
31 | ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
32 |
33 | # Generate a keystore.
34 | # TODO: https://github.com/trinodb/trino/issues/8239
35 | RUN keytool -genkey -alias coordinator -storepass testldap -keystore /etc/openldap/certs/coordinator.jks \
36 | -keypass testldap -keyalg RSA -sigalg SHA1withRSA -dname "CN=presto-master, OU=, O=, L=, S=, C=" -validity 100000
37 |
38 | # Generate a certificate and CSR for the keystore. CN should match the hostname of trino-coordinator
39 | RUN keytool -export -alias coordinator -storepass testldap -keystore /etc/openldap/certs/coordinator.jks \
40 | -file /etc/openldap/certs/coordinator.cer && \
41 | keytool -certreq -alias coordinator -storepass testldap -keystore /etc/openldap/certs/coordinator.jks \
42 | -file /etc/openldap/certs/coordinator.csr
43 |
44 | # Create a test CA and generate caroot.cer (the root certificate of the CA).
45 | RUN openssl genrsa -out /etc/openldap/certs/cakey.pem 2048 && \
46 | openssl req -x509 -new -nodes -key /etc/openldap/certs/cakey.pem -sha256 -days 3650 -out /etc/openldap/certs/caroot.cer \
47 | -subj "/C=US/ST=Massachusetts/L=Boston/O=Teradata/OU=Finance/CN=teradata" \
48 | -addext "basicConstraints=CA:TRUE" \
49 | -addext "keyUsage=keyCertSign,cRLSign"
50 |
51 | # Sign the server certificate using the test CA
52 | RUN openssl x509 -req -in /etc/openldap/certs/coordinator.csr -out /etc/openldap/certs/TestCA.cer -days 100000 \
53 | -CA /etc/openldap/certs/caroot.cer -CAkey /etc/openldap/certs/cakey.pem -CAserial /etc/openldap/certs/serial.txt
54 |
55 | # Import the caroot.cer to the keystore and replace the previously generated self-signed cert with the new CA signed one in the keystore
56 | # Note: The -alias name (coordinator) should be the same as the alias used when coordinator.cer was generated
57 | RUN keytool -import -alias TestCA -storepass testldap -keystore /etc/openldap/certs/coordinator.jks \
58 | -noprompt -file /etc/openldap/certs/caroot.cer && \
59 | keytool -import -alias coordinator -storepass testldap -keystore /etc/openldap/certs/coordinator.jks \
60 | -file /etc/openldap/certs/TestCA.cer
61 |
62 | # Import the root certificate of the issuer of the trino-coordinator certificate.
63 | # This would be used by clients (CLI, JDBC) to run tests
64 | RUN keytool -import -alias caroot -storepass testldap -keystore /etc/openldap/certs/cacerts.jks -noprompt \
65 | -file /etc/openldap/certs/caroot.cer
66 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/certs/active-directory-certificate.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIGJTCCBQ2gAwIBAgITHQAAAAUM8gMo/ohRwwAAAAAABTANBgkqhkiG9w0BAQUF
3 | ADBlMRMwEQYKCZImiZPyLGQBGRYDY29tMRgwFgYKCZImiZPyLGQBGRYIdGVzdGxk
4 | YXAxFjAUBgoJkiaJk/IsZAEZFgZwcmVzdG8xHDAaBgNVBAMTE3ByZXN0by1BRC1U
5 | RVNULUNBLTEwHhcNMTcwNjI3MDA0NTM1WhcNMTgwNjI3MDA0NTM1WjAAMIIBIjAN
6 | BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoFyafCrwVdNqQ1Y1D1HMQVScNAc9
7 | m5l0nBbezmtbWExSgKXKE0DOIHLLZB38F8TKJZoa3m3J7Oin+GISrzWgyhoW6+jg
8 | KKGD9PZR5y4Xhu23Fv8IVlN9wf08vDploq34uzRFoBAwvjUcW42FPe+6MoKeMkat
9 | KgrfhCkcXAGhg0SOYmdCAHcPA63HBCNmpgYAZ38iqwiZugnraIXgn0VGhcmLlj7c
10 | S5Sv1GYOUtEd0MXELepAZPUXx8HivPvnz09tz++W6aKAkLsYvQShijPVnFinOAa9
11 | 8ZNlHyz8v5UiUbS/9kyQOY86mM+1U+qK9BQVIbRVvBSkLXPY2ZAvyZ9pCwIDAQAB
12 | o4IDMTCCAy0wPQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIhdXlWYazpGSH5Z0V
13 | hczUNYOhlVGBRKj2FYbb3DICAWQCAQEwMgYDVR0lBCswKQYHKwYBBQIDBQYKKwYB
14 | BAGCNxQCAgYIKwYBBQUHAwEGCCsGAQUFBwMCMA4GA1UdDwEB/wQEAwIFoDBABgkr
15 | BgEEAYI3FQoEMzAxMAkGBysGAQUCAwUwDAYKKwYBBAGCNxQCAjAKBggrBgEFBQcD
16 | ATAKBggrBgEFBQcDAjAdBgNVHQ4EFgQU74Oc6DMfI847F2OplR+db0pIQJEwHwYD
17 | VR0jBBgwFoAUYVLOSj8PMaLbsTkN+P/R5RwEvMIwgdwGA1UdHwSB1DCB0TCBzqCB
18 | y6CByIaBxWxkYXA6Ly8vQ049cHJlc3RvLUFELVRFU1QtQ0EtMSxDTj1hZC10ZXN0
19 | LENOPUNEUCxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2aWNlcyxD
20 | Tj1Db25maWd1cmF0aW9uLERDPXByZXN0byxEQz10ZXN0bGRhcCxEQz1jb20/Y2Vy
21 | dGlmaWNhdGVSZXZvY2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3Ry
22 | aWJ1dGlvblBvaW50MIHQBggrBgEFBQcBAQSBwzCBwDCBvQYIKwYBBQUHMAKGgbBs
23 | ZGFwOi8vL0NOPXByZXN0by1BRC1URVNULUNBLTEsQ049QUlBLENOPVB1YmxpYyUy
24 | MEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2VzLENOPUNvbmZpZ3VyYXRpb24sREM9
25 | cHJlc3RvLERDPXRlc3RsZGFwLERDPWNvbT9jQUNlcnRpZmljYXRlP2Jhc2U/b2Jq
26 | ZWN0Q2xhc3M9Y2VydGlmaWNhdGlvbkF1dGhvcml0eTB0BgNVHREBAf8EajBooCwG
27 | CisGAQQBgjcUAgOgHgwcQUQtVEVTVCRAcHJlc3RvLnRlc3RsZGFwLmNvbYIbYWQt
28 | dGVzdC5wcmVzdG8udGVzdGxkYXAuY29tghNwcmVzdG8udGVzdGxkYXAuY29tggZQ
29 | UkVTVE8wDQYJKoZIhvcNAQEFBQADggEBAAtuksGPz4cwkIB7g8nqI1ysi0uaPxBO
30 | DSdq3wXPA9jS+W6bR39iiflugaeOW1pfQsZhMJadVCMkvQ/sYC4y1WmD+j6VSVs2
31 | 1znc4Gh+41teF3x1XRHgvQ1Vum+7GJIeKhZEUuB1F9SpuYxQT3wOIX2VnKq+1Z3/
32 | 7TlRyZ9bAG8tmSeYXK4/dL5q+L7o1UgRU47V7RzhOJp/bmQFQd0gtXRpSH5LJZhk
33 | +lIlmCxgsjkawBBvXHWd9K6R/5SIr/S/RfOIVdU/WbIcOzzAzZr9YqhmB3POc4aL
34 | f4IY0N7Rxc/dBW1bO833+lNOI4dJJgtdZoOxCe3BiHsM2EbZzUiu1HA=
35 | -----END CERTIFICATE-----
36 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/certs/openldap-certificate.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFoDCCA4igAwIBAgIUGEWppDtSXD4BGCYHA5wOExBg8XwwDQYJKoZIhvcNAQEL
3 | BQAwUzETMBEGA1UEAwwKbGRhcHNlcnZlcjENMAsGA1UECwwEVEVTVDEOMAwGA1UE
4 | CgwFVFJJTk8xEDAOBgNVBAcMB0NoZW5uYWkxCzAJBgNVBAYTAklOMCAXDTIyMDIx
5 | NzA2MTY0OVoYDzIxMjIwMTI0MDYxNjQ5WjBTMRMwEQYDVQQDDApsZGFwc2VydmVy
6 | MQ0wCwYDVQQLDARURVNUMQ4wDAYDVQQKDAVUUklOTzEQMA4GA1UEBwwHQ2hlbm5h
7 | aTELMAkGA1UEBhMCSU4wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDi
8 | Z5jfxFrL5tRp0EPgIfXUrQsTRzEO40Je5OogZ32nuMpNIfiveJBMq4aKiFXIr0z/
9 | aBKvgCTaNmeiMCJpOxuY7sUgZIHMqxHjuqCGFUvjrqdKHSiS4oHa2pvBpJE1Jo7z
10 | T7X3VTnE+1sLlHQvhI9aN/nbsJP75LVjfKqOHo7DjEUH5kTJlw76k1BjYMpm9HXN
11 | Z+AMnsk9jRitBVtI9XMroJ2ks7rM2E1rp+5LQUtkjm8ceFjQs71tmQs1SC1mDhWn
12 | 0DWP6JOyUbyjtM01SCrIRFwV6IGkFqdGvH3x5gf71OWlmD8cX5aZOjVB1Lx7gGG8
13 | vsxMIfOBAdCOrgJvUp3cRAYRnqxgIgJHqbdxlSEk/MaIIzaWgIsy9q8dOpqSKYFR
14 | C5oExM/flyaBCWGlcN+lDgzUBCE5Qyj96kxztByCbYpiVt/Zn4ZOhmfWjJ5RRdUY
15 | K1UGM+Vmv9fz4OYmzvcB2VdpFGrZrNJUQHx32hXru+wweNe3q+lg0EzAaCbDekIy
16 | VFDSWvSVHPv3LD+SiwG/uP2HDGizaC1TQism7P5gwZj9kSGdjhOfBLmYL3Ul2Iye
17 | NFOBBawBfBMXH0mVZ4BWdouvHQH+gXnMB60mOJGT14sEo8sF4y9S8g7EpC5wzGel
18 | lT5dCfIGQYFvKdj/L1uXg6/ysEsnHShPCXiL4Ry3fQIDAQABo2owaDAdBgNVHQ4E
19 | FgQU0m2h8Pz48N+PYmBC91kmXAIjg4EwHwYDVR0jBBgwFoAU0m2h8Pz48N+PYmBC
20 | 91kmXAIjg4EwDwYDVR0TAQH/BAUwAwEB/zAVBgNVHREEDjAMggpsZGFwc2VydmVy
21 | MA0GCSqGSIb3DQEBCwUAA4ICAQCBAf/TF36JorIb8NU9p2sqY/YS614cmVYEIgrf
22 | N/90QTJXOuJHQlCZC8gs9Wyue/A09SSNqPQMSnpOmGfQ20KKFEeh97kVJWfPzXWP
23 | c9vA/bwCz7tLALyA9k49nAcLnXYhHM7gVQ8HXVBpWKTUIDv8TmB4s4UU7MPlOXsI
24 | z3DXR7B7it6eLJzaN/y6W99ncD+nxxbp/MxBUqpgDXuiE2YO+lllDqbV8PrAbwYI
25 | M9HIac/R4al8ica6ZrrBWjeV3xVnmmJmNHqW4c1WhFGWc/xVrFswkF/NLnQrXxol
26 | GXXNSXZTXwwV0kZouDg8lk8z+VrfDQNv6TWhMdXPhEISCQqyDAyenA2U9avdFZer
27 | ZhMAcdPP3b5G56Yzs86TzSRef4rpnbZgRdohzyMhtg3xRJnrVKKZHmiWwsMRUR2H
28 | C/iZk5DFSQA5kFLmwJYjBrsZcw4uoHnR6De3IEgq8i8EAIl8sLUxXKVFKxDnbX4R
29 | yQ+dU998fkNj8/s315b6ugvBI5woM85Zeeh0YujSwVmcaU6rlZ/fdbeYUC7gfvSO
30 | c3ddulZ8UHnQDdchbLX8+KASl8cqCRKTo6d2NL+gI0ThYi/jgiybyNPxzl36msAB
31 | looTQ7mvgcOFld8PI48kc4k7gIL4b552gE4+3RRhnkgg4Ao8XdFfPcRHg4qEJuyv
32 | RFrLcA==
33 | -----END CERTIFICATE-----
34 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/certs/private.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDiZ5jfxFrL5tRp
3 | 0EPgIfXUrQsTRzEO40Je5OogZ32nuMpNIfiveJBMq4aKiFXIr0z/aBKvgCTaNmei
4 | MCJpOxuY7sUgZIHMqxHjuqCGFUvjrqdKHSiS4oHa2pvBpJE1Jo7zT7X3VTnE+1sL
5 | lHQvhI9aN/nbsJP75LVjfKqOHo7DjEUH5kTJlw76k1BjYMpm9HXNZ+AMnsk9jRit
6 | BVtI9XMroJ2ks7rM2E1rp+5LQUtkjm8ceFjQs71tmQs1SC1mDhWn0DWP6JOyUbyj
7 | tM01SCrIRFwV6IGkFqdGvH3x5gf71OWlmD8cX5aZOjVB1Lx7gGG8vsxMIfOBAdCO
8 | rgJvUp3cRAYRnqxgIgJHqbdxlSEk/MaIIzaWgIsy9q8dOpqSKYFRC5oExM/flyaB
9 | CWGlcN+lDgzUBCE5Qyj96kxztByCbYpiVt/Zn4ZOhmfWjJ5RRdUYK1UGM+Vmv9fz
10 | 4OYmzvcB2VdpFGrZrNJUQHx32hXru+wweNe3q+lg0EzAaCbDekIyVFDSWvSVHPv3
11 | LD+SiwG/uP2HDGizaC1TQism7P5gwZj9kSGdjhOfBLmYL3Ul2IyeNFOBBawBfBMX
12 | H0mVZ4BWdouvHQH+gXnMB60mOJGT14sEo8sF4y9S8g7EpC5wzGellT5dCfIGQYFv
13 | Kdj/L1uXg6/ysEsnHShPCXiL4Ry3fQIDAQABAoICAF1tKqSMPnuN0XGow5ml6e/N
14 | SJi39AKUvTqzc/nTOwMPOkwCDME8Gy/ISEi6Ycviqv9qTuulEZAKi1FeU5ThE05U
15 | mXv4faeboFI9HSi3eQ2HcPkgdzHFzpglRcjuxA0O0EaPSiO6Q5CJuNRaemdxaEZu
16 | euixQR/dDQz00Ny8t2TyaToNR614bHHygGXB90Ps9P/CMa6Dh7+ijeA4Kac9wjzq
17 | lqL0l80YJTqbnbXoR9hC/o7dCql0wmKLmRhvpGxMu5r4TGIKM4iOOi3D2jp2gVEL
18 | BIT14Sl5P6Xs+nvr1LeS7wEahlCLEXZBkuCLjQvZuntMsLlp+TXvLcTzAYY2rXUE
19 | jD3HhpCr8b+o1r+SqpSkMvvQQIEwQpzcSAKIBncZi7SD8UWSfPO2bZpjkGMnYaKb
20 | PTAK30SIBZkkKZsvWT6eTFl6Jm5rhLxxygwY++kHiUOqjDcOEhpKLLkTOcbnaO1R
21 | XNZTrPniw/hkoVFXuJ94UE+nSFX0pCKncqu+r36zUFHqbBhgsvWUV92fTqGB6F5V
22 | 2L8LZ0zphlfIrNDfyP+kAz9IujYnC8wZDZ9OzqGAvtRGJuBCmcTrfPS44GueD/JN
23 | 6Qgp4tMfmixPFSkL3JeT2ryw6NlXHq6Z50j4jRoFdGYENSbuOTgYGCscbxMpISaC
24 | f6YDiXJ7DjntyTjXhfRxAoIBAQDjrWEapJ6KoVkZ4LIMMlAj69srHNAN9EQFjRob
25 | JoMDOT2xC7LzoO6wGKvE973poGb+Ey2+k/VmKxVqTl4fDk09c+HUud6igxmeJ52p
26 | zRXS9pZmqiCbfHdMLT6GB4bWMMdHyqA3sDZ+pMH2NhZ98RwcChw5hX7Ir6Jk3uMm
27 | gKlPLjFrFwkGft1xHFMQKGoV4BHJ4r+b9h2g1l8AT2h9FrOODRa4dziKxwZhb8jv
28 | wrQetaADR488JekjJThsBnSGm/VaIjFBNnxzsuJ7YyiHMgoExnYFXXGfGlWehzz0
29 | s5bV8YHy73759K02BnRU5icn1vi5Iy2ihzx5Mu4bd30jV/3zAoIBAQD+kbDlhwQY
30 | Uj6LYs3VjLtWkSSqSWUNuJ812SKDEI9u9EzpIrumX3ZN18sIaRViUmu356u5O8l0
31 | d2dKY32pddLSKBK3YuYH7OetnQwjwR7xM0rd7JN77NAir0Jcn0u2cBSuwr57iccd
32 | sIOOJjcElUaCY5M1vdvFLe/WHjSiXHsmWrjnuuB+zF36Rtce49gkdofluRS3/SCU
33 | wpb4F9uMjOvlGjO/suy003oJFsPiryWpoXssDn8VHpQwePF2qyy03DJ7iIVKStX5
34 | z26+EQq/EIVjk5fv0lCG8zLI92Q/AOd+cmVI6yQnsPQpgRVec2WQWzz0EQZNMWaa
35 | aPgCASVz6CDPAoIBAQCEwaYraO3hG+/efPyUzAmayH5Fh0lxuYqoSfx89FnUWV41
36 | jALOuHo1SE1wTpz4S0WCxSAZTiD7cAfOst+C8E3tlVKLPc4pqJQZejQevK0LFCni
37 | tzTINS1Cx+b7NAxVfJ/WproBfshn1B4dOLCowxyG5QZMLCEfV/ozIxwEaWVP/Dlo
38 | nkfu966bs85jT418L+lvdL4RbNMd46IFhkgBGGmeXNdwvGPF2ANKcWoTE87/URx6
39 | jRFiGyD0E1Hw+1qa2vmXkvRxeBndRON22vzYyD67RhWvTytfgHWIOIc8kDNVkLsb
40 | MrcxfJA7Pkw67PDL9DeXjIGhkYWbjiz06xmWsEotAoIBADqUehmHZUjXfC8FEIqo
41 | tN9pzP8mnot2xaRVkByK7gbBTwWDCZ1ycD3ff4gy7knO35zVQe4BNQ2JAIrctCkP
42 | xPC5af0rU/XU7UdFUWrU4PLuM7c33oRw7UBeBi5cNccIqanBhKXXAavyV4OZhaWX
43 | WD9e/JwsUWthc8RY9RG7qFceH1ir50cmbiXOsWgZVD6nzAPwADWKmTnCQY1bqBkC
44 | wLHawrfFS7UcyQNYasE6Prd3QmnT+Ch6/ezdl+sP3tzLow5MTyfWasDum/Q+5bMr
45 | 8RYF+7ujMQas5i3ayAW7honRdz89ZPeBP95hJvKyKVf7tt4PIN0YlhzINPH5I441
46 | h38CggEBAMEP+lVmPdeidZRlz3dT286wenysCjv3xVwhYIoo6NDRV3mTAHTnogxS
47 | vTC2DP70t9iYJr4EH4nS0eTeKiNqZvyq3auVb0l6YEh3jsf/9qeO+htiTi4yLngW
48 | C/cbkrs7ehuzUSS/rPBSCDHXfJqYskzBKJSf2z68Gp15uvav9XRdiGagRsVnNCUu
49 | +gI5XwXZfxung7w2TQXrvKgvZRMcF81tz/f69hcGStFdeg79ZL7dOgjg4gmj6Rep
50 | rESNCC8wxKvGGDlAMBAhKjjsBZdeWZiFC3RI4J/GTg+FejtMHVAuXBpIlAI6kXe3
51 | 6AZVjiYtyCHFpjEr9vNEbP4vqeE/KxM=
52 | -----END PRIVATE KEY-----
53 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/certs/serial.txt:
--------------------------------------------------------------------------------
1 | 1234
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/setup/createOU.ldif:
--------------------------------------------------------------------------------
1 | dn: dc=trino,dc=testldap,dc=com
2 | dc: trino
3 | o: teradata
4 | objectClass: organization
5 | objectClass: dcObject
6 |
7 | dn: ou=America,dc=trino,dc=testldap,dc=com
8 | objectClass: organizationalUnit
9 | ou: America
10 |
11 | dn: ou=Asia,dc=trino,dc=testldap,dc=com
12 | objectClass: organizationalUnit
13 | ou: Asia
14 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/setup/ldap_disable_bind_anon.ldif:
--------------------------------------------------------------------------------
1 | dn: cn=config
2 | changetype: modify
3 | add: olcDisallows
4 | olcDisallows: bind_anon
5 |
6 | dn: cn=config
7 | changetype: modify
8 | add: olcRequires
9 | olcRequires: authc
10 |
11 | dn: olcDatabase={-1}frontend,cn=config
12 | changetype: modify
13 | add: olcRequires
14 | olcRequires: authc
15 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/openldap/setup/modify_server.ldif:
--------------------------------------------------------------------------------
1 | dn: olcDatabase={2}mdb,cn=config
2 | changetype: modify
3 | replace: olcRootDN
4 | olcRootDN: cn=admin,dc=trino,dc=testldap,dc=com
5 | -
6 | replace: olcSuffix
7 | olcSuffix: dc=trino,dc=testldap,dc=com
8 | -
9 | replace: olcRootPW
10 | olcRootPW: admin
11 | -
12 | replace: olcAccess
13 | olcAccess: {0}to attrs=userPassword by self write by dn.base="cn=admin,dc=trino,dc=testldap,dc=com" write by anonymous auth by * none
14 | olcAccess: {1}to * by dn.base="cn=admin,dc=trino,dc=testldap,dc=com" write by self write by * read
15 |
16 | dn: olcDatabase={1}monitor,cn=config
17 | changetype: modify
18 | replace: olcAccess
19 | olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=exter
20 | nal,cn=auth" read by dn.base="cn=admin,dc=trino,dc=testldap,dc=com" read by * n
21 | one
22 |
23 | dn: cn=config
24 | changetype: modify
25 | replace: olcTLSCACertificateFile
26 | olcTLSCACertificateFile: /etc/openldap/certs/openldap-certificate.pem
27 | -
28 | replace: olcTLSCertificateFile
29 | olcTLSCertificateFile: /etc/openldap/certs/openldap-certificate.pem
30 | -
31 | replace: olcTLSCertificateKeyFile
32 | olcTLSCertificateKeyFile: /etc/openldap/certs/private.pem
33 | -
34 | replace: olcTLSVerifyClient
35 | olcTLSVerifyClient: demand
36 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | directory = /tmp
8 | strip_ansi = false
9 |
10 | [unix_http_server]
11 | file = /tmp/supervisor.sock
12 |
13 | [rpcinterface:supervisor]
14 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
15 |
16 | [supervisorctl]
17 | serverurl = unix:///tmp/supervisor.sock
18 |
19 | [include]
20 | files = /etc/supervisord.d/*.conf
21 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/etc/supervisord.d/slapd.conf:
--------------------------------------------------------------------------------
1 | [program:slapd]
2 | command=/usr/bin/pidproxy /var/run/openldap/slapd.pid /usr/sbin/slapd -d 1 -F /etc/openldap/slapd.d/ -h 'ldaps://0.0.0.0:636 ldap://0.0.0.0:389 ldapi:///'
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/slapd
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/files/usr/bin/wait-for-slapd.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xeu
4 |
5 | timeout=30
6 | while ((timeout > 0)); do
7 | # -LLL would print responses in LDIF format without comments and version
8 | # An invalid filter is applied to avoid returning actual results, which would spam the build output.
9 | if ldapsearch -Y EXTERNAL -H ldapi:/// -b cn=config -LLL "(emptyAttribute=emptyValue)"; then
10 | echo "Slapd is running... Exiting"
11 | exit 0
12 | fi
13 |
14 | sleep 1
15 | ((timeout -= 1))
16 | done
17 |
18 | echo "Slapd startup timed out"
19 | exit 1
20 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-base/generate-certificates.md:
--------------------------------------------------------------------------------
1 | ## OpenLDAP Server
2 |
3 | ### Generate private key and certificate
4 |
5 | ```shell
6 | openssl req -new -x509 -newkey rsa:4096 -sha256 -nodes -days 36500 \
7 | -subj '/CN=ldapserver/OU=TEST/O=TRINO/L=Chennai/S=TN/C=IN' \
8 | -addext "subjectAltName = DNS:ldapserver" \
9 | -keyout files/etc/openldap/certs/private.pem \
10 | -out files/etc/openldap/certs/openldap-certificate.pem
11 | ```
12 |
13 | ## Trino coordinator
14 |
15 | ### Generate CSR for Trino coordinator
16 |
17 | ```shell
18 | openssl req -newkey rsa:4096 -nodes -days 36500 \
19 | -subj '/CN=presto-master/OU=TEST/O=TRINO/L=Chennai/S=TN/C=IN' \
20 | -keyout files/etc/openldap/certs/trino-coordinator-for-ldap.key \
21 | -addext "subjectAltName = DNS:presto-master" \
22 | -out files/etc/openldap/certs/trino-coordinator-for-ldap.csr
23 | ```
24 |
25 | ### Sign CSR using openldap-certificate.pem
26 |
27 | ```shell
28 | openssl x509 -req -days 36500 -in files/etc/openldap/certs/trino-coordinator-for-ldap.csr \
29 | -out files/etc/openldap/certs/trino-coordinator-for-ldap.crt \
30 | -CA files/etc/openldap/certs/openldap-certificate.pem \
31 | -CAkey files/etc/openldap/certs/private.pem \
32 | -CAserial files/etc/openldap/certs/serial.txt
33 | ```
34 |
35 | ### Bundle them to a PEM file
36 |
37 | ```shell
38 | cat files/etc/openldap/certs/trino-coordinator-for-ldap.crt \
39 | files/etc/openldap/certs/trino-coordinator-for-ldap.key \
40 | > files/etc/openldap/certs/trino-coordinator-for-ldap.pem
41 | ```
42 |
43 | ### Remove unnecessary files
44 | ```shell
45 | rm files/etc/openldap/certs/trino-coordinator-for-ldap.csr \
46 | files/etc/openldap/certs/trino-coordinator-for-ldap.key \
47 | files/etc/openldap/certs/trino-coordinator-for-ldap.crt
48 | ```
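49 |
50 | ### Inspect the bundled certificate (optional)
51 |
52 | As a quick sanity check (not part of the original steps), the certificate at the
53 | top of the bundled PEM can be printed to confirm its subject, its issuer
54 | (the OpenLDAP certificate above), and its validity period:
55 |
56 | ```shell
57 | openssl x509 -in files/etc/openldap/certs/trino-coordinator-for-ldap.pem \
58 | -noout -subject -issuer -dates
59 | ```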
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-referrals/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj17-openldap:unlabelled$ARCH
15 |
16 | # COPY CONFIGURATION
17 | COPY ./files /
18 |
19 | # CONFIGURE OPENLDAP SERVER
20 | # Run supervisord in background
21 | RUN supervisord -c /etc/supervisord.conf && \
22 | /usr/bin/wait-for-slapd.sh && \
23 | ldapadd -f /etc/openldap/setup/createReferrals.ldif -D cn=admin,dc=trino,dc=testldap,dc=com -w admin
24 |
25 | CMD supervisord -n -c /etc/supervisord.conf
26 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap-referrals/files/etc/openldap/setup/createReferrals.ldif:
--------------------------------------------------------------------------------
1 | dn: ou=World,dc=trino,dc=testldap,dc=com
2 | objectClass: organizationalUnit
3 | ou: World
4 |
5 | dn: ou=West,ou=World,dc=trino,dc=testldap,dc=com
6 | objectClass: referral
7 | objectClass: extensibleObject
8 | ou: West
9 | ref: ldaps://ldapserver:636/ou=America,dc=trino,dc=testldap,dc=com
10 |
11 | dn: ou=East,ou=World,dc=trino,dc=testldap,dc=com
12 | objectClass: referral
13 | objectClass: extensibleObject
14 | ou: East
15 | ref: ldaps://ldapserver:636/ou=Asia,dc=trino,dc=testldap,dc=com
16 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj17-openldap-base:unlabelled$ARCH
15 |
16 | # COPY CONFIGURATION
17 | COPY ./files /
18 |
19 | # CONFIGURE OPENLDAP SERVER
20 | RUN supervisord -c /etc/supervisord.conf && \
21 | /usr/bin/wait-for-slapd.sh && \
22 | ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/setup/memberof.ldif && \
23 | ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/setup/refint.ldif && \
24 | ldapadd -f /etc/openldap/setup/createOU.ldif -D cn=admin,dc=trino,dc=testldap,dc=com -w admin
25 |
26 | CMD supervisord -n -c /etc/supervisord.conf
27 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap/files/etc/openldap/setup/memberof.ldif:
--------------------------------------------------------------------------------
1 | # This overlay helps in determining which group a given user belongs to
2 | dn: cn=module,cn=config
3 | cn: module
4 | objectClass: olcModuleList
5 | olcModuleLoad: memberof.la
6 | olcModulePath: /usr/lib64/openldap
7 |
8 | dn: olcOverlay={0}memberof,olcDatabase={2}mdb,cn=config
9 | objectClass: olcConfig
10 | objectClass: olcMemberOf
11 | objectClass: olcOverlayConfig
12 | objectClass: top
13 | olcOverlay: memberof
14 | olcMemberOfDangling: ignore
15 | olcMemberOfRefInt: TRUE
16 | olcMemberOfGroupOC: groupOfNames
17 | olcMemberOfMemberAD: member
18 | olcMemberOfMemberOfAD: memberOf
19 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17-openldap/files/etc/openldap/setup/refint.ldif:
--------------------------------------------------------------------------------
1 | # This overlay ensures that when user accounts are modified or deleted,
2 | # the corresponding member attribute in groups is also handled accordingly.
3 | dn: cn=module{1},cn=config
4 | cn: module
5 | objectclass: olcModuleList
6 | objectclass: top
7 | olcmoduleload: refint.la
8 |
9 | dn: olcOverlay={1}refint,olcDatabase={2}mdb,cn=config
10 | objectClass: olcConfig
11 | objectClass: olcOverlayConfig
12 | objectClass: olcRefintConfig
13 | objectClass: top
14 | olcOverlay: {1}refint
15 | olcRefintAttribute: memberof member manager owner
16 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM library/almalinux:9
14 |
15 | COPY ./files /
16 |
17 | # Install Java and presto-admin dependencies
18 | RUN \
19 | set -xeu && \
20 | yum update -y && \
21 | yum install -y \
22 | python \
23 | nc \
24 | wget \
25 | && \
26 | \
27 | # Install Zulu JDK 17.0.5 \
28 | rpm -i "https://cdn.azul.com/zulu/bin/zulu17.38.21-ca-jdk17.0.5-linux.$(arch).rpm" && \
29 | # Set JDK 17 as the default one
30 | alternatives --set java /usr/lib/jvm/zulu-17/bin/java && \
31 | alternatives --set javac /usr/lib/jvm/zulu-17/bin/javac && \
32 | \
33 | # install supervisor
34 | yum --enablerepo=extras install -y epel-release && \
35 | yum install -y supervisor && \
36 | \
37 | # install commonly needed packages
38 | yum install -y \
39 | less `# helpful when troubleshooting product tests` \
40 | net-tools `# netstat is required by run_on_docker.sh` \
41 | sudo \
42 | telnet `# helpful when troubleshooting product tests` \
43 | vim `# helpful when troubleshooting product tests` \
44 | jq `# helpful json processing tool` \
45 | procps \
46 | && \
47 | # cleanup
48 | yum -y clean all && rm -rf /tmp/* /var/tmp/*
49 |
50 | ENV PATH="/usr/local/bin:${PATH}"
51 | ENV JAVA_HOME=/usr/lib/jvm/zulu-17
52 | ENV LANG=en_US.UTF-8
53 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17/files/opt/trinodev/site-override.xslt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17/files/usr/local/bin/apply-all-site-xml-overrides:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 1 ]; then
11 | fail "Usage: $0 <overrides dir>" >&2
12 | fi
13 |
14 | overrides_dir="$1"
15 |
16 | for file in $(find $overrides_dir -name '*.xml'); do
17 | target_filename="${file#"$overrides_dir"}"
18 | echo "Applying configuration override from $file to $target_filename"
19 | apply-site-xml-override "$target_filename" "$file"
20 | done
21 |
--------------------------------------------------------------------------------
/testing/almalinux9-oj17/files/usr/local/bin/apply-site-xml-override:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | fail() {
6 | echo "$(basename "$0"): $*" >&2
7 | exit 1
8 | }
9 |
10 | if [ $# -ne 2 ]; then
11 | fail "Usage: $0 <site.xml> <overrides.xml>" >&2
12 | fi
13 |
14 | site_xml="$1"
15 | overrides="$2"
16 | site_xml_new="$1.new"
17 |
18 | test -f "${site_xml}" || fail "${site_xml} does not exist or is not a file"
19 | test -f "${overrides}" || fail "${overrides} does not exist or is not a file"
20 | test ! -e "${site_xml_new}" || fail "${site_xml_new} already exists"
21 |
22 | xsltproc --param override-path "'${overrides}'" "/opt/trinodev/site-override.xslt" "${site_xml}" > "${site_xml_new}"
23 | cat "${site_xml_new}" > "${site_xml}" # Preserve file owner & permissions
24 | rm "${site_xml_new}"
25 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/etc/hive/conf/hiveserver2-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 | <configuration>
20 |
21 | <property>
22 | <name>hive.metastore.uris</name>
23 | <value>thrift://localhost:9083</value>
24 | </property>
25 |
26 |
27 | <property>
28 | <name>hive.security.authenticator.manager</name>
29 | <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
30 | </property>
31 |
32 |
33 | <property>
34 | <name>hive.security.authorization.manager</name>
35 | <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
36 | <description>SQL standards based Hive authorization</description>
37 | </property>
38 |
39 | <property>
40 | <name>hive.security.authorization.enabled</name>
41 | <value>true</value>
42 | </property>
43 |
44 | </configuration>
45 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/etc/krb5.conf:
--------------------------------------------------------------------------------
1 | [logging]
2 | default = FILE:/var/log/krb5libs.log
3 | kdc = FILE:/var/log/krb5kdc.log
4 | admin_server = FILE:/var/log/kadmind.log
5 |
6 | [libdefaults]
7 | default_realm = OTHERREALM.COM
8 | dns_lookup_realm = false
9 | dns_lookup_kdc = false
10 | forwardable = true
11 | allow_weak_crypto = true
12 |
13 | [realms]
14 | OTHERREALM.COM = {
15 | kdc = hadoop-master-2:88
16 | admin_server = hadoop-master-2
17 | }
18 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/etc/supervisord.d/kdc.conf:
--------------------------------------------------------------------------------
1 | [program:krb5kdc]
2 | command=/bin/bash -c "exec /usr/sbin/krb5kdc -P /var/run/krb5kdc.pid -n -r OTHERREALM.COM"
3 | autostart=true
4 | autorestart=true
5 | redirect_stderr=true
6 | stdout_logfile=/dev/stdout
7 | stdout_logfile_maxbytes=0
8 |
9 | [program:kadmind]
10 | command=/bin/bash -c "exec /usr/sbin/kadmind -P /var/run/kadmind.pid -nofork -r OTHERREALM.COM"
11 | autostart=true
12 | autorestart=true
13 | redirect_stderr=true
14 | stdout_logfile=/dev/stdout
15 | stdout_logfile_maxbytes=0
16 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/overrides/etc/hadoop/conf/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 | <configuration>
20 | <property>
21 | <name>fs.defaultFS</name>
22 | <value>hdfs://hadoop-master-2:9000</value>
23 | </property>
24 |
25 |
26 | <property>
27 | <name>hadoop.proxyuser.presto-server.groups</name>
28 | <value>*</value>
29 | </property>
30 |
31 | <property>
32 | <name>hadoop.proxyuser.presto-server.hosts</name>
33 | <value>*</value>
34 | </property>
35 |
36 |
37 | <property>
38 | <name>hadoop.security.authentication</name>
39 | <value>kerberos</value>
40 | </property>
41 |
42 | <property>
43 | <name>hadoop.security.authorization</name>
44 | <value>true</value>
45 | </property>
46 |
47 | <property>
48 | <name>hadoop.security.auth_to_local</name>
49 | <value>
50 | RULE:[2:$1@$0](.*@OTHERLABS.TERADATA.COM)s/@.*//
51 | DEFAULT
52 | </value>
53 | </property>
54 |
55 | </configuration>
56 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/overrides/etc/hadoop/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
16 |
17 |
18 | <configuration>
19 | <property>
20 | <name>dfs.block.access.token.enable</name>
21 | <value>true</value>
22 | </property>
23 |
24 |
25 | <property>
26 | <name>dfs.namenode.keytab.file</name>
27 | <value>/etc/hadoop/conf/hdfs.keytab</value>
28 | </property>
29 | <property>
30 | <name>dfs.namenode.kerberos.principal</name>
31 | <value>hdfs/hadoop-master-2@OTHERREALM.COM</value>
32 | </property>
33 | <property>
34 | <name>dfs.namenode.kerberos.internal.spnego.principal</name>
35 | <value>HTTP/hadoop-master-2@OTHERREALM.COM</value>
36 | </property>
37 |
38 |
39 | <property>
40 | <name>dfs.secondary.namenode.keytab.file</name>
41 | <value>/etc/hadoop/conf/hdfs.keytab</value>
42 | </property>
43 | <property>
44 | <name>dfs.secondary.namenode.kerberos.principal</name>
45 | <value>hdfs/hadoop-master-2@OTHERREALM.COM</value>
46 | </property>
47 | <property>
48 | <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
49 | <value>HTTP/hadoop-master-2@OTHERREALM.COM</value>
50 | </property>
51 |
52 |
53 | <property>
54 | <name>dfs.datanode.keytab.file</name>
55 | <value>/etc/hadoop/conf/hdfs.keytab</value>
56 | </property>
57 | <property>
58 | <name>dfs.datanode.kerberos.principal</name>
59 | <value>hdfs/hadoop-master-2@OTHERREALM.COM</value>
60 | </property>
61 |
62 |
63 | <property>
64 | <name>dfs.webhdfs.enabled</name>
65 | <value>true</value>
66 | </property>
67 |
68 |
69 | <property>
70 | <name>dfs.web.authentication.kerberos.principal</name>
71 | <value>HTTP/hadoop-master-2@OTHERREALM.COM</value>
72 | </property>
73 |
74 | <property>
75 | <name>dfs.web.authentication.kerberos.keytab</name>
76 | <value>/etc/hadoop/conf/HTTP.keytab</value>
77 | </property>
78 |
79 | <property>
80 | <name>ignore.secure.ports.for.testing</name>
81 | <value>true</value>
82 | </property>
83 |
84 | <property>
85 | <name>dfs.http.policy</name>
86 | <value>HTTP_ONLY</value>
87 | </property>
88 |
89 | </configuration>
90 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/overrides/etc/hadoop/conf/mapred-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 | <configuration>
20 | <property>
21 | <name>mapred.job.tracker</name>
22 | <value>hadoop-master-2:8021</value>
23 | </property>
24 |
25 | <property>
26 | <name>mapreduce.jobhistory.address</name>
27 | <value>hadoop-master-2:10020</value>
28 | </property>
29 |
30 | <property>
31 | <name>mapreduce.jobhistory.webapp.address</name>
32 | <value>hadoop-master-2:19888</value>
33 | </property>
34 |
35 |
36 | <property>
37 | <name>mapreduce.jobhistory.keytab</name>
38 | <value>/etc/hadoop/conf/mapred.keytab</value>
39 | </property>
40 |
41 | <property>
42 | <name>mapreduce.jobhistory.principal</name>
43 | <value>mapred/hadoop-master-2@OTHERREALM.COM</value>
44 | </property>
45 |
46 |
47 | <property>
48 | <name>mapreduce.jobtracker.kerberos.principal</name>
49 | <value>mapred/hadoop-master-2@OTHERREALM.COM</value>
50 | </property>
51 |
52 | <property>
53 | <name>mapreduce.jobtracker.keytab.file</name>
54 | <value>/etc/hadoop/conf/mapred.keytab</value>
55 | </property>
56 |
57 |
58 | <property>
59 | <name>mapreduce.tasktracker.kerberos.principal</name>
60 | <value>mapred/hadoop-master-2@OTHERREALM.COM</value>
61 | </property>
62 |
63 | <property>
64 | <name>mapreduce.tasktracker.keytab.file</name>
65 | <value>/etc/hadoop/conf/mapred.keytab</value>
66 | </property>
67 |
68 | </configuration>
69 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/overrides/etc/hadoop/conf/yarn-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 | <configuration>
20 |
21 | <property>
22 | <name>yarn.resourcemanager.hostname</name>
23 | <value>hadoop-master-2</value>
24 | </property>
25 |
26 | <property>
27 | <name>yarn.log.server.url</name>
28 | <value>http://hadoop-master-2:19888/jobhistory/logs</value>
29 | </property>
30 |
31 |
32 | <property>
33 | <name>yarn.resourcemanager.keytab</name>
34 | <value>/etc/hadoop/conf/yarn.keytab</value>
35 | </property>
36 |
37 | <property>
38 | <name>yarn.resourcemanager.principal</name>
39 | <value>yarn/hadoop-master-2@OTHERREALM.COM</value>
40 | </property>
41 |
42 |
43 | <property>
44 | <name>yarn.nodemanager.keytab</name>
45 | <value>/etc/hadoop/conf/yarn.keytab</value>
46 | </property>
47 |
48 | <property>
49 | <name>yarn.nodemanager.principal</name>
50 | <value>yarn/hadoop-master-2@OTHERREALM.COM</value>
51 | </property>
52 |
53 | </configuration>
54 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/overrides/etc/hive/conf/hive-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 |
20 |
21 |
22 | hive.server2.authentication
23 | KERBEROS
24 |
25 |
26 |
27 | hive.server2.enable.impersonation
28 | true
29 |
30 |
31 |
32 | hive.server2.authentication.kerberos.principal
33 | hive/hadoop-master-2@OTHERREALM.COM
34 |
35 |
36 |
37 | hive.server2.authentication.kerberos.keytab
38 | /etc/hive/conf/hive.keytab
39 |
40 |
41 |
42 | hive.metastore.sasl.enabled
43 | true
44 |
45 |
46 |
47 | hive.metastore.kerberos.keytab.file
48 | /etc/hive/conf/hive.keytab
49 |
50 |
51 |
52 | hive.metastore.kerberos.principal
53 | hive/hadoop-master-2@OTHERREALM.COM
54 |
55 |
56 |
57 |
58 | hive.security.authorization.manager
59 | org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory
60 |
61 |
62 |
63 | hive.security.authorization.task.factory
64 | org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/var/kerberos/krb5kdc/kadm5-other.acl:
--------------------------------------------------------------------------------
1 | */admin@OTHERLABS.TERADATA.COM *
2 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/var/kerberos/krb5kdc/kadm5.acl:
--------------------------------------------------------------------------------
1 | */admin@OTHERREALM.COM *
2 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-2/files/var/kerberos/krb5kdc/kdc.conf:
--------------------------------------------------------------------------------
1 | [kdcdefaults]
2 | kdc_ports = 88
3 | kdc_tcp_ports = 88
4 |
5 | [realms]
6 | OTHERREALM.COM = {
7 | acl_file = /var/kerberos/krb5kdc/kadm5.acl
8 | dict_file = /usr/share/dict/words
9 | admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
10 | supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM testing/hdp3.1-hive-kerberized:unlabelled
14 | ARG ADDPRINC_ARGS="-maxrenewlife \"10 days\" +allow_renewable"
15 |
16 | # Install KMS
17 | ARG HADOOP_VERSION=3.1.4
18 | ARG HADOOP_BINARY_PATH=https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
19 | RUN curl -fLsS -o /tmp/hadoop.tar.gz --url $HADOOP_BINARY_PATH && \
20 | tar xzf /tmp/hadoop.tar.gz --directory /opt && mv /opt/hadoop-$HADOOP_VERSION /opt/hadoop
21 |
22 | # COPY CONFIGURATION
23 | COPY ./files /
24 | COPY ./files/etc/hadoop-kms/conf /opt/hadoop/etc/hadoop/
25 |
26 | # Add users and groups for testing purposes
27 | RUN set -xeu && \
28 | for username in alice bob charlie; do \
29 | groupadd "${username}_group" && \
30 | useradd -g "${username}_group" "${username}" && \
31 | /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey ${username}/hadoop-master@LABS.TERADATA.COM" && \
32 | /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/${username}.keytab ${username}/hadoop-master"; \
33 | done && \
34 | echo OK
35 |
36 | RUN /root/setup_kms.sh
37 |
38 | CMD supervisord -c /etc/supervisord.conf
39 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <configuration>
4 |     <property>
5 |         <name>fs.defaultFS</name>
6 |         <value>hdfs://hadoop-master:9000</value>
7 |     </property>
8 | </configuration>
9 |
10 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
10 |     <property>
11 |         <name>default.key.acl.ALL</name>
12 |         <value>*</value>
13 |     </property>
14 |
15 |     <property>
16 |         <name>default.key.acl.MANAGEMENT</name>
17 |         <value>*</value>
18 |     </property>
19 |
20 |     <property>
21 |         <name>default.key.acl.READ</name>
22 |         <value>*</value>
23 |     </property>
24 |
25 |     <property>
26 |         <name>default.key.acl.GENERATE_EEK</name>
27 |         <value>*</value>
28 |     </property>
29 |
30 |     <property>
31 |         <name>default.key.acl.DECRYPT_EEK</name>
32 |         <value>*</value>
33 |     </property>
34 |
35 | </configuration>
36 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |
4 |     <property>
5 |         <name>hadoop.kms.key.provider.uri</name>
6 |         <value>jceks://file@/${user.home}/kms.keystore</value>
7 |     </property>
8 |
9 |     <property>
10 |         <name>hadoop.kms.authentication.type</name>
11 |         <value>kerberos</value>
12 |     </property>
13 |
14 |     <property>
15 |         <name>hadoop.kms.authentication.kerberos.keytab</name>
16 |         <value>/etc/hadoop/conf/HTTP.keytab</value>
17 |     </property>
18 |
19 |     <property>
20 |         <name>hadoop.kms.authentication.kerberos.principal</name>
21 |         <value>HTTP/hadoop-master</value>
22 |     </property>
23 |
24 |     <property>
25 |         <name>hadoop.kms.authentication.kerberos.name.rules</name>
26 |         <value>DEFAULT</value>
27 |     </property>
28 |
29 | </configuration>
30 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile:
--------------------------------------------------------------------------------
1 | abc1234
2 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | fs.defaultFS
6 | hdfs://hadoop-master:9000
7 |
8 |
9 |
10 | fs.permissions.umask-mode
11 | 000
12 |
13 |
14 |
15 |
16 | hadoop.proxyuser.httpfs.hosts
17 | *
18 |
19 |
20 |
21 | hadoop.proxyuser.httpfs.groups
22 | *
23 |
24 |
25 |
26 |
27 | hadoop.proxyuser.hive.hosts
28 | *
29 |
30 |
31 |
32 | hadoop.proxyuser.hive.groups
33 | *
34 |
35 |
36 |
37 |
38 | hadoop.proxyuser.hdfs.groups
39 | *
40 |
41 |
42 |
43 | hadoop.proxyuser.hdfs.hosts
44 | *
45 |
46 |
47 |
48 |
49 | hadoop.proxyuser.presto-server.groups
50 | *
51 |
52 |
53 |
54 | hadoop.proxyuser.presto-server.hosts
55 | *
56 |
57 |
58 |
59 |
60 | hadoop.security.authentication
61 | kerberos
62 |
63 |
64 |
65 | hadoop.security.authorization
66 | true
67 |
68 |
69 |
70 |
71 | hadoop.security.key.provider.path
72 | kms://http@hadoop-master:9600/kms
73 |
74 |
75 |
76 | dfs.encryption.key.provider.uri
77 | kms://http@hadoop-master:9600/kms
78 |
79 |
80 |
81 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg:
--------------------------------------------------------------------------------
1 | hadoop.log.dir=/var/log/hadoop-mapreduce
2 | mapreduce.tasktracker.group=mapred
3 | banned.users=mapred,bin
4 | min.user.id=0
5 | allowed.system.users=nobody,hive
6 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | javax.jdo.option.ConnectionURL
6 | jdbc:mysql://localhost/metastore
7 |
8 |
9 |
10 | javax.jdo.option.ConnectionDriverName
11 | com.mysql.jdbc.Driver
12 |
13 |
14 |
15 | javax.jdo.option.ConnectionUserName
16 | root
17 |
18 |
19 |
20 | javax.jdo.option.ConnectionPassword
21 | root
22 |
23 |
24 |
25 | datanucleus.autoCreateSchema
26 | false
27 |
28 |
29 |
30 | datanucleus.fixedDatastore
31 | true
32 |
33 |
34 |
35 | datanucleus.autoStartMechanism
36 | SchemaTable
37 |
38 |
39 |
40 | hive.security.authorization.createtable.owner.grants
41 | ALL
42 |
43 |
44 |
45 | hive.users.in.admin.role
46 | hdfs,hive
47 |
48 |
49 |
50 |
51 | hive.server2.authentication
52 | KERBEROS
53 |
54 |
55 |
56 | hive.server2.enable.impersonation
57 | false
58 |
59 |
60 |
61 | hive.server2.authentication.kerberos.principal
62 | hive/hadoop-master@LABS.TERADATA.COM
63 |
64 |
65 |
66 | hive.server2.authentication.kerberos.keytab
67 | /etc/hive/conf/hive.keytab
68 |
69 |
70 |
71 | hive.metastore.sasl.enabled
72 | true
73 |
74 |
75 |
76 | hive.metastore.kerberos.keytab.file
77 | /etc/hive/conf/hive.keytab
78 |
79 |
80 |
81 | hive.metastore.kerberos.principal
82 | hive/hadoop-master@LABS.TERADATA.COM
83 |
84 |
85 |
86 |
87 | metastore.storage.schema.reader.impl
88 | org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader
89 |
90 |
91 |
92 | hive.support.concurrency
93 | true
94 |
95 |
96 |
97 | hive.compactor.initiator.on
98 | true
99 |
100 |
101 |
102 | hive.compactor.worker.threads
103 | 1
104 |
105 |
106 |
107 | hive.txn.manager
108 | org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
109 |
110 |
111 |
112 | hive.metastore.disallow.incompatible.col.type.changes
113 | false
114 |
115 |
116 |
117 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <configuration>
4 |
5 |     <property>
6 |         <name>hive.metastore.uris</name>
7 |         <value>thrift://localhost:9083</value>
8 |     </property>
9 |
10 |
11 |     <property>
12 |         <name>hive.security.authenticator.manager</name>
13 |         <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
14 |     </property>
15 |
16 | </configuration>
17 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/etc/supervisord.d/kms.conf:
--------------------------------------------------------------------------------
1 | [program:kms]
2 | environment=HADOOP_KEYSTORE_PASSWORD="abc1234"
3 | command=/opt/hadoop/sbin/kms.sh run
4 | autostart=true
5 | autorestart=true
6 | redirect_stderr=true
7 | ##### stdout_logfile=/var/log/hadoop-kms/kms.log
8 | stdout_logfile=/dev/stdout
9 | stdout_logfile_maxbytes=0
10 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized-kms/files/root/setup_kms.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -xeuo pipefail
4 |
5 | function retry() {
6 | END=$(($(date +%s) + 600))
7 |
8 | while (( $(date +%s) < $END )); do
9 | set +e
10 | "$@"
11 | EXIT_CODE=$?
12 | set -e
13 |
14 | if [[ ${EXIT_CODE} == 0 ]]; then
15 | break
16 | fi
17 | sleep 5
18 | done
19 |
20 | return ${EXIT_CODE}
21 | }
22 |
23 | supervisord -c /etc/supervisord.conf &
24 |
25 | retry kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM
26 | retry hdfs dfsadmin -safemode leave
27 |
28 | retry kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master@LABS.TERADATA.COM
29 | while ! beeline -n hive -e "SELECT 1"; do
30 | echo "Waiting for HiveServer2 ..."
31 | sleep 10s
32 | done
33 |
34 | # the default directory must be empty before enabling encryption
35 | hiveUrl="jdbc:hive2://hadoop-master:10000/default;principal=hive/hadoop-master@LABS.TERADATA.COM"
36 | beeline -u "$hiveUrl" -e "drop schema information_schema cascade; drop schema sys cascade;"
37 | hadoop fs -rm -f -r /user/hive/warehouse/.Trash
38 |
39 | retry kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM
40 | hadoop key create key1 -size 256
41 | hdfs crypto -createZone -keyName key1 -path /user/hive/warehouse
42 | hdfs crypto -listZones
43 |
44 | # Create `information_schema` and `sys` schemas in Hive
45 | retry kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master@LABS.TERADATA.COM
46 | /usr/hdp/current/hive-client/bin/schematool -userName hive -metaDbType mysql -dbType hive \
47 | -url "$hiveUrl" -driver org.apache.hive.jdbc.HiveDriver \
48 | -initSchema
49 |
50 | su -s /bin/bash hdfs -c 'kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM'
51 | for username in alice bob charlie; do
52 | su -s /bin/bash hdfs -c "/usr/bin/hadoop fs -mkdir /user/$username"
53 | su -s /bin/bash hdfs -c "/usr/bin/hadoop fs -chown $username /user/$username"
54 | done
55 |
56 | supervisorctl stop all
57 | pkill -F /var/run/supervisord.pid
58 | wait
59 |
60 | # Purge Kerberos credential cache of root user
61 | kdestroy
62 |
63 | find /var/log -type f -name \*.log -printf "truncate %p\n" -exec truncate --size 0 {} \; && \
64 | # Purge /tmp, this includes credential caches of other users
65 | find /tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} +
66 |
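Note: the setup script above ties HDFS transparent encryption to the KMS configured in this image. A minimal sanity check from a shell inside a running container is to reuse the same commands the script itself runs (a sketch only; the keytab, principal, and provider URI are the ones already defined in the files above):

    kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM
    hadoop key list -provider kms://http@hadoop-master:9600/kms   # key1 should be listed
    hdfs crypto -listZones                                        # /user/hive/warehouse should be an encryption zone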
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
18 | <configuration>
19 |
20 |
21 |     <property>
22 |         <name>hive.metastore.uris</name>
23 |         <value>thrift://localhost:9083</value>
24 |     </property>
25 |
26 |
27 |     <property>
28 |         <name>hive.security.authenticator.manager</name>
29 |         <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
30 |     </property>
31 |
32 |
33 |     <property>
34 |         <name>hive.security.authorization.manager</name>
35 |         <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
36 |         <description>SQL standards based Hive authorization</description>
37 |     </property>
38 |
39 |     <property>
40 |         <name>hive.security.authorization.enabled</name>
41 |         <value>true</value>
42 |     </property>
43 |
44 | </configuration>
45 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/etc/krb5.conf:
--------------------------------------------------------------------------------
1 | [logging]
2 | default = FILE:/var/log/krb5libs.log
3 | kdc = FILE:/var/log/krb5kdc.log
4 | admin_server = FILE:/var/log/kadmind.log
5 |
6 | [libdefaults]
7 | default_realm = LABS.TERADATA.COM
8 | dns_lookup_realm = false
9 | dns_lookup_kdc = false
10 | forwardable = true
11 | allow_weak_crypto = true
12 |
13 | [realms]
14 | LABS.TERADATA.COM = {
15 | kdc = hadoop-master:88
16 | admin_server = hadoop-master
17 | }
18 | OTHERLABS.TERADATA.COM = {
19 | kdc = hadoop-master:89
20 | admin_server = hadoop-master
21 | }
22 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/etc/supervisord.d/kdc.conf:
--------------------------------------------------------------------------------
1 | [program:krb5kdc]
2 | command=/bin/bash -c "exec /usr/sbin/krb5kdc -P /var/run/krb5kdc.pid -n -r LABS.TERADATA.COM -n -d /var/kerberos/krb5kdc/principal-other -r OTHERLABS.TERADATA.COM"
3 | autostart=true
4 | autorestart=true
5 | redirect_stderr=true
6 | stdout_logfile=/dev/stdout
7 | stdout_logfile_maxbytes=0
8 |
9 | [program:kadmind]
10 | command=/bin/bash -c "exec /usr/sbin/kadmind -P /var/run/kadmind.pid -nofork -r LABS.TERADATA.COM"
11 | autostart=true
12 | autorestart=true
13 | redirect_stderr=true
14 | stdout_logfile=/dev/stdout
15 | stdout_logfile_maxbytes=0
16 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
18 | <configuration>
19 |
20 |
21 |     <property>
22 |         <name>hadoop.proxyuser.presto-server.groups</name>
23 |         <value>*</value>
24 |     </property>
25 |
26 |     <property>
27 |         <name>hadoop.proxyuser.presto-server.hosts</name>
28 |         <value>*</value>
29 |     </property>
30 |
31 |
32 |     <property>
33 |         <name>hadoop.security.authentication</name>
34 |         <value>kerberos</value>
35 |     </property>
36 |
37 |     <property>
38 |         <name>hadoop.security.authorization</name>
39 |         <value>true</value>
40 |     </property>
41 |
42 |     <property>
43 |         <name>hadoop.security.auth_to_local</name>
44 |         <value>
45 |             RULE:[2:$1@$0](.*@OTHERLABS.TERADATA.COM)s/@.*//
46 |             DEFAULT
47 |         </value>
48 |     </property>
49 |
50 | </configuration>
51 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
16 |
17 |
18 |
19 | dfs.block.access.token.enable
20 | true
21 |
22 |
23 |
24 |
25 | dfs.namenode.keytab.file
26 | /etc/hadoop/conf/hdfs.keytab
27 |
28 |
29 | dfs.namenode.kerberos.principal
30 | hdfs/hadoop-master@LABS.TERADATA.COM
31 |
32 |
33 | dfs.namenode.kerberos.internal.spnego.principal
34 | HTTP/hadoop-master@LABS.TERADATA.COM
35 |
36 |
37 |
38 |
39 | dfs.secondary.namenode.keytab.file
40 | /etc/hadoop/conf/hdfs.keytab
41 |
42 |
43 | dfs.secondary.namenode.kerberos.principal
44 | hdfs/hadoop-master@LABS.TERADATA.COM
45 |
46 |
47 | dfs.secondary.namenode.kerberos.internal.spnego.principal
48 | HTTP/hadoop-master@LABS.TERADATA.COM
49 |
50 |
51 |
52 |
53 | dfs.datanode.keytab.file
54 | /etc/hadoop/conf/hdfs.keytab
55 |
56 |
57 | dfs.datanode.kerberos.principal
58 | hdfs/hadoop-master@LABS.TERADATA.COM
59 |
60 |
61 |
62 |
63 | dfs.webhdfs.enabled
64 | true
65 |
66 |
67 |
68 |
69 | dfs.web.authentication.kerberos.principal
70 | HTTP/hadoop-master@LABS.TERADATA.COM
71 |
72 |
73 |
74 | dfs.web.authentication.kerberos.keytab
75 | /etc/hadoop/conf/HTTP.keytab
76 |
77 |
78 |
79 | ignore.secure.ports.for.testing
80 | true
81 |
82 |
83 |
84 | dfs.http.policy
85 | HTTP_ONLY
86 |
87 |
88 |
89 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 |
20 |
21 |
22 | mapreduce.jobhistory.keytab
23 | /etc/hadoop/conf/mapred.keytab
24 |
25 |
26 |
27 | mapreduce.jobhistory.principal
28 | mapred/hadoop-master@LABS.TERADATA.COM
29 |
30 |
31 |
32 |
33 | mapreduce.jobtracker.kerberos.principal
34 | mapred/hadoop-master@LABS.TERADATA.COM
35 |
36 |
37 |
38 | mapreduce.jobtracker.keytab.file
39 | /etc/hadoop/conf/mapred.keytab
40 |
41 |
42 |
43 |
44 | mapreduce.tasktracker.kerberos.principal
45 | mapred/hadoop-master@LABS.TERADATA.COM
46 |
47 |
48 |
49 | mapreduce.tasktracker.keytab.file
50 | /etc/hadoop/conf/mapred.keytab
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
18 | <configuration>
19 |
20 |
21 |
22 |     <property>
23 |         <name>yarn.resourcemanager.keytab</name>
24 |         <value>/etc/hadoop/conf/yarn.keytab</value>
25 |     </property>
26 |
27 |     <property>
28 |         <name>yarn.resourcemanager.principal</name>
29 |         <value>yarn/hadoop-master@LABS.TERADATA.COM</value>
30 |     </property>
31 |
32 |
33 |     <property>
34 |         <name>yarn.nodemanager.keytab</name>
35 |         <value>/etc/hadoop/conf/yarn.keytab</value>
36 |     </property>
37 |
38 |     <property>
39 |         <name>yarn.nodemanager.principal</name>
40 |         <value>yarn/hadoop-master@LABS.TERADATA.COM</value>
41 |     </property>
42 |
43 | </configuration>
44 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 |
20 |
21 |
22 | hive.server2.authentication
23 | KERBEROS
24 |
25 |
26 |
27 | hive.server2.enable.impersonation
28 | true
29 |
30 |
31 |
32 | hive.server2.authentication.kerberos.principal
33 | hive/hadoop-master@LABS.TERADATA.COM
34 |
35 |
36 |
37 | hive.server2.authentication.kerberos.keytab
38 | /etc/hive/conf/hive.keytab
39 |
40 |
41 |
42 | hive.metastore.sasl.enabled
43 | true
44 |
45 |
46 |
47 | hive.metastore.kerberos.keytab.file
48 | /etc/hive/conf/hive.keytab
49 |
50 |
51 |
52 | hive.metastore.kerberos.principal
53 | hive/hadoop-master@LABS.TERADATA.COM
54 |
55 |
56 |
57 |
58 | hive.security.authorization.manager
59 | org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory
60 |
61 |
62 |
63 | hive.security.authorization.task.factory
64 | org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/var/kerberos/krb5kdc/kadm5-other.acl:
--------------------------------------------------------------------------------
1 | */admin@OTHERLABS.TERADATA.COM *
2 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl:
--------------------------------------------------------------------------------
1 | */admin@LABS.TERADATA.COM *
2 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf:
--------------------------------------------------------------------------------
1 | [kdcdefaults]
2 | kdc_ports = 88
3 | kdc_tcp_ports = 88
4 |
5 | [realms]
6 | LABS.TERADATA.COM = {
7 | acl_file = /var/kerberos/krb5kdc/kadm5.acl
8 | dict_file = /usr/share/dict/words
9 | admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
10 | supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
11 | }
12 |
13 | OTHERLABS.TERADATA.COM = {
14 | acl_file = /var/kerberos/krb5kdc/kadm5-other.acl
15 | dict_file = /usr/share/dict/words
16 | admin_keytab = /var/kerberos/krb5kdc/kadm5-other.keytab
17 | supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
18 | kdc_listen = 89
19 | kdc_tcp_listen = 89
20 | kdc_ports = 89
21 | kdc_tcp_ports = 89
22 | }
23 |
24 |
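Note: the OTHERLABS.TERADATA.COM realm above is served on port 89, matching the kdc = hadoop-master:89 entry in files/etc/krb5.conf and the dual-realm krb5kdc invocation in supervisord.d/kdc.conf. As an illustrative check (not part of the image build), the principals of each realm can be listed by passing the realm to kadmin.local explicitly:

    kadmin.local -r LABS.TERADATA.COM -q "listprincs"
    kadmin.local -r OTHERLABS.TERADATA.COM -q "listprincs"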
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | # Cloudera removed access to the HDP repositories in April 2021.
14 | # In order to build new image versions, we use the last released version as the base.
15 | # The previous Dockerfile version is archived in archived/hdp3.1-hive.
16 | FROM ghcr.io/trinodb/testing/hdp3.1-hive:38
17 |
18 | COPY ./files /
19 |
20 | # replace mirrorlist.centos.org and mirror.centos.org with vault.centos.org
21 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* && \
22 | sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
23 |
24 | RUN \
25 | set -xeu && \
26 | # Remove inaccessible HDP3 repos so yum is still usable
27 | rm /etc/yum.repos.d/hdp*.repo && \
28 | # Install Zulu JDK 17.0.4
29 | rpm -i https://cdn.azul.com/zulu/bin/zulu17.36.13-ca-jdk17.0.4-linux.x86_64.rpm && \
30 | # Set JDK 17 as the default
31 | alternatives --set java /usr/lib/jvm/zulu-17/bin/java && \
32 | alternatives --set javac /usr/lib/jvm/zulu-17/bin/javac && \
33 | echo "Done"
34 |
35 | # HDFS ports
36 | EXPOSE 1004 1006 8020 9866 9867 9870 9864 50470
37 |
38 | # YARN ports
39 | EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
40 |
41 | # HIVE ports
42 | EXPOSE 9083 10000
43 |
44 | # SOCKS port
45 | EXPOSE 1180
46 |
47 | CMD supervisord -c /etc/supervisord.conf
48 |
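Note: the configuration baked into this image refers to the host name hadoop-master, so the container is normally run under that name. A hand-rolled invocation might look like the sketch below; the image tag and the choice of published ports (Hive Metastore, HiveServer2, SOCKS proxy) are illustrative, not prescribed by the Dockerfile:

    docker run -d --name hadoop-master --hostname hadoop-master \
        -p 9083:9083 -p 10000:10000 -p 1180:1180 \
        ghcr.io/trinodb/testing/hdp3.1-hive:latest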
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hadoop/conf/capacity-scheduler.xml:
--------------------------------------------------------------------------------
1 |
2 |
15 |
16 |
17 |
18 | yarn.scheduler.capacity.maximum-applications
19 | 10000
20 |
21 | Maximum number of applications that can be pending and running.
22 |
23 |
24 |
25 |
26 | yarn.scheduler.capacity.maximum-am-resource-percent
27 | 1
28 |
29 | Maximum percent of resources in the cluster which can be used to run
30 | application masters i.e. controls number of concurrent running
31 | applications.
32 |
33 |
34 |
35 |
36 | yarn.scheduler.capacity.root.queues
37 | default
38 |
39 | The queues at this level (root is the root queue).
40 |
41 |
42 |
43 |
44 | yarn.scheduler.capacity.root.default.capacity
45 | 100
46 | Default queue target capacity.
47 |
48 |
49 |
50 | yarn.scheduler.capacity.root.default.maximum-capacity
51 | 100
52 |
53 | The maximum capacity of the default queue.
54 |
55 |
56 |
57 |
58 | yarn.scheduler.capacity.root.default.state
59 | RUNNING
60 |
61 | The state of the default queue. State can be one of RUNNING or STOPPED.
62 |
63 |
64 |
65 |
66 | yarn.scheduler.capacity.root.default.acl_submit_applications
67 | *
68 |
69 | The ACL of who can submit jobs to the default queue.
70 |
71 |
72 |
73 |
74 | yarn.scheduler.capacity.root.default.user-limit-factor
75 | 1
76 |
77 | Default queue user limit a percentage from 0.0 to 1.0.
78 |
79 |
80 |
81 |
82 | yarn.scheduler.capacity.root.default.acl_administer_queue
83 | *
84 |
85 | The ACL of who can administer jobs on the default queue.
86 |
87 |
88 |
89 |
90 | yarn.scheduler.capacity.node-locality-delay
91 | -1
92 |
93 | Number of missed scheduling opportunities after which the CapacityScheduler
94 | attempts to schedule rack-local containers.
95 | Typically this should be set to number of racks in the cluster, this
96 | feature is disabled by default, set to -1.
97 |
98 |
99 |
100 |
101 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hadoop/conf/core-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 |
20 |
21 | fs.defaultFS
22 | hdfs://hadoop-master:9000
23 |
24 |
25 |
26 |
27 | hadoop.proxyuser.oozie.hosts
28 | *
29 |
30 |
31 | hadoop.proxyuser.oozie.groups
32 | *
33 |
34 |
35 |
36 |
37 | hadoop.proxyuser.httpfs.hosts
38 | *
39 |
40 |
41 | hadoop.proxyuser.httpfs.groups
42 | *
43 |
44 |
45 |
46 |
47 | hadoop.proxyuser.llama.hosts
48 | *
49 |
50 |
51 | hadoop.proxyuser.llama.groups
52 | *
53 |
54 |
55 |
56 |
57 | hadoop.proxyuser.hue.hosts
58 | *
59 |
60 |
61 | hadoop.proxyuser.hue.groups
62 | *
63 |
64 |
65 |
66 |
67 | hadoop.proxyuser.mapred.hosts
68 | *
69 |
70 |
71 | hadoop.proxyuser.mapred.groups
72 | *
73 |
74 |
75 |
76 |
77 | hadoop.proxyuser.hive.hosts
78 | *
79 |
80 |
81 |
82 | hadoop.proxyuser.hive.groups
83 | *
84 |
85 |
86 |
87 |
88 | hadoop.proxyuser.hdfs.groups
89 | *
90 |
91 |
92 |
93 | hadoop.proxyuser.hdfs.hosts
94 | *
95 |
96 |
97 |
98 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hadoop/conf/hadoop-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # Set Hadoop-specific environment variables here.
17 | # Forcing YARN-based mapreduce implementation.
18 | # Make sure to comment out if you want to go back to the default or
19 | # if you want this to be tweakable on a per-user basis
20 | # export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
21 |
22 | # The maximum amount of heap to use, in MB. Default is 1000.
23 | export HADOOP_HEAPSIZE=256
24 |
25 | # Extra Java runtime options. Empty by default.
26 | export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
27 | export YARN_OPTS="$YARN_OPTS -Xmx256m"
28 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hadoop/conf/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
16 |
17 |
18 |
19 | dfs.namenode.name.dir
20 | /var/lib/hadoop-hdfs/cache/name/
21 |
22 |
23 |
24 | dfs.datanode.data.dir
25 | /var/lib/hadoop-hdfs/cache/data/
26 |
27 |
28 |
29 | fs.viewfs.mounttable.hadoop-viewfs.link./default
30 | hdfs://hadoop-master:9000/user/hive/warehouse
31 |
32 |
33 |
34 |
35 | dfs.safemode.threshold.pct
36 | 0
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hadoop/conf/log4j.properties:
--------------------------------------------------------------------------------
1 | log4j.rootLogger=INFO,CONSOLE
2 | log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
3 | log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
4 | log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
5 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hadoop/conf/mapred-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 |
20 |
21 | mapred.job.tracker
22 | hadoop-master:8021
23 |
24 |
25 |
26 | mapreduce.framework.name
27 | yarn
28 |
29 |
30 |
31 | mapreduce.jobhistory.address
32 | hadoop-master:10020
33 |
34 |
35 |
36 | mapreduce.jobhistory.webapp.address
37 | hadoop-master:19888
38 |
39 |
40 |
41 | To set the value of tmp directory for map and reduce tasks.
42 | mapreduce.task.tmp.dir
43 | /var/lib/hadoop-mapreduce/cache/${user.name}/tasks
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hive/conf/beeline-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |     <property>
4 |         <name>beeline.hs2.jdbc.url.tcpUrl</name>
5 |         <value>jdbc:hive2://localhost:10000/default;user=hdfs;password=hive</value>
6 |     </property>
7 |
8 |     <property>
9 |         <name>beeline.hs2.jdbc.url.httpUrl</name>
10 |         <value>jdbc:hive2://localhost:10000/default;user=hdfs;password=hive;transportMode=http;httpPath=cliservice</value>
11 |     </property>
12 |
13 |     <property>
14 |         <name>beeline.hs2.jdbc.url.default</name>
15 |         <value>tcpUrl</value>
16 |     </property>
17 | </configuration>
18 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hive/conf/hive-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
18 |
19 |
20 |
21 | javax.jdo.option.ConnectionURL
22 | jdbc:mysql://localhost/metastore
23 | the URL of the MySQL database
24 |
25 |
26 |
27 | javax.jdo.option.ConnectionDriverName
28 | com.mysql.jdbc.Driver
29 |
30 |
31 |
32 | javax.jdo.option.ConnectionUserName
33 | root
34 |
35 |
36 |
37 | javax.jdo.option.ConnectionPassword
38 | root
39 |
40 |
41 |
42 | datanucleus.autoCreateSchema
43 | false
44 |
45 |
46 |
47 | datanucleus.fixedDatastore
48 | true
49 |
50 |
51 |
52 | datanucleus.autoStartMechanism
53 | SchemaTable
54 |
55 |
56 |
57 | hive.security.authorization.createtable.owner.grants
58 | ALL
59 | The set of privileges automatically granted to the owner whenever a table gets created.
60 |
61 |
62 |
63 | hive.users.in.admin.role
64 | hdfs,hive
65 |
66 |
67 |
68 |
69 | metastore.storage.schema.reader.impl
70 | org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader
71 |
72 |
73 |
74 | hive.support.concurrency
75 | true
76 |
77 |
78 |
79 | hive.compactor.initiator.on
80 | true
81 |
82 |
83 |
84 | hive.compactor.worker.threads
85 | 1
86 |
87 |
88 |
89 | hive.txn.manager
90 | org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
91 |
92 |
93 |
94 | hive.metastore.disallow.incompatible.col.type.changes
95 | false
96 |
97 |
98 |
99 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/hive/conf/hiveserver2-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <configuration>
4 |
5 |     <property>
6 |         <name>hive.metastore.uris</name>
7 |         <value>thrift://localhost:9083</value>
8 |     </property>
9 |
10 | </configuration>
11 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | nodaemon = true
8 | directory = /tmp
9 | strip_ansi = false
10 |
11 | [unix_http_server]
12 | file = /tmp/supervisor.sock
13 |
14 | [rpcinterface:supervisor]
15 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
16 |
17 | [supervisorctl]
18 | serverurl = unix:///tmp/supervisor.sock
19 |
20 | [include]
21 | files = /etc/supervisord.d/*.conf
22 |
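Note: every service in this image runs under supervisord, with per-service definitions included from /etc/supervisord.d/*.conf (shown in the following sections). Inside a running container the services can be inspected or restarted through the configured unix socket, for example:

    supervisorctl -c /etc/supervisord.conf status
    supervisorctl -c /etc/supervisord.conf restart hive-server2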
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/hdfs-datanode.conf:
--------------------------------------------------------------------------------
1 | [program:hdfs-datanode]
2 | command=hdfs datanode
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=hdfs
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/hdfs-namenode.conf:
--------------------------------------------------------------------------------
1 | [program:hdfs-namenode]
2 | command=hdfs namenode
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=hdfs
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/hive-metastore.conf:
--------------------------------------------------------------------------------
1 | [program:hive-metastore]
2 | # Add `--debug:port=5006` for debugging
3 | command=hive --service metastore
4 | startsecs=2
5 | stopwaitsecs=10
6 | user=hive
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/hive/hive-metastore.log
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/hive-server2.conf:
--------------------------------------------------------------------------------
1 | [program:hive-server2]
2 | command=hive --service hiveserver2
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=hive
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hive/hive-server2.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/mysql-metastore.conf:
--------------------------------------------------------------------------------
1 | [program:mysql-metastore]
2 | command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=mysql
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/mysql/mysql.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/socks-proxy.conf:
--------------------------------------------------------------------------------
1 | [program:socks-proxy]
2 | command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/socks-proxy
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/sshd.conf:
--------------------------------------------------------------------------------
1 | [program:sshd]
2 | command=/usr/sbin/sshd -D -e
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/sshd
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/yarn-nodemanager.conf:
--------------------------------------------------------------------------------
1 | [program:yarn-nodemanager]
2 | command=yarn nodemanager
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=yarn
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/supervisord.d/yarn-resourcemanager.conf:
--------------------------------------------------------------------------------
1 | [program:yarn-resourcemanager]
2 | command=yarn resourcemanager
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=yarn
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/etc/tez/conf/tez-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
14 |
15 |
16 |
17 | tez.lib.uris.ignore
18 | false
19 |
20 |
21 | tez.lib.uris
22 | file:///usr/hdp/current/tez-client/lib/tez.tar.gz
23 |
24 |
25 | tez.am.mode.session
26 | false
27 |
28 |
29 | tez.am.acl.enabled
30 | false
31 |
32 |
33 | tez.am.log.level
34 | WARN
35 |
36 |
37 | tez.task.log.level
38 | WARN
39 |
40 |
41 | tez.runtime.io.sort.mb
42 | 8
43 |
44 |
45 | tez.am.max.app.attempts
46 | 1
47 |
48 |
49 | tez.am.task.max.failed.attempts
50 | 1
51 |
52 |
53 | tez.shuffle-vertex-manager.min-src-fraction
54 | 0.10
55 |
56 |
57 | tez.shuffle-vertex-manager.max-src-fraction
58 | 1.00
59 |
60 |
61 | tez.am.launch.cmd-opts
62 | -server -Djava.net.preferIPv4Stack=true -XX:+UseParallelGC -Dhadoop.metrics.log.level=WARN
63 |
64 |
65 | tez.am.resource.memory.mb
66 | 128
67 |
68 |
69 | tez.task.launch.cmd-opts
70 | -server -Djava.net.preferIPv4Stack=true -XX:+UseParallelGC -Dhadoop.metrics.log.level=WARN
71 |
72 |
73 | tez.task.resource.memory.mb
74 | 128
75 |
76 |
77 | tez.task.resource.cpu.vcores
78 | 1
79 |
80 |
81 | tez.runtime.sort.threads
82 | 1
83 |
84 |
85 | tez.runtime.io.sort.factor
86 | 100
87 |
88 |
89 | tez.runtime.shuffle.memory-to-memory.enable
90 | false
91 |
92 |
93 | tez.runtime.optimize.local.fetch
94 | true
95 |
96 |
97 | hive.tez.container.size
98 | 1024
99 |
100 |
101 |
--------------------------------------------------------------------------------
/testing/hdp3.1-hive/files/root/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -ex
2 |
3 | # format namenode
4 | chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
5 |
6 | mkdir /usr/hdp/current/hadoop-client/logs /var/log/hadoop-hdfs /var/log/hadoop-yarn
7 | chgrp -R hadoop /usr/hdp/current/hadoop-client/logs /var/log/hadoop-hdfs /var/log/hadoop-yarn
8 | chmod -R 770 /usr/hdp/current/hadoop-client/logs /var/log/hadoop-hdfs /var/log/hadoop-yarn
9 |
10 | # workaround for 'could not open session' bug as suggested here:
11 | # https://github.com/docker/docker/issues/7056#issuecomment-49371610
12 | rm -f /etc/security/limits.d/hdfs.conf
13 | su -c "echo 'N' | hdfs namenode -format" hdfs
14 |
15 | # start hdfs
16 | su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
17 |
18 | # wait for process starting
19 | sleep 15
20 |
21 | # init basic hdfs directories
22 | /usr/hdp/current/hadoop-client/libexec/init-hdfs.sh
23 |
24 | # 4.1 Create an hdfs home directory for the yarn user. For some reason, init-hdfs doesn't do so.
25 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/yarn && /usr/bin/hadoop fs -chown yarn:yarn /user/yarn'
26 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn'
27 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging'
28 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging/history'
29 |
30 | # init hive directories
31 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
32 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
33 | su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
34 |
35 | # stop hdfs
36 | killall java
37 |
38 | # setup metastore
39 | ln -s /usr/bin/resolveip /usr/libexec # mariadb-server installs resolveip in /usr/bin but mysql_install_db expects it in /usr/libexec
40 | mysql_install_db
41 |
42 | chown -R mysql:mysql /var/lib/mysql
43 |
44 | /usr/bin/mysqld_safe &
45 | sleep 10s
46 |
47 | echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
48 | echo "CREATE DATABASE metastore;" | mysql
49 | /usr/bin/mysqladmin -u root password 'root'
50 | /usr/hdp/current/hive-client/bin/schematool -dbType mysql -initSchema
51 |
52 | killall mysqld
53 | sleep 10s
54 | mkdir /var/log/mysql/
55 | chown -R mysql:mysql /var/log/mysql/
56 |
57 | # Create `information_schema` and `sys` schemas in Hive
58 | supervisord -c /etc/supervisord.conf &
59 | while ! beeline -n hive -e "SELECT 1"; do
60 | echo "Waiting for HiveServer2 ..."
61 | sleep 10s
62 | done
63 | /usr/hdp/current/hive-client/bin/schematool -userName hive -metaDbType mysql -dbType hive -initSchema \
64 | -url jdbc:hive2://localhost:10000/default -driver org.apache.hive.jdbc.HiveDriver
65 | supervisorctl stop all
66 |
67 | # Additional libs
68 | cp -av /usr/hdp/current/hadoop-client/lib/native/Linux-amd64-64/* /usr/lib64/
69 | mkdir -v /usr/hdp/current/hive-client/auxlib || test -d /usr/hdp/current/hive-client/auxlib
70 | ln -vs /usr/hdp/current/hadoop-client/lib/hadoop-lzo-*.jar /usr/hdp/current/hive-client/auxlib
71 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj17:unlabelled$ARCH
15 |
16 | ARG JAVA8_ZULU_VERSION=8.64.0.19-ca-jdk8.0.345
17 |
18 | RUN yum install -y \
19 | mariadb-server \
20 | openssh \
21 | openssh-clients \
22 | openssh-server \
23 | psmisc \
24 | passwd \
25 | which && \
26 | # Install Zulu JDK
27 | echo "Downloading zulu${JAVA8_ZULU_VERSION}-linux.$(uname -m).rpm..." && \
28 | curl -fLsS -o /tmp/jdk8.rpm --url https://cdn.azul.com/zulu$(test "$(uname -m)" != "aarch64" || echo "-embedded")/bin/zulu${JAVA8_ZULU_VERSION}-linux.$(uname -m).rpm && \
29 | yum -y localinstall /tmp/jdk8.rpm && \
30 | rm /tmp/jdk8.rpm && \
31 | # Set JDK 8 as the default
32 | alternatives --set java /usr/lib/jvm/zulu-8/jre/bin/java && \
33 | alternatives --set javac /usr/lib/jvm/zulu-8/bin/javac \
34 | && yum -q clean all && rm -rf /var/cache/yum
35 |
36 | # Override JAVA_HOME inherited from testing/almalinux9-oj17
37 | ENV JAVA_HOME=/usr/lib/jvm/zulu-8
38 |
39 | ARG HADOOP_VERSION=3.1.2
40 | ARG HIVE_VERSION=3.1.3
41 |
42 | # TODO Apache Archive is rate limited -- these should probably go in S3
43 | ARG HADOOP_BINARY_PATH=https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
44 | ARG HIVE_BINARY_PATH=https://archive.apache.org/dist/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz
45 |
46 | RUN curl -fLsS -o /tmp/hadoop.tar.gz --url $HADOOP_BINARY_PATH && \
47 | tar xzf /tmp/hadoop.tar.gz --directory /opt && mv /opt/hadoop-$HADOOP_VERSION /opt/hadoop
48 |
49 | RUN curl -fLsS -o /tmp/hive.tar.gz --url $HIVE_BINARY_PATH && \
50 | tar xzf /tmp/hive.tar.gz --directory /opt && mv /opt/apache-hive-${HIVE_VERSION}-bin /opt/hive
51 |
52 | ARG MYSQL_CONNECTOR_VERSION=8.0.13
53 | ARG AWS_SDK_VERSION=1.11.906
54 | RUN mkdir /opt/hive/auxlib && \
55 | curl -fLsS -o /opt/hive/auxlib/mysql-connector-java-$MYSQL_CONNECTOR_VERSION.jar https://repo1.maven.org/maven2/mysql/mysql-connector-java/$MYSQL_CONNECTOR_VERSION/mysql-connector-java-$MYSQL_CONNECTOR_VERSION.jar && \
56 | curl -fLsS -o /opt/hive/auxlib/aws-java-sdk-core-$AWS_SDK_VERSION.jar https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-core/$AWS_SDK_VERSION/aws-java-sdk-core-$AWS_SDK_VERSION.jar && \
57 | curl -fLsS -o /opt/hive/auxlib/aws-java-sdk-s3-$AWS_SDK_VERSION.jar https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-s3/$AWS_SDK_VERSION/aws-java-sdk-s3-$AWS_SDK_VERSION.jar
58 |
59 | ENV HADOOP_HOME=/opt/hadoop
60 | ENV HIVE_HOME=/opt/hive
61 | ENV HADOOP_CLASSPATH=${HADOOP_HOME}/share/hadoop/tools/lib/*
62 | ENV PATH=${HIVE_HOME}/bin:${HADOOP_HOME}/bin:${PATH}
63 |
64 | RUN ssh-keygen -t rsa -b 4096 -C "automation@trino.io" -N "" -f /root/.ssh/id_rsa && \
65 | ssh-keygen -t rsa -b 4096 -N "" -f /etc/ssh/ssh_host_rsa_key && \
66 | cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
67 | RUN chmod 755 /root && chmod 700 /root/.ssh
68 | RUN passwd --unlock root
69 |
70 | # Copy configuration files
71 | COPY ./files /
72 |
73 | # Run setup script
74 | RUN /root/setup.sh
75 |
76 | # HDFS port
77 | EXPOSE 9000 9870
78 |
79 | # HIVE Metastore port
80 | EXPOSE 9083 10000
81 |
82 | CMD /root/entrypoint.sh
83 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/hadoop-init.d/init-hdfs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | echo 'N' | hdfs namenode -format
4 | sed -i -e "s|hdfs://localhost|hdfs://$(hostname)|g" /opt/hadoop/etc/hadoop/core-site.xml
5 | hdfs namenode &
6 | sleep 10 && hdfs dfs -mkdir -p /user/hive/warehouse && killall java
7 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/hadoop-init.d/set-aws-creds.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | if [[ -n "${AWS_ACCESS_KEY_ID}" ]]
4 | then
5 | sed -i -e "s|\"Use AWS_ACCESS_KEY_ID .*\"|${AWS_ACCESS_KEY_ID}|g" \
6 | -e "s|\"Use AWS_SECRET_ACCESS_KEY .*\"|${AWS_SECRET_ACCESS_KEY}|g" \
7 | /opt/hive/conf/hive-site.xml
8 | fi
9 |
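Note: this init script substitutes the fs.s3* placeholder values in /opt/hive/conf/hive-site.xml only when AWS_ACCESS_KEY_ID is set in the container environment. A hedged usage sketch (the container name, image tag, and credential values are placeholders, not part of this repository):

    docker run -d --name hive3.1-hive \
        -e AWS_ACCESS_KEY_ID=AKIA... \
        -e AWS_SECRET_ACCESS_KEY=... \
        ghcr.io/trinodb/testing/hive3.1-hive:latest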
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | nodaemon = true
8 | directory = /tmp
9 | strip_ansi = false
10 |
11 | [unix_http_server]
12 | file = /tmp/supervisor.sock
13 |
14 | [rpcinterface:supervisor]
15 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
16 |
17 | [supervisorctl]
18 | serverurl = unix:///tmp/supervisor.sock
19 |
20 | [include]
21 | files = /etc/supervisord.d/*.conf
22 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/hdfs-datanode.conf:
--------------------------------------------------------------------------------
1 | [program:hdfs-datanode]
2 | command=hdfs datanode
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/hdfs-namenode.conf:
--------------------------------------------------------------------------------
1 | [program:hdfs-namenode]
2 | command=hdfs namenode
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/hive-metastore.conf:
--------------------------------------------------------------------------------
1 | [program:hive-metastore]
2 | # Add `--debug:port=5006` for debugging
3 | command=hive --service metastore
4 | startsecs=2
5 | stopwaitsecs=10
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/hive/hive-metastore.log
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/hive-server2.conf:
--------------------------------------------------------------------------------
1 | [program:hive-server2]
2 | command=hive --service hiveserver2
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=root
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/hive/hive-server2.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/mysql-metastore.conf:
--------------------------------------------------------------------------------
1 | [program:mysql-metastore]
2 | command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
3 | startsecs=2
4 | stopwaitsecs=10
5 | user=mysql
6 | redirect_stderr=true
7 | stdout_logfile=/var/log/mysql/mysql.log
8 | autostart=true
9 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/socks-proxy.conf:
--------------------------------------------------------------------------------
1 | [program:socks-proxy]
2 | command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/socks-proxy
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/etc/supervisord.d/sshd.conf:
--------------------------------------------------------------------------------
1 | [program:sshd]
2 | command=/usr/sbin/sshd -D
3 | startsecs=2
4 | stopwaitsecs=10
5 | startretries=30
6 | user=root
7 | redirect_stderr=true
8 | stdout_logfile=/var/log/sshd
9 | autostart=true
10 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/opt/hadoop/etc/hadoop/core-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 |
4 | <configuration>
5 |
6 |     <property>
7 |         <name>fs.defaultFS</name>
8 |         <value>hdfs://localhost:9000</value>
9 |     </property>
10 |
11 |     <property>
12 |         <name>hadoop.proxyuser.root.hosts</name>
13 |         <value>*</value>
14 |     </property>
15 |
16 |     <property>
17 |         <name>hadoop.proxyuser.root.groups</name>
18 |         <value>*</value>
19 |     </property>
20 |
21 | </configuration>
22 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/opt/hadoop/etc/hadoop/hadoop-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # Set Hadoop-specific environment variables here.
17 | # Forcing YARN-based mapreduce implementation.
18 | # Make sure to comment out if you want to go back to the default or
19 | # if you want this to be tweakable on a per-user basis
20 | # export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
21 |
22 | # The maximum amount of heap to use, in MB. Default is 1000.
23 | export HADOOP_HEAPSIZE=256
24 |
25 | # Copied from hdp3.1
26 | # Extra Java runtime options. Empty by default.
27 | export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
28 | export YARN_OPTS="$YARN_OPTS -Xmx256m"
29 |
30 | # Necessary to prevent map reduce jobs triggered by hive queries from dying with OOM error
31 | export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Xmx512m"
32 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/opt/hadoop/etc/hadoop/hdfs-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
16 |
17 |
18 |
19 |
20 |
21 |
22 | dfs.permissions.enabled
23 | false
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/opt/hive/conf/hive-env.sh:
--------------------------------------------------------------------------------
1 | export HADOOP_CLIENT_OPTS="${HADOOP_CLIENT_OPTS} -Xmx256m -Djava.io.tmpdir=/tmp"
2 | export HADOOP_HEAPSIZE=256
3 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/opt/hive/conf/hive-site.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | hive.metastore.uris
7 | thrift://localhost:9083
8 |
9 |
10 |
11 | javax.jdo.option.ConnectionURL
12 | jdbc:mysql://localhost:3306/metastore?useSSL=false
13 |
14 |
15 |
16 | javax.jdo.option.ConnectionDriverName
17 | com.mysql.cj.jdbc.Driver
18 |
19 |
20 |
21 | javax.jdo.option.ConnectionUserName
22 | root
23 |
24 |
25 |
26 | javax.jdo.option.ConnectionPassword
27 | root
28 |
29 |
30 |
31 | hive.metastore.connect.retries
32 | 15
33 |
34 |
35 |
36 |
37 | metastore.storage.schema.reader.impl
38 | org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader
39 |
40 |
41 |
42 | hive.support.concurrency
43 | true
44 |
45 |
46 |
47 | hive.txn.manager
48 | org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
49 |
50 |
51 |
52 | hive.compactor.initiator.on
53 | true
54 |
55 |
56 |
57 | hive.compactor.worker.threads
58 | 1
59 |
60 |
61 |
62 | fs.s3.awsAccessKeyId
63 | "Use AWS_ACCESS_KEY_ID environment variable to set this value"
64 |
65 |
66 |
67 | fs.s3.awsSecretAccessKey
68 | "Use AWS_SECRET_ACCESS_KEY environment variable to set this value"
69 |
70 |
71 |
72 | fs.s3a.access.key
73 | "Use AWS_ACCESS_KEY_ID environment variable to set this value"
74 |
75 |
76 |
77 | fs.s3a.secret.key
78 | "Use AWS_SECRET_ACCESS_KEY environment variable to set this value"
79 |
80 |
81 |
82 | hive.metastore.disallow.incompatible.col.type.changes
83 | false
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/root/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | for init_script in /etc/hadoop-init.d/*; do
4 | "${init_script}"
5 | done
6 |
7 | supervisord -c /etc/supervisord.conf
8 |
--------------------------------------------------------------------------------
/testing/hive3.1-hive/files/root/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -ex
2 |
3 | ln -s /usr/bin/resolveip /usr/libexec # mariadb-server installs resolveip in /usr/bin but mysql_install_db expects it in /usr/libexec
4 | mkdir /var/log/mysql /var/log/hive /var/log/hadoop-hdfs
5 |
6 | mysql_install_db
7 |
8 | chown -R mysql:mysql /var/lib/mysql
9 | chown -R mysql:mysql /var/log/mariadb
10 |
11 | /usr/bin/mysqld_safe &
12 | sleep 10s
13 |
14 | echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
15 | echo "CREATE DATABASE metastore;" | mysql
16 | /usr/bin/mysqladmin -u root password 'root'
17 | /opt/hive/bin/schematool -dbType mysql -initSchema
18 |
19 | sleep 10s
20 | chown -R mysql:mysql /var/log/mysql/
21 | rm -rf /tmp/* /var/tmp/*
22 |
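
Once setup.sh has bootstrapped MariaDB and run schematool, the metastore schema can be sanity-checked. A hedged sketch using only the tools installed above; the root password 'root' matches the mysqladmin call in this script:

    # Report the schema version recorded by schematool
    /opt/hive/bin/schematool -dbType mysql -info

    # Confirm the metastore tables were created
    mysql -u root -proot -e "SHOW TABLES IN metastore;" | head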
--------------------------------------------------------------------------------
/testing/hive4.0-hive/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | FROM apache/hive:4.0.0
14 |
15 | # TODO replace with aws sdk v2 by following https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/aws_sdk_upgrade.html
16 | ARG AWS_JAVA_SDK_BUNDLE_VERSION=1.12.367
17 | ARG HADOOP_AWS_VERSION=3.3.6
18 |
19 | USER root
20 | RUN apt-get -y update
21 | RUN apt-get install -y curl
22 |
23 | # Install the AWS SDK so we can access S3; the SDK version must match the one expected by the hadoop-aws jar installed alongside it
24 | RUN mkdir -p /opt/hive/auxlib && \
25 | curl -fLsS -o /opt/hive/auxlib/aws-java-sdk-bundle-$AWS_JAVA_SDK_BUNDLE_VERSION.jar https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-bundle/$AWS_JAVA_SDK_BUNDLE_VERSION/aws-java-sdk-bundle-$AWS_JAVA_SDK_BUNDLE_VERSION.jar && \
26 | curl -fLsS -o /opt/hive/auxlib/hadoop-aws-$HADOOP_AWS_VERSION.jar https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/$HADOOP_AWS_VERSION/hadoop-aws-$HADOOP_AWS_VERSION.jar
27 |
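
The two build arguments control which aws-java-sdk-bundle and hadoop-aws jars land in /opt/hive/auxlib, so they can be pinned at build time. A sketch of a local build, assuming it is run from the repository root (the image tag is illustrative):

    docker build \
      --build-arg AWS_JAVA_SDK_BUNDLE_VERSION=1.12.367 \
      --build-arg HADOOP_AWS_VERSION=3.3.6 \
      -t testing/hive4.0-hive:local \
      testing/hive4.0-hive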
--------------------------------------------------------------------------------
/testing/kerberos/Dockerfile:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License");
2 | # you may not use this file except in compliance with the License.
3 | # You may obtain a copy of the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS,
9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 |
13 | ARG ARCH
14 | FROM testing/almalinux9-oj17:unlabelled$ARCH
15 |
16 | ARG ADDPRINC_ARGS="-maxrenewlife \"10 days\" +allow_renewable"
17 |
18 | # INSTALL KERBEROS
19 | RUN yum install -y krb5-libs krb5-server krb5-workstation \
20 | && yum -y clean all && rm -rf /tmp/* /var/tmp/*
21 |
22 | # COPY CONFIGURATION
23 | COPY ./files /
24 |
25 | # CREATE KERBEROS DATABASE
26 | RUN /usr/sbin/kdb5_util create -s -P password
27 |
28 | # CREATE ANOTHER KERBEROS DATABASE
29 | RUN /usr/sbin/kdb5_util create -d /var/kerberos/krb5kdc/principal-other -r OTHER.STARBURSTDATA.COM -s -P password
30 |
31 | # MAKE 'STARBURSTDATA.COM' TRUST 'OTHER.STARBURSTDATA.COM'
32 | RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -pw 123456 krbtgt/STARBURSTDATA.COM@OTHER.STARBURSTDATA.COM"
33 | RUN /usr/sbin/kadmin.local -r OTHER.STARBURSTDATA.COM -d /var/kerberos/krb5kdc/principal-other -q "addprinc ${ADDPRINC_ARGS} -pw 123456 krbtgt/STARBURSTDATA.COM"
34 |
35 | # EXPOSE KERBEROS PORTS
36 | EXPOSE 88
37 | EXPOSE 89
38 | EXPOSE 749
39 |
40 | CMD ["supervisord", "-c", "/etc/supervisord.conf"]
41 | ENTRYPOINT ["/opt/entrypoint.sh"]
42 |
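
The two krbtgt entries implement the one-way trust: principals from OTHER.STARBURSTDATA.COM can obtain service tickets in STARBURSTDATA.COM because krbtgt/STARBURSTDATA.COM@OTHER.STARBURSTDATA.COM exists, with the same password, in both KDC databases. A hedged smoke test from inside a running container, assuming a test principal and keytab have already been created in the OTHER realm (both names are hypothetical):

    # Authenticate against the OTHER realm's KDC and inspect the ticket cache
    kinit -k -t /etc/test-user.keytab test-user@OTHER.STARBURSTDATA.COM
    klist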
--------------------------------------------------------------------------------
/testing/kerberos/files/etc/krb5.conf:
--------------------------------------------------------------------------------
1 | [logging]
2 | default = FILE:/var/log/krb5libs.log
3 | kdc = FILE:/var/log/krb5kdc.log
4 | admin_server = FILE:/var/log/kadmind.log
5 |
6 | [libdefaults]
7 | default_realm = STARBURSTDATA.COM
8 | dns_lookup_realm = false
9 | dns_lookup_kdc = false
10 | forwardable = true
11 | allow_weak_crypto = true
12 |
13 | [realms]
14 | STARBURSTDATA.COM = {
15 | kdc = kerberos:88
16 | admin_server = kerberos
17 | }
18 | OTHER.STARBURSTDATA.COM = {
19 | kdc = kerberos:89
20 | admin_server = kerberos
21 | }
22 |
--------------------------------------------------------------------------------
/testing/kerberos/files/etc/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | logfile = /var/log/supervisord.log
3 | logfile_maxbytes = 50MB
4 | logfile_backups=10
5 | loglevel = info
6 | pidfile = /var/run/supervisord.pid
7 | nodaemon = true
8 | directory = /tmp
9 | strip_ansi = false
10 |
11 | [unix_http_server]
12 | file = /tmp/supervisor.sock
13 |
14 | [rpcinterface:supervisor]
15 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
16 |
17 | [supervisorctl]
18 | serverurl = unix:///tmp/supervisor.sock
19 |
20 | [include]
21 | files = /etc/supervisord.d/*.conf
22 |
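
With no inet HTTP server configured, supervisorctl reaches supervisord only through the unix socket declared above, which is how the managed daemons can be inspected or bounced from inside the container:

    # List managed programs and restart the KDC defined in supervisord.d/kdc.conf
    supervisorctl -c /etc/supervisord.conf status
    supervisorctl -c /etc/supervisord.conf restart krb5kdc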
--------------------------------------------------------------------------------
/testing/kerberos/files/etc/supervisord.d/kdc.conf:
--------------------------------------------------------------------------------
1 | [program:krb5kdc]
2 | command=/bin/bash -c "exec /usr/sbin/krb5kdc -P /var/run/krb5kdc.pid -n -r STARBURSTDATA.COM -n -d /var/kerberos/krb5kdc/principal-other -r OTHER.STARBURSTDATA.COM"
3 | autostart=true
4 | autorestart=true
5 | redirect_stderr=true
6 | stdout_logfile=/dev/stdout
7 | stdout_logfile_maxbytes=0
8 |
9 | [program:kadmind]
10 | command=/bin/bash -c "exec /usr/sbin/kadmind -P /var/run/kadmind.pid -nofork -r STARBURSTDATA.COM"
11 | autostart=true
12 | autorestart=true
13 | redirect_stderr=true
14 | stdout_logfile=/dev/stdout
15 | stdout_logfile_maxbytes=0
16 |
--------------------------------------------------------------------------------
/testing/kerberos/files/opt/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -exuo pipefail
4 |
5 | "$@" &
6 |
7 | if [[ -v TRINODEV_POST_BOOTSTRAP_COMMAND ]]; then
8 | $TRINODEV_POST_BOOTSTRAP_COMMAND
9 | fi
10 |
11 | if test -d /docker/kerberos-init.d; then
12 | for init_script in /docker/kerberos-init.d/*; do
13 | "${init_script}"
14 | done
15 | fi
16 |
17 | wait
18 |
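
The loop over /docker/kerberos-init.d is the hook for per-environment principal setup: any executable dropped into that directory (for example through a bind mount) runs once after the KDC processes have been launched in the background. A hedged sketch of such a script, using the create_principal helper shown below; the principal name and keytab path are purely illustrative:

    #!/bin/bash
    # Hypothetical /docker/kerberos-init.d/010-create-test-principal.sh
    set -euo pipefail

    # Principal and keytab names are examples only
    create_principal -p hive/hadoop-master@STARBURSTDATA.COM -k /etc/hive/hive.keytab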
--------------------------------------------------------------------------------
/testing/kerberos/files/usr/local/bin/create_principal:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | function usage() {
6 | if [ $# -ne 2 ]; then
7 | echo "Usage: $0 [-o] -p <principal> -k <keytab>"