├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
├── docker-images
│ ├── Dockerfile
│ ├── base-images
│ │ ├── amazonlinux
│ │ │ └── Dockerfile
│ │ ├── debian-ubuntu
│ │ │ └── Dockerfile
│ │ ├── fedora
│ │ │ └── Dockerfile
│ │ └── ubi8
│ │ │ └── Dockerfile
│ ├── bin-images
│ │ ├── amazonlinux
│ │ │ └── Dockerfile
│ │ ├── debian-ubuntu
│ │ │ └── Dockerfile
│ │ └── ubi8
│ │ │ └── Dockerfile
│ └── oss-compliance
│ │ ├── build-from-source-packages
│ │ └── build-from-source-package-licenses.txt
│ │ ├── generate-oss-compliance.sh
│ │ ├── linux-packages
│ │ ├── dpkg-packages.sh
│ │ └── yum-packages.sh
│ │ └── test
│ │ └── test-oss-compliance.sh
├── pull_request_template.md
└── workflows
│ ├── base-images.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── notification.yml
│ └── release.yml
├── .gitignore
├── .travis.yml
├── CHANGELOG.md
├── CMakeLists.txt
├── CMakeLists.txt.versioning
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── NOTICE
├── README.md
├── THIRD_PARTY_LICENSES
├── V1WebSocketProtocolGuide.md
├── V2WebSocketProtocolGuide.md
├── V3WebSocketProtocolGuide.md
├── docker-build.sh
├── docker-run.sh
├── example
└── crosscompile
│ └── raspberry_pi_3_b_plus.cmake.tc
├── resources
└── Message.proto
├── src
├── LocalproxyConfig.h
├── ProxySettings.cpp
├── ProxySettings.h
├── TcpAdapterProxy.cpp
├── TcpAdapterProxy.h
├── TcpClient.h
├── TcpConnection.h
├── TcpServer.h
├── Url.cpp
├── Url.h
├── Version.h.in
├── WebProxyAdapter.cpp
├── WebProxyAdapter.h
├── WebSocketStream.cpp
├── WebSocketStream.h
├── config
│ ├── ConfigFile.cpp
│ └── ConfigFile.h
└── main.cpp
├── test
├── AdapterTests.cpp
├── TestHttpServer.cpp
├── TestHttpServer.h
├── TestWebsocketServer.cpp
├── TestWebsocketServer.h
├── Url.cpp
└── WebProxyAdapterTests.cpp
└── windows-localproxy-build.md
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees:
7 |
8 | ---
9 |
10 | **Describe the bug**
11 |
12 | A clear and concise description of what the bug is.
13 |
14 | **To Reproduce**
15 |
16 | Steps to reproduce the behavior:
17 | 1. Go to '...'
18 | 2. Click on '....'
19 | 3. Scroll down to '....'
20 | 4. See error
21 |
22 | **Expected behavior**
23 |
24 | A clear and concise description of what you expected to happen.
25 |
26 | **Actual behavior**
27 |
28 | A clear and concise description of what actually happened.
29 |
30 | **Logs**
31 |
32 | If applicable, add full logs of errors and outputs to help explain your problem. Preferably, you can also [increase the verbosity](https://github.com/aws-samples/aws-iot-securetunneling-localproxy#options-set-via-command-line-arguments); for example, to enable debug logs for the localproxy, you can use the cli option `-v 6`
33 |
34 | **Environment (please complete the following information):**
35 | - OS: [e.g. Ubuntu]
36 | - Version [e.g. 16]
37 | - Architecture: [e.g. x86-64 or x86-32]
38 | - Localproxy commit: [e.g. [8980ea8e0190c7e8fd942f2bce8bfc01aa6a6a52](https://github.com/aws-samples/aws-iot-securetunneling-localproxy/commit/8980ea8e0190c7e8fd942f2bce8bfc01aa6a6a52)]
39 |
40 | **Additional context**
41 |
42 | Add any other context about the problem here.
43 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
22 | **Impact**
23 | How would this feature impact the project? Who would benefit from this feature?
24 |
25 | **Implementation ideas (optional)**
26 | If you have any ideas on how this could be implemented, please share them here.
27 |
28 |
29 |
30 |
31 | **Are you willing to work on this feature?**
32 | - [ ] Yes, I'm willing to submit a PR for this feature
33 | - [ ] No, I'm just suggesting an idea
34 |
35 | **Checklist**
36 | - [ ] I have searched the existing issues to make sure this is not a duplicate
37 | - [ ] I have provided all the necessary information for this feature request
38 | - [ ] I understand that this is just a request and may not be implemented since Local Proxy is just a reference implementation for AWS IoT Secure Tunneling
39 |
40 |
--------------------------------------------------------------------------------
/.github/docker-images/Dockerfile:
--------------------------------------------------------------------------------
# Multi-stage build: compile the local proxy inside a full build image
# (BASE_IMAGE), then copy only the resulting binary into a minimal runtime
# image (OS).
ARG OS
ARG BASE_IMAGE
FROM ${BASE_IMAGE} AS deploy

###############################################################################
# Copy and build local proxy
###############################################################################

COPY . /root/aws-iot-securetunneling-localproxy
RUN mkdir -p /root/aws-iot-securetunneling-localproxy/build \
    && cd /root/aws-iot-securetunneling-localproxy/build \
    && cmake .. \
    && make

# Runtime stage: plain OS image without the build toolchain.
FROM ${OS} AS minimum_size

COPY --from=deploy /root/aws-iot-securetunneling-localproxy/build/bin/localproxy /root/bin/localproxy

# Generate OSS license attribution files, then remove the tooling itself so it
# does not ship in the final image.
COPY ./.github/docker-images/oss-compliance /root/oss-compliance
RUN HOME_DIR=/root \
    && cd ${HOME_DIR}/oss-compliance \
    && chmod +x ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh \
    && chmod +x ${HOME_DIR}/oss-compliance/test/test-oss-compliance.sh \
    && bash ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh ${HOME_DIR} \
    && rm -rf ${HOME_DIR}/oss-compliance*

ENTRYPOINT ["/root/bin/localproxy"]
--------------------------------------------------------------------------------
/.github/docker-images/base-images/amazonlinux/Dockerfile:
--------------------------------------------------------------------------------
# Base build image for the local proxy on Amazon Linux 2023: installs the
# build toolchain and compiles the library dependencies from source.
# NOTE: `AS` capitalized for consistency with the other Dockerfiles in this
# repo and to silence BuildKit's FromAsCasing lint warning.
FROM amazonlinux:2023 AS base

# Install Prerequisites

RUN yum check-update; yum upgrade -y && \
    yum install -y git boost-devel autoconf automake libatomic perl \
    wget libtool make gcc-c++ unzip cmake3 python-devel openssl-devel which

# Install Dependencies

RUN mkdir /home/dependencies

# OpenSSL 3.0.12
WORKDIR /home/dependencies
RUN wget https://www.openssl.org/source/openssl-3.0.12.tar.gz \
    && tar xzvf openssl-3.0.12.tar.gz \
    && cd openssl-3.0.12 \
    && ./config \
    && make \
    && make install

# zlib 1.2.13
WORKDIR /home/dependencies
RUN wget https://github.com/madler/zlib/archive/v1.2.13.tar.gz -O /tmp/zlib-1.2.13.tar.gz && \
    tar xzvf /tmp/zlib-1.2.13.tar.gz && \
    cd zlib-1.2.13 && \
    ./configure && \
    make && \
    make install

# Boost 1.87.0 (installed as static libraries)
WORKDIR /home/dependencies
RUN wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz -O /tmp/boost_1_87_0.tar.gz && \
    tar xzvf /tmp/boost_1_87_0.tar.gz && \
    cd boost_1_87_0 && \
    ./bootstrap.sh && \
    ./b2 install link=static

# Protobuf 3.17.3
WORKDIR /home/dependencies
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz && \
    tar xzvf /tmp/protobuf-all-3.17.3.tar.gz && \
    cd protobuf-3.17.3 && \
    mkdir build && \
    cd build && \
    cmake ../cmake && \
    make && \
    make install

# Catch2 v3.7.0 (unit-test framework)
WORKDIR /home/dependencies
RUN git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git && \
    cd Catch2 && \
    mkdir build && \
    cd build && \
    cmake ../ && \
    make && \
    make install
--------------------------------------------------------------------------------
/.github/docker-images/base-images/debian-ubuntu/Dockerfile:
--------------------------------------------------------------------------------
# Base build image for the local proxy on Debian/Ubuntu (concrete distro
# chosen via the OS build-arg): installs the build toolchain and compiles the
# library dependencies from source.
ARG OS
FROM ${OS} AS base

# Install Prerequisites

RUN apt update && apt upgrade -y && \
    apt install -y git libboost-all-dev autoconf automake \
    wget libtool curl make g++ unzip cmake libssl-dev python3
# Install Dependencies

RUN mkdir /home/dependencies

# OpenSSL 3.0.12
WORKDIR /home/dependencies
RUN wget https://www.openssl.org/source/openssl-3.0.12.tar.gz \
    && tar xzvf openssl-3.0.12.tar.gz \
    && cd openssl-3.0.12 \
    && ./config \
    && make \
    && make install

# zlib 1.2.13
WORKDIR /home/dependencies
RUN wget https://github.com/madler/zlib/archive/v1.2.13.tar.gz -O /tmp/zlib-1.2.13.tar.gz && \
    tar xzvf /tmp/zlib-1.2.13.tar.gz && \
    cd zlib-1.2.13 && \
    ./configure && \
    make && \
    make install

# Boost 1.87.0 (installed as static libraries)
WORKDIR /home/dependencies
RUN wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz -O /tmp/boost_1_87_0.tar.gz && \
    tar xzvf /tmp/boost_1_87_0.tar.gz && \
    cd boost_1_87_0 && \
    ./bootstrap.sh && \
    ./b2 install link=static

# Protobuf 3.17.3
WORKDIR /home/dependencies
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz && \
    tar xzvf /tmp/protobuf-all-3.17.3.tar.gz && \
    cd protobuf-3.17.3 && \
    mkdir build && \
    cd build && \
    cmake ../cmake && \
    make && \
    make install

# Catch2 v3.7.0 (unit-test framework)
WORKDIR /home/dependencies
RUN git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git && \
    cd Catch2 && \
    mkdir build && \
    cd build && \
    cmake ../ && \
    make && \
    make install
--------------------------------------------------------------------------------
/.github/docker-images/base-images/fedora/Dockerfile:
--------------------------------------------------------------------------------
# Base build image for the local proxy on Fedora: installs the build
# toolchain and compiles the library dependencies from source.
FROM fedora:latest AS base

# Install Prerequisites

RUN dnf -y update \
    && dnf -y install \
    git autoconf automake \
    which wget libtool libatomic curl make gcc-c++ unzip cmake python3 openssl-devel perl-core

RUN mkdir /home/dependencies

# OpenSSL 3.0.12
WORKDIR /home/dependencies
RUN wget https://www.openssl.org/source/openssl-3.0.12.tar.gz \
    && tar xzvf openssl-3.0.12.tar.gz \
    && cd openssl-3.0.12 \
    && ./config \
    && make \
    && make install

# zlib 1.2.13
WORKDIR /home/dependencies
RUN wget https://github.com/madler/zlib/archive/v1.2.13.tar.gz -O /tmp/zlib-1.2.13.tar.gz && \
    tar xzvf /tmp/zlib-1.2.13.tar.gz && \
    cd zlib-1.2.13 && \
    ./configure && \
    make && \
    make install

# Boost 1.87.0 (installed as static libraries)
WORKDIR /home/dependencies
RUN wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz -O /tmp/boost_1_87_0.tar.gz && \
    tar xzvf /tmp/boost_1_87_0.tar.gz && \
    cd boost_1_87_0 && \
    ./bootstrap.sh && \
    ./b2 install link=static

# Protobuf 3.17.3
WORKDIR /home/dependencies
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz && \
    tar xzvf /tmp/protobuf-all-3.17.3.tar.gz && \
    cd protobuf-3.17.3 && \
    mkdir build && \
    cd build && \
    cmake ../cmake && \
    make && \
    make install

# Catch2 v3.7.0 (unit-test framework)
WORKDIR /home/dependencies
RUN git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git && \
    cd Catch2 && \
    mkdir build && \
    cd build && \
    cmake ../ && \
    make && \
    make install
--------------------------------------------------------------------------------
/.github/docker-images/base-images/ubi8/Dockerfile:
--------------------------------------------------------------------------------
# Base build image for the local proxy on Red Hat UBI 8: installs the build
# toolchain and compiles the library dependencies from source.
FROM redhat/ubi8:latest AS base

# Install Prerequisites

RUN yum -y update \
    && yum -y install git autoconf automake libatomic \
    wget libtool curl make gcc-c++ unzip cmake python3 openssl-devel perl

RUN mkdir /home/dependencies

# OpenSSL 3.0.12
WORKDIR /home/dependencies
RUN wget https://www.openssl.org/source/openssl-3.0.12.tar.gz \
    && tar xzvf openssl-3.0.12.tar.gz \
    && cd openssl-3.0.12 \
    && ./config \
    && make \
    && make install

# zlib 1.2.13
WORKDIR /home/dependencies
RUN wget https://github.com/madler/zlib/archive/v1.2.13.tar.gz -O /tmp/zlib-1.2.13.tar.gz && \
    tar xzvf /tmp/zlib-1.2.13.tar.gz && \
    cd zlib-1.2.13 && \
    ./configure && \
    make && \
    make install

# Boost 1.87.0 (installed as static libraries)
WORKDIR /home/dependencies
RUN wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz -O /tmp/boost_1_87_0.tar.gz && \
    tar xzvf /tmp/boost_1_87_0.tar.gz && \
    cd boost_1_87_0 && \
    ./bootstrap.sh && \
    ./b2 install link=static

# Protobuf 3.17.3
WORKDIR /home/dependencies
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz && \
    tar xzvf /tmp/protobuf-all-3.17.3.tar.gz && \
    cd protobuf-3.17.3 && \
    mkdir build && \
    cd build && \
    cmake ../cmake && \
    make && \
    make install

# Catch2 v3.7.0 (unit-test framework)
WORKDIR /home/dependencies
RUN git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git && \
    cd Catch2 && \
    mkdir build && \
    cd build && \
    cmake ../ && \
    make && \
    make install
--------------------------------------------------------------------------------
/.github/docker-images/bin-images/amazonlinux/Dockerfile:
--------------------------------------------------------------------------------
# Binary image for the local proxy on Amazon Linux 2023: builds inside the
# prebuilt base image (BASE_IMAGE build-arg) and copies only the binary into
# a clean runtime image.
ARG BASE_IMAGE
FROM ${BASE_IMAGE} AS deploy

###############################################################################
# Copy and build local proxy
###############################################################################

COPY . /root/aws-iot-securetunneling-localproxy
RUN mkdir -p /root/aws-iot-securetunneling-localproxy/build \
    && cd /root/aws-iot-securetunneling-localproxy/build \
    && cmake .. \
    && make

# Runtime stage: plain OS image without the build toolchain.
FROM amazonlinux:2023 AS minimum_size

COPY --from=deploy /root/aws-iot-securetunneling-localproxy/build/bin/localproxy /root/bin/localproxy

# Generate OSS license attribution files, then remove the tooling itself.
COPY ./.github/docker-images/oss-compliance /root/oss-compliance
RUN HOME_DIR=/root \
    && cd ${HOME_DIR}/oss-compliance \
    && chmod +x ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh \
    && chmod +x ${HOME_DIR}/oss-compliance/test/test-oss-compliance.sh \
    && bash ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh ${HOME_DIR} \
    && rm -rf ${HOME_DIR}/oss-compliance*

# OS-specific commands
# Runtime shared libraries the localproxy binary needs.
RUN yum check-update; yum upgrade -y && \
    yum install -y libatomic libicu

ENTRYPOINT ["/root/bin/localproxy"]
--------------------------------------------------------------------------------
/.github/docker-images/bin-images/debian-ubuntu/Dockerfile:
--------------------------------------------------------------------------------
# Binary image for the local proxy on Debian/Ubuntu: builds inside the
# prebuilt base image (BASE_IMAGE build-arg) and copies only the binary into
# a clean runtime image of the distro named by the OS build-arg.
ARG OS
ARG BASE_IMAGE
FROM ${BASE_IMAGE} AS deploy

###############################################################################
# Copy and build local proxy
###############################################################################

COPY . /root/aws-iot-securetunneling-localproxy
RUN mkdir -p /root/aws-iot-securetunneling-localproxy/build \
    && cd /root/aws-iot-securetunneling-localproxy/build \
    && cmake .. \
    && make

# Runtime stage: plain OS image without the build toolchain.
FROM ${OS} AS minimum_size

COPY --from=deploy /root/aws-iot-securetunneling-localproxy/build/bin/localproxy /root/bin/localproxy

# Generate OSS license attribution files, then remove the tooling itself.
COPY ./.github/docker-images/oss-compliance /root/oss-compliance
RUN HOME_DIR=/root \
    && cd ${HOME_DIR}/oss-compliance \
    && chmod +x ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh \
    && chmod +x ${HOME_DIR}/oss-compliance/test/test-oss-compliance.sh \
    && bash ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh ${HOME_DIR} \
    && rm -rf ${HOME_DIR}/oss-compliance*

# OS-specific commands
# CA certificates are required for the proxy's TLS connection to AWS.
RUN apt update && apt upgrade -y && apt install -y ca-certificates && update-ca-certificates

ENTRYPOINT ["/root/bin/localproxy"]
--------------------------------------------------------------------------------
/.github/docker-images/bin-images/ubi8/Dockerfile:
--------------------------------------------------------------------------------
# Binary image for the local proxy on Red Hat UBI 8: builds inside the
# prebuilt base image (BASE_IMAGE build-arg) and copies only the binary into
# a clean runtime image.
ARG BASE_IMAGE
FROM ${BASE_IMAGE} AS deploy

###############################################################################
# Copy and build local proxy
###############################################################################

COPY . /root/aws-iot-securetunneling-localproxy
RUN mkdir -p /root/aws-iot-securetunneling-localproxy/build \
    && cd /root/aws-iot-securetunneling-localproxy/build \
    && cmake .. \
    && make

# Runtime stage: plain OS image without the build toolchain.
FROM redhat/ubi8:latest AS minimum_size

COPY --from=deploy /root/aws-iot-securetunneling-localproxy/build/bin/localproxy /root/bin/localproxy

# Generate OSS license attribution files, then remove the tooling itself.
COPY ./.github/docker-images/oss-compliance /root/oss-compliance
RUN HOME_DIR=/root \
    && cd ${HOME_DIR}/oss-compliance \
    && chmod +x ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh \
    && chmod +x ${HOME_DIR}/oss-compliance/test/test-oss-compliance.sh \
    && bash ${HOME_DIR}/oss-compliance/generate-oss-compliance.sh ${HOME_DIR} \
    && rm -rf ${HOME_DIR}/oss-compliance*

# OS-specific commands
# Runtime shared libraries plus CA certificates for the TLS connection.
RUN yum check-update; yum upgrade -y && \
    yum install -y libatomic libicu ca-certificates && \
    update-ca-trust extract

ENTRYPOINT ["/root/bin/localproxy"]
--------------------------------------------------------------------------------
/.github/docker-images/oss-compliance/generate-oss-compliance.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Generates OSS license attribution files for the Docker image being built:
# dispatches to the dpkg- or yum-based collector depending on the distro,
# copies the build-from-source license list into /root, and runs the
# compliance self-test.
#
# NOTE: the shebang must be the very first line of the file; previously it
# appeared after the copyright header, where it has no effect.

# Abort on the first failing command (moved up so every step is covered).
set -e

PRETTY_NAME=$(grep PRETTY_NAME /etc/os-release)

HOME_DIR=$(pwd)
export HOME_DIR

LINUX_PACKAGES=${HOME_DIR}/linux-packages
BUILD_FROM_SOURCE_PACKAGES_LICENCES=${HOME_DIR}/build-from-source-packages/build-from-source-package-licenses.txt

chmod +x "${LINUX_PACKAGES}/yum-packages.sh"
chmod +x "${LINUX_PACKAGES}/dpkg-packages.sh"

# Debian-family distros record package licenses in dpkg copyright files.
if [[ $PRETTY_NAME == *"Ubuntu"* || $PRETTY_NAME == *"Debian"* ]]; then
    "${LINUX_PACKAGES}/dpkg-packages.sh"
fi

# RPM-family distros record package licenses under /usr/share/licenses|doc.
if [[ $PRETTY_NAME == *"Amazon Linux"* || $PRETTY_NAME == *"Red Hat Enterprise Linux"* || $PRETTY_NAME == *"Fedora"* ]]; then
    "${LINUX_PACKAGES}/yum-packages.sh"
fi

cp "${BUILD_FROM_SOURCE_PACKAGES_LICENCES}" /root/BUILD_FROM_SOURCE_PACKAGES_LICENCES
chmod +x "${HOME_DIR}/test/test-oss-compliance.sh"
bash "${HOME_DIR}/test/test-oss-compliance.sh" "${HOME_DIR}"
--------------------------------------------------------------------------------
/.github/docker-images/oss-compliance/linux-packages/dpkg-packages.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Writes the list of installed dpkg packages and each package's copyright
# text to ${HOME_DIR} for OSS license attribution.

HOME_DIR=/root
LICENSE_TEXT_FILE_NAME="LINUX_PACKAGES_LICENSES"
LICENSE_TEXT_FILE_PATH=${HOME_DIR}/${LICENSE_TEXT_FILE_NAME}
PACKAGE_LIST_TEXT_FILE_NAME="LINUX_PACKAGES_LIST"

# Keep only rows whose dpkg desired-state flag marks the package as present
# (installed/unpacked/half-configured/etc.), dropping the header lines.
OUTPUT=$(dpkg -l | grep '^.[iufhwt]')
echo "${OUTPUT}" > ${HOME_DIR}/${PACKAGE_LIST_TEXT_FILE_NAME}

IFS=$'\n' read -rd '' -a OUTPUT_LIST <<<"${OUTPUT}"

for (( i=0; i<${#OUTPUT_LIST[@]}; i++ ))
do
    # Fields: status, name[:arch], version, ...
    IFS=$' ' read -rd '' -a PACKAGE_DETAILS <<<"${OUTPUT_LIST[$i]}"
    # FIX: the old test `[ ${#PACKAGE_DETAILS[@]} ]` was always true (it
    # tested string non-emptiness, even for "0"); require the three fields
    # that are actually read below.
    if [ ${#PACKAGE_DETAILS[@]} -ge 3 ]; then
        # Strip the ":arch" suffix to get the plain package name.
        IFS=$':' read -a PACKAGE_NAME_AND_ARCH <<<"${PACKAGE_DETAILS[1]}"
        PACKAGE_NAME="${PACKAGE_NAME_AND_ARCH[0]}"
        # FIX: PACKAGE_LICENSE_LOCATION was echoed below but never assigned;
        # point it at the dpkg copyright file and guard the read so a missing
        # file no longer spills a cat error to stderr.
        PACKAGE_LICENSE_LOCATION="/usr/share/doc/${PACKAGE_NAME}/copyright"
        if [ -f "${PACKAGE_LICENSE_LOCATION}" ]; then
            LICENSE_TEXT=$(cat "${PACKAGE_LICENSE_LOCATION}")
        else
            LICENSE_TEXT=""
        fi
        if [ -z "${LICENSE_TEXT}" ]; then
            LICENSE_TEXT="License is not present for this package."
        fi
        echo "Package Name: "${PACKAGE_NAME} >> ${LICENSE_TEXT_FILE_PATH}
        echo "Package Version: "${PACKAGE_DETAILS[2]} >> ${LICENSE_TEXT_FILE_PATH}
        echo "Package License Location: "${PACKAGE_LICENSE_LOCATION} >> ${LICENSE_TEXT_FILE_PATH}
        echo -e "Package License Text: "${LICENSE_TEXT}"\n" >> ${LICENSE_TEXT_FILE_PATH}
    fi
done
--------------------------------------------------------------------------------
/.github/docker-images/oss-compliance/linux-packages/yum-packages.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Writes the list of installed yum/rpm packages and each package's license
# text to ${HOME_DIR} for OSS license attribution.

# Common license file names and the directories rpm packages install them to.
LICENSE_FILE_NAMES=("LICENSE" "LICENSE.txt" "LICENSE.md" "license.txt" "license" "COPYRIGHT" "LICENSE.rst" "COPYING" "COPYING.md" "COPYING.txt")
LICENSE_FILE_LOCATIONS=("/usr/share/licenses" "/usr/share/doc")

HOME_DIR=/root
LICENSE_TEXT_FILE_NAME="LINUX_PACKAGES_LICENSES"
LICENSE_TEXT_FILE_PATH=${HOME_DIR}/${LICENSE_TEXT_FILE_NAME}
PACKAGE_LIST_TEXT_FILE_NAME="LINUX_PACKAGES_LIST"

OUTPUT="$(yum list installed | grep -v @amzn2-core | sort)"
echo "${OUTPUT}" > ${HOME_DIR}/${PACKAGE_LIST_TEXT_FILE_NAME}

IFS=$'\n' read -rd '' -a OUTPUT_LIST <<<"${OUTPUT}"

for (( i=0; i<${#OUTPUT_LIST[@]}; i++ ))
do
    IFS=$' ' read -rd '' -a PACKAGE_DETAILS <<<"${OUTPUT_LIST[$i]}"
    # Valid rows look like: "<name>.<arch>  [<epoch>:]<version>-<release>  <repo>"
    if [ ${#PACKAGE_DETAILS[@]} -eq "3" ]; then
        IFS=$'.' read -rd '' -a PACKAGE_NAME_AND_ARCH <<<"${PACKAGE_DETAILS[0]}"
        PACKAGE_NAME=${PACKAGE_NAME_AND_ARCH[0]}
        IFS=$'-:' read -rd '' -a PACKAGE_VERSION_ARR <<<"${PACKAGE_DETAILS[1]}"
        # FIX: the old code tested ${#PACKAGE_VERSION[@]} (the wrong, scalar
        # variable) so the epoch branch never ran; detect an epoch prefix by
        # the ':' separator itself, which also handles non-epoch
        # "version-release" strings correctly.
        if [[ "${PACKAGE_DETAILS[1]}" == *:* ]]; then
            PACKAGE_VERSION="${PACKAGE_VERSION_ARR[1]}"
        else
            PACKAGE_VERSION="${PACKAGE_VERSION_ARR[0]}"
        fi
        # FIX: reset the variable that is actually used below (the old code
        # reset the unused PACKAGE_LOCATION), so a hit from the previous
        # package is never reported for one whose license was not found.
        PACKAGE_LICENSE_LOCATION=""
        for (( license_file_dir=0; license_file_dir<"${#LICENSE_FILE_LOCATIONS[@]}"; license_file_dir++ ))
        do
            for (( license_file=0; license_file<"${#LICENSE_FILE_NAMES[@]}"; license_file++ ))
            do
                if [[ -f "${LICENSE_FILE_LOCATIONS[$license_file_dir]}/${PACKAGE_NAME}-${PACKAGE_VERSION}/${LICENSE_FILE_NAMES[$license_file]}" ]]; then
                    PACKAGE_LICENSE_LOCATION=${LICENSE_FILE_LOCATIONS[$license_file_dir]}/${PACKAGE_NAME}-${PACKAGE_VERSION}/${LICENSE_FILE_NAMES[$license_file]}
                    break
                elif [[ -f "${LICENSE_FILE_LOCATIONS[$license_file_dir]}/${PACKAGE_NAME}/${LICENSE_FILE_NAMES[$license_file]}" ]]; then
                    PACKAGE_LICENSE_LOCATION=${LICENSE_FILE_LOCATIONS[$license_file_dir]}/${PACKAGE_NAME}/${LICENSE_FILE_NAMES[$license_file]}
                    break
                fi
            done
            # FIX: stop scanning the remaining directories once a license
            # file has been found (the inner break only left the inner loop).
            if [ -n "${PACKAGE_LICENSE_LOCATION}" ]; then
                break
            fi
        done
        if [ -n "${PACKAGE_LICENSE_LOCATION}" ] && [ -f "${PACKAGE_LICENSE_LOCATION}" ]; then
            LICENSE_TEXT=$(cat "${PACKAGE_LICENSE_LOCATION}") || true
        else
            LICENSE_TEXT="License is not present for this package."
        fi
        echo "Package Name: "${PACKAGE_NAME} >> ${LICENSE_TEXT_FILE_PATH}
        echo "Package Version: "${PACKAGE_VERSION} >> ${LICENSE_TEXT_FILE_PATH}
        echo "Package License Location: "${PACKAGE_LICENSE_LOCATION} >> ${LICENSE_TEXT_FILE_PATH}
        echo -e "Package License Text: "${LICENSE_TEXT}"\n" >> ${LICENSE_TEXT_FILE_PATH}
    fi
done
--------------------------------------------------------------------------------
/.github/docker-images/oss-compliance/test/test-oss-compliance.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Verifies that the license attribution files generated earlier in the image
# build exist; exits non-zero (failing the build) when any is missing.
#
# NOTE: the shebang must be the very first line of the file; previously it
# appeared after the copyright header, where it has no effect.

HOME_DIR=/root

FILES=("${HOME_DIR}/LINUX_PACKAGES_LICENSES" "${HOME_DIR}/BUILD_FROM_SOURCE_PACKAGES_LICENCES")

# Quote the expansion so paths survive word splitting intact.
for FILE in "${FILES[@]}"; do
    if [ -f "$FILE" ]; then
        echo "$FILE exists."
    else
        echo "$FILE doesn't exist which is needed for license attribution compliance."
        exit 1
    fi
done
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ### Motivation
2 | - Please give a brief description for the background of this change.
3 | - Issue number:
4 |
5 |
6 | ### Modifications
7 | #### Change summary
8 | Please describe what changes are included in this pull request.
9 |
10 | #### Revision diff summary
11 | If there is more than one revision, please explain what has been changed since the last revision.
12 |
13 | ### Testing
14 | **Is your change tested? If not, please justify the reason.**
15 | **Please list your testing steps and test results.**
16 | - CI test run result:
17 |
18 |
19 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.
20 |
--------------------------------------------------------------------------------
/.github/workflows/base-images.yml:
--------------------------------------------------------------------------------
1 | name: Base Image Builds
2 |
3 | # This workflow is to allow the building of Docker base images by merging to the base-images branch of the repo
4 | # Building the base images is time-consuming and not necessary unless there have been changes to the Dockerfile
5 | # or a dependency. This workflow allows developers to merge to the base-images to build and publish the base images
6 | # only when needed. This cuts the time needed for typical workflow runs significantly.
7 |
8 | on:
9 | push:
10 | branches: ['base-images', 'docker-builds']
11 | pull_request:
12 | branches: ['base-images', 'docker-builds']
13 | types: [opened, closed]
14 |
15 | env:
16 | PACKAGE_NAME: aws-iot-securetunneling-localproxy
17 | ECR_ACCOUNT_URL: public.ecr.aws
18 | ECR_BASE_UBUNTU: aws-iot-securetunneling-localproxy/ubuntu-base
19 | ECR_BASE_UBI8: aws-iot-securetunneling-localproxy/ubi8-base
20 | ECR_BASE_AMAZONLINUX: aws-iot-securetunneling-localproxy/amazonlinux-base
21 | ECR_BASE_DEBIAN: aws-iot-securetunneling-localproxy/debian-base
22 | ECR_BASE_FEDORA: aws-iot-securetunneling-localproxy/fedora-base
23 |
24 | jobs:
25 | build-base-docker-image-ubuntu-amd64:
26 | runs-on: ubuntu-latest
27 | permissions:
28 | id-token: write
29 | contents: read
30 | steps:
31 | - name: Configure AWS Credentials
32 | uses: aws-actions/configure-aws-credentials@v1
33 | with:
34 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
35 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
36 | aws-region: us-east-1
37 | - name: Login to ECR
38 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
39 | - name: Checkout
40 | uses: actions/checkout@v2
41 | with:
42 | fetch-depth: 0
43 | - name: Set up QEMU
44 | uses: docker/setup-qemu-action@v2
45 | - name: Set up Docker Buildx
46 | uses: docker/setup-buildx-action@v2
47 | - name: Build Ubuntu Base Image
48 | uses: docker/build-push-action@v4
49 | with:
50 | file: .github/docker-images/base-images/debian-ubuntu/Dockerfile
51 | build-args: |
52 | OS=ubuntu:22.04
53 | context: .
54 | push: true
55 | tags: |
56 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:amd64-${{ github.sha }}
57 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:amd64-latest
58 | platforms: linux/amd64
59 | build-base-docker-image-ubuntu-arm64:
60 | runs-on: ubuntu-latest
61 | permissions:
62 | id-token: write
63 | contents: read
64 | steps:
65 | - name: Configure AWS Credentials
66 | uses: aws-actions/configure-aws-credentials@v1
67 | with:
68 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
69 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
70 | aws-region: us-east-1
71 | - name: Login to ECR
72 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
73 | - name: Checkout
74 | uses: actions/checkout@v2
75 | with:
76 | fetch-depth: 0
77 | - name: Set up QEMU
78 | uses: docker/setup-qemu-action@v2
79 | - name: Set up Docker Buildx
80 | uses: docker/setup-buildx-action@v2
81 | - name: Build Ubuntu Base Image
82 | uses: docker/build-push-action@v4
83 | with:
84 | file: .github/docker-images/base-images/debian-ubuntu/Dockerfile
85 | build-args: |
86 | OS=ubuntu:22.04
87 | context: .
88 | push: true
89 | tags: |
90 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:arm64-${{ github.sha }}
91 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:arm64-latest
92 | platforms: linux/arm64
93 | build-base-docker-image-ubuntu-armv7:
94 | runs-on: ubuntu-latest
95 | permissions:
96 | id-token: write
97 | contents: read
98 | steps:
99 | - name: Configure AWS Credentials
100 | uses: aws-actions/configure-aws-credentials@v1
101 | with:
102 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
103 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
104 | aws-region: us-east-1
105 | - name: Login to ECR
106 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
107 | - name: Checkout
108 | uses: actions/checkout@v2
109 | with:
110 | fetch-depth: 0
111 | - name: Set up QEMU
112 | uses: docker/setup-qemu-action@v2
113 | - name: Set up Docker Buildx
114 | uses: docker/setup-buildx-action@v2
115 | - name: Build ubuntu Base Image
116 | uses: docker/build-push-action@v4
117 | with:
118 | file: .github/docker-images/base-images/debian-ubuntu/Dockerfile
119 | build-args: |
120 | OS=ubuntu:18.04
121 | context: .
122 | push: true
123 | tags: |
124 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:armv7-${{ github.sha }}
125 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:armv7-latest
126 | platforms: linux/arm/v7
127 | build-base-docker-image-ubi8-amd64:
128 | runs-on: ubuntu-latest
129 | permissions:
130 | id-token: write
131 | contents: read
132 | steps:
133 | - name: Configure AWS Credentials
134 | uses: aws-actions/configure-aws-credentials@v1
135 | with:
136 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
137 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
138 | aws-region: us-east-1
139 | - name: Login to ECR
140 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
141 | - name: Checkout
142 | uses: actions/checkout@v2
143 | with:
144 | fetch-depth: 0
145 | - name: Set up QEMU
146 | uses: docker/setup-qemu-action@v2
147 | - name: Set up Docker Buildx
148 | uses: docker/setup-buildx-action@v2
149 | - name: Build ubi8 Base Image
150 | uses: docker/build-push-action@v4
151 | with:
152 | file: .github/docker-images/base-images/ubi8/Dockerfile
153 | context: .
154 | push: true
155 | tags: |
156 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBI8 }}:amd64-${{ github.sha }}
157 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBI8 }}:amd64-latest
158 | platforms: linux/amd64
159 | build-base-docker-image-ubi8-arm64:
160 | runs-on: ubuntu-latest
161 | permissions:
162 | id-token: write
163 | contents: read
164 | steps:
165 | - name: Configure AWS Credentials
166 | uses: aws-actions/configure-aws-credentials@v1
167 | with:
168 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
169 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
170 | aws-region: us-east-1
171 | - name: Login to ECR
172 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
173 | - name: Checkout
174 | uses: actions/checkout@v2
175 | with:
176 | fetch-depth: 0
177 | - name: Set up QEMU
178 | uses: docker/setup-qemu-action@v2
179 | - name: Set up Docker Buildx
180 | uses: docker/setup-buildx-action@v2
181 | - name: Build ubi8 Base Image
182 | uses: docker/build-push-action@v4
183 | with:
184 | file: .github/docker-images/base-images/ubi8/Dockerfile
185 | context: .
186 | push: true
187 | tags: |
188 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBI8 }}:arm64-${{ github.sha }}
189 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBI8 }}:arm64-latest
190 | platforms: linux/arm64
191 | build-base-docker-image-amazonlinux-amd64:
192 | runs-on: ubuntu-latest
193 | permissions:
194 | id-token: write
195 | contents: read
196 | steps:
197 | - name: Configure AWS Credentials
198 | uses: aws-actions/configure-aws-credentials@v1
199 | with:
200 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
201 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
202 | aws-region: us-east-1
203 | - name: Login to ECR
204 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
205 | - name: Checkout
206 | uses: actions/checkout@v2
207 | with:
208 | fetch-depth: 0
209 | - name: Set up QEMU
210 | uses: docker/setup-qemu-action@v2
211 | - name: Set up Docker Buildx
212 | uses: docker/setup-buildx-action@v2
213 | - name: Build amazonlinux Base Image
214 | uses: docker/build-push-action@v4
215 | with:
216 | file: .github/docker-images/base-images/amazonlinux/Dockerfile
217 | context: .
218 | push: true
219 | tags: |
220 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_AMAZONLINUX }}:amd64-${{ github.sha }}
221 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_AMAZONLINUX }}:amd64-latest
222 | platforms: linux/amd64
223 | build-base-docker-image-amazonlinux-arm64:
224 | runs-on: ubuntu-latest
225 | permissions:
226 | id-token: write
227 | contents: read
228 | steps:
229 | - name: Configure AWS Credentials
230 | uses: aws-actions/configure-aws-credentials@v1
231 | with:
232 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
233 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
234 | aws-region: us-east-1
235 | - name: Login to ECR
236 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
237 | - name: Checkout
238 | uses: actions/checkout@v2
239 | with:
240 | fetch-depth: 0
241 | - name: Set up QEMU
242 | uses: docker/setup-qemu-action@v2
243 | - name: Set up Docker Buildx
244 | uses: docker/setup-buildx-action@v2
245 | - name: Build amazonlinux Base Image
246 | uses: docker/build-push-action@v4
247 | with:
248 | file: .github/docker-images/base-images/amazonlinux/Dockerfile
249 | context: .
250 | push: true
251 | tags: |
252 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_AMAZONLINUX }}:arm64-${{ github.sha }}
253 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_AMAZONLINUX }}:arm64-latest
254 | platforms: linux/arm64
255 | build-base-docker-image-debian-amd64:
256 | runs-on: ubuntu-latest
257 | permissions:
258 | id-token: write
259 | contents: read
260 | steps:
261 | - name: Configure AWS Credentials
262 | uses: aws-actions/configure-aws-credentials@v1
263 | with:
264 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
265 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
266 | aws-region: us-east-1
267 | - name: Login to ECR
268 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
269 | - name: Checkout
270 | uses: actions/checkout@v2
271 | with:
272 | fetch-depth: 0
273 | - name: Set up QEMU
274 | uses: docker/setup-qemu-action@v2
275 | - name: Set up Docker Buildx
276 | uses: docker/setup-buildx-action@v2
277 | - name: Build debian Base Image
278 | uses: docker/build-push-action@v4
279 | with:
280 | file: .github/docker-images/base-images/debian-ubuntu/Dockerfile
281 | build-args: |
282 | OS=debian:latest
283 | context: .
284 | push: true
285 | tags: |
286 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_DEBIAN }}:amd64-${{ github.sha }}
287 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_DEBIAN }}:amd64-latest
288 | platforms: linux/amd64
289 | build-base-docker-image-debian-arm64:
290 | runs-on: ubuntu-latest
291 | permissions:
292 | id-token: write
293 | contents: read
294 | steps:
295 | - name: Configure AWS Credentials
296 | uses: aws-actions/configure-aws-credentials@v1
297 | with:
298 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
299 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
300 | aws-region: us-east-1
301 | - name: Login to ECR
302 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
303 | - name: Checkout
304 | uses: actions/checkout@v2
305 | with:
306 | fetch-depth: 0
307 | - name: Set up QEMU
308 | uses: docker/setup-qemu-action@v2
309 | - name: Set up Docker Buildx
310 | uses: docker/setup-buildx-action@v2
311 | - name: Build debian Base Image
312 | uses: docker/build-push-action@v4
313 | with:
314 | file: .github/docker-images/base-images/debian-ubuntu/Dockerfile
315 | build-args: |
316 | OS=debian:latest
317 | context: .
318 | push: true
319 | tags: |
320 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_DEBIAN }}:arm64-${{ github.sha }}
321 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_DEBIAN }}:arm64-latest
322 | platforms: linux/arm64
323 | build-base-docker-image-fedora-amd64:
324 | runs-on: ubuntu-latest
325 | permissions:
326 | id-token: write
327 | contents: read
328 | steps:
329 | - name: Configure AWS Credentials
330 | uses: aws-actions/configure-aws-credentials@v1
331 | with:
332 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
333 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
334 | aws-region: us-east-1
335 | - name: Login to ECR
336 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
337 | - name: Checkout
338 | uses: actions/checkout@v2
339 | with:
340 | fetch-depth: 0
341 | - name: Set up QEMU
342 | uses: docker/setup-qemu-action@v2
343 | - name: Set up Docker Buildx
344 | uses: docker/setup-buildx-action@v2
345 | - name: Build fedora Base Image
346 | uses: docker/build-push-action@v4
347 | with:
348 | file: .github/docker-images/base-images/fedora/Dockerfile
349 | context: .
350 | push: true
351 | tags: |
352 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_FEDORA }}:amd64-${{ github.sha }}
353 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_FEDORA }}:amd64-latest
354 | platforms: linux/amd64
355 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | permissions:
4 | contents: read
5 | actions: write
6 |
7 | on:
8 | push:
9 | branches:
10 | - '*'
11 | pull_request:
12 | branches:
13 | - '*'
14 |
15 | env:
16 | PACKAGE_NAME: aws-iot-securetunneling-localproxy
17 |
18 | jobs:
19 | osx:
20 | runs-on: macos-latest
21 | steps:
22 | - uses: actions/checkout@v3
23 | name: 'Checkout'
24 | with:
25 | fetch-depth: 0 # Fetch all history
26 | - name: List tags
27 | run: |
28 | git tag
29 | - name: Install brew dependencies
30 | run: |
31 | brew install openssl@1.1 zlib cmake wget
32 | - name: Install boost
33 | working-directory: ${{ github.workspace }}
34 | run: |
35 | wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz -O /tmp/boost_1_87_0.tar.gz
36 | tar xzvf /tmp/boost_1_87_0.tar.gz
37 | cd boost_1_87_0
38 | ./bootstrap.sh --with-toolset=clang
39 | sudo ./b2 install toolset=clang link=static
40 | - name: Install protobuf
41 | working-directory: ${{ github.workspace }}
42 | run: |
43 | wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz
44 | tar xzvf /tmp/protobuf-all-3.17.3.tar.gz
45 | cd protobuf-3.17.3
46 | mkdir build_make
47 | cd build_make
48 | cmake ../cmake
49 | make
50 | sudo make install
51 | - name: Building localproxy
52 | working-directory: ${{ github.workspace }}
53 | run: |
54 | mkdir build
55 | cd build
 56 |           cmake .. -DBUILD_TESTS=OFF -DOPENSSL_ROOT_DIR=$(brew --prefix openssl@1.1) -DOPENSSL_LIBRARIES=$(brew --prefix openssl@1.1)/lib/
57 | make
58 | - name: Upload Artifact
59 | uses: actions/upload-artifact@v4
60 | with:
61 | name: localproxy-mac
62 | path: ${{ github.workspace }}/build/bin/localproxy
63 | ubuntu:
64 | runs-on: ubuntu-latest
65 | if: (github.event_name == 'push') || ((github.event_name == 'pull_request') && (github.event.pull_request.head.repo.full_name != github.repository))
66 | steps:
67 | - name: 'Checkout'
68 | uses: actions/checkout@v3
69 | with:
70 | fetch-depth: 0 # Fetch all history
71 | - name: List tags
72 | run: |
73 | git tag
74 | - name: Install apt-get dependencies
75 | run: |
76 | sudo apt-get install -y build-essential git python3-dev
77 | sudo apt-get install -y wget tar zlib1g-dev libssl-dev openssl cmake
78 | sudo apt-get clean -y
79 | - name: Install boost
80 | working-directory: ${{ github.workspace }}
81 | run: |
82 | wget https://archives.boost.io/release/1.87.0/source/boost_1_87_0.tar.gz -O /tmp/boost_1_87_0.tar.gz
83 | tar xzvf /tmp/boost_1_87_0.tar.gz
84 | cd boost_1_87_0
85 | ./bootstrap.sh
86 | sudo ./b2 install link=static
87 | - name: Install protobuf
88 | working-directory: ${{ github.workspace }}
89 | run: |
90 | wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz
91 | tar xzvf /tmp/protobuf-all-3.17.3.tar.gz
92 | cd protobuf-3.17.3
93 | mkdir build_make
94 | cd build_make
95 | cmake ../cmake
96 | make
97 | sudo make install
98 | - name: install Catch2
99 | working-directory: ${{ github.workspace }}
100 | run: |
101 | git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git
102 | cd Catch2
103 | mkdir build
104 | cd build
105 | cmake ../
106 | make
107 | sudo make install
108 | - name: Building localproxy
109 | working-directory: ${{ github.workspace }}
110 | run: |
111 | mkdir build
112 | cd build
113 | cmake .. -DBUILD_TESTS=OFF
114 | make
115 | - name: Upload Artifact
116 | uses: actions/upload-artifact@v4
117 | with:
118 | name: localproxy-ubuntu
119 | path: ${{ github.workspace }}/build/bin/localproxy
120 | windows:
121 | runs-on: windows-2019
122 | if: (github.event_name == 'push') || ((github.event_name == 'pull_request') && (github.event.pull_request.head.repo.full_name != github.repository))
123 | steps:
124 | - name: Setup developer command prompt
125 | uses: ilammy/msvc-dev-cmd@v1
126 | with:
127 | toolset: 14.2
128 | arch: x64
129 | - name: Install OpenSSL
130 | run: |
131 | Invoke-WebRequest "https://www.nasm.us/pub/nasm/releasebuilds/2.15.05/win64/nasm-2.15.05-win64.zip" -OutFile "nasm-2.15.05-win64.zip"
132 | Expand-Archive "nasm-2.15.05-win64.zip" -Force -DestinationPath "C:\NASM"
133 | $env:Path += ";C:\NASM\nasm-2.15.05\"
134 | Invoke-WebRequest "https://github.com/openssl/openssl/archive/refs/tags/openssl-3.0.12.zip" -OutFile "openssl-3.0.12.zip"
135 | Expand-Archive "openssl-3.0.12.zip" -Force
136 | cd .\openssl-3.0.12\openssl-openssl-3.0.12\
137 | perl Configure VC-WIN64A
138 | nmake
139 | nmake install
140 | $env:Path += ";C:\Program Files\OpenSSL\bin"
141 | - name: Install Catch2
142 | run: |
143 | git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git
144 | cd Catch2
145 | mkdir build
146 | cd build
147 | cmake -DBUILD_TESTING=OFF -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ../
148 | nmake
149 | nmake install
150 | - name: Install zlib
151 | run: |
152 | git clone -b v1.2.13 https://github.com/madler/zlib.git
153 | cd zlib
154 | mkdir build
155 | cd build
156 | cmake -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ../
157 | nmake
158 | nmake install
159 | $env:Path += ";C:\Program Files (x86)\zlib\bin"
160 | - name: Install boost
161 | run: |
162 | Invoke-WebRequest "https://archives.boost.io/release/1.87.0/source/boost_1_87_0.zip" -OutFile "boost_1_87_0.zip"
163 | Expand-Archive "boost_1_87_0.zip" -Force
164 | cd .\boost_1_87_0\boost_1_87_0\
165 | .\bootstrap.bat
166 | .\b2 toolset=msvc-14.2 address-model=64 install define=_WIN32_WINNT=0x0601 define=BOOST_WINAPI_VERSION_WIN7 link=static
167 | - name: Install protobuf
168 | run: |
169 | cd \
170 | Invoke-WebRequest "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.zip" -OutFile "protobuf-all-3.17.3.zip"
171 | Expand-Archive protobuf-all-3.17.3.zip
172 | cd .\protobuf-all-3.17.3\protobuf-3.17.3\cmake\
173 | mkdir build
174 | cd build
175 | cmake -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release -Dprotobuf_MSVC_STATIC_RUNTIME=OFF ../
176 | nmake
177 | nmake install
178 | $env:Path += ";C:\Program Files (x86)\protobuf\bin"
179 | - name: 'Checkout'
180 | uses: actions/checkout@v3
181 | with:
182 | fetch-depth: 0 # Fetch all history
183 | - name: List tags
184 | run: |
185 | git tag
186 | - name: Build localproxy
187 | working-directory: ${{ github.workspace }}
188 | run: |
189 | mkdir build
190 | cd build
191 |           cmake -DBUILD_TESTS=OFF -DLINK_STATIC_OPENSSL=OFF -DBOOST_PKG_VERSION=1.87.0 -DWIN32_WINNT=0x0601 -DBoost_USE_STATIC_LIBS=ON -DCMAKE_PREFIX_PATH="C:\Boost;C:\Program Files (x86)\Catch2;C:\Program Files (x86)\protobuf;C:\Program Files\OpenSSL" -G "Visual Studio 16 2019" -A x64 ..\
192 | msbuild localproxy.vcxproj -p:Configuration=Release
193 | - name: Upload Artifact
194 | uses: actions/upload-artifact@v4
195 | with:
196 | name: localproxy-windows
197 | path: ${{ github.workspace }}\build\bin\Release\localproxy.exe
198 |
--------------------------------------------------------------------------------
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
1 | name: "CodeQL C++ Build"
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | pull_request:
7 | branches: [ "main" ]
8 | schedule:
9 | - cron: '33 8 * * 1' # Example: Run 8:33 UTC every Monday
10 |
11 | jobs:
12 | analyze:
13 | name: Analyze (C++)
14 | runs-on: ubuntu-latest
15 | permissions:
16 | security-events: write # required to upload CodeQL results
17 | actions: read # required for private repositories
18 | contents: read # required to check out the code
19 | packages: read # required to fetch internal or private CodeQL packs (if used)
20 |
21 | strategy:
22 | fail-fast: false
23 | matrix:
24 | language: [ 'c-cpp' ]
25 | build-mode: [ 'manual' ] # Using manual build steps
26 |
27 | steps:
28 | - name: Checkout repository
29 | uses: actions/checkout@v4
30 | with:
31 | fetch-depth: 0 # Fetch all history
32 |
33 | # === Dependency Installation Steps ===
34 | - name: Install apt-get dependencies
35 | run: |
36 | sudo apt-get update -y
37 | sudo apt-get install -y \
38 | build-essential \
39 | git \
40 | python3-dev \
41 | wget \
42 | tar \
43 | zlib1g-dev \
44 | libssl-dev \
45 | openssl \
46 | cmake
47 | sudo apt-get clean -y
48 |
49 | - name: Install boost (v1.87.0)
50 | run: |
51 | echo "Downloading Boost..."
52 | wget https://sourceforge.net/projects/boost/files/boost/1.87.0/boost_1_87_0.tar.gz/download -O /tmp/boost_1_87_0.tar.gz
53 | echo "Extracting Boost..."
54 | tar xzvf /tmp/boost_1_87_0.tar.gz -C /tmp
55 | cd /tmp/boost_1_87_0
56 | echo "Bootstrapping Boost..."
57 | ./bootstrap.sh --prefix=/usr/local
58 | echo "Building and installing Boost..."
59 | sudo ./b2 install link=static --prefix=/usr/local -j$(nproc)
60 | cd ${{ github.workspace }}
61 | sudo rm -rf /tmp/boost_1_87_0 /tmp/boost_1_87_0.tar.gz
62 |
63 | - name: Install protobuf (v3.17.3)
64 | run: |
65 | echo "Downloading Protobuf..."
66 | wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz
67 | echo "Extracting Protobuf..."
68 | tar xzvf /tmp/protobuf-all-3.17.3.tar.gz -C /tmp
69 | cd /tmp/protobuf-3.17.3
70 | echo "Configuring Protobuf (CMake)..."
71 | mkdir build_cmake
72 | cd build_cmake
73 | cmake ../cmake -DCMAKE_INSTALL_PREFIX=/usr/local -Dprotobuf_BUILD_TESTS=OFF
74 | echo "Building Protobuf..."
75 | make -j$(nproc)
76 | echo "Installing Protobuf..."
77 | sudo make install
78 | cd ${{ github.workspace }}
79 | rm -rf /tmp/protobuf-3.17.3 /tmp/protobuf-all-3.17.3.tar.gz
80 |
81 | # --- Catch2 Installation Step Removed ---
82 |
83 | # Initialize CodeQL AFTER dependencies are installed and BEFORE the project build
84 | - name: Initialize CodeQL
85 | uses: github/codeql-action/init@v3
86 | with:
87 | languages: ${{ matrix.language }}
88 | build-mode: ${{ matrix.build-mode }}
89 | # queries: security-extended,security-and-quality # Optional
90 |
91 | # === Build Project Steps ===
92 | - name: Build localproxy project
93 | shell: bash
94 | run: |
95 | echo "Creating build directory for localproxy..."
96 | mkdir build
97 | cd build
98 | echo "Configuring localproxy with CMake (tests disabled)..."
99 | # Ensure BUILD_TESTS=OFF is used
100 | cmake .. -DBUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release
101 | echo "Building localproxy with make..."
102 | make -j$(nproc)
103 |
104 | # Perform the CodeQL analysis AFTER the build is complete
105 | - name: Perform CodeQL Analysis
106 | uses: github/codeql-action/analyze@v3
107 | with:
108 | category: "/language:${{matrix.language}}"
109 |
--------------------------------------------------------------------------------
/.github/workflows/notification.yml:
--------------------------------------------------------------------------------
1 | name: GitHub Issue notifications
2 |
3 | on: [issue_comment,issues]
4 |
5 |
6 | permissions:
7 | contents: read
8 | issues: read
9 |
10 | env:
11 | ACTION_NAME: ${{ github.event.action }}
12 | EVENT_NAME: ${{ github.event_name }}
13 | ISSUE_NUMBER: ${{ github.event.issue.number }}
14 | ISSUE_TITLE: ${{ github.event.issue.title }}
15 |
16 | jobs:
17 | issue-notification:
18 | runs-on: ubuntu-latest
19 | steps:
20 | - name: Send notifications on Slack
21 | uses: slackapi/slack-github-action@v1.19.0
22 | with:
23 | payload: |
24 | {
25 | "action": "${{env.ACTION_NAME}}",
26 | "eventName": "${{env.EVENT_NAME}}",
27 | "issueNumber": "${{env.ISSUE_NUMBER}}"
28 | }
29 | env:
30 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
31 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release Image Builds
2 |
3 | # This workflow is to allow the building of Docker release images by copying the binary from a base image to a minimum size OS image.
4 |
5 | on:
6 | push:
7 | branches: ['main', 'docker-builds']
8 | pull_request:
9 | branches: ['main', 'docker-builds']
10 | types: [opened, closed]
11 |
12 | env:
13 | PACKAGE_NAME: aws-iot-securetunneling-localproxy
14 | ECR_ACCOUNT_URL: public.ecr.aws
15 | ECR_BASE_UBUNTU: aws-iot-securetunneling-localproxy/ubuntu-base
16 | ECR_BASE_UBI8: aws-iot-securetunneling-localproxy/ubi8-base
17 | ECR_BASE_AMAZONLINUX: aws-iot-securetunneling-localproxy/amazonlinux-base
18 | ECR_BASE_DEBIAN: aws-iot-securetunneling-localproxy/debian-base
19 | ECR_BASE_FEDORA: aws-iot-securetunneling-localproxy/fedora-base
20 | ECR_RELEASE_UBUNTU: aws-iot-securetunneling-localproxy/ubuntu-bin
21 | ECR_RELEASE_UBI8: aws-iot-securetunneling-localproxy/ubi8-bin
22 | ECR_RELEASE_AMAZONLINUX: aws-iot-securetunneling-localproxy/amazonlinux-bin
23 | ECR_RELEASE_DEBIAN: aws-iot-securetunneling-localproxy/debian-bin
24 | ECR_RELEASE_FEDORA: aws-iot-securetunneling-localproxy/fedora-bin
25 |
26 | jobs:
27 | build-docker-image-ubuntu-amd64:
28 | runs-on: ubuntu-latest
29 | permissions:
30 | id-token: write
31 | contents: read
32 | steps:
33 | - name: Configure AWS Credentials
34 | uses: aws-actions/configure-aws-credentials@v1
35 | with:
36 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
37 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
38 | aws-region: us-east-1
39 | - name: Login to ECR
40 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
41 | - name: Checkout
42 | uses: actions/checkout@v2
43 | with:
44 | fetch-depth: 0
45 | - name: Set up QEMU
46 | uses: docker/setup-qemu-action@v2
47 | - name: Set up Docker Buildx
48 | uses: docker/setup-buildx-action@v2
49 | - name: Build Ubuntu Release Image
50 | uses: docker/build-push-action@v4
51 | with:
52 | file: .github/docker-images/bin-images/debian-ubuntu/Dockerfile
53 | build-args: |
54 | OS=ubuntu:22.04
55 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:amd64-latest
56 | context: .
57 | push: true
58 | tags: |
59 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBUNTU }}:amd64-${{ github.sha }}
60 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBUNTU }}:amd64-latest
61 | platforms: linux/amd64
62 | build-docker-image-ubuntu-arm64:
63 | runs-on: ubuntu-latest
64 | permissions:
65 | id-token: write
66 | contents: read
67 | steps:
68 | - name: Configure AWS Credentials
69 | uses: aws-actions/configure-aws-credentials@v1
70 | with:
71 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
72 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
73 | aws-region: us-east-1
74 | - name: Login to ECR
75 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
76 | - name: Checkout
77 | uses: actions/checkout@v2
78 | with:
79 | fetch-depth: 0
80 | - name: Set up QEMU
81 | uses: docker/setup-qemu-action@v2
82 | - name: Set up Docker Buildx
83 | uses: docker/setup-buildx-action@v2
84 | - name: Build Ubuntu Release Image
85 | uses: docker/build-push-action@v4
86 | with:
87 | file: .github/docker-images/bin-images/debian-ubuntu/Dockerfile
88 | build-args: |
89 | OS=ubuntu:22.04
90 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:arm64-latest
91 | context: .
92 | push: true
93 | tags: |
94 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBUNTU }}:arm64-${{ github.sha }}
95 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBUNTU }}:arm64-latest
96 | platforms: linux/arm64
97 | build-docker-image-ubuntu-armv7:
98 | runs-on: ubuntu-latest
99 | permissions:
100 | id-token: write
101 | contents: read
102 | steps:
103 | - name: Configure AWS Credentials
104 | uses: aws-actions/configure-aws-credentials@v1
105 | with:
106 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
107 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
108 | aws-region: us-east-1
109 | - name: Login to ECR
110 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
111 | - name: Checkout
112 | uses: actions/checkout@v2
113 | with:
114 | fetch-depth: 0
115 | - name: Set up QEMU
116 | uses: docker/setup-qemu-action@v2
117 | - name: Set up Docker Buildx
118 | uses: docker/setup-buildx-action@v2
119 | - name: Build ubuntu Release Image
120 | uses: docker/build-push-action@v4
121 | with:
122 | file: .github/docker-images/bin-images/debian-ubuntu/Dockerfile
123 | build-args: |
124 | OS=ubuntu:18.04
125 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBUNTU }}:armv7-latest
126 | context: .
127 | push: true
128 | tags: |
129 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBUNTU }}:armv7-${{ github.sha }}
130 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBUNTU }}:armv7-latest
131 | platforms: linux/arm/v7
132 | build-docker-image-ubi8-amd64:
133 | runs-on: ubuntu-latest
134 | permissions:
135 | id-token: write
136 | contents: read
137 | steps:
138 | - name: Configure AWS Credentials
139 | uses: aws-actions/configure-aws-credentials@v1
140 | with:
141 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
142 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
143 | aws-region: us-east-1
144 | - name: Login to ECR
145 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
146 | - name: Checkout
147 | uses: actions/checkout@v2
148 | with:
149 | fetch-depth: 0
150 | - name: Set up QEMU
151 | uses: docker/setup-qemu-action@v2
152 | - name: Set up Docker Buildx
153 | uses: docker/setup-buildx-action@v2
154 | - name: Build ubi8 Release Image
155 | uses: docker/build-push-action@v4
156 | with:
157 | file: .github/docker-images/bin-images/ubi8/Dockerfile
158 | build-args: |
159 | OS=redhat/ubi8:latest
160 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBI8 }}:amd64-latest
161 | context: .
162 | push: true
163 | tags: |
164 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBI8 }}:amd64-${{ github.sha }}
165 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBI8 }}:amd64-latest
166 | platforms: linux/amd64
167 | build-docker-image-ubi8-arm64:
168 | runs-on: ubuntu-latest
169 | permissions:
170 | id-token: write
171 | contents: read
172 | steps:
173 | - name: Configure AWS Credentials
174 | uses: aws-actions/configure-aws-credentials@v1
175 | with:
176 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
177 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
178 | aws-region: us-east-1
179 | - name: Login to ECR
180 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
181 | - name: Checkout
182 | uses: actions/checkout@v2
183 | with:
184 | fetch-depth: 0
185 | - name: Set up QEMU
186 | uses: docker/setup-qemu-action@v2
187 | - name: Set up Docker Buildx
188 | uses: docker/setup-buildx-action@v2
189 | - name: Build ubi8 Release Image
190 | uses: docker/build-push-action@v4
191 | with:
192 | file: .github/docker-images/bin-images/ubi8/Dockerfile
193 | build-args: |
194 | OS=redhat/ubi8:latest
195 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_UBI8 }}:arm64-latest
196 | context: .
197 | push: true
198 | tags: |
199 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBI8 }}:arm64-${{ github.sha }}
200 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_UBI8 }}:arm64-latest
201 | platforms: linux/arm64
202 | build-docker-image-amazonlinux-amd64:
203 | runs-on: ubuntu-latest
204 | permissions:
205 | id-token: write
206 | contents: read
207 | steps:
208 | - name: Configure AWS Credentials
209 | uses: aws-actions/configure-aws-credentials@v1
210 | with:
211 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
212 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
213 | aws-region: us-east-1
214 | - name: Login to ECR
215 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
216 | - name: Checkout
217 | uses: actions/checkout@v2
218 | with:
219 | fetch-depth: 0
220 | - name: Set up QEMU
221 | uses: docker/setup-qemu-action@v2
222 | - name: Set up Docker Buildx
223 | uses: docker/setup-buildx-action@v2
224 | - name: Build amazonlinux Release Image
225 | uses: docker/build-push-action@v4
226 | with:
227 | file: .github/docker-images/bin-images/amazonlinux/Dockerfile
228 | build-args: |
229 | OS=amazonlinux:2023
230 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_AMAZONLINUX }}:amd64-latest
231 | context: .
232 | push: true
233 | tags: |
234 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_AMAZONLINUX }}:amd64-${{ github.sha }}
235 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_AMAZONLINUX }}:amd64-latest
236 | platforms: linux/amd64
237 | build-docker-image-amazonlinux-arm64:
238 | runs-on: ubuntu-latest
239 | permissions:
240 | id-token: write
241 | contents: read
242 | steps:
243 | - name: Configure AWS Credentials
244 | uses: aws-actions/configure-aws-credentials@v1
245 | with:
246 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
247 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
248 | aws-region: us-east-1
249 | - name: Login to ECR
250 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
251 | - name: Checkout
252 | uses: actions/checkout@v2
253 | with:
254 | fetch-depth: 0
255 | - name: Set up QEMU
256 | uses: docker/setup-qemu-action@v2
257 | - name: Set up Docker Buildx
258 | uses: docker/setup-buildx-action@v2
259 | - name: Build amazonlinux Release Image
260 | uses: docker/build-push-action@v4
261 | with:
262 | file: .github/docker-images/bin-images/amazonlinux/Dockerfile
263 | build-args: |
264 | OS=amazonlinux:2023
265 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_AMAZONLINUX }}:arm64-latest
266 | context: .
267 | push: true
268 | tags: |
269 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_AMAZONLINUX }}:arm64-${{ github.sha }}
270 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_AMAZONLINUX }}:arm64-latest
271 | platforms: linux/arm64
272 | build-docker-image-debian-amd64:
273 | runs-on: ubuntu-latest
274 | permissions:
275 | id-token: write
276 | contents: read
277 | steps:
278 | - name: Configure AWS Credentials
279 | uses: aws-actions/configure-aws-credentials@v1
280 | with:
281 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
282 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
283 | aws-region: us-east-1
284 | - name: Login to ECR
285 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
286 | - name: Checkout
287 | uses: actions/checkout@v2
288 | with:
289 | fetch-depth: 0
290 | - name: Set up QEMU
291 | uses: docker/setup-qemu-action@v2
292 | - name: Set up Docker Buildx
293 | uses: docker/setup-buildx-action@v2
294 | - name: Build debian Release Image
295 | uses: docker/build-push-action@v4
296 | with:
297 | file: .github/docker-images/bin-images/debian-ubuntu/Dockerfile
298 | build-args: |
299 | OS=debian:latest
300 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_DEBIAN }}:amd64-latest
301 | context: .
302 | push: true
303 | tags: |
304 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_DEBIAN }}:amd64-${{ github.sha }}
305 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_DEBIAN }}:amd64-latest
306 | platforms: linux/amd64
307 | build-docker-image-debian-arm64:
308 | runs-on: ubuntu-latest
309 | permissions:
310 | id-token: write
311 | contents: read
312 | steps:
313 | - name: Configure AWS Credentials
314 | uses: aws-actions/configure-aws-credentials@v1
315 | with:
316 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
317 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
318 | aws-region: us-east-1
319 | - name: Login to ECR
320 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
321 | - name: Checkout
322 | uses: actions/checkout@v2
323 | with:
324 | fetch-depth: 0
325 | - name: Set up QEMU
326 | uses: docker/setup-qemu-action@v2
327 | - name: Set up Docker Buildx
328 | uses: docker/setup-buildx-action@v2
329 | - name: Build debian Release Image
330 | uses: docker/build-push-action@v4
331 | with:
332 | file: .github/docker-images/bin-images/debian-ubuntu/Dockerfile
333 | build-args: |
334 | OS=debian:latest
335 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_DEBIAN }}:arm64-latest
336 | context: .
337 | push: true
338 | tags: |
339 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_DEBIAN }}:arm64-${{ github.sha }}
340 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_DEBIAN }}:arm64-latest
341 | platforms: linux/arm64
342 | build-docker-image-fedora-amd64:
343 | runs-on: ubuntu-latest
344 | permissions:
345 | id-token: write
346 | contents: read
347 | steps:
348 | - name: Configure AWS Credentials
349 | uses: aws-actions/configure-aws-credentials@v1
350 | with:
351 | aws-access-key-id: ${{ secrets.ECR_USER_AWS_KEY_ID }}
352 | aws-secret-access-key: ${{ secrets.ECR_USER_AWS_KEY_SECRET }}
353 | aws-region: us-east-1
354 | - name: Login to ECR
355 | run: aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ env.ECR_ACCOUNT_URL }}
356 | - name: Checkout
357 | uses: actions/checkout@v2
358 | with:
359 | fetch-depth: 0
360 | - name: Set up QEMU
361 | uses: docker/setup-qemu-action@v2
362 | - name: Set up Docker Buildx
363 | uses: docker/setup-buildx-action@v2
364 | - name: Build fedora Release Image
365 | uses: docker/build-push-action@v4
366 | with:
367 | file: .github/docker-images/bin-images/fedora/Dockerfile
368 | build-args: |
369 | OS=fedora:latest
370 | BASE_IMAGE=${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_BASE_FEDORA }}:amd64-latest
371 | context: .
372 | push: true
373 | tags: |
374 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_FEDORA }}:amd64-${{ github.sha }}
375 | ${{ env.ECR_ACCOUNT_URL }}/${{ env.ECR_RELEASE_FEDORA }}:amd64-latest
376 | platforms: linux/amd64
377 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | *build*/
3 | .DS_Store
4 | .vscode/
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | jobs:
2 | include:
3 | - os: linux
4 | dist: xenial
5 | - os: linux
6 | dist: bionic
7 | language: cpp
8 | cache: ccache
9 | compiler:
10 | - gcc
11 | addons:
12 | apt:
13 | sources:
14 | - ubuntu-toolchain-r-test
15 | packages:
16 | - gcc
17 | - cmake
18 | - zlibc
19 | - libssl-dev
20 | install:
21 | - wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz
22 | - tar xzf /tmp/protobuf-all-3.17.3.tar.gz
23 | - cd "protobuf-3.17.3"
24 | - mkdir -p build
25 | - cd build
26 | - cmake ../cmake
27 | - make
28 | - sudo make install
29 | - cd $TRAVIS_BUILD_DIR
30 | - git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git
31 | - cd Catch2
32 | - mkdir -p build
33 | - cd build
34 | - cmake ../
35 | - make
36 | - sudo make install
37 | - wget https://boostorg.jfrog.io/artifactory/main/release/1.76.0/source/boost_1_76_0.tar.gz -O /tmp/boost.tar.gz
38 | - tar xzf /tmp/boost.tar.gz
39 | - cd boost_1_76_0
40 | - ./bootstrap.sh
41 | - sudo ./b2 install
42 | - cd $TRAVIS_BUILD_DIR
43 | script:
44 | - mkdir build
45 | - cd build
46 | - cmake ../
47 | - make
48 |
49 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## 2019-12-07
2 | ### Changed
3 | - CMakeLists.txt to always link libatomic, not only when cross-compiling
4 |
5 | ## 2019-12-07
6 | ### Added
7 | - Predictive region endpoint for cn-north-1 and cn-northwest-1
8 | - Allow settings-json file to create region endpoint overrides as needed
9 | - Destination mode test and more test functionality
10 |
11 | ### Changed
12 | - Log statement on socket bind failure to include the port
13 |
14 | ### Fixed
15 | - Behavior for -b/--bind-address CLI argument in destination mode to use random port
16 |
17 | ### Removed
18 | - Most non-helpful comments
19 |
20 | ## 2019-11-27
21 | ### Fixed
22 | - README.md install commands for ProtocolBuffers
23 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.2 FATAL_ERROR)
2 |
3 | set(AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME localproxy)
4 | set(AWS_TUNNEL_LOCAL_PROXY_LIB_NAME lproxy)
5 | project(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} CXX)
6 |
7 | option(BUILD_TESTS "Build tests" OFF)
8 | option(LINK_STATIC_OPENSSL "Use static openssl libs" ON)
9 | option(GIT_VERSION "Updates the version number using the Git commit history" ON)
10 | if(BUILD_TESTS)
11 | set(AWS_TUNNEL_LOCAL_PROXY_TEST_NAME localproxytest)
12 | project(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} CXX)
13 | endif(BUILD_TESTS)
14 |
15 | #########################################
16 | # Generate Version Information from Git #
17 | #########################################
18 | find_package(Git)
19 | include(CMakeLists.txt.versioning)
20 | # Now we inject the version information into a header that is accessible from the local proxy executable
21 | configure_file("src/Version.h.in" "${PROJECT_BINARY_DIR}/Version.h")
22 |
23 | ######################################
24 | # Section : Disable in-source builds #
25 | ######################################
26 |
27 | if (${PROJECT_SOURCE_DIR} STREQUAL ${PROJECT_BINARY_DIR})
28 | message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt and CMakeFiles folder.")
29 | endif ()
30 |
31 | ########################################
32 | # Section : Common Build settings     #
33 | ########################################
34 | # Set required compiler standard to standard c++14. Disable extensions.
35 | set(CMAKE_CXX_STANDARD 14) # C++14
36 | set(CMAKE_CXX_STANDARD_REQUIRED ON) #...is required...
37 | set(CMAKE_CXX_EXTENSIONS OFF) #...without compiler extensions like gnu++11
38 |
39 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
40 |
41 | # Configure Compiler flags
42 | if (UNIX OR APPLE)
43 | set(CUSTOM_COMPILER_FLAGS "-O2 -D_FORTIFY_SOURCE=2 -fPIE -fstack-protector-strong -Wall -Werror")
44 | set(TEST_COMPILER_FLAGS "${CUSTOM_COMPILER_FLAGS} -D_AWSIOT_TUNNELING_NO_SSL")
45 | elseif (WIN32 OR MSVC)
46 | set(CUSTOM_COMPILER_FLAGS "/W4 /DYNAMICBASE /NXCOMPAT /analyze")
47 | set(TEST_COMPILER_FLAGS "${CUSTOM_COMPILER_FLAGS} /D_AWSIOT_TUNNELING_NO_SSL")
48 | add_definitions(-D_WIN32_WINNT=${WIN32_WINNT})
49 | endif ()
50 |
51 |
52 | #########################################
53 | # Threading library needed for GCC #
54 | #########################################
55 | find_package(Threads)
56 |
57 | #########################################
58 | # Generate protobuf files #
59 | #########################################
60 | set_property(GLOBAL PROPERTY PROTOBUF_USE_STATIC_LIBS ON) #this flag doesn't actually seem to work yet but it's documented
61 | set(PROTOBUF_PKG_VERSION "3.17.3" CACHE STRING "")
62 | find_package(Protobuf ${PROTOBUF_PKG_VERSION} REQUIRED)
63 | string(REPLACE ${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_STATIC_LIBRARY_SUFFIX} Protobuf_LITE_STATIC_LIBRARY ${Protobuf_LITE_LIBRARY})
64 | include_directories(${Protobuf_INCLUDE_DIRS})
65 |
66 | include_directories(${CMAKE_CURRENT_BINARY_DIR}) #needed to include generated protobuf headers
67 | protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${PROJECT_SOURCE_DIR}/resources/Message.proto)
68 |
69 | #########################################
70 | # OpenSSL dependency #
71 | #########################################
72 | set(OPENSSL_USE_STATIC_LIBS TRUE)
73 | find_package(OpenSSL REQUIRED)
74 |
75 | if(LINK_STATIC_OPENSSL)
76 | include_directories(${OPENSSL_INCLUDE_DIR})
77 | string(REPLACE ${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_STATIC_LIBRARY_SUFFIX} OpenSSL_STATIC_SSL_LIBRARY ${OPENSSL_SSL_LIBRARY})
78 | string(REPLACE ${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_STATIC_LIBRARY_SUFFIX} OpenSSL_STATIC_CRYPTO_LIBRARY ${OPENSSL_CRYPTO_LIBRARY})
79 | endif()
80 |
81 | #########################################
82 | # Test framework dependency #
83 | #########################################
84 | #probably comment out for cross compiler as Catch2 is really just a header library
85 | if(BUILD_TESTS)
86 | find_package(Catch2 3 REQUIRED)
87 | endif(BUILD_TESTS)
88 |
89 | #########################################
90 | # Boost dependencies #
91 | #########################################
92 | set(Boost_USE_STATIC_LIBS ON)
93 | set(Boost_USE_DEBUG_RUNTIME OFF)
94 | #set_property(GLOBAL PROPERTY Boost_USE_MULTITHREADED ON)
95 | set(BOOST_PKG_VERSION "1.87.0" CACHE STRING "")
96 | find_package(Boost ${BOOST_PKG_VERSION} REQUIRED COMPONENTS system log log_setup thread program_options date_time filesystem chrono)
97 | include_directories(${Boost_INCLUDE_DIRS})
98 | foreach(BOOST_LIB ${Boost_LIBRARIES})
99 | string(REPLACE ${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_STATIC_LIBRARY_SUFFIX} BOOST_STATIC_LIB ${BOOST_LIB})
100 | list(APPEND Boost_STATIC_LIBRARIES ${BOOST_STATIC_LIB})
101 | endforeach()
102 |
103 | #########################################
104 | # Target : Build aws-tunnel-local-proxy #
105 | #########################################
106 | file(GLOB ALL_SOURCES ${PROJECT_SOURCE_DIR}/src/*.cpp)
107 |
108 | set(UTIL_SOURCE ${PROJECT_SOURCE_DIR}/src/config/ConfigFile.cpp ${PROJECT_SOURCE_DIR}/src/Url.cpp)
109 | set(CORE_SOURCES ${PROJECT_SOURCE_DIR}/src/TcpAdapterProxy.cpp ${PROJECT_SOURCE_DIR}/src/ProxySettings.cpp ${PROJECT_SOURCE_DIR}/src/WebProxyAdapter.cpp ${PROTO_HDRS} ${PROTO_SRCS} ${PROJECT_SOURCE_DIR}/src/WebSocketStream.cpp)
110 | set(MAIN_SOURCES ${PROJECT_SOURCE_DIR}/src/main.cpp ${CORE_SOURCES} ${UTIL_SOURCE})
111 | add_executable(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${MAIN_SOURCES})
112 |
113 | if(BUILD_TESTS)
114 | file(GLOB TEST_CODE ${PROJECT_SOURCE_DIR}/test/*.cpp)
115 | set(TEST_SOURCES ${TEST_CODE} ${CORE_SOURCES} ${UTIL_SOURCE})
116 | add_executable(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${TEST_SOURCES})
117 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} Catch2::Catch2WithMain)
118 | endif(BUILD_TESTS)
119 |
120 | #libatomic ensured for all platforms except OSX and WINDOWS
121 | if(NOT APPLE AND NOT MSVC AND NOT BUILD_TESTS)
122 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} atomic)
123 | elseif(NOT APPLE AND NOT MSVC AND BUILD_TESTS)
124 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} atomic)
125 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} atomic)
126 | endif()
127 |
128 | include_directories(${PROJECT_SOURCE_DIR}/src)
129 |
130 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${CMAKE_THREAD_LIBS_INIT})
131 | if(LINK_STATIC_OPENSSL)
132 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${OpenSSL_STATIC_SSL_LIBRARY})
133 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${OpenSSL_STATIC_CRYPTO_LIBRARY})
134 | else()
135 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} OpenSSL::SSL)
136 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} OpenSSL::Crypto)
137 | endif()
138 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${OpenSSL_STATIC_SSL_LIBRARY})
139 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${OpenSSL_STATIC_CRYPTO_LIBRARY})
140 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${Boost_STATIC_LIBRARIES})
141 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${Protobuf_LITE_STATIC_LIBRARY})
142 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${CMAKE_DL_LIBS})
143 | set_property(TARGET ${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} APPEND_STRING PROPERTY COMPILE_FLAGS ${CUSTOM_COMPILER_FLAGS})
144 |
145 | if(BUILD_TESTS)
146 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${CMAKE_THREAD_LIBS_INIT})
147 | if(LINK_STATIC_OPENSSL)
148 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${OpenSSL_STATIC_SSL_LIBRARY})
149 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${OpenSSL_STATIC_CRYPTO_LIBRARY})
150 | else()
151 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} OpenSSL::SSL)
152 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} OpenSSL::Crypto)
153 | endif()
154 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${OpenSSL_STATIC_SSL_LIBRARY})
155 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${OpenSSL_STATIC_CRYPTO_LIBRARY})
156 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${Boost_STATIC_LIBRARIES})
157 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${Protobuf_LITE_STATIC_LIBRARY})
158 | target_link_libraries(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${CMAKE_DL_LIBS})
159 | set_property(TARGET ${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} APPEND_STRING PROPERTY COMPILE_FLAGS ${TEST_COMPILER_FLAGS})
160 | endif(BUILD_TESTS)
161 |
162 | install(TARGETS ${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} DESTINATION "bin")
163 |
--------------------------------------------------------------------------------
/CMakeLists.txt.versioning:
--------------------------------------------------------------------------------
1 | # This CMake module is used to generate semantic version information for the AWS IoT Secure Tunneling Local Proxy
2 | # and inject it into our source code, making the version information available to the compiled binary
3 | # so that it can be written to the logs for debugging purposes. To increment the major/minor versions
4 | # of the Secure Tunneling Local Proxy, this module expects to find a git tag in the form of "v1.0", where the first number
5 | # is the major version and the second number is the minor version. This module will search starting at HEAD
6 | # until it finds the latest versioned tag - git tags that do not start with "v" will be ignored.
7 | #
8 | # Additionally, the PATCH version of the version number is automatically incremented based on the number of commits
9 | # that we see between the current revision and the latest Git tag. For more information on Semantic Versioning,
10 | # check out https://semver.org/ and for more information on Git tags, check out https://git-scm.com/book/en/v2/Git-Basics-Tagging
11 |
12 | cmake_minimum_required(VERSION 3.10)
13 |
14 | # Marking Secure Tunneling Local Proxy directory safe
15 | execute_process(COMMAND git config --global --add safe.directory ${CMAKE_CURRENT_SOURCE_DIR})
16 |
17 | # Check to make sure we have Git info for this package
18 | execute_process(COMMAND git log --pretty=format:'%h' -n 1
19 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
20 | OUTPUT_VARIABLE GIT_INFO)
21 |
22 | function (load_version_from_file)
23 | # Git is not available (this is the case if the source is packaged as an archive), get version from file
24 | file(STRINGS ${CMAKE_SOURCE_DIR}/.version ${PROJECT_NAME}_VERSION_LIST)
25 | string(REPLACE "*" ";" ${PROJECT_NAME}_VERSION_LIST ${${PROJECT_NAME}_VERSION_LIST})
26 | # Set partial versions
27 | list(GET ${PROJECT_NAME}_VERSION_LIST 0 ${PROJECT_NAME}_VERSION_STRING_FULL)
28 | list(GET ${PROJECT_NAME}_VERSION_LIST 1 ${PROJECT_NAME}_VERSION_STRING)
29 | list(GET ${PROJECT_NAME}_VERSION_LIST 2 ${PROJECT_NAME}_VERSION_MAJOR)
30 | list(GET ${PROJECT_NAME}_VERSION_LIST 3 ${PROJECT_NAME}_VERSION_MINOR)
31 | list(GET ${PROJECT_NAME}_VERSION_LIST 4 ${PROJECT_NAME}_VERSION_PATCH)
32 | list(GET ${PROJECT_NAME}_VERSION_LIST 5 ${PROJECT_NAME}_VERSION_AHEAD)
33 | list(GET ${PROJECT_NAME}_VERSION_LIST 6 ${PROJECT_NAME}_VERSION_GIT_SHA)
34 | unset(${PROJECT_NAME}_VERSION_LIST)
35 |
36 | message("-- Failed to infer patch version from git, loaded AWS IoT Secure Tunneling Local Proxy version from file: ${${PROJECT_NAME}_VERSION_STRING_FULL}")
37 | endfunction()
38 |
39 | if (GIT_VERSION AND NOT ${GIT_INFO} STREQUAL "")
40 | message("-- Using Git to calculate AWS IoT Secure Tunneling Local Proxy version information...")
41 |
42 | # Get last tag from git - this only matches tags starting with v, so we ignore non-versioning tags
43 | execute_process(COMMAND ${GIT_EXECUTABLE} describe --abbrev=0 --tags --match "v[0-9]*"
44 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
45 | OUTPUT_VARIABLE ${PROJECT_NAME}_VERSION_STRING
46 | OUTPUT_STRIP_TRAILING_WHITESPACE
47 | RESULT_VARIABLE exit)
48 |
49 | if (NOT exit EQUAL 0)
50 | load_version_from_file()
51 | return()
52 | endif()
53 |
54 | # Determine how many commits since last tag
55 | execute_process(COMMAND ${GIT_EXECUTABLE} rev-list ${${PROJECT_NAME}_VERSION_STRING}..HEAD --count
56 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
57 | OUTPUT_VARIABLE ${PROJECT_NAME}_VERSION_AHEAD
58 | OUTPUT_STRIP_TRAILING_WHITESPACE
59 | RESULT_VARIABLE exit)
60 |
61 | if (NOT exit EQUAL 0)
62 | load_version_from_file()
63 | return()
64 | endif()
65 |
66 | # Get current commit SHA from git
67 | execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD
68 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
69 | OUTPUT_VARIABLE ${PROJECT_NAME}_VERSION_GIT_SHA
70 | OUTPUT_STRIP_TRAILING_WHITESPACE
71 | RESULT_VARIABLE exit)
72 |
73 | if (NOT exit EQUAL 0)
74 | load_version_from_file()
75 | return()
76 | endif()
77 |
78 | # Collect the partial versions into a list
79 | string(REGEX MATCHALL "[0-9]+" ${PROJECT_NAME}_PARTIAL_VERSION_LIST
80 | ${${PROJECT_NAME}_VERSION_STRING})
81 |
82 | # Set the version numbers
83 | list(GET ${PROJECT_NAME}_PARTIAL_VERSION_LIST
84 | 0 ${PROJECT_NAME}_VERSION_MAJOR)
85 | list(GET ${PROJECT_NAME}_PARTIAL_VERSION_LIST
86 | 1 ${PROJECT_NAME}_VERSION_MINOR)
87 | set(${PROJECT_NAME}_VERSION_PATCH ${${PROJECT_NAME}_VERSION_AHEAD})
88 |
89 | # Unset the list
90 | unset(${PROJECT_NAME}_PARTIAL_VERSION_LIST)
91 |
92 | # Set full project version string
93 | set(${PROJECT_NAME}_VERSION_STRING_FULL
94 | v${${PROJECT_NAME}_VERSION_MAJOR}.${${PROJECT_NAME}_VERSION_MINOR}.${${PROJECT_NAME}_VERSION_PATCH}-${${PROJECT_NAME}_VERSION_GIT_SHA})
95 |
96 |
97 | message("-- Generated AWS IoT Secure Tunneling Local Proxy version: ${${PROJECT_NAME}_VERSION_STRING_FULL}")
98 | # Save version to file (which will be used when Git is not available
99 | # or VERSION_UPDATE_FROM_GIT is disabled)
100 | file(WRITE ${CMAKE_SOURCE_DIR}/.version ${${PROJECT_NAME}_VERSION_STRING_FULL}
101 | "*" ${${PROJECT_NAME}_VERSION_STRING}
102 | "*" ${${PROJECT_NAME}_VERSION_MAJOR}
103 | "*" ${${PROJECT_NAME}_VERSION_MINOR}
104 | "*" ${${PROJECT_NAME}_VERSION_PATCH}
105 | "*" ${${PROJECT_NAME}_VERSION_AHEAD}
106 | "*" ${${PROJECT_NAME}_VERSION_GIT_SHA})
107 |
108 | # exit from cmake processing
109 | return()
110 | endif()
111 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *main* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 |
52 |
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
62 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # FROM amazonlinux:2
2 | FROM amazonlinux:latest as builder
3 | ARG OPENSSL_CONFIG
4 |
5 | # Install Prerequisites
6 |
7 | RUN yum check-update; yum upgrade -y && \
8 | yum install -y git boost-devel autoconf automake \
9 | wget libtool curl make gcc-c++ unzip cmake3 openssl11-devel \
10 | python-devel which
11 |
12 | # Install Dependencies
13 |
14 | RUN mkdir /home/dependencies
15 | WORKDIR /home/dependencies
16 |
17 | RUN wget https://github.com/madler/zlib/archive/v1.2.13.tar.gz -O /tmp/zlib-1.2.13.tar.gz && \
18 | tar xzvf /tmp/zlib-1.2.13.tar.gz && \
19 | cd zlib-1.2.13 && \
20 | ./configure && \
21 | make && \
22 | make install && \
23 | cd /home/dependencies
24 |
25 | RUN wget https://boostorg.jfrog.io/artifactory/main/release/1.81.0/source/boost_1_81_0.tar.gz -O /tmp/boost.tar.gz && \
26 | tar xzvf /tmp/boost.tar.gz && \
27 | cd boost_1_81_0 && \
28 | ./bootstrap.sh && \
29 | ./b2 install link=static && \
30 | cd /home/dependencies
31 |
32 | RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz -O /tmp/protobuf-all-3.17.3.tar.gz && \
33 | tar xzvf /tmp/protobuf-all-3.17.3.tar.gz && \
34 | cd protobuf-3.17.3 && \
35 | mkdir build && \
36 | cd build && \
37 | cmake3 ../cmake && \
38 | make && \
39 | make install && \
40 | cd /home/dependencies
41 |
42 | RUN git clone https://github.com/openssl/openssl.git && \
43 | cd openssl && \
44 | git checkout OpenSSL_1_1_1-stable && \
45 | ./Configure $OPENSSL_CONFIG && \
46 | make depend && \
47 | make all && \
48 | cd /home/dependencies
49 |
50 | RUN git clone --branch v3.7.0 https://github.com/catchorg/Catch2.git && \
51 | cd Catch2 && \
52 | mkdir build && \
53 | cd build && \
54 | cmake3 ../ && \
55 | make && \
56 | make install && \
57 | cd /home/dependencies
58 |
59 | RUN git clone https://github.com/aws-samples/aws-iot-securetunneling-localproxy && \
60 | cd aws-iot-securetunneling-localproxy && \
61 | mkdir build && \
62 | cd build && \
63 | cmake3 ../ && \
64 | make
65 |
66 | # If you'd like to use this Dockerfile to build your LOCAL revisions to the
67 | # local proxy source code, uncomment the following three commands and comment
68 | # out the command above. Otherwise, we'll build the local proxy container
69 | # with fresh source from the GitHub repo.
70 |
71 | #RUN mkdir /home/dependencies/aws-iot-securetunneling-localproxy
72 | #
73 | #COPY ./ /home/dependencies/aws-iot-securetunneling-localproxy/
74 | #
75 | #RUN cd /home/dependencies/aws-iot-securetunneling-localproxy && \
76 | # rm -rf build/ && \
77 | # mkdir build && \
78 | # cd build && \
79 | # cmake3 ../ && \
80 | # make
81 |
82 | RUN mkdir -p /home/aws-iot-securetunneling-localproxy && \
83 | cd /home/aws-iot-securetunneling-localproxy && \
84 | cp /home/dependencies/aws-iot-securetunneling-localproxy/build/bin/* /home/aws-iot-securetunneling-localproxy/
85 |
86 | RUN rm -rf /home/dependencies
87 |
88 | WORKDIR /home/aws-iot-securetunneling-localproxy/
89 |
90 | ## Actual docker image
91 |
92 | FROM amazonlinux:2
93 |
94 | # Install openssl for libssl dependency.
95 |
96 | RUN yum check-update; yum upgrade -y && \
97 | yum install -y openssl11 wget libatomic && \
98 | rm -rf /var/cache/yum && \
99 | yum clean all
100 |
101 | RUN mkdir -p /home/aws-iot-securetunneling-localproxy/certs && \
102 | cd /home/aws-iot-securetunneling-localproxy/certs && \
103 | wget https://www.amazontrust.com/repository/AmazonRootCA1.pem && \
104 | openssl11 rehash ./
105 |
106 | # Copy the binaries from builder stage.
107 |
108 | COPY --from=builder /home/aws-iot-securetunneling-localproxy /home/aws-iot-securetunneling-localproxy
109 |
110 | WORKDIR /home/aws-iot-securetunneling-localproxy
111 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | AWS IoT Secured Tunneling local proxy Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
--------------------------------------------------------------------------------
/THIRD_PARTY_LICENSES:
--------------------------------------------------------------------------------
1 | ** Boost; version 1.68.0 -- http://www.boost.org/
2 | Boost Software License - Version 1.0 - August 17th, 2003
3 |
4 | Permission is hereby granted, free of charge, to any person or organization
5 | obtaining a copy of the software and accompanying documentation covered by
6 | this license (the "Software") to use, reproduce, display, distribute,
7 | execute, and transmit the Software, and to prepare derivative works of the
8 | Software, and to permit third-parties to whom the Software is furnished to
9 | do so, all subject to the following:
10 |
11 | The copyright notices in the Software and this entire statement, including
12 | the above license grant, this restriction and the following disclaimer,
13 | must be included in all copies of the Software, in whole or in part, and
14 | all derivative works of the Software, unless such copies or derivative
15 | works are solely in the form of machine-executable object code generated by
16 | a source language processor.
17 |
18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
21 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
22 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
23 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 | DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/docker-build.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo This script is deprecated, please refer to the Docker Images section of the README for the recommended method of pulling and running images from ECR.

# Select the OpenSSL build target from the host architecture:
# ARM64 machines (reported as aarch64 or arm64) use the aarch64 config,
# every other machine falls back to the generic 64-bit config.
architecture=$(uname -m)

case "${architecture}" in
    aarch64|arm64) openssl_config=linux-aarch64 ;;
    *)             openssl_config=linux-generic64 ;;
esac

echo Architecture: $architecture
echo OpenSSL configurations: $openssl_config
docker build --build-arg OPENSSL_CONFIG=$openssl_config -t aws-iot-securetunneling-localproxy:latest .
16 |
--------------------------------------------------------------------------------
/docker-run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo This script is deprecated, please refer to the Docker Images section of the README for the recommended method of pulling and running images from ECR.

# Accept an optional -p <port> flag naming a port to publish from the container.
while getopts p: flag; do
    case "${flag}" in
        p) port=${OPTARG};;
    esac
done

# No port given: start a plain interactive container.
# Port given: expose it and map it 1:1 onto the host.
if [ -z $port ]; then
    docker run --name localproxy --rm -it aws-iot-securetunneling-localproxy:latest bash;
else
    echo Running the container with exposed port: $port
    docker run --name localproxy --expose=$port -p $port:$port --rm -it aws-iot-securetunneling-localproxy:latest bash;
fi
18 |
19 |
--------------------------------------------------------------------------------
/example/crosscompile/raspberry_pi_3_b_plus.cmake.tc:
--------------------------------------------------------------------------------
# CMake toolchain file for cross-compiling the local proxy for a
# Raspberry Pi 3 B+ (32-bit ARM, hard-float) from a Linux build host.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)

# Target sysroot containing the Pi's libraries/headers.
# NOTE: these paths are an example for one particular build host — adjust
# them to wherever your crosstool-NG sysroot and toolchain are installed.
set(CMAKE_SYSROOT /home/fedora/cross_builds/sysroots/arm-unknown-linux-gnueabihf)

# Cross toolchain location and the C/C++ cross compilers inside it.
set(tools /home/fedora/x-tools/arm-unknown-linux-gnueabihf)
set(CMAKE_C_COMPILER ${tools}/bin/arm-unknown-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER ${tools}/bin/arm-unknown-linux-gnueabihf-g++)

# Programs (e.g. code generators) come from the host; libraries, headers
# and CMake packages are searched only inside the target sysroot.
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
14 |
15 |
--------------------------------------------------------------------------------
/resources/Message.proto:
--------------------------------------------------------------------------------
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
syntax = "proto3";

package com.amazonaws.iot.securedtunneling;

option java_outer_classname = "Protobuf";
option optimize_for = LITE_RUNTIME;

// The single message envelope exchanged over the secure-tunneling web socket.
// Both control messages (stream/connection lifecycle) and data payloads use
// this type, discriminated by `type`.
message Message {
    Type type = 1;
    // Identifier of the logical stream this message belongs to.
    int32 streamId = 2;
    // When true the receiver may ignore this message if it does not
    // understand it.
    bool ignorable = 3;
    // Application bytes; meaningful for DATA messages.
    bytes payload = 4;
    // Service identifier for multiplexed tunnels (v2+ protocol).
    string serviceId = 5;
    // Full list of service ids available on the tunnel (SERVICE_IDS message).
    repeated string availableServiceIds = 6;
    // Identifier of an individual TCP connection within a stream
    // (v3 protocol, simultaneous-connections support).
    uint32 connectionId = 7;

    enum Type {
        UNKNOWN = 0;
        DATA = 1;
        STREAM_START = 2;
        STREAM_RESET = 3;
        SESSION_RESET = 4;
        SERVICE_IDS = 5;
        CONNECTION_START = 6;
        CONNECTION_RESET = 7;
    }
}
30 |
--------------------------------------------------------------------------------
/src/LocalproxyConfig.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | #pragma once
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include "ProxySettings.h"
25 | #include "TcpConnection.h"
26 | #include "TcpServer.h"
27 | #include "TcpClient.h"
28 | #include "Message.pb.h"
29 | #include "Url.h"
30 |
31 | namespace aws {
32 | namespace iot {
33 | namespace securedtunneling {
34 | enum proxy_mode {
35 | UNKNOWN = 0,
36 | SOURCE = 1,
37 | DESTINATION = 2
38 | };
39 |
40 | /**
41 | * This struct is for the global localproxy configurations, most of these properties are provided directly
42 | * by the user, directly deduced from user input or fallback to a default value.
43 | */
44 | struct LocalproxyConfig
45 | {
46 | /**
47 | * Proxy server endpoint URL, either provided as input or deduced from the region
48 | */
49 | std::string proxy_host { };
50 | /**
51 | * Proxy server endpoint port, default to 443 unless the provided endpoint is not https, then it's 80.
52 | */
53 | std::uint16_t proxy_port{ 0 };
54 | /**
55 | * The web proxy endpoint URL. This will be set only if a web proxy is necessary.
56 | */
57 | std::string web_proxy_host { };
58 | /**
59 | * The web proxy endpoint port. This will be set only if a web proxy is necessary. defaults to 3128.
60 | */
61 | std::uint16_t web_proxy_port { 0 };
62 | /**
63 | * The web proxy authN. This will be set only if an web proxy is necessary and it requires authN.
64 | */
65 | std::string web_proxy_auth { };
66 | /**
67 | * This flag indicates whether the connection to the web proxy will be use TLS or not.
68 | */
69 | bool is_web_proxy_using_tls { };
70 | /**
71 | * The tunnel access token which the user gets when they open the tunnel.
72 | */
73 | std::string access_token { };
74 | proxy_mode mode{ proxy_mode::UNKNOWN };
75 | /**
76 | * A unique client-token to ensure only the agent which generated the token may connect to a tunnel
77 | */
78 | std::string client_token;
79 | /**
80 | * local address to bind to for listening in source mode or a local socket address for destination mode,
81 | * defaults localhost.
82 | */
83 | boost::optional bind_address;
84 | /**
85 | * Adds the directory containing certificate authority files to be used for performing verification
86 | */
87 | boost::optional additional_ssl_verify_path;
88 | /**
89 | * Turn off SSL host verification
90 | */
91 | bool no_ssl_host_verify {false};
92 | std::function on_listen_port_assigned;
93 | /**
94 | * the configuration directory where service identifier mappings are stored. If not specified,
95 | * will read mappings from default directory ./config (same directory where local proxy binary is running)
96 | */
97 | std::vector config_files;
98 | /**
99 | * Store mapping serviceId -> address:port
100 | * The end point will store either source listening or destination service depends on the mode of local proxy.
101 | */
102 | std::unordered_map serviceId_to_endpoint_map;
103 |
104 | /**
105 | * A flag to judge if v2 local proxy needs to fallback to communicate using v1 local proxy message format.
106 | * v1 local proxy format fallback will be enabled when a tunnel is opened with no or 1 service id.
107 | * If this is set to true, it means that v2 local proxy won't validate service id field.
108 | */
109 | bool is_v1_message_format {false};
110 | /**
111 | * A flag to judge if v3 local proxy needs to fallback to communicate using v2 local proxy message format.
112 | */
113 | bool is_v2_message_format {false};
114 | };
115 | }
116 | }
117 | }
118 |
--------------------------------------------------------------------------------
/src/ProxySettings.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #include
4 | #include
5 | #include
6 | #include "ProxySettings.h"
7 |
8 | namespace aws { namespace iot { namespace securedtunneling { namespace settings {
9 | using boost::property_tree::ptree;
10 |
11 | char const * const KEY_DEFAULT_BIND_ADDRESS = "tunneling.proxy.default_bind_address";
12 | std::string DEFAULT_DEFAULT_BIND_ADDRESS = "localhost";
13 |
14 | char const * const KEY_PROXY_ENDPOINT_HOST_FORMAT = "tunneling.proxy.endpoint_format";
15 | std::string DEFAULT_PROXY_ENDPOINT_HOST_FORMAT = "data.tunneling.iot.%s.amazonaws.com";
16 |
17 | char const * const KEY_PROXY_ENDPOINT_REGION_MAP = "tunneling.proxy.region_endpoint_overrides";
18 |
19 | char const * const KEY_DATA_LENGTH_SIZE = "tunneling.proxy.message.data_length_size";
20 | std::size_t const DEFAULT_DATA_LENGTH_SIZE = 2;
21 |
22 | char const * const KEY_MAX_DATA_FRAME_SIZE = "tunneling.proxy.message.data_frame_max_size";
23 | std::size_t const DEFAULT_MAX_DATA_FRAME_SIZE = DEFAULT_MESSAGE_MAX_SIZE + DEFAULT_DATA_LENGTH_SIZE;
24 |
25 | char const * const KEY_TCP_CONNECTION_RETRY_COUNT = "tunneling.proxy.tcp.connection_retry_count";
26 | std::int32_t const DEFAULT_TCP_CONNECTION_RETRY_COUNT = 5;
27 |
28 | char const * const KEY_TCP_CONNECTION_RETRY_DELAY_MS = "tunneling.proxy.tcp.connection_retry_delay_ms";
29 | std::uint32_t const DEFAULT_TCP_CONNECTION_RETRY_DELAY_MS = 2500;
30 |
31 | char const * const KEY_MESSAGE_MAX_PAYLOAD_SIZE = "tunneling.proxy.message.max_payload_size";
32 | //if this is too small with respect to the peer, this client will overflow
33 | std::size_t const DEFAULT_MESSAGE_MAX_PAYLOAD_SIZE = 63 * 1024;
34 |
35 | char const * const KEY_MESSAGE_MAX_SIZE = "tunneling.proxy.message.max_size";
36 | std::size_t const DEFAULT_MESSAGE_MAX_SIZE = 64 * 1024;
37 |
38 | char const * const KEY_MAX_ACTIVE_CONNECTIONS = "tunneling.proxy.tcp.max_active_connections";
39 | std::uint32_t const DEFAULT_MAX_ACTIVE_CONNECTIONS = 128;
40 |
41 | char const * const KEY_WEB_SOCKET_PING_PERIOD_MS = "tunneling.proxy.websocket.ping_period_ms";
42 | std::uint32_t const DEFAULT_WEB_SOCKET_PING_PERIOD_MS = 20000;
43 |
44 | char const * const KEY_WEB_SOCKET_CONNECT_RETRY_DELAY_MS = "tunneling.proxy.websocket.retry_delay_ms";
45 | std::uint32_t const DEFAULT_WEB_SOCKET_CONNECT_RETRY_DELAY_MS = 2500;
46 |
47 | char const * const KEY_WEB_SOCKET_CONNECT_RETRY_COUNT = "tunneling.proxy.websocket.connect_retry_count";
48 | std::int32_t const DEFAULT_WEB_SOCKET_CONNECT_RETRY_COUNT = -1;
49 |
50 | char const * const KEY_WEB_SOCKET_DATA_ERROR_RETRY = "tunneling.proxy.websocket.reconnect_on_data_error";
51 | bool const DEFAULT_WEB_SOCKET_DATA_ERROR_RETRY = true;
52 |
53 | char const * const KEY_WEB_SOCKET_SUBPROTOCOL = "tunneling.proxy.websocket.subprotocol";
54 | std::string const DEFAULT_WEB_SOCKET_SUBPROTOCOL = "aws.iot.securetunneling-3.0";
55 |
56 | char const * const KEY_WEB_SOCKET_MAX_FRAME_SIZE = "tunneling.proxy.websocket.max_frame_size";
57 | std::size_t const DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE = DEFAULT_MAX_DATA_FRAME_SIZE * 2;
58 |
59 | char const * const KEY_TCP_READ_BUFFER_SIZE = "tunneling.proxy.tcp.read_buffer_size";
60 | std::size_t const DEFAULT_TCP_READ_BUFFER_SIZE = DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE;
61 |
62 | char const * const KEY_TCP_WRITE_BUFFER_SIZE = "tunneling.proxy.tcp.write_buffer_size";
63 | std::size_t const DEFAULT_TCP_WRITE_BUFFER_SIZE = DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE;
64 |
65 | char const * const KEY_WEB_SOCKET_WRITE_BUFFER_SIZE = "tunneling.proxy.websocket.write_buffer_size";
66 | std::size_t const DEFAULT_WEB_SOCKET_WRITE_BUFFER_SIZE = DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE;
67 |
68 | char const * const KEY_WEB_SOCKET_READ_BUFFER_SIZE = "tunneling.proxy.websocket.read_buffer_size";
69 | std::size_t const DEFAULT_WEB_SOCKET_READ_BUFFER_SIZE = DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE;
70 |
71 | //Create a more concise way to apply a settings default value only if it isn't already in the
72 | //ptree. Macro saves a bunch of repeat typing of the key in code and needing to repeat the type
73 | #define ADD_SETTING_DEFAULT(settings, key) \
74 | if(!settings.get_optional>(KEY_##key).has_value()) \
75 | { \
76 | settings.add>(KEY_##key, DEFAULT_##key); \
77 | }
78 |
79 | void apply_default_settings(ptree & settings)
80 | {
81 | ADD_SETTING_DEFAULT(settings, DEFAULT_BIND_ADDRESS);
82 | ADD_SETTING_DEFAULT(settings, DATA_LENGTH_SIZE);
83 | ADD_SETTING_DEFAULT(settings, MAX_DATA_FRAME_SIZE);
84 | ADD_SETTING_DEFAULT(settings, TCP_CONNECTION_RETRY_COUNT);
85 | ADD_SETTING_DEFAULT(settings, TCP_CONNECTION_RETRY_DELAY_MS);
86 | ADD_SETTING_DEFAULT(settings, TCP_READ_BUFFER_SIZE);
87 | ADD_SETTING_DEFAULT(settings, MESSAGE_MAX_PAYLOAD_SIZE);
88 | ADD_SETTING_DEFAULT(settings, MESSAGE_MAX_SIZE);
89 | ADD_SETTING_DEFAULT(settings, MAX_ACTIVE_CONNECTIONS);
90 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_PING_PERIOD_MS);
91 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_CONNECT_RETRY_DELAY_MS);
92 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_CONNECT_RETRY_COUNT);
93 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_DATA_ERROR_RETRY);
94 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_SUBPROTOCOL);
95 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_MAX_FRAME_SIZE);
96 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_WRITE_BUFFER_SIZE);
97 | ADD_SETTING_DEFAULT(settings, WEB_SOCKET_READ_BUFFER_SIZE);
98 |
99 | apply_region_overrides(settings);
100 | }
101 |
102 | void apply_region_overrides(ptree & settings)
103 | {
104 | settings.put((boost::format("%1%.%2%") % KEY_PROXY_ENDPOINT_REGION_MAP % "cn-north-1").str().c_str(),
105 | "data.tunneling.iot.cn-north-1.amazonaws.com.cn");
106 | settings.put((boost::format("%1%.%2%") % KEY_PROXY_ENDPOINT_REGION_MAP % "cn-northwest-1").str().c_str(),
107 | "data.tunneling.iot.cn-northwest-1.amazonaws.com.cn");
108 | }
109 | }}}}
110 |
--------------------------------------------------------------------------------
/src/ProxySettings.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 |
5 | #include
6 | #include
7 | namespace aws { namespace iot { namespace securedtunneling { namespace settings {
8 | using boost::property_tree::ptree;
9 |
10 | extern char const * const KEY_PROXY_ENDPOINT_HOST_FORMAT;
11 | extern std::string DEFAULT_PROXY_ENDPOINT_HOST_FORMAT;
12 |
13 | extern char const * const KEY_PROXY_ENDPOINT_REGION_MAP;
14 |
15 | extern char const * const KEY_DEFAULT_BIND_ADDRESS;
16 | extern std::string DEFAULT_DEFAULT_BIND_ADDRESS;
17 |
18 | extern char const * const KEY_DATA_LENGTH_SIZE;
19 | extern std::size_t const DEFAULT_DATA_LENGTH_SIZE;
20 |
21 | extern char const * const KEY_MAX_DATA_FRAME_SIZE;
22 | extern std::size_t const DEFAULT_MAX_DATA_FRAME_SIZE;
23 |
24 | extern char const * const KEY_TCP_CONNECTION_RETRY_COUNT;
25 | extern std::int32_t const DEFAULT_TCP_CONNECTION_RETRY_COUNT;
26 |
27 | extern char const * const KEY_TCP_CONNECTION_RETRY_DELAY_MS;
28 | extern std::uint32_t const DEFAULT_TCP_CONNECTION_RETRY_DELAY_MS;
29 |
30 | extern char const * const KEY_MESSAGE_MAX_PAYLOAD_SIZE;
31 | extern std::size_t const DEFAULT_MESSAGE_MAX_PAYLOAD_SIZE;
32 |
33 | extern char const * const KEY_MESSAGE_MAX_SIZE;
34 | extern std::size_t const DEFAULT_MESSAGE_MAX_SIZE;
35 |
36 | extern char const * const KEY_MAX_ACTIVE_CONNECTIONS;
37 | extern std::uint32_t const DEFAULT_MAX_ACTIVE_CONNECTIONS;
38 |
39 | extern char const * const KEY_WEB_SOCKET_PING_PERIOD_MS;
40 | extern std::uint32_t const DEFAULT_WEB_SOCKET_PING_PERIOD_MS;
41 |
42 | extern char const * const KEY_WEB_SOCKET_CONNECT_RETRY_DELAY_MS;
43 | extern std::uint32_t const DEFAULT_WEB_SOCKET_CONNECT_RETRY_DELAY_MS;
44 |
45 | extern char const * const KEY_WEB_SOCKET_CONNECT_RETRY_COUNT;
46 | extern std::int32_t const DEFAULT_WEB_SOCKET_CONNECT_RETRY_COUNT;
47 |
48 | extern char const * const KEY_WEB_SOCKET_DATA_ERROR_RETRY;
49 | extern bool const DEFAULT_WEB_SOCKET_DATA_ERROR_RETRY;
50 |
51 | extern char const * const KEY_WEB_SOCKET_SUBPROTOCOL;
52 | extern std::string const DEFAULT_WEB_SOCKET_SUBPROTOCOL;
53 |
54 | extern char const * const KEY_WEB_SOCKET_MAX_FRAME_SIZE;
55 | extern std::size_t const DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE;
56 |
57 | extern char const * const KEY_TCP_WRITE_BUFFER_SIZE;
58 | extern std::size_t const DEFAULT_TCP_WRITE_BUFFER_SIZE;
59 |
60 | extern char const * const KEY_TCP_READ_BUFFER_SIZE;
61 | extern std::size_t const DEFAULT_TCP_READ_BUFFER_SIZE;
62 |
63 | extern char const * const KEY_WEB_SOCKET_WRITE_BUFFER_SIZE;
64 | extern std::size_t const DEFAULT_WEB_SOCKET_WRITE_BUFFER_SIZE;
65 |
66 | extern char const * const KEY_WEB_SOCKET_READ_BUFFER_SIZE;
67 | extern std::size_t const DEFAULT_WEB_SOCKET_READ_BUFFER_SIZE;
68 |
69 | //Create a more concise way to apply a settings default value only if it isn't already in the
70 | //ptree. Macro saves a bunch of repeat typing of the key in code and needing to repeat the type
71 | #define GET_SETTING(settings, key) (settings.get> \
73 | (::aws::iot::securedtunneling::settings::KEY_##key, \
74 | ::aws::iot::securedtunneling::settings::DEFAULT_##key))
75 |
76 | void apply_default_settings(ptree & settings);
77 | void apply_region_overrides(ptree & settings);
78 | }}}}
79 |
--------------------------------------------------------------------------------
/src/TcpClient.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 | #include
5 | #include
6 | #include
7 | #include "Message.pb.h"
8 |
9 | namespace aws { namespace iot { namespace securedtunneling { namespace connection {
10 | class tcp_client
11 | {
12 | public:
13 | typedef boost::shared_ptr pointer;
14 | tcp_client(boost::asio::io_context & io_context, std::size_t write_buf_size, std::size_t read_buf_size, std::size_t ws_write_buf_size)
15 | : resolver_(io_context)
16 | {
17 |
18 | }
19 | static pointer create(boost::asio::io_context& io_context, std::size_t const & write_buf_size, std::size_t const & read_buf_size, std::size_t const & ws_write_buf_size)
20 | {
21 | return pointer(new tcp_client(io_context, write_buf_size, read_buf_size, ws_write_buf_size));
22 | }
23 |
24 | tcp::resolver resolver_;
25 |
26 | std::unordered_map connectionId_to_tcp_connection_map;
27 |
28 | // function object defines what to do after set up a tcp socket
29 | std::function after_setup_tcp_socket = nullptr;
30 |
31 | // function object defines what to do receiving control message: stream start
32 | std::function on_receive_stream_start = nullptr;
33 | };
34 | }}}}
--------------------------------------------------------------------------------
/src/TcpConnection.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "Message.pb.h"
10 |
11 | namespace aws { namespace iot { namespace securedtunneling { namespace connection {
12 | using message = com::amazonaws::iot::securedtunneling::Message;
13 | using boost::asio::ip::tcp;
14 | class tcp_connection
15 | : public boost::enable_shared_from_this
16 | {
17 | public:
18 | typedef boost::shared_ptr pointer;
19 |
20 | static pointer create(boost::asio::io_context& io_context, std::size_t const & write_buf_size, std::size_t const & read_buf_size, std::size_t ws_write_buf_size, uint32_t connection_id)
21 | {
22 | return pointer(new tcp_connection(io_context, write_buf_size, read_buf_size, ws_write_buf_size, connection_id));
23 | }
24 |
25 | tcp::socket& socket()
26 | {
27 | return socket_;
28 | }
29 |
30 | tcp_connection(boost::asio::io_context & io_context, std::size_t write_buf_size, std::size_t read_buf_size, std::size_t ws_write_buf_size, uint32_t connection_id)
31 | : socket_(io_context)
32 | , tcp_write_buffer_(write_buf_size)
33 | , tcp_read_buffer_(read_buf_size)
34 | , web_socket_data_write_buffer_(ws_write_buf_size)
35 | , connection_id_(connection_id)
36 |
37 | {
38 | }
39 |
40 | tcp::socket socket_;
41 | // A buffer holding data writes to customer's application
42 | boost::beast::multi_buffer tcp_write_buffer_;
43 | // A buffer holding data reads from customer's application
44 | boost::beast::flat_buffer tcp_read_buffer_;
45 | /**
46 | * A buffer holding data that will be sent to secure tunneling server through web socket connection.
47 | * This buffer will only hold data belongs to its own stream in a multiplexed tunnel.
48 | */
49 | boost::beast::flat_buffer outgoing_message_buffer_;
50 | //Buffer sequence storing the raw bytes read from the tcp socket reads
51 | //to send over web socket. The bytes in this buffer represent the raw application
52 | //data not already packaged in protobuf messages. This allows us to
53 | //condense smaller TCP read chunks to bigger web socket writes. It also makes
54 | //it impossible to "inject" a non-data message in data sequence order
55 | boost::beast::multi_buffer web_socket_data_write_buffer_;
56 |
57 | uint32_t connection_id_; // assigned connection_id for tcp connection
58 |
59 | // Is this tcp socket currently writing
60 | bool is_tcp_socket_writing_{ false };
61 | // Is this tcp socket currently reading
62 | bool is_tcp_socket_reading_{ false };
63 | // function object defines what to do after send a message
64 | std::function after_send_message;
65 | // function object defines what to do upon receiving control message
66 | std::function on_control_message = nullptr;
67 | // function object defines what to do upon receiving data message
68 | std::function on_data_message = nullptr;
69 | // function object defines what to do if there is a tcp error occurred
70 | std::function on_tcp_error = nullptr;
71 | // function object defines what to do when tcp_write_buffer_ drain has completed
72 | std::function on_tcp_write_buffer_drain_complete = nullptr;
73 | // function object defines what to do when web_socket_data_write_buffer_ drain has completed
74 | std::function on_web_socket_write_buffer_drain_complete = nullptr;
75 | };
76 | }}}}
--------------------------------------------------------------------------------
/src/TcpServer.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include "TcpConnection.h"
9 |
10 | namespace aws { namespace iot { namespace securedtunneling { namespace connection {
11 | using boost::asio::ip::tcp;
12 | class tcp_server
13 | {
14 | public:
15 | typedef boost::shared_ptr pointer;
16 | tcp_server(boost::asio::io_context & io_context, std::size_t write_buf_size, std::size_t read_buf_size, std::size_t ws_write_buf_size)
17 | : acceptor_(io_context)
18 | , resolver_(io_context)
19 | {
20 | highest_connection_id = 0;
21 | }
22 |
23 | static pointer create(boost::asio::io_context& io_context, std::size_t const & write_buf_size, std::size_t const & read_buf_size, std::size_t const & ws_write_buf_size)
24 | {
25 | return pointer(new tcp_server(io_context, write_buf_size, read_buf_size, ws_write_buf_size));
26 | }
27 |
28 | tcp::acceptor & acceptor()
29 | {
30 | return acceptor_;
31 | }
32 |
33 | tcp::acceptor acceptor_;
34 | tcp::resolver resolver_;
35 |
36 | std::unordered_map connectionId_to_tcp_connection_map;
37 |
38 | std::atomic_uint32_t highest_connection_id;
39 |
40 | // function object defines what to do after set up a tcp socket
41 | std::function after_setup_tcp_socket = nullptr;
42 | };
43 | }}}}
--------------------------------------------------------------------------------
/src/Url.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #include "Url.h"
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | #include
11 | #include
12 | #include
13 | #include
14 | using namespace std;
15 |
// Construct a url by parsing `url_s` immediately; propagates the
// std::invalid_argument thrown by parse() when the URL is malformed.
aws::iot::securedtunneling::url::url(const std::string &url_s) {
    parse(url_s);
}
19 |
20 | void aws::iot::securedtunneling::url::parse(const string& url_s)
21 | {
22 | auto get_substring = [](const string& s, const size_t& start, const size_t& end) {
23 | return s.substr(start, end - start );
24 | };
25 |
26 | // parse protocol
27 | const string protocol_end("://");
28 | const size_t protocol_end_i = url_s.find(protocol_end);
29 | if (protocol_end_i == string::npos) {
30 | BOOST_LOG_TRIVIAL(debug) << "No protocol is provided in the URL, assuming the default protocol: http.";
31 | protocol = "http";
32 | } else {
33 | BOOST_LOG_TRIVIAL(trace) << "Extracting protocol";
34 | protocol = get_substring(url_s, 0, protocol_end_i);
35 | if (protocol.empty()) {
36 | throw invalid_argument("Invalid URL, missing protocol");
37 | }
38 | transform(protocol.begin(), protocol.end(), protocol.begin(), ::tolower);
39 | BOOST_LOG_TRIVIAL(info) << "Parsed URL protocol";
40 | }
41 |
42 | // parse authentication
43 | const size_t authentication_end_i = url_s.find_last_of('@');
44 | const bool is_authN_included = authentication_end_i != string::npos;
45 | if (is_authN_included) {
46 | authentication = aws::iot::securedtunneling::url::url_decode(
47 | get_substring(url_s, protocol_end_i + protocol_end.size(), authentication_end_i)
48 | );
49 | if (authentication.empty())
50 | throw invalid_argument("Empty authentication, if you don't need to authentication information, remove `@`");
51 | if (authentication.find(':') == string::npos)
52 | throw invalid_argument("Missing the colon between the username and password in URL.");
53 | if (authentication.length() < 3)
54 | throw invalid_argument("Invalid authentication format, missing either username or password.");
55 | BOOST_LOG_TRIVIAL(debug) << "Parsed basic auth credentials for the URL";
56 | } else {
57 | BOOST_LOG_TRIVIAL(debug) << "No authentication is found in the URL, assuming no authentication is required.";
58 | }
59 |
60 | // parse the host and port
61 | const size_t host_i = is_authN_included ? authentication_end_i + 1 : protocol_end_i + protocol_end.size();
62 | const size_t port_i = url_s.find(':', host_i);
63 |
64 | host = get_substring(url_s, host_i, port_i);
65 |
66 | if (host.empty()) {
67 | throw invalid_argument("Missing HTTP host address");
68 | }
69 | transform(host.begin(), host.end(), host.begin(), ::tolower);
70 | if (port_i != string::npos) {
71 | const string port_s = get_substring(url_s, port_i + 1, url_s.length());
72 | try {
73 | port = static_cast(stoi(port_s));
74 | } catch (exception &e) {
75 | BOOST_LOG_TRIVIAL(fatal) << "Failed to parse the port";
76 | BOOST_LOG_TRIVIAL(fatal) << e.what();
77 | throw invalid_argument(e.what());
78 | }
79 | }
80 | }
81 |
82 | string aws::iot::securedtunneling::url::url_decode(const string &url_s) {
83 | {
84 | string out;
85 | out.clear();
86 | out.reserve(url_s.size());
87 | for (std::size_t i = 0; i < url_s.size(); ++i) {
88 | if (url_s[i] == '%') {
89 | if (i + 3 <= url_s.size()) {
90 | int value = 0;
91 | std::istringstream is(url_s.substr(i + 1, 2));
92 | if (is >> std::hex >> value) {
93 | out += static_cast(value);
94 | i += 2;
95 | } else {
96 | throw invalid_argument("Invalid Hex number");
97 | }
98 | } else {
99 | throw invalid_argument("Invalid URL token");
100 | }
101 | }
102 | else if (url_s[i] == '+') {
103 | out += ' ';
104 | } else {
105 | out += url_s[i];
106 | }
107 | }
108 | return out;
109 | }
110 | }
--------------------------------------------------------------------------------
/src/Url.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 |
5 | #include
6 | #include
7 | namespace aws {
8 | namespace iot {
9 | namespace securedtunneling {
// Minimal URL value type used for proxy settings: parses
// "[protocol://][user:pass@]host[:port]" into its public fields.
10 | class url {
11 | private:
// Populates protocol/authentication/host/port from url_s; throws
// std::invalid_argument on malformed input (see Url.cpp).
12 | void parse(const std::string& url_s);
13 | public:
// Parses url_s immediately; throws std::invalid_argument on malformed input.
14 | url(const std::string& url_s);
// Parsed components; authentication holds the percent-decoded "user:password"
// part and stays empty when the URL has no '@' section.
15 | std::string protocol, host, authentication;
// Remains 0 when the URL does not specify a port.
16 | uint16_t port {0};
// Decodes "%XX" escapes and '+' (as space); throws std::invalid_argument on bad escapes.
17 | static std::string url_decode(const std::string& url_s);
18 | };
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/src/Version.h.in:
--------------------------------------------------------------------------------
1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | /**
5 | * CMake will inject version information into this file at compile time, and will
6 | * make it accessible to our source code as "Version.h"
7 | */
8 | #ifndef AWS_IOT_SECURE_TUNNELING_LOCAL_PROXY_VERSION_H
9 | #define AWS_IOT_SECURE_TUNNELING_LOCAL_PROXY_VERSION_H
10 |
11 | #define LOCAL_PROXY_VERSION_FULL "${${PROJECT_NAME}_VERSION_STRING_FULL}"
12 | #define LOCAL_PROXY_VERSION "${${PROJECT_NAME}_VERSION_STRING}"
13 |
14 | #endif // AWS_IOT_SECURE_TUNNELING_LOCAL_PROXY_VERSION_H
15 |
--------------------------------------------------------------------------------
/src/WebProxyAdapter.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | #include "WebProxyAdapter.h"
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
#include <memory> // std::make_shared / std::make_unique
#include <vector> // std::vector<char> response buffer in on_http_connect_write
11 |
12 | namespace base64 = boost::beast::detail::base64;
13 | using boost::log::trivial::trace;
14 | using boost::log::trivial::debug;
15 | using boost::log::trivial::info;
16 | using boost::log::trivial::warning;
17 | using boost::log::trivial::error;
18 | using boost::log::trivial::fatal;
19 |
20 | namespace aws {
21 | namespace iot {
22 | namespace securedtunneling {
23 |
24 | constexpr int BUFFER_SIZE_IN_BYTES = 10*1000; // 10 KB
25 | constexpr int HTTP_VERSION = 11; // HTTP/1.1
// Stores the logger pointer and a reference to the caller-owned configuration;
// both must outlive this adapter (localproxy_config is a reference member, not
// a copy — see the class declaration).
26 | WebProxyAdapter::WebProxyAdapter(logger* log,
27 | const LocalproxyConfig &localproxy_config):
28 | log(log), localproxy_config(localproxy_config) { }
29 |
30 | void WebProxyAdapter::async_connect(BoostCallbackFunc on_tcp_tunnel_callback,
31 | const shared_ptr &wss,
32 | const tcp::endpoint &web_proxy_endpoint) {
33 | on_tcp_tunnel = std::make_unique(std::move(on_tcp_tunnel_callback));
34 | websocket_stream = wss.get();
35 | BOOST_LOG_SEV(*log, trace) << "Establishing TCP connection with the Web Proxy";
36 | websocket_stream->get_tcp_socket().async_connect(web_proxy_endpoint, [this](error_code const &ec) {
37 | if (ec) {
38 | BOOST_LOG_SEV(*log, error) << (boost::format("Could not connect to Web Proxy: %1%") % ec.message()).str();
39 | (*on_tcp_tunnel)(WebProxyAdapterErrc::TcpConnectError);
40 | } else {
41 | BOOST_LOG_SEV(*log, debug) << "Connected successfully with Web Proxy";
42 | websocket_stream->lowest_layer().set_option(tcp::no_delay(true));
43 | if (localproxy_config.is_web_proxy_using_tls) {
44 | async_ssl_handshake();
45 | } else {
46 | on_tcp_connect();
47 | }
48 | }
49 | });
50 | }
51 |
52 | void WebProxyAdapter::async_ssl_handshake() {
53 | if (!localproxy_config.no_ssl_host_verify) {
54 | websocket_stream->get_web_proxy_ssl_stream()->set_verify_mode(ssl::verify_peer | ssl::verify_fail_if_no_peer_cert);
55 | websocket_stream->get_web_proxy_ssl_stream()->set_verify_callback(boost::asio::ssl::host_name_verification(localproxy_config.web_proxy_host));
56 | }
57 | websocket_stream->get_web_proxy_ssl_stream()->async_handshake(ssl::stream_base::client, [this](error_code const &ec) {
58 | if (ec) {
59 | BOOST_LOG_SEV(*log, error) << (boost::format("Could not perform SSL handshake with Web Proxy: %1%") % ec.message()).str();
60 | (*on_tcp_tunnel)(WebProxyAdapterErrc::SslHandshakeError);
61 | } else {
62 | BOOST_LOG_SEV(*log, debug) << "Performed SSL handshake with Web proxy successfully";
63 | on_tcp_connect();
64 | }
65 | });
66 | }
67 |
// Builds and asynchronously sends the HTTP CONNECT request that asks the Web
// proxy to open a raw TCP tunnel to the secure-tunneling proxy server; on a
// successful write, continues to on_http_connect_write().
68 | void WebProxyAdapter::on_tcp_connect() {
69 | BOOST_LOG_SEV(*log, trace) << "Preparing HTTP CONNECT request";
70 | request.version(HTTP_VERSION);
71 | request.method(http::verb::connect);
// The CONNECT target is the final destination (host:port) the proxy should tunnel to.
72 | const std::string host = localproxy_config.proxy_host +
73 | ":" + std::to_string(localproxy_config.proxy_port);
74 | request.target(host);
75 | if (!localproxy_config.web_proxy_auth.empty()) {
76 | BOOST_LOG_SEV(*log, trace) << "Web proxy AuthN found, adding them to the request";
77 | request.set(http::field::host, host);
78 | std::string credentials;
// Two-step resize: first to the worst-case encoded size, then shrink to the
// number of bytes base64::encode actually wrote.
79 | credentials.resize(base64::encoded_size(localproxy_config.web_proxy_auth.size()));
80 | credentials.resize(base64::encode(&credentials[0],
81 | localproxy_config.web_proxy_auth.data(),
82 | localproxy_config.web_proxy_auth.size()));
// Basic scheme per RFC 7617; the scheme token is case-insensitive, so the
// lowercase "basic" is accepted by conforming proxies.
83 | request.set(http::field::proxy_authorization, "basic " + credentials);
84 | }
85 | BOOST_LOG_SEV(*log, trace) << "Sending HTTP CONNECT";
86 | auto on_async_write = [this](error_code const &ec,
87 | std::size_t bytes_transferred) {
88 | boost::ignore_unused(bytes_transferred);
89 | if (ec) {
90 | BOOST_LOG_SEV(*log, error) << (boost::format(
91 | "Could not send HTTP CONNECT request to the Web proxy: %1%") % ec.message()).str();
92 | (*on_tcp_tunnel)(WebProxyAdapterErrc::HttpWriteRequestError);
93 | } else {
94 | BOOST_LOG_SEV(*log, debug) << "Successfully sent HTTP CONNECT to the Web proxy";
95 | on_http_connect_write();
96 | }
97 | };
// Write over the SSL stream when the hop to the Web proxy is TLS, else the raw socket.
98 | if (localproxy_config.is_web_proxy_using_tls) {
99 | http::async_write(*websocket_stream->get_web_proxy_ssl_stream(), request, on_async_write);
100 | } else {
101 | http::async_write(websocket_stream->get_tcp_socket(), request, on_async_write);
102 | }
103 | }
104 |
105 | void WebProxyAdapter::on_http_connect_write() {
106 | BOOST_LOG_SEV(*log, trace) << "Waiting for HTTP CONNECT response from the Web proxy";
107 | char *response_buffer = new char[BUFFER_SIZE_IN_BYTES];
108 | read_buffer = boost::asio::buffer(response_buffer, BUFFER_SIZE_IN_BYTES);
109 | auto on_read = [this, response_buffer](error_code const &ec,
110 | std::size_t bytes_transferred){
111 | if (ec) {
112 | BOOST_LOG_SEV(*log, error) << (boost::format(
113 | "Could not read HTTP CONNECT response from the Web proxy: %1%") % ec.message()).str();
114 | (*on_tcp_tunnel)(WebProxyAdapterErrc::ServerError);
115 | }
116 | BOOST_LOG_SEV(*log, trace) << "Parsing the HTTPS response from the Web proxy";
117 | boost::ignore_unused(bytes_transferred);
118 | error_code parser_ec{};
119 | http::response_parser parser{response};
120 | parser.put(boost::asio::buffer(read_buffer),parser_ec);
121 | response = parser.release();
122 | const http::status_class status_class = http::to_status_class(response.result());
123 | if (status_class != http::status_class::successful) {
124 | BOOST_LOG_SEV(*log, error) << boost::format(
125 | "HTTP CONNECT request failed with response code: %1%(%2%)") % response.result_int() %
126 | response.result();
127 | }
128 | BOOST_LOG_SEV(*log, debug) << "Full response from the Web proxy:\n"
129 | << boost::beast::buffers_to_string(read_buffer);
130 | switch (status_class) {
131 | case http::status_class::successful:
132 | if (response.result() == http::status::ok) {
133 | BOOST_LOG_SEV(*log, info) << "TCP tunnel established successfully";
134 | } else {
135 | BOOST_LOG_SEV(*log, warning)
136 | << "TCP tunnel established but with unexpected response code from the Web proxy";
137 | }
138 | (*on_tcp_tunnel)(WebProxyAdapterErrc::Success);
139 | break;
140 | case http::status_class::redirection:
141 | BOOST_LOG_SEV(*log, error) << "Make sure you're using the correct Web proxy address";
142 | (*on_tcp_tunnel)(WebProxyAdapterErrc::RedirectionError);
143 | break;
144 | case http::status_class::client_error:
145 | BOOST_LOG_SEV(*log, error) << "Make sure the Web proxy is configured properly";
146 | (*on_tcp_tunnel)(WebProxyAdapterErrc::ClientError);
147 | break;
148 | case http::status_class::server_error:
149 | BOOST_LOG_SEV(*log, error) << "Web proxy error, make sure to check your server's logs";
150 | (*on_tcp_tunnel)(WebProxyAdapterErrc::ServerError);
151 | break;
152 | default:
153 | BOOST_LOG_SEV(*log, error) << "Unexpected response code";
154 | (*on_tcp_tunnel)(WebProxyAdapterErrc::OtherHttpError);
155 | break;
156 | }
157 | delete[] response_buffer;
158 | };
159 | // Initially I tried to use boost::beast::http::async_read, but for some reason the beast implementation
160 | // of that method disrupted the TCP connection causing "stream truncated" error during the SSL handshake
161 | // with the proxy server. So I had to read from the TCP socket directly and parse the response. This could
162 | // be something to test when we upgrade to a newer version of boost to see if boost::beast::http::async_read
163 | // is fixed in it or not.
164 | if (localproxy_config.is_web_proxy_using_tls) {
165 | websocket_stream->get_web_proxy_ssl_stream()->async_read_some(read_buffer, on_read);
166 | } else {
167 | websocket_stream->get_tcp_socket().async_receive(read_buffer, on_read);
168 | }
169 | }
170 | }
171 | }
172 | }
173 |
--------------------------------------------------------------------------------
/src/WebProxyAdapter.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 |
5 | #include "LocalproxyConfig.h"
6 | #include "WebSocketStream.h"
7 |
8 | #include
9 | #include
10 |
11 | using std::shared_ptr;
12 | using std::unique_ptr;
13 | using std::function;
14 | using boost::system::error_code;
15 | using tcp = boost::asio::ip::tcp;
16 | using logger = boost::log::sources::severity_logger;
17 | namespace http = boost::beast::http;
18 | namespace ssl = boost::asio::ssl;
19 | using BoostCallbackFunc = function;
20 |
21 | namespace aws {
22 | namespace iot {
23 | namespace securedtunneling {
24 | /**
25 | * This class will act as an adapter to do the extra work needed to establish a TCP tunnel and then
26 | * hand control back to the caller to continue its execution. This class is designed to fit in boost asio's
27 | * single-threaded event loop model so it's not thread-safe and it's the responsibility of the consumer to
28 | * synchronize if they decided to access it from different threads.
29 | */
30 | class WebProxyAdapter {
31 | private:
32 | /**
33 | * A pointer the boost logger that will be used by this adapter for logging.
34 | */
35 | logger* log;
36 | /**
37 | * A copy of the localproxy configurations
38 | */
39 | const LocalproxyConfig &localproxy_config;
40 | /**
41 | * A request object that will be used while establishing the TCP tunnel, it needs to be a member field
42 | * because all work is done asynchronously so this object needs to outlive the function scope.
43 | */
44 | http::request request;
45 | /**
46 | * A response object that will be used while establishing the TCP tunnel, it needs to be a member field
47 | * because all work is done asynchronously so this object needs to outlive the function scope.
48 | */
49 | http::response response;
50 | /**
51 | * A request buffer that will be used while establishing the TCP tunnel, it needs to be a member field
52 | * because all work is done asynchronously so this object needs to outlive the function scope.
53 | */
54 | boost::asio::mutable_buffer read_buffer;
55 | WebSocketStream* websocket_stream = nullptr;
56 | unique_ptr on_tcp_tunnel = nullptr;
57 | /**
58 | * An async member function that will be invoked internally once a TCP connection is established between
59 | * the localproxy and the Web proxy, it's responsible for sending the HTTP CONNECT request to
60 | * start the the TCP tunnel.
61 | *
62 | * @param on_tcp_tunnel The callback that will be invoked once the tcp tunnel is established
63 | */
64 | void on_tcp_connect();
65 | /**
66 | * A async function that will be invoked internally once the the HTTP CONNECT request is sent, it is
67 | * responsible for reading and parsing the response of the CONNECT request and handing back control the
68 | * the callback provided by the adapter consumer "on_tcp_tunnel" with the appropirate input based on
69 | * whether the TCP tunnel was established successfully or not.
70 | *
71 | * @param on_tcp_tunnel The callback that will be invoked once the tcp tunnel is established
72 | */
73 | void on_http_connect_write();
74 | void async_ssl_handshake();
75 | public:
76 | /**
77 | * The public constructor
78 | *
79 | * @param log A pointer the boost logger that will be used by this adapter for logging.
80 | * @param localproxy_config the localproxy configurations
81 | */
82 | WebProxyAdapter(logger* log,
83 | const LocalproxyConfig &localproxy_config);
84 | /**
85 | * An async public method to establish the TCP tunnel
86 | *
87 | * @param on_tcp_tunnel The callback that will be invoked once the tcp tunnel is established
88 | * @param tcp_socket The TCP socket over which the TCP tunnel will be established
89 | * @param web_proxy_endpoint The IP of the Web proxy
90 | */
91 | void async_connect(BoostCallbackFunc on_tcp_tunnel,
92 | const shared_ptr &wss,
93 | const tcp::endpoint &web_proxy_endpoint);
94 | };
95 | }
96 | }
97 | }
98 |
// Outcome codes reported to the async_connect completion callback; each value
// maps 1:1 to a message in WebProxyAdapterErrc_category::message() below.
99 | enum class WebProxyAdapterErrc
100 | {
101 | Success = 0,
102 | TcpConnectError = 1,
103 | HttpWriteRequestError = 2,
104 | ServerError = 3,
105 | ClientError = 4,
106 | RedirectionError = 5,
107 | OtherHttpError = 6,
108 | SslHandshakeError = 7,
109 | };
110 |
111 | namespace boost
112 | {
113 | namespace system
114 | {
115 | // Tell the C++ 11 STL metaprogramming that enum WebProxyAdapterErrc
116 | // is registered with the standard error code system
117 | template <> struct is_error_code_enum : std::true_type
118 | {
119 | };
120 | }
121 | }
122 |
123 | // Define a custom error code category derived from boost::system::error_category
124 | class WebProxyAdapterErrc_category : public boost::system::error_category
125 | {
126 | public:
127 | // Return a short descriptive name for the category
128 | virtual const char *name() const noexcept override final { return "WebProxyAdapterError"; }
129 | // Return what each enum means in text
130 | virtual std::string message(int c) const override final
131 | {
132 | switch(static_cast(c))
133 | {
134 | case WebProxyAdapterErrc::Success:
135 | return "TCP Tunnel established successfully";
136 | case WebProxyAdapterErrc::ServerError:
137 | return "The Web proxy server responded with 5xx to the HTTP CONNECT request";
138 | case WebProxyAdapterErrc::ClientError:
139 | return "The Web proxy server responded with 4xx to the HTTP CONNECT request";
140 | case WebProxyAdapterErrc::RedirectionError:
141 | return "The Web proxy server responded with 3xx to the HTTP CONNECT request";
142 | case WebProxyAdapterErrc::TcpConnectError:
143 | return "Failed to establish the TCP connection to the Web proxy";
144 | case WebProxyAdapterErrc::OtherHttpError:
145 | return "The Web proxy didn't respond with 200 response code.";
146 | case WebProxyAdapterErrc::HttpWriteRequestError:
147 | return "Failed to send to the CONNECT request to the Web proxy";
148 | case WebProxyAdapterErrc::SslHandshakeError:
149 | return "Failed to perform the SSL handshake with the Web proxy";
150 | default:
151 | return "unknown";
152 | }
153 | }
154 | };
155 |
156 | // Declare a global function returning a static instance of the WebProxyAdapter Error category
// (The function deliberately shares its name with the class above; the
// elaborated "static class ..." type specifier in the body disambiguates the
// two. `inline` gives the header-defined function a single definition across
// translation units.)
157 | extern inline const WebProxyAdapterErrc_category &WebProxyAdapterErrc_category()
158 | {
159 | static class WebProxyAdapterErrc_category c;
160 | return c;
161 | }
162 |
163 |
164 | // Overload the global make_error_code() free function with our
165 | // custom enum. It will be found via ADL by the compiler if needed.
166 | inline boost::system::error_code make_error_code(WebProxyAdapterErrc e)
167 | {
168 | return {static_cast(e), WebProxyAdapterErrc_category()};
169 | }
170 |
--------------------------------------------------------------------------------
/src/WebSocketStream.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 |
5 | #include "LocalproxyConfig.h"
6 |
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 |
13 | using tcp = boost::asio::ip::tcp;
14 | namespace http = boost::beast::http;
15 | namespace ssl = boost::asio::ssl;
16 | namespace websocket = boost::beast::websocket;
17 | using std::unique_ptr;
18 | using std::shared_ptr;
19 | using std::function;
20 | using BoostCallbackFunc = function;
21 | typedef ssl::stream single_ssl_stream_type;
22 | typedef ssl::stream&> double_ssl_stream_type;
23 | typedef websocket::stream websocket_stream_type;
24 | typedef websocket::stream websocket_stream_single_ssl_type;
25 | typedef websocket::stream websocket_stream_double_ssl_type;
26 | #ifdef _AWSIOT_TUNNELING_NO_SSL
27 | typedef boost::variant, unique_ptr> websocket_variant;
28 | #else
29 | typedef boost::variant, unique_ptr> websocket_variant;
30 | #endif
31 | using logger = boost::log::sources::severity_logger;
32 |
33 | namespace aws {
34 | namespace iot {
35 | namespace securedtunneling {
// NOTE(review): many template argument lists in this header (on the shared_ptr
// members, the std::function parameters, and enable_shared_from_this) appear to
// have been stripped by the tool that produced this dump, and original line 117
// is missing below; restore them from the upstream sources before building.
36 | /**
37 | * This is a wrapper around boost::beast::websocket::stream class. Based on whether TLS will be used
38 | * in the connection between the localproxy and the web proxy or the connection between the localproxy
39 | * and the proxy server, we will need a different subtemplate of the websocket stream class for each case.
40 | * And because those subtemplates don't share a common interface, we can't rely on polymorphic calls to
41 | * determine at run time which subtemplate to use. So the solution to that is to wrap all of that in a
42 | * a separate class that will take care of that complexity and determine which stream type to use/return
43 | * based on the localproxy configurations.
44 | *
45 | * Many of the methods in this class simply pass the argument to the correct Boost implementation
46 | * and return the result as is.
47 | */
48 | class WebSocketStream : public std::enable_shared_from_this {
49 | /**
50 | * A single SSL stream, used when
51 | * 1. The localproxy connects directly to the proxy server
52 | * 2. The localproxy connects via web proxy and the connection with the web proxy is not over TLS.
53 | */
54 | shared_ptr single_ssl_stream;
55 | #ifndef _AWSIOT_TUNNELING_NO_SSL
56 | /**
57 | * A double SSL stream (an SSL stream within another SSL stream, i.e. doubly encrypted), used when
58 | * 1. The localproxy connects via web proxy and the connection with the web proxy is over TLS.
59 | */
60 | shared_ptr double_ssl_stream;
61 | #endif
62 | /**
63 | * A boost variant for the websocket stream, it's a convenient way to store object where the type
64 | * will be determined at run time based on some condition.
65 | */
66 | websocket_variant wss;
67 | const LocalproxyConfig localproxyConfig;
68 | /**
69 | * SSL Context, used for all SSL streams.
70 | */
71 | ssl::context ssl_context;
72 | /**
73 | * A reference to Boost I/O Context, provided by the consumer of this class.
74 | */
75 | boost::asio::io_context &io_context;
76 | tcp::socket socket;
77 | /**
78 | * A pointer to Boost logger.
79 | */
80 | logger *log;
81 | public:
82 | WebSocketStream(LocalproxyConfig config,
83 | logger *log,
84 | boost::asio::io_context &io_ctx);
85 |
86 | /**
87 | * Checks whether the websocket stream is open or not
88 | * @return true if the stream is open, false otherwise
89 | */
90 | bool is_open();
91 |
92 | /**
93 | * Get a reference to the lowest layer
94 | * @return a reference to a basic_socket type, which is the lowest layer.
95 | */
96 | boost::asio::basic_socket &lowest_layer();
97 |
98 | /**
99 | * Asynchronous method for sending websocket ping messages, returns immediately.
100 | * @param payload the websocket ping frame payload
101 | * @param handler the handler that will be called when the async operation is complete
102 | */
103 | void async_ping(const websocket::ping_data &payload, const BoostCallbackFunc &handler);
104 |
105 | /**
106 | * Asynchronous method for sending websocket pong messages, returns immediately.
107 | * @param payload the websocket pong frame payload
108 | * @param handler the handler that will be called when the async operation is complete
109 | */
110 | void async_pong(const websocket::ping_data &payload, const BoostCallbackFunc &handler);
111 |
112 | /**
113 | * Set a callback to be invoked on each incoming control frame.
114 | * @param cb The function object to call
115 | */
116 | void control_callback(const function &cb);
118 |
119 | /**
120 | * Set the binary message write option.
121 | * @param value `true` if outgoing messages should indicate
122 | * binary, or `false` if they should indicate text.
123 | */
124 | void binary(const bool &value);
125 |
126 | /**
127 | * Determines if outgoing message payloads are broken up into multiple pieces.
128 | * @param value A `bool` indicating if auto fragmentation should be on.
129 | */
130 | void auto_fragment(const bool &value);
131 |
132 | #ifndef _AWSIOT_TUNNELING_NO_SSL
133 | /**
134 | * Sets the SSL verification mode for the SSL stream/layer used for the connection between the
135 | * proxy server and the localproxy.
136 | * @param v the SSL verification mode.
137 | */
138 | void set_ssl_verify_mode(const ssl::verify_mode &v);
139 |
140 | /**
141 | * Sets the SSL verification callback for the SSL stream/layer used for the connection between the
142 | * proxy server and the localproxy.
143 | * @param callback the SSL callback.
144 | */
145 | void set_verify_callback(const ssl::host_name_verification &callback);
146 |
147 | /**
148 | * Performs the SSL handshake between the localproxy and the proxy server asynchronously.
149 | * @param type The handshake type
150 | * @param host the host subdomain and domain
151 | * @param handler the callback handler when the async operation is complete.
152 | */
153 | void
154 | async_ssl_handshake(const ssl::stream_base::handshake_type &type, const std::string &host, const BoostCallbackFunc &handler);
155 | #endif
156 |
157 | /**
158 | * Perform an asynchronous websocket handshake with the proxy server.
159 | * @param res_type The response type
160 | * @param host the host subdomain and domain
161 | * @param target the URL path and query parameters
162 | * @param decorator A function object which will be called to modify the HTTP request object generated by the implementation.
163 | * @param handler The handler to be called when the request completes.
164 | */
165 | void async_handshake_ex(websocket::response_type &res_type, const std::string &host,
166 | const std::string &target,
167 | const function &decorator,
168 | const BoostCallbackFunc &handler);
169 |
170 | /**
171 | * Read part of a message asynchronously from the proxy server.
172 | * @param buffer A dynamic buffer to hold the message data after any masking or decompression has been applied.
173 | * @param size An upper limit on the number of bytes this function will append into the buffer.
174 | * @param handler handler to be called when the read operation completes.
175 | */
176 | void async_read_some(boost::beast::multi_buffer &buffer, const std::size_t &size,
177 | const function &handler);
178 |
179 | /**
180 | * Write a complete message asynchronously.
181 | * @param buffer A buffer sequence containing the entire message payload.
182 | * @param handler The completion handler to invoke when the operation completes.
183 | */
184 | void async_write(const boost::asio::const_buffer &buffer,
185 | const function &handler);
186 |
187 | /**
188 | * Returns the close reason received from the peer.
189 | * @return websocket::close_reason
190 | */
191 | websocket::close_reason const &reason();
192 |
193 | /**
194 | * Start tearing down a stream(s) underlying the websocket stream.
195 | * @param role The role of the local endpoint
196 | * @param handler The handler to be called when the request completes.
197 | */
198 | void async_teardown(const boost::beast::role_type &role, const BoostCallbackFunc &handler);
199 | /**
200 | * A getter for the socket.
201 | * @return Returns a reference to the underlying TCP socket
202 | */
203 | tcp::socket & get_tcp_socket();
204 | /**
205 | * A getter for the SSL stream used by the web proxy.
206 | * @return Returns a shared_ptr to the SSL stream, or nullptr when the hop to the
207 | * web proxy is not over TLS (per its usage in WebProxyAdapter.cpp).
208 | */
209 | shared_ptr get_web_proxy_ssl_stream();
210 | };
211 | }
212 | }
213 | }
213 |
--------------------------------------------------------------------------------
/src/config/ConfigFile.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include
25 | #include
26 | #include
27 | #include
28 | #include
29 |
30 | #include "ConfigFile.h"
31 | #include "Version.h"
32 |
33 | using std::uint16_t;
34 | using std::endl;
35 | using std::exception;
36 | using std::get;
37 | using std::string;
38 | using std::tuple;
39 | using std::unordered_set;
40 | using std::vector;
41 | using std::unordered_map;
42 |
43 | namespace filesys = boost::filesystem;
44 | using boost::log::trivial::trace;
45 | using boost::log::trivial::debug;
46 | using boost::log::trivial::info;
47 | using boost::log::trivial::warning;
48 | using boost::log::trivial::error;
49 | using boost::log::trivial::fatal;
50 | using logger = boost::log::sources::severity_logger;
51 |
52 |
53 |
54 | namespace aws { namespace iot { namespace securedtunneling { namespace config_file {
55 | logger log;
56 | /**
57 | * Check if given path is a valid directory
58 | * @param file_dir : directory file path
59 | * @return true: valid configuration. false: invalid configuration
60 | */
61 |
// Returns the full localproxy version string injected by CMake (see Version.h.in).
62 | std::string PrintVersion()
63 | {
64 | return LOCAL_PROXY_VERSION_FULL;
65 | }
66 |
67 | bool is_valid_directory(string const & file_dir) {
68 | bool is_dir = false;
69 | try {
70 | filesys::path path_obj(file_dir);
71 | /**
72 | * Validate if:
73 | * 1. Directory path exists
74 | * 2. Is a directory
75 | * 3. Is an empty folder
76 | */
77 | if (filesys::exists(path_obj) && filesys::is_directory(path_obj) && (!filesys::is_empty(path_obj)))
78 | {
79 | is_dir = true;
80 | }
81 | else if (!filesys::exists(path_obj))
82 | BOOST_LOG_SEV(log, debug) << file_dir << " does not exist!";
83 | else if (!filesys::is_directory(path_obj))
84 | BOOST_LOG_SEV(log, debug) << file_dir << " is not a directory!";
85 | else if (filesys::is_empty(path_obj))
86 | BOOST_LOG_SEV(log, debug) << file_dir << " empty dir! Please add configuration files.";
87 | else
88 | BOOST_LOG_SEV(log, debug) << file_dir << " is not valid!";
89 | }
90 | catch (const filesys::filesystem_error & e) {
91 | BOOST_LOG_SEV(log, fatal) << e.what();
92 | }
93 | return is_dir;
94 | }
95 |
96 | /**
97 | * Recursively get the list of all files in the given directory
98 | * @param file_dir : directory file path
99 | * @return file paths under the given directory and its subdirectories.
100 | */
101 | vector get_all_files(const string & file_dir) {
102 | vector files_under_directory;
103 | filesys::recursive_directory_iterator end_iter;
104 | for (filesys::recursive_directory_iterator dir_itr(file_dir); dir_itr != end_iter; ++dir_itr) {
105 | BOOST_LOG_SEV(log, info) << "Detect configuration files: ";
106 | if (filesys::is_regular_file(dir_itr->status())) {
107 | BOOST_LOG_SEV(log, info) << dir_itr->path().generic_string();
108 | files_under_directory.push_back(dir_itr->path().generic_string());
109 | }
110 | }
111 | return files_under_directory;
112 | }
113 |
114 | void read_service_ids_from_config_files(std::vector const & file_paths, unordered_set const & service_ids, unordered_map & serviceId_to_endpoint_mapping)
115 | {
116 | for (auto file_path: file_paths)
117 | {
118 | boost::property_tree::ptree pt;
119 | // If find all the service ids, stop searching
120 | if (serviceId_to_endpoint_mapping.size() == service_ids.size())
121 | {
122 | break;
123 | }
124 | // Parse file in .ini format, if having issues, skip this file and read the next file in the folder.
125 | try {
126 | boost::property_tree::ini_parser::read_ini(file_path, pt);
127 | }
128 | catch (const std::exception & e) {
129 | BOOST_LOG_SEV(log, warning) << "Fail to parse " << file_path << " .Please make sure your file is in .ini format.";
130 | BOOST_LOG_SEV(log, warning) << "Error message from parsing: " << e.what() << " .Continue to the next file.";
131 | continue;
132 | }
133 | for (auto service_id: service_ids) {
134 | /**
135 | * Search for service ids that does not have a port mapping detected.
136 | * If more than one service id mappings found in the configuration files, use the first one found.
137 | */
138 | if (serviceId_to_endpoint_mapping.find(service_id) != serviceId_to_endpoint_mapping.end())
139 | {
140 | continue;
141 | }
142 | try {
143 | string endpoint = pt.get(service_id);
144 | serviceId_to_endpoint_mapping.insert({service_id, endpoint});
145 | }
146 | catch (boost::property_tree::ptree_bad_path &e) {
147 | BOOST_LOG_SEV(log, warning) << "Fail to read file: " << file_path << ". Error message: " << e.what() << ". Ignore this file.";
148 | }
149 | }
150 | }
151 | }
152 |
153 | /**
154 | * Interpret the CLI mappings for -s and -d and use this information to build: service_id to endpoint(address:port or port) mapping
155 | * @param cli_input: the string from -s and -d in the CLI. Example: -s SSH1=5555,SSH2=6666
156 | * @param serviceId_to_endpoint_mapping: the mapping to be updated: service_id -> endpoint
157 | * Mapping update is in place.
158 | */
159 | void update_port_mapping(const string & input, unordered_map & serviceId_to_endpoint_mapping)
160 | {
161 | vector splitting_1st_res;
162 | // Different mappings are delimited by ,
163 | boost::split(splitting_1st_res, input, boost::is_any_of(","), boost::algorithm::token_compress_on);
164 |
165 | if (splitting_1st_res.empty()) {
166 | throw std::runtime_error("Must provide at least one port or port mapping for destination-app!");
167 | }
168 |
169 | // Process each port mapping tags
170 | for (auto res: splitting_1st_res) {
171 | // Ignore empty string
172 | if (res.empty()) continue;
173 | vector splitting_2rd_res;
174 | // Inside the mapping, the service_id and port are delimited by =
175 | boost::split(splitting_2rd_res,
176 | res,
177 | boost::algorithm::is_any_of("="), boost::algorithm::token_compress_on);
178 | if (splitting_2rd_res.size() != 2) {
179 | /** For v1 format, v2 local proxy will continue to support
180 | * Example 1: Local proxy starts in v1 source mode:
181 | * ./localproxy -r us-east-1 -s 3389 -t
182 | * cli_input will be 3389
183 | * Example 2: Local proxy starts in v1 destination mode:
184 | * ./localproxy -r us-east-1 -d localhost:22 -t
185 | * cli_input will be localhost:22
186 | */
187 | if (splitting_1st_res.size() == 1 && splitting_2rd_res.size() == 1) {
188 | boost::trim(splitting_2rd_res[0]);
189 | serviceId_to_endpoint_mapping[""] = splitting_2rd_res[0];
190 | return;
191 | }
192 | else
193 | {
194 | throw std::runtime_error("Wrong format for the port mappings! Example: SSH1=5555,SSH2=6666.");
195 | }
196 | }
197 |
198 | // Trim whitespace and insert
199 | string service_id = boost::trim_copy(splitting_2rd_res[0]);
200 | string endpoint = boost::trim_copy(splitting_2rd_res[1]);
201 |
202 | if (service_id.empty() || endpoint.empty()) {
203 | string error_message =
204 | string("Wrong format for the port mappings: ") + res + string(" .Example: SSH1=5555");
205 | throw std::runtime_error(error_message);
206 | }
207 | // Check if it's a duplicate mapping, ignore if it has been provided
208 | if (serviceId_to_endpoint_mapping.find(service_id) != serviceId_to_endpoint_mapping.end()) {
209 | BOOST_LOG_SEV(log, warning) << "Duplicate mappings, ignore. This mapping already exists: " << service_id << " : "
210 | << serviceId_to_endpoint_mapping[service_id];
211 | continue;
212 | }
213 | serviceId_to_endpoint_mapping[service_id] = endpoint;
214 | }
215 | }
216 |
217 | std::string get_default_port_mapping_dir()
218 | {
219 | boost::filesystem::path full_path(boost::filesystem::current_path());
220 | return (boost::format("%1%/config") % full_path.string()).str();
221 | }
222 | }}}}
--------------------------------------------------------------------------------
/src/config/ConfigFile.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | using std::string;
11 | using std::unordered_set;
12 | using std::vector;
13 | using std::unordered_map;
14 |
namespace aws { namespace iot { namespace securedtunneling { namespace config_file {
    // True only if file_dir exists, is a directory, and is non-empty.
    bool is_valid_directory(string const & file_dir);
    // Recursively collects the paths of all regular files under file_dir.
    std::vector get_all_files(const string & file_dir);
    // Default configuration directory: "<current working directory>/config".
    std::string get_default_port_mapping_dir();
    // Reads endpoint mappings for the given service ids from the given .ini
    // files; the first mapping found per service id wins. Updates
    // serviceId_to_endpoint_mapping in place.
    void read_service_ids_from_config_files(std::vector const & file_paths,
    unordered_set const & service_ids,
    unordered_map & serviceId_to_endpoint_mapping);
    // Parses -s/-d CLI input (e.g. "SSH1=5555,SSH2=6666", or a bare v1-style
    // "5555" / "host:port") into serviceId_to_endpoint_mapping, in place.
    void update_port_mapping(const string & cli_input, unordered_map & serviceId_to_endpoint_mapping);
    // NOTE(review): declared here but seemingly implemented elsewhere — confirm.
    std::string PrintVersion();
}}}}
25 |
--------------------------------------------------------------------------------
/test/TestHttpServer.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | #include "TestHttpServer.h"
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 |
13 | namespace base64 = boost::beast::detail::base64;
14 |
15 | /**
16 | * This Async server implementation is based on the following boost example
17 | * https://www.boost.org/doc/libs/1_68_0/libs/beast/example/http/server/async/http_server_async.cpp
18 | */
19 |
20 | /**
21 | * This function produces an HTTP response for the given request.
22 | */
23 | template
24 | void handle_request(http::request>&& req, Send&& send) {
25 | // Returns a 4xx response
26 | auto const client_error =
27 | [&req](std::string why, http::status status) {
28 | http::response res{status, req.version()};
29 | res.set(http::field::content_type, "text/html");
30 | res.keep_alive(req.keep_alive());
31 | res.body() = why;
32 | res.prepare_payload();
33 | return res;
34 | };
35 | // Returns a 500 response
36 | auto const server_error =
37 | [&req](std::string what)
38 | {
39 | http::response res{http::status::internal_server_error, req.version()};
40 | res.set(http::field::content_type, "text/html");
41 | res.keep_alive(req.keep_alive());
42 | res.prepare_payload();
43 | return res;
44 | };
45 |
46 |
47 | // Make sure we can handle the method
48 | if( req.method() != http::verb::connect)
49 | return send(client_error("Unknown HTTP-method", http::status::bad_request));
50 |
51 | // We will use the auth information to indicate to the server how respond so that we can test how
52 | // the HTTPS Proxy adapter will handle different scenarios
53 | std::string encoded_auth{req[http::field::proxy_authorization]};
54 | if (!encoded_auth.empty()) {
55 | std::string incoming_auth;
56 | incoming_auth.resize(base64::decoded_size(encoded_auth.size()));
57 | auto const result = base64::decode(&incoming_auth[0], encoded_auth.substr(6).data(), encoded_auth.length() - 6);
58 | incoming_auth.resize(result.first);
59 | string allowed_auth = aws::iot::securedtunneling::test::username + ":" + aws::iot::securedtunneling::test::password;
60 | if (!incoming_auth.empty() && incoming_auth == "500")
61 | return send(server_error("Server failure"));
62 |
63 | if (!incoming_auth.empty() && incoming_auth == "300")
64 | return send(client_error("REDIRECT", http::status::permanent_redirect));
65 |
66 | if (!incoming_auth.empty() && incoming_auth == "100")
67 | return send(client_error("UNKNOWN", http::status::processing));
68 |
69 | if (!incoming_auth.empty() && incoming_auth != allowed_auth)
70 | return send(client_error("ACCESS DENIED", http::status::forbidden));
71 | }
72 |
73 | // Respond to CONNECT request
74 | http::response res{http::status::ok, 11};
75 | return send(std::move(res));
76 | }
77 |
78 | void fail(error_code ec, char const* what) {
79 | std::cerr << what << ": " << ec.message() << "\n";
80 | }
81 |
82 | namespace aws {
83 | namespace iot {
84 | namespace securedtunneling {
85 | namespace test {
86 |
87 | TestHttpServer::TestHttpServer(const string& address, const unsigned short port) :
88 | port(port) {
89 | this->address = boost::asio::ip::make_address(address);
90 | this->listener = std::make_shared(ioc, tcp::endpoint{this->address, port});
91 | this->listener->run();
92 | }
93 |
// Blocks servicing requests until stop() is called or the io_context runs
// out of work. Always returns EXIT_SUCCESS.
int TestHttpServer::run() {
    ioc.run();
    return EXIT_SUCCESS;
}
98 |
// Stops the io_context, unblocking a concurrent run(). Always returns EXIT_SUCCESS.
int TestHttpServer::stop() {
    ioc.stop();
    return EXIT_SUCCESS;
}
103 |
104 | Session::send_lambda::send_lambda(Session &self) : self_(self) { }
105 |
// Sends msg asynchronously. The message is moved into a shared_ptr that is
// also stored (type-erased) on the session so it outlives the async_write.
template
void Session::send_lambda::operator()(http::message &&msg) const {
    // The lifetime of the message has to extend for the duration of the async operation so
    // we use a shared_ptr to manage it.
    auto sp = std::make_shared<
        http::message>(std::move(msg));

    // Store a type-erased version of the shared pointer in the class to keep it alive.
    self_.res_ = sp;

    // Write the response; need_eof is captured now because sp is consumed by
    // the async operation, and tells on_write whether to close afterwards.
    http::async_write(self_.socket_, *sp, [self = self_.shared_from_this(), need_eof = sp->need_eof()]
        (const error_code & ec, const std::size_t & bytes_transferred) {
        self->on_write(ec, bytes_transferred, need_eof);
    });
}
122 |
// Takes ownership of the accepted socket; lambda_ routes responses back
// through this session.
Session::Session(tcp::socket socket)
    : socket_(std::move(socket))
    , lambda_(*this) { }
126 |
// Entry point: start the read/respond loop on this session's socket.
void Session::run() {
    do_read();
}
130 |
131 | void Session::do_read() {
132 | // Make the request empty before reading, otherwise the operation behavior is undefined.
133 | req_ = {};
134 |
135 | // Read a request
136 | http::async_read(socket_, buffer_, req_,[self = shared_from_this()]
137 | (const error_code & ec, const std::size_t & bytes_transferred) {
138 | self->on_read(ec, bytes_transferred);
139 | });
140 | }
141 |
142 | void Session::on_read(error_code ec, std::size_t bytes_transferred) {
143 | boost::ignore_unused(bytes_transferred);
144 |
145 | // This means they closed the connection
146 | if(ec == http::error::end_of_stream)
147 | return do_close();
148 |
149 | if(ec)
150 | return fail(ec, "read");
151 |
152 | // Send the response
153 | handle_request(std::move(req_), lambda_);
154 | }
155 |
156 | void Session::on_write(error_code ec, std::size_t bytes_transferred, bool close) {
157 | boost::ignore_unused(bytes_transferred);
158 |
159 | if(ec)
160 | return fail(ec, "write");
161 |
162 | if(close) {
163 | // This means we should close the connection, usually because
164 | // the response indicated the "Connection: close" semantic.
165 | return do_close();
166 | }
167 |
168 | // We're done with the response so delete it
169 | res_ = nullptr;
170 |
171 | // Read another request
172 | do_read();
173 | }
174 |
// Half-close: send TCP FIN to the client; any shutdown error is ignored.
void Session::do_close() {
    error_code ec;
    socket_.shutdown(tcp::socket::shutdown_send, ec);
}
179 |
180 | Listener::Listener(boost::asio::io_context& ioc, const tcp::endpoint& endpoint)
181 | : acceptor_(ioc) , socket_(ioc) {
182 | error_code ec;
183 | acceptor_.open(endpoint.protocol(), ec);
184 | if(ec) {
185 | fail(ec, "open");
186 | return;
187 | }
188 | acceptor_.set_option(boost::asio::socket_base::reuse_address(true), ec);
189 | if(ec) {
190 | fail(ec, "set_option");
191 | return;
192 | }
193 | acceptor_.bind(endpoint, ec);
194 | if(ec) {
195 | fail(ec, "bind");
196 | return;
197 | }
198 | acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec);
199 | if(ec) {
200 | fail(ec, "listen");
201 | return;
202 | }
203 | }
204 |
205 | void Listener::run() {
206 | if(! acceptor_.is_open())
207 | return;
208 | do_accept();
209 | }
210 |
// Asynchronously accept the next connection into socket_; the listener keeps
// itself alive through shared_from_this until on_accept fires.
void Listener::do_accept() {
    acceptor_.async_accept(socket_, [self = shared_from_this()](const error_code & ec) {
        self->on_accept(ec);
    });
}
216 |
217 | void Listener::on_accept(error_code ec) {
218 | if(ec) {
219 | fail(ec, "accept");
220 | }
221 | else {
222 | std::make_shared(std::move(socket_))->run();
223 | }
224 | do_accept();
225 | }
226 | }
227 | }
228 | }
229 | }
--------------------------------------------------------------------------------
/test/TestHttpServer.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 |
5 | #include
6 | #include
7 | #include
8 |
9 | using tcp = boost::asio::ip::tcp;
10 | using string = std::string;
11 | using boost::system::error_code;
12 | namespace http = boost::beast::http;
13 |
14 | namespace aws {
15 | namespace iot {
16 | namespace securedtunneling {
17 | namespace test {
18 | const string username = "user";
19 | const string password = "password";
20 |
// One HTTP session on an accepted socket: reads CONNECT requests and writes
// the canned responses produced by handle_request (TestHttpServer.cpp).
class Session : public std::enable_shared_from_this {
    // This is the C++14 equivalent of a generic lambda.
    // The function object is used to send an HTTP message.
    struct send_lambda {
        Session& self_;   // owning session; must outlive the functor
        explicit send_lambda(Session& self);
        template
        void operator()(http::message&& msg) const;
    };
    tcp::socket socket_;                 // connection to the client under test
    boost::beast::flat_buffer buffer_;   // read buffer reused across requests
    http::request req_;                  // request currently being read
    std::shared_ptr res_;                // type-erased in-flight response, kept alive during async_write
    send_lambda lambda_;                 // response sender bound to this session

public:
    explicit Session(tcp::socket socket);
    void run();                          // start the read/respond loop
    void do_read();
    void on_read(error_code ec, std::size_t bytes_transferred);
    void on_write(error_code ec, std::size_t bytes_transferred, bool close);
    void do_close();                     // half-close (shutdown send side)
};
44 |
// Accepts TCP connections on one endpoint and spawns a Session per client.
class Listener : public std::enable_shared_from_this {
public:
    tcp::acceptor acceptor_;
    tcp::socket socket_;   // socket the next connection is accepted into
    Listener(boost::asio::io_context& ioc, const tcp::endpoint& endpoint);
    void run();            // begin accepting (no-op if the acceptor failed to open)
    void do_accept();
    void on_accept(error_code ec);
};
54 |
// Minimal HTTP CONNECT proxy used by the unit tests. Construction starts
// listening; run()/stop() drive and halt the io_context.
class TestHttpServer {
public:
    boost::asio::ip::address address;
    unsigned short port;
    boost::asio::io_context ioc{};
    std::shared_ptr listener;
    TestHttpServer(const string& address, const unsigned short port);
    int run();   // blocks servicing requests until stop() is called
    int stop();
};
65 | }
66 | }
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/test/TestWebsocketServer.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | #include "TestWebsocketServer.h"
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | #include
13 | #include
14 |
15 | using namespace std;
16 | using namespace aws::iot::securedtunneling::settings;
17 |
18 | using boost::asio::ip::tcp;
19 | using web_socket_stream = boost::beast::websocket::stream;
20 | using message = com::amazonaws::iot::securedtunneling::Message;
21 | using boost::property_tree::ptree;
22 |
23 | namespace aws { namespace iot { namespace securedtunneling { namespace test
24 | {
25 |
26 | namespace websocket = boost::beast::websocket;
27 | namespace http = boost::beast::http;
28 |
// Sets up a TCP acceptor on an ephemeral port (port 0) for the given address.
// Buffer capacities are taken from the adapter settings tree; the actual
// endpoint is available via get_endpoint().
TestWebsocketServer::TestWebsocketServer(std::string const &address, ptree const &adapter_settings) :
    adapter_settings(adapter_settings),
    io_ctx(),
    acceptor(io_ctx, {boost::asio::ip::make_address(address), 0}),
    closed(false),
    close_reason{},
    code(websocket::internal_error),
    incoming_message_buffer{ adapter_settings.get(KEY_WEB_SOCKET_READ_BUFFER_SIZE) },
    message_parse_buffer{ adapter_settings.get(KEY_MESSAGE_MAX_SIZE) }
{ }
39 |
// Accepts a single TCP connection, performs the websocket handshake
// (responding with an all-zero channel-id and protocol
// "aws.iot.securetunneling-2.0"), starts the async read loop, then blocks in
// io_ctx.run() until the server is stopped via close_client().
void TestWebsocketServer::run()
{
    tcp::socket socket{io_ctx};
    acceptor.accept(socket);   // blocking: wait for the client under test

    // Read the HTTP upgrade request ourselves so tests can inspect it later
    // via get_handshake_request().
    boost::beast::flat_buffer buffer;
    http::read(socket, buffer, handshake_request, ec);
    web_socket_stream ws{std::move(socket)};
    // NOTE(review): ws is a local kept alive only by io_ctx.run() below;
    // presumably ws_stream binds it by reference for deliver_message()/
    // close_client() — confirm against ws_stream's declared type.
    ws_stream = ws;
    ws.set_option(websocket::stream_base::decorator([](boost::beast::websocket::response_type& response)
    {
        response.set("channel-id", boost::uuids::to_string({})); //default init for uuid is all 0s
        response.set("Sec-WebSocket-Protocol", "aws.iot.securetunneling-2.0");
    }));
    ws.accept(
        handshake_request,
        ec);
    if(ec)
    {
        throw std::runtime_error((boost::format("Accept handshake error: %1%") % ec.message()).str().c_str());
    }
    ws.binary(true);
    //async for reading
    ws.async_read_some(incoming_message_buffer, incoming_message_buffer.max_size() - incoming_message_buffer.size(),
        std::bind(&TestWebsocketServer::on_read_complete, this, std::ref(ws),
            std::placeholders::_1, std::placeholders::_2));

    io_ctx.run();
}
69 |
// Read-completion handler: drain any complete frames from the buffer, then
// re-arm the async read. A read error is fatal unless the server itself
// initiated the close (closed == true), in which case it is expected.
void TestWebsocketServer::on_read_complete(web_socket_stream &ws, boost::system::error_code const &ec, size_t bytes_read)
{
    if(!ec)
    {
        process_input_buffer(ws, incoming_message_buffer);
        ws.async_read_some(incoming_message_buffer, incoming_message_buffer.max_size() - incoming_message_buffer.size(),
            std::bind(&TestWebsocketServer::on_read_complete, this, std::ref(ws),
                std::placeholders::_1, std::placeholders::_2));
    }
    else if(!closed)
    {
        throw std::runtime_error((boost::format("Error on read: %1%") % ec.message()).str().c_str());
    }
}
84 |
85 | void TestWebsocketServer::on_read_message(web_socket_stream &ws, message const &message)
86 | {
87 | using namespace com::amazonaws::iot::securedtunneling;
88 | if(expect_messages.empty())
89 | { //if not explicitly expecting something, ignore control messages, echo back data
90 | if (message.type() != Message_Type_DATA)
91 | { //control message recieved
92 | }
93 | else if (message.type() == Message_Type_DATA)
94 | {
95 | send_message(ws, message);
96 | }
97 | }
98 | else
99 | {
100 | auto expect_check = expect_messages.front();
101 | expect_messages.pop();
102 | if(!expect_check(message))
103 | {
104 | throw std::runtime_error((boost::format("Unexpected message type recievedi: Type: %1%; StreamId: %2%") % message.type() % message.streamid()).str());
105 | }
106 | }
107 | }
108 |
109 | void TestWebsocketServer::on_write_complete(web_socket_stream &ws, boost::system::error_code const &ec, size_t bytes_written)
110 | {
111 | if(ec)
112 | {
113 | throw std::runtime_error((boost::format("Error on write: %1%") % ec.message()).str().c_str());
114 | }
115 | }
116 |
// Extracts every complete frame from message_buffer. Each frame is a
// big-endian 2-byte length prefix followed by a serialized protobuf message;
// partial frames are left in the buffer for the next read.
void TestWebsocketServer::process_input_buffer(web_socket_stream &ws_stream, boost::beast::multi_buffer &message_buffer)
{
    using namespace com::amazonaws::iot::securedtunneling;

    size_t const data_length_size = adapter_settings.get(KEY_DATA_LENGTH_SIZE);
    boost::beast::flat_buffer data_length_buffer{ data_length_size };
    // Loop while at least a length prefix is available.
    while (message_buffer.size() >= data_length_size)
    {
        // Peek (copy, don't consume) the length prefix and convert from
        // network byte order.
        boost::asio::buffer_copy(data_length_buffer.prepare(data_length_size), message_buffer.data(), data_length_size);
        uint16_t data_length = boost::endian::big_to_native(*reinterpret_cast(data_length_buffer.data().data()));
        if (message_buffer.size() >= (data_length + data_length_size))
        {
            //consume the length since we've already read it
            message_buffer.consume(data_length_size);
            bool parsed_successfully = parse_protobuf_and_consume_input(message_buffer, static_cast(data_length), incoming_message) && incoming_message.IsInitialized();
            if (!parsed_successfully)
            {
                throw std::runtime_error("Could not parse web socket binary frame into message");
            }
            on_read_message(ws_stream, incoming_message);
        }
        else
        {
            // Only part of the frame has arrived; wait for more data.
            break;
        }
    }
}
144 |
// Sends a message to the connected client from outside the read loop
// (used by tests to inject server->client traffic).
void TestWebsocketServer::deliver_message(message const &message)
{
    send_message(ws_stream.get(), message);
}
149 |
// Serializes the message into a single binary frame (2-byte big-endian length
// prefix + protobuf payload) and writes it asynchronously.
void TestWebsocketServer::send_message(web_socket_stream &ws, message const &message)
{
    using namespace com::amazonaws::iot::securedtunneling;
    //calculate total frame size
    std::size_t const frame_size = static_cast(message.ByteSizeLong()) +
        adapter_settings.get(KEY_DATA_LENGTH_SIZE);
    boost::beast::flat_buffer outgoing_message_buffer{ frame_size };
    //get pointers to where data length and protobuf msg will be written to
    void *frame_data = outgoing_message_buffer.prepare(frame_size).data();
    void *frame_data_msg_offset = reinterpret_cast(reinterpret_cast(frame_data)
        + adapter_settings.get(KEY_DATA_LENGTH_SIZE));
    //get the protobuf data length and write it to start the frame
    std::uint16_t data_length = static_cast(message.ByteSizeLong());
    *reinterpret_cast(frame_data) = boost::endian::native_to_big(data_length);
    //write the protobuf msg into the buffer next
    message.SerializeToArray(frame_data_msg_offset, static_cast(adapter_settings.get(KEY_MESSAGE_MAX_SIZE)));
    //commit the entire frame to the outgoing message buffer
    outgoing_message_buffer.commit(frame_size);
    //no controls in test mode over async writes, test flow dictates this
    ws.async_write(outgoing_message_buffer.data(),
        std::bind(&TestWebsocketServer::on_write_complete, this, std::ref(ws),
            std::placeholders::_1, std::placeholders::_2));
}
173 |
// Copies data_length bytes out of message_buffer (consuming them) and parses
// them into msg. Returns false if protobuf parsing fails.
bool TestWebsocketServer::parse_protobuf_and_consume_input(boost::beast::multi_buffer &message_buffer, size_t data_length, message &msg)
{
    //copy into a contiguous buffer for simplified protobuf parsing
    message_parse_buffer.consume(message_parse_buffer.size());
    msg.Clear();
    boost::asio::buffer_copy(message_parse_buffer.prepare(data_length), message_buffer.data(), data_length);
    message_buffer.consume(data_length);
    return msg.ParseFromArray(message_parse_buffer.data().data(), static_cast(data_length));
}
183 |
// Initiates a server-side websocket close with the given reason/code, tears
// down the underlying transport, and finally stops the io_context so run()
// returns. Setting closed first lets the read loop treat the resulting read
// error as expected.
void TestWebsocketServer::close_client(std::string const& close_reason, boost::beast::websocket::close_code code)
{
    closed = true; //enable read loop failure to know that it was normal
    ws_stream.get().async_close({code, close_reason},
        [this](boost::system::error_code const &ec)
        {
            websocket::async_teardown(boost::beast::role_type::server, ws_stream.get().next_layer(),
                [this](boost::system::error_code const &ec)
                {
                    this->io_ctx.stop();
                });
        });
}
197 |
// Queues a predicate that must accept the next incoming message; while the
// queue is non-empty, echo mode is suspended (see on_read_message).
void TestWebsocketServer::expect_next_message(std::function predicate)
{
    expect_messages.push(predicate);
}
202 |
203 |
204 | }}}}
205 |
--------------------------------------------------------------------------------
/test/TestWebsocketServer.h:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 | #pragma once
4 |
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | #include
12 | #include
13 | #include
14 | #include "Message.pb.h"
15 |
16 |
17 | namespace aws { namespace iot { namespace securedtunneling { namespace test {
18 |
19 | using boost::asio::ip::tcp;
20 | using web_socket_stream = boost::beast::websocket::stream;
21 | using message = com::amazonaws::iot::securedtunneling::Message;
22 | using boost::property_tree::ptree;
23 |
// Single-connection websocket server used by the adapter unit tests. Frames
// protobuf Messages with a 2-byte length prefix, echoes DATA messages, and
// lets tests queue expectations or inject/close traffic.
class TestWebsocketServer
{
public:
    TestWebsocketServer(std::string const &address, ptree const &adapter_settings);

    // Actual bound endpoint (the port is chosen by the OS at construction).
    tcp::endpoint get_endpoint() { return acceptor.local_endpoint(); }

    // Server-side close; stops the io_context when teardown completes.
    void close_client(std::string const& close_reason, boost::beast::websocket::close_code code);

    // Queue a predicate the next incoming message must satisfy.
    void expect_next_message(std::function predicate);

    // Accept one client, handshake, and block processing messages.
    void run();

    // Inject a server->client message from test code.
    void deliver_message(message const &message);

    // The HTTP upgrade request captured during run(), for test inspection.
    boost::beast::http::request const & get_handshake_request() { return handshake_request; }

protected:
    void process_input_buffer(web_socket_stream &ws, boost::beast::multi_buffer &message_buffer);
    void send_message(web_socket_stream &ws, message const &message);
    bool parse_protobuf_and_consume_input(boost::beast::multi_buffer &message_buffer, size_t data_length, message &msg);

    void on_read_complete(web_socket_stream &ws, boost::system::error_code const &ec, size_t bytes_read);
    void on_read_message(web_socket_stream &ws, message const &message);
    void on_write_complete(web_socket_stream &ws, boost::system::error_code const &ec, size_t bytes_written);

    ptree const &adapter_settings;        // buffer sizes and protocol constants
    boost::asio::io_context io_ctx;
    boost::system::error_code ec;
    tcp::acceptor acceptor;
    bool closed;                          // true once close_client() starts; read errors are then expected
    std::string close_reason;
    boost::beast::websocket::close_code code;
    boost::optional ws_stream;            // set in run() once the stream exists

    message incoming_message;             // reused parse target
    boost::beast::multi_buffer incoming_message_buffer;
    boost::beast::flat_buffer message_parse_buffer;
    boost::beast::http::request handshake_request;

    std::queue> expect_messages;          // pending expectation predicates
};
66 |
67 | }}}}
68 |
69 |
--------------------------------------------------------------------------------
/test/Url.cpp:
--------------------------------------------------------------------------------
1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | // SPDX-License-Identifier: Apache-2.0
3 |
4 | #include
5 | #include
6 | #include
7 | #include