├── .dockerignore ├── requirements-test.txt ├── src ├── _version.py ├── tools │ ├── _version.py │ ├── __init__.py │ ├── connect.py │ ├── gophish_test.py │ ├── gophish_cleaner.py │ ├── gophish_complete.py │ ├── gophish_import.py │ └── gophish_export.py ├── assessment │ ├── _version.py │ ├── __init__.py │ ├── prompters.py │ ├── sample_assessment.json │ └── reschedule.py ├── templates │ ├── _version.py │ ├── __init__.py │ └── generate_template.py ├── models │ ├── __init__.py │ └── models.py └── util │ ├── __init__.py │ ├── set_date.py │ ├── input.py │ └── validate.py ├── requirements.txt ├── .github ├── lineage.yml ├── CODEOWNERS ├── dependabot.yml ├── workflows │ ├── release.yml │ ├── label-prs.yml │ ├── sync-labels.yml │ ├── dependency-review.yml │ ├── codeql-analysis.yml │ └── build.yml ├── labeler.yml └── labels.yml ├── .prettierignore ├── tag.sh ├── requirements-dev.txt ├── .coveragerc ├── .gitignore ├── tests ├── test_assessment_builder.py ├── test_input.py ├── test_models.py ├── test_validate.py ├── test_tools.py ├── test_assessment_json.py └── conftest.py ├── .ansible-lint ├── Dockerfile ├── var └── getenv ├── .mdl_config.yaml ├── .yamllint ├── bump-version ├── pyproject.toml ├── LICENSE ├── CONTRIBUTING.md ├── .pre-commit-config.yaml ├── setup-env └── README.md /.dockerignore: -------------------------------------------------------------------------------- 1 | .* 2 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | --editable .[test] 2 | --requirement requirements.txt 3 | -------------------------------------------------------------------------------- /src/_version.py: -------------------------------------------------------------------------------- 1 | """This file defines the version of this project.""" 2 | 3 | __version__ = "2.0.0" 4 | -------------------------------------------------------------------------------- 
/src/tools/_version.py: -------------------------------------------------------------------------------- 1 | """This file defines the version of this module.""" 2 | 3 | __version__ = "0.0.6" 4 | -------------------------------------------------------------------------------- /src/assessment/_version.py: -------------------------------------------------------------------------------- 1 | """This file defines the version of this module.""" 2 | 3 | __version__ = "0.0.3" 4 | -------------------------------------------------------------------------------- /src/templates/_version.py: -------------------------------------------------------------------------------- 1 | """This file defines the version of this module.""" 2 | 3 | __version__ = "0.0.3" 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Note: Add any additional requirements to setup.py's install_requires field 2 | --editable . 
#!/usr/bin/env bash

# Tag the current commit with the project's version and push all tags.

set -o nounset
set -o errexit
set -o pipefail

# The repository's version script is "bump-version" (see the file listing and
# .github/workflows/release.yml, which runs "./bump-version show").  The old
# "./bump_version.sh project show" invocation referenced a script that does
# not exist in this repository, so tagging always failed.
version=$(./bump-version show)

git tag "v$version" && git push --tags
"""The prompter module."""

# Third-Party Libraries
from prompt_toolkit import prompt

# cisagov Libraries
from util.validate import BlankInputValidator


def main():
    """Prompt the operator for a campaign URL and return the entered value."""
    # Blank responses are rejected by the validator; the prompt is pre-filled
    # with a placeholder default for the operator to overwrite.
    return prompt(
        "Campaign URL: ",
        default="domain",
        validator=BlankInputValidator(),
    )
"""The set_date function."""

# Standard Python Libraries
from datetime import datetime


def set_date(type_, assessment, campaign_date):
    """Widen an assessment's start or end date to cover a campaign date.

    Args:
        type_: Which attribute to update: "start_date" or "end_date".
        assessment: Object whose date attribute is read and possibly replaced.
            Dates are ISO 8601 strings such as "2020-01-20T13:00:00-05:00".
        campaign_date: The campaign's date as an ISO 8601 string.

    The assessment attribute is replaced with campaign_date when it is
    currently unset, when the campaign starts before the recorded start_date,
    or when the campaign ends after the recorded end_date.
    """
    current = getattr(assessment, type_)
    if not current:
        # No date recorded yet; the campaign's date becomes the baseline.
        setattr(assessment, type_, campaign_date)
        return

    # datetime.fromisoformat parses "+HH:MM" UTC offsets directly (and this
    # project runs on Python 3.11 per the Dockerfile), so the old
    # rsplit(":")-based reformatting needed for strptime's %z is unnecessary.
    assessment_time = datetime.fromisoformat(current)
    campaign_time = datetime.fromisoformat(campaign_date)
    if type_ == "start_date" and assessment_time > campaign_time:
        setattr(assessment, type_, campaign_date)
    elif type_ == "end_date" and assessment_time < campaign_time:
        setattr(assessment, type_, campaign_date)
"""Module to create a Gophish API connection."""

# Third-Party Libraries
# No type stubs exist for gophish, so we add "type: ignore" to tell mypy to
# ignore this library
from gophish import Gophish  # type: ignore
from gophish.models import Error  # type: ignore
from requests.exceptions import ConnectionError, MissingSchema


def connect_api(api_key, server):
    """Create and verify a Gophish API connection.

    Args:
        api_key: The Gophish API key.
        server: The Gophish server URL.

    Returns:
        A connected Gophish API client.

    Raises:
        Exception: If the connection cannot be established (bad API key,
            malformed URL, network failure, or any other error).
    """
    # verify=False: Gophish instances commonly run with self-signed certs.
    api = Gophish(api_key, host=server, verify=False)

    # Issue a harmless read request to confirm the key and URL actually work.
    try:
        api.campaigns.get()
        return api
    except Error as e:
        # The Gophish API reports problems such as a bad API key as Error.
        # Chain the original exception so the root cause is preserved for
        # debugging instead of being silently discarded.
        raise Exception(f"Error Connecting: {e.message}") from e
    except MissingSchema as e:
        # Malformed server URL (e.g. a missing "http://" prefix); keep only
        # the human-readable prefix of requests' message.
        message = e.args[0].split(" '")[0]
        raise Exception(f"Error Connecting: {message}") from e
    except ConnectionError as e:
        raise Exception("Networking Error, unable to reach Gophish.") from e
    except Exception as e:
        raise Exception("Cannot connect to Gophish.") from e
#!/bin/bash

# Emit shell-alias definitions for the commands packaged in the
# cisagov/gophish-tools Docker image.  Intended use (from the banner below):
#   eval "$(docker run cisagov/gophish-tools)"

# NOTE(review): after "echo", an unquoted "#" starts a shell comment, so each
# of these banner lines actually prints as an empty line rather than the text
# shown here.  If the banner is meant to be visible, the arguments should be
# quoted — confirm the intent before changing.
echo ################################################################################
echo # The following output is used to setup aliases to containerized commands.
echo # To apply these changes in a shell, eval the output of this container:
echo # eval "$(docker run cisagov/gophish-tools)"
echo #
echo # Environment variable:
echo # GOPHISH_TOOLS_IMAGE, defaults to "cisagov/gophish-tools" if not set
echo #
echo ################################################################################
echo

# The image installs its entry points into /usr/local/bin; bail out if that
# directory is missing since the globs below depend on it.
cd /usr/local/bin || exit

# Create output that can be eval'd to create aliases for the various
# commands in gophish-tools
# The triple-backslash escapes survive this script's own expansion so that the
# emitted alias text contains \`pwd\` and \${GOPHISH_TOOLS_IMAGE:-...}, which
# are then expanded in the *caller's* shell each time the alias runs.
for f in pca-*; do
  # shellcheck disable=SC1083,SC2086
  echo alias $f=\"docker run -it --network host --rm --volume \\\`pwd\\\`:/home/cisa \\\"\\\${GOPHISH_TOOLS_IMAGE:-cisagov/gophish-tools}\\\" $f\"
done

for f in gophish-*; do
  # shellcheck disable=SC1083,SC2086
  echo alias $f=\"docker run -it --network host --rm --volume \\\`pwd\\\`:/home/cisa \\\"\\\${GOPHISH_TOOLS_IMAGE:-cisagov/gophish-tools}\\\" $f\"
done

# Create an alias to execute a shell in the gophish-tools container
# shellcheck disable=SC1083,SC2086
echo alias gophish-tools-bash=\"docker run -it --rm --network host --volume \\\`pwd\\\`:/home/cisa \\\"\\\${GOPHISH_TOOLS_IMAGE:-cisagov/gophish-tools}\\\" /bin/bash\"
MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the 24 | # same content 25 | MD024: 26 | # Allow headers with the same content as long as they are not in the same 27 | # parent heading 28 | allow_different_nesting: true 29 | 30 | # MD029/ol-prefix - Ordered list item prefix 31 | MD029: 32 | # Enforce the `1.` style for ordered lists 33 | style: one 34 | 35 | # MD033/no-inline-html - Inline HTML 36 | MD033: 37 | # The h1 and img elements are allowed to permit header images 38 | allowed_elements: 39 | - h1 40 | - img 41 | 42 | # MD035/hr-style - Horizontal rule style 43 | MD035: 44 | # Enforce dashes for horizontal rules 45 | style: --- 46 | 47 | # MD046/code-block-style - Code block style 48 | MD046: 49 | # Enforce the fenced style for code blocks 50 | style: fenced 51 | 52 | # MD049/emphasis-style - Emphasis style should be consistent 53 | MD049: 54 | # Enforce asterisks as the style to use for emphasis 55 | style: asterisk 56 | 57 | # MD050/strong-style - Strong style should be consistent 58 | MD050: 59 | # Enforce asterisks as the style to use for strong 60 | style: asterisk 61 | -------------------------------------------------------------------------------- /tests/test_models.py: -------------------------------------------------------------------------------- 1 | """Test for Models.""" 2 | 3 | # cisagov Libraries 4 | from models.models import SMTP, Assessment, Campaign, Group, Page, Target, Template 5 | 6 | 7 | class TestParse: 8 | """TestParse class.""" 9 | 10 | def test_email_parse(self, target_object, target_json): 11 | """Test parsing of email target JSON.""" 12 | assert target_object.as_dict() == Target.parse(target_json[0]).as_dict() 13 | 14 | def test_group_parse(self, group_object, group_json): 15 | """Test parsing of group JSON.""" 16 | assert group_object.as_dict() == Group.parse(group_json).as_dict() 17 | 18 | def test_smtp_parse(self, smtp_object, smtp_json): 19 | """Test parsing of SMTP JSON.""" 20 | assert 
"""Generate templates for import into an assessment JSON with the PCA Wizard.

Usage:
  pca-wizard-templates (--emails | --targets)
  pca-wizard-templates (-h | --help)
  pca-wizard-templates --version
Options:
  -e --emails   Output a JSON template file for phishing emails.
  -t --targets  Output a CSV template file for phishing targets.
  -h --help     Show this screen.
  --version     Show version.
"""

# Standard Python Libraries
import json
from typing import Dict

# Filenames for the generated template files.  A single constant feeds both
# the status message and the open() call so the two can never drift apart
# again: previously email_output() announced "template_emails.json" but
# actually wrote "template_email.json".
EMAIL_TEMPLATE_FILENAME = "template_emails.json"
TARGET_TEMPLATE_FILENAME = "template_targets.csv"

# Example phishing-email definition written by email_output(); the values are
# placeholders for the operator to replace.
# NOTE(review): the original "from_address" and "html" markup was lost to
# angle-bracket stripping during extraction; these reconstructed placeholder
# values should be confirmed against repository history.
EMAIL_TEMPLATE = {
    "id": "Database ID",
    "from_address": "John Doe <john.doe@domain.test>",
    "subject": "Subject",
    "html": "<p>Email body HTML</p>",
    "text": "",
}

# Header row for the CSV of phishing targets written by targets_output().
TARGET_TEMPLATE = "First Name,Last Name,Email,Position"


def email_output():
    """Write an example phishing-email JSON template to the current directory."""
    print(f'Saving "{EMAIL_TEMPLATE_FILENAME}"...')
    with open(EMAIL_TEMPLATE_FILENAME, "w") as fp:
        json.dump(EMAIL_TEMPLATE, fp, indent=4)


def targets_output():
    """Write an example phishing-targets CSV template to the current directory."""
    print(f'Saving "{TARGET_TEMPLATE_FILENAME}"...')
    with open(TARGET_TEMPLATE_FILENAME, "w") as fp:
        fp.write(TARGET_TEMPLATE)


def main() -> None:
    """Execute either email_output() or targets_output()."""
    # Imported lazily so that importing this module for its output helpers
    # (as src/templates/__init__.py does) works without the docopt CLI
    # dependency installed.
    # Third-Party Libraries
    from docopt import docopt

    from ._version import __version__

    args: Dict[str, str] = docopt(__doc__, version=__version__)

    if args["--emails"]:
        email_output()
    elif args["--targets"]:
        targets_output()
["name.last@domain.test"]) 38 | @pytest.mark.parametrize("domain", ["test.test"]) 39 | def test_validate_domain_invalid(email, domain): 40 | """Test an invalid domain.""" 41 | assert not validate_domain(email, domain) 42 | 43 | 44 | @pytest.mark.parametrize("assessment_id", ["RV1234"]) 45 | def test_validate_assessment_id_valid(assessment_id): 46 | """Test a valid assessment_id.""" 47 | assert validate_assessment_id(assessment_id) 48 | 49 | 50 | @pytest.mark.parametrize("assessment_id", ["RV...."]) 51 | def test_validate_assessment_id_invalid(assessment_id): 52 | """Test an invalid assessment_id.""" 53 | assert not validate_assessment_id(assessment_id) 54 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Any ignore directives should be uncommented in downstream projects to disable 4 | # Dependabot updates for the given dependency. Downstream projects will get 5 | # these updates when the pull request(s) in the appropriate skeleton are merged 6 | # and Lineage processes these changes. 
7 | 8 | updates: 9 | - directory: / 10 | ignore: 11 | # Managed by cisagov/skeleton-generic 12 | - dependency-name: actions/cache 13 | - dependency-name: actions/checkout 14 | - dependency-name: actions/dependency-review-action 15 | - dependency-name: actions/labeler 16 | - dependency-name: actions/setup-go 17 | - dependency-name: actions/setup-python 18 | - dependency-name: cisagov/action-job-preamble 19 | - dependency-name: cisagov/setup-env-github-action 20 | - dependency-name: crazy-max/ghaction-github-labeler 21 | - dependency-name: github/codeql-action 22 | - dependency-name: hashicorp/setup-packer 23 | - dependency-name: hashicorp/setup-terraform 24 | - dependency-name: mxschmitt/action-tmate 25 | # Managed by cisagov/skeleton-python-library 26 | - dependency-name: actions/download-artifact 27 | - dependency-name: actions/upload-artifact 28 | labels: 29 | # dependabot default we need to replicate 30 | - dependencies 31 | # This matches our label definition in .github/labels.yml as opposed to 32 | # dependabot's default of `github_actions`. 
33 | - github-actions 34 | package-ecosystem: github-actions 35 | schedule: 36 | interval: weekly 37 | 38 | - directory: / 39 | package-ecosystem: pip 40 | schedule: 41 | interval: weekly 42 | 43 | - directory: / 44 | package-ecosystem: terraform 45 | schedule: 46 | interval: weekly 47 | version: 2 48 | -------------------------------------------------------------------------------- /src/assessment/sample_assessment.json: -------------------------------------------------------------------------------- 1 | { 2 | "campaigns": [ 3 | { 4 | "completed_date": "01/01/2025 14:00", 5 | "group_name": "RVXX1-G1", 6 | "launch_date": "01/01/2025 13:00", 7 | "name": "RVXXX1-C1", 8 | "page_name": "RVXXX1-1-AutoForward", 9 | "smtp": { 10 | "from_address": "Camp1 Phish", 11 | "host": "postfix:587", 12 | "ignore_cert": true, 13 | "interface_type": "SMTP", 14 | "name": "RVXXX1-SP" 15 | }, 16 | "template": { 17 | "html": "Body Test", 18 | "name": "RVXXX1-T1-ID", 19 | "subject": "Campaign 1", 20 | "text": "Body Test" 21 | }, 22 | "url": "http://bad.domain/camp1" 23 | } 24 | ], 25 | "domain": "bad.domain", 26 | "end_date": "01/06/2025 19:00", 27 | "groups": [ 28 | { 29 | "name": "RVXXX1-G1", 30 | "targets": [ 31 | { 32 | "email": "john.doe@domain.test", 33 | "first_name": "John", 34 | "last_name": "Doe", 35 | "position": "IT" 36 | }, 37 | { 38 | "email": "jane.smith@domain.test", 39 | "first_name": "Jane", 40 | "last_name": "Smith", 41 | "position": "HR" 42 | } 43 | ] 44 | } 45 | ], 46 | "id": "RVXXX1", 47 | "pages": [ 48 | { 49 | "capture_credentials": true, 50 | "capture_passwords": false, 51 | "html": "Test", 52 | "name": "RVXXX1-1-AutoForward", 53 | "redirect_url": "http://domain.tld/page" 54 | }, 55 | { 56 | "capture_credentials": true, 57 | "capture_passwords": false, 58 | "html": "Test", 59 | "name": "RVXXX1-2-AutoForward", 60 | "redirect_url": "http://other_domain.tld/page" 61 | } 62 | ], 63 | "start_date": "01/01/2025 13:00", 64 | "target_domain": "domain.test", 65 | 
---
name: release

on: # yamllint disable-line rule:truthy
  release:
    types:
      - prereleased
      - released

env:
  IMAGE_NAME: cisagov/gophish-tools
  DOCKER_PW: ${{ secrets.DOCKER_PW }}
  DOCKER_USER: ${{ secrets.DOCKER_USER }}

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
        with:
          python-version: "3.10"
      - name: Determine image version
        id: get_ver
        run: |
          # The "::set-output" workflow command was deprecated and later
          # disabled by GitHub; step outputs must be written to the file
          # referenced by $GITHUB_OUTPUT instead.
          echo "version=$(./bump-version show)" >> "$GITHUB_OUTPUT"
      - name: Build Docker image
        run: |
          docker build \
            --tag "$IMAGE_NAME" \
            --build-arg GIT_COMMIT=$(git log -1 --format=%H) \
            --build-arg GIT_REMOTE=$(git remote get-url origin) \
            --build-arg VERSION=${{ steps.get_ver.outputs.version }} \
            .
      - name: Tag Docker image
        run: |
          IFS='.' read -r -a version_array \
            <<< "${{ steps.get_ver.outputs.version }}"
          docker login --username "$DOCKER_USER" --password "$DOCKER_PW"
          docker tag "$IMAGE_NAME" "${IMAGE_NAME}:latest"
          docker tag "$IMAGE_NAME" \
            "${IMAGE_NAME}:${{ steps.get_ver.outputs.version }}"
          docker tag "$IMAGE_NAME" \
            "${IMAGE_NAME}:${version_array[0]}.${version_array[1]}"
          docker tag "$IMAGE_NAME" "${IMAGE_NAME}:${version_array[0]}"
      - name: Publish image to Docker Hub
        run: |
          IFS='.' read -r -a version_array \
            <<< "${{ steps.get_ver.outputs.version }}"
          docker push "${IMAGE_NAME}:latest"
          docker push "${IMAGE_NAME}:${{ steps.get_ver.outputs.version }}"
          docker push "${IMAGE_NAME}:${version_array[0]}.${version_array[1]}"
          docker push "${IMAGE_NAME}:${version_array[0]}"
      - name: Publish README.md to Docker Hub
        uses: peter-evans/dockerhub-description@v5
        env:
          DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
          DOCKERHUB_PASSWORD: ${{ secrets.DOCKER_PW }}
          DOCKERHUB_REPOSITORY: ${{ env.IMAGE_NAME }}
19 | - .pre-commit-config.yaml 20 | - requirements*.txt 21 | - pyproject.toml 22 | docker: 23 | - changed-files: 24 | - any-glob-to-any-file: 25 | - "**/compose*.yml" 26 | - "**/docker-compose*.yml" 27 | - "**/Dockerfile*" 28 | documentation: 29 | - changed-files: 30 | - any-glob-to-any-file: 31 | - "**/*.md" 32 | github-actions: 33 | - changed-files: 34 | - any-glob-to-any-file: 35 | - .github/workflows/** 36 | javascript: 37 | - changed-files: 38 | - any-glob-to-any-file: 39 | - "**/*.js" 40 | packer: 41 | - changed-files: 42 | - any-glob-to-any-file: 43 | - "**/*.pkr.hcl" 44 | python: 45 | - changed-files: 46 | - any-glob-to-any-file: 47 | - "**/*.py" 48 | shell script: 49 | - changed-files: 50 | - any-glob-to-any-file: 51 | # If this project has any shell scripts that do not end in the ".sh" 52 | # extension, add them below. 53 | - "**/*.sh" 54 | - bump-version 55 | - setup-env 56 | terraform: 57 | - changed-files: 58 | - any-glob-to-any-file: 59 | - "**/*.tf" 60 | test: 61 | - changed-files: 62 | - any-glob-to-any-file: 63 | # Add any test-related files or paths. 64 | - .ansible-lint 65 | - .mdl_config.yaml 66 | - .yamllint 67 | - pyproject.toml 68 | - tests/** 69 | typescript: 70 | - changed-files: 71 | - any-glob-to-any-file: 72 | - "**/*.ts" 73 | upstream update: 74 | - head-branch: 75 | # Any Lineage pull requests should use this branch. 76 | - lineage/skeleton 77 | version bump: 78 | - changed-files: 79 | - any-glob-to-any-file: 80 | # Ensure this matches your version tracking file(s). 81 | - src/**/_version.py 82 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | rules: 5 | braces: 6 | # Do not allow non-empty flow mappings 7 | forbid: non-empty 8 | # Allow up to one space inside braces. This is required for Ansible compatibility. 
9 | max-spaces-inside: 1 10 | 11 | brackets: 12 | # Do not allow non-empty flow sequences 13 | forbid: non-empty 14 | 15 | comments: 16 | # Ensure that inline comments have at least one space before the preceding content. 17 | # This is required for Ansible compatibility. 18 | min-spaces-from-content: 1 19 | 20 | # yamllint does not like it when you comment out different parts of 21 | # dictionaries in a list. You can see 22 | # https://github.com/adrienverge/yamllint/issues/384 for some examples of 23 | # this behavior. 24 | comments-indentation: disable 25 | 26 | indentation: 27 | # Ensure that block sequences inside of a mapping are indented 28 | indent-sequences: true 29 | # Enforce a specific number of spaces 30 | spaces: 2 31 | 32 | # yamllint does not allow inline mappings that exceed the line length by 33 | # default. There are many scenarios where the inline mapping may be a key, 34 | # hash, or other long value that would exceed the line length but cannot 35 | # reasonably be broken across lines. 36 | line-length: 37 | # This rule implies the allow-non-breakable-words rule 38 | allow-non-breakable-inline-mappings: true 39 | # Allows a 10% overage from the default limit of 80 40 | max: 88 41 | 42 | # Using anything other than strings to express octal values can lead to unexpected 43 | # and potentially unsafe behavior. Ansible strongly recommends against such practices 44 | # and these rules are needed for Ansible compatibility. Please see the following for 45 | # more information: 46 | # https://ansible.readthedocs.io/projects/lint/rules/risky-octal/ 47 | octal-values: 48 | # Do not allow explicit octal values (those beginning with a leading 0o). 49 | forbid-explicit-octal: true 50 | # Do not allow implicit octal values (those beginning with a leading 0). 51 | forbid-implicit-octal: true 52 | 53 | quoted-strings: 54 | # Allow disallowed quotes (single quotes) for strings that contain allowed quotes 55 | # (double quotes). 
"""Functions related to user input."""

# Standard Python Libraries
from datetime import datetime
import logging

# Third-Party Libraries
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
import pytz

# cisagov Libraries
from util.validate import BlankInputValidator, BooleanValidator

logger = logging.getLogger(__name__)


def yes_no_prompt(message):
    """Prompt for a yes/no response.

    Args:
        message (str): Question to pose to the user.

    Returns:
        str: The lowercased response, either "yes" or "no".
    """
    return prompt(
        "{}?(yes/no) ".format(message),
        completer=WordCompleter(["yes", "no"], ignore_case=True),
        validator=BooleanValidator(),
    ).lower()


def get_input(message, default_value=""):
    """Record user input.

    Args:
        message (str): Prompt to display to the user.

    Keyword Args:
        default_value (str): Value to pre-populate the input with.

    Returns:
        str: The non-blank text entered by the user.
    """
    return prompt(
        f"{message} ",
        default=default_value,
        validator=BlankInputValidator(),
    )


def get_number(msg):
    """Prompt until the user provides a valid integer.

    Args:
        msg (str): Prompt to display to the user.

    Returns:
        int: The integer entered by the user.
    """
    while True:
        try:
            num = int(get_input(msg))
            break
        except ValueError:
            # Use the module logger for consistency with the rest of
            # this module instead of the root logger.
            logger.error("Non-integer entered.")
            logger.warning("Please put only an integer")

    return num


def get_time_input(type_, time_zone, default=""):
    """Get time input with time_zone and convert to ISO format.

    Utility for getting time input for a specific date type (start/end)
    in a specific timezone. Once received, input is validated and converted
    to ISO format.

    Arguments:
        type_ {str} -- Indicates the date type, generally start or complete.
        time_zone {str} -- pytz timezone in a string

    Keyword Arguments:
        default {str} -- Default value to pre-populate user input (default: {""})

    Returns:
        str -- ISO string representation of the provided time, localized
        to time_zone (the zone's UTC offset is included in the string).
    """
    while True:
        # Only datetime.strptime below can raise ValueError; prompting
        # itself is kept outside the try block.
        input_time = get_input(
            " Please enter the {} date and time from {} (mm/dd/YYYY HH:MM (24hr)):\n ".format(
                type_, time_zone
            ),
            default,
        )
        try:
            parsed_time = datetime.strptime(input_time, "%m/%d/%Y %H:%M")
            break
        except ValueError:
            logger.error("Invalid time input: %s", input_time)

    # Localize the naive datetime to the requested zone and return it in
    # ISO format.
    return pytz.timezone(time_zone).localize(parsed_time).isoformat()
searching for unknown campaign id.""" 33 | with pytest.raises(LookupError): 34 | get_campaign_id("RV0000-C6", campaigns) 35 | 36 | 37 | class TestExport: 38 | """Test gophish-export script.""" 39 | 40 | @patch("tools.connect") 41 | def test_assessment_exists_found(self, mock_api, multiple_campaign_object): 42 | """Verify True is returned when assessment is in Gophish.""" 43 | mock_api.campaigns.get.return_value = multiple_campaign_object 44 | 45 | assert assessment_exists(mock_api, "RVXXX1") is True 46 | 47 | @patch("tools.connect") 48 | def test_assessment_exists_not_found(self, mock_api, multiple_campaign_object): 49 | """Verify False is returned when assessment is not in Gophish.""" 50 | mock_api.campaigns.get.return_value = multiple_campaign_object 51 | assert assessment_exists(mock_api, "RVXXX3") is False 52 | 53 | @patch("tools.connect") 54 | def test_find_unique_target_clicks_count(self, mock_api, multiple_click_object): 55 | """Verify that the correct number of unique users in a click list is found.""" 56 | assert find_unique_target_clicks_count(multiple_click_object) == 4 57 | 58 | def mock_get_group_ids(self, s, group_object): 59 | """Return a mock list of Gophish group objects.""" 60 | return group_object 61 | 62 | # Mocks the group id's returned for the assessment's groups. 63 | @patch("tools.gophish_export.get_group_ids", return_value=[1, 2]) 64 | # Mock API to allow Gophish group objects to be returned. 
65 | @patch("tools.connect") 66 | def test_export_targets( 67 | self, mock_api, mock_export, multiple_gophish_group_object, email_target_json 68 | ): 69 | """Verify the appropriate JSON is created by export targets.""" 70 | mock_api.groups.get.side_effect = multiple_gophish_group_object 71 | 72 | assert export_targets(mock_api, "RVXXX1") == email_target_json 73 | -------------------------------------------------------------------------------- /.github/labels.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Rather than breaking up descriptions into multiline strings we disable that 3 | # specific rule in yamllint for this file. 4 | # yamllint disable rule:line-length 5 | - color: ff5850 6 | description: Pull requests that update Ansible code 7 | name: ansible 8 | - color: eb6420 9 | description: This issue or pull request is awaiting the outcome of another issue or pull request 10 | name: blocked 11 | - color: "000000" 12 | description: This issue or pull request involves changes to existing functionality 13 | name: breaking change 14 | - color: d73a4a 15 | description: This issue or pull request addresses broken functionality 16 | name: bug 17 | - color: 07648d 18 | description: This issue will be advertised on code.gov's Open Tasks page (https://code.gov/open-tasks) 19 | name: code.gov 20 | - color: 0366d6 21 | description: Pull requests that update a dependency file 22 | name: dependencies 23 | - color: 1d63ed 24 | description: Pull requests that update Docker code 25 | name: docker 26 | - color: 5319e7 27 | description: This issue or pull request improves or adds to documentation 28 | name: documentation 29 | - color: cfd3d7 30 | description: This issue or pull request already exists or is covered in another issue or pull request 31 | name: duplicate 32 | - color: b005bc 33 | description: A high-level objective issue encompassing multiple issues instead of a specific unit of work 34 | name: epic 35 | - color: 
"000000" 36 | description: Pull requests that update GitHub Actions code 37 | name: github-actions 38 | - color: 0e8a16 39 | description: This issue or pull request is well-defined and good for newcomers 40 | name: good first issue 41 | - color: ff7518 42 | description: Pull request that should count toward Hacktoberfest participation 43 | name: hacktoberfest-accepted 44 | - color: a2eeef 45 | description: This issue or pull request will add or improve functionality, maintainability, or ease of use 46 | name: improvement 47 | - color: fef2c0 48 | description: This issue or pull request is not applicable, incorrect, or obsolete 49 | name: invalid 50 | - color: f0db4f 51 | description: Pull requests that update JavaScript code 52 | name: javascript 53 | - color: ce099a 54 | description: This pull request is ready to merge during the next Lineage Kraken release 55 | name: kraken 🐙 56 | - color: a4fc5d 57 | description: This issue or pull request requires further information 58 | name: need info 59 | - color: fcdb45 60 | description: This pull request is awaiting an action or decision to move forward 61 | name: on hold 62 | - color: 02a8ef 63 | description: Pull requests that update Packer code 64 | name: packer 65 | - color: 3776ab 66 | description: Pull requests that update Python code 67 | name: python 68 | - color: ef476c 69 | description: This issue is a request for information or needs discussion 70 | name: question 71 | - color: d73a4a 72 | description: This issue or pull request addresses a security issue 73 | name: security 74 | - color: 4eaa25 75 | description: Pull requests that update shell scripts 76 | name: shell script 77 | - color: 7b42bc 78 | description: Pull requests that update Terraform code 79 | name: terraform 80 | - color: 00008b 81 | description: This issue or pull request adds or otherwise modifies test code 82 | name: test 83 | - color: 2678c5 84 | description: Pull requests that update TypeScript code 85 | name: typescript 86 | - color: 
"""Validation functions."""

# Standard Python Libraries
import re

# Third-Party Libraries
from prompt_toolkit.validation import ValidationError, Validator

# Canonical set of keys (with a description of their expected content)
# that an imported email template JSON document must contain.
EMAIL_TEMPLATE = {
    "id": "Template ID from Database",
    "from_address": 'Full email address format "Display Name"',
    "subject": "Email Subject with Gophish tags if desired",
    "html": "HTML Body of the email",
    "text": "Text Body of the email",
}


def validate_assessment_id(assessment_id):
    """Validate that the provided assessment_id is matching the valid assessment_id format. Example: RV1234.

    Args:
        assessment_id (string): Assessment identifier to validate.

    Returns:
        match: The result of a regular expression match (None if no match).
    """
    return re.match(r"^RV\d{4,5}$", assessment_id)


def validate_email(email):
    """Validate email format.

    Args:
        email (string): Email address in string format.

    Returns:
        Boolean: True when the email address format is valid.

    Raises:
        FormatError: If the email address format is invalid.
    """
    # Require a literal dot before the TLD portion. The previous pattern
    # used ".." (any two characters), which accepted addresses with no
    # dot at all, such as "user@domainXcom".
    if not bool(
        re.match(
            r"^[a-zA-Z0-9]+[a-zA-Z0-9-.+_]+@(\[?)[a-zA-Z0-9-.]+\.([a-zA-Z]{2,3}|[0-9]{2,6})(]?)$",
            email,
        )
    ):
        raise FormatError(email)
    return True


def validate_domain(email, domains):
    """Check that the email's domain matches that of the assessment.

    Args:
        email (string): Email address to check.
        domains (list): Lowercase domain names belonging to the assessment.

    Returns:
        Boolean: True when the email's domain is in domains, False otherwise.
    """
    return email.split("@")[1].lower() in domains


def email_import_validation(import_temp):
    """Validate that import email JSON has appropriate fields.

    Args:
        import_temp (dict): Parsed email template JSON.

    Raises:
        MissingKey: For the first required EMAIL_TEMPLATE key that is absent.
    """
    missing = [key for key in EMAIL_TEMPLATE if key not in import_temp]
    if missing:
        raise MissingKey(missing[0], EMAIL_TEMPLATE[missing[0]])


class BlankInputValidator(Validator):
    """Reject empty input in a prompt_toolkit prompt."""

    def validate(self, document):
        """Raise ValidationError if the input text is empty."""
        if not document.text:
            raise ValidationError(message="Blank Input")


class BooleanValidator(Validator):
    """Require a 'yes' or 'no' response in a prompt_toolkit prompt."""

    def validate(self, document):
        """Raise ValidationError unless the input text is 'yes' or 'no'."""
        if document.text.lower() not in ["yes", "no"]:
            raise ValidationError(message="Invalid Yes/No response.")


class EmailValidator(Validator):
    """Require a validly formatted email address in a prompt_toolkit prompt."""

    def validate(self, document):
        """Raise ValidationError if the input text is not a valid email address."""
        # NOTE(review): validate_email raises FormatError on invalid input
        # (it never returns False), so an invalid address propagates
        # FormatError instead of reaching this ValidationError — confirm
        # this is the intended behavior for prompt callers.
        email = document.text
        if not validate_email(email):
            raise ValidationError(message="Invalid Email Address")


class FormatError(Exception):
    """Raised when an email address does not match the expected format."""

    def __init__(self, email):
        """Store the offending email and a human-readable description."""
        self.email = email
        self.description = f"ERROR: {email} incorrect format"


class MissingKey(Exception):
    """Raised when a required email template key is missing."""

    def __init__(self, key, description):
        """Store the missing key and a description of its expected content."""
        self.key = key
        self.description = description
40 | monitor_permissions: "false" 41 | output_workflow_context: "true" 42 | # Use a variable to specify the permissions monitoring 43 | # configuration. By default this will yield the 44 | # configuration stored in the cisagov organization-level 45 | # variable, but if you want to use a different configuration 46 | # then simply: 47 | # 1. Create a repository-level variable with the name 48 | # ACTIONS_PERMISSIONS_CONFIG. 49 | # 2. Set this new variable's value to the configuration you 50 | # want to use for this repository. 51 | # 52 | # Note in particular that changing the permissions 53 | # monitoring configuration *does not* require you to modify 54 | # this workflow. 55 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 56 | label: 57 | needs: 58 | - diagnostics 59 | permissions: 60 | # Permissions required by actions/labeler 61 | contents: read 62 | pull-requests: write 63 | runs-on: ubuntu-latest 64 | steps: 65 | - name: Apply standard cisagov job preamble 66 | uses: cisagov/action-job-preamble@v1 67 | with: 68 | # This functionality is poorly implemented and has been 69 | # causing problems due to the MITM implementation hogging or 70 | # leaking memory. As a result we disable it by default. If 71 | # you want to temporarily enable it, simply set 72 | # monitor_permissions equal to "true". 73 | # 74 | # TODO: Re-enable this functionality when practical. See 75 | # cisagov/skeleton-generic#207 for more details. 76 | monitor_permissions: "false" 77 | # Use a variable to specify the permissions monitoring 78 | # configuration. By default this will yield the 79 | # configuration stored in the cisagov organization-level 80 | # variable, but if you want to use a different configuration 81 | # then simply: 82 | # 1. Create a repository-level variable with the name 83 | # ACTIONS_PERMISSIONS_CONFIG. 84 | # 2. Set this new variable's value to the configuration you 85 | # want to use for this repository. 
86 | # 87 | # Note in particular that changing the permissions 88 | # monitoring configuration *does not* require you to modify 89 | # this workflow. 90 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 91 | - name: Apply suitable labels to a pull request 92 | uses: actions/labeler@v6 93 | -------------------------------------------------------------------------------- /.github/workflows/sync-labels.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: sync-labels 3 | 4 | on: # yamllint disable-line rule:truthy 5 | push: 6 | paths: 7 | - .github/labels.yml 8 | - .github/workflows/sync-labels.yml 9 | workflow_dispatch: 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | diagnostics: 16 | name: Run diagnostics 17 | # This job does not need any permissions 18 | permissions: {} 19 | runs-on: ubuntu-latest 20 | steps: 21 | # Note that a duplicate of this step must be added at the top of 22 | # each job. 23 | - name: Apply standard cisagov job preamble 24 | uses: cisagov/action-job-preamble@v1 25 | with: 26 | check_github_status: "true" 27 | # This functionality is poorly implemented and has been 28 | # causing problems due to the MITM implementation hogging or 29 | # leaking memory. As a result we disable it by default. If 30 | # you want to temporarily enable it, simply set 31 | # monitor_permissions equal to "true". 32 | # 33 | # TODO: Re-enable this functionality when practical. See 34 | # cisagov/skeleton-generic#207 for more details. 35 | monitor_permissions: "false" 36 | output_workflow_context: "true" 37 | # Use a variable to specify the permissions monitoring 38 | # configuration. By default this will yield the 39 | # configuration stored in the cisagov organization-level 40 | # variable, but if you want to use a different configuration 41 | # then simply: 42 | # 1. Create a repository-level variable with the name 43 | # ACTIONS_PERMISSIONS_CONFIG. 44 | # 2. 
Set this new variable's value to the configuration you 45 | # want to use for this repository. 46 | # 47 | # Note in particular that changing the permissions 48 | # monitoring configuration *does not* require you to modify 49 | # this workflow. 50 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 51 | labeler: 52 | needs: 53 | - diagnostics 54 | permissions: 55 | # actions/checkout needs this to fetch code 56 | contents: read 57 | # crazy-max/ghaction-github-labeler needs this to manage repository labels 58 | issues: write 59 | runs-on: ubuntu-latest 60 | steps: 61 | - name: Apply standard cisagov job preamble 62 | uses: cisagov/action-job-preamble@v1 63 | with: 64 | # This functionality is poorly implemented and has been 65 | # causing problems due to the MITM implementation hogging or 66 | # leaking memory. As a result we disable it by default. If 67 | # you want to temporarily enable it, simply set 68 | # monitor_permissions equal to "true". 69 | # 70 | # TODO: Re-enable this functionality when practical. See 71 | # cisagov/skeleton-generic#207 for more details. 72 | monitor_permissions: "false" 73 | # Use a variable to specify the permissions monitoring 74 | # configuration. By default this will yield the 75 | # configuration stored in the cisagov organization-level 76 | # variable, but if you want to use a different configuration 77 | # then simply: 78 | # 1. Create a repository-level variable with the name 79 | # ACTIONS_PERMISSIONS_CONFIG. 80 | # 2. Set this new variable's value to the configuration you 81 | # want to use for this repository. 82 | # 83 | # Note in particular that changing the permissions 84 | # monitoring configuration *does not* require you to modify 85 | # this workflow. 
86 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 87 | - uses: actions/checkout@v5 88 | - name: Sync repository labels 89 | if: success() 90 | uses: crazy-max/ghaction-github-labeler@v5 91 | with: 92 | # This is a hideous ternary equivalent so we only do a dry run unless 93 | # this workflow is triggered by the develop branch. 94 | dry-run: ${{ github.ref_name == 'develop' && 'false' || 'true' }} 95 | -------------------------------------------------------------------------------- /.github/workflows/dependency-review.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Dependency review 3 | 4 | on: # yamllint disable-line rule:truthy 5 | merge_group: 6 | types: 7 | - checks_requested 8 | pull_request: 9 | 10 | # Set a default shell for any run steps. The `-Eueo pipefail` sets errtrace, 11 | # nounset, errexit, and pipefail. The `-x` will print all commands as they are 12 | # run. Please see the GitHub Actions documentation for more information: 13 | # https://docs.github.com/en/actions/using-jobs/setting-default-values-for-jobs 14 | defaults: 15 | run: 16 | shell: bash -Eueo pipefail -x {0} 17 | 18 | jobs: 19 | diagnostics: 20 | name: Run diagnostics 21 | # This job does not need any permissions 22 | permissions: {} 23 | runs-on: ubuntu-latest 24 | steps: 25 | # Note that a duplicate of this step must be added at the top of 26 | # each job. 27 | - name: Apply standard cisagov job preamble 28 | uses: cisagov/action-job-preamble@v1 29 | with: 30 | check_github_status: "true" 31 | # This functionality is poorly implemented and has been 32 | # causing problems due to the MITM implementation hogging or 33 | # leaking memory. As a result we disable it by default. If 34 | # you want to temporarily enable it, simply set 35 | # monitor_permissions equal to "true". 36 | # 37 | # TODO: Re-enable this functionality when practical. See 38 | # cisagov/skeleton-generic#207 for more details. 
39 | monitor_permissions: "false" 40 | output_workflow_context: "true" 41 | # Use a variable to specify the permissions monitoring 42 | # configuration. By default this will yield the 43 | # configuration stored in the cisagov organization-level 44 | # variable, but if you want to use a different configuration 45 | # then simply: 46 | # 1. Create a repository-level variable with the name 47 | # ACTIONS_PERMISSIONS_CONFIG. 48 | # 2. Set this new variable's value to the configuration you 49 | # want to use for this repository. 50 | # 51 | # Note in particular that changing the permissions 52 | # monitoring configuration *does not* require you to modify 53 | # this workflow. 54 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 55 | dependency-review: 56 | name: Dependency review 57 | needs: 58 | - diagnostics 59 | permissions: 60 | # actions/checkout needs this to fetch code 61 | contents: read 62 | runs-on: ubuntu-latest 63 | steps: 64 | - name: Apply standard cisagov job preamble 65 | uses: cisagov/action-job-preamble@v1 66 | with: 67 | # This functionality is poorly implemented and has been 68 | # causing problems due to the MITM implementation hogging or 69 | # leaking memory. As a result we disable it by default. If 70 | # you want to temporarily enable it, simply set 71 | # monitor_permissions equal to "true". 72 | # 73 | # TODO: Re-enable this functionality when practical. See 74 | # cisagov/skeleton-generic#207 for more details. 75 | monitor_permissions: "false" 76 | # Use a variable to specify the permissions monitoring 77 | # configuration. By default this will yield the 78 | # configuration stored in the cisagov organization-level 79 | # variable, but if you want to use a different configuration 80 | # then simply: 81 | # 1. Create a repository-level variable with the name 82 | # ACTIONS_PERMISSIONS_CONFIG. 83 | # 2. Set this new variable's value to the configuration you 84 | # want to use for this repository. 
85 | # 86 | # Note in particular that changing the permissions 87 | # monitoring configuration *does not* require you to modify 88 | # this workflow. 89 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 90 | - id: checkout-repo 91 | name: Checkout the repository 92 | uses: actions/checkout@v5 93 | - id: dependency-review 94 | name: Review dependency changes for vulnerabilities and license changes 95 | uses: actions/dependency-review-action@v4 96 | -------------------------------------------------------------------------------- /src/assessment/reschedule.py: -------------------------------------------------------------------------------- 1 | """Modify campaign start/end dates in an assessment JSON file. 2 | 3 | Usage: 4 | pca-assessment-reschedule [--log-level=LEVEL] ASSESSMENT_FILE 5 | pca-assessment-reschedule (-h | --help) 6 | pca-assessment-reschedule --version 7 | 8 | Options: 9 | ASSESSMENT_FILE JSON file containing the assessment information. 10 | -h --help Show this screen. 11 | --version Show version. 12 | -l --log-level=LEVEL If specified, then the log level will be set to 13 | the specified value. Valid values are "debug", "info", 14 | "warning", "error", and "critical". 
# Standard Python Libraries
import json
import logging
import sys
from typing import Dict

# Third-Party Libraries
from docopt import docopt

# cisagov Libraries
from models import Assessment
from util.input import get_number, get_time_input
from util.set_date import set_date

from ._version import __version__


def display_assessment_dates(assessment):
    """Display all campaigns in an assessment in a table."""
    print(f"Assessment ID: {assessment.id}")
    print(f"Start Date: {assessment.start_date} End Date: {assessment.end_date}\n")
    print("Campaign Launch End")
    print("-------- ------ ---")
    for campaign in assessment.campaigns:
        # The campaign number is the last character of the campaign name.
        print(f" {campaign.name[-1]} {campaign.launch_date} {campaign.complete_date}")

    print()


def change_dates(campaign, timezone):
    """Change dates for a campaign.

    Prompts for a new launch date and then for a complete date, re-prompting
    until the complete date is after the launch date. The campaign object is
    mutated in place and also returned.
    """
    logging.info(f"Changing Dates for {campaign.name}")
    logging.debug(f"Pre-Change Launch Date: {campaign.launch_date}")
    logging.debug(f"Pre-Change Complete Date: {campaign.complete_date}")

    campaign.launch_date = get_time_input("start", timezone, campaign.launch_date)

    while True:
        campaign.complete_date = get_time_input("end", timezone, campaign.complete_date)
        if campaign.complete_date > campaign.launch_date:
            break
        logging.error("Complete Date is not after Launch Date.")

    logging.debug(f"Post-Change Launch Date: {campaign.launch_date}")
    logging.debug(f"Post-Change Complete Date: {campaign.complete_date}")

    return campaign


def reschedule(assessment):
    """Reschedule assessment dates starting at a user-selected campaign."""
    logging.info("Determining where to start rescheduling...")
    assessment.reschedule = True
    display_assessment_dates(assessment)
    assessment.start_campaign = get_number(
        "Select a Campaign to start rescheduling at:"
    )

    for index, campaign in enumerate(assessment.campaigns):
        # The campaign number is the last character of the campaign name.
        if int(campaign.name[-1]) >= assessment.start_campaign:
            campaign = change_dates(campaign, assessment.timezone)
            # Write the updated campaign back to its own slot. The previous
            # code always assigned to index start_campaign - 1, clobbering
            # that one slot for every rescheduled campaign.
            assessment.campaigns[index] = campaign
            set_date("start_date", assessment, campaign.launch_date)
            set_date("end_date", assessment, campaign.complete_date)

    logging.info("Dates have been changed...")
    display_assessment_dates(assessment)

    return assessment


def main() -> None:
    """Set up logging, load the assessment JSON, and reschedule its dates."""
    args: Dict[str, str] = docopt(__doc__, version=__version__)

    # Set up logging
    log_level = args["--log-level"]
    try:
        logging.basicConfig(
            format="\n%(levelname)s: %(message)s", level=log_level.upper()
        )
    except ValueError:
        logging.critical(
            '"{}" is not a valid logging level. Possible values are debug, info, warning, and error.'.format(
                log_level
            )
        )
        sys.exit(1)

    try:
        with open(args["ASSESSMENT_FILE"]) as json_file:
            json_data = json.load(json_file)
    except OSError:
        logging.critical(f"JSON file not found: {args['ASSESSMENT_FILE']}")
        logging.critical("Please run command from the location with the file.")
        # Bandit complains about the input() function, but it is safe to
        # use in Python 3, which is required by this project.
        input("Press any key to close...")  # nosec
        # Previously execution fell through here and crashed with a
        # NameError because json_data was never assigned; exit instead.
        sys.exit(1)

    assessment = reschedule(Assessment.parse(json_data))

    with open(f"{assessment.id}-reschedule.json", "w") as fp:
        json.dump(assessment.as_dict(), fp, indent=4)

    logging.info(f"Assessment JSON ready: {assessment.id}-reschedule.json")
    # Stop logging and clean up
    logging.shutdown()
class TestPlainAssessment:
    """Canned prompt answers for driving the assessment builder in tests.

    Each ``*_values`` list holds ``[label, answer]`` pairs in the exact
    order the builder asks its questions; the ``mock_*`` methods pop the
    next pair and hand back only the answer. The labels (with their
    builder line-number hints) are never inspected — they exist purely to
    document which prompt each canned answer belongs to.
    """

    get_input_values = [
        ["2. Assessment Domain- 106", "bad.tld"],
        ["3. Target Domain- 108", "target.tld"],
        ["6. Redirect URL-453", "redirect.tld"],
        ["10. Email CSV- 344", "data/test_group.csv"],
        ["17. Email File 1- 237", "data/C1.json"],
        ["23. Email File 2- 237", "data/C2.json"],
        ["28. Email File 3- 237", "data/C3.json"],
        ["33. Email File 4- 237", "data/C4.json"],
        ["38. Email File 4- 237", "data/C5.json"],
        ["43. Email File 4- 237", "data/C6.json"],
    ]
    get_input = [["12. SMTP User", ""], ["13. SMTP Password", ""]]
    yes_no_values = [
        ["5. Auto Forward- 445", "yes"],
        ["7. Page Review", "no"],
        ["9. Group Labels- 320", "yes"],
        ["20. Campaign 1 Review", "no"],
        ["25. Campaign 2 Review", "no"],
        ["30. Campaign 3 Review", "no"],
        ["35. Campaign 4 Review", "no"],
        ["40. Campaign 5 Review", "no"],
        ["45. Campaign 6 Review", "no"],
    ]
    get_number_values = [
        ["4. Number of Pages", 1],
        ["8. Num Groups- 315", 1],
        ["14. Num Campaigns- 114", 6],
    ]

    get_time_input_values = [
        ["15. Campaign 1 Launch Date- 130", "2020-01-20T13:00:00-04:00"],
        ["16. Campaign 1 End Date- 133", "2020-01-20T13:30:00-04:00"],
        ["21. Campaign 2 Launch Date- 130", "2020-01-20T14:00:00-04:00"],
        ["22. Campaign 2 End Date- 133", "2020-01-20T14:30:00-04:00"],
        ["26. Campaign 3 Launch Date- 130", "2020-01-20T15:00:00-04:00"],
        ["27. Campaign 3 End Date- 133", "2020-01-20T15:30:00-04:00"],
        ["31. Campaign 4 Launch Date- 130", "2020-01-20T16:00:00-04:00"],
        ["32. Campaign 4 End Date- 133", "2020-01-20T16:30:00-04:00"],
        ["36. Campaign 5 Launch Date- 130", "2020-01-20T17:00:00-04:00"],
        ["37. Campaign 5 End Date- 133", "2020-01-20T17:30:00-04:00"],
        ["41. Campaign 6 Launch Date- 130", "2020-01-20T18:00:00-04:00"],
        ["42. Campaign 6 End Date- 133", "2020-01-20T18:30:00-04:00"],
    ]

    prompt_values = [
        ["11. SMTP Host", "postfix:587"],
        ["18. Campaign 1 Url- 152", "http://bad.domain/camp1"],
        ["24. Campaign 2 Url- 152", "http://bad.domain/camp2"],
        ["29. Campaign 3 Url- 152", "http://bad.domain/camp3"],
        ["34. Campaign 4 Url- 152", "http://bad.domain/camp4"],
        ["39. Campaign 3 Url- 152", "http://bad.domain/camp5"],
        ["44. Campaign 4 Url- 152", "http://bad.domain/camp6"],
    ]

    radio_dialog_values = [["1. Time Zone - 63", "US/Eastern"]]

    def mock_get_input(self, s):
        """Return the next canned get_input() answer."""
        _, answer = self.get_input_values.pop(0)
        return answer

    def mock_input(self, s):
        """Return the next canned input() answer."""
        _, answer = self.get_input.pop(0)
        return answer

    def mock_yes_no(self, s):
        """Return the next canned yes/no answer."""
        _, answer = self.yes_no_values.pop(0)
        return answer

    def mock_get_number(self, s):
        """Return the next canned numeric answer."""
        _, answer = self.get_number_values.pop(0)
        return answer

    def mock_get_time_input(self, type_, timezone):
        """Return the next canned timestamp answer."""
        _, answer = self.get_time_input_values.pop(0)
        return answer

    def mock_prompt(self, s, validator, default):
        """Return the next canned prompt() answer."""
        _, answer = self.prompt_values.pop(0)
        return answer

    def mock_radio(self, values, title, text):
        """Return the next canned radio-dialog selection."""
        _, answer = self.radio_dialog_values.pop(0)
        return answer

    def mock_id_arg(self, s):
        """Return the fixed assessment ID used throughout these fixtures."""
        return "RVXXX1"

    # TODO: Replace with a useful, functioning test

    # def test_assessment(self, monkeypatch):
    #     """Construct a test assessment from mock data."""
    #     with patch.object(sys, "argv", ["pca-wizard", "RVXXX1"]):
    #         assessment_builder.get_input = self.mock_get_input
    #         assessment_builder.input = self.mock_input
    #         assessment_builder.yes_no_prompt = self.mock_yes_no
    #         assessment_builder.get_number = self.mock_get_number
    #         assessment_builder.get_time_input = self.mock_get_time_input
    #         assessment_builder.prompt = self.mock_prompt
    #         assessment_builder.radiolist_dialog = self.mock_radio
    #         # assessment_builder.args["ASSESSMENT_ID"] = "RVXXX1"  # self.mock_id_arg
    #         assessment_builder.main()
    #         assert True
#!/usr/bin/env bash

# bump-version [--push] [--label LABEL] (major | minor | patch | prerelease | build | finalize | show)
# bump-version --list-files

set -o nounset
set -o errexit
set -o pipefail

# Stores the canonical version for the project.
VERSION_FILE=src/_version.py
# Files that should be updated with the new version.
VERSION_FILES=("$VERSION_FILE")

USAGE=$(
  cat << END_OF_LINE
Update the version of the project.

Usage:
  ${0##*/} [--push] [--label LABEL] (major | minor | patch | prerelease | build | finalize | show)
  ${0##*/} --list-files
  ${0##*/} (-h | --help)

Options:
  -h | --help    Show this message.
  --push         Perform a \`git push\` after updating the version.
  --label LABEL  Specify the label to use when updating the build or prerelease version.
  --list-files   List the files that will be updated when the version is bumped.
END_OF_LINE
)

old_version=$(sed -n "s/^__version__ = \"\(.*\)\"$/\1/p" "$VERSION_FILE")
# Escape the periods in the version so that, when used as a regular
# expression below, they match literal periods instead of any character.
old_version_regex=${old_version//\./\\\.}
new_version="$old_version"

bump_part=""
label=""
commit_prefix="Bump"
with_push=false
commands_with_label=("build" "prerelease")
commands_with_prerelease=("major" "minor" "patch")
with_prerelease=false

#######################################
# Display an error message, the help information, and exit with a non-zero status.
# Arguments:
#   Error message.
#######################################
function invalid_option() {
  echo "$1"
  echo "$USAGE"
  exit 1
}

#######################################
# Bump the version using the provided command.
# Arguments:
#   The version to bump.
#   The command to bump the version.
# Returns:
#   The new version.
#######################################
function bump_version() {
  local temp_version
  temp_version=$(python -c "import semver; print(semver.parse_version_info('$1').${2})")
  echo "$temp_version"
}

if [ $# -eq 0 ]; then
  echo "$USAGE"
  exit 1
else
  while [ $# -gt 0 ]; do
    case $1 in
      --push)
        if [ "$with_push" = true ]; then
          invalid_option "Push has already been set."
        fi

        with_push=true
        shift
        ;;
      --label)
        if [ -n "$label" ]; then
          invalid_option "Label has already been set."
        fi

        label="$2"
        shift 2
        ;;
      build | finalize | major | minor | patch)
        if [ -n "$bump_part" ]; then
          invalid_option "Only one version part should be bumped at a time."
        fi

        bump_part="$1"
        shift
        ;;
      prerelease)
        with_prerelease=true
        shift
        ;;
      show)
        echo "$old_version"
        exit 0
        ;;
      -h | --help)
        echo "$USAGE"
        exit 0
        ;;
      --list-files)
        printf '%s\n' "${VERSION_FILES[@]}"
        exit 0
        ;;
      *)
        invalid_option "Invalid option: $1"
        ;;
    esac
  done
fi

if [ -n "$label" ] && [ "$with_prerelease" = false ] && [[ ! " ${commands_with_label[*]} " =~ [[:space:]]${bump_part}[[:space:]] ]]; then
  invalid_option "Setting the label is only allowed for the following commands: ${commands_with_label[*]}"
fi

if [ "$with_prerelease" = true ] && [ -n "$bump_part" ] && [[ ! " ${commands_with_prerelease[*]} " =~ [[:space:]]${bump_part}[[:space:]] ]]; then
  invalid_option "Changing the prerelease is only allowed in conjunction with the following commands: ${commands_with_prerelease[*]}"
fi

label_option=""
if [ -n "$label" ]; then
  label_option="token='$label'"
fi

if [ -n "$bump_part" ]; then
  if [ "$bump_part" = "finalize" ]; then
    commit_prefix="Finalize"
    bump_command="finalize_version()"
  elif [ "$bump_part" = "build" ]; then
    bump_command="bump_${bump_part}($label_option)"
  else
    bump_command="bump_${bump_part}()"
  fi
  new_version=$(bump_version "$old_version" "$bump_command")
  echo Changing version from "$old_version" to "$new_version"
fi

if [ "$with_prerelease" = true ]; then
  bump_command="bump_prerelease($label_option)"
  temp_version=$(bump_version "$new_version" "$bump_command")
  echo Changing version from "$new_version" to "$temp_version"
  new_version="$temp_version"
fi

# Create the temporary file securely. The previous predictable
# /tmp/version.$$ path was subject to symlink attacks and PID-reuse
# collisions; mktemp avoids both.
tmp_file=$(mktemp)
# Ensure the temporary file is removed if the script exits early.
trap 'rm -f "$tmp_file"' EXIT
for version_file in "${VERSION_FILES[@]}"; do
  if [ ! -f "$version_file" ]; then
    echo Missing expected file: "$version_file"
    exit 1
  fi
  sed "s/$old_version_regex/$new_version/" "$version_file" > "$tmp_file"
  mv "$tmp_file" "$version_file"
done

git add "${VERSION_FILES[@]}"
git commit --message "$commit_prefix version from $old_version to $new_version"

if [ "$with_push" = true ]; then
  git push
fi
"""Send a duplicate assessment from Gophish to custom targets as a test.

Usage:
  Gophish-test [--log-level=LEVEL] ASSESSMENT_ID SERVER API_KEY
  Gophish-test (-h | --help)
  Gophish-test --version

Options:
  API_KEY                   Gophish API key.
  ASSESSMENT_ID             ID of the assessment to test.
  SERVER                    Full URL to Gophish server.
  -h --help                 Show this screen.
  --version                 Show version.
  -l --log-level=LEVEL      If specified, then the log level will be set to
                            the specified value.  Valid values are "debug", "info",
                            "warning", "error", and "critical". [default: info]

NOTE:
  * The test assessment is an exact copy of the real assessment that will be immediately sent
  to the custom targets provided in this tool.
"""

# Standard Python Libraries
import logging
import sys
from typing import Dict

# Third-Party Libraries
from docopt import docopt

# No type stubs exist for gophish, so we add "type: ignore" to tell mypy to
# ignore this library
from gophish.models import SMTP, Campaign, Group, Page, Template, User  # type: ignore
import urllib3

# cisagov Libraries
from tools.connect import connect_api
from util.input import get_input
from util.validate import validate_email

from ._version import __version__

# Disable "Insecure Request" warning: Gophish uses a self-signed certificate
# as default for https connections, which can not be verified by a third
# party; thus, an SSL insecure request warning is produced.
urllib3.disable_warnings()


def get_campaigns(api, assessment_id):
    """Return a list of all campaigns belonging to an assessment.

    Args:
        api: Authenticated Gophish API connection.
        assessment_id: Assessment identifier used as the campaign name prefix.

    Returns:
        A (possibly empty) list of campaigns whose names start with
        assessment_id.
    """
    logging.info("Gathering Campaigns")
    # Campaign names are prefixed with the assessment ID, so a prefix
    # match selects exactly this assessment's campaigns.
    assessmentCampaigns = [
        campaign
        for campaign in api.campaigns.get()
        if campaign.name.startswith(assessment_id)
    ]

    # Warn (rather than fail) on an empty result; the caller decides
    # whether to proceed.
    logging.debug("Num Campaigns: %d", len(assessmentCampaigns))
    if not assessmentCampaigns:
        logging.warning("No Campaigns found for %s", assessment_id)

    return assessmentCampaigns


def add_group(api, assessment_id):
    """Interactively build a test target group and post it to Gophish.

    Prompts the operator for target details until "done" is entered as a
    first name.

    Args:
        api: Authenticated Gophish API connection.
        assessment_id: Assessment identifier used to name the group.

    Returns:
        The name of the newly created group.
    """
    logging.info("Adding Test Group")

    newGroup = Group()

    newGroup.name = "Test-" + assessment_id

    # Holds list of Users to be added to group.
    targets = list()

    target = User()
    target.first_name = get_input("Enter First Name: ")
    # Collect targets until the operator types "done". (The original
    # condition also tested `first_name == ""`, but that clause was
    # subsumed by `!= "done"` and has been removed.)
    while target.first_name != "done":
        target.last_name = get_input("Enter Last Name: ")

        # Re-prompt until a valid email address is provided.
        while True:
            target.email = get_input("Enter Email: ")
            if not validate_email(target.email):
                print("Invalid Email")
            else:
                break

        target.position = get_input("Enter Org: ")

        targets.append(target)

        target = User()
        target.first_name = get_input("Enter First Name or 'done': ")

    newGroup.targets = targets

    newGroup = api.groups.post(newGroup)

    return newGroup.name


def campaign_test(api, assessmentCampaigns, assessment_id):
    """Create and launch a test copy of each assessment campaign.

    Each test campaign reuses the real campaign's landing page, template,
    sender profile, and URL, but targets a freshly created test group.

    Args:
        api: Authenticated Gophish API connection.
        assessmentCampaigns: Campaigns to duplicate for testing.
        assessment_id: Assessment identifier used to name the test group.

    Returns:
        True once all test campaigns have been posted.
    """
    # All test campaigns share a single operator-supplied target group.
    tempGroups = [Group(name=add_group(api, assessment_id))]

    for campaign in assessmentCampaigns:
        postCampaign = Campaign(
            name="Test-" + campaign.name,
            groups=tempGroups,
            page=Page(name=campaign.page.name),
            template=Template(name=campaign.template.name),
            smtp=SMTP(name=campaign.smtp.name),
            url=campaign.url,
        )

        postCampaign = api.campaigns.post(postCampaign)
        logging.debug("Test Campaign added: %s", postCampaign.name)

    logging.info("All Test campaigns added.")

    return True


def main() -> None:
    """Set up logging, connect to the API, and launch the test campaigns.

    Exits with status 1 if the log level is invalid or the API
    connection fails.
    """
    args: Dict[str, str] = docopt(__doc__, version=__version__)

    # Set up logging
    log_level = args["--log-level"]
    try:
        logging.basicConfig(
            format="\n%(levelname)s: %(message)s", level=log_level.upper()
        )
    except ValueError:
        logging.critical(
            '"%s" is not a valid logging level. Possible values are debug, info, warning, and error.',
            log_level,
        )
        sys.exit(1)

    # Connect to API
    try:
        api = connect_api(args["API_KEY"], args["SERVER"])
        logging.debug("Connected to: %s", args["SERVER"])
    except Exception as e:
        logging.critical(e.args[0])
        sys.exit(1)

    assessmentCampaigns = get_campaigns(api, args["ASSESSMENT_ID"])

    # Only build the test group and campaigns when there is something to copy.
    if len(assessmentCampaigns) > 0:
        campaign_test(api, assessmentCampaigns, args["ASSESSMENT_ID"])

    # Stop logging and clean up
    logging.shutdown()
22 | branches-ignore: 23 | - dependabot/** 24 | schedule: 25 | - cron: 0 14 * * 6 26 | 27 | jobs: 28 | diagnostics: 29 | name: Run diagnostics 30 | # This job does not need any permissions 31 | permissions: {} 32 | runs-on: ubuntu-latest 33 | steps: 34 | # Note that a duplicate of this step must be added at the top of 35 | # each job. 36 | - name: Apply standard cisagov job preamble 37 | uses: cisagov/action-job-preamble@v1 38 | with: 39 | check_github_status: "true" 40 | # This functionality is poorly implemented and has been 41 | # causing problems due to the MITM implementation hogging or 42 | # leaking memory. As a result we disable it by default. If 43 | # you want to temporarily enable it, simply set 44 | # monitor_permissions equal to "true". 45 | # 46 | # TODO: Re-enable this functionality when practical. See 47 | # cisagov/skeleton-generic#207 for more details. 48 | monitor_permissions: "false" 49 | output_workflow_context: "true" 50 | # Use a variable to specify the permissions monitoring 51 | # configuration. By default this will yield the 52 | # configuration stored in the cisagov organization-level 53 | # variable, but if you want to use a different configuration 54 | # then simply: 55 | # 1. Create a repository-level variable with the name 56 | # ACTIONS_PERMISSIONS_CONFIG. 57 | # 2. Set this new variable's value to the configuration you 58 | # want to use for this repository. 59 | # 60 | # Note in particular that changing the permissions 61 | # monitoring configuration *does not* require you to modify 62 | # this workflow. 
63 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 64 | analyze: 65 | name: Analyze 66 | needs: 67 | - diagnostics 68 | permissions: 69 | # actions/checkout needs this to fetch code 70 | contents: read 71 | # required for all workflows 72 | security-events: write 73 | runs-on: ubuntu-latest 74 | strategy: 75 | fail-fast: false 76 | matrix: 77 | # Override automatic language detection by changing the below 78 | # list 79 | # 80 | # Supported options are actions, c-cpp, csharp, go, 81 | # java-kotlin, javascript-typescript, python, ruby, and swift. 82 | language: 83 | - actions 84 | - python 85 | # Learn more... 86 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 87 | 88 | steps: 89 | - name: Apply standard cisagov job preamble 90 | uses: cisagov/action-job-preamble@v1 91 | with: 92 | # This functionality is poorly implemented and has been 93 | # causing problems due to the MITM implementation hogging or 94 | # leaking memory. As a result we disable it by default. If 95 | # you want to temporarily enable it, simply set 96 | # monitor_permissions equal to "true". 97 | # 98 | # TODO: Re-enable this functionality when practical. See 99 | # cisagov/skeleton-generic#207 for more details. 100 | monitor_permissions: "false" 101 | # Use a variable to specify the permissions monitoring 102 | # configuration. By default this will yield the 103 | # configuration stored in the cisagov organization-level 104 | # variable, but if you want to use a different configuration 105 | # then simply: 106 | # 1. Create a repository-level variable with the name 107 | # ACTIONS_PERMISSIONS_CONFIG. 108 | # 2. Set this new variable's value to the configuration you 109 | # want to use for this repository. 110 | # 111 | # Note in particular that changing the permissions 112 | # monitoring configuration *does not* require you to modify 113 | # this workflow. 
114 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 115 | 116 | - name: Checkout repository 117 | uses: actions/checkout@v5 118 | 119 | # Initializes the CodeQL tools for scanning. 120 | - name: Initialize CodeQL 121 | uses: github/codeql-action/init@v4 122 | with: 123 | languages: ${{ matrix.language }} 124 | 125 | # Autobuild attempts to build any compiled languages (C/C++, C#, or 126 | # Java). If this step fails, then you should remove it and run the build 127 | # manually (see below). 128 | - name: Autobuild 129 | uses: github/codeql-action/autobuild@v4 130 | 131 | # ℹ️ Command-line programs to run using the OS shell. 132 | # 📚 https://git.io/JvXDl 133 | 134 | # ✏️ If the Autobuild fails above, remove it and uncomment the following 135 | # three lines and modify them (or add more) to build your code if your 136 | # project uses a compiled language 137 | 138 | # - run: | 139 | # make bootstrap 140 | # make release 141 | 142 | - name: Perform CodeQL Analysis 143 | uses: github/codeql-action/analyze@v4 144 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # For more information about configuring project metadata for the 2 | # setuptools build backend, please see 3 | # https://setuptools.pypa.io/en/latest/userguide/pyproject_config.html 4 | [build-system] 5 | build-backend = "setuptools.build_meta" 6 | requires = ["setuptools"] 7 | 8 | [project] 9 | authors = [ 10 | { name = "Cybersecurity and Infrastructure Security Agency", email = "github@cisa.dhs.gov" } 11 | ] 12 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 13 | classifiers = [ 14 | # How mature is this project? 
Common values are 15 | # 3 - Alpha 16 | # 4 - Beta 17 | # 5 - Production/Stable 18 | "Development Status :: 3 - Alpha", 19 | "Environment :: Console", 20 | # Indicate who your project is intended for 21 | "Intended Audience :: Developers", 22 | "Natural Language :: English", 23 | "Operating System :: OS Independent", 24 | # Specify the Python versions you support here. In particular, ensure 25 | # that you indicate whether you support Python 2, Python 3 or both. 26 | "Programming Language :: Python :: 3", 27 | "Programming Language :: Python :: 3 :: Only", 28 | "Programming Language :: Python :: 3.10", 29 | "Programming Language :: Python :: 3.11", 30 | # gophish-tools cannot support Python 3.12 or later until the 31 | # gophish Python package supports a newer version of the urllib3 32 | # library. The reason is identical to what is discussed here: 33 | # https://bugzilla.mozilla.org/show_bug.cgi?id=1857492 34 | # "Programming Language :: Python :: 3.12", 35 | # "Programming Language :: Python :: 3.13", 36 | # "Programming Language :: Python :: 3.14", 37 | "Programming Language :: Python :: Implementation :: CPython", 38 | ] 39 | dependencies = [ 40 | "docopt >= 0.6.2", 41 | "gophish >= 0.2.5", 42 | "httpagentparser", 43 | "prompt-toolkit == 3.0.43", 44 | "pytz >= 2019.1", 45 | "schema", 46 | "urllib3", 47 | ] 48 | description = "Helpful tools for interacting with Gophish" 49 | dynamic = ["readme", "version"] 50 | keywords = ["gophish automation"] 51 | license = "CC0-1.0" 52 | name = "gophish-tools" 53 | requires-python = ">=3.10" 54 | 55 | [project.optional-dependencies] 56 | # IMPORTANT: Keep type hinting-related dependencies of the dev section 57 | # in sync with the mypy pre-commit hook configuration (see 58 | # .pre-commit-config.yaml). Any changes to type hinting-related 59 | # dependencies here should be reflected in the additional_dependencies 60 | # field of the mypy pre-commit hook to avoid discrepancies in type 61 | # checking between environments. 
62 | dev = [ 63 | "types-docopt", 64 | "types-Pygments", 65 | "types-pytz", 66 | "types-requests", 67 | "types-urllib3", 68 | ] 69 | test = [ 70 | "coverage", 71 | "coveralls", 72 | "pre-commit", 73 | "pytest-cov", 74 | "pytest-mock", 75 | "pytest", 76 | ] 77 | 78 | [project.scripts] 79 | gophish-cleaner = "tools.gophish_cleaner:main" 80 | gophish-complete = "tools.gophish_complete:main" 81 | gophish-export = "tools.gophish_export:main" 82 | gophish-import = "tools.gophish_import:main" 83 | gophish-test = "tools.gophish_test:main" 84 | pca-assessment-reschedule = "assessment.reschedule:main" 85 | pca-wizard = "assessment.builder:main" 86 | pca-wizard-templates = "templates.generate_template:main" 87 | 88 | [project.urls] 89 | homepage = "https://github.com/cisagov/gophish-tools" 90 | issues = "https://github.com/cisagov/gophish-tools/issues" 91 | # Landing page for CISA's cybersecurity mission 92 | mission = "https://www.cisa.gov/cybersecurity" 93 | source = "https://github.com/cisagov/gophish-tools" 94 | 95 | [tool.flake8] 96 | max-line-length = 80 97 | # Select (turn on) 98 | # * Complexity violations reported by mccabe (C) - 99 | # http://flake8.pycqa.org/en/latest/user/error-codes.html#error-violation-codes 100 | # * Documentation conventions compliance reported by pydocstyle (D) - 101 | # http://www.pydocstyle.org/en/stable/error_codes.html 102 | # * Default errors and warnings reported by pycodestyle (E and W) - 103 | # https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes 104 | # * Default errors reported by pyflakes (F) - 105 | # http://flake8.pycqa.org/en/latest/glossary.html#term-pyflakes 106 | # * Default warnings reported by flake8-bugbear (B) - 107 | # https://github.com/PyCQA/flake8-bugbear#list-of-warnings 108 | # * The B950 flake8-bugbear opinionated warning - 109 | # https://github.com/PyCQA/flake8-bugbear#opinionated-warnings 110 | select = ["C", "D", "E", "F", "W", "B", "B950"] 111 | # Ignore flake8's default warning about maximum 
line length, which has 112 | # a hard stop at the configured value. Instead we use 113 | # flake8-bugbear's B950, which allows up to 10% overage. 114 | # 115 | # Also ignore flake8's warning about line breaks before binary 116 | # operators. It no longer agrees with PEP8. See, for example, here: 117 | # https://github.com/ambv/black/issues/21. Guido agrees here: 118 | # https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b. 119 | extend-ignore = ["E501", "W503"] 120 | 121 | [tool.isort] 122 | combine_star = true 123 | force_sort_within_sections = true 124 | include_trailing_comma = true 125 | multi_line_output = 3 126 | 127 | import_heading_stdlib = "Standard Python Libraries" 128 | import_heading_thirdparty = "Third-Party Libraries" 129 | import_heading_firstparty = "cisagov Libraries" 130 | 131 | # Should be auto-populated by seed-isort-config hook 132 | known_third_party = [ 133 | "docopt", 134 | "gophish", 135 | "httpagentparser", 136 | "mock", 137 | "prompt_toolkit", 138 | "pytest", 139 | "pytz", 140 | "requests", 141 | ] 142 | # These must be manually set to correctly separate them from 143 | # third-party libraries 144 | known_first_party = [ 145 | "assessment", 146 | "models", 147 | "tests", 148 | "tools", 149 | "util", 150 | ] 151 | 152 | # Run isort under the black profile to align with our other Python 153 | # linting 154 | profile = "black" 155 | 156 | [tool.pytest.ini_options] 157 | # Increase verbosity, display extra test summary info for tests that 158 | # did not pass, display code coverage results, and enable debug 159 | # logging. 
160 | addopts = "--verbose -ra --cov --log-cli-level=DEBUG" 161 | testpaths = ["tests/"] 162 | 163 | [tool.setuptools.dynamic] 164 | readme = {file = ["README.md"], content-type = "text/markdown"} 165 | version = {attr = "_version.__version__"} 166 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | CC0 1.0 Universal 2 | 3 | Statement of Purpose 4 | 5 | The laws of most jurisdictions throughout the world automatically confer 6 | exclusive Copyright and Related Rights (defined below) upon the creator and 7 | subsequent owner(s) (each and all, an "owner") of an original work of 8 | authorship and/or a database (each, a "Work"). 9 | 10 | Certain owners wish to permanently relinquish those rights to a Work for the 11 | purpose of contributing to a commons of creative, cultural and scientific 12 | works ("Commons") that the public can reliably and without fear of later 13 | claims of infringement build upon, modify, incorporate in other works, reuse 14 | and redistribute as freely as possible in any form whatsoever and for any 15 | purposes, including without limitation commercial purposes. These owners may 16 | contribute to the Commons to promote the ideal of a free culture and the 17 | further production of creative, cultural and scientific works, or to gain 18 | reputation or greater distribution for their Work in part through the use and 19 | efforts of others. 
20 | 21 | For these and/or other purposes and motivations, and without any expectation 22 | of additional consideration or compensation, the person associating CC0 with a 23 | Work (the "Affirmer"), to the extent that he or she is an owner of Copyright 24 | and Related Rights in the Work, voluntarily elects to apply CC0 to the Work 25 | and publicly distribute the Work under its terms, with knowledge of his or her 26 | Copyright and Related Rights in the Work and the meaning and intended legal 27 | effect of CC0 on those rights. 28 | 29 | 1. Copyright and Related Rights. A Work made available under CC0 may be 30 | protected by copyright and related or neighboring rights ("Copyright and 31 | Related Rights"). Copyright and Related Rights include, but are not limited 32 | to, the following: 33 | 34 | i. the right to reproduce, adapt, distribute, perform, display, communicate, 35 | and translate a Work; 36 | 37 | ii. moral rights retained by the original author(s) and/or performer(s); 38 | 39 | iii. publicity and privacy rights pertaining to a person's image or likeness 40 | depicted in a Work; 41 | 42 | iv. rights protecting against unfair competition in regards to a Work, 43 | subject to the limitations in paragraph 4(a), below; 44 | 45 | v. rights protecting the extraction, dissemination, use and reuse of data in 46 | a Work; 47 | 48 | vi. database rights (such as those arising under Directive 96/9/EC of the 49 | European Parliament and of the Council of 11 March 1996 on the legal 50 | protection of databases, and under any national implementation thereof, 51 | including any amended or successor version of such directive); and 52 | 53 | vii. other similar, equivalent or corresponding rights throughout the world 54 | based on applicable law or treaty, and any national implementations thereof. 55 | 56 | 2. Waiver. 
To the greatest extent permitted by, but not in contravention of, 57 | applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and 58 | unconditionally waives, abandons, and surrenders all of Affirmer's Copyright 59 | and Related Rights and associated claims and causes of action, whether now 60 | known or unknown (including existing as well as future claims and causes of 61 | action), in the Work (i) in all territories worldwide, (ii) for the maximum 62 | duration provided by applicable law or treaty (including future time 63 | extensions), (iii) in any current or future medium and for any number of 64 | copies, and (iv) for any purpose whatsoever, including without limitation 65 | commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes 66 | the Waiver for the benefit of each member of the public at large and to the 67 | detriment of Affirmer's heirs and successors, fully intending that such Waiver 68 | shall not be subject to revocation, rescission, cancellation, termination, or 69 | any other legal or equitable action to disrupt the quiet enjoyment of the Work 70 | by the public as contemplated by Affirmer's express Statement of Purpose. 71 | 72 | 3. Public License Fallback. Should any part of the Waiver for any reason be 73 | judged legally invalid or ineffective under applicable law, then the Waiver 74 | shall be preserved to the maximum extent permitted taking into account 75 | Affirmer's express Statement of Purpose. 
In addition, to the extent the Waiver 76 | is so judged Affirmer hereby grants to each affected person a royalty-free, 77 | non transferable, non sublicensable, non exclusive, irrevocable and 78 | unconditional license to exercise Affirmer's Copyright and Related Rights in 79 | the Work (i) in all territories worldwide, (ii) for the maximum duration 80 | provided by applicable law or treaty (including future time extensions), (iii) 81 | in any current or future medium and for any number of copies, and (iv) for any 82 | purpose whatsoever, including without limitation commercial, advertising or 83 | promotional purposes (the "License"). The License shall be deemed effective as 84 | of the date CC0 was applied by Affirmer to the Work. Should any part of the 85 | License for any reason be judged legally invalid or ineffective under 86 | applicable law, such partial invalidity or ineffectiveness shall not 87 | invalidate the remainder of the License, and in such case Affirmer hereby 88 | affirms that he or she will not (i) exercise any of his or her remaining 89 | Copyright and Related Rights in the Work or (ii) assert any associated claims 90 | and causes of action with respect to the Work, in either case contrary to 91 | Affirmer's express Statement of Purpose. 92 | 93 | 4. Limitations and Disclaimers. 94 | 95 | a. No trademark or patent rights held by Affirmer are waived, abandoned, 96 | surrendered, licensed or otherwise affected by this document. 97 | 98 | b. Affirmer offers the Work as-is and makes no representations or warranties 99 | of any kind concerning the Work, express, implied, statutory or otherwise, 100 | including without limitation warranties of title, merchantability, fitness 101 | for a particular purpose, non infringement, or the absence of latent or 102 | other defects, accuracy, or the present or absence of errors, whether or not 103 | discoverable, all to the greatest extent permissible under applicable law. 104 | 105 | c. 
Affirmer disclaims responsibility for clearing rights of other persons 106 | that may apply to the Work or any use thereof, including without limitation 107 | any person's Copyright and Related Rights in the Work. Further, Affirmer 108 | disclaims responsibility for obtaining any necessary consents, permissions 109 | or other rights required for any use of the Work. 110 | 111 | d. Affirmer understands and acknowledges that Creative Commons is not a 112 | party to this document and has no duty or obligation with respect to this 113 | CC0 or use of the Work. 114 | 115 | For more information, please see 116 | 117 | -------------------------------------------------------------------------------- /src/tools/gophish_cleaner.py: -------------------------------------------------------------------------------- 1 | """Remove an assessment or elements of an assessment in Gophish. 2 | 3 | Usage: 4 | gophish-cleaner (--assessment | --campaigns | --groups | --pages | --smtp | --templates) [--log-level=LEVEL] ASSESSMENT_ID SERVER API_KEY 5 | gophish-cleaner (-h | --help) 6 | gophish-cleaner --version 7 | 8 | Options: 9 | API_KEY Gophish API key. 10 | ASSESSMENT_ID ID of the assessment to remove data from. 11 | SERVER Full URL to Gophish server. 12 | -a --assessment Remove all data for the specified assessment. 13 | -c --campaigns Remove all campaigns from the specified assessment. 14 | -g --groups Remove all users and groups from the specified assessment. 15 | -p --pages Remove all landing pages from the specified assessment. 16 | -s --smtp Remove all sender profiles from the specified assessment. 17 | -t --templates Remove all email templates from the specified assessment. 18 | -h --help Show this screen. 19 | --version Show version. 20 | -l --log-level=LEVEL If specified, then the log level will be set to 21 | the specified value. Valid values are "debug", "info", 22 | "warning", "error", and "critical". 
[default: info] 23 | """ 24 | 25 | # import IPython; IPython.embed() #<<< BREAKPOINT >>> 26 | # sys.exit(0) 27 | 28 | # Standard Python Libraries 29 | import logging 30 | import sys 31 | from typing import Dict 32 | 33 | # Third-Party Libraries 34 | from docopt import docopt 35 | import urllib3 36 | 37 | # cisagov Libraries 38 | from tools.connect import connect_api 39 | 40 | from ._version import __version__ 41 | 42 | # Disable "Insecure Request" warning: Gophish uses a self-signed certificate 43 | # as default for https connections, which can not be verified by a third 44 | # party; thus, an SSL insecure request warning is produced. 45 | urllib3.disable_warnings() 46 | 47 | 48 | def confirm_id(element, assessment_id): 49 | """Display confirmation message and return response.""" 50 | while True: 51 | if element != "assessment": 52 | logging.warning( 53 | "NOTE: THIS WILL REMOVE ALL %s DATA ASSOCIATED WITH ASSESSMENT %s", 54 | element.upper(), 55 | assessment_id, 56 | ) 57 | # Bandit complains about the input() function, but it is safe to 58 | # use in Python 3, which is required by this project. 59 | confirm = input("Is this really what you want to do?(y/n) ") # nosec 60 | 61 | else: 62 | logging.warning( 63 | "NOTE: THIS WILL REMOVE ALL DATA ASSOCIATED WITH ASSESSMENT %s", 64 | assessment_id, 65 | ) 66 | # Bandit complains about the input() function, but it is safe to 67 | # use in Python 3, which is required by this project. 
def _delete_matching(service, assessment_id):
    """Delete every item in a Gophish collection whose name starts with assessment_id.

    All assessment elements in Gophish are named with the assessment id as a
    prefix, so a simple startswith() match selects them.

    Args:
        service: A Gophish API collection (e.g. api.campaigns, api.smtp)
            supporting get() and delete(id).
        assessment_id (str): Assessment identifier prefix to match.

    Returns:
        bool: Always True, matching the contract of the public remove_*
        helpers below.
    """
    for item in service.get():
        if item.name.startswith(assessment_id):
            service.delete(item.id)

    return True


def remove_assessment(api, assessment_id):
    """Remove all elements of an assessment.

    Args:
        api: Connection to the Gophish server via the API.
        assessment_id (str): Assessment identifier whose data is removed.

    Returns:
        bool: True if every element type was removed successfully.
    """
    # Each remove_* helper currently always returns True, but the checks are
    # kept so any future failure reporting short-circuits here.
    if (
        not remove_campaigns(api, assessment_id)
        or not remove_smtp(api, assessment_id)
        or not remove_group(api, assessment_id)
        or not remove_template(api, assessment_id)
        or not remove_page(api, assessment_id)
    ):
        success = False

    else:
        logging.info("Successfully removed all elements of %s", assessment_id)
        success = True

    return success


def remove_campaigns(api, assessment_id):
    """Remove all campaigns from an assessment."""
    return _delete_matching(api.campaigns, assessment_id)


def remove_smtp(api, assessment_id):
    """Remove all SMTP (sender profiles) from an assessment."""
    return _delete_matching(api.smtp, assessment_id)


def remove_page(api, assessment_id):
    """Remove all landing pages from an assessment."""
    return _delete_matching(api.pages, assessment_id)


def remove_group(api, assessment_id):
    """Remove all groups from an assessment."""
    return _delete_matching(api.groups, assessment_id)


def remove_template(api, assessment_id):
    """Remove all email templates from an assessment."""
    return _delete_matching(api.templates, assessment_id)
def main() -> None:
    """Set up logging, connect to API, remove assessment data.

    Exits with a non-zero status if logging setup, the API connection, or
    the requested removal fails (including when the user declines the
    confirmation prompt).
    """
    args: Dict[str, str] = docopt(__doc__, version=__version__)

    # Set up logging
    log_level = args["--log-level"]
    try:
        logging.basicConfig(
            format="\n%(levelname)s: %(message)s", level=log_level.upper()
        )
    except ValueError:
        # List every level the module docstring documents as valid,
        # including "critical" (previously omitted from this message).
        logging.critical(
            '"%s" is not a valid logging level. Possible values are debug, info, warning, error, and critical.',
            log_level,
        )
        sys.exit(1)

    else:
        # Connect to API
        try:
            api = connect_api(args["API_KEY"], args["SERVER"])
            logging.debug("Connected to: %s", args["SERVER"])
        except Exception as e:
            logging.critical(e.args[0])
            sys.exit(1)

        assessment_id = args["ASSESSMENT_ID"]

        if args["--campaigns"] and confirm_id("CAMPAIGNS", assessment_id):
            success = remove_campaigns(api, assessment_id)

        elif args["--smtp"] and confirm_id("SMTPS", assessment_id):
            success = remove_smtp(api, assessment_id)

        elif args["--pages"] and confirm_id("PAGES", assessment_id):
            success = remove_page(api, assessment_id)

        elif args["--groups"] and confirm_id("GROUPS", assessment_id):
            success = remove_group(api, assessment_id)

        elif args["--templates"] and confirm_id("TEMPLATES", assessment_id):
            success = remove_template(api, assessment_id)

        elif args["--assessment"] and confirm_id("assessment", assessment_id):
            success = remove_assessment(api, assessment_id)

        else:
            # Either no action flag matched or the user declined confirmation.
            success = False

        if not success:
            # Use a conventional positive exit code; the previous -1 was
            # reported by the shell as 255.
            sys.exit(1)
Welcome # 2 | 3 | We're so glad you're thinking about contributing to this open source 4 | project! If you're unsure or afraid of anything, just ask or submit 5 | the issue or pull request anyway. The worst that can happen is that 6 | you'll be politely asked to change something. We appreciate any sort 7 | of contribution, and don't want a wall of rules to get in the way of 8 | that. 9 | 10 | Before contributing, we encourage you to read our CONTRIBUTING policy 11 | (you are here), our [LICENSE](LICENSE), and our [README](README.md), 12 | all of which should be in this repository. 13 | 14 | ## Issues ## 15 | 16 | If you want to report a bug or request a new feature, the most direct 17 | method is to [create an 18 | issue](https://github.com/cisagov/gophish-tools/issues) in 19 | this repository. We recommend that you first search through existing 20 | issues (both open and closed) to check if your particular issue has 21 | already been reported. If it has then you might want to add a comment 22 | to the existing issue. If it hasn't then feel free to create a new 23 | one. 24 | 25 | ## Pull requests ## 26 | 27 | If you choose to [submit a pull 28 | request](https://github.com/cisagov/gophish-tools/pulls), 29 | you will notice that our continuous integration (CI) system runs a 30 | fairly extensive set of linters, syntax checkers, system, and unit tests. 31 | Your pull request may fail these checks, and that's OK. If you want 32 | you can stop there and wait for us to make the necessary corrections 33 | to ensure your code passes the CI checks. 34 | 35 | If you want to make the changes yourself, or if you want to become a 36 | regular contributor, then you will want to set up 37 | [pre-commit](https://pre-commit.com/) on your local machine. Once you 38 | do that, the CI checks will run locally before you even write your 39 | commit message. This speeds up your development cycle considerably. 
40 | 41 | ### Setting up pre-commit ### 42 | 43 | There are a few ways to do this, but we prefer to use 44 | [`pyenv`](https://github.com/pyenv/pyenv) and 45 | [`pyenv-virtualenv`](https://github.com/pyenv/pyenv-virtualenv) to 46 | create and manage a Python virtual environment specific to this 47 | project. 48 | 49 | We recommend using the `setup-env` script located in this repository, 50 | as it automates the entire environment configuration process. The 51 | dependencies required to run this script are 52 | [GNU `getopt`](https://github.com/util-linux/util-linux/blob/master/misc-utils/getopt.1.adoc), 53 | [`pyenv`](https://github.com/pyenv/pyenv), and [`pyenv-virtualenv`](https://github.com/pyenv/pyenv-virtualenv). 54 | If these tools are already configured on your system, you can simply run the 55 | following command: 56 | 57 | ```console 58 | ./setup-env 59 | ``` 60 | 61 | Otherwise, follow the steps below to manually configure your 62 | environment. 63 | 64 | #### Installing and using GNU `getopt`, `pyenv`, and `pyenv-virtualenv` #### 65 | 66 | On macOS, we recommend installing [brew](https://brew.sh/). Then 67 | installation is as simple as `brew install gnu-getopt pyenv pyenv-virtualenv` and 68 | adding this to your profile: 69 | 70 | ```bash 71 | # GNU getopt must be explicitly added to the path since it is 72 | # keg-only (https://docs.brew.sh/FAQ#what-does-keg-only-mean) 73 | export PATH="$(brew --prefix)/opt/gnu-getopt/bin:$PATH" 74 | 75 | # Setup pyenv 76 | export PYENV_ROOT="$HOME/.pyenv" 77 | export PATH="$PYENV_ROOT/bin:$PATH" 78 | eval "$(pyenv init --path)" 79 | eval "$(pyenv init -)" 80 | eval "$(pyenv virtualenv-init -)" 81 | ``` 82 | 83 | For Linux, Windows Subsystem for Linux (WSL), or macOS (if you 84 | don't want to use `brew`) you can use 85 | [pyenv/pyenv-installer](https://github.com/pyenv/pyenv-installer) to 86 | install the necessary tools. 
Before running this ensure that you have 87 | installed the prerequisites for your platform according to the 88 | [`pyenv` wiki 89 | page](https://github.com/pyenv/pyenv/wiki/common-build-problems). 90 | GNU `getopt` is included in most Linux distributions as part of the 91 | [`util-linux`](https://github.com/util-linux/util-linux) package. 92 | 93 | On WSL you should treat your platform as whatever Linux distribution 94 | you've chosen to install. 95 | 96 | Once you have installed `pyenv` you will need to add the following 97 | lines to your `.bash_profile` (or `.profile`): 98 | 99 | ```bash 100 | export PYENV_ROOT="$HOME/.pyenv" 101 | export PATH="$PYENV_ROOT/bin:$PATH" 102 | eval "$(pyenv init --path)" 103 | ``` 104 | 105 | and then add the following lines to your `.bashrc`: 106 | 107 | ```bash 108 | eval "$(pyenv init -)" 109 | eval "$(pyenv virtualenv-init -)" 110 | ``` 111 | 112 | If you want more information about setting up `pyenv` once installed, please run 113 | 114 | ```console 115 | pyenv init 116 | ``` 117 | 118 | and 119 | 120 | ```console 121 | pyenv virtualenv-init 122 | ``` 123 | 124 | for the current configuration instructions. 125 | 126 | If you are using a shell other than `bash` you should follow the 127 | instructions that the `pyenv-installer` script outputs. 128 | 129 | You will need to reload your shell for these changes to take effect so 130 | you can begin to use `pyenv`. 131 | 132 | For a list of Python versions that are already installed and ready to 133 | use with `pyenv`, use the command `pyenv versions`. To see a list of 134 | the Python versions available to be installed and used with `pyenv` 135 | use the command `pyenv install --list`. You can read more about 136 | the [many things that `pyenv` can do](https://github.com/pyenv/pyenv/blob/master/COMMANDS.md). 
137 | See the [usage information](https://github.com/pyenv/pyenv-virtualenv#usage) 138 | for the additional capabilities that pyenv-virtualenv adds to the `pyenv` 139 | command. 140 | 141 | #### Creating the Python virtual environment #### 142 | 143 | Once `pyenv` and `pyenv-virtualenv` are installed on your system, you 144 | can create and configure the Python virtual environment with these 145 | commands: 146 | 147 | ```console 148 | cd gophish-tools 149 | pyenv virtualenv gophish-tools 150 | pyenv local gophish-tools 151 | pip install --requirement requirements-dev.txt 152 | ``` 153 | 154 | #### Installing the pre-commit hook #### 155 | 156 | Now setting up pre-commit is as simple as: 157 | 158 | ```console 159 | pre-commit install 160 | ``` 161 | 162 | At this point the pre-commit checks will run against any files that 163 | you attempt to commit. If you want to run the checks against the 164 | entire repo, just execute `pre-commit run --all-files`. 165 | 166 | ### Running unit and system tests ### 167 | 168 | In addition to the pre-commit checks the CI system will run the suite 169 | of unit and system tests that are included with this project. To run 170 | these tests locally execute `pytest` from the root of the project. 171 | 172 | We encourage any updates to these tests to improve the overall code 173 | coverage. If your pull request adds new functionality we would 174 | appreciate it if you extend existing test cases, or add new ones to 175 | exercise the newly added code. 176 | 177 | ## Public domain ## 178 | 179 | This project is in the public domain within the United States, and 180 | copyright and related rights in the work worldwide are waived through 181 | the [CC0 1.0 Universal public domain 182 | dedication](https://creativecommons.org/publicdomain/zero/1.0/). 183 | 184 | All contributions to this project will be released under the CC0 185 | dedication. 
def get_campaign_id(campaign_name, campaigns):
    """Get campaign id from campaign name.

    Args:
        campaign_name (string): Full campaign name.
        campaigns (dict): Campaign id as key, campaign name as value.

    Raises:
        LookupError: Campaign name is not found in campaigns dictionary.

    Returns:
        Campaign id corresponding to the campaign name provided.
    """
    for campaign_id, name_value in campaigns.items():
        if name_value == campaign_name:
            return campaign_id

    raise LookupError(f'Campaign name "{campaign_name}" not found.')


def get_campaigns(api, assessment_id=""):
    """Return a dictionary containing all campaigns.

    When called with a blank string for the assessment_id, the default value,
    all campaigns in all assessments will be returned. If an assessment_id is
    provided, then only the campaigns for that assessment will be returned.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to get campaigns from.

    Raises:
        LookupError: No campaigns found for the provided assessment id.

    Returns:
        dict: Campaign id as key, campaign name as value.
    """
    # Campaign names are prefixed with their assessment id; an empty
    # assessment_id prefix matches everything.
    assessmentCampaigns = {
        campaign.id: campaign.name
        for campaign in api.campaigns.get()
        if campaign.name.startswith(assessment_id)
    }

    if not assessmentCampaigns:
        raise LookupError(f"No campaigns found for assessment {assessment_id}")

    return assessmentCampaigns


def select_campaign(campaigns):
    """Prompt the user to choose a campaign and return its id.

    Args:
        campaigns (dict): Campaign id as key, campaign name as value.

    Returns:
        The selected campaign id (guaranteed to be a key of campaigns).
    """
    print("Please select a Campaign ID:")
    print("\tID: Name")

    for id, name in campaigns.items():
        print(f"\t {id}: {name}")

    print("")

    while True:
        inputId = get_number("ID: ")
        if inputId in campaigns:
            break
        else:
            logging.warning("Bad Campaign ID")
            print("Try again...")

    return inputId


def complete_campaign(api_key, server, campaign_id):
    """Complete a campaign in Gophish.

    Args:
        api_key (string): Gophish API key.
        server (string): Full URL to Gophish server.
        campaign_id (int): Gophish campaign id.

    Raises:
        UserWarning: Gophish is unsuccessful in completing the campaign.
    """
    url = f"{server}/api/campaigns/{campaign_id}/complete?api_key={api_key}"

    # Bandit complains about disabling the SSL certificate check, but we have
    # no choice here since we are using a self-signed certificate.
    # A timeout keeps the CLI from hanging forever if the server is
    # unreachable; requests applies no timeout by default.
    response = requests.get(url=url, timeout=30, verify=False)  # nosec

    if not response.json()["success"]:
        raise UserWarning(response.json()["message"])
    else:
        print(f'\n{response.json()["message"]}')
def print_summary(api, campaign_id):
    """Print a campaign summary to stdout.

    Args:
        api: Connection to Gophish server via the API.
        campaign_id (int): Gophish campaign id.

    Returns:
        bool: Always True (kept for backward compatibility with callers).
    """
    summary = api.campaigns.summary(campaign_id=campaign_id)

    print("Campaign Summary:")
    print(f"\tName: {summary.name}")
    print(f"\tStatus: {summary.status}")
    print(f"\tLaunch Date: {summary.launch_date}")
    print(f"\tCompleted Date: {summary.completed_date}")
    print(f"\tTotal Users: {summary.stats.total}")
    print(f"\tTotal Sent: {summary.stats.sent}")
    print(f"\tTotal Clicks: {summary.stats.clicked}")

    return True


def main() -> None:
    """Set up logging, connect to API, call requested function(s).

    Completes the selected campaign and prints its summary, or prints the
    summary only when --summary-only is given. Exits non-zero on failure.
    """
    args: Dict[str, str] = docopt(__doc__, version=__version__)

    # Set up logging
    log_level = args["--log-level"]
    try:
        logging.basicConfig(
            format="\n%(levelname)s: %(message)s", level=log_level.upper()
        )
    except ValueError:
        # List every level the module docstring documents as valid,
        # including "critical" (previously omitted from this message).
        logging.critical(
            '"%s" is not a valid logging level. Possible values are debug, info, warning, error, and critical.',
            log_level,
        )
        sys.exit(1)

    # Connect to API
    try:
        api = connect_api(args["API_KEY"], args["SERVER"])
        logging.debug('Connected to: "%s"', args["SERVER"])
    except Exception as e:
        logging.critical(e.args[0])
        sys.exit(1)

    try:
        if args["--campaign"]:
            # Use campaign name to find campaign id.
            campaigns = get_campaigns(api)
            campaign_id = get_campaign_id(args["--campaign"], campaigns)
        else:
            # User inputs assessment id and selects campaign from lists.
            assessment_id = get_input("Enter the Assessment ID:")
            campaigns = get_campaigns(api, assessment_id)
            campaign_id = select_campaign(campaigns)

    except LookupError as err:
        logging.error(err)
        sys.exit(1)

    if args["--summary-only"]:
        # Output summary only.
        print_summary(api, campaign_id)
    else:
        # Complete and output summary.
        try:
            complete_campaign(args["API_KEY"], args["SERVER"], campaign_id)

        except UserWarning as err:
            logging.warning(err)
            sys.exit(1)

        print_summary(api, campaign_id)
hooks 47 | - repo: https://github.com/igorshubovych/markdownlint-cli 48 | rev: v0.45.0 49 | hooks: 50 | - id: markdownlint 51 | args: 52 | - --config=.mdl_config.yaml 53 | - repo: https://github.com/rbubley/mirrors-prettier 54 | rev: v3.6.2 55 | hooks: 56 | - id: prettier 57 | - repo: https://github.com/adrienverge/yamllint 58 | rev: v1.37.1 59 | hooks: 60 | - id: yamllint 61 | args: 62 | - --strict 63 | 64 | # GitHub Actions hooks 65 | - repo: https://github.com/python-jsonschema/check-jsonschema 66 | rev: 0.35.0 67 | hooks: 68 | - id: check-github-actions 69 | - id: check-github-workflows 70 | 71 | # pre-commit hooks 72 | - repo: https://github.com/pre-commit/pre-commit 73 | rev: v4.4.0 74 | hooks: 75 | - id: validate_manifest 76 | 77 | # Go hooks 78 | - repo: https://github.com/TekWizely/pre-commit-golang 79 | rev: v1.0.0-rc.4 80 | hooks: 81 | # Go Build 82 | - id: go-build-repo-mod 83 | # Style Checkers 84 | - id: go-critic 85 | # goimports 86 | - id: go-imports-repo 87 | args: 88 | # Write changes to files 89 | - -w 90 | # Go Mod Tidy 91 | - id: go-mod-tidy-repo 92 | # GoSec 93 | - id: go-sec-repo-mod 94 | # StaticCheck 95 | - id: go-staticcheck-repo-mod 96 | # Go Test 97 | - id: go-test-repo-mod 98 | # Go Vet 99 | - id: go-vet-repo-mod 100 | # Nix hooks 101 | - repo: https://github.com/nix-community/nixpkgs-fmt 102 | rev: v1.3.0 103 | hooks: 104 | - id: nixpkgs-fmt 105 | 106 | # Shell script hooks 107 | - repo: https://github.com/scop/pre-commit-shfmt 108 | rev: v3.12.0-2 109 | hooks: 110 | - id: shfmt 111 | args: 112 | # List files that will be formatted 113 | - --list 114 | # Write result to file instead of stdout 115 | - --write 116 | # Indent by two spaces 117 | - --indent 118 | - "2" 119 | # Binary operators may start a line 120 | - --binary-next-line 121 | # Switch cases are indented 122 | - --case-indent 123 | # Redirect operators are followed by a space 124 | - --space-redirects 125 | - repo: https://github.com/shellcheck-py/shellcheck-py 126 | rev: 
v0.11.0.1 127 | hooks: 128 | - id: shellcheck 129 | 130 | # Python hooks 131 | # Run bandit on the "tests" tree with a configuration 132 | - repo: https://github.com/PyCQA/bandit 133 | rev: 1.9.1 134 | hooks: 135 | - id: bandit 136 | name: bandit (tests tree) 137 | files: tests 138 | args: 139 | # Skip "assert used" check since assertions are used 140 | # frequently in pytests. 141 | - --skip=B101 142 | # Run bandit on everything except the "tests" tree 143 | - repo: https://github.com/PyCQA/bandit 144 | rev: 1.9.1 145 | hooks: 146 | - id: bandit 147 | name: bandit (everything else) 148 | exclude: tests 149 | - repo: https://github.com/psf/black-pre-commit-mirror 150 | rev: 25.11.0 151 | hooks: 152 | - id: black 153 | - repo: https://github.com/PyCQA/flake8 154 | rev: 7.3.0 155 | hooks: 156 | - id: flake8 157 | additional_dependencies: 158 | - flake8-docstrings==1.7.0 159 | # This is necessary to read the flake8 configuration from 160 | # the pyproject.toml file. 161 | - flake8-pyproject==1.2.3 162 | - repo: https://github.com/PyCQA/isort 163 | rev: 7.0.0 164 | hooks: 165 | - id: isort 166 | - repo: https://github.com/pre-commit/mirrors-mypy 167 | rev: v1.18.2 168 | hooks: 169 | - id: mypy 170 | # IMPORTANT: Keep type hinting-related dependencies of the 171 | # mypy pre-commit hook additional_dependencies in sync with 172 | # the dev section of setup.py to avoid discrepancies in type 173 | # checking between environments. 
174 | additional_dependencies: 175 | - types-docopt 176 | - types-Pygments 177 | - types-pytz 178 | - types-requests 179 | - types-urllib3 180 | - repo: https://github.com/pypa/pip-audit 181 | rev: v2.9.0 182 | hooks: 183 | - id: pip-audit 184 | args: 185 | # Add any pip requirements files to scan 186 | - --requirement 187 | - requirements-dev.txt 188 | - --requirement 189 | - requirements-test.txt 190 | - --requirement 191 | - requirements.txt 192 | - repo: https://github.com/asottile/pyupgrade 193 | rev: v3.21.1 194 | hooks: 195 | - id: pyupgrade 196 | 197 | # Ansible hooks 198 | - repo: https://github.com/ansible/ansible-lint 199 | rev: v25.11.0 200 | hooks: 201 | - id: ansible-lint 202 | additional_dependencies: 203 | # On its own ansible-lint does not pull in ansible, only 204 | # ansible-core. Therefore, if an Ansible module lives in 205 | # ansible instead of ansible-core, the linter will complain 206 | # that the module is unknown. In these cases it is 207 | # necessary to add the ansible package itself as an 208 | # additional dependency, with the same pinning as is done in 209 | # requirements-test.txt of cisagov/skeleton-ansible-role. 210 | # 211 | # Version 10 is required because the pip-audit pre-commit 212 | # hook identifies a vulnerability in ansible-core 2.16.13, 213 | # but all versions of ansible 9 have a dependency on 214 | # ~=2.16.X. 215 | # 216 | # It is also a good idea to go ahead and upgrade to version 217 | # 10 since version 9 is going EOL at the end of November: 218 | # https://endoflife.date/ansible 219 | # - ansible>=10,<11 220 | # ansible-core 2.16.3 through 2.16.6 suffer from the bug 221 | # discussed in ansible/ansible#82702, which breaks any 222 | # symlinked files in vars, tasks, etc. for any Ansible role 223 | # installed via ansible-galaxy. Hence we never want to 224 | # install those versions. 225 | # 226 | # Note that the pip-audit pre-commit hook identifies a 227 | # vulnerability in ansible-core 2.16.13. 
"""The models library."""

# Standard Python Libraries
from datetime import datetime
from typing import Any, Dict

# TODO Research .attribute only valid properties


class Model:
    """Base class for JSON-backed model objects."""

    # Subclasses override this with their attribute names and defaults.
    _valid_properties: Dict[str, Any] = dict()

    @classmethod
    def _is_builtin(cls, obj):
        """Return True if obj is a plain built-in value rather than a Model."""
        return isinstance(obj, (int, float, str, list, dict, bool))

    def as_dict(self):
        """Return a dict representation of the resource.

        Serialization rules:
          * datetime values are converted with isoformat().
          * Booleans are always included, regardless of value.
          * Nested Model values and non-builtin list items are serialized
            via their own as_dict().
          * All other falsy values (None, "", 0, empty containers) are
            omitted from the result.

        Unlike the previous implementation, lists of nested models are
        serialized into a NEW list, so calling as_dict() no longer mutates
        the model instance itself.
        """
        result = {}
        for key in self._valid_properties:
            val = getattr(self, key)
            if isinstance(val, datetime):
                result[key] = val.isoformat()
            # If it's a boolean, add it regardless of the value.
            elif isinstance(val, bool):
                result[key] = val
            # Serialize lists without mutating the original list in place.
            elif isinstance(val, list):
                converted = [
                    item if Model._is_builtin(item) else item.as_dict()
                    for item in val
                ]
                if converted:
                    result[key] = converted
            # Serialize nested custom classes; drop empty results.
            elif val and not Model._is_builtin(val):
                nested = val.as_dict()
                if nested:
                    result[key] = nested
            # Remaining builtins are added only when truthy.
            elif val:
                result[key] = val
        return result

    @classmethod
    def parse(cls, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError


class Assessment(Model):
    """The Assessment class."""

    _valid_properties: Dict[str, Any] = {
        "id": None,
        "timezone": "US/Eastern",
        "domain": None,
        "target_domains": [],
        "start_date": None,
        "end_date": None,
        "reschedule": False,
        "start_campaign": 1,
        "groups": [],
        "pages": [],
        "campaigns": [],
    }

    def __init__(self, **kwargs):
        """Create a new assessment instance."""
        for key, default in Assessment._valid_properties.items():
            value = kwargs.get(key, default)
            # Copy class-level mutable defaults so separate instances do not
            # share (and accidentally mutate) the same list object.
            if value is default and isinstance(default, list):
                value = list(default)
            setattr(self, key, value)

    @classmethod
    def parse(cls, json):
        """Parse assessment json into an Assessment instance."""
        assessment = cls()
        for key, val in json.items():
            if key == "campaigns":
                assessment.campaigns = [Campaign.parse(campaign) for campaign in val]
            elif key == "page":
                # NOTE(review): _valid_properties declares "pages" (a list),
                # but this branch sets a singular "page" attribute; preserved
                # as-is — confirm the intended schema against the sample json.
                setattr(assessment, key, Page.parse(val))
            elif key == "groups":
                assessment.groups = [Group.parse(group) for group in val]
            elif key in cls._valid_properties:
                setattr(assessment, key, val)
        return assessment


class Page(Model):
    """The Page class."""

    _valid_properties = {
        "name": None,
        "capture_credentials": None,
        "capture_passwords": False,
        "html": None,
        "redirect_url": None,
    }

    def __init__(self, **kwargs):
        """Create a new page instance."""
        for key, default in Page._valid_properties.items():
            setattr(self, key, kwargs.get(key, default))

    @classmethod
    def parse(cls, json):
        """Parse page json into a Page instance; unknown keys are ignored."""
        page = cls()
        for key, val in json.items():
            if key in cls._valid_properties:
                setattr(page, key, val)
        return page
None, 100 | "redirect_url": None, 101 | } 102 | 103 | def __init__(self, **kwargs): 104 | """Create a new page instance.""" 105 | for key, default in Page._valid_properties.items(): 106 | setattr(self, key, kwargs.get(key, default)) 107 | 108 | @classmethod 109 | def parse(cls, json): 110 | """Parse page json.""" 111 | page = cls() 112 | for key, val in json.items(): 113 | if key in cls._valid_properties: 114 | setattr(page, key, val) 115 | return page 116 | 117 | 118 | class Group(Model): 119 | """The Group class.""" 120 | 121 | _valid_properties: Dict[str, Any] = {"name": None, "targets": []} 122 | 123 | def __init__(self, **kwargs): 124 | """Create a new group instance.""" 125 | for key, default in Group._valid_properties.items(): 126 | setattr(self, key, kwargs.get(key, default)) 127 | 128 | @classmethod 129 | def parse(cls, json): 130 | """Parse group json.""" 131 | group = cls() 132 | for key, val in json.items(): 133 | if key == "targets": 134 | emails = [Target.parse(email) for email in val] 135 | setattr(group, key, emails) 136 | elif key in cls._valid_properties: 137 | setattr(group, key, val) 138 | return group 139 | 140 | 141 | class Target(Model): 142 | """The Target class.""" 143 | 144 | _valid_properties = { 145 | "first_name": None, 146 | "last_name": None, 147 | "email": None, 148 | "position": None, 149 | } 150 | 151 | def __init__(self, first_name, last_name, email, **kwargs): 152 | """Create a new target instance.""" 153 | self.first_name = first_name 154 | self.last_name = last_name 155 | self.email = email 156 | if "position" in kwargs.keys(): 157 | setattr(self, "position", kwargs["position"]) 158 | 159 | @classmethod 160 | def parse(cls, json): 161 | """Parse target json.""" 162 | email = cls(json["first_name"], json["last_name"], json["email"]) 163 | if json["position"]: 164 | setattr(email, "position", json["position"]) 165 | return email 166 | 167 | 168 | class Template(Model): 169 | """The Template class.""" 170 | 171 | _valid_properties 
= { 172 | "name": None, 173 | "subject": None, 174 | "html": None, 175 | "text": None, 176 | } 177 | 178 | def __init__(self, **kwargs): 179 | """Create a new template instance.""" 180 | for key, default in Template._valid_properties.items(): 181 | setattr(self, key, kwargs.get(key, default)) 182 | 183 | @classmethod 184 | def parse(cls, json): 185 | """Parse template json.""" 186 | template = cls() 187 | for key, val in json.items(): 188 | if key in cls._valid_properties: 189 | setattr(template, key, val) 190 | return template 191 | 192 | 193 | class SMTP(Model): 194 | """The SMTP class.""" 195 | 196 | _valid_properties = { 197 | "name": None, 198 | "from_address": None, 199 | "password": None, 200 | "host": "postfix:587", 201 | "interface_type": "SMTP", 202 | "ignore_cert": True, 203 | } 204 | 205 | def __init__(self, **kwargs): 206 | """Create a new SMTP instance.""" 207 | for key, default in SMTP._valid_properties.items(): 208 | setattr(self, key, kwargs.get(key, default)) 209 | 210 | @classmethod 211 | def parse(cls, json): 212 | """Parse SMTP json.""" 213 | smtp = cls() 214 | for key, val in json.items(): 215 | if key in cls._valid_properties: 216 | setattr(smtp, key, val) 217 | return smtp 218 | 219 | 220 | class Campaign(Model): 221 | """The Campaign class.""" 222 | 223 | _valid_properties = { 224 | "name": None, 225 | "launch_date": None, 226 | "complete_date": None, 227 | "url": None, 228 | "template": None, 229 | "smtp": None, 230 | "group_name": None, 231 | "page_name": None, 232 | } 233 | 234 | def __init__(self, **kwargs): 235 | """Create a new campaign instance.""" 236 | for key, default in Campaign._valid_properties.items(): 237 | setattr(self, key, kwargs.get(key, default)) 238 | 239 | @classmethod 240 | def parse(cls, json): 241 | """Parse campaign json.""" 242 | campaign = cls() 243 | for key, val in json.items(): 244 | if key == "template": 245 | template = Template.parse(val) 246 | setattr(campaign, key, template) 247 | elif key == "smtp": 248 
| smtp = SMTP.parse(val) 249 | setattr(campaign, key, smtp) 250 | elif key in cls._valid_properties: 251 | setattr(campaign, key, val) 252 | return campaign 253 | 254 | 255 | class Click(Model): 256 | """The Click class represents a Click object generated by the Gophish API when a user clicks on an email.""" 257 | 258 | _valid_properties = { 259 | "message": None, 260 | "user": None, 261 | "source_ip": None, 262 | "time": None, 263 | "application": None, 264 | } 265 | 266 | def __init__(self, **kwargs): 267 | """Create a new click instance.""" 268 | for key, default in Click._valid_properties.items(): 269 | setattr(self, key, kwargs.get(key, default)) 270 | 271 | @classmethod 272 | def parse(cls, json): 273 | """Parse click json.""" 274 | click = cls() 275 | for key, val in json.items(): 276 | if key in cls._valid_properties: 277 | setattr(click, key, val) 278 | 279 | def __getitem__(self, item): 280 | """Get item by attribute name.""" 281 | return getattr(self, item) 282 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """pytest plugin configuration. 2 | 3 | https://docs.pytest.org/en/latest/writing_plugins.html#conftest-py-plugins 4 | """ 5 | 6 | # Standard Python Libraries 7 | import hashlib 8 | import json 9 | 10 | # Third-Party Libraries 11 | from gophish.models import Group as Gophish_Group 12 | from gophish.models import User as Gophish_User 13 | import pytest 14 | 15 | # cisagov Libraries 16 | from models.models import ( 17 | SMTP, 18 | Assessment, 19 | Campaign, 20 | Click, 21 | Group, 22 | Page, 23 | Target, 24 | Template, 25 | ) 26 | 27 | """Support items for test_modules.py """ 28 | 29 | AUTO_FORWARD = """ 30 | 31 | 32 |
 
33 | 34 | """ 35 | 36 | """ JSON Fixtures """ 37 | 38 | 39 | @pytest.fixture 40 | def assessment_json(group_json, page_json, campaign_json): 41 | """Return an Assessment JSON.""" 42 | assessment_str = json.dumps( 43 | { 44 | "id": "RVXXX1", 45 | "timezone": "US/Eastern", 46 | "domain": "bad.domain", 47 | "target_domains": ["target.domain"], 48 | "start_date": "01/01/2025 13:00", 49 | "end_date": "01/06/2025 19:00", 50 | "groups": [group_json], 51 | "page": page_json, 52 | "campaigns": [campaign_json], 53 | } 54 | ) 55 | return json.loads(assessment_str) 56 | 57 | 58 | @pytest.fixture 59 | def group_json(target_json): 60 | """Return a Group JSON.""" 61 | group_string = json.dumps({"name": "RVXXX1-G1", "targets": target_json}) 62 | return json.loads(group_string) 63 | 64 | 65 | @pytest.fixture(scope="module") 66 | def target_json(): 67 | """Return a Target JSON with 2 emails.""" 68 | return json.loads( 69 | """[ 70 | { 71 | "first_name": "John", 72 | "last_name": "Doe", 73 | "email": "john.doe@domain.test", 74 | "position": "IT" 75 | }, 76 | { 77 | "first_name": "Jane", 78 | "last_name": "Smith", 79 | "email": "jane.smith@domain.test", 80 | "position": "HR" 81 | } 82 | ]""" 83 | ) 84 | 85 | 86 | @pytest.fixture 87 | def page_json(): 88 | """Return a Page JSON.""" 89 | page_str = json.dumps( 90 | { 91 | "name": "RVXXX1-AutoForward", 92 | "capture_credentials": True, 93 | "capture_passwords": False, 94 | "redirect_url": "redirect.domain", 95 | "html": AUTO_FORWARD, 96 | } 97 | ) 98 | return json.loads(page_str) 99 | 100 | 101 | @pytest.fixture 102 | def campaign_json(template_json, smtp_json): 103 | """Return a Campaign JSON.""" 104 | campaign_str = json.dumps( 105 | { 106 | "name": "RVXXX1-C1", 107 | "launch_date": "01/01/2025 13:00", 108 | "completed_date": "01/01/2025 14:00", 109 | "url": "http://bad.domain/camp1", 110 | "group_name": "RVXXX1-G1", 111 | "template": template_json, 112 | "smtp": smtp_json, 113 | } 114 | ) 115 | return json.loads(campaign_str) 116 | 
117 | 118 | @pytest.fixture 119 | def template_json(): 120 | """Return a Template JSON.""" 121 | return json.loads( 122 | """{ 123 | "name": "RVXXX1-T1-ID", 124 | "subject": "Campaign 1", 125 | "html": "Body Test", 126 | "text": "Body Test" 127 | }""" 128 | ) 129 | 130 | 131 | @pytest.fixture 132 | def smtp_json(): 133 | """Return an SMTP JSON.""" 134 | return json.loads( 135 | """{ 136 | "name": "RVXXX1-SP", 137 | "from_address": "Camp1 Phish", 138 | "host": "postfix:587", 139 | "interface_type": "SMTP", 140 | "ignore_cert": true 141 | }""" 142 | ) 143 | 144 | 145 | # Object Fixtures 146 | @pytest.fixture 147 | def target_object(): 148 | """Return a single Target object.""" 149 | return Target( 150 | first_name="John", last_name="Doe", email="john.doe@domain.test", position="IT" 151 | ) 152 | 153 | 154 | @pytest.fixture 155 | def group_object(target_object): 156 | """Return a single Group object.""" 157 | return Group( 158 | name="RVXXX1-G1", 159 | targets=[ 160 | target_object, 161 | Target( 162 | first_name="Jane", 163 | last_name="Smith", 164 | email="jane.smith@domain.test", 165 | position="HR", 166 | ), 167 | ], 168 | ) 169 | 170 | 171 | @pytest.fixture 172 | def smtp_object(): 173 | """Return an SMTP object.""" 174 | return SMTP( 175 | name="RVXXX1-SP", 176 | from_address="Camp1 Phish", 177 | host="postfix:587", 178 | interface_type="SMTP", 179 | ignore_cert=True, 180 | ) 181 | 182 | 183 | @pytest.fixture 184 | def template_object(): 185 | """Return a Template object.""" 186 | return Template( 187 | name="RVXXX1-T1-ID", 188 | subject="Campaign 1", 189 | html="Body Test", 190 | text="Body Test", 191 | ) 192 | 193 | 194 | @pytest.fixture 195 | def page_object(): 196 | """Return a Page object.""" 197 | return Page( 198 | name="RVXXX1-AutoForward", 199 | capture_credentials=True, 200 | capture_passwords=False, 201 | redirect_url="redirect.domain", 202 | html=AUTO_FORWARD, 203 | ) 204 | 205 | 206 | @pytest.fixture 207 | def campaign_object(template_object, 
smtp_object): 208 | """Return a Campaign object.""" 209 | return Campaign( 210 | name="RVXXX1-C1", 211 | launch_date="01/01/2025 13:00", 212 | completed_date="01/01/2025 14:00", 213 | url="http://bad.domain/camp1", 214 | group_name="RVXXX1-G1", 215 | template=template_object, 216 | smtp=smtp_object, 217 | ) 218 | 219 | 220 | @pytest.fixture 221 | def assessment_object(group_object, page_object, campaign_object): 222 | """Return an Assessment object.""" 223 | return Assessment( 224 | id="RVXXX1", 225 | timezone="US/Eastern", 226 | domain="bad.domain", 227 | target_domains=["target.domain"], 228 | start_date="01/01/2025 13:00", 229 | end_date="01/06/2025 19:00", 230 | groups=[group_object], 231 | page=page_object, 232 | campaigns=[campaign_object], 233 | ) 234 | 235 | 236 | @pytest.fixture 237 | def multiple_campaign_object(template_object, smtp_object): 238 | """Return list of campaign objects.""" 239 | campaigns = list() 240 | 241 | for x in range(1, 8): 242 | campaigns.append( 243 | Campaign( 244 | name=f"RVXXX1-C{x}", 245 | launch_date=f"01/0{x}/2025 13:00", 246 | completed_date=f"01/0{x}/2025 14:00", 247 | url=f"http://bad.domain/camp{x}", 248 | group_name="RVXXX1-G1", 249 | template=template_object, 250 | smtp=smtp_object, 251 | ) 252 | ) 253 | 254 | # Make a campaign from a different assessment. 
255 | campaigns[6].name = "RVXXX2-C7" 256 | 257 | return campaigns 258 | 259 | 260 | @pytest.fixture 261 | def multiple_gophish_group_object(): 262 | """Return list of Gophish group objects.""" 263 | groups = list() 264 | 265 | for x in range(1, 3): 266 | groups.append( 267 | Gophish_Group( 268 | group_id={x}, 269 | name=f"RVXXX1-G{x}", 270 | targets=[ 271 | Gophish_User( 272 | first_name="Jane", 273 | last_name="Smith", 274 | email=f"jane.smith{x}@domain.tld", 275 | position="IT", 276 | ), 277 | Gophish_User( 278 | first_name="John", 279 | last_name="Doe", 280 | email=f"john.doe{x}@domain.tld", 281 | position="HR", 282 | ), 283 | ], 284 | ) 285 | ) 286 | 287 | return groups 288 | 289 | 290 | @pytest.fixture 291 | def email_target_json(): 292 | """Return a email target JSON with 4 emails matching the Gophish group object.""" 293 | targets = list() 294 | for email, position in [ 295 | ("jane.smith1@domain.tld", "IT"), 296 | ("john.doe1@domain.tld", "HR"), 297 | ("jane.smith2@domain.tld", "IT"), 298 | ("john.doe2@domain.tld", "HR"), 299 | ]: 300 | targets.append( 301 | { 302 | "id": hashlib.sha256(email.encode("utf-8")).hexdigest(), 303 | "customer_defined_labels": {"RVXXX1": [position]}, 304 | } 305 | ) 306 | 307 | return targets 308 | 309 | 310 | @pytest.fixture 311 | def multiple_click_object(): 312 | """Return a list of clicks to match the correct number of unique users.""" 313 | clicks = list() 314 | for x in range(0, 2): 315 | clicks.append( 316 | Click( 317 | message="Testing", 318 | user="jane.smith1@domain.tld", 319 | source_ip="10.0.0.0", 320 | time="01/01/2025 13:00", 321 | application="NA", 322 | ) 323 | ) 324 | clicks.append( 325 | Click( 326 | message="Testing", 327 | user="john.doe1@domain.tld", 328 | source_ip="10.0.0.1", 329 | time="01/01/2025 13:00", 330 | application="NA", 331 | ) 332 | ) 333 | clicks.append( 334 | Click( 335 | message="Testing", 336 | user="jane.smith2@domain.tld", 337 | source_ip="10.0.0.2", 338 | time="01/01/2025 13:00", 339 | 
application="NA", 340 | ) 341 | ) 342 | clicks.append( 343 | Click( 344 | message="Testing", 345 | user="john.doe2@domain.tld", 346 | source_ip="10.0.0.3", 347 | time="01/01/2025 13:00", 348 | application="NA", 349 | ) 350 | ) 351 | return clicks 352 | 353 | 354 | def pytest_addoption(parser): 355 | """Add new commandline options to pytest.""" 356 | parser.addoption( 357 | "--runslow", action="store_true", default=False, help="run slow tests" 358 | ) 359 | 360 | 361 | def pytest_configure(config): 362 | """Register new markers.""" 363 | config.addinivalue_line("markers", "slow: mark test as slow") 364 | 365 | 366 | def pytest_collection_modifyitems(config, items): 367 | """Modify collected tests based on custom marks and commandline options.""" 368 | if config.getoption("--runslow"): 369 | # --runslow given in cli: do not skip slow tests 370 | return 371 | skip_slow = pytest.mark.skip(reason="need --runslow option to run") 372 | for item in items: 373 | if "slow" in item.keywords: 374 | item.add_marker(skip_slow) 375 | -------------------------------------------------------------------------------- /setup-env: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o nounset 4 | set -o errexit 5 | set -o pipefail 6 | 7 | USAGE=$( 8 | cat << 'END_OF_LINE' 9 | Configure a development environment for this repository. 10 | 11 | It does the following: 12 | - Allows the user to specify the Python version to use for the virtual environment. 13 | - Allows the user to specify a name for the virtual environment. 14 | - Verifies pyenv and pyenv-virtualenv are installed. 15 | - Creates the Python virtual environment. 16 | - Configures the activation of the virtual enviroment for the repo directory. 17 | - Installs the requirements needed for development (including mypy type stubs). 18 | - Installs git pre-commit hooks. 19 | - Configures git remotes for upstream "lineage" repositories. 
20 | 21 | Usage: 22 | setup-env [--venv-name venv_name] [--python-version python_version] 23 | setup-env (-h | --help) 24 | 25 | Options: 26 | -f | --force Delete virtual enviroment if it already exists. 27 | -h | --help Show this message. 28 | -i | --install-hooks Install hook environments for all environments in the 29 | pre-commit config file. 30 | -l | --list-versions List available Python versions and select one interactively. 31 | -v | --venv-name Specify the name of the virtual environment. 32 | -p | --python-version Specify the Python version for the virtual environment. 33 | 34 | END_OF_LINE 35 | ) 36 | 37 | # Display pyenv's installed Python versions 38 | python_versions() { 39 | pyenv versions --bare --skip-aliases --skip-envs 40 | } 41 | 42 | check_python_version() { 43 | local version=$1 44 | 45 | # This is a valid regex for semantically correct Python version strings. 46 | # For more information see here: 47 | # https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string 48 | # Break down the regex into readable parts major.minor.patch 49 | local major="0|[1-9]\d*" 50 | local minor="0|[1-9]\d*" 51 | local patch="0|[1-9]\d*" 52 | 53 | # Splitting the prerelease part for readability 54 | # Start of the prerelease 55 | local prerelease="(?:-" 56 | # Numeric or alphanumeric identifiers 57 | local prerelease+="(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)" 58 | # Additional dot-separated identifiers 59 | local prerelease+="(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*" 60 | # End of the prerelease, making it optional 61 | local prerelease+=")?" 62 | # Optional build metadata 63 | local build="(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?" 64 | 65 | # Final regex composed of parts 66 | local regex="^($major)\.($minor)\.($patch)$prerelease$build$" 67 | 68 | # This checks if the Python version does not match the regex pattern specified in $regex, 69 | # using Perl for regex matching. 
If the pattern is not found, then prompt the user with 70 | # the invalid version message. 71 | if ! echo "$version" | perl -ne "exit(!/$regex/)"; then 72 | echo "Invalid version of Python: Python follows semantic versioning," \ 73 | "so any version string that is not a valid semantic version is an" \ 74 | "invalid version of Python." 75 | exit 1 76 | # Else if the Python version isn't installed then notify the user. 77 | # grep -E is used for searching through text lines that match the 78 | # specific version. 79 | elif ! python_versions | grep -E "^${version}$" > /dev/null; then 80 | echo "Error: Python version $version is not installed." 81 | echo "Installed Python versions are:" 82 | python_versions 83 | exit 1 84 | else 85 | echo "Using Python version $version" 86 | fi 87 | } 88 | 89 | # Flag to force deletion and creation of virtual environment 90 | FORCE=0 91 | 92 | # Initialize the other flags 93 | INSTALL_HOOKS=0 94 | LIST_VERSIONS=0 95 | PYTHON_VERSION="" 96 | VENV_NAME="" 97 | 98 | # Define long options 99 | LONGOPTS="force,help,install-hooks,list-versions,python-version:,venv-name:" 100 | 101 | # Define short options for getopt 102 | SHORTOPTS="fhilp:v:" 103 | 104 | # Check for GNU getopt by testing for long option support. GNU getopt supports 105 | # the "--test" option and will return exit code 4 while POSIX/BSD getopt does 106 | # not and will return exit code 0. 107 | if getopt --test > /dev/null 2>&1; then 108 | cat << 'END_OF_LINE' 109 | 110 | Please note, this script requires GNU getopt due to its enhanced 111 | functionality and compatibility with certain script features that 112 | are not supported by the POSIX getopt found in some systems, particularly 113 | those with a non-GNU version of getopt. This distinction is crucial 114 | as a system might have a non-GNU version of getopt installed by default, 115 | which could lead to unexpected behavior. 116 | 117 | On macOS, we recommend installing brew (https://brew.sh/). 
Then installation 118 | is as simple as `brew install gnu-getopt` and adding this to your 119 | profile: 120 | 121 | export PATH="$(brew --prefix)/opt/gnu-getopt/bin:$PATH" 122 | 123 | GNU getopt must be explicitly added to the PATH since it 124 | is keg-only (https://docs.brew.sh/FAQ#what-does-keg-only-mean). 125 | 126 | END_OF_LINE 127 | exit 1 128 | fi 129 | 130 | # Check to see if pyenv is installed 131 | if [ -z "$(command -v pyenv)" ] || { [ -z "$(command -v pyenv-virtualenv)" ] && [ ! -f "$(pyenv root)/plugins/pyenv-virtualenv/bin/pyenv-virtualenv" ]; }; then 132 | echo "pyenv and pyenv-virtualenv are required." 133 | if [[ "$OSTYPE" == "darwin"* ]]; then 134 | cat << 'END_OF_LINE' 135 | 136 | On macOS, we recommend installing brew, https://brew.sh/. Then installation 137 | is as simple as `brew install pyenv pyenv-virtualenv` and adding this to your 138 | profile: 139 | 140 | eval "$(pyenv init -)" 141 | eval "$(pyenv virtualenv-init -)" 142 | 143 | END_OF_LINE 144 | 145 | fi 146 | cat << 'END_OF_LINE' 147 | For Linux, Windows Subsystem for Linux (WSL), or macOS (if you don't want 148 | to use "brew") you can use https://github.com/pyenv/pyenv-installer to install 149 | the necessary tools. Before running this ensure that you have installed the 150 | prerequisites for your platform according to the pyenv wiki page, 151 | https://github.com/pyenv/pyenv/wiki/common-build-problems. 152 | 153 | On WSL you should treat your platform as whatever Linux distribution you've 154 | chosen to install. 155 | 156 | Once you have installed "pyenv" you will need to add the following lines to 157 | your ".bashrc": 158 | 159 | export PATH="$PATH:$HOME/.pyenv/bin" 160 | eval "$(pyenv init -)" 161 | eval "$(pyenv virtualenv-init -)" 162 | END_OF_LINE 163 | exit 1 164 | fi 165 | 166 | # Use GNU getopt to parse options 167 | if ! 
PARSED=$(getopt --options $SHORTOPTS --longoptions $LONGOPTS --name "$0" -- "$@"); then 168 | echo "Error parsing options" 169 | exit 1 170 | fi 171 | eval set -- "$PARSED" 172 | 173 | while true; do 174 | case "$1" in 175 | -f | --force) 176 | FORCE=1 177 | shift 178 | ;; 179 | -h | --help) 180 | echo "$USAGE" 181 | exit 0 182 | ;; 183 | -i | --install-hooks) 184 | INSTALL_HOOKS=1 185 | shift 186 | ;; 187 | -l | --list-versions) 188 | LIST_VERSIONS=1 189 | shift 190 | ;; 191 | -p | --python-version) 192 | PYTHON_VERSION="$2" 193 | shift 2 194 | # Check the Python version being passed in. 195 | check_python_version "$PYTHON_VERSION" 196 | ;; 197 | -v | --venv-name) 198 | VENV_NAME="$2" 199 | shift 2 200 | ;; 201 | --) 202 | shift 203 | break 204 | ;; 205 | *) 206 | # Unreachable due to GNU getopt handling all options 207 | echo "Programming error" 208 | exit 64 209 | ;; 210 | esac 211 | done 212 | 213 | # Determine the virtual environment name 214 | if [ -n "$VENV_NAME" ]; then 215 | # Use the user-provided environment name 216 | env_name="$VENV_NAME" 217 | else 218 | # Set the environment name to the last part of the working directory. 219 | env_name=${PWD##*/} 220 | fi 221 | 222 | # List Python versions and select one interactively. 223 | if [ $LIST_VERSIONS -ne 0 ]; then 224 | echo Available Python versions: 225 | python_versions 226 | # Read the user's desired Python version. 227 | # -r: treat backslashes as literal, -p: display prompt before input. 228 | read -r -p "Enter the desired Python version: " PYTHON_VERSION 229 | # Check the Python version being passed in. 230 | check_python_version "$PYTHON_VERSION" 231 | fi 232 | 233 | # Remove any lingering local configuration. 234 | if [ $FORCE -ne 0 ]; then 235 | rm -f .python-version 236 | pyenv virtualenv-delete --force "${env_name}" || true 237 | elif [[ -f .python-version ]]; then 238 | cat << 'END_OF_LINE' 239 | An existing .python-version file was found. 
Either remove this file yourself 240 | or re-run with the --force option to have it deleted along with the associated 241 | virtual environment. 242 | 243 | rm .python-version 244 | 245 | END_OF_LINE 246 | exit 1 247 | fi 248 | 249 | # Create a new virtual environment for this project 250 | # 251 | # If $PYTHON_VERSION is undefined then the current pyenv Python version will be used. 252 | # 253 | # We can't quote ${PYTHON_VERSION:=} below since if the variable is 254 | # undefined then we want nothing to appear; this is the reason for the 255 | # "shellcheck disable" line below. 256 | # 257 | # shellcheck disable=SC2086 258 | if ! pyenv virtualenv ${PYTHON_VERSION:=} "${env_name}"; then 259 | cat << END_OF_LINE 260 | An existing virtual environment named $env_name was found. Either delete this 261 | environment yourself or re-run with the --force option to have it deleted. 262 | 263 | pyenv virtualenv-delete ${env_name} 264 | 265 | END_OF_LINE 266 | exit 1 267 | fi 268 | 269 | # Set the local application-specific Python version(s) by writing the 270 | # version name to a file named `.python-version'. 271 | pyenv local "${env_name}" 272 | 273 | # Upgrade pip and friends 274 | python3 -m pip install --upgrade pip setuptools wheel 275 | 276 | # Find a requirements file (if possible) and install 277 | for req_file in "requirements-dev.txt" "requirements-test.txt" "requirements.txt"; do 278 | if [[ -f $req_file ]]; then 279 | pip install --requirement $req_file 280 | break 281 | fi 282 | done 283 | 284 | # Install git pre-commit hooks now or later. 285 | pre-commit install ${INSTALL_HOOKS:+"--install-hooks"} 286 | 287 | # Setup git remotes from lineage configuration 288 | # This could fail if the remotes are already setup, but that is ok. 
289 | set +o errexit 290 | 291 | eval "$( 292 | python3 << 'END_OF_LINE' 293 | from pathlib import Path 294 | import yaml 295 | import sys 296 | 297 | LINEAGE_CONFIG = Path(".github/lineage.yml") 298 | 299 | if not LINEAGE_CONFIG.exists(): 300 | print("No lineage configuration found.", file=sys.stderr) 301 | sys.exit(0) 302 | 303 | with LINEAGE_CONFIG.open("r") as f: 304 | lineage = yaml.safe_load(stream=f) 305 | 306 | if lineage["version"] == "1": 307 | for parent_name, v in lineage["lineage"].items(): 308 | remote_url = v["remote-url"] 309 | print(f"git remote add {parent_name} {remote_url};") 310 | print(f"git remote set-url --push {parent_name} no_push;") 311 | else: 312 | print(f'Unsupported lineage version: {lineage["version"]}', file=sys.stderr) 313 | END_OF_LINE 314 | )" 315 | 316 | # Install all necessary mypy type stubs 317 | mypy --install-types --non-interactive src/ 318 | 319 | # Qapla' 320 | echo "Success!" 321 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gophish-tools # 2 | 3 | [![GitHub Build Status](https://github.com/cisagov/gophish-tools/workflows/build/badge.svg)](https://github.com/cisagov/gophish-tools/actions) 4 | [![License](https://img.shields.io/github/license/cisagov/gophish-tools)](https://spdx.org/licenses/) 5 | [![CodeQL](https://github.com/cisagov/gophish-tools/workflows/CodeQL/badge.svg)](https://github.com/cisagov/gophish-tools/actions/workflows/codeql-analysis.yml) 6 | [![Coverage Status](https://coveralls.io/repos/github/cisagov/gophish-tools/badge.svg?branch=develop)](https://coveralls.io/github/cisagov/gophish-tools?branch=develop) 7 | [![Code Style](https://img.shields.io/badge/Code%20Style-black-black)](https://github.com/psf/black) 8 | 9 | ## Docker Image ## 10 | 11 | ![MicroBadger Layers](https://img.shields.io/microbadger/layers/cisagov/gophish-tools) 12 | ![Docker Image 
Size](https://img.shields.io/docker/image-size/cisagov/gophish-tools) 13 | 14 | This repository contains a set of scripts that can be used by phishing 15 | campaign assessors to simplify the process of managing Gophish campaigns. 16 | 17 | ## Scripts ## 18 | 19 | - `gophish-cleaner` - Removes an assessment or elements of an assessment 20 | in Gophish. 21 | - `gophish-complete` - Completes a campaign in Gophish and/or outputs a 22 | Gophish campaign summary. 23 | - `gophish-export` - Exports all the data from an assessment within Gophish 24 | into a single JSON file. In addition, user report JSONs for each campaign in 25 | an assessment will also be generated. 26 | - `gophish-import` - Imports an assessment JSON file into Gophish. 27 | - `gophish-test` - Sends a duplicate assessment from Gophish to custom 28 | targets as a test. 29 | - `pca-wizard` - Creates an assessment JSON file via an interactive "wizard". 30 | - `pca-wizard-templates` - Generates templates for files needed when creating 31 | an assessment JSON with `pca-wizard`. 32 | 33 | ## Usage ## 34 | 35 | The scripts in this project can be executed either in a local Python 36 | environment or in a Docker container. 37 | 38 | ### Install and run via local Python ### 39 | 40 | We strongly encourage the use of virtual Python environments. Please see 41 | [this section](CONTRIBUTING.md#installing-and-using-pyenv-and-pyenv-virtualenv) 42 | in our ["Contributing" document](CONTRIBUTING.md) for information on how 43 | to set up and use a virtual Python environment. 
44 | 45 | To install the scripts in your local Python environment: 46 | 47 | ```console 48 | git clone https://github.com/cisagov/gophish-tools.git 49 | cd gophish-tools 50 | pip install --requirement requirements.txt 51 | ``` 52 | 53 | After the scripts have been installed, they can be run like any other script: 54 | 55 | ```console 56 | gophish-import 57 | ``` 58 | 59 | ### Pull or build Docker image ### 60 | 61 | Pull `cisagov/gophish-tools` from the Docker repository: 62 | 63 | ```console 64 | docker pull cisagov/gophish-tools 65 | ``` 66 | 67 | Or build `cisagov/gophish-tools` from source: 68 | 69 | ```console 70 | git clone https://github.com/cisagov/gophish-tools.git 71 | cd gophish-tools 72 | docker build --tag cisagov/gophish-tools . 73 | ``` 74 | 75 | ### Run scripts via Docker ### 76 | 77 | The easiest way to use the containerized scripts is to alias them in your 78 | local shell: 79 | 80 | ```console 81 | eval "$(docker run cisagov/gophish-tools)" 82 | ``` 83 | 84 | That will add aliases to your **current shell** for all of the 85 | [scripts](#scripts) mentioned above, plus an additional one for 86 | `gophish-tools-bash`, which can be used to start up a `bash` shell inside 87 | a `gophish-tools` container. 88 | 89 | ## Assessment JSON Field Dictionary ## 90 | 91 | The following items are included in the assessment JSON as produced by 92 | `pca-wizard`. An [example assessment 93 | JSON](src/assessment/sample_assessment.json) can be found in this 94 | project. 95 | 96 | | Name | Description | Type | Default | Required | 97 | |------|-------------|:----:|:-------:|:--------:| 98 | | id | Assessment identifier. (e.g. `RV0000`) | string | | yes | 99 | | timezone | Timezone name based on [pytz](http://pytz.sourceforge.net/) timezones. (e.g. `US/Eastern`) | string | | yes | 100 | | domain | Assessment domain for Gophish public interface. (e.g. 
`domain.tld`) | string | | yes | 101 | | target_domain | Approved target domains where all email recipients must reside. (e.g. `[target1.tld, target2.tld]`) | list(string) | | yes | 102 | | start_date | Assessment start date in 24-hr ISO format with offset. (e.g. `2020-01-01T14:00:00-04:00`) | string | | yes | 103 | | end_date | Assessment end date in 24-hr ISO format with offset. (e.g. `2020-01-06T15:30:00-04:00`) | string | | yes | 104 | | reschedule | Indicates if the assessment json is a rescheduled assessment. | boolean | | yes | 105 | | start_campaign | The campaign that the assessment should start at. | integer | `1` | no | 106 | | groups | Consolidated list of email recipients grouped to receive campaigns, [example](#group-dictionary). | list(dict) | | yes | 107 | | pages | Gophish landing pages, [example](#page-dictionary). | list(dict) | | yes | 108 | | campaigns | Assessment campaigns, [example](#campaign-dictionary). | list(dict) | | yes | 109 | 110 | ### Group Dictionary ### 111 | 112 | Groups are imported from a templated `csv` file that can be generated 113 | with the command `pca-wizard-templates --targets`. 114 | 115 | | Name | Description | Type | Default | Required | 116 | |------|-------------|:----:|:-------:|:--------:| 117 | | name | Group name in the format of `{assessment identifier}-G{integer}` (e.g. `RV0000-G1`). | string | | yes | 118 | | targets | List of email recipients, [example](#target-dictionary). | list(dict) | | yes | 119 | 120 | ### Target Dictionary ### 121 | 122 | | Name | Description | Type | Default | Required | 123 | |------|-------------|:----:|:-------:|:--------:| 124 | | first_name | Recipient's first name. | string | | yes | 125 | | last_name | Recipient's last name. | string | | yes | 126 | | email | Recipient's email address. (e.g. `john.doe@target1.tld`) | string | | yes | 127 | | position | Position name for use in creating sub-groups of recipients within the organization such as "HR", "IT", etc. 
| string | | no | 128 | 129 | ### Page Dictionary ### 130 | 131 | | Name | Description | Type | Default | Required | 132 | |------|-------------|:----:|:-------:|:--------:| 133 | | name | Page name in the format of `{assessment identifier}-{integer}-{descriptor}` (e.g. `RV0000-1-AutoForward`). | string | | yes | 134 | | capture_credentials | Capture all non-password input with Gophish. Supports forwarding after submit action. | boolean | | yes | 135 | | capture_passwords | Capture password input by the user, but note that captured input is **stored in plain text as of Gophish version 0.9.0.** | boolean | `False` | no | 136 | | html | Content of the landing page in HTML format. | string | | yes | 137 | 138 | ### Campaign Dictionary ### 139 | 140 | | Name | Description | Type | Default | Required | 141 | |------|-------------|:----:|:-------:|:--------:| 142 | | name | Campaign name in the format of `{assessment identifier}-C{integer}` (e.g. `RV0000-C1`). | string | | yes | 143 | | launch_date | Campaign launch date in 24-hr ISO format with offset. (e.g. `2020-01-01T14:00:00-04:00`) | string | | yes | 144 | | completed_date | Campaign completion date in 24-hr ISO format with offset. (e.g. `2020-01-01T15:30:00-04:00`) | string | | yes | 145 | | url | Full URL for the campaign's landing page. (e.g. `http://domain.tld/camp/1`) | string | | yes | 146 | | page_name | Landing page name as defined in the assessment json. | string | | yes | 147 | | group_name | Group name as defined in the assessment json. | string | | yes | 148 | | template | Email template for the campaign, [example](#email-template-dictionary). | dict | | yes | 149 | | smtp | Gophish sending profile, [example](#smtp-dictionary). | dict | | yes | 150 | 151 | ### Email Template Dictionary ### 152 | 153 | Email templates can be imported from a templated `json` file that can be be generated 154 | with the command `pca-wizard-templates --emails`. 
| from_address | From email address with display name, required format: `{display name}<{sending email address}>`. (e.g. `John Doe <john.doe@domain.tld>`) | string | | yes |
| percent_clicks | The percentage of emails sent versus how many were clicked by a targeted user. | float |
220 | -------------------------------------------------------------------------------- /src/tools/gophish_import.py: -------------------------------------------------------------------------------- 1 | """Import an assessment JSON file into Gophish. 2 | 3 | Usage: 4 | gophish-import [--log-level=LEVEL] [--reschedule] ASSESSMENT_FILE SERVER API_KEY 5 | gophish-import (-h | --help) 6 | gophish-import --version 7 | 8 | Options: 9 | API_KEY Gophish API key. 10 | ASSESSMENT_FILE Name of the JSON file containing assessment data. 11 | SERVER Full URL to Gophish server. 12 | -r --reschedule Adjust the current schedule of an assessment with the new schedule in the ASSESSMENT_FILE. 13 | -h --help Show this screen. 14 | --version Show version. 15 | -l --log-level=LEVEL If specified, then the log level will be set to 16 | the specified value. Valid values are "debug", "info", 17 | "warning", "error", and "critical". [default: info] 18 | """ 19 | 20 | # Standard Python Libraries 21 | import json 22 | import logging 23 | import sys 24 | from typing import Dict 25 | 26 | # Third-Party Libraries 27 | from docopt import docopt 28 | 29 | # No type stubs exist for gophish, so we add "type: ignore" to tell mypy to 30 | # ignore this library 31 | from gophish.models import ( # type: ignore 32 | SMTP, 33 | Campaign, 34 | Error, 35 | Group, 36 | Page, 37 | Template, 38 | User, 39 | ) 40 | import urllib3 41 | 42 | # cisagov Libraries 43 | from tools.connect import connect_api 44 | 45 | from ._version import __version__ 46 | 47 | # Disable "Insecure Request" warning: Gophish uses a self-signed certificate 48 | # as default for https connections, which can not be verified by a third 49 | # party; thus, an SSL insecure request warning is produced. 
def load_landings(api, assessment):
    """Load an assessment's landing pages into Gophish.

    Posts each page in assessment["pages"] to the Gophish server. If a
    page with the same name already exists in Gophish, the old page is
    deleted and the new page is posted again. Each page dict is updated
    in place with the Gophish-assigned "id".

    Args:
        api: Authenticated Gophish API connection.
        assessment (dict): Assessment data containing a "pages" list.

    Returns:
        The assessment's list of page dicts, each updated with its
        Gophish page ID.
    """
    pages = assessment["pages"]

    for page in pages:
        new_page = Page()
        new_page.name = page["name"]
        new_page.capture_credentials = page["capture_credentials"]
        new_page.capture_passwords = page["capture_passwords"]
        new_page.html = page["html"]
        # "redirect_url" is not a documented page field in the assessment
        # JSON, so treat it as optional; get() avoids a KeyError when it
        # is absent.
        if page.get("redirect_url"):
            new_page.redirect_url = page["redirect_url"]

        # Debug page information
        logging.debug("Page Name: %s", new_page.name)
        logging.debug("Redirect URL: %s", new_page.redirect_url)

        # Catches when a page has already been loaded into Gophish.
        # Finds the current Gophish page ID so it can be deleted
        # prior to re-loading the new page.
        while True:
            try:
                new_page = api.pages.post(new_page)
                break
            except Error as e:
                if e.message == "Page name already in use":
                    logging.warning("%s. Finding previously loaded page to delete.", e)
                    old_pages = api.pages.get()
                    for old_page in old_pages:
                        if old_page.name == new_page.name:
                            logging.debug("Deleting Page with ID %d", old_page.id)
                            api.pages.delete(old_page.id)
                    logging.info("Re-Loading new page.")
                else:
                    # Lazy %-formatting instead of an f-string so the
                    # message is only built when the record is emitted.
                    logging.error("%s", e)
                    raise

    # Returns Landing Page ID
        logging.info("Landing Page %s loaded.", new_page.name)
        page["id"] = new_page.id

    return pages
def load_groups(api, assessment):
    """Load an assessment's target groups into Gophish.

    Posts each group in assessment["groups"] to the Gophish server. If a
    group with the same name already exists in Gophish, the old group is
    deleted and the new group is posted again. Each group dict is updated
    in place with the Gophish-assigned "id".

    Args:
        api: Authenticated Gophish API connection.
        assessment (dict): Assessment data containing a "groups" list.

    Returns:
        The assessment's list of group dicts, each updated with its
        Gophish group ID.
    """
    groups = assessment["groups"]

    for group in groups:
        logging.info("Loading Group %s", group["name"])

        new_group = Group()
        new_group.name = group["name"]

        for tgt in group["targets"]:
            target = User()
            target.first_name = tgt["first_name"]
            target.last_name = tgt["last_name"]
            target.email = tgt["email"]
            # "position" is an optional target field.
            if tgt.get("position"):
                target.position = tgt["position"]
            new_group.targets.append(target)

        # Catches when a Group has already been loaded into Gophish.
        # Finds the current Gophish group ID so it can be deleted
        # prior to re-loading the new group.
        while True:
            try:
                new_group = api.groups.post(new_group)
                break
            except Error as e:
                if e.message == "Group name already in use":
                    logging.warning("%s. Finding previously loaded group to delete.", e)
                    # Use a distinct name here: the original code assigned
                    # the API result to "groups", clobbering the
                    # assessment's group list that this function returns.
                    existing_groups = api.groups.get()
                    logging.debug(
                        "Checking %d for previously imported group to get ID",
                        len(existing_groups),
                    )
                    for old_group in existing_groups:
                        if old_group.name == new_group.name:
                            logging.debug("Deleting Group with ID %d", old_group.id)
                            api.groups.delete(old_group.id)
                    logging.info("Re-Loading new group.")
                else:
                    logging.exception(
                        "Exception encountered when loading group in Gophish (%s)", e
                    )
                    raise

        group["id"] = new_group.id

        logging.info("Group Ready: %s", new_group.name)

    return groups
def build_campaigns(api, assessment):
    """Build and load all campaigns for an assessment into Gophish.

    For each campaign in assessment["campaigns"]: posts the campaign's
    email template and sending profile (deleting any previously loaded
    ones with the same name), removes any pre-existing campaign with the
    same name, and then posts the campaign itself.

    Args:
        api: Authenticated Gophish API connection.
        assessment (dict): Assessment data containing a "campaigns" list.

    Raises:
        Error: If a Gophish API call fails for a reason other than a
            name collision.
    """
    logging.info("Building Campaigns.")
    for campaign in assessment["campaigns"]:
        logging.info("Building Campaign: %s", campaign["name"])

        # Build Template object
        new_template = Template(
            name=campaign["template"]["name"],
            subject=campaign["template"]["subject"],
            html=campaign["template"]["html"],
            text=campaign["template"]["text"],
        )

        # Catches when Template has already been loaded into Gophish.
        # Finds the current Gophish template ID so it can be deleted
        # prior to re-loading the new template.
        while True:
            try:
                new_template = api.templates.post(new_template)
                break
            except Error as e:
                if e.message == "Template name already in use":
                    logging.warning(
                        "%s. Finding previously loaded template to delete.", e.message
                    )
                    templates = api.templates.get()
                    logging.debug(
                        "Checking %d for previously imported template to get ID",
                        len(templates),
                    )
                    for old_template in templates:
                        if old_template.name == new_template.name:
                            logging.debug(
                                "Deleting Template with ID %d", old_template.id
                            )
                            api.templates.delete(old_template.id)
                    logging.info("Re-Loading new template.")
                else:
                    logging.exception(
                        "Exception encountered when loading template in Gophish (%s)",
                        e.message,
                    )
                    raise

        # Build SMTP Object. The host, interface type, and certificate
        # handling are optional fields in the assessment JSON with
        # documented defaults; fall back to those defaults when absent
        # instead of hard-coding them or raising a KeyError.
        new_smtp = SMTP(
            name=campaign["smtp"]["name"],
            host=campaign["smtp"].get("host", "postfix:587"),
            from_address=campaign["smtp"]["from_address"],
            interface_type=campaign["smtp"].get("interface_type", "SMTP"),
            ignore_cert_errors=campaign["smtp"].get("ignore_cert", True),
        )
        # Credentials are only applied when both are provided.
        if (
            "username" in campaign["smtp"]
            and "password" in campaign["smtp"]
        ):
            new_smtp.username = campaign["smtp"]["username"]
            new_smtp.password = campaign["smtp"]["password"]

        while True:
            try:
                new_smtp = api.smtp.post(new_smtp)
                break
            except Error as e:
                if e.message == "SMTP name already in use":
                    logging.warning("%s. Finding previously loaded smtp to delete.", e)
                    smtps = api.smtp.get()
                    logging.debug(
                        "Checking %d for previously imported smtp profiles to get ID",
                        len(smtps),
                    )
                    for old_smtp in smtps:
                        if old_smtp.name == new_smtp.name:
                            logging.debug("Deleting SMTP with ID %d", old_smtp.id)
                            api.smtp.delete(old_smtp.id)
                    logging.info("Re-Loading new SMTP.")
                else:
                    logging.exception(
                        "Exception encountered when loading SMTP in Gophish (%s)",
                        e.message,
                    )
                    raise

        # Check to remove any campaigns with the same name
        old_campaigns = api.campaigns.get()
        for old_campaign in old_campaigns:
            if old_campaign.name == campaign["name"]:
                logging.warning(
                    "Previous Campaign found with name %s.", campaign["name"]
                )
                logging.warning(
                    "Previous Campaign with id %d being deleted.", old_campaign.id
                )
                api.campaigns.delete(old_campaign.id)

        # Loads the campaign.
        # NOTE(review): the original read campaign["complete_date"], but
        # the documented assessment field is "completed_date"; accept
        # either — confirm which key the assessment wizard emits.
        try:
            api.campaigns.post(
                Campaign(
                    name=campaign["name"],
                    groups=[Group(name=campaign["group_name"])],
                    page=Page(name=campaign["page_name"]),
                    template=new_template,
                    smtp=new_smtp,
                    url=campaign["url"],
                    launch_date=campaign["launch_date"],
                    completed_date=campaign.get(
                        "complete_date", campaign.get("completed_date")
                    ),
                )
            )
        except Exception as e:
            # Format the exception itself: a generic Exception has no
            # ".message" attribute, so the original formatting raised an
            # AttributeError here and masked the real error.
            logging.exception(
                "Exception encountered when loading campaign in Gophish (%s)", e
            )
            raise

        logging.info("Campaign %s successfully loaded.", campaign["name"])
def main() -> None:
    """Set up logging, connect to API, import all assessment data.

    Exits with status 1 on a bad log level, a failed API connection, an
    unreadable or malformed assessment file, or any Gophish load error.
    """
    args: Dict[str, str] = docopt(__doc__, version=__version__)

    # Set up logging
    log_level = args["--log-level"]
    try:
        logging.basicConfig(
            format="%(asctime)-15s %(levelname)s: %(message)s", level=log_level.upper()
        )
    except ValueError:
        # List every level the usage docstring accepts (the original
        # message omitted "critical").
        logging.critical(
            '"%s" is not a valid logging level. Possible values are debug, info, warning, error, and critical.',
            log_level,
        )
        sys.exit(1)

    # Connect to the Gophish API.
    try:
        api = connect_api(args["API_KEY"], args["SERVER"])
        logging.debug("Connected to: %s", args["SERVER"])
    except Exception as e:
        logging.critical(e.args[0])
        # Stop logging and clean up
        logging.shutdown()
        sys.exit(1)

    # Load assessment JSON from file
    try:
        with open(args["ASSESSMENT_FILE"]) as json_file:
            assessment = json.load(json_file)
    except FileNotFoundError as e:
        logging.exception("Unable to locate Assessment file (%s)", e)
        # Stop logging and clean up
        logging.shutdown()
        sys.exit(1)
    except PermissionError as e:
        logging.exception("Permission denied for opening Assessment file (%s)", e)
        # Stop logging and clean up
        logging.shutdown()
        sys.exit(1)
    except json.JSONDecodeError as e:
        # A malformed assessment file previously escaped as an unhandled
        # exception; fail with a clear message instead.
        logging.exception("Assessment file is not valid JSON (%s)", e)
        # Stop logging and clean up
        logging.shutdown()
        sys.exit(1)

    try:
        # Load Landing page
        assessment["pages"] = load_landings(api, assessment)

        # Load Groups into Gophish, returns group numbers correlated to Group number
        assessment["groups"] = load_groups(api, assessment)

        # Load Campaigns
        build_campaigns(api, assessment)

        # Stop logging and clean up
        logging.shutdown()

    except Exception as e:
        logging.exception(
            "Exception encountered while loading data from Gophish (%s: %s)", type(e), e
        )
        logging.critical("Closing with an error. Assessment not successfully loaded.")
        # Stop logging and clean up
        logging.shutdown()
        sys.exit(1)
def assessment_exists(api, assessment_id):
    """Check if Gophish has at least one campaign for designated assessment.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to get campaigns from.

    Returns:
        boolean: Indicates if a campaign is found starting with assessment_id.
    """
    return any(
        existing.name.startswith(assessment_id) for existing in api.campaigns.get()
    )
def export_targets(api, assessment_id):
    """Add all targets to a list.

    Achieved by pulling the group IDs for any group starting with
    the assessment id. The targets within the group are then parsed
    into a targets list of target dicts. Each target dict includes a
    sha256 hash of the target's email and assessment id with any labels.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to get campaigns from.

    Returns:
        List of targets from the assessment's group(s).
    """
    targets = []

    for gid in get_group_ids(api, assessment_id):
        # Pull the raw target dicts for this group from Gophish.
        for raw in api.groups.get(gid).as_dict()["targets"]:
            entry = {
                # Email addresses are hashed so no raw address is exported.
                "id": hashlib.sha256(raw["email"].encode("utf-8")).hexdigest(),
                "customer_defined_labels": {},
            }
            if "position" in raw:
                entry["customer_defined_labels"][assessment_id] = [raw["position"]]
            targets.append(entry)

    logging.info(
        "%d email targets found for assessment %s.", len(targets), assessment_id
    )

    return targets
def export_campaigns(api, assessment_id):
    """Add all the campaigns' data for an assessment to a list.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to get campaigns from.

    Returns:
        List of the assessment's campaigns with data.
    """
    campaigns = [
        get_campaign_data(api, cid) for cid in get_campaign_ids(api, assessment_id)
    ]

    logging.info("%d campaigns found for assessment %s.", len(campaigns), assessment_id)

    return campaigns
def get_click_data(api, campaign_id):
    """Return a list of all clicks for a given campaign.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        campaign_id: Gophish campaign ID whose timeline is examined.

    Returns:
        List of click dicts (hashed user, source IP, time, application).
    """
    timeline = api.campaigns.get(campaign_id).as_dict()["timeline"]
    clicks = []

    for event in timeline:
        # Only "Clicked Link" timeline entries are click events.
        if event["message"] != "Clicked Link":
            continue
        clicks.append(
            {
                # Hash the email so no raw address is exported.
                "user": hashlib.sha256(event["email"].encode("utf-8")).hexdigest(),
                "source_ip": event["details"]["browser"]["address"],
                "time": event["time"],
                "application": get_application(event),
            }
        )

    return clicks
def get_email_status(api, campaign_id):
    """Return the email send status and time.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        campaign_id: Gophish campaign ID whose timeline is examined.

    Returns:
        List of dicts with "user" (sha256 of the email address), "time",
        and "status" for each send attempt in the campaign timeline.
    """
    rawEvents = api.campaigns.get(campaign_id).as_dict()["timeline"]
    status = list()
    for rawEvent in rawEvents:
        email = dict()

        if rawEvent["message"] == "Email Sent":
            email["user"] = hashlib.sha256(
                rawEvent["email"].encode("utf-8")
            ).hexdigest()

            email["time"] = rawEvent["time"]

            email["status"] = "SUCCESS"

        elif rawEvent["message"] == "Error Sending Email":
            email["user"] = hashlib.sha256(
                rawEvent["email"].encode("utf-8")
            ).hexdigest()

            # Trim microseconds but keep the value as an ISO string. The
            # original converted it to a datetime object, which is not
            # JSON serializable and would make the later json.dump of the
            # exported assessment data fail.
            email["time"] = rawEvent["time"].split(".")[0]

            # NOTE(review): "Failed" is inconsistent with the "SUCCESS"
            # casing above; left unchanged in case downstream consumers
            # match on the exact string.
            email["status"] = "Failed"

        # Skip timeline entries that are neither sends nor send errors.
        if email:
            status.append(email)

    return status
def find_unique_target_clicks_count(clicks):
    """Return the number of unique clicks in a click set.

    Args:
        clicks: List of click dicts, each containing a "user" key.

    Returns:
        int: Count of distinct "user" values across the clicks.
    """
    return len({click["user"] for click in clicks})
def write_campaign_summary(api, assessment_id):
    """Output a campaign summary report to JSON, console, and a text file.

    Reads the "campaign_data.json" template from the current directory,
    fills in per-level campaign statistics, writes a human-readable text
    summary, and dumps the filled template to a per-assessment JSON file.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to summarize.
    """
    campaign_ids = get_campaign_ids(api, assessment_id)
    campaign_data_template = "campaign_data.json"
    campaign_summary_json = f"{assessment_id}_campaign_data.json"
    campaign_summary_textfile = f"{assessment_id}_summary_{datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S')}.txt"

    with open(campaign_data_template) as template:
        campaign_data = json.load(template)

    logging.info("Writing campaign summary report to %s", campaign_summary_textfile)

    # The level must be captured as a *named* group: the original pattern
    # "(?Plevel-[1-6])" is invalid regex syntax (missing "<level>") and
    # raises re.error at compile time; match.group("level") below
    # requires the name.
    regex = re.compile(r"^.*_(?P<level>level-[1-6])$")

    # Use a context manager so the report file is closed even if an API
    # call below raises.
    with open(campaign_summary_textfile, "w+") as file_out:
        file_out.write("Campaigns for Assessment: " + assessment_id)

        for campaign_id in campaign_ids:
            campaign = api.campaigns.get(campaign_id)
            match = regex.fullmatch(campaign.name)
            if match:
                level = match.group("level")
            else:
                # logging.warn is deprecated; use logging.warning.
                logging.warning(
                    "Encountered campaign (%s) that is unable to be processed for campaign summary export. \n"
                    "Campaign name is not properly suffixed with the campaign level number (e.g. '_level-1')\n"
                    "Skipping campaign",
                    campaign.name,
                )
                continue

            logging.info(level)
            clicks = get_click_data(api, campaign_id)

            total_clicks = api.campaigns.summary(campaign_id=campaign_id).stats.clicked
            unique_clicks = find_unique_target_clicks_count(clicks)
            # Guard against division by zero when no clicks were recorded.
            if total_clicks > 0:
                percent_clicks = unique_clicks / float(total_clicks)
            else:
                percent_clicks = 0.0
            campaign_data[level]["subject"] = campaign.template.subject
            campaign_data[level]["sender"] = campaign.smtp.from_address
            campaign_data[level]["start_date"] = campaign.launch_date
            campaign_data[level]["end_date"] = campaign.completed_date
            campaign_data[level]["redirect"] = campaign.url
            campaign_data[level]["clicks"] = total_clicks
            campaign_data[level]["unique_clicks"] = unique_clicks
            campaign_data[level]["percent_clicks"] = percent_clicks

            file_out.write("\n")
            file_out.write("-" * 50)
            file_out.write("\nCampaign: %s" % campaign.name)
            file_out.write("\nSubject: %s" % campaign_data[level]["subject"])
            file_out.write("\nSender: %s" % campaign_data[level]["sender"])
            file_out.write("\nStart Date: %s" % campaign_data[level]["start_date"])
            file_out.write("\nEnd Date: %s" % campaign_data[level]["end_date"])
            file_out.write("\nRedirect: %s" % campaign_data[level]["redirect"])
            file_out.write("\nClicks: %d" % campaign_data[level]["clicks"])
            file_out.write("\nUnique Clicks: %d" % campaign_data[level]["unique_clicks"])
            file_out.write(
                "\nPercentage Clicks: %f" % campaign_data[level]["percent_clicks"]
            )

    logging.info("Writing out summary JSON to %s", campaign_summary_json)
    with open(campaign_summary_json, "w") as fp:
        json.dump(campaign_data, fp, indent=4)
def export_user_reports(api, assessment_id):
    """Build and export a user_report JSON file for each campaign in an assessment.

    Args:
        api (Gophish API): Connection to Gophish server via the API.
        assessment_id (string): Assessment identifier to get campaigns from.
    """
    for campaign_id in get_campaign_ids(api, assessment_id):
        campaign = get_campaign_data(api, campaign_id)

        # Parse every click time (microseconds trimmed) and take the
        # earliest one, if any clicks were recorded.
        click_times = [
            datetime.strptime(click["time"].split(".")[0], "%Y-%m-%dT%H:%M:%S")
            for click in campaign["clicks"]
        ]
        first_report = min(click_times, default=None)

        user_report_doc = {
            # The "customer" field is a placeholder added for operator
            # convenience when working with the JSON file created.
            "customer": "",
            "assessment": assessment_id,
            # get_campaign_ids() returns integers, but the "campaign"
            # field expects a string.
            "campaign": str(campaign_id),
            "first_report": (
                datetime.strftime(first_report, "%Y-%m-%dT%H:%M:%S")
                if first_report is not None
                else "No clicks reported"
            ),
            "total_num_reports": api.campaigns.summary(
                campaign_id=campaign_id
            ).stats.clicked,
        }

        logging.info(
            "Writing out user report for campaign %s in assessment %s",
            campaign["id"],
            assessment_id,
        )

        with open(f"{assessment_id}_{campaign_id}_user_report_doc.json", "w") as fp:
            json.dump(user_report_doc, fp, indent=4)
def main() -> None:
    """Set up logging, connect to API, export all assessment data.

    Exits with status 1 on a bad log level, a failed API connection, an
    invalid assessment identifier, or a nonexistent assessment.
    """
    args: Dict[str, str] = docopt(__doc__, version=__version__)

    # Set up logging
    log_level = args["--log-level"]
    try:
        logging.basicConfig(
            format="\n%(levelname)s: %(message)s", level=log_level.upper()
        )
    except ValueError:
        # List every level the usage docstring accepts (the original
        # message omitted "critical").
        logging.critical(
            '"%s" is not a valid logging level. Possible values are debug, info, warning, error, and critical.',
            log_level,
        )
        sys.exit(1)

    # Connect to API
    try:
        api = connect_api(args["API_KEY"], args["SERVER"])
        logging.debug("Connected to: %s", args["SERVER"])
    except Exception as e:
        logging.critical(e.args[0])
        sys.exit(1)

    if not validate_assessment_id(args["ASSESSMENT_ID"]):
        logging.critical(
            '"%s" is an invalid assessment_id format. Assessment identifiers begin with RV and are followed by '
            "a 4 or 5 digit numerical sequence. Examples: RV1234, RV12345",
            args["ASSESSMENT_ID"],
        )
        sys.exit(1)

    if assessment_exists(api, args["ASSESSMENT_ID"]):
        assessment_dict: Dict = dict()

        # Add targets list to assessment dict.
        assessment_dict["targets"] = export_targets(api, args["ASSESSMENT_ID"])

        # Add campaigns list to the assessment dict.
        assessment_dict["campaigns"] = export_campaigns(api, args["ASSESSMENT_ID"])

        with open(f'data_{args["ASSESSMENT_ID"]}.json', "w") as fp:
            json.dump(assessment_dict, fp, indent=4)

        logging.info("Data written to data_%s.json", args["ASSESSMENT_ID"])

        export_user_reports(api, args["ASSESSMENT_ID"])
        write_campaign_summary(api, args["ASSESSMENT_ID"])
    else:
        logging.error(
            'Assessment "%s" does not exist in Gophish.', args["ASSESSMENT_ID"]
        )
        sys.exit(1)
The `-Eueo pipefail` sets errtrace, 15 | # nounset, errexit, and pipefail. The `-x` will print all commands as they are 16 | # run. Please see the GitHub Actions documentation for more information: 17 | # https://docs.github.com/en/actions/using-jobs/setting-default-values-for-jobs 18 | defaults: 19 | run: 20 | shell: bash -Eueo pipefail -x {0} 21 | 22 | env: 23 | IMAGE_NAME: cisagov/gophish-tools 24 | PIP_CACHE_DIR: ~/.cache/pip 25 | PRE_COMMIT_CACHE_DIR: ~/.cache/pre-commit 26 | RUN_TMATE: ${{ secrets.RUN_TMATE }} 27 | TERRAFORM_DOCS_REPO_BRANCH_NAME: improvement/support_atx_closed_markdown_headers 28 | TERRAFORM_DOCS_REPO_DEPTH: 1 29 | TERRAFORM_DOCS_REPO_URL: https://github.com/mcdonnnj/terraform-docs.git 30 | 31 | jobs: 32 | diagnostics: 33 | name: Run diagnostics 34 | # This job does not need any permissions 35 | permissions: {} 36 | runs-on: ubuntu-latest 37 | steps: 38 | # Note that a duplicate of this step must be added at the top of 39 | # each job. 40 | - name: Apply standard cisagov job preamble 41 | uses: cisagov/action-job-preamble@v1 42 | with: 43 | check_github_status: "true" 44 | # This functionality is poorly implemented and has been 45 | # causing problems due to the MITM implementation hogging or 46 | # leaking memory. As a result we disable it by default. If 47 | # you want to temporarily enable it, simply set 48 | # monitor_permissions equal to "true". 49 | # 50 | # TODO: Re-enable this functionality when practical. See 51 | # cisagov/skeleton-generic#207 for more details. 52 | monitor_permissions: "false" 53 | output_workflow_context: "true" 54 | # Use a variable to specify the permissions monitoring 55 | # configuration. By default this will yield the 56 | # configuration stored in the cisagov organization-level 57 | # variable, but if you want to use a different configuration 58 | # then simply: 59 | # 1. Create a repository-level variable with the name 60 | # ACTIONS_PERMISSIONS_CONFIG. 61 | # 2. 
Set this new variable's value to the configuration you 62 | # want to use for this repository. 63 | # 64 | # Note in particular that changing the permissions 65 | # monitoring configuration *does not* require you to modify 66 | # this workflow. 67 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 68 | lint: 69 | needs: 70 | - diagnostics 71 | permissions: 72 | # actions/checkout needs this to fetch code 73 | contents: read 74 | runs-on: ubuntu-latest 75 | steps: 76 | - name: Apply standard cisagov job preamble 77 | uses: cisagov/action-job-preamble@v1 78 | with: 79 | # This functionality is poorly implemented and has been 80 | # causing problems due to the MITM implementation hogging or 81 | # leaking memory. As a result we disable it by default. If 82 | # you want to temporarily enable it, simply set 83 | # monitor_permissions equal to "true". 84 | # 85 | # TODO: Re-enable this functionality when practical. See 86 | # cisagov/skeleton-generic#207 for more details. 87 | monitor_permissions: "false" 88 | # Use a variable to specify the permissions monitoring 89 | # configuration. By default this will yield the 90 | # configuration stored in the cisagov organization-level 91 | # variable, but if you want to use a different configuration 92 | # then simply: 93 | # 1. Create a repository-level variable with the name 94 | # ACTIONS_PERMISSIONS_CONFIG. 95 | # 2. Set this new variable's value to the configuration you 96 | # want to use for this repository. 97 | # 98 | # Note in particular that changing the permissions 99 | # monitoring configuration *does not* require you to modify 100 | # this workflow. 
101 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 102 | - id: setup-env 103 | uses: cisagov/setup-env-github-action@v1 104 | - uses: actions/checkout@v5 105 | - id: setup-python 106 | uses: actions/setup-python@v6 107 | with: 108 | python-version: ${{ steps.setup-env.outputs.python-version }} 109 | # We need the Go version and Go cache location for the actions/cache step, 110 | # so the Go installation must happen before that. 111 | - id: setup-go 112 | uses: actions/setup-go@v6 113 | with: 114 | # There is no expectation for actual Go code so we disable caching as 115 | # it relies on the existence of a go.sum file. 116 | cache: false 117 | go-version: ${{ steps.setup-env.outputs.go-version }} 118 | - id: go-cache 119 | name: Lookup Go cache directory 120 | run: | 121 | echo "dir=$(go env GOCACHE)" >> $GITHUB_OUTPUT 122 | - uses: actions/cache@v4 123 | env: 124 | BASE_CACHE_KEY: ${{ github.job }}-${{ runner.os }}-\ 125 | py${{ steps.setup-python.outputs.python-version }}-\ 126 | go${{ steps.setup-go.outputs.go-version }}-\ 127 | packer${{ steps.setup-env.outputs.packer-version }}-\ 128 | tf${{ steps.setup-env.outputs.terraform-version }}- 129 | with: 130 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 131 | # file in the root of the repository is used. This is in case a Python 132 | # package were to have a 'setup.py' as part of its internal codebase. 133 | key: ${{ env.BASE_CACHE_KEY }}\ 134 | ${{ hashFiles('**/requirements-test.txt') }}-\ 135 | ${{ hashFiles('**/requirements.txt') }}-\ 136 | ${{ hashFiles('**/.pre-commit-config.yaml') }}-\ 137 | ${{ hashFiles('setup.py') }} 138 | # Note that the .terraform directory IS NOT included in the 139 | # cache because if we were caching, then we would need to use 140 | # the `-upgrade=true` option. This option blindly pulls down the 141 | # latest modules and providers instead of checking to see if an 142 | # update is required. 
That behavior defeats the benefits of caching. 143 | # so there is no point in doing it for the .terraform directory. 144 | path: | 145 | ${{ env.PIP_CACHE_DIR }} 146 | ${{ env.PRE_COMMIT_CACHE_DIR }} 147 | ${{ steps.go-cache.outputs.dir }} 148 | restore-keys: | 149 | ${{ env.BASE_CACHE_KEY }} 150 | - uses: hashicorp/setup-packer@v3 151 | with: 152 | version: ${{ steps.setup-env.outputs.packer-version }} 153 | - uses: hashicorp/setup-terraform@v3 154 | with: 155 | terraform_version: ${{ steps.setup-env.outputs.terraform-version }} 156 | - name: Install go-critic 157 | env: 158 | PACKAGE_URL: github.com/go-critic/go-critic/cmd/gocritic 159 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.go-critic-version }} 160 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 161 | - name: Install goimports 162 | env: 163 | PACKAGE_URL: golang.org/x/tools/cmd/goimports 164 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.goimports-version }} 165 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 166 | - name: Install gosec 167 | env: 168 | PACKAGE_URL: github.com/securego/gosec/v2/cmd/gosec 169 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.gosec-version }} 170 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 171 | - name: Install staticcheck 172 | env: 173 | PACKAGE_URL: honnef.co/go/tools/cmd/staticcheck 174 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.staticcheck-version }} 175 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 176 | # TODO: https://github.com/cisagov/skeleton-generic/issues/165 177 | # We are temporarily using @mcdonnnj's forked branch of terraform-docs 178 | # until his PR: https://github.com/terraform-docs/terraform-docs/pull/745 179 | # is approved. This temporary fix will allow for ATX header support when 180 | # terraform-docs is run during linting. 
181 | - name: Clone ATX headers branch from terraform-docs fork 182 | run: | 183 | git clone \ 184 | --branch $TERRAFORM_DOCS_REPO_BRANCH_NAME \ 185 | --depth $TERRAFORM_DOCS_REPO_DEPTH \ 186 | --single-branch \ 187 | $TERRAFORM_DOCS_REPO_URL /tmp/terraform-docs 188 | - name: Build and install terraform-docs binary 189 | run: | 190 | go build \ 191 | -C /tmp/terraform-docs \ 192 | -o $(go env GOPATH)/bin/terraform-docs 193 | - name: Install dependencies 194 | # We don't upgrade setuptools here because the versions of 195 | # gophish on PyPI are ancient and depend on an equally ancient 196 | # version of setuptools. The gophish package installation 197 | # will fail if we let pip pull in the latest setuptools; 198 | # instead, we let the gophish package installation itself 199 | # choose the version of setuptools to install. 200 | run: | 201 | python -m pip install --upgrade pip wheel 202 | pip install --upgrade --requirement requirements-test.txt 203 | - name: Set up pre-commit hook environments 204 | run: pre-commit install-hooks 205 | - name: Run pre-commit on all files 206 | run: pre-commit run --all-files 207 | - name: Setup tmate debug session 208 | uses: mxschmitt/action-tmate@v3 209 | if: env.RUN_TMATE 210 | test: 211 | name: test source - py${{ matrix.python-version }} - ${{ matrix.platform }} 212 | needs: 213 | - diagnostics 214 | permissions: 215 | # actions/checkout needs this to fetch code 216 | contents: read 217 | runs-on: ${{ matrix.platform }} 218 | strategy: 219 | fail-fast: false 220 | matrix: 221 | # We test on all of the latest platforms available to use with GitHub- 222 | # hosted runners for public repositories. 223 | platform: 224 | - macos-latest 225 | - ubuntu-24.04-arm 226 | - ubuntu-latest 227 | - windows-latest 228 | python-version: 229 | - "3.10" 230 | - "3.11" 231 | # gophish-tools cannot support Python 3.12 until the gophish 232 | # Python package supports a newer version of the urllib3 233 | # library. 
The reason is identical to what is discussed here: 234 | # https://bugzilla.mozilla.org/show_bug.cgi?id=1857492 235 | # - "3.12" 236 | # - "3.13" 237 | # - "3.14" 238 | steps: 239 | - name: Apply standard cisagov job preamble 240 | uses: cisagov/action-job-preamble@v1 241 | with: 242 | # This functionality is poorly implemented and has been 243 | # causing problems due to the MITM implementation hogging or 244 | # leaking memory. As a result we disable it by default. If 245 | # you want to temporarily enable it, simply set 246 | # monitor_permissions equal to "true". 247 | # 248 | # TODO: Re-enable this functionality when practical. See 249 | # cisagov/skeleton-python-library#149 for more details. 250 | monitor_permissions: "false" 251 | # Use a variable to specify the permissions monitoring 252 | # configuration. By default this will yield the 253 | # configuration stored in the cisagov organization-level 254 | # variable, but if you want to use a different configuration 255 | # then simply: 256 | # 1. Create a repository-level variable with the name 257 | # ACTIONS_PERMISSIONS_CONFIG. 258 | # 2. Set this new variable's value to the configuration you 259 | # want to use for this repository. 260 | # 261 | # Note in particular that changing the permissions 262 | # monitoring configuration *does not* require you to modify 263 | # this workflow. 264 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 265 | - uses: actions/checkout@v5 266 | - id: setup-python 267 | uses: actions/setup-python@v6 268 | with: 269 | python-version: ${{ matrix.python-version }} 270 | - uses: actions/cache@v4 271 | env: 272 | BASE_CACHE_KEY: ${{ github.job }}-\ 273 | ${{ runner.os }}-${{ runner.arch }}-\ 274 | py${{ steps.setup-python.outputs.python-version }}- 275 | with: 276 | path: ${{ env.PIP_CACHE_DIR }} 277 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 278 | # file in the root of the repository is used. 
This is in case a Python 279 | # package were to have a 'setup.py' as part of its internal codebase. 280 | key: ${{ env.BASE_CACHE_KEY }}\ 281 | ${{ hashFiles('**/requirements-test.txt') }}-\ 282 | ${{ hashFiles('**/requirements.txt') }}-\ 283 | ${{ hashFiles('setup.py') }} 284 | restore-keys: | 285 | ${{ env.BASE_CACHE_KEY }} 286 | - name: Install dependencies 287 | run: | 288 | python -m pip install --upgrade pip 289 | pip install --upgrade --requirement requirements-test.txt 290 | - name: Run tests 291 | env: 292 | RELEASE_TAG: ${{ github.event.release.tag_name }} 293 | run: pytest 294 | - name: Upload coverage report 295 | uses: coverallsapp/github-action@v2 296 | with: 297 | flag-name: py${{ matrix.python-version }} - ${{ matrix.platform }} 298 | parallel: true 299 | if: success() 300 | - name: Setup tmate debug session 301 | uses: mxschmitt/action-tmate@v3 302 | if: env.RUN_TMATE 303 | coveralls-finish: 304 | permissions: 305 | # actions/checkout needs this to fetch code 306 | contents: read 307 | runs-on: ubuntu-latest 308 | needs: 309 | - diagnostics 310 | - test 311 | steps: 312 | - name: Apply standard cisagov job preamble 313 | uses: cisagov/action-job-preamble@v1 314 | with: 315 | # This functionality is poorly implemented and has been 316 | # causing problems due to the MITM implementation hogging or 317 | # leaking memory. As a result we disable it by default. If 318 | # you want to temporarily enable it, simply set 319 | # monitor_permissions equal to "true". 320 | # 321 | # TODO: Re-enable this functionality when practical. See 322 | # cisagov/skeleton-python-library#149 for more details. 323 | monitor_permissions: "false" 324 | # Use a variable to specify the permissions monitoring 325 | # configuration. By default this will yield the 326 | # configuration stored in the cisagov organization-level 327 | # variable, but if you want to use a different configuration 328 | # then simply: 329 | # 1. 
Create a repository-level variable with the name 330 | # ACTIONS_PERMISSIONS_CONFIG. 331 | # 2. Set this new variable's value to the configuration you 332 | # want to use for this repository. 333 | # 334 | # Note in particular that changing the permissions 335 | # monitoring configuration *does not* require you to modify 336 | # this workflow. 337 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 338 | - uses: actions/checkout@v5 339 | - name: Finished coveralls reports 340 | uses: coverallsapp/github-action@v2 341 | with: 342 | parallel-finished: true 343 | - name: Setup tmate debug session 344 | uses: mxschmitt/action-tmate@v3 345 | if: env.RUN_TMATE 346 | build: 347 | name: build wheel - py${{ matrix.python-version }} 348 | needs: 349 | - diagnostics 350 | - lint 351 | - test 352 | permissions: 353 | # actions/checkout needs this to fetch code 354 | contents: read 355 | runs-on: ubuntu-latest 356 | strategy: 357 | fail-fast: false 358 | matrix: 359 | python-version: 360 | - "3.10" 361 | - "3.11" 362 | # gophish-tools cannot support Python 3.12 until the gophish 363 | # Python package supports a newer version of the urllib3 364 | # library. The reason is identical to what is discussed here: 365 | # https://bugzilla.mozilla.org/show_bug.cgi?id=1857492 366 | # - "3.12" 367 | # - "3.13" 368 | # - "3.14" 369 | steps: 370 | - name: Apply standard cisagov job preamble 371 | uses: cisagov/action-job-preamble@v1 372 | with: 373 | # This functionality is poorly implemented and has been 374 | # causing problems due to the MITM implementation hogging or 375 | # leaking memory. As a result we disable it by default. If 376 | # you want to temporarily enable it, simply set 377 | # monitor_permissions equal to "true". 378 | # 379 | # TODO: Re-enable this functionality when practical. See 380 | # cisagov/skeleton-python-library#149 for more details. 
381 | monitor_permissions: "false" 382 | # Use a variable to specify the permissions monitoring 383 | # configuration. By default this will yield the 384 | # configuration stored in the cisagov organization-level 385 | # variable, but if you want to use a different configuration 386 | # then simply: 387 | # 1. Create a repository-level variable with the name 388 | # ACTIONS_PERMISSIONS_CONFIG. 389 | # 2. Set this new variable's value to the configuration you 390 | # want to use for this repository. 391 | # 392 | # Note in particular that changing the permissions 393 | # monitoring configuration *does not* require you to modify 394 | # this workflow. 395 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 396 | - uses: actions/checkout@v5 397 | - id: setup-python 398 | uses: actions/setup-python@v6 399 | with: 400 | python-version: ${{ matrix.python-version }} 401 | - uses: actions/cache@v4 402 | env: 403 | BASE_CACHE_KEY: ${{ github.job }}-${{ runner.os }}-\ 404 | py${{ steps.setup-python.outputs.python-version }}- 405 | with: 406 | path: ${{ env.PIP_CACHE_DIR }} 407 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 408 | # file in the root of the repository is used. This is in case a Python 409 | # package were to have a 'setup.py' as part of its internal codebase. 410 | key: ${{ env.BASE_CACHE_KEY }}\ 411 | ${{ hashFiles('**/requirements.txt') }}-\ 412 | ${{ hashFiles('setup.py') }} 413 | restore-keys: | 414 | ${{ env.BASE_CACHE_KEY }} 415 | - name: Install build dependencies 416 | # We don't upgrade setuptools here because gophish is ancient 417 | # code that depends on an ancient version of setuptools. The 418 | # pip install command fails if we let the python -m pip 419 | # command pull in the latest setuptools; it is safer to let 420 | # the pip install command pull in setuptools itself. 
421 |         run: |
422 |           python -m pip install --upgrade pip wheel
423 |           python -m pip install --upgrade build
424 |       - name: Build Python artifacts
425 |         run: python -m build
426 |       - name: Determine project version
427 |         id: get_ver
428 |         run: |
429 |           echo "version=$(./bump-version show)" >> $GITHUB_OUTPUT
430 |       - name: Build Docker image
431 |         run: |
432 |           version=$(./bump-version show)
433 |           docker build \
434 |             --tag "$IMAGE_NAME" \
435 |             --build-arg GIT_COMMIT=$(git log -1 --format=%H) \
436 |             --build-arg GIT_REMOTE=$(git remote get-url origin) \
437 |             --build-arg VERSION=${{ steps.get_ver.outputs.version }} \
438 |             .
439 |       - name: Save Docker image artifact
440 |         run: |
441 |           version=$(./bump-version show)
442 |           docker save $IMAGE_NAME:latest | gzip > dist/image.tar.gz
443 |       - name: Upload artifacts
444 |         uses: actions/upload-artifact@v5
445 |         with:
446 |           name: dist-${{ matrix.python-version }}
447 |           path: dist
448 |       - name: Setup tmate debug session
449 |         uses: mxschmitt/action-tmate@v3
450 |         if: env.RUN_TMATE
451 |   test-build:
452 |     name: test built wheel - py${{ matrix.python-version }} - ${{ matrix.platform }}
453 |     needs:
454 |       - diagnostics
455 |       - build
456 |     permissions:
457 |       # actions/checkout needs this to fetch code
458 |       contents: read
459 |     runs-on: ${{ matrix.platform }}
460 |     strategy:
461 |       fail-fast: false
462 |       matrix:
463 |         # We test on all of the latest platforms available to use with GitHub-
464 |         # hosted runners for public repositories.
465 |         platform:
466 |           - macos-latest
467 |           - ubuntu-24.04-arm
468 |           - ubuntu-latest
469 |           - windows-latest
470 |         python-version:
471 |           - "3.10"
472 |           - "3.11"
473 |           # gophish-tools cannot support Python 3.12 until the gophish
474 |           # Python package supports a newer version of the urllib3
475 |           # library.
The reason is identical to what is discussed here: 476 | # https://bugzilla.mozilla.org/show_bug.cgi?id=1857492 477 | # - "3.12" 478 | # - "3.13" 479 | # - "3.14" 480 | steps: 481 | - name: Apply standard cisagov job preamble 482 | uses: cisagov/action-job-preamble@v1 483 | with: 484 | # This functionality is poorly implemented and has been 485 | # causing problems due to the MITM implementation hogging or 486 | # leaking memory. As a result we disable it by default. If 487 | # you want to temporarily enable it, simply set 488 | # monitor_permissions equal to "true". 489 | # 490 | # TODO: Re-enable this functionality when practical. See 491 | # cisagov/skeleton-python-library#149 for more details. 492 | monitor_permissions: "false" 493 | # Use a variable to specify the permissions monitoring 494 | # configuration. By default this will yield the 495 | # configuration stored in the cisagov organization-level 496 | # variable, but if you want to use a different configuration 497 | # then simply: 498 | # 1. Create a repository-level variable with the name 499 | # ACTIONS_PERMISSIONS_CONFIG. 500 | # 2. Set this new variable's value to the configuration you 501 | # want to use for this repository. 502 | # 503 | # Note in particular that changing the permissions 504 | # monitoring configuration *does not* require you to modify 505 | # this workflow. 506 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 507 | - uses: actions/checkout@v5 508 | - id: setup-python 509 | uses: actions/setup-python@v6 510 | with: 511 | python-version: ${{ matrix.python-version }} 512 | - uses: actions/cache@v4 513 | env: 514 | BASE_CACHE_KEY: ${{ github.job }}-\ 515 | ${{ runner.os }}-${{ runner.arch }}-\ 516 | py${{ steps.setup-python.outputs.python-version }}- 517 | with: 518 | path: ${{ env.PIP_CACHE_DIR }} 519 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 520 | # file in the root of the repository is used. 
This is in case a Python 521 | # package were to have a 'setup.py' as part of its internal codebase. 522 | key: ${{ env.BASE_CACHE_KEY }}\ 523 | ${{ hashFiles('**/requirements.txt') }}-\ 524 | ${{ hashFiles('setup.py') }} 525 | restore-keys: | 526 | ${{ env.BASE_CACHE_KEY }} 527 | - name: Retrieve the built wheel 528 | uses: actions/download-artifact@v6 529 | with: 530 | name: dist-${{ matrix.python-version }} 531 | path: dist 532 | - id: find-wheel 533 | name: Get the name of the retrieved wheel (there should only be one) 534 | run: echo "wheel=$(ls dist/*whl)" >> $GITHUB_OUTPUT 535 | - name: Update core Python packages 536 | # We don't upgrade setuptools here because the versions of 537 | # gophish on PyPI are ancient and depend on an equally ancient 538 | # version of setuptools. The gophish package installation 539 | # will fail if we let pip pull in the latest setuptools; 540 | # instead, we let the gophish package installation itself 541 | # choose the version of setuptools to install. 542 | run: python -m pip install --upgrade pip wheel 543 | - name: Install the built wheel (along with testing dependencies) 544 | run: python -m pip install ${{ steps.find-wheel.outputs.wheel }}[test] 545 | - name: Run tests 546 | env: 547 | RELEASE_TAG: ${{ github.event.release.tag_name }} 548 | run: pytest 549 | - name: Setup tmate debug session 550 | uses: mxschmitt/action-tmate@v3 551 | if: env.RUN_TMATE 552 | --------------------------------------------------------------------------------