├── tests
├── __init__.py
├── run-unittests.sh
├── run-unittests.ps1
├── seq_unused.plantuml
├── seq.plantuml
├── output.md
├── input.json
├── dfd_level1.txt
├── test_sql_dump.py
├── dfd_level0.txt
├── dfd.dot
├── dfd_colormap.dot
├── test_private_func.py
└── output.json
├── requirements.txt
├── .github
├── CODEOWNERS
└── workflows
│ ├── codesee-arch-diagram.yml
│ ├── main.yml
│ ├── codeql-analysis.yml
│ └── scorecard.yml
├── SUMMARY.md
├── requirements-dev.txt
├── sample.png
├── MANIFEST.in
├── docs
├── sample.png
├── threats.jq
├── sample.tm
├── basic_template.md
├── Stylesheet.css
├── reveal.md
├── advanced_template.md
└── pytm
│ └── report_util.html
├── .gitbook
└── assets
│ ├── dfd.png
│ ├── seq.png
│ └── sample.png
├── pytm
├── images
│ ├── lambda.png
│ ├── datastore.png
│ ├── datastore_black.png
│ ├── datastore_gold.png
│ ├── datastore_darkgreen.png
│ └── datastore_firebrick3.png
├── TODO.txt
├── report_util.py
├── __init__.py
├── flows.py
├── json.py
└── template_engine.py
├── CONTRIBUTORS.md
├── SECURITY.md
├── devbox.json
├── pyproject.toml
├── ROADMAP.md
├── Dockerfile
├── setup.py
├── Makefile
├── .gitignore
├── LICENSE
├── CONTRIBUTING.md
├── tm.py
├── CHANGELOG.md
├── README.md
└── poetry.lock
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pydal>=20200714.1
2 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @colesmj
2 | * @izar
3 |
4 |
5 |
--------------------------------------------------------------------------------
/SUMMARY.md:
--------------------------------------------------------------------------------
1 | # Table of contents
2 |
3 | * [pytm](README.md)
4 |
5 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | -r requirements.txt
2 | pdoc3
3 | black
4 |
--------------------------------------------------------------------------------
/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/sample.png
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include images/lambda.png
2 | include threatlib/threats.json
3 |
--------------------------------------------------------------------------------
/docs/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/docs/sample.png
--------------------------------------------------------------------------------
/.gitbook/assets/dfd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/.gitbook/assets/dfd.png
--------------------------------------------------------------------------------
/.gitbook/assets/seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/.gitbook/assets/seq.png
--------------------------------------------------------------------------------
/pytm/images/lambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/pytm/images/lambda.png
--------------------------------------------------------------------------------
/.gitbook/assets/sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/.gitbook/assets/sample.png
--------------------------------------------------------------------------------
/pytm/images/datastore.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/pytm/images/datastore.png
--------------------------------------------------------------------------------
/pytm/images/datastore_black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/pytm/images/datastore_black.png
--------------------------------------------------------------------------------
/pytm/images/datastore_gold.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/pytm/images/datastore_gold.png
--------------------------------------------------------------------------------
/pytm/images/datastore_darkgreen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/pytm/images/datastore_darkgreen.png
--------------------------------------------------------------------------------
/pytm/images/datastore_firebrick3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OWASP/pytm/HEAD/pytm/images/datastore_firebrick3.png
--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # PyTM Main Contributors
2 |
3 | * Was, Jan
4 | * Avhad, Pooja
5 | * Coles, Matthew
6 | * Ozmore, Nick
7 | * Shambhuni, Rohit
8 | * Tarandach, Izar
9 |
10 | Join us!
11 |
--------------------------------------------------------------------------------
/tests/run-unittests.sh:
--------------------------------------------------------------------------------
1 | # Script to prepare the environment and run the test. Is invoked by run-unittests.ps1
2 |
3 | cd /pwd && \
4 | pip install -r requirements-dev.txt && \
5 | pip install -r requirements.txt && \
6 | python3 -m unittest -v tests/test_*.py
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | Reporting a Vulnerability
2 | Please report (suspected) security vulnerabilities as a project issue. You will receive a response from us within 48 hours. If the issue is confirmed, we will release a patch as soon as possible; historically, fixes have shipped within a few days, depending on complexity.
3 |
4 |
5 |
--------------------------------------------------------------------------------
/pytm/TODO.txt:
--------------------------------------------------------------------------------
1 | TODO
2 | ====
3 |
4 | * mitigations - create a mitigation class where Mitigation matches Finding for Element
5 | * add threats and verify that eval of bool expression matches all cases. if not, create a variation where a function can be provided instead
6 | * documentation with Sphinx preparing for Read The Docs (?)
7 |
--------------------------------------------------------------------------------
/tests/run-unittests.ps1:
--------------------------------------------------------------------------------
1 | [CmdletBinding()]
2 | param (
3 | [ValidateSet('always', 'never')]
4 | [string] $pull = 'always'
5 | )
6 |
7 | # Run all tests using docker and a read-only file system so the docker image cannot impact the local files.
8 |
9 | $rootFolder = Split-Path $PSScriptRoot
10 | docker run --pull $pull --rm -v "${rootFolder}:/pwd:ro" python bash /pwd/tests/run-unittests.sh
--------------------------------------------------------------------------------
/devbox.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.14.2/.schema/devbox.schema.json",
3 | "packages": [
4 | "pandoc@latest",
5 | "graphviz@latest",
6 | "openjdk@latest",
7 | "python@3.11.13",
8 | "poetry@latest"
9 | ],
10 | "shell": {
11 | "init_hook": ["poetry install"],
12 | "scripts": {
13 | "test": "poetry run pytest"
14 | }
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/docs/threats.jq:
--------------------------------------------------------------------------------
1 | ## \(.SID) \(.description)
2 |
3 | \(.details)
4 |
5 |
6 | - Severity
7 | - \(.severity)
8 |
9 | - Prerequisites
10 | - \(.prerequisites)
11 |
12 | - Example
13 | - \(.example)
14 |
15 | - Mitigations
16 | - \(.mitigations)
17 |
18 | - References
19 | - \(.references)
20 |
21 | - Condition
22 | - \(.condition)
23 |
24 | \n\n
25 |
--------------------------------------------------------------------------------
/docs/sample.tm:
--------------------------------------------------------------------------------
1 | /* threats =
2 | Finding: Dataflow not authenticated on web and db with score 8.6
3 | */
4 | diagram {
5 | boundary Web_Side {
6 | title = "Web Side"
7 | function web_server {
8 | title = "web server"
9 | }
10 | }
11 | boundary DB_side {
12 | title = "DB side"
13 | database database_server {
14 | title = "database server"
15 | }
16 | }
17 | web_server -> database_server {
18 | operation = "web and db"
19 | data = "HTTP"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "pytm"
3 | version = "1.3.1"
4 | description = "A Pythonic framework for threat modeling"
5 | authors = ["pytm Team"]
6 | license = "MIT License"
7 |
8 | [tool.poetry.dependencies]
9 | python = "^3.9 || ^3.10 || ^3.11"
10 | pydal = "~20200714.1"
11 | legacy-cgi = { version = "^2.0", markers = "python_version >= '3.13'" }
12 |
13 | [tool.poetry.group.dev.dependencies]
14 | pytest = "^8.3.5"
15 | black = "^25.9.0"
16 | pdoc3 = "^0.11.6"
17 |
18 | [build-system]
19 | requires = ["poetry-core>=1.0.0"]
20 | build-backend = "poetry.core.masonry.api"
21 |
--------------------------------------------------------------------------------
/tests/seq_unused.plantuml:
--------------------------------------------------------------------------------
1 | @startuml
2 | actor actor_User_579e9aae81 as "User"
3 | database datastore_SQLDatabase_d2006ce1bb as "SQL Database"
4 | entity server_WebServer_f2eb7a3ff7 as "Web Server"
5 |
6 | actor_User_579e9aae81 -> server_WebServer_f2eb7a3ff7: User enters comments (*)
7 | note left
8 | bbb
9 | end note
10 | server_WebServer_f2eb7a3ff7 -> datastore_SQLDatabase_d2006ce1bb: Insert query with comments
11 | note left
12 | ccc
13 | end note
14 | datastore_SQLDatabase_d2006ce1bb -> server_WebServer_f2eb7a3ff7: Retrieve comments
15 | server_WebServer_f2eb7a3ff7 -> actor_User_579e9aae81: Show comments (*)
16 | @enduml
17 |
--------------------------------------------------------------------------------
/.github/workflows/codesee-arch-diagram.yml:
--------------------------------------------------------------------------------
1 | # This workflow was added by CodeSee. Learn more at https://codesee.io/
2 | # This is v2.0 of this workflow file
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request_target:
8 | types: [opened, synchronize, reopened]
9 |
10 | name: CodeSee
11 |
12 | permissions: read-all
13 |
14 | jobs:
15 | codesee:
16 | runs-on: ubuntu-latest
17 | continue-on-error: true
18 | name: Analyze the repo with CodeSee
19 | steps:
20 | - uses: Codesee-io/codesee-action@v2
21 | with:
22 | codesee-token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }}
23 |
--------------------------------------------------------------------------------
/tests/seq.plantuml:
--------------------------------------------------------------------------------
1 | @startuml
2 | actor actor_User_579e9aae81 as "User"
3 | entity server_WebServer_f2eb7a3ff7 as "Web Server"
4 | database datastore_SQLDatabase_d2006ce1bb as "SQL Database"
5 |
6 | actor_User_579e9aae81 -> server_WebServer_f2eb7a3ff7: (1) User enters comments (*)
7 | note left
8 | bbb
9 | end note
10 | server_WebServer_f2eb7a3ff7 -> datastore_SQLDatabase_d2006ce1bb: (2) Insert query with comments
11 | note left
12 | ccc
13 | end note
14 | datastore_SQLDatabase_d2006ce1bb -> server_WebServer_f2eb7a3ff7: (3) Retrieve comments
15 | server_WebServer_f2eb7a3ff7 -> actor_User_579e9aae81: (4) Show comments (*)
16 | @enduml
17 |
--------------------------------------------------------------------------------
/ROADMAP.md:
--------------------------------------------------------------------------------
1 | # To the end of 2021
2 |
3 | * add more threat rules
4 | * add debugging capability to threat rules
5 | * merge/close existing PRs
6 |
7 | # 1H2022
8 |
9 | * add more rules
10 | * move to a more complete rule evaluation engine
11 | * export/import other popular TM tools data formats
12 | * lower barrier of entry by adding a new, natural way of describing systems
13 |
14 | # 2H2022
15 |
16 | * total world domination via threat modeling
17 |
18 | # 2H2025
19 |
20 | * world domination was not reached - but we got a nice slice of it
21 | * integration of TM-BOM format
22 | * additional diagramming formats
23 | * incorporation of AI capabilities
24 | * prioritization of findings
25 | * creation of threat scenarios
26 |
27 |
28 |
--------------------------------------------------------------------------------
/tests/output.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## System Description
4 |
5 |
6 | aaa
7 |
8 |
9 |
10 |
11 |
12 |
13 | ## Dataflow Diagram - Level 0 DFD
14 |
15 | 
16 |
17 |
18 |
19 | ## Dataflows
20 |
21 |
22 | Name|From|To |Data|Protocol|Port
23 | |:----:|:----:|:---:|:----:|:--------:|:----:|
24 | |User enters comments (*)|User|Web Server|auth cookie||-1|
25 | |Insert query with comments|Web Server|SQL Database|[]||-1|
26 | |Call func|Web Server|Lambda func|[]||-1|
27 | |Retrieve comments|SQL Database|Web Server|[]||-1|
28 | |Show comments (*)|Web Server|User|[]||-1|
29 | |Query for tasks|Task queue worker|SQL Database|[]||-1|
30 |
31 |
32 | ## Data Dictionary
33 |
34 |
35 | Name|Description|Classification
36 | |:----:|:--------:|:----:|
37 | |auth cookie|auth cookie description|PUBLIC|
38 |
39 |
40 |
41 |
42 | ## Potential Threats
43 |
44 |
45 |
46 |
47 | ||
48 |
--------------------------------------------------------------------------------
/tests/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "my test tm",
3 | "description": "aaa",
4 | "isOrdered": true,
5 | "onDuplicates": "IGNORE",
6 | "boundaries": [
7 | {
8 | "name": "Internet"
9 | },
10 | {
11 | "name": "Server/DB"
12 | }
13 | ],
14 | "elements": [
15 | {
16 | "__class__": "Actor",
17 | "name": "User",
18 | "inBoundary": "Internet"
19 | },
20 | {
21 | "__class__": "Server",
22 | "name": "Web Server"
23 | },
24 | {
25 | "__class__": "Datastore",
26 | "name": "SQL Database",
27 | "inBoundary": "Server/DB"
28 | }
29 | ],
30 | "flows": [
31 | {
32 | "name": "Request",
33 | "source": "User",
34 | "sink": "Web Server",
35 | "note": "bbb"
36 | },
37 | {
38 | "name": "Insert",
39 | "source": "Web Server",
40 | "sink": "SQL Database",
41 | "note": "ccc"
42 | },
43 | {
44 | "name": "Select",
45 | "source": "SQL Database",
46 | "sink": "Web Server"
47 | },
48 | {
49 | "name": "Response",
50 | "source": "Web Server",
51 | "sink": "User"
52 | }
53 | ]
54 | }
55 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 |
2 | FROM python:3.12-rc-alpine
3 |
4 |
5 | WORKDIR /usr/src/app
6 | ENTRYPOINT ["sh"]
7 |
8 | ENV PLANTUML_VER 1.2021.7
9 | ENV PLANTUML_PATH /usr/local/lib/plantuml.jar
10 | ENV PANDOC_VER 2.14.0.1
11 |
12 | RUN apk add --no-cache graphviz openjdk11-jre fontconfig make curl ttf-liberation ttf-linux-libertine ttf-dejavu \
13 | && apk add --no-cache --virtual .build-deps gcc musl-dev \
14 | && rm -rf /var/cache/apk/* \
15 | && curl -LO https://master.dl.sourceforge.net/project/plantuml/$PLANTUML_VER/plantuml.$PLANTUML_VER.jar \
16 | && mv plantuml.$PLANTUML_VER.jar $PLANTUML_PATH \
17 | && curl -LO https://github.com/jgm/pandoc/releases/download/$PANDOC_VER/pandoc-$PANDOC_VER-linux-amd64.tar.gz \
18 | && tar xvzf pandoc-$PANDOC_VER-linux-amd64.tar.gz --strip-components 1 -C /usr/local/
19 |
20 | ENV _JAVA_OPTIONS -Duser.home=/tmp -Dawt.useSystemAAFontSettings=gasp
21 | RUN printf '@startuml\n@enduml' | java -Djava.awt.headless=true -jar $PLANTUML_PATH -tpng -pipe >/dev/null
22 |
23 | COPY requirements.txt requirements-dev.txt ./
24 | RUN pip install --no-cache-dir -r requirements-dev.txt \
25 | && apk del .build-deps
26 |
27 | COPY pytm ./pytm
28 | COPY docs ./docs
29 | COPY *.py Makefile ./
30 |
--------------------------------------------------------------------------------
/tests/dfd_level1.txt:
--------------------------------------------------------------------------------
1 | digraph tm {
2 | graph [
3 | fontname = Arial;
4 | fontsize = 14;
5 | ]
6 | node [
7 | fontname = Arial;
8 | fontsize = 14;
9 | rankdir = lr;
10 | ]
11 | edge [
12 | shape = none;
13 | arrowtail = onormal;
14 | fontname = Arial;
15 | fontsize = 12;
16 | ]
17 | labelloc = "t";
18 | fontsize = 20;
19 | nodesep = 1;
20 |
21 | subgraph cluster_boundary_Internet_acf3059e70 {
22 | graph [
23 | fontsize = 10;
24 | fontcolor = black;
25 | style = dashed;
26 | color = firebrick2;
27 | label = <Internet>;
28 | ]
29 |
30 | actor_User_579e9aae81 [
31 | shape = square;
32 | color = black;
33 | fontcolor = black;
34 | label = "User";
35 | margin = 0.02;
36 | ]
37 |
38 | }
39 |
40 | subgraph cluster_boundary_ServerDB_88f2d9c06f {
41 | graph [
42 | fontsize = 10;
43 | fontcolor = black;
44 | style = dashed;
45 | color = firebrick2;
46 | label = <Server/DB>;
47 | ]
48 |
49 |
50 | }
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 |
3 | name: build+test
4 |
5 | # Controls when the action will run. Triggers the workflow on push or pull request
6 | # events but only for the master branch
7 | on:
8 | push:
9 | branches: [ master ]
10 | pull_request:
11 | branches: [ master ]
12 |
13 | permissions:
14 | contents: read
15 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
16 | jobs:
17 | # This workflow contains a single job called "build"
18 | build:
19 | # The type of runner that the job will run on
20 | runs-on: ubuntu-latest
21 |
22 | strategy:
23 | matrix:
24 | python-version: ["3.9", "3.10", "3.11"]
25 |
26 | # Steps represent a sequence of tasks that will be executed as part of the job
27 | steps:
28 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
29 | - uses: actions/checkout@v4
30 | - name: Set up Python ${{ matrix.python-version }}
31 | uses: actions/setup-python@v6
32 | with:
33 | python-version: ${{ matrix.python-version }}
34 | - name: Install Poetry
35 | run: pip install poetry
36 | - name: Install dependencies
37 | run: poetry install --with dev
38 | - name: Run tests
39 | run: poetry run pytest
40 |
--------------------------------------------------------------------------------
/docs/basic_template.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## System Description
4 |
5 |
6 | {tm.description}
7 |
8 |
9 |
10 | {tm.assumptions:if:
11 |
12 | |Assumptions|
13 | |-----------|
14 | {tm.assumptions:repeat:|{{item}}|
15 | }
16 |
17 |
18 |
19 |
20 | }
21 |
22 |
23 | ## Dataflow Diagram - Level 0 DFD
24 |
25 | 
26 |
27 |
28 |
29 | ## Dataflows
30 |
31 |
32 | Name|From|To |Data|Protocol|Port
33 | |:----:|:----:|:---:|:----:|:--------:|:----:|
34 | {dataflows:repeat:|{{item.name}}|{{item.source.name}}|{{item.sink.name}}|{{item.data}}|{{item.protocol}}|{{item.dstPort}}|
35 | }
36 |
37 | ## Data Dictionary
38 |
39 |
40 | Name|Description|Classification
41 | |:----:|:--------:|:----:|
42 | {data:repeat:|{{item.name}}|{{item.description}}|{{item.classification.name}}|
43 | }
44 |
45 |
46 |
47 | ## Potential Threats
48 |
49 |
50 |
51 |
52 | |{findings:repeat:
53 |
54 | {{item.threat_id}} -- {{item.description}}
55 | Targeted Element
56 | {{item.target}}
57 | Severity
58 | {{item.severity}}
59 | Example Instances
60 | {{item.example}}
61 | Mitigations
62 | {{item.mitigations}}
63 | References
64 | {{item.references}}
65 |
66 |
67 |
68 |
69 | }|
70 |
--------------------------------------------------------------------------------
/pytm/report_util.py:
--------------------------------------------------------------------------------
1 |
class ReportUtils:
    """Static helpers used by report templates to inspect model elements."""

    @staticmethod
    def getParentName(element):
        """Return the name of a Boundary's enclosing boundary, or "" if it has none."""
        from pytm import Boundary
        if not isinstance(element, Boundary):
            return "ERROR: getParentName method is not valid for " + element.__class__.__name__
        parent = element.inBoundary
        return parent.name if parent is not None else ""

    @staticmethod
    def getNamesOfParents(element):
        """Return the names of all boundaries returned by a Boundary's parents()."""
        from pytm import Boundary
        if not isinstance(element, Boundary):
            return "ERROR: getNamesOfParents method is not valid for " + element.__class__.__name__
        return [parent.name for parent in element.parents()]

    @staticmethod
    def getFindingCount(element):
        """Return the number of findings attached to an Element, as a string."""
        from pytm import Element
        if not isinstance(element, Element):
            return "ERROR: getFindingCount method is not valid for " + element.__class__.__name__
        return str(len(list(element.findings)))

    @staticmethod
    def getElementType(element):
        """Return the class name of an Element (e.g. "Server", "Datastore")."""
        from pytm import Element
        if not isinstance(element, Element):
            return "ERROR: getElementType method is not valid for " + element.__class__.__name__
        return str(element.__class__.__name__)
40 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
import setuptools

# The long description shown on PyPI is the project README, verbatim.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="pytm",
    version="1.3.1",
    packages=["pytm"],
    description="A Python-based framework for threat modeling.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="pytm team",
    author_email="please_use_github_issues@nowhere.com",
    license="MIT License",
    url="https://github.com/izar/pytm",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Topic :: Security",
        "Natural Language :: English",
    ],
    # Kept consistent with pyproject.toml (python = "^3.9 || ...") and the
    # CI matrix (3.9-3.11); the previous ">=3" also admitted 3.0-3.8.
    python_requires=">=3.9",
    install_requires=["pydal>=20200714.1"],
    # Ship the non-Python assets the library loads at runtime.
    package_data={
        "pytm": [
            "images/datastore.png",
            "images/lambda.png",
            "images/datastore_black.png",
            "images/datastore_darkgreen.png",
            "images/datastore_firebrick3.png",
            "images/datastore_gold.png",
            "threatlib/threats.json",
        ],
    },
    exclude_package_data={"": ["report.html"]},
    include_package_data=True,
)
43 |
--------------------------------------------------------------------------------
/pytm/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "Action",
3 | "Actor",
4 | "Assumption",
5 | "Boundary",
6 | "Classification",
7 | "TLSVersion",
8 | "Data",
9 | "Dataflow",
10 | "Datastore",
11 | "DatastoreType",
12 | "Element",
13 | "ExternalEntity",
14 | "Finding",
15 | "Lambda",
16 | "Lifetime",
17 | "load",
18 | "loads",
19 | "Process",
20 | "Server",
21 | "SetOfProcesses",
22 | "Threat",
23 | "TM",
24 | ]
25 |
26 | import sys
27 |
28 | from .json import load, loads
29 | from .pytm import (
30 | TM,
31 | Action,
32 | Actor,
33 | Assumption,
34 | Boundary,
35 | Classification,
36 | Data,
37 | Dataflow,
38 | Datastore,
39 | DatastoreType,
40 | Element,
41 | ExternalEntity,
42 | Finding,
43 | Lambda,
44 | Lifetime,
45 | Process,
46 | Server,
47 | SetOfProcesses,
48 | Threat,
49 | TLSVersion,
50 | var,
51 | )
52 |
53 |
def pdoc_overrides():
    """Build the ``__pdoc__`` mapping that customizes pdoc's generated docs.

    Hides the internal submodules and the ``check``/``dfd``/``seq`` methods of
    every exported class, and substitutes the documentation of ``var``
    descriptors that carry a non-empty ``doc`` string.
    """
    overrides = {"pytm": False, "json": False, "template_engine": False}
    this_module = sys.modules[__name__]
    for name, obj in this_module.__dict__.items():
        if not isinstance(obj, type):
            continue
        for attr_name in dir(obj):
            if attr_name in ("check", "dfd", "seq"):
                overrides[f"{name}.{attr_name}"] = False
            attr = getattr(obj, attr_name, {})
            if isinstance(attr, var) and attr.doc != "":
                overrides[f"{name}.{attr_name}"] = attr.doc
    return overrides
67 |
68 |
69 | __pdoc__ = pdoc_overrides()
70 |
--------------------------------------------------------------------------------
/docs/Stylesheet.css:
--------------------------------------------------------------------------------
1 | * { margin: 0; padding: 0; }
2 | html {
3 | padding: 0;
4 | font: normal 15px/1.25 Source Sans Pro, sans-serif;
5 | color: #000;
6 | hyphens: auto;
7 | word-wrap: break-word;
8 | background: #fff;
9 | margin-left: 1rem;
10 | }
11 |
12 | body > :first-child {
13 | margin-top: 1;
14 | }
15 |
16 | h1,h2,h3,h4,h5,h6 {
17 | line-height: 1;
18 | margin: 1rem;
19 | margin-top: 1.5rem;
20 | text-rendering: optimizeLegibility;
21 | }
22 |
23 | h1 { font-size: 2.15rem; }
24 | h2 { font-size: 2rem; }
25 | h3 { font-size: 1.65rem; }
26 | h4 { font-size: 1.25rem; }
27 | h5 { font-size: 1.1rem; }
28 | h6 { font-size: 1rem; }
29 |
30 | h2 em, h3 em{
31 | color:grey;
32 | }
33 |
34 | /* @end */
35 |
36 | p {
37 | margin-top: .75rem;
38 | margin-left: 1rem;
39 | }
40 |
41 | hr {
42 | margin: .75rem 0;
43 | opacity: .5;
44 | }
45 | table {
46 | margin: .75rem 0 0 1rem;
47 | padding: 0;
48 | width: 50%;
49 | text-align: left;
50 | white-space: nowrap;
51 | border-collapse: collapse;
52 | }
53 | table tr {
54 | margin: 0;
55 | padding: 0;
56 | width: auto;
57 | text-align: left;
58 | border-top: 1px solid #ccc;
59 | background-color: #fff;
60 | }
61 | table tr:nth-child(2n) {
62 | background-color: #f8f8f8;
63 | }
64 |
65 | table tr th {
66 | margin: 0;
67 | padding: .35em .75em;
68 | font-weight: bold;
69 | text-align: left;
70 | border: 1px solid #ccc;
71 | }
72 |
73 | table tr td {
74 | margin: 0;
75 | padding: .35em .75em;
76 | text-align: left;
77 | border: 1px solid #ccc;
78 | }
79 |
80 | details {
81 | margin-left: 2rem
82 | }
83 | /* @end */
84 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
2 | CWD := $(patsubst %/,%,$(dir $(MKFILE_PATH)))
3 | DOCKER_IMG := pytm
4 |
5 | ifeq ($(USE_DOCKER),true)
6 | SHELL=docker
7 | .SHELLFLAGS=run -u $$(id -u) -v $(CWD):/usr/src/app --rm $(DOCKER_IMG):latest -c
8 | endif
9 | ifndef PLANTUML_PATH
10 | export PLANTUML_PATH = ./plantuml.jar
11 | endif
12 |
13 | MODEL?=tm
14 |
15 | libs := $(wildcard pytm/*.py) $(wildcard pytm/threatlib/*.json) $(wildcard pytm/images/*)
16 |
17 | all: clean docs/pytm/index.html $(MODEL)
18 |
19 | safe_filename:
20 | ifeq ($(suffix $(MODEL)), .py)
21 | @echo "I think you mean MODEL=$(patsubst .py,,$(MODEL))"
22 | exit 1
23 | endif
24 |
25 |
26 | docs/pytm/index.html: $(wildcard pytm/*.py)
27 | PYTHONPATH=. pdoc --html --force --output-dir docs pytm
28 |
29 | docs/threats.md: $(wildcard pytm/threatlib/*.json)
30 | printf "# Threat database\n" > $@
31 | jq -r ".[] | \"$$(cat docs/threats.jq)\"" $< >> $@
32 |
33 | clean: safe_filename
34 | rm -rf dist/* build/* $(MODEL)
35 |
36 | $(MODEL): safe_filename
37 | mkdir -p $(MODEL)
38 | $(MAKE) MODEL=$(MODEL) report
39 |
40 | $(MODEL)/dfd.png: $(MODEL).py $(libs)
41 | ./$< --dfd | dot -Tpng -o $@
42 |
43 | $(MODEL)/seq.png: $(MODEL).py $(libs)
44 | ./$< --seq | java -Djava.awt.headless=true -jar $$PLANTUML_PATH -tpng -pipe > $@
45 |
46 | $(MODEL)/report.html: $(MODEL).py $(libs) docs/basic_template.md docs/Stylesheet.css
47 | ./$< --report docs/basic_template.md | pandoc -f markdown -t html > $@
48 |
49 | dfd: $(MODEL)/dfd.png
50 |
51 | seq: $(MODEL)/seq.png
52 |
53 | report: $(MODEL)/report.html seq dfd
54 |
55 | .PHONY: test
56 | test:
57 | @python3 -m unittest
58 |
59 | .PHONY: describe
60 | describe:
61 | ./tm.py --describe "TM Element Boundary ExternalEntity Actor Lambda Server Process SetOfProcesses Datastore Dataflow"
62 |
63 | .PHONY: image
64 | image:
65 | docker build -t $(DOCKER_IMG) .
66 |
67 | .PHONY: docs
68 | docs: docs/pytm/index.html docs/threats.md
69 |
70 | .PHONY: fmt
71 | fmt:
72 | black $(wildcard pytm/*.py) $(wildcard tests/*.py) $(wildcard *.py)
73 |
--------------------------------------------------------------------------------
/pytm/flows.py:
--------------------------------------------------------------------------------
1 | from pytm import Dataflow as DF
2 | from pytm import Element
3 |
4 |
def req_reply(src: Element, dest: Element, req_name: str, reply_name=None) -> (DF, DF):
    '''
    Create two dataflows: a request, and the corresponding reply to that request.

    Args:
        src: element issuing the request (and receiving the reply)
        dest: element receiving the request (and issuing the reply)
        req_name: name of the request dataflow
        reply_name: name of the reply dataflow;
                    if not set, the name will be "Reply to <req_name>"

    Usage:
        query_titles, reply_titles = req_reply(api, database, 'Query book titles')

        view_authors, reply_authors = req_reply(api, database,
                                     req_name='Query authors view',
                                     reply_name='Authors, with top titles')

    Returns:
        a tuple of two dataflows, where the first is the request and the second is the reply.

    '''
    chosen_name = reply_name if reply_name else f'Reply to {req_name}'
    request = DF(src, dest, req_name)
    response = DF(dest, src, name=chosen_name)
    response.responseTo = request
    return request, response
32 |
33 |
def reply(req: DF, **kwargs) -> DF:
    '''
    Create a new dataflow that is the response to the given dataflow.

    Args:
        req: a dataflow for which a reply should be generated
        kwargs: keyword arguments forwarded to the newly created reply;
                a 'name' entry overrides the default "Reply to <req.name>"
    Usage:
        client_query = Dataflow(client, api, "Get authors page")
        api_query = Dataflow(api, database, 'Get authors')
        api_reply = reply(api_query)
        client_reply = reply(client_query)
    Returns:
        a Dataflow which is a reply to the given dataflow req
    '''
    # Pop 'name' so it is not passed twice to the Dataflow constructor.
    name = kwargs.pop('name', f'Reply to {req.name}')
    response = DF(req.sink, req.source, name, **kwargs)
    response.responseTo = req
    # Bug fix: the original returned the (req, reply) pair, contradicting the
    # documented contract and the usage examples above, which expect a single
    # Dataflow (the request already exists at the call site).
    return response
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .pyre/
2 | bin/
3 | include/
4 | pip-selfcheck.json
5 |
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 | MANIFEST
32 |
33 | # PyInstaller
34 | # Usually these files are written by a python script from a template
35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest
37 | *.spec
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | .hypothesis/
53 | .pytest_cache/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 | .vscode/settings.json
111 |
112 | pytm.code-workspace
113 | tm.df
114 | tm.png
115 | pytm-workspace.code-workspace
116 | .gitignore
117 | tm_example.dot
118 | .work/
119 |
120 | #IntelliJ/PyCharm
121 | .idea
122 | *.iml
123 |
124 | #Others
125 | plantuml.jar
126 | tm/
127 | /sqldump
128 | /tests/.config.pytm
129 |
130 | # devbox
131 | devbox.lock
132 |
133 | # zed
134 | .zed
135 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Main Project Contributors
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
23 | ========================================================
24 |
25 | PyTM uses material from CAPEC on its threat catalog. CAPEC has its own license, reproduced below:
26 |
27 | LICENSE
28 | The MITRE Corporation (MITRE) hereby grants you a non-exclusive, royalty-free license to use Common Attack Pattern Enumeration and Classification (CAPEC™) for research, development, and commercial purposes. Any copy you make for such purposes is authorized provided that you reproduce MITRE’s copyright designation and this license in any such copy.
29 |
30 | DISCLAIMERS
31 | ALL DOCUMENTS AND THE INFORMATION CONTAINED THEREIN ARE PROVIDED ON AN "AS IS" BASIS AND THE CONTRIBUTOR, THE ORGANIZATION HE/SHE REPRESENTS OR IS SPONSORED BY (IF ANY), THE MITRE CORPORATION, ITS BOARD OF TRUSTEES, OFFICERS, AGENTS, AND EMPLOYEES, DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION THEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
32 | =========================================================
33 |
34 |
--------------------------------------------------------------------------------
/tests/test_sql_dump.py:
--------------------------------------------------------------------------------
1 | import random
2 | import sqlite3
3 | from pathlib import Path
4 |
5 | import pytest
6 |
7 | from pytm import Boundary, Server, Threat, TM
8 |
9 |
@pytest.fixture
def sample_tm():
    """Build a minimal, resolved threat model for the sqlDump tests.

    The model has two nested boundaries (Server/DB inside Internet) and one
    server, with a single threat targeting Server so that resolve() is
    guaranteed to produce at least one finding.
    """
    # Clear global TM state left over from other tests.
    TM.reset()
    # Fixed seed so any randomized IDs/ordering are reproducible.
    random.seed(0)
    tm = TM("sql dump tm", description="desc")

    internet = Boundary("Internet")
    server_db = Boundary("Server/DB", inBoundary=internet)
    Server("Web Server", inBoundary=server_db)

    # Replace the built-in threat catalog with one deterministic threat
    # so the resulting findings are predictable.
    TM._threats = [
        Threat(
            SID="SRV001",
            description="Server threat",
            severity="High",
            target="Server",
        )
    ]

    # Match threats against elements, populating tm.findings.
    tm.resolve()
    assert tm.findings, "Expected at least one finding for sqlDump tests"
    return tm
32 |
33 |
34 | def _open_connection(tmp_path: Path) -> sqlite3.Connection:
35 | db_path = tmp_path / "sqldump" / "test.db"
36 | return sqlite3.connect(db_path)
37 |
38 |
def test_sql_dump_creates_serialized_columns(sample_tm, tmp_path, monkeypatch):
    """The Boundary table should expose serialized scalar and reference columns."""
    monkeypatch.chdir(tmp_path)
    sample_tm.sqlDump("test.db")

    with _open_connection(tmp_path) as conn:
        cursor = conn.execute("PRAGMA table_info(Boundary)")
        # Column name is the second field of each PRAGMA table_info row.
        columns = {row[1].lower() for row in cursor}

    assert {"name", "inscope", "inboundary"} <= columns
51 |
52 |
def test_sql_dump_persists_element_and_finding_data(sample_tm, tmp_path, monkeypatch):
    """sqlDump should persist boundaries, elements and findings to SQLite."""
    monkeypatch.chdir(tmp_path)
    sample_tm.sqlDump("test.db")

    with _open_connection(tmp_path) as conn:
        boundaries = conn.execute(
            "SELECT name, inBoundary FROM Boundary ORDER BY id"
        ).fetchall()
        servers = conn.execute(
            "SELECT name, inBoundary FROM Server ORDER BY id"
        ).fetchall()
        findings = conn.execute(
            "SELECT threat_id FROM Finding ORDER BY id"
        ).fetchall()

    # Top-level boundary has no parent; nested one references it by name.
    assert ("Internet", None) in boundaries
    assert ("Server/DB", "Internet") in boundaries
    assert ("Web Server", "Server/DB") in servers
    assert [threat_id for (threat_id,) in findings] == ["SRV001"]
--------------------------------------------------------------------------------
/tests/dfd_level0.txt:
--------------------------------------------------------------------------------
1 | digraph tm {
2 | graph [
3 | fontname = Arial;
4 | fontsize = 14;
5 | ]
6 | node [
7 | fontname = Arial;
8 | fontsize = 14;
9 | rankdir = lr;
10 | ]
11 | edge [
12 | shape = none;
13 | arrowtail = onormal;
14 | fontname = Arial;
15 | fontsize = 12;
16 | ]
17 | labelloc = "t";
18 | fontsize = 20;
19 | nodesep = 1;
20 |
21 | subgraph cluster_boundary_Internet_acf3059e70 {
22 | graph [
23 | fontsize = 10;
24 | fontcolor = black;
25 | style = dashed;
26 | color = firebrick2;
27 | label = <Internet>;
28 | ]
29 |
30 | actor_User_579e9aae81 [
31 | shape = square;
32 | color = black;
33 | fontcolor = black;
34 | label = "User";
35 | margin = 0.02;
36 | ]
37 |
38 | }
39 |
40 | subgraph cluster_boundary_ServerDB_88f2d9c06f {
41 | graph [
42 | fontsize = 10;
43 | fontcolor = black;
44 | style = dashed;
45 | color = firebrick2;
46 | label = <Server/DB>;
47 | ]
48 |
49 | datastore_SQLDatabase_d2006ce1bb [
50 | shape = none;
51 | fixedsize = shape;
52 | image = "INSTALL_PATH/pytm/images/datastore_black.png";
53 | imagescale = true;
54 | color = black;
55 | fontcolor = black;
56 | xlabel = "SQL Database";
57 | label = "";
58 | ]
59 |
60 | }
61 |
62 | server_WebServer_f2eb7a3ff7 [
63 | shape = circle;
64 | color = black;
65 | fontcolor = black;
66 | label = "Web Server";
67 | margin = 0.02;
68 | ]
69 |
70 | actor_User_579e9aae81 -> server_WebServer_f2eb7a3ff7 [
71 | color = black;
72 | fontcolor = black;
73 | dir = forward;
74 | label = "User enters\ncomments (*)";
75 | ]
76 |
77 | server_WebServer_f2eb7a3ff7 -> datastore_SQLDatabase_d2006ce1bb [
78 | color = black;
79 | fontcolor = black;
80 | dir = forward;
81 | label = "Insert query with\ncomments";
82 | ]
83 |
84 | datastore_SQLDatabase_d2006ce1bb -> server_WebServer_f2eb7a3ff7 [
85 | color = black;
86 | fontcolor = black;
87 | dir = forward;
88 | label = "Retrieve comments";
89 | ]
90 |
91 | server_WebServer_f2eb7a3ff7 -> actor_User_579e9aae81 [
92 | color = black;
93 | fontcolor = black;
94 | dir = forward;
95 | label = "Show comments (*)";
96 | ]
97 |
98 | }
99 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | name: "CodeQL"
7 |
8 | on:
9 | push:
10 | branches: [master]
11 | pull_request:
12 | # The branches below must be a subset of the branches above
13 | branches: [master]
14 | schedule:
15 | - cron: '0 15 * * 3'
16 |
17 | permissions: # added using https://github.com/step-security/secure-workflows
18 | contents: read
19 |
20 | jobs:
21 | analyze:
22 | permissions:
23 | actions: read # for github/codeql-action/init to get workflow details
24 | contents: read # for actions/checkout to fetch code
25 | security-events: write # for github/codeql-action/autobuild to send a status report
26 | name: Analyze
27 | runs-on: ubuntu-latest
28 |
29 | strategy:
30 | fail-fast: false
31 | matrix:
32 | # Override automatic language detection by changing the below list
33 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
34 | language: ['python']
35 | # Learn more...
36 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection
37 |
38 | steps:
39 | - name: Checkout repository
40 | uses: actions/checkout@v2
41 | with:
42 | # We must fetch at least the immediate parents so that if this is
43 | # a pull request then we can checkout the head.
44 | fetch-depth: 2
45 |
46 | # Initializes the CodeQL tools for scanning.
47 | - name: Initialize CodeQL
48 | uses: github/codeql-action/init@v1
49 | with:
50 | languages: ${{ matrix.language }}
51 | # If you wish to specify custom queries, you can do so here or in a config file.
52 | # By default, queries listed here will override any specified in a config file.
53 | # Prefix the list here with "+" to use these queries and those in the config file.
54 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
55 |
56 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
57 | # If this step fails, then you should remove it and run the build manually (see below)
58 | - name: Autobuild
59 | uses: github/codeql-action/autobuild@v1
60 |
61 | # ℹ️ Command-line programs to run using the OS shell.
62 | # 📚 https://git.io/JvXDl
63 |
64 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
65 | # and modify them (or add more) to build your code if your project
66 | # uses a compiled language
67 |
68 | #- run: |
69 | # make bootstrap
70 | # make release
71 |
72 | - name: Perform CodeQL Analysis
73 | uses: github/codeql-action/analyze@v1
74 |
75 |
76 |
--------------------------------------------------------------------------------
/.github/workflows/scorecard.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub. They are provided
2 | # by a third-party and are governed by separate terms of service, privacy
3 | # policy, and support documentation.
4 |
5 | name: Scorecard supply-chain security
6 | on:
7 | # For Branch-Protection check. Only the default branch is supported. See
8 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
9 | branch_protection_rule:
10 | # To guarantee Maintained check is occasionally updated. See
11 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
12 | schedule:
13 | - cron: '18 20 * * 3'
14 | push:
15 | branches: [ "master" ]
16 |
17 | # Declare default permissions as read only.
18 | permissions: read-all
19 |
20 | jobs:
21 | analysis:
22 | name: Scorecard analysis
23 | runs-on: ubuntu-latest
24 | permissions:
25 | # Needed to upload the results to code-scanning dashboard.
26 | security-events: write
27 | # Needed to publish results and get a badge (see publish_results below).
28 | id-token: write
29 | # Uncomment the permissions below if installing in a private repository.
30 | # contents: read
31 | # actions: read
32 |
33 | steps:
34 | - name: "Checkout code"
35 | uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
36 | with:
37 | persist-credentials: false
38 |
39 | - name: "Run analysis"
40 | uses: ossf/scorecard-action@v2.3.1
41 | with:
42 | results_file: results.sarif
43 | results_format: sarif
44 | # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
45 | # - you want to enable the Branch-Protection check on a *public* repository, or
46 | # - you are installing Scorecard on a *private* repository
47 | # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
48 | # repo_token: ${{ secrets.SCORECARD_TOKEN }}
49 |
50 | # Public repositories:
51 | # - Publish results to OpenSSF REST API for easy access by consumers
52 | # - Allows the repository to include the Scorecard badge.
53 | # - See https://github.com/ossf/scorecard-action#publishing-results.
54 | # For private repositories:
55 | # - `publish_results` will always be set to `false`, regardless
56 | # of the value entered here.
57 | publish_results: true
58 |
59 | # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
60 | # format to the repository Actions tab.
61 | - name: "Upload artifact"
62 | uses: actions/upload-artifact@v4
63 | with:
64 | name: SARIF file
65 | path: results.sarif
66 | retention-days: 5
67 |
68 | # Upload the results to GitHub's code scanning dashboard.
69 | - name: "Upload to code-scanning"
70 | uses: github/codeql-action/upload-sarif@807578363a7869ca324a79039e6db9c843e0e100 # v2.1.27
71 | with:
72 | sarif_file: results.sarif
73 |
--------------------------------------------------------------------------------
/pytm/json.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sys
3 |
4 | from .pytm import (
5 | TM,
6 | Boundary,
7 | Element,
8 | Dataflow,
9 | Server,
10 | ExternalEntity,
11 | Datastore,
12 | Actor,
13 | Process,
14 | SetOfProcesses,
15 | Action,
16 | Lambda,
17 | Controls,
18 | )
19 |
20 |
def loads(s):
    """Deserialize the JSON string *s* into a TM object."""
    tm = json.loads(s, object_hook=decode)
    if isinstance(tm, TM):
        return tm
    # decode() passes plain dicts through untouched, so anything that is
    # not a TM means the input was not a threat model.
    raise ValueError("Failed to decode JSON input as TM")
27 |
28 |
def load(fp):
    """Deserialize JSON read from the open file *fp* into a TM object."""
    tm = json.load(fp, object_hook=decode)
    if isinstance(tm, TM):
        return tm
    # Mirror loads(): non-TM output means the file held no threat model.
    raise ValueError("Failed to decode JSON input as TM")
35 |
36 |
def decode(data):
    """json object_hook: turn the top-level dict into a TM, pass others through."""
    if not any(key in data for key in ("elements", "flows", "boundaries")):
        # Inner objects (boundaries, elements, flows) are handled by the
        # top-level pass below; return them unchanged here.
        return data

    # Order matters: elements resolve boundary refs, flows resolve elements.
    boundaries = decode_boundaries(data.pop("boundaries", []))
    elements = decode_elements(data.pop("elements", []), boundaries)
    decode_flows(data.pop("flows", []), elements)

    if "name" not in data:
        raise ValueError("name property missing for threat model")
    if "onDuplicates" in data:
        data["onDuplicates"] = Action(data["onDuplicates"])
    # Remaining keys become TM keyword attributes.
    return TM(data.pop("name"), **data)
50 |
51 |
def decode_boundaries(flat):
    """Build Boundary objects from plain dicts, resolving parent references.

    Parents are linked in a second pass so a boundary may reference one
    declared later in the list.
    """
    boundaries = {}
    pending_parents = {}
    for index, attrs in enumerate(flat):
        name = attrs.pop("name", None)
        if name is None:
            raise ValueError(f"name property missing in boundary {index}")
        if "inBoundary" in attrs:
            # Defer resolution until every boundary object exists.
            pending_parents[name] = attrs.pop("inBoundary")
        boundaries[name] = Boundary(name, **attrs)

    for name, parent in pending_parents.items():
        boundaries[name].inBoundary = boundaries[parent]

    return boundaries
71 |
72 |
def decode_elements(flat, boundaries):
    """Instantiate model elements from plain dicts, resolving boundary refs.

    The element class is chosen by the dict's "__class__" key and looked up
    in this module's namespace.
    """
    elements = {}
    this_module = sys.modules[__name__]
    for index, attrs in enumerate(flat):
        klass = getattr(this_module, attrs.pop("__class__", "Asset"))
        name = attrs.pop("name", None)
        if name is None:
            raise ValueError(f"name property missing in element {index}")
        if "inBoundary" in attrs:
            ref = attrs["inBoundary"]
            if ref not in boundaries:
                raise ValueError(
                    f"element {name} references invalid boundary {ref}"
                )
            # Swap the boundary's name for the actual Boundary object.
            attrs["inBoundary"] = boundaries[ref]
        elements[name] = klass(name, **attrs)

    return elements
90 |
91 |
def decode_flows(flat, elements):
    """Create Dataflow objects, wiring each to its already-decoded endpoints.

    Dataflow registers itself with the TM on construction, so nothing is
    returned here.
    """
    for index, attrs in enumerate(flat):
        name = attrs.pop("name", None)
        if name is None:
            raise ValueError(f"name property missing in dataflow {index}")
        # Validate both endpoints (source first, matching original order).
        for endpoint in ("source", "sink"):
            if endpoint not in attrs:
                raise ValueError(f"dataflow {name} is missing {endpoint} property")
            if attrs[endpoint] not in elements:
                raise ValueError(
                    f"dataflow {name} references invalid {endpoint} {attrs[endpoint]}"
                )
        source = elements[attrs.pop("source")]
        sink = elements[attrs.pop("sink")]
        Dataflow(source, sink, name, **attrs)
108 |
--------------------------------------------------------------------------------
/pytm/template_engine.py:
--------------------------------------------------------------------------------
1 | # shamelessly lifted from https://makina-corpus.com/blog/metier/2016/the-worlds-simplest-python-template-engine
2 | # but modified to include support to call methods which return lists, to call external utility methods, use
3 | # if operator with methods and added a not operator.
4 |
5 | import string
6 |
7 |
class SuperFormatter(string.Formatter):
    """World's simplest template engine.

    Extends string.Formatter with custom format specs:

    * ``value:repeat:template`` -- render *template* once per item of an
      iterable (a dict iterates over its items).
    * ``callable:call:[template]`` -- invoke the value; if it returns a
      list, render *template* once per element, otherwise emit the result.
    * ``value:call:method[:template]`` -- apply a ReportUtils helper to the
      value, optionally rendering *template* per element of a list result.
    * ``value:if:template`` / ``value:not:template`` -- emit *template*
      only when the (possibly called) value is truthy / falsy.
    """

    def format_field(self, value, spec):
        spec_parts = spec.split(":")

        if spec.startswith("repeat"):
            # Example: {item.findings:repeat:{{item.id}}, }
            template = spec.partition(":")[-1]
            items = value.items() if type(value) is dict else value
            return "".join(self.format(template, item=entry) for entry in items)

        if spec.startswith("call:") and hasattr(value, "__call__"):
            # Example: {item.display_name:call:}
            #          {item.parents:call:{{item.name}}, }
            result = value()
            if type(result) is list:
                template = spec.partition(":")[-1]
                return "".join(self.format(template, item=entry) for entry in result)
            return result

        if spec.startswith("call:"):
            # Example: {item:call:getFindingCount}
            #          {item:call:getNamesOfParents:{{item}}}
            result = self.call_util_method(spec_parts[1], value)
            if type(result) is list:
                # Drop "call:" then the method name to recover the template.
                template = spec.partition(":")[-1].partition(":")[-1]
                return "".join(self.format(template, item=entry) for entry in result)
            return result

        if spec.startswith("if") or spec.startswith("not"):
            # Example: {item.inScope:if:Is in scope}
            #          {item.findings:not:Has No Findings}
            template = spec.partition(":")[-1]
            result = value() if hasattr(value, "__call__") else value
            if spec.startswith("if"):
                return result and template or ""
            return not result and template or ""

        # Anything else is a standard format spec.
        return super().format_field(value, spec)

    def call_util_method(self, method_name, object):
        """Look up *method_name* on pytm.report_util.ReportUtils and apply it."""
        module = __import__("pytm.report_util", fromlist=["ReportUtils"])
        klass = getattr(module, "ReportUtils")
        return getattr(klass, method_name)(object)
86 |
--------------------------------------------------------------------------------
/docs/reveal.md:
--------------------------------------------------------------------------------
1 | # {tm.name}
2 |
3 | ---
4 |
5 | ## System Description
6 |
7 | {tm.description}
8 |
9 | ---
10 |
11 | ## Dataflow Diagram
12 |
13 | 
14 |
15 | ---
16 |
17 | ## Dataflows
18 |
19 | ----
20 |
21 | {dataflows:repeat:
22 |
23 | - **name** : {{item.display_name:call:}}
24 | - **from** : {{item.source.name}}
25 | - **to** : {{item.sink.name}}:{{item.dstPort}}
26 | - **data** : {{item.data}}
27 | - **protocol** : {{item.protocol}}
28 |
29 | ----
30 | }
31 |
32 | ---
33 |
34 | ## Data Dictionary
35 |
36 | ----
37 |
38 | {data:repeat:
39 |
40 | - **name** : {{item.name}}
41 | - **description** : {{item.description}}
42 | - **classification** : {{item.classification.name}}
43 | - **carried by** : {{item.carriedBy:repeat:{{{{item.name}}}}
}}
44 | - **processed by** : {{item.processedBy:repeat:{{{{item.name}}}}
}}
45 |
46 | ----
47 | }
48 |
49 |
50 | ---
51 |
52 | ## Actors
53 |
54 | ----
55 |
56 | {actors:repeat:
57 | - **name** : {{item.name}}
58 | - **description** : {{item.description}}
59 | - **is Admin** : {{item.isAdmin}}
60 | - **# of findings** : {{item:call:getFindingCount}}
61 |
62 | {{item.findings:not:
63 | ---
64 | }}
65 |
66 | {{item.findings:if:
67 | ----
68 | **Findings**
69 |
70 | ----
71 |
72 | {{item.findings:repeat:
73 | {{{{item.id}}}} -- {{{{item.description}}}}
74 |
75 | - **Targeted Element** : {{{{item.target}}}}
76 | - **Severity** : {{{{item.severity}}}}
77 | - **References** : {{{{item.references}}}}
78 |
79 | ----
80 |
81 | }}
82 | }}
83 | }
84 |
85 | ## Trust Boundaries
86 |
87 | ----
88 |
89 | {boundaries:repeat:
90 | - **name** : {{item.name}}
91 | - **description** : {{item.description}}
92 | - **in scope** : {{item.inScope}}
93 | - **immediate parent** : {{item.parents:if:{{item:call:getParentName}}}}{{item.parents:not:N/A, primary boundary}}
94 | - **all parents** : {{item.parents:call:{{{{item.display_name:call:}}}}, }}
95 | - **classification** : {{item.maxClassification}}
96 | - **finding count** : {{item:call:getFindingCount}}
97 |
98 | {{item.findings:not:
99 | ---
100 | }}
101 |
102 | {{item.findings:if:
103 | ----
104 | **Findings**
105 |
106 | ----
107 |
108 | {{item.findings:repeat:
109 | {{{{item.id}}}} - {{{{item.description}}}}
110 |
111 | - **Targeted Element** : {{{{item.target}}}}
112 | - **Severity** : {{{{item.severity}}}}
113 | - **References** : {{{{item.references}}}}
114 | ----
115 |
116 | }}
117 | }}
118 | }
119 |
120 | ## Assets
121 |
122 | {assets:repeat:
123 |
124 | - **name** : {{item.name}}
125 | - **description** : {{item.description}}
126 | - **in scope** : {{item.inScope}}
127 | - **type** : {{item:call:getElementType}}
128 | - **# of findings** : {{item:call:getFindingCount}}
129 |
130 | {{item.findings:not:
131 | ---
132 | }}
133 |
134 | {{item.findings:if:
135 | ----
136 | **Findings**
137 |
138 | ----
139 |
140 | {{item.findings:repeat:
141 | {{{{item.id}}}} - {{{{item.description}}}}
142 |
143 | - **Targeted Element** : {{{{item.target}}}}
144 | - **Severity** : {{{{item.severity}}}}
145 | - **References** : {{{{item.references}}}}
146 | ----
147 |
148 | }}
149 | }}
150 | }
151 |
152 | ## Data Flows
153 |
154 | {dataflows:repeat:
155 | Name|{{item.name}}
156 | |:----|:----|
157 | Description|{{item.description}}|
158 | Sink|{{item.sink}}|
159 | Source|{{item.source}}|
160 | Is Response|{{item.isResponse}}|
161 | In Scope|{{item.inScope}}|
162 | Finding Count|{{item:call:getFindingCount}}|
163 |
164 | {{item.findings:not:
165 | ---
166 | }}
167 |
168 | {{item.findings:if:
169 | ----
170 | **Findings**
171 |
172 | ----
173 |
174 | {{item.findings:repeat:
175 | {{{{item.id}}}} - {{{{item.description}}}}
176 |
177 | - **Targeted Element** : {{{{item.target}}}}
178 | - **Severity** : {{{{item.severity}}}}
179 | - **References** : {{{{item.references}}}}
180 | ----
181 |
182 | }}
183 | }}
184 | }
185 |
186 |
--------------------------------------------------------------------------------
/tests/dfd.dot:
--------------------------------------------------------------------------------
1 | digraph tm {
2 | graph [
3 | fontname = Arial;
4 | fontsize = 14;
5 | ]
6 | node [
7 | fontname = Arial;
8 | fontsize = 14;
9 | rankdir = lr;
10 | ]
11 | edge [
12 | shape = none;
13 | arrowtail = onormal;
14 | fontname = Arial;
15 | fontsize = 12;
16 | ]
17 | labelloc = "t";
18 | fontsize = 20;
19 | nodesep = 1;
20 |
21 | subgraph cluster_boundary_Companynet_88f2d9c06f {
22 | graph [
23 | fontsize = 10;
24 | fontcolor = black;
25 | style = dashed;
26 | color = firebrick2;
27 | label = <Company net>;
28 | ]
29 |
30 | subgraph cluster_boundary_dmz_579e9aae81 {
31 | graph [
32 | fontsize = 10;
33 | fontcolor = black;
34 | style = dashed;
35 | color = firebrick2;
36 | label = <dmz>;
37 | ]
38 |
39 | server_Gateway_f8af758679 [
40 | shape = circle;
41 | color = black;
42 | fontcolor = black;
43 | label = "Gateway";
44 | margin = 0.02;
45 | ]
46 |
47 | }
48 |
49 | subgraph cluster_boundary_backend_f2eb7a3ff7 {
50 | graph [
51 | fontsize = 10;
52 | fontcolor = black;
53 | style = dashed;
54 | color = firebrick2;
55 | label = <backend>;
56 | ]
57 |
58 | server_WebServer_2c440ebe53 [
59 | shape = circle;
60 | color = black;
61 | fontcolor = black;
62 | label = "Web Server";
63 | margin = 0.02;
64 | ]
65 |
66 | datastore_SQLDatabase_0291419f72 [
67 | shape = none;
68 | fixedsize = shape;
69 | image = "INSTALL_PATH/pytm/images/datastore_black.png";
70 | imagescale = true;
71 | color = black;
72 | fontcolor = black;
73 | xlabel = "SQL Database";
74 | label = "";
75 | ]
76 |
77 | }
78 |
79 | }
80 |
81 | subgraph cluster_boundary_Internet_acf3059e70 {
82 | graph [
83 | fontsize = 10;
84 | fontcolor = black;
85 | style = dashed;
86 | color = firebrick2;
87 | label = <Internet>;
88 | ]
89 |
90 | actor_User_d2006ce1bb [
91 | shape = square;
92 | color = black;
93 | fontcolor = black;
94 | label = "User";
95 | margin = 0.02;
96 | ]
97 |
98 | }
99 |
100 | actor_User_d2006ce1bb -> server_Gateway_f8af758679 [
101 | color = black;
102 | fontcolor = black;
103 | dir = forward;
104 | label = "User enters\ncomments (*)";
105 | ]
106 |
107 | server_Gateway_f8af758679 -> server_WebServer_2c440ebe53 [
108 | color = black;
109 | fontcolor = black;
110 | dir = forward;
111 | label = "Request";
112 | ]
113 |
114 | server_WebServer_2c440ebe53 -> datastore_SQLDatabase_0291419f72 [
115 | color = black;
116 | fontcolor = black;
117 | dir = forward;
118 | label = "Insert query with\ncomments";
119 | ]
120 |
121 | datastore_SQLDatabase_0291419f72 -> server_WebServer_2c440ebe53 [
122 | color = black;
123 | fontcolor = black;
124 | dir = forward;
125 | label = "Retrieve comments";
126 | ]
127 |
128 | server_WebServer_2c440ebe53 -> server_Gateway_f8af758679 [
129 | color = black;
130 | fontcolor = black;
131 | dir = forward;
132 | label = "Response";
133 | ]
134 |
135 | server_Gateway_f8af758679 -> actor_User_d2006ce1bb [
136 | color = black;
137 | fontcolor = black;
138 | dir = forward;
139 | label = "Show comments (*)";
140 | ]
141 |
142 | }
143 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Below you will find a collection of guidelines for submitting issues as well as contributing code to the PyTM repository.
4 | Please read those before starting an issue or a pull request.
5 |
6 | ## Issues
7 |
8 | Specific PyTM design and development issues, bugs, and feature requests are maintained by GitHub Issues.
9 |
10 | *Please do not post installation, build, usage, or modeling questions, or other requests for help to Issues.*
11 | Use the [PyTM-users list](https://groups.google.com/forum/#!forum/pytm-users) instead.
12 | This helps developers maintain a clear, uncluttered, and efficient view of the state of PyTM.
13 | See the chapter [PyTM-users](#PyTM-users) below for guidance on posting to the users list.
14 |
15 | When reporting an issue, it's most helpful to provide the following information, where applicable:
16 | * What does the problem look like, and what steps reproduce it?
17 | * Can you reproduce it using the latest [master](https://github.com/izar/pytm/tree/master)?
18 | * What is your running environment? In particular:
19 | * OS,
20 | * Python version,
21 | * Dot or PlantUML version, if relevant,
22 | * Your model file, if possible.
23 | * **What have you already tried** to solve the problem? How did it fail? Are there any other issues related to yours?
24 | * If the bug is a crash, provide the backtrace (usually printed by PyTM).
25 |
26 | If only a small portion of the code/log is relevant to your issue, you may paste it directly into the post, preferably using Markdown syntax for code block: triple backtick ( \`\`\` ) to open/close a block.
27 | In other cases (multiple files, or long files), please **attach** them to the post - this greatly improves readability.
28 |
29 | If the problem arises during a complex operation (e.g. large model using PyTM), please reduce the example to the minimal size that still causes the error.
30 | Also, minimize influence of external modules, data etc. - this way it will be easier for others to understand and reproduce your issue, and eventually help you.
31 | Sometimes you will find the root cause yourself in the process.
32 |
33 | Try to give your issue a title that is succinct and specific. The devs will rename issues as needed to keep track of them.
34 |
35 | To execute the test suite, from the root of the repo run `make test`. To control what tests to run, use `python3 -m unittest -v tests/`.
36 |
37 | To regenerate test fixtures for `json.dumps` and report tests add a `print(output)` statement in the test and run `make test 2>/dev/null > tests/output.json` or `make test 2>/dev/null > tests/output.md`.
38 |
39 | ## PyTM-users
40 |
41 | Before you post to the [PyTM-users list](https://groups.google.com/forum/#!forum/pytm-users), make sure you look for existing solutions.
42 |
43 | * [GitHub issues](https://github.com/izar/pytm/issues) tracker (some problems have been answered there),
44 |
45 | Found a post/issue with your exact problem, but with no answer?
46 | Don't just leave a "me too" message - provide the details of your case.
47 | Problems with more available information are easier to solve and attract good attention.
48 |
49 | When posting to the list, make sure you provide as much relevant information as possible - recommendations for an issue report (see above) are a good starting point.
50 |
51 | Formatting recommendations hold: paste short logs/code fragments into the post (use fixed-width text for them), **attach** long logs or multiple files.
52 |
53 | ## Pull Requests
54 |
55 | PyTM welcomes all contributions.
56 |
57 | Briefly: read commit by commit, a PR should tell a clean, compelling story of _one_ improvement to PyTM. In particular:
58 |
59 | * A PR should do one clear thing that obviously improves PyTM, and nothing more. Making many smaller PRs is better than making one large PR; review effort is superlinear in the amount of code involved.
60 | * Similarly, each commit should be a small, atomic change representing one step in development. PRs should be made of many commits where appropriate.
61 | * Please do rewrite PR history to be clean rather than chronological. Within-PR bugfixes, style cleanups, reversions, etc. should be squashed and should not appear in merged PR history.
62 | * Anything nonobvious from the code should be explained in comments, commit messages, or the PR description, as appropriate.
63 |
64 | (With many thanks to the Caffe project for their original CONTRIBUTING.md file)
65 |
--------------------------------------------------------------------------------
/tests/dfd_colormap.dot:
--------------------------------------------------------------------------------
1 | digraph tm {
2 | graph [
3 | fontname = Arial;
4 | fontsize = 14;
5 | ]
6 | node [
7 | fontname = Arial;
8 | fontsize = 14;
9 | rankdir = lr;
10 | ]
11 | edge [
12 | shape = none;
13 | arrowtail = onormal;
14 | fontname = Arial;
15 | fontsize = 12;
16 | ]
17 | labelloc = "t";
18 | fontsize = 20;
19 | nodesep = 1;
20 |
21 | subgraph cluster_boundary_Companynet_88f2d9c06f {
22 | graph [
23 | fontsize = 10;
24 | fontcolor = black;
25 | style = dashed;
26 | color = black;
27 | label = <Company net>;
28 | ]
29 |
30 | subgraph cluster_boundary_dmz_579e9aae81 {
31 | graph [
32 | fontsize = 10;
33 | fontcolor = black;
34 | style = dashed;
35 | color = black;
36 | label = <dmz>;
37 | ]
38 |
39 | server_Gateway_f8af758679 [
40 | shape = circle;
41 | color = firebrick3; fillcolor="#b2222222"; style=filled ;
42 | fontcolor = black;
43 | label = "Gateway";
44 | margin = 0.02;
45 | ]
46 |
47 | }
48 |
49 | subgraph cluster_boundary_backend_f2eb7a3ff7 {
50 | graph [
51 | fontsize = 10;
52 | fontcolor = black;
53 | style = dashed;
54 | color = black;
55 | label = <backend>;
56 | ]
57 |
58 | server_WebServer_2c440ebe53 [
59 | shape = circle;
60 | color = firebrick3; fillcolor="#b2222222"; style=filled ;
61 | fontcolor = black;
62 | label = "Web Server";
63 | margin = 0.02;
64 | ]
65 |
66 | datastore_SQLDatabase_0291419f72 [
67 | shape = none;
68 | fixedsize = shape;
69 | image = "INSTALL_PATH/pytm/images/datastore_gold.png";
70 | imagescale = true;
71 | color = gold; fillcolor="#ffd80022"; style=filled;
72 | fontcolor = black;
73 | xlabel = "SQL Database";
74 | label = "";
75 | ]
76 |
77 | }
78 |
79 | }
80 |
81 | subgraph cluster_boundary_Internet_acf3059e70 {
82 | graph [
83 | fontsize = 10;
84 | fontcolor = black;
85 | style = dashed;
86 | color = black;
87 | label = <Internet>;
88 | ]
89 |
90 | actor_User_d2006ce1bb [
91 | shape = square;
92 | color = darkgreen; fillcolor="#00630022"; style=filled;
93 | fontcolor = black;
94 | label = "User";
95 | margin = 0.02;
96 | ]
97 |
98 | }
99 |
100 | actor_User_d2006ce1bb -> server_Gateway_f8af758679 [
101 | color = gold; fillcolor="#ffd80022"; style=filled;
102 | fontcolor = gold; fillcolor="#ffd80022"; style=filled;
103 | dir = forward;
104 | label = "User enters\ncomments (*)";
105 | ]
106 |
107 | server_Gateway_f8af758679 -> server_WebServer_2c440ebe53 [
108 | color = gold; fillcolor="#ffd80022"; style=filled;
109 | fontcolor = gold; fillcolor="#ffd80022"; style=filled;
110 | dir = forward;
111 | label = "Request";
112 | ]
113 |
114 | server_WebServer_2c440ebe53 -> datastore_SQLDatabase_0291419f72 [
115 | color = gold; fillcolor="#ffd80022"; style=filled;
116 | fontcolor = gold; fillcolor="#ffd80022"; style=filled;
117 | dir = forward;
118 | label = "Insert query with\ncomments";
119 | ]
120 |
121 | datastore_SQLDatabase_0291419f72 -> server_WebServer_2c440ebe53 [
122 | color = gold; fillcolor="#ffd80022"; style=filled;
123 | fontcolor = gold; fillcolor="#ffd80022"; style=filled;
124 | dir = forward;
125 | label = "Retrieve comments";
126 | ]
127 |
128 | server_WebServer_2c440ebe53 -> server_Gateway_f8af758679 [
129 | color = gold; fillcolor="#ffd80022"; style=filled;
130 | fontcolor = gold; fillcolor="#ffd80022"; style=filled;
131 | dir = forward;
132 | label = "Response";
133 | ]
134 |
135 | server_Gateway_f8af758679 -> actor_User_d2006ce1bb [
136 | color = gold; fillcolor="#ffd80022"; style=filled;
137 | fontcolor = gold; fillcolor="#ffd80022"; style=filled;
138 | dir = forward;
139 | label = "Show comments (*)";
140 | ]
141 |
142 | }
143 |
--------------------------------------------------------------------------------
/tm.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
"""Sample threat model of a very simple web-based comment system.

A user enters comments, the web server stores them in a SQL database and
displays them back.  Running this script with pytm's CLI flags (see
``tm.process()``) generates reports, DFDs, sequence diagrams and findings.
"""

from pytm import (
    TM,
    Actor,
    Boundary,
    Classification,
    Data,
    Dataflow,
    Datastore,
    Lambda,
    Server,
    DatastoreType,
    Assumption,
)

tm = TM("my test tm")
tm.description = """This is a sample threat model of a very simple system - a web-based comment system.
The user enters comments and these are added to a database and displayed back to the user.
The thought is that it is, though simple, a complete enough example to express meaningful threats."""
tm.isOrdered = True
tm.mergeResponses = True
tm.assumptions = [
    "Here you can document a list of assumptions about the system",
]

# --- Trust boundaries ----------------------------------------------------
internet = Boundary("Internet")

server_db = Boundary("Server/DB")
server_db.levels = [2]  # only drawn on DFD level 2

vpc = Boundary("AWS VPC")

# --- Actors and assets ---------------------------------------------------
user = Actor("User")
user.inBoundary = internet
user.levels = [2]

web = Server("Web Server")
web.OS = "Ubuntu"
web.controls.isHardened = True
web.controls.sanitizesInput = False
web.controls.encodesOutput = True
web.controls.authorizesSource = False
# NOTE(review): docs/template.md is not present in this repo - confirm path.
web.sourceFiles = ["pytm/json.py", "docs/template.md"]
web.assumptions = [
    Assumption(
        "This webserver does not use PHP",
        exclude=["INP16"],  # suppress the PHP-specific threat for this element
    ),
]

db = Datastore("SQL Database")
db.OS = "CentOS"
db.controls.isHardened = False
db.inBoundary = server_db
db.type = DatastoreType.SQL
db.inScope = True
db.maxClassification = Classification.RESTRICTED
db.levels = [2]

secretDb = Datastore("Real Identity Database")
secretDb.OS = "CentOS"
secretDb.sourceFiles = ["pytm/pytm.py"]
secretDb.controls.isHardened = True
secretDb.inBoundary = server_db
secretDb.type = DatastoreType.SQL
secretDb.inScope = True
secretDb.storesPII = True
secretDb.maxClassification = Classification.TOP_SECRET

my_lambda = Lambda("AWS Lambda")
my_lambda.controls.hasAccessControl = True
my_lambda.inBoundary = vpc
my_lambda.levels = [1, 2]

# --- Dataflows -----------------------------------------------------------
token_user_identity = Data(
    "Token verifying user identity", classification=Classification.SECRET
)
db_to_secretDb = Dataflow(db, secretDb, "Database verify real user identity")
db_to_secretDb.protocol = "RDA-TCP"
db_to_secretDb.dstPort = 40234
db_to_secretDb.data = token_user_identity
db_to_secretDb.note = "Verifying that the user is who they say they are."
db_to_secretDb.maxClassification = Classification.SECRET

comments_in_text = Data(
    "Comments in HTML or Markdown", classification=Classification.PUBLIC
)
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = "HTTP"
user_to_web.dstPort = 80
user_to_web.data = comments_in_text
user_to_web.note = "This is a simple web app\nthat stores and retrieves user comments."

query_insert = Data("Insert query with comments", classification=Classification.PUBLIC)
web_to_db = Dataflow(web, db, "Insert query with comments")
web_to_db.protocol = "MySQL"
web_to_db.dstPort = 3306
web_to_db.data = query_insert
web_to_db.note = (
    "Web server inserts user comments\ninto it's SQL query and stores them in the DB."
)

comment_retrieved = Data(
    "Web server retrieves comments from DB", classification=Classification.PUBLIC
)
db_to_web = Dataflow(db, web, "Retrieve comments")
db_to_web.protocol = "MySQL"
db_to_web.dstPort = 80
db_to_web.data = comment_retrieved
db_to_web.responseTo = web_to_db

comment_to_show = Data(
    # Fix: the keyword was misspelled "classifcation", which pytm silently
    # accepted as an unrelated attribute, leaving the default classification.
    "Web server shows comments to the end user", classification=Classification.PUBLIC
)
web_to_user = Dataflow(web, user, "Show comments (*)")
web_to_user.protocol = "HTTP"
web_to_user.data = comment_to_show
web_to_user.responseTo = user_to_web

clear_op = Data("Serverless function clears DB", classification=Classification.PUBLIC)
my_lambda_to_db = Dataflow(my_lambda, db, "Serverless function periodically cleans DB")
my_lambda_to_db.protocol = "MySQL"
my_lambda_to_db.dstPort = 3306
my_lambda_to_db.data = clear_op

userIdToken = Data(
    name="User ID Token",
    description="Some unique token that represents the user real data in the secret database",
    classification=Classification.TOP_SECRET,
    traverses=[user_to_web, db_to_secretDb],
    processedBy=[db, secretDb],
)

if __name__ == "__main__":
    # Parses CLI flags and emits the requested output (report, DFD, JSON, ...).
    tm.process()
--------------------------------------------------------------------------------
/docs/advanced_template.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## System Description
4 |
5 | {tm.description}
6 |
7 | ## Dataflow Diagram - Level 0 DFD
8 |
9 | 
10 |
11 |
12 |
13 | ## Dataflows
14 |
15 | Name|From|To |Data|Protocol|Port
16 | |:----:|:----:|:---:|:----:|:--------:|:----:|
17 | {dataflows:repeat:|{{item.display_name:call:}}|{{item.source.name}}|{{item.sink.name}}|{{item.data}}|{{item.protocol}}|{{item.dstPort}}|
18 | }
19 |
20 | ## Data Dictionary
21 |
22 | Name|Description|Classification|Carried|Processed
23 | |:----:|:--------:|:----:|:----|:----|
24 | {data:repeat:|{{item.name}}|{{item.description}}|{{item.classification.name}}|{{item.carriedBy:repeat:{{{{item.name}}}}
}}|{{item.processedBy:repeat:{{{{item.name}}}}
}}|
25 | }
26 |
27 | ## Actors
28 |
29 | {actors:repeat:
30 | Name|{{item.name}}
31 | |:----|:----|
32 | Description|{{item.description}}|
33 | Is Admin|{{item.isAdmin}}
34 | Finding Count|{{item:call:getFindingCount}}|
35 |
36 | {{item.findings:if:
37 |
38 | **Threats**
39 |
40 | {{item.findings:repeat:
41 |
42 | {{{{item.id}}}} -- {{{{item.threat_id}}}} -- {{{{item.description}}}}
43 | Targeted Element
44 | {{{{item.target}}}}
45 | Severity
46 | {{{{item.severity}}}}
47 | Example Instances
48 | {{{{item.example}}}}
49 | Mitigations
50 | {{{{item.mitigations}}}}
51 | References
52 | {{{{item.references}}}}
53 |
54 |
55 | }}
56 | }}
57 | }
58 |
59 | ## Boundaries
60 |
61 | {boundaries:repeat:
62 | Name|{{item.name}}
63 | |:----|:----|
64 | Description|{{item.description}}|
65 | In Scope|{{item.inScope}}|
66 | Immediate Parent|{{item.parents:if:{{item:call:getParentName}}}}{{item.parents:not:N/A, primary boundary}}|
67 | All Parents|{{item.parents:call:{{{{item.display_name:call:}}}}, }}|
68 | Classification|{{item.maxClassification}}|
69 | Finding Count|{{item:call:getFindingCount}}|
70 |
71 | {{item.findings:if:
72 |
73 | **Threats**
74 |
75 | {{item.findings:repeat:
76 |
77 | {{{{item.id}}}} -- {{{{item.threat_id}}}} -- {{{{item.description}}}}
78 | Targeted Element
79 | {{{{item.target}}}}
80 | Severity
81 | {{{{item.severity}}}}
82 | Example Instances
83 | {{{{item.example}}}}
84 | Mitigations
85 | {{{{item.mitigations}}}}
86 | References
87 | {{{{item.references}}}}
88 |
89 |
90 | }}
91 | }}
92 | }
93 |
94 | ## Assets
95 |
96 | {assets:repeat:
97 | Name|{{item.name}}|
98 | |:----|:----|
99 | Description|{{item.description}}|
100 | In Scope|{{item.inScope}}|
101 | Type|{{item:call:getElementType}}|
102 | Finding Count|{{item:call:getFindingCount}}|
103 |
104 | {{item.findings:if:
105 |
106 | **Threats**
107 |
108 | {{item.findings:repeat:
109 |
110 | {{{{item.id}}}} -- {{{{item.threat_id}}}} -- {{{{item.description}}}}
111 | Targeted Element
112 | {{{{item.target}}}}
113 | Severity
114 | {{{{item.severity}}}}
115 | Example Instances
116 | {{{{item.example}}}}
117 | Mitigations
118 | {{{{item.mitigations}}}}
119 | References
120 | {{{{item.references}}}}
121 |
122 |
123 | }}
124 | }}
125 | }
126 |
127 | ## Data Flows
128 |
129 | {dataflows:repeat:
130 | Name|{{item.name}}
131 | |:----|:----|
132 | Description|{{item.description}}|
133 | Sink|{{item.sink}}|
134 | Source|{{item.source}}|
135 | Is Response|{{item.isResponse}}|
136 | In Scope|{{item.inScope}}|
137 | Finding Count|{{item:call:getFindingCount}}|
138 |
139 | {{item.findings:if:
140 |
141 | **Threats**
142 |
143 | {{item.findings:repeat:
144 |
145 | {{{{item.id}}}} -- {{{{item.threat_id}}}} -- {{{{item.description}}}}
146 | Targeted Element
147 | {{{{item.target}}}}
148 | Severity
149 | {{{{item.severity}}}}
150 | Example Instances
151 | {{{{item.example}}}}
152 | Mitigations
153 | {{{{item.mitigations}}}}
154 | References
155 | {{{{item.references}}}}
156 |
157 |
158 | }}
159 | }}
160 | }
161 |
162 | {tm.excluded_findings:if:
163 | # Excluded Threats
164 | }
165 |
166 | {tm.excluded_findings:repeat:
167 |
168 | {{item.id}} -- {{item.threat_id}} -- {{item.description}}
169 | **{{item.threat_id}}** was excluded for **{{item.target}}** because of the assumption: "{{item.assumption.name}}
170 | "
171 | {{item.assumption.description:if:
172 | Assumption description
173 | {{item.assumption.description}}
174 | }}
175 |
176 | Targeted Element
177 | {{item.target}}
178 | Severity
179 | {{item.severity}}
180 | Example Instances
181 | {{item.example}}
182 | References
183 | {{item.references}}
184 |
185 | }
186 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # 1.2.0
2 |
3 | ## Breaking changes
4 |
5 | - Replace `usesLatestTLSversion` with `minTLSVersion` in assets and `tlsVersion` in data flows [#123](https://github.com/izar/pytm/pull/123)
- When the `data` attribute of elements is initialized with a string, convert it to a `Data` object with `undefined` as name and the string as description; change the default classification from `PUBLIC` to `UNKNOWN` [#148](https://github.com/izar/pytm/pull/148)
7 |
8 | ## New features
9 |
10 | - Separate actors and assets from elements when dumping the model to JSON [#150](https://github.com/izar/pytm/pull/150)
11 | - Add unique Finding ids [#154](https://github.com/izar/pytm/pull/154)
12 | - Allow to associate the threat model script with source code files and check their age difference [#145](https://github.com/izar/pytm/pull/145)
13 | - Adapt [the DFD3 notation](https://github.com/adamshostack/DFD3) [#143](https://github.com/izar/pytm/pull/143)
14 | - Allow to override findings (threats) attributes [#137](https://github.com/izar/pytm/pull/137)
15 | - Allow to mark data as PII or credentials and check if it's protected [#127](https://github.com/izar/pytm/pull/127)
16 | - Added '--levels' - every element now has a 'levels' attribute, a list of integers denoting different DFD levels for rendering
17 | - Added HTML docs using pdoc [#110](https://github.com/izar/pytm/pull/110)
18 | - Added `checksDestinationRevocation` attribute to account for certificate revocation checks [#109](https://github.com/izar/pytm/pull/109)
19 |
20 | ## Bug fixes
21 |
22 | - Escape HTML entities in Threat attributes [#149](https://github.com/izar/pytm/pull/149)
23 | - Fix generating reports for models with a `Datastore` that has `isEncryptedAtRest` set and a `Data` that has `isStored` set [#141](https://github.com/izar/pytm/pull/141)
24 | - Fix condition on the `Data Leak` threat so it does not always match [#139](https://github.com/izar/pytm/pull/139)
25 | - Fixed printing the data attribute in reports [#123](https://github.com/izar/pytm/pull/123)
26 | - Added a markdown file with threats [#126](https://github.com/izar/pytm/pull/126)
- Fixed drawing nested boundaries [#117](https://github.com/izar/pytm/pull/117)
28 | - Add missing `provideIntegrity` attribute in `Actor` and `Asset` classes [#116](https://github.com/izar/pytm/pull/116)
29 |
30 | # 1.1.2
31 |
32 | - Added Poetry [#108](https://github.com/izar/pytm/pull/108)
33 | - Fix drawing DFDs for nested Boundaries [#107](https://github.com/izar/pytm/pull/107)
34 |
35 | # 1.1.1
36 |
37 | - Fix pydal dependencies install on pip [#106](https://github.com/izar/pytm/pull/106)
38 |
39 | # 1.1.0
40 |
41 | ## Breaking changes
42 |
43 | - Removed `HandlesResources` attribute from the `Process` class, which duplicates `handlesResources`
44 | - Change default `Dataflow.dstPort` attribute value from `10000` to `-1`
45 |
46 | ## New features
47 |
48 |
49 | - Add dump of elements and findings to sqlite database using "--sqldump " (with result in ./sqldump/) [#103](https://github.com/izar/pytm/pull/103)
50 | - Add Data element and DataLeak finding to support creation of a data dictionary separate from the model [#104](https://github.com/izar/pytm/pull/104)
51 | - Add JSON input [#105](https://github.com/izar/pytm/pull/105)
52 | - Add JSON output [#102](https://github.com/izar/pytm/pull/102)
53 | - Use numbered dataflow labels in sequence diagram [#94](https://github.com/izar/pytm/pull/94)
54 | - Move authenticateDestination to base Element [#88](https://github.com/izar/pytm/pull/88)
55 | - Assign inputs and outputs to all elements [#89](https://github.com/izar/pytm/pull/89)
56 | - Allow detecting and/or hiding duplicate dataflows by setting `TM.onDuplicates` [#100](https://github.com/izar/pytm/pull/100)
57 | - Ignore unused elements if `TM.ignoreUnused` is True [#84](https://github.com/izar/pytm/pull/84)
58 | - Assign findings to elements [#86](https://github.com/izar/pytm/pull/86)
59 | - Add description to class attributes [#91](https://github.com/izar/pytm/pull/91)
60 | - New Element methods to be used in threat conditions [#82](https://github.com/izar/pytm/pull/82)
61 | - Provide a Docker image and allow running make targets in a container [#87](https://github.com/izar/pytm/pull/87)
62 | - Dataflow inherits source and/or sink attribute values [#79](https://github.com/izar/pytm/pull/79)
63 | - Merge edges in DFD when `TM.mergeResponses` is True; allow marking `Dataflow` as responses [#76](https://github.com/izar/pytm/pull/76)
64 | - Automatic ordering of dataflows when `TM.isOrdered` is True [#66](https://github.com/izar/pytm/pull/66)
65 | - Loading a custom threats file by setting `TM.threatsFile` [#68](https://github.com/izar/pytm/pull/68)
66 | - Setting properties on init [#67](https://github.com/izar/pytm/pull/67)
67 | - Wrap long labels in DFDs [#65](https://github.com/izar/pytm/pull/65)
68 |
69 | ## Bug fixes
70 |
71 | - Ensure all items have correct color, based on scope [#93](https://github.com/izar/pytm/pull/93)
72 | - Add missing server isResilient property [#63](https://github.com/izar/pytm/issues/63)
73 | - Advanced templates in repeat blocks [#81](https://github.com/izar/pytm/pull/81)
74 | - Produce stable diagrams [#79](https://github.com/izar/pytm/pull/79)
75 | - Allow overriding classes [#64](https://github.com/izar/pytm/pull/64)
76 |
77 | # 1.0.0
78 |
79 | ## New features
80 |
81 | - New threats [#61](https://github.com/izar/pytm/pull/61)
82 |
83 | ## Bug fixes
84 |
85 | - UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d [#57](https://github.com/izar/pytm/pull/57)
86 | - `_uniq_name` missing 1 required positional argument [#60](https://github.com/izar/pytm/pull/60)
87 | - Render objects with duplicate names [#45](https://github.com/izar/pytm/issues/45)
88 |
89 | # 0.8.1
90 |
91 | ## Bug fixes
92 |
93 | - Draw nested boundaries [#54](https://github.com/izar/pytm/pull/54), [#55](https://github.com/izar/pytm/pull/55)
94 |
95 | # 0.8.0
96 |
97 | ## New features
98 |
99 | - Draw nested boundaries [#52](https://github.com/izar/pytm/pull/52)
100 |
--------------------------------------------------------------------------------
/tests/test_private_func.py:
--------------------------------------------------------------------------------
1 | import random
2 | import pytest
3 |
4 | from pytm.pytm import (
5 | TM,
6 | Actor,
7 | Assumption,
8 | Boundary,
9 | Data,
10 | Dataflow,
11 | Datastore,
12 | DatastoreType,
13 | Finding,
14 | Process,
15 | Server,
16 | Threat,
17 | UIError,
18 | encode_threat_data,
19 | )
20 |
class TestUniqueNames:
    """Tests for the private ``_uniq_name()`` identifier generator."""

    def test_duplicate_boundary_names_have_different_unique_names(self):
        """Two boundaries sharing a display name still get distinct ids."""
        # Seed the RNG so the generated suffixes are reproducible.
        random.seed(0)
        first, second = Boundary("foo"), Boundary("foo")

        first_id = first._uniq_name()
        second_id = second._uniq_name()

        assert first_id != second_id
        # Exact values are pinned only because of the fixed seed above.
        assert first_id == "boundary_foo_acf3059e70"
        assert second_id == "boundary_foo_88f2d9c06f"
33 |
class TestAttributes:
    """Tests covering attribute handling on pytm model elements."""

    def test_write_once(self):
        # An element's name is write-once: reassigning it must raise.
        user = Actor("User")
        with pytest.raises(ValueError):
            user.name = "Computer"

    def test_kwargs(self):
        # Attributes can be set via constructor kwargs...
        user = Actor("User", isAdmin=True)
        assert user.isAdmin is True
        # ...or left at their default and assigned afterwards.
        user = Actor("User")
        assert user.isAdmin is False
        user.isAdmin = True
        assert user.isAdmin is True

    def test_load_threats(self):
        # Constructing a TM populates the class-level threat library.
        tm = TM("TM")
        assert len(TM._threats) != 0
        # Pointing threatsFile at a nonexistent path raises UIError,
        # whether assigned after construction or passed as a kwarg.
        with pytest.raises(UIError):
            tm.threatsFile = "threats.json"
        with pytest.raises(UIError):
            TM("TM", threatsFile="threats.json")

    def test_responses(self):
        # Response dataflows must be linked both ways after tm.check():
        # request.response <-> response.responseTo / isResponse.
        tm = TM("my test tm", description="aa", isOrdered=True)
        user = Actor("User")
        web = Server("Web Server")
        db = Datastore("SQL Database")
        http_req = Dataflow(user, web, "http req")
        insert = Dataflow(web, db, "insert data")
        query = Dataflow(web, db, "query")
        # Link set at construction time...
        query_resp = Dataflow(db, web, "query results", responseTo=query)
        http_resp = Dataflow(web, user, "http resp")
        # ...and link set by attribute assignment; both must resolve.
        http_resp.responseTo = http_req
        assert tm.check()
        assert http_req.response == http_resp
        assert http_resp.isResponse is True
        assert query_resp.isResponse is True
        assert query_resp.responseTo == query
        assert query.response == query_resp
        # A flow with no response stays unlinked.
        assert insert.response is None
        assert insert.isResponse is False

    def test_defaults(self):
        # Dataflows inherit unset attributes (ports, protocol, encryption,
        # destination authentication, data) from their source and/or sink.
        tm = TM("TM")
        user_data = Data("HTTP")
        user = Actor("User", data=user_data)
        user.controls.authenticatesDestination = True
        json_data = Data("JSON")
        server = Server(
            "Server", port=443, protocol="HTTPS", isEncrypted=True, data=json_data
        )
        sql_resp = Data("SQL resp")
        db = Datastore(
            "PostgreSQL",
            port=5432,
            protocol="PostgreSQL",
            data=sql_resp,
        )
        db.controls.isEncrypted = False
        db.type = DatastoreType.SQL
        worker = Process("Task queue worker")
        req_get_data = Data("HTTP GET")
        req_get = Dataflow(user, server, "HTTP GET", data=req_get_data)
        server_query_data = Data("SQL")
        server_query = Dataflow(server, db, "Query", data=server_query_data)
        result_data = Data("Results")
        result = Dataflow(db, server, "Results", data=result_data, isResponse=True)
        resp_get_data = Data("HTTP Response")
        resp_get = Dataflow(server, user, "HTTP Response", data=resp_get_data, isResponse=True)
        test_assumption = Assumption("test assumption")
        resp_get.assumptions = [test_assumption]
        req_post_data = Data("JSON")
        req_post = Dataflow(user, server, "HTTP POST", data=req_post_data)
        resp_post = Dataflow(server, user, "HTTP Response", isResponse=True)
        test_assumption_exclude = Assumption("test assumption", exclude=["ABCD", "BCDE"])
        resp_post.assumptions = [test_assumption_exclude]
        sql_data = Data("SQL")
        worker_query = Dataflow(worker, db, "Query", data=sql_data)
        # Deliberately unnamed: only its effect on worker.inputs is checked.
        Dataflow(db, worker, "Results", isResponse=True)
        cookie = Data("Auth Cookie", carriedBy=[req_get, req_post])
        assert tm.check()
        # Requests inherit dstPort/protocol/encryption from the sink (server)
        # and authenticatesDestination from the source (user).
        assert req_get.srcPort == -1
        assert req_get.dstPort == server.port
        assert req_get.controls.isEncrypted == server.controls.isEncrypted
        assert req_get.controls.authenticatesDestination == user.controls.authenticatesDestination
        assert req_get.protocol == server.protocol
        assert user.data.issubset(req_get.data)
        assert server_query.srcPort == -1
        assert server_query.dstPort == db.port
        assert server_query.controls.isEncrypted == db.controls.isEncrypted
        assert server_query.controls.authenticatesDestination == server.controls.authenticatesDestination
        assert server_query.protocol == db.protocol
        assert server.data.issubset(server_query.data)
        # Responses inherit srcPort/protocol/encryption from their source.
        assert result.srcPort == db.port
        assert result.dstPort == -1
        assert result.controls.isEncrypted == db.controls.isEncrypted
        assert result.controls.authenticatesDestination is False
        assert result.protocol == db.protocol
        assert db.data.issubset(result.data)
        assert db.assumptions == []
        assert resp_get.srcPort == server.port
        assert resp_get.dstPort == -1
        assert resp_get.controls.isEncrypted == server.controls.isEncrypted
        assert resp_get.controls.authenticatesDestination is False
        assert resp_get.protocol == server.protocol
        assert server.data.issubset(resp_get.data)
        assert resp_get.assumptions == [test_assumption]
        assert req_post.srcPort == -1
        assert req_post.dstPort == server.port
        assert req_post.controls.isEncrypted == server.controls.isEncrypted
        assert req_post.controls.authenticatesDestination == user.controls.authenticatesDestination
        assert req_post.protocol == server.protocol
        assert user.data.issubset(req_post.data)
        assert resp_post.srcPort == server.port
        assert resp_post.dstPort == -1
        assert resp_post.controls.isEncrypted == server.controls.isEncrypted
        assert resp_post.controls.authenticatesDestination is False
        assert resp_post.protocol == server.protocol
        assert server.data.issubset(resp_post.data)
        assert resp_post.assumptions == [test_assumption_exclude]
        # The exclude list is normalized to a set on the assumption.
        assert resp_post.assumptions[0].exclude == set(test_assumption_exclude.exclude)
        # inputs/outputs reflect non-response flows, in creation order.
        assert server.inputs == [req_get, req_post]
        assert server.outputs == [server_query]
        assert worker.inputs == []
        assert worker.outputs == [worker_query]
        # carriedBy/processedBy are derived from the flows the data rides on.
        assert cookie.carriedBy == [req_get, req_post]
        assert set(cookie.processedBy) == set([user, server])
        assert cookie in req_get.data
        assert set([d.name for d in req_post.data]) == set([cookie.name, "HTTP", "JSON"])
163 |
class TestMethod:
    """Exercises the element helper methods used inside threat conditions."""

    def test_defaults(self):
        """Every (target, condition) pair below must evaluate truthy."""
        tm = TM("my test tm", description="aa", isOrdered=True)

        # Model: a user on the internet, a server outside any boundary,
        # and a DB plus a lambda-like datastore inside a cloud boundary.
        internet = Boundary("Internet")
        cloud = Boundary("Cloud")
        user = Actor("User", inBoundary=internet)
        server = Server("Server")
        db = Datastore("DB", inBoundary=cloud)
        db.type = DatastoreType.SQL
        func = Datastore("Lambda function", inBoundary=cloud)

        request = Dataflow(user, server, "request")
        response = Dataflow(server, user, "response", isResponse=True)
        user_query = Dataflow(user, db, "user query")
        server_query = Dataflow(server, db, "server query")
        func_query = Dataflow(func, db, "func query")

        applicable = ["Actor", "Boundary", "Dataflow", "Datastore", "Server"]
        scenarios = [
            (server, "target.oneOf(Server, Datastore)"),
            (server, "not target.oneOf(Actor, Dataflow)"),
            (request, "target.crosses(Boundary)"),
            (user_query, "target.crosses(Boundary)"),
            (server_query, "target.crosses(Boundary)"),
            (func_query, "not target.crosses(Boundary)"),
            (func_query, "not target.enters(Boundary)"),
            (func_query, "not target.exits(Boundary)"),
            (request, "not target.enters(Boundary)"),
            (request, "target.exits(Boundary)"),
            (response, "target.enters(Boundary)"),
            (response, "not target.exits(Boundary)"),
            (user, "target.inside(Boundary)"),
            (func, "not any(target.inputs)"),
            (server, "any(f.sink.oneOf(Datastore) and f.sink.type == DatastoreType.SQL for f in target.outputs)"),
        ]
        assert tm.check()
        for target, condition in scenarios:
            rule = Threat(SID="", target=applicable, condition=condition)
            assert rule.apply(target), f"Failed to match {target} against {condition}"
205 |
206 | class TestFunction:
207 | def test_encode_threat_data(self):
208 | findings = [
209 | Finding(
210 | description="A test description",
211 | severity="High",
212 | id="1",
213 | threat_id="INP01",
214 | cvss="9.876",
215 | response="A test response",
216 | ),
217 | Finding(
218 | description="An escape test
16 |
17 |
18 |
19 |
20 |
21 |
22 | Module pytm.report_util
23 |
24 |
25 |
26 |
27 | Expand source code
28 |
29 | class ReportUtils:
30 | @staticmethod
31 | def getParentName(element):
32 | from pytm import Boundary
33 | if (isinstance(element, Boundary)):
34 | parent = element.inBoundary
35 | if (parent is not None):
36 | return parent.name
37 | else:
38 | return str("")
39 | else:
40 | return "ERROR: getParentName method is not valid for " + element.__class__.__name__
41 |
42 |
43 | @staticmethod
44 | def getNamesOfParents(element):
45 | from pytm import Boundary
46 | if (isinstance(element, Boundary)):
47 | parents = [p.name for p in element.parents()]
48 | return parents
49 | else:
50 | return "ERROR: getNamesOfParents method is not valid for " + element.__class__.__name__
51 |
52 | @staticmethod
53 | def getFindingCount(element):
54 | from pytm import Element
55 | if (isinstance(element, Element)):
56 | return str(len(list(element.findings)))
57 | else:
58 | return "ERROR: getFindingCount method is not valid for " + element.__class__.__name__
59 |
60 | @staticmethod
61 | def getElementType(element):
62 | from pytm import Element
63 | if (isinstance(element, Element)):
64 | return str(element.__class__.__name__)
65 | else:
66 | return "ERROR: getElementType method is not valid for " + element.__class__.__name__
67 |
68 |
69 |
71 |
73 |
75 |
76 |
77 |
78 |
79 | class ReportUtils
80 |
81 | -
82 |
83 |
84 |
85 | Expand source code
86 |
87 | class ReportUtils:
88 | @staticmethod
89 | def getParentName(element):
90 | from pytm import Boundary
91 | if (isinstance(element, Boundary)):
92 | parent = element.inBoundary
93 | if (parent is not None):
94 | return parent.name
95 | else:
96 | return str("")
97 | else:
98 | return "ERROR: getParentName method is not valid for " + element.__class__.__name__
99 |
100 |
101 | @staticmethod
102 | def getNamesOfParents(element):
103 | from pytm import Boundary
104 | if (isinstance(element, Boundary)):
105 | parents = [p.name for p in element.parents()]
106 | return parents
107 | else:
108 | return "ERROR: getNamesOfParents method is not valid for " + element.__class__.__name__
109 |
110 | @staticmethod
111 | def getFindingCount(element):
112 | from pytm import Element
113 | if (isinstance(element, Element)):
114 | return str(len(list(element.findings)))
115 | else:
116 | return "ERROR: getFindingCount method is not valid for " + element.__class__.__name__
117 |
118 | @staticmethod
119 | def getElementType(element):
120 | from pytm import Element
121 | if (isinstance(element, Element)):
122 | return str(element.__class__.__name__)
123 | else:
124 | return "ERROR: getElementType method is not valid for " + element.__class__.__name__
125 |
126 | Static methods
127 |
128 |
129 | def getElementType(element)
130 |
131 | -
132 |
133 |
134 |
135 | Expand source code
136 |
137 | @staticmethod
138 | def getElementType(element):
139 | from pytm import Element
140 | if (isinstance(element, Element)):
141 | return str(element.__class__.__name__)
142 | else:
143 | return "ERROR: getElementType method is not valid for " + element.__class__.__name__
144 |
145 |
146 |
147 | def getFindingCount(element)
148 |
149 | -
150 |
151 |
152 |
153 | Expand source code
154 |
155 | @staticmethod
156 | def getFindingCount(element):
157 | from pytm import Element
158 | if (isinstance(element, Element)):
159 | return str(len(list(element.findings)))
160 | else:
161 | return "ERROR: getFindingCount method is not valid for " + element.__class__.__name__
162 |
163 |
164 |
165 | def getNamesOfParents(element)
166 |
167 | -
168 |
169 |
170 |
171 | Expand source code
172 |
173 | @staticmethod
174 | def getNamesOfParents(element):
175 | from pytm import Boundary
176 | if (isinstance(element, Boundary)):
177 | parents = [p.name for p in element.parents()]
178 | return parents
179 | else:
180 | return "ERROR: getNamesOfParents method is not valid for " + element.__class__.__name__
181 |
182 |
183 |
184 | def getParentName(element)
185 |
186 | -
187 |
188 |
189 |
190 | Expand source code
191 |
192 | @staticmethod
193 | def getParentName(element):
194 | from pytm import Boundary
195 | if (isinstance(element, Boundary)):
196 | parent = element.inBoundary
197 | if (parent is not None):
198 | return parent.name
199 | else:
200 | return str("")
201 | else:
202 | return "ERROR: getParentName method is not valid for " + element.__class__.__name__
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
236 |
237 |
240 |
241 |