├── tests
├── __init__.py
└── test_classical_classifier.py
├── version
├── src
└── tclf
│ ├── __init__.py
│ ├── types.py
│ └── classical_classifier.py
├── .github
├── CODEOWNERS
├── release.yml
├── dependabot.yaml
└── workflows
│ ├── tests.yaml
│ └── publish.yaml
├── docs
├── img
│ ├── gsu.png
│ ├── logo.png
│ ├── favicon.ico
│ └── header.png
├── reference.md
├── javascripts
│ └── katex.js
├── nan_handling.md
├── overrides
│ └── partials
│ │ └── comments.html
├── naming_conventions.md
├── option_trade_classification.md
├── index.md
├── rules.md
└── apa-6th-edition.csl
├── .vscode
└── settings.json
├── sonar-project.properties
├── CITATION.cff
├── CHANGELOG.md
├── .gitignore
├── LICENSE
├── .pre-commit-config.yaml
├── mkdocs.yml
├── pyproject.toml
└── README.md
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/version:
--------------------------------------------------------------------------------
1 | 0.3.0
2 |
--------------------------------------------------------------------------------
/src/tclf/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @karelze
2 |
--------------------------------------------------------------------------------
/docs/img/gsu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KarelZe/tclf/HEAD/docs/img/gsu.png
--------------------------------------------------------------------------------
/docs/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KarelZe/tclf/HEAD/docs/img/logo.png
--------------------------------------------------------------------------------
/docs/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KarelZe/tclf/HEAD/docs/img/favicon.ico
--------------------------------------------------------------------------------
/docs/img/header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KarelZe/tclf/HEAD/docs/img/header.png
--------------------------------------------------------------------------------
/docs/reference.md:
--------------------------------------------------------------------------------
1 | Welcome to the reference.
2 |
3 |
4 | ::: tclf.classical_classifier.ClassicalClassifier
5 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "sonarlint.connectedMode.project": {
3 | "connectionId": "vscode",
4 | "projectKey": "KarelZe_tclf"
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/sonar-project.properties:
--------------------------------------------------------------------------------
1 | sonar.issue.ignore.multicriteria=S117
2 | sonar.issue.ignore.multicriteria.S117.ruleKey=python:S117
3 | sonar.issue.ignore.multicriteria.S117.resourceKey=*
4 |
--------------------------------------------------------------------------------
/src/tclf/types.py:
--------------------------------------------------------------------------------
1 | """Common type hints."""
2 |
3 | from typing import Union
4 |
5 | import numpy as np
6 | import numpy.typing as npt
7 | import pandas as pd
8 | from scipy.sparse import spmatrix
9 |
10 | MatrixLike = Union[np.ndarray, pd.DataFrame, spmatrix]
11 | ArrayLike = Union[npt.ArrayLike, pd.Series]
12 |
--------------------------------------------------------------------------------
/docs/javascripts/katex.js:
--------------------------------------------------------------------------------
1 | document$.subscribe(({ body }) => {
2 | renderMathInElement(body, {
3 | delimiters: [
4 | { left: "$$", right: "$$", display: true },
5 | { left: "$", right: "$", display: false },
6 | { left: "\\(", right: "\\)", display: false },
7 | { left: "\\[", right: "\\]", display: true }
8 | ],
9 | })
10 | })
11 |
--------------------------------------------------------------------------------
/.github/release.yml:
--------------------------------------------------------------------------------
1 | # .github/release.yml
2 |
3 | changelog:
4 | exclude:
5 | labels:
6 | - ignore-for-release
7 | authors:
8 | - octocat
9 | categories:
10 | - title: Code
11 | labels:
12 | - python
13 | - enhancement
14 | - title: Documentation
15 | labels:
16 | - documentation
17 | - title: Other Changes
18 | labels:
19 | - "*"
20 |
--------------------------------------------------------------------------------
/docs/nan_handling.md:
--------------------------------------------------------------------------------
1 | We take care to treat `NaN` values correctly. If features relevant for classification like the trade price or quoted bid/ask prices are missing, no classification is performed and classification of the trade is deferred to the subsequent rule or fallback strategy.
2 |
3 | Alternatively, you can provide imputed data. See [`sklearn.impute`](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.impute) for details.
4 |
--------------------------------------------------------------------------------
/.github/dependabot.yaml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 | # Maintain dependencies for pip
9 | - package-ecosystem: "pip"
10 | directory: "/" # Location of package manifests
11 | schedule:
12 | interval: "daily"
13 | assignees:
14 | - "KarelZe"
15 | # Maintain dependencies for GitHub Actions
16 | - package-ecosystem: "github-actions"
17 | directory: "/"
18 | schedule:
19 | interval: "daily"
20 | assignees:
21 | - "KarelZe"
22 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | title: tclf
3 | message: >-
4 | If you use this software, please cite it using the
5 | metadata from this file.
6 | type: software
7 | authors:
8 | - given-names: Markus
9 | family-names: Bilz
10 | email: web@markusbilz.com
11 | orcid: 'https://orcid.org/0009-0009-6833-4393'
12 | repository-code: 'https://github.com/KarelZe/tclf'
13 | url: 'https://karelze.github.io/tclf/'
14 | abstract: >-
15 | `tclf` is a scikit-learn-compatible implementation of
16 | trade classification algorithms to classify financial
17 | markets transactions into buyer- and seller-initiated
18 | trades.
19 | keywords:
20 | - clnv
21 | - lee-ready
22 | - rule-based classifier
23 | - scikit-learn
24 | - trade-classification
25 | license: BSD-3-Clause
26 | version: 0.3.0
27 | date-released: '2024-02-18'
28 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## 0.3.0 (2024-02-18)
2 |
3 | ### Feat
4 |
5 | - bump version file in pre-commit hook
6 |
7 | ### Refactor
8 |
9 | - move version to version file 📑 (#96)
10 |
11 | ## 0.0.8 (2024-02-04)
12 |
13 | ## 0.0.7 (2024-02-04)
14 |
15 | ### Feat
16 |
17 | - improve links in examples🔗 (#90)
18 |
19 | ## 0.0.6 (2024-01-15)
20 |
21 | ### Feat
22 |
23 | - **ci**: add codespeedHQ performance analysis🐇 (#81)
24 | - **ci**: skip python pipelines for changes in docs (#80)
25 | - **doc**: add analytics 🔢 (#79)
26 | - **doc**: add light/dark mode to documentation🧾 (#78)
27 |
28 | ### Refactor
29 |
30 | - **docs**: restructure readme + index (#82)
31 | - use generic checks for missing columns🙂 (#71)
32 |
33 | ## 0.0.5 (2024-01-06)
34 |
35 | ## 0.0.4 (2024-01-06)
36 |
37 | ## 0.0.3 (2024-01-06)
38 |
39 | ## 0.0.2 (2023-12-28)
40 |
41 | ## 0.0.1 (2023-12-28)
42 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yaml:
--------------------------------------------------------------------------------
1 | name: Tests
2 | permissions:
3 | contents: read
4 | on:
5 | push:
6 | paths:
7 | - '**/*'
8 | - '!docs/**'
9 | jobs:
10 | build:
11 | name: test with ${{ matrix.python-version }} on ${{ matrix.os }}
12 | runs-on: ${{ matrix.os }}
13 | strategy:
14 | matrix:
15 | python-version:
16 | - "3.9"
17 | - "3.11"
18 | os: [ubuntu-latest]
19 | steps:
20 | - name: Git clone
21 | uses: actions/checkout@v5
22 | - name: Install uv and python
23 | uses: astral-sh/setup-uv@v7
24 | with:
25 | enable-cache: true
26 | python-version: ${{ matrix.python-version }}
27 | - name: Run tests
28 | run: |
29 | uvx --with tox-uv tox -e test
30 | - name: Upload Coverage to Codecov
31 | uses: codecov/codecov-action@v5
32 | with:
33 | token: ${{ secrets.CODECOV_TOKEN }}
34 | files: ./coverage.xml
35 | env_vars: OS,PYTHON
36 | fail_ci_if_error: true
37 | verbose: true
38 | # - name: Run benchmarks
39 | # if: matrix.os == 'ubuntu-latest'
40 | # uses: CodSpeedHQ/action@v3
41 | # with:
42 | # token: ${{ secrets.CODSPEED_TOKEN }}
43 | # # see: https://docs.codspeed.io/integrations/ci/github-actions
44 | # run: uv run pytest tests/ --codspeed
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # scikit-learn specific
10 | doc/_build/
11 | doc/auto_examples/
12 | doc/modules/generated/
13 | doc/datasets/generated/
14 |
15 | # Distribution / packaging
16 |
17 | .Python
18 | env/
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *.cover
53 | .hypothesis/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 |
62 | # Sphinx documentation
63 | site/
64 |
65 | # PyBuilder
66 | target/
67 |
68 | # pixi
69 | # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
70 | # This is especially recommended for binary packages to ensure reproducibility, and is more
71 | # commonly ignored for libraries.
72 | .pixi
73 | pixi.lock
74 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016, Vighnesh Birodkar and scikit-learn-contrib contributors
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | * Neither the name of project-template nor the names of its
15 | contributors may be used to endorse or promote products derived from
16 | this software without specific prior written permission.
17 |
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ci:
2 | autofix_commit_msg: |
3 | ci: auto fixes from pre-commit.com hooks
4 |
5 | for more information, see https://pre-commit.ci
6 | autofix_prs: true
7 | autoupdate_branch: ''
8 | autoupdate_commit_msg: 'ci: pre-commit autoupdate'
9 | autoupdate_schedule: weekly
10 | skip: []
11 | submodules: false
12 | # See https://pre-commit.com for more information
13 | # See https://pre-commit.com/hooks.html for more hooks
14 | repos:
15 | - repo: https://github.com/pre-commit/pre-commit-hooks
16 | rev: v6.0.0
17 | hooks:
18 | - id: trailing-whitespace
19 | - id: check-added-large-files
20 | - id: check-builtin-literals
21 | - id: fix-byte-order-marker
22 | - id: check-merge-conflict
23 | - id: check-symlinks
24 | - id: check-toml
25 | - id: check-yaml
26 | args: ['--unsafe']
27 | - id: debug-statements
28 | - id: end-of-file-fixer
29 | - id: mixed-line-ending
30 | - id: trailing-whitespace
31 | - repo: https://github.com/pre-commit/mirrors-mypy
32 | rev: v1.19.1
33 | hooks:
34 | - id: mypy
35 | # yaml requires additional stubs.
36 | # Similar to: https://stackoverflow.com/a/73603491/5755604
37 | additional_dependencies: ['types-PyYAML']
38 | - repo: https://github.com/astral-sh/ruff-pre-commit
39 | rev: v0.14.10
40 | hooks:
41 | - id: ruff
42 | args:
43 | - --fix
44 | - id: ruff-format
45 | - repo: https://github.com/commitizen-tools/commitizen
46 | rev: v4.10.0
47 | hooks:
48 | - id: commitizen
49 | stages: [commit-msg]
50 | exclude: "^(references|reports)"
51 |
--------------------------------------------------------------------------------
/docs/overrides/partials/comments.html:
--------------------------------------------------------------------------------
1 | {% if page.meta.comments %}
2 |
3 |
18 |
19 |
20 |
54 | {% endif %}
55 |
--------------------------------------------------------------------------------
/docs/naming_conventions.md:
--------------------------------------------------------------------------------
1 | For `tclf` to work, we impose constraints on the column names. The following input is required by each rule. Data requirements are additive, if multiple rules are applied.
2 |
3 |
4 |
5 |
6 | | Rule | Layer Name | Columns |
7 | |-----------------------------|------------------------|-------------------------------------------------------------------------------------------|
8 | | No classification | `("nan","sub")` | None |
9 | | Tick test | `("tick","sub")` | `trade_price`, `price_{sub}_lag` |
10 | | Reverse tick test           | `("rev_tick","sub")`   | `trade_price`, `price_{sub}_lead`                                                          |
11 | | Quote Rule | `("quote","sub")` | `trade_price`, `ask_{sub}`, `bid_{sub}` |
12 | | Lee-Ready Algorithm | `("lr","sub")` | `trade_price`, `price_{sub}_lag`, `ask_{sub}`, `bid_{sub}` |
13 | | EMO Algorithm | `("emo","sub")` | `trade_price`, `price_{sub}_lag`, `ask_{sub}`, `bid_{sub}` |
14 | | CLNV Rule | `("clnv","sub")` | `trade_price`, `price_{sub}_lag`, `ask_{sub}`, `bid_{sub}` |
15 | | Reverse Lee-Ready Algorithm | `("rev_lr","sub")` | `trade_price`, `price_{sub}_lead`, `ask_{sub}`, `bid_{sub}` |
16 | | Reverse EMO Algorithm | `("rev_emo","sub")` | `trade_price`, `price_{sub}_lead`, `ask_{sub}`, `bid_{sub}` |
17 | | Reverse CLNV Rule | `("rev_clnv","sub")` | `trade_price`, `price_{sub}_lead`, `ask_{sub}`, `bid_{sub}` |
18 | | Depth rule | `("depth","sub")` | `trade_price`, `ask_{sub}`, `bid_{sub}`, `ask_size_{sub}`, `bid_size_{sub}` |
19 | | Trade size rule | `("trade_size","sub")` | `trade_size`, `ask_size_{sub}`, `bid_size_{sub}` |
20 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: tclf
2 | site_description: tclf, a scikit-learn-compatible implementation of popular trade classification algorithms to classify financial markets transactions into buyer- and seller-initiated trades.
3 | site_url: https://karelze.github.io/tclf/
4 |
5 | theme:
6 | custom_dir: docs/overrides
7 | logo: img/logo.png
8 | favicon: img/favicon.ico
9 | name: material
10 | darkMode: true
11 | palette:
12 | # Palette toggle for automatic mode
13 | - media: "(prefers-color-scheme)"
14 | toggle:
15 | icon: material/brightness-auto
16 | name: Switch to light mode
17 | primary: black
18 | accent: indigo
19 | # Palette toggle for light mode
20 | - media: "(prefers-color-scheme: light)"
21 | scheme: default
22 | toggle:
23 | icon: material/brightness-7
24 | name: Switch to dark mode
25 | primary: black
26 | accent: indigo
27 | # Palette toggle for dark mode
28 | - media: "(prefers-color-scheme: dark)"
29 | scheme: slate
30 | toggle:
31 | icon: material/brightness-4
32 | name: Switch to system preference
33 | primary: black
34 | accent: indigo
35 | icon:
36 | repo: fontawesome/brands/github
37 | edit: material/pencil
38 | view: material/eye
39 | features:
40 | - content.action.edit
41 | - navigation.tabs
42 | - navigation.instant.progress
43 | - search.suggest
44 | - search.highlight
45 |
46 | repo_name: karelze/tclf
47 | repo_url: https://github.com/karelze/tclf
48 | edit_uri: edit/main/docs/
49 |
50 | nav:
51 | - Home: index.md
52 | - Rules: rules.md
53 | - API reference: reference.md
54 | - Examples:
55 | - Option trade classification: option_trade_classification.md
56 | - More:
57 | - Naming conventions: naming_conventions.md
58 | - Handling of NaNs: nan_handling.md
59 |
60 | markdown_extensions:
61 | - toc:
62 | permalink: true
63 | - markdown.extensions.codehilite:
64 | guess_lang: false
65 | - admonition
66 | - codehilite
67 | - extra
68 | - pymdownx.details
69 | - pymdownx.superfences:
71 | custom_fences:
72 | - name: mermaid
73 | class: mermaid
74 | format: !!python/name:pymdownx.superfences.fence_code_format ''
75 | - pymdownx.tabbed:
76 | alternate_style: true
77 | - footnotes
78 | - pymdownx.arithmatex:
79 | generic: true
80 |
81 |
82 | plugins:
83 | - search
84 | - mkdocstrings:
85 | default_handler: python
86 | handlers:
87 | python:
88 | paths: [src]
89 | - bibtex:
90 | bib_file: "docs/bibliography.bib"
91 | csl_file: "docs/apa-6th-edition.csl"
92 | cite_inline: true
93 |
94 | extra:
95 | social:
96 | - icon: fontawesome/brands/github
97 | link: https://github.com/karelze/tclf
98 | - icon: fontawesome/brands/linkedin
99 | link: https://www.linkedin.com/in/markus-bilz/
100 | analytics:
101 | provider: google
102 | property: G-H3H7C48XJR
103 |
104 | extra_javascript:
105 | - javascripts/katex.js
106 | - https://unpkg.com/mermaid@8.4.6/dist/mermaid.min.js
107 | - https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.16.7/katex.min.js
108 | - https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.16.7/contrib/auto-render.min.js
109 |
110 | extra_css:
111 | - https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.16.7/katex.min.css
112 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yaml:
--------------------------------------------------------------------------------
1 | # adapted from here: https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
2 |
3 | name: Publish Python 🐍 distribution 📦 to PyPI and TestPyPI
4 |
5 | on:
6 | push:
7 | paths:
8 | - '**/*'
9 | - '!docs/**'
10 |
11 | jobs:
12 | build:
13 | name: Build distribution 📦
14 | runs-on: ubuntu-latest
15 |
16 | steps:
17 | - uses: actions/checkout@v5
18 | - name: Set up Python
19 | uses: actions/setup-python@v6
20 | with:
21 | python-version: "3.x"
22 | - name: Install pypa/build
23 | run: >-
24 | python -m
25 | pip install
26 | build
27 | --user
28 | - name: Build a binary wheel and a source tarball
29 | run: python -m build
30 | - name: Store the distribution packages
31 | uses: actions/upload-artifact@v6
32 | with:
33 | name: python-package-distributions
34 | path: dist/
35 | publish-to-pypi:
36 | name: >-
37 | Publish Python 🐍 distribution 📦 to PyPI
38 | if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes
39 | needs:
40 | - build
41 | runs-on: ubuntu-latest
42 | environment:
43 | name: pypi
44 | url: https://pypi.org/p/tclf
45 | permissions:
46 | id-token: write
47 | steps:
48 | - name: Download all the dists
49 | uses: actions/download-artifact@v7
50 | with:
51 | name: python-package-distributions
52 | path: dist/
53 | - name: Publish distribution 📦 to PyPI
54 | uses: pypa/gh-action-pypi-publish@release/v1
55 |
56 | github-release:
57 | name: >-
58 | Sign the Python 🐍 distribution 📦 with Sigstore
59 | and upload them to GitHub Release
60 | needs:
61 | - publish-to-pypi
62 | runs-on: ubuntu-latest
63 |
64 | permissions:
65 | contents: write
66 | id-token: write
67 |
68 | steps:
69 | - name: Download all the dists
70 | uses: actions/download-artifact@v7
71 | with:
72 | name: python-package-distributions
73 | path: dist/
74 | - name: Sign the dists with Sigstore
75 | uses: sigstore/gh-action-sigstore-python@v3.2.0
76 | with:
77 | inputs: >-
78 | ./dist/*.tar.gz
79 | ./dist/*.whl
80 | - name: Create GitHub Release
81 | env:
82 | GITHUB_TOKEN: ${{ github.token }}
83 | run: >-
84 | gh release create
85 | '${{ github.ref_name }}'
86 | --repo '${{ github.repository }}'
87 | --title 'v${{ github.ref_name }}'
88 | --generate-notes
89 | - name: Upload artifact signatures to GitHub Release
90 | env:
91 | GITHUB_TOKEN: ${{ github.token }}
92 | # Upload to GitHub Release using the `gh` CLI.
93 | # `dist/` contains the built packages, and the
94 | # sigstore-produced signatures and certificates.
95 | run: >-
96 | gh release upload
97 | '${{ github.ref_name }}' dist/**
98 | --repo '${{ github.repository }}'
99 |
100 | publish-to-testpypi:
101 | name: Publish Python 🐍 distribution 📦 to TestPyPI
102 | needs:
103 | - build
104 | runs-on: ubuntu-latest
105 |
106 | environment:
107 | name: testpypi
108 | url: https://test.pypi.org/p/tclf
109 |
110 | permissions:
111 | id-token: write
112 |
113 | steps:
114 | - name: Download all the dists
115 | uses: actions/download-artifact@v7
116 | with:
117 | name: python-package-distributions
118 | path: dist/
119 | - name: Publish distribution 📦 to TestPyPI
120 | uses: pypa/gh-action-pypi-publish@release/v1
121 | with:
122 | skip-existing: true
123 | repository-url: https://test.pypi.org/legacy/
124 |
--------------------------------------------------------------------------------
/docs/option_trade_classification.md:
--------------------------------------------------------------------------------
1 | ---
2 | comments: true
3 | ---
4 | ## Setup Rules
5 | This tutorial aims to reproduce plots from a working paper by Grauer et al. [-@grauerOptionTradeClassification2022], which achieves state-of-the-art performance in option trade classification. The authors recommend to classify option trades by:
6 | > [...] our new trade size rule together with quote rules successively applied to NBBO and quotes on the trading venue. Quotes at the midpoint on both the NBBO and the exchange should be classified first with the depth rule and any remaining trades with the reverse tick test.
7 |
8 |
9 | There's a lot going on.🥵
10 |
11 | To match the author's description, we first set up `layers`. We use the `tclf` implementation of the [tradesize](rules.md#trade-size-rule), [quote](rules.md#quote-rule), and [depth rule](rules.md#depth-rule), as well as [reverse tick test](rules.md#reverse-tick-test). The subset named "ex" refers to exchange-specific data, "best" to the NBBO and "all" for inter-exchange level data. Identical to the paper, the reverse tick test is applied at the inter-exchange level, due to the devastating results of tick-based algorithms at the exchange level. The authors perform random classification on unclassified trades, hence we choose `strategy="random"`.
12 | ```python
13 | from tclf.classical_classifier import ClassicalClassifier
14 |
15 | layers = [
16 | ("trade_size", "ex"),
17 | ("quote", "best"),
18 | ("quote", "ex"),
19 | ("depth", "best"),
20 | ("depth", "ex"),
21 | ("rev_tick", "all"),
22 | ]
23 | clf = ClassicalClassifier(layers=layers, strategy="random")
24 | ```
25 |
26 | ## Prepare Dataset
27 |
28 | Next, we need to load a dataset of option trades. I chose one, which was recorded at the ISE and used in the paper to evaluate the trade classification rules. I access it from a Google Cloud bucket and load it into a pandas dataframe `X`.
29 |
30 | ```python
31 | import gcsfs
32 | import pandas as pd
33 |
34 | fs = gcsfs.GCSFileSystem()
35 |
36 | gcs_loc = fs.glob(
37 | "gs://tclf/bucket_name/dir_name/*"
38 | )
39 | X = pd.read_parquet(gcs_loc, engine="pyarrow", filesystem=fs)
40 | ```
41 | Unfortunately, the dataset does not yet follow the [naming conventions](https://karelze.github.io/tclf/naming_conventions/) and is missing columns required by `tclf`. We take care of this next.😅
42 |
43 | ```python
44 | clf.fit(X)
45 | >>> ValueError: Expected to find columns: ['ask_best', 'ask_size_best', 'bid_best', 'bid_size_best', 'trade_price', 'trade_size']. Check the naming/presence of columns. See: https://karelze.github.io/tclf/naming_conventions/
46 | ```
47 |
48 | The calculation of the [depth rule](rules.md#depth-rule) requires the columns `ask_{subset}`, `bid_{subset}`, and `trade_price`, as well as `ask_size_{subset}`, `bid_size_{subset}` and `trade_size`. The columns `BEST_ASK`, `BEST_BID`, `TRADE_PRICE`, and `TRADE_SIZE` are renamed to match our naming conventions of `ask_{subset}`, `bid_{subset}`, `trade_price`, and `trade_size`.
49 |
50 | As there is no `{ask/bid}_size_best` at the NBBO level (`subset="best"`), I copy the columns from the trading venue. This allows us to mimic the author's decision to filter for mid-spread at the NBBO level, but classify by the trade size relative to the ask/bid size at the exchange.
51 |
52 | We save the true label `y_true` and the timestamp of the trade `QUOTE_DATETIME` to a new dataframe, named `X_meta`, which we use for plotting. We remove these columns from the original dataframe.
53 |
54 | ```python
55 | X = X.rename(
56 | {
57 | "TRADE_PRICE": "trade_price",
58 | "TRADE_SIZE": "trade_size",
59 | "BEST_ASK": "ask_best",
60 | "BEST_BID": "bid_best",
61 | "buy_sell": "y_true",
62 | },
63 | axis=1,
64 | )
65 |
66 | features_meta = ["QUOTE_DATETIME", "y_true"]
67 | X_meta = X[features_meta]
68 | X = X.drop(columns=features_meta)
69 |
70 | X[["ask_size_best", "bid_size_best"]] = X[["ask_size_ex", "bid_size_ex"]]
71 | ```
72 | ## Generate Results
73 | Next, we can simply pass the prepared dataframe `X` to the classifier and append the results to our dataframe `X_meta`.
74 |
75 | ```python
76 | X_meta["y_pred"] = clf.fit(X).predict(X)
77 | ```
78 |
79 | To estimate the accuracy over time, we group by date and estimate the accuracy for each group. We make use of [`sklearn.metrics.accuracy_score`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html).
80 |
81 | ```python
82 | from sklearn.metrics import accuracy_score
83 |
84 | df_plot = X_meta.groupby(X_meta.QUOTE_DATETIME.dt.date).apply(
85 | lambda x: accuracy_score(x["y_true"], x["y_pred"]) * 100
86 | )
87 | ```
88 |
89 | ## Plot Results
90 |
91 | We use [`matplotlib`](https://matplotlib.org/) to match the plots from the paper as closely as possible.
92 |
93 |
94 | ```python
95 | import matplotlib.pyplot as plt
96 | from matplotlib.dates import DateFormatter
97 | from matplotlib.ticker import PercentFormatter
98 |
99 | plt.rcParams["font.family"] = "serif"
100 |
101 | plt.figure(figsize=(9, 3))
102 | plt.grid(True, axis="y")
103 |
104 | # line plot
105 | plt.plot(df_plot, color="tab:orange", linewidth=1.5, label="ISE")
106 |
107 | # y-axis + x-axis
108 | plt.ylim(0, 100)
109 | plt.ylabel("Overall success rate")
110 | ax = plt.gca()
111 | ax.yaxis.set_major_formatter(PercentFormatter(100, decimals=0))
112 | ax.xaxis.set_major_formatter(DateFormatter("%b-%y"))
113 |
114 | # title + legend
115 | plt.title(
116 | "C: Performance of trade classification based on\n trade size rule + depth rule + reverse LR (NBBO,exchange)",
117 | loc="left",
118 | )
119 | plt.legend(loc="lower left", frameon=False)
120 |
121 | plt.show()
122 | ```
123 |
124 | **Output:**
125 |
126 | 
127 |
128 | Pretty close to the author's work. Just spanning a shorter period of time.🙂
129 |
130 | ## Footnotes
131 | \bibliography
132 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools >= 61.0"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "tclf"
7 | authors = [
8 | { name="Markus Bilz", email="github@markusbilz.com" },
9 | ]
10 | description = "Classify trades using trade classification algorithms 🐍"
11 | readme = "README.md"
12 | license = {file = "LICENSE"}
13 | requires-python = ">=3.9"
14 | classifiers = [
15 | "Development Status :: 3 - Alpha",
16 | "Programming Language :: Python :: 3 :: Only",
17 | "Programming Language :: Python :: 3.9",
18 | "Programming Language :: Python :: 3.10",
19 | "Programming Language :: Python :: 3.11",
20 | "Programming Language :: Python :: 3.12",
21 | "Programming Language :: Python :: 3.13",
22 | "Programming Language :: Python :: 3.14",
23 | "Operating System :: OS Independent",
24 | "Intended Audience :: Information Technology",
25 | "Intended Audience :: Science/Research",
26 | "Topic :: Scientific/Engineering",
27 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
28 | ]
29 |
30 | dependencies = [
31 | "numpy >1.24.4,<2.0.0",
32 | "pandas",
33 | "scikit-learn",
34 | ]
35 |
36 | dynamic = ["version"]
37 |
38 | [project.optional-dependencies]
39 | dev = [
40 | # doc
41 | "mkdocs",
42 | "mkdocs-material",
43 | "mkdocstrings-python",
44 | # build
45 | "build",
46 | # test
47 | "pytest",
48 | "pytest-cov",
49 | # linting
50 | "pre-commit",
51 | "mypy",
52 | "ruff",
53 | "tox-uv>=1.13.1",
54 | "tox>=4.23.2",
55 | "pytest-codspeed",
56 | "mkdocs-bibtex",
57 | "commitizen",
58 | "pypandoc-binary"
59 | ]
60 |
61 | [tool.setuptools.dynamic]
62 | version = {file = "version"}
63 |
64 | [project.urls]
65 | "Homepage" = "https://github.com/KarelZe/tclf"
66 | "Bug Tracker" = "https://github.com/KarelZe/tclf/issues"
67 |
68 | [tool.mypy]
69 | # https://github.com/python/mypy/issues/2410
70 | ignore_missing_imports = true
71 | disallow_untyped_defs = true
72 | disallow_untyped_calls = true
73 | disallow_incomplete_defs = true
74 |
75 | [tool.pytest.ini_options]
76 | minversion = 7.0
77 | addopts = "-ra -p no:warnings -v --cov --cov-report term-missing --doctest-modules"
78 | pythonpath = ["src"]
79 | testpaths = ["tests"]
80 |
81 | [tool.coverage.run]
82 | omit = [
83 | "debug_*.py",
    "tests/*",
85 | ]
86 | branch = true
87 |
88 | [tool.coverage.report]
89 | exclude_also = [
90 | "def __repr__",
91 | "if self\\.debug",
92 | "raise AssertionError",
93 | "raise NotImplementedError",
94 | "if 0:",
95 | "if __name__ == .__main__.:",
96 | "@(abc\\.)?abstractmethod",
97 | "if self.verbose:"
98 | ]
99 | show_missing = true
100 |
101 | [tool.commitizen]
102 | name = "cz_conventional_commits"
103 | version = "0.3.0"
104 | tag_format = "$version"
105 | version_files = [
106 | "docs/index.md:version",
107 | "pyproject.toml:version",
108 | "version",
109 | ]
110 | bump_message = "bump: version $current_version → $new_version"
111 | update_changelog_on_bump = true
112 | annotated_tag = true
113 |
114 |
115 | [tool.ruff]
116 |
117 | [tool.ruff.lint]
118 |
119 | # See rules: https://beta.ruff.rs/docs/rules/
120 | select = [
121 | "C", # flake8-comprehensions
122 | "D", # pydocstyle
123 | "DOC", # pydoclint
124 | "E", # pycodestyle errors
125 | "F", # pyflakes
126 | "FURB", # refurb
127 | "I", # isort
128 | "N", # pep8-naming
129 | "NPY", # numpy
130 | "PD", # pandas-vet
131 | "PIE", # misc lints
132 | "PT", # pytest
133 | "PTH", # flake8-use-pathlib
134 | "PGH", # pygrep
135 | "RET", # return
136 | "RUF", # ruff-specific rules
137 | "UP", # pyupgrade
138 | "SIM", # flake8-simplify
139 | "W", # pycodestyle warnings
140 | ]
141 |
142 | ignore = [
143 | "E501", # line too long, handled by black
144 | "N803", # argument name should be lowercase
145 | "N806", # variable name should be lowercase
146 | "C901", # too complex
147 | "D206", # indent with white space
148 | "W191", # tab identation
149 | ]
150 |
151 | preview = true
152 |
153 | [tool.ruff.format]
154 | preview = true
155 |
156 | [tool.ruff.lint.isort]
157 | known-first-party = ["tclf"]
158 | section-order = ["future", "standard-library", "third-party", "first-party", "local-folder"]
159 |
160 | [tool.ruff.lint.per-file-ignores]
161 | "__init__.py" = ["D104", "F401"] # disable missing docstrings in __init__, unused imports
162 |
163 | [tool.ruff.lint.pydocstyle]
164 | convention = "google"
165 |
166 | [tool.tox]
167 | env_list = ["format", "lint", "pre-commit"]
168 | min_version = "4.23"
169 |
170 | [tool.tox.env_run_base]
171 | runner = "uv-venv-lock-runner"
172 | allowlist_externals = ["/bin/sh"]
173 | skip_install = true
174 | with_dev = true
175 |
176 | [tool.tox.env.clean]
177 | description = "cleanup tasks (remove build artifacts and cache)"
178 | commands = [
179 | ["coverage", "erase"],
180 | ["sh", "-c", "rm -rf .mypy_cache .pytest_cache .ruff_cache dist mlruns reports"]
181 | ]
182 |
183 | [tool.tox.env.doc]
184 | description = "writing documentation using mkdocs"
185 | commands = [
186 | ["mkdocs", "serve"]
187 | ]
188 |
189 |
190 | [tool.tox.env.format]
191 | description = "code formatting using ruff"
192 | commands = [
193 | ["ruff", "format", { replace = "posargs", default = ["src", "tests"], extend = true }]
194 | ]
195 |
196 | [tool.tox.env.lint]
197 | description = "linting and syntax checks"
198 | commands = [
199 | ["ruff", "check", { replace = "posargs", default = ["src", "tests"], extend = true} ],
200 | ["ruff", "format", "--check", { replace = "posargs", default = ["src", "tests"], extend = true} ],
201 | ["mypy", { replace = "posargs", default = ["src"], extend = true} ]
202 | ]
203 |
204 | [tool.tox.env.pre-commit]
205 | description = "pre-commit hooks"
206 | commands = [["pre-commit", "run", "--all-files", "--show-diff-on-failure"]]
207 |
208 | [tool.tox.env.test]
209 | description = "tests"
210 |
211 | extras = [
212 | "dev"
213 | ]
214 | commands = [["pytest", "-v", "tests", "--cov=src", "--cov-report=xml"]]
215 |
216 | [tool.tox.env.build]
217 | description = "build the project"
218 | commands = [["uv", "build"]]
219 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Trade Classification With Python
2 |
3 | [](https://github.com/KarelZe/tclf/actions)
4 | [](https://codecov.io/gh/KarelZe/tclf/tree/main/graph)
5 | [](https://sonarcloud.io/summary/new_code?id=KarelZe_tclf)
6 |
7 | 
8 |
9 | **Documentation ✒️:** [https://karelze.github.io/tclf/](https://karelze.github.io/tclf/)
10 |
11 | **Source Code 🐍:** [https://github.com/KarelZe/tclf](https://github.com/KarelZe/tclf)
12 |
13 | `tclf` is a [`scikit-learn`](https://scikit-learn.org/stable/)-compatible implementation of trade classification algorithms to classify financial markets transactions into buyer- and seller-initiated trades.
14 |
15 | The key features are:
16 |
17 | * **Easy**: Easy to use and learn.
18 | * **Sklearn-compatible**: Compatible to the sklearn API. Use sklearn metrics and visualizations.
19 | * **Feature complete**: Wide range of supported algorithms. Use the algorithms individually or stack them like LEGO blocks.
20 |
21 | ## Installation
22 |
23 | **pip**
24 | ```console
25 | pip install tclf
26 | ```
27 |
28 | **[uv⚡](https://github.com/astral-sh/uv)**
29 | ```console
30 | uv add tclf
31 | ```
32 |
33 | ## Supported Algorithms
34 |
35 | - (Rev.) CLNV rule[^1]
36 | - (Rev.) EMO rule[^2]
37 | - (Rev.) LR algorithm[^6]
38 | - (Rev.) Tick test[^5]
39 | - Depth rule[^3]
40 | - Quote rule[^4]
41 | - Tradesize rule[^3]
42 |
43 | For a primer on trade classification rules visit the [rules section 🆕](https://karelze.github.io/tclf/rules/) in our docs.
44 |
45 | ## Minimal Example
46 |
47 | Let's start simple: classify all trades by the quote rule and all other trades, which cannot be classified by the quote rule, randomly.
48 |
49 | Create a `main.py` with:
50 | ```python title="main.py"
51 | import numpy as np
52 | import pandas as pd
53 |
54 | from tclf.classical_classifier import ClassicalClassifier
55 |
56 | X = pd.DataFrame(
57 | [
58 | [1.5, 1, 3],
59 | [2.5, 1, 3],
60 | [1.5, 3, 1],
61 | [2.5, 3, 1],
62 | [1, np.nan, 1],
63 | [3, np.nan, np.nan],
64 | ],
65 | columns=["trade_price", "bid_ex", "ask_ex"],
66 | )
67 |
68 | clf = ClassicalClassifier(layers=[("quote", "ex")], strategy="random")
69 | clf.fit(X)
70 | probs = clf.predict_proba(X)
71 | ```
72 | Run your script with
73 | ```console
74 | $ python main.py
75 | ```
76 | In this example, input data is available as a pd.DataFrame with columns conforming to our [naming conventions](https://karelze.github.io/tclf/naming_conventions/).
77 |
78 | The parameter `layers=[("quote", "ex")]` sets the quote rule at the exchange level and `strategy="random"` specifies the fallback strategy for unclassified trades.
79 |
80 | ## Advanced Example
81 | Often it is desirable to classify both on exchange level data and nbbo data. Also, data might only be available as a numpy array. So let's extend the previous example by classifying using the quote rule at exchange level, then at nbbo and all other trades randomly.
82 |
83 | ```python title="main.py" hl_lines="6 16 17 20"
84 | import numpy as np
85 | from sklearn.metrics import accuracy_score
86 |
87 | from tclf.classical_classifier import ClassicalClassifier
88 |
89 | X = np.array(
90 | [
91 | [1.5, 1, 3, 2, 2.5],
92 | [2.5, 1, 3, 1, 3],
93 | [1.5, 3, 1, 1, 3],
94 | [2.5, 3, 1, 1, 3],
95 | [1, np.nan, 1, 1, 3],
96 | [3, np.nan, np.nan, 1, 3],
97 | ]
98 | )
99 | y_true = np.array([-1, 1, 1, -1, -1, 1])
100 | features = ["trade_price", "bid_ex", "ask_ex", "bid_best", "ask_best"]
101 |
102 | clf = ClassicalClassifier(
103 | layers=[("quote", "ex"), ("quote", "best")], strategy="random", features=features
104 | )
105 | clf.fit(X)
106 | acc = accuracy_score(y_true, clf.predict(X))
107 | ```
108 | In this example, input data is available as np.arrays with both exchange (`"ex"`) and nbbo data (`"best"`). We set the layers parameter to `layers=[("quote", "ex"), ("quote", "best")]` to classify trades first on subset `"ex"` and remaining trades on subset `"best"`. Additionally, we have to set `ClassicalClassifier(..., features=features)` to pass column information to the classifier.
109 |
110 | Like before, column/feature names must follow our [naming conventions](https://karelze.github.io/tclf/naming_conventions/).
111 |
112 | ## Other Examples
113 |
114 | For more practical examples, see our [examples section](https://karelze.github.io/tclf/option_trade_classification).
115 |
116 | ## Development
117 |
118 | We are using [`tox`](https://tox.wiki/en/latest/user_guide.html) with [`uv`](https://docs.astral.sh/uv/) for development.
119 |
120 | ```bash
121 | tox -e lint
122 | tox -e format
123 | tox -e test
124 | tox -e build
125 | ```
126 |
127 | ## Citation
128 |
129 | If you are using the package in publications, please cite as:
130 |
131 | ```latex
132 | @software{bilz_tclf_2023,
133 | author = {Bilz, Markus},
134 | license = {BSD 3},
135 | month = nov,
136 | title = {{tclf} -- trade classification with python},
137 | url = {https://github.com/KarelZe/tclf},
  version = {0.0.9},
139 | year = {2023}
140 | }
141 | ```
142 |
143 | ## Footnotes
144 |
145 | [^1]: Chakrabarty, B., Li, B., Nguyen, V., & Van Ness, R. A. (2007). Trade classification algorithms for electronic communications network trades.
Journal of Banking & Finance,
31(12), 3806–3821.
https://doi.org/10.1016/j.jbankfin.2007.03.003
146 |
147 | [^2]: Ellis, K., Michaely, R., & O’Hara, M. (2000). The accuracy of trade classification rules: Evidence from nasdaq.
The Journal of Financial and Quantitative Analysis,
35(4), 529–551.
https://doi.org/10.2307/2676254
148 |
149 | [^3]: Grauer, C., Schuster, P., & Uhrig-Homburg, M. (2023).
Option trade classification.
https://doi.org/10.2139/ssrn.4098475
150 |
151 | [^4]: Harris, L. (1989). A day-end transaction price anomaly.
The Journal of Financial and Quantitative Analysis,
24(1), 29.
https://doi.org/10.2307/2330746
152 |
[^5]: Hasbrouck, J. (2009). Trading costs and returns for U.S. equities: Estimating effective costs from daily data.
The Journal of Finance,
64(3), 1445–1477.
https://doi.org/10.1111/j.1540-6261.2009.01469.x
154 | [^6]: Lee, C., & Ready, M. J. (1991). Inferring trade direction from intraday data.
The Journal of Finance,
46(2), 733–746.
https://doi.org/10.1111/j.1540-6261.1991.tb02683.x
155 |
156 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Trade Classification With Python
2 |
3 | [](https://github.com/KarelZe/tclf/actions)
4 | [](https://codecov.io/gh/KarelZe/tclf/tree/main/graph)
5 | [](https://sonarcloud.io/summary/new_code?id=KarelZe_tclf)
6 |
7 | 
8 |
9 | **Documentation ✒️:** [https://karelze.github.io/tclf/](https://karelze.github.io/tclf/)
10 |
11 | **Source Code 🐍:** [https://github.com/KarelZe/tclf](https://github.com/KarelZe/tclf)
12 |
13 | `tclf` is a [`scikit-learn`](https://scikit-learn.org/stable/)-compatible implementation of trade classification algorithms to classify financial markets transactions into buyer- and seller-initiated trades.
14 |
15 | The key features are:
16 |
17 | * **Easy**: Easy to use and learn.
18 | * **Sklearn-compatible**: Compatible to the sklearn API. Use sklearn metrics and visualizations.
19 | * **Feature complete**: Wide range of supported algorithms. Use the algorithms individually or stack them like LEGO blocks.
20 |
21 | ## Installation
22 |
23 | **pip**
24 | ```console
25 | pip install tclf
26 | ```
27 |
28 | **[uv⚡](https://github.com/astral-sh/uv)**
29 | ```console
30 | uv add tclf
31 | ```
32 |
33 | ## Supported Algorithms
34 |
35 | - (Rev.) CLNV rule[^1]
36 | - (Rev.) EMO rule[^2]
37 | - (Rev.) LR algorithm[^6]
38 | - (Rev.) Tick test[^5]
39 | - Depth rule[^3]
40 | - Quote rule[^4]
41 | - Tradesize rule[^3]
42 |
43 | For a primer on trade classification rules visit the [rules section 🆕](https://karelze.github.io/tclf/rules/) in our docs.
44 |
45 | ## Minimal Example
46 |
47 | Let's start simple: classify all trades by the quote rule and all other trades, which cannot be classified by the quote rule, randomly.
48 |
49 | Create a `main.py` with:
50 | ```python title="main.py"
51 | import numpy as np
52 | import pandas as pd
53 |
54 | from tclf.classical_classifier import ClassicalClassifier
55 |
56 | X = pd.DataFrame(
57 | [
58 | [1.5, 1, 3],
59 | [2.5, 1, 3],
60 | [1.5, 3, 1],
61 | [2.5, 3, 1],
62 | [1, np.nan, 1],
63 | [3, np.nan, np.nan],
64 | ],
65 | columns=["trade_price", "bid_ex", "ask_ex"],
66 | )
67 |
68 | clf = ClassicalClassifier(layers=[("quote", "ex")], strategy="random")
69 | clf.fit(X)
70 | probs = clf.predict_proba(X)
71 | ```
72 | Run your script with
73 | ```console
74 | $ python main.py
75 | ```
76 | In this example, input data is available as a pd.DataFrame with columns conforming to our [naming conventions](https://karelze.github.io/tclf/naming_conventions/).
77 |
78 | The parameter `layers=[("quote", "ex")]` sets the quote rule at the exchange level and `strategy="random"` specifies the fallback strategy for unclassified trades.
79 |
80 | ## Advanced Example
81 | Often it is desirable to classify both on exchange level data and nbbo data. Also, data might only be available as a numpy array. So let's extend the previous example by classifying using the quote rule at exchange level, then at nbbo and all other trades randomly.
82 |
83 | ```python title="main.py" hl_lines="6 16 17 20"
84 | import numpy as np
85 | from sklearn.metrics import accuracy_score
86 |
87 | from tclf.classical_classifier import ClassicalClassifier
88 |
89 | X = np.array(
90 | [
91 | [1.5, 1, 3, 2, 2.5],
92 | [2.5, 1, 3, 1, 3],
93 | [1.5, 3, 1, 1, 3],
94 | [2.5, 3, 1, 1, 3],
95 | [1, np.nan, 1, 1, 3],
96 | [3, np.nan, np.nan, 1, 3],
97 | ]
98 | )
99 | y_true = np.array([-1, 1, 1, -1, -1, 1])
100 | features = ["trade_price", "bid_ex", "ask_ex", "bid_best", "ask_best"]
101 |
102 | clf = ClassicalClassifier(
103 | layers=[("quote", "ex"), ("quote", "best")], strategy="random", features=features
104 | )
105 | clf.fit(X)
106 | acc = accuracy_score(y_true, clf.predict(X))
107 | ```
108 | In this example, input data is available as np.arrays with both exchange (`"ex"`) and nbbo data (`"best"`). We set the layers parameter to `layers=[("quote", "ex"), ("quote", "best")]` to classify trades first on subset `"ex"` and remaining trades on subset `"best"`. Additionally, we have to set `ClassicalClassifier(..., features=features)` to pass column information to the classifier.
109 |
110 | Like before, column/feature names must follow our [naming conventions](https://karelze.github.io/tclf/naming_conventions/).
111 |
112 | ## Other Examples
113 |
114 | For more practical examples, see our [examples section](https://karelze.github.io/tclf/option_trade_classification).
115 |
116 | ## Development
117 |
118 | We are using [`tox`](https://tox.wiki/en/latest/user_guide.html) with [`uv`](https://docs.astral.sh/uv/) for development.
119 |
120 | ```bash
121 | tox -e lint
122 | tox -e format
123 | tox -e test
124 | tox -e build
125 | ```
126 |
127 | ## Citation
128 |
129 | If you are using the package in publications, please cite as:
130 |
131 | ```latex
132 | @software{bilz_tclf_2023,
133 | author = {Bilz, Markus},
134 | license = {BSD 3},
135 | month = nov,
136 | title = {{tclf} -- trade classification with python},
137 | url = {https://github.com/KarelZe/tclf},
  version = {0.0.9},
139 | year = {2023}
140 | }
141 | ```
142 |
143 | ## Footnotes
144 |
145 | [^1]: Chakrabarty, B., Li, B., Nguyen, V., & Van Ness, R. A. (2007). Trade classification algorithms for electronic communications network trades.
Journal of Banking & Finance,
31(12), 3806–3821.
https://doi.org/10.1016/j.jbankfin.2007.03.003
146 |
147 | [^2]: Ellis, K., Michaely, R., & O’Hara, M. (2000). The accuracy of trade classification rules: Evidence from nasdaq.
The Journal of Financial and Quantitative Analysis,
35(4), 529–551.
https://doi.org/10.2307/2676254
148 |
149 | [^3]: Grauer, C., Schuster, P., & Uhrig-Homburg, M. (2023).
Option trade classification.
https://doi.org/10.2139/ssrn.4098475
150 |
151 | [^4]: Harris, L. (1989). A day-end transaction price anomaly.
The Journal of Financial and Quantitative Analysis,
24(1), 29.
https://doi.org/10.2307/2330746
152 |
[^5]: Hasbrouck, J. (2009). Trading costs and returns for U.S. equities: Estimating effective costs from daily data.
The Journal of Finance,
64(3), 1445–1477.
https://doi.org/10.1111/j.1540-6261.2009.01469.x
154 | [^6]: Lee, C., & Ready, M. J. (1991). Inferring trade direction from intraday data.
The Journal of Finance,
46(2), 733–746.
https://doi.org/10.1111/j.1540-6261.1991.tb02683.x
155 |
156 |
--------------------------------------------------------------------------------
/docs/rules.md:
--------------------------------------------------------------------------------
1 | ---
2 | comments: true
3 | ---
4 |
5 | # A Primer on Trade Classification Rules
6 |
7 | The goal of trade classification algorithms is to identify the *initiator of a trade*. While definitions for the trade initiator differ in literature [cp. @leeInferringInvestorBehavior2000, pp. 94--97; @odders-whiteOccurrenceConsequencesInaccurate2000, p. 262] the trade initiator is binary and either the buyer or the seller.
8 |
9 | As the trade initiator is frequently absent in datasets, it must be inferred using trade classification algorithms or other approaches. This article introduces basic rules for trade classification.
10 |
11 | ## Notation
12 | We denote the predicted class by $y \in \mathcal{Y}$ with $\mathcal{Y}=\{-1,1\}$, whereby $y=-1$ is indicating a seller-initiated and $y=1$ a buyer-initiated trade. We denote the sequence of trade prices of the $i$-th security by $(P_{i,t})_{t=1}^{T}$ and the corresponding ask at $t$ by $A_{i,t}$ and bid by $B_{i,t}$. The midpoint of the bid-ask spread is denoted by $M_{i,t} = \tfrac{1}{2}(B_{i,t} + A_{i,t})$. Moreover, we denote the quoted size at the ask with $\tilde{A}_{i,t}$, $\tilde{B}_{i,t}$ of the bid, and $P_{i,t}$ the trade price at $t$ of the $i$-th security.
13 |
14 | For simplicity we assume an ideal data regime, where quote data is complete and spreads are positive.🥵
15 |
16 | ## Basic Rules
17 |
18 | This section presents basic classification rules, that may be used for trade classification independently or integrated into a [hybrid algorithm](#hybrid-rules).
19 |
20 | ### Quote Rule
21 |
22 | The quote rule classifies a trade by comparing the trade price against the corresponding quotes at the time of the trade. If the trade price is above the midpoint of the bid-ask spread, $M_{i,t}$, the trade is classified as a buy and if it is below the midpoint, as a sell [@harrisDayEndTransactionPrice1989, p. 41].
23 |
24 | Thus, the classification rule on $\mathcal{A} = \left\{(i, t) \in \mathbb{N}^2: P_{i,t} \neq M_{i,t}\right\}$ is given by:
25 |
26 | $$
27 | \operatorname{quote}\colon \mathcal{A} \to \mathcal{Y},\quad
28 | \operatorname{quote}(i, t)=
29 | \begin{cases}
30 | 1, & \mathrm{if}\ P_{i, t}>M_{i, t} \\
-1, & \mathrm{if}\ P_{i, t}<M_{i, t}. \\
\end{cases}
$$

At the midpoint of the spread the trade price carries no directional information, so the quote rule leaves midspread trades unclassified.

**Code**
```python
from tclf.classical_classifier import ClassicalClassifier
clf = ClassicalClassifier(layers=[("quote", "subset")], strategy="random")
clf.fit(X)
```

### Tick Test

A common alternative to the quote rule is the tick test. Based on the rationale that buys increase trade prices and sells lower them, the tick test classifies a trade by the change relative to the preceding, distinguishable trade price [@leeInferringTradeDirection1991, p. 735]. The tick test is defined as:

$$
\operatorname{tick} \colon \mathbb{N}^2 \to \mathcal{Y},\quad
\operatorname{tick}(i, t)=
\begin{cases}
1, & \mathrm{if}\ P_{i, t}>P_{i, t-1} \\
55 | -1, & \mathrm{if}\ P_{i, t} < P_{i, t-1} \\
56 | Y\sim\mathrm{Uniform}(\mathcal{Y}), & \mathrm{if}\ t=1 \\
57 | \operatorname{tick}(i, t-1), & \mathrm{else}.
58 | \end{cases}
59 | $$
60 |
61 | Considering the cases the trade price is higher than the previous price (uptick) the trade is classified as a buy. Reversely, if it is below the previous price (downtick), the trade is classified as a sell. If the price change is zero (zero tick), the signing uses the last price different from the current price [@leeInferringTradeDirection1991,p. 735]. To end recursion at $t=1$, we sign the trades randomly as buyer- or seller-initiated to simplify notation 🤓.
62 |
63 | The tick rule can sign all trades as long as a last differing trade price exists, but the overall precision can be impacted by infrequent trading.
64 |
65 | **Code**
66 | ```python
67 | from tclf.classical_classifier import ClassicalClassifier
68 | clf = ClassicalClassifier(layers=[("tick", "subset")], strategy="random")
69 | clf.fit(X)
70 | ```
71 |
72 | ### Reverse Tick Test
73 |
74 | The reverse tick test is a variant of the tick test proposed in [@hasbrouckTradesQuotesInventories1988, p.241]. It is similar to the tick rule but classifies based on the next, distinguishable trade price.
75 |
76 | $$
77 | \operatorname{rtick} \colon \mathbb{N}^2 \to \mathcal{Y},\quad
78 | \operatorname{rtick}(i, t)=
79 | \begin{cases}
80 | 1, & \mathrm{if}\ P_{i, t} > P_{i, t+1} \\
81 | -1, & \mathrm{if}\ P_{i, t} < P_{i, t+1} \\
82 | Y\sim\mathrm{Uniform}(\mathcal{Y}), & \mathrm{if}\ t+1=T \\
83 | \operatorname{rtick}(i, t+1), & \mathrm{else}
84 | \end{cases}
85 | $$
86 |
87 | As denoted in the equation, the trade is classified as seller-initiated, if the next trade is on an uptick or a zero uptick, and classified as buyer-initiated for trades at a downtick or a zero downtick [@leeInferringTradeDirection1991, pp. 735--736].
88 |
89 | **Code**
90 | ```python
91 | from tclf.classical_classifier import ClassicalClassifier
92 | clf = ClassicalClassifier(layers=[("rev_tick", "subset")], strategy="random")
93 | clf.fit(X)
94 | ```
95 |
96 |
97 | ### Depth Rule
98 |
99 | The depth rule gauges the trade initiator from the quoted size at the best bid and ask. Based on the observation that an exceeding bid or ask size relates to higher liquidity at one trade side, trades are classified as a buy (sell) for a larger ask (bid) size [@grauerOptionTradeClassification2022, pp. 14--15].
100 | We set the domain as $\mathcal{A} = \left\{(i, t) \in \mathbb{N}^2: P_{i,t} = M_{i,t} \land \tilde{A}_{i,t} \neq \tilde{B}_{i,t} \right\}$. The depth rule is now calculated as:
101 |
102 | $$
103 | \operatorname{depth} \colon \mathcal{A} \to \mathcal{Y},\quad
104 | \operatorname{depth}(i, t)=
105 | \begin{cases}
106 | 1, & \mathrm{if}\ \tilde{A}_{i,t} > \tilde{B}_{i,t}. \\
107 | -1, & \mathrm{if}\ \tilde{A}_{i,t} < \tilde{B}_{i,t}\\
108 | \end{cases}
109 | $$
110 |
111 | The depth rule classifies midspread trades only, if the ask size is different from the bid size, as the ratio between the ask and bid size is the sole criterion for inferring the trade's initiator. Due to these restrictive conditions in $\mathcal{A}$, the depth rule can sign only a fraction of all trades and must be best followed by other rules.
112 |
113 | **Code**
114 | ```python
115 | from tclf.classical_classifier import ClassicalClassifier
116 | clf = ClassicalClassifier(layers=[("depth", "subset")])
117 | clf.fit(X)
118 | ```
119 |
120 | ### Trade Size Rule
121 |
122 | The trade size rule classifies based on a match between the size of the trade $\tilde{P}_{i, t}$ and the quoted bid and ask sizes. The rationale is, that the market maker tries to fill the limit order of a customer, which results in the trade being executed at the contemporaneous bid or ask, with a trade size equaling the quoted size [@grauerOptionTradeClassification2022]. The trade size rule is defined on $\mathcal{A} = \left\{(i, t) \in \mathbb{N}^2: \tilde{P}_{i,t} = \tilde{A}_{i,t} \neq \tilde{B}_{i,t} \lor \tilde{P}_{i,t} \neq\tilde{A}_{i,t} = \tilde{B}_{i,t} \right\}$ as:
123 |
124 | $$
125 | \operatorname{tsize} \colon \mathcal{A} \to \mathcal{Y},\quad
126 | \operatorname{tsize}(i, t)=
127 | \begin{cases}
128 | 1, & \mathrm{if}\ \tilde{P}_{i, t} = \tilde{B}_{i, t} \neq \tilde{A}_{i, t} \\
129 | -1, & \mathrm{if}\ \tilde{P}_{i, t} = \tilde{A}_{i, t} \neq \tilde{B}_{i, t}. \\
130 | \end{cases}
131 | $$
132 |
133 | When both the size of the ask and bid correspond with the trade size or the trade size does not match the quoted sizes, the result is ambiguous.
134 |
135 | **Code**
136 | ```python
137 | from tclf.classical_classifier import ClassicalClassifier
138 | clf = ClassicalClassifier(layers=[("trade_size", "subset")])
139 | clf.fit(X)
140 | ```
141 |
142 | ## Hybrid Rules
143 |
144 | The basic trade classification rules from [basic rules](#basic-rules) can be combined into a hybrid algorithm to enforce universal applicability to all trades and improve the classification performance.
145 |
146 | Popular variants include the [LR algorithm](#lee-and-ready-algorithm), the [EMO rule](#ellis-michaely-ohara-rule), and the [CLNV method](#chakrabarty-li-nguyen-van-ness-method). All three algorithms utilize the quote and tick rule to a varying extent. Basic rules are selected based on the proximity of the trade price to the quotes.
147 |
148 | As put forth by Grauer et al. [-@grauerOptionTradeClassification2022], basic or hybrid rules can be combined through stacking. This approach generalizes the aforementioned algorithms, as the applied rule is no longer dependent on the proximity to the quotes, but rather on the classifiability of the trade with the primary rules given by the domains and their ordering.
149 |
150 | ### Lee and Ready Algorithm
151 |
152 | The LR algorithm [@leeInferringTradeDirection1991, p. 745] combines the (reverse) tick test and quote rule into a single rule, which is derived from two observations. First, Lee and Ready [-@leeInferringTradeDirection1991, pp. 735-745] observe a higher precision of the quote rule over the tick rule, which makes it their preferred choice. Second, by the means of a simple model, the authors demonstrate that the tick test can correctly classify on average 85.4 % of all midspread trades if the model's assumptions of constant quotes between trades and the arrival of the market and standing orders following a Poisson process are met. Outside the model's tight assumptions, the expected accuracy of the tick test can be unmet.
153 |
154 | In combination, the algorithm primarily signs trades according to the quote rule. Trades at the midpoint of the spread, unclassifiable by the quote rule, are classified by the tick test. Overall:
155 |
156 | $$
157 | \operatorname{lr} \colon \mathbb{N}^2 \to \mathcal{Y},\quad\operatorname{lr}(i,t)=
158 | \begin{cases}
159 | 1, & \mathrm{if}\ P_{i, t} > M_{i, t} \\
160 | -1, & \mathrm{if}\ P_{i, t} < M_{i, t} \\
161 | \operatorname{tick}(i, t), & \mathrm{else}.
162 | \end{cases}
163 | $$
164 |
165 |
166 | **Code**
167 | ```python
168 | from tclf.classical_classifier import ClassicalClassifier
169 | clf = ClassicalClassifier(layers=[("lr", "subset")])
170 | clf.fit(X)
171 | ```
172 |
173 | ### Ellis-Michaely-O'Hara Rule
174 |
175 | Ellis et al. [-@ellisAccuracyTradeClassification2000, pp. 535--536] examine the performance of the previous algorithms for stocks traded at NASDAQ. By analyzing miss-classified trades with regard to the proximity of the trade to the quotes, they observe, that the quote rule and by extension, the [LR algorithm](#lee-and-ready-algorithm), perform particularly well at classifying trades executed at the bid and the ask price but trail the performance of the tick rule for trades inside or outside the spread [@ellisAccuracyTradeClassification2000, pp. 535--536]. The authors combine these observations into a single rule, known as the EMO algorithm.
176 |
177 | The EMO algorithm extends the tick rule by classifying trades at the quotes using the quote rule, and all other trades with the tick test. Formally, the classification rule is given by:
178 |
179 | $$
180 | \operatorname{emo} \colon \mathbb{N}^2 \to \mathcal{Y}, \quad
181 | \operatorname{emo}(i, t)=
182 | \begin{cases}
183 | 1, & \mathrm{if}\ P_{i, t} = A_{i, t} \\
184 | -1, & \mathrm{if}\ P_{i, t} = B_{i, t} \\
185 | \operatorname{tick}(i, t), & \mathrm{else}.
186 | \end{cases}
187 | $$
188 |
189 | The EMO algorithm embeds both the quote and tick rule. As trades off the quotes are classified by the tick rule, the algorithm's overall success rate is dominated by the tick test assuming most trades are off-the-quotes.
190 |
191 | **Code**
192 | ```python
193 | from tclf.classical_classifier import ClassicalClassifier
194 | clf = ClassicalClassifier(layers=[("emo", "subset")])
195 | clf.fit(X)
196 | ```
197 |
198 | ### Chakrabarty-Li-Nguyen-Van-Ness Method
199 |
Like the previous two algorithms, the CLNV method [@chakrabartyTradeClassificationAlgorithms2007, pp. 3811-3812] is a hybrid of the quote and tick rule and extends the EMO rule by a differentiated treatment of trades inside the quotes, which are notoriously hard to classify. The authors segment the bid-ask spread into deciles (ten equal-width bins) and classify trades around the midpoint (fourth to seventh decile) by the tick rule, while trades close to or at the quotes (first to third and eighth to tenth decile) are categorized by the quote rule; trades outside the quotes again fall back to the tick rule.
201 |
202 | $$
203 | \operatorname{clnv} \colon \mathbb{N}^2 \to \mathcal{Y}, \quad
204 | \operatorname{clnv}(i, t)=
205 | \begin{cases}
206 | 1, & \mathrm{if}\ P_{i, t} \in \left(\frac{3}{10} B_{i,t} + \frac{7}{10} A_{i,t}, A_{i, t}\right] \\
207 | -1, & \mathrm{if}\ P_{i, t} \in \left[ B_{i,t}, \frac{7}{10} B_{i,t} + \frac{3}{10} A_{i,t}\right) \\
208 | \operatorname{tick}(i, t), & \mathrm{else}
209 | \end{cases}
210 | $$
211 |
212 | It is derived from a performance comparison of the tick rule ([EMO rule](#ellis-michaely-ohara-rule)) against the quote rule ([LR algorithm](#lee-and-ready-algorithm)) on stock data, whereby the accuracy was assessed separately for each decile.
213 |
214 | **Code**
215 | ```python
216 | from tclf.classical_classifier import ClassicalClassifier
217 | clf = ClassicalClassifier(layers=[("clnv", "subset")])
218 | clf.fit(X)
219 | ```
220 |
221 | ### Stacked Rule
222 |
223 | The previous algorithms are static concerning the used base rules and their alignment. Combining arbitrary rules into a single algorithm requires a generic procedure. Grauer et al.[-@grauerOptionTradeClassification2022, p. 15] combine basic and hybrid rules through stacking. In this setting, the trade traverses a stack of pre-defined rules until a rule can classify the trade or the end of the stack is reached. The classification is now dependent on the employed rules but also on their relative ordering.
224 |
225 | The most basic application is in the [LR algorithm](#lee-and-ready-algorithm), combining $\operatorname{quote}$ and $\operatorname{tick}$. For a more complex example consider the hybrid rule consisting of $\operatorname{tsize}_{\mathrm{ex}}$, $\operatorname{quote}_{\mathrm{nbbo}}$, $\operatorname{quote}_{\mathrm{ex}}$, $\operatorname{depth}_{\mathrm{nbbo}}$, $\operatorname{depth}_{\mathrm{ex}}$ and $\operatorname{rtick}_{\mathrm{all}}$ popularized in Grauer et al. [-@grauerOptionTradeClassification2022, p. 15].
226 |
227 | In practice, rules may be ordered greedily and new rules added if there are unclassified trades.
228 |
229 | **Code**
230 | ```python
231 | from tclf.classical_classifier import ClassicalClassifier
232 |
233 | layers = [
234 | ("trade_size", "ex"),
235 | ("quote", "best"),
236 | ("quote", "ex"),
237 | ("depth", "best"),
238 | ("depth", "ex"),
239 | ("rev_tick", "all"),
240 | ]
241 | clf = ClassicalClassifier(layers=layers, strategy="random")
242 | clf.fit(X)
243 | ```
244 | ## Footnotes
245 | \bibliography
246 |
--------------------------------------------------------------------------------
/src/tclf/classical_classifier.py:
--------------------------------------------------------------------------------
1 | """Implements classical trade classification rules with a sklearn-like interface.
2 |
3 | Both simple rules like quote rule or tick test or hybrids are included.
4 | """
5 |
6 | from __future__ import annotations
7 |
8 | import re
9 | from typing import Literal, get_args
10 |
11 | import numpy as np
12 | import numpy.typing as npt
13 | import pandas as pd
14 | from sklearn.base import BaseEstimator, ClassifierMixin
15 | from sklearn.utils import check_random_state
16 | from sklearn.utils.validation import (
17 | _check_sample_weight,
18 | check_is_fitted,
19 | )
20 |
21 | from tclf.types import ArrayLike, MatrixLike
22 |
# Literal type enumerating every classification rule the classifier accepts;
# used to statically type the rule name in each (rule, subset) layer tuple.
ALLOWED_FUNC_LITERALS = Literal[
    "tick",
    "rev_tick",
    "quote",
    "lr",
    "rev_lr",
    "emo",
    "rev_emo",
    "clnv",
    "rev_clnv",
    "trade_size",
    "depth",
    "nan",
]
# Runtime tuple of the same rule names, derived from the Literal above via
# `get_args` so the two can never drift apart.
ALLOWED_FUNC_STR: tuple[ALLOWED_FUNC_LITERALS, ...] = get_args(ALLOWED_FUNC_LITERALS)
38 |
39 |
class ClassicalClassifier(ClassifierMixin, BaseEstimator):
    """ClassicalClassifier implements several trade classification rules.

    Including:
    Tick test,
    Reverse tick test,
    Quote rule,
    LR algorithm,
    EMO algorithm,
    CLNV algorithm,
    Trade size rule,
    Depth rule,
    and nan

    Rules are applied as a stack of layers: each layer only classifies
    trades left unclassified (NaN) by the previous layers. Remaining
    unclassified trades are filled according to `strategy`.
    """

    # Working copy of the feature matrix. Assigned in `predict()` and
    # `_validate_columns()`, and deleted afterwards to avoid persisting data
    # on the estimator.
    X_ = pd.DataFrame

    def __init__(
        self,
        layers: list[
            tuple[
                ALLOWED_FUNC_LITERALS,
                str,
            ]
        ]
        | None = None,
        *,
        features: list[str] | None = None,
        random_state: int | None = 42,
        strategy: Literal["random", "const"] = "random",
    ):
        """Initialize a ClassicalClassifier.

        Examples:
            >>> X = pd.DataFrame(
            ... [
            ...     [1.5, 1, 3],
            ...     [2.5, 1, 3],
            ...     [1.5, 3, 1],
            ...     [2.5, 3, 1],
            ...     [1, np.nan, 1],
            ...     [3, np.nan, np.nan],
            ... ],
            ... columns=["trade_price", "bid_ex", "ask_ex"],
            ... )
            >>> clf = ClassicalClassifier(layers=[("quote", "ex")], strategy="const")
            >>> clf.fit(X)
            ClassicalClassifier(layers=[('quote', 'ex')], strategy='const')
            >>> pred = clf.predict_proba(X)

        Args:
            layers (list[tuple[ALLOWED_FUNC_LITERALS, str]]): Layers of classical rule and subset name. Supported rules: "tick", "rev_tick", "quote", "lr", "rev_lr", "emo", "rev_emo", "clnv", "rev_clnv", "trade_size", "depth", and "nan". Defaults to None, which results in classification by 'strategy' parameter.
            features (list[str] | None, optional): List of feature names in order of columns. Required to match columns in feature matrix with label. Can be `None`, if `pd.DataFrame` is passed. Defaults to None.
            random_state (int | None, optional): random seed. Defaults to 42.
            strategy (Literal["random", "const"], optional): Strategy to fill unclassified. Randomly with uniform probability or with constant 0. Defaults to "random".
        """
        # sklearn convention: __init__ only stores the parameters unchanged.
        self.layers = layers
        self.random_state = random_state
        self.features = features
        self.strategy = strategy

    def _more_tags(self) -> dict[str, bool | dict[str, str]]:
        """Set tags for sklearn.

        See: https://scikit-learn.org/stable/developers/develop.html#estimator-tags

        Returns:
            dict[str, bool | dict[str, str]]: dict with tags
        """
        return {
            "allow_nan": True,
            "binary_only": True,
            "requires_y": False,
            "poor_score": True,
            "_xfail_checks": {
                "check_classifiers_classes": "Disabled due to partly random classification.",
                "check_classifiers_train": "No check, as unsupervised classifier.",
                "check_classifiers_one_label": "Disabled due to partly random classification.",
                "check_methods_subset_invariance": "No check, as unsupervised classifier.",
                "check_methods_sample_order_invariance": "No check, as unsupervised classifier.",
                "check_supervised_y_no_nan": "No check, as unsupervised classifier.",
                "check_supervised_y_2d": "No check, as unsupervised classifier.",
                "check_classifiers_regression_target": "No check, as unsupervised classifier.",
            },
        }

    def _tick(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if its trade price is above (below) the closest different price of a previous trade.

        Args:
            subset (str): subset i.e., 'all' or 'ex'.

        Returns:
            npt.NDArray: result of tick rule. Can be np.NaN.
        """
        return np.where(
            self.X_["trade_price"] > self.X_[f"price_{subset}_lag"],
            1,
            np.where(
                self.X_["trade_price"] < self.X_[f"price_{subset}_lag"], -1, np.nan
            ),
        )

    def _rev_tick(self, subset: str) -> npt.NDArray:
        """Classify a trade as a sell (buy) if its trade price is below (above) the closest different price of a subsequent trade.

        Args:
            subset (str): subset i.e.,'all' or 'ex'.

        Returns:
            npt.NDArray: result of reverse tick rule. Can be np.NaN.
        """
        return np.where(
            self.X_[f"price_{subset}_lead"] > self.X_["trade_price"],
            -1,
            np.where(
                self.X_[f"price_{subset}_lead"] < self.X_["trade_price"], 1, np.nan
            ),
        )

    def _quote(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if its trade price is above (below) the midpoint of the bid and ask spread. Trades executed at the midspread are not classified.

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of quote rule. Can be np.NaN.
        """
        mid = self._mid(subset)

        return np.where(
            self.X_["trade_price"] > mid,
            1,
            np.where(self.X_["trade_price"] < mid, -1, np.nan),
        )

    def _lr(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if its price is above (below) the midpoint (quote rule), and use the tick test to classify midspread trades.

        Adapted from Lee and Ready (1991).

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of the lee and ready algorithm with tick rule.
            Can be np.NaN.
        """
        q_r = self._quote(subset)
        return np.where(~np.isnan(q_r), q_r, self._tick(subset))

    def _rev_lr(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if its price is above (below) the midpoint (quote rule), and use the reverse tick test to classify midspread trades.

        Adapted from Lee and Ready (1991).

        Args:
            subset (str): subset i.e.,'ex' or 'best'.

        Returns:
            npt.NDArray: result of the lee and ready algorithm with reverse tick
            rule. Can be np.NaN.
        """
        q_r = self._quote(subset)
        return np.where(~np.isnan(q_r), q_r, self._rev_tick(subset))

    def _mid(self, subset: str) -> npt.NDArray:
        """Calculate the midpoint of the bid and ask spread.

        Midpoint is calculated as the average of the bid and ask spread if the spread is positive. Otherwise, np.NaN is returned.

        Args:
            subset (str): subset i.e.,
            'ex' or 'best'

        Returns:
            npt.NDArray: midpoints. Can be np.NaN.
        """
        return np.where(
            self.X_[f"ask_{subset}"] >= self.X_[f"bid_{subset}"],
            0.5 * (self.X_[f"ask_{subset}"] + self.X_[f"bid_{subset}"]),
            np.nan,
        )

    def _is_at_ask_xor_bid(self, subset: str) -> pd.Series:
        """Check if the trade price is at the ask xor bid.

        Args:
            subset (str): subset i.e.,
            'ex' or 'best'.

        Returns:
            pd.Series: boolean series with result.
        """
        at_ask = np.isclose(self.X_["trade_price"], self.X_[f"ask_{subset}"], atol=1e-4)
        at_bid = np.isclose(self.X_["trade_price"], self.X_[f"bid_{subset}"], atol=1e-4)
        return at_ask ^ at_bid

    def _is_at_upper_xor_lower_quantile(
        self, subset: str, quantiles: float = 0.3
    ) -> pd.Series:
        """Check if the trade price is in the upper xor lower quantile of the spread.

        Args:
            subset (str): subset i.e., 'ex'.
            quantiles (float, optional): percentage of quantiles. Defaults to 0.3.

        Returns:
            pd.Series: boolean series with result.
        """
        in_upper = (
            (1.0 - quantiles) * self.X_[f"ask_{subset}"]
            + quantiles * self.X_[f"bid_{subset}"]
            <= self.X_["trade_price"]
        ) & (self.X_["trade_price"] <= self.X_[f"ask_{subset}"])
        in_lower = (self.X_[f"bid_{subset}"] <= self.X_["trade_price"]) & (
            self.X_["trade_price"]
            <= quantiles * self.X_[f"ask_{subset}"]
            + (1.0 - quantiles) * self.X_[f"bid_{subset}"]
        )
        return in_upper ^ in_lower

    def _emo(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if the trade takes place at the ask (bid) quote, and use the tick test to classify all other trades.

        Adapted from Ellis et al. (2000).

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of the emo algorithm with tick rule. Can be
            np.NaN.
        """
        return np.where(
            self._is_at_ask_xor_bid(subset), self._quote(subset), self._tick(subset)
        )

    def _rev_emo(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if the trade takes place at the ask (bid) quote, and use the reverse tick test to classify all other trades.

        Adapted from Grauer et al. (2022).

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of the emo algorithm with reverse tick rule.
            Can be np.NaN.
        """
        return np.where(
            self._is_at_ask_xor_bid(subset), self._quote(subset), self._rev_tick(subset)
        )

    def _clnv(self, subset: str) -> npt.NDArray:
        """Classify a trade based on deciles of the bid and ask spread.

        Spread is divided into ten deciles and trades are classified as follows:
        - use quote rule for at ask until 30 % below ask (upper 3 deciles)
        - use quote rule for at bid until 30 % above bid (lower 3 deciles)
        - use tick rule for all other trades (±2 deciles from midpoint; outside
        bid or ask).

        Adapted from Chakrabarty et al. (2007).

        Args:
            subset (str): subset i.e.,'ex' or 'best'.

        Returns:
            npt.NDArray: result of the clnv algorithm with tick rule. Can be
            np.NaN.
        """
        return np.where(
            self._is_at_upper_xor_lower_quantile(subset),
            self._quote(subset),
            self._tick(subset),
        )

    def _rev_clnv(self, subset: str) -> npt.NDArray:
        """Classify a trade based on deciles of the bid and ask spread.

        Spread is divided into ten deciles and trades are classified as follows:
        - use quote rule for at ask until 30 % below ask (upper 3 deciles)
        - use quote rule for at bid until 30 % above bid (lower 3 deciles)
        - use reverse tick rule for all other trades (±2 deciles from midpoint;
        outside bid or ask).

        Similar to extension of emo algorithm proposed Grauer et al. (2022).

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of the clnv algorithm with reverse tick rule.
            Can be np.NaN.
        """
        return np.where(
            self._is_at_upper_xor_lower_quantile(subset),
            self._quote(subset),
            self._rev_tick(subset),
        )

    def _trade_size(self, subset: str) -> npt.NDArray:
        """Classify a trade as a buy (sell) if the trade size matches exactly the bid (ask) quote size.

        Not applied if the bid size equals the ask size.

        Adapted from Grauer et al. (2022).

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of the trade size rule. Can be np.NaN.
        """
        bid_eq_ask = np.isclose(
            self.X_[f"ask_size_{subset}"], self.X_[f"bid_size_{subset}"], atol=1e-4
        )

        ts_eq_bid = (
            np.isclose(self.X_["trade_size"], self.X_[f"bid_size_{subset}"], atol=1e-4)
            & ~bid_eq_ask
        )
        ts_eq_ask = (
            np.isclose(self.X_["trade_size"], self.X_[f"ask_size_{subset}"], atol=1e-4)
            & ~bid_eq_ask
        )

        return np.where(ts_eq_bid, 1, np.where(ts_eq_ask, -1, np.nan))

    def _depth(self, subset: str) -> npt.NDArray:
        """Classify midspread trades as buy (sell), if the ask size (bid size) exceeds the bid size (ask size).

        Adapted from Grauer et al. (2022).

        Args:
            subset (str): subset i.e., 'ex' or 'best'.

        Returns:
            npt.NDArray: result of depth rule. Can be np.NaN.
        """
        at_mid = np.isclose(self._mid(subset), self.X_["trade_price"], atol=1e-4)

        return np.where(
            at_mid & (self.X_[f"ask_size_{subset}"] > self.X_[f"bid_size_{subset}"]),
            1,
            np.where(
                at_mid
                & (self.X_[f"ask_size_{subset}"] < self.X_[f"bid_size_{subset}"]),
                -1,
                np.nan,
            ),
        )

    def _nan(self, subset: str) -> npt.NDArray:
        """Classify nothing. Fast forward results from previous classifier.

        Args:
            subset (str): ignored; present for API consistency with other rules.

        Returns:
            npt.NDArray: array of np.NaN with one entry per trade.
        """
        return np.full(shape=(self.X_.shape[0],), fill_value=np.nan)

    def _validate_columns(self, missing_columns: list) -> None:
        """Validate if all required columns are present.

        Runs a dry prediction on a dummy frame and collects missing columns
        from the raised `KeyError`s, one per recursion step.

        Args:
            missing_columns (list): list of missing columns.

        Raises:
            ValueError: columns missing in dataframe.
        """
        columns = self.columns_ + missing_columns if self.columns_ else missing_columns
        self.X_ = pd.DataFrame(np.zeros(shape=(1, len(columns))), columns=columns)
        try:
            self._predict()
        except KeyError as e:
            # KeyError message contains the quoted column name
            result = re.search(r"'([^']+)'", str(e))
            if result:
                add_missing = result.group(1)
                if add_missing:
                    missing_columns.append(add_missing)
                    return self._validate_columns(missing_columns)
        if missing_columns:
            raise ValueError(
                f"Expected to find columns: {sorted(missing_columns)}. Check naming/presence of columns. See: https://karelze.github.io/tclf/naming_conventions/"
            )
        del self.X_
        return None

    def fit(
        self,
        X: MatrixLike,
        y: ArrayLike | None = None,
        sample_weight: npt.NDArray | None = None,
    ) -> ClassicalClassifier:
        """Fit the classifier.

        Args:
            X (MatrixLike): features
            y (ArrayLike | None, optional): ignored, present here for API consistency by convention.
            sample_weight (npt.NDArray | None, optional): Sample weights. Defaults to None.

        Raises:
            ValueError: Unknown subset e. g., 'ise'
            ValueError: Unknown function string e. g., 'lee-ready'
            ValueError: Multi output is not supported.

        Returns:
            ClassicalClassifier: Instance of itself.
        """
        _check_sample_weight(sample_weight, X)

        funcs = (
            self._tick,
            self._rev_tick,
            self._quote,
            self._lr,
            self._rev_lr,
            self._emo,
            self._rev_emo,
            self._clnv,
            self._rev_clnv,
            self._trade_size,
            self._depth,
            self._nan,
        )

        # relies on ALLOWED_FUNC_STR and funcs being in the same order
        self.func_mapping_ = dict(zip(ALLOWED_FUNC_STR, funcs))

        # create working copy to be altered and try to get columns from df
        self.columns_ = self.features
        if isinstance(X, pd.DataFrame):
            self.columns_ = X.columns.tolist()

        X = self._validate_data(
            X,
            y="no_validation",
            dtype=[np.float64, np.float32],
            accept_sparse=False,
            force_all_finite=False,
        )

        self.classes_ = np.array([-1, 1])

        # if no features are provided or inferred, use default
        if self.columns_ is None:
            self.columns_ = [str(i) for i in range(X.shape[1])]

        if len(self.columns_) > 0 and X.shape[1] != len(self.columns_):
            raise ValueError(
                f"Expected {len(self.columns_)} columns, got {X.shape[1]}."
            )

        self._layers = self.layers if self.layers is not None else []
        for func_str, _ in self._layers:
            if func_str not in ALLOWED_FUNC_STR:
                raise ValueError(
                    f"Unknown function string: {func_str}, "
                    f"expected one of {ALLOWED_FUNC_STR}."
                )

        self._validate_columns([])
        return self

    def predict(self, X: MatrixLike) -> npt.NDArray:
        """Perform classification on test vectors `X`.

        Args:
            X (MatrixLike): feature matrix.

        Returns:
            npt.NDArray: Predicted target values for X.
        """
        check_is_fitted(self)
        X = self._validate_data(
            X,
            dtype=[np.float64, np.float32],
            accept_sparse=False,
            force_all_finite=False,
        )

        rs = check_random_state(self.random_state)

        self.X_ = pd.DataFrame(data=X, columns=self.columns_)
        pred = self._predict()

        # fill NaNs randomly with -1 and 1 or with constant zero
        mask = np.isnan(pred)
        if self.strategy == "random":
            pred[mask] = rs.choice(self.classes_, pred.shape)[mask]
        else:
            pred[mask] = np.zeros(pred.shape)[mask]

        # reset self.X_ to avoid persisting it
        del self.X_
        return pred

    def _predict(self) -> npt.NDArray:
        """Predict with rule stack.

        Each layer only fills entries still unclassified (NaN) by earlier
        layers.

        Returns:
            npt.NDArray: prediction
        """
        pred = np.full(shape=(self.X_.shape[0],), fill_value=np.nan)
        for func_str, subset in self._layers:
            func = self.func_mapping_[func_str]
            pred = np.where(
                np.isnan(pred),
                func(subset=subset),
                pred,
            )
        return pred

    def predict_proba(self, X: MatrixLike) -> npt.NDArray:
        """Predict class probabilities for X.

        Probabilities are either 0 or 1 depending on the class.

        For strategy 'const' probabilities are (0.5, 0.5) for unclassified classes.

        Args:
            X (MatrixLike): feature matrix

        Returns:
            npt.NDArray: probabilities
        """
        # assign 0.5 to all classes. Required for strategy 'const'.
        prob = np.full((len(X), 2), 0.5)

        # Class can be assumed to be -1 or 1 for strategy 'random'.
        # Class might be zero though for strategy 'const'. Mask non-zeros.
        preds = self.predict(X)
        mask = np.flatnonzero(preds)

        # get index of predicted class and one-hot encode it
        indices = np.nonzero(preds[mask, None] == self.classes_[None, :])[1]
        n_classes = len(self.classes_)

        # overwrite defaults with one-hot encoded classes.
        # For strategy 'const' probabilities are (0.5,0.5).
        prob[mask] = np.identity(n_classes)[indices]
        return prob
585 |
--------------------------------------------------------------------------------
/tests/test_classical_classifier.py:
--------------------------------------------------------------------------------
1 | """Tests for the classical classifier."""
2 |
3 | from __future__ import annotations
4 |
5 | from typing import Callable
6 |
7 | import numpy as np
8 | import pandas as pd
9 | import pytest
10 | from numpy.testing import assert_allclose
11 | from sklearn.base import BaseEstimator
12 | from sklearn.utils.estimator_checks import parametrize_with_checks
13 | from sklearn.utils.validation import check_is_fitted
14 |
15 | from tclf.classical_classifier import ALLOWED_FUNC_LITERALS, ClassicalClassifier
16 |
17 |
18 | class TestClassicalClassifier:
19 | """Perform automated tests for ClassicalClassifier.
20 |
21 | Args:
22 | unittest (_type_): unittest module
23 | """
24 |
25 | @pytest.fixture
26 | def x_train(self) -> pd.DataFrame:
27 | """Training set fixture.
28 |
29 | Returns:
30 | pd.DataFrame: training set
31 | """
32 | return pd.DataFrame(
33 | np.zeros(shape=(1, 14)),
34 | columns=[
35 | "ask_size_ex",
36 | "bid_size_ex",
37 | "ask_best",
38 | "bid_best",
39 | "ask_ex",
40 | "bid_ex",
41 | "trade_price",
42 | "trade_size",
43 | "price_ex_lag",
44 | "price_ex_lead",
45 | "price_best_lag",
46 | "price_best_lead",
47 | "price_all_lag",
48 | "price_all_lead",
49 | ],
50 | )
51 |
52 | @pytest.fixture
53 | def x_test(self) -> pd.DataFrame:
54 | """Test set fixture.
55 |
56 | Returns:
57 | pd.DataFrame: test set
58 | """
59 | return pd.DataFrame(
60 | [[1, 2], [3, 4], [1, 2], [3, 4]], columns=["ask_best", "bid_best"]
61 | )
62 |
63 | @pytest.fixture
64 | def y_test(self) -> pd.Series:
65 | """Test target fixture.
66 |
67 | Returns:
68 | pd.Series: test target
69 | """
70 | return pd.Series([1, -1, 1, -1])
71 |
72 | @pytest.fixture
73 | def clf(self, x_train: pd.DataFrame) -> ClassicalClassifier:
74 | """Classifier fixture with random classification.
75 |
76 | Args:
77 | x_train (pd.DataFrame): train set
78 |
79 | Returns:
80 | ClassicalClassifier: fitted clf
81 | """
82 | return ClassicalClassifier().fit(x_train[["ask_best", "bid_best"]])
83 |
84 | @parametrize_with_checks([ClassicalClassifier()])
85 | @pytest.mark.xfail(reason="fix in rework of dataframe api")
86 | def test_sklearn_compatibility(
87 | self, estimator: BaseEstimator, check: Callable
88 | ) -> None:
89 | """Test, if classifier is compatible with sklearn."""
90 | check(estimator)
91 |
92 | def test_shapes(
93 | self, clf: ClassicalClassifier, x_test: pd.DataFrame, y_test: pd.Series
94 | ) -> None:
95 | """Test, if shapes of the classifier equal the targets.
96 |
97 | Shapes are usually [no. of samples, 1].
98 | """
99 | y_pred = clf.predict(x_test)
100 |
101 | assert y_test.shape == y_pred.shape
102 |
103 | def test_proba(self, clf: ClassicalClassifier, x_test: pd.DataFrame) -> None:
104 | """Test, if probabilities are in [0, 1]."""
105 | y_pred = clf.predict_proba(x_test)
106 | assert (y_pred >= 0).all()
107 | assert (y_pred <= 1).all()
108 |
109 | def test_score(
110 | self, clf: ClassicalClassifier, x_test: pd.DataFrame, y_test: pd.Series
111 | ) -> None:
112 | """Test, if score is correctly calculated..
113 |
114 | For a random classification i. e., `layers=[("nan", "ex")]`, the score
115 | should be around 0.5.
116 | """
117 | accuracy = clf.score(x_test, y_test)
118 | assert 0.0 <= accuracy <= 1.0
119 |
120 | def test_random_state(self, x_train: pd.DataFrame, x_test: pd.DataFrame) -> None:
121 | """Test, if random state is correctly set.
122 |
123 | Two classifiers with the same random state should give the same results.
124 | """
125 | columns = ["ask_best", "bid_best"]
126 | first_classifier = ClassicalClassifier(
127 | layers=[("nan", "ex")],
128 | random_state=50,
129 | ).fit(x_train[columns])
130 | first_y_pred = first_classifier.predict(x_test)
131 |
132 | second_classifier = ClassicalClassifier(
133 | layers=[("nan", "ex")],
134 | random_state=50,
135 | ).fit(x_train[columns])
136 | second_y_pred = second_classifier.predict(x_test)
137 |
138 | assert (first_y_pred == second_y_pred).all()
139 |
140 | def test_fit(self, x_train: pd.DataFrame) -> None:
141 | """Test, if fit works.
142 |
143 | A fitted classifier should have an attribute `layers_`.
144 | """
145 | fitted_classifier = ClassicalClassifier(
146 | layers=[("nan", "ex")],
147 | random_state=42,
148 | ).fit(x_train[["ask_best", "bid_best"]])
149 | assert check_is_fitted(fitted_classifier) is None
150 |
151 | def test_strategy_const(self, x_train: pd.DataFrame, x_test: pd.DataFrame) -> None:
152 | """Test, if strategy 'const' returns correct proabilities.
153 |
154 | A classifier with strategy 'constant' should return class probabilities
155 | of (0.5, 0.5), if a trade can not be classified.
156 | """
157 | columns = ["ask_best", "bid_best"]
158 | fitted_classifier = ClassicalClassifier(
159 | layers=[("nan", "ex")], strategy="const"
160 | ).fit(x_train[columns])
161 | assert_allclose(
162 | fitted_classifier.predict_proba(x_test[columns]),
163 | 0.5,
164 | rtol=1e-09,
165 | atol=1e-09,
166 | )
167 |
168 | def test_invalid_func(self, x_train: pd.DataFrame) -> None:
169 | """Test, if only valid function strings can be passed.
170 |
171 | An exception should be raised for invalid function strings.
172 | Test for 'foo', which is no valid rule.
173 | """
174 | classifier = ClassicalClassifier(
175 | layers=[("foo", "all")], # type: ignore [list-item]
176 | random_state=42,
177 | )
178 | with pytest.raises(ValueError, match=r"Unknown function string"):
179 | classifier.fit(x_train)
180 |
181 | def test_missing_columns(self, x_train: pd.DataFrame) -> None:
182 | """Test, if an error is raised, if required columns are missing.
183 |
184 | An exception should be raised if required features are missing,
185 | including the columns required for classification.
186 | """
187 | classifier = ClassicalClassifier(
188 | layers=[("tick", "all"), ("quote", "ex")], random_state=42
189 | )
190 | with pytest.raises(
191 | ValueError,
192 | match=r"Expected to find columns: ['ask_ex', 'bid_ex', 'price_all_lag']*",
193 | ):
194 | classifier.fit(x_train[["trade_price", "trade_size"]])
195 |
196 | def test_invalid_col_length(self, x_train: pd.DataFrame) -> None:
197 | """Test, if only valid column length can be passed.
198 |
199 | An exception should be raised if length of columns list does not match
200 | the number of columns in the data. `features` is only used if, data is
201 | not passed as `pd.DataFrame`.Test for columns list of length 2, which
202 | does not match the data.
203 | """
204 | classifier = ClassicalClassifier(
205 | layers=[("tick", "all")], random_state=42, features=["one"]
206 | )
207 | with pytest.raises(ValueError, match=r"Expected"):
208 | classifier.fit(x_train.to_numpy())
209 |
210 | def test_override(self, x_train: pd.DataFrame) -> None:
211 | """Test, if classifier does not override valid results from layer one.
212 |
213 | If all data can be classified using first rule, first rule should
214 | only be applied.
215 | """
216 | columns = ["trade_price", "price_ex_lag", "price_all_lead"]
217 | x_test = pd.DataFrame(
218 | [[1, 2, 0], [2, 1, 3]],
219 | columns=columns,
220 | )
221 | y_test = pd.Series([-1, 1])
222 | y_pred = (
223 | ClassicalClassifier(
224 | layers=[("tick", "ex"), ("rev_tick", "all")],
225 | random_state=7,
226 | )
227 | .fit(x_train[columns])
228 | .predict(x_test)
229 | )
230 | assert (y_pred == y_test).all()
231 |
232 | def test_np_array(self, x_train: pd.DataFrame) -> None:
233 | """Test, if classifier works, if only np.ndarrays are provided.
234 |
235 | If only np.ndarrays are provided, the classifier should work, by constructing
236 | a dataframe from the arrays and the `columns` list.
237 | """
238 | x_test = np.array([[1, 2, 0], [2, 1, 3]])
239 | y_test = np.array([-1, 1])
240 |
241 | columns = ["trade_price", "price_ex_lag", "price_ex_lead"]
242 | y_pred = (
243 | ClassicalClassifier(
244 | layers=[("tick", "ex"), ("rev_tick", "ex")],
245 | random_state=7,
246 | features=columns,
247 | )
248 | .fit(x_train[columns].to_numpy())
249 | .predict(x_test)
250 | )
251 | assert (y_pred == y_test).all()
252 |
253 | @pytest.mark.parametrize("subset", ["best", "ex"])
254 | def test_mid(self, x_train: pd.DataFrame, subset: str) -> None:
255 | """Test, if no mid is calculated, if bid exceeds ask etc.
256 |
257 | Args:
258 | x_train (pd.DataFrame): train set
259 | subset (str): subset
260 | """
261 | columns = ["trade_price", f"bid_{subset}", f"ask_{subset}"]
262 |
263 | # first two by rule, all other by random chance.
264 | x_test = pd.DataFrame(
265 | [
266 | [1.5, 1, 3],
267 | [2.5, 1, 3],
268 | [1.5, 3, 1], # bid > ask
269 | [2.5, 3, 1], # bid > ask
270 | [1, np.nan, 1], # missing data
271 | [3, np.nan, np.nan], # missing_data
272 | ],
273 | columns=columns,
274 | )
275 | y_test = pd.Series([-1, 1, 1, -1, -1, 1])
276 | y_pred = (
277 | ClassicalClassifier(layers=[("quote", subset)], random_state=45)
278 | .fit(x_train[columns])
279 | .predict(x_test)
280 | )
281 | assert (y_pred == y_test).all()
282 |
283 | def _apply_rule(
284 | self,
285 | x_train: pd.DataFrame,
286 | x_test: pd.DataFrame,
287 | y_test: pd.DataFrame,
288 | layers: list[tuple[ALLOWED_FUNC_LITERALS, str]],
289 | random_state: int = 7,
290 | ) -> None:
291 | """Apply rule-based classification.
292 |
293 | Args:
294 | x_train (pd.DataFrame): training features
295 | x_test (pd.DataFrame): test features
296 | y_test (pd.DataFrame): true labels
297 | layers (list[tuple[ALLOWED_FUNC_LITERALS, str]]): layers
298 | random_state (int, optional): random state. Defaults to 7.
299 | """
300 | y_pred = (
301 | ClassicalClassifier(
302 | layers=layers,
303 | random_state=random_state,
304 | )
305 | .fit(x_train[x_test.columns])
306 | .predict(x_test)
307 | )
308 | assert (y_pred == y_test).all()
309 |
310 | @pytest.mark.benchmark
311 | @pytest.mark.parametrize("subset", ["all", "ex"])
312 | def test_tick_rule(self, x_train: pd.DataFrame, subset: str) -> None:
313 | """Test, if tick rule is correctly applied.
314 |
315 | Tests cases where prev. trade price is higher, lower, equal or missing.
316 |
317 | Args:
318 | x_train (pd.DataFrame): training set
319 | subset (str): subset e. g., 'ex'
320 | """
321 | x_test = pd.DataFrame(
322 | [[1, 2], [2, 1], [1, 1], [1, np.nan]],
323 | columns=["trade_price", f"price_{subset}_lag"],
324 | )
325 |
326 | # first two by rule (see p. 28 Grauer et al.), remaining two by random chance.
327 | y_test = pd.Series([-1, 1, 1, -1])
328 | self._apply_rule(x_train, x_test, y_test, [("tick", subset)], 7)
329 |
330 | @pytest.mark.benchmark
331 | @pytest.mark.parametrize("subset", ["all", "ex"])
332 | def test_rev_tick_rule(self, x_train: pd.DataFrame, subset: str) -> None:
333 | """Test, if rev. tick rule is correctly applied.
334 |
335 | Tests cases where suc. trade price is higher, lower, equal or missing.
336 |
337 | Args:
338 | x_train (pd.DataFrame): training set
339 | subset (str): subset e. g., 'ex'
340 | """
341 | x_test = pd.DataFrame(
342 | [[1, 2], [2, 1], [1, 1], [1, np.nan]],
343 | columns=["trade_price", f"price_{subset}_lead"],
344 | )
345 | # first two by rule (see p. 28 Grauer et al.), remaining two by random chance.
346 | y_test = pd.Series([-1, 1, 1, -1])
347 | self._apply_rule(x_train, x_test, y_test, [("rev_tick", subset)], 7)
348 |
349 | @pytest.mark.benchmark
350 | @pytest.mark.parametrize("subset", ["best", "ex"])
351 | def test_quote_rule(self, x_train: pd.DataFrame, subset: str) -> None:
352 | """Test, if quote rule is correctly applied.
353 |
354 | Tests cases where prev. trade price is higher, lower, equal or missing.
355 |
356 | Args:
357 | x_train (pd.DataFrame): training set
358 | subset (str): subset e. g., 'ex'
359 | """
360 | # first two by rule (see p. 28 Grauer et al.), remaining four by random chance.
361 | x_test = pd.DataFrame(
362 | [
363 | [1, 1, 3],
364 | [3, 1, 3],
365 | [1, 1, 1],
366 | [3, 2, 4],
367 | [1, np.nan, 1],
368 | [3, np.nan, np.nan],
369 | ],
370 | columns=["trade_price", f"bid_{subset}", f"ask_{subset}"],
371 | )
372 | y_test = pd.Series([-1, 1, 1, -1, -1, 1])
373 | self._apply_rule(x_train, x_test, y_test, [("quote", subset)], 45)
374 |
375 | @pytest.mark.benchmark
376 | @pytest.mark.parametrize("subset", ["best", "ex"])
377 | def test_lr(self, x_train: pd.DataFrame, subset: str) -> None:
378 | """Test, if the lr algorithm is correctly applied.
379 |
380 | Tests cases where both quote rule and tick rule all are used.
381 |
382 | Args:
383 | x_train (pd.DataFrame): training set
384 | subset (str): subset e. g., 'ex'
385 | """
386 | # first two by quote rule, remaining two by tick rule.
387 | x_test = pd.DataFrame(
388 | [[1, 1, 3, 0], [3, 1, 3, 0], [1, 1, 1, 0], [3, 2, 4, 4]],
389 | columns=[
390 | "trade_price",
391 | f"bid_{subset}",
392 | f"ask_{subset}",
393 | f"price_{subset}_lag",
394 | ],
395 | )
396 | y_test = pd.Series([-1, 1, 1, -1])
397 | self._apply_rule(x_train, x_test, y_test, [("lr", subset)], 7)
398 |
399 | @pytest.mark.benchmark
400 | @pytest.mark.parametrize("subset", ["best", "ex"])
401 | def test_rev_lr(self, x_train: pd.DataFrame, subset: str) -> None:
402 | """Test, if the rev. lr algorithm is correctly applied.
403 |
404 | Tests cases where both quote rule and tick rule all are used.
405 |
406 | Args:
407 | x_train (pd.DataFrame): training set
408 | subset (str): subset e. g., 'ex'
409 | """
410 | # first two by quote rule, two by tick rule, and two by random chance.
411 | x_test = pd.DataFrame(
412 | [
413 | [1, 1, 3, 0],
414 | [3, 1, 3, 0],
415 | [1, 1, 1, 0],
416 | [3, 2, 4, 4],
417 | [1, 1, np.nan, np.nan],
418 | [1, 1, np.nan, np.nan],
419 | ],
420 | columns=[
421 | "trade_price",
422 | f"bid_{subset}",
423 | f"ask_{subset}",
424 | f"price_{subset}_lead",
425 | ],
426 | )
427 | y_test = pd.Series([-1, 1, 1, -1, -1, 1])
428 | self._apply_rule(x_train, x_test, y_test, [("rev_lr", subset)], 42)
429 |
430 | @pytest.mark.benchmark
431 | @pytest.mark.parametrize("subset", ["best", "ex"])
432 | def test_emo(self, x_train: pd.DataFrame, subset: str) -> None:
433 | """Test, if the emo algorithm is correctly applied.
434 |
435 | Tests cases where both quote rule at bid or ask and tick rule all are used.
436 |
437 | Args:
438 | x_train (pd.DataFrame): training set
439 | subset (str): subset e.g., best
440 | """
441 | # first two by quote rule, two by tick rule, two by random chance.
442 | x_test = pd.DataFrame(
443 | [
444 | [1, 1, 3, 0],
445 | [3, 1, 3, 0],
446 | [1, 1, 1, 0],
447 | [3, 2, 4, 4],
448 | [1, 1, np.inf, np.nan],
449 | [1, 1, np.nan, np.nan],
450 | ],
451 | columns=[
452 | "trade_price",
453 | f"bid_{subset}",
454 | f"ask_{subset}",
455 | f"price_{subset}_lag",
456 | ],
457 | )
458 | y_test = pd.Series([-1, 1, 1, -1, -1, 1])
459 | self._apply_rule(x_train, x_test, y_test, [("emo", subset)], 42)
460 |
461 | @pytest.mark.benchmark
462 | @pytest.mark.parametrize("subset", ["best", "ex"])
463 | def test_rev_emo(self, x_train: pd.DataFrame, subset: str) -> None:
464 | """Test, if the rev. emo algorithm is correctly applied.
465 |
466 | Tests cases where both quote rule at bid or ask and rev. tick rule all are used.
467 |
468 | Args:
469 | x_train (pd.DataFrame): training set
470 | subset (str): subset e. g., 'ex'
471 | """
472 | # first two by quote rule, two by tick rule, two by random chance.
473 | x_test = pd.DataFrame(
474 | [
475 | [1, 1, 3, 0],
476 | [3, 1, 3, 0],
477 | [1, 1, 1, 0],
478 | [3, 2, 4, 4],
479 | [1, 1, np.inf, np.nan],
480 | [1, 1, np.nan, np.nan],
481 | ],
482 | columns=[
483 | "trade_price",
484 | f"bid_{subset}",
485 | f"ask_{subset}",
486 | f"price_{subset}_lead",
487 | ],
488 | )
489 | y_test = pd.Series([-1, 1, 1, -1, -1, 1])
490 | self._apply_rule(x_train, x_test, y_test, [("rev_emo", subset)], 42)
491 |
492 | @pytest.mark.benchmark
493 | @pytest.mark.parametrize("subset", ["best", "ex"])
494 | def test_clnv(self, x_train: pd.DataFrame, subset: str) -> None:
495 | """Test, if the clnv algorithm is correctly applied.
496 |
497 | Tests cases where both quote rule and tick rule all are used.
498 |
499 | Args:
500 | x_train (pd.DataFrame): training set
501 | subset (str): subset e. g., 'ex'
502 | """
503 | # first two by quote rule, two by tick rule, two by random chance.
504 | x_test = pd.DataFrame(
505 | [
506 | [5, 3, 1, 0], # tick rule
507 | [0, 3, 1, 1], # tick rule
508 | [2.9, 3, 1, 1], # quote rule
509 | [2.3, 3, 1, 3], # tick rule
510 | [1.7, 3, 1, 0], # tick rule
511 | [1.3, 3, 1, 1], # quote rule
512 | ],
513 | columns=[
514 | "trade_price",
515 | f"ask_{subset}",
516 | f"bid_{subset}",
517 | f"price_{subset}_lag",
518 | ],
519 | )
520 | y_test = pd.Series([1, -1, 1, -1, 1, -1])
521 | self._apply_rule(x_train, x_test, y_test, [("clnv", subset)], 42)
522 |
523 | @pytest.mark.benchmark
524 | @pytest.mark.parametrize("subset", ["best", "ex"])
525 | def test_rev_clnv(self, x_train: pd.DataFrame, subset: str) -> None:
526 | """Test, if the rev. clnv algorithm is correctly applied.
527 |
528 | Tests cases where both quote rule and rev. tick rule all are used.
529 |
530 | Args:
531 | x_train (pd.DataFrame): training set
532 | subset (str): subset e. g., 'ex'
533 | """
534 | x_test = pd.DataFrame(
535 | [
536 | [5, 3, 1, 0], # rev tick rule
537 | [0, 3, 1, 1], # rev tick rule
538 | [2.9, 3, 1, 1], # quote rule
539 | [2.3, 3, 1, 3], # rev tick rule
540 | [1.7, 3, 1, 0], # rev tick rule
541 | [1.3, 3, 1, 1], # quote rule
542 | ],
543 | columns=[
544 | "trade_price",
545 | f"ask_{subset}",
546 | f"bid_{subset}",
547 | f"price_{subset}_lead",
548 | ],
549 | )
550 | y_test = pd.Series([1, -1, 1, -1, 1, -1])
551 | self._apply_rule(x_train, x_test, y_test, [("rev_clnv", subset)], 5)
552 |
553 | @pytest.mark.benchmark
554 | def test_trade_size(self, x_train: pd.DataFrame) -> None:
555 | """Test, if the trade size algorithm is correctly applied.
556 |
557 | Tests cases where relevant data is present or missing.
558 | """
559 | # first two by trade size, random, at bid size, random, random.
560 | x_test = pd.DataFrame(
561 | [
562 | [1, 1, 3],
563 | [3, 1, 3],
564 | [1, 1, 1],
565 | [3, np.nan, 3],
566 | [1, np.inf, 2],
567 | [1, np.inf, 2],
568 | ],
569 | columns=["trade_size", "ask_size_ex", "bid_size_ex"],
570 | )
571 | y_test = pd.Series([-1, 1, -1, 1, -1, 1])
572 | self._apply_rule(x_train, x_test, y_test, [("trade_size", "ex")], 42)
573 |
574 | @pytest.mark.benchmark
575 | def test_depth(self, x_train: pd.DataFrame) -> None:
576 | """Test, if the depth rule is correctly applied.
577 |
578 | Tests cases where relevant data is present or missing.
579 | """
580 | # first three by depth, all other random as mid is different from trade price.
581 | x_test = pd.DataFrame(
582 | [
583 | [2, 1, 2, 4, 3],
584 | [1, 2, 2, 4, 3],
585 | [2, 1, 4, 4, 4],
586 | [2, 1, 2, 4, 2],
587 | [2, 1, 2, 4, 2],
588 | ],
589 | columns=[
590 | "ask_size_ex",
591 | "bid_size_ex",
592 | "ask_ex",
593 | "bid_ex",
594 | "trade_price",
595 | ],
596 | )
597 | y_test = pd.Series([1, -1, 1, 1, -1])
598 | self._apply_rule(x_train, x_test, y_test, [("depth", "ex")], 5)
599 |
--------------------------------------------------------------------------------
/docs/apa-6th-edition.csl:
--------------------------------------------------------------------------------
1 |
2 |
1581 |
--------------------------------------------------------------------------------