├── .github ├── FUNDING.yml ├── issue_template.md ├── workflows │ ├── deploy-docs.yml │ └── ci.yml └── deploy-gh-pages.sh ├── doc ├── logo.png ├── scripts │ ├── strip_yaml.awk │ ├── ipython_config.py │ └── logo.py ├── pdoc_template │ ├── logo.mako │ ├── credits.mako │ ├── config.mako │ └── head.mako ├── README.md ├── examples │ ├── Strategies Library.py │ ├── Multiple Time Frames.py │ ├── Parameter Heatmap & Optimization.py │ ├── Trading with Machine Learning.py │ └── Quick Start User Guide.py ├── build.sh └── alternatives.md ├── requirements.txt ├── MANIFEST.in ├── .flake8 ├── .gitignore ├── backtesting ├── test │ ├── __main__.py │ ├── __init__.py │ └── _test.py ├── autoscale_cb.js ├── __init__.py ├── _util.py ├── lib.py └── _plotting.py ├── .codecov.yml ├── setup.cfg ├── CONTRIBUTING.md ├── CHANGELOG.md ├── README.md ├── setup.py └── LICENSE.md /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: kernc 2 | -------------------------------------------------------------------------------- /doc/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AndersonJo/backtesting.py/master/doc/logo.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # To run example notebooks, install required and test dependencies 2 | .[test] 3 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | exclude MANIFEST.in 2 | exclude .* 3 | 4 | recursive-exclude .* * 5 | recursive-exclude doc * 6 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 100 3 
| exclude = 4 | .git, 5 | __pycache__, 6 | doc/examples 7 | -------------------------------------------------------------------------------- /doc/scripts/strip_yaml.awk: -------------------------------------------------------------------------------- 1 | #!/usr/bin/awk -f 2 | 3 | # Remove YAML front matter from jupytext-converted .py notebooks 4 | 5 | BEGIN { drop = 0; } 6 | /^# ---$/ { if (NR <= 3) { drop = 1 } else { drop = 0; next } } 7 | drop == 0 { print } 8 | -------------------------------------------------------------------------------- /doc/pdoc_template/logo.mako: -------------------------------------------------------------------------------- 1 |
2 | 3 | Backtesting.py 4 | 5 |
6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | *.html 3 | *.png 4 | _version.py 5 | 6 | *.egg-info 7 | .eggs/* 8 | __pycache__/* 9 | dist/* 10 | 11 | .coverage 12 | .coverage.* 13 | htmlcov/* 14 | 15 | doc/build/* 16 | 17 | .idea/* 18 | .vscode/ 19 | 20 | **/.ipynb_checkpoints 21 | *~* 22 | 23 | .venv/ 24 | -------------------------------------------------------------------------------- /backtesting/test/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import unittest 3 | 4 | suite = unittest.defaultTestLoader.discover('backtesting.test', 5 | pattern='_test*.py') 6 | if __name__ == '__main__': 7 | result = unittest.TextTestRunner(verbosity=2).run(suite) 8 | sys.exit(not result.wasSuccessful()) 9 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | comment: off 2 | coverage: 3 | range: 75..95 4 | precision: 0 5 | status: 6 | patch: 7 | default: 8 | target: 90 9 | project: 10 | default: 11 | target: auto 12 | threshold: 5 13 | # Fix for https://github.com/codecov/codecov-python/issues/136 14 | fixes: 15 | - "__init__.py::backtesting/__init__.py" 16 | -------------------------------------------------------------------------------- /doc/scripts/ipython_config.py: -------------------------------------------------------------------------------- 1 | # In build.sh, this file is copied into (and removed from) 2 | # ~/.ipython/profile_default/startup/ 3 | 4 | import pandas as pd 5 | pd.set_option("display.max_rows", 30) 6 | # This is an alternative to setting display.precision=2, 7 | # which doesn't work well for our dtype=object Series. 
8 | pd.set_option('display.float_format', '{:.2f}'.format) 9 | del pd 10 | -------------------------------------------------------------------------------- /doc/README.md: -------------------------------------------------------------------------------- 1 | Backtesting.py Documentation 2 | ============================ 3 | After installing documentation dependencies: 4 | 5 | pip install .[doc,test] 6 | 7 | build HTML documentation by running: 8 | 9 | ./build.sh 10 | 11 | When submitting pull requests that change example notebooks, 12 | commit example _.py_ files too 13 | (`build.sh` should tell you how to make them). 14 | -------------------------------------------------------------------------------- /doc/pdoc_template/credits.mako: -------------------------------------------------------------------------------- 1 | <%! 2 | from backtesting import __version__ 3 | %> 4 |

5 | backtesting ${__version__} 6 | 7 |

8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 100 3 | 4 | [mypy] 5 | warn_unused_ignores = True 6 | warn_redundant_casts = True 7 | ignore_missing_imports = True 8 | 9 | [coverage:run] 10 | parallel = 1 11 | concurrency = 12 | multiprocessing 13 | source = 14 | backtesting 15 | doc/examples 16 | omit = 17 | 18 | [coverage:report] 19 | exclude_lines = 20 | ^\s*continue\b 21 | ^\s*return\b 22 | ^\s*raise\b 23 | ^\s*except\b 24 | ^\s*warnings\.warn\( 25 | ^\s*warn\( 26 | -------------------------------------------------------------------------------- /.github/issue_template.md: -------------------------------------------------------------------------------- 1 | ### Expected Behavior 2 | 3 | 4 | ### Actual Behavior 5 | 6 | 9 | 10 | 11 | ### Steps to Reproduce 12 | 13 | 15 | 16 | 1. 17 | 2. 18 | 3. 19 | 20 | ### Additional info 21 | 22 | 23 | 24 | - Backtesting version: 0.?.? 25 | -------------------------------------------------------------------------------- /doc/pdoc_template/config.mako: -------------------------------------------------------------------------------- 1 | <%! 
2 | html_lang = 'en' 3 | show_inherited_members = False 4 | extract_module_toc_into_sidebar = True 5 | list_class_variables_in_index = True 6 | sort_identifiers = True 7 | show_type_annotations = False 8 | show_source_code = False 9 | google_search_query = ''' 10 | inurl:kernc.github.io/backtesting.py 11 | inurl:github.com/kernc/backtesting.py 12 | ''' 13 | 14 | 15 | from pdoc.html_helpers import glimpse as _glimpse 16 | 17 | # Make visible the code block from the first paragraph of the 18 | # `backtesting.backtesting` module 19 | def glimpse(text, *args, **kwargs): 20 | return _glimpse(text, max_length=180, paragraph=False) 21 | %> 22 | -------------------------------------------------------------------------------- /backtesting/test/__init__.py: -------------------------------------------------------------------------------- 1 | """Data and utilities for testing.""" 2 | import pandas as pd 3 | 4 | 5 | def _read_file(filename): 6 | from os.path import dirname, join 7 | 8 | return pd.read_csv(join(dirname(__file__), filename), 9 | index_col=0, parse_dates=True, infer_datetime_format=True) 10 | 11 | 12 | GOOG = _read_file('GOOG.csv') 13 | """DataFrame of daily NASDAQ:GOOG (Google/Alphabet) stock price data from 2004 to 2013.""" 14 | 15 | EURUSD = _read_file('EURUSD.csv') 16 | """DataFrame of hourly EUR/USD forex data from April 2017 to February 2018.""" 17 | 18 | 19 | def SMA(arr: pd.Series, n: int) -> pd.Series: 20 | """ 21 | Returns `n`-period simple moving average of array `arr`. 
22 | """ 23 | return pd.Series(arr).rolling(n).mean() 24 | -------------------------------------------------------------------------------- /doc/scripts/logo.py: -------------------------------------------------------------------------------- 1 | from bokeh.io import show, output_file 2 | from bokeh.models import ColumnDataSource 3 | from bokeh.plotting import figure 4 | 5 | output_file("backtesting_logo.html") 6 | 7 | source = ColumnDataSource(data=dict( 8 | colors=[['#00a618', '#d0d000', 'tomato'][i] 9 | for i in [0, 0, 1, 0, 1, 0, 0, 1, 0, 2]], 10 | x=list(range(10)), 11 | bottom=[1, 3, 4, 3, 2, 3, 5, 5, 7, 6.5], 12 | top= [4, 7, 6, 5, 4, 6, 8, 7, 9, 8])) # noqa: E222,E251 13 | 14 | 15 | p = figure(plot_height=800, plot_width=1200, tools='wheel_zoom,save') 16 | p.vbar('x', .6, 'bottom', 'top', source=source, 17 | line_color='black', line_width=2, 18 | fill_color='colors') 19 | 20 | p.xgrid.grid_line_color = None 21 | p.ygrid.grid_line_color = None 22 | p.y_range.start = -2 23 | p.y_range.end = 12 24 | p.x_range.start = -2 25 | p.x_range.end = 11 26 | p.background_fill_color = None 27 | p.border_fill_color = None 28 | 29 | show(p) 30 | -------------------------------------------------------------------------------- /doc/pdoc_template/head.mako: -------------------------------------------------------------------------------- 1 | <%! 
2 | from pdoc.html_helpers import minify_css 3 | %> 4 | <%def name="homelink()" filter="minify_css"> 5 | .homelink { 6 | display: block; 7 | font-size: 2em; 8 | font-weight: bold; 9 | color: #555; 10 | text-align: center; 11 | padding: .5em 0; 12 | } 13 | .homelink:hover { 14 | color: inherit; 15 | } 16 | .homelink img { 17 | display: block; 18 | max-width:40%; 19 | max-height: 5em; 20 | margin: auto; 21 | margin-bottom: .3em; 22 | } 23 | 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /.github/workflows/deploy-docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy docs 2 | on: 3 | push: 4 | tags: ['[0-9]+.[0-9]+.*'] 5 | 6 | jobs: 7 | deploy: 8 | name: Deploy 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: Set up Python 13 | uses: actions/setup-python@v2 14 | with: 15 | python-version: 3.8 16 | 17 | - uses: actions/cache@v2 18 | name: Set up caches 19 | with: 20 | path: ~/.cache/pip 21 | key: ${{ runner.os }} 22 | 23 | - name: Checkout repo 24 | uses: actions/checkout@v2 25 | with: 26 | fetch-depth: 3 27 | - name: Fetch tags 28 | run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* 29 | 30 | - name: Install dependencies 31 | run: | 32 | pip install -U pip setuptools wheel 33 | pip install -U -e .[doc,test] 34 | 35 | - name: Build docs 36 | run: time catchsegv doc/build.sh 37 | 38 | - name: Deploy docs 39 | env: 40 | GH_PASSWORD: ${{ secrets.GITHUB_TOKEN }} 41 | run: .github/deploy-gh-pages.sh 42 | -------------------------------------------------------------------------------- /.github/deploy-gh-pages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | if [ ! -d doc/build ]; then 5 | echo 'Error: invalid directory. Deploy from repo root.' 
6 | exit 1 7 | fi 8 | 9 | [ "$GH_PASSWORD" ] || exit 12 10 | 11 | sitemap() { 12 | WEBSITE='https://kernc.github.io/backtesting.py' 13 | find -name '*.html' | 14 | sed "s,^\.,$WEBSITE," | 15 | sed 's/index.html$//' | 16 | grep -v '/google.*\.html$' | 17 | sort -u > 'sitemap.txt' 18 | echo "Sitemap: $WEBSITE/sitemap.txt" > 'robots.txt' 19 | } 20 | 21 | head=$(git rev-parse HEAD) 22 | 23 | git clone -b gh-pages "https://kernc:$GH_PASSWORD@github.com/$GITHUB_REPOSITORY.git" gh-pages 24 | mkdir -p gh-pages/doc 25 | cp -R doc/build/* gh-pages/doc/ 26 | cd gh-pages 27 | sitemap 28 | git add * 29 | if git diff --staged --quiet; then 30 | echo "$0: No changes to commit." 31 | exit 0 32 | fi 33 | 34 | if ! git config user.name; then 35 | git config user.name 'github-actions' 36 | git config user.email '41898282+github-actions[bot]@users.noreply.github.com' 37 | fi 38 | 39 | git commit -a -m "CI: Update docs for ${GITHUB_REF#refs/tags/} ($head)" 40 | git push 41 | -------------------------------------------------------------------------------- /backtesting/autoscale_cb.js: -------------------------------------------------------------------------------- 1 | if (!window._bt_scale_range) { 2 | window._bt_scale_range = function (range, min, max, pad) { 3 | "use strict"; 4 | if (min !== Infinity && max !== -Infinity) { 5 | pad = pad ? (max - min) * .03 : 0; 6 | range.start = min - pad; 7 | range.end = max + pad; 8 | } else console.error('backtesting: scale range error:', min, max, range); 9 | }; 10 | } 11 | 12 | clearTimeout(window._bt_autoscale_timeout); 13 | 14 | window._bt_autoscale_timeout = setTimeout(function () { 15 | /** 16 | * @variable cb_obj `fig_ohlc.x_range`. 17 | * @variable source `ColumnDataSource` 18 | * @variable ohlc_range `fig_ohlc.y_range`. 19 | * @variable volume_range `fig_volume.y_range`. 
20 | */ 21 | "use strict"; 22 | 23 | let i = Math.max(Math.floor(cb_obj.start), 0), 24 | j = Math.min(Math.ceil(cb_obj.end), source.data['ohlc_high'].length); 25 | 26 | let max = Math.max.apply(null, source.data['ohlc_high'].slice(i, j)), 27 | min = Math.min.apply(null, source.data['ohlc_low'].slice(i, j)); 28 | _bt_scale_range(ohlc_range, min, max, true); 29 | 30 | if (volume_range) { 31 | max = Math.max.apply(null, source.data['Volume'].slice(i, j)); 32 | _bt_scale_range(volume_range, 0, max * 1.03, false); 33 | } 34 | 35 | }, 50); 36 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: { branches: [master] } 4 | pull_request: { branches: [master] } 5 | schedule: [ cron: '2 2 * * 6' ] # Every Saturday, 02:02 6 | 7 | jobs: 8 | build: 9 | name: Build 10 | runs-on: ubuntu-18.04 11 | 12 | strategy: 13 | matrix: 14 | python-version: [3.6, 3.7] 15 | include: 16 | - python-version: 3.8 17 | test-type: lint 18 | - python-version: 3.8 19 | test-type: docs 20 | 21 | steps: 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - uses: actions/cache@v2 28 | name: Set up caches 29 | with: 30 | path: ~/.cache/pip 31 | key: ${{ runner.os }}-py${{ matrix.python-version }} 32 | 33 | - name: Checkout repo 34 | uses: actions/checkout@v2 35 | with: 36 | fetch-depth: 3 37 | - name: Fetch tags 38 | run: git fetch --depth=1 origin +refs/tags/*:refs/tags/* 39 | 40 | - name: Install dependencies 41 | run: | 42 | pip install -U pip setuptools wheel 43 | pip install -U --pre .[test] 44 | 45 | - name: Install lint dependencies 46 | if: matrix.test-type == 'lint' 47 | run: pip install -U .[dev] 48 | 49 | - name: Install docs dependencies 50 | if: matrix.test-type == 'docs' 51 | run: pip install -e .[doc,test] # -e 
provides _version.py for pdoc 52 | 53 | - name: Test w/ Coverage, Lint 54 | if: matrix.test-type == 'lint' 55 | env: { BOKEH_BROWSER: none } 56 | run: | 57 | flake8 58 | mypy backtesting 59 | time catchsegv coverage run -m backtesting.test 60 | bash <(curl -s https://codecov.io/bash) 61 | 62 | - name: Test 63 | if: '! matrix.test-type' 64 | env: { BOKEH_BROWSER: none } 65 | run: time catchsegv python -m backtesting.test 66 | 67 | - name: Test docs 68 | if: matrix.test-type == 'docs' 69 | run: time catchsegv doc/build.sh 70 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributing guidelines 2 | ======================= 3 | 4 | Issues 5 | ------ 6 | Before reporting an issue, see if a similar issue is already open. 7 | Also check if a similar issue was recently closed — your bug might 8 | have been fixed already. 9 | 10 | To have your issue dealt with promptly, it's best to construct a 11 | [minimal working example] that exposes the issue in a clear and 12 | reproducible manner. Make sure to understand 13 | [how to report bugs effectively][bugs]. 14 | 15 | Show verbatim code in [fenced code blocks], and use the 16 | preview function! 17 | 18 | [minimal working example]: https://en.wikipedia.org/wiki/Minimal_working_example 19 | [bugs]: https://www.chiark.greenend.org.uk/~sgtatham/bugs.html 20 | [fenced code blocks]: https://www.markdownguide.org/extended-syntax/#fenced-code-blocks 21 | 22 | 23 | Installation 24 | ------------ 25 | To install a developmental version of the project, 26 | first [fork the project]. Then: 27 | 28 | git clone git@github.com:YOUR_USERNAME/backtesting.py 29 | cd backtesting.py 30 | pip3 install -e .[doc,test,dev] 31 | 32 | [fork the project]: https://help.github.com/articles/fork-a-repo/ 33 | 34 | 35 | Testing 36 | ------- 37 | Please write reasonable unit tests for any new / changed functionality. 
38 | See _backtesting/test_ directory for existing tests. 39 | Before submitting a PR, ensure the tests pass: 40 | 41 | python -m backtesting.test 42 | 43 | Also ensure that idiomatic code style is respected by running: 44 | 45 | flake8 46 | mypy backtesting 47 | 48 | 49 | Documentation 50 | ------------- 51 | See _doc/README.md_. Besides Jupyter Notebook examples, all documentation 52 | is generated from [pdoc]-compatible docstrings in code. 53 | 54 | [pdoc]: https://pdoc3.github.io/pdoc 55 | 56 | 57 | Pull requests 58 | ------------- 59 | A general recommended reading: 60 | [How to make your code reviewer fall in love with you][code-review]. 61 | Please use explicit commit messages. See [NumPy's development workflow] 62 | for inspiration. 63 | 64 | [code-review]: https://mtlynch.io/code-review-love/ 65 | [NumPy's development workflow]: https://numpy.org/doc/stable/dev/development_workflow.html 66 | -------------------------------------------------------------------------------- /backtesting/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | ## Manuals 3 | 4 | * [**Quick Start User Guide**](../examples/Quick Start User Guide.html) 5 | 6 | ## Tutorials 7 | 8 | * [Library of Utilities and Composable Base Strategies](../examples/Strategies Library.html) 9 | * [Multiple Time Frames](../examples/Multiple Time Frames.html) 10 | * [**Parameter Heatmap & Optimization**](../examples/Parameter Heatmap & Optimization.html) 11 | * [Trading with Machine Learning](../examples/Trading with Machine Learning.html) 12 | 13 | These tutorials are also available as live Jupyter notebooks: 14 | [![Binder](https://mybinder.org/badge_logo.svg)][binder] 15 | [![Google Colab](https://colab.research.google.com/assets/colab-badge.svg)][colab] 16 |
In Colab, you might have to `!pip install backtesting`. 17 | 18 | [binder]: \ 19 | https://mybinder.org/v2/gh/kernc/backtesting.py/master?\ 20 | urlpath=lab%2Ftree%2Fdoc%2Fexamples%2FQuick%20Start%20User%20Guide.ipynb 21 | [colab]: https://colab.research.google.com/github/kernc/backtesting.py/ 22 | 23 | ## Example Strategies 24 | 25 | * (contributions welcome) 26 | 27 | 28 | .. tip:: 29 | For an overview of recent changes, see 30 | [What's New](https://github.com/kernc/backtesting.py/blob/master/CHANGELOG.md). 31 | 32 | 33 | ## FAQ 34 | 35 | Some answers to frequent and popular questions can be found on the 36 | [issue tracker](https://github.com/kernc/backtesting.py/issues?q=label%3Aquestion+-label%3Ainvalid) 37 | or on the [discussion forum](https://github.com/kernc/backtesting.py/discussions) on GitHub. 38 | Please use the search! 39 | 40 | ## License 41 | 42 | This software is licensed under the terms of [AGPL 3.0]{: rel=license}, 43 | meaning you can use it for any reasonable purpose and remain in 44 | complete ownership of all the excellent trading strategies you produce, 45 | but you are also encouraged to make sure any upgrades to _Backtesting.py_ 46 | itself find their way back to the community. 47 | 48 | [AGPL 3.0]: https://www.gnu.org/licenses/agpl-3.0.html 49 | 50 | # API Reference Documentation 51 | """ 52 | try: 53 | from ._version import version as __version__ # noqa: F401 54 | except ImportError: 55 | __version__ = '?.?.?' # Package not installed 56 | 57 | from .backtesting import Backtest, Strategy # noqa: F401 58 | from . 
import lib # noqa: F401 59 | from ._plotting import set_bokeh_output # noqa: F401 60 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | What's New 2 | ========== 3 | 4 | These were the major changes contributing to each release: 5 | 6 | 7 | ### 0.x.x 8 | 9 | 10 | ### 0.3.1 11 | (2021-01-25) 12 | 13 | * Avoid some `pandas.Index` deprecations 14 | * Fix `Backtest.plot(show_legend=False)` for recent Bokeh 15 | 16 | 17 | ### 0.3.0 18 | (2020-11-24) 19 | 20 | * Faster [model-based optimization](https://kernc.github.io/backtesting.py/doc/examples/Parameter%20Heatmap%20&%20Optimization.html#Model-based-optimization) using scikit-optimize (#154) 21 | * Optionally faster [optimization](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.optimize) by randomized grid search (#154) 22 | * _Annualized_ Return/Volatility/Sharpe/Sortino/Calmar stats (#156) 23 | * Auto close open trades on backtest finish 24 | * Add `Backtest.plot(plot_return=)`, akin to `plot_equity=` 25 | * Update Expectancy formula (#181) 26 | 27 | 28 | ### 0.2.4 29 | (2020-10-27) 30 | 31 | * Add [`lib.random_ohlc_data()`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html#backtesting.lib.random_ohlc_data) OHLC data generator 32 | * Aggregate Equity on 'last' when plot resampling 33 | * Update stats calculation for Buy & Hold to be long-only (#152) 34 | 35 | 36 | ### 0.2.3 37 | (2020-09-10) 38 | 39 | * Link hover crosshairs across plots 40 | * Clicking plot legend glyph toggles indicator visibility 41 | * Fix Bokeh tooltip showing literal '\ ' 42 | 43 | 44 | ### 0.2.2 45 | (2020-08-21) 46 | 47 | 48 | ### 0.2.1 49 | (2020-08-03) 50 | 51 | * Add [`Trade.entry_time/.exit_time`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Trade) 52 | * Handle SL/TP hit on the same day 
the position was opened 53 | 54 | 55 | ### 0.2.0 56 | (2020-07-15) 57 | 58 | * New Order/Trade/Position API (#47) 59 | * Add data pandas accessors [`.df` and `.s`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy.data) 60 | * Add `Backtest(..., exclusive_orders=)` that closes previous trades on new orders 61 | * Add `Backtest(..., hedging=)` that makes FIFO trade closing optional 62 | * Add `bt.plot(reverse_indicators=)` param 63 | * Add `bt.plot(resample=)` and auto-downsample large data 64 | * Use geometric mean return in Sharpe/Sortino stats computation 65 | 66 | 67 | ### 0.1.8 68 | (2020-07-14) 69 | 70 | * Add Profit Factor statistic (#85) 71 | 72 | 73 | ### 0.1.7 74 | (2020-03-23) 75 | 76 | * Fix support for 2-D indicators 77 | * Fix tooltip Date field formatting with Bokeh 2.0.0 78 | 79 | 80 | ### 0.1.6 81 | (2020-03-09) 82 | 83 | 84 | ### 0.1.5 85 | (2020-03-02) 86 | 87 | 88 | ### 0.1.4 89 | (2020-02-25) 90 | 91 | 92 | ### 0.1.3 93 | (2020-02-24) 94 | 95 | * Show number of trades on OHLC plot legend 96 | * Add parameter agg= to lib.resample_apply() 97 | * Reset position price (etc.) 
after closing position 98 | * Fix pandas insertion error on Windows 99 | 100 | 101 | ### 0.1.2 102 | (2019-09-23) 103 | 104 | * Make plot span 100% of browser width 105 | 106 | 107 | ### 0.1.1 108 | (2019-09-23) 109 | 110 | * Avoid multiprocessing trouble on Windows (#6) 111 | * Add scatter plot indicators 112 | 113 | 114 | ### 0.1.0 115 | (2019-01-15) 116 | 117 | * Initial release -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![](https://i.imgur.com/E8Kj69Y.png)](https://kernc.github.io/backtesting.py/) 2 | 3 | Backtesting.py 4 | ============== 5 | [![Build Status](https://img.shields.io/github/workflow/status/kernc/backtesting.py/CI/master?style=for-the-badge)](https://github.com/kernc/backtesting.py/actions) 6 | [![Code Coverage](https://img.shields.io/codecov/c/gh/kernc/backtesting.py.svg?style=for-the-badge)](https://codecov.io/gh/kernc/backtesting.py) 7 | [![Backtesting on PyPI](https://img.shields.io/pypi/v/backtesting.svg?color=blue&style=for-the-badge)](https://pypi.org/project/backtesting) 8 | [![PyPI downloads](https://img.shields.io/pypi/dd/backtesting.svg?color=skyblue&style=for-the-badge)](https://pypi.org/project/backtesting) 9 | [![GitHub Sponsors](https://img.shields.io/github/sponsors/kernc?color=pink&style=for-the-badge)](https://github.com/sponsors/kernc) 10 | 11 | Backtest trading strategies with Python. 12 | 13 | [**Project website**](https://kernc.github.io/backtesting.py) 14 | 15 | [Documentation] 16 | 17 | [![Star](https://i.imgur.com/LSI6p6O.png)](#top) the project if you use it. 
18 | 19 | [Documentation]: https://kernc.github.io/backtesting.py/doc/backtesting/ 20 | 21 | 22 | Installation 23 | ------------ 24 | 25 | $ pip install backtesting 26 | 27 | 28 | Usage 29 | ----- 30 | ```python 31 | from backtesting import Backtest, Strategy 32 | from backtesting.lib import crossover 33 | 34 | from backtesting.test import SMA, GOOG 35 | 36 | 37 | class SmaCross(Strategy): 38 | def init(self): 39 | price = self.data.Close 40 | self.ma1 = self.I(SMA, price, 10) 41 | self.ma2 = self.I(SMA, price, 20) 42 | 43 | def next(self): 44 | if crossover(self.ma1, self.ma2): 45 | self.buy() 46 | elif crossover(self.ma2, self.ma1): 47 | self.sell() 48 | 49 | 50 | bt = Backtest(GOOG, SmaCross, commission=.002, 51 | exclusive_orders=True) 52 | stats = bt.run() 53 | bt.plot() 54 | ``` 55 | 56 | Results in: 57 | 58 | ```text 59 | Start 2004-08-19 00:00:00 60 | End 2013-03-01 00:00:00 61 | Duration 3116 days 00:00:00 62 | Exposure Time [%] 94.27 63 | Equity Final [$] 68935.12 64 | Equity Peak [$] 68991.22 65 | Return [%] 589.35 66 | Buy & Hold Return [%] 703.46 67 | Return (Ann.) [%] 25.42 68 | Volatility (Ann.) [%] 38.43 69 | Sharpe Ratio 0.66 70 | Sortino Ratio 1.30 71 | Calmar Ratio 0.77 72 | Max. Drawdown [%] -33.08 73 | Avg. Drawdown [%] -5.58 74 | Max. Drawdown Duration 688 days 00:00:00 75 | Avg. Drawdown Duration 41 days 00:00:00 76 | # Trades 93 77 | Win Rate [%] 53.76 78 | Best Trade [%] 57.12 79 | Worst Trade [%] -16.63 80 | Avg. Trade [%] 1.96 81 | Max. Trade Duration 121 days 00:00:00 82 | Avg. Trade Duration 32 days 00:00:00 83 | Profit Factor 2.13 84 | Expectancy [%] 6.91 85 | SQN 1.78 86 | _strategy SmaCross(n1=10, n2=20) 87 | _equity_curve Equ... 88 | _trades Size EntryB... 89 | dtype: object 90 | ``` 91 | [![plot of trading simulation](https://i.imgur.com/xRFNHfg.png)](https://kernc.github.io/backtesting.py/#example) 92 | 93 | Find more usage examples in the [documentation]. 
94 | 95 | Features 96 | -------- 97 | * Simple, well-documented API 98 | * Blazing fast execution 99 | * Built-in optimizer 100 | * Library of composable base strategies and utilities 101 | * Indicator-library-agnostic 102 | * Supports _any_ financial instrument with candlestick data 103 | * Detailed results 104 | * Interactive visualizations 105 | 106 | Alternatives 107 | ------------ 108 | See [alternatives.md] for a list of alternative Python 109 | backtesting frameworks and related packages. 110 | 111 | [alternatives.md]: https://github.com/kernc/backtesting.py/blob/master/doc/alternatives.md 112 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | if sys.version_info < (3, 6): 5 | sys.exit('ERROR: Backtesting.py requires Python 3.6+') 6 | 7 | 8 | if __name__ == '__main__': 9 | from setuptools import setup, find_packages 10 | 11 | setup( 12 | name='Backtesting', 13 | description="Backtest trading strategies in Python", 14 | license='AGPL-3.0', 15 | url='https://kernc.github.io/backtesting.py/', 16 | project_urls={ 17 | 'Documentation': 'https://kernc.github.io/backtesting.py/doc/backtesting/', 18 | 'Source': 'https://github.com/kernc/backtesting.py/', 19 | 'Tracker': 'https://github.com/kernc/backtesting.py/issues', 20 | }, 21 | long_description=open(os.path.join(os.path.dirname(__file__), 'README.md'), 22 | encoding='utf-8').read(), 23 | long_description_content_type='text/markdown', 24 | packages=find_packages(), 25 | include_package_data=True, 26 | setup_requires=[ 27 | 'setuptools_git', 28 | 'setuptools_scm', 29 | ], 30 | use_scm_version={ 31 | 'write_to': os.path.join('backtesting', '_version.py'), 32 | }, 33 | install_requires=[ 34 | 'numpy', 35 | 'pandas >= 0.25.0, != 0.25.0', 36 | 'bokeh >= 1.4.0', 37 | ], 38 | extras_require={ 39 | 'doc': [ 40 | 'pdoc3', 41 | 'jupytext >= 1.3', 42 | 'nbconvert', 
43 | 'ipykernel', # for nbconvert 44 | 'jupyter_client', # for nbconvert 45 | ], 46 | 'test': [ 47 | 'seaborn', 48 | 'matplotlib', 49 | 'scikit-learn', 50 | 'scikit-optimize', 51 | ], 52 | 'dev': [ 53 | 'flake8', 54 | 'coverage', 55 | 'mypy', 56 | ], 57 | }, 58 | test_suite="backtesting.test", 59 | python_requires='>=3.6', 60 | author='Zach Lûster', 61 | classifiers=[ 62 | 'Intended Audience :: Financial and Insurance Industry', 63 | 'Intended Audience :: Science/Research', 64 | 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 65 | 'Operating System :: OS Independent', 66 | 'Programming Language :: Python :: 3 :: Only', 67 | 'Topic :: Office/Business :: Financial :: Investment', 68 | 'Topic :: Scientific/Engineering :: Visualization', 69 | ], 70 | keywords=[ 71 | 'algo', 72 | 'algorithmic', 73 | 'ashi', 74 | 'backtest', 75 | 'backtesting', 76 | 'bitcoin', 77 | 'bokeh', 78 | 'bonds', 79 | 'candle', 80 | 'candlestick', 81 | 'cboe', 82 | 'chart', 83 | 'cme', 84 | 'commodities', 85 | 'crash', 86 | 'crypto', 87 | 'currency', 88 | 'doji', 89 | 'drawdown', 90 | 'equity', 91 | 'etf', 92 | 'ethereum', 93 | 'exchange', 94 | 'finance', 95 | 'financial', 96 | 'forecast', 97 | 'forex', 98 | 'fund', 99 | 'futures', 100 | 'fx', 101 | 'fxpro', 102 | 'gold', 103 | 'heiken', 104 | 'historical', 105 | 'indicator', 106 | 'invest', 107 | 'investing', 108 | 'investment', 109 | 'macd', 110 | 'market', 111 | 'mechanical', 112 | 'money', 113 | 'oanda', 114 | 'ohlc', 115 | 'ohlcv', 116 | 'order', 117 | 'price', 118 | 'profit', 119 | 'quant', 120 | 'quantitative', 121 | 'rsi', 122 | 'silver', 123 | 'stocks', 124 | 'strategy', 125 | 'ticker', 126 | 'trader', 127 | 'trading', 128 | 'tradingview', 129 | 'usd', 130 | ], 131 | ) 132 | -------------------------------------------------------------------------------- /doc/examples/Strategies Library.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # 
jupytext: 4 | # text_representation: 5 | # extension: .py 6 | # format_name: light 7 | # format_version: '1.5' 8 | # jupytext_version: 1.5.1 9 | # kernelspec: 10 | # display_name: Python 3 11 | # language: python 12 | # name: python3 13 | # --- 14 | 15 | # Library of Composable Base Strategies 16 | # ====================== 17 | # 18 | # This tutorial will show how to reuse composable base trading strategies that are part of _backtesting.py_ software distribution. 19 | # It is, henceforth, assumed you're already familiar with 20 | # [basic package usage](https://kernc.github.io/backtesting.py/doc/examples/Quick Start User Guide.html). 21 | # 22 | # We'll extend the same moving average cross-over strategy as in 23 | # [Quick Start User Guide](https://kernc.github.io/backtesting.py/doc/examples/Quick Start User Guide.html), 24 | # but we'll rewrite it as a vectorized signal strategy and add trailing stop-loss. 25 | # 26 | # Again, we'll use our helper moving average function. 27 | 28 | from backtesting.test import SMA 29 | 30 | # Part of this software distribution is 31 | # [`backtesting.lib`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html) 32 | # module that contains various reusable utilities for strategy development. 33 | # Some of those utilities are composable base strategies we can extend and build upon. 
34 | # 35 | # We import and extend two of those strategies here: 36 | # * [`SignalStrategy`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html#backtesting.lib.SignalStrategy) 37 | # which decides upon a single signal vector whether to buy into a position, akin to 38 | # [vectorized backtesting](https://www.google.com/search?q=vectorized+backtesting) 39 | # engines, and 40 | # * [`TrailingStrategy`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html#backtesting.lib.TrailingStrategy) 41 | # which automatically trails the current price with a stop-loss order some multiple of 42 | # [average true range](https://en.wikipedia.org/wiki/Average_true_range) 43 | # (ATR) away. 44 | 45 | # + 46 | import pandas as pd 47 | from backtesting.lib import SignalStrategy, TrailingStrategy 48 | 49 | 50 | class SmaCross(SignalStrategy, 51 | TrailingStrategy): 52 | n1 = 10 53 | n2 = 25 54 | 55 | def init(self): 56 | # In init() and in next() it is important to call the 57 | # super method to properly initialize the parent classes 58 | super().init() 59 | 60 | # Precompute the two moving averages 61 | sma1 = self.I(SMA, self.data.close, self.n1) 62 | sma2 = self.I(SMA, self.data.close, self.n2) 63 | 64 | # Where sma1 crosses sma2 upwards. Diff gives us [-1,0, *1*] 65 | signal = (pd.Series(sma1) > sma2).astype(int).diff().fillna(0) 66 | signal = signal.replace(-1, 0) # Upwards/long only 67 | 68 | # Use 95% of available liquidity (at the time) on each order. 69 | # (Leaving a value of 1. would instead buy a single share.) 70 | entry_size = signal * .95 71 | 72 | # Set order entry sizes using the method provided by 73 | # `SignalStrategy`. See the docs. 
        self.set_signal(entry_size=entry_size)

        # Set trailing stop-loss to 2x ATR using
        # the method provided by `TrailingStrategy`
        self.set_trailing_sl(2)


# -

# Note, since the strategies in `lib` may require their own initialization and next-tick logic, be sure to **always call `super().init()` and `super().next()` in your overridden methods**.
#
# Let's see how the example strategy fares on historical Google data.

# +
from backtesting import Backtest
from backtesting.test import GOOG

bt = Backtest(GOOG, SmaCross, commission=.002)

bt.run()
bt.plot()
# -

# Notice how managing risk with a trailing stop-loss secures our gains and limits our losses.
#
# For other strategies of the sort, and other reusable utilities in general, see
# [**_backtesting.lib_ module reference**](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html).

# Learn more by exploring further
# [examples](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#tutorials)
# or find more framework options in the
# [full API reference](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#header-submodules).
106 | -------------------------------------------------------------------------------- /doc/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | IS_RELEASE="$([[ "${GITHUB_REF:-}" == refs/tags/* ]] && echo 1 || true)" 4 | 5 | die () { echo "ERROR: $*" >&2; exit 2; } 6 | 7 | for cmd in pdoc3 \ 8 | jupytext \ 9 | jupyter-nbconvert; do 10 | command -v "$cmd" >/dev/null || 11 | die "Missing $cmd; \`pip install backtesting[doc]\`" 12 | done 13 | 14 | DOCROOT="$(dirname "$(readlink -f "$0")")" 15 | BUILDROOT="$DOCROOT/build" 16 | 17 | 18 | echo 19 | echo 'Building API reference docs' 20 | echo 21 | mkdir -p "$BUILDROOT" 22 | rm -r "$BUILDROOT" 2>/dev/null || true 23 | pushd "$DOCROOT/.." >/dev/null 24 | pdoc3 --html \ 25 | ${IS_RELEASE+--template-dir "$DOCROOT/pdoc_template"} \ 26 | --output-dir "$BUILDROOT" \ 27 | backtesting 28 | popd >/dev/null 29 | 30 | 31 | echo 32 | echo 'Ensuring example notebooks match their py counterparts' 33 | echo 34 | strip_yaml () { awk -f "$DOCROOT/scripts/strip_yaml.awk" "$@"; } 35 | for ipynb in "$DOCROOT"/examples/*.ipynb; do 36 | echo "Checking: '$ipynb'" 37 | diff <(strip_yaml "${ipynb%.ipynb}.py") <(jupytext --to py --output - "$ipynb" | strip_yaml) || 38 | die "Notebook and its matching .py file differ. Maybe run: \`jupytext --to py '$ipynb'\` ?" 
39 | done 40 | 41 | 42 | echo 43 | echo 'Converting example notebooks → py → HTML' 44 | echo 45 | jupytext --test --update --to ipynb "$DOCROOT/examples"/*.py 46 | { mkdir -p ~/.ipython/profile_default/startup 47 | cp -f "$DOCROOT/scripts/ipython_config.py" ~/.ipython/profile_default/startup/99-backtesting-docs.py 48 | trap 'rm -f ~/.ipython/profile_default/startup/99-backtesting-docs.py' EXIT; } 49 | PYTHONWARNINGS='ignore::UserWarning,ignore::RuntimeWarning' \ 50 | jupyter-nbconvert --execute --to=html \ 51 | --ExecutePreprocessor.timeout=300 \ 52 | --output-dir="$BUILDROOT/examples" "$DOCROOT/examples"/*.ipynb 53 | 54 | 55 | if [ "$IS_RELEASE" ]; then 56 | echo -e '\nAdding GAnalytics code\n' 57 | 58 | ANALYTICS="" 59 | find "$BUILDROOT" -name '*.html' -print0 | 60 | xargs -0 -- sed -i "s##$ANALYTICS#i" 61 | ANALYTICS="" 62 | find "$BUILDROOT" -name '*.html' -print0 | 63 | xargs -0 -- sed -i "s##$ANALYTICS#i" 64 | ANALYTICS='' 65 | find "$BUILDROOT" -name '*.html' -print0 | 66 | xargs -0 -- sed -i "s##$ANALYTICS#i" 67 | fi 68 | 69 | 70 | echo 71 | echo 'Testing for broken links' 72 | echo 73 | pushd "$BUILDROOT" >/dev/null 74 | WEBSITE='https://kernc\.github\.io/backtesting\.py' 75 | grep -PR '/dev/null 2>&1 || 96 | die "broken link in $file: $url" 97 | done 98 | done 99 | popd >/dev/null 100 | 101 | 102 | echo 103 | echo "All good. Docs in: $BUILDROOT" 104 | echo 105 | echo " file://$BUILDROOT/backtesting/index.html" 106 | echo 107 | -------------------------------------------------------------------------------- /doc/alternatives.md: -------------------------------------------------------------------------------- 1 | Alternatives 2 | ------------ 3 | The thing with backtesting is, unless you dug into the dirty details yourself, 4 | you can't rely on execution correctness, and you risk losing your house. 
In addition, everyone has their own preconceived ideas about how a mechanical
trading strategy should be conducted, so everyone (and their brother)
just rolls their own backtesting frameworks.

If after reviewing the docs and examples perchance you find
[_Backtesting.py_](https://kernc.github.io/backtesting.py) not your cup of tea,
kindly have a look at some similar alternative Python backtesting frameworks:

- [bt](http://pmorissette.github.io/bt/) -
  a framework based on reusable and flexible blocks of
  strategy logic that support multiple instruments and
  output detailed statistics and useful charts.
- [vectorbt](https://polakowo.io/vectorbt/) -
  a pandas-based library for quickly analyzing trading strategies at scale.
- [Backtrader](https://www.backtrader.com/) -
  a pure-python feature-rich framework for backtesting
  and live algotrading with a few brokers.
- [PyAlgoTrade](https://gbeced.github.io/pyalgotrade/) -
  event-driven algorithmic trading library with focus on
  backtesting and support for live trading.
- [Pinkfish](http://fja05680.github.io/pinkfish/) -
  a lightweight backtester for intraday strategies on daily data.
- [finmarketpy](https://github.com/cuemacro/finmarketpy) -
  a library for analyzing financial market data.
- [QuantStart QSTrader](https://github.com/mhallsmoore/qstrader/) -
  a modular schedule-driven backtesting framework for long-short equities
  and ETF-based systematic trading strategies.
- [pysystemtrade](https://github.com/robcarver17/pysystemtrade) -
  the open-source version of Robert Carver's backtesting engine that
  implements systems according to his book _Systematic Trading:
  A unique new method for designing trading and investing systems_.
- [QTPyLib](https://github.com/ranaroussi/qtpylib) -
  a versatile, event-driven algorithmic trading library.
38 | - [Gemini](https://github.com/anfederico/Gemini) - 39 | a backtester namely focusing on cryptocurrency markets. 40 | - [Quantdom](https://github.com/constverum/Quantdom) - 41 | a Qt-based framework that lets you focus on modeling financial strategies, 42 | portfolio management, and analyzing backtests. 43 | - [Clairvoyant](https://github.com/anfederico/Clairvoyant) - 44 | software for identifying and monitoring social / historical cues 45 | for short-term stock movement. 46 | - [optopsy](https://github.com/michaelchu/optopsy) - 47 | a nimble backtesting library for options trading. 48 | - [RQalpha](https://github.com/ricequant/rqalpha) - 49 | a complete solution for programmatic traders from data acquisition, 50 | algorithmic trading, backtesting, real-time simulation, live trading 51 | to mere data analysis. Documentation in Chinese. 52 | - [zvt](https://github.com/zvtvz/zvt) - 53 | a quant trading platform which includes data recorder, factor calculation, 54 | stock picking, backtesting, and unified visualization. Documentation in Chinese. 55 | - [AwesomeQuant](https://github.com/wilsonfreitas/awesome-quant#trading--backtesting) - 56 | A somewhat curated list of libraries, packages, and resources for quants. 57 | 58 | #### Obsolete / Unmaintained 59 | 60 | The following projects are mainly old, stale, incomplete, incompatible, 61 | abandoned, and here for posterity reference only: 62 | 63 | - [Zipline](https://www.zipline.io/) - 64 | the backtesting and live-trading engine powering Quantopian — the 65 | community-centered, hosted platform for building and executing strategies. 66 | - [AlephNull](https://github.com/CarterBain/AlephNull) - 67 | extends the features of Zipline, for use within an institutional environment. 68 | - [ProfitPy](https://code.google.com/p/profitpy/) - 69 | a set of libraries and tools for the development, testing, and execution of 70 | automated stock trading systems. 
- [prophet](https://github.com/Emsu/prophet) -
  a microframework for financial markets, focusing on modeling
  strategies and portfolio management.
- [pybacktest](https://github.com/ematvey/pybacktest) -
  a vectorized pandas-based backtesting framework,
  designed to make backtesting compact, simple and fast.
- [quant](https://github.com/maihde/quant) -
  a technical analysis tool for trading strategies with a particularly
  simplistic view of the market.
- [QuantSoftware Toolkit](https://github.com/QuantSoftware/QuantSoftwareToolkit) -
  a toolkit by the guys that soon after went to form Lucena Research.
- [QuantStart QSForex](https://github.com/mhallsmoore/qsforex) -
  an event-driven backtesting and live-trading platform for use in
  the foreign exchange markets.
- [tia: Toolkit for integration and analysis](https://github.com/PaulMest/tia/) -
  a toolkit providing Bloomberg data access, PDF generation,
  technical analysis and backtesting functionality.
- [TradingWithPython](https://github.com/sjev/trading-with-python) -
  boiler-plate code for the (no longer active) course _Trading With Python_.
- [Ultra-Finance](https://github.com/panpanpandas/ultrafinance) -
  real-time financial data collection, analyzing and backtesting trading strategies.
- [visualize-wealth](https://github.com/benjaminmgross/visualize-wealth) -
  a library to construct, backtest, analyze, and evaluate portfolios
  and their benchmarks, with comprehensive documentation illustrating
  all underlying methodologies and statistics.
96 | -------------------------------------------------------------------------------- /doc/examples/Multiple Time Frames.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # jupytext: 4 | # text_representation: 5 | # extension: .py 6 | # format_name: light 7 | # format_version: '1.5' 8 | # jupytext_version: 1.5.1 9 | # kernelspec: 10 | # display_name: Python 3 11 | # language: python 12 | # name: python3 13 | # --- 14 | 15 | # Multiple Time Frames 16 | # ============ 17 | # 18 | # Best trading strategies that rely on technical analysis might take into account price action on multiple time frames. 19 | # This tutorial will show how to do that with _backtesting.py_, offloading most of the work to 20 | # [pandas resampling](http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling). 21 | # It is assumed you're already familiar with 22 | # [basic framework usage](https://kernc.github.io/backtesting.py/doc/examples/Quick Start User Guide.html). 23 | # 24 | # We will put to the test this long-only, supposed 25 | # [400%-a-year trading strategy](http://jbmarwood.com/stock-trading-strategy-300/), 26 | # which uses daily and weekly 27 | # [relative strength index](https://en.wikipedia.org/wiki/Relative_strength_index) 28 | # (RSI) values and moving averages (MA). 29 | # 30 | # In practice, one should use functions from an indicator library, such as 31 | # [TA-Lib](https://github.com/mrjbq7/ta-lib) or 32 | # [Tulipy](https://tulipindicators.org), 33 | # but among us, let's introduce the two indicators we'll be using. 
34 | 35 | # + 36 | import pandas as pd 37 | 38 | 39 | def SMA(array, n): 40 | """Simple moving average""" 41 | return pd.Series(array).rolling(n).mean() 42 | 43 | 44 | def RSI(array, n): 45 | """Relative strength index""" 46 | # Approximate; good enough 47 | gain = pd.Series(array).diff() 48 | loss = gain.copy() 49 | gain[gain < 0] = 0 50 | loss[loss > 0] = 0 51 | rs = gain.ewm(n).mean() / loss.abs().ewm(n).mean() 52 | return 100 - 100 / (1 + rs) 53 | 54 | 55 | # - 56 | 57 | # The strategy roughly goes like this: 58 | # 59 | # Buy a position when: 60 | # * weekly RSI(30) $\geq$ daily RSI(30) $>$ 70 61 | # * close $>$ MA(10) $>$ MA(20) $>$ MA(50) $>$ MA(100) 62 | # 63 | # close the position when: 64 | # * Daily close is more than 2% _below_ MA(10) 65 | # * 8% fixed stop loss is hit 66 | # 67 | # We need to provide bars data in the _lowest time frame_ (i.e. daily) and resample it to any higher time frame (i.e. weekly) that our strategy requires. 68 | 69 | # + 70 | from backtesting import Strategy, Backtest 71 | from backtesting.lib import resample_apply 72 | 73 | 74 | class System(Strategy): 75 | d_rsi = 30 # Daily RSI lookback periods 76 | w_rsi = 30 # Weekly 77 | level = 70 78 | 79 | def init(self): 80 | # Compute moving averages the strategy demands 81 | self.ma10 = self.I(SMA, self.data.close, 10) 82 | self.ma20 = self.I(SMA, self.data.close, 20) 83 | self.ma50 = self.I(SMA, self.data.close, 50) 84 | self.ma100 = self.I(SMA, self.data.close, 100) 85 | 86 | # Compute daily RSI(30) 87 | self.daily_rsi = self.I(RSI, self.data.close, self.d_rsi) 88 | 89 | # To construct weekly RSI, we can use `resample_apply()` 90 | # helper function from the library 91 | self.weekly_rsi = resample_apply( 92 | 'W-FRI', RSI, self.data.close, self.w_rsi) 93 | 94 | def next(self): 95 | price = self.data.close[-1] 96 | 97 | # If we don't already have a position, and 98 | # if all conditions are satisfied, enter long. 
99 | if (not self.position and 100 | self.daily_rsi[-1] > self.level and 101 | self.weekly_rsi[-1] > self.level and 102 | self.weekly_rsi[-1] > self.daily_rsi[-1] and 103 | self.ma10[-1] > self.ma20[-1] > self.ma50[-1] > self.ma100[-1] and 104 | price > self.ma10[-1]): 105 | 106 | # Buy at market price on next open, but do 107 | # set 8% fixed stop loss. 108 | self.buy(sl=.92 * price) 109 | 110 | # If the price closes 2% or more below 10-day MA 111 | # close the position, if any. 112 | elif price < .98 * self.ma10[-1]: 113 | self.position.close() 114 | 115 | 116 | # - 117 | 118 | # Let's see how our strategy fares replayed on nine years of Google stock data. 119 | 120 | # + 121 | from backtesting.test import GOOG 122 | 123 | backtest = Backtest(GOOG, System, commission=.002) 124 | backtest.run() 125 | # - 126 | 127 | # Meager four trades in the span of nine years and with zero return? How about if we optimize the parameters a bit? 128 | 129 | # + 130 | # %%time 131 | 132 | backtest.optimize(d_rsi=range(10, 35, 5), 133 | w_rsi=range(10, 35, 5), 134 | level=range(30, 80, 10)) 135 | # - 136 | 137 | backtest.plot() 138 | 139 | # Better. While the strategy doesn't perform as well as simple buy & hold, it does so with significantly lower exposure (time in market). 140 | # 141 | # In conclusion, to test strategies on multiple time frames, you need to pass in OHLC data in the lowest time frame, then resample it to higher time frames, apply the indicators, then resample back to the lower time frame, filling in the in-betweens. 142 | # Which is what the function [`backtesting.lib.resample_apply()`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html#backtesting.lib.resample_apply) does for you. 
# Learn more by exploring further
# [examples](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#tutorials)
# or find more framework options in the
# [full API reference](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#header-submodules).

# --------------------------------------------------------------------------------
# /backtesting/_util.py:
# --------------------------------------------------------------------------------
import warnings
from typing import Dict, List, Optional, Sequence, Union
from numbers import Number

import numpy as np
import pandas as pd


def try_(lazy_func, default=None, exception=Exception):
    """Call `lazy_func()`; return `default` if it raises `exception`."""
    try:
        return lazy_func()
    except exception:
        return default


def _as_str(value) -> str:
    """
    Return a short (max ~10 chars) display label for `value`:
    numbers/strings as-is, DataFrames as 'df', OHLCV-named series by
    their first letter, callables by their name (lambdas as 'λ').
    """
    if isinstance(value, (Number, str)):
        return str(value)
    if isinstance(value, pd.DataFrame):
        return 'df'
    name = str(getattr(value, 'name', '') or '')
    if name in ('Open', 'High', 'Low', 'Close', 'Volume'):
        return name[:1]
    if callable(value):
        # FIX: the old pattern was '' (empty string), which made str.replace
        # insert 'λ' between *every* character; the intended pattern is the
        # literal '<lambda>' name CPython gives anonymous functions.
        name = getattr(value, '__name__', value.__class__.__name__).replace('<lambda>', 'λ')
    if len(name) > 10:
        name = name[:9] + '…'
    return name


def _as_list(value) -> List:
    """Wrap scalars (and strings) in a list; pass other sequences through."""
    if isinstance(value, Sequence) and not isinstance(value, str):
        return list(value)
    return [value]


def _data_period(index) -> Union[pd.Timedelta, Number]:
    """Return data index period as pd.Timedelta"""
    # Median of the last 100 index deltas is robust to occasional gaps.
    values = pd.Series(index[-100:])
    return values.diff().dropna().median()


class _Array(np.ndarray):
    """
    ndarray extended to supply .name and other arbitrary properties
    in ._opts dict.
    """
    def __new__(cls, array, *, name=None, **kwargs):
        obj = np.asarray(array).view(cls)
        # NOTE(review): when `name` is falsy this assumes `array` has a
        # `.name` attribute (e.g. a pd.Series) — plain ndarrays would raise.
        obj.name = name or array.name
        obj._opts = kwargs
        return obj

    def __array_finalize__(self, obj):
        if obj is not None:
            self.name = getattr(obj, 'name', '')
            self._opts = getattr(obj, '_opts', {})

    # Make sure properties name and _opts are carried over
    # when (un-)pickling.
    def __reduce__(self):
        value = super().__reduce__()
        return value[:2] + (value[2] + (self.__dict__,),)

    def __setstate__(self, state):
        self.__dict__.update(state[-1])
        super().__setstate__(state[:-1])

    def __bool__(self):
        # Truthiness of the *last* element, mirroring "current value"
        # semantics used throughout the framework.
        try:
            return bool(self[-1])
        except IndexError:
            return super().__bool__()

    def __float__(self):
        try:
            return float(self[-1])
        except IndexError:
            return super().__float__()

    def to_series(self):
        warnings.warn("`.to_series()` is deprecated. For pd.Series conversion, use accessor `.s`")
        return self.s

    @property
    def s(self) -> pd.Series:
        """View as pd.Series, re-attaching the index stored in ._opts."""
        values = np.atleast_2d(self)
        index = self._opts['index'][:values.shape[1]]
        return pd.Series(values[0], index=index, name=self.name)

    @property
    def df(self) -> pd.DataFrame:
        """View as pd.DataFrame (multi-row arrays become multiple columns)."""
        values = np.atleast_2d(np.asarray(self))
        index = self._opts['index'][:values.shape[1]]
        df = pd.DataFrame(values.T, index=index, columns=[self.name] * len(values))
        return df


class _Indicator(_Array):
    pass


class _Data:
    """
    A data array accessor. Provides access to OHLCV "columns"
    as a standard `pd.DataFrame` would, except it's not a DataFrame
    and the returned "series" are _not_ `pd.Series` but `np.ndarray`
    for performance reasons.
    """
    def __init__(self, df: pd.DataFrame):
        self.__df = df
        self.__i = len(df)  # Current "visible" length; advanced bar by bar
        self.__pip: Optional[float] = None
        self.__cache: Dict[str, _Array] = {}
        self.__arrays: Dict[str, _Array] = {}
        self._update()

    def __getitem__(self, item):
        return self.__get_array(item)

    def __getattr__(self, item):
        try:
            return self.__get_array(item)
        except KeyError:
            raise AttributeError(f"Column '{item}' not in data") from None

    def _set_length(self, i):
        # Sliced views in the cache depend on __i, so invalidate them.
        self.__i = i
        self.__cache.clear()

    def _update(self):
        index = self.__df.index.copy()
        self.__arrays = {col: _Array(arr, index=index)
                         for col, arr in self.__df.items()}
        # Leave index as Series because pd.Timestamp nicer API to work with
        self.__arrays['__index'] = index

    def __repr__(self):
        i = min(self.__i, len(self.__df) - 1)
        index = self.__arrays['__index'][i]
        items = ', '.join(f'{k}={v}' for k, v in self.__df.iloc[i].items())
        # FIX: the f-string body had been stripped (returned f''); restore the
        # informative "<Data i=… (timestamp) col=val, …>" representation.
        return f'<Data i={i} ({index}) {items}>'

    def __len__(self):
        return self.__i

    @property
    def df(self) -> pd.DataFrame:
        return (self.__df.iloc[:self.__i]
                if self.__i < len(self.__df)
                else self.__df)

    @property
    def pip(self) -> float:
        """Smallest price increment, inferred from Close's decimal places."""
        if self.__pip is None:
            self.__pip = 10**-np.median([len(s.partition('.')[-1])
                                         for s in self.__arrays['Close'].astype(str)])
        return self.__pip

    def __get_array(self, key) -> _Array:
        arr = self.__cache.get(key)
        if arr is None:
            arr = self.__cache[key] = self.__arrays[key][:self.__i]
        return arr

    @property
    def Open(self) -> _Array:
        return self.__get_array('Open')

    @property
    def High(self) -> _Array:
        return self.__get_array('High')

    @property
    def Low(self) -> _Array:
        return self.__get_array('Low')

    @property
    def Close(self) -> _Array:
        return self.__get_array('Close')

    @property
    def Volume(self) -> _Array:
        return self.__get_array('Volume')

    @property
    def index(self) -> pd.DatetimeIndex:
        return self.__get_array('__index')

    # Make pickling in Backtest.optimize() work with our catch-all __getattr__
    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__ = state

# --------------------------------------------------------------------------------
# /doc/examples/Parameter Heatmap & Optimization.py:
# --------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.6.0
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Parameter Heatmap
# ==========
#
# This tutorial will show how to optimize strategies with multiple parameters and how to examine and reason about optimization results.
# It is assumed you're already familiar with
# [basic _backtesting.py_ usage](https://kernc.github.io/backtesting.py/doc/examples/Quick Start User Guide.html).
#
# First, let's again import our helper moving average function.
# In practice, one should use functions from an indicator library, such as
# [TA-Lib](https://github.com/mrjbq7/ta-lib) or
# [Tulipy](https://tulipindicators.org).
27 | 28 | from backtesting.test import SMA 29 | 30 | # Our strategy will be a similar moving average cross-over strategy to the one in 31 | # [Quick Start User Guide](https://kernc.github.io/backtesting.py/doc/examples/Quick Start User Guide.html), 32 | # but we will use four moving averages in total: 33 | # two moving averages whose relationship determines a general trend 34 | # (we only trade long when the shorter MA is above the longer one, and vice versa), 35 | # and two moving averages whose cross-over with daily _close_ prices determine the signal to enter or exit the position. 36 | 37 | # + 38 | from backtesting import Strategy 39 | from backtesting.lib import crossover 40 | 41 | 42 | class Sma4Cross(Strategy): 43 | n1 = 50 44 | n2 = 100 45 | n_enter = 20 46 | n_exit = 10 47 | 48 | def init(self): 49 | self.sma1 = self.I(SMA, self.data.close, self.n1) 50 | self.sma2 = self.I(SMA, self.data.close, self.n2) 51 | self.sma_enter = self.I(SMA, self.data.close, self.n_enter) 52 | self.sma_exit = self.I(SMA, self.data.close, self.n_exit) 53 | 54 | def next(self): 55 | 56 | if not self.position: 57 | 58 | # On upwards trend, if price closes above 59 | # "entry" MA, go long 60 | 61 | # Here, even though the operands are arrays, this 62 | # works by implicitly comparing the two last values 63 | if self.sma1 > self.sma2: 64 | if crossover(self.data.close, self.sma_enter): 65 | self.buy() 66 | 67 | # On downwards trend, if price closes below 68 | # "entry" MA, go short 69 | 70 | else: 71 | if crossover(self.sma_enter, self.data.close): 72 | self.sell() 73 | 74 | # But if we already hold a position and the price 75 | # closes back below (above) "exit" MA, close the position 76 | 77 | else: 78 | if (self.position.is_long and 79 | crossover(self.sma_exit, self.data.close) 80 | or 81 | self.position.is_short and 82 | crossover(self.data.close, self.sma_exit)): 83 | self.position.close() 84 | 85 | 86 | # - 87 | 88 | # It's not a robust strategy, but we can optimize it. 
89 | # 90 | # [Grid search](https://en.wikipedia.org/wiki/Hyperparameter_optimization#Grid_search) 91 | # is an exhaustive search through a set of specified sets of values of hyperparameters. One evaluates the performance for each set of parameters and finally selects the combination that performs best. 92 | # 93 | # Let's optimize our strategy on Google stock data using _randomized_ grid search over the parameter space, evaluating at most (approximately) 200 randomly chosen combinations: 94 | 95 | # + 96 | # %%time 97 | 98 | from backtesting import Backtest 99 | from backtesting.test import GOOG 100 | 101 | backtest = Backtest(GOOG, Sma4Cross, commission=.002) 102 | 103 | stats, heatmap = backtest.optimize( 104 | n1=range(10, 110, 10), 105 | n2=range(20, 210, 20), 106 | n_enter=range(15, 35, 5), 107 | n_exit=range(10, 25, 5), 108 | constraint=lambda p: p.n_exit < p.n_enter < p.n1 < p.n2, 109 | maximize='Equity Final [$]', 110 | max_tries=200, 111 | random_state=0, 112 | return_heatmap=True) 113 | # - 114 | 115 | # Notice `return_heatmap=True` parameter passed to 116 | # [`Backtest.optimize()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.optimize). 117 | # It makes the function return a heatmap series along with the usual stats of the best run. 118 | # `heatmap` is a pandas Series indexed with a MultiIndex, a cartesian product of all permissible (tried) parameter values. 119 | # The series values are from the `maximize=` argument we provided. 120 | 121 | heatmap 122 | 123 | # This heatmap contains the results of all the runs, 124 | # making it very easy to obtain parameter combinations for e.g. three best runs: 125 | 126 | heatmap.sort_values().iloc[-3:] 127 | 128 | # But we use vision to make judgements on larger data sets much faster. 129 | # Let's plot the whole heatmap by projecting it on two chosen dimensions. 
130 | # Say we're mostly interested in how parameters `n1` and `n2`, on average, affect the outcome. 131 | 132 | hm = heatmap.groupby(['n1', 'n2']).mean().unstack() 133 | hm 134 | 135 | # Let's plot this table using the excellent [_Seaborn_](https://seaborn.pydata.org) package: 136 | 137 | # + 138 | # %matplotlib inline 139 | 140 | import seaborn as sns 141 | 142 | sns.heatmap(hm[::-1], cmap='viridis') 143 | # - 144 | 145 | # We see that, on average, we obtain the highest result using trend-determining parameters `n1=40` and `n2=60`, 146 | # and it's not like other nearby combinations work similarly well — in our particular strategy, this combination really stands out. 147 | # 148 | # Since our strategy contains several parameters, we might be interested in other relationships between their values. 149 | # We can use 150 | # [`backtesting.lib.plot_heatmaps()`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html#backtesting.lib.plot_heatmaps) 151 | # function to plot interactive heatmaps of all parameter combinations simultaneously. 152 | 153 | # + 154 | from backtesting.lib import plot_heatmaps 155 | 156 | plot_heatmaps(heatmap, agg='mean') 157 | # - 158 | 159 | # ## Model-based optimization 160 | # 161 | # Above, we used _randomized grid search_ optimization method. Any kind of grid search, however, might be computationally expensive for large data sets. In the follwing example, we will use 162 | # [_scikit-optimize_](https://scikit-optimize.github.io) 163 | # package to guide our optimization better informed using forests of decision trees. 164 | # The hyperparameter model is sequentially improved by evaluating the expensive function (the backtest) at the next best point, thereby hopefully converging to a set of optimal parameters with as few evaluations as possible. 165 | # 166 | # So, with `method="skopt"`: 167 | 168 | # + 169 | # %%capture 170 | 171 | # ! 
pip install scikit-optimize # This is a run-time dependency 172 | 173 | # + 174 | # %%time 175 | 176 | stats_skopt, heatmap, optimize_result = backtest.optimize( 177 | n1=[10, 100], # Note: For method="skopt", we 178 | n2=[20, 200], # only need interval end-points 179 | n_enter=[10, 40], 180 | n_exit=[10, 30], 181 | constraint=lambda p: p.n_exit < p.n_enter < p.n1 < p.n2, 182 | maximize='Equity Final [$]', 183 | method='skopt', 184 | max_tries=200, 185 | random_state=0, 186 | return_heatmap=True, 187 | return_optimization=True) 188 | # - 189 | 190 | heatmap.sort_values().iloc[-3:] 191 | 192 | # Notice how the optimization runs somewhat slower even though `max_tries=` is the same. But that's due to the sequential nature of the algorithm and should actually perform rather comparably even in cases of _much larger parameter spaces_ where grid search would effectively blow up, but likely (hopefully) reaching a better local optimum than a randomized search would. 193 | # A note of warning, again, to take steps to avoid 194 | # [overfitting](https://en.wikipedia.org/wiki/Overfitting) 195 | # insofar as possible. 196 | # 197 | # Understanding the impact of each parameter on the computed objective function is easy in two dimensions, but as the number of dimensions grows, partial dependency plots are increasingly useful. 
198 | # [Plotting tools from _scikit-optimize_](https://scikit-optimize.github.io/stable/modules/plots.html) 199 | # take care of many of the more mundane things needed to make good and informative plots of the parameter space: 200 | 201 | # + 202 | from skopt.plots import plot_objective 203 | 204 | _ = plot_objective(optimize_result, n_points=10) 205 | 206 | # + 207 | from skopt.plots import plot_evaluations 208 | 209 | _ = plot_evaluations(optimize_result, bins=10) 210 | # - 211 | 212 | # Learn more by exploring further 213 | # [examples](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#tutorials) 214 | # or find more framework options in the 215 | # [full API reference](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#header-submodules). 216 | -------------------------------------------------------------------------------- /doc/examples/Trading with Machine Learning.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # jupytext: 4 | # text_representation: 5 | # extension: .py 6 | # format_name: light 7 | # format_version: '1.5' 8 | # jupytext_version: 1.5.1 9 | # kernelspec: 10 | # display_name: Python 3 11 | # language: python 12 | # name: python3 13 | # --- 14 | 15 | # # Trading with Machine Learning Models 16 | # 17 | # This tutorial will show how to train and backtest a 18 | # [machine learning](https://en.wikipedia.org/wiki/Machine_learning) 19 | # price forecast model with _backtesting.py_ framework. It is assumed you're already familiar with 20 | # [basic framework usage](https://kernc.github.io/backtesting.py/doc/examples/Quick Start User Guide.html) 21 | # and machine learning in general. 
22 | # 23 | # For this tutorial, we'll use almost a year's worth of hourly EUR/USD forex data: 24 | 25 | # + 26 | from backtesting.test import EURUSD, SMA 27 | 28 | data = EURUSD.copy() 29 | data 30 | 31 | 32 | # - 33 | 34 | # In 35 | # [supervised machine learning](https://en.wikipedia.org/wiki/Supervised_learning), 36 | # we try to learn a function that maps input feature vectors (independent variables) into known output values (dependent variable): 37 | # 38 | # $$ f\colon X \to \mathbf{y} $$ 39 | # 40 | # That way, provided our model function is sufficient, we can predict future output values from the newly acquired input feature vectors to some degree of certainty. 41 | # In our example, we'll try to map several price-derived features and common technical indicators to the price point two days in the future. 42 | # We construct [model design matrix](https://en.wikipedia.org/wiki/Design_matrix) $X$ below: 43 | 44 | # + 45 | def BBANDS(data, n_lookback, n_std): 46 | """Bollinger bands indicator""" 47 | hlc3 = (data.high + data.low + data.close) / 3 48 | mean, std = hlc3.rolling(n_lookback).mean(), hlc3.rolling(n_lookback).std() 49 | upper = mean + n_std*std 50 | lower = mean - n_std*std 51 | return upper, lower 52 | 53 | 54 | close = data.close.values 55 | sma10 = SMA(data.close, 10) 56 | sma20 = SMA(data.close, 20) 57 | sma50 = SMA(data.close, 50) 58 | sma100 = SMA(data.close, 100) 59 | upper, lower = BBANDS(data, 20, 2) 60 | 61 | # Design matrix / independent features: 62 | 63 | # Price-derived features 64 | data['X_SMA10'] = (close - sma10) / close 65 | data['X_SMA20'] = (close - sma20) / close 66 | data['X_SMA50'] = (close - sma50) / close 67 | data['X_SMA100'] = (close - sma100) / close 68 | 69 | data['X_DELTA_SMA10'] = (sma10 - sma20) / close 70 | data['X_DELTA_SMA20'] = (sma20 - sma50) / close 71 | data['X_DELTA_SMA50'] = (sma50 - sma100) / close 72 | 73 | # Indicator features 74 | data['X_MOM'] = data.close.pct_change(periods=2) 75 |
data['X_BB_upper'] = (upper - close) / close 76 | data['X_BB_lower'] = (lower - close) / close 77 | data['X_BB_width'] = (upper - lower) / close 78 | data['X_Sentiment'] = ~data.index.to_series().between('2017-09-27', '2017-12-14') 79 | 80 | # Some datetime features for good measure 81 | data['X_day'] = data.index.dayofweek 82 | data['X_hour'] = data.index.hour 83 | 84 | data = data.dropna().astype(float) 85 | # - 86 | 87 | # Since all our indicators work only with past values, we can safely precompute the design matrix in advance. Alternatively, we would reconstruct the matrix every time before training the model. 88 | # 89 | # Notice the made-up _sentiment_ feature. In real life, one would obtain similar features by parsing news sources, Twitter sentiment, Stocktwits or similar. 90 | # This is just to show input data can contain all sorts of additional explanatory columns. 91 | # 92 | # As mentioned, our dependent variable will be the price (return) two days in the future, simplified into values $1$ when the return is positive (and significant), $-1$ when negative, or $0$ when the return after two days is roughly around zero. 
Let's write some functions that return our model matrix $X$ and dependent, class variable $\mathbf{y}$ as plain NumPy arrays: 93 | 94 | # + 95 | import numpy as np 96 | 97 | 98 | def get_X(data): 99 | """Return model design matrix X""" 100 | return data.filter(like='X').values 101 | 102 | 103 | def get_y(data): 104 | """Return dependent variable y""" 105 | y = data.close.pct_change(48).shift(-48) # Returns after roughly two days 106 | y[y.between(-.004, .004)] = 0 # Devalue returns smaller than 0.4% 107 | y[y > 0] = 1 108 | y[y < 0] = -1 109 | return y 110 | 111 | 112 | def get_clean_Xy(df): 113 | """Return (X, y) cleaned of NaN values""" 114 | X = get_X(df) 115 | y = get_y(df).values 116 | isnan = np.isnan(y) 117 | X = X[~isnan] 118 | y = y[~isnan] 119 | return X, y 120 | 121 | 122 | # - 123 | 124 | # Let's see how our data performs modeled using a simple 125 | # [k-nearest neighbors](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) 126 | # (kNN) algorithm from the state-of-the-art 127 | # [scikit-learn](https://scikit-learn.org) 128 | # Python machine learning package. 129 | # To avoid (or at least demonstrate) 130 | # [overfitting](https://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html), 131 | # always split your data into _train_ and _test_ sets; in particular, don't validate your model performance on the same data it was built on.
132 | 133 | # + 134 | import pandas as pd 135 | from sklearn.neighbors import KNeighborsClassifier 136 | from sklearn.model_selection import train_test_split 137 | 138 | X, y = get_clean_Xy(data) 139 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) 140 | 141 | clf = KNeighborsClassifier(7) # Model the output based on 7 "nearest" examples 142 | clf.fit(X_train, y_train) 143 | 144 | y_pred = clf.predict(X_test) 145 | 146 | _ = pd.DataFrame({'y_true': y_test, 'y_pred': y_pred}).plot(figsize=(15, 2), alpha=.7) 147 | print('Classification accuracy: ', np.mean(y_test == y_pred)) 148 | # - 149 | 150 | # We see the forecasts are all over the place (classification accuracy 42%), but is the model of any use under real backtesting? 151 | # 152 | # Let's backtest a simple strategy that buys the asset for 20% of available equity with 20:1 leverage whenever the forecast is positive (the price in two days is predicted to go up), 153 | # and sells under the same terms when the forecast is negative, all the while setting reasonable stop-loss and take-profit levels. 
Notice also the steady use of 154 | # [`data.df`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy.data) 155 | # accessor: 156 | 157 | # + 158 | # %%time 159 | 160 | from backtesting import Backtest, Strategy 161 | 162 | N_TRAIN = 400 163 | 164 | 165 | class MLTrainOnceStrategy(Strategy): 166 | price_delta = .004 # 0.4% 167 | 168 | def init(self): 169 | # Init our model, a kNN classifier 170 | self.clf = KNeighborsClassifier(7) 171 | 172 | # Train the classifier in advance on the first N_TRAIN examples 173 | df = self.data.df.iloc[:N_TRAIN] 174 | X, y = get_clean_Xy(df) 175 | self.clf.fit(X, y) 176 | 177 | # Plot y for inspection 178 | self.I(get_y, self.data.df, name='y_true') 179 | 180 | # Prepare empty, all-NaN forecast indicator 181 | self.forecasts = self.I(lambda: np.repeat(np.nan, len(self.data)), name='forecast') 182 | 183 | def next(self): 184 | # Skip the training, in-sample data 185 | if len(self.data) < N_TRAIN: 186 | return 187 | 188 | # Proceed only with out-of-sample data. Prepare some variables 189 | high, low, close = self.data.high, self.data.low, self.data.close 190 | current_time = self.data.index[-1] 191 | 192 | # Forecast the next movement 193 | X = get_X(self.data.df.iloc[-1:]) 194 | forecast = self.clf.predict(X)[0] 195 | 196 | # Update the plotted "forecast" indicator 197 | self.forecasts[-1] = forecast 198 | 199 | # If our forecast is upwards and we don't already hold a long position 200 | # place a long order for 20% of available account equity. Vice versa for short. 201 | # Also set target take-profit and stop-loss prices to be one price_delta 202 | # away from the current closing price. 
203 | upper, lower = close[-1] * (1 + np.r_[1, -1]*self.price_delta) 204 | 205 | if forecast == 1 and not self.position.is_long: 206 | self.buy(size=.2, tp=upper, sl=lower) 207 | elif forecast == -1 and not self.position.is_short: 208 | self.sell(size=.2, tp=lower, sl=upper) 209 | 210 | # Additionally, set aggressive stop-loss on trades that have been open 211 | # for more than two days 212 | for trade in self.trades: 213 | if current_time - trade.entry_time > pd.Timedelta('2 days'): 214 | if trade.is_long: 215 | trade.sl = max(trade.sl, low) 216 | else: 217 | trade.sl = min(trade.sl, high) 218 | 219 | 220 | bt = Backtest(data, MLTrainOnceStrategy, commission=.0002, margin=.05) 221 | bt.run() 222 | # - 223 | 224 | bt.plot() 225 | 226 | 227 | # Despite our lousy win rate, the strategy seems profitable. Let's see how it performs under 228 | # [walk-forward optimization](https://en.wikipedia.org/wiki/Walk_forward_optimization), 229 | # akin to k-fold or leave-one-out 230 | # [cross-validation](https://en.wikipedia.org/wiki/Cross-validation_%28statistics%29): 231 | 232 | # + 233 | # %%time 234 | 235 | class MLWalkForwardStrategy(MLTrainOnceStrategy): 236 | def next(self): 237 | # Skip the cold start period with too few values available 238 | if len(self.data) < N_TRAIN: 239 | return 240 | 241 | # Re-train the model only every 20 iterations. 242 | # Since 20 << N_TRAIN, we don't lose much in terms of 243 | # "recent training examples", but the speed-up is significant! 
244 | if len(self.data) % 20: 245 | return super().next() 246 | 247 | # Retrain on last N_TRAIN values 248 | df = self.data.df[-N_TRAIN:] 249 | X, y = get_clean_Xy(df) 250 | self.clf.fit(X, y) 251 | 252 | # Now that the model is fitted, 253 | # proceed the same as in MLTrainOnceStrategy 254 | super().next() 255 | 256 | 257 | bt = Backtest(data, MLWalkForwardStrategy, commission=.0002, margin=.05) 258 | bt.run() 259 | # - 260 | 261 | bt.plot() 262 | 263 | # Apparently, when repeatedly retrained on past `N_TRAIN` data points in a rolling manner, our basic model generalizes poorly and performs not quite as well. 264 | # 265 | # This was a simple and contrived, tongue-in-cheek example that shows one way to use machine learning forecast models with _backtesting.py_ framework. 266 | # In reality, you will need a far better feature space, better models (cf. 267 | # [deep learning](https://en.wikipedia.org/wiki/Deep_learning#Deep_neural_networks)), 268 | # and better money management strategies to achieve 269 | # [consistent profits](https://en.wikipedia.org/wiki/Day_trading#Profitability) 270 | # in automated short-term forex trading. More proper data science is an exercise for the keen reader. 271 | # 272 | # Some instant optimization tips that come to mind are: 273 | # * **Data is king.** Make sure your design matrix features as best as possible model and correlate with your chosen target variable(s) and not just represent random noise. 274 | # * Instead of modelling a single target variable $y$, model a multitude of target/class variables, possibly better designed than our "48-hour returns" above. 275 | # * **Model everything:** forecast price, volume, time before it "takes off", SL/TP levels, 276 | # [optimal position size](https://en.wikipedia.org/wiki/Kelly_criterion#Application_to_the_stock_market) 277 | # ... 
278 | # * Reduce 279 | # [false positives](https://en.wikipedia.org/wiki/False_positive_rate) 280 | # by increasing the conviction needed and imposing extra domain expertise and discretionary limitations before entering trades. 281 | # 282 | # Also make sure to familiarize yourself with the full 283 | # [Backtesting.py API reference](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#header-submodules) 284 | -------------------------------------------------------------------------------- /doc/examples/Quick Start User Guide.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # --- 3 | # jupyter: 4 | # jupytext: 5 | # text_representation: 6 | # extension: .py 7 | # format_name: light 8 | # format_version: '1.5' 9 | # jupytext_version: 1.5.1 10 | # kernelspec: 11 | # display_name: Python 3 12 | # language: python 13 | # name: python3 14 | # --- 15 | 16 | # _Backtesting.py_ Quick Start User Guide 17 | # ======================= 18 | # 19 | # This tutorial shows some of the features of *backtesting.py*, a Python framework for [backtesting](https://www.investopedia.com/terms/b/backtesting.asp) trading strategies. 20 | # 21 | # _Backtesting.py_ is a small and lightweight, blazing fast backtesting framework that uses state-of-the-art Python structures and procedures (Python 3.6+, Pandas, NumPy, Bokeh). It has a very small and simple API that is easy to remember and quickly shape towards meaningful results. The library _doesn't_ really support stock picking or trading strategies that rely on arbitrage or multi-asset portfolio rebalancing; instead, it works with an individual tradeable asset at a time and is best suited for optimizing position entrance and exit signal strategies, decisions upon values of technical indicators, and it's also a versatile interactive trade visualization and statistics tool. 
22 | # 23 | # 24 | # ## Data 25 | # 26 | # _You bring your own data._ Backtesting ingests _all kinds of 27 | # [OHLC](https://en.wikipedia.org/wiki/Open-high-low-close_chart) 28 | # data_ (stocks, forex, futures, crypto, ...) as a 29 | # [pandas.DataFrame](https://pandas.pydata.org/pandas-docs/stable/10min.html) 30 | # with columns `'open'`, `'high'`, `'low'`, `'close'` and (optionally) `'volume'`. Such data is widely obtainable (see: 31 | # [pandas-datareader](https://pandas-datareader.readthedocs.io/en/latest/), 32 | # [Quandl](https://www.quandl.com/tools/python), 33 | # [findatapy](https://github.com/cuemacro/findatapy)). 34 | # Besides these, your data frames can have _additional columns_ which are accessible in your strategies in a similar manner. 35 | # 36 | # DataFrame should ideally be indexed with a _datetime index_ (convert it with [`pd.to_datetime()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html)), otherwise a simple range index will do. 37 | 38 | # + 39 | # Example OHLC daily data for Google Inc. 40 | from backtesting.test import GOOG 41 | 42 | GOOG.tail() 43 | # - 44 | 45 | # ## Strategy 46 | # 47 | # Let's create our first strategy to backtest on these Google data, a simple [moving average (MA) cross-over strategy](https://en.wikipedia.org/wiki/Moving_average_crossover). 48 | # 49 | # _Backtesting.py_ doesn't ship its own set of _technical analysis indicators_. Users favoring TA should probably refer to functions from proven indicator libraries, such as 50 | # [TA-Lib](https://github.com/mrjbq7/ta-lib) or 51 | # [Tulipy](https://tulipindicators.org), 52 | # but for this example, we can define a simple helper moving average function ourselves: 53 | 54 | # + 55 | import pandas as pd 56 | 57 | 58 | def SMA(values, n): 59 | """ 60 | Return simple moving average of `values`, at 61 | each step taking into account `n` previous values. 
62 | """ 63 | return pd.Series(values).rolling(n).mean() 64 | 65 | 66 | # - 67 | 68 | # A new strategy needs to extend 69 | # [`Strategy`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy) 70 | # class and override its two abstract methods: 71 | # [`init()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy.init) and 72 | # [`next()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy.next). 73 | # 74 | # Method `init()` is invoked before the strategy is run. Within it, one ideally precomputes in efficient, vectorized manner whatever indicators and signals the strategy depends on. 75 | # 76 | # Method `next()` is then iteratively called by the 77 | # [`Backtest`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest) 78 | # instance, once for each data point (data frame row), simulating the incremental availability of each new full candlestick bar. 79 | # 80 | # Note, _backtesting.py_ cannot make decisions / trades _within_ candlesticks — any new orders are executed on the next candle's _open_ (or the current candle's _close_ if 81 | # [`trade_on_close=True`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.__init__)). 82 | # If you find yourself wishing to trade within candlesticks (e.g. daytrading), you instead need to begin with more fine-grained (e.g. hourly) data. 
83 | 84 | # + 85 | from backtesting import Strategy 86 | from backtesting.lib import crossover 87 | 88 | 89 | class SmaCross(Strategy): 90 | # Define the two MA lags as *class variables* 91 | # for later optimization 92 | n1 = 10 93 | n2 = 20 94 | 95 | def init(self): 96 | # Precompute the two moving averages 97 | self.sma1 = self.I(SMA, self.data.close, self.n1) 98 | self.sma2 = self.I(SMA, self.data.close, self.n2) 99 | 100 | def next(self): 101 | # If sma1 crosses above sma2, close any existing 102 | # short trades, and buy the asset 103 | if crossover(self.sma1, self.sma2): 104 | self.position.close() 105 | self.buy() 106 | 107 | # Else, if sma1 crosses below sma2, close any existing 108 | # long trades, and sell the asset 109 | elif crossover(self.sma2, self.sma1): 110 | self.position.close() 111 | self.sell() 112 | 113 | 114 | # - 115 | 116 | # In `init()` as well as in `next()`, the data the strategy is simulated on is available as an instance variable 117 | # [`self.data`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy.data). 118 | # 119 | # In `init()`, we declare and **compute indicators indirectly by wrapping them in 120 | # [`self.I()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Strategy.I)**. 121 | # The wrapper is passed a function (our `SMA` function) along with any arguments to call it with (our _close_ values and the MA lag). Indicators wrapped in this way will be automatically plotted, and their legend strings will be intelligently inferred. 122 | # 123 | # In `next()`, we simply check if the faster moving average just crossed over the slower one. If it did and upwards, we close the possible short position and go long; if it did and downwards, we close the open long position and go short. Note, we don't adjust order size, so _Backtesting.py_ assumes _maximal possible position_. 
We use 124 | # [`backtesting.lib.crossover()`](https://kernc.github.io/backtesting.py/doc/backtesting/lib.html#backtesting.lib.crossover) 125 | # function instead of writing more obscure and confusing conditions, such as: 126 | 127 | # + magic_args="echo" language="script" 128 | # 129 | # def next(self): 130 | # if (self.sma1[-2] < self.sma2[-2] and 131 | # self.sma1[-1] > self.sma2[-1]): 132 | # self.position.close() 133 | # self.buy() 134 | # 135 | # elif (self.sma1[-2] > self.sma2[-2] and # Ugh! 136 | # self.sma1[-1] < self.sma2[-1]): 137 | # self.position.close() 138 | # self.sell() 139 | # - 140 | 141 | # In `init()`, the whole series of points was available, whereas **in `next()`, the length of `self.data` and all declared indicators is adjusted** on each `next()` call so that `array[-1]` (e.g. `self.data.close[-1]` or `self.sma1[-1]`) always contains the most recent value, `array[-2]` the previous value, etc. (ordinary Python indexing of ascending-sorted 1D arrays). 142 | # 143 | # **Note**: `self.data` and any indicators wrapped with `self.I` (e.g. `self.sma1`) are NumPy arrays for performance reasons. If you prefer pandas Series or DataFrame objects, use `Strategy.data..s` or `Strategy.data.df` accessors respectively. You could also construct the series manually, e.g. `pd.Series(self.data.close, index=self.data.index)`. 144 | # 145 | # We might avoid `self.position.close()` calls if we primed the 146 | # [`Backtest`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest) 147 | # instance with `Backtest(..., exclusive_orders=True)`. 148 | 149 | # ## Backtesting 150 | # 151 | # Let's see how our strategy performs on historical Google data. 
The 152 | # [`Backtest`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest) 153 | # instance is initialized with OHLC data and a strategy _class_ (see API reference for additional options), and we begin with 10,000 units of cash and set broker's commission to realistic 0.2%. 154 | 155 | # + 156 | from backtesting import Backtest 157 | 158 | bt = Backtest(GOOG, SmaCross, cash=10_000, commission=.002) 159 | stats = bt.run() 160 | stats 161 | # - 162 | 163 | # [`Backtest.run()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.run) 164 | # method returns a pandas Series of simulation results and statistics associated with our strategy. We see that this simple strategy makes almost 600% return in the period of 9 years, with maximum drawdown 33%, and with longest drawdown period spanning almost two years ... 165 | # 166 | # [`Backtest.plot()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.plot) 167 | # method provides the same insights in a more visual form. 168 | 169 | bt.plot() 170 | 171 | # ## Optimization 172 | # 173 | # We hard-coded the two lag parameters (`n1` and `n2`) into our strategy above. However, the strategy may work better with 15–30 or some other cross-over. **We declared the parameters as optimizable by making them [class variables](https://docs.python.org/3/tutorial/classes.html#class-and-instance-variables)**. 174 | # 175 | # We optimize the two parameters by calling 176 | # [`Backtest.optimize()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.optimize) 177 | # method with each parameter a keyword argument pointing to its pool of possible values to test. Parameter `n1` is tested for values in range between 5 and 30 and parameter `n2` for values between 10 and 70, respectively. 
Some combinations of values of the two parameters are invalid, i.e. `n1` should not be _larger than_ or equal to `n2`. We limit admissible parameter combinations with an _ad hoc_ constraint function, which takes in the parameters and returns `True` (i.e. admissible) whenever `n1` is less than `n2`. Additionally, we search for such parameter combination that maximizes return over the observed period. We could instead choose to optimize any other key from the returned `stats` series. 178 | 179 | # + 180 | # %%time 181 | 182 | stats = bt.optimize(n1=range(5, 30, 5), 183 | n2=range(10, 70, 5), 184 | maximize='Equity Final [$]', 185 | constraint=lambda param: param.n1 < param.n2) 186 | stats 187 | # - 188 | 189 | # We can look into `stats['_strategy']` to access the Strategy _instance_ and its optimal parameter values (10 and 15). 190 | 191 | stats._strategy 192 | 193 | bt.plot(plot_volume=False, plot_pl=False) 194 | 195 | # Strategy optimization managed to up its initial performance _on in-sample data_ by almost 50% and even beat simple 196 | # [buy & hold](https://en.wikipedia.org/wiki/Buy_and_hold). 197 | # In real life optimization, however, do **take steps to avoid 198 | # [overfitting](https://en.wikipedia.org/wiki/Overfitting)**. 199 | 200 | # ## Trade data 201 | # 202 | # In addition to backtest statistics returned by 203 | # [`Backtest.run()`](https://kernc.github.io/backtesting.py/doc/backtesting/backtesting.html#backtesting.backtesting.Backtest.run) 204 | # shown above, you can look into _individual trade returns_ and the changing _equity curve_ and _drawdown_ by inspecting the last few, internal keys in the result series. 205 | 206 | stats.tail() 207 | 208 | # The columns should be self-explanatory. 209 | 210 | stats['_equity_curve'] # Contains equity/drawdown curves. DrawdownDuration is only defined at ends of DD periods. 
211 | 212 | stats['_trades'] # Contains individual trade data 213 | 214 | # Learn more by exploring further 215 | # [examples](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#tutorials) 216 | # or find more framework options in the 217 | # [full API reference](https://kernc.github.io/backtesting.py/doc/backtesting/index.html#header-submodules). 218 | -------------------------------------------------------------------------------- /backtesting/lib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Collection of common building blocks, helper auxiliary functions and 3 | composable strategy classes for reuse. 4 | 5 | Intended for simple missing-link procedures, not reinventing 6 | of better-suited, state-of-the-art, fast libraries, 7 | such as TA-Lib, Tulipy, PyAlgoTrade, NumPy, SciPy ... 8 | 9 | Please raise ideas for additions to this collection on the [issue tracker]. 10 | 11 | [issue tracker]: https://github.com/kernc/backtesting.py 12 | """ 13 | 14 | from collections import OrderedDict 15 | from itertools import compress 16 | from numbers import Number 17 | from inspect import currentframe 18 | from typing import Sequence, Optional, Union, Callable 19 | 20 | import numpy as np 21 | import pandas as pd 22 | 23 | from .backtesting import Strategy 24 | from ._plotting import plot_heatmaps as _plot_heatmaps 25 | from ._util import _Array, _as_str 26 | 27 | __pdoc__ = {} 28 | 29 | 30 | OHLCV_AGG = OrderedDict(( 31 | ('Open', 'first'), 32 | ('High', 'max'), 33 | ('Low', 'min'), 34 | ('Close', 'last'), 35 | ('Volume', 'sum'), 36 | )) 37 | """Dictionary of rules for aggregating resampled OHLCV data frames, 38 | e.g. 
39 | 40 | df.resample('4H', label='right').agg(OHLCV_AGG) 41 | """ 42 | 43 | TRADES_AGG = OrderedDict(( 44 | ('Size', 'sum'), 45 | ('EntryBar', 'first'), 46 | ('ExitBar', 'last'), 47 | ('EntryPrice', 'mean'), 48 | ('ExitPrice', 'mean'), 49 | ('PnL', 'sum'), 50 | ('ReturnPct', 'mean'), 51 | ('EntryTime', 'first'), 52 | ('ExitTime', 'last'), 53 | ('Duration', 'sum'), 54 | )) 55 | """Dictionary of rules for aggregating resampled trades data, 56 | e.g. 57 | 58 | stats['_trades'].resample('1D', on='ExitTime', 59 | label='right').agg(TRADES_AGG) 60 | """ 61 | 62 | _EQUITY_AGG = { 63 | 'Equity': 'last', 64 | 'DrawdownPct': 'max', 65 | 'DrawdownDuration': 'max', 66 | } 67 | 68 | 69 | def barssince(condition: Sequence[bool], default=np.inf) -> int: 70 | """ 71 | Return the number of bars since `condition` sequence was last `True`, 72 | or if never, return `default`. 73 | 74 | >>> barssince(self.data.Close > self.data.Open) 75 | 3 76 | """ 77 | return next(compress(range(len(condition)), reversed(condition)), default) 78 | 79 | 80 | def cross(series1: Sequence, series2: Sequence) -> bool: 81 | """ 82 | Return `True` if `series1` and `series2` just crossed (either 83 | direction). 84 | 85 | >>> cross(self.data.Close, self.sma) 86 | True 87 | 88 | """ 89 | return crossover(series1, series2) or crossover(series2, series1) 90 | 91 | 92 | def crossover(series1: Sequence, series2: Sequence) -> bool: 93 | """ 94 | Return `True` if `series1` just crossed over 95 | `series2`. 
96 | 97 | >>> crossover(self.data.Close, self.sma) 98 | True 99 | """ 100 | series1 = ( 101 | series1.values if isinstance(series1, pd.Series) else 102 | (series1, series1) if isinstance(series1, Number) else 103 | series1) 104 | series2 = ( 105 | series2.values if isinstance(series2, pd.Series) else 106 | (series2, series2) if isinstance(series2, Number) else 107 | series2) 108 | try: 109 | return series1[-2] < series2[-2] and series1[-1] > series2[-1] 110 | except IndexError: 111 | return False 112 | 113 | 114 | def plot_heatmaps(heatmap: pd.Series, 115 | agg: Union[str, Callable] = 'max', 116 | *, 117 | ncols: int = 3, 118 | plot_width: int = 1200, 119 | filename: str = '', 120 | open_browser: bool = True): 121 | """ 122 | Plots a grid of heatmaps, one for every pair of parameters in `heatmap`. 123 | 124 | `heatmap` is a Series as returned by 125 | `backtesting.backtesting.Backtest.optimize` when its parameter 126 | `return_heatmap=True`. 127 | 128 | When projecting the n-dimensional heatmap onto 2D, the values are 129 | aggregated by 'max' function by default. This can be tweaked 130 | with `agg` parameter, which accepts any argument pandas knows 131 | how to aggregate by. 132 | 133 | .. todo:: 134 | Lay heatmaps out lower-triangular instead of in a simple grid. 135 | Like [`skopt.plots.plot_objective()`][plot_objective] does. 136 | 137 | [plot_objective]: \ 138 | https://scikit-optimize.github.io/stable/modules/plots.html#plot-objective 139 | """ 140 | return _plot_heatmaps(heatmap, agg, ncols, filename, plot_width, open_browser) 141 | 142 | 143 | def quantile(series: Sequence, quantile: Union[None, float] = None): 144 | """ 145 | If `quantile` is `None`, return the quantile _rank_ of the last 146 | value of `series` wrt former series values. 147 | 148 | If `quantile` is a value between 0 and 1, return the _value_ of 149 | `series` at this quantile. If used to working with percentiles, just 150 | divide your percentile amount with 100 to obtain quantiles. 
def resample_apply(rule: str,
                   func: Optional[Callable[..., Sequence]],
                   series: Union[pd.Series, pd.DataFrame, _Array],
                   *args,
                   agg: Optional[Union[str, dict]] = None,
                   **kwargs):
    """
    Apply `func` (such as an indicator) to `series`, resampled to
    a time frame specified by `rule`. When called from inside
    `backtesting.backtesting.Strategy.init`,
    the result (returned) series will be automatically wrapped in
    `backtesting.backtesting.Strategy.I`
    wrapper method.

    `rule` is a valid [Pandas offset string] indicating
    a time frame to resample `series` to.

    [Pandas offset string]: \
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases

    `func` is the indicator function to apply on the resampled series.
    If `None`, an identity function is used, i.e. the resampled series
    itself is returned.

    `series` is a data series (or array), such as any of the
    `backtesting.backtesting.Strategy.data` series. Due to pandas
    resampling limitations, this only works when input series
    has a datetime index.

    `agg` is the aggregation function to use on resampled groups of data.
    Valid values are anything accepted by `pandas/resample/.agg()`.
    Default value for dataframe input is `OHLCV_AGG` dictionary.
    Default value for series input is the appropriate entry from `OHLCV_AGG`
    if series has a matching name, or otherwise the value `"last"`,
    which is suitable for closing prices,
    but you might prefer another (e.g. `"max"` for peaks, or similar).

    Finally, any `*args` and `**kwargs` that are not already eaten by
    implicit `backtesting.backtesting.Strategy.I` call
    are passed to `func`.

    For example, if we have a typical moving average function
    `SMA(values, lookback_period)`, _hourly_ data source, and need to
    apply the moving average MA(10) on a _daily_ time frame,
    but don't want to plot the resulting indicator, we can do:

        class System(Strategy):
            def init(self):
                self.sma = resample_apply(
                    'D', SMA, self.data.Close, 10, plot=False)

    The above short snippet is roughly equivalent to:

        class System(Strategy):
            def init(self):
                # Strategy exposes `self.data` as raw NumPy arrays.
                # Let's convert closing prices back to pandas Series.
                close = self.data.Close.s

                # Resample to daily resolution. Aggregate groups
                # using their last value (i.e. closing price at the end
                # of the day). Notice `label='right'`. If it were set to
                # 'left' (default), the strategy would exhibit
                # look-ahead bias.
                daily = close.resample('D', label='right').agg('last')

                # We apply SMA(10) to daily close prices,
                # then reindex it back to original hourly index,
                # forward-filling the missing values in each day.
                # We make a separate function that returns the final
                # indicator array.
                def SMA(series, n):
                    from backtesting.test import SMA
                    return SMA(series, n).reindex(close.index).ffill()

                # The result equivalent to the short example above:
                self.sma = self.I(SMA, daily, 10, plot=False)

    """
    # Default to the identity function: just return the resampled series.
    if func is None:
        def func(x, *_, **__):
            return x

    # Accept Strategy.data.* arrays by unwrapping them into pd.Series.
    if not isinstance(series, (pd.Series, pd.DataFrame)):
        assert isinstance(series, _Array), \
            'resample_apply() takes either a `pd.Series`, `pd.DataFrame`, ' \
            'or a `Strategy.data.*` array'
        series = series.s

    if agg is None:
        # Series: aggregate by matching OHLCV column name, else 'last'.
        agg = OHLCV_AGG.get(getattr(series, 'name', None), 'last')
        if isinstance(series, pd.DataFrame):
            # DataFrame: per-column aggregation, defaulting to 'last'.
            agg = {column: OHLCV_AGG.get(column, 'last')
                   for column in series.columns}

    # label='right' avoids look-ahead bias (see docstring example above).
    resampled = series.resample(rule, label='right').agg(agg).dropna()
    resampled.name = _as_str(series) + '[' + rule + ']'

    # Check first few stack frames if we are being called from
    # inside Strategy.init, and if so, extract Strategy.I wrapper.
    frame, level = currentframe(), 0
    while frame and level <= 3:
        frame = frame.f_back
        level += 1
        if isinstance(frame.f_locals.get('self'), Strategy):  # type: ignore
            strategy_I = frame.f_locals['self'].I  # type: ignore
            break
    else:
        # Not called from within Strategy.init: apply `func` directly.
        def strategy_I(func, *args, **kwargs):
            return func(*args, **kwargs)

    def wrap_func(resampled, *args, **kwargs):
        # Call the indicator, then coerce its output to a pandas object
        # so it can be reindexed back onto the original series index.
        result = func(resampled, *args, **kwargs)
        if not isinstance(result, pd.DataFrame) and not isinstance(result, pd.Series):
            result = np.asarray(result)
            if result.ndim == 1:
                result = pd.Series(result, name=resampled.name)
            elif result.ndim == 2:
                result = pd.DataFrame(result.T)
        # Resample back to data index
        if not isinstance(result.index, pd.DatetimeIndex):
            result.index = resampled.index
        # Forward-fill resampled values over the original (finer) index.
        result = result.reindex(index=series.index.union(resampled.index),
                                method='ffill').reindex(series.index)
        return result

    wrap_func.__name__ = func.__name__  # type: ignore

    array = strategy_I(wrap_func, resampled, *args, **kwargs)
    return array
316 | """ 317 | def shuffle(x): 318 | return x.sample(frac=frac, replace=frac > 1, random_state=random_state) 319 | 320 | if len(example_data.columns.intersection({'Open', 'High', 'Low', 'Close'})) != 4: 321 | raise ValueError("`data` must be a pandas.DataFrame with columns " 322 | "'Open', 'High', 'Low', 'Close'") 323 | while True: 324 | df = shuffle(example_data) 325 | df.index = example_data.index 326 | padding = df.Close - df.Open.shift(-1) 327 | gaps = shuffle(example_data.Open.shift(-1) - example_data.Close) 328 | deltas = (padding + gaps).shift(1).fillna(0).cumsum() 329 | for key in ('Open', 'High', 'Low', 'Close'): 330 | df[key] += deltas 331 | yield df 332 | 333 | 334 | class SignalStrategy(Strategy): 335 | """ 336 | A simple helper strategy that operates on position entry/exit signals. 337 | This makes the backtest of the strategy simulate a [vectorized backtest]. 338 | See [tutorials] for usage examples. 339 | 340 | [vectorized backtest]: https://www.google.com/search?q=vectorized+backtest 341 | [tutorials]: index.html#tutorials 342 | 343 | To use this helper strategy, subclass it, override its 344 | `backtesting.backtesting.Strategy.init` method, 345 | and set the signal vector by calling 346 | `backtesting.lib.SignalStrategy.set_signal` method from within it. 347 | 348 | class ExampleStrategy(SignalStrategy): 349 | def init(self): 350 | super().init() 351 | self.set_signal(sma1 > sma2, sma1 < sma2) 352 | 353 | Remember to call `super().init()` and `super().next()` in your 354 | overridden methods. 355 | """ 356 | __entry_signal = (0,) 357 | __exit_signal = (False,) 358 | 359 | def set_signal(self, entry_size: Sequence[float], 360 | exit_portion: Sequence[float] = None, 361 | *, 362 | plot: bool = True): 363 | """ 364 | Set entry/exit signal vectors (arrays). 
class SignalStrategy(Strategy):
    """
    A simple helper strategy that operates on position entry/exit signals.
    This makes the backtest of the strategy simulate a [vectorized backtest].
    See [tutorials] for usage examples.

    [vectorized backtest]: https://www.google.com/search?q=vectorized+backtest
    [tutorials]: index.html#tutorials

    To use this helper strategy, subclass it, override its
    `backtesting.backtesting.Strategy.init` method,
    and set the signal vector by calling
    `backtesting.lib.SignalStrategy.set_signal` method from within it.

        class ExampleStrategy(SignalStrategy):
            def init(self):
                super().init()
                self.set_signal(sma1 > sma2, sma1 < sma2)

    Remember to call `super().init()` and `super().next()` in your
    overridden methods.
    """
    # Defaults used until set_signal() is called: no entry, no exit.
    __entry_signal = (0,)
    __exit_signal = (False,)

    def set_signal(self, entry_size: Sequence[float],
                   exit_portion: Optional[Sequence[float]] = None,
                   *,
                   plot: bool = True):
        """
        Set entry/exit signal vectors (arrays).

        A long entry signal is considered present wherever `entry_size`
        is greater than zero, and a short signal wherever `entry_size`
        is less than zero, following `backtesting.backtesting.Order.size` semantics.

        If `exit_portion` is provided, a nonzero value closes that portion of
        the position (see `backtesting.backtesting.Trade.close()`) in the
        respective direction
        (positive values close long trades, negative short).

        If `plot` is `True`, the signal entry/exit indicators are plotted when
        `backtesting.backtesting.Backtest.plot` is called.
        """
        # Zeros are replaced with NaN so "no signal" bars don't show up on
        # the scatter plot; NaN also compares False against 0 in next().
        self.__entry_signal = self.I(  # type: ignore
            lambda: pd.Series(entry_size, dtype=float).replace(0, np.nan),
            name='entry size', plot=plot, overlay=False, scatter=True, color='black')

        if exit_portion is not None:
            self.__exit_signal = self.I(  # type: ignore
                lambda: pd.Series(exit_portion, dtype=float).replace(0, np.nan),
                name='exit portion', plot=plot, overlay=False, scatter=True, color='black')

    def next(self):
        super().next()

        # Process exits first so closes happen before any same-bar entry.
        exit_portion = self.__exit_signal[-1]
        if exit_portion > 0:
            for trade in self.trades:
                if trade.is_long:
                    trade.close(exit_portion)
        elif exit_portion < 0:
            for trade in self.trades:
                if trade.is_short:
                    trade.close(-exit_portion)

        # Positive signal size opens/extends a long, negative a short.
        entry_size = self.__entry_signal[-1]
        if entry_size > 0:
            self.buy(size=entry_size)
        elif entry_size < 0:
            self.sell(size=-entry_size)
class TrailingStrategy(Strategy):
    """
    A strategy with automatic trailing stop-loss, trailing the current
    price at distance of some multiple of average true range (ATR). Call
    `TrailingStrategy.set_trailing_sl()` to set said multiple
    (`6` by default). See [tutorials] for usage examples.

    [tutorials]: index.html#tutorials

    Remember to call `super().init()` and `super().next()` in your
    overridden methods.
    """
    __n_atr = 6.
    __atr = None

    def init(self):
        super().init()
        self.set_atr_periods()

    def set_atr_periods(self, periods: int = 100):
        """
        Set the lookback period for computing ATR. The default value
        of 100 ensures a _stable_ ATR.
        """
        high, low = self.data.High, self.data.Low
        prev_close = pd.Series(self.data.Close).shift(1)
        # True range is the largest of: bar range, and the distances
        # from the previous close to this bar's high and low.
        true_range = np.max([high - low,
                             (prev_close - high).abs(),
                             (prev_close - low).abs()], axis=0)
        self.__atr = pd.Series(true_range).rolling(periods).mean().bfill().values

    def set_trailing_sl(self, n_atr: float = 6):
        """
        Sets the future trailing stop-loss as some multiple (`n_atr`)
        average true bar ranges away from the current price.
        """
        self.__n_atr = n_atr

    def next(self):
        super().next()
        # Can't use index=-1 because self.__atr is not an Indicator type
        i = len(self.data) - 1
        price = self.data.Close[i]
        offset = self.__atr[i] * self.__n_atr
        for trade in self.trades:
            # Only ever tighten the stop, never loosen it.
            if trade.is_long:
                trade.sl = max(trade.sl or -np.inf, price - offset)
            else:
                trade.sl = min(trade.sl or np.inf, price + offset)
Don't put anything below here. See above. 470 | -------------------------------------------------------------------------------- /backtesting/_plotting.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import sys 4 | import warnings 5 | from colorsys import hls_to_rgb, rgb_to_hls 6 | from itertools import cycle, combinations 7 | from functools import partial 8 | from typing import Callable, List, Union 9 | 10 | import numpy as np 11 | import pandas as pd 12 | 13 | from bokeh.colors import RGB 14 | from bokeh.colors.named import ( 15 | lime as BULL_COLOR, 16 | tomato as BEAR_COLOR 17 | ) 18 | from bokeh.plotting import figure as _figure 19 | from bokeh.models import ( 20 | CrosshairTool, 21 | CustomJS, 22 | ColumnDataSource, 23 | NumeralTickFormatter, 24 | Span, 25 | HoverTool, 26 | Range1d, 27 | DatetimeTickFormatter, 28 | FuncTickFormatter, 29 | WheelZoomTool, 30 | LinearColorMapper, 31 | ) 32 | from bokeh.io import output_notebook, output_file, show 33 | from bokeh.io.state import curstate 34 | from bokeh.layouts import gridplot 35 | from bokeh.palettes import Category10 36 | from bokeh.transform import factor_cmap 37 | 38 | from backtesting._util import _data_period, _as_list, _Indicator 39 | 40 | with open(os.path.join(os.path.dirname(__file__), 'autoscale_cb.js'), 41 | encoding='utf-8') as _f: 42 | _AUTOSCALE_JS_CALLBACK = _f.read() 43 | 44 | IS_JUPYTER_NOTEBOOK = 'JPY_PARENT_PID' in os.environ 45 | 46 | if IS_JUPYTER_NOTEBOOK: 47 | warnings.warn('Jupyter Notebook detected. ' 48 | 'Setting Bokeh output to notebook. ' 49 | 'This may not work in Jupyter clients without JavaScript ' 50 | 'support (e.g. PyCharm, Spyder IDE). ' 51 | 'Reset with `backtesting.set_bokeh_output(notebook=False)`.') 52 | output_notebook() 53 | 54 | 55 | def set_bokeh_output(notebook=False): 56 | """ 57 | Set Bokeh to output either to a file or Jupyter notebook. 
def set_bokeh_output(notebook=False):
    """
    Set Bokeh to output either to a file or Jupyter notebook.
    By default, Bokeh outputs to notebook if running from within
    notebook was detected.
    """
    global IS_JUPYTER_NOTEBOOK
    IS_JUPYTER_NOTEBOOK = notebook


def _windos_safe_filename(filename):
    # NOTE: "windos" [sic] — misspelled name kept; callers in this module
    # reference it by this exact name.
    # On Windows, replace characters invalid in filenames with '_'.
    if sys.platform.startswith('win'):
        return re.sub(r'[^a-zA-Z0-9,_-]', '_', filename.replace('=', '-'))
    return filename


def _bokeh_reset(filename=None):
    """Reset global Bokeh state and (re)configure the output target."""
    # Without the reset, consecutive plot() calls would accumulate
    # stale documents/sources from previous runs.
    curstate().reset()
    if filename:
        if not filename.endswith('.html'):
            filename += '.html'
        output_file(filename, title=filename)
    elif IS_JUPYTER_NOTEBOOK:
        curstate().output_notebook()


def colorgen():
    """Infinite generator cycling through the Category10 palette."""
    yield from cycle(Category10[10])


def lightness(color, lightness=.94):
    """Return `color` with its HLS lightness channel set to `lightness`."""
    rgb = np.array([color.r, color.g, color.b]) / 255
    h, _, s = rgb_to_hls(*rgb)
    rgb = np.array(hls_to_rgb(h, lightness, s)) * 255
    return RGB(*rgb)


# Above this many bars, plot() downsamples data before charting.
_MAX_CANDLES = 10_000
" 108 | "See `Backtest.plot(resample=...)`") 109 | 110 | from .lib import OHLCV_AGG, TRADES_AGG, _EQUITY_AGG 111 | df = df.resample(freq, label='right').agg(OHLCV_AGG).dropna() 112 | 113 | indicators = [_Indicator(i.df.resample(freq, label='right').mean() 114 | .dropna().reindex(df.index).values.T, 115 | **dict(i._opts, name=i.name, 116 | # Replace saved index with the resampled one 117 | index=df.index)) 118 | for i in indicators] 119 | assert not indicators or indicators[0].df.index.equals(df.index) 120 | 121 | equity_data = equity_data.resample(freq, label='right').agg(_EQUITY_AGG).dropna(how='all') 122 | assert equity_data.index.equals(df.index) 123 | 124 | def _weighted_returns(s, trades=trades): 125 | df = trades.loc[s.index] 126 | return ((df['Size'].abs() * df['ReturnPct']) / df['Size'].abs().sum()).sum() 127 | 128 | def _group_trades(column): 129 | def f(s, new_index=df.index.astype(np.int64), bars=trades[column]): 130 | if s.size: 131 | # Via int64 because on pandas recently broken datetime 132 | mean_time = int(bars.loc[s.index].view('i8').mean()) 133 | new_bar_idx = new_index.get_loc(mean_time, method='nearest') 134 | return new_bar_idx 135 | return f 136 | 137 | if len(trades): # Avoid pandas "resampling on Int64 index" error 138 | trades = trades.assign(count=1).resample(freq, on='ExitTime', label='right').agg(dict( 139 | TRADES_AGG, 140 | ReturnPct=_weighted_returns, 141 | count='sum', 142 | EntryBar=_group_trades('EntryTime'), 143 | ExitBar=_group_trades('ExitTime'), 144 | )).dropna() 145 | 146 | return df, indicators, equity_data, trades 147 | 148 | 149 | def plot(*, results: pd.Series, 150 | df: pd.DataFrame, 151 | indicators: List[_Indicator], 152 | filename='', plot_width=None, 153 | plot_equity=True, plot_return=False, plot_pl=True, 154 | plot_volume=True, plot_drawdown=False, 155 | smooth_equity=False, relative_equity=True, 156 | superimpose=True, resample=True, 157 | reverse_indicators=True, 158 | show_legend=True, open_browser=True): 159 | 
""" 160 | Like much of GUI code everywhere, this is a mess. 161 | """ 162 | # We need to reset global Bokeh state, otherwise subsequent runs of 163 | # plot() contain some previous run's cruft data (was noticed when 164 | # TestPlot.test_file_size() test was failing). 165 | if not filename and not IS_JUPYTER_NOTEBOOK: 166 | filename = _windos_safe_filename(str(results._strategy)) 167 | _bokeh_reset(filename) 168 | 169 | COLORS = [BEAR_COLOR, BULL_COLOR] 170 | BAR_WIDTH = .8 171 | 172 | assert df.index.equals(results['_equity_curve'].index) 173 | equity_data = results['_equity_curve'].copy(deep=False) 174 | trades = results['_trades'] 175 | 176 | plot_volume = plot_volume and not df.Volume.isnull().all() 177 | plot_equity = plot_equity and not trades.empty 178 | plot_return = plot_return and not trades.empty 179 | plot_pl = plot_pl and not trades.empty 180 | is_datetime_index = isinstance(df.index, pd.DatetimeIndex) 181 | 182 | from .lib import OHLCV_AGG 183 | # ohlc df may contain many columns. 
We're only interested in, and pass on to Bokeh, these 184 | df = df[list(OHLCV_AGG.keys())].copy(deep=False) 185 | 186 | # Limit data to max_candles 187 | if is_datetime_index: 188 | df, indicators, equity_data, trades = _maybe_resample_data( 189 | resample, df, indicators, equity_data, trades) 190 | 191 | df.index.name = None # Provides source name @index 192 | df['datetime'] = df.index # Save original, maybe datetime index 193 | df = df.reset_index(drop=True) 194 | equity_data = equity_data.reset_index(drop=True) 195 | index = df.index 196 | 197 | new_bokeh_figure = partial( 198 | _figure, 199 | x_axis_type='linear', 200 | plot_width=plot_width, 201 | plot_height=400, 202 | tools="xpan,xwheel_zoom,box_zoom,undo,redo,reset,save", 203 | active_drag='xpan', 204 | active_scroll='xwheel_zoom') 205 | 206 | pad = (index[-1] - index[0]) / 20 207 | 208 | fig_ohlc = new_bokeh_figure( 209 | x_range=Range1d(index[0], index[-1], 210 | min_interval=10, 211 | bounds=(index[0] - pad, 212 | index[-1] + pad)) if index.size > 1 else None) 213 | figs_above_ohlc, figs_below_ohlc = [], [] 214 | 215 | source = ColumnDataSource(df) 216 | source.add((df.Close >= df.Open).values.astype(np.uint8).astype(str), 'inc') 217 | 218 | trade_source = ColumnDataSource(dict( 219 | index=trades['ExitBar'], 220 | datetime=trades['ExitTime'], 221 | exit_price=trades['ExitPrice'], 222 | entry_index=trades['EntryBar'], 223 | entry_datetime=trades['EntryTime'], 224 | entry_price=trades['EntryPrice'], 225 | size=trades['Size'], 226 | returns_positive=(trades['ReturnPct'] > 0).astype(int).astype(str), 227 | )) 228 | 229 | inc_cmap = factor_cmap('inc', COLORS, ['0', '1']) 230 | cmap = factor_cmap('returns_positive', COLORS, ['0', '1']) 231 | colors_darker = [lightness(BEAR_COLOR, .35), 232 | lightness(BULL_COLOR, .35)] 233 | trades_cmap = factor_cmap('returns_positive', colors_darker, ['0', '1']) 234 | 235 | if is_datetime_index: 236 | fig_ohlc.xaxis.formatter = FuncTickFormatter( 237 | 
args=dict(axis=fig_ohlc.xaxis[0], 238 | formatter=DatetimeTickFormatter(days=['%Y/%m/%d'], months=['%Y/%m']), 239 | source=source), 240 | code=''' 241 | this.labels = this.labels || formatter.doFormat(ticks 242 | .map(i => source.data.datetime[i]) 243 | .filter(t => t !== undefined)); 244 | return this.labels[index] || ""; 245 | ''') 246 | 247 | NBSP = '\N{NBSP}' * 4 248 | ohlc_extreme_values = df[['High', 'Low']].copy(deep=False) 249 | ohlc_tooltips = [ 250 | ('open', '@Open'), 251 | ('high', '@High'), 252 | ('low', '@Low'), 253 | ('close', '@Close'), 254 | ('volume', '@Volume{0,0}')] 255 | 256 | def new_indicator_figure(**kwargs): 257 | kwargs.setdefault('plot_height', 90) 258 | fig = new_bokeh_figure(x_range=fig_ohlc.x_range, 259 | active_scroll='xwheel_zoom', 260 | active_drag='xpan', 261 | **kwargs) 262 | fig.xaxis.visible = False 263 | fig.yaxis.minor_tick_line_color = None 264 | return fig 265 | 266 | def set_tooltips(fig, tooltips=(), vline=True, renderers=()): 267 | tooltips = list(tooltips) 268 | renderers = list(renderers) 269 | 270 | if is_datetime_index: 271 | formatters = {'@datetime': 'datetime'} 272 | tooltips = [("Date", "@datetime{%Y-%m-%d %H:%M:%S}")] + tooltips 273 | else: 274 | formatters = {} 275 | tooltips = [("#", "@index")] + tooltips 276 | fig.add_tools(HoverTool( 277 | point_policy='follow_mouse', 278 | renderers=renderers, formatters=formatters, 279 | tooltips=tooltips, mode='vline' if vline else 'mouse')) 280 | 281 | def _plot_equity_section(is_return=False): 282 | """Equity section""" 283 | # Max DD Dur. 
line 284 | equity = equity_data['Equity'].copy() 285 | dd_end = equity_data['DrawdownDuration'].idxmax() 286 | if np.isnan(dd_end): 287 | dd_start = dd_end = equity.index[0] 288 | else: 289 | dd_start = equity[:dd_end].idxmax() 290 | # If DD not extending into the future, get exact point of intersection with equity 291 | if dd_end != equity.index[-1]: 292 | dd_end = np.interp(equity[dd_start], 293 | (equity[dd_end - 1], equity[dd_end]), 294 | (dd_end - 1, dd_end)) 295 | 296 | if smooth_equity: 297 | interest_points = pd.Index([ 298 | # Beginning and end 299 | equity.index[0], equity.index[-1], 300 | # Peak equity and peak DD 301 | equity.idxmax(), equity_data['DrawdownPct'].idxmax(), 302 | # Include max dd end points. Otherwise the MaxDD line looks amiss. 303 | dd_start, int(dd_end), min(int(dd_end + 1), equity.size - 1), 304 | ]) 305 | select = pd.Index(trades['ExitBar']).union(interest_points) 306 | select = select.unique().dropna() 307 | equity = equity.iloc[select].reindex(equity.index) 308 | equity.interpolate(inplace=True) 309 | 310 | assert equity.index.equals(equity_data.index) 311 | 312 | if relative_equity: 313 | equity /= equity.iloc[0] 314 | if is_return: 315 | equity -= equity.iloc[0] 316 | 317 | yaxis_label = 'Return' if is_return else 'Equity' 318 | source_key = 'eq_return' if is_return else 'equity' 319 | source.add(equity, source_key) 320 | fig = new_indicator_figure( 321 | y_axis_label=yaxis_label, 322 | **({} if plot_drawdown else dict(plot_height=110))) 323 | 324 | # High-watermark drawdown dents 325 | fig.patch('index', 'equity_dd', 326 | source=ColumnDataSource(dict( 327 | index=np.r_[index, index[::-1]], 328 | equity_dd=np.r_[equity, equity.cummax()[::-1]] 329 | )), 330 | fill_color='#ffffea', line_color='#ffcb66') 331 | 332 | # Equity line 333 | r = fig.line('index', source_key, source=source, line_width=1.5, line_alpha=1) 334 | if relative_equity: 335 | tooltip_format = f'@{source_key}{{+0,0.[000]%}}' 336 | tick_format = '0,0.[00]%' 337 | 
legend_format = '{:,.0f}%' 338 | else: 339 | tooltip_format = f'@{source_key}{{$ 0,0}}' 340 | tick_format = '$ 0.0 a' 341 | legend_format = '${:,.0f}' 342 | set_tooltips(fig, [(yaxis_label, tooltip_format)], renderers=[r]) 343 | fig.yaxis.formatter = NumeralTickFormatter(format=tick_format) 344 | 345 | # Peaks 346 | argmax = equity.idxmax() 347 | fig.scatter(argmax, equity[argmax], 348 | legend_label='Peak ({})'.format( 349 | legend_format.format(equity[argmax] * (100 if relative_equity else 1))), 350 | color='cyan', size=8) 351 | fig.scatter(index[-1], equity.values[-1], 352 | legend_label='Final ({})'.format( 353 | legend_format.format(equity.iloc[-1] * (100 if relative_equity else 1))), 354 | color='blue', size=8) 355 | 356 | if not plot_drawdown: 357 | drawdown = equity_data['DrawdownPct'] 358 | argmax = drawdown.idxmax() 359 | fig.scatter(argmax, equity[argmax], 360 | legend_label='Max Drawdown (-{:.1f}%)'.format(100 * drawdown[argmax]), 361 | color='red', size=8) 362 | dd_timedelta_label = df['datetime'].iloc[int(round(dd_end))] - df['datetime'].iloc[dd_start] 363 | fig.line([dd_start, dd_end], equity.iloc[dd_start], 364 | line_color='red', line_width=2, 365 | legend_label=f'Max Dd Dur. 
({dd_timedelta_label})' 366 | .replace(' 00:00:00', '') 367 | .replace('(0 days ', '(')) 368 | 369 | figs_above_ohlc.append(fig) 370 | 371 | def _plot_drawdown_section(): 372 | """Drawdown section""" 373 | fig = new_indicator_figure(y_axis_label="Drawdown") 374 | drawdown = equity_data['DrawdownPct'] 375 | argmax = drawdown.idxmax() 376 | source.add(drawdown, 'drawdown') 377 | r = fig.line('index', 'drawdown', source=source, line_width=1.3) 378 | fig.scatter(argmax, drawdown[argmax], 379 | legend_label='Peak (-{:.1f}%)'.format(100 * drawdown[argmax]), 380 | color='red', size=8) 381 | set_tooltips(fig, [('Drawdown', '@drawdown{-0.[0]%}')], renderers=[r]) 382 | fig.yaxis.formatter = NumeralTickFormatter(format="-0.[0]%") 383 | return fig 384 | 385 | def _plot_pl_section(): 386 | """Profit/Loss markers section""" 387 | fig = new_indicator_figure(y_axis_label="Profit / Loss") 388 | fig.add_layout(Span(location=0, dimension='width', line_color='#666666', 389 | line_dash='dashed', line_width=1)) 390 | returns_long = np.where(trades['Size'] > 0, trades['ReturnPct'], np.nan) 391 | returns_short = np.where(trades['Size'] < 0, trades['ReturnPct'], np.nan) 392 | size = trades['Size'].abs() 393 | size = np.interp(size, (size.min(), size.max()), (8, 20)) 394 | trade_source.add(returns_long, 'returns_long') 395 | trade_source.add(returns_short, 'returns_short') 396 | trade_source.add(size, 'marker_size') 397 | if 'count' in trades: 398 | trade_source.add(trades['count'], 'count') 399 | r1 = fig.scatter('index', 'returns_long', source=trade_source, fill_color=cmap, 400 | marker='triangle', line_color='black', size='marker_size') 401 | r2 = fig.scatter('index', 'returns_short', source=trade_source, fill_color=cmap, 402 | marker='inverted_triangle', line_color='black', size='marker_size') 403 | 404 | # 매수시점 캔들 차트에 바로 그림 405 | fig_ohlc.scatter('entry_index', 'entry_price', source=trade_source, size='marker_size', 406 | fill_color='blue', color='blue', line_color='blue', 
marker='triangle') 407 | 408 | # 매도시점 캔들 차트에 바로 그림 409 | fig_ohlc.scatter('index', 'exit_price', source=trade_source, size='marker_size', 410 | fill_color='red', color='red', line_color='red', marker='inverted_triangle') 411 | 412 | tooltips = [("Size", "@size{0,0}")] 413 | if 'count' in trades: 414 | tooltips.append(("Count", "@count{0,0}")) 415 | set_tooltips(fig, tooltips + [("P/L", "@returns_long{+0.[000]%}")], 416 | vline=False, renderers=[r1]) 417 | set_tooltips(fig, tooltips + [("P/L", "@returns_short{+0.[000]%}")], 418 | vline=False, renderers=[r2]) 419 | fig.yaxis.formatter = NumeralTickFormatter(format="0.[00]%") 420 | return fig 421 | 422 | def _plot_volume_section(): 423 | """Volume section""" 424 | fig = new_indicator_figure(y_axis_label="Volume") 425 | fig.xaxis.formatter = fig_ohlc.xaxis[0].formatter 426 | fig.xaxis.visible = True 427 | fig_ohlc.xaxis.visible = False # Show only Volume's xaxis 428 | r = fig.vbar('index', BAR_WIDTH, 'Volume', source=source, color=inc_cmap) 429 | set_tooltips(fig, [('Volume', '@Volume{0.00 a}')], renderers=[r]) 430 | fig.yaxis.formatter = NumeralTickFormatter(format="0 a") 431 | return fig 432 | 433 | def _plot_superimposed_ohlc(): 434 | """Superimposed, downsampled vbars""" 435 | time_resolution = pd.DatetimeIndex(df['datetime']).resolution 436 | resample_rule = (superimpose if isinstance(superimpose, str) else 437 | dict(day='M', 438 | hour='D', 439 | minute='H', 440 | second='T', 441 | millisecond='S').get(time_resolution)) 442 | if not resample_rule: 443 | warnings.warn( 444 | f"'Can't superimpose OHLC data with rule '{resample_rule}'" 445 | f"(index datetime resolution: '{time_resolution}'). 
Skipping.", 446 | stacklevel=4) 447 | return 448 | 449 | df2 = (df.assign(_width=1).set_index('datetime') 450 | .resample(resample_rule, label='left') 451 | .agg(dict(OHLCV_AGG, _width='count'))) 452 | 453 | # Check if resampling was downsampling; error on upsampling 454 | orig_freq = _data_period(df['datetime']) 455 | resample_freq = _data_period(df2.index) 456 | if resample_freq < orig_freq: 457 | raise ValueError('Invalid value for `superimpose`: Upsampling not supported.') 458 | if resample_freq == orig_freq: 459 | warnings.warn('Superimposed OHLC plot matches the original plot. Skipping.', 460 | stacklevel=4) 461 | return 462 | 463 | df2.index = df2['_width'].cumsum().shift(1).fillna(0) 464 | df2.index += df2['_width'] / 2 - .5 465 | df2['_width'] -= .1 # Candles don't touch 466 | 467 | df2['inc'] = (df2.Close >= df2.Open).astype(int).astype(str) 468 | df2.index.name = None 469 | 470 | source2 = ColumnDataSource(df2) 471 | fig_ohlc.segment('index', 'High', 'index', 'Low', source=source2, color='#bbbbbb') 472 | colors_lighter = [lightness(BEAR_COLOR, .92), 473 | lightness(BULL_COLOR, .92)] 474 | fig_ohlc.vbar('index', '_width', 'Open', 'Close', source=source2, line_color=None, 475 | fill_color=factor_cmap('inc', colors_lighter, ['0', '1'])) 476 | 477 | def _plot_ohlc(): 478 | """Main OHLC bars""" 479 | fig_ohlc.segment('index', 'High', 'index', 'Low', source=source, color="black") 480 | r = fig_ohlc.vbar('index', BAR_WIDTH, 'Open', 'Close', source=source, 481 | line_color="black", fill_color=inc_cmap) 482 | return r 483 | 484 | def _plot_ohlc_trades(): 485 | """Trade entry / exit markers on OHLC plot""" 486 | trade_source.add(trades[['EntryBar', 'ExitBar']].values.tolist(), 'position_lines_xs') 487 | trade_source.add(trades[['EntryPrice', 'ExitPrice']].values.tolist(), 'position_lines_ys') 488 | fig_ohlc.multi_line(xs='position_lines_xs', ys='position_lines_ys', 489 | source=trade_source, line_color=trades_cmap, 490 | legend_label=f'Trades ({len(trades)})', 491 
| line_width=8, line_alpha=1, line_dash='dotted') 492 | 493 | def _plot_indicators(): 494 | """Strategy indicators""" 495 | 496 | def _too_many_dims(value): 497 | assert value.ndim >= 2 498 | if value.ndim > 2: 499 | warnings.warn(f"Can't plot indicators with >2D ('{value.name}')", 500 | stacklevel=5) 501 | return True 502 | return False 503 | 504 | class LegendStr(str): 505 | # The legend string is such a string that only matches 506 | # itself if it's the exact same object. This ensures 507 | # legend items are listed separately even when they have the 508 | # same string contents. Otherwise, Bokeh would always consider 509 | # equal strings as one and the same legend item. 510 | def __eq__(self, other): 511 | return self is other 512 | 513 | ohlc_colors = colorgen() 514 | indicator_figs = [] 515 | 516 | for i, value in enumerate(indicators): 517 | value = np.atleast_2d(value) 518 | 519 | # Use .get()! A user might have assigned a Strategy.data-evolved 520 | # _Array without Strategy.I() 521 | if not value._opts.get('plot') or _too_many_dims(value): 522 | continue 523 | 524 | is_overlay = value._opts['overlay'] 525 | is_scatter = value._opts['scatter'] 526 | if is_overlay: 527 | fig = fig_ohlc 528 | else: 529 | fig = new_indicator_figure() 530 | indicator_figs.append(fig) 531 | tooltips = [] 532 | colors = value._opts['color'] 533 | colors = colors and cycle(_as_list(colors)) or ( 534 | cycle([next(ohlc_colors)]) if is_overlay else colorgen()) 535 | legend_label = LegendStr(value.name) 536 | for j, arr in enumerate(value, 1): 537 | color = next(colors) 538 | source_name = f'{legend_label}_{i}_{j}' 539 | if arr.dtype == bool: 540 | arr = arr.astype(int) 541 | source.add(arr, source_name) 542 | tooltips.append(f'@{{{source_name}}}{{0,0.0[0000]}}') 543 | if is_overlay: 544 | ohlc_extreme_values[source_name] = arr 545 | if is_scatter: 546 | fig.scatter( 547 | 'index', source_name, source=source, 548 | legend_label=legend_label, color=color, 549 | line_color='black', 
fill_alpha=.8, 550 | marker='circle', radius=BAR_WIDTH / 2 * 1.5) 551 | else: 552 | fig.line( 553 | 'index', source_name, source=source, 554 | legend_label=legend_label, line_color=color, 555 | line_width=1.3) 556 | else: 557 | if is_scatter: 558 | r = fig.scatter( 559 | 'index', source_name, source=source, 560 | legend_label=LegendStr(legend_label), color=color, 561 | marker='circle', radius=BAR_WIDTH / 2 * .9) 562 | else: 563 | r = fig.line( 564 | 'index', source_name, source=source, 565 | legend_label=LegendStr(legend_label), line_color=color, 566 | line_width=1.3) 567 | # Add dashed centerline just because 568 | mean = float(pd.Series(arr).mean()) 569 | if not np.isnan(mean) and (abs(mean) < .1 or 570 | round(abs(mean), 1) == .5 or 571 | round(abs(mean), -1) in (50, 100, 200)): 572 | fig.add_layout(Span(location=float(mean), dimension='width', 573 | line_color='#666666', line_dash='dashed', 574 | line_width=.5)) 575 | if is_overlay: 576 | ohlc_tooltips.append((legend_label, NBSP.join(tooltips))) 577 | else: 578 | set_tooltips(fig, [(legend_label, NBSP.join(tooltips))], vline=True, renderers=[r]) 579 | # If the sole indicator line on this figure, 580 | # have the legend only contain text without the glyph 581 | if len(value) == 1: 582 | fig.legend.glyph_width = 0 583 | return indicator_figs 584 | 585 | # Construct figure ... 
586 | 587 | if plot_equity: 588 | _plot_equity_section() 589 | 590 | if plot_return: 591 | _plot_equity_section(is_return=True) 592 | 593 | if plot_drawdown: 594 | figs_above_ohlc.append(_plot_drawdown_section()) 595 | 596 | if plot_pl: 597 | figs_above_ohlc.append(_plot_pl_section()) 598 | 599 | if plot_volume: 600 | fig_volume = _plot_volume_section() 601 | figs_below_ohlc.append(fig_volume) 602 | 603 | if superimpose and is_datetime_index: 604 | _plot_superimposed_ohlc() 605 | 606 | ohlc_bars = _plot_ohlc() 607 | _plot_ohlc_trades() 608 | indicator_figs = _plot_indicators() 609 | if reverse_indicators: 610 | indicator_figs = indicator_figs[::-1] 611 | figs_below_ohlc.extend(indicator_figs) 612 | 613 | set_tooltips(fig_ohlc, ohlc_tooltips, vline=True, renderers=[ohlc_bars]) 614 | 615 | source.add(ohlc_extreme_values.min(1), 'ohlc_low') 616 | source.add(ohlc_extreme_values.max(1), 'ohlc_high') 617 | 618 | custom_js_args = dict(ohlc_range=fig_ohlc.y_range, 619 | source=source) 620 | if plot_volume: 621 | custom_js_args.update(volume_range=fig_volume.y_range) 622 | 623 | fig_ohlc.x_range.js_on_change('end', CustomJS(args=custom_js_args, 624 | code=_AUTOSCALE_JS_CALLBACK)) 625 | 626 | plots = figs_above_ohlc + [fig_ohlc] + figs_below_ohlc 627 | linked_crosshair = CrosshairTool(dimensions='both') 628 | 629 | for f in plots: 630 | if f.legend: 631 | f.legend.visible = show_legend 632 | f.legend.location = 'top_left' 633 | f.legend.border_line_width = 1 634 | f.legend.border_line_color = '#333333' 635 | f.legend.padding = 5 636 | f.legend.spacing = 0 637 | f.legend.margin = 0 638 | f.legend.label_text_font_size = '8pt' 639 | f.legend.click_policy = "hide" 640 | f.min_border_left = 0 641 | f.min_border_top = 3 642 | f.min_border_bottom = 6 643 | f.min_border_right = 10 644 | f.outline_line_color = '#666666' 645 | 646 | f.add_tools(linked_crosshair) 647 | wheelzoom_tool = next(wz for wz in f.tools if isinstance(wz, WheelZoomTool)) 648 | wheelzoom_tool.maintain_focus = 
False 649 | 650 | kwargs = {} 651 | if plot_width is None: 652 | kwargs['sizing_mode'] = 'stretch_width' 653 | 654 | fig = gridplot( 655 | plots, 656 | ncols=1, 657 | toolbar_location='right', 658 | toolbar_options=dict(logo=None), 659 | merge_tools=True, 660 | **kwargs 661 | ) 662 | show(fig, browser=None if open_browser else 'none') 663 | return fig 664 | 665 | 666 | def plot_heatmaps(heatmap: pd.Series, agg: Union[Callable, str], ncols: int, 667 | filename: str = '', plot_width: int = 1200, open_browser: bool = True): 668 | if not (isinstance(heatmap, pd.Series) and 669 | isinstance(heatmap.index, pd.MultiIndex)): 670 | raise ValueError('heatmap must be heatmap Series as returned by ' 671 | '`Backtest.optimize(..., return_heatmap=True)`') 672 | 673 | _bokeh_reset(filename) 674 | 675 | param_combinations = combinations(heatmap.index.names, 2) 676 | dfs = [heatmap.groupby(list(dims)).agg(agg).to_frame(name='_Value') 677 | for dims in param_combinations] 678 | plots = [] 679 | cmap = LinearColorMapper(palette='Viridis256', 680 | low=min(df.min().min() for df in dfs), 681 | high=max(df.max().max() for df in dfs), 682 | nan_color='white') 683 | for df in dfs: 684 | name1, name2 = df.index.names 685 | level1 = df.index.levels[0].astype(str).tolist() 686 | level2 = df.index.levels[1].astype(str).tolist() 687 | df = df.reset_index() 688 | df[name1] = df[name1].astype('str') 689 | df[name2] = df[name2].astype('str') 690 | 691 | fig = _figure(x_range=level1, 692 | y_range=level2, 693 | x_axis_label=name1, 694 | y_axis_label=name2, 695 | plot_width=plot_width // ncols, 696 | plot_height=plot_width // ncols, 697 | tools='box_zoom,reset,save', 698 | tooltips=[(name1, '@' + name1), 699 | (name2, '@' + name2), 700 | ('Value', '@_Value{0.[000]}')]) 701 | fig.grid.grid_line_color = None 702 | fig.axis.axis_line_color = None 703 | fig.axis.major_tick_line_color = None 704 | fig.axis.major_label_standoff = 0 705 | 706 | fig.rect(x=name1, 707 | y=name2, 708 | width=1, 709 | 
height=1, 710 | source=df, 711 | line_color=None, 712 | fill_color=dict(field='_Value', 713 | transform=cmap)) 714 | plots.append(fig) 715 | 716 | fig = gridplot( 717 | plots, 718 | ncols=ncols, 719 | toolbar_options=dict(logo=None), 720 | toolbar_location='above', 721 | merge_tools=True, 722 | ) 723 | 724 | show(fig, browser=None if open_browser else 'none') 725 | return fig 726 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | ### GNU AFFERO GENERAL PUBLIC LICENSE 2 | 3 | Version 3, 19 November 2007 4 | 5 | Copyright (C) 2007 Free Software Foundation, Inc. 6 | 7 | 8 | Everyone is permitted to copy and distribute verbatim copies of this 9 | license document, but changing it is not allowed. 10 | 11 | ### Preamble 12 | 13 | The GNU Affero General Public License is a free, copyleft license for 14 | software and other kinds of works, specifically designed to ensure 15 | cooperation with the community in the case of network server software. 16 | 17 | The licenses for most software and other practical works are designed 18 | to take away your freedom to share and change the works. By contrast, 19 | our General Public Licenses are intended to guarantee your freedom to 20 | share and change all versions of a program--to make sure it remains 21 | free software for all its users. 22 | 23 | When we speak of free software, we are referring to freedom, not 24 | price. Our General Public Licenses are designed to make sure that you 25 | have the freedom to distribute copies of free software (and charge for 26 | them if you wish), that you receive source code or can get it if you 27 | want it, that you can change the software or use pieces of it in new 28 | free programs, and that you know you can do these things. 
29 | 30 | Developers that use our General Public Licenses protect your rights 31 | with two steps: (1) assert copyright on the software, and (2) offer 32 | you this License which gives you legal permission to copy, distribute 33 | and/or modify the software. 34 | 35 | A secondary benefit of defending all users' freedom is that 36 | improvements made in alternate versions of the program, if they 37 | receive widespread use, become available for other developers to 38 | incorporate. Many developers of free software are heartened and 39 | encouraged by the resulting cooperation. However, in the case of 40 | software used on network servers, this result may fail to come about. 41 | The GNU General Public License permits making a modified version and 42 | letting the public access it on a server without ever releasing its 43 | source code to the public. 44 | 45 | The GNU Affero General Public License is designed specifically to 46 | ensure that, in such cases, the modified source code becomes available 47 | to the community. It requires the operator of a network server to 48 | provide the source code of the modified version running there to the 49 | users of that server. Therefore, public use of a modified version, on 50 | a publicly accessible server, gives the public access to the source 51 | code of the modified version. 52 | 53 | An older license, called the Affero General Public License and 54 | published by Affero, was designed to accomplish similar goals. This is 55 | a different license, not a version of the Affero GPL, but Affero has 56 | released a new version of the Affero GPL which permits relicensing 57 | under this license. 58 | 59 | The precise terms and conditions for copying, distribution and 60 | modification follow. 61 | 62 | ### TERMS AND CONDITIONS 63 | 64 | #### 0. Definitions. 65 | 66 | "This License" refers to version 3 of the GNU Affero General Public 67 | License. 
68 | 69 | "Copyright" also means copyright-like laws that apply to other kinds 70 | of works, such as semiconductor masks. 71 | 72 | "The Program" refers to any copyrightable work licensed under this 73 | License. Each licensee is addressed as "you". "Licensees" and 74 | "recipients" may be individuals or organizations. 75 | 76 | To "modify" a work means to copy from or adapt all or part of the work 77 | in a fashion requiring copyright permission, other than the making of 78 | an exact copy. The resulting work is called a "modified version" of 79 | the earlier work or a work "based on" the earlier work. 80 | 81 | A "covered work" means either the unmodified Program or a work based 82 | on the Program. 83 | 84 | To "propagate" a work means to do anything with it that, without 85 | permission, would make you directly or secondarily liable for 86 | infringement under applicable copyright law, except executing it on a 87 | computer or modifying a private copy. Propagation includes copying, 88 | distribution (with or without modification), making available to the 89 | public, and in some countries other activities as well. 90 | 91 | To "convey" a work means any kind of propagation that enables other 92 | parties to make or receive copies. Mere interaction with a user 93 | through a computer network, with no transfer of a copy, is not 94 | conveying. 95 | 96 | An interactive user interface displays "Appropriate Legal Notices" to 97 | the extent that it includes a convenient and prominently visible 98 | feature that (1) displays an appropriate copyright notice, and (2) 99 | tells the user that there is no warranty for the work (except to the 100 | extent that warranties are provided), that licensees may convey the 101 | work under this License, and how to view a copy of this License. If 102 | the interface presents a list of user commands or options, such as a 103 | menu, a prominent item in the list meets this criterion. 104 | 105 | #### 1. Source Code. 
106 | 107 | The "source code" for a work means the preferred form of the work for 108 | making modifications to it. "Object code" means any non-source form of 109 | a work. 110 | 111 | A "Standard Interface" means an interface that either is an official 112 | standard defined by a recognized standards body, or, in the case of 113 | interfaces specified for a particular programming language, one that 114 | is widely used among developers working in that language. 115 | 116 | The "System Libraries" of an executable work include anything, other 117 | than the work as a whole, that (a) is included in the normal form of 118 | packaging a Major Component, but which is not part of that Major 119 | Component, and (b) serves only to enable use of the work with that 120 | Major Component, or to implement a Standard Interface for which an 121 | implementation is available to the public in source code form. A 122 | "Major Component", in this context, means a major essential component 123 | (kernel, window system, and so on) of the specific operating system 124 | (if any) on which the executable work runs, or a compiler used to 125 | produce the work, or an object code interpreter used to run it. 126 | 127 | The "Corresponding Source" for a work in object code form means all 128 | the source code needed to generate, install, and (for an executable 129 | work) run the object code and to modify the work, including scripts to 130 | control those activities. However, it does not include the work's 131 | System Libraries, or general-purpose tools or generally available free 132 | programs which are used unmodified in performing those activities but 133 | which are not part of the work. 
For example, Corresponding Source 134 | includes interface definition files associated with source files for 135 | the work, and the source code for shared libraries and dynamically 136 | linked subprograms that the work is specifically designed to require, 137 | such as by intimate data communication or control flow between those 138 | subprograms and other parts of the work. 139 | 140 | The Corresponding Source need not include anything that users can 141 | regenerate automatically from other parts of the Corresponding Source. 142 | 143 | The Corresponding Source for a work in source code form is that same 144 | work. 145 | 146 | #### 2. Basic Permissions. 147 | 148 | All rights granted under this License are granted for the term of 149 | copyright on the Program, and are irrevocable provided the stated 150 | conditions are met. This License explicitly affirms your unlimited 151 | permission to run the unmodified Program. The output from running a 152 | covered work is covered by this License only if the output, given its 153 | content, constitutes a covered work. This License acknowledges your 154 | rights of fair use or other equivalent, as provided by copyright law. 155 | 156 | You may make, run and propagate covered works that you do not convey, 157 | without conditions so long as your license otherwise remains in force. 158 | You may convey covered works to others for the sole purpose of having 159 | them make modifications exclusively for you, or provide you with 160 | facilities for running those works, provided that you comply with the 161 | terms of this License in conveying all material for which you do not 162 | control copyright. Those thus making or running the covered works for 163 | you must do so exclusively on your behalf, under your direction and 164 | control, on terms that prohibit them from making any copies of your 165 | copyrighted material outside their relationship with you. 
166 | 167 | Conveying under any other circumstances is permitted solely under the 168 | conditions stated below. Sublicensing is not allowed; section 10 makes 169 | it unnecessary. 170 | 171 | #### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 172 | 173 | No covered work shall be deemed part of an effective technological 174 | measure under any applicable law fulfilling obligations under article 175 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 176 | similar laws prohibiting or restricting circumvention of such 177 | measures. 178 | 179 | When you convey a covered work, you waive any legal power to forbid 180 | circumvention of technological measures to the extent such 181 | circumvention is effected by exercising rights under this License with 182 | respect to the covered work, and you disclaim any intention to limit 183 | operation or modification of the work as a means of enforcing, against 184 | the work's users, your or third parties' legal rights to forbid 185 | circumvention of technological measures. 186 | 187 | #### 4. Conveying Verbatim Copies. 188 | 189 | You may convey verbatim copies of the Program's source code as you 190 | receive it, in any medium, provided that you conspicuously and 191 | appropriately publish on each copy an appropriate copyright notice; 192 | keep intact all notices stating that this License and any 193 | non-permissive terms added in accord with section 7 apply to the code; 194 | keep intact all notices of the absence of any warranty; and give all 195 | recipients a copy of this License along with the Program. 196 | 197 | You may charge any price or no price for each copy that you convey, 198 | and you may offer support or warranty protection for a fee. 199 | 200 | #### 5. Conveying Modified Source Versions. 
201 | 202 | You may convey a work based on the Program, or the modifications to 203 | produce it from the Program, in the form of source code under the 204 | terms of section 4, provided that you also meet all of these 205 | conditions: 206 | 207 | - a) The work must carry prominent notices stating that you modified 208 | it, and giving a relevant date. 209 | - b) The work must carry prominent notices stating that it is 210 | released under this License and any conditions added under 211 | section 7. This requirement modifies the requirement in section 4 212 | to "keep intact all notices". 213 | - c) You must license the entire work, as a whole, under this 214 | License to anyone who comes into possession of a copy. This 215 | License will therefore apply, along with any applicable section 7 216 | additional terms, to the whole of the work, and all its parts, 217 | regardless of how they are packaged. This License gives no 218 | permission to license the work in any other way, but it does not 219 | invalidate such permission if you have separately received it. 220 | - d) If the work has interactive user interfaces, each must display 221 | Appropriate Legal Notices; however, if the Program has interactive 222 | interfaces that do not display Appropriate Legal Notices, your 223 | work need not make them do so. 224 | 225 | A compilation of a covered work with other separate and independent 226 | works, which are not by their nature extensions of the covered work, 227 | and which are not combined with it such as to form a larger program, 228 | in or on a volume of a storage or distribution medium, is called an 229 | "aggregate" if the compilation and its resulting copyright are not 230 | used to limit the access or legal rights of the compilation's users 231 | beyond what the individual works permit. Inclusion of a covered work 232 | in an aggregate does not cause this License to apply to the other 233 | parts of the aggregate. 234 | 235 | #### 6. 
Conveying Non-Source Forms. 236 | 237 | You may convey a covered work in object code form under the terms of 238 | sections 4 and 5, provided that you also convey the machine-readable 239 | Corresponding Source under the terms of this License, in one of these 240 | ways: 241 | 242 | - a) Convey the object code in, or embodied in, a physical product 243 | (including a physical distribution medium), accompanied by the 244 | Corresponding Source fixed on a durable physical medium 245 | customarily used for software interchange. 246 | - b) Convey the object code in, or embodied in, a physical product 247 | (including a physical distribution medium), accompanied by a 248 | written offer, valid for at least three years and valid for as 249 | long as you offer spare parts or customer support for that product 250 | model, to give anyone who possesses the object code either (1) a 251 | copy of the Corresponding Source for all the software in the 252 | product that is covered by this License, on a durable physical 253 | medium customarily used for software interchange, for a price no 254 | more than your reasonable cost of physically performing this 255 | conveying of source, or (2) access to copy the Corresponding 256 | Source from a network server at no charge. 257 | - c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | - d) Convey the object code by offering access from a designated 263 | place (gratis or for a charge), and offer equivalent access to the 264 | Corresponding Source in the same way through the same place at no 265 | further charge. You need not require recipients to copy the 266 | Corresponding Source along with the object code. 
If the place to 267 | copy the object code is a network server, the Corresponding Source 268 | may be on a different server (operated by you or a third party) 269 | that supports equivalent copying facilities, provided you maintain 270 | clear directions next to the object code saying where to find the 271 | Corresponding Source. Regardless of what server hosts the 272 | Corresponding Source, you remain obligated to ensure that it is 273 | available for as long as needed to satisfy these requirements. 274 | - e) Convey the object code using peer-to-peer transmission, 275 | provided you inform other peers where the object code and 276 | Corresponding Source of the work are being offered to the general 277 | public at no charge under subsection 6d. 278 | 279 | A separable portion of the object code, whose source code is excluded 280 | from the Corresponding Source as a System Library, need not be 281 | included in conveying the object code work. 282 | 283 | A "User Product" is either (1) a "consumer product", which means any 284 | tangible personal property which is normally used for personal, 285 | family, or household purposes, or (2) anything designed or sold for 286 | incorporation into a dwelling. In determining whether a product is a 287 | consumer product, doubtful cases shall be resolved in favor of 288 | coverage. For a particular product received by a particular user, 289 | "normally used" refers to a typical or common use of that class of 290 | product, regardless of the status of the particular user or of the way 291 | in which the particular user actually uses, or expects or is expected 292 | to use, the product. A product is a consumer product regardless of 293 | whether the product has substantial commercial, industrial or 294 | non-consumer uses, unless such uses represent the only significant 295 | mode of use of the product. 
296 | 297 | "Installation Information" for a User Product means any methods, 298 | procedures, authorization keys, or other information required to 299 | install and execute modified versions of a covered work in that User 300 | Product from a modified version of its Corresponding Source. The 301 | information must suffice to ensure that the continued functioning of 302 | the modified object code is in no case prevented or interfered with 303 | solely because modification has been made. 304 | 305 | If you convey an object code work under this section in, or with, or 306 | specifically for use in, a User Product, and the conveying occurs as 307 | part of a transaction in which the right of possession and use of the 308 | User Product is transferred to the recipient in perpetuity or for a 309 | fixed term (regardless of how the transaction is characterized), the 310 | Corresponding Source conveyed under this section must be accompanied 311 | by the Installation Information. But this requirement does not apply 312 | if neither you nor any third party retains the ability to install 313 | modified object code on the User Product (for example, the work has 314 | been installed in ROM). 315 | 316 | The requirement to provide Installation Information does not include a 317 | requirement to continue to provide support service, warranty, or 318 | updates for a work that has been modified or installed by the 319 | recipient, or for the User Product in which it has been modified or 320 | installed. Access to a network may be denied when the modification 321 | itself materially and adversely affects the operation of the network 322 | or violates the rules and protocols for communication across the 323 | network. 
324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | #### 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 
348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders 351 | of that material) supplement the terms of this License with terms: 352 | 353 | - a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | - b) Requiring preservation of specified reasonable legal notices or 356 | author attributions in that material or in the Appropriate Legal 357 | Notices displayed by works containing it; or 358 | - c) Prohibiting misrepresentation of the origin of that material, 359 | or requiring that modified versions of such material be marked in 360 | reasonable ways as different from the original version; or 361 | - d) Limiting the use for publicity purposes of names of licensors 362 | or authors of the material; or 363 | - e) Declining to grant rights under trademark law for use of some 364 | trade names, trademarks, or service marks; or 365 | - f) Requiring indemnification of licensors and authors of that 366 | material by anyone who conveys the material (or modified versions 367 | of it) with contractual assumptions of liability to the recipient, 368 | for any liability that these contractual assumptions directly 369 | impose on those licensors and authors. 370 | 371 | All other non-permissive additional terms are considered "further 372 | restrictions" within the meaning of section 10. If the Program as you 373 | received it, or any part of it, contains a notice stating that it is 374 | governed by this License along with a term that is a further 375 | restriction, you may remove that term. If a license document contains 376 | a further restriction but permits relicensing or conveying under this 377 | License, you may add to a covered work material governed by the terms 378 | of that license document, provided that the further restriction does 379 | not survive such relicensing or conveying. 
380 | 381 | If you add terms to a covered work in accord with this section, you 382 | must place, in the relevant source files, a statement of the 383 | additional terms that apply to those files, or a notice indicating 384 | where to find the applicable terms. 385 | 386 | Additional terms, permissive or non-permissive, may be stated in the 387 | form of a separately written license, or stated as exceptions; the 388 | above requirements apply either way. 389 | 390 | #### 8. Termination. 391 | 392 | You may not propagate or modify a covered work except as expressly 393 | provided under this License. Any attempt otherwise to propagate or 394 | modify it is void, and will automatically terminate your rights under 395 | this License (including any patent licenses granted under the third 396 | paragraph of section 11). 397 | 398 | However, if you cease all violation of this License, then your license 399 | from a particular copyright holder is reinstated (a) provisionally, 400 | unless and until the copyright holder explicitly and finally 401 | terminates your license, and (b) permanently, if the copyright holder 402 | fails to notify you of the violation by some reasonable means prior to 403 | 60 days after the cessation. 404 | 405 | Moreover, your license from a particular copyright holder is 406 | reinstated permanently if the copyright holder notifies you of the 407 | violation by some reasonable means, this is the first time you have 408 | received notice of violation of this License (for any work) from that 409 | copyright holder, and you cure the violation prior to 30 days after 410 | your receipt of the notice. 411 | 412 | Termination of your rights under this section does not terminate the 413 | licenses of parties who have received copies or rights from you under 414 | this License. If your rights have been terminated and not permanently 415 | reinstated, you do not qualify to receive new licenses for the same 416 | material under section 10. 
417 | 418 | #### 9. Acceptance Not Required for Having Copies. 419 | 420 | You are not required to accept this License in order to receive or run 421 | a copy of the Program. Ancillary propagation of a covered work 422 | occurring solely as a consequence of using peer-to-peer transmission 423 | to receive a copy likewise does not require acceptance. However, 424 | nothing other than this License grants you permission to propagate or 425 | modify any covered work. These actions infringe copyright if you do 426 | not accept this License. Therefore, by modifying or propagating a 427 | covered work, you indicate your acceptance of this License to do so. 428 | 429 | #### 10. Automatic Licensing of Downstream Recipients. 430 | 431 | Each time you convey a covered work, the recipient automatically 432 | receives a license from the original licensors, to run, modify and 433 | propagate that work, subject to this License. You are not responsible 434 | for enforcing compliance by third parties with this License. 435 | 436 | An "entity transaction" is a transaction transferring control of an 437 | organization, or substantially all assets of one, or subdividing an 438 | organization, or merging organizations. If propagation of a covered 439 | work results from an entity transaction, each party to that 440 | transaction who receives a copy of the work also receives whatever 441 | licenses to the work the party's predecessor in interest had or could 442 | give under the previous paragraph, plus a right to possession of the 443 | Corresponding Source of the work from the predecessor in interest, if 444 | the predecessor has it or can get it with reasonable efforts. 445 | 446 | You may not impose any further restrictions on the exercise of the 447 | rights granted or affirmed under this License. 
For example, you may 448 | not impose a license fee, royalty, or other charge for exercise of 449 | rights granted under this License, and you may not initiate litigation 450 | (including a cross-claim or counterclaim in a lawsuit) alleging that 451 | any patent claim is infringed by making, using, selling, offering for 452 | sale, or importing the Program or any portion of it. 453 | 454 | #### 11. Patents. 455 | 456 | A "contributor" is a copyright holder who authorizes use under this 457 | License of the Program or a work on which the Program is based. The 458 | work thus licensed is called the contributor's "contributor version". 459 | 460 | A contributor's "essential patent claims" are all patent claims owned 461 | or controlled by the contributor, whether already acquired or 462 | hereafter acquired, that would be infringed by some manner, permitted 463 | by this License, of making, using, or selling its contributor version, 464 | but do not include claims that would be infringed only as a 465 | consequence of further modification of the contributor version. For 466 | purposes of this definition, "control" includes the right to grant 467 | patent sublicenses in a manner consistent with the requirements of 468 | this License. 469 | 470 | Each contributor grants you a non-exclusive, worldwide, royalty-free 471 | patent license under the contributor's essential patent claims, to 472 | make, use, sell, offer for sale, import and otherwise run, modify and 473 | propagate the contents of its contributor version. 474 | 475 | In the following three paragraphs, a "patent license" is any express 476 | agreement or commitment, however denominated, not to enforce a patent 477 | (such as an express permission to practice a patent or covenant not to 478 | sue for patent infringement). To "grant" such a patent license to a 479 | party means to make such an agreement or commitment not to enforce a 480 | patent against the party. 
481 | 482 | If you convey a covered work, knowingly relying on a patent license, 483 | and the Corresponding Source of the work is not available for anyone 484 | to copy, free of charge and under the terms of this License, through a 485 | publicly available network server or other readily accessible means, 486 | then you must either (1) cause the Corresponding Source to be so 487 | available, or (2) arrange to deprive yourself of the benefit of the 488 | patent license for this particular work, or (3) arrange, in a manner 489 | consistent with the requirements of this License, to extend the patent 490 | license to downstream recipients. "Knowingly relying" means you have 491 | actual knowledge that, but for the patent license, your conveying the 492 | covered work in a country, or your recipient's use of the covered work 493 | in a country, would infringe one or more identifiable patents in that 494 | country that you have reason to believe are valid. 495 | 496 | If, pursuant to or in connection with a single transaction or 497 | arrangement, you convey, or propagate by procuring conveyance of, a 498 | covered work, and grant a patent license to some of the parties 499 | receiving the covered work authorizing them to use, propagate, modify 500 | or convey a specific copy of the covered work, then the patent license 501 | you grant is automatically extended to all recipients of the covered 502 | work and works based on it. 503 | 504 | A patent license is "discriminatory" if it does not include within the 505 | scope of its coverage, prohibits the exercise of, or is conditioned on 506 | the non-exercise of one or more of the rights that are specifically 507 | granted under this License. 
You may not convey a covered work if you 508 | are a party to an arrangement with a third party that is in the 509 | business of distributing software, under which you make payment to the 510 | third party based on the extent of your activity of conveying the 511 | work, and under which the third party grants, to any of the parties 512 | who would receive the covered work from you, a discriminatory patent 513 | license (a) in connection with copies of the covered work conveyed by 514 | you (or copies made from those copies), or (b) primarily for and in 515 | connection with specific products or compilations that contain the 516 | covered work, unless you entered into that arrangement, or that patent 517 | license was granted, prior to 28 March 2007. 518 | 519 | Nothing in this License shall be construed as excluding or limiting 520 | any implied license or other defenses to infringement that may 521 | otherwise be available to you under applicable patent law. 522 | 523 | #### 12. No Surrender of Others' Freedom. 524 | 525 | If conditions are imposed on you (whether by court order, agreement or 526 | otherwise) that contradict the conditions of this License, they do not 527 | excuse you from the conditions of this License. If you cannot convey a 528 | covered work so as to satisfy simultaneously your obligations under 529 | this License and any other pertinent obligations, then as a 530 | consequence you may not convey it at all. For example, if you agree to 531 | terms that obligate you to collect a royalty for further conveying 532 | from those to whom you convey the Program, the only way you could 533 | satisfy both those terms and this License would be to refrain entirely 534 | from conveying the Program. 535 | 536 | #### 13. Remote Network Interaction; Use with the GNU General Public License. 
537 | 538 | Notwithstanding any other provision of this License, if you modify the 539 | Program, your modified version must prominently offer all users 540 | interacting with it remotely through a computer network (if your 541 | version supports such interaction) an opportunity to receive the 542 | Corresponding Source of your version by providing access to the 543 | Corresponding Source from a network server at no charge, through some 544 | standard or customary means of facilitating copying of software. This 545 | Corresponding Source shall include the Corresponding Source for any 546 | work covered by version 3 of the GNU General Public License that is 547 | incorporated pursuant to the following paragraph. 548 | 549 | Notwithstanding any other provision of this License, you have 550 | permission to link or combine any covered work with a work licensed 551 | under version 3 of the GNU General Public License into a single 552 | combined work, and to convey the resulting work. The terms of this 553 | License will continue to apply to the part which is the covered work, 554 | but the work with which it is combined will remain governed by version 555 | 3 of the GNU General Public License. 556 | 557 | #### 14. Revised Versions of this License. 558 | 559 | The Free Software Foundation may publish revised and/or new versions 560 | of the GNU Affero General Public License from time to time. Such new 561 | versions will be similar in spirit to the present version, but may 562 | differ in detail to address new problems or concerns. 563 | 564 | Each version is given a distinguishing version number. If the Program 565 | specifies that a certain numbered version of the GNU Affero General 566 | Public License "or any later version" applies to it, you have the 567 | option of following the terms and conditions either of that numbered 568 | version or of any later version published by the Free Software 569 | Foundation. 
If the Program does not specify a version number of the 570 | GNU Affero General Public License, you may choose any version ever 571 | published by the Free Software Foundation. 572 | 573 | If the Program specifies that a proxy can decide which future versions 574 | of the GNU Affero General Public License can be used, that proxy's 575 | public statement of acceptance of a version permanently authorizes you 576 | to choose that version for the Program. 577 | 578 | Later license versions may give you additional or different 579 | permissions. However, no additional obligations are imposed on any 580 | author or copyright holder as a result of your choosing to follow a 581 | later version. 582 | 583 | #### 15. Disclaimer of Warranty. 584 | 585 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 586 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 587 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT 588 | WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT 589 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 590 | A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND 591 | PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE 592 | DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR 593 | CORRECTION. 594 | 595 | #### 16. Limitation of Liability. 
596 | 597 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 598 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR 599 | CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 600 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES 601 | ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT 602 | NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR 603 | LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM 604 | TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER 605 | PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 606 | 607 | #### 17. Interpretation of Sections 15 and 16. 608 | 609 | If the disclaimer of warranty and limitation of liability provided 610 | above cannot be given local legal effect according to their terms, 611 | reviewing courts shall apply local law that most closely approximates 612 | an absolute waiver of all civil liability in connection with the 613 | Program, unless a warranty or assumption of liability accompanies a 614 | copy of the Program in return for a fee. 615 | 616 | END OF TERMS AND CONDITIONS 617 | 618 | ### How to Apply These Terms to Your New Programs 619 | 620 | If you develop a new program, and you want it to be of the greatest 621 | possible use to the public, the best way to achieve this is to make it 622 | free software which everyone can redistribute and change under these 623 | terms. 624 | 625 | To do so, attach the following notices to the program. It is safest to 626 | attach them to the start of each source file to most effectively state 627 | the exclusion of warranty; and each file should have at least the 628 | "copyright" line and a pointer to where the full notice is found. 
629 | 630 | <one line to give the program's name and a brief idea of what it does.> 631 | Copyright (C) <year> <name of author> 632 | 633 | This program is free software: you can redistribute it and/or modify 634 | it under the terms of the GNU Affero General Public License as 635 | published by the Free Software Foundation, either version 3 of the 636 | License, or (at your option) any later version. 637 | 638 | This program is distributed in the hope that it will be useful, 639 | but WITHOUT ANY WARRANTY; without even the implied warranty of 640 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 641 | GNU Affero General Public License for more details. 642 | 643 | You should have received a copy of the GNU Affero General Public License 644 | along with this program. If not, see <https://www.gnu.org/licenses/>. 645 | 646 | Also add information on how to contact you by electronic and paper 647 | mail. 648 | 649 | If your software can interact with users remotely through a computer 650 | network, you should also make sure that it provides a way for users to 651 | get its source. For example, if your program is a web application, its 652 | interface could display a "Source" link that leads users to an archive 653 | of the code. There are many ways you could offer source, and different 654 | solutions will be better for different programs; see section 13 for 655 | the specific requirements. 656 | 657 | You should also get your employer (if you work as a programmer) or 658 | school, if any, to sign a "copyright disclaimer" for the program, if 659 | necessary. For more information on this, and how to apply and follow 660 | the GNU AGPL, see <https://www.gnu.org/licenses/>.
661 | -------------------------------------------------------------------------------- /backtesting/test/_test.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import os 3 | import sys 4 | import time 5 | import unittest 6 | import warnings 7 | from concurrent.futures.process import ProcessPoolExecutor 8 | from contextlib import contextmanager 9 | from glob import glob 10 | from runpy import run_path 11 | from tempfile import NamedTemporaryFile, gettempdir 12 | from unittest import TestCase 13 | from unittest.mock import patch 14 | 15 | import numpy as np 16 | import pandas as pd 17 | 18 | from backtesting import Backtest, Strategy 19 | from backtesting.lib import ( 20 | OHLCV_AGG, 21 | barssince, 22 | cross, 23 | crossover, 24 | quantile, 25 | SignalStrategy, 26 | TrailingStrategy, 27 | resample_apply, 28 | plot_heatmaps, 29 | random_ohlc_data, 30 | ) 31 | from backtesting.test import GOOG, EURUSD, SMA 32 | from backtesting._util import _Indicator, _as_str, _Array, try_ 33 | 34 | SHORT_DATA = GOOG.iloc[:20] # Short data for fast tests with no indicator lag 35 | 36 | 37 | @contextmanager 38 | def _tempfile(): 39 | with NamedTemporaryFile(suffix='.html') as f: 40 | if sys.platform.startswith('win'): 41 | f.close() 42 | yield f.name 43 | 44 | 45 | @contextmanager 46 | def chdir(path): 47 | cwd = os.getcwd() 48 | os.chdir(path) 49 | try: 50 | yield 51 | finally: 52 | os.chdir(cwd) 53 | 54 | 55 | class SmaCross(Strategy): 56 | # NOTE: These values are also used on the website! 
57 | fast = 10 58 | slow = 30 59 | 60 | def init(self): 61 | self.sma1 = self.I(SMA, self.data.Close, self.fast) 62 | self.sma2 = self.I(SMA, self.data.Close, self.slow) 63 | 64 | def next(self): 65 | if crossover(self.sma1, self.sma2): 66 | self.position.close() 67 | self.buy() 68 | elif crossover(self.sma2, self.sma1): 69 | self.position.close() 70 | self.sell() 71 | 72 | 73 | class TestBacktest(TestCase): 74 | def test_run(self): 75 | bt = Backtest(EURUSD, SmaCross) 76 | bt.run() 77 | 78 | def test_run_invalid_param(self): 79 | bt = Backtest(GOOG, SmaCross) 80 | self.assertRaises(AttributeError, bt.run, foo=3) 81 | 82 | def test_run_speed(self): 83 | bt = Backtest(GOOG, SmaCross) 84 | start = time.process_time() 85 | bt.run() 86 | end = time.process_time() 87 | self.assertLess(end - start, .3) 88 | 89 | def test_data_missing_columns(self): 90 | df = GOOG.copy(deep=False) 91 | del df['Open'] 92 | with self.assertRaises(ValueError): 93 | Backtest(df, SmaCross).run() 94 | 95 | def test_data_nan_columns(self): 96 | df = GOOG.copy() 97 | df['Open'] = np.nan 98 | with self.assertRaises(ValueError): 99 | Backtest(df, SmaCross).run() 100 | 101 | def test_data_extra_columns(self): 102 | df = GOOG.copy(deep=False) 103 | df['P/E'] = np.arange(len(df)) 104 | df['MCap'] = np.arange(len(df)) 105 | 106 | class S(Strategy): 107 | def init(self): 108 | assert len(self.data.MCap) == len(self.data.Close) 109 | assert len(self.data['P/E']) == len(self.data.Close) 110 | 111 | def next(self): 112 | assert len(self.data.MCap) == len(self.data.Close) 113 | assert len(self.data['P/E']) == len(self.data.Close) 114 | 115 | Backtest(df, S).run() 116 | 117 | def test_data_invalid(self): 118 | with self.assertRaises(TypeError): 119 | Backtest(GOOG.index, SmaCross).run() 120 | with self.assertRaises(ValueError): 121 | Backtest(GOOG.iloc[:0], SmaCross).run() 122 | 123 | def test_assertions(self): 124 | class Assertive(Strategy): 125 | def init(self): 126 | self.sma = self.I(SMA, 
self.data.Close, 10) 127 | self.remains_indicator = np.r_[2] * np.cumsum(self.sma * 5 + 1) * np.r_[2] 128 | 129 | self.transpose_invalid = self.I(lambda: np.column_stack((self.data.Open, 130 | self.data.Close))) 131 | 132 | resampled = resample_apply('W', SMA, self.data.Close, 3) 133 | resampled_ind = resample_apply('W', SMA, self.sma, 3) 134 | assert np.unique(resampled[-5:]).size == 1 135 | assert np.unique(resampled[-6:]).size == 2 136 | assert resampled in self._indicators, "Strategy.I not called" 137 | assert resampled_ind in self._indicators, "Strategy.I not called" 138 | 139 | assert 1 == try_(lambda: self.data.X, 1, AttributeError) 140 | assert 1 == try_(lambda: self.data['X'], 1, KeyError) 141 | 142 | assert self.data.pip == .01 143 | 144 | assert float(self.data.Close) == self.data.Close[-1] 145 | 146 | def next(self, FIVE_DAYS=pd.Timedelta('3 days')): 147 | assert self.equity >= 0 148 | 149 | assert isinstance(self.sma, _Indicator) 150 | assert isinstance(self.remains_indicator, _Indicator) 151 | assert self.remains_indicator.name 152 | assert isinstance(self.remains_indicator._opts, dict) 153 | 154 | assert not np.isnan(self.data.Open[-1]) 155 | assert not np.isnan(self.data.High[-1]) 156 | assert not np.isnan(self.data.Low[-1]) 157 | assert not np.isnan(self.data.Close[-1]) 158 | assert not np.isnan(self.data.Volume[-1]) 159 | assert not np.isnan(self.sma[-1]) 160 | assert self.data.index[-1] 161 | 162 | self.position 163 | self.position.size 164 | self.position.pl 165 | self.position.pl_pct 166 | self.position.is_long 167 | 168 | if crossover(self.sma, self.data.Close): 169 | self.orders.cancel() # cancels only non-contingent 170 | price = self.data.Close[-1] 171 | sl, tp = 1.05 * price, .9 * price 172 | 173 | n_orders = len(self.orders) 174 | self.sell(size=.21, limit=price, stop=price, sl=sl, tp=tp) 175 | assert len(self.orders) == n_orders + 1 176 | 177 | order = self.orders[-1] 178 | assert order.limit == price 179 | assert order.stop == price 180 
| assert order.size == -.21 181 | assert order.sl == sl 182 | assert order.tp == tp 183 | assert not order.is_contingent 184 | 185 | elif self.position: 186 | assert not self.position.is_long 187 | assert self.position.is_short 188 | assert self.position.pl 189 | assert self.position.pl_pct 190 | assert self.position.size < 0 191 | 192 | trade = self.trades[0] 193 | if self.data.index[-1] - self.data.index[trade.entry_bar] > FIVE_DAYS: 194 | assert not trade.is_long 195 | assert trade.is_short 196 | assert trade.size < 0 197 | assert trade.entry_bar > 0 198 | assert isinstance(trade.entry_time, pd.Timestamp) 199 | assert trade.exit_bar is None 200 | assert trade.exit_time is None 201 | assert trade.entry_price > 0 202 | assert trade.exit_price is None 203 | assert trade.pl / 1 204 | assert trade.pl_pct / 1 205 | assert trade.value > 0 206 | assert trade.sl 207 | assert trade.tp 208 | # Close multiple times 209 | self.position.close(.5) 210 | self.position.close(.5) 211 | self.position.close(.5) 212 | self.position.close() 213 | self.position.close() 214 | 215 | bt = Backtest(GOOG, Assertive) 216 | with self.assertWarns(UserWarning): 217 | stats = bt.run() 218 | self.assertEqual(stats['# Trades'], 145) 219 | 220 | def test_broker_params(self): 221 | bt = Backtest(GOOG.iloc[:100], SmaCross, 222 | cash=1000, commission=.01, margin=.1, trade_on_close=True) 223 | bt.run() 224 | 225 | def test_dont_overwrite_data(self): 226 | df = EURUSD.copy() 227 | bt = Backtest(df, SmaCross) 228 | bt.run() 229 | bt.optimize(fast=4, slow=[6, 8]) 230 | bt.plot(plot_drawdown=True, open_browser=False) 231 | self.assertTrue(df.equals(EURUSD)) 232 | 233 | def test_strategy_abstract(self): 234 | class MyStrategy(Strategy): 235 | pass 236 | 237 | self.assertRaises(TypeError, MyStrategy, None, None) 238 | 239 | def test_strategy_str(self): 240 | bt = Backtest(GOOG.iloc[:100], SmaCross) 241 | self.assertEqual(str(bt.run()._strategy), SmaCross.__name__) 242 | 
self.assertEqual(str(bt.run(fast=11)._strategy), SmaCross.__name__ + '(fast=11)') 243 | 244 | def test_compute_drawdown(self): 245 | dd = pd.Series([0, 1, 7, 0, 4, 0, 0]) 246 | durations, peaks = Backtest._compute_drawdown_duration_peaks(dd) 247 | np.testing.assert_array_equal(durations, pd.Series([3, 2], index=[3, 5]).reindex(dd.index)) 248 | np.testing.assert_array_equal(peaks, pd.Series([7, 4], index=[3, 5]).reindex(dd.index)) 249 | 250 | def test_compute_stats(self): 251 | stats = Backtest(GOOG, SmaCross).run() 252 | expected = pd.Series({ 253 | # NOTE: These values are also used on the website! 254 | '# Trades': 66, 255 | 'Avg. Drawdown Duration': pd.Timedelta('41 days 00:00:00'), 256 | 'Avg. Drawdown [%]': -5.925851581948801, 257 | 'Avg. Trade Duration': pd.Timedelta('46 days 00:00:00'), 258 | 'Avg. Trade [%]': 2.531715975158555, 259 | 'Best Trade [%]': 53.59595229490424, 260 | 'Buy & Hold Return [%]': 703.4582419772772, 261 | 'Calmar Ratio': 0.4414380935608377, 262 | 'Duration': pd.Timedelta('3116 days 00:00:00'), 263 | 'End': pd.Timestamp('2013-03-01 00:00:00'), 264 | 'Equity Final [$]': 51422.98999999996, 265 | 'Equity Peak [$]': 75787.44, 266 | 'Expectancy [%]': 3.2748078066748834, 267 | 'Exposure Time [%]': 96.74115456238361, 268 | 'Max. Drawdown Duration': pd.Timedelta('584 days 00:00:00'), 269 | 'Max. Drawdown [%]': -47.98012705007589, 270 | 'Max. Trade Duration': pd.Timedelta('183 days 00:00:00'), 271 | 'Profit Factor': 2.167945974262033, 272 | 'Return (Ann.) [%]': 21.180255813792282, 273 | 'Return [%]': 414.2298999999996, 274 | 'Volatility (Ann.) 
[%]': 36.49390889140787, 275 | 'SQN': 1.0766187356697705, 276 | 'Sharpe Ratio': 0.5803778344714113, 277 | 'Sortino Ratio': 1.0847880675854096, 278 | 'Start': pd.Timestamp('2004-08-19 00:00:00'), 279 | 'Win Rate [%]': 46.96969696969697, 280 | 'Worst Trade [%]': -18.39887353835481, 281 | }) 282 | 283 | def almost_equal(a, b): 284 | try: 285 | return np.isclose(a, b, rtol=1.e-8) 286 | except TypeError: 287 | return a == b 288 | 289 | diff = {key: print(key) or value 290 | for key, value in stats.filter(regex='^[^_]').items() 291 | if not almost_equal(value, expected[key])} 292 | self.assertDictEqual(diff, {}) 293 | 294 | self.assertSequenceEqual( 295 | sorted(stats['_equity_curve'].columns), 296 | sorted(['Equity', 'DrawdownPct', 'DrawdownDuration'])) 297 | 298 | self.assertEqual(len(stats['_trades']), 66) 299 | 300 | self.assertSequenceEqual( 301 | sorted(stats['_trades'].columns), 302 | sorted(['Size', 'EntryBar', 'ExitBar', 'EntryPrice', 'ExitPrice', 303 | 'PnL', 'ReturnPct', 'EntryTime', 'ExitTime', 'Duration'])) 304 | 305 | def test_compute_stats_bordercase(self): 306 | 307 | class SingleTrade(Strategy): 308 | def init(self): 309 | self._done = False 310 | 311 | def next(self): 312 | if not self._done: 313 | self.buy() 314 | self._done = True 315 | if self.position: 316 | self.position.close() 317 | 318 | class SinglePosition(Strategy): 319 | def init(self): 320 | pass 321 | 322 | def next(self): 323 | if not self.position: 324 | self.buy() 325 | 326 | class NoTrade(Strategy): 327 | def init(self): 328 | pass 329 | 330 | def next(self): 331 | pass 332 | 333 | for strategy in (SmaCross, 334 | SingleTrade, 335 | SinglePosition, 336 | NoTrade): 337 | with self.subTest(strategy=strategy.__name__): 338 | stats = Backtest(GOOG.iloc[:100], strategy).run() 339 | 340 | self.assertFalse(np.isnan(stats['Equity Final [$]'])) 341 | self.assertFalse(stats['_equity_curve']['Equity'].isnull().any()) 342 | self.assertEqual(stats['_strategy'].__class__, strategy) 343 | 344 | def 
test_trade_enter_hit_sl_on_same_day(self): 345 | the_day = pd.Timestamp("2012-10-17 00:00:00") 346 | 347 | class S(Strategy): 348 | def init(self): pass 349 | 350 | def next(self): 351 | if self.data.index[-1] == the_day: 352 | self.buy(sl=720) 353 | 354 | self.assertEqual(Backtest(GOOG, S).run()._trades.iloc[0].ExitPrice, 720) 355 | 356 | class S(S): 357 | def next(self): 358 | if self.data.index[-1] == the_day: 359 | self.buy(stop=758, sl=720) 360 | 361 | with self.assertWarns(UserWarning): 362 | self.assertEqual(Backtest(GOOG, S).run()._trades.iloc[0].ExitPrice, 705.58) 363 | 364 | def test_stop_price_between_sl_tp(self): 365 | class S(Strategy): 366 | def init(self): pass 367 | 368 | def next(self): 369 | if self.data.index[-1] == pd.Timestamp("2004-09-09 00:00:00"): 370 | self.buy(stop=104, sl=103, tp=110) 371 | 372 | with self.assertWarns(UserWarning): 373 | self.assertEqual(Backtest(GOOG, S).run()._trades.iloc[0].EntryPrice, 104) 374 | 375 | def test_position_close_portion(self): 376 | class SmaCross(Strategy): 377 | def init(self): 378 | self.sma1 = self.I(SMA, self.data.Close, 10) 379 | self.sma2 = self.I(SMA, self.data.Close, 20) 380 | 381 | def next(self): 382 | if not self.position and crossover(self.sma1, self.sma2): 383 | self.buy(size=10) 384 | if self.position and crossover(self.sma2, self.sma1): 385 | self.position.close(portion=.5) 386 | 387 | bt = Backtest(GOOG, SmaCross, commission=.002) 388 | bt.run() 389 | 390 | def test_close_orders_from_last_strategy_iteration(self): 391 | class S(Strategy): 392 | def init(self): pass 393 | 394 | def next(self): 395 | if not self.position: 396 | self.buy() 397 | elif len(self.data) == len(SHORT_DATA): 398 | self.position.close() 399 | 400 | self.assertFalse(Backtest(SHORT_DATA, S).run()._trades.empty) 401 | 402 | def test_check_adjusted_price_when_placing_order(self): 403 | class S(Strategy): 404 | def init(self): pass 405 | 406 | def next(self): 407 | self.buy(tp=self.data.Close * 1.01) 408 | 409 | 
self.assertRaises(ValueError, Backtest(SHORT_DATA, S, commission=.02).run) 410 | 411 | 412 | class TestStrategy(TestCase): 413 | def _Backtest(self, strategy_coroutine, **kwargs): 414 | class S(Strategy): 415 | def init(self): 416 | self.step = strategy_coroutine(self) 417 | 418 | def next(self): 419 | try_(self.step.__next__, None, StopIteration) 420 | 421 | return Backtest(SHORT_DATA, S, **kwargs) 422 | 423 | def test_position(self): 424 | def coroutine(self): 425 | yield self.buy() 426 | 427 | assert self.position 428 | assert self.position.is_long 429 | assert not self.position.is_short 430 | assert self.position.size > 0 431 | assert self.position.pl 432 | assert self.position.pl_pct 433 | 434 | yield self.position.close() 435 | 436 | assert not self.position 437 | assert not self.position.is_long 438 | assert not self.position.is_short 439 | assert not self.position.size 440 | assert not self.position.pl 441 | assert not self.position.pl_pct 442 | 443 | self._Backtest(coroutine).run() 444 | 445 | def test_broker_hedging(self): 446 | def coroutine(self): 447 | yield self.buy(size=2) 448 | 449 | assert len(self.trades) == 1 450 | yield self.sell(size=1) 451 | 452 | assert len(self.trades) == 2 453 | 454 | self._Backtest(coroutine, hedging=True).run() 455 | 456 | def test_broker_exclusive_orders(self): 457 | def coroutine(self): 458 | yield self.buy(size=2) 459 | 460 | assert len(self.trades) == 1 461 | yield self.sell(size=3) 462 | 463 | assert len(self.trades) == 1 464 | assert self.trades[0].size == -3 465 | 466 | self._Backtest(coroutine, exclusive_orders=True).run() 467 | 468 | def test_trade_multiple_close(self): 469 | def coroutine(self): 470 | yield self.buy() 471 | 472 | assert self.trades 473 | self.trades[-1].close(1) 474 | self.trades[-1].close(.1) 475 | yield 476 | 477 | self._Backtest(coroutine).run() 478 | 479 | def test_close_trade_leaves_needsize_0(self): 480 | def coroutine(self): 481 | self.buy(size=1) 482 | self.buy(size=1) 483 | yield 484 | 
if self.position: 485 | self.sell(size=1) 486 | 487 | self._Backtest(coroutine).run() 488 | 489 | def test_stop_limit_order_price_is_stop_price(self): 490 | def coroutine(self): 491 | self.buy(stop=112, limit=113, size=1) 492 | self.sell(stop=107, limit=105, size=1) 493 | yield 494 | 495 | stats = self._Backtest(coroutine).run() 496 | self.assertListEqual(stats._trades.filter(like='Price').stack().tolist(), [112, 107]) 497 | 498 | def test_autoclose_trades_on_finish(self): 499 | def coroutine(self): 500 | yield self.buy() 501 | 502 | stats = self._Backtest(coroutine).run() 503 | self.assertEqual(len(stats._trades), 1) 504 | 505 | 506 | class TestOptimize(TestCase): 507 | def test_optimize(self): 508 | bt = Backtest(GOOG.iloc[:100], SmaCross) 509 | OPT_PARAMS = dict(fast=range(2, 5, 2), slow=[2, 5, 7, 9]) 510 | 511 | self.assertRaises(ValueError, bt.optimize) 512 | self.assertRaises(ValueError, bt.optimize, maximize='missing key', **OPT_PARAMS) 513 | self.assertRaises(ValueError, bt.optimize, maximize='missing key', **OPT_PARAMS) 514 | self.assertRaises(TypeError, bt.optimize, maximize=15, **OPT_PARAMS) 515 | self.assertRaises(TypeError, bt.optimize, constraint=15, **OPT_PARAMS) 516 | self.assertRaises(ValueError, bt.optimize, constraint=lambda d: False, **OPT_PARAMS) 517 | self.assertRaises(ValueError, bt.optimize, return_optimization=True, **OPT_PARAMS) 518 | 519 | res = bt.optimize(**OPT_PARAMS) 520 | self.assertIsInstance(res, pd.Series) 521 | 522 | default_maximize = inspect.signature(Backtest.optimize).parameters['maximize'].default 523 | res2 = bt.optimize(**OPT_PARAMS, maximize=lambda s: s[default_maximize]) 524 | self.assertDictEqual(res.filter(regex='^[^_]').fillna(-1).to_dict(), 525 | res2.filter(regex='^[^_]').fillna(-1).to_dict()) 526 | 527 | res3, heatmap = bt.optimize(**OPT_PARAMS, return_heatmap=True, 528 | constraint=lambda d: d.slow > 2 * d.fast) 529 | self.assertIsInstance(heatmap, pd.Series) 530 | self.assertEqual(len(heatmap), 4) 531 | 
self.assertEqual(heatmap.name, default_maximize) 532 | 533 | with _tempfile() as f: 534 | bt.plot(filename=f, open_browser=False) 535 | 536 | def test_method_skopt(self): 537 | bt = Backtest(GOOG.iloc[:100], SmaCross) 538 | res, heatmap, skopt_results = bt.optimize( 539 | fast=range(2, 20), slow=np.arange(2, 20, dtype=object), 540 | constraint=lambda p: p.fast < p.slow, 541 | max_tries=30, 542 | method='skopt', 543 | return_optimization=True, 544 | return_heatmap=True, 545 | random_state=2) 546 | self.assertIsInstance(res, pd.Series) 547 | self.assertIsInstance(heatmap, pd.Series) 548 | self.assertGreater(heatmap.max(), 1.1) 549 | self.assertGreater(heatmap.min(), -2) 550 | self.assertEqual(-skopt_results.fun, heatmap.max()) 551 | self.assertEqual(heatmap.index.tolist(), heatmap.dropna().index.unique().tolist()) 552 | 553 | def test_max_tries(self): 554 | bt = Backtest(GOOG.iloc[:100], SmaCross) 555 | OPT_PARAMS = dict(fast=range(2, 10, 2), slow=[2, 5, 7, 9]) 556 | for method, max_tries, random_state in (('grid', 5, 2), 557 | ('grid', .3, 2), 558 | ('skopt', 7, 0), 559 | ('skopt', .45, 0)): 560 | with self.subTest(method=method, 561 | max_tries=max_tries, 562 | random_state=random_state): 563 | _, heatmap = bt.optimize(max_tries=max_tries, 564 | method=method, 565 | random_state=random_state, 566 | return_heatmap=True, 567 | **OPT_PARAMS) 568 | self.assertEqual(len(heatmap), 6) 569 | 570 | def test_nowrite_df(self): 571 | # Test we don't write into passed data df by default. 
572 | # Important for copy-on-write in Backtest.optimize() 573 | df = EURUSD.astype(float) 574 | values = df.values.ctypes.data 575 | assert values == df.values.ctypes.data 576 | 577 | class S(SmaCross): 578 | def init(self): 579 | super().init() 580 | assert values == self.data.df.values.ctypes.data 581 | 582 | bt = Backtest(df, S) 583 | _ = bt.run() 584 | assert values == bt._data.values.ctypes.data 585 | 586 | def test_multiprocessing_windows_spawn(self): 587 | df = GOOG.iloc[:100] 588 | kw = dict(fast=[10]) 589 | 590 | stats1 = Backtest(df, SmaCross).optimize(**kw) 591 | with patch('multiprocessing.get_start_method', lambda **_: 'spawn'): 592 | with self.assertWarns(UserWarning) as cm: 593 | stats2 = Backtest(df, SmaCross).optimize(**kw) 594 | 595 | self.assertIn('multiprocessing support', cm.warning.args[0]) 596 | assert stats1.filter('[^_]').equals(stats2.filter('[^_]')), (stats1, stats2) 597 | 598 | def test_optimize_invalid_param(self): 599 | bt = Backtest(GOOG.iloc[:100], SmaCross) 600 | self.assertRaises(AttributeError, bt.optimize, foo=range(3)) 601 | self.assertRaises(ValueError, bt.optimize, fast=[]) 602 | 603 | def test_optimize_no_trades(self): 604 | bt = Backtest(GOOG, SmaCross) 605 | stats = bt.optimize(fast=[3], slow=[3]) 606 | self.assertTrue(stats.isnull().any()) 607 | 608 | def test_optimize_speed(self): 609 | bt = Backtest(GOOG.iloc[:100], SmaCross) 610 | start = time.process_time() 611 | bt.optimize(fast=(2, 5, 7), slow=[10, 15, 20, 30]) 612 | end = time.process_time() 613 | self.assertLess(end - start, .2) 614 | 615 | 616 | class TestPlot(TestCase): 617 | def test_plot_before_run(self): 618 | bt = Backtest(GOOG, SmaCross) 619 | self.assertRaises(RuntimeError, bt.plot) 620 | 621 | def test_file_size(self): 622 | bt = Backtest(GOOG, SmaCross) 623 | bt.run() 624 | with _tempfile() as f: 625 | bt.plot(filename=f[:-len('.html')], open_browser=False) 626 | self.assertLess(os.path.getsize(f), 500000) 627 | 628 | def test_params(self): 629 | bt = 
Backtest(GOOG.iloc[:100], SmaCross) 630 | bt.run() 631 | with _tempfile() as f: 632 | for p in dict(plot_volume=False, 633 | plot_equity=False, 634 | plot_return=True, 635 | plot_pl=False, 636 | plot_drawdown=True, 637 | superimpose=False, 638 | resample='1W', 639 | smooth_equity=False, 640 | relative_equity=False, 641 | reverse_indicators=True, 642 | show_legend=False).items(): 643 | with self.subTest(param=p[0]): 644 | bt.plot(**dict([p]), filename=f, open_browser=False) 645 | 646 | def test_hide_legend(self): 647 | bt = Backtest(GOOG.iloc[:100], SmaCross) 648 | bt.run() 649 | with _tempfile() as f: 650 | bt.plot(filename=f, show_legend=False) 651 | # Give browser time to open before tempfile is removed 652 | time.sleep(5) 653 | 654 | def test_resolutions(self): 655 | with _tempfile() as f: 656 | for rule in 'LSTHDWM': 657 | with self.subTest(rule=rule): 658 | df = EURUSD.iloc[:2].resample(rule).agg(OHLCV_AGG).dropna().iloc[:1100] 659 | bt = Backtest(df, SmaCross) 660 | bt.run() 661 | bt.plot(filename=f, open_browser=False) 662 | 663 | def test_range_axis(self): 664 | df = GOOG.iloc[:100].reset_index(drop=True) 665 | 666 | # Warm-up. CPython bug bpo-29620. 
667 | try: 668 | with self.assertWarns(UserWarning): 669 | Backtest(df, SmaCross) 670 | except RuntimeError: 671 | pass 672 | 673 | with self.assertWarns(UserWarning): 674 | bt = Backtest(df, SmaCross) 675 | bt.run() 676 | with _tempfile() as f: 677 | bt.plot(filename=f, open_browser=False) 678 | 679 | def test_preview(self): 680 | class Strategy(SmaCross): 681 | def init(self): 682 | super().init() 683 | 684 | def ok(x): 685 | return x 686 | 687 | self.a = self.I(SMA, self.data.Open, 5, overlay=False, name='ok') 688 | self.b = self.I(ok, np.random.random(len(self.data.Open))) 689 | 690 | bt = Backtest(GOOG, Strategy) 691 | bt.run() 692 | with _tempfile() as f: 693 | bt.plot(filename=f, plot_drawdown=True, smooth_equity=True) 694 | # Give browser time to open before tempfile is removed 695 | time.sleep(5) 696 | 697 | def test_wellknown(self): 698 | class S(Strategy): 699 | def init(self): 700 | pass 701 | 702 | def next(self): 703 | date = self.data.index[-1] 704 | if date == pd.Timestamp('Thu 19 Oct 2006'): 705 | self.buy(stop=484, limit=466, size=100) 706 | elif date == pd.Timestamp('Thu 30 Oct 2007'): 707 | self.position.close() 708 | elif date == pd.Timestamp('Tue 11 Nov 2008'): 709 | self.sell(stop=self.data.Low, 710 | limit=324.90, # High from 14 Nov 711 | size=200) 712 | 713 | bt = Backtest(GOOG, S, margin=.1) 714 | stats = bt.run() 715 | trades = stats['_trades'] 716 | 717 | self.assertAlmostEqual(stats['Equity Peak [$]'], 46961) 718 | self.assertEqual(stats['Equity Final [$]'], 0) 719 | self.assertEqual(len(trades), 2) 720 | assert trades[['EntryTime', 'ExitTime']].equals( 721 | pd.DataFrame(dict(EntryTime=pd.to_datetime(['2006-11-01', '2008-11-14']), 722 | ExitTime=pd.to_datetime(['2007-10-31', '2009-09-21'])))) 723 | assert trades['PnL'].round().equals(pd.Series([23469., -34420.])) 724 | 725 | with _tempfile() as f: 726 | bt.plot(filename=f, plot_drawdown=True, smooth_equity=False) 727 | # Give browser time to open before tempfile is removed 728 | 
# NOTE(review): this chunk opens mid-method — the line below is the tail of a
# plotting test whose `def` begins before this chunk (pattern matches the
# "give browser time to open" sleep used after bt.plot() elsewhere in this
# file) — TODO confirm against the full file.
            time.sleep(1)

    def test_resample(self):
        # Force resampling by shrinking the candle budget, and assert the
        # user is warned when their data gets downsampled for plotting.
        bt = Backtest(GOOG, SmaCross)
        bt.run()
        import backtesting._plotting
        with _tempfile() as f,\
                patch.object(backtesting._plotting, '_MAX_CANDLES', 10),\
                self.assertWarns(UserWarning):
            bt.plot(filename=f, resample=True)
            # Give browser time to open before tempfile is removed
            time.sleep(1)

    def test_indicator_color(self):
        # Smoke-test custom indicator colors: a single color string for
        # single-series indicators, a tuple of colors for a multi-series one.
        class S(Strategy):
            def init(self):
                a = self.I(SMA, self.data.Close, 5, overlay=True, color='red')
                b = self.I(SMA, self.data.Close, 10, overlay=False, color='blue')
                self.I(lambda: (a, b), overlay=False, color=('green', 'orange'))

            def next(self):
                pass

        bt = Backtest(GOOG, S)
        bt.run()
        with _tempfile() as f:
            bt.plot(filename=f,
                    plot_drawdown=False, plot_equity=False, plot_pl=False, plot_volume=False,
                    open_browser=False)

    def test_indicator_scatter(self):
        # Smoke-test scatter-style indicator rendering, both overlaid on the
        # price chart and in a separate panel.
        class S(Strategy):
            def init(self):
                self.I(SMA, self.data.Close, 5, overlay=True, scatter=True)
                self.I(SMA, self.data.Close, 10, overlay=False, scatter=True)

            def next(self):
                pass

        bt = Backtest(GOOG, S)
        bt.run()
        with _tempfile() as f:
            bt.plot(filename=f,
                    plot_drawdown=False, plot_equity=False, plot_pl=False, plot_volume=False,
                    open_browser=False)


class TestLib(TestCase):
    """Tests for the helpers exported by ``backtesting.lib``."""

    def test_barssince(self):
        # Index of most recent truthy value, counting back from the end;
        # `default` (here 0) is returned when the condition never held... no —
        # per the assertions: np.inf when never true, unless a default is given.
        self.assertEqual(barssince(np.r_[1, 0, 0]), 2)
        self.assertEqual(barssince(np.r_[0, 0, 0]), np.inf)
        self.assertEqual(barssince(np.r_[0, 0, 0], 0), 0)

    def test_cross(self):
        # cross() is direction-agnostic: true for a cross in either direction.
        self.assertTrue(cross([0, 1], [1, 0]))
        self.assertTrue(cross([1, 0], [0, 1]))
        self.assertFalse(cross([1, 0], [1, 0]))

    def test_crossover(self):
        # crossover() is directional (first series crossing above second) and
        # accepts sequences, scalars, and pd.Series operands.
        self.assertTrue(crossover([0, 1], [1, 0]))
        self.assertTrue(crossover([0, 1], .5))
        self.assertTrue(crossover([0, 1], pd.Series([.5, .5], index=[5, 6])))
        self.assertFalse(crossover([1, 0], [1, 0]))
        self.assertFalse(crossover([0], [1]))

    def test_quantile(self):
        # Two modes: with `q` returns the value at that quantile; without `q`
        # returns the quantile rank of the last value within the series.
        self.assertEqual(quantile(np.r_[1, 3, 2], .5), 2)
        self.assertEqual(quantile(np.r_[1, 3, 2]), .5)

    def test_resample_apply(self):
        # Daily-resampled SMA, projected back onto the original (intraday)
        # index; name encodes source column and rule, NaN ratio is pinned.
        res = resample_apply('D', SMA, EURUSD.Close, 10)
        self.assertEqual(res.name, 'C[D]')
        self.assertEqual(res.count() / res.size, .9634)
        np.testing.assert_almost_equal(res.iloc[-48:].unique().tolist(),
                                       [1.242643, 1.242381, 1.242275],
                                       decimal=6)

        # A function that discards the datetime index must still produce the
        # same reprojected result (resample_apply restores the index).
        def resets_index(*args):
            return pd.Series(SMA(*args).values)

        res2 = resample_apply('D', resets_index, EURUSD.Close, 10)
        self.assertTrue((res.dropna() == res2.dropna()).all())
        self.assertTrue((res.index == res2.index).all())

        # func=None: plain OHLCV resampling of the whole frame.
        res3 = resample_apply('D', None, EURUSD)
        self.assertIn('Volume', res3)

        # A func returning a tuple of series yields a DataFrame.
        res3 = resample_apply('D', lambda df: (df.Close, df.Close), EURUSD)
        self.assertIsInstance(res3, pd.DataFrame)

    def test_plot_heatmaps(self):
        # Heatmap plotting smoke test with both a named and a callable `agg`.
        bt = Backtest(GOOG, SmaCross)
        stats, heatmap = bt.optimize(fast=range(2, 7, 2),
                                     slow=range(7, 15, 2),
                                     return_heatmap=True)
        with _tempfile() as f:
            for agg in ('mean',
                        lambda x: np.percentile(x, 75)):
                plot_heatmaps(heatmap, agg, filename=f, open_browser=False)

            # Preview
            plot_heatmaps(heatmap, filename=f)
            time.sleep(5)

    def test_random_ohlc_data(self):
        # frac=1 resamples the full frame: index, shape and columns preserved.
        generator = random_ohlc_data(GOOG, frac=1)
        new_data = next(generator)
        self.assertEqual(list(new_data.index), list(GOOG.index))
        self.assertEqual(new_data.shape, GOOG.shape)
        self.assertEqual(list(new_data.columns), list(GOOG.columns))

    def test_SignalStrategy(self):
        # set_signal(entry, exit) drives trading without a next() override.
        class S(SignalStrategy):
            def init(self):
                sma = self.data.Close.s.rolling(10).mean()
                self.set_signal(self.data.Close > sma,
                                self.data.Close < sma)

        stats = Backtest(GOOG, S).run()
        self.assertIn(stats['# Trades'], (1181, 1182))  # varies on different archs?

    def test_TrailingStrategy(self):
        # ATR-based trailing stop-loss; super().init()/next() calls are
        # required for the mixin's bookkeeping to run.
        class S(TrailingStrategy):
            def init(self):
                super().init()
                self.set_atr_periods(40)
                self.set_trailing_sl(3)
                self.sma = self.I(lambda: self.data.Close.s.rolling(10).mean())

            def next(self):
                super().next()
                if not self.position and self.data.Close > self.sma:
                    self.buy()

        stats = Backtest(GOOG, S).run()
        self.assertEqual(stats['# Trades'], 57)


class TestUtil(TestCase):
    """Tests for private helpers in ``backtesting._util``."""

    def test_as_str(self):
        # _as_str derives a short display label from any object: strings and
        # numbers verbatim, callables by name, lambdas as 'λ', OHLCV column
        # names abbreviated to their first letter.
        def func():
            pass

        class Class:
            def __call__(self):
                pass

        self.assertEqual(_as_str('4'), '4')
        self.assertEqual(_as_str(4), '4')
        self.assertEqual(_as_str(_Indicator([1, 2], name='x')), 'x')
        self.assertEqual(_as_str(func), 'func')
        self.assertEqual(_as_str(Class), 'Class')
        self.assertEqual(_as_str(Class()), 'Class')
        self.assertEqual(_as_str(pd.Series([1, 2], name='x')), 'x')
        self.assertEqual(_as_str(pd.DataFrame()), 'df')
        self.assertEqual(_as_str(lambda x: x), 'λ')
        for s in ('Open', 'High', 'Low', 'Close', 'Volume'):
            self.assertEqual(_as_str(_Array([1], name=s)), s[0])

    def test_pandas_accessors(self):
        # `.s` and `.df` accessors must mirror the underlying arrays at every
        # step of the run, and `.df` must accept new writable columns that are
        # then visible as data attributes.
        class S(Strategy):
            def init(self):
                close, index = self.data.Close, self.data.index
                assert close.s.equals(pd.Series(close, index=index))
                assert self.data.df['Close'].equals(pd.Series(close, index=index))
                self.data.df['new_key'] = 2 * close

            def next(self):
                close, index = self.data.Close, self.data.index
                assert close.s.equals(pd.Series(close, index=index))
                assert self.data.df['Close'].equals(pd.Series(close, index=index))
                assert self.data.df['new_key'].equals(pd.Series(self.data.new_key, index=index))

        Backtest(GOOG.iloc[:20], S).run()

    def test_indicators_picklable(self):
        # Round-trip a Backtest through a subprocess (pickle) and verify the
        # indicators' private options survive, then that plotting still works.
        bt = Backtest(SHORT_DATA, SmaCross)
        with ProcessPoolExecutor() as executor:
            stats = executor.submit(Backtest.run, bt).result()
        assert stats._strategy._indicators[0]._opts, '._opts and .name were not unpickled'
        bt.plot(results=stats, resample='2d', open_browser=False)


class TestDocs(TestCase):
    """Sanity checks keeping the docs/examples in sync with the code."""

    # Repo-relative docs directory; tests are skipped when absent
    # (e.g. in an installed sdist without the doc/ tree).
    DOCS_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'doc')

    @unittest.skipUnless(os.path.isdir(DOCS_DIR), "docs dir doesn't exist")
    def test_examples(self):
        # Execute each example script in a temp cwd so any files it writes
        # don't pollute the repository.
        examples = glob(os.path.join(self.DOCS_DIR, 'examples', '*.py'))
        self.assertGreaterEqual(len(examples), 4)
        with chdir(gettempdir()):
            for file in examples:
                with self.subTest(example=os.path.basename(file)):
                    run_path(file)

    def test_backtest_run_docstring_contains_stats_keys(self):
        # Every stat emitted by run() must be documented in its docstring.
        stats = Backtest(SHORT_DATA, SmaCross).run()
        for key in stats.index:
            self.assertIn(key, Backtest.run.__doc__)

    def test_readme_contains_stats_keys(self):
        # ... and likewise mentioned in the project README.
        with open(os.path.join(os.path.dirname(__file__),
                               '..', '..', 'README.md')) as f:
            readme = f.read()
        stats = Backtest(SHORT_DATA, SmaCross).run()
        for key in stats.index:
            self.assertIn(key, readme)


if __name__ == '__main__':
    # Escalate warnings to errors so deprecations fail the suite.
    warnings.filterwarnings('error')
    unittest.main()