├── .github ├── dependabot.yml └── workflows │ ├── codeql-analysis.yml │ └── python-publish.yml ├── .gitignore ├── CHANGELOG.rst ├── CONTRIBUTORS.txt ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── docs ├── report.jpg ├── snapshot.jpg └── tearsheet.html ├── example.py ├── example_with_benchmark.py ├── meta.yaml ├── quantstats_lumi ├── __init__.py ├── _plotting │ ├── __init__.py │ ├── core.py │ └── wrappers.py ├── plots.py ├── report.html ├── reports.py ├── stats.py ├── utils.py └── version.py ├── report.html ├── report_with_benchmark.html ├── requirements.txt ├── setup.cfg └── setup.py /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: daily 12 | open-pull-requests-limit: 10 13 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '45 20 * * 5' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v4 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v3 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v3 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v3 72 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Set up Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: '3.x' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install setuptools wheel twine 25 | - name: Build and publish 26 | env: 27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 29 | run: | 30 | python setup.py sdist bdist_wheel 31 | twine upload dist/* 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .idea 3 | __pycache__/* 4 | quantstats/__pycache__/* 5 | dist 6 | /build 7 | /docs/build/doctrees/* 8 | /docs/build/html/* 9 | /docs/build/epub/* 10 | !/docs/build/epub/*.epub 11 | quantstats.egg-info 12 | QuantStats.egg-info 13 | Icon 14 | /tests 15 | .vscode 16 | Icon 17 | QuantStats_Lumi.egg-info/* 18 | test_tearsheet.html 19 | 
test_tearsheet_parameters.html 20 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Change Log 2 | =========== 3 | 4 | 0.0.62 5 | ------ 6 | - Changed `serenity_index` and `recovery_factor` to use simple sum instead of compounded sum 7 | - Reports passing the `compounded` param to all supporting methods 8 | - Fixed a bug related to monthly_heatmap display 9 | 10 | 0.0.61 11 | ------ 12 | - Fixed positional arguments passed to cagr() 13 | 14 | 0.0.60 15 | ------ 16 | - Multi-strategy reports! You can now pass a dataframe with a column for each strategy to get a unified, single report for all 17 | - Support request proxy with yfinance 18 | - Added custom periods to CAGR 19 | - Correct drawdown days calculation when last day is a drawdown 20 | - Write report in correct file path 21 | - IPython 7+ compatibility 22 | - Pandas 2.0 compatibility 23 | - Fix for benchmark name when supplied by the user 24 | - Handles tz-native and tz-aware comparisson issue 25 | - Adding benchmark name to html report 26 | - Update README ticker to META :) 27 | - Many pull requests merged 28 | 29 | 30 | 0.0.59 31 | ------ 32 | - Fixed EOY compounded return calculation 33 | 34 | 0.0.58 35 | ------ 36 | - Run fillna(0) on plot's beta (issue #193) 37 | 38 | 0.0.57 39 | ------ 40 | - Fixed `sigma` calculation in `stats.probabilistic_ratio()` 41 | 42 | 0.0.56 43 | ------ 44 | - Added option to explicitly provide the benchmark title via `benchmark_title=...` 45 | 46 | 0.0.55 47 | ------ 48 | - Fix for benchmark name in html report when supplied by the user 49 | 50 | 0.0.54 51 | ------ 52 | - Fixed dependency name in requirements.txt 53 | 54 | 55 | 0.0.53 56 | ------ 57 | - Added information ratio to reports 58 | 59 | 0.0.52 60 | ------ 61 | - Added Treynor ratio 62 | 63 | 0.0.51 64 | ------ 65 | - Added max consecutive wins/losses to full report 66 | - Added 
“correlation to benchmark” to report 67 | - Cleanup inf/nan from reports 68 | - Added benchmark name to stats column and html report 69 | - Added probabilistic sharpe/sortino ratios 70 | - Fix relative dates calculations 71 | 72 | 0.0.50 73 | ------ 74 | - Fixed a bug when reporting the max drawdown 75 | 76 | 0.0.49 77 | ------ 78 | - Fixed an issue with saving the HTML report as a file 79 | 80 | 0.0.48 81 | ------ 82 | - Fixed RF display bug 83 | 84 | 0.0.47 85 | ------ 86 | - Fixed average DD display bug 87 | 88 | 0.0.46 89 | ------ 90 | - Misc bug fixes and speedups 91 | 92 | 0.0.45 93 | ------ 94 | - Fixed ``stats.rolling_sharpe()`` parameter mismatch 95 | 96 | 0.0.44 97 | ------ 98 | - Match dates logic on ``utils.make_index()`` 99 | 100 | 0.0.43 101 | ------ 102 | - Fixed ``stats.rolling_sortino()`` calculations 103 | - Added ``match_dates`` flag to reports to make strategy and benchmark comparable by syncing their dates and frequency 104 | - Added ``prepare_returns`` flag to ``utils._prepare_benchmark()`` 105 | - Misc code cleanup and speedups 106 | 107 | 0.0.42 108 | ------ 109 | - Usability improvements 110 | 111 | 0.0.41 112 | ------ 113 | - Typos fixed 114 | 115 | 0.0.40 116 | ------ 117 | - Added rebalance option to ``utils.make_index()`` 118 | - Added option to add ``log_scale=True/False`` to ``plots.snapshot()`` 119 | 120 | 0.0.39 121 | ------ 122 | - Fixed ``plots.rolling_volatility()`` benchmark display (bug introduced in 0.0.37) 123 | 124 | 0.0.38 125 | ------ 126 | - Added ``stats.smart_sharpe()`` and ``stats.smart_sortino()`` 127 | 128 | 0.0.37 129 | ------ 130 | - Added ``stats.rolling_sharpe()``, ``stats.rolling_sortino()`` and ``stats.rolling_volatility()`` 131 | - Added ``stats.distribution()`` 132 | - Added Omega ratio 133 | - BREAKING CHANGE: Renamed ``trading_year_days`` param to ``periods_per_year`` 134 | - Misc code cleanup and speedups 135 | 136 | 0.0.36 137 | ------ 138 | - Added ``as_pct`` params to ``reports.metrics()`` for when you
need display data as DataFrame 139 | 140 | 0.0.35 141 | ------ 142 | - Passing correct rolling windows in ``rolling_beta()`` 143 | - Added Serenity Index 144 | - Passing ``trading_year_days`` to method ``metrics`` 145 | - Fixed "day is out of range for month" error 146 | 147 | 0.0.34 148 | ------ 149 | - Fixed bug in ``stats.consecutive_wins()`` and ``stats.consecutive_losses()`` 150 | - Fixed seaborn's deprecated ``distplot`` warning 151 | - Improved annualization by passing ``trading_year_days`` 152 | 153 | 0.0.33 154 | ------ 155 | - Added option to pass the number of days per year in reports, so you can now use ``trading_year_days=365`` if you're trading crypto, or any other number for intl. markets. 156 | 157 | 0.0.32 158 | ------ 159 | - Fixed bug in ``plot_histogram()`` (issues 94+95) 160 | 161 | 0.0.31 162 | ------ 163 | - Enable period setting for adjusted sortino 164 | - Added ``utils.make_index()`` for easy "etf" creation 165 | 166 | 0.0.30 167 | ------ 168 | - Fixed PIP installer 169 | 170 | 0.0.29 171 | ------ 172 | - Minor code refactoring 173 | 174 | 0.0.28 175 | ------ 176 | - ``gain_to_pain`` renamed to ``gain_to_pain_ratio`` 177 | - Minor code refactoring 178 | 179 | 0.0.27 180 | ------ 181 | - Added Sortino/√2 and Gain/Pain ratio to report 182 | - Merged PRs to fix some bugs 183 | 184 | 0.0.26 185 | ------ 186 | - Misc bug fixes and code improvements 187 | 188 | 0.0.25 189 | ------ 190 | - Fixed ``conditional_value_at_risk()`` 191 | - Fixed ``%matplotlib inline`` issue in notebooks 192 | 193 | 0.0.24 194 | ------ 195 | - Added mtd/qtd/ytd methods for pandas (usage: ``df.mtd()``) 196 | - Fixed Pandas deprecation warning 197 | - Fixed Matplotlib deprecation warning 198 | - Try setting ``%matplotlib inline`` automatically in notebooks 199 | 200 | 0.0.23 201 | ------ 202 | - Fixed Profit Factor formula 203 | 204 | 0.0.22 205 | ------ 206 | - Misc bug fixes 207 | 208 | 0.0.21 209 | ------ 210 | - Fixed EOY chart's ``xticks`` when charting data with 10+
years 211 | - Fixed issue where daily return >= 100% 212 | - Fixed Snapshot plot 213 | - Removed duplicated code 214 | - Added conda installer 215 | - Misc code refactoring and optimizations 216 | 217 | 0.0.20 218 | ------ 219 | - Misc bugfixes 220 | 221 | 0.0.19 222 | ------ 223 | - Cleaning up data before calculations (replaces inf/-inf/-0 with 0) 224 | - Removed usage of ``pandas.compound()`` for future ``pandas`` version compatibility 225 | - Auto conversion of price-to-returns and returns-to-prices as needed 226 | 227 | 0.0.18 228 | ------ 229 | - Fixed issue when last date in data is in the past (issue #4) 230 | - Fixed issue when data has less than 5 drawdown periods (issue #4) 231 | 232 | 0.0.17 233 | ------ 234 | - Fixed CAGR calculation for more accuracy 235 | - Handles drawdowns better in live trading mode when currently in drawdown 236 | 237 | 0.0.16 238 | ------ 239 | - Handles no drawdowns better 240 | 241 | 0.0.15 242 | ------ 243 | - Better report formatting 244 | - Code cleanup 245 | 246 | 0.0.14 247 | ------ 248 | - Fixed calculation for rolling sharpe and rolling sortino charts 249 | - Nicer CSS when printing html reports 250 | 251 | 0.0.13 252 | ------ 253 | - Fixed non-compounded plots in reports when using ``compounded=False`` 254 | 255 | 0.0.12 256 | ------ 257 | - Option to add ``compounded=True/False`` to reports (default is ``True``) 258 | 259 | 0.0.11 260 | ------ 261 | - Minor bug fixes 262 | 263 | 0.0.10 264 | ------ 265 | - Updated to install and use ``yfinance`` instead of ``fix_yahoo_finance`` 266 | 267 | 0.0.09 268 | ------ 269 | - Added support for 3 modes (cumulative, compounded, fixed amount) in ``plots.earnings()`` and ``utils.make_portfolio()`` 270 | - Added two DataFrame utilities: ``df.curr_month()`` and ``df.date(date)`` 271 | - Misc bug fixes and code refactoring 272 | 273 | 274 | 0.0.08 275 | ------ 276 | - Better calculations for cagr, var, cvar, avg win/loss and payoff_ratio 277 | - Removed unused param from
``to_plotly()`` 278 | - Added risk free param to ``log_returns()`` + renamed it to ``to_log_returns()`` 279 | - Misc bug fixes and code improvements 280 | 281 | 0.0.07 282 | ------ 283 | - Plots returns figure if ``show`` is set to False 284 | 285 | 0.0.06 286 | ------ 287 | - Minor bug fix 288 | 289 | 0.0.05 290 | ------ 291 | - Added ``plots.to_plotly()`` method 292 | - Added Ulcer Index to metrics report 293 | - Better returns/price detection 294 | - Bug fixes and code refactoring 295 | 296 | 0.0.04 297 | ------ 298 | - Added ``pct_rank()`` method to stats 299 | - Added ``multi_shift()`` method to utils 300 | 301 | 0.0.03 302 | ------ 303 | - Better VaR/cVaR calculation 304 | - Fixed calculation of ``to_drawdown_series()`` 305 | - Changed VaR/cVaR default confidence to 95% 306 | - Improved Sortino formula 307 | - Fixed conversion of returns to prices (``to_prices()``) 308 | 309 | 0.0.02 310 | ------ 311 | - Initial release 312 | 313 | 0.0.01 314 | ------ 315 | - Pre-release placeholder 316 | -------------------------------------------------------------------------------- /CONTRIBUTORS.txt: -------------------------------------------------------------------------------- 1 | # This file contains a list of people who have made contributions 2 | # to the public version of QuantStats. 3 | 4 | Lumiwealth BotSpot, Maintainer 5 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include the license file 2 | include LICENSE.txt 3 | include requirements.txt 4 | include quantstats_lumi/report.html 5 | include README.md 6 | 7 | # Include the data files 8 | recursive-include data * 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Python version](https://img.shields.io/badge/python-3.6+-blue.svg?style=flat)](https://pypi.python.org/pypi/quantstats-lumi) 2 | [![PyPi version](https://img.shields.io/pypi/v/quantstats-lumi.svg?maxAge=60)](https://pypi.python.org/pypi/quantstats-lumi) 3 | [![PyPi status](https://img.shields.io/pypi/status/quantstats-lumi.svg?maxAge=60)](https://pypi.python.org/pypi/quantstats-lumi) 4 | [![PyPi downloads](https://img.shields.io/pypi/dm/quantstats-lumi.svg?maxAge=2592000&label=installs&color=%2327B1FF)](https://pypi.python.org/pypi/quantstats-lumi) 5 | 
[![CodeFactor](https://www.codefactor.io/repository/github/Lumiwealth/quantstats_lumi/badge)](https://www.codefactor.io/repository/github/Lumiwealth/quantstats_lumi) 6 | [![Star this repo](https://img.shields.io/github/stars/Lumiwealth/quantstats_lumi.svg?style=social&label=Star&maxAge=60)](https://github.com/Lumiwealth/quantstats_lumi) 7 | [![Follow BotSpot on twitter](https://img.shields.io/twitter/follow/botspottrade.svg?style=social&label=Follow&maxAge=60)](https://twitter.com/botspottrade) 8 | 9 | # Fork of Original QuantStats by Ran Aroussi, Maintained by Lumiwealth BotSpot 10 | 11 | This is a forked version of the original QuantStats library by Ran Aroussi. The original library can be found at https://github.com/ranaroussi/quantstats 12 | 13 | This forked version is maintained by **Lumiwealth BotSpot**. For more information, visit https://github.com/Lumiwealth/quantstats_lumi or https://botspot.trade. 14 | 15 | This forked version was created because it seems that the original library is no longer being maintained. The original library has a number of issues and pull requests that have been open for a long time and have not been addressed. This forked version aims to address some of these issues and pull requests. 16 | 17 | This forked version is created and maintained by the Lumiwealth team. We are a team of data scientists and software engineers who are passionate about quantitative finance and algorithmic trading. We use QuantStats in our daily work with the Lumibot library and we want to make sure that QuantStats is a reliable and well-maintained library. 
18 | 19 | If you're interested in learning how to make your own trading algorithms, check out our Lumibot library at https://github.com/Lumiwealth/lumibot and check out our courses at https://lumiwealth.com 20 | 21 | # QuantStats Lumi: Portfolio analytics for quants 22 | 23 | **QuantStats Lumi** is a Python library that performs portfolio profiling, allowing quants and portfolio managers to understand their performance better by providing them with in-depth analytics and risk metrics. 24 | 25 | [Changelog »](https://github.com/Lumiwealth/quantstats_lumi/blob/main/CHANGELOG.rst) 26 | 27 | ### QuantStats is comprised of 3 main modules: 28 | 29 | 1. `quantstats.stats` - for calculating various performance metrics, like Sharpe ratio, Win rate, Volatility, etc. 30 | 2. `quantstats.plots` - for visualizing performance, drawdowns, rolling statistics, monthly returns, etc. 31 | 3. `quantstats.reports` - for generating metrics reports, batch plotting, and creating tear sheets that can be saved as an HTML file. 32 | 33 | Here's an example of a simple tear sheet analyzing a strategy: 34 | 35 | # Quick Start 36 | 37 | Install QuantStats Lumi using pip: 38 | 39 | ```bash 40 | $ pip install quantstats-lumi 41 | ``` 42 | 43 | ```python 44 | %matplotlib inline 45 | import quantstats_lumi as qs 46 | 47 | # extend pandas functionality with metrics, etc. 
48 | qs.extend_pandas() 49 | 50 | # fetch the daily returns for a stock 51 | stock = qs.utils.download_returns('META') 52 | 53 | # show sharpe ratio 54 | qs.stats.sharpe(stock) 55 | 56 | # or using extend_pandas() :) 57 | stock.sharpe() 58 | ``` 59 | 60 | Output: 61 | 62 | ```text 63 | 0.8135304438803402 64 | ``` 65 | 66 | ### Visualize stock performance 67 | 68 | ```python 69 | qs.plots.snapshot(stock, title='Facebook Performance', show=True) 70 | 71 | # can also be called via: 72 | # stock.plot_snapshot(title='Facebook Performance', show=True) 73 | ``` 74 | 75 | Output: 76 | 77 | ![Snapshot plot](https://github.com/Lumiwealth/quantstats_lumi/blob/main/docs/snapshot.jpg?raw=true) 78 | 79 | ### Creating a report 80 | 81 | You can create 7 different report tearsheets: 82 | 83 | 1. `qs.reports.metrics(mode='basic|full', ...)` - shows basic/full metrics 84 | 2. `qs.reports.plots(mode='basic|full', ...)` - shows basic/full plots 85 | 3. `qs.reports.basic(...)` - shows basic metrics and plots 86 | 4. `qs.reports.full(...)` - shows full metrics and plots 87 | 5. 
`qs.reports.html(...)` - generates a complete report as html 88 | 89 | Let' create an html tearsheet 90 | 91 | ```python 92 | # (benchmark can be a pandas Series or ticker) 93 | qs.reports.html(stock, "SPY") 94 | ``` 95 | 96 | Output will generate something like this: 97 | 98 | ![HTML tearsheet](https://github.com/Lumiwealth/quantstats_lumi/blob/main/docs/report.jpg?raw=true) 99 | 100 | ([view original html file](https://rawcdn.githack.com/Lumiwealth/quantstats_lumi/main/docs/tearsheet.html)) 101 | 102 | ### To view a complete list of available methods, run 103 | 104 | ```python 105 | [f for f in dir(qs.stats) if f[0] != '_'] 106 | ``` 107 | 108 | ```text 109 | ['avg_loss', 110 | 'avg_return', 111 | 'avg_win', 112 | 'best', 113 | 'cagr', 114 | 'calmar', 115 | 'common_sense_ratio', 116 | 'comp', 117 | 'compare', 118 | 'compsum', 119 | 'conditional_value_at_risk', 120 | 'consecutive_losses', 121 | 'consecutive_wins', 122 | 'cpc_index', 123 | 'cvar', 124 | 'drawdown_details', 125 | 'expected_return', 126 | 'expected_shortfall', 127 | 'exposure', 128 | 'gain_to_pain_ratio', 129 | 'geometric_mean', 130 | 'ghpr', 131 | 'greeks', 132 | 'implied_volatility', 133 | 'information_ratio', 134 | 'kelly_criterion', 135 | 'kurtosis', 136 | 'max_drawdown', 137 | 'monthly_returns', 138 | 'outlier_loss_ratio', 139 | 'outlier_win_ratio', 140 | 'outliers', 141 | 'payoff_ratio', 142 | 'profit_factor', 143 | 'profit_ratio', 144 | 'r2', 145 | 'r_squared', 146 | 'rar', 147 | 'recovery_factor', 148 | 'remove_outliers', 149 | 'risk_of_ruin', 150 | 'risk_return_ratio', 151 | 'rolling_greeks', 152 | 'ror', 153 | 'sharpe', 154 | 'skew', 155 | 'sortino', 156 | 'adjusted_sortino', 157 | 'tail_ratio', 158 | 'to_drawdown_series', 159 | 'ulcer_index', 160 | 'ulcer_performance_index', 161 | 'upi', 162 | 'utils', 163 | 'value_at_risk', 164 | 'var', 165 | 'volatility', 166 | 'win_loss_ratio', 167 | 'win_rate', 168 | 'worst'] 169 | ``` 170 | 171 | ```python 172 | [f for f in dir(qs.plots) if f[0] != 
'_'] 173 | ``` 174 | 175 | ```text 176 | ['daily_returns', 177 | 'distribution', 178 | 'drawdown', 179 | 'drawdowns_periods', 180 | 'earnings', 181 | 'histogram', 182 | 'log_returns', 183 | 'monthly_heatmap', 184 | 'returns', 185 | 'rolling_beta', 186 | 'rolling_sharpe', 187 | 'rolling_sortino', 188 | 'rolling_volatility', 189 | 'snapshot', 190 | 'yearly_returns'] 191 | ``` 192 | 193 | **\*\*\* Full documenttion coming soon \*\*\*** 194 | 195 | In the meantime, you can get insights as to optional parameters for each method, by using Python's `help` method: 196 | 197 | ```python 198 | help(qs.stats.conditional_value_at_risk) 199 | ``` 200 | 201 | ```text 202 | Help on function conditional_value_at_risk in module quantstats.stats: 203 | 204 | conditional_value_at_risk(returns, sigma=1, confidence=0.99) 205 | calculats the conditional daily value-at-risk (aka expected shortfall) 206 | quantifies the amount of tail risk an investment 207 | ``` 208 | 209 | ## Installation 210 | 211 | Install using `pip`: 212 | 213 | ```bash 214 | $ pip install quantstats-lumi --upgrade --no-cache-dir 215 | ``` 216 | 217 | Install using `conda`: 218 | 219 | ```bash 220 | $ conda install -c lumiwealth quantstats-lumi # Or your specific channel / remove if not applicable 221 | ``` 222 | 223 | ## Requirements 224 | 225 | * [Python](https://www.python.org) >= 3.5+ 226 | * [pandas](https://github.com/pydata/pandas) (tested to work with >=0.24.0) 227 | * [numpy](http://www.numpy.org) >= 1.15.0 228 | * [scipy](https://www.scipy.org) >= 1.2.0 229 | * [matplotlib](https://matplotlib.org) >= 3.0.0 230 | * [seaborn](https://seaborn.pydata.org) >= 0.9.0 231 | * [tabulate](https://bitbucket.org/astanin/python-tabulate) >= 0.8.0 232 | * [yfinance](https://github.com/ranaroussi/yfinance) >= 0.1.38 233 | * [plotly](https://plot.ly/) >= 3.4.1 (optional, for using `plots.to_plotly()`) 234 | 235 | ## Questions? 236 | 237 | This is a new library... 
If you find a bug, please 238 | [open an issue](https://github.com/Lumiwealth/quantstats_lumi/issues) 239 | in this repository. 240 | 241 | If you'd like to contribute, a great place to look is the 242 | [issues marked with help-wanted](https://github.com/Lumiwealth/quantstats_lumi/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22). 243 | 244 | For more information about Lumiwealth BotSpot, visit https://botspot.trade. 245 | 246 | ## Known Issues 247 | 248 | For some reason, I couldn't find a way to tell seaborn not to return the 249 | monthly returns heatmap when instructed to save - so even if you save the plot (by passing `savefig={...}`) it will still show the plot. 250 | 251 | ## Legal Stuff 252 | 253 | **QuantStats Lumi** is distributed under the **Apache Software License**. See the `LICENSE.txt` file in the release for details. 254 | 255 | ## Testing 256 | 257 | To run the test suite, make sure you have `pytest` installed, then run: 258 | 259 | ```bash 260 | pytest 261 | ``` 262 | 263 | This will automatically discover and run all tests in the repository. 264 | 265 | ## P.S. 266 | 267 | Please drop us a note with any feedback you have. 
268 | 269 | **Lumiwealth BotSpot** 270 | https://github.com/Lumiwealth/quantstats_lumi 271 | https://botspot.trade 272 | -------------------------------------------------------------------------------- /docs/report.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Lumiwealth/quantstats_lumi/ecbbf9fa35218294ef9cdf522fe492928ec0ed5f/docs/report.jpg -------------------------------------------------------------------------------- /docs/snapshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Lumiwealth/quantstats_lumi/ecbbf9fa35218294ef9cdf522fe492928ec0ed5f/docs/snapshot.jpg -------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import quantstats_lumi.reports as reports 3 | import os 4 | 5 | # Create a returns series with a DatetimeIndex covering over a year (24 months) 6 | index = pd.date_range(start='2022-01-31', periods=24, freq='ME') 7 | returns = pd.Series([0.02, -0.02, -0.03, -0.01, 0.02, 0.01, -0.01, 0.015, 0.01, -0.005, 0.02, 0.01, 8 | 0.015, -0.01, 0.02, -0.01, -0.01, 0.02, 0.01, -0.01, 0.015, 0.01, -0.005, 0.02], index=index) 9 | 10 | # Specify the output file path 11 | output_file_path = "report.html" 12 | 13 | # Create a set of example parameters 14 | example_parameters = { 15 | "symbol": "SPY", # The symbol of the underlying asset 16 | "idle_holding_symbol": "QQQ", # We will put a portion of the portfolio in this symbol the whole time, set to None to not use this feature 17 | "idle_holding_pct": 0.25, # The percentage of the portfolio to put in the idle holding symbol 18 | "days_to_expiry": 0, # The number of days to expiry 19 | "strike_step_size": 1, # How far apart each strike should be 20 | "min_wing_size": 2, # The minimum spread between the wings 21 | 
"time_to_start": "10:30", # The time to start trading 22 | "max_loss": 0.25, # The maximum loss to take before getting out of the trade 23 | "first_adjustment_loss": 0.10, # The first adjustment to make if the trade goes against us 24 | "take_profit": 0.15, # The profit to take before getting out of the trade 25 | "pct_to_trade": 0.35, # The percentage of the portfolio to trade 26 | "time_to_close": "15:30", # The time to close the trade 27 | "days_of_week_to_trade": "01234", # The days of the week to trade, where 0 is Monday and 4 is Friday 28 | "wing_size_adjustment": 0, # The amount to adjust the wing size by (0.1 = 10%) 29 | "max_adx": 40, # The maximum ADX value to create a butterfly (ADX is a trend strength indicator) 30 | "min_gamma_risk": 0, # The minimum gamma risk to take on the trade (it will only take a trade if the gamma risk is greater than this value) 31 | "expected_iv_collapse": 2.5, # The expected implied volatility collapse 32 | "adx_length": 14, # The length of the ADX indicator 33 | "take_profit_trailing_stop": 0.02, # The trailing stop to use for the take profit, after the take profit target is hit, set to None to not use this feature 34 | } 35 | 36 | # Generate the HTML report for monthly data (periods_per_year=12) 37 | html_result = reports.html(returns, output=output_file_path, parameters=example_parameters, periods_per_year=12) 38 | 39 | # Print the HTML result 40 | print(html_result) 41 | 42 | # Open the HTML file in a browser to visually inspect the output 43 | os.system(f"open {output_file_path}") 44 | -------------------------------------------------------------------------------- /example_with_benchmark.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import quantstats_lumi.reports as reports 3 | import os 4 | 5 | # Create a returns series with a DatetimeIndex covering 2 years (24 months) 6 | index = pd.date_range(start='2022-01-31', periods=24, freq='M') 7 | 
strategy_returns = pd.Series( 8 | [0.02, -0.01, 0.03, -0.01, 0.02, 0.01, -0.01, 0.015, 0.01, -0.005, 0.02, 0.01, 9 | 0.015, -0.01, 0.02, 0.01, -0.01, 0.02, 0.01, -0.01, 0.015, 0.01, -0.005, 0.02], 10 | index=index, 11 | name="Strategy" 12 | ) 13 | 14 | # Create a benchmark returns series (e.g., a bit less volatile and lower mean) 15 | benchmark_returns = pd.Series( 16 | [0.012, -0.008, -0.018, -0.007, 0.013, 0.008, -0.006, 0.009, 0.007, -0.003, 0.012, 0.008, 17 | 0.009, -0.006, 0.013, 0.008, -0.006, 0.012, -0.008, -0.006, 0.009, 0.007, -0.003, 0.012], 18 | index=index, 19 | name="Benchmark" 20 | ) 21 | 22 | output_file_path = "report_with_benchmark.html" 23 | 24 | example_parameters = { 25 | "symbol": "SPY", 26 | "idle_holding_symbol": "QQQ", 27 | "idle_holding_pct": 0.25, 28 | "days_to_expiry": 0, 29 | "strike_step_size": 1, 30 | "min_wing_size": 2, 31 | "time_to_start": "10:30", 32 | "max_loss": 0.25, 33 | "first_adjustment_loss": 0.10, 34 | "take_profit": 0.15, 35 | "pct_to_trade": 0.35, 36 | "time_to_close": "15:30", 37 | "days_of_week_to_trade": "01234", 38 | "wing_size_adjustment": 0, 39 | "max_adx": 40, 40 | "min_gamma_risk": 0, 41 | "expected_iv_collapse": 2.5, 42 | "adx_length": 14, 43 | "take_profit_trailing_stop": 0.02, 44 | } 45 | 46 | # Generate the HTML report for monthly data (periods_per_year=12) with benchmark 47 | html_result = reports.html( 48 | strategy_returns, 49 | benchmark=benchmark_returns, 50 | output=output_file_path, 51 | parameters=example_parameters, 52 | periods_per_year=12 53 | ) 54 | 55 | print(html_result) 56 | os.system(f"open {output_file_path}") 57 | -------------------------------------------------------------------------------- /meta.yaml: -------------------------------------------------------------------------------- 1 | {% set name = "QuantStats Lumi" %} 2 | {% set version = "1.0.1" %} 3 | 4 | package: 5 | name: "{{ name|lower }}" 6 | version: "{{ version }}" 7 | 8 | source: 9 | url: "https://pypi.io/packages/source/{{ 
name[0] }}/{{ name }}/{{ name }}-{{ version }}.tar.gz" 10 | sha256: "c09e9ad157f6cbbf76c7cace6d52e64486a64fffdb21f4a840152f7e9ea8f8a2" 11 | 12 | build: 13 | noarch: python 14 | number: 0 15 | script: "{{ PYTHON }} -m pip install . --no-deps --ignore-installed -vv " 16 | 17 | requirements: 18 | host: 19 | - pandas >=0.24.0 20 | - numpy >=1.16.5 21 | - seaborn >=0.9.0 22 | - matplotlib >=3.0.0 23 | - scipy >=1.2.0 24 | - tabulate >=0.8.0 25 | - yfinance >=0.1.55 26 | - pip 27 | - python 28 | - scipy >=1.2.0 29 | - pytest 30 | run: 31 | - pandas >=0.24.0 32 | - numpy >=1.16.5 33 | - seaborn >=0.9.0 34 | - matplotlib >=3.0.0 35 | - scipy >=1.2.0 36 | - tabulate >=0.8.0 37 | - yfinance >=0.1.55 38 | - pip 39 | - python 40 | - scipy >=1.2.0 41 | - pytest 42 | 43 | test: 44 | imports: 45 | - quantstats 46 | - quantstats._plotting 47 | 48 | about: 49 | home: "https://github.com/Lumiwealth/quantstats_lumi" 50 | license: "Apache Software" 51 | license_family: "APACHE" 52 | license_file: "" 53 | summary: "QuantStats Lumi: Portfolio analytics for quants" 54 | description: | 55 | QuantStats Lumi is a Python library that performs portfolio profiling, 56 | allowing quants and portfolio managers to understand their 57 | performance better by providing them with in-depth analytics 58 | and risk metrics. 59 | Forked and maintained by Lumiwealth BotSpot. 
60 | doc_url: "https://github.com/Lumiwealth/quantstats_lumi" 61 | dev_url: "https://github.com/Lumiwealth/quantstats_lumi" 62 | doc_source_url: "https://github.com/Lumiwealth/quantstats_lumi/blob/master/README.rst" 63 | 64 | 65 | extra: 66 | recipe-maintainers: 67 | - lumiwealth-botspot 68 | -------------------------------------------------------------------------------- /quantstats_lumi/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: UTF-8 -*- 3 | # 4 | # QuantStats: Portfolio analytics for quants 5 | # https://github.com/ranaroussi/quantstats 6 | # 7 | # Copyright 2019-2023 Ran Aroussi 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | from . import version 22 | 23 | __version__ = version.version 24 | __author__ = "Ran Aroussi" 25 | 26 | from . import stats, utils, plots, reports 27 | 28 | __all__ = ["stats", "plots", "reports", "utils", "extend_pandas"] 29 | 30 | # try automatic matplotlib inline 31 | utils._in_notebook(matplotlib_inline=True) 32 | 33 | 34 | def extend_pandas(): 35 | """ 36 | Extends pandas by exposing methods to be used like: 37 | df.sharpe(), df.best('day'), ... 
38 | """ 39 | from pandas.core.base import PandasObject as _po 40 | 41 | _po.compsum = stats.compsum 42 | _po.comp = stats.comp 43 | _po.expected_return = stats.expected_return 44 | _po.geometric_mean = stats.geometric_mean 45 | _po.ghpr = stats.ghpr 46 | _po.outliers = stats.outliers 47 | _po.remove_outliers = stats.remove_outliers 48 | _po.best = stats.best 49 | _po.worst = stats.worst 50 | _po.consecutive_wins = stats.consecutive_wins 51 | _po.consecutive_losses = stats.consecutive_losses 52 | _po.exposure = stats.exposure 53 | _po.win_rate = stats.win_rate 54 | _po.avg_return = stats.avg_return 55 | _po.avg_win = stats.avg_win 56 | _po.avg_loss = stats.avg_loss 57 | _po.volatility = stats.volatility 58 | _po.rolling_volatility = stats.rolling_volatility 59 | _po.implied_volatility = stats.implied_volatility 60 | _po.sharpe = stats.sharpe 61 | _po.smart_sharpe = stats.smart_sharpe 62 | _po.rolling_sharpe = stats.rolling_sharpe 63 | _po.sortino = stats.sortino 64 | _po.smart_sortino = stats.smart_sortino 65 | _po.adjusted_sortino = stats.adjusted_sortino 66 | _po.rolling_sortino = stats.rolling_sortino 67 | _po.omega = stats.omega 68 | _po.cagr = stats.cagr 69 | _po.rar = stats.rar 70 | _po.skew = stats.skew 71 | _po.kurtosis = stats.kurtosis 72 | _po.calmar = stats.calmar 73 | _po.ulcer_index = stats.ulcer_index 74 | _po.ulcer_performance_index = stats.ulcer_performance_index 75 | _po.upi = stats.upi 76 | _po.serenity_index = stats.serenity_index 77 | _po.risk_of_ruin = stats.risk_of_ruin 78 | _po.ror = stats.ror 79 | _po.value_at_risk = stats.value_at_risk 80 | _po.var = stats.var 81 | _po.conditional_value_at_risk = stats.conditional_value_at_risk 82 | _po.cvar = stats.cvar 83 | _po.expected_shortfall = stats.expected_shortfall 84 | _po.tail_ratio = stats.tail_ratio 85 | _po.payoff_ratio = stats.payoff_ratio 86 | _po.win_loss_ratio = stats.win_loss_ratio 87 | _po.profit_ratio = stats.profit_ratio 88 | _po.profit_factor = stats.profit_factor 89 | 
_po.gain_to_pain_ratio = stats.gain_to_pain_ratio 90 | _po.cpc_index = stats.cpc_index 91 | _po.common_sense_ratio = stats.common_sense_ratio 92 | _po.outlier_win_ratio = stats.outlier_win_ratio 93 | _po.outlier_loss_ratio = stats.outlier_loss_ratio 94 | _po.recovery_factor = stats.recovery_factor 95 | _po.risk_return_ratio = stats.risk_return_ratio 96 | _po.max_drawdown = stats.max_drawdown 97 | _po.to_drawdown_series = stats.to_drawdown_series 98 | _po.kelly_criterion = stats.kelly_criterion 99 | _po.monthly_returns = stats.monthly_returns 100 | _po.pct_rank = stats.pct_rank 101 | 102 | _po.treynor_ratio = stats.treynor_ratio 103 | _po.probabilistic_sharpe_ratio = stats.probabilistic_sharpe_ratio 104 | _po.probabilistic_sortino_ratio = stats.probabilistic_sortino_ratio 105 | _po.probabilistic_adjusted_sortino_ratio = ( 106 | stats.probabilistic_adjusted_sortino_ratio 107 | ) 108 | 109 | # methods from utils 110 | _po.to_returns = utils.to_returns 111 | _po.to_prices = utils.to_prices 112 | _po.to_log_returns = utils.to_log_returns 113 | _po.log_returns = utils.log_returns 114 | _po.exponential_stdev = utils.exponential_stdev 115 | _po.rebase = utils.rebase 116 | _po.aggregate_returns = utils.aggregate_returns 117 | _po.to_excess_returns = utils.to_excess_returns 118 | _po.multi_shift = utils.multi_shift 119 | _po.curr_month = utils._pandas_current_month 120 | _po.date = utils._pandas_date 121 | _po.mtd = utils._mtd 122 | _po.qtd = utils._qtd 123 | _po.ytd = utils._ytd 124 | 125 | # methods that requires benchmark stats 126 | _po.r_squared = stats.r_squared 127 | _po.r2 = stats.r2 128 | _po.information_ratio = stats.information_ratio 129 | _po.greeks = stats.greeks 130 | _po.rolling_greeks = stats.rolling_greeks 131 | _po.compare = stats.compare 132 | 133 | # plotting methods 134 | _po.plot_snapshot = plots.snapshot 135 | _po.plot_earnings = plots.earnings 136 | _po.plot_daily_returns = plots.daily_returns 137 | _po.plot_distribution = plots.distribution 138 | 
_po.plot_drawdown = plots.drawdown 139 | _po.plot_drawdowns_periods = plots.drawdowns_periods 140 | _po.plot_histogram = plots.histogram 141 | _po.plot_log_returns = plots.log_returns 142 | _po.plot_returns = plots.returns 143 | _po.plot_rolling_beta = plots.rolling_beta 144 | _po.plot_rolling_sharpe = plots.rolling_sharpe 145 | _po.plot_rolling_sortino = plots.rolling_sortino 146 | _po.plot_rolling_volatility = plots.rolling_volatility 147 | _po.plot_yearly_returns = plots.yearly_returns 148 | _po.plot_monthly_heatmap = plots.monthly_heatmap 149 | 150 | _po.metrics = reports.metrics 151 | 152 | 153 | # extend_pandas() 154 | -------------------------------------------------------------------------------- /quantstats_lumi/_plotting/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: UTF-8 -*- 3 | # 4 | # QuantStats: Portfolio analytics for quants 5 | # https://github.com/ranaroussi/quantstats 6 | # 7 | # Copyright 2019-2023 Ran Aroussi 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 
20 | -------------------------------------------------------------------------------- /quantstats_lumi/_plotting/core.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: UTF-8 -*- 3 | # 4 | # Quantreturns: Portfolio analytics for quants 5 | # https://github.com/ranaroussi/quantreturns 6 | # 7 | # Copyright 2019-2023 Ran Aroussi 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | import matplotlib.pyplot as _plt 22 | 23 | try: 24 | _plt.rcParams["font.family"] = "Arial" 25 | except Exception: 26 | pass 27 | 28 | import matplotlib.dates as _mdates 29 | import numpy as _np 30 | import pandas as _pd 31 | import seaborn as _sns 32 | from matplotlib.ticker import FormatStrFormatter as _FormatStrFormatter 33 | from matplotlib.ticker import FuncFormatter as _FuncFormatter 34 | 35 | from .. import stats as _stats 36 | from .. 
import utils as _utils 37 | 38 | _sns.set( 39 | font_scale=1.1, 40 | rc={ 41 | "figure.figsize": (10, 6), 42 | "axes.facecolor": "white", 43 | "figure.facecolor": "white", 44 | "grid.color": "#dddddd", 45 | "grid.linewidth": 0.5, 46 | "lines.linewidth": 1.5, 47 | "text.color": "#333333", 48 | "xtick.color": "#666666", 49 | "ytick.color": "#666666", 50 | }, 51 | ) 52 | 53 | _FLATUI_COLORS = [ 54 | "#FEDD78", 55 | "#348DC1", 56 | "#BA516B", 57 | "#4FA487", 58 | "#9B59B6", 59 | "#613F66", 60 | "#84B082", 61 | "#DC136C", 62 | "#559CAD", 63 | "#4A5899", 64 | ] 65 | _GRAYSCALE_COLORS = [ 66 | "#000000", 67 | "#222222", 68 | "#555555", 69 | "#888888", 70 | "#AAAAAA", 71 | "#CCCCCC", 72 | "#EEEEEE", 73 | "#333333", 74 | "#666666", 75 | "#999999", 76 | ] 77 | 78 | 79 | def _get_colors(grayscale): 80 | colors = _FLATUI_COLORS 81 | ls = "-" 82 | alpha = 0.8 83 | if grayscale: 84 | colors = _GRAYSCALE_COLORS 85 | ls = "-" 86 | alpha = 0.5 87 | return colors, ls, alpha 88 | 89 | 90 | def plot_returns_bars( 91 | returns, 92 | benchmark=None, 93 | returns_label="Strategy", 94 | hline=None, 95 | hlw=None, 96 | hlcolor="red", 97 | hllabel="", 98 | resample="YE", 99 | title="Returns", 100 | match_volatility=False, 101 | log_scale=False, 102 | figsize=(10, 6), 103 | grayscale=False, 104 | fontname="Arial", 105 | ylabel=True, 106 | subtitle=True, 107 | savefig=None, 108 | show=True, 109 | ): 110 | if match_volatility and benchmark is None: 111 | raise ValueError("match_volatility requires passing of " "benchmark.") 112 | if match_volatility and benchmark is not None: 113 | bmark_vol = benchmark.loc[returns.index].std() 114 | returns = (returns / returns.std()) * bmark_vol 115 | 116 | # --------------- 117 | colors, _, _ = _get_colors(grayscale) 118 | if isinstance(returns, _pd.Series): 119 | df = _pd.DataFrame(index=returns.index, data={returns.name: returns}) 120 | elif isinstance(returns, _pd.DataFrame): 121 | df = _pd.DataFrame( 122 | index=returns.index, data={col: returns[col] 
for col in returns.columns} 123 | ) 124 | if isinstance(benchmark, _pd.Series): 125 | df[benchmark.name] = benchmark[benchmark.index.isin(returns.index)] 126 | if isinstance(returns, _pd.Series): 127 | df = df[[benchmark.name, returns.name]] 128 | elif isinstance(returns, _pd.DataFrame): 129 | col_names = [benchmark.name, returns.columns] 130 | df = df[list(_pd.core.common.flatten(col_names))] 131 | 132 | df = df.dropna() 133 | if resample is not None: 134 | df = df.resample(resample).apply(_stats.comp).resample(resample).last() 135 | # --------------- 136 | 137 | fig, ax = _plt.subplots(figsize=figsize) 138 | ax.spines["top"].set_visible(False) 139 | ax.spines["right"].set_visible(False) 140 | ax.spines["bottom"].set_visible(False) 141 | ax.spines["left"].set_visible(False) 142 | 143 | # use a more precise date string for the x axis locations in the toolbar 144 | fig.suptitle( 145 | title, y=0.94, fontweight="bold", fontname=fontname, fontsize=14, color="black" 146 | ) 147 | 148 | if subtitle: 149 | ax.set_title( 150 | "%s - %s \n" 151 | % ( 152 | df.index.date[:1][0].strftime("%Y"), 153 | df.index.date[-1:][0].strftime("%Y"), 154 | ), 155 | fontsize=12, 156 | color="gray", 157 | ) 158 | 159 | if benchmark is None: 160 | colors = colors[1:] 161 | df.plot(kind="bar", ax=ax, color=colors) 162 | 163 | fig.set_facecolor("white") 164 | ax.set_facecolor("white") 165 | 166 | try: 167 | ax.set_xticklabels(df.index.year) 168 | years = sorted(list(set(df.index.year))) 169 | except AttributeError: 170 | ax.set_xticklabels(df.index) 171 | years = sorted(list(set(df.index))) 172 | 173 | # ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d') 174 | # years = sorted(list(set(df.index.year))) 175 | if len(years) > 10: 176 | mod = int(len(years) / 10) 177 | _plt.xticks( 178 | _np.arange(len(years)), 179 | [str(year) if not i % mod else "" for i, year in enumerate(years)], 180 | ) 181 | 182 | # rotate and align the tick labels so they look better 183 | fig.autofmt_xdate() 184 | 185 | if 
hline is not None: 186 | if not isinstance(hline, _pd.Series): 187 | if grayscale: 188 | hlcolor = "gray" 189 | ax.axhline(hline, ls="--", lw=hlw, color=hlcolor, label=hllabel, zorder=2) 190 | 191 | ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2) 192 | 193 | # if isinstance(benchmark, _pd.Series) or hline: 194 | ax.legend(fontsize=11) 195 | 196 | _plt.yscale("symlog" if log_scale else "linear") 197 | 198 | ax.set_xlabel("") 199 | if ylabel: 200 | ax.set_ylabel( 201 | "Returns", fontname=fontname, fontweight="bold", fontsize=12, color="black" 202 | ) 203 | ax.yaxis.set_label_coords(-0.1, 0.5) 204 | 205 | ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis)) 206 | 207 | if benchmark is None and len(_pd.DataFrame(returns).columns) == 1: 208 | ax.get_legend().remove() 209 | 210 | try: 211 | _plt.subplots_adjust(hspace=0, bottom=0, top=1) 212 | except Exception: 213 | pass 214 | 215 | try: 216 | fig.tight_layout() 217 | except Exception: 218 | pass 219 | 220 | if savefig: 221 | if isinstance(savefig, dict): 222 | _plt.savefig(**savefig) 223 | else: 224 | _plt.savefig(savefig) 225 | 226 | if show: 227 | _plt.show(block=False) 228 | 229 | _plt.close() 230 | 231 | if not show: 232 | return fig 233 | 234 | return None 235 | 236 | 237 | def plot_timeseries( 238 | returns, 239 | benchmark=None, 240 | title="Returns", 241 | compound=False, 242 | cumulative=True, 243 | fill=False, 244 | returns_label="Strategy", 245 | hline=None, 246 | hlw=None, 247 | hlcolor="red", 248 | hllabel="", 249 | percent=True, 250 | match_volatility=False, 251 | log_scale=False, 252 | resample=None, 253 | lw=1.5, 254 | figsize=(10, 6), 255 | ylabel="", 256 | grayscale=False, 257 | fontname="Arial", 258 | subtitle=True, 259 | savefig=None, 260 | show=True, 261 | ): 262 | colors, ls, alpha = _get_colors(grayscale) 263 | 264 | returns.fillna(0, inplace=True) 265 | if isinstance(benchmark, _pd.Series): 266 | benchmark.fillna(0, inplace=True) 267 | 268 | if match_volatility and benchmark is 
None: 269 | raise ValueError("match_volatility requires passing of " "benchmark.") 270 | if match_volatility and benchmark is not None: 271 | bmark_vol = benchmark.std() 272 | returns = (returns / returns.std()) * bmark_vol 273 | 274 | # --------------- 275 | if compound is True: 276 | if cumulative: 277 | returns = _stats.compsum(returns) 278 | if isinstance(benchmark, _pd.Series): 279 | benchmark = _stats.compsum(benchmark) 280 | else: 281 | returns = returns.cumsum() 282 | if isinstance(benchmark, _pd.Series): 283 | benchmark = benchmark.cumsum() 284 | 285 | if resample: 286 | returns = returns.resample(resample) 287 | returns = returns.last() if compound is True else returns.sum() 288 | if isinstance(benchmark, _pd.Series): 289 | benchmark = benchmark.resample(resample) 290 | benchmark = benchmark.last() if compound is True else benchmark.sum() 291 | # --------------- 292 | 293 | fig, ax = _plt.subplots(figsize=figsize) 294 | ax.spines["top"].set_visible(False) 295 | ax.spines["right"].set_visible(False) 296 | ax.spines["bottom"].set_visible(False) 297 | ax.spines["left"].set_visible(False) 298 | 299 | fig.suptitle( 300 | title, y=0.94, fontweight="bold", fontname=fontname, fontsize=14, color="black" 301 | ) 302 | 303 | if subtitle: 304 | ax.set_title( 305 | "%s - %s \n" 306 | % ( 307 | returns.index.date[:1][0].strftime("%e %b '%y"), 308 | returns.index.date[-1:][0].strftime("%e %b '%y"), 309 | ), 310 | fontsize=12, 311 | color="gray", 312 | ) 313 | 314 | fig.set_facecolor("white") 315 | ax.set_facecolor("white") 316 | 317 | if isinstance(benchmark, _pd.Series): 318 | ax.plot(benchmark, lw=lw, ls=ls, label=benchmark.name, color=colors[0]) 319 | 320 | alpha = 0.25 if grayscale else 1 321 | if isinstance(returns, _pd.Series): 322 | ax.plot(returns, lw=lw, label=returns.name, color=colors[1], alpha=alpha) 323 | elif isinstance(returns, _pd.DataFrame): 324 | # color_dict = {col: colors[i+1] for i, col in enumerate(returns.columns)} 325 | for i, col in 
enumerate(returns.columns): 326 | ax.plot(returns[col], lw=lw, label=col, alpha=alpha, color=colors[i + 1]) 327 | 328 | if fill: 329 | if isinstance(returns, _pd.Series): 330 | ax.fill_between(returns.index, 0, returns, color=colors[1], alpha=0.25) 331 | elif isinstance(returns, _pd.DataFrame): 332 | for i, col in enumerate(returns.columns): 333 | ax.fill_between( 334 | returns[col].index, 0, returns[col], color=colors[i + 1], alpha=0.25 335 | ) 336 | 337 | # rotate and align the tick labels so they look better 338 | fig.autofmt_xdate() 339 | 340 | # use a more precise date string for the x axis locations in the toolbar 341 | # ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d') 342 | 343 | if hline is not None: 344 | if not isinstance(hline, _pd.Series): 345 | if grayscale: 346 | hlcolor = "black" 347 | ax.axhline(hline, ls="--", lw=hlw, color=hlcolor, label=hllabel, zorder=2) 348 | 349 | ax.axhline(0, ls="-", lw=1, color="gray", zorder=1) 350 | ax.axhline(0, ls="--", lw=1, color="white" if grayscale else "black", zorder=2) 351 | 352 | # if isinstance(benchmark, _pd.Series) or hline is not None: 353 | ax.legend(fontsize=11) 354 | 355 | _plt.yscale("symlog" if log_scale else "linear") 356 | 357 | # Set y-axis limits to avoid blank space at the bottom and top 358 | min_val = returns.min() 359 | max_val = returns.max() 360 | if benchmark is not None: 361 | min_val = min(min_val, benchmark.min()) 362 | max_val = max(max_val, benchmark.max()) 363 | 364 | # Handle cases where min_val or max_val might be NaN or Inf 365 | if not _np.isfinite(min_val) or not _np.isfinite(max_val) or min_val == max_val: 366 | min_val = -1 # Default min value 367 | max_val = 1 # Default max value 368 | # if using percent, adjust defaults 369 | if percent: 370 | min_val = -0.01 371 | max_val = 0.01 372 | 373 | ax.set_ylim(bottom=min_val, top=max_val) 374 | 375 | if percent: 376 | ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis)) 377 | # 
ax.yaxis.set_major_formatter(_plt.FuncFormatter( 378 | # lambda x, loc: "{:,}%".format(int(x*100)))) 379 | 380 | ax.set_xlabel("") 381 | if ylabel: 382 | ax.set_ylabel( 383 | ylabel, fontname=fontname, fontweight="bold", fontsize=12, color="black" 384 | ) 385 | ax.yaxis.set_label_coords(-0.1, 0.5) 386 | 387 | if benchmark is None and len(_pd.DataFrame(returns).columns) == 1: 388 | ax.get_legend().remove() 389 | 390 | try: 391 | _plt.subplots_adjust(hspace=0, bottom=0, top=1) 392 | except Exception: 393 | pass 394 | 395 | try: 396 | fig.tight_layout() 397 | except Exception: 398 | pass 399 | 400 | if savefig: 401 | if isinstance(savefig, dict): 402 | _plt.savefig(**savefig) 403 | else: 404 | _plt.savefig(savefig) 405 | 406 | if show: 407 | _plt.show(block=False) 408 | 409 | _plt.close() 410 | 411 | if not show: 412 | return fig 413 | 414 | return None 415 | 416 | 417 | def plot_histogram( 418 | returns, 419 | benchmark, 420 | resample="ME", 421 | bins=20, 422 | fontname="Arial", 423 | grayscale=False, 424 | title="Returns", 425 | kde=True, 426 | figsize=(10, 6), 427 | ylabel=True, 428 | subtitle=True, 429 | compounded=True, 430 | savefig=None, 431 | show=True, 432 | ): 433 | # colors = ['#348dc1', '#003366', 'red'] 434 | # if grayscale: 435 | # colors = ['silver', 'gray', 'black'] 436 | 437 | colors, _, _ = _get_colors(grayscale) 438 | 439 | apply_fnc = _stats.comp if compounded else 'sum' 440 | if benchmark is not None: 441 | benchmark = ( 442 | benchmark.fillna(0) 443 | .resample(resample) 444 | .apply(apply_fnc) 445 | .resample(resample) 446 | .last() 447 | ) 448 | 449 | returns = ( 450 | returns.fillna(0).resample(resample).apply(apply_fnc).resample(resample).last() 451 | ) 452 | 453 | figsize = (0.995 * figsize[0], figsize[1]) 454 | fig, ax = _plt.subplots(figsize=figsize) 455 | 456 | ax.spines["top"].set_visible(False) 457 | ax.spines["right"].set_visible(False) 458 | ax.spines["bottom"].set_visible(False) 459 | ax.spines["left"].set_visible(False) 460 | 461 | 
fig.suptitle( 462 | title, y=0.94, fontweight="bold", fontname=fontname, fontsize=14, color="black" 463 | ) 464 | 465 | if subtitle: 466 | ax.set_title( 467 | "%s - %s \n" 468 | % ( 469 | returns.index.date[:1][0].strftime("%Y"), 470 | returns.index.date[-1:][0].strftime("%Y"), 471 | ), 472 | fontsize=12, 473 | color="gray", 474 | ) 475 | 476 | fig.set_facecolor("white") 477 | ax.set_facecolor("white") 478 | 479 | if isinstance(returns, _pd.DataFrame) and len(returns.columns) == 1: 480 | returns = returns[returns.columns[0]] 481 | 482 | pallete = colors[1:2] if benchmark is None else colors[:2] 483 | alpha = 0.7 484 | if isinstance(returns, _pd.DataFrame): 485 | pallete = ( 486 | colors[1: len(returns.columns) + 1] 487 | if benchmark is None 488 | else colors[: len(returns.columns) + 1] 489 | ) 490 | if len(returns.columns) > 1: 491 | alpha = 0.5 492 | 493 | fix_instance = lambda x: x[x.columns[0]] if isinstance(x, _pd.DataFrame) else x 494 | if benchmark is not None: 495 | if isinstance(returns, _pd.Series): 496 | combined_returns = ( 497 | fix_instance(benchmark).to_frame() 498 | .join(returns.to_frame()) 499 | .stack() 500 | .reset_index() 501 | .rename(columns={"level_1": "", 0: "Returns"}) 502 | ) 503 | elif isinstance(returns, _pd.DataFrame): 504 | combined_returns = ( 505 | fix_instance(benchmark).to_frame() 506 | .join(returns) 507 | .stack() 508 | .reset_index() 509 | .rename(columns={"level_1": "", 0: "Returns"}) 510 | ) 511 | x = _sns.histplot( 512 | data=combined_returns, 513 | x="Returns", 514 | bins=bins, 515 | alpha=alpha, 516 | kde=kde, 517 | stat="density", 518 | hue="", 519 | palette=pallete, 520 | ax=ax, 521 | ) 522 | 523 | else: 524 | if isinstance(returns, _pd.Series): 525 | combined_returns = returns.copy() 526 | if kde: 527 | _sns.kdeplot( 528 | data=combined_returns, 529 | color="black", 530 | ax=ax, 531 | warn_singular=False, 532 | ) 533 | x = _sns.histplot( 534 | data=combined_returns, 535 | bins=bins, 536 | alpha=alpha, 537 | kde=False, 
538 | stat="density", 539 | color=colors[1], 540 | ax=ax, 541 | ) 542 | 543 | elif isinstance(returns, _pd.DataFrame): 544 | combined_returns = ( 545 | returns.stack() 546 | .reset_index() 547 | .rename(columns={"level_1": "", 0: "Returns"}) 548 | ) 549 | # _sns.kdeplot(data=combined_returns, color='black', ax=ax, warn_singular=False) 550 | x = _sns.histplot( 551 | data=combined_returns, 552 | x="Returns", 553 | bins=bins, 554 | alpha=alpha, 555 | kde=kde, 556 | stat="density", 557 | hue="", 558 | palette=pallete, 559 | ax=ax, 560 | ) 561 | 562 | # Why do we need average? 563 | if isinstance(combined_returns, _pd.Series) or len(combined_returns.columns) == 1: 564 | ax.axvline( 565 | combined_returns.mean(), 566 | ls="--", 567 | lw=1.5, 568 | zorder=2, 569 | label="Average", 570 | color="red", 571 | ) 572 | 573 | # _plt.setp(x.get_legend().get_texts(), fontsize=11) 574 | ax.xaxis.set_major_formatter( 575 | _plt.FuncFormatter(lambda x, loc: "{:,}%".format(int(x * 100))) 576 | ) 577 | 578 | # Removed static lines for clarity 579 | # ax.axhline(0.01, lw=1, color="#000000", zorder=2) 580 | # ax.axvline(0, lw=1, color="#000000", zorder=2) 581 | 582 | ax.set_xlabel("") 583 | ax.set_ylabel( 584 | "Occurrences", fontname=fontname, fontweight="bold", fontsize=12, color="black" 585 | ) 586 | ax.yaxis.set_label_coords(-0.1, 0.5) 587 | 588 | # fig.autofmt_xdate() 589 | 590 | try: 591 | _plt.subplots_adjust(hspace=0, bottom=0, top=1) 592 | except Exception: 593 | pass 594 | 595 | try: 596 | fig.tight_layout() 597 | except Exception: 598 | pass 599 | 600 | if savefig: 601 | if isinstance(savefig, dict): 602 | _plt.savefig(**savefig) 603 | else: 604 | _plt.savefig(savefig) 605 | 606 | if show: 607 | _plt.show(block=False) 608 | 609 | _plt.close() 610 | 611 | if not show: 612 | return fig 613 | 614 | return None 615 | 616 | 617 | def plot_rolling_stats( 618 | returns, 619 | benchmark=None, 620 | title="", 621 | returns_label="Strategy", 622 | hline=None, 623 | hlw=None, 624 | 
hlcolor="red", 625 | hllabel="", 626 | lw=1.5, 627 | figsize=(10, 6), 628 | ylabel="", 629 | grayscale=False, 630 | fontname="Arial", 631 | subtitle=True, 632 | savefig=None, 633 | show=True, 634 | ): 635 | colors, _, _ = _get_colors(grayscale) 636 | 637 | fig, ax = _plt.subplots(figsize=figsize) 638 | ax.spines["top"].set_visible(False) 639 | ax.spines["right"].set_visible(False) 640 | ax.spines["bottom"].set_visible(False) 641 | ax.spines["left"].set_visible(False) 642 | 643 | if isinstance(returns, _pd.DataFrame): 644 | returns_label = list(returns.columns) 645 | 646 | if isinstance(returns, _pd.Series): 647 | df = _pd.DataFrame(index=returns.index, data={returns_label: returns}) 648 | elif isinstance(returns, _pd.DataFrame): 649 | df = _pd.DataFrame( 650 | index=returns.index, data={col: returns[col] for col in returns.columns} 651 | ) 652 | if isinstance(benchmark, _pd.Series): 653 | df["Benchmark"] = benchmark[benchmark.index.isin(returns.index)] 654 | if isinstance(returns, _pd.Series): 655 | df = df[["Benchmark", returns_label]].dropna() 656 | ax.plot( 657 | df[returns_label].dropna(), lw=lw, label=returns.name, color=colors[1] 658 | ) 659 | elif isinstance(returns, _pd.DataFrame): 660 | col_names = ["Benchmark", returns_label] 661 | df = df[list(_pd.core.common.flatten(col_names))].dropna() 662 | for i, col in enumerate(returns_label): 663 | ax.plot(df[col], lw=lw, label=col, color=colors[i + 1]) 664 | ax.plot( 665 | df["Benchmark"], lw=lw, label=benchmark.name, color=colors[0], alpha=0.8 666 | ) 667 | else: 668 | if isinstance(returns, _pd.Series): 669 | df = df[[returns_label]].dropna() 670 | ax.plot( 671 | df[returns_label].dropna(), lw=lw, label=returns.name, color=colors[1] 672 | ) 673 | elif isinstance(returns, _pd.DataFrame): 674 | df = df[returns_label].dropna() 675 | for i, col in enumerate(returns_label): 676 | ax.plot(df[col], lw=lw, label=col, color=colors[i + 1]) 677 | 678 | # rotate and align the tick labels so they look better 679 | 
fig.autofmt_xdate() 680 | 681 | # use a more precise date string for the x axis locations in the toolbar 682 | # ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')\ 683 | fig.suptitle( 684 | title, y=0.94, fontweight="bold", fontname=fontname, fontsize=14, color="black" 685 | ) 686 | 687 | if subtitle: 688 | ax.set_title( 689 | "%s - %s \n" 690 | % ( 691 | df.index.date[:1][0].strftime("%e %b '%y"), 692 | df.index.date[-1:][0].strftime("%e %b '%y"), 693 | ), 694 | fontsize=12, 695 | color="gray", 696 | ) 697 | 698 | if hline is not None: 699 | if not isinstance(hline, _pd.Series): 700 | if grayscale: 701 | hlcolor = "black" 702 | ax.axhline(hline, ls="--", lw=hlw, color=hlcolor, label=hllabel, zorder=2) 703 | 704 | ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2) 705 | 706 | if ylabel: 707 | ax.set_ylabel( 708 | ylabel, fontname=fontname, fontweight="bold", fontsize=12, color="black" 709 | ) 710 | ax.yaxis.set_label_coords(-0.1, 0.5) 711 | 712 | ax.yaxis.set_major_formatter(_FormatStrFormatter("%.2f")) 713 | 714 | ax.legend(fontsize=11) 715 | 716 | if benchmark is None and len(_pd.DataFrame(returns).columns) == 1: 717 | ax.get_legend().remove() 718 | 719 | try: 720 | _plt.subplots_adjust(hspace=0, bottom=0, top=1) 721 | except Exception: 722 | pass 723 | 724 | try: 725 | fig.tight_layout() 726 | except Exception: 727 | pass 728 | 729 | if savefig: 730 | if isinstance(savefig, dict): 731 | _plt.savefig(**savefig) 732 | else: 733 | _plt.savefig(savefig) 734 | if show: 735 | _plt.show(block=False) 736 | 737 | _plt.close() 738 | 739 | if not show: 740 | return fig 741 | 742 | return None 743 | 744 | 745 | def plot_rolling_beta( 746 | returns, 747 | benchmark, 748 | window1=126, 749 | window1_label="", 750 | window2=None, 751 | window2_label="", 752 | title="", 753 | hlcolor="red", 754 | figsize=(10, 6), 755 | grayscale=False, 756 | fontname="Arial", 757 | lw=1.5, 758 | ylabel=True, 759 | subtitle=True, 760 | savefig=None, 761 | show=True, 762 | ): 763 | colors, _, 
_ = _get_colors(grayscale) 764 | 765 | fig, ax = _plt.subplots(figsize=figsize) 766 | ax.spines["top"].set_visible(False) 767 | ax.spines["right"].set_visible(False) 768 | ax.spines["bottom"].set_visible(False) 769 | ax.spines["left"].set_visible(False) 770 | 771 | fig.suptitle( 772 | title, y=0.94, fontweight="bold", fontname=fontname, fontsize=14, color="black" 773 | ) 774 | 775 | if subtitle: 776 | ax.set_title( 777 | "%s - %s \n" 778 | % ( 779 | returns.index.date[:1][0].strftime("%e %b '%y"), 780 | returns.index.date[-1:][0].strftime("%e %b '%y"), 781 | ), 782 | fontsize=12, 783 | color="gray", 784 | ) 785 | 786 | i = 1 787 | if isinstance(returns, _pd.Series): 788 | beta = _stats.rolling_greeks(returns, benchmark, window1)["beta"].fillna(0) 789 | ax.plot(beta, lw=lw, label=window1_label, color=colors[1]) 790 | elif isinstance(returns, _pd.DataFrame): 791 | beta = { 792 | col: _stats.rolling_greeks(returns[col], benchmark, window1)["beta"].fillna( 793 | 0 794 | ) 795 | for col in returns.columns 796 | } 797 | for name, b in beta.items(): 798 | ax.plot(b, lw=lw, label=name + " " + f"({window1_label})", color=colors[i]) 799 | i += 1 800 | 801 | i = 1 802 | if window2: 803 | lw = lw - 0.5 804 | if isinstance(returns, _pd.Series): 805 | ax.plot( 806 | _stats.rolling_greeks(returns, benchmark, window2)["beta"], 807 | lw=lw, 808 | label=window2_label, 809 | color="gray", 810 | alpha=0.8, 811 | ) 812 | elif isinstance(returns, _pd.DataFrame): 813 | betas_w2 = { 814 | col: _stats.rolling_greeks(returns[col], benchmark, window2)["beta"] 815 | for col in returns.columns 816 | } 817 | for name, beta_w2 in betas_w2.items(): 818 | ax.plot( 819 | beta_w2, 820 | lw=lw, 821 | ls="--", 822 | label=name + " " + f"({window2_label})", 823 | alpha=0.5, 824 | color=colors[i], 825 | ) 826 | i += 1 827 | 828 | beta_min = ( 829 | beta.min() 830 | if isinstance(returns, _pd.Series) 831 | else min([b.min() for b in beta.values()]) 832 | ) 833 | beta_max = ( 834 | beta.max() 835 | if 
isinstance(returns, _pd.Series) 836 | else max([b.max() for b in beta.values()]) 837 | ) 838 | mmin = min([-100, int(beta_min * 100)]) 839 | mmax = max([100, int(beta_max * 100)]) 840 | step = 50 if (mmax - mmin) >= 200 else 100 841 | ax.set_yticks([x / 100 for x in list(range(mmin, mmax, step))]) 842 | 843 | if isinstance(returns, _pd.Series): 844 | hlcolor = "black" if grayscale else hlcolor 845 | ax.axhline(beta.mean(), ls="--", lw=1.5, color=hlcolor, zorder=2) 846 | 847 | ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2) 848 | 849 | fig.autofmt_xdate() 850 | 851 | # use a more precise date string for the x axis locations in the toolbar 852 | ax.fmt_xdata = _mdates.DateFormatter("%Y-%m-%d") 853 | 854 | if ylabel: 855 | ax.set_ylabel( 856 | "Beta", fontname=fontname, fontweight="bold", fontsize=12, color="black" 857 | ) 858 | ax.yaxis.set_label_coords(-0.1, 0.5) 859 | 860 | ax.legend(fontsize=11) 861 | if benchmark is None and len(_pd.DataFrame(returns).columns) == 1: 862 | ax.get_legend().remove() 863 | 864 | try: 865 | _plt.subplots_adjust(hspace=0, bottom=0, top=1) 866 | except Exception: 867 | pass 868 | 869 | try: 870 | fig.tight_layout() 871 | except Exception: 872 | pass 873 | 874 | if savefig: 875 | if isinstance(savefig, dict): 876 | _plt.savefig(**savefig) 877 | else: 878 | _plt.savefig(savefig) 879 | 880 | if show: 881 | _plt.show(block=False) 882 | 883 | _plt.close() 884 | 885 | if not show: 886 | return fig 887 | 888 | return None 889 | 890 | 891 | def plot_longest_drawdowns( 892 | returns, 893 | periods=5, 894 | lw=1.5, 895 | fontname="Arial", 896 | grayscale=False, 897 | title=None, 898 | log_scale=False, 899 | figsize=(10, 6), 900 | ylabel=True, 901 | subtitle=True, 902 | compounded=True, 903 | savefig=None, 904 | show=True, 905 | ): 906 | colors = ["#348dc1", "#003366", "red"] 907 | if grayscale: 908 | colors = ["#000000"] * 3 909 | 910 | dd = _stats.to_drawdown_series(returns.fillna(0)) 911 | dddf = _stats.drawdown_details(dd) 912 | 
longest_dd = dddf.sort_values(by="days", ascending=False, kind="mergesort")[ 913 | :periods 914 | ] 915 | 916 | fig, ax = _plt.subplots(figsize=figsize) 917 | ax.spines["top"].set_visible(False) 918 | ax.spines["right"].set_visible(False) 919 | ax.spines["bottom"].set_visible(False) 920 | ax.spines["left"].set_visible(False) 921 | 922 | fig.suptitle( 923 | f"{title} - Worst %.0f Drawdown Periods" % periods + (" (Log Scaled)" if log_scale else ""), 924 | y=0.94, 925 | fontweight="bold", 926 | fontname=fontname, 927 | fontsize=14, 928 | color="black", 929 | ) 930 | if subtitle: 931 | ax.set_title( 932 | "%s - %s \n" 933 | % ( 934 | returns.index.date[:1][0].strftime("%e %b '%y"), 935 | returns.index.date[-1:][0].strftime("%e %b '%y"), 936 | ), 937 | fontsize=12, 938 | color="gray", 939 | ) 940 | 941 | fig.set_facecolor("white") 942 | ax.set_facecolor("white") 943 | series = _stats.compsum(returns) if compounded else returns.cumsum() 944 | ax.plot(series, lw=lw, label="Backtest", color=colors[0]) 945 | 946 | highlight = "black" if grayscale else "red" 947 | for _, row in longest_dd.iterrows(): 948 | ax.axvspan( 949 | *_mdates.datestr2num([str(row["start"]), str(row["end"])]), 950 | color=highlight, 951 | alpha=0.1, 952 | ) 953 | 954 | # rotate and align the tick labels so they look better 955 | fig.autofmt_xdate() 956 | 957 | # use a more precise date string for the x axis locations in the toolbar 958 | ax.fmt_xdata = _mdates.DateFormatter("%Y-%m-%d") 959 | 960 | ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2) 961 | _plt.yscale("symlog" if log_scale else "linear") 962 | 963 | # Set y-axis limits to avoid blank space at the bottom and top 964 | ax.set_ylim(bottom=series.min(), top=series.max()) 965 | 966 | if ylabel: 967 | ax.set_ylabel( 968 | "Cumulative Returns", 969 | fontname=fontname, 970 | fontweight="bold", 971 | fontsize=12, 972 | color="black", 973 | ) 974 | ax.yaxis.set_label_coords(-0.1, 0.5) 975 | 976 | 
ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis)) 977 | # ax.yaxis.set_major_formatter(_plt.FuncFormatter( 978 | # lambda x, loc: "{:,}%".format(int(x*100)))) 979 | 980 | fig.autofmt_xdate() 981 | 982 | try: 983 | _plt.subplots_adjust(hspace=0, bottom=0, top=1) 984 | except Exception: 985 | pass 986 | 987 | try: 988 | fig.tight_layout() 989 | except Exception: 990 | pass 991 | 992 | if savefig: 993 | if isinstance(savefig, dict): 994 | _plt.savefig(**savefig) 995 | else: 996 | _plt.savefig(savefig) 997 | 998 | if show: 999 | _plt.show(block=False) 1000 | 1001 | _plt.close() 1002 | 1003 | if not show: 1004 | return fig 1005 | 1006 | return None 1007 | 1008 | 1009 | def plot_distribution( 1010 | returns, 1011 | figsize=(10, 6), 1012 | fontname="Arial", 1013 | grayscale=False, 1014 | ylabel=True, 1015 | subtitle=True, 1016 | compounded=True, 1017 | title=None, 1018 | savefig=None, 1019 | show=True, 1020 | log_scale=False, 1021 | ): 1022 | colors = _FLATUI_COLORS 1023 | if grayscale: 1024 | colors = ["#f9f9f9", "#dddddd", "#bbbbbb", "#999999", "#808080"] 1025 | # colors, ls, alpha = _get_colors(grayscale) 1026 | 1027 | port = _pd.DataFrame(returns.fillna(0)) 1028 | port.columns = ["Daily"] 1029 | 1030 | apply_fnc = _stats.comp if compounded else 'sum' 1031 | 1032 | port["Weekly"] = port["Daily"].resample("W-MON").apply(apply_fnc) 1033 | port["Weekly"] = port["Weekly"].ffill() 1034 | 1035 | port["Monthly"] = port["Daily"].resample("ME").apply(apply_fnc) 1036 | port["Monthly"] = port["Monthly"].ffill() 1037 | 1038 | port["Quarterly"] = port["Daily"].resample("QE").apply(apply_fnc) 1039 | port["Quarterly"] = port["Quarterly"].ffill() 1040 | 1041 | port["Yearly"] = port["Daily"].resample("YE").apply(apply_fnc) 1042 | port["Yearly"] = port["Yearly"].ffill() 1043 | 1044 | fig, ax = _plt.subplots(figsize=figsize) 1045 | ax.spines["top"].set_visible(False) 1046 | ax.spines["right"].set_visible(False) 1047 | ax.spines["bottom"].set_visible(False) 1048 | 
ax.spines["left"].set_visible(False) 1049 | 1050 | if title: 1051 | title = f"{title} - Return Quantiles" 1052 | else: 1053 | title = "Return Quantiles" 1054 | fig.suptitle( 1055 | title, y=0.94, fontweight="bold", fontname=fontname, fontsize=14, color="black" 1056 | ) 1057 | 1058 | if subtitle: 1059 | ax.set_title( 1060 | "%s - %s \n" 1061 | % ( 1062 | returns.index.date[:1][0].strftime("%e %b '%y"), 1063 | returns.index.date[-1:][0].strftime("%e %b '%y"), 1064 | ), 1065 | fontsize=12, 1066 | color="gray", 1067 | ) 1068 | 1069 | fig.set_facecolor("white") 1070 | ax.set_facecolor("white") 1071 | 1072 | _sns.boxplot( 1073 | data=port, 1074 | ax=ax, 1075 | palette={ 1076 | "Daily": colors[0], 1077 | "Weekly": colors[1], 1078 | "Monthly": colors[2], 1079 | "Quarterly": colors[3], 1080 | "Yearly": colors[4], 1081 | }, 1082 | ) 1083 | 1084 | _plt.yscale("symlog" if log_scale else "linear") 1085 | ax.yaxis.set_major_formatter( 1086 | _plt.FuncFormatter(lambda x, loc: "{:,}%".format(int(x * 100))) 1087 | ) 1088 | 1089 | if ylabel: 1090 | ax.set_ylabel( 1091 | "Returns", fontname=fontname, fontweight="bold", fontsize=12, color="black" 1092 | ) 1093 | ax.yaxis.set_label_coords(-0.1, 0.5) 1094 | 1095 | fig.autofmt_xdate() 1096 | 1097 | try: 1098 | _plt.subplots_adjust(hspace=0) 1099 | except Exception: 1100 | pass 1101 | try: 1102 | fig.tight_layout(w_pad=0, h_pad=0) 1103 | except Exception: 1104 | pass 1105 | 1106 | if savefig: 1107 | if isinstance(savefig, dict): 1108 | _plt.savefig(**savefig) 1109 | else: 1110 | _plt.savefig(savefig) 1111 | 1112 | if show: 1113 | _plt.show(block=False) 1114 | 1115 | _plt.close() 1116 | 1117 | if not show: 1118 | return fig 1119 | 1120 | return None 1121 | 1122 | 1123 | def plot_table( 1124 | tbl, 1125 | columns=None, 1126 | title="", 1127 | title_loc="left", 1128 | header=True, 1129 | colWidths=None, 1130 | rowLoc="right", 1131 | colLoc="right", 1132 | colLabels=None, 1133 | edges="horizontal", 1134 | orient="horizontal", 1135 | 
figsize=(5.5, 6), 1136 | savefig=None, 1137 | show=False, 1138 | ): 1139 | if columns is not None: 1140 | try: 1141 | tbl.columns = columns 1142 | except Exception: 1143 | pass 1144 | 1145 | fig = _plt.figure(figsize=figsize) 1146 | ax = _plt.subplot(111, frame_on=False) 1147 | 1148 | if title != "": 1149 | ax.set_title( 1150 | title, fontweight="bold", fontsize=14, color="black", loc=title_loc 1151 | ) 1152 | 1153 | the_table = ax.table( 1154 | cellText=tbl.values, 1155 | colWidths=colWidths, 1156 | rowLoc=rowLoc, 1157 | colLoc=colLoc, 1158 | edges=edges, 1159 | colLabels=(tbl.columns if header else colLabels), 1160 | loc="center", 1161 | zorder=2, 1162 | ) 1163 | 1164 | the_table.auto_set_font_size(False) 1165 | the_table.set_fontsize(12) 1166 | the_table.scale(1, 1) 1167 | 1168 | for (row, col), cell in the_table.get_celld().items(): 1169 | cell.set_height(0.08) 1170 | cell.set_text_props(color="black") 1171 | cell.set_edgecolor("#dddddd") 1172 | if row == 0 and header: 1173 | cell.set_edgecolor("black") 1174 | cell.set_facecolor("black") 1175 | cell.set_linewidth(2) 1176 | cell.set_text_props(weight="bold", color="black") 1177 | elif col == 0 and "vertical" in orient: 1178 | cell.set_edgecolor("#dddddd") 1179 | cell.set_linewidth(1) 1180 | cell.set_text_props(weight="bold", color="black") 1181 | elif row > 1: 1182 | cell.set_linewidth(1) 1183 | 1184 | ax.grid(False) 1185 | ax.set_xticks([]) 1186 | ax.set_yticks([]) 1187 | 1188 | try: 1189 | _plt.subplots_adjust(hspace=0) 1190 | except Exception: 1191 | pass 1192 | try: 1193 | fig.tight_layout(w_pad=0, h_pad=0) 1194 | except Exception: 1195 | pass 1196 | 1197 | if savefig: 1198 | if isinstance(savefig, dict): 1199 | _plt.savefig(**savefig) 1200 | else: 1201 | _plt.savefig(savefig) 1202 | 1203 | if show: 1204 | _plt.show(block=False) 1205 | 1206 | _plt.close() 1207 | 1208 | if not show: 1209 | return fig 1210 | 1211 | return None 1212 | 1213 | 1214 | def monthly_heatmap_detailedview( 1215 | returns, 1216 | 
grayscale=False, 1217 | figsize=(14, 6), 1218 | annot_size=11, 1219 | returns_label="Strategy", 1220 | fontname="Arial", 1221 | monthly_dd_font_rate=0.8, 1222 | annual_dd_font_rate=0.8, 1223 | return_font_rate=1.0, 1224 | savefig=None, 1225 | show=True, 1226 | ): 1227 | daily_returns = returns.pct_change(fill_method=None).fillna(0) 1228 | monthly_returns = daily_returns.resample('ME').apply(lambda x: (x + 1).prod() - 1) * 100 1229 | monthly_drawdowns = calculate_monthly_drawdowns(returns) * 100 1230 | 1231 | monthly_combined = _pd.DataFrame({ 1232 | "Returns": monthly_returns, 1233 | "Drawdowns": monthly_drawdowns 1234 | }) 1235 | 1236 | monthly_combined["Year"] = monthly_combined.index.year 1237 | monthly_combined["Month"] = monthly_combined.index.month 1238 | 1239 | pivot_returns = monthly_combined.pivot(index="Year", columns="Month", values="Returns") 1240 | pivot_drawdowns = monthly_combined.pivot(index="Year", columns="Month", values="Drawdowns") 1241 | 1242 | cmap = "gray" if grayscale else "RdYlGn" 1243 | 1244 | fig, ax = _plt.subplots(figsize=figsize) 1245 | ax.set_facecolor("white") 1246 | fig.set_facecolor("white") 1247 | 1248 | annot_returns = pivot_returns.map(lambda x: f"{x:.2f}" if _pd.notnull(x) else "") 1249 | annot_drawdowns = pivot_drawdowns.map(lambda x: f"{x:.2f}" if _pd.notnull(x) else "") 1250 | mask = pivot_returns.isnull() 1251 | 1252 | return_font_size = annot_size * return_font_rate 1253 | 1254 | _sns.heatmap( 1255 | pivot_returns, 1256 | annot=annot_returns, 1257 | center=0, 1258 | annot_kws={"size": return_font_size, "ha": 'center', "va": 'bottom'}, 1259 | fmt="s", 1260 | linewidths=0.5, 1261 | cmap=cmap, 1262 | cbar_kws={"format": "%.0f%%"}, 1263 | ax=ax, 1264 | mask=mask 1265 | ) 1266 | 1267 | common_index = pivot_returns.index.intersection(annot_drawdowns.index) 1268 | pivot_returns = pivot_returns.loc[common_index] 1269 | annot_drawdowns = annot_drawdowns.loc[common_index] 1270 | 1271 | cell_index = 1 1272 | 1273 | for i in 
range(pivot_returns.shape[0]): 1274 | for j in range(pivot_returns.shape[1]): 1275 | if _pd.notnull(pivot_returns.iloc[i, j]): 1276 | cell = ax.get_children()[cell_index] 1277 | return_color = cell.get_color() 1278 | 1279 | monthly_dd_color = 'white' if return_color == 'w' else 'dimgray' 1280 | ax.text(j + 0.5, i + 0.55, annot_drawdowns.iloc[i, j], 1281 | ha='center', va='top', fontsize=return_font_size * monthly_dd_font_rate, 1282 | color=monthly_dd_color) 1283 | 1284 | cell_index += 1 1285 | else: 1286 | continue 1287 | 1288 | annual_returns = (pivot_returns / 100 + 1).prod(axis=1).sub(1).mul(100) 1289 | annually_grouped = daily_returns.groupby(daily_returns.index.year) 1290 | annual_dd = annually_grouped.apply(_stats.max_drawdown) * 100 1291 | 1292 | # Generate ytick_labels 1293 | ytick_labels = [f"{year}\n{annual_returns[year]:.2f}" for year in pivot_returns.index] 1294 | 1295 | # Remove existing y-axis labels 1296 | ax.set_yticks([]) 1297 | 1298 | # Add new y-axis labels 1299 | for idx, label in enumerate(ytick_labels): 1300 | ax.text(-0.1, idx + 0.45, label, 1301 | verticalalignment='center', 1302 | horizontalalignment='right', 1303 | fontsize=annot_size * 1.0, # Maybe, 1.0 can be new argument like ytick_font_rate 1304 | transform=ax.transData) 1305 | 1306 | # Add Drawdown 1307 | ax.text(-0.1, idx + 0.75, f"{annual_dd[pivot_returns.index[idx]]:.2f}", 1308 | verticalalignment='center', 1309 | horizontalalignment='right', 1310 | fontsize=annot_size * annual_dd_font_rate, # Set Drawdown font size slightly smaller 1311 | transform=ax.transData, 1312 | color='dimgray') 1313 | 1314 | # Add YTD label 1315 | ax.text(-0.1, len(pivot_returns.index) * 1.02, 'YTD', fontsize=annot_size, 1316 | verticalalignment='center', horizontalalignment='right', 1317 | transform=ax.transData) 1318 | 1319 | ax.set_title( 1320 | f"{returns_label} - Monthly Returns & Drawdowns (%)", 1321 | fontsize=14, 1322 | fontname=fontname, 1323 | fontweight="bold" 1324 | ) 1325 | 1326 | month_abbr = 
['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'] 1327 | _plt.xticks(ticks=_np.arange(0.5, 12.5, 1), labels=month_abbr, rotation=0, fontsize=annot_size) 1328 | 1329 | ax.tick_params(colors="#808080") 1330 | _plt.xticks(rotation=0, fontsize=annot_size * 1.2) 1331 | _plt.yticks(rotation=0, fontsize=annot_size * 1.2) 1332 | 1333 | ax.set_xlabel('') 1334 | ax.set_ylabel('') 1335 | 1336 | _plt.tight_layout(pad=1) 1337 | _plt.subplots_adjust(right=1.05) 1338 | _plt.grid(False) 1339 | 1340 | if savefig: 1341 | if isinstance(savefig, dict): 1342 | _plt.savefig(**savefig) 1343 | else: 1344 | _plt.savefig(savefig) 1345 | 1346 | if show: 1347 | _plt.show(block=False) 1348 | 1349 | _plt.close() 1350 | 1351 | if not show: 1352 | return fig 1353 | 1354 | return None 1355 | 1356 | 1357 | def calculate_monthly_drawdowns(returns): 1358 | drawdowns = [] 1359 | monthly_last_date = returns.resample('ME').apply(lambda x: x.index[-1]).index.tolist() 1360 | monthly_last_trading_date = returns.resample('ME').apply(lambda x: x.index[-1]).tolist() 1361 | monthly_last_trading_date.insert(0, returns.index[0]) 1362 | 1363 | for index, end_date in enumerate(monthly_last_trading_date): 1364 | if index == 0: 1365 | continue 1366 | 1367 | last_month_end_date = monthly_last_trading_date[index - 1] 1368 | current_month_returns = returns.loc[last_month_end_date:end_date] 1369 | 1370 | current_dd = _stats.max_drawdown(current_month_returns) 1371 | drawdowns.append(current_dd) 1372 | 1373 | return _pd.Series(drawdowns, index=monthly_last_date) 1374 | 1375 | 1376 | def format_cur_axis(x, _): 1377 | if x >= 1e12: 1378 | res = "$%1.1fT" % (x * 1e-12) 1379 | return res.replace(".0T", "T") 1380 | if x >= 1e9: 1381 | res = "$%1.1fB" % (x * 1e-9) 1382 | return res.replace(".0B", "B") 1383 | if x >= 1e6: 1384 | res = "$%1.1fM" % (x * 1e-6) 1385 | return res.replace(".0M", "ME") 1386 | if x >= 1e3: 1387 | res = "$%1.0fK" % (x * 1e-3) 1388 | return res.replace(".0K", "K") 1389 
| res = "$%1.0f" % x 1390 | return res.replace(".0", "") 1391 | 1392 | 1393 | def format_pct_axis(x, _): 1394 | x *= 100 # lambda x, loc: "{:,}%".format(int(x * 100)) 1395 | if x >= 1e12: 1396 | res = "%1.1fT%%" % (x * 1e-12) 1397 | return res.replace(".0T%", "T%") 1398 | if x >= 1e9: 1399 | res = "%1.1fB%%" % (x * 1e-9) 1400 | return res.replace(".0B%", "B%") 1401 | if x >= 1e6: 1402 | res = "%1.1fM%%" % (x * 1e-6) 1403 | return res.replace(".0M%", "M%") 1404 | if x >= 1e3: 1405 | res = "%1.1fK%%" % (x * 1e-3) 1406 | return res.replace(".0K%", "K%") 1407 | res = "%1.0f%%" % x 1408 | return res.replace(".0%", "%") 1409 | -------------------------------------------------------------------------------- /quantstats_lumi/_plotting/wrappers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: UTF-8 -*- 3 | # 4 | # Quantreturns: Portfolio analytics for quants 5 | # https://github.com/ranaroussi/quantreturns 6 | # 7 | # Copyright 2019-2023 Ran Aroussi 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | import warnings 22 | 23 | import matplotlib.pyplot as _plt 24 | import numpy as _np 25 | import pandas as _pd 26 | import seaborn as _sns 27 | from matplotlib.ticker import FuncFormatter as _FuncFormatter 28 | from matplotlib.ticker import StrMethodFormatter as _StrMethodFormatter 29 | 30 | from .. import stats as _stats 31 | from .. 
from .. import utils as _utils
from . import core as _core

# Default palettes: flat-UI colors for normal plots, black/white in grayscale.
_FLATUI_COLORS = ["#fedd78", "#348dc1", "#af4b64", "#4fa487", "#9b59b6", "#808080"]
_GRAYSCALE_COLORS = (len(_FLATUI_COLORS) * ["black"]) + ["white"]

_HAS_PLOTLY = False
try:
    import plotly

    _HAS_PLOTLY = True
except ImportError:
    pass


def to_plotly(fig):
    """Convert a matplotlib figure to plotly and display it inline.

    Returns the original figure unchanged when plotly is not installed.
    """
    if not _HAS_PLOTLY:
        return fig
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fig = plotly.tools.mpl_to_plotly(fig)
    # NOTE(review): `plotly.plotly` was moved out of recent plotly releases
    # into the separate `chart_studio` package — confirm against the plotly
    # version this project supports.
    return plotly.plotly.iplot(fig, filename="quantstats-plot", overwrite=True)


def snapshot(
    returns,
    grayscale=False,
    figsize=(10, 8),
    title="Portfolio Summary",
    fontname="Arial",
    lw=1.5,
    mode="comp",
    subtitle=True,
    savefig=None,
    show=True,
    log_scale=False,
    **kwargs,
):
    """Plot a 3-panel portfolio summary: cumulative return, drawdown and
    daily returns.

    Parameters
    ----------
    returns : Series or DataFrame of period returns. For a multi-column
        DataFrame, the column named by `strategy_col` (kwarg, default
        "Strategy") is used if present, otherwise the row-wise mean of
        all columns.
    mode : passed to _utils.make_portfolio ("comp" by default).
    savefig : path or dict of savefig kwargs; show : display the figure.

    Returns the figure when show=False, otherwise None.
    """
    strategy_colname = kwargs.get("strategy_col", "Strategy")

    multi_column = False
    if isinstance(returns, _pd.Series):
        returns.name = strategy_colname
    elif isinstance(returns, _pd.DataFrame):
        if len(returns.columns) > 1:
            if strategy_colname in returns.columns:
                returns = returns[strategy_colname]
            else:
                multi_column = True
                returns = returns.mean(axis=1)
                title = title + " (daily equal-weighted*)"
            # BUG FIX: at this point `returns` is a Series (a selected column
            # or the row-wise mean); the original assigned `returns.columns`,
            # which a Series does not have — name the Series instead.
            returns.name = strategy_colname

    colors = _GRAYSCALE_COLORS if grayscale else _FLATUI_COLORS
    returns = _utils.make_portfolio(returns.dropna(), 1, mode).pct_change().fillna(0)

    if figsize is None:
        size = list(_plt.gcf().get_size_inches())
        figsize = (size[0], size[0] * 0.75)

    fig, axes = _plt.subplots(
        3, 1, sharex=True, figsize=figsize, gridspec_kw={"height_ratios": [3, 1, 1]}
    )

    if multi_column:
        _plt.figtext(
            0,
            -0.05,
            " * When a multi-column DataFrame is passed, the mean of all columns will be used as returns.\n"
            "   To change this behavior, use a pandas Series or pass the column name in the `strategy_col` parameter.",
            ha="left",
            fontsize=11,
            color="black",
            alpha=0.6,
            linespacing=1.5,
        )

    for ax in axes:
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["left"].set_visible(False)

    fig.suptitle(
        title, fontsize=14, y=0.97, fontname=fontname, fontweight="bold", color="black"
    )

    fig.set_facecolor("white")

    if subtitle:
        # NOTE: the strftime "%e" directive is POSIX-only — it is not
        # supported on Windows builds of Python.
        if isinstance(returns, _pd.Series):
            axes[0].set_title(
                "%s - %s ;  Sharpe: %.2f\n"
                % (
                    returns.index.date[:1][0].strftime("%e %b '%y"),
                    returns.index.date[-1:][0].strftime("%e %b '%y"),
                    _stats.sharpe(returns),
                ),
                fontsize=12,
                color="gray",
            )
        elif isinstance(returns, _pd.DataFrame):
            axes[0].set_title(
                "\n%s - %s ;  "
                % (
                    returns.index.date[:1][0].strftime("%e %b '%y"),
                    returns.index.date[-1:][0].strftime("%e %b '%y"),
                ),
                fontsize=12,
                color="gray",
            )

    axes[0].set_ylabel(
        "Cumulative Return", fontname=fontname, fontweight="bold", fontsize=12
    )
    if isinstance(returns, _pd.Series):
        axes[0].plot(
            _stats.compsum(returns) * 100,
            color=colors[1],
            lw=1 if grayscale else lw,
            zorder=1,
        )
    elif isinstance(returns, _pd.DataFrame):
        for col in returns.columns:
            axes[0].plot(
                _stats.compsum(returns[col]) * 100,
                label=col,
                lw=1 if grayscale else lw,
                zorder=1,
            )
    axes[0].axhline(0, color="silver", lw=1, zorder=0)

    axes[0].set_yscale("symlog" if log_scale else "linear")
    # axes[0].legend(fontsize=12)

    # --- drawdown panel ---
    dd = _stats.to_drawdown_series(returns) * 100
    ddmin = _utils._round_to_closest(abs(dd.min()), 5)
    ddmin_ticks = 5
    if ddmin > 50:
        ddmin_ticks = ddmin / 4
    elif ddmin > 20:
        ddmin_ticks = ddmin / 3
    ddmin_ticks = int(_utils._round_to_closest(ddmin_ticks, 5))
    # FIX: a very small drawdown rounds the tick step down to 0, which makes
    # _np.arange raise — clamp the step to at least 1.
    ddmin_ticks = max(ddmin_ticks, 1)

    axes[1].set_ylabel("Drawdown", fontname=fontname, fontweight="bold", fontsize=12)
    axes[1].set_yticks(_np.arange(-ddmin, 0, step=ddmin_ticks))
    if isinstance(dd, _pd.Series):
        axes[1].plot(dd, color=colors[2], lw=1 if grayscale else lw, zorder=1)
    elif isinstance(dd, _pd.DataFrame):
        for col in dd.columns:
            axes[1].plot(dd[col], label=col, lw=1 if grayscale else lw, zorder=1)
    axes[1].axhline(0, color="silver", lw=1, zorder=0)
    if not grayscale:
        if isinstance(dd, _pd.Series):
            axes[1].fill_between(dd.index, 0, dd, color=colors[2], alpha=0.25)
        elif isinstance(dd, _pd.DataFrame):
            for i, col in enumerate(dd.columns):
                axes[1].fill_between(
                    dd[col].index, 0, dd[col], color=colors[i + 1], alpha=0.25
                )

    axes[1].set_yscale("symlog" if log_scale else "linear")
    # axes[1].legend(fontsize=12)

    # --- daily returns panel ---
    axes[2].set_ylabel(
        "Daily Return", fontname=fontname, fontweight="bold", fontsize=12
    )
    if isinstance(returns, _pd.Series):
        axes[2].plot(
            returns * 100, color=colors[0], label=returns.name, lw=0.5, zorder=1
        )
    elif isinstance(returns, _pd.DataFrame):
        for i, col in enumerate(returns.columns):
            axes[2].plot(
                returns[col] * 100, color=colors[i], label=col, lw=0.5, zorder=1
            )
    axes[2].axhline(0, color="silver", lw=1, zorder=0)
    axes[2].axhline(0, color=colors[-1], linestyle="--", lw=1, zorder=2)

    axes[2].set_yscale("symlog" if log_scale else "linear")
    # axes[2].legend(fontsize=12)

    retmax = _utils._round_to_closest(returns.max() * 100, 5)
    retmin = _utils._round_to_closest(returns.min() * 100, 5)
    retdiff = retmax - retmin
    steps = 5
    if retdiff > 50:
        steps = retdiff / 5
    elif retdiff > 30:
        steps = retdiff / 4
    steps = _utils._round_to_closest(steps, 5)
    # FIX: same zero-step guard as above for low-volatility return series.
    steps = max(steps, 1)
    axes[2].set_yticks(_np.arange(retmin, retmax, step=steps))

    for ax in axes:
        ax.set_facecolor("white")
        ax.yaxis.set_label_coords(-0.1, 0.5)
        ax.yaxis.set_major_formatter(_StrMethodFormatter("{x:,.0f}%"))

    _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    fig.autofmt_xdate()

    try:
        _plt.subplots_adjust(hspace=0)
    except Exception:
        pass
    try:
        fig.tight_layout(w_pad=0, h_pad=0)
    except Exception:
        pass

    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)

    if show:
        _plt.show(block=False)

    _plt.close()

    if not show:
        return fig

    return None


def earnings(
    returns,
    start_balance=1e5,
    mode="comp",
    grayscale=False,
    figsize=(10, 6),
    title="Portfolio Earnings",
    fontname="Arial",
    lw=1.5,
    subtitle=True,
    savefig=None,
    show=True,
):
    """Plot the dollar value of `start_balance` invested in the strategy,
    marking the all-time-high with a dot.

    Returns the figure when show=False, otherwise None.
    """
    colors = _GRAYSCALE_COLORS if grayscale else _FLATUI_COLORS
    alpha = 0.5 if grayscale else 0.8

    returns = _utils.make_portfolio(returns, start_balance, mode)

    if figsize is None:
        size = list(_plt.gcf().get_size_inches())
        figsize = (size[0], size[0] * 0.55)

    fig, ax = _plt.subplots(figsize=figsize)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["left"].set_visible(False)

    fig.suptitle(
        title, fontsize=14, y=0.995, fontname=fontname, fontweight="bold", color="black"
    )

    if subtitle:
        # NOTE(review): [1:2] skips the synthetic first row that
        # make_portfolio prepends — assumes at least two rows exist.
        ax.set_title(
            "\n%s - %s ;  P&L: %s (%s) "
            % (
                returns.index.date[1:2][0].strftime("%e %b '%y"),
                returns.index.date[-1:][0].strftime("%e %b '%y"),
                _utils._score_str(
                    "${:,}".format(round(returns.values[-1] - returns.values[0], 2))
                ),
                _utils._score_str(
                    "{:,}%".format(
                        round((returns.values[-1] / returns.values[0] - 1) * 100, 2)
                    )
                ),
            ),
            fontsize=12,
            color="gray",
        )

    # Highlight the all-time-high with a single marker.
    mx = returns.max()
    returns_max = returns[returns == mx]
    ix = returns_max[~_np.isnan(returns_max)].index[0]
    returns_max = _np.where(returns.index == ix, mx, _np.nan)

    ax.plot(
        returns.index,
        returns_max,
        marker="o",
        lw=0,
        alpha=alpha,
        markersize=12,
        color=colors[0],
    )
    ax.plot(returns.index, returns, color=colors[1], lw=1 if grayscale else lw)

    ax.set_ylabel(
        "Value of  ${:,.0f}".format(start_balance),
        fontname=fontname,
        fontweight="bold",
        fontsize=12,
    )

    ax.yaxis.set_major_formatter(_FuncFormatter(_core.format_cur_axis))
    ax.yaxis.set_label_coords(-0.1, 0.5)

    fig.set_facecolor("white")
    ax.set_facecolor("white")
    fig.autofmt_xdate()

    try:
        _plt.subplots_adjust(hspace=0)
    except Exception:
        pass
    try:
        fig.tight_layout(w_pad=0, h_pad=0)
    except Exception:
        pass

    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)

    if show:
        _plt.show(block=False)

    _plt.close()

    if not show:
        return fig

    return None
def returns(
    returns,
    benchmark=None,
    grayscale=False,
    figsize=(10, 6),
    fontname="Arial",
    lw=1.5,
    match_volatility=False,
    compound=True,
    cumulative=True,
    resample=None,
    ylabel="Cumulative Returns",
    subtitle=True,
    savefig=None,
    show=True,
    prepare_returns=True,
):
    """Plot cumulative (or raw) returns, optionally against a benchmark."""
    if compound:
        title = "Cumulative Returns"
    else:
        title = "Returns"

    if benchmark is not None:
        # Name the benchmark in the title before it gets converted to data.
        if isinstance(benchmark, str):
            title = "%s vs %s" % (title, benchmark.upper())
        else:
            title = "%s vs Benchmark" % title
        if match_volatility:
            title = "%s (Volatility Matched)" % title

        benchmark = _utils._prepare_benchmark(benchmark, returns.index)

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    fig = _core.plot_timeseries(
        returns,
        benchmark,
        title,
        ylabel=ylabel,
        match_volatility=match_volatility,
        log_scale=False,
        resample=resample,
        compound=compound,
        cumulative=cumulative,
        lw=lw,
        figsize=figsize,
        fontname=fontname,
        grayscale=grayscale,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    return fig if not show else None


def log_returns(
    returns,
    benchmark=None,
    grayscale=False,
    figsize=(10, 5),
    fontname="Arial",
    lw=1.5,
    match_volatility=False,
    compound=True,
    cumulative=True,
    resample=None,
    ylabel="Cumulative Returns",
    subtitle=True,
    savefig=None,
    show=True,
    prepare_returns=True,
):
    """Plot cumulative (or raw) returns on a log-scaled axis."""
    title = "Cumulative Returns" if compound else "Returns"
    if benchmark is None:
        title += " (Log Scaled)"
    else:
        versus = benchmark.upper() if isinstance(benchmark, str) else "Benchmark"
        title += " vs %s (Log Scaled" % versus
        if match_volatility:
            title += ", Volatility Matched"
        title += ")"

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    benchmark = _utils._prepare_benchmark(benchmark, returns.index)

    fig = _core.plot_timeseries(
        returns,
        benchmark,
        title,
        ylabel=ylabel,
        match_volatility=match_volatility,
        log_scale=True,
        resample=resample,
        compound=compound,
        cumulative=cumulative,
        lw=lw,
        figsize=figsize,
        fontname=fontname,
        grayscale=grayscale,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    return fig if not show else None


def daily_returns(
    returns,
    benchmark,
    grayscale=False,
    figsize=(10, 4),
    fontname="Arial",
    lw=0.5,
    log_scale=False,
    ylabel="Returns",
    subtitle=True,
    savefig=None,
    show=True,
    prepare_returns=True,
    active=False,
):
    """Plot day-by-day returns; with active=True, plot returns minus
    benchmark (active returns)."""
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
        if active and benchmark is not None:
            benchmark = _utils._prepare_returns(benchmark)
            returns = returns - benchmark

    plot_title = "Daily Active Returns" if active else "Daily Returns"

    plot_opts = {
        "ylabel": ylabel,
        "match_volatility": False,
        "log_scale": log_scale,
        "resample": "D",
        "compound": False,
        "lw": lw,
        "figsize": figsize,
        "fontname": fontname,
        "grayscale": grayscale,
        "subtitle": subtitle,
        "savefig": savefig,
        "show": show,
    }
    fig = _core.plot_timeseries(returns, None, plot_title, **plot_opts)
    return fig if not show else None


def yearly_returns(
    returns,
    benchmark=None,
    fontname="Arial",
    grayscale=False,
    hlw=1.5,
    hlcolor="red",
    hllabel="",
    match_volatility=False,
    log_scale=False,
    figsize=(10, 5),
    ylabel=True,
    subtitle=True,
    compounded=True,
    savefig=None,
    show=True,
    prepare_returns=True,
):
    """Bar plot of end-of-year returns, with a horizontal line at the mean."""
    title = "EOY Returns"
    if benchmark is not None:
        title += "  vs Benchmark"
        prepared_bench = _utils._prepare_benchmark(benchmark, returns.index)
        benchmark = (
            prepared_bench.resample("YE").apply(_stats.comp).resample("YE").last()
        )

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    # Aggregate to yearly returns: compounded product or simple sum.
    if compounded:
        returns = returns.resample("YE").apply(_stats.comp)
    else:
        returns = returns.resample("YE").sum()
    returns = returns.resample("YE").last()

    fig = _core.plot_returns_bars(
        returns,
        benchmark,
        fontname=fontname,
        hline=returns.mean(),
        hlw=hlw,
        hllabel=hllabel,
        hlcolor=hlcolor,
        match_volatility=match_volatility,
        log_scale=log_scale,
        resample=None,
        title=title,
        figsize=figsize,
        grayscale=grayscale,
        ylabel=ylabel,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    return fig if not show else None


def distribution(
    returns,
    fontname="Arial",
    grayscale=False,
    ylabel=True,
    figsize=(10, 6),
    subtitle=True,
    compounded=True,
    savefig=None,
    show=True,
    title=None,
    prepare_returns=True,
    log_scale=True,
):
    """Plot the distribution of returns (delegates to _core.plot_distribution)."""
    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    plot_opts = {
        "fontname": fontname,
        "grayscale": grayscale,
        "figsize": figsize,
        "ylabel": ylabel,
        "subtitle": subtitle,
        "title": title,
        "compounded": compounded,
        "savefig": savefig,
        "show": show,
        "log_scale": log_scale,
    }
    fig = _core.plot_distribution(returns, **plot_opts)
    return fig if not show else None


def histogram(
    returns,
    benchmark=None,
    resample="ME",
    fontname="Arial",
    grayscale=False,
    figsize=(10, 5),
    ylabel=True,
    subtitle=True,
    compounded=True,
    savefig=None,
    show=True,
    prepare_returns=True,
):
    """Histogram of returns aggregated at the given resample frequency."""
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
        if benchmark is not None:
            benchmark = _utils._prepare_returns(benchmark)

    # Map the pandas resample alias to the human-readable title prefix.
    period_titles = {
        "W": "Weekly ",
        "ME": "Monthly ",
        "Q": "Quarterly ",
        "YE": "Annual ",
    }
    title = period_titles.get(resample, "")

    return _core.plot_histogram(
        returns,
        benchmark,
        resample=resample,
        grayscale=grayscale,
        fontname=fontname,
        title="Distribution of %sReturns" % title,
        figsize=figsize,
        ylabel=ylabel,
        subtitle=subtitle,
        compounded=compounded,
        savefig=savefig,
        show=show,
    )
def drawdown(
    returns,
    grayscale=False,
    figsize=(10, 5),
    fontname="Arial",
    lw=1,
    log_scale=False,
    match_volatility=False,
    compound=False,
    ylabel="Drawdown",
    resample=None,
    subtitle=True,
    savefig=None,
    show=True,
):
    """Plots the underwater (drawdown) curve of the return series, with a
    horizontal line at the average drawdown.

    Returns the figure when show=False, otherwise None.
    """
    # Convert returns to a drawdown series first; the plot helper does the rest.
    dd = _stats.to_drawdown_series(returns)

    fig = _core.plot_timeseries(
        dd,
        title="Underwater Plot",
        hline=dd.mean(),
        hlw=2,
        hllabel="Average",
        returns_label="Drawdown",
        compound=compound,
        match_volatility=match_volatility,
        log_scale=log_scale,
        resample=resample,
        fill=True,
        lw=lw,
        figsize=figsize,
        ylabel=ylabel,
        fontname=fontname,
        grayscale=grayscale,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    if not show:
        return fig


def drawdowns_periods(
    returns,
    periods=5,
    lw=1.5,
    log_scale=False,
    fontname="Arial",
    grayscale=False,
    title=None,
    figsize=(10, 5),
    ylabel=True,
    subtitle=True,
    compounded=True,
    savefig=None,
    show=True,
    prepare_returns=True,
):
    """Plots the `periods` longest drawdown periods highlighted on the
    cumulative return curve (delegates to _core.plot_longest_drawdowns).

    Returns the figure when show=False, otherwise None.
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    fig = _core.plot_longest_drawdowns(
        returns,
        periods=periods,
        lw=lw,
        log_scale=log_scale,
        fontname=fontname,
        grayscale=grayscale,
        title=title,
        figsize=figsize,
        ylabel=ylabel,
        subtitle=subtitle,
        compounded=compounded,
        savefig=savefig,
        show=show,
    )
    if not show:
        return fig


def rolling_beta(
    returns,
    benchmark,
    window1=126,
    window1_label="6-Months",
    window2=365,
    window2_label="12-Months",
    lw=1.5,
    fontname="Arial",
    grayscale=False,
    figsize=(10, 3),
    ylabel=True,
    subtitle=True,
    savefig=None,
    show=True,
    prepare_returns=True,
):
    """Plots the strategy's rolling beta to the benchmark over two window
    sizes (defaults: 126 and 365 periods).

    Returns the figure when show=False, otherwise None.
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    benchmark = _utils._prepare_benchmark(benchmark, returns.index)

    fig = _core.plot_rolling_beta(
        returns,
        benchmark,
        window1=window1,
        window1_label=window1_label,
        window2=window2,
        window2_label=window2_label,
        title="Rolling Beta to Benchmark",
        fontname=fontname,
        grayscale=grayscale,
        lw=lw,
        figsize=figsize,
        ylabel=ylabel,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    if not show:
        return fig


def rolling_volatility(
    returns,
    benchmark=None,
    period=126,
    period_label="6-Months",
    periods_per_year=365,
    lw=1.5,
    fontname="Arial",
    grayscale=False,
    figsize=(10, 3),
    ylabel="Volatility",
    subtitle=True,
    savefig=None,
    show=True,
):
    """Plots rolling annualized volatility over `period` observations,
    optionally with the benchmark's rolling volatility overlaid.

    Returns the figure when show=False, otherwise None.
    """
    returns = _stats.rolling_volatility(returns, period, periods_per_year)

    if benchmark is not None:
        benchmark = _utils._prepare_benchmark(benchmark, returns.index)
        # prepare_returns=False: _prepare_benchmark already cleaned the data.
        benchmark = _stats.rolling_volatility(
            benchmark, period, periods_per_year, prepare_returns=False
        )

    fig = _core.plot_rolling_stats(
        returns,
        benchmark,
        hline=returns.mean(),
        hlw=1.5,
        ylabel=ylabel,
        title="Rolling Volatility (%s)" % period_label,
        fontname=fontname,
        grayscale=grayscale,
        lw=lw,
        figsize=figsize,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    if not show:
        return fig


def rolling_sharpe(
    returns,
    benchmark=None,
    rf=0.0,
    period=126,
    period_label="6-Months",
    periods_per_year=365,
    lw=1.25,
    fontname="Arial",
    grayscale=False,
    figsize=(10, 3),
    ylabel="Sharpe",
    subtitle=True,
    savefig=None,
    show=True,
):
    """Plots the rolling Sharpe ratio (risk-free rate `rf`) over `period`
    observations, optionally with the benchmark's rolling Sharpe overlaid.

    Returns the figure when show=False, otherwise None.
    """
    returns = _stats.rolling_sharpe(
        returns,
        rf,
        period,
        True,
        periods_per_year,
    )

    if benchmark is not None:
        benchmark = _utils._prepare_benchmark(benchmark, returns.index, rf)
        benchmark = _stats.rolling_sharpe(
            benchmark, rf, period, True, periods_per_year, prepare_returns=False
        )

    fig = _core.plot_rolling_stats(
        returns,
        benchmark,
        hline=returns.mean(),
        hlw=1.5,
        ylabel=ylabel,
        title="Rolling Sharpe (%s)" % period_label,
        fontname=fontname,
        grayscale=grayscale,
        lw=lw,
        figsize=figsize,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    if not show:
        return fig
def rolling_sortino(
    returns,
    benchmark=None,
    rf=0.0,
    period=126,
    period_label="6-Months",
    periods_per_year=365,
    lw=1.25,
    fontname="Arial",
    grayscale=False,
    figsize=(10, 3),
    ylabel="Sortino",
    subtitle=True,
    savefig=None,
    show=True,
):
    """Plots the rolling Sortino ratio (risk-free rate `rf`) over `period`
    observations, optionally with the benchmark's rolling Sortino overlaid.

    Returns the figure when show=False, otherwise None.
    """
    returns = _stats.rolling_sortino(returns, rf, period, True, periods_per_year)

    if benchmark is not None:
        benchmark = _utils._prepare_benchmark(benchmark, returns.index, rf)
        # prepare_returns=False: _prepare_benchmark already cleaned the data.
        benchmark = _stats.rolling_sortino(
            benchmark, rf, period, True, periods_per_year, prepare_returns=False
        )

    fig = _core.plot_rolling_stats(
        returns,
        benchmark,
        hline=returns.mean(),
        hlw=1.5,
        ylabel=ylabel,
        title="Rolling Sortino (%s)" % period_label,
        fontname=fontname,
        grayscale=grayscale,
        lw=lw,
        figsize=figsize,
        subtitle=subtitle,
        savefig=savefig,
        show=show,
    )
    if not show:
        return fig


def monthly_heatmap(
    returns,
    benchmark=None,
    annot_size=10,
    figsize=(10, 5),
    cbar=True,
    square=False,
    returns_label="Strategy",
    compounded=True,
    eoy=False,
    grayscale=False,
    fontname="Arial",
    ylabel=True,
    savefig=None,
    show=True,
    active=False,
):
    """Plots a year-by-month heatmap of percentage returns using seaborn.

    With active=True and a benchmark, plots returns minus benchmark
    (active returns) instead. Returns the figure when show=False,
    otherwise None.
    """
    # colors, ls, alpha = _core._get_colors(grayscale)
    cmap = "gray" if grayscale else "RdYlGn"

    # Pivot to a years-by-months table, in percent.
    returns = _stats.monthly_returns(returns, eoy=eoy, compounded=compounded) * 100

    # Scale the figure height with the number of years so rows stay readable.
    fig_height = len(returns) / 2.5

    if figsize is None:
        size = list(_plt.gcf().get_size_inches())
        figsize = (size[0], size[1])

    figsize = (figsize[0], max([fig_height, figsize[1]]))

    if cbar:
        # Widen slightly to leave room for the colorbar.
        figsize = (figsize[0] * 1.051, max([fig_height, figsize[1]]))

    fig, ax = _plt.subplots(figsize=figsize)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["left"].set_visible(False)

    fig.set_facecolor("white")
    ax.set_facecolor("white")

    # _sns.set(font_scale=.9)
    if active and benchmark is not None:
        ax.set_title(
            f"{returns_label} - Monthly Active Returns (%)\n",
            fontsize=14,
            y=0.995,
            fontname=fontname,
            fontweight="bold",
            color="black",
        )
        benchmark = (
            _stats.monthly_returns(benchmark, eoy=eoy, compounded=compounded) * 100
        )
        active_returns = returns - benchmark

        ax = _sns.heatmap(
            active_returns,
            ax=ax,
            annot=True,
            center=0,
            annot_kws={"size": annot_size},
            fmt="0.2f",
            linewidths=0.5,
            square=square,
            cbar=cbar,
            cmap=cmap,
            cbar_kws={"format": "%.0f%%"},
        )
    else:
        ax.set_title(
            f"{returns_label} - Monthly Returns (%)\n",
            fontsize=14,
            y=0.995,
            fontname=fontname,
            fontweight="bold",
            color="black",
        )
        ax = _sns.heatmap(
            returns,
            ax=ax,
            annot=True,
            center=0,
            annot_kws={"size": annot_size},
            fmt="0.2f",
            linewidths=0.5,
            square=square,
            cbar=cbar,
            cmap=cmap,
            cbar_kws={"format": "%.0f%%"},
        )
    # _sns.set(font_scale=1)

    # align plot to match other
    if ylabel:
        ax.set_ylabel("Years", fontname=fontname, fontweight="bold", fontsize=12)
        ax.yaxis.set_label_coords(-0.1, 0.5)

    ax.tick_params(colors="#808080")
    _plt.xticks(rotation=0, fontsize=annot_size * 1.2)
    _plt.yticks(rotation=0, fontsize=annot_size * 1.2)

    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout(w_pad=0, h_pad=0)
    except Exception:
        pass

    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)

    if show:
        _plt.show(block=False)

    _plt.close()

    if not show:
        return fig

    return None


def monthly_returns(
    returns,
    annot_size=10,
    figsize=(10, 5),
    cbar=True,
    square=False,
    compounded=True,
    eoy=False,
    grayscale=False,
    fontname="Arial",
    ylabel=True,
    savefig=None,
    show=True,
):
    """Convenience alias for monthly_heatmap() without a benchmark."""
    return monthly_heatmap(
        returns=returns,
        annot_size=annot_size,
        figsize=figsize,
        cbar=cbar,
        square=square,
        compounded=compounded,
        eoy=eoy,
        grayscale=grayscale,
        fontname=fontname,
        ylabel=ylabel,
        savefig=savefig,
        show=show,
    )
def monthly_returns_detailedview(
    returns,
    grayscale=False,
    figsize=(14, 6),
    annot_size=11,
    returns_label="Strategy",
    fontname="Arial",
    return_font_rate=1.0,
    monthly_dd_font_rate=0.8,
    annual_dd_font_rate=0.8,
    savefig=None,
    show=True,
):
    """Detailed monthly-returns heatmap with drawdown annotations.

    Thin wrapper around _core.monthly_heatmap_detailedview; the *_font_rate
    parameters scale the annotation fonts for returns, monthly drawdowns and
    annual drawdowns respectively. Returns the figure when show=False,
    otherwise None.
    """
    fig = _core.monthly_heatmap_detailedview(
        returns,
        grayscale=grayscale,
        figsize=figsize,
        annot_size=annot_size,
        returns_label=returns_label,
        return_font_rate=return_font_rate,
        annual_dd_font_rate=annual_dd_font_rate,
        monthly_dd_font_rate=monthly_dd_font_rate,
        fontname=fontname,
        savefig=savefig,
        show=show,
    )

    if not show:
        return fig
--------------------------------------------------------------------------------
/quantstats_lumi/plots.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019-2023 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
20 | 21 | try: 22 | from pandas.plotting import register_matplotlib_converters as _rmc 23 | 24 | _rmc() 25 | except ImportError: 26 | pass 27 | 28 | from quantstats_lumi._plotting.wrappers import * 29 | -------------------------------------------------------------------------------- /quantstats_lumi/report.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Tearsheet (generated by QuantStats) 10 | 11 | 12 | 634 | 635 | 636 | 637 |
638 | 642 | 643 | 722 | 723 |
724 |
725 | {{returns}} 726 |
727 | 728 |
{{log_returns}}
729 |
{{vol_returns}}
730 |
{{eoy_returns}}
731 |
{{monthly_dist}}
732 |
{{daily_returns}}
733 |
{{rolling_beta}}
734 |
{{rolling_vol}}
735 |
{{rolling_sharpe}}
736 |
{{rolling_sortino}}
737 |
{{dd_periods}}
738 |
{{dd_plot}}
739 |
{{monthly_heatmap}}
740 |
{{returns_dist}}
741 | 742 | 743 |
744 | Disclaimer: This report is for informational purposes only and 745 | should not be considered as investment advice. Past performance 746 | is not indicative of future results. 747 |
748 |
749 |
750 | 751 | 823 | 824 | 825 | -------------------------------------------------------------------------------- /quantstats_lumi/stats.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: UTF-8 -*- 3 | # 4 | # QuantStats: Portfolio analytics for quants 5 | # https://github.com/ranaroussi/quantstats 6 | # 7 | # Copyright 2019-2023 Ran Aroussi 8 | # 9 | # Licensed under the Apache License, Version 2.0 (the "License"); 10 | # you may not use this file except in compliance with the License. 11 | # You may obtain a copy of the License at 12 | # 13 | # http://www.apache.org/licenses/LICENSE-2.0 14 | # 15 | # Unless required by applicable law or agreed to in writing, software 16 | # distributed under the License is distributed on an "AS IS" BASIS, 17 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | # See the License for the specific language governing permissions and 19 | # limitations under the License. 20 | 21 | from math import ceil as _ceil 22 | from math import sqrt as _sqrt 23 | from warnings import warn 24 | 25 | import numpy as _np 26 | import pandas as _pd 27 | from scipy.stats import linregress as _linregress 28 | from scipy.stats import norm as _norm 29 | 30 | from . 
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019-2023 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import ceil as _ceil
from math import sqrt as _sqrt
from warnings import warn

import numpy as _np
import pandas as _pd
from scipy.stats import linregress as _linregress
from scipy.stats import norm as _norm

from . import utils as _utils

# ======== STATS ========


def pct_rank(prices, window=60):
    """Percentile rank (0-100) of each observation within a trailing window.

    Args:
        * prices (Series, DataFrame): input price series
        * window (int): number of trailing observations to rank against
    """
    rank = _utils.multi_shift(prices, window).T.rank(pct=True).T
    return rank.iloc[:, 0] * 100.0


def compsum(returns):
    """Calculates rolling compounded returns"""
    return returns.add(1).cumprod() - 1


def comp(returns):
    """Calculates total compounded returns"""
    return returns.add(1).prod() - 1


def distribution(returns, compounded=True, prepare_returns=True):
    """Returns the distribution of daily/weekly/monthly/quarterly/yearly
    returns, each split into in-range "values" and 1.5*IQR "outliers".

    Args:
        * returns (Series, DataFrame): Input return series
        * compounded (bool): Calculate compounded returns?
        * prepare_returns (bool): Run input through _utils._prepare_returns?
    """

    def get_outliers(data):
        """Split `data` into values inside and outside the 1.5*IQR fences."""
        # https://datascience.stackexchange.com/a/57199
        Q1 = data.quantile(0.25)
        Q3 = data.quantile(0.75)
        IQR = Q3 - Q1  # IQR is interquartile range.
        filtered = (data >= Q1 - 1.5 * IQR) & (data <= Q3 + 1.5 * IQR)
        return {
            "values": data.loc[filtered].tolist(),
            "outliers": data.loc[~filtered].tolist(),
        }

    if isinstance(returns, _pd.DataFrame):
        warn(
            "Pandas DataFrame was passed (Series expected). "
            "Only first column will be used."
        )
        returns = returns.copy()
        returns.columns = map(str.lower, returns.columns)
        if len(returns.columns) > 1 and "close" in returns.columns:
            returns = returns["close"]
        else:
            returns = returns[returns.columns[0]]

    # comp() compounds each resampled bucket; 'sum' is pandas' named reducer
    apply_fnc = comp if compounded else 'sum'
    daily = returns.dropna()

    if prepare_returns:
        daily = _utils._prepare_returns(daily)

    # FIX: use the modern end-of-period aliases consistently. The file already
    # relies on "ME"/"YE" (pandas >= 2.2), where the legacy "Q" alias is
    # deprecated and raises a FutureWarning — so use "QE" as well.
    return {
        "Daily": get_outliers(daily),
        "Weekly": get_outliers(daily.resample("W-MON").apply(apply_fnc)),
        "Monthly": get_outliers(daily.resample("ME").apply(apply_fnc)),
        "Quarterly": get_outliers(daily.resample("QE").apply(apply_fnc)),
        "Yearly": get_outliers(daily.resample("YE").apply(apply_fnc)),
    }


def expected_return(returns, aggregate=None, compounded=True, prepare_returns=True):
    """
    Returns the expected return for a given period
    by calculating the geometric holding period return
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
    returns = _utils.aggregate_returns(returns, aggregate, compounded)
    return _np.prod(1 + returns, axis=0) ** (1 / len(returns)) - 1


def geometric_mean(retruns, aggregate=None, compounded=True):
    """Shorthand for expected_return().

    NOTE: the `retruns` parameter name preserves the upstream typo so
    keyword callers keep working.
    """
    return expected_return(retruns, aggregate, compounded)


def ghpr(retruns, aggregate=None, compounded=True):
    """Shorthand for expected_return() (see note on geometric_mean)."""
    return expected_return(retruns, aggregate, compounded)


def outliers(returns, quantile=0.95):
    """Returns series of outliers (returns above the given quantile)"""
    return returns[returns > returns.quantile(quantile)].dropna(how="all")


def remove_outliers(returns, quantile=0.95):
    """Returns the series without the returns above the given quantile"""
    return returns[returns < returns.quantile(quantile)]
"""Returns the best day/month/week/quarter/year's return""" 130 | if prepare_returns: 131 | returns = _utils._prepare_returns(returns) 132 | return _utils.aggregate_returns(returns, aggregate, compounded).max() 133 | 134 | 135 | def worst(returns, aggregate=None, compounded=True, prepare_returns=True): 136 | """Returns the worst day/month/week/quarter/year's return""" 137 | if prepare_returns: 138 | returns = _utils._prepare_returns(returns) 139 | return _utils.aggregate_returns(returns, aggregate, compounded).min() 140 | 141 | 142 | def consecutive_wins(returns, aggregate=None, compounded=True, prepare_returns=True): 143 | """Returns the maximum consecutive wins by day/month/week/quarter/year""" 144 | if prepare_returns: 145 | returns = _utils._prepare_returns(returns) 146 | returns = _utils.aggregate_returns(returns, aggregate, compounded) > 0 147 | return _utils._count_consecutive(returns).max() 148 | 149 | 150 | def consecutive_losses(returns, aggregate=None, compounded=True, prepare_returns=True): 151 | """ 152 | Returns the maximum consecutive losses by 153 | day/month/week/quarter/year 154 | """ 155 | if prepare_returns: 156 | returns = _utils._prepare_returns(returns) 157 | returns = _utils.aggregate_returns(returns, aggregate, compounded) < 0 158 | return _utils._count_consecutive(returns).max() 159 | 160 | 161 | def exposure(returns, prepare_returns=True): 162 | """Returns the market exposure time (returns != 0)""" 163 | if prepare_returns: 164 | returns = _utils._prepare_returns(returns) 165 | 166 | def _exposure(ret): 167 | """Returns the market exposure time (returns != 0)""" 168 | ex = len(ret[(~_np.isnan(ret)) & (ret != 0)]) / len(ret) 169 | return _ceil(ex * 100) / 100 170 | 171 | if isinstance(returns, _pd.DataFrame): 172 | _df = {} 173 | for col in returns.columns: 174 | _df[col] = _exposure(returns[col]) 175 | return _pd.Series(_df) 176 | return _exposure(returns) 177 | 178 | 179 | def win_rate(returns, aggregate=None, compounded=True, 
prepare_returns=True): 180 | """Calculates the win ratio for a period""" 181 | 182 | def _win_rate(series): 183 | try: 184 | return len(series[series > 0]) / len(series[series != 0]) 185 | except Exception: 186 | return 0.0 187 | 188 | if prepare_returns: 189 | returns = _utils._prepare_returns(returns) 190 | if aggregate: 191 | returns = _utils.aggregate_returns(returns, aggregate, compounded) 192 | 193 | if isinstance(returns, _pd.DataFrame): 194 | _df = {} 195 | for col in returns.columns: 196 | _df[col] = _win_rate(returns[col]) 197 | 198 | return _pd.Series(_df) 199 | 200 | return _win_rate(returns) 201 | 202 | 203 | def avg_return(returns, aggregate=None, compounded=True, prepare_returns=True): 204 | """Calculates the average return/trade return for a period""" 205 | if prepare_returns: 206 | returns = _utils._prepare_returns(returns) 207 | if aggregate: 208 | returns = _utils.aggregate_returns(returns, aggregate, compounded) 209 | return returns[returns != 0].dropna().mean() 210 | 211 | 212 | def avg_win(returns, aggregate=None, compounded=True, prepare_returns=True): 213 | """ 214 | Calculates the average winning 215 | return/trade return for a period 216 | """ 217 | if prepare_returns: 218 | returns = _utils._prepare_returns(returns) 219 | if aggregate: 220 | returns = _utils.aggregate_returns(returns, aggregate, compounded) 221 | return returns[returns > 0].dropna().mean() 222 | 223 | 224 | def avg_loss(returns, aggregate=None, compounded=True, prepare_returns=True): 225 | """ 226 | Calculates the average low if 227 | return/trade return for a period 228 | """ 229 | if prepare_returns: 230 | returns = _utils._prepare_returns(returns) 231 | if aggregate: 232 | returns = _utils.aggregate_returns(returns, aggregate, compounded) 233 | return returns[returns < 0].dropna().mean() 234 | 235 | 236 | def volatility(returns, periods=365, annualize=True, prepare_returns=True): 237 | """Calculates the volatility of returns for a period""" 238 | if prepare_returns: 
239 | returns = _utils._prepare_returns(returns) 240 | std = returns.std() 241 | if annualize: 242 | return std * _np.sqrt(periods) 243 | 244 | return std 245 | 246 | 247 | def rolling_volatility( 248 | returns, rolling_period=126, periods_per_year=365, prepare_returns=True 249 | ): 250 | """Calculates the rolling volatility of returns for a period 251 | Args: 252 | * returns (Series, DataFrame): Input return series 253 | * rolling_period (int): Rolling period 254 | * periods_per_year: periods per year 255 | """ 256 | if prepare_returns: 257 | returns = _utils._prepare_returns(returns, rolling_period) 258 | 259 | return returns.rolling(rolling_period).std() * _np.sqrt(periods_per_year) 260 | 261 | 262 | def implied_volatility(returns, periods=365, annualize=True): 263 | """Calculates the implied volatility of returns for a period""" 264 | logret = _utils.log_returns(returns) 265 | if annualize: 266 | return logret.rolling(periods).std() * _np.sqrt(periods) 267 | return logret.std() 268 | 269 | 270 | def autocorr_penalty(returns, prepare_returns=False): 271 | """Metric to account for auto correlation""" 272 | if prepare_returns: 273 | returns = _utils._prepare_returns(returns) 274 | 275 | if isinstance(returns, _pd.DataFrame): 276 | returns = returns[returns.columns[0]] 277 | 278 | num = len(returns) 279 | coef = _np.abs(_np.corrcoef(returns[:-1], returns[1:])[0, 1]) 280 | corr = [((num - x) / num) * coef**x for x in range(1, num)] 281 | return _np.sqrt(1 + 2 * _np.sum(corr)) 282 | 283 | 284 | # ======= METRICS ======= 285 | 286 | 287 | def sharpe(returns, rf=0.0, periods=365, annualize=True, smart=False): 288 | """ 289 | Calculates the sharpe ratio of access returns 290 | 291 | If rf is non-zero, you must specify periods. 
292 | In this case, rf is assumed to be expressed in yearly (annualized) terms 293 | 294 | Args: 295 | * returns (Series, DataFrame): Input return series 296 | * rf (float): Risk-free rate expressed as a yearly (annualized) return 297 | * periods (int): Freq. of returns 298 | * annualize: return annualize sharpe? 299 | * smart: return smart sharpe ratio 300 | """ 301 | if rf != 0 and periods is None: 302 | raise Exception("Must provide periods if rf != 0") 303 | 304 | returns = _utils._prepare_returns(returns, rf, periods) 305 | divisor = returns.std(ddof=1) 306 | if smart: 307 | # penalize sharpe with auto correlation 308 | divisor = divisor * autocorr_penalty(returns) 309 | res = returns.mean() / divisor 310 | 311 | if annualize: 312 | return res * _np.sqrt(1 if periods is None else periods) 313 | 314 | return res 315 | 316 | 317 | def smart_sharpe(returns, rf=0.0, periods=365, annualize=True): 318 | """Calculates the smart sharpe ratio 319 | Args: 320 | * returns (Series, DataFrame): Input return series 321 | * rf (float): Risk-free rate expressed as a yearly (annualized) return 322 | * periods (int): Freq. of returns 323 | * annualize: return annualize sharpe? 324 | """ 325 | return sharpe(returns, rf, periods, annualize, True) 326 | 327 | 328 | def rolling_sharpe( 329 | returns, 330 | rf=0.0, 331 | rolling_period=126, 332 | annualize=True, 333 | periods_per_year=365, 334 | prepare_returns=True, 335 | ): 336 | """Calculates the rolling sharpe ratio 337 | Args: 338 | * returns (Series, DataFrame): Input return series 339 | * rf (float): Risk-free rate expressed as a yearly (annualized) return 340 | * rolling_period (int): Rolling period 341 | * annualize: return annualize sharpe? 
342 | * periods_per_year: periods per year 343 | """ 344 | if rf != 0 and rolling_period is None: 345 | raise Exception("Must provide periods if rf != 0") 346 | 347 | if prepare_returns: 348 | returns = _utils._prepare_returns(returns, rf, rolling_period) 349 | 350 | res = returns.rolling(rolling_period).mean() / returns.rolling(rolling_period).std() 351 | 352 | if annualize: 353 | res = res * _np.sqrt(1 if periods_per_year is None else periods_per_year) 354 | return res 355 | 356 | 357 | def sortino(returns, rf=0, periods=365, annualize=True, smart=False): 358 | """ 359 | Calculates the sortino ratio of access returns 360 | 361 | If rf is non-zero, you must specify periods. 362 | In this case, rf is assumed to be expressed in yearly (annualized) terms 363 | 364 | Calculation is based on this paper by Red Rock Capital 365 | http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf 366 | """ 367 | if rf != 0 and periods is None: 368 | raise Exception("Must provide periods if rf != 0") 369 | 370 | returns = _utils._prepare_returns(returns, rf, periods) 371 | 372 | downside = _np.sqrt((returns[returns < 0] ** 2).sum() / len(returns)) 373 | 374 | if smart: 375 | # penalize sortino with auto correlation 376 | downside = downside * autocorr_penalty(returns) 377 | 378 | res = returns.mean() / downside 379 | 380 | if annualize: 381 | return res * _np.sqrt(1 if periods is None else periods) 382 | 383 | return res 384 | 385 | 386 | def smart_sortino(returns, rf=0, periods=365, annualize=True): 387 | """Calculates the smart sortino ratio 388 | Args: 389 | * returns (Series, DataFrame): Input return series 390 | * rf (float): Risk-free rate expressed as a yearly (annualized) return 391 | * periods (int): Freq. of returns 392 | * annualize: return annualize sharpe? 
393 | """ 394 | return sortino(returns, rf, periods, annualize, True) 395 | 396 | 397 | def rolling_sortino( 398 | returns, rf=0, rolling_period=126, annualize=True, periods_per_year=365, **kwargs 399 | ): 400 | """Calculates the rolling sortino ratio 401 | Args: 402 | * returns (Series, DataFrame): Input return series 403 | * rf (float): Risk-free rate expressed as a yearly (annualized) return 404 | * rolling_period (int): Rolling period 405 | * annualize: return annualize sharpe? 406 | * periods_per_year: periods per year 407 | """ 408 | if rf != 0 and rolling_period is None: 409 | raise Exception("Must provide periods if rf != 0") 410 | 411 | if kwargs.get("prepare_returns", True): 412 | returns = _utils._prepare_returns(returns, rf, rolling_period) 413 | 414 | downside = ( 415 | returns.rolling(rolling_period).apply( 416 | lambda x: (x.values[x.values < 0] ** 2).sum() 417 | ) 418 | / rolling_period 419 | ) 420 | 421 | res = returns.rolling(rolling_period).mean() / _np.sqrt(downside) 422 | if annualize: 423 | res = res * _np.sqrt(1 if periods_per_year is None else periods_per_year) 424 | return res 425 | 426 | 427 | def adjusted_sortino(returns, rf=0, periods=365, annualize=True, smart=False): 428 | """ 429 | Jack Schwager's version of the Sortino ratio allows for 430 | direct comparisons to the Sharpe. See here for more info: 431 | https://archive.is/wip/2rwFW 432 | """ 433 | data = sortino(returns, rf, periods=periods, annualize=annualize, smart=smart) 434 | return data / _sqrt(2) 435 | 436 | 437 | def probabilistic_ratio( 438 | series, rf=0.0, base="sharpe", periods=365, annualize=False, smart=False 439 | ): 440 | """Calculates the probabilistic sharpe ratio 441 | Args: 442 | * series (Series, DataFrame): Input return series 443 | * rf (float): Risk-free rate expressed as a yearly (annualized) return 444 | * periods (int): Freq. of returns 445 | * annualize: return annualize sharpe? 
def probabilistic_ratio(
    series, rf=0.0, base="sharpe", periods=365, annualize=False, smart=False
):
    """Probabilistic version of the chosen risk-adjusted ratio
    (Bailey & Lopez de Prado's PSR construction).

    Args:
        * series (Series, DataFrame): Input return series
        * rf (float): Risk-free rate expressed as a yearly (annualized) return
        * base (str): 'sharpe', 'sortino' or 'adjusted_sortino'
        * periods (int): Freq. of returns
        * annualize: return annualize sharpe?
        * smart: return smart sharpe ratio
    """
    _bases = {
        "sharpe": sharpe,
        "sortino": sortino,
        "adjusted_sortino": adjusted_sortino,
    }
    key = base.lower()
    if key not in _bases:
        raise Exception(
            "`metric` must be either `sharpe`, `sortino`, or `adjusted_sortino`"
        )
    base = _bases[key](series, periods=periods, annualize=False, smart=smart)

    skew_no = skew(series, prepare_returns=False)
    kurtosis_no = kurtosis(series, prepare_returns=False)
    n = len(series)

    # standard error of the estimated ratio; note that
    # 1 + 0.5*b^2 + ((k-3)/4)*b^2 == 1 + ((k-1)/4)*b^2
    sigma_sr = _np.sqrt(
        (1 + (0.5 * base**2) - (skew_no * base) + (((kurtosis_no - 3) / 4) * base**2))
        / (n - 1)
    )

    psr = _norm.cdf((base - rf) / sigma_sr)

    if annualize:
        # NOTE(review): scaling a probability by sqrt(365) can exceed 1.0 —
        # kept for upstream parity, but the result is not a probability.
        return psr * (365**0.5)
    return psr


def probabilistic_sharpe_ratio(
    series, rf=0.0, periods=365, annualize=False, smart=False
):
    """Probabilistic sharpe ratio (see probabilistic_ratio).

    Args:
        * series (Series, DataFrame): Input return series
        * rf (float): Risk-free rate expressed as a yearly (annualized) return
        * periods (int): Freq. of returns
        * annualize: return annualize sharpe?
        * smart: return smart sharpe ratio
    """
    return probabilistic_ratio(
        series, rf, base="sharpe", periods=periods, annualize=annualize, smart=smart
    )


def probabilistic_sortino_ratio(
    series, rf=0.0, periods=365, annualize=False, smart=False
):
    """Probabilistic sortino ratio (see probabilistic_ratio).

    Args:
        * series (Series, DataFrame): Input return series
        * rf (float): Risk-free rate expressed as a yearly (annualized) return
        * periods (int): Freq. of returns
        * annualize: return annualize sharpe?
        * smart: return smart sharpe ratio
    """
    return probabilistic_ratio(
        series, rf, base="sortino", periods=periods, annualize=annualize, smart=smart
    )


def probabilistic_adjusted_sortino_ratio(
    series, rf=0.0, periods=365, annualize=False, smart=False
):
    """Probabilistic adjusted sortino ratio (see probabilistic_ratio).

    Args:
        * series (Series, DataFrame): Input return series
        * rf (float): Risk-free rate expressed as a yearly (annualized) return
        * periods (int): Freq. of returns
        * annualize: return annualize sharpe?
        * smart: return smart sharpe ratio
    """
    return probabilistic_ratio(
        series,
        rf,
        base="adjusted_sortino",
        periods=periods,
        annualize=annualize,
        smart=smart,
    )


def treynor_ratio(returns, benchmark, periods=365.0, rf=0.0):
    """
    Calculates the Treynor ratio: excess total compounded return per
    unit of beta versus the benchmark.

    Args:
        * returns (Series, DataFrame): Input return series
        * benchmark (String, Series, DataFrame): Benchmark to compare beta to
        * periods (int): Freq. of returns
        * rf (float): Risk-free rate expressed as a yearly (annualized) return
    """
    if isinstance(returns, _pd.DataFrame):
        returns = returns[returns.columns[0]]

    beta = greeks(returns, benchmark, periods=periods).to_dict().get("beta", 0)
    return 0 if beta == 0 else (comp(returns) - rf) / beta
551 | """ 552 | if len(returns) < 2: 553 | return _np.nan 554 | 555 | if required_return <= -1: 556 | return _np.nan 557 | 558 | returns = _utils._prepare_returns(returns, rf, periods) 559 | 560 | if periods == 1: 561 | return_threshold = required_return 562 | else: 563 | return_threshold = (1 + required_return) ** (1.0 / periods) - 1 564 | 565 | returns_less_thresh = returns - return_threshold 566 | numer = returns_less_thresh[returns_less_thresh > 0.0].sum().values[0] 567 | denom = -1.0 * returns_less_thresh[returns_less_thresh < 0.0].sum().values[0] 568 | 569 | if denom > 0.0: 570 | return numer / denom 571 | 572 | return _np.nan 573 | 574 | 575 | def gain_to_pain_ratio(returns, rf=0, resolution="D"): 576 | """ 577 | Jack Schwager's GPR. See here for more info: 578 | https://archive.is/wip/2rwFW 579 | """ 580 | returns = _utils._prepare_returns(returns, rf).resample(resolution).sum() 581 | downside = abs(returns[returns < 0].sum()) 582 | return returns.sum() / downside 583 | 584 | 585 | def cagr(returns, rf=0.0, compounded=True, periods=365): 586 | """ 587 | Calculates the communicative annualized growth return (CAGR) of access returns. 588 | The number of years is based on the actual calendar period covered by the returns. 589 | 590 | Args: 591 | returns: Series or DataFrame of returns 592 | rf: risk-free rate (ignored here) 593 | compounded: use compounded returns (default True) 594 | periods: (ignored for 'years' calculation in CAGR, kept for API compatibility with other metrics) 595 | Actual years are derived from the returns' index. 
596 | 597 | Returns: 598 | CAGR as a float (e.g., 0.132 for 13.2%) 599 | """ 600 | if not isinstance(returns, (_pd.Series, _pd.DataFrame)) or returns.empty: 601 | return _np.nan 602 | 603 | # Drop NaNs and get index 604 | if isinstance(returns, _pd.DataFrame): 605 | returns = returns.sort_index() 606 | idx = returns.dropna(how="all").index 607 | else: 608 | returns = returns.sort_index() 609 | idx = returns.dropna().index 610 | if len(idx) < 2: 611 | return _np.nan 612 | 613 | # Use total_seconds for accurate fractional years (works for both daily and intraday) 614 | delta = idx[-1] - idx[0] 615 | years = delta.total_seconds() / (365.25 * 24 * 60 * 60) 616 | if years <= 0: 617 | return _np.nan 618 | 619 | if compounded: 620 | if isinstance(returns, _pd.DataFrame): 621 | numeric_returns = returns.select_dtypes(include=[_np.number]) 622 | if numeric_returns.empty: 623 | return _np.nan if numeric_returns.shape[1] == 0 else _pd.Series([_np.nan] * numeric_returns.shape[1], index=numeric_returns.columns) 624 | valid = numeric_returns.loc[idx[0]:idx[-1]] 625 | total_return_factor = (valid + 1).prod() 626 | res = total_return_factor ** (1.0 / years) - 1 627 | else: 628 | if not _pd.api.types.is_numeric_dtype(returns): 629 | return _np.nan 630 | valid = returns.loc[idx[0]:idx[-1]] 631 | total_return_factor = (valid + 1).prod() 632 | res = total_return_factor ** (1.0 / years) - 1 633 | else: 634 | total = _utils._prepare_returns(returns, rf) 635 | total = total.sort_index() 636 | idx2 = total.dropna().index 637 | if len(idx2) < 2: 638 | return _np.nan 639 | delta2 = idx2[-1] - idx2[0] 640 | current_years = delta2.total_seconds() / (365.25 * 24 * 60 * 60) 641 | if current_years <= 0: 642 | return _np.nan 643 | if isinstance(total, _pd.DataFrame): 644 | total_return = total.loc[idx2[0]:idx2[-1]].sum(axis=0) 645 | else: 646 | total_return = total.loc[idx2[0]:idx2[-1]].sum() 647 | res = (total_return + 1.0) ** (1.0 / current_years) - 1 648 | 649 | if isinstance(returns, 
_pd.DataFrame): 650 | if not isinstance(res, _pd.Series): 651 | if _np.isscalar(res) and 'numeric_returns' in locals() and not numeric_returns.empty: 652 | res = _pd.Series([res] * len(numeric_returns.columns), index=numeric_returns.columns) 653 | elif 'numeric_returns' in locals() and not numeric_returns.empty: 654 | res = _pd.Series(res, index=numeric_returns.columns) 655 | else: 656 | res = _pd.Series(res, index=returns.columns if not isinstance(res, _pd.Series) else res.index) 657 | elif 'numeric_returns' in locals() and not numeric_returns.empty: 658 | res.index = numeric_returns.columns 659 | 660 | return res 661 | 662 | 663 | def rar(returns, rf=0.0, periods=365): 664 | """ 665 | Calculates the risk-adjusted return of access returns 666 | (CAGR / exposure. takes time into account.) 667 | 668 | If rf is non-zero, you must specify periods. 669 | In this case, rf is assumed to be expressed in yearly (annualized) terms 670 | """ 671 | returns = _utils._prepare_returns(returns, rf) 672 | return cagr(returns=returns, periods=periods) / exposure(returns) 673 | 674 | 675 | def skew(returns, prepare_returns=True): 676 | """ 677 | Calculates returns' skewness 678 | (the degree of asymmetry of a distribution around its mean) 679 | """ 680 | if prepare_returns: 681 | returns = _utils._prepare_returns(returns) 682 | return returns.skew() 683 | 684 | 685 | def kurtosis(returns, prepare_returns=True): 686 | """ 687 | Calculates returns' kurtosis 688 | (the degree to which a distribution peak compared to a normal distribution) 689 | """ 690 | if prepare_returns: 691 | returns = _utils._prepare_returns(returns) 692 | return returns.kurtosis() 693 | 694 | 695 | def calmar(returns, prepare_returns=True, periods=365): 696 | """Calculates the calmar ratio (CAGR% / MaxDD%)""" 697 | if prepare_returns: 698 | returns = _utils._prepare_returns(returns) 699 | cagr_ratio = cagr(returns=returns, periods=periods) 700 | max_dd = max_drawdown(returns) 701 | return cagr_ratio / abs(max_dd) 
702 | 703 | 704 | def ulcer_index(returns): 705 | """Calculates the ulcer index score (downside risk measurment)""" 706 | dd = to_drawdown_series(returns) 707 | return _np.sqrt(_np.divide((dd**2).sum(), returns.shape[0] - 1)) 708 | 709 | 710 | def ulcer_performance_index(returns, rf=0): 711 | """ 712 | Calculates the ulcer index score 713 | (downside risk measurment) 714 | """ 715 | return (comp(returns) - rf) / ulcer_index(returns) 716 | 717 | 718 | def upi(returns, rf=0): 719 | """Shorthand for ulcer_performance_index()""" 720 | return ulcer_performance_index(returns, rf) 721 | 722 | 723 | def serenity_index(returns, rf=0): 724 | """ 725 | Calculates the serenity index score 726 | (https://www.keyquant.com/Download/GetFile?Filename=%5CPublications%5CKeyQuant_WhitePaper_APT_Part1.pdf) 727 | """ 728 | dd = to_drawdown_series(returns) 729 | pitfall = -cvar(dd) / returns.std() 730 | return (returns.sum() - rf) / (ulcer_index(returns) * pitfall) 731 | 732 | 733 | def risk_of_ruin(returns, prepare_returns=True): 734 | """ 735 | Calculates the risk of ruin 736 | (the likelihood of losing all one's investment capital) 737 | """ 738 | if prepare_returns: 739 | returns = _utils._prepare_returns(returns) 740 | wins = win_rate(returns) 741 | return ((1 - wins) / (1 + wins)) ** len(returns) 742 | 743 | 744 | def ror(returns): 745 | """Shorthand for risk_of_ruin()""" 746 | return risk_of_ruin(returns) 747 | 748 | 749 | def value_at_risk(returns, sigma=1, confidence=0.95, prepare_returns=True): 750 | """ 751 | Calculats the daily value-at-risk 752 | (variance-covariance calculation with confidence n) 753 | """ 754 | if prepare_returns: 755 | returns = _utils._prepare_returns(returns) 756 | mu = returns.mean() 757 | sigma *= returns.std() 758 | 759 | if confidence > 1: 760 | confidence = confidence / 100 761 | 762 | return _norm.ppf(1 - confidence, mu, sigma) 763 | 764 | 765 | def var(returns, sigma=1, confidence=0.95, prepare_returns=True): 766 | """Shorthand for 
value_at_risk()""" 767 | return value_at_risk(returns, sigma, confidence, prepare_returns) 768 | 769 | 770 | def conditional_value_at_risk(returns, sigma=1, confidence=0.95, prepare_returns=True): 771 | """ 772 | Calculats the conditional daily value-at-risk (aka expected shortfall) 773 | quantifies the amount of tail risk an investment 774 | """ 775 | if prepare_returns: 776 | returns = _utils._prepare_returns(returns) 777 | var = value_at_risk(returns, sigma, confidence) 778 | c_var = returns[returns < var].values.mean() 779 | return c_var if ~_np.isnan(c_var) else var 780 | 781 | 782 | def cvar(returns, sigma=1, confidence=0.95, prepare_returns=True): 783 | """Shorthand for conditional_value_at_risk()""" 784 | return conditional_value_at_risk(returns, sigma, confidence, prepare_returns) 785 | 786 | 787 | def expected_shortfall(returns, sigma=1, confidence=0.95): 788 | """Shorthand for conditional_value_at_risk()""" 789 | return conditional_value_at_risk(returns, sigma, confidence) 790 | 791 | 792 | def tail_ratio(returns, cutoff=0.95, prepare_returns=True): 793 | """ 794 | Measures the ratio between the right 795 | (95%) and left tail (5%). 
796 | """ 797 | if prepare_returns: 798 | returns = _utils._prepare_returns(returns) 799 | return abs(returns.quantile(cutoff) / returns.quantile(1 - cutoff)) 800 | 801 | 802 | def payoff_ratio(returns, prepare_returns=True): 803 | """Measures the payoff ratio (average win/average loss)""" 804 | if prepare_returns: 805 | returns = _utils._prepare_returns(returns) 806 | return avg_win(returns) / abs(avg_loss(returns)) 807 | 808 | 809 | def win_loss_ratio(returns, prepare_returns=True): 810 | """Shorthand for payoff_ratio()""" 811 | return payoff_ratio(returns, prepare_returns) 812 | 813 | 814 | def profit_ratio(returns, prepare_returns=True): 815 | """Measures the profit ratio (win ratio / loss ratio)""" 816 | if prepare_returns: 817 | returns = _utils._prepare_returns(returns) 818 | wins = returns[returns >= 0] 819 | loss = returns[returns < 0] 820 | 821 | win_ratio = abs(wins.mean() / wins.count()) 822 | loss_ratio = abs(loss.mean() / loss.count()) 823 | try: 824 | return win_ratio / loss_ratio 825 | except Exception: 826 | return 0.0 827 | 828 | 829 | def profit_factor(returns, prepare_returns=True): 830 | """Measures the profit ratio (wins/loss)""" 831 | if prepare_returns: 832 | returns = _utils._prepare_returns(returns) 833 | return abs(returns[returns >= 0].sum() / returns[returns < 0].sum()) 834 | 835 | 836 | def cpc_index(returns, prepare_returns=True): 837 | """ 838 | Measures the cpc ratio 839 | (profit factor * win % * win loss ratio) 840 | """ 841 | if prepare_returns: 842 | returns = _utils._prepare_returns(returns) 843 | return profit_factor(returns) * win_rate(returns) * win_loss_ratio(returns) 844 | 845 | 846 | def common_sense_ratio(returns, prepare_returns=True): 847 | """Measures the common sense ratio (profit factor * tail ratio)""" 848 | if prepare_returns: 849 | returns = _utils._prepare_returns(returns) 850 | return profit_factor(returns) * tail_ratio(returns) 851 | 852 | 853 | def outlier_win_ratio(returns, quantile=0.99, 
prepare_returns=True): 854 | """ 855 | Calculates the outlier winners ratio 856 | 99th percentile of returns / mean positive return 857 | """ 858 | if prepare_returns: 859 | returns = _utils._prepare_returns(returns) 860 | return returns.quantile(quantile).mean() / returns[returns >= 0].mean() 861 | 862 | 863 | def outlier_loss_ratio(returns, quantile=0.01, prepare_returns=True): 864 | """ 865 | Calculates the outlier losers ratio 866 | 1st percentile of returns / mean negative return 867 | """ 868 | if prepare_returns: 869 | returns = _utils._prepare_returns(returns) 870 | return returns.quantile(quantile).mean() / returns[returns < 0].mean() 871 | 872 | 873 | def recovery_factor(returns, rf=0.0, prepare_returns=True): 874 | """Measures how fast the strategy recovers from drawdowns""" 875 | if prepare_returns: 876 | returns = _utils._prepare_returns(returns) 877 | total_returns = returns.sum() - rf 878 | max_dd = max_drawdown(returns) 879 | return abs(total_returns) / abs(max_dd) 880 | 881 | 882 | def risk_return_ratio(returns, prepare_returns=True): 883 | """ 884 | Calculates the return / risk ratio 885 | (sharpe ratio without factoring in the risk-free rate) 886 | """ 887 | if prepare_returns: 888 | returns = _utils._prepare_returns(returns) 889 | return returns.mean() / returns.std() 890 | 891 | 892 | def max_drawdown(prices): 893 | """Calculates the maximum drawdown""" 894 | prices = _utils._prepare_prices(prices) 895 | return (prices / prices.expanding(min_periods=0).max()).min() - 1 896 | 897 | 898 | def to_drawdown_series(returns): 899 | """Convert returns series to drawdown series""" 900 | prices = _utils._prepare_prices(returns) 901 | dd = prices / _np.maximum.accumulate(prices) - 1.0 902 | return dd.replace([_np.inf, -_np.inf, -0], 0) 903 | 904 | 905 | def drawdown_details(drawdown): 906 | """ 907 | Calculates drawdown details, including start/end/valley dates, 908 | duration, max drawdown and max dd for 99% of the dd period 909 | for every drawdown 
period 910 | """ 911 | 912 | def _drawdown_details(drawdown): 913 | # mark no drawdown 914 | no_dd = drawdown == 0 915 | 916 | # extract dd start dates, first date of the drawdown 917 | starts = ~no_dd & no_dd.shift(1) 918 | starts = list(starts[starts.values].index) 919 | 920 | # extract end dates, last date of the drawdown 921 | ends = no_dd & (~no_dd).shift(1) 922 | ends = ends.shift(-1, fill_value=False) 923 | ends = list(ends[ends.values].index) 924 | 925 | # no drawdown :) 926 | if not starts: 927 | return _pd.DataFrame( 928 | index=[], 929 | columns=( 930 | "start", 931 | "valley", 932 | "end", 933 | "days", 934 | "max drawdown", 935 | "99% max drawdown", 936 | ), 937 | ) 938 | 939 | # drawdown series begins in a drawdown 940 | if ends and starts[0] > ends[0]: 941 | starts.insert(0, drawdown.index[0]) 942 | 943 | # series ends in a drawdown fill with last date 944 | if not ends or starts[-1] > ends[-1]: 945 | ends.append(drawdown.index[-1]) 946 | 947 | # build dataframe from results 948 | data = [] 949 | for i, _ in enumerate(starts): 950 | dd = drawdown[starts[i] : ends[i]] 951 | clean_dd = -remove_outliers(-dd, 0.99) 952 | data.append( 953 | ( 954 | starts[i], 955 | dd.idxmin(), 956 | ends[i], 957 | (ends[i] - starts[i]).days + 1, 958 | dd.min() * 100, 959 | clean_dd.min() * 100, 960 | ) 961 | ) 962 | 963 | df = _pd.DataFrame( 964 | data=data, 965 | columns=( 966 | "start", 967 | "valley", 968 | "end", 969 | "days", 970 | "max drawdown", 971 | "99% max drawdown", 972 | ), 973 | ) 974 | df["days"] = df["days"].astype(int) 975 | df["max drawdown"] = df["max drawdown"].astype(float) 976 | df["99% max drawdown"] = df["99% max drawdown"].astype(float) 977 | 978 | df["start"] = df["start"].dt.strftime("%Y-%m-%d") 979 | df["end"] = df["end"].dt.strftime("%Y-%m-%d") 980 | df["valley"] = df["valley"].dt.strftime("%Y-%m-%d") 981 | 982 | return df 983 | 984 | if isinstance(drawdown, _pd.DataFrame): 985 | _dfs = {} 986 | for col in drawdown.columns: 987 | _dfs[col] = 
_drawdown_details(drawdown[col]) 988 | return _pd.concat(_dfs, axis=1) 989 | 990 | return _drawdown_details(drawdown) 991 | 992 | 993 | def kelly_criterion(returns, prepare_returns=True): 994 | """ 995 | Calculates the recommended maximum amount of capital that 996 | should be allocated to the given strategy, based on the 997 | Kelly Criterion (http://en.wikipedia.org/wiki/Kelly_criterion) 998 | """ 999 | if prepare_returns: 1000 | returns = _utils._prepare_returns(returns) 1001 | win_loss_ratio = payoff_ratio(returns) 1002 | win_prob = win_rate(returns) 1003 | lose_prob = 1 - win_prob 1004 | 1005 | return ((win_loss_ratio * win_prob) - lose_prob) / win_loss_ratio 1006 | 1007 | # Calculate the correlation to the benchmark 1008 | def benchmark_correlation(returns, benchmark, prepare_returns=True): 1009 | """Calculates the correlation to the benchmark""" 1010 | if prepare_returns: 1011 | returns = _utils._prepare_returns(returns) 1012 | return returns.corrwith(_utils._prepare_benchmark(benchmark, returns.index)) 1013 | 1014 | 1015 | # ==== VS. 
BENCHMARK ====


def r_squared(returns, benchmark, prepare_returns=True):
    """
    Measures the straight line fit of the equity curve.

    Returns the squared correlation (r^2) between strategy returns and
    benchmark returns, or 0 when a regression cannot be computed
    (constant returns, constant benchmark, or a single-date index).
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    # if all return values are identical, return 0 to avoid errors
    if len(_np.unique(_np.array(returns))) == 1:
        return 0
    # if all index (x) values are identical, linregress raises
    # "ValueError: Cannot calculate a linear regression if all x values
    # are identical" -- bail out early instead
    if len(_np.unique(_np.array(returns.index))) == 1:
        return 0

    # if all benchmark values are identical the fit is undefined too
    if len(_np.unique(_np.array(benchmark))) == 1:
        return 0

    _, _, r_val, _, _ = _linregress(
        returns, _utils._prepare_benchmark(benchmark, returns.index)
    )
    return r_val**2


def r2(returns, benchmark):
    """Shorthand for r_squared()"""
    return r_squared(returns, benchmark)


def information_ratio(returns, benchmark, prepare_returns=True):
    """
    Calculates the information ratio
    (basically the risk return ratio of the net profits).

    NOTE(review): if the strategy tracks the benchmark exactly,
    diff_rets.std() is 0 and this returns NaN/inf -- confirm callers
    tolerate that.
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
    diff_rets = returns - _utils._prepare_benchmark(benchmark, returns.index)

    return diff_rets.mean() / diff_rets.std()


def greeks(returns, benchmark, periods=365.0, prepare_returns=True):
    """
    Calculates alpha and beta of the portfolio vs. the benchmark.

    Beta is cov(returns, benchmark) / var(benchmark); alpha is the mean
    excess return annualized by multiplying with `periods`
    (default 365, i.e. calendar-day data).
    """
    # ----------------------------
    # data cleanup
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
    benchmark = _utils._prepare_benchmark(benchmark, returns.index)
    # ----------------------------

    # find covariance; matrix[0, 1] = cov(ret, bench), matrix[1, 1] = var(bench)
    matrix = _np.cov(returns, benchmark)
    beta = matrix[0, 1] / matrix[1, 1]

    # per-period alpha, then annualized by `periods`
    alpha = returns.mean() - beta * benchmark.mean()
    alpha = alpha * periods

    return _pd.Series(
        {
            "beta": beta,
            "alpha": alpha,
            # "vol": _np.sqrt(matrix[0, 0]) * _np.sqrt(periods)
        }
    ).fillna(0)


def rolling_greeks(returns, benchmark, periods=365, prepare_returns=True):
    """
    Calculates rolling alpha and beta of the portfolio over a
    `periods`-row window.

    NOTE(review): beta is rolling, but alpha is computed from the
    FULL-SAMPLE means of returns/benchmark (df["returns"].mean() is a
    scalar), so only the beta term varies over time -- confirm this is
    intended before relying on rolling alpha.
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
    df = _pd.DataFrame(
        data={
            "returns": returns,
            "benchmark": _utils._prepare_benchmark(benchmark, returns.index),
        }
    )
    df = df.fillna(0)
    # rolling beta = rolling corr * (std_ret / std_bench)
    corr = df.rolling(int(periods)).corr().unstack()["returns"]["benchmark"]
    std = df.rolling(int(periods)).std()
    beta = corr * std["returns"] / std["benchmark"]

    alpha = df["returns"].mean() - beta * df["benchmark"].mean()

    return _pd.DataFrame(index=returns.index, data={"beta": beta, "alpha": alpha})


def compare(
    returns,
    benchmark,
    aggregate=None,
    compounded=True,
    round_vals=None,
    prepare_returns=True,
):
    """
    Compare returns to benchmark on a
    day/week/month/quarter/year basis.

    Returns a DataFrame of percentage returns; for Series input it also
    adds a "Multiplier" (Returns/Benchmark) and a "Won" (+/-) column.
    """
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
    benchmark = _utils._prepare_benchmark(benchmark, returns.index)

    if isinstance(returns, _pd.Series):
        data = _pd.DataFrame(
            data={
                "Benchmark": _utils.aggregate_returns(benchmark, aggregate, compounded)
                * 100,
                "Returns": _utils.aggregate_returns(returns, aggregate, compounded)
                * 100,
            }
        )

        data["Multiplier"] = data["Returns"] / data["Benchmark"]
        data["Won"] = _np.where(data["Returns"] >= data["Benchmark"], "+", "-")
    elif isinstance(returns, _pd.DataFrame):
        # multi-strategy input: one "Returns_<i>" column per strategy column
        bench = {
            "Benchmark": _utils.aggregate_returns(benchmark, aggregate, compounded)
            * 100
        }
        strategy = {
            "Returns_" + str(i): _utils.aggregate_returns(
                returns[col], aggregate, compounded
            )
            * 100
            for i, col in enumerate(returns.columns)
        }
        data = _pd.DataFrame(data={**bench, **strategy})

    if round_vals is not None:
        return _np.round(data, round_vals)

    return data


def monthly_returns(returns, eoy=True, compounded=True, prepare_returns=True):
    """
    Calculates a Year x Month pivot table of returns (plus an optional
    EOY column with each year's total).
    """
    if isinstance(returns, _pd.DataFrame):
        warn(
            "Pandas DataFrame was passed (Series expected). "
            "Only first column will be used."
        )
        returns = returns.copy()
        returns.columns = map(str.lower, returns.columns)
        # prefer a "close" column when present, else fall back to the first
        if len(returns.columns) > 1 and "close" in returns.columns:
            returns = returns["close"]
        else:
            returns = returns[returns.columns[0]]

    if prepare_returns:
        returns = _utils._prepare_returns(returns)
    # keep the unaggregated series around for the EOY totals below
    original_returns = returns.copy()

    # group by month (bucketed on the 1st of each month)
    returns = _pd.DataFrame(
        _utils.group_returns(returns, returns.index.strftime("%Y-%m-01"), compounded)
    )

    returns.columns = ["Returns"]
    returns.index = _pd.to_datetime(returns.index)

    # get returnsframe
    returns["Year"] = returns.index.strftime("%Y")
    returns["Month"] = returns.index.strftime("%b")

    # make pivot table
    returns = returns.pivot(index="Year", columns="Month", values="Returns").fillna(0)

    # handle missing months
    for month in [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ]:
        if month not in returns.columns:
            returns.loc[:, month] = 0

    # order columns by month
    returns = returns[
        [
            "Jan",
            "Feb",
            "Mar",
            "Apr",
            "May",
            "Jun",
            "Jul",
            "Aug",
            "Sep",
            "Oct",
            "Nov",
            "Dec",
        ]
    ]

    if eoy:
        returns["eoy"] = _utils.group_returns(
            original_returns, original_returns.index.year, compounded=compounded
        ).values

    returns.columns = map(lambda x: str(x).upper(), returns.columns)
    returns.index.name = None

    return returns


# Calculate the romad (return/cagr over max drawdown) of a strategy
def romad(returns, periods=365, annualize=True, smart=False):
    """
    Calculates the romad (return/cagr over max drawdown) of a strategy
    Args:
        * returns (Series, DataFrame): Input return series
        * periods (int): Freq. of returns
        * annualize: return annualize sharpe?
        * smart: return smart sharpe ratio

    NOTE(review): if the series has no drawdown, max_drawdown is 0 and
    this divides by zero -- confirm callers guard against that.
    NOTE(review): `annualize` and `smart` are currently unused here.
    """
    return cagr(returns, periods=periods) / -max_drawdown(returns)
--------------------------------------------------------------------------------
/quantstats_lumi/utils.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019-2023 Ran Aroussi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io as _io
import datetime as _dt
import pandas as _pd
import numpy as _np
import yfinance as _yf
from . import stats as _stats
import inspect


def _mtd(df):
    """Slice df to month-to-date rows (index >= 1st of the current month)."""
    return df[df.index >= _dt.datetime.now().strftime("%Y-%m-01")]


def _qtd(df):
    """
    Slice df to quarter-to-date rows (index >= first day of the CURRENT
    quarter).

    Fixed: the previous loop used `date.month <= q` over [1, 4, 7, 10],
    which selected the NEXT quarter's start for any month that is not a
    quarter-start month (e.g. February/March mapped to April 1st),
    producing an empty quarter-to-date slice for 8 of 12 months.
    """
    date = _dt.datetime.now()
    # first month of the quarter containing `date`: 1, 4, 7 or 10
    quarter_start = 3 * ((date.month - 1) // 3) + 1
    return df[
        df.index >= _dt.datetime(date.year, quarter_start, 1).strftime("%Y-%m-%d")
    ]


def _ytd(df):
    """Slice df to year-to-date rows (index >= Jan 1st of the current year)."""
    return df[df.index >= _dt.datetime.now().strftime("%Y-01-01")]


def _pandas_date(df, dates):
    """Return the rows of df whose index is in `dates` (scalar or list)."""
    if not isinstance(dates, list):
        dates = [dates]
    return df[df.index.isin(dates)]


def _pandas_current_month(df):
    """Return the rows of df that fall within the current calendar month."""
    n = _dt.datetime.now()
    daterange = _pd.date_range(_dt.date(n.year, n.month, 1), n)
    return df[df.index.isin(daterange)]


def multi_shift(df, shift=3):
    """
    Get the last N rows relative to another row in pandas: returns a
    DataFrame with the original columns plus shifted copies suffixed
    with the shift amount (e.g. "close1", "close2", ...).

    Fixed: `_pd.concat(dfs, 1, sort=True)` passed `axis` positionally,
    which raises TypeError on pandas >= 2.0 (this project requires
    pandas >= 2.2) -- `axis` must now be passed by keyword.
    """
    if isinstance(df, _pd.Series):
        df = _pd.DataFrame(df)

    dfs = [df.shift(i) for i in _np.arange(shift)]
    for ix, dfi in enumerate(dfs[1:]):
        # suffix each shifted frame's column names with its shift amount
        dfs[ix + 1].columns = [str(col) for col in dfi.columns + str(ix + 1)]
    return _pd.concat(dfs, axis=1, sort=True)


def to_returns(prices, rf=0.0):
    """Calculates the simple arithmetic returns of a price series"""
    return _prepare_returns(prices, rf)


def to_prices(returns, base=1e5):
    """Converts returns series to price data (compounded from `base`)"""
    returns = returns.copy().fillna(0).replace([_np.inf, -_np.inf], float("NaN"))

    return base + base * _stats.compsum(returns)


def log_returns(returns, rf=0.0, nperiods=None):
    """Shorthand for to_log_returns"""
    return to_log_returns(returns, rf, nperiods)
def to_log_returns(returns, rf=0.0, nperiods=None):
    """Converts returns series to log returns"""
    returns = _prepare_returns(returns, rf, nperiods)
    try:
        return _np.log(returns + 1).replace([_np.inf, -_np.inf], float("NaN"))
    except Exception:
        # best-effort: fall back to 0.0 if log/replace fails
        return 0.0


def exponential_stdev(returns, window=30, is_halflife=False):
    """Returns series representing exponential volatility of returns"""
    returns = _prepare_returns(returns)
    # `window` is interpreted as a halflife when is_halflife=True,
    # otherwise as an ewm span
    halflife = window if is_halflife else None
    return returns.ewm(
        com=None, span=window, halflife=halflife, min_periods=window
    ).std()


def rebase(prices, base=100.0):
    """
    Rebase all series to a given initial base.
    This makes comparing/plotting different series together easier.
    Args:
        * prices: Expects a price series/dataframe
        * base (number): starting value for all series.
    """
    return prices.dropna() / prices.dropna().iloc[0] * base


def group_returns(returns, groupby, compounded=False):
    """Summarize returns
    group_returns(df, df.index.year)
    group_returns(df, [df.index.year, df.index.month])
    """
    if compounded:
        # geometric (compounded) aggregation per group
        return returns.groupby(groupby).apply(_stats.comp)
    # simple arithmetic sum per group
    return returns.groupby(groupby).sum()


def aggregate_returns(returns, period=None, compounded=True):
    """Aggregates returns based on date periods (day/week/month/quarter/year)"""
    if period is None or "day" in period:
        return returns
    index = returns.index

    if "month" in period:
        return group_returns(returns, index.month, compounded=compounded)

    if "quarter" in period:
        return group_returns(returns, index.quarter, compounded=compounded)

    if period == "YE" or any(x in period for x in ["year", "eoy", "yoy"]):
        return group_returns(returns, index.year, compounded=compounded)

    if "week" in period:
        return group_returns(returns, index.week, compounded=compounded)

    if "eow" in period or period == "W":
        return group_returns(returns, [index.year, index.week], compounded=compounded)

    if "eom" in period or period == "ME":
        return group_returns(returns, [index.year, index.month], compounded=compounded)

    if "eoq" in period or period == "QE":
        return group_returns(
            returns, [index.year, index.quarter], compounded=compounded
        )

    # non-string period: pass through as a raw pandas groupby key
    if not isinstance(period, str):
        return group_returns(returns, period, compounded)

    return returns


def to_excess_returns(returns, rf, nperiods=None):
    """
    Calculates excess returns by subtracting
    risk-free returns from total returns

    Args:
        * returns (Series, DataFrame): Returns
        * rf (float, Series, DataFrame): Risk-Free rate(s)
        * nperiods (int): Optional. If provided, will convert rf to different
            frequency using deannualize
    Returns:
        * excess_returns (Series, DataFrame): Returns - rf
    """
    if isinstance(rf, int):
        rf = float(rf)

    if not isinstance(rf, float):
        # rf is a Series/DataFrame: align it to the returns index
        rf = rf[rf.index.isin(returns.index)]

    if nperiods is not None:
        # deannualize: convert annual rate to per-period rate
        rf = _np.power(1 + rf, 1.0 / nperiods) - 1.0

    return returns - rf


def _prepare_prices(data, base=1.0):
    """
    Converts return data into prices + cleanup.

    Heuristic: a column whose min <= 0 or max < 1 is assumed to hold
    returns (not prices) and is compounded into a price series.
    """
    data = data.copy()
    if isinstance(data, _pd.DataFrame):
        for col in data.columns:
            if data[col].dropna().min() <= 0 or data[col].dropna().max() < 1:
                data[col] = to_prices(data[col], base)

    # is it returns?
    # elif data.min() < 0 and data.max() < 1:
    elif data.min() < 0 or data.max() < 1:
        data = to_prices(data, base)

    if isinstance(data, (_pd.DataFrame, _pd.Series)):
        data = data.fillna(0).replace([_np.inf, -_np.inf], float("NaN"))

    return data


def _prepare_returns(data, rf=0.0, nperiods=None):
    """
    Converts price data into returns + cleanup.

    Heuristic: a column with min >= 0 and max > 1 is assumed to hold
    prices and is converted via pct_change().
    """
    data = data.copy()
    # name of the calling function, used to skip the rf adjustment for
    # callers that handle rf themselves
    function = inspect.stack()[1][3]
    if isinstance(data, _pd.DataFrame):
        for col in data.columns:
            if data[col].dropna().min() >= 0 and data[col].dropna().max() > 1:
                data[col] = data[col].pct_change()
    elif data.min() >= 0 and data.max() > 1:
        data = data.pct_change()

    # cleanup data
    data = data.replace([_np.inf, -_np.inf], float("NaN"))

    if isinstance(data, (_pd.DataFrame, _pd.Series)):
        data = data.fillna(0).replace([_np.inf, -_np.inf], float("NaN"))
    unnecessary_function_calls = [
        "_prepare_benchmark",
        "cagr",
        "gain_to_pain_ratio",
        "rolling_volatility",
    ]

    if function not in unnecessary_function_calls:
        # NOTE(review): `rf > 0` assumes rf is a scalar; a Series rf would
        # make this comparison ambiguous -- confirm rf is always scalar here
        if rf > 0:
            return to_excess_returns(data, rf, nperiods)
    return data


def download_returns(ticker, period="max", proxy=None):
    """
    Download price history for `ticker` via yfinance and return simple
    returns of the Close column. `period` may be a yfinance period
    string or a DatetimeIndex (its first date becomes the start).
    """
    params = {
        "tickers": ticker,
        "proxy": proxy,
    }
    if isinstance(period, _pd.DatetimeIndex):
        params["start"] = period[0]
    else:
        params["period"] = period
    return _yf.download(**params)["Close"].pct_change()


def _prepare_benchmark(benchmark=None, period="max", rf=0.0, prepare_returns=True):
    """
    Fetch benchmark if ticker is provided, and pass through
    _prepare_returns()

    period can be options or (expected) _pd.DatetimeIndex range
    """
    if benchmark is None:
        return None

    if isinstance(benchmark, str):
        benchmark = download_returns(benchmark)

    elif isinstance(benchmark, _pd.DataFrame):
        # use the first column only
        benchmark = benchmark[benchmark.columns[0]].copy()

    if isinstance(period, _pd.DatetimeIndex) and set(period) != set(benchmark.index):

        # Adjust Benchmark to Strategy frequency
        benchmark_prices = to_prices(benchmark, base=1)
        new_index = _pd.date_range(start=period[0], end=period[-1], freq="D")
        benchmark = (
            benchmark_prices.reindex(new_index, method="bfill")
            .reindex(period)
            .pct_change()
            .fillna(0)
        )
        benchmark = benchmark[benchmark.index.isin(period)]

    # drop timezone info so it aligns with naive strategy indexes
    benchmark.index = benchmark.index.tz_localize(None)

    if prepare_returns:
        return _prepare_returns(benchmark.dropna(), rf=rf)
    return benchmark.dropna()


def _round_to_closest(val, res, decimals=None):
    """Round to closest resolution"""
    if decimals is None and "." in str(res):
        # infer decimals from the resolution's fractional part
        decimals = len(str(res).split(".")[1])
    return round(round(val / res) * res, decimals)


def _file_stream():
    """Returns a file stream"""
    return _io.BytesIO()


def _in_notebook(matplotlib_inline=False):
    """Identify environment (notebook, terminal, etc)"""
    try:
        shell = get_ipython().__class__.__name__
        if shell == "ZMQInteractiveShell":
            # Jupyter notebook or qtconsole
            if matplotlib_inline:
                # NOTE(review): .magic() is deprecated in IPython in favor
                # of .run_line_magic() -- confirm minimum IPython version
                get_ipython().magic("matplotlib inline")
            return True
        if shell == "TerminalInteractiveShell":
            # Terminal running IPython
            return False
        # Other type (?)
        return False
    except NameError:
        # Probably standard Python interpreter
        return False


def _count_consecutive(data):
    """Counts consecutive data (like cumsum() with reset on zeroes)"""

    def _count(data):
        # group id increments every time the value changes; cumcount
        # within the group counts the streak length
        return data * (data.groupby((data != data.shift(1)).cumsum()).cumcount() + 1)

    if isinstance(data, _pd.DataFrame):
        for col in data.columns:
            data[col] = _count(data[col])
        return data
    return _count(data)


def _score_str(val):
    """Returns + sign for positive values (used in plots)"""
    return ("" if "-" in val else "+") + str(val)


def make_index(
    ticker_weights, rebalance="1M", period="max", returns=None, match_dates=False
):
    """
    Makes an index out of the given tickers and weights.
    Optionally you can pass a dataframe with the returns.
    If returns is not given it try to download them with yfinance

    Args:
        * ticker_weights (Dict): A python dict with tickers as keys
            and weights as values
        * rebalance: Pandas resample interval or None for never
        * period: time period of the returns to be downloaded
        * returns (Series, DataFrame): Optional. Returns If provided,
            it will first check if returns for the given ticker are in
            this dataframe, if not it will try to download them with
            yfinance
    Returns:
        * index_returns (Series, DataFrame): Returns for the index
    """
    # Declare a returns variable
    index = None
    portfolio = {}

    # Iterate over weights
    for ticker in ticker_weights.keys():
        if (returns is None) or (ticker not in returns.columns):
            # Download the returns for this ticker, e.g. GOOG
            ticker_returns = download_returns(ticker, period)
        else:
            ticker_returns = returns[ticker]

        portfolio[ticker] = ticker_returns

    # index members time-series
    index = _pd.DataFrame(portfolio).dropna()

    if match_dates:
        # start from the first date where every member has a nonzero value
        index = index[max(index.ne(0).idxmax()) :]

    # no rebalance?
    if rebalance is None:
        for ticker, weight in ticker_weights.items():
            index[ticker] = weight * index[ticker]
        return index.sum(axis=1)

    last_day = index.index[-1]

    # rebalance marker
    # NOTE(review): "1M" is a deprecated pandas offset alias (use "ME" in
    # pandas >= 2.2), and strftime("%s") is a platform-specific directive
    # -- confirm both on the supported platforms/pandas versions
    rbdf = index.resample(rebalance).first()
    rbdf["break"] = rbdf.index.strftime("%s")

    # index returns with rebalance markers
    index = _pd.concat([index, rbdf["break"]], axis=1)

    # mark the first day of each rebalance period
    index["first_day"] = _pd.isna(index["break"]) & ~_pd.isna(index["break"].shift(1))
    index.loc[index.index[0], "first_day"] = True

    # multiply first day of each rebalance period by the weight
    for ticker, weight in ticker_weights.items():
        index[ticker] = _np.where(
            index["first_day"], weight * index[ticker], index[ticker]
        )

    # drop first marker
    index.drop(columns=["first_day"], inplace=True)

    # drop when all are NaN
    index.dropna(how="all", inplace=True)
    return index[index.index <= last_day].sum(axis=1)


def make_portfolio(returns, start_balance=1e5, mode="comp", round_to=None):
    """
    Calculates compounded value of portfolio.

    mode: "sum"/"cumsum" for arithmetic accumulation, "comp"/"compsum"
    for compounding, anything else for a fixed amount invested each day.
    """
    returns = _prepare_returns(returns)

    if mode.lower() in ["cumsum", "sum"]:
        p1 = start_balance + start_balance * returns.cumsum()
    elif mode.lower() in ["compsum", "comp"]:
        p1 = to_prices(returns, start_balance)
    else:
        # fixed amount every day
        comp_rev = (start_balance + start_balance * returns.shift(1)).fillna(
            start_balance
        ) * returns
        p1 = start_balance + comp_rev.cumsum()

    # add day before with starting balance
    p0 = _pd.Series(data=start_balance, index=p1.index + _pd.Timedelta(days=-1))[:1]

    portfolio = _pd.concat([p0, p1])

    if isinstance(returns, _pd.DataFrame):
        portfolio.iloc[:1, :] = start_balance
        # drop the helper column created by concatenating the scalar Series
        portfolio.drop(columns=[0], inplace=True)

    if round_to:
        portfolio = _np.round(portfolio, round_to)

    return portfolio


def _flatten_dataframe(df, set_index=None):
    """Dirty method for flattening multi-index dataframe (via CSV round-trip)"""
    s_buf = _io.StringIO()
    df.to_csv(s_buf)
    s_buf.seek(0)

    df = _pd.read_csv(s_buf)
    if set_index is not None:
        df.set_index(set_index, inplace=True)

    return df
--------------------------------------------------------------------------------
/quantstats_lumi/version.py:
--------------------------------------------------------------------------------
version = "1.0.1"
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
pandas>=2.2.0
numpy>=1.26.4
seaborn>=0.13.2
matplotlib>=3.0.0
scipy>=1.2.0
tabulate>=0.8.0
yfinance>=0.2.36
python-dateutil>=2.0
ipython>=8.22.2
pytest
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[bdist_wheel]
# This flag says that the code is written to work on both Python 2 and Python
# 3. If at all possible, it is good practice to do this. If you cannot, you
# will need to generate wheels for each Python version that you support.
# NOTE(review): universal=1 declares a py2+py3 wheel, but setup.py requires
# Python >= 3.6 -- this flag looks stale; confirm before the next release
universal=1
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

"""QuantStats: Portfolio analytics for quants
https://github.com/ranaroussi/quantstats
QuantStats performs portfolio profiling, to allow quants and
portfolio managers to understand their performance better,
by providing them with in-depth analytics and risk metrics.
"""

# from codecs import open
import io
from os import path

from setuptools import find_packages, setup

# --- get version ---
# NOTE(review): this assumes version.py contains exactly one line of the
# form `version = "x.y.z"`; any extra lines would corrupt the parse
version = "unknown"
with open("quantstats_lumi/version.py") as f:
    line = f.read().strip()
    version = line.replace("version = ", "").replace('"', "")
# --- /get version ---

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with io.open(path.join(here, "README.md"), encoding="utf-8") as f:  # Changed from README.rst
    long_description = f.read()

# requirements.txt is the single source of truth for install_requires
with io.open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
    requirements = [line.rstrip() for line in f]

setup(
    name="quantstats-lumi",
    version=version,
    description="Portfolio analytics for quants",
    long_description=long_description,
    long_description_content_type="text/markdown",  # Changed from text/x-rst
    url="https://github.com/Lumiwealth/quantstats_lumi",
    author="Robert Grzesik (Lumiwealth)",
    author_email="rob@lumiwealth.com",
    license="Apache Software License",
    # NOTE(review): requirements pin pandas>=2.2.0 / numpy>=1.26.4 which
    # need Python >= 3.9 -- this floor (and the 3.6-3.9 classifiers below)
    # look out of date; confirm before publishing
    python_requires='>=3.6',
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        # 'Development Status :: 4 - Beta',
        "Development Status :: 5 - Production/Stable",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Intended Audience :: Financial and Insurance Industry",
        "Intended Audience :: Science/Research",
        "Topic :: Office/Business :: Financial",
        "Topic :: Office/Business :: Financial :: Investment",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Mathematics",
        # 'Programming Language :: Python :: 3.5',
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    platforms=["any"],
    keywords="""quant algotrading algorithmic-trading quantitative-trading
    quantitative-analysis algo-trading visualization plotting""",
    packages=find_packages(exclude=["contrib", "docs", "tests", "examples"]),
    install_requires=requirements,
    entry_points={
        "console_scripts": [
            "sample=sample:main",
        ],
    },
    include_package_data=True,
    # ship the HTML report template inside the wheel
    package_data={
        "quantstats_lumi": ["report.html"],
    },
)