├── __init__.py ├── src ├── __init__.py ├── service │ ├── constraints │ │ ├── cash │ │ │ ├── __init__.py │ │ │ ├── cash_validator.py │ │ │ └── withdrawal_validator.py │ │ ├── drift │ │ │ └── __init__.py │ │ ├── trade │ │ │ ├── __init__.py │ │ │ ├── no_buy_validator.py │ │ │ ├── no_simultaneous_trade_validator.py │ │ │ └── min_notional_validator.py │ │ ├── holding_time │ │ │ ├── __init__.py │ │ │ ├── trading_day_lookup.py │ │ │ └── holding_time_validator.py │ │ ├── improvement │ │ │ └── __init__.py │ │ ├── restriction │ │ │ ├── __init__.py │ │ │ └── restriction_validator.py │ │ ├── __init__.py │ │ └── base_validator.py │ ├── objectives │ │ ├── drift │ │ │ └── __init__.py │ │ ├── taxes │ │ │ ├── __init__.py │ │ │ └── tax_optimization.py │ │ ├── factor_model │ │ │ └── __init__.py │ │ ├── cash_deployment │ │ │ ├── __init__.py │ │ │ └── cash_deployment.py │ │ ├── transaction_costs │ │ │ ├── __init__.py │ │ │ └── transaction_optimization.py │ │ └── __init__.py │ ├── helpers │ │ ├── gradient_descent_optimal_weights.py │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── logging_config.py │ │ ├── trade_netting.py │ │ ├── create_decision_vars.py │ │ └── enums.py │ ├── __init__.py │ ├── reports │ │ ├── __init__.py │ │ ├── actuals_report.py │ │ ├── drift_report.py │ │ └── gain_loss_report.py │ ├── types.py │ └── initializers │ │ ├── __init__.py │ │ ├── stock_restrictions.py │ │ ├── prices.py │ │ ├── tax_rates.py │ │ ├── spreads.py │ │ ├── closed_lots.py │ │ └── tax_lots.py └── solvers │ ├── __init__.py │ └── solver.py ├── tests ├── drift_refactor │ ├── __init__.py │ └── test_initialize_targets.py ├── example_oracle_inputs │ └── gain_loss.json ├── __init__.py ├── test_solver.py ├── test_no_simultaneous_buy_sell.py ├── test_cash_drag.py ├── test_small_trade_results.py └── test_onboarding.py ├── requirements.txt ├── _build ├── objects.inv ├── _static │ ├── file.png │ ├── plus.png │ ├── minus.png │ ├── css │ │ ├── fonts │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-normal.woff │ │ │ ├── lato-normal.woff2 │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ ├── lato-bold-italic.woff │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── lato-bold-italic.woff2 │ │ │ ├── lato-normal-italic.woff │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ ├── fontawesome-webfont.woff │ │ │ ├── fontawesome-webfont.woff2 │ │ │ └── lato-normal-italic.woff2 │ │ └── badge_only.css │ ├── fonts │ │ ├── Lato │ │ │ ├── lato-bold.eot │ │ │ ├── lato-bold.ttf │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-italic.eot │ │ │ ├── lato-italic.ttf │ │ │ ├── lato-italic.woff │ │ │ ├── lato-italic.woff2 │ │ │ ├── lato-regular.eot │ │ │ ├── lato-regular.ttf │ │ │ ├── lato-regular.woff │ │ │ ├── lato-bolditalic.eot │ │ │ ├── lato-bolditalic.ttf │ │ │ ├── lato-bolditalic.woff │ │ │ ├── lato-regular.woff2 │ │ │ └── lato-bolditalic.woff2 │ │ └── RobotoSlab │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ └── roboto-slab-v7-regular.woff2 │ ├── documentation_options.js │ ├── js │ │ ├── badge_only.js │ │ └── theme.js │ ├── _sphinx_javascript_frameworks_compat.js │ ├── doctools.js │ ├── pygments.css │ ├── language_data.js │ └── sphinx_highlight.js ├── .doctrees │ ├── index.doctree │ ├── environment.pickle │ └── modules │ │ ├── helpers.doctree 
│ │ ├── oracle.doctree │ │ ├── objectives.doctree │ │ ├── constraints.doctree │ │ ├── initializers.doctree │ │ ├── oracle_strategy.doctree │ │ └── optimization_types.doctree ├── .buildinfo ├── _sources │ └── modules │ │ ├── helpers.rst.txt │ │ ├── oracle.rst.txt │ │ ├── oracle_strategy.rst.txt │ │ ├── initializers.rst.txt │ │ ├── objectives.rst.txt │ │ └── constraints.rst.txt ├── search.html ├── _modules │ └── index.html └── py-modindex.html ├── docs ├── requirements.txt ├── modules │ ├── helpers.rst │ ├── oracle.rst │ ├── oracle_strategy.rst │ ├── initializers.rst │ ├── objectives.rst │ └── constraints.rst ├── Makefile ├── make.bat └── conf.py ├── debug.py ├── lambda_function.py ├── pyproject.toml ├── deploy ├── Dockerfile ├── .github └── workflows │ └── documentation.yml ├── LICENSE ├── .vscode └── launch.json └── .gitignore /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/drift_refactor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/constraints/cash/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/constraints/drift/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/constraints/trade/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/objectives/drift/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/objectives/taxes/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/example_oracle_inputs/gain_loss.json: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | pulp 3 | numpy 4 | -------------------------------------------------------------------------------- /src/service/constraints/holding_time/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/constraints/improvement/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/constraints/restriction/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
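The requirements.txt entries above (pandas, pulp, numpy) back the PuLP/CBC optimization core under src/solvers. A minimal usage sketch of that entry point, mirroring tests/test_solver.py shown later in this listing and assuming a local `cbc` binary as expected by src/solvers/solver.py:

```python
# Hypothetical smoke test for the solver wrapper (not a file in the repository).
import pulp

from src.solvers import solve_optimization_problem

# Build a tiny LP: minimize x + y subject to x + 2y >= 10.
prob = pulp.LpProblem("smoke_test", pulp.LpMinimize)
x = pulp.LpVariable("x", lowBound=0)
y = pulp.LpVariable("y", lowBound=0)
prob += x + y            # objective
prob += x + 2 * y >= 10  # constraint

status, objective_value = solve_optimization_problem(prob)
print(pulp.LpStatus[status], objective_value)  # expected: Optimal 5.0
```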
/src/service/objectives/factor_model/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/objectives/cash_deployment/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/objectives/transaction_costs/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/service/helpers/gradient_descent_optimal_weights.py: -------------------------------------------------------------------------------- 1 | import pandas as pd -------------------------------------------------------------------------------- /_build/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/objects.inv -------------------------------------------------------------------------------- /_build/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/file.png -------------------------------------------------------------------------------- /_build/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/plus.png -------------------------------------------------------------------------------- /_build/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/minus.png -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx>=8.2.3 2 | sphinx-rtd-theme>=3.0.2 3 | sphinx-autodoc-typehints>=1.25.2 -------------------------------------------------------------------------------- /_build/.doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/index.doctree -------------------------------------------------------------------------------- /_build/.doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/environment.pickle -------------------------------------------------------------------------------- /_build/.doctrees/modules/helpers.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/helpers.doctree -------------------------------------------------------------------------------- /_build/.doctrees/modules/oracle.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/oracle.doctree -------------------------------------------------------------------------------- /_build/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-bold.woff
--------------------------------------------------------------------------------
/_build/_static/css/fonts/lato-bold.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-bold.woff2
--------------------------------------------------------------------------------
/_build/_static/css/fonts/lato-normal.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-normal.woff
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-bold.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bold.eot
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bold.ttf
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-bold.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bold.woff
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-bold.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bold.woff2
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-italic.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-italic.eot
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-italic.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-italic.ttf
--------------------------------------------------------------------------------
/src/solvers/__init__.py:
--------------------------------------------------------------------------------
1 | from .solver import solve_optimization_problem
2 | 
3 | __all__ = [
4 |     "solve_optimization_problem",
5 | ]
--------------------------------------------------------------------------------
/_build/.doctrees/modules/objectives.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/objectives.doctree
--------------------------------------------------------------------------------
/_build/_static/css/fonts/lato-normal.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-normal.woff2
--------------------------------------------------------------------------------
/_build/_static/fonts/Lato/lato-italic.woff:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-regular.ttf -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-regular.woff -------------------------------------------------------------------------------- /_build/.doctrees/modules/constraints.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/constraints.doctree -------------------------------------------------------------------------------- /_build/.doctrees/modules/initializers.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/initializers.doctree -------------------------------------------------------------------------------- /_build/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /_build/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-bolditalic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-regular.woff2 -------------------------------------------------------------------------------- /_build/.doctrees/modules/oracle_strategy.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/oracle_strategy.doctree -------------------------------------------------------------------------------- /_build/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /_build/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /_build/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /_build/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /_build/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /_build/_static/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /_build/.doctrees/modules/optimization_types.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/.doctrees/modules/optimization_types.doctree -------------------------------------------------------------------------------- /_build/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /_build/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/Roboto-Slab-Regular.woff2
--------------------------------------------------------------------------------
/_build/_static/css/fonts/fontawesome-webfont.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/fontawesome-webfont.woff
--------------------------------------------------------------------------------
/_build/_static/css/fonts/fontawesome-webfont.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/fontawesome-webfont.woff2
--------------------------------------------------------------------------------
/_build/_static/css/fonts/lato-normal-italic.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/css/fonts/lato-normal-italic.woff2
--------------------------------------------------------------------------------
/src/service/constraints/__init__.py:
--------------------------------------------------------------------------------
1 | from src.service.constraints.constraints_manager import ConstraintsManager
2 | 
3 | __all__ = ['ConstraintsManager']
--------------------------------------------------------------------------------
/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot
--------------------------------------------------------------------------------
/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf
--------------------------------------------------------------------------------
/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff
--------------------------------------------------------------------------------
/src/service/__init__.py:
--------------------------------------------------------------------------------
1 | from .oracle_strategy import OracleStrategy
2 | from .oracle import Oracle
3 | 
4 | __all__ = [
5 |     "Oracle",
6 |     "OracleStrategy"
7 | ]
--------------------------------------------------------------------------------
/src/service/helpers/__init__.py:
--------------------------------------------------------------------------------
1 | from src.service.helpers.max_withdrawal import calculate_max_withdrawal
2 | 
3 | __all__ = [
4 |     "calculate_max_withdrawal"
5 | ]
--------------------------------------------------------------------------------
/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2
--------------------------------------------------------------------------------
/_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- /_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/doublehq/oracle/HEAD/_build/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /debug.py: -------------------------------------------------------------------------------- 1 | import json 2 | from src.service.oracle import Oracle 3 | 4 | event = json.load(open("debug.json")) 5 | response = Oracle.process_lambda_event(event) 6 | print(json.dumps(response, indent=2)) 7 | -------------------------------------------------------------------------------- /_build/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file records the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 53197fb501dae66aefc6e0241fe9d354 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /src/service/helpers/constants.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from .logging_config import configure_logging 3 | 4 | # Configure logging 5 | configure_logging() 6 | 7 | # Create logger 8 | logger = logging.getLogger() 9 | 10 | CASH_CUSIP_ID = "_CASH_123" 11 | CASH_SYMBOL = "_CASH_" -------------------------------------------------------------------------------- /src/service/objectives/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Objectives module for Oracle optimization. 3 | 4 | This module contains classes and functions related to managing objective terms 5 | for portfolio optimization problems. 
6 | """
7 | 
8 | from src.service.objectives.objective_manager import ObjectiveManager
9 | 
10 | __all__ = ['ObjectiveManager']
--------------------------------------------------------------------------------
/src/service/reports/__init__.py:
--------------------------------------------------------------------------------
1 | from src.service.reports.gain_loss_report import generate_gain_loss_report
2 | from src.service.reports.actuals_report import generate_actuals_report
3 | from src.service.reports.drift_report import generate_drift_report
4 | 
5 | __all__ = [
6 |     "generate_gain_loss_report",
7 |     "generate_actuals_report",
8 |     "generate_drift_report",
9 | ]
--------------------------------------------------------------------------------
/lambda_function.py:
--------------------------------------------------------------------------------
1 | from src.service.oracle import Oracle
2 | 
3 | def lambda_handler(event, context):
4 |     return {
5 |         "lambda": {
6 |             "function_version": (context.function_version if context else None),
7 |             "aws_request_id": (context.aws_request_id if context else None),
8 |         },
9 |         "oracle": Oracle.process_lambda_event(event),
10 |     }
11 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "oracle"
3 | version = "0.1.0"
4 | description = "Oracle Portfolio Optimizer from Double"
5 | authors = ["JJ Maxwell <jmaxwell@double.finance>"]
6 | readme = "README.md"
7 | license = "MIT"
8 | 
9 | [tool.poetry.dependencies]
10 | python = "^3.10"
11 | 
12 | 
13 | [build-system]
14 | requires = ["poetry-core"]
15 | build-backend = "poetry.core.masonry.api"
--------------------------------------------------------------------------------
/_build/_static/documentation_options.js:
--------------------------------------------------------------------------------
1 | const DOCUMENTATION_OPTIONS = {
2 |     VERSION: '0.1.0',
3 |     LANGUAGE: 'en',
4 |     COLLAPSE_INDEX: false,
5 |     BUILDER: 'html',
6 |     FILE_SUFFIX: '.html',
7 |     LINK_SUFFIX: '.html',
8 |     HAS_SOURCE: true,
9 |     SOURCELINK_SUFFIX: '.txt',
10 |     NAVIGATION_WITH_KEYS: false,
11 |     SHOW_SEARCH_SUMMARY: true,
12 |     ENABLE_SEARCH_SHORTCUTS: true,
13 | };
--------------------------------------------------------------------------------
/src/service/types.py:
--------------------------------------------------------------------------------
1 | """Type definitions for the service package."""
2 | from typing import TypeVar, Union, Tuple
3 | import pandas as pd
4 | from datetime import date, datetime
5 | 
6 | # Create a type variable for OracleStrategy
7 | OracleStrategy = TypeVar('OracleStrategy', bound='service.oracle_strategy.OracleStrategy')
8 | 
9 | # Define the return type for apply_trades_to_portfolio
10 | ApplyTradesReturn = Union[
11 |     Tuple[pd.DataFrame, float, pd.DataFrame],
12 |     OracleStrategy
13 | ]
--------------------------------------------------------------------------------
/docs/modules/helpers.rst:
--------------------------------------------------------------------------------
1 | Helpers
2 | =======
3 | 
4 | Enums
5 | -----
6 | 
7 | .. currentmodule:: service.helpers.enums
8 | 
9 | .. autoclass:: OracleOptimizationType
10 |    :members:
11 |    :undoc-members:
12 |    :show-inheritance:
13 | 
14 |    .. rubric:: Members
15 | 
16 |    ..
autosummary:: 17 | 18 | ~OracleOptimizationType.HOLD 19 | ~OracleOptimizationType.BUY_ONLY 20 | ~OracleOptimizationType.TAX_UNAWARE 21 | ~OracleOptimizationType.TAX_AWARE 22 | ~OracleOptimizationType.PAIRS_TLH 23 | ~OracleOptimizationType.DIRECT_INDEX -------------------------------------------------------------------------------- /_build/_sources/modules/helpers.rst.txt: -------------------------------------------------------------------------------- 1 | Helpers 2 | ======= 3 | 4 | Enums 5 | ----- 6 | 7 | .. currentmodule:: service.helpers.enums 8 | 9 | .. autoclass:: OracleOptimizationType 10 | :members: 11 | :undoc-members: 12 | :show-inheritance: 13 | 14 | .. rubric:: Members 15 | 16 | .. autosummary:: 17 | 18 | ~OracleOptimizationType.HOLD 19 | ~OracleOptimizationType.BUY_ONLY 20 | ~OracleOptimizationType.TAX_UNAWARE 21 | ~OracleOptimizationType.TAX_AWARE 22 | ~OracleOptimizationType.PAIRS_TLH 23 | ~OracleOptimizationType.DIRECT_INDEX -------------------------------------------------------------------------------- /docs/modules/oracle.rst: -------------------------------------------------------------------------------- 1 | Oracle 2 | ====== 3 | 4 | .. currentmodule:: service.oracle 5 | 6 | .. autoclass:: Oracle 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | :special-members: __init__ 11 | 12 | .. rubric:: Methods Summary 13 | 14 | .. autosummary:: 15 | 16 | ~Oracle.__init__ 17 | ~Oracle.add_strategy 18 | ~Oracle.set_restrictions 19 | ~Oracle.initialize_wash_sale_restrictions 20 | ~Oracle.compute_optimal_trades_for_all_strategies 21 | ~Oracle.to_dict 22 | ~Oracle.from_dict 23 | ~Oracle.process_lambda_event -------------------------------------------------------------------------------- /_build/_sources/modules/oracle.rst.txt: -------------------------------------------------------------------------------- 1 | Oracle 2 | ====== 3 | 4 | .. currentmodule:: service.oracle 5 | 6 | .. autoclass:: Oracle 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | :special-members: __init__ 11 | 12 | .. rubric:: Methods Summary 13 | 14 | .. autosummary:: 15 | 16 | ~Oracle.__init__ 17 | ~Oracle.add_strategy 18 | ~Oracle.set_restrictions 19 | ~Oracle.initialize_wash_sale_restrictions 20 | ~Oracle.compute_optimal_trades_for_all_strategies 21 | ~Oracle.to_dict 22 | ~Oracle.from_dict 23 | ~Oracle.process_lambda_event -------------------------------------------------------------------------------- /docs/modules/oracle_strategy.rst: -------------------------------------------------------------------------------- 1 | OracleStrategy 2 | ============== 3 | 4 | .. currentmodule:: service.oracle_strategy 5 | 6 | .. autoclass:: OracleStrategy 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | :special-members: __init__ 11 | 12 | .. rubric:: Methods Summary 13 | 14 | .. autosummary:: 15 | 16 | ~OracleStrategy.__init__ 17 | ~OracleStrategy.set_oracle 18 | ~OracleStrategy.compute_optimal_trades 19 | ~OracleStrategy.to_dict 20 | ~OracleStrategy.from_dict 21 | 22 | .. rubric:: Properties 23 | 24 | .. autosummary:: 25 | 26 | ~OracleStrategy.current_date 27 | ~OracleStrategy.gain_loss_report -------------------------------------------------------------------------------- /_build/_sources/modules/oracle_strategy.rst.txt: -------------------------------------------------------------------------------- 1 | OracleStrategy 2 | ============== 3 | 4 | .. currentmodule:: service.oracle_strategy 5 | 6 | .. 
autoclass:: OracleStrategy 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | :special-members: __init__ 11 | 12 | .. rubric:: Methods Summary 13 | 14 | .. autosummary:: 15 | 16 | ~OracleStrategy.__init__ 17 | ~OracleStrategy.set_oracle 18 | ~OracleStrategy.compute_optimal_trades 19 | ~OracleStrategy.to_dict 20 | ~OracleStrategy.from_dict 21 | 22 | .. rubric:: Properties 23 | 24 | .. autosummary:: 25 | 26 | ~OracleStrategy.current_date 27 | ~OracleStrategy.gain_loss_report -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import importlib.util 4 | 5 | # Get the directory containing this __init__.py file 6 | test_dir = os.path.dirname(os.path.abspath(__file__)) 7 | 8 | # Find all test_*.py files 9 | test_files = glob.glob(os.path.join(test_dir, "test_*.py")) 10 | 11 | # Import each test module 12 | for test_file in test_files: 13 | # Get the module name from the file path 14 | module_name = os.path.splitext(os.path.basename(test_file))[0] 15 | 16 | # Import the module 17 | spec = importlib.util.spec_from_file_location(module_name, test_file) 18 | module = importlib.util.module_from_spec(spec) 19 | spec.loader.exec_module(module) 20 | 21 | # Add it to this package's globals 22 | globals()[module_name] = module 23 | -------------------------------------------------------------------------------- /deploy: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | ENV="${ENV:-dev}" 4 | VERSION="$(date -uI)-$(git rev-parse --short HEAD)" 5 | BUILD_IMAGE="oracle/$ENV:$VERSION" 6 | ECR_ACCOUNT_ID="341780365223" 7 | ECR_REGION="us-east-1" 8 | ECR_URI="$ECR_ACCOUNT_ID.dkr.ecr.$ECR_REGION.amazonaws.com" 9 | ECR_IMAGE="$ECR_URI/$BUILD_IMAGE" 10 | FUNCTION="oracle-$ENV" 11 | 12 | aws sso login 13 | aws ecr get-login-password --region $ECR_REGION | docker login --username AWS --password-stdin $ECR_URI 14 | docker buildx build --platform linux/arm64 --provenance=false --build-arg VERSION=$VERSION -t $BUILD_IMAGE . 
15 | docker tag $BUILD_IMAGE $ECR_IMAGE
16 | docker push $ECR_IMAGE
17 | aws lambda update-function-code --function-name $FUNCTION --image-uri $ECR_IMAGE --publish
18 | aws lambda wait function-updated --function-name $FUNCTION
19 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM public.ecr.aws/lambda/python:3.13 AS build
2 | 
3 | RUN dnf install -y make git wget tar patch dos2unix pkg-config gcc gcc-c++ gcc-gfortran
4 | RUN wget https://raw.githubusercontent.com/coin-or/coinbrew/master/coinbrew
5 | RUN chmod u+x coinbrew
6 | RUN ./coinbrew fetch Cbc@2.10.12 --no-third-party
7 | RUN ./coinbrew build Cbc
8 | 
9 | FROM public.ecr.aws/lambda/python:3.13 AS runtime
10 | COPY --from=build ${LAMBDA_TASK_ROOT}/dist ${LAMBDA_TASK_ROOT}/dist
11 | ENV PATH="${PATH}:${LAMBDA_TASK_ROOT}/dist/bin"
12 | 
13 | COPY requirements.txt ${LAMBDA_TASK_ROOT}
14 | RUN pip install --no-cache-dir -r requirements.txt
15 | 
16 | COPY src/solvers/ ${LAMBDA_TASK_ROOT}/src/solvers/
17 | COPY src/service/ ${LAMBDA_TASK_ROOT}/src/service/
18 | 
19 | COPY lambda_function.py ${LAMBDA_TASK_ROOT}
20 | CMD ["lambda_function.lambda_handler"]
21 | 
22 | ARG VERSION="unknown"
23 | ENV VERSION=${VERSION}
24 | 
--------------------------------------------------------------------------------
/src/service/initializers/__init__.py:
--------------------------------------------------------------------------------
1 | from src.service.initializers.tax_lots import initialize_tax_lots
2 | from src.service.initializers.targets import initialize_targets
3 | from src.service.initializers.prices import initialize_prices
4 | from src.service.initializers.spreads import initialize_spreads
5 | from src.service.initializers.closed_lots import initialize_closed_lots
6 | from src.service.initializers.stock_restrictions import initialize_stock_restrictions
7 | from src.service.initializers.tax_rates import initialize_tax_rates
8 | from src.service.initializers.factor_model import initialize_factor_model
9 | 
10 | __all__ = [
11 |     'initialize_tax_lots',
12 |     'initialize_targets',
13 |     'initialize_prices',
14 |     'initialize_spreads',
15 |     'initialize_closed_lots',
16 |     'initialize_stock_restrictions',
17 |     'initialize_tax_rates',
18 |     'initialize_factor_model',
19 | ]
20 | 
--------------------------------------------------------------------------------
/docs/modules/initializers.rst:
--------------------------------------------------------------------------------
1 | Initializers
2 | ============
3 | 
4 | .. currentmodule:: service.initializers
5 | 
6 | The initializers module provides functions for initializing and validating various data structures used in the Oracle system.
7 | 
8 | Tax Lots
9 | --------
10 | 
11 | .. autofunction:: initialize_tax_lots
12 | 
13 | Targets
14 | -------
15 | 
16 | .. autofunction:: initialize_targets
17 | 
18 | Prices
19 | ------
20 | 
21 | .. autofunction:: initialize_prices
22 | 
23 | Spreads
24 | -------
25 | 
26 | .. autofunction:: initialize_spreads
27 | 
28 | Closed Lots
29 | -----------
30 | 
31 | .. autofunction:: initialize_closed_lots
32 | 
33 | Stock Restrictions
34 | ------------------
35 | 
36 | .. autofunction:: initialize_stock_restrictions
37 | 
38 | Tax Rates
39 | ---------
40 | 
41 | .. autofunction:: initialize_tax_rates
42 | 
43 | Factor Model
44 | ------------
45 | 
46 | ..
autofunction:: initialize_factor_model -------------------------------------------------------------------------------- /_build/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /_build/_sources/modules/initializers.rst.txt: -------------------------------------------------------------------------------- 1 | Initializers 2 | ============ 3 | 4 | .. currentmodule:: service.initializers 5 | 6 | The initializers module provides functions for initializing and validating various data structures used in the Oracle system. 7 | 8 | Tax Lots 9 | -------- 10 | 11 | .. autofunction:: initialize_tax_lots 12 | 13 | Targets 14 | ------- 15 | 16 | .. autofunction:: initialize_targets 17 | 18 | Prices 19 | ------ 20 | 21 | .. autofunction:: initialize_prices 22 | 23 | Spreads 24 | ------- 25 | 26 | .. autofunction:: initialize_spreads 27 | 28 | Closed Lots 29 | ----------- 30 | 31 | .. autofunction:: initialize_closed_lots 32 | 33 | Stock Restrictions 34 | ------------------ 35 | 36 | .. autofunction:: initialize_stock_restrictions 37 | 38 | Tax Rates 39 | --------- 40 | 41 | .. autofunction:: initialize_tax_rates 42 | 43 | Factor Model 44 | ------------ 45 | 46 | .. 
autofunction:: initialize_factor_model -------------------------------------------------------------------------------- /.github/workflows/documentation.yml: -------------------------------------------------------------------------------- 1 | name: documentation 2 | 3 | on: [push, pull_request, workflow_dispatch] 4 | 5 | permissions: 6 | contents: write 7 | 8 | jobs: 9 | docs: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: actions/setup-python@v5 14 | - name: Install dependencies 15 | run: | 16 | pip install --upgrade pip 17 | pip install -r docs/requirements.txt 18 | pip install -r requirements.txt 19 | - name: Build documentation 20 | run: | 21 | sphinx-build docs _build 22 | - name: Deploy to GitHub Pages 23 | uses: peaceiris/actions-gh-pages@v3 24 | if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} 25 | with: 26 | publish_branch: gh-pages 27 | github_token: ${{ secrets.GITHUB_TOKEN }} 28 | publish_dir: _build/ 29 | force_orphan: true -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /src/service/helpers/logging_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | def configure_logging(): 5 | """Configure logging to output to both file and console with appropriate formatting.""" 6 | 7 | # Create root logger 8 | root_logger = logging.getLogger() 9 | root_logger.setLevel(logging.INFO) 10 | 11 | # Create formatters 12 | console_formatter = logging.Formatter('%(message)s') 13 | file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 14 | 15 | # Console handler 16 | console_handler = logging.StreamHandler(sys.stdout) 17 | console_handler.setLevel(logging.INFO) 18 | console_handler.setFormatter(console_formatter) 19 | 20 | # # File handler 21 | # file_handler = logging.FileHandler('oracle.log') 22 | # file_handler.setLevel(logging.INFO) 23 | # file_handler.setFormatter(file_formatter) 24 | 25 | # Add handlers to root logger 26 | root_logger.addHandler(console_handler) 27 | # root_logger.addHandler(file_handler) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2025 Double Finance, Inc 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 
software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /tests/test_solver.py: -------------------------------------------------------------------------------- 1 | """Test the solver functionality in OracleStrategy.""" 2 | import unittest 3 | import pulp 4 | from src.solvers import solve_optimization_problem 5 | 6 | class TestSolver(unittest.TestCase): 7 | def test_simple_optimization(self): 8 | """Test that the solver can solve a simple optimization problem.""" 9 | try: 10 | # Create a simple optimization problem 11 | prob = pulp.LpProblem("TestOptimization", pulp.LpMinimize) 12 | 13 | # Add a simple objective 14 | x = pulp.LpVariable("x", lowBound=0) 15 | y = pulp.LpVariable("y", lowBound=0) 16 | prob += x + y 17 | 18 | # Add a constraint 19 | prob += x + 2*y >= 10 20 | 21 | status, objective_value = solve_optimization_problem(prob) 22 | 23 | # Check that the solver worked 24 | self.assertIsNotNone(status) 25 | self.assertEqual(pulp.LpStatus[status], "Optimal") 26 | self.assertIsNotNone(objective_value) 27 | self.assertAlmostEqual(objective_value, 5.0) 28 | self.assertAlmostEqual(x.value(), 0.0) 29 | self.assertAlmostEqual(y.value(), 5.0) 30 | except Exception as e: 31 | self.fail(f"Solver failed with exception: {str(e)}") 32 | 33 | -------------------------------------------------------------------------------- /src/service/constraints/base_validator.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict, Optional, Tuple 3 | 4 | class BaseValidator(ABC): 5 | """Base class for all constraint validators.""" 6 | 7 | def __init__(self, oracle_strategy): 8 | """Initialize validator with reference to OracleStrategy.""" 9 | self.strategy = oracle_strategy 10 | 11 | @abstractmethod 12 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 13 | """ 14 | Validate if a buy trade is allowed. 15 | 16 | Args: 17 | identifier: The security identifier 18 | quantity: The quantity to buy 19 | 20 | Returns: 21 | Tuple of (is_allowed, reason) 22 | - is_allowed: True if trade is allowed, False otherwise 23 | - reason: None if allowed, otherwise a string explaining why it's not allowed 24 | """ 25 | pass 26 | 27 | @abstractmethod 28 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 29 | """ 30 | Validate if a sell trade is allowed. 
31 | 32 | Args: 33 | tax_lot_id: The tax lot identifier 34 | quantity: The quantity to sell 35 | 36 | Returns: 37 | Tuple of (is_allowed, reason) 38 | - is_allowed: True if trade is allowed, False otherwise 39 | - reason: None if allowed, otherwise a string explaining why it's not allowed 40 | """ 41 | pass -------------------------------------------------------------------------------- /src/solvers/solver.py: -------------------------------------------------------------------------------- 1 | from shutil import which 2 | import pulp 3 | 4 | COIN_CMD_PATH = which("cbc") or "/opt/homebrew/opt/cbc/bin/cbc" 5 | 6 | def solve_optimization_problem(prob, time_limit=60, gap_rel=0.01, warm_start=True): 7 | """ 8 | Solve a PuLP optimization problem using the CBC solver with optimized parameters. 9 | 10 | Args: 11 | prob (pulp.LpProblem): The PuLP optimization problem to solve 12 | time_limit (int): Time limit in seconds 13 | gap_rel (float): Relative optimality gap 14 | warm_start (bool): Whether to use warm start 15 | 16 | Returns: 17 | tuple: (status, objective_value) - The solution status and objective value 18 | """ 19 | try: 20 | # Convert gap_rel to string to avoid type issues 21 | gap_rel_str = str(gap_rel) 22 | 23 | # Configure the solver with optimized parameters 24 | solver = pulp.COIN_CMD( 25 | path=COIN_CMD_PATH, 26 | timeLimit=time_limit, 27 | warmStart=warm_start, 28 | options=[ 29 | 'allowableGap', gap_rel_str, 30 | 'maxSolutions', '1', 31 | 'maxNodes', '10000' 32 | 33 | ] 34 | ) 35 | 36 | # Solve the problem 37 | status = prob.solve(solver) 38 | objective_value = pulp.value(prob.objective) 39 | 40 | return status, objective_value 41 | 42 | except Exception as e: 43 | print(f"Error solving optimization problem: {str(e)}") 44 | return None, None 45 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | sys.path.insert(0, os.path.abspath('..')) 4 | sys.path.insert(0, os.path.abspath('../src')) 5 | 6 | # Mock dependencies that might not be available in CI 7 | autodoc_mock_imports = ['pulp', 'numpy', 'pandas'] 8 | 9 | # Project information 10 | project = 'Oracle' 11 | copyright = '2024' 12 | author = 'Snowball' 13 | 14 | # The full version, including alpha/beta/rc tags 15 | release = '0.1.0' 16 | 17 | # General configuration 18 | extensions = [ 19 | 'sphinx.ext.autodoc', 20 | 'sphinx.ext.napoleon', 21 | 'sphinx.ext.viewcode', 22 | 'sphinx.ext.autosummary', 23 | 'sphinx_rtd_theme', 24 | 'sphinx_autodoc_typehints', 25 | ] 26 | 27 | # Autodoc settings 28 | autodoc_default_options = { 29 | 'members': True, 30 | 'undoc-members': True, 31 | 'show-inheritance': True, 32 | 'special-members': '__init__', 33 | } 34 | 35 | # Autosummary settings 36 | autosummary_generate = True 37 | 38 | templates_path = ['_templates'] 39 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 40 | 41 | # HTML output options 42 | html_theme = 'sphinx_rtd_theme' 43 | html_static_path = ['_static'] 44 | 45 | # Napoleon settings 46 | napoleon_google_docstring = True 47 | napoleon_numpy_docstring = True 48 | napoleon_include_init_with_doc = False 49 | napoleon_include_private_with_doc = False 50 | napoleon_include_special_with_doc = True 51 | napoleon_use_admonition_for_examples = False 52 | napoleon_use_admonition_for_notes = False 53 | napoleon_use_admonition_for_references = False 54 | napoleon_use_ivar = False 55 | napoleon_use_param = True 56 | 
napoleon_use_rtype = True 57 | napoleon_type_aliases = None 58 | 59 | # Type hints settings 60 | autodoc_typehints = 'description' 61 | typehints_use_signature = True 62 | typehints_use_signature_return = True 63 | 64 | # Suppress warnings that can happen in CI 65 | suppress_warnings = ['autosummary.import_cycle'] -------------------------------------------------------------------------------- /docs/modules/objectives.rst: -------------------------------------------------------------------------------- 1 | Objectives 2 | ========== 3 | 4 | .. currentmodule:: service.objectives 5 | 6 | The objectives module handles all optimization objective terms used in Oracle's portfolio optimization. 7 | 8 | ObjectiveManager 9 | ---------------- 10 | 11 | .. autoclass:: service.objectives.objective_manager.ObjectiveManager 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | :special-members: __init__ 16 | 17 | Objective Components 18 | -------------------- 19 | 20 | Tax Impact 21 | ~~~~~~~~~~ 22 | .. automodule:: service.objectives.taxes.tax_optimization 23 | :members: 24 | :undoc-members: 25 | 26 | Tax Loss Harvesting 27 | ~~~~~~~~~~~~~~~~~~~ 28 | .. automodule:: service.objectives.taxes.tlh 29 | :members: 30 | :undoc-members: 31 | 32 | Drift Impact 33 | ~~~~~~~~~~~~ 34 | .. automodule:: service.objectives.drift.drift_optimization 35 | :members: 36 | :undoc-members: 37 | 38 | Transaction Costs 39 | ~~~~~~~~~~~~~~~~~ 40 | .. automodule:: service.objectives.transaction_costs.transaction_optimization 41 | :members: 42 | :undoc-members: 43 | 44 | Factor Model Impact 45 | ~~~~~~~~~~~~~~~~~~~ 46 | .. automodule:: service.objectives.factor_model.factor_model_optimization 47 | :members: 48 | :undoc-members: 49 | 50 | Cash Deployment 51 | ~~~~~~~~~~~~~~~ 52 | .. automodule:: service.objectives.cash_deployment.cash_deployment 53 | :members: 54 | :undoc-members: 55 | 56 | Normalization Constants 57 | ----------------------- 58 | 59 | The following normalization constants are used to scale different objective components: 60 | 61 | .. code-block:: python 62 | 63 | TAX_NORMALIZATION = 800 # Scale tax impact 64 | DRIFT_NORMALIZATION = 1.0 # Scale drift impact 65 | TRANSACTION_NORMALIZATION = 1.0 # Scale transaction costs 66 | FACTOR_MODEL_NORMALIZATION = 1.0 # Scale factor model impact 67 | CASH_DRAG_NORMALIZATION = 1.0 # Scale cash drag impact -------------------------------------------------------------------------------- /_build/_sources/modules/objectives.rst.txt: -------------------------------------------------------------------------------- 1 | Objectives 2 | ========== 3 | 4 | .. currentmodule:: service.objectives 5 | 6 | The objectives module handles all optimization objective terms used in Oracle's portfolio optimization. 7 | 8 | ObjectiveManager 9 | ---------------- 10 | 11 | .. autoclass:: service.objectives.objective_manager.ObjectiveManager 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | :special-members: __init__ 16 | 17 | Objective Components 18 | -------------------- 19 | 20 | Tax Impact 21 | ~~~~~~~~~~ 22 | .. automodule:: service.objectives.taxes.tax_optimization 23 | :members: 24 | :undoc-members: 25 | 26 | Tax Loss Harvesting 27 | ~~~~~~~~~~~~~~~~~~~ 28 | .. automodule:: service.objectives.taxes.tlh 29 | :members: 30 | :undoc-members: 31 | 32 | Drift Impact 33 | ~~~~~~~~~~~~ 34 | .. automodule:: service.objectives.drift.drift_optimization 35 | :members: 36 | :undoc-members: 37 | 38 | Transaction Costs 39 | ~~~~~~~~~~~~~~~~~ 40 | .. 
automodule:: service.objectives.transaction_costs.transaction_optimization 41 | :members: 42 | :undoc-members: 43 | 44 | Factor Model Impact 45 | ~~~~~~~~~~~~~~~~~~~ 46 | .. automodule:: service.objectives.factor_model.factor_model_optimization 47 | :members: 48 | :undoc-members: 49 | 50 | Cash Deployment 51 | ~~~~~~~~~~~~~~~ 52 | .. automodule:: service.objectives.cash_deployment.cash_deployment 53 | :members: 54 | :undoc-members: 55 | 56 | Normalization Constants 57 | ----------------------- 58 | 59 | The following normalization constants are used to scale different objective components: 60 | 61 | .. code-block:: python 62 | 63 | TAX_NORMALIZATION = 800 # Scale tax impact 64 | DRIFT_NORMALIZATION = 1.0 # Scale drift impact 65 | TRANSACTION_NORMALIZATION = 1.0 # Scale transaction costs 66 | FACTOR_MODEL_NORMALIZATION = 1.0 # Scale factor model impact 67 | CASH_DRAG_NORMALIZATION = 1.0 # Scale cash drag impact -------------------------------------------------------------------------------- /src/service/reports/actuals_report.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from src.service.helpers.constants import CASH_CUSIP_ID 3 | 4 | def generate_actuals_report( 5 | tax_lots: pd.DataFrame, 6 | prices: pd.DataFrame, 7 | cash: float 8 | ) -> pd.DataFrame: 9 | """ 10 | Calculate actual portfolio weights based on current tax lots and prices. 11 | 12 | Args: 13 | tax_lots: DataFrame of current tax lots 14 | prices: DataFrame of current prices 15 | cash: Current cash balance 16 | 17 | Returns: 18 | DataFrame with columns: 19 | - identifier: Security identifier 20 | - market_value: Current total value (sum of quantity * price across tax lots) 21 | - actual_weight: Current portfolio weight 22 | - quantity: Total quantity held (sum across tax lots) 23 | """ 24 | if tax_lots.empty and cash == 0: 25 | return pd.DataFrame(columns=['identifier', 'market_value', 'actual_weight', 'quantity']) 26 | 27 | # Calculate market value for each tax lot 28 | holdings = tax_lots.merge( 29 | prices[['identifier', 'price']], 30 | on='identifier', 31 | how='left', 32 | validate='many_to_one' 33 | ) 34 | holdings['market_value'] = holdings['quantity'] * holdings['price'] 35 | 36 | # Sum market values and quantities by identifier 37 | actuals = holdings.groupby('identifier').agg({ 38 | 'market_value': 'sum', 39 | 'quantity': 'sum' 40 | }).reset_index() 41 | 42 | # Add cash position (use quantity=1 for cash since it's a single position) 43 | actuals = pd.concat([ 44 | actuals, 45 | pd.DataFrame([{ 46 | 'identifier': CASH_CUSIP_ID, 47 | 'market_value': cash, 48 | 'quantity': 1.0 49 | }]) 50 | ], ignore_index=True) 51 | 52 | # Calculate weights 53 | total_value = actuals['market_value'].sum() 54 | actuals['actual_weight'] = actuals['market_value'] / total_value if total_value > 0 else 0 55 | 56 | return actuals[['identifier', 'market_value', 'actual_weight', 'quantity']] -------------------------------------------------------------------------------- /docs/modules/constraints.rst: -------------------------------------------------------------------------------- 1 | Constraints 2 | =========== 3 | 4 | .. currentmodule:: service.constraints 5 | 6 | The constraints module handles all optimization constraints used in Oracle's portfolio optimization. 7 | 8 | ConstraintsManager 9 | ------------------ 10 | 11 | .. 
autoclass:: service.constraints.constraints_manager.ConstraintsManager 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | :special-members: __init__ 16 | 17 | Constraint Types 18 | ---------------- 19 | 20 | Cash Constraints 21 | ~~~~~~~~~~~~~~~~ 22 | - Minimum cash balance maintenance 23 | - Cash flow validation for trades 24 | - Withdrawal requirements 25 | - Non-negative cash position enforcement 26 | 27 | Trade Constraints 28 | ~~~~~~~~~~~~~~~~~ 29 | - Minimum notional amount for trades 30 | - No simultaneous buys/sells of same security 31 | - Buy-only or sell-only restrictions based on strategy type 32 | - Trade size rounding requirements 33 | - Maximum position size limits 34 | 35 | Holding Time Constraints 36 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 37 | - Minimum holding period enforcement 38 | - Tax lot sale restrictions 39 | - Tax-aware trading constraints 40 | - Wash sale prevention rules 41 | 42 | Stock Restrictions 43 | ~~~~~~~~~~~~~~~~~~ 44 | - Security-specific trading restrictions 45 | - Asset class constraints 46 | - Position limits 47 | - Concentration limits 48 | 49 | Factor Model Constraints 50 | ~~~~~~~~~~~~~~~~~~~~~~~~ 51 | - Factor exposure limits 52 | - Tracking error constraints 53 | - Risk model alignment 54 | - Portfolio characteristic constraints 55 | 56 | Optimization Types 57 | ------------------ 58 | 59 | .. autoclass:: service.helpers.enums.OracleOptimizationType 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | 64 | Each optimization type enforces different constraints: 65 | 66 | - **HOLD**: No trading allowed 67 | - **BUY_ONLY**: Only buy trades permitted 68 | - **TAX_UNAWARE**: No tax-specific constraints 69 | - **TAX_AWARE**: Full tax awareness with wash sale prevention 70 | - **PAIRS_TLH**: Tax loss harvesting with paired replacements 71 | - **DIRECT_INDEX**: Factor model based optimization -------------------------------------------------------------------------------- /_build/_sources/modules/constraints.rst.txt: -------------------------------------------------------------------------------- 1 | Constraints 2 | =========== 3 | 4 | .. currentmodule:: service.constraints 5 | 6 | The constraints module handles all optimization constraints used in Oracle's portfolio optimization. 7 | 8 | ConstraintsManager 9 | ------------------ 10 | 11 | .. 
autoclass:: service.constraints.constraints_manager.ConstraintsManager 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | :special-members: __init__ 16 | 17 | Constraint Types 18 | ---------------- 19 | 20 | Cash Constraints 21 | ~~~~~~~~~~~~~~~~ 22 | - Minimum cash balance maintenance 23 | - Cash flow validation for trades 24 | - Withdrawal requirements 25 | - Non-negative cash position enforcement 26 | 27 | Trade Constraints 28 | ~~~~~~~~~~~~~~~~~ 29 | - Minimum notional amount for trades 30 | - No simultaneous buys/sells of same security 31 | - Buy-only or sell-only restrictions based on strategy type 32 | - Trade size rounding requirements 33 | - Maximum position size limits 34 | 35 | Holding Time Constraints 36 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 37 | - Minimum holding period enforcement 38 | - Tax lot sale restrictions 39 | - Tax-aware trading constraints 40 | - Wash sale prevention rules 41 | 42 | Stock Restrictions 43 | ~~~~~~~~~~~~~~~~~~ 44 | - Security-specific trading restrictions 45 | - Asset class constraints 46 | - Position limits 47 | - Concentration limits 48 | 49 | Factor Model Constraints 50 | ~~~~~~~~~~~~~~~~~~~~~~~~ 51 | - Factor exposure limits 52 | - Tracking error constraints 53 | - Risk model alignment 54 | - Portfolio characteristic constraints 55 | 56 | Optimization Types 57 | ------------------ 58 | 59 | .. autoclass:: service.helpers.enums.OracleOptimizationType 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | 64 | Each optimization type enforces different constraints: 65 | 66 | - **HOLD**: No trading allowed 67 | - **BUY_ONLY**: Only buy trades permitted 68 | - **TAX_UNAWARE**: No tax-specific constraints 69 | - **TAX_AWARE**: Full tax awareness with wash sale prevention 70 | - **PAIRS_TLH**: Tax loss harvesting with paired replacements 71 | - **DIRECT_INDEX**: Factor model based optimization -------------------------------------------------------------------------------- /src/service/constraints/trade/no_buy_validator.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple 2 | import pulp 3 | 4 | from src.service.constraints.base_validator import BaseValidator 5 | from src.service.helpers.constants import CASH_CUSIP_ID 6 | 7 | class NoBuyValidator(BaseValidator): 8 | """Validator for preventing buying of securities (typically used for liquidation).""" 9 | 10 | def __init__(self, oracle_strategy, exclude_cash: bool = True): 11 | """ 12 | Initialize NoBuyValidator. 13 | 14 | Args: 15 | oracle_strategy: Reference to the OracleStrategy instance 16 | exclude_cash: Whether to exclude cash from the no-buy constraint (default True) 17 | """ 18 | super().__init__(oracle_strategy) 19 | self.exclude_cash = exclude_cash 20 | 21 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 22 | """Check if buying a security is allowed.""" 23 | if self.exclude_cash and identifier == CASH_CUSIP_ID: 24 | return True, None 25 | return False, f"Buying {identifier} is not allowed during liquidation" 26 | 27 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 28 | """Selling is always allowed during liquidation.""" 29 | return True, None 30 | 31 | def add_to_problem( 32 | self, 33 | prob: pulp.LpProblem, 34 | buys: dict, 35 | exclude_cash: bool = True 36 | ) -> None: 37 | """ 38 | Add constraints to prevent buying any securities. 
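        Example (illustrative sketch; assumes ``validator`` is an initialized
        NoBuyValidator and "AAPL" is a hypothetical identifier)::

            prob = pulp.LpProblem("liquidation", pulp.LpMinimize)
            buys = {"AAPL": pulp.LpVariable("buy_AAPL", lowBound=0)}
            validator.add_to_problem(prob, buys)
            # adds the constraint buy_AAPL == 0, named "No_Buy_AAPL"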
39 | 40 | Args: 41 | prob: PuLP optimization problem 42 | buys: Dictionary of buy variables 43 | exclude_cash: Whether to exclude cash from the no-buy constraint (default True) 44 | """ 45 | for identifier, buy_var in buys.items(): 46 | # Skip cash if exclude_cash is True 47 | if exclude_cash and identifier == CASH_CUSIP_ID: 48 | continue 49 | # Add constraint to force buy variable to zero 50 | prob += buy_var == 0, f"No_Buy_{identifier}" -------------------------------------------------------------------------------- /tests/test_no_simultaneous_buy_sell.py: -------------------------------------------------------------------------------- 1 | """Test that optimization does not result in buys and sells of the same security.""" 2 | import unittest 3 | import pandas as pd 4 | import json 5 | from src.service.oracle import Oracle 6 | import pulp 7 | 8 | class TestNoSimultaneousBuySell(unittest.TestCase): 9 | def setUp(self): 10 | """Load dthomas data for testing.""" 11 | # Load dthomas data 12 | with open('tests/example_oracle_inputs/duplicate_trades.json', 'r') as f: 13 | self.data = json.load(f) 14 | 15 | # Create event dictionary in the format expected by process_lambda_event 16 | self.event = { 17 | "oracle": self.data["oracle"], 18 | "settings": self.data.get("settings", {}) 19 | } 20 | 21 | def test_no_simultaneous_buy_sell(self): 22 | """Test that no security is both bought and sold in the same optimization.""" 23 | # Process the event using Oracle.process_lambda_event 24 | response = Oracle.process_lambda_event(self.event) 25 | 26 | # Get the first strategy's results 27 | first_strategy_id = next(iter(response["results"])) 28 | strategy_result = response["results"][first_strategy_id] 29 | status = strategy_result["status"] 30 | should_trade = strategy_result["should_trade"] 31 | trades = pd.DataFrame(strategy_result["trades"]) 32 | 33 | # Verify optimization completed successfully or was feasible 34 | # (Allowing Feasible as well, in case the optimal solution isn't strictly found but is usable) 35 | self.assertEqual(status, pulp.LpStatusOptimal) 36 | 37 | if trades.empty: 38 | return 39 | 40 | # Group trades by security_id 41 | grouped_trades = trades.groupby('identifier')['action'].apply(set) 42 | 43 | # Check if any security has both 'buy' and 'sell' actions 44 | simultaneous_buy_sell = grouped_trades[grouped_trades.apply(lambda x: 'buy' in x and 'sell' in x)] 45 | 46 | self.assertTrue(simultaneous_buy_sell.empty, 47 | f"Found simultaneous buy and sell actions for securities: {list(simultaneous_buy_sell.index)}") 48 | 49 | print("Test passed: No security found with both buy and sell actions.") 50 | 51 | if __name__ == '__main__': 52 | unittest.main() -------------------------------------------------------------------------------- /src/service/constraints/holding_time/trading_day_lookup.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from datetime import date 3 | from pathlib import Path 4 | import json 5 | from typing import Dict, Optional 6 | 7 | class TradingDayLookup: 8 | """Class to handle trading day lookups from JSON data.""" 9 | 10 | def __init__(self, trading_days_df: pd.DataFrame = None): 11 | """ 12 | Initialize TradingDayLookup with either a DataFrame or load from JSON. 
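        Example (illustrative, hypothetical dates)::

            df = pd.DataFrame({
                "date": pd.to_datetime(["2024-01-02"]),
                "nearest_trading_day": pd.to_datetime(["2024-01-02"]),
                "forward_trading_day": pd.to_datetime(["2024-01-03"]),
                "backward_trading_day": pd.to_datetime(["2023-12-29"]),
            })
            lookup = TradingDayLookup(trading_days_df=df)
            row = lookup.get_trading_day(date(2024, 1, 2))  # matching row, or None if absent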
13 | 14 | Args: 15 | trading_days_df: Optional DataFrame containing trading day data 16 | """ 17 | self.trading_days_df = trading_days_df 18 | self._lookup_cache: Dict[str, pd.Series] = {} 19 | if trading_days_df is None: 20 | self._load_from_json() 21 | 22 | def _load_from_json(self): 23 | """Load trading days data from JSON file.""" 24 | json_path = Path(__file__).parent / 'trading_day.json' 25 | with open(json_path, 'r') as f: 26 | data = json.load(f) 27 | 28 | # Convert JSON to DataFrame 29 | self.trading_days_df = pd.DataFrame(data) 30 | # Convert date columns to datetime 31 | date_columns = ['nearest_trading_day', 'forward_trading_day', 'backward_trading_day', 'date'] 32 | for col in date_columns: 33 | self.trading_days_df[col] = pd.to_datetime(self.trading_days_df[col]) 34 | 35 | def get_trading_day(self, target_date: date) -> Optional[pd.Series]: 36 | """ 37 | Get trading day information for a given date. 38 | 39 | Args: 40 | target_date: Date to lookup 41 | 42 | Returns: 43 | Series containing trading day information or None if not found 44 | """ 45 | if self.trading_days_df is None: 46 | return None 47 | 48 | # Convert target_date to datetime for comparison and get string key for cache 49 | target_date_ts = pd.to_datetime(target_date) 50 | cache_key = target_date_ts.strftime('%Y-%m-%d') 51 | 52 | # Check cache first 53 | if cache_key in self._lookup_cache: 54 | return self._lookup_cache[cache_key] 55 | 56 | # Find the matching row 57 | matching_days = self.trading_days_df[self.trading_days_df['date'] == target_date_ts] 58 | 59 | if matching_days.empty: 60 | self._lookup_cache[cache_key] = None 61 | return None 62 | 63 | # Cache and return result 64 | result = matching_days.iloc[0] 65 | self._lookup_cache[cache_key] = result 66 | return result -------------------------------------------------------------------------------- /src/service/helpers/trade_netting.py: -------------------------------------------------------------------------------- 1 | from decimal import Decimal 2 | from operator import itemgetter, methodcaller 3 | import pandas as pd 4 | from typing import Dict, Tuple, Optional 5 | import numpy as np 6 | 7 | NETTED_TRADES_COLUMNS = ["identifier", "action", "quantity", "price", "tax_lot_id", "short_term_gain", "short_term_loss", "long_term_gain", "long_term_loss"] 8 | 9 | def net_trades_across_strategies(strategy_results: Dict[int, Tuple[Optional[int], bool, Dict, pd.DataFrame]], trade_rounding: int) -> pd.DataFrame: 10 | trades = pd.concat(strategy_trades for _, _, _, strategy_trades in strategy_results.values()).copy() 11 | if trades.empty: 12 | return pd.DataFrame(columns=NETTED_TRADES_COLUMNS) 13 | 14 | trades["quantity"] = trades["quantity"].map(lambda quantity: round(Decimal(quantity), trade_rounding)) 15 | trades["quantity"].mask(trades["action"] == "sell", -trades["quantity"], inplace=True) 16 | 17 | net = trades.groupby("identifier").agg({ 18 | "quantity": "sum", 19 | "price": "first", 20 | }) 21 | net = net[net["quantity"] != 0] 22 | net["action"] = np.where(net["quantity"] > 0, "buy", "sell") 23 | 24 | trades.sort_values("quantity", ascending=False, inplace=True) 25 | trades["cumqty"] = trades.groupby("identifier")["quantity"].transform(methodcaller("cumsum")) 26 | sells = trades[trades["cumqty"] < 0] 27 | sells["netqty"] = sells["quantity"].clip(lower=sells["cumqty"]) 28 | 29 | netpct = (sells["netqty"] / sells["quantity"]).astype(float) 30 | realized_gain = sells["gain_loss"].map(itemgetter("realized_gain")) * netpct 31 | gain_type = 
sells["gain_loss"].map(itemgetter("gain_type")) 32 | sells["short_term_gain"] = realized_gain.mask(gain_type != "short_term", 0).clip(lower=0) 33 | sells["short_term_loss"] = realized_gain.mask(gain_type != "short_term", 0).clip(upper=0).abs() 34 | sells["long_term_gain"] = realized_gain.mask(gain_type != "long_term", 0).clip(lower=0) 35 | sells["long_term_loss"] = realized_gain.mask(gain_type != "long_term", 0).clip(upper=0).abs() 36 | 37 | matching_lots = sells[["identifier", "tax_lot_id", "short_term_gain", "short_term_loss", "long_term_gain", "long_term_loss", "netqty"]] 38 | assert not matching_lots.duplicated(["identifier", "tax_lot_id"]).any(), "duplicate sell tax lots" 39 | 40 | net = net.merge(matching_lots, how="left", on="identifier") 41 | net["quantity"].mask(net["action"] == "sell", -net["netqty"], inplace=True) 42 | net["quantity"] = net["quantity"].astype(float) 43 | 44 | assert (net["quantity"] > 0).all(), "invalid netting" 45 | 46 | # Reset index to make identifier a column before returning 47 | return net[NETTED_TRADES_COLUMNS] 48 | -------------------------------------------------------------------------------- /src/service/initializers/stock_restrictions.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | 4 | 5 | def initialize_stock_restrictions(stock_restrictions: Optional[pd.DataFrame] = None) -> pd.DataFrame: 6 | """ 7 | Initialize and validate stock restrictions DataFrame. 8 | 9 | Args: 10 | stock_restrictions (Optional[pd.DataFrame]): DataFrame with columns: 11 | - identifier (str): Security identifier 12 | - can_buy (bool): Whether the security can be purchased 13 | - can_sell (bool): Whether the security can be sold 14 | 15 | Returns: 16 | Validated stock restrictions DataFrame with standardized columns: 17 | - identifier (str): Security identifier (uppercase) 18 | - can_buy (bool): Purchase permission flag 19 | - can_sell (bool): Sale permission flag 20 | 21 | Notes: 22 | - If no restrictions provided, returns empty DataFrame implying no restrictions 23 | - A security cannot have both can_buy and can_sell set to True 24 | - Identifiers are standardized to uppercase 25 | - Empty DataFrame implies all securities can be both bought and sold 26 | 27 | Raises: 28 | ValueError: If: 29 | - Required columns are missing 30 | - Both can_buy and can_sell are True for any security 31 | - Invalid data types in any column 32 | - Duplicate identifiers present 33 | """ 34 | if stock_restrictions is None or stock_restrictions.empty: 35 | return pd.DataFrame(columns=['identifier', 'can_buy', 'can_sell']) 36 | 37 | required_columns = {'identifier', 'can_buy', 'can_sell'} 38 | if not set(stock_restrictions.columns).issuperset(required_columns): 39 | raise ValueError(f"Stock restrictions DataFrame missing required columns: {required_columns}") 40 | 41 | # Ensure data types 42 | stock_restrictions = stock_restrictions.copy() 43 | stock_restrictions['identifier'] = stock_restrictions['identifier'].astype(str) 44 | stock_restrictions['can_buy'] = stock_restrictions['can_buy'].astype(bool) 45 | stock_restrictions['can_sell'] = stock_restrictions['can_sell'].astype(bool) 46 | # Validate that can_buy and can_sell are not both True for each stock 47 | invalid_restrictions = stock_restrictions['can_buy'] & stock_restrictions['can_sell'] 48 | if invalid_restrictions.any(): 49 | invalid_stocks = stock_restrictions.loc[invalid_restrictions, 'identifier'].tolist() 50 | raise ValueError( 51 | f"Found 
stocks that can be both bought and sold: {invalid_stocks}. " 52 | "At least one of can_buy or can_sell must be False." 53 | ) 54 | return stock_restrictions -------------------------------------------------------------------------------- /src/service/constraints/trade/no_simultaneous_trade_validator.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple, List 2 | import pandas as pd 3 | import pulp 4 | 5 | from src.service.constraints.base_validator import BaseValidator 6 | 7 | class NoSimultaneousTradeValidator(BaseValidator): 8 | """ 9 | Validator to prevent buying and selling the same security simultaneously. 10 | Note: Individual trade validation is not supported as this requires knowledge of all trades. 11 | """ 12 | 13 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 14 | """Cannot validate individual buys without knowledge of all trades.""" 15 | raise NotImplementedError("No simultaneous trade validation requires knowledge of all trades") 16 | 17 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 18 | """Cannot validate individual sells without knowledge of all trades.""" 19 | raise NotImplementedError("No simultaneous trade validation requires knowledge of all trades") 20 | 21 | def add_to_problem( 22 | self, 23 | prob: pulp.LpProblem, 24 | buys: dict, 25 | sells: dict, 26 | gain_loss: pd.DataFrame, 27 | all_identifiers: List[str] 28 | ) -> None: 29 | """Add no simultaneous buy/sell constraints to the optimization problem.""" 30 | # For each identifier, create a constraint that prevents buying and selling at the same time 31 | for identifier in all_identifiers: 32 | # Get all tax lots for this identifier 33 | identifier_lots = gain_loss[gain_loss['identifier'] == identifier] 34 | 35 | if identifier_lots.empty or identifier not in buys: 36 | continue 37 | 38 | # Sum up all sells for this identifier 39 | total_sells = pulp.lpSum( 40 | sells[lot['tax_lot_id']] 41 | for _, lot in identifier_lots.iterrows() 42 | if lot['tax_lot_id'] in sells 43 | ) 44 | 45 | # Add binary variable to indicate if we're buying 46 | is_buying = pulp.LpVariable(f"is_buying_{identifier}", cat='Binary') 47 | 48 | # If is_buying is 1, we can buy any amount up to a large number M 49 | # If is_buying is 0, we must buy 0 50 | M = 1e6 # A large number that's bigger than any reasonable trade 51 | prob += buys[identifier] <= M * is_buying, f"Buy_Indicator_{identifier}" 52 | 53 | # If is_buying is 1, we cannot sell (total_sells must be 0) 54 | # If is_buying is 0, we can sell any amount 55 | prob += total_sells <= M * (1 - is_buying), f"No_Simultaneous_{identifier}" -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Run Tests - local", 9 | "type": "debugpy", 10 | "request": "launch", 11 | "module": "unittest", 12 | "args": [ 13 | 14 | "discover", 15 | "-s", 16 | ".", 17 | "-p", 18 | // "test_factor_model.py" 19 | // "test_optimization_vars.py" 20 | "test_onboardi*.py" 21 | // "oracle.tests", 22 | // "oracle.tests", 23 | // "oracle.tests.test_min_notional.TestMinNotional", 24 | // "oracle.tests.test_wash_sale_restrictions_match_rowboat.TestWashSaleRestrictionsMatchRowboat", 25 | // "oracle.tests.test_multi_variable_optimizations.TestMultiVariableOptimizations", 26 | // "oracle.tests.test_single_variable_optimizations", 27 | // "oracle.tests.test_withdrawl", 28 | // "oracle.tests.test_optimization_vars.py", 29 | // "reporting.tests.target_actual_tests.TestTargetActuals.test_reporting_strategy", 30 | // "transfer.tests.acats_create_strategy_tests.TestACATSCreateStrategy", 31 | // "transfer.tests.transfer_handler_tests",// .TestScheduledTransfer.test_create_scheduled_transfer_with_assignment", 32 | // "transfer.tests.scheduled_transfer_tests.TestScheduledTransfer.test_create_scheduled_transfer_with_assignment", 33 | // "transfer.tests.acats_create_strategy_tests.TestACATSCreateStrategy", 34 | // "internal.tests.optimizer.rebalance_tests.RebalanceTests", 35 | // "-v 3", 36 | // "--noinput", 37 | // "--keepdb", 38 | ], 39 | "console": "integratedTerminal", 40 | "purpose": ["debug-test"] 41 | }, 42 | { 43 | "name": "Python: All Tests", 44 | "type": "debugpy", 45 | "request": "launch", 46 | "module": "unittest", 47 | "args": [ 48 | "discover", 49 | "-s", 50 | ".", 51 | "-p", 52 | "test_[!o]*.py", 53 | 54 | ], 55 | "console": "integratedTerminal", 56 | "purpose": ["debug-test"] 57 | }, 58 | { 59 | "name": "debug.json", 60 | "type": "debugpy", 61 | "request": "launch", 62 | "program": "debug.py", 63 | "env": { 64 | "VERSION": "debug" 65 | } 66 | } 67 | ] 68 | } 69 | -------------------------------------------------------------------------------- /src/service/initializers/prices.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Set 3 | from src.service.helpers.constants import CASH_CUSIP_ID 4 | 5 | def initialize_prices(prices: pd.DataFrame, all_identifiers: Set[str]) -> pd.DataFrame: 6 | """ 7 | Initialize and validate prices DataFrame. 
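    Example (illustrative, hypothetical ticker)::

        prices = pd.DataFrame({"identifier": ["AAPL", "cash"], "price": [190.0, 1.0]})
        validated = initialize_prices(prices, all_identifiers={"AAPL"})
        # "cash" is rewritten to CASH_CUSIP_ID; a missing price for "AAPL" would raise ValueError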
8 | 9 | Args: 10 | prices (pd.DataFrame): DataFrame with columns: 11 | - identifier (str): Security identifier 12 | - price (float): Current market price 13 | all_identifiers (Set[str]): Set of all identifiers that need prices 14 | 15 | Returns: 16 | Validated prices DataFrame with standardized columns: 17 | - identifier (str): Security identifier (uppercase) 18 | - price (float): Current market price (non-negative) 19 | 20 | Notes: 21 | - CASH_CUSIP_ID is automatically added with price of 1.0 if needed 22 | - All identifiers must have a valid price 23 | - Prices are validated to be non-negative 24 | - Identifiers are standardized to uppercase 25 | - Duplicate identifiers are not allowed 26 | 27 | Raises: 28 | ValueError: If: 29 | - Required columns are missing 30 | - Prices are missing for any required identifier 31 | - Negative prices are present 32 | - Invalid data types in any column 33 | - Duplicate identifiers present 34 | """ 35 | required_columns = {'identifier', 'price'} 36 | if not set(prices.columns).issuperset(required_columns): 37 | raise ValueError(f"Prices DataFrame missing required columns: {required_columns}") 38 | 39 | # Ensure data types 40 | prices = prices.copy() 41 | prices['identifier'] = prices['identifier'].astype(str) 42 | prices['price'] = pd.to_numeric(prices['price'], errors='raise') 43 | 44 | # Replace any 'CASH' or 'cash' identifiers with CASH_CUSIP_ID 45 | cash_mask = prices['identifier'].str.upper() == 'CASH' 46 | if cash_mask.any(): 47 | prices.loc[cash_mask, 'identifier'] = CASH_CUSIP_ID 48 | # If we have multiple CASH entries after replacement, keep only the first one 49 | prices = prices.drop_duplicates(subset=['identifier'], keep='first') 50 | 51 | # Validate no negative prices 52 | if (prices['price'] < 0).any(): 53 | raise ValueError("Prices contain negative values") 54 | 55 | # Add CASH_CUSIP_ID with price 1.0 if it doesn't exist 56 | if CASH_CUSIP_ID not in set(prices['identifier']): 57 | prices = pd.concat([ 58 | prices, 59 | pd.DataFrame([{'identifier': CASH_CUSIP_ID, 'price': 1.0}]) 60 | ], ignore_index=True) 61 | 62 | # Check we have prices for all identifiers 63 | missing_prices = all_identifiers - set(prices['identifier']) 64 | if missing_prices: 65 | raise ValueError(f"Missing prices for identifiers: {missing_prices}") 66 | 67 | return prices -------------------------------------------------------------------------------- /src/service/objectives/transaction_costs/transaction_optimization.py: -------------------------------------------------------------------------------- 1 | import pulp 2 | import pandas as pd 3 | from typing import Dict 4 | 5 | def get_buy_cost( 6 | quantity: float, 7 | spread: float, 8 | total_value: float, 9 | transaction_normalization: float = 1.0 10 | ) -> float: 11 | """ 12 | Calculate the buy cost component of the objective function. 13 | 14 | Args: 15 | identifier: String identifier of the security 16 | spreads: DataFrame with bid-ask spreads 17 | 18 | Returns: 19 | The total buy cost as a percentage of portfolio value 20 | """ 21 | 22 | return quantity * spread / total_value * transaction_normalization if total_value > 0 else 0 23 | 24 | def get_sell_cost( 25 | quantity: float, 26 | spread: float, 27 | total_value: float, 28 | transaction_normalization: float = 1.0 29 | ) -> float: 30 | """ 31 | Calculate the sell cost component of the objective function. 
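    Example (illustrative numbers)::

        # 100 shares, $0.02 per-share spread cost, $100,000 portfolio:
        get_sell_cost(100, 0.02, 100_000)  # -> 100 * 0.02 / 100_000 = 0.00002 (0.002% of portfolio)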
32 | 33 | Args: 34 | identifier: String identifier of the security 35 | spreads: DataFrame with bid-ask spreads 36 | 37 | Returns: 38 | The total sell cost as a percentage of portfolio value 39 | """ 40 | return quantity * spread / total_value * transaction_normalization if total_value > 0 else 0 41 | 42 | 43 | def calculate_transaction_costs( 44 | buys: Dict[str, pulp.LpVariable], 45 | sells: Dict[str, pulp.LpVariable], 46 | total_value: float, 47 | spreads: pd.DataFrame, 48 | transaction_normalization: float 49 | ) -> tuple[pulp.LpAffineExpression, float]: 50 | """ 51 | Calculate the transaction costs component of the objective function. 52 | Uses pre-calculated per_share_cost from spreads DataFrame. 53 | 54 | Args: 55 | buys: Dictionary of buy variables 56 | sells: Dictionary of sell variables 57 | drift: DataFrame with drift report 58 | gain_loss: DataFrame with gain/loss report 59 | total_value: Total portfolio value 60 | prices: DataFrame with current prices 61 | spreads: DataFrame with bid-ask spreads 62 | transaction_normalization: Normalization factor for transaction costs 63 | 64 | Returns: 65 | The normalized transaction costs expression 66 | """ 67 | 68 | transaction_impact = (pulp.lpSum([ 69 | buys[key] * get_buy_cost(1, spreads.loc[spreads['identifier'] == key, 'per_share_cost'].iloc[0], total_value, transaction_normalization) 70 | for key, value in buys.items() 71 | if key in spreads['identifier'].values 72 | ] + [ 73 | sells[key] * get_sell_cost(1, spreads.loc[spreads['identifier'] == key, 'per_share_cost'].iloc[0], total_value, transaction_normalization) 74 | for key, value in sells.items() 75 | if key in spreads['identifier'].values 76 | ])) 77 | 78 | # Apply normalization multiplier to transaction costs 79 | return transaction_impact -------------------------------------------------------------------------------- /src/service/initializers/tax_rates.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | 4 | def initialize_tax_rates(tax_rates: Optional[pd.DataFrame] = None) -> pd.DataFrame: 5 | """ 6 | Initialize and validate tax rates DataFrame. 7 | 8 | Args: 9 | tax_rates (Optional[pd.DataFrame]): DataFrame with columns: 10 | - gain_type (str): Type of gain ('short_term', 'long_term', 'qualified_dividend') 11 | - federal_rate (float): Federal tax rate as decimal (0 to 1) 12 | - state_rate (float): State tax rate as decimal (0 to 1) 13 | - total_rate (float): Combined federal and state rate as decimal (0 to 1) 14 | 15 | Returns: 16 | Validated DataFrame with standardized tax rates. 
If no rates provided, uses defaults: 17 | - Short-term gains: 41% (35% federal + 6% state) 18 | - Long-term gains: 26% (20% federal + 6% state) 19 | - Qualified dividends: 21% (15% federal + 6% state) 20 | 21 | Notes: 22 | Default rates are based on 75th percentile wealth users: 23 | - Federal ordinary income (short term): 35% 24 | - Federal long term capital gains: 20% 25 | - Federal qualified dividends: 15% 26 | - State tax rate: 6% (average for high tax states) 27 | 28 | Raises: 29 | ValueError: If: 30 | - Required columns are missing 31 | - Invalid gain types are present 32 | - Required gain types are missing 33 | - Tax rates are outside [0,1] range 34 | - Total rate doesn't match federal + state 35 | """ 36 | if tax_rates is None or tax_rates.empty: 37 | # Default to realistic rates for 75th percentile wealth users 38 | return pd.DataFrame({ 39 | 'gain_type': ['short_term', 'long_term', 'qualified_dividend'], 40 | 'federal_rate': [0.35, 0.20, 0.15], # 35% ordinary income, 20% LT gains, 15% qualified dividends 41 | 'state_rate': [0.06, 0.06, 0.06], # 6% state tax (average for high tax states) 42 | 'total_rate': [0.41, 0.26, 0.21] # Combined rates 43 | }) 44 | 45 | required_columns = {'gain_type', 'federal_rate', 'state_rate', 'total_rate'} 46 | if not set(tax_rates.columns).issuperset(required_columns): 47 | raise ValueError(f"Tax rates DataFrame missing required columns: {required_columns}") 48 | 49 | # Validate gain types 50 | valid_gain_types = {'short_term', 'long_term', 'qualified_dividend'} 51 | invalid_types = set(tax_rates['gain_type']) - valid_gain_types 52 | if invalid_types: 53 | raise ValueError(f"Invalid gain types found: {invalid_types}") 54 | 55 | missing_types = valid_gain_types - set(tax_rates['gain_type']) 56 | if missing_types: 57 | raise ValueError(f"Missing required gain types: {missing_types}") 58 | 59 | # Validate rates are between 0 and 1 60 | for col in ['federal_rate', 'state_rate', 'total_rate']: 61 | if not ((tax_rates[col] >= 0) & (tax_rates[col] <= 1)).all(): 62 | raise ValueError(f"Tax rates must be between 0 and 1, found invalid rates in {col}") 63 | 64 | return tax_rates -------------------------------------------------------------------------------- /src/service/constraints/cash/cash_validator.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple 2 | import pandas as pd 3 | import pulp 4 | 5 | from src.service.constraints.base_validator import BaseValidator 6 | from src.service.helpers.constants import CASH_CUSIP_ID 7 | 8 | class CashValidator(BaseValidator): 9 | """ 10 | Validator for cash-related constraints. 11 | Note: Individual trade validation is not supported as cash validation requires knowledge of all trades. 12 | """ 13 | 14 | def __init__(self, oracle_strategy, min_cash_amount: float): 15 | """ 16 | Initialize CashValidator. 
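        Example (illustrative numbers)::

            validator = CashValidator(oracle_strategy, min_cash_amount=500.0)
            # With strategy cash of 10,000 and no sell proceeds, add_to_problem() caps total
            # buy cost at 9,500; each dollar of sell proceeds raises that cap by a dollar.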
17 | 18 | Args: 19 | oracle_strategy: Reference to the OracleStrategy instance 20 | min_cash_amount: Minimum cash amount to maintain 21 | """ 22 | super().__init__(oracle_strategy) 23 | self.min_cash_amount = min_cash_amount 24 | 25 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 26 | """Cash validation requires knowledge of all trades.""" 27 | raise NotImplementedError("Cash validation requires knowledge of all trades") 28 | 29 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 30 | """Cash validation requires knowledge of all trades.""" 31 | raise NotImplementedError("Cash validation requires knowledge of all trades") 32 | 33 | def add_to_problem( 34 | self, 35 | prob: pulp.LpProblem, 36 | buys: dict, 37 | sells: dict, 38 | gain_loss: pd.DataFrame 39 | ) -> None: 40 | """ 41 | Add cash constraints to the optimization problem. 42 | This preserves the original add_cash_constraint functionality. 43 | 44 | Args: 45 | prob: PuLP optimization problem 46 | buys: Dictionary of buy variables 47 | sells: Dictionary of sell variables 48 | gain_loss: DataFrame with gain/loss report 49 | """ 50 | # Never buy CASH 51 | if CASH_CUSIP_ID in buys: 52 | prob += (buys[CASH_CUSIP_ID] == 0, "no_cash_buy") 53 | 54 | # Calculate total buy cost 55 | total_buy_cost = pulp.lpSum( 56 | buy_var * self.strategy.prices.loc[self.strategy.prices['identifier'] == identifier, 'price'].iloc[0] 57 | for identifier, buy_var in buys.items() 58 | ) 59 | 60 | # Calculate total sell proceeds using current_price from gain_loss 61 | total_sell_proceeds = pulp.lpSum( 62 | sells[lot['tax_lot_id']] * lot['current_price'] 63 | for _, lot in gain_loss.iterrows() 64 | ) 65 | 66 | # Ensure we don't exceed available cash plus proceeds from sales when buying 67 | prob += ( 68 | total_buy_cost <= self.strategy.cash + total_sell_proceeds, 69 | "cash_balance" 70 | ) 71 | 72 | # Add the minimum cash floor constraint 73 | prob += ( 74 | self.strategy.cash + total_sell_proceeds - total_buy_cost >= self.min_cash_amount, 75 | "min_cash_floor" 76 | ) -------------------------------------------------------------------------------- /_build/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li 
.fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions .rst-other-versions .rtd-current-item{font-weight:700}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}#flyout-search-form{padding:6px} -------------------------------------------------------------------------------- /src/service/initializers/spreads.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Set, Optional 3 | from src.service.helpers.constants import CASH_CUSIP_ID 4 | 5 | def initialize_spreads(spreads: pd.DataFrame | None, all_identifiers: Set[str], prices: pd.DataFrame) -> pd.DataFrame: 6 | """ 7 | Initialize spreads DataFrame. If no spreads provided, create default DataFrame 8 | with 0.0001 (1bps) spread for all identifiers. 
9 | 10 | Args: 11 | spreads (pd.DataFrame | None): Optional DataFrame with columns: 12 | - identifier (str): Security identifier 13 | - spread (float): Bid-ask spread as decimal (e.g., 0.0001 for 1bps) 14 | all_identifiers (Set[str]): Set of all identifiers that need spreads 15 | prices (pd.DataFrame): DataFrame with columns: 16 | - identifier (str): Security identifier 17 | - price (float): Current price used to calculate per_share_cost 18 | 19 | Returns: 20 | DataFrame with spreads for all identifiers: 21 | - identifier (str): Security identifier 22 | - spread (float): Bid-ask spread as decimal 23 | - per_share_cost (float): Pre-calculated transaction cost per share (price * spread) 24 | 25 | Notes: 26 | - Default spread of 0.0001 (1bps) is used for any missing identifiers 27 | - CASH_CUSIP_ID is automatically added with 0 spread if needed 28 | - All spreads are validated to be non-negative 29 | 30 | Raises: 31 | ValueError: If: 32 | - Spreads DataFrame is provided but missing required columns 33 | - Contains negative spread values 34 | - Contains duplicate identifiers 35 | """ 36 | if spreads is not None and not spreads.empty: 37 | required_columns = {'identifier', 'spread'} 38 | if not set(spreads.columns).issuperset(required_columns): 39 | raise ValueError(f"Spreads DataFrame missing required columns: {required_columns}") 40 | 41 | # Ensure data types 42 | spreads = spreads.copy() 43 | spreads['identifier'] = spreads['identifier'].astype(str) 44 | spreads['spread'] = pd.to_numeric(spreads['spread'], errors='raise') 45 | 46 | # Validate no negative spreads 47 | if (spreads['spread'] < 0).any(): 48 | raise ValueError("Spreads contain negative values") 49 | 50 | # Ensure all required identifiers have spreads 51 | missing_identifiers = all_identifiers - set(spreads['identifier']) 52 | if missing_identifiers: 53 | # Add default spreads for missing identifiers 54 | default_spreads = pd.DataFrame({ 55 | 'identifier': list(missing_identifiers), 56 | 'spread': 0.0001 # Default 1bps spread 57 | }) 58 | spreads = pd.concat([spreads, default_spreads], ignore_index=True) 59 | else: 60 | # Create default spreads DataFrame if none provided 61 | spreads = pd.DataFrame({ 62 | 'identifier': list(all_identifiers), 63 | 'spread': 0.0003 # Default 3bps spread 64 | }) 65 | 66 | # Join with prices to calculate per_share_cost 67 | spreads = spreads.merge( 68 | prices[['identifier', 'price']], 69 | on='identifier', 70 | how='left', 71 | validate='one_to_one' 72 | ) 73 | 74 | # Calculate per_share_cost 75 | spreads['per_share_cost'] = spreads['price'] * spreads['spread'] 76 | 77 | # Drop the price column as it's no longer needed 78 | spreads = spreads.drop(columns=['price']) 79 | 80 | return spreads -------------------------------------------------------------------------------- /src/service/initializers/closed_lots.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | 4 | def initialize_closed_lots(closed_lots: Optional[pd.DataFrame] = None) -> Optional[pd.DataFrame]: 5 | """ 6 | Initialize and validate closed lots DataFrame. 
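    Example (illustrative, hypothetical lot)::

        closed_lots = pd.DataFrame([{
            "identifier": "AAPL", "quantity": 10.0, "cost_basis": 1500.0,
            "date_acquired": "2023-01-03", "date_sold": "2024-02-01",
            "proceeds": 1900.0, "realized_gain": 400.0,  # must equal proceeds - cost_basis
        }])
        validated = initialize_closed_lots(closed_lots)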
7 | 8 | Args: 9 | closed_lots (Optional[pd.DataFrame]): DataFrame with columns: 10 | - identifier (str): Security identifier 11 | - quantity (float): Number of shares/units sold 12 | - cost_basis (float): Original purchase cost of the lot 13 | - date_acquired (datetime): Original purchase date 14 | - date_sold (datetime): Date the lot was sold 15 | - proceeds (float): Amount received from sale 16 | - realized_gain (float): Proceeds minus cost basis 17 | 18 | Returns: 19 | Optional[pd.DataFrame]: Validated closed lots DataFrame with standardized: 20 | - Data types for all columns 21 | - Uppercase identifiers 22 | - ISO format dates 23 | - Validated calculations 24 | Returns None if no closed lots provided 25 | 26 | Notes: 27 | - All numeric values must be non-negative 28 | - date_sold must be after date_acquired 29 | - realized_gain must equal proceeds minus cost_basis 30 | - quantity must be greater than 0 31 | 32 | Raises: 33 | ValueError: If: 34 | - Required columns are missing 35 | - Invalid data types in any column 36 | - Negative values in quantity/cost/proceeds 37 | - Invalid date relationships 38 | - Inconsistent gain calculations 39 | """ 40 | if closed_lots is None or closed_lots.empty: 41 | return None 42 | 43 | required_columns = { 44 | 'identifier', 'quantity', 'cost_basis', 'date_acquired', 45 | 'date_sold', 'proceeds', 'realized_gain' 46 | } 47 | if not set(closed_lots.columns).issuperset(required_columns): 48 | raise ValueError(f"Closed lots DataFrame missing required columns: {required_columns}") 49 | 50 | # Ensure data types 51 | closed_lots = closed_lots.copy() 52 | closed_lots['identifier'] = closed_lots['identifier'].astype(str) 53 | closed_lots['quantity'] = pd.to_numeric(closed_lots['quantity'], errors='raise') 54 | closed_lots['cost_basis'] = pd.to_numeric(closed_lots['cost_basis'], errors='raise') 55 | closed_lots['proceeds'] = pd.to_numeric(closed_lots['proceeds'], errors='raise') 56 | closed_lots['realized_gain'] = pd.to_numeric(closed_lots['realized_gain'], errors='raise') 57 | closed_lots['date_acquired'] = pd.to_datetime(closed_lots['date_acquired'], errors='raise') 58 | closed_lots['date_sold'] = pd.to_datetime(closed_lots['date_sold'], errors='raise') 59 | 60 | # Validate no negative quantities 61 | if (closed_lots['quantity'] < 0).any(): 62 | raise ValueError("Closed lots contain negative quantities") 63 | 64 | # Validate no negative cost basis 65 | if (closed_lots['cost_basis'] < 0).any(): 66 | raise ValueError("Closed lots contain negative cost basis") 67 | 68 | # Validate date_acquired is before date_sold 69 | if (closed_lots['date_acquired'] > closed_lots['date_sold']).any(): 70 | raise ValueError("Found closed lots where acquisition date is after sale date") 71 | 72 | # Validate realized_gain calculation 73 | calculated_gain = closed_lots['proceeds'] - closed_lots['cost_basis'] 74 | if not (abs(calculated_gain - closed_lots['realized_gain']) < 1e-6).all(): 75 | raise ValueError("Realized gain values do not match proceeds minus cost basis") 76 | 77 | return closed_lots -------------------------------------------------------------------------------- /tests/test_cash_drag.py: -------------------------------------------------------------------------------- 1 | """Test scenarios using dthomas data.""" 2 | import unittest 3 | from datetime import date 4 | import pandas as pd 5 | import json 6 | from src.service.oracle import Oracle 7 | from src.service.oracle_strategy import OracleStrategy 8 | from src.service.helpers.enums import OracleOptimizationType 9 | 
import pulp 10 | 11 | class TestCashDrag(unittest.TestCase): 12 | def setUp(self): 13 | """Load dthomas data for testing.""" 14 | # Load dthomas data 15 | with open('tests/example_oracle_inputs/cash_draggy.json', 'r') as f: 16 | self.data = json.load(f) 17 | 18 | # Create event dictionary in the format expected by process_lambda_event 19 | self.event = { 20 | "oracle": self.data["oracle"], 21 | "settings": self.data.get("settings", {}) 22 | } 23 | 24 | def test_cash_draggy_buy_only(self): 25 | """Test buy-only optimization with dthomas cash draggy data. 26 | 27 | Expected behavior: 28 | - Should result in ~$5250 of buys 29 | - No sells should occur 30 | - Drift cost should get worse 31 | - Factor cost should get better 32 | - Cash drag should get better 33 | - All cost changes should be roughly equal (+/- 0.1) 34 | """ 35 | # Process the event using Oracle.process_lambda_event 36 | response = Oracle.process_lambda_event(self.event) 37 | 38 | # Get the first strategy's results 39 | first_strategy_id = next(iter(response["results"])) 40 | strategy_result = response["results"][first_strategy_id] 41 | status = strategy_result["status"] 42 | should_trade = strategy_result["should_trade"] 43 | trades = pd.DataFrame(strategy_result["trades"]) 44 | trade_summary = strategy_result["trade_summary"] 45 | 46 | # Verify optimization completed successfully 47 | self.assertEqual(pulp.LpStatusOptimal, status) 48 | self.assertTrue(should_trade) 49 | 50 | # Verify no sells occurred 51 | sell_trades = trades[trades['action'] == 'sell'] 52 | self.assertEqual(len(sell_trades), 0, "Expected no sell trades") 53 | 54 | # Calculate total buy value 55 | buy_trades = trades[trades['action'] == 'buy'] 56 | computed_total_buy_value = buy_trades.apply(lambda x: x['quantity'] * x['price'], axis=1).sum() 57 | total_buy_value = buy_trades['trade_value'].sum() 58 | 59 | self.assertAlmostEqual(computed_total_buy_value, total_buy_value, 60 | msg="Computed total buy value does not match trade value") 61 | 62 | # Verify buy value is approximately $5250 63 | self.assertAlmostEqual(total_buy_value, 5250, delta=1000, 64 | msg=f"Expected total buy value to be ~$5250, got ${total_buy_value:.2f}") 65 | 66 | # Verify cost changes 67 | drift_cost_change = trade_summary['optimization_info']['after_optimization']['drift_cost'] - trade_summary['optimization_info']['before_optimization']['drift_cost'] 68 | factor_cost_change = trade_summary['optimization_info']['after_optimization']['factor_cost'] - trade_summary['optimization_info']['before_optimization']['factor_cost'] 69 | cash_drag_change = trade_summary['optimization_info']['after_optimization']['cash_drag'] - trade_summary['optimization_info']['before_optimization']['cash_drag'] 70 | 71 | # Verify drift cost got worse (positive change) 72 | self.assertGreater(drift_cost_change, 0, "Expected drift cost to get worse") 73 | 74 | # Verify factor cost got better (negative change) 75 | self.assertLess(factor_cost_change, 0, "Expected factor cost to get better") 76 | 77 | # Verify cash drag got better (negative change) 78 | self.assertLess(cash_drag_change, 0, "Expected cash drag to get better") 79 | 80 | 81 | if __name__ == '__main__': 82 | unittest.main() -------------------------------------------------------------------------------- /src/service/initializers/tax_lots.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from src.service.helpers.constants import CASH_CUSIP_ID 3 | 4 | def initialize_tax_lots(tax_lots:
pd.DataFrame) -> pd.DataFrame: 5 | """ 6 | Initialize and validate tax lots DataFrame. 7 | 8 | Args: 9 | tax_lots: DataFrame with columns: 10 | - tax_lot_id (str, optional): Unique identifier for each tax lot 11 | - identifier (str): Security identifier 12 | - quantity (float): Number of shares/units 13 | - cost_basis (float): Total cost basis of the lot 14 | - date or date_acquired (datetime): Purchase date of the lot 15 | 16 | Returns: 17 | Validated tax lots DataFrame with standardized columns and types: 18 | - tax_lot_id (str): Unique identifier for each tax lot (generated if not provided) 19 | - identifier (str): Security identifier (standardized to uppercase) 20 | - quantity (float): Number of shares/units (validated as non-negative) 21 | - cost_basis (float): Total cost basis of the lot (validated as non-negative) 22 | - date (datetime): Acquisition date (standardized column name from date or date_acquired) 23 | 24 | Raises: 25 | ValueError: If tax lots DataFrame is missing required columns or contains invalid data 26 | """ 27 | if tax_lots is None or tax_lots.empty: 28 | return pd.DataFrame(columns=['tax_lot_id', 'identifier', 'quantity', 'cost_basis', 'date']) 29 | 30 | # Check for either date or date_acquired 31 | if 'date_acquired' in tax_lots.columns and 'date' not in tax_lots.columns: 32 | tax_lots = tax_lots.rename(columns={'date_acquired': 'date'}) 33 | elif 'date' not in tax_lots.columns: 34 | raise ValueError("Tax lots DataFrame must have either 'date' or 'date_acquired' column") 35 | 36 | required_columns = {'identifier', 'quantity', 'cost_basis', 'date'} 37 | if not set(tax_lots.columns).issuperset(required_columns): 38 | raise ValueError(f"Tax lots DataFrame missing required columns: {required_columns}") 39 | 40 | # Ensure data types 41 | tax_lots = tax_lots.copy() 42 | tax_lots['identifier'] = tax_lots['identifier'].astype(str) 43 | 44 | # Replace any 'CASH' or 'cash' identifiers with CASH_CUSIP_ID 45 | cash_mask = tax_lots['identifier'].str.upper() == 'CASH' 46 | if cash_mask.any(): 47 | tax_lots.loc[cash_mask, 'identifier'] = CASH_CUSIP_ID 48 | 49 | tax_lots['quantity'] = pd.to_numeric(tax_lots['quantity'], errors='raise') 50 | tax_lots['cost_basis'] = pd.to_numeric(tax_lots['cost_basis'], errors='raise') 51 | tax_lots['date'] = pd.to_datetime(tax_lots['date'], errors='raise') 52 | 53 | # Handle tax lot IDs: preserve existing, fill nulls with unique IDs, or create new if column doesn't exist 54 | if 'tax_lot_id' not in tax_lots.columns: 55 | tax_lots['tax_lot_id'] = [f"lot_{i}_{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}" for i in range(len(tax_lots))] 56 | else: 57 | # Fill any null values with unique IDs 58 | null_mask = tax_lots['tax_lot_id'].isnull() 59 | if null_mask.any(): 60 | timestamp = pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f') 61 | null_indices = null_mask[null_mask].index 62 | tax_lots.loc[null_indices, 'tax_lot_id'] = [ 63 | f"lot_{i}_{timestamp}" for i in range(len(null_indices)) 64 | ] 65 | 66 | # Validate that all tax lot IDs are unique 67 | if tax_lots['tax_lot_id'].duplicated().any(): 68 | duplicated_ids = tax_lots.loc[tax_lots['tax_lot_id'].duplicated(), 'tax_lot_id'].tolist() 69 | raise ValueError(f"Found duplicate tax lot IDs: {duplicated_ids}. 
All tax lot IDs must be unique.") 70 | 71 | # Validate no negative quantities 72 | if (tax_lots['quantity'] < 0).any(): 73 | raise ValueError("Tax lots contain negative quantities") 74 | 75 | # Validate no negative cost basis 76 | if (tax_lots['cost_basis'] < 0).any(): 77 | raise ValueError("Tax lots contain negative cost basis") 78 | 79 | return tax_lots -------------------------------------------------------------------------------- /src/service/objectives/taxes/tax_optimization.py: -------------------------------------------------------------------------------- 1 | import pulp 2 | import pandas as pd 3 | from typing import Dict 4 | 5 | def get_tax_cost( 6 | quantity: float, 7 | per_share_tax_liability: float, 8 | total_value: float, 9 | tax_normalization: float = 1.0 10 | ) -> float: 11 | """ 12 | Calculate the tax cost component of the objective function. 13 | 14 | Args: 15 | quantity: Quantity of the security to sell 16 | per_share_tax_liability: Tax liability per share 17 | total_value: Total portfolio value 18 | tax_normalization: Normalization factor for tax impact 19 | Returns: 20 | The total tax cost as a percentage of portfolio value 21 | """ 22 | 23 | return (quantity * per_share_tax_liability) / total_value * tax_normalization if total_value > 0 else 0 24 | 25 | def calculate_tax_impact( 26 | prob: pulp.LpProblem, 27 | sells: Dict[str, pulp.LpVariable], 28 | gain_loss: pd.DataFrame, 29 | total_value: float, 30 | tax_normalization: float = 1.0, 31 | enforce_wash_sale_prevention: bool = True 32 | ) -> tuple[pulp.LpAffineExpression, float]: 33 | """ 34 | Calculate the tax impact component of the objective function and current tax score. 35 | Uses pre-calculated per_share_tax_liability from gain_loss report. 36 | Tax impact is expressed as a percentage of portfolio value to be comparable with drift. 37 | 38 | For each tax lot, we track: 39 | - Current tax liability = quantity * per_share_tax_liability 40 | - New tax liability = (quantity - sells) * per_share_tax_liability 41 | - The difference represents realized tax cost 42 | - Negative tax_realized values represent beneficial tax loss harvesting 43 | 44 | Args: 45 | prob: The optimization problem to add constraints to 46 | sells: Dictionary of sell variables 47 | gain_loss: DataFrame with gain/loss report 48 | total_value: Total portfolio value 49 | tax_normalization: Normalization factor for tax impact 50 | 51 | Returns: 52 | Tuple of: 53 | - Tax impact expression representing the change in tax liability 54 | - Current tax score (sum of unrealized gains as percentage of portfolio) 55 | """ 56 | tax_impacts = [] 57 | current_tax_score = 0 58 | 59 | # Calculate tax impact for each lot 60 | for _, lot in gain_loss.iterrows(): 61 | tax_lot_id = lot['tax_lot_id'] 62 | if tax_lot_id not in sells: 63 | continue 64 | 65 | quantity = lot['quantity'] 66 | per_share_tax = lot['per_share_tax_liability'] 67 | if per_share_tax < 0: 68 | # When wash sale prevention is enabled, reduce negative tax liability by 1/5th 69 | # to make tax loss harvesting less attractive. Otherwise ignore negative tax liability 70 | # to prevent any tax loss harvesting. 
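            # Illustrative numbers (not from the source): a lot with
            # per_share_tax_liability = -2.00 is treated as -0.40 per share when wash sale
            # prevention is enabled, and as 0 (so the lot is skipped) when it is disabled.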
71 | if enforce_wash_sale_prevention: 72 | per_share_tax = per_share_tax / 5 73 | else: 74 | per_share_tax = 0 75 | if per_share_tax == 0: 76 | continue 77 | 78 | # Calculate current tax liability for this lot 79 | current_lot_tax = quantity * per_share_tax 80 | current_tax_score += current_lot_tax / total_value 81 | 82 | # Create variable for realized tax (can be negative for tax loss harvesting) 83 | tax_realized = pulp.LpVariable(f"tax_realized_{tax_lot_id}") 84 | 85 | # Constraint: realized tax equals reduction in tax liability 86 | # new_tax_liability = (quantity - sells[tax_lot_id]) * per_share_tax 87 | # tax_realized = current_tax_liability - new_tax_liability 88 | # Scale the constraint by total_value to match units with drift 89 | prob += tax_realized == sells[tax_lot_id] * per_share_tax / total_value, f"tax_realized_{tax_lot_id}" 90 | 91 | # Add to total tax impact (no need to divide by total_value again since constraint is now scaled) 92 | tax_impacts.append(tax_realized * tax_normalization) 93 | 94 | # Sum all tax impacts 95 | total_tax_impact = pulp.lpSum(tax_impacts) 96 | 97 | return total_tax_impact, current_tax_score 98 |
-------------------------------------------------------------------------------- /src/service/helpers/create_decision_vars.py: --------------------------------------------------------------------------------
1 | import pulp 2 | import pandas as pd 3 | from typing import Dict, List, Tuple, Any, Optional 4 | 5 | from src.service.helpers.constants import logger 6 | 7 | def _create_buy_dataframe( 8 | buys: Dict[str, pulp.LpVariable], 9 | prices: pd.DataFrame, 10 | ) -> pd.DataFrame: 11 | """ 12 | Create a standardized DataFrame from buy variables. 13 | 14 | Args: 15 | buys: Dictionary of buy variables keyed by identifier 16 | prices: DataFrame with current prices, containing 17 | 'identifier' and 'price' columns 18 | 19 | Returns: 20 | DataFrame with buy variables merged with current prices 21 | """ 22 | # Create base DataFrame from buys dictionary 23 | buy_df = pd.DataFrame([ 24 | {'identifier': id, 'buy_var': var} 25 | for id, var in buys.items() 26 | ]) 27 | 28 | # Return empty DataFrame if no buys 29 | if buy_df.empty: 30 | return buy_df 31 | 32 | buy_df = buy_df.merge( 33 | prices[['identifier', 'price']], 34 | on='identifier', 35 | how='left' 36 | ) 37 | 38 | return buy_df 39 | 40 | def _create_sell_dataframe( 41 | sells: Dict[str, pulp.LpVariable], 42 | gain_loss_report: pd.DataFrame, 43 | prices: pd.DataFrame, 44 | ) -> pd.DataFrame: 45 | """ 46 | Create a standardized DataFrame from sell variables.
47 | 48 | Args: 49 | sells: Dictionary of sell variables keyed by tax_lot_id 50 | gain_loss_report: DataFrame with gain/loss data used to map tax lots to identifiers 51 | prices: DataFrame with current prices ('identifier' and 'price' columns) 52 | 53 | Returns: 54 | DataFrame with sell variables, identifiers, and current prices 55 | """ 56 | # Create base DataFrame from sells dictionary 57 | sell_df = pd.DataFrame([ 58 | {'tax_lot_id': tid, 'sell_var': var} 59 | for tid, var in sells.items() 60 | ]) 61 | 62 | # Return empty DataFrame if no sells 63 | if sell_df.empty: 64 | return sell_df 65 | 66 | # Merge with gain_loss to get identifiers 67 | sell_df = sell_df.merge( 68 | gain_loss_report[['tax_lot_id', 'identifier']], 69 | on='tax_lot_id', 70 | how='inner' 71 | ) 72 | 73 | # Merge with prices 74 | sell_df = sell_df.merge( 75 | prices[['identifier', 'price']], 76 | on='identifier', 77 | how='left' 78 | ) 79 | 80 | return sell_df 81 | 82 | def create_decision_variables( 83 | buy_identifiers: list[str], 84 | gain_loss: pd.DataFrame, 85 | prices: pd.DataFrame, 86 | debug: bool = False 87 | ) -> tuple[dict, dict, pd.DataFrame, pd.DataFrame]: 88 | """ 89 | Create decision variables for the optimization problem. 90 | 91 | Args: 92 | buy_identifiers: List of security identifiers eligible for buy variables 93 | gain_loss: DataFrame with gain/loss report (one row per tax lot) 94 | prices: DataFrame with current prices 95 | debug: Enable debug logging 96 | Returns: 97 | Tuple of (buys, sells, buy_df, sell_df) 98 | """ 99 | buys = {} 100 | sells = {} 101 | 102 | # Buy variables - one per security 103 | for identifier in buy_identifiers: 104 | buys[identifier] = pulp.LpVariable( 105 | f"buy_{identifier}", 106 | lowBound=0, 107 | cat='Continuous' 108 | ) 109 | if debug: 110 | # logger.info for debug steps 111 | logger.info(f"Created buy variable for {identifier}") 112 | 113 | # Sell variables - one per tax lot 114 | for _, lot in gain_loss.iterrows(): 115 | tax_lot_id = lot['tax_lot_id'] 116 | sells[tax_lot_id] = pulp.LpVariable( 117 | f"sell_{tax_lot_id}", 118 | lowBound=0, 119 | upBound=lot['quantity'], 120 | cat='Continuous' 121 | ) 122 | 123 | if debug: 124 | # logger.info for debug steps 125 | logger.info(f"Created sell variable for lot {tax_lot_id} ({lot['identifier']}) - max {lot['quantity']} shares") 126 | 127 | buy_df = _create_buy_dataframe(buys, prices) 128 | sell_df = _create_sell_dataframe(sells, gain_loss, prices) 129 | 130 | return buys, sells, buy_df, sell_df 131 |
-------------------------------------------------------------------------------- /src/service/constraints/cash/withdrawal_validator.py: --------------------------------------------------------------------------------
1 | from typing import Optional, Tuple 2 | import pandas as pd 3 | import pulp 4 | 5 | from src.service.constraints.base_validator import BaseValidator 6 | from src.service.helpers.constants import CASH_CUSIP_ID, logger 7 | 8 | class WithdrawalValidator(BaseValidator): 9 | """ 10 | Validator for withdrawal-related constraints. 11 | Note: Individual trade validation is not supported as withdrawal validation requires knowledge of all trades. 12 | """ 13 | 14 | def __init__(self, oracle_strategy, withdrawal_amount: float): 15 | """ 16 | Initialize WithdrawalValidator.
17 | 18 | Args: 19 | oracle_strategy: Reference to the OracleStrategy instance 20 | withdrawal_amount: Amount to withdraw from the portfolio 21 | """ 22 | super().__init__(oracle_strategy) 23 | self.withdrawal_amount = withdrawal_amount 24 | 25 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 26 | """Cannot validate individual buys without knowledge of all trades.""" 27 | raise NotImplementedError("Withdrawal validation requires knowledge of all trades") 28 | 29 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 30 | """Cannot validate individual sells without knowledge of all trades.""" 31 | raise NotImplementedError("Withdrawal validation requires knowledge of all trades") 32 | 33 | def add_to_problem( 34 | self, 35 | prob: pulp.LpProblem, 36 | buys: dict, 37 | sells: dict, 38 | drift: pd.DataFrame, 39 | gain_loss: pd.DataFrame, 40 | total_value: float, 41 | debug: bool = True 42 | ) -> None: 43 | """ 44 | Add withdrawal-related constraints to the optimization problem. 45 | 46 | Args: 47 | prob: PuLP optimization problem 48 | buys: Dictionary of buy variables 49 | sells: Dictionary of sell variables 50 | drift: DataFrame with drift report 51 | gain_loss: DataFrame with gain/loss report 52 | total_value: Total portfolio value 53 | debug: Enable debug logging 54 | """ 55 | if self.withdrawal_amount <= 0: 56 | return # No withdrawal, no constraints needed 57 | 58 | # Get cash information from drift report 59 | cash_row = drift[drift['asset_class'] == CASH_CUSIP_ID] 60 | if cash_row.empty: 61 | if debug: 62 | logger.warning("CASH_CUSIP_ID not found in drift report for withdrawal calculation.") 63 | return # No cash position defined 64 | 65 | current_cash = cash_row['actual_weight'].iloc[0] * total_value 66 | 67 | if debug: 68 | logger.info("Calculating Withdrawal Constraints ===") 69 | logger.info(f" Current cash: ${current_cash:.2f}") 70 | logger.info(f" Withdrawal amount: ${self.withdrawal_amount:.2f}") 71 | 72 | # Calculate total buys and sells in dollar terms 73 | total_buys = 0 74 | for identifier, buy_var in buys.items(): 75 | if identifier != CASH_CUSIP_ID: 76 | price = self.strategy.prices.loc[ 77 | self.strategy.prices['identifier'] == identifier, 'price' 78 | ].iloc[0] 79 | total_buys += buy_var * price 80 | 81 | total_sells = 0 82 | for _, lot in gain_loss.iterrows(): 83 | tax_lot_id = lot['tax_lot_id'] 84 | if tax_lot_id in sells: 85 | price = self.strategy.prices.loc[ 86 | self.strategy.prices['identifier'] == lot['identifier'], 'price' 87 | ].iloc[0] 88 | total_sells += sells[tax_lot_id] * price 89 | 90 | # Calculate new cash after trades and withdrawal 91 | new_cash = (current_cash + total_sells - total_buys - self.withdrawal_amount) 92 | 93 | # Constraint: Ensure new cash is non-negative 94 | prob += new_cash >= 0, "withdrawal_cash_constraint" 95 | 96 | if debug: 97 | logger.info(f"Added withdrawal constraint: new_cash >= 0") -------------------------------------------------------------------------------- /tests/test_small_trade_results.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import json 3 | import pandas as pd 4 | from pathlib import Path 5 | from decimal import Decimal 6 | 7 | from src.service.oracle import Oracle 8 | from src.service.oracle_strategy import OracleStrategy 9 | from src.service.helpers.enums import OracleOptimizationType 10 | 11 | class TestSmallTradeResults(unittest.TestCase): 12 | """Test that trades meet 
minimum notional and decimal place requirements.""" 13 | 14 | def setUp(self): 15 | """Set up test data.""" 16 | self.test_data_dir = Path('tests/example_oracle_inputs') 17 | self.min_notional = 5.1 # Minimum notional value for trades 18 | self.decimal_places = 4 # Required decimal places for rounding 19 | 20 | def test_min_notional_trades(self): 21 | """ 22 | Test that trades from min_notional_trades.json: 23 | 1. For buys: Each trade is greater than 5.1 min notional 24 | 2. For sells: Sum of notional values for same identifier is greater than 5.1 25 | 3. All trades are rounded to 4 decimal places 26 | """ 27 | # Load test data 28 | json_file ='tests/example_oracle_inputs/min_notional_trades.json' 29 | 30 | with open(json_file, 'r') as f: 31 | self.test_data = json.load(f) 32 | 33 | self.event = { 34 | "oracle": self.test_data["oracle"], 35 | "settings": self.test_data.get("settings", {}) 36 | } 37 | # Process the event using Oracle.process_lambda_event 38 | response = Oracle.process_lambda_event(self.event) 39 | 40 | # Get the first strategy's results 41 | first_strategy_id = next(iter(response["results"])) 42 | strategy_result = response["results"][first_strategy_id] 43 | status = strategy_result["status"] 44 | should_trade = strategy_result["should_trade"] 45 | trades = pd.DataFrame(strategy_result["trades"]) 46 | trade_summary = strategy_result["trade_summary"] 47 | 48 | # Verify we got trades 49 | self.assertIsNotNone(trades) 50 | self.assertFalse(trades.empty) 51 | 52 | # Group sells by identifier to sum their notional values 53 | sells = trades[trades['action'] == 'sell'].copy() 54 | if not sells.empty: 55 | sells['notional'] = abs(sells['quantity'] * sells['price']) 56 | sell_totals = sells.groupby('identifier')['notional'].sum() 57 | 58 | # Check that each sell group meets minimum notional 59 | for identifier, total_notional in sell_totals.items(): 60 | self.assertGreaterEqual( 61 | total_notional, 62 | self.min_notional, 63 | f"Total sell notional {total_notional} for {identifier} is below minimum {self.min_notional}" 64 | ) 65 | 66 | # Check buys individually 67 | buys = trades[trades['action'] == 'buy'] 68 | for _, trade in buys.iterrows(): 69 | notional = abs(float(trade['quantity']) * float(trade['price'])) 70 | 71 | # Skip tiny rounding error trades 72 | if notional > 0.01: 73 | self.assertGreaterEqual( 74 | notional, 75 | self.min_notional, 76 | f"Buy trade notional {notional} is below minimum {self.min_notional} for {trade['identifier']}" 77 | ) 78 | 79 | # Check decimal places for all trades 80 | for _, trade in trades.iterrows(): 81 | quantity_str = str(trade['quantity']) 82 | if '.' 
in quantity_str: 83 | decimal_part = quantity_str.split('.')[1] 84 | self.assertLessEqual( 85 | len(decimal_part), 86 | self.decimal_places, 87 | f"Trade quantity {trade['quantity']} has more than {self.decimal_places} decimal places" 88 | ) 89 | 90 | # Verify quantity is properly rounded 91 | rounded_quantity = round(float(trade['quantity']), self.decimal_places) 92 | self.assertEqual( 93 | float(trade['quantity']), 94 | rounded_quantity, 95 | f"Trade quantity {trade['quantity']} is not properly rounded to {self.decimal_places} decimal places" 96 | ) 97 | -------------------------------------------------------------------------------- /src/service/constraints/trade/min_notional_validator.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from typing import Optional, Tuple 3 | import pandas as pd 4 | import pulp 5 | 6 | from src.service.constraints.base_validator import BaseValidator 7 | 8 | class MinNotionalValidator(BaseValidator): 9 | """Validator for minimum notional trade constraints.""" 10 | 11 | def __init__(self, oracle_strategy, min_notional: float): 12 | """ 13 | Initialize MinNotionalValidator. 14 | 15 | Args: 16 | oracle_strategy: Reference to the OracleStrategy instance 17 | min_notional: Minimum notional amount for any trade (in dollars) 18 | """ 19 | super().__init__(oracle_strategy) 20 | self.min_notional = min_notional 21 | 22 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 23 | """Validate if a buy trade meets minimum notional requirements.""" 24 | if self.min_notional <= 0: 25 | return True, None 26 | 27 | price = self.strategy.prices.loc[self.strategy.prices['identifier'] == identifier, 'price'].iloc[0] 28 | notional = quantity * price 29 | 30 | if notional < self.min_notional: 31 | return False, f"Trade notional ({notional:.2f}) below minimum ({self.min_notional})" 32 | 33 | return True, None 34 | 35 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 36 | """Validate if a sell trade meets minimum notional requirements.""" 37 | if self.min_notional <= 0: 38 | return True, None 39 | 40 | # Get the identifier from tax lots 41 | lot_info = self.strategy.tax_lots[self.strategy.tax_lots['tax_lot_id'] == tax_lot_id].iloc[0] 42 | identifier = lot_info['identifier'] 43 | 44 | price = self.strategy.prices.loc[self.strategy.prices['identifier'] == identifier, 'price'].iloc[0] 45 | notional = quantity * price 46 | 47 | if notional < self.min_notional: 48 | return False, f"Trade notional ({notional:.2f}) below minimum ({self.min_notional})" 49 | 50 | return True, None 51 | 52 | def add_to_problem( 53 | self, 54 | prob: pulp.LpProblem, 55 | buys: dict, 56 | sells: dict, 57 | prices: pd.DataFrame, 58 | tax_lots: pd.DataFrame 59 | ) -> None: 60 | """Add minimum notional constraints to the optimization problem.""" 61 | if self.min_notional <= 0: 62 | return 63 | 64 | # Add minimum notional constraints for buys 65 | for identifier, buy_var in buys.items(): 66 | price = prices.loc[prices['identifier'] == identifier, 'price'].iloc[0] 67 | 68 | # Add binary variable to track if trade happens 69 | trade_happens = pulp.LpVariable(f"buy_happens_{identifier}", cat='Binary') 70 | 71 | # If trade_happens is 1, buy must be >= min_notional/price 72 | # If trade_happens is 0, buy must be 0 73 | prob += buy_var <= trade_happens * 1e6, f"Buy_Upper_{identifier}" # Big M constraint 74 | prob += buy_var >= (self.min_notional / price) * 
trade_happens, f"Buy_Min_Notional_{identifier}" 75 | 76 | sells_by_identifier = defaultdict(list) 77 | 78 | # Add minimum notional constraints for sells 79 | for tax_lot_id, sell_var in sells.items(): 80 | identifier = tax_lots.loc[tax_lots['tax_lot_id'] == tax_lot_id, "identifier"].iloc[0] 81 | sells_by_identifier[identifier].append(sell_var) 82 | 83 | for identifier, sell_vars in sells_by_identifier.items(): 84 | price = prices.loc[prices['identifier'] == identifier, 'price'].iloc[0] 85 | max_sell = tax_lots.loc[tax_lots['identifier'] == identifier, "quantity"].sum() 86 | 87 | # Add binary variable to track if trade happens 88 | trade_happens = pulp.LpVariable(f"sell_happens_{identifier}", cat='Binary') 89 | 90 | sell_sum = pulp.lpSum(sell_vars) 91 | # If trade_happens is 1, sell must be >= min_notional/price 92 | # If trade_happens is 0, sell must be 0 93 | prob += sell_sum <= trade_happens * max_sell, f"Sell_Upper_{identifier}" 94 | prob += sell_sum >= (self.min_notional / price) * trade_happens, f"Sell_Min_Notional_{identifier}" 95 | -------------------------------------------------------------------------------- /tests/drift_refactor/test_initialize_targets.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import pandas as pd 3 | import numpy as np 4 | from src.service.initializers import initialize_targets 5 | from src.service.helpers.constants import CASH_CUSIP_ID 6 | 7 | class TestInitializeTargets(unittest.TestCase): 8 | def test_basic_asset_class_targets(self): 9 | """Test basic initialization of asset class targets with valid data.""" 10 | targets = pd.DataFrame({ 11 | 'asset_class': ['US_EQUITY', 'INTL_EQUITY', 'FIXED_INCOME'], 12 | 'target_weight': [0.6, 0.2, 0.2], 13 | 'identifiers': [ 14 | ['VTI', 'ITOT'], # US_EQUITY options 15 | ['VXUS', 'IXUS'], # INTL_EQUITY options 16 | ['AGG', 'BND'] # FIXED_INCOME options 17 | ] 18 | }) 19 | 20 | result = initialize_targets(targets) 21 | 22 | # Check that CASH was added 23 | self.assertIn(CASH_CUSIP_ID, result['asset_class'].values) 24 | 25 | # Check that weights sum to 1 26 | self.assertAlmostEqual(result['target_weight'].sum(), 1.0) 27 | 28 | # Check that original asset classes were preserved 29 | original_classes = set(['US_EQUITY', 'INTL_EQUITY', 'FIXED_INCOME']) 30 | self.assertTrue(original_classes.issubset(set(result['asset_class']))) 31 | 32 | # Check that identifiers were converted to uppercase 33 | cash_row = result[result['asset_class'] == CASH_CUSIP_ID].iloc[0] 34 | self.assertEqual(cash_row['identifiers'], [CASH_CUSIP_ID]) 35 | 36 | def test_cash_handling_with_withdrawal(self): 37 | """Test that cash targets are properly handled with withdrawal targets.""" 38 | targets = pd.DataFrame({ 39 | 'asset_class': ['US_EQUITY', 'INTL_EQUITY',CASH_CUSIP_ID], 40 | 'target_weight': [0.5, 0.5, 0.00], # Initial cash below withdrawal target 41 | 'identifiers': [ 42 | ['VTI', 'ITOT'], 43 | ['VXUS', 'IXUS'], 44 | [CASH_CUSIP_ID] 45 | ] 46 | }) 47 | 48 | withdraw_target = 0.05 # 5% withdrawal target 49 | result = initialize_targets(targets, withdraw_target=withdraw_target) 50 | 51 | # Check that cash weight matches withdrawal target 52 | cash_weight = result[result['asset_class'] == CASH_CUSIP_ID]['target_weight'].iloc[0] 53 | self.assertEqual(cash_weight, withdraw_target) 54 | 55 | 56 | def test_invalid_inputs(self): 57 | """Test that invalid inputs raise appropriate errors.""" 58 | # Test missing required columns 59 | with self.assertRaises(ValueError): 60 | 
initialize_targets(pd.DataFrame({ 61 | 'asset_class': ['US_EQUITY'], 62 | 'target_weight': [1.0] 63 | # Missing identifiers column 64 | })) 65 | 66 | # Test empty identifier list 67 | with self.assertRaises(ValueError): 68 | initialize_targets(pd.DataFrame({ 69 | 'asset_class': ['US_EQUITY', 'INTL_EQUITY'], 70 | 'target_weight': [0.6, 0.4], 71 | 'identifiers': [[], ['VXUS']] # Empty list for US_EQUITY 72 | })) 73 | 74 | 75 | def test_deminimus_cash_handling(self): 76 | """Test that deminimus cash requirements are properly handled.""" 77 | targets = pd.DataFrame({ 78 | 'asset_class': ['US_EQUITY', 'INTL_EQUITY'], 79 | 'target_weight': [0.7, 0.3], 80 | 'identifiers': [ 81 | ['VTI', 'ITOT'], 82 | ['VXUS', 'IXUS'] 83 | ] 84 | }) 85 | 86 | deminimus_cash = 0.02 # 2% minimum cash 87 | result = initialize_targets(targets, deminimus_cash_target=deminimus_cash) 88 | 89 | # Check that cash was added with correct weight 90 | cash_weight = result[result['asset_class'] == CASH_CUSIP_ID]['target_weight'].iloc[0] 91 | self.assertEqual(cash_weight, deminimus_cash) 92 | 93 | # Check that other weights were scaled properly 94 | us_weight = result[result['asset_class'] == 'US_EQUITY']['target_weight'].iloc[0] 95 | intl_weight = result[result['asset_class'] == 'INTL_EQUITY']['target_weight'].iloc[0] 96 | 97 | # Original ratio was 0.7/0.3, should be preserved 98 | self.assertAlmostEqual(us_weight/intl_weight, 0.7/0.3) 99 | 100 | # Sum should still be 1 101 | self.assertAlmostEqual(result['target_weight'].sum(), 1.0) -------------------------------------------------------------------------------- /_build/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | !function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var 
e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t= 0 && 56 | !jQuery(node.parentNode).hasClass(className) && 57 | !jQuery(node.parentNode).hasClass("nohighlight")) { 58 | var span; 59 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 60 | if (isInSVG) { 61 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 62 | } else { 63 | span = document.createElement("span"); 64 | span.className = className; 65 | } 66 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 67 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 68 | document.createTextNode(val.substr(pos + text.length)), 69 | node.nextSibling)); 70 | node.nodeValue = val.substr(0, pos); 71 | if (isInSVG) { 72 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 73 | var bbox = node.parentElement.getBBox(); 74 | rect.x.baseVal.value = bbox.x; 75 | rect.y.baseVal.value = bbox.y; 76 | rect.width.baseVal.value = bbox.width; 77 | rect.height.baseVal.value = bbox.height; 78 | rect.setAttribute('class', className); 79 | addItems.push({ 80 | "parent": node.parentNode, 81 | "target": rect}); 82 | } 83 | } 84 | } 85 | else if (!jQuery(node).is("button, select, textarea")) { 86 | jQuery.each(node.childNodes, function() { 87 | highlight(this, addItems); 88 | }); 89 | } 90 | } 91 | var addItems = []; 92 | var result = this.each(function() { 93 | highlight(this, addItems); 94 | }); 95 | for (var i = 0; i < addItems.length; ++i) { 96 | jQuery(addItems[i].parent).before(addItems[i].target); 97 | } 98 | return result; 99 | }; 100 | 101 | /* 102 | * backward compatibility for jQuery.browser 103 | * This will be supported until firefox bug is 
fixed. 104 | */ 105 | if (!jQuery.browser) { 106 | jQuery.uaMatch = function(ua) { 107 | ua = ua.toLowerCase(); 108 | 109 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 110 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 111 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 112 | /(msie) ([\w.]+)/.exec(ua) || 113 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 114 | []; 115 | 116 | return { 117 | browser: match[ 1 ] || "", 118 | version: match[ 2 ] || "0" 119 | }; 120 | }; 121 | jQuery.browser = {}; 122 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 123 | } 124 | -------------------------------------------------------------------------------- /src/service/reports/drift_report.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from enum import Enum 3 | from typing import Tuple 4 | 5 | # Threshold for considering a position on target (used for position_status) 6 | DRIFT_THRESHOLD = 0.001 7 | 8 | class PositionStatus(str, Enum): 9 | """Position status relative to target weight.""" 10 | ON_TARGET = 'ON_TARGET' 11 | OVERWEIGHT = 'OVERWEIGHT' 12 | UNDERWEIGHT = 'UNDERWEIGHT' 13 | NON_TARGET_INSTRUMENT = 'NON_TARGET_INSTRUMENT' 14 | 15 | def generate_drift_report( 16 | targets: pd.DataFrame, 17 | actuals: pd.DataFrame 18 | ) -> pd.DataFrame: 19 | """ 20 | Generate a drift report comparing actual vs target weights at the asset class level. 21 | 22 | Args: 23 | targets: DataFrame with asset class target weights with columns: 24 | - asset_class: Asset class identifier 25 | - target_weight: Target portfolio weight for the asset class 26 | - identifiers: List of valid security identifiers in this asset class 27 | actuals: DataFrame with actual weights and market values with columns: 28 | - identifier: Security identifier 29 | - actual_weight: Current portfolio weight 30 | - market_value: Current market value 31 | 32 | Returns: 33 | DataFrame with columns: 34 | - asset_class: Asset class identifier 35 | - target_weight: Target portfolio weight for the asset class 36 | - actual_weight: Current portfolio weight for the asset class 37 | - market_value: Current market value for the asset class 38 | - drift: Difference between actual and target weights (actual - target) 39 | - drift_pct: Percentage drift relative to target ((actual - target) / target) 40 | - drift_dollars: Dollar value of drift (market_value - target_weight * total_value) 41 | - position_status: PositionStatus enum value (OVERWEIGHT, UNDERWEIGHT, ON_TARGET, or NON_TARGET_INSTRUMENT) 42 | """ 43 | # Create mapping of identifier to asset class 44 | id_to_asset_class = {} 45 | for _, row in targets.iterrows(): 46 | for identifier in row['identifiers']: 47 | id_to_asset_class[identifier] = row['asset_class'] 48 | 49 | # Add asset class to actuals 50 | actuals_with_ac = actuals.copy() 51 | actuals_with_ac['asset_class'] = actuals_with_ac['identifier'].map(id_to_asset_class) 52 | 53 | # Aggregate actuals by asset class 54 | asset_class_actuals = actuals_with_ac.groupby('asset_class').agg({ 55 | 'market_value': 'sum', 56 | 'actual_weight': 'sum' 57 | }).reset_index() 58 | 59 | # Start with all asset classes from targets 60 | drift = targets[['asset_class', 'target_weight', 'identifiers']].copy() 61 | 62 | # Merge with aggregated actuals using full outer join 63 | drift = drift.merge( 64 | asset_class_actuals[['asset_class', 'market_value', 'actual_weight']], 65 | on='asset_class', 66 | how='outer' 67 | ) 68 | 69 | # Fill missing values 70 | 
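# Note: the outer join above leaves NaN actual_weight/market_value for target asset
# classes with no current holdings, and NaN target_weight for held positions that are
# not in the targets, so all three columns are filled before computing drift.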
drift['actual_weight'] = drift['actual_weight'].fillna(0.0) 71 | drift['market_value'] = drift['market_value'].fillna(0.0) 72 | drift['target_weight'] = drift['target_weight'].fillna(0.0) 73 | 74 | # Normalize target weights to sum to 1 75 | total_target_weight = drift['target_weight'].sum() 76 | if total_target_weight > 0: 77 | drift['target_weight'] = drift['target_weight'] / total_target_weight 78 | 79 | # Calculate total portfolio value 80 | total_value = drift['market_value'].sum() 81 | 82 | # Calculate drift metrics 83 | drift['drift'] = drift['actual_weight'] - drift['target_weight'] 84 | drift['drift_pct'] = ( 85 | (drift['actual_weight'] - drift['target_weight']) / 86 | drift['target_weight'].where(drift['target_weight'] != 0, 1.0) 87 | ) 88 | 89 | # Calculate dollar-based drift 90 | drift['drift_dollars'] = drift['market_value'] - (drift['target_weight'] * total_value) 91 | 92 | # Add position status 93 | drift['position_status'] = PositionStatus.ON_TARGET 94 | drift.loc[drift['drift'] > DRIFT_THRESHOLD, 'position_status'] = PositionStatus.OVERWEIGHT 95 | drift.loc[drift['drift'] < -DRIFT_THRESHOLD, 'position_status'] = PositionStatus.UNDERWEIGHT 96 | drift.loc[drift['target_weight'] == 0, 'position_status'] = PositionStatus.NON_TARGET_INSTRUMENT 97 | 98 | # Order columns logically 99 | column_order = [ 100 | 'asset_class', 101 | 'target_weight', 102 | 'actual_weight', 103 | 'market_value', 104 | 'drift', 105 | 'drift_pct', 106 | 'drift_dollars', 107 | 'position_status', 108 | 'identifiers' 109 | ] 110 | 111 | return drift[column_order].sort_values('drift', ascending=False) 112 | -------------------------------------------------------------------------------- /src/service/helpers/enums.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | import pulp 3 | from typing import Dict, Any, Tuple, Optional 4 | 5 | class OracleOptimizationType(Enum): 6 | """Defines the optimization types available for OracleStrategy.""" 7 | HOLD = "HOLD" # No trades allowed 8 | BUY_ONLY = "BUY_ONLY" # Only buys allowed, no sells 9 | TAX_UNAWARE = "TAX_UNAWARE" # Rebalance towards targets ignoring tax implications 10 | TAX_AWARE = "TAX_AWARE" # Rebalance considering tax implications (default) 11 | PAIRS_TLH = "PAIRS_TLH" # Tax-Loss Harvesting for pairs 12 | DIRECT_INDEX = "DIRECT_INDEX" # Direct Indexing strategy 13 | # Add other types as needed, e.g., TLH, DIRECT_INDEX 14 | 15 | @classmethod 16 | def from_string(cls, type_str: str) -> 'OracleOptimizationType': 17 | """Get enum member from string, case-insensitive.""" 18 | for member in cls: 19 | if member.value.upper() == type_str.upper(): 20 | return member 21 | raise ValueError(f"Invalid OracleOptimizationType: {type_str}") 22 | 23 | def allows_sells(self) -> bool: 24 | """Check if the optimization type allows sell trades.""" 25 | return self not in {self.BUY_ONLY, self.HOLD} 26 | 27 | def should_tlh(self) -> bool: 28 | """ 29 | Determine if the optimization type supports Tax Loss Harvesting (TLH). 
30 | 31 | Different optimization types handle TLH differently: 32 | - PAIRS_TLH: Specifically designed for tax-loss harvesting with pairs trading 33 | - DIRECT_INDEX: Supports TLH as part of direct indexing strategy 34 | - Other types (HOLD, BUY_ONLY, TAX_UNAWARE, TAX_AWARE): Never support TLH 35 | 36 | Returns: 37 | bool: True if the optimization type supports TLH operations 38 | """ 39 | 40 | # PAIRS_TLH and DIRECT_INDEX always support TLH 41 | if self in {self.PAIRS_TLH, self.DIRECT_INDEX}: 42 | return True 43 | return False 44 | 45 | def setup_optimization(self, prob: pulp.LpProblem, buys: Dict, sells: Dict) -> None: 46 | """ 47 | Setup optimization problem based on strategy. 48 | 49 | Args: 50 | prob: The optimization problem 51 | buys: Dictionary of buy variables 52 | sells: Dictionary of sell variables 53 | data_context: Dictionary with optimization context data 54 | """ 55 | 56 | if self == self.BUY_ONLY: 57 | # Force sells to zero 58 | for sell_var in sells.values(): 59 | prob += sell_var == 0, f"constraint_buy_only_{sell_var.name}" 60 | 61 | elif self == self.HOLD: 62 | # Force all variables to zero 63 | for buy_var in buys.values(): 64 | prob += buy_var == 0, f"hold_buy_{buy_var.name}" 65 | for sell_var in sells.values(): 66 | prob += sell_var == 0, f"hold_sell_{sell_var.name}" 67 | 68 | def adjust_weights(self, 69 | weight_tax: float, 70 | weight_drift: float, 71 | weight_transaction: float, 72 | weight_factor_model: float, 73 | weight_cash_drag: float) -> Tuple[float, float, float, float, float]: 74 | """ 75 | Adjust weights based on strategy type. 76 | 77 | Args: 78 | weight_tax: Weight for tax component 79 | weight_drift: Weight for drift component 80 | weight_transaction: Weight for transaction cost component 81 | weight_factor_model: Weight for factor model component 82 | weight_cash_drag: Weight for cash deployment component 83 | 84 | Returns: 85 | Tuple of adjusted weights 86 | """ 87 | if self == self.TAX_UNAWARE: 88 | # Override tax weight to zero 89 | return 0, weight_drift, weight_transaction, 0, weight_cash_drag 90 | 91 | elif self == self.HOLD: 92 | # Weights don't matter for HOLD 93 | return 0, 0, 0, 0, 0 94 | 95 | elif self == self.DIRECT_INDEX: 96 | # Direct index might adjust weights to prioritize factor model 97 | return weight_tax, weight_drift, weight_transaction, weight_factor_model, weight_cash_drag 98 | 99 | else: 100 | # Default weights for TAX_AWARE, BUY_ONLY, PAIRS_TLH 101 | return weight_tax, weight_drift, weight_transaction, 0, weight_cash_drag 102 | 103 | def can_handle_withdrawal(self) -> bool: 104 | """ 105 | Check if the strategy can handle withdrawals. 106 | 107 | Returns: 108 | True if the strategy can handle withdrawals, False otherwise 109 | """ 110 | if self in {self.HOLD, self.BUY_ONLY}: 111 | return False 112 | return True -------------------------------------------------------------------------------- /_build/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Search — Oracle 0.1.0 documentation 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
113 | 118 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | -------------------------------------------------------------------------------- /_build/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Base JavaScript utilities for all Sphinx HTML documentation. 3 | */ 4 | "use strict"; 5 | 6 | const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ 7 | "TEXTAREA", 8 | "INPUT", 9 | "SELECT", 10 | "BUTTON", 11 | ]); 12 | 13 | const _ready = (callback) => { 14 | if (document.readyState !== "loading") { 15 | callback(); 16 | } else { 17 | document.addEventListener("DOMContentLoaded", callback); 18 | } 19 | }; 20 | 21 | /** 22 | * Small JavaScript module for the documentation. 23 | */ 24 | const Documentation = { 25 | init: () => { 26 | Documentation.initDomainIndexTable(); 27 | Documentation.initOnKeyListeners(); 28 | }, 29 | 30 | /** 31 | * i18n support 32 | */ 33 | TRANSLATIONS: {}, 34 | PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), 35 | LOCALE: "unknown", 36 | 37 | // gettext and ngettext don't access this so that the functions 38 | // can safely bound to a different name (_ = Documentation.gettext) 39 | gettext: (string) => { 40 | const translated = Documentation.TRANSLATIONS[string]; 41 | switch (typeof translated) { 42 | case "undefined": 43 | return string; // no translation 44 | case "string": 45 | return translated; // translation exists 46 | default: 47 | return translated[0]; // (singular, plural) translation tuple exists 48 | } 49 | }, 50 | 51 | ngettext: (singular, plural, n) => { 52 | const translated = Documentation.TRANSLATIONS[singular]; 53 | if (typeof translated !== "undefined") 54 | return translated[Documentation.PLURAL_EXPR(n)]; 55 | return n === 1 ? singular : plural; 56 | }, 57 | 58 | addTranslations: (catalog) => { 59 | Object.assign(Documentation.TRANSLATIONS, catalog.messages); 60 | Documentation.PLURAL_EXPR = new Function( 61 | "n", 62 | `return (${catalog.plural_expr})` 63 | ); 64 | Documentation.LOCALE = catalog.locale; 65 | }, 66 | 67 | /** 68 | * helper function to focus on search bar 69 | */ 70 | focusSearchBar: () => { 71 | document.querySelectorAll("input[name=q]")[0]?.focus(); 72 | }, 73 | 74 | /** 75 | * Initialise the domain index toggle buttons 76 | */ 77 | initDomainIndexTable: () => { 78 | const toggler = (el) => { 79 | const idNumber = el.id.substr(7); 80 | const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); 81 | if (el.src.substr(-9) === "minus.png") { 82 | el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; 83 | toggledRows.forEach((el) => (el.style.display = "none")); 84 | } else { 85 | el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; 86 | toggledRows.forEach((el) => (el.style.display = "")); 87 | } 88 | }; 89 | 90 | const togglerElements = document.querySelectorAll("img.toggler"); 91 | togglerElements.forEach((el) => 92 | el.addEventListener("click", (event) => toggler(event.currentTarget)) 93 | ); 94 | togglerElements.forEach((el) => (el.style.display = "")); 95 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); 96 | }, 97 | 98 | initOnKeyListeners: () => { 99 | // only install a listener if it is really needed 100 | if ( 101 | !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && 102 | !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS 103 | ) 104 | return; 105 | 106 | document.addEventListener("keydown", (event) => { 107 | // bail for input elements 108 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; 109 | // bail 
with special keys 110 | if (event.altKey || event.ctrlKey || event.metaKey) return; 111 | 112 | if (!event.shiftKey) { 113 | switch (event.key) { 114 | case "ArrowLeft": 115 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; 116 | 117 | const prevLink = document.querySelector('link[rel="prev"]'); 118 | if (prevLink && prevLink.href) { 119 | window.location.href = prevLink.href; 120 | event.preventDefault(); 121 | } 122 | break; 123 | case "ArrowRight": 124 | if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; 125 | 126 | const nextLink = document.querySelector('link[rel="next"]'); 127 | if (nextLink && nextLink.href) { 128 | window.location.href = nextLink.href; 129 | event.preventDefault(); 130 | } 131 | break; 132 | } 133 | } 134 | 135 | // some keyboard layouts may need Shift to get / 136 | switch (event.key) { 137 | case "/": 138 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; 139 | Documentation.focusSearchBar(); 140 | event.preventDefault(); 141 | } 142 | }); 143 | }, 144 | }; 145 | 146 | // quick alias for translations 147 | const _ = Documentation.gettext; 148 | 149 | _ready(Documentation.init); 150 | -------------------------------------------------------------------------------- /_build/_static/pygments.css: -------------------------------------------------------------------------------- 1 | pre { line-height: 125%; } 2 | td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 3 | span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 4 | td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 5 | span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 6 | .highlight .hll { background-color: #ffffcc } 7 | .highlight { background: #f8f8f8; } 8 | .highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ 9 | .highlight .err { border: 1px solid #F00 } /* Error */ 10 | .highlight .k { color: #008000; font-weight: bold } /* Keyword */ 11 | .highlight .o { color: #666 } /* Operator */ 12 | .highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ 13 | .highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ 14 | .highlight .cp { color: #9C6500 } /* Comment.Preproc */ 15 | .highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ 16 | .highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ 17 | .highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ 18 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 19 | .highlight .ge { font-style: italic } /* Generic.Emph */ 20 | .highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ 21 | .highlight .gr { color: #E40000 } /* Generic.Error */ 22 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 23 | .highlight .gi { color: #008400 } /* Generic.Inserted */ 24 | .highlight .go { color: #717171 } /* Generic.Output */ 25 | .highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ 26 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 27 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 28 | .highlight .gt { color: #04D } /* Generic.Traceback */ 29 | .highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ 30 | .highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ 31 
| .highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ 32 | .highlight .kp { color: #008000 } /* Keyword.Pseudo */ 33 | .highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ 34 | .highlight .kt { color: #B00040 } /* Keyword.Type */ 35 | .highlight .m { color: #666 } /* Literal.Number */ 36 | .highlight .s { color: #BA2121 } /* Literal.String */ 37 | .highlight .na { color: #687822 } /* Name.Attribute */ 38 | .highlight .nb { color: #008000 } /* Name.Builtin */ 39 | .highlight .nc { color: #00F; font-weight: bold } /* Name.Class */ 40 | .highlight .no { color: #800 } /* Name.Constant */ 41 | .highlight .nd { color: #A2F } /* Name.Decorator */ 42 | .highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ 43 | .highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ 44 | .highlight .nf { color: #00F } /* Name.Function */ 45 | .highlight .nl { color: #767600 } /* Name.Label */ 46 | .highlight .nn { color: #00F; font-weight: bold } /* Name.Namespace */ 47 | .highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ 48 | .highlight .nv { color: #19177C } /* Name.Variable */ 49 | .highlight .ow { color: #A2F; font-weight: bold } /* Operator.Word */ 50 | .highlight .w { color: #BBB } /* Text.Whitespace */ 51 | .highlight .mb { color: #666 } /* Literal.Number.Bin */ 52 | .highlight .mf { color: #666 } /* Literal.Number.Float */ 53 | .highlight .mh { color: #666 } /* Literal.Number.Hex */ 54 | .highlight .mi { color: #666 } /* Literal.Number.Integer */ 55 | .highlight .mo { color: #666 } /* Literal.Number.Oct */ 56 | .highlight .sa { color: #BA2121 } /* Literal.String.Affix */ 57 | .highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ 58 | .highlight .sc { color: #BA2121 } /* Literal.String.Char */ 59 | .highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ 60 | .highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ 61 | .highlight .s2 { color: #BA2121 } /* Literal.String.Double */ 62 | .highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ 63 | .highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ 64 | .highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ 65 | .highlight .sx { color: #008000 } /* Literal.String.Other */ 66 | .highlight .sr { color: #A45A77 } /* Literal.String.Regex */ 67 | .highlight .s1 { color: #BA2121 } /* Literal.String.Single */ 68 | .highlight .ss { color: #19177C } /* Literal.String.Symbol */ 69 | .highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ 70 | .highlight .fm { color: #00F } /* Name.Function.Magic */ 71 | .highlight .vc { color: #19177C } /* Name.Variable.Class */ 72 | .highlight .vg { color: #19177C } /* Name.Variable.Global */ 73 | .highlight .vi { color: #19177C } /* Name.Variable.Instance */ 74 | .highlight .vm { color: #19177C } /* Name.Variable.Magic */ 75 | .highlight .il { color: #666 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /src/service/objectives/cash_deployment/cash_deployment.py: -------------------------------------------------------------------------------- 1 | import pulp 2 | import pandas as pd 3 | from typing import Dict 4 | from src.service.helpers.constants import CASH_CUSIP_ID 5 | from src.service.helpers.constants import logger 6 | 7 | def calculate_cash_deployment_objective( 8 | prob: pulp.LpProblem, 9 | buys: Dict[str, pulp.LpVariable], 10 | sells: Dict[str, 
pulp.LpVariable], 11 | drift: pd.DataFrame, 12 | gain_loss: pd.DataFrame, 13 | total_value: float, 14 | prices: pd.DataFrame, 15 | cash_normalization: float, 16 | debug: bool = True, 17 | ) -> pulp.LpAffineExpression: 18 | """ 19 | Calculate a penalty for holding excess cash above target. 20 | Only penalizes cash when it's above target, not when below. 21 | 22 | Args: 23 | prob: The optimization problem to add constraints to 24 | buys: Dictionary of buy variables 25 | sells: Dictionary of sell variables 26 | drift: DataFrame with drift report 27 | gain_loss: DataFrame with gain/loss report 28 | total_value: Total portfolio value 29 | prices: DataFrame with current prices 30 | debug: Enable debug logging 31 | cash_normalization: Normalization factor for cash penalty 32 | 33 | Returns: 34 | The cash penalty expression 35 | """ 36 | # Get cash information from drift report 37 | cash_row = drift[drift['asset_class'] == CASH_CUSIP_ID] 38 | if cash_row.empty: 39 | if debug: 40 | logger.warning("CASH_CUSIP_ID not found in drift report for cash penalty calculation.") 41 | return 0 # No cash position defined 42 | 43 | cash_target = cash_row['target_weight'].iloc[0] 44 | current_cash = cash_row['actual_weight'].iloc[0] 45 | 46 | if debug: 47 | logger.info("Calculating Cash Deployment Component ===") 48 | logger.info(f" Current cash weight: {current_cash:.4%}") 49 | 50 | # Calculate total buys and sells in dollar terms 51 | total_buys = 0 52 | for identifier, buy_var in buys.items(): 53 | if identifier != CASH_CUSIP_ID: 54 | price = prices.loc[prices['identifier'] == identifier, 'price'].iloc[0] 55 | total_buys += buy_var * price 56 | 57 | total_sells = 0 58 | for _, lot in gain_loss.iterrows(): 59 | tax_lot_id = lot['tax_lot_id'] 60 | if tax_lot_id in sells: 61 | price = prices.loc[prices['identifier'] == lot['identifier'], 'price'].iloc[0] 62 | total_sells += sells[tax_lot_id] * price 63 | 64 | # Calculate new cash weight after trades 65 | new_cash_dollars = (current_cash * total_value) + total_sells - total_buys 66 | new_cash_weight = new_cash_dollars / total_value 67 | 68 | # Calculate initial excess cash 69 | initial_excess_cash = max(0, current_cash - cash_target) 70 | 71 | # Create variable for excess cash after trades 72 | excess_cash = pulp.LpVariable(f"excess_cash", lowBound=0) 73 | prob += new_cash_weight - cash_target <= excess_cash, "cash_excess_constr" 74 | 75 | if debug: 76 | logger.info(f"Added cash deployment component (minimizing new_cash_weight)") 77 | logger.info(f" Initial excess cash: {initial_excess_cash:.4%}") 78 | 79 | # Create list of terms to sum 80 | cash_terms = [(excess_cash - initial_excess_cash) * cash_normalization] 81 | 82 | # Return the sum of all cash terms 83 | return pulp.lpSum(cash_terms) 84 | 85 | 86 | def calculate_max_withdrawal_objective( 87 | self, 88 | prob: pulp.LpProblem, 89 | buys: Dict[str, pulp.LpVariable], 90 | sells: Dict[str, pulp.LpVariable], 91 | gain_loss: pd.DataFrame, 92 | debug: bool = True 93 | ) -> pulp.LpAffineExpression: 94 | """ 95 | Calculate an objective that maximizes cash withdrawal. 96 | Creates an objective that maximizes the cash generated from selling securities. 
97 | 98 | Args: 99 | prob: PuLP optimization problem 100 | buys: Dictionary of buy variables 101 | sells: Dictionary of sell variables 102 | gain_loss: DataFrame with gain/loss report 103 | debug: Enable debug logging 104 | 105 | Returns: 106 | Cash withdrawal maximization objective term (negative since we're minimizing) 107 | """ 108 | if debug: 109 | logger.info("Calculating max withdrawal objective") 110 | 111 | # Create a cash generation objective (for maximizing withdrawal) 112 | # We want to maximize cash, which means minimizing the negative of cash 113 | total_cash_generated = 0 114 | for _, lot in gain_loss.iterrows(): 115 | tax_lot_id = lot['tax_lot_id'] 116 | identifier = lot['identifier'] 117 | price = self.strategy.prices.loc[self.strategy.prices['identifier'] == identifier, 'price'].iloc[0] 118 | total_cash_generated += sells[tax_lot_id] * price 119 | 120 | # In a minimization problem, we minimize the negative of what we want to maximize 121 | return -1 * total_cash_generated 122 | -------------------------------------------------------------------------------- /src/service/reports/gain_loss_report.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from datetime import date 3 | from typing import Optional 4 | 5 | def generate_gain_loss_report( 6 | tax_lots: pd.DataFrame, 7 | prices: pd.DataFrame, 8 | current_date: date, 9 | tax_rates: pd.DataFrame 10 | ) -> pd.DataFrame: 11 | """ 12 | Generate a gain/loss report for all tax lots using current prices. 13 | 14 | Args: 15 | tax_lots: DataFrame of tax lots 16 | prices: DataFrame of current prices 17 | current_date: Current date for calculating holding period 18 | tax_rates: DataFrame with tax rates 19 | 20 | Returns: 21 | DataFrame with columns: 22 | - tax_lot_id: Unique identifier for the tax lot 23 | - identifier: Security identifier 24 | - quantity: Number of shares 25 | - cost_basis: Total cost basis for the lot 26 | - cost_per_share: Cost basis per share 27 | - current_price: Current price per share 28 | - market_value: Current total value (quantity * current_price) 29 | - unrealized_gain: Market value - cost basis 30 | - unrealized_gain_pct: Unrealized gain as percentage of cost basis 31 | - holding_period_days: Number of days position has been held 32 | - is_long_term: Whether the position qualifies for long-term capital gains (held > 365 days) 33 | - gain_type: Type of gain for tax purposes (short_term or long_term) 34 | - federal_tax_rate: Applicable federal tax rate 35 | - state_tax_rate: Applicable state tax rate 36 | - total_tax_rate: Combined federal and state tax rate 37 | - tax_liability: Estimated tax liability if sold at current price 38 | - per_share_tax_liability: Tax liability per share 39 | - tax_gain_loss_percentage: Tax gain/loss percentage (gain/loss percentage divided by tax rate) 40 | """ 41 | # Define column order upfront for both empty and non-empty cases 42 | column_order = [ 43 | 'tax_lot_id', 44 | 'identifier', 45 | 'quantity', 46 | 'cost_basis', 47 | 'cost_per_share', 48 | 'current_price', 49 | 'market_value', 50 | 'unrealized_gain', 51 | 'unrealized_gain_pct', 52 | 'holding_period_days', 53 | 'is_long_term', 54 | 'gain_type', 55 | 'federal_tax_rate', 56 | 'state_tax_rate', 57 | 'total_tax_rate', 58 | 'tax_liability', 59 | 'per_share_tax_liability', 60 | 'tax_gain_loss_percentage' 61 | ] 62 | 63 | # Return empty DataFrame with correct columns if tax_lots is empty 64 | if tax_lots.empty: 65 | return pd.DataFrame(columns=column_order) 66 | 
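# Note: the price join below is a left merge on 'identifier', so any lot whose identifier
# is missing from `prices` would carry NaN through market_value and the tax columns
# (assumed here to be prevented upstream by the prices initializer).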
67 | # Create copy to avoid modifying inputs 68 | report = tax_lots.copy() 69 | 70 | # Join with prices 71 | report = report.merge( 72 | prices[['identifier', 'price']], 73 | on='identifier', 74 | how='left', 75 | validate='many_to_one' 76 | ) 77 | 78 | # Calculate derived values 79 | report['cost_per_share'] = report['cost_basis'] / report['quantity'] 80 | report['market_value'] = report['quantity'] * report['price'] 81 | report['unrealized_gain'] = report['market_value'] - report['cost_basis'] 82 | report['unrealized_gain_pct'] = report['unrealized_gain'] / report['cost_basis'] 83 | report['holding_period_days'] = (pd.Timestamp(current_date) - report['date']).dt.days 84 | report['is_long_term'] = report['holding_period_days'] > 365 85 | 86 | # Determine gain type for tax purposes 87 | report['gain_type'] = report['is_long_term'].map({True: 'long_term', False: 'short_term'}) 88 | 89 | # Join with tax rates based on gain type 90 | report = report.merge( 91 | tax_rates[['gain_type', 'federal_rate', 'state_rate', 'total_rate']], 92 | on='gain_type', 93 | how='left', 94 | validate='many_to_one' 95 | ) 96 | 97 | # Calculate tax impact 98 | # - Positive tax_liability means taxes owed on gains (unrealized_gain > 0) 99 | # - Negative tax_liability means potential tax benefit from losses (unrealized_gain < 0) 100 | # which can offset other gains when realized 101 | report['tax_liability'] = report['unrealized_gain'] * report['total_rate'] 102 | 103 | # Calculate per-share tax liability for optimization 104 | report['per_share_tax_liability'] = report['tax_liability'] / report['quantity'] 105 | 106 | # Calculate tax gain/loss percentage (gain/loss percentage divided by tax rate) 107 | # Note: Handle division by zero by replacing 0 tax rates with NaN 108 | safe_total_rate = report['total_rate'].replace(0, float('nan')) 109 | report['tax_gain_loss_percentage'] = report['unrealized_gain_pct'] * safe_total_rate 110 | 111 | # Rename columns for clarity 112 | report = report.rename(columns={ 113 | 'price': 'current_price', 114 | 'federal_rate': 'federal_tax_rate', 115 | 'state_rate': 'state_tax_rate', 116 | 'total_rate': 'total_tax_rate' 117 | }) 118 | 119 | return report[column_order] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | env_local.env 133 | .venv 134 | env/ 135 | venv/ 136 | ENV/ 137 | env.bak/ 138 | venv.bak/ 139 | 140 | # Spyder project settings 141 | .spyderproject 142 | .spyproject 143 | 144 | # Rope project settings 145 | .ropeproject 146 | 147 | # mkdocs documentation 148 | /site 149 | 150 | # mypy 151 | .mypy_cache/ 152 | .dmypy.json 153 | dmypy.json 154 | 155 | # Pyre type checker 156 | .pyre/ 157 | 158 | # pytype static type analyzer 159 | .pytype/ 160 | 161 | # Cython debug symbols 162 | cython_debug/ 163 | 164 | # PyCharm 165 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 166 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 167 | # and can be added to the global gitignore or merged into this file. 
For a more nuclear 168 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 169 | #.idea/ 170 | 171 | # Abstra 172 | # Abstra is an AI-powered process automation framework. 173 | # Ignore directories containing user credentials, local state, and settings. 174 | # Learn more at https://abstra.io/docs 175 | .abstra/ 176 | 177 | # Visual Studio Code 178 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore 179 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 180 | # and can be added to the global gitignore or merged into this file. However, if you prefer, 181 | # you could uncomment the following to ignore the enitre vscode folder 182 | # .vscode/ 183 | 184 | # Ruff stuff: 185 | .ruff_cache/ 186 | 187 | # PyPI configuration file 188 | .pypirc 189 | 190 | # Cursor 191 | # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to 192 | # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data 193 | # refer to https://docs.cursor.com/context/ignore-files 194 | .cursorignore 195 | .cursorindexingignore -------------------------------------------------------------------------------- /src/service/constraints/holding_time/holding_time_validator.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple 2 | import pandas as pd 3 | import pulp 4 | from datetime import timedelta 5 | 6 | from src.service.constraints.base_validator import BaseValidator 7 | from src.service.constraints.holding_time.trading_day_lookup import TradingDayLookup 8 | 9 | class HoldingTimeValidator(BaseValidator): 10 | """Validator for minimum holding time requirements.""" 11 | 12 | def __init__(self, oracle_strategy, holding_time_delta: timedelta): 13 | """ 14 | Initialize HoldingTimeValidator. 15 | 16 | Args: 17 | oracle_strategy: Reference to the OracleStrategy instance 18 | holding_time_delta: Minimum holding time required 19 | """ 20 | super().__init__(oracle_strategy) 21 | self.holding_time_delta = holding_time_delta 22 | self.trading_day_lookup = TradingDayLookup() 23 | self._last_current_date = self.strategy.oracle.current_date 24 | self._before_date = self._calculate_before_date(self._last_current_date) 25 | 26 | def _calculate_before_date(self, current_date: pd.Timestamp) -> pd.Timestamp: 27 | """ 28 | Calculate the before date for holding time calculations. 29 | 30 | Args: 31 | current_date: The current date to calculate from 32 | 33 | Returns: 34 | The effective before date for holding time calculations 35 | """ 36 | target_date = current_date - self.holding_time_delta 37 | 38 | # Get the trading day information 39 | trading_day_info = self.trading_day_lookup.get_trading_day(target_date) 40 | 41 | # If no trading day info is found, use the target date directly 42 | if trading_day_info is None: 43 | return target_date 44 | 45 | # If the date is not a trading day, find the nearest trading day before the current date 46 | if trading_day_info['date'] != trading_day_info['nearest_trading_day']: 47 | return pd.to_datetime(trading_day_info['backward_trading_day']) 48 | 49 | return pd.to_datetime(trading_day_info['date']) 50 | 51 | def _get_before_date(self, current_date: pd.Timestamp) -> pd.Timestamp: 52 | """ 53 | Get the before date for holding time calculations, with caching. 
54 | 55 | Args: 56 | current_date: The current date to calculate from 57 | 58 | Returns: 59 | The effective before date for holding time calculations 60 | """ 61 | # If we've already calculated for this current_date, return cached value 62 | if self._last_current_date == current_date and self._before_date is not None: 63 | return self._before_date 64 | 65 | # Calculate and cache new value 66 | self._last_current_date = current_date 67 | self._before_date = self._calculate_before_date(current_date) 68 | return self._before_date 69 | 70 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 71 | """Buying is always allowed with respect to holding time.""" 72 | return True, None 73 | 74 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 75 | """Check if a tax lot has been held long enough to sell.""" 76 | if self.holding_time_delta is None or self.holding_time_delta <= timedelta(days=0): 77 | return True, None 78 | 79 | # Get the lot information 80 | lot_info = self.strategy.tax_lots[self.strategy.tax_lots['tax_lot_id'] == tax_lot_id].iloc[0] 81 | purchase_date = lot_info['date'].date() 82 | 83 | # Get the before date using the cached method 84 | before_date = self._get_before_date(self.strategy.oracle.current_date) 85 | 86 | # If the purchase date is after or equal to the before_date, the lot cannot be sold 87 | if purchase_date >= before_date.date(): 88 | days_remaining = (self.holding_time_delta - (self.strategy.oracle.current_date - purchase_date)).days 89 | return False, f"Tax lot must be held for {days_remaining + 1} more days" 90 | 91 | return True, None 92 | 93 | def add_to_problem( 94 | self, 95 | prob: pulp.LpProblem, 96 | sells: dict, 97 | tax_lots: pd.DataFrame, 98 | current_date: pd.Timestamp 99 | ) -> None: 100 | """Add holding time constraints to the optimization problem.""" 101 | if self.holding_time_delta is None or self.holding_time_delta <= timedelta(days=0): 102 | return 103 | 104 | # Get the before date using the cached method 105 | before_date = self._get_before_date(current_date) 106 | 107 | # Find tax lots acquired within the holding time window 108 | recently_bought_lots = tax_lots[ 109 | pd.to_datetime(tax_lots["date"]).dt.date >= before_date.date() 110 | ] 111 | 112 | # Add constraint to prevent selling these lots 113 | for _, lot in recently_bought_lots.iterrows(): 114 | tax_lot_id = lot['tax_lot_id'] 115 | if tax_lot_id in sells: 116 | prob += ( 117 | sells[tax_lot_id] == 0, 118 | f"No_sell_recently_bought_{tax_lot_id}" 119 | ) -------------------------------------------------------------------------------- /_build/_static/language_data.js: -------------------------------------------------------------------------------- 1 | /* 2 | * This script contains the language-specific data used by searchtools.js, 3 | * namely the list of stopwords, stemmer, scorer and splitter. 
4 | */ 5 | 6 | var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; 7 | 8 | 9 | /* Non-minified version is copied as a separate JS file, if available */ 10 | 11 | /** 12 | * Porter Stemmer 13 | */ 14 | var Stemmer = function() { 15 | 16 | var step2list = { 17 | ational: 'ate', 18 | tional: 'tion', 19 | enci: 'ence', 20 | anci: 'ance', 21 | izer: 'ize', 22 | bli: 'ble', 23 | alli: 'al', 24 | entli: 'ent', 25 | eli: 'e', 26 | ousli: 'ous', 27 | ization: 'ize', 28 | ation: 'ate', 29 | ator: 'ate', 30 | alism: 'al', 31 | iveness: 'ive', 32 | fulness: 'ful', 33 | ousness: 'ous', 34 | aliti: 'al', 35 | iviti: 'ive', 36 | biliti: 'ble', 37 | logi: 'log' 38 | }; 39 | 40 | var step3list = { 41 | icate: 'ic', 42 | ative: '', 43 | alize: 'al', 44 | iciti: 'ic', 45 | ical: 'ic', 46 | ful: '', 47 | ness: '' 48 | }; 49 | 50 | var c = "[^aeiou]"; // consonant 51 | var v = "[aeiouy]"; // vowel 52 | var C = c + "[^aeiouy]*"; // consonant sequence 53 | var V = v + "[aeiou]*"; // vowel sequence 54 | 55 | var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 56 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 57 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 58 | var s_v = "^(" + C + ")?" + v; // vowel in stem 59 | 60 | this.stemWord = function (w) { 61 | var stem; 62 | var suffix; 63 | var firstch; 64 | var origword = w; 65 | 66 | if (w.length < 3) 67 | return w; 68 | 69 | var re; 70 | var re2; 71 | var re3; 72 | var re4; 73 | 74 | firstch = w.substr(0,1); 75 | if (firstch == "y") 76 | w = firstch.toUpperCase() + w.substr(1); 77 | 78 | // Step 1a 79 | re = /^(.+?)(ss|i)es$/; 80 | re2 = /^(.+?)([^s])s$/; 81 | 82 | if (re.test(w)) 83 | w = w.replace(re,"$1$2"); 84 | else if (re2.test(w)) 85 | w = w.replace(re2,"$1$2"); 86 | 87 | // Step 1b 88 | re = /^(.+?)eed$/; 89 | re2 = /^(.+?)(ed|ing)$/; 90 | if (re.test(w)) { 91 | var fp = re.exec(w); 92 | re = new RegExp(mgr0); 93 | if (re.test(fp[1])) { 94 | re = /.$/; 95 | w = w.replace(re,""); 96 | } 97 | } 98 | else if (re2.test(w)) { 99 | var fp = re2.exec(w); 100 | stem = fp[1]; 101 | re2 = new RegExp(s_v); 102 | if (re2.test(stem)) { 103 | w = stem; 104 | re2 = /(at|bl|iz)$/; 105 | re3 = new RegExp("([^aeiouylsz])\\1$"); 106 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 107 | if (re2.test(w)) 108 | w = w + "e"; 109 | else if (re3.test(w)) { 110 | re = /.$/; 111 | w = w.replace(re,""); 112 | } 113 | else if (re4.test(w)) 114 | w = w + "e"; 115 | } 116 | } 117 | 118 | // Step 1c 119 | re = /^(.+?)y$/; 120 | if (re.test(w)) { 121 | var fp = re.exec(w); 122 | stem = fp[1]; 123 | re = new RegExp(s_v); 124 | if (re.test(stem)) 125 | w = stem + "i"; 126 | } 127 | 128 | // Step 2 129 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; 130 | if (re.test(w)) { 131 | var fp = re.exec(w); 132 | stem = fp[1]; 133 | suffix = fp[2]; 134 | re = new RegExp(mgr0); 135 | if (re.test(stem)) 136 | w = stem + step2list[suffix]; 137 | } 138 | 139 | // Step 3 140 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; 141 | if (re.test(w)) { 142 | var fp = re.exec(w); 143 | stem = fp[1]; 144 | suffix = fp[2]; 145 | re = new RegExp(mgr0); 146 | if (re.test(stem)) 147 | w = stem + step3list[suffix]; 148 | } 149 | 150 | // Step 4 151 | re = 
/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; 152 | re2 = /^(.+?)(s|t)(ion)$/; 153 | if (re.test(w)) { 154 | var fp = re.exec(w); 155 | stem = fp[1]; 156 | re = new RegExp(mgr1); 157 | if (re.test(stem)) 158 | w = stem; 159 | } 160 | else if (re2.test(w)) { 161 | var fp = re2.exec(w); 162 | stem = fp[1] + fp[2]; 163 | re2 = new RegExp(mgr1); 164 | if (re2.test(stem)) 165 | w = stem; 166 | } 167 | 168 | // Step 5 169 | re = /^(.+?)e$/; 170 | if (re.test(w)) { 171 | var fp = re.exec(w); 172 | stem = fp[1]; 173 | re = new RegExp(mgr1); 174 | re2 = new RegExp(meq1); 175 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 176 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) 177 | w = stem; 178 | } 179 | re = /ll$/; 180 | re2 = new RegExp(mgr1); 181 | if (re.test(w) && re2.test(w)) { 182 | re = /.$/; 183 | w = w.replace(re,""); 184 | } 185 | 186 | // and turn initial Y back to y 187 | if (firstch == "y") 188 | w = firstch.toLowerCase() + w.substr(1); 189 | return w; 190 | } 191 | } 192 | 193 | -------------------------------------------------------------------------------- /_build/_static/sphinx_highlight.js: -------------------------------------------------------------------------------- 1 | /* Highlighting utilities for Sphinx HTML documentation. */ 2 | "use strict"; 3 | 4 | const SPHINX_HIGHLIGHT_ENABLED = true 5 | 6 | /** 7 | * highlight a given string on a node by wrapping it in 8 | * span elements with the given class name. 9 | */ 10 | const _highlight = (node, addItems, text, className) => { 11 | if (node.nodeType === Node.TEXT_NODE) { 12 | const val = node.nodeValue; 13 | const parent = node.parentNode; 14 | const pos = val.toLowerCase().indexOf(text); 15 | if ( 16 | pos >= 0 && 17 | !parent.classList.contains(className) && 18 | !parent.classList.contains("nohighlight") 19 | ) { 20 | let span; 21 | 22 | const closestNode = parent.closest("body, svg, foreignObject"); 23 | const isInSVG = closestNode && closestNode.matches("svg"); 24 | if (isInSVG) { 25 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 26 | } else { 27 | span = document.createElement("span"); 28 | span.classList.add(className); 29 | } 30 | 31 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 32 | const rest = document.createTextNode(val.substr(pos + text.length)); 33 | parent.insertBefore( 34 | span, 35 | parent.insertBefore( 36 | rest, 37 | node.nextSibling 38 | ) 39 | ); 40 | node.nodeValue = val.substr(0, pos); 41 | /* There may be more occurrences of search term in this node. So call this 42 | * function recursively on the remaining fragment. 
43 | */ 44 | _highlight(rest, addItems, text, className); 45 | 46 | if (isInSVG) { 47 | const rect = document.createElementNS( 48 | "http://www.w3.org/2000/svg", 49 | "rect" 50 | ); 51 | const bbox = parent.getBBox(); 52 | rect.x.baseVal.value = bbox.x; 53 | rect.y.baseVal.value = bbox.y; 54 | rect.width.baseVal.value = bbox.width; 55 | rect.height.baseVal.value = bbox.height; 56 | rect.setAttribute("class", className); 57 | addItems.push({ parent: parent, target: rect }); 58 | } 59 | } 60 | } else if (node.matches && !node.matches("button, select, textarea")) { 61 | node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); 62 | } 63 | }; 64 | const _highlightText = (thisNode, text, className) => { 65 | let addItems = []; 66 | _highlight(thisNode, addItems, text, className); 67 | addItems.forEach((obj) => 68 | obj.parent.insertAdjacentElement("beforebegin", obj.target) 69 | ); 70 | }; 71 | 72 | /** 73 | * Small JavaScript module for the documentation. 74 | */ 75 | const SphinxHighlight = { 76 | 77 | /** 78 | * highlight the search words provided in localstorage in the text 79 | */ 80 | highlightSearchWords: () => { 81 | if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight 82 | 83 | // get and clear terms from localstorage 84 | const url = new URL(window.location); 85 | const highlight = 86 | localStorage.getItem("sphinx_highlight_terms") 87 | || url.searchParams.get("highlight") 88 | || ""; 89 | localStorage.removeItem("sphinx_highlight_terms") 90 | url.searchParams.delete("highlight"); 91 | window.history.replaceState({}, "", url); 92 | 93 | // get individual terms from highlight string 94 | const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); 95 | if (terms.length === 0) return; // nothing to do 96 | 97 | // There should never be more than one element matching "div.body" 98 | const divBody = document.querySelectorAll("div.body"); 99 | const body = divBody.length ? divBody[0] : document.querySelector("body"); 100 | window.setTimeout(() => { 101 | terms.forEach((term) => _highlightText(body, term, "highlighted")); 102 | }, 10); 103 | 104 | const searchBox = document.getElementById("searchbox"); 105 | if (searchBox === null) return; 106 | searchBox.appendChild( 107 | document 108 | .createRange() 109 | .createContextualFragment( 110 | '" 114 | ) 115 | ); 116 | }, 117 | 118 | /** 119 | * helper function to hide the search marks again 120 | */ 121 | hideSearchWords: () => { 122 | document 123 | .querySelectorAll("#searchbox .highlight-link") 124 | .forEach((el) => el.remove()); 125 | document 126 | .querySelectorAll("span.highlighted") 127 | .forEach((el) => el.classList.remove("highlighted")); 128 | localStorage.removeItem("sphinx_highlight_terms") 129 | }, 130 | 131 | initEscapeListener: () => { 132 | // only install a listener if it is really needed 133 | if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; 134 | 135 | document.addEventListener("keydown", (event) => { 136 | // bail for input elements 137 | if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; 138 | // bail with special keys 139 | if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; 140 | if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { 141 | SphinxHighlight.hideSearchWords(); 142 | event.preventDefault(); 143 | } 144 | }); 145 | }, 146 | }; 147 | 148 | _ready(() => { 149 | /* Do not call highlightSearchWords() when we are on the search page. 
150 | * It will highlight words from the *previous* search query. 151 | */ 152 | if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); 153 | SphinxHighlight.initEscapeListener(); 154 | }); 155 | -------------------------------------------------------------------------------- /src/service/constraints/restriction/restriction_validator.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple, List 2 | import pandas as pd 3 | import pulp 4 | 5 | from src.service.constraints.base_validator import BaseValidator 6 | 7 | class RestrictionValidator(BaseValidator): 8 | """Validator for stock and wash sale restrictions.""" 9 | 10 | def __init__(self, strategy, enforce_wash_sale_prevention: bool = True): 11 | """Initialize the validator. 12 | 13 | Args: 14 | strategy: The strategy object containing portfolio information 15 | enforce_wash_sale_prevention: If False, wash sale restrictions will be ignored 16 | """ 17 | super().__init__(strategy) 18 | self.enforce_wash_sale_prevention = enforce_wash_sale_prevention 19 | 20 | def validate_buy(self, identifier: str, quantity: float) -> Tuple[bool, Optional[str]]: 21 | """Check if buying a security is allowed by restrictions.""" 22 | # Check stock restrictions 23 | if self.strategy.oracle.stock_restrictions is not None: 24 | restrictions = self.strategy.oracle.stock_restrictions 25 | if identifier in restrictions["identifier"].values: 26 | if not restrictions.loc[restrictions["identifier"] == identifier, 'can_buy'].iloc[0]: 27 | return False, f"Security {identifier} is restricted from buying" 28 | 29 | # Check wash sale restrictions 30 | if self.enforce_wash_sale_prevention and self.strategy.oracle.wash_sale_restrictions is not None: 31 | if self.strategy.oracle.wash_sale_restrictions.is_restricted_from_buying(identifier): 32 | return False, f"Security {identifier} is restricted due to wash sale rules" 33 | 34 | return True, None 35 | 36 | def validate_sell(self, tax_lot_id: str, quantity: float) -> Tuple[bool, Optional[str]]: 37 | """Check if selling a tax lot is allowed by restrictions.""" 38 | # Get the identifier for this tax lot 39 | lot_info = self.strategy.tax_lots[self.strategy.tax_lots['tax_lot_id'] == tax_lot_id].iloc[0] 40 | identifier = lot_info['identifier'] 41 | 42 | # Check stock restrictions 43 | if self.strategy.oracle.stock_restrictions is not None: 44 | restrictions = self.strategy.oracle.stock_restrictions 45 | if identifier in restrictions["identifier"].values: 46 | if not restrictions.loc[restrictions["identifier"] == identifier, 'can_sell'].iloc[0]: 47 | return False, f"Security {identifier} is restricted from selling" 48 | 49 | # Check wash sale restrictions 50 | if self.enforce_wash_sale_prevention and self.strategy.oracle.wash_sale_restrictions is not None: 51 | restricted_lots = self.strategy.oracle.wash_sale_restrictions.get_restricted_lots(identifier) 52 | if not restricted_lots.empty: 53 | for _, lot in restricted_lots.iterrows(): 54 | if lot['tax_lot_id'] == tax_lot_id: 55 | return False, f"Tax lot {tax_lot_id} is restricted due to wash sale rules" 56 | 57 | return True, None 58 | 59 | def add_to_problem( 60 | self, 61 | prob: pulp.LpProblem, 62 | buys: dict, 63 | sells: dict, 64 | gain_loss: pd.DataFrame, 65 | stock_restrictions: pd.DataFrame, 66 | wash_sale_restrictions, 67 | all_identifiers: List[str] 68 | ) -> None: 69 | """Add stock and wash sale restrictions to the optimization problem.""" 70 | # Add stock restrictions 71 | if stock_restrictions is not None: 72 | for _, row in
stock_restrictions.iterrows(): 73 | identifier = row['identifier'] 74 | if not row['can_buy']: 75 | if identifier in buys: 76 | prob += (buys[identifier] == 0), f"no_buy_{identifier}" 77 | if not row['can_sell']: 78 | for _, lot in gain_loss[gain_loss['identifier'] == identifier].iterrows(): 79 | if lot['tax_lot_id'] in sells: 80 | prob += (sells[lot['tax_lot_id']] == 0), f"no_sell_{lot['tax_lot_id']}" 81 | 82 | # Wash sale restrictions 83 | if self.enforce_wash_sale_prevention and wash_sale_restrictions is not None: 84 | for identifier in all_identifiers: 85 | # Check buy restrictions 86 | if wash_sale_restrictions.is_restricted_from_buying(identifier): 87 | if identifier in buys: 88 | prob += (buys[identifier] == 0), f"wash_sale_buy_{identifier}" 89 | 90 | # Check sell restrictions - get all restricted lots for this identifier 91 | restricted_lots = wash_sale_restrictions.get_restricted_lots(identifier) 92 | if not restricted_lots.empty: 93 | # Add binary variable to track if liquidating 94 | liquidate = pulp.LpVariable(f"liquidate_{identifier}", cat='Binary') 95 | 96 | all_tax_lots = self.strategy.oracle.all_tax_lots 97 | all_quantity = all_tax_lots.loc[all_tax_lots['identifier'] == identifier, 'quantity'].sum() 98 | 99 | identifier_sells = [] 100 | for _, lot in gain_loss[gain_loss['identifier'] == identifier].iterrows(): 101 | if lot['tax_lot_id'] in sells: 102 | identifier_sells.append(sells[lot['tax_lot_id']]) 103 | 104 | prob += (pulp.lpSum(identifier_sells) >= (all_quantity * liquidate)), f"wash_sale_liquidate_{identifier}" 105 | 106 | for _, lot in restricted_lots.iterrows(): 107 | if lot['tax_lot_id'] in sells: 108 | prob += (sells[lot['tax_lot_id']] == (lot["quantity"] * liquidate)), f"wash_sale_sell_{lot['tax_lot_id']}" 109 | -------------------------------------------------------------------------------- /tests/test_onboarding.py: -------------------------------------------------------------------------------- 1 | from datetime import date 2 | import pandas as pd 3 | import unittest 4 | from src.service.initializers import ( 5 | initialize_targets, 6 | initialize_tax_lots, 7 | initialize_prices, 8 | initialize_spreads, 9 | initialize_factor_model 10 | ) 11 | from src.service.oracle import Oracle 12 | from src.service.oracle_strategy import OracleStrategy 13 | from src.service.helpers.enums import OracleOptimizationType 14 | from src.service.helpers.constants import CASH_CUSIP_ID 15 | 16 | class TestBasicPortfolioOptimization(unittest.TestCase): 17 | """Test basic portfolio optimization functionality.""" 18 | 19 | def setUp(self): 20 | """Set up test data for basic portfolio optimization. 
21 | 22 | Initial State: 23 | - STOCK_A: $10k position (50%) 24 | - Price: $100/share, 100 shares 25 | - Cost basis: $100/share 26 | 27 | - STOCK_B: $10k position (50%) 28 | - Price: $100/share, 100 shares 29 | - Cost basis: $100/share 30 | 31 | Total Portfolio: $20k + $10k cash 32 | Target Weights: 40-40-20 (STOCK_A, STOCK_B, CASH) 33 | """ 34 | self.current_date = date(2024, 4, 20) 35 | 36 | # Create tax rates data 37 | self.tax_rates = pd.DataFrame([ 38 | { 39 | 'gain_type': 'short_term', 40 | 'federal_rate': 0.35, 41 | 'state_rate': 0.06, 42 | 'total_rate': 0.41 43 | }, 44 | { 45 | 'gain_type': 'long_term', 46 | 'federal_rate': 0.20, 47 | 'state_rate': 0.06, 48 | 'total_rate': 0.26 49 | }, 50 | { 51 | "gain_type": "qualified_dividend", 52 | "federal_rate": 0.15, 53 | "state_rate": 0.06, 54 | "total_rate": 0.21 55 | } 56 | ]) 57 | 58 | # Define test data based on documentation example 59 | self.targets_df = pd.DataFrame({ 60 | 'asset_class': ['STOCK_A', 'STOCK_B', 'CASH'], 61 | 'target_weight': [0.4, 0.4, 0.2], 62 | 'identifiers': [['STOCK_A'], ['STOCK_B'], [CASH_CUSIP_ID]] 63 | }) 64 | 65 | self.tax_lots_df = pd.DataFrame({ 66 | 'tax_lot_id': ['lot_a1', 'lot_b1'], 67 | 'identifier': ['STOCK_A', 'STOCK_B'], 68 | 'quantity': [100, 100], 69 | 'cost_basis': [100, 100], 70 | 'date': ['2024-01-01', '2024-01-01'] 71 | }) 72 | 73 | self.prices_df = pd.DataFrame([ 74 | { 75 | 'identifier': 'STOCK_A', 76 | 'price': 100.0, 77 | }, 78 | { 79 | 'identifier': 'STOCK_B', 80 | 'price': 100.0, 81 | }, 82 | { 83 | 'identifier': CASH_CUSIP_ID, 84 | 'price': 1.0, 85 | } 86 | ]) 87 | 88 | self.spreads_df = pd.DataFrame([ 89 | { 90 | 'identifier': 'STOCK_A', 91 | 'spread': 0.001 # 10 basis points 92 | }, 93 | { 94 | 'identifier': 'STOCK_B', 95 | 'spread': 0.001 # 10 basis points 96 | }, 97 | { 98 | 'identifier': CASH_CUSIP_ID, 99 | 'spread': 0.0 # No spread for cash 100 | } 101 | ]) 102 | 103 | # Create Oracle instance 104 | self.oracle = Oracle( 105 | current_date=self.current_date, 106 | recently_closed_lots=pd.DataFrame(), 107 | stock_restrictions=pd.DataFrame(), 108 | tax_rates=self.tax_rates 109 | ) 110 | 111 | # Create and configure strategy 112 | self.strategy = OracleStrategy( 113 | strategy_id="STRATEGY_1", 114 | tax_lots=self.tax_lots_df, 115 | prices=self.prices_df, 116 | cash=10000.0, 117 | targets=self.targets_df, 118 | asset_class_targets=None, 119 | spreads=self.spreads_df, 120 | factor_model=None, 121 | optimization_type=OracleOptimizationType.TAX_AWARE, 122 | deminimus_cash_target=0.0001, 123 | withdrawal_amount=0.0, 124 | enforce_wash_sale_prevention=True 125 | ) 126 | 127 | self.strategy.set_oracle(self.oracle) 128 | self.oracle.strategies = [self.strategy] 129 | self.oracle.initialize_wash_sale_restrictions(percentage_protection_from_inadvertent_wash_sales=0.003) 130 | 131 | def test_optimization_response_format(self): 132 | """Test that the optimization returns the expected response format.""" 133 | # Run optimization with settings from documentation 134 | results, netted_trades = self.oracle.compute_optimal_trades_for_all_strategies( 135 | settings={ 136 | "strategies": { 137 | "STRATEGY_1": { 138 | "weight_tax": 1.0, 139 | "weight_drift": 1.0, 140 | "weight_transaction": 1.0, 141 | "weight_factor_model": 0.0, 142 | "weight_cash_drag": 0.0, 143 | "rebalance_threshold": 0.001, 144 | "buy_threshold": 0.0005, 145 | "holding_time_days": 0, 146 | "should_tlh": True, 147 | "tlh_min_loss_threshold": 0.015, 148 | "range_min_weight_multiplier": 0.5, 149 | "range_max_weight_multiplier": 2.0, 150 
| "min_notional": 0, 151 | "rank_penalty_factor": 0.0, 152 | "trade_rounding": 4 153 | } 154 | } 155 | } 156 | ) 157 | 158 | # Verify response format and basic expectations 159 | self.assertIsInstance(results, dict, "Results should be a dictionary") 160 | self.assertIsInstance(netted_trades, pd.DataFrame, "Netted trades should be a DataFrame") 161 | 162 | # Verify we have exactly 2 buy trades 163 | buy_trades = netted_trades[netted_trades["action"] == "buy"] 164 | self.assertEqual(len(buy_trades), 2, "Expected exactly 2 buy trades") 165 | 166 | 167 | if __name__ == '__main__': 168 | unittest.main() 169 | -------------------------------------------------------------------------------- /_build/_modules/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Overview: module code — Oracle 0.1.0 documentation 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 |
-------------------------------------------------------------------------------- /_build/_modules/index.html: --------------------------------------------------------------------------------
[Generated Sphinx page "Overview: module code — Oracle 0.1.0 documentation"; the exported HTML lost its markup during extraction and is omitted.]
-------------------------------------------------------------------------------- /_build/py-modindex.html: --------------------------------------------------------------------------------
[Generated Sphinx page "Python Module Index — Oracle 0.1.0 documentation"; the exported HTML lost its markup during extraction. The index lists the modules service, service.objectives.cash_deployment.cash_deployment, service.objectives.drift.drift_optimization, service.objectives.factor_model.factor_model_optimization, service.objectives.taxes.tax_optimization, service.objectives.taxes.tlh and service.objectives.transaction_costs.transaction_optimization. Footer: © Copyright 2024, built with Sphinx and the Read the Docs theme.]
--------------------------------------------------------------------------------
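Stepping back, the wash-sale handling in RestrictionValidator.add_to_problem relies on an "all or nothing" pattern: one binary liquidate variable per identifier lets wash-sale-restricted lots be sold only as part of a full liquidation of that security. The sketch below reproduces just that pattern in isolation with PuLP; the lot names, quantities, and objective are invented for illustration and are not taken from the repository.

import pulp

prob = pulp.LpProblem("wash_sale_demo", pulp.LpMinimize)

# Two lots of the same security; lot_2 is wash-sale restricted.
lot_qty = {"lot_1": 40.0, "lot_2": 60.0}
sells = {
    lot_id: pulp.LpVariable(f"sell_{lot_id}", lowBound=0, upBound=qty)
    for lot_id, qty in lot_qty.items()
}

# Binary flag that is 1 only when the whole 100-share position is liquidated.
liquidate = pulp.LpVariable("liquidate_DEMO", cat="Binary")
all_quantity = sum(lot_qty.values())

# Total sells must cover the full position before liquidate can switch on ...
prob += pulp.lpSum(sells.values()) >= all_quantity * liquidate, "wash_sale_liquidate_DEMO"
# ... and the restricted lot can only be sold in that full-liquidation case.
prob += sells["lot_2"] == lot_qty["lot_2"] * liquidate, "wash_sale_sell_lot_2"

# Try to realize as much of the restricted lot as possible.
prob += -sells["lot_2"]

prob.solve(pulp.PULP_CBC_CMD(msg=False))
print(pulp.value(sells["lot_1"]), pulp.value(sells["lot_2"]), pulp.value(liquidate))

The solver can only sell lot_2 by liquidating the entire position (it reports 40.0, 60.0 and 1.0 here); with liquidate forced to 0, the restricted lot's sale is pinned at zero, which is the behaviour the validator's constraints impose on the main problem.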