├── tests ├── __init__.py ├── algorithms │ ├── file_patch.py │ ├── syntax_errors.py │ ├── pep8_violations.py │ ├── initialization_errors.py │ ├── runtime_error.py │ ├── FilePatch.cs │ ├── live_logs.py │ ├── InitializationErrors.cs │ ├── RuntimeError.cs │ ├── live_charts.py │ ├── LiveCharts.cs │ ├── live_liquidate.py │ ├── charts.py │ ├── Charts.cs │ ├── LiveLiquidate.cs │ ├── live_orders.py │ ├── LiveOrders.cs │ ├── insights.py │ ├── live_command.py │ ├── Insights.cs │ ├── live_insights.py │ ├── parameter_optimization.py │ ├── LiveInsights.cs │ ├── ParameterOptimization.cs │ ├── OrderProperties.cs │ └── order_properties.py ├── test_initialization.py ├── test_account.py ├── test_lean_versions.py ├── test_mcp_server_version.py ├── test_organization_workspace.py ├── test_live_orders.py ├── test_live_insights.py ├── test_live_logs.py ├── utils.py ├── test_live_commands.py ├── test_backtest_orders.py ├── test_backtest_insights.py ├── test_compile.py ├── test_backtest_charts.py ├── test_live_charts.py ├── test_ai.py ├── test_project_nodes.py ├── test_project.py ├── test_live.py ├── test_project_collaboration.py ├── test_object_store.py ├── test_backtests.py └── test_optimizations.py ├── .gitignore ├── src ├── tools │ ├── __init__.py │ ├── account.py │ ├── lean_versions.py │ ├── compile.py │ ├── live_commands.py │ ├── mcp_server_version.py │ ├── project_nodes.py │ ├── project.py │ ├── files.py │ ├── ai.py │ ├── project_collaboration.py │ ├── optimizations.py │ ├── object_store.py │ ├── backtests.py │ └── live.py ├── code_source_id.py ├── __init__.py ├── instructions.md ├── organization_workspace.py ├── api_connection.py └── main.py ├── pyproject.toml ├── Dockerfile ├── post_processing.py ├── .github └── workflows │ ├── run_live_trading_tests.yml │ ├── run_tests.yml │ ├── push_to_docker_hub.yml │ └── update_models.yml ├── create_tool_markdown.py └── LICENSE /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ -------------------------------------------------------------------------------- /src/tools/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tests/algorithms/file_patch.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | a = 1 6 | -------------------------------------------------------------------------------- /tests/algorithms/syntax_errors.py: -------------------------------------------------------------------------------- 1 | from AlgorithmImports import * 2 | 3 | 4 | class SyntaxErrorTestAlgorithm(QCAlgorithm): 5 | 6 | def initialize(self): 7 | a = 8 | b = 2 9 | -------------------------------------------------------------------------------- /tests/algorithms/pep8_violations.py: -------------------------------------------------------------------------------- 1 | from AlgorithmImports import * 2 | 3 | 4 | class UpdateCodeToPEP8TestAlgorithm(QCAlgorithm): 5 | 6 | def Initialize(self): 7 | self.AddEquity('SPY') 8 | -------------------------------------------------------------------------------- /tests/algorithms/initialization_errors.py: 
-------------------------------------------------------------------------------- 1 | from AlgorithmImports import * 2 | 3 | 4 | class BacktestInitTestAlgorithm(QCAlgorithm): 5 | 6 | def initialize(self): 7 | self.add_equity('SPY', Resolution.DAY) 8 | -------------------------------------------------------------------------------- /tests/algorithms/runtime_error.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class BacktestRuntimeErrorTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | raise Exception('Test') -------------------------------------------------------------------------------- /tests/test_initialization.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | 5 | 6 | class TestInitialization: 7 | 8 | @pytest.mark.asyncio 9 | async def test_instructions(self): 10 | assert True #mcp.instructions 11 | -------------------------------------------------------------------------------- /src/code_source_id.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Load the agent name from the environment variables. 4 | AGENT_NAME = os.getenv('AGENT_NAME', 'MCP Server') 5 | 6 | def add_code_source_id(model): 7 | model.codeSourceId = AGENT_NAME 8 | return model -------------------------------------------------------------------------------- /tests/algorithms/FilePatch.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using QuantConnect.Algorithm; 3 | #endregion 4 | public class MyAlgorithm : QCAlgorithm 5 | { 6 | public override void Initialize() 7 | { 8 | var a = 1; 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tests/algorithms/live_logs.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class LiveLogTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | for i in range(10): 10 | self.log(f'Log test {i}') 11 | -------------------------------------------------------------------------------- /tests/algorithms/InitializationErrors.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using QuantConnect.Data; 3 | #endregion 4 | 5 | public class BacktestInitTestAlgorithm : QCAlgorithm 6 | { 7 | public override void Initialize() 8 | { 9 | AddEquity("SPY", Resolution.Day); 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /tests/algorithms/RuntimeError.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using System; 3 | using QuantConnect.Algorithm; 4 | #endregion 5 | public class BacktestRuntimeErrorTestAlgorithm : QCAlgorithm 6 | { 7 | public override void Initialize() 8 | { 9 | throw new Exception("Test"); 10 | } 11 | } -------------------------------------------------------------------------------- /tests/algorithms/live_charts.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class LiveChartTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | symbol = self.add_crypto("BTCUSD", Resolution.SECOND).symbol 10 | 
self.plot_indicator("SMA", self.sma(symbol, 10)) 11 | -------------------------------------------------------------------------------- /tests/test_account.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from utils import validate_models 5 | from models import AccountResponse 6 | 7 | 8 | class TestAccount: 9 | 10 | @pytest.mark.asyncio 11 | async def test_read_account(self): 12 | await validate_models( 13 | mcp, 'read_account', output_class=AccountResponse 14 | ) 15 | -------------------------------------------------------------------------------- /tests/algorithms/LiveCharts.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using QuantConnect; 3 | using QuantConnect.Algorithm; 4 | #endregion 5 | 6 | public class LiveChartTestAlgorithm : QCAlgorithm 7 | { 8 | public override void Initialize() 9 | { 10 | var symbol = AddCrypto("BTCUSD", Resolution.Second).Symbol; 11 | PlotIndicator("SMA", SMA(symbol, 10)); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /tests/algorithms/live_liquidate.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class LiveLiquidateTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | # Add BTCUSD, which trades 24/7, so the algorithm can fill 10 | # liquidation orders whenever we run the test suite. 11 | self.add_crypto("BTCUSD", Resolution.SECOND) 12 | -------------------------------------------------------------------------------- /tests/algorithms/charts.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class ChartTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self.set_start_date(2023, 1, 1) 10 | self.set_end_date(2023, 4, 1) 11 | symbol = self.add_equity("SPY", Resolution.DAILY).symbol 12 | self.plot_indicator("SMA", self.sma(symbol, 10)) 13 | -------------------------------------------------------------------------------- /tests/test_lean_versions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from utils import validate_models 5 | from models import LeanVersionsResponse 6 | 7 | 8 | class TestLeanVersions: 9 | 10 | @pytest.mark.asyncio 11 | async def test_read_lean_versions(self): 12 | await validate_models( 13 | mcp, 'read_lean_versions', output_class=LeanVersionsResponse 14 | ) 15 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "mcp-server" 3 | version = "0.1.0" 4 | description = "Python MCP server for local interactions with the QuantConnect API." 
5 | readme = "README.md" 6 | requires-python = ">=3.10" 7 | dependencies = [ 8 | "python-dotenv>=0.23.0", 9 | "httpx>=0.28.1", 10 | "mcp[cli]>=1.9.3", 11 | "requests" 12 | ] 13 | 14 | [tool.pytest.ini_options] 15 | pythonpath = "src tests" 16 | -------------------------------------------------------------------------------- /tests/algorithms/Charts.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using QuantConnect; 3 | using QuantConnect.Algorithm; 4 | #endregion 5 | public class ChartTestAlgorithm : QCAlgorithm 6 | { 7 | public override void Initialize() 8 | { 9 | SetStartDate(2023, 1, 1); 10 | SetEndDate(2023, 4, 1); 11 | var symbol = AddEquity("SPY", Resolution.Daily).Symbol; 12 | PlotIndicator("SMA", SMA(symbol, 10)); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use a slim Python base image 2 | FROM python:3.11-slim 3 | 4 | # Set working directory 5 | WORKDIR /app 6 | 7 | # Install uv 8 | RUN pip install uv 9 | 10 | # Copy dependency files 11 | COPY pyproject.toml . 12 | 13 | # Generate uv.lock during the build 14 | RUN uv lock 15 | 16 | # Install dependencies with uv 17 | RUN uv sync --frozen 18 | 19 | # Copy source code 20 | COPY src/ src/ 21 | 22 | # Run the server 23 | CMD ["uv", "run", "src/main.py"] 24 | -------------------------------------------------------------------------------- /src/tools/account.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import AccountResponse 3 | 4 | def register_account_tools(mcp): 5 | # Read 6 | @mcp.tool( 7 | annotations={ 8 | 'title': 'Read account', 9 | 'readOnlyHint': True, 10 | 'openWorldHint': True 11 | } 12 | ) 13 | async def read_account() -> AccountResponse: 14 | """Read the organization account status.""" 15 | return await post('/account/read') 16 | -------------------------------------------------------------------------------- /tests/algorithms/LiveLiquidate.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | 3 | #endregion 4 | namespace QuantConnect.Algorithm.CSharp 5 | { 6 | public class LiveLiquidateTestAlgorithm : QCAlgorithm 7 | { 8 | 9 | public override void Initialize() 10 | { 11 | // Add BTCUSD, which trades 24/7, so the algorithm can fill 12 | // liquidation orders whenever we run the test suite. 13 | AddCrypto("BTCUSD", Resolution.Second); 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/tools/lean_versions.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import LeanVersionsResponse 3 | 4 | def register_lean_version_tools(mcp): 5 | # Read 6 | @mcp.tool( 7 | annotations={'title': 'Read LEAN versions', 'readOnlyHint': True} 8 | ) 9 | async def read_lean_versions() -> LeanVersionsResponse: 10 | """Returns a list of LEAN versions with basic information for 11 | each version. 
12 | """ 13 | return await post('/lean/versions/read') 14 | -------------------------------------------------------------------------------- /tests/algorithms/live_orders.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class LiveOrdersTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self._trades = 0 10 | self._btc = self.add_crypto('BTCUSD', Resolution.SECOND) 11 | 12 | def on_data(self, data: Slice): 13 | if self._trades >= 10: 14 | return 15 | self._trades += 1 16 | self.set_holdings(self._btc.symbol, 0 if self.portfolio.invested else 1) 17 | -------------------------------------------------------------------------------- /tests/algorithms/LiveOrders.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using QuantConnect; 3 | using QuantConnect.Algorithm; 4 | using QuantConnect.Data; 5 | using QuantConnect.Securities.Crypto; 6 | #endregion 7 | public class JumpingFluorescentYellowBaboon : QCAlgorithm 8 | { 9 | private Crypto _btc; 10 | public override void Initialize() 11 | { 12 | _btc = AddCrypto("BTCUSD", Resolution.Second); 13 | } 14 | 15 | public override void OnData(Slice data) 16 | { 17 | if (!Portfolio.Invested) 18 | { 19 | SetHoldings(_btc.Symbol, 1); 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /tests/algorithms/insights.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class InsightTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self.set_start_date(2024, 1, 8) 10 | self.set_end_date(2024, 4, 1) 11 | self.set_cash(100000) 12 | self.add_equity("SPY", Resolution.DAILY) 13 | self.add_alpha( 14 | ConstantAlphaModel( 15 | InsightType.PRICE, InsightDirection.UP, timedelta(30), 16 | 0.1, 0.2, 0.3 17 | ) 18 | ) 19 | self.set_portfolio_construction(EqualWeightingPortfolioConstructionModel()) 20 | -------------------------------------------------------------------------------- /tests/test_mcp_server_version.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | 5 | 6 | class TestMCPServerVersion: 7 | 8 | async def _ensure_response_has_two_periods(self, tool_name): 9 | _, structured_response = await mcp.call_tool(tool_name, {}) 10 | assert structured_response['result'].count('.') == 2 11 | 12 | @pytest.mark.asyncio 13 | async def test_read_verion(self): 14 | await self._ensure_response_has_two_periods('read_mcp_server_version') 15 | 16 | @pytest.mark.asyncio 17 | async def test_read_latest_verion(self): 18 | await self._ensure_response_has_two_periods( 19 | 'read_latest_mcp_server_version' 20 | ) 21 | -------------------------------------------------------------------------------- /tests/algorithms/live_command.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class LiveCommandsTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self.add_command(MyCommand) 10 | 11 | def on_command(self, data): 12 | self.log(f'Generic command. 
data.text: {data.text}') 13 | 14 | 15 | class MyCommand(Command): 16 | text = None 17 | number = None 18 | parameters = {} 19 | 20 | def run(self, algorithm): 21 | parameters = {kvp.key: kvp.value for kvp in self.parameters} 22 | algorithm.log(f"Encapsulated command. text: {self.text}; number: {self.number}; parameters: {parameters}") 23 | return True 24 | -------------------------------------------------------------------------------- /tests/algorithms/Insights.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using System; 3 | using QuantConnect; 4 | using QuantConnect.Algorithm.Framework.Alphas; 5 | using QuantConnect.Algorithm.Framework.Portfolio; 6 | using QuantConnect.Algorithm; 7 | #endregion 8 | public class InsightTestAlgorithm : QCAlgorithm 9 | { 10 | public override void Initialize() 11 | { 12 | SetStartDate(2024, 1, 8); 13 | SetEndDate(2024, 4, 1); 14 | SetCash(100000); 15 | AddEquity("SPY", Resolution.Daily); 16 | AddAlpha( 17 | new ConstantAlphaModel( 18 | InsightType.Price, InsightDirection.Up, TimeSpan.FromDays(30), 0.1, 0.2, 0.3 19 | ) 20 | ); 21 | SetPortfolioConstruction(new EqualWeightingPortfolioConstructionModel()); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/tools/compile.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | CreateCompileRequest, 4 | ReadCompileRequest, 5 | CreateCompileResponse, 6 | ReadCompileResponse 7 | ) 8 | 9 | def register_compile_tools(mcp): 10 | # Create 11 | @mcp.tool( 12 | annotations={'title': 'Create compile', 'destructiveHint': False} 13 | ) 14 | async def create_compile( 15 | model: CreateCompileRequest) -> CreateCompileResponse: 16 | """Asynchronously create a compile job request for a project.""" 17 | return await post('/compile/create', model) 18 | 19 | # Read 20 | @mcp.tool(annotations={'title': 'Read compile', 'readOnlyHint': True}) 21 | async def read_compile(model: ReadCompileRequest) -> ReadCompileResponse: 22 | """Read a compile packet job result.""" 23 | return await post('/compile/read', model) 24 | -------------------------------------------------------------------------------- /post_processing.py: -------------------------------------------------------------------------------- 1 | path = 'src/models.py' 2 | 3 | # Read the file content. 4 | with open(path, 'r', encoding='utf-8') as file: 5 | lines = file.readlines() 6 | 7 | # Add the extra import (after `from __future__` to avoid errors). 8 | lines.insert(5, 'from pydantic import RootModel, ConfigDict\n') 9 | content = ''.join(lines) 10 | 11 | # Perform string replacements. 12 | content = content.replace('__root__', 'RootModel').replace('ResponseModel', 'Response') 13 | 14 | # Replace 15 | # ``` 16 | # class Config: 17 | # extra = Extra.forbid 18 | # ``` 19 | # with 20 | # `model_config = ConfigDict(extra='forbid')` 21 | # to avoid warnings when running pytest. 22 | content = content.replace('class Config:', "model_config = ConfigDict(extra='forbid')")\ 23 | .replace(' extra = Extra.forbid', '') 24 | 25 | # Save the new file content. 
26 | with open(path, 'w', encoding='utf-8') as file: 27 | file.write(content) 28 | -------------------------------------------------------------------------------- /src/tools/live_commands.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | CreateLiveCommandRequest, 4 | BroadcastLiveCommandRequest, 5 | RestResponse 6 | ) 7 | 8 | def register_live_trading_command_tools(mcp): 9 | # Create (singular algorithm) 10 | @mcp.tool(annotations={'title': 'Create live command'}) 11 | async def create_live_command( 12 | model: CreateLiveCommandRequest) -> RestResponse: 13 | """Send a command to a live trading algorithm.""" 14 | return await post('/live/commands/create', model) 15 | 16 | # Create (multiple algorithms) - Broadcast 17 | @mcp.tool(annotations={'title': 'Broadcast live command'}) 18 | async def broadcast_live_command( 19 | model: BroadcastLiveCommandRequest) -> RestResponse: 20 | """Broadcast a live command to all live algorithms in an 21 | organization.""" 22 | return await post('/live/commands/broadcast', model) 23 | -------------------------------------------------------------------------------- /.github/workflows/run_live_trading_tests.yml: -------------------------------------------------------------------------------- 1 | name: Run live trading tests 2 | 3 | on: 4 | workflow_dispatch: 5 | #push: 6 | # branches: ['*'] 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-24.04 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | - name: Install dependencies 15 | run: |- 16 | python -m pip install --upgrade pip 17 | pip install mcp[cli]==1.10.1, pytest-asyncio==1.0.0 18 | 19 | - name: Free space 20 | run: df -h && rm -rf /opt/hostedtoolcache* && df -h 21 | 22 | - name: Run tests 23 | run: |- 24 | pytest tests/ -k "test_live" 25 | env: 26 | QUANTCONNECT_USER_ID: ${{ secrets.QUANTCONNECT_USER_ID }} 27 | QUANTCONNECT_API_TOKEN: ${{ secrets.QUANTCONNECT_API_TOKEN }} 28 | QUANTCONNECT_COLLABORATOR_ID: ${{ secrets.QUANTCONNECT_COLLABORATOR_ID }} 29 | QUANTCONNECT_ORGANIZATION_ID: ${{ secrets.QUANTCONNECT_ORGANIZATION_ID }} 30 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. 2 | # Lean CLI v1.0. Copyright 2021 QuantConnect Corporation. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | # The version is always set to "dev" in the Git repository. When a new release is ready, 15 | # a maintainer will push a new Git tag which will trigger GitHub Actions to publish a new 16 | # Docker image with the version of the tag. 
17 | __version__ = "0.0.0-dev" 18 | -------------------------------------------------------------------------------- /tests/algorithms/live_insights.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class LiveInsightsTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self.set_start_date(2025, 7, 10) 10 | self._insights = 0 11 | self._symbol = self.add_crypto('BTCUSD', Resolution.SECOND).symbol 12 | self.set_portfolio_construction( 13 | EqualWeightingPortfolioConstructionModel() 14 | ) 15 | 16 | def on_data(self, data: Slice): 17 | # Only emit 10 insights. 18 | if self._insights >= 10: 19 | return 20 | self._insights += 1 21 | # Determine the direction. 22 | if self.portfolio.invested: 23 | direction = InsightDirection.FLAT 24 | else: 25 | direction = InsightDirection.UP 26 | # Emit the insight. 27 | self.emit_insights( 28 | Insight.price(self._symbol, timedelta(1), direction) 29 | ) 30 | -------------------------------------------------------------------------------- /tests/algorithms/parameter_optimization.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class ParameterOptimizationTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self.set_start_date(2010, 1, 1) 10 | self.set_end_date(2025, 1, 1) 11 | self._equity = self.add_equity("SPY", Resolution.DAILY) 12 | self.settings.automatic_indicator_warm_up = True 13 | self._sma_slow = self.sma( 14 | self._equity.symbol, self.get_parameter('sma_slow', 21) 15 | ) 16 | self._sma_fast = self.sma( 17 | self._equity.symbol, self.get_parameter('sma_fast', 5) 18 | ) 19 | 20 | def on_data(self, data: Slice): 21 | if (not self._equity.holdings.is_long and 22 | self._sma_fast > self._sma_slow): 23 | self.set_holdings(self._equity.symbol, 1) 24 | return 25 | if (not self._equity.holdings.is_short and 26 | self._sma_fast < self._sma_slow): 27 | self.set_holdings(self._equity.symbol, -1) 28 | -------------------------------------------------------------------------------- /src/tools/mcp_server_version.py: -------------------------------------------------------------------------------- 1 | from __init__ import __version__ 2 | 3 | import requests 4 | 5 | def register_mcp_server_version_tools(mcp): 6 | # Read current version 7 | @mcp.tool( 8 | annotations={ 9 | 'title': 'Read QC MCP Server version', 'readOnlyHint': True 10 | } 11 | ) 12 | async def read_mcp_server_version() -> str: 13 | """Returns the version of the QC MCP Server that's running.""" 14 | return __version__ 15 | 16 | # Read latest version 17 | @mcp.tool( 18 | annotations={ 19 | 'title': 'Read latest QC MCP Server version', 'readOnlyHint': True 20 | } 21 | ) 22 | async def read_latest_mcp_server_version() -> str: 23 | """Returns the latest version of the QC MCP Server released.""" 24 | response = requests.get( 25 | "https://hub.docker.com/v2/namespaces/quantconnect/repositories/mcp-server/tags", 26 | params={"page_size": 2} 27 | ) 28 | response.raise_for_status() 29 | # Get the name of the second result. The first one is 'latest'. 
30 | return response.json()['results'][1]['name'] 31 | -------------------------------------------------------------------------------- /tests/algorithms/LiveInsights.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using System; 3 | using QuantConnect; 4 | using QuantConnect.Algorithm.Framework.Alphas; 5 | using QuantConnect.Algorithm.Framework.Portfolio; 6 | using QuantConnect.Algorithm; 7 | using QuantConnect.Data; 8 | #endregion 9 | public class LiveInsightsTestAlgorithm : QCAlgorithm 10 | { 11 | private int _insights; 12 | private Symbol _symbol; 13 | 14 | public override void Initialize() 15 | { 16 | SetStartDate(2025, 7, 1); 17 | _insights = 0; 18 | _symbol = AddCrypto("BTCUSD", Resolution.Second).Symbol; 19 | SetPortfolioConstruction(new EqualWeightingPortfolioConstructionModel()); 20 | } 21 | 22 | public override void OnData(Slice data) 23 | { 24 | // Only emit 10 insights. 25 | if (_insights++ >= 10) 26 | { 27 | return; 28 | } 29 | // Determine the direction. 30 | var direction = Portfolio.Invested ? InsightDirection.Flat : InsightDirection.Up; 31 | // Emit the insight. 32 | EmitInsights(Insight.Price(_symbol, TimeSpan.FromDays(1), direction)); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/tools/project_nodes.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | ReadProjectNodesRequest, 4 | UpdateProjectNodesRequest, 5 | ProjectNodesResponse 6 | ) 7 | 8 | def register_project_node_tools(mcp): 9 | # Read 10 | @mcp.tool( 11 | annotations={'title': 'Read project nodes', 'readOnlyHint': True} 12 | ) 13 | async def read_project_nodes( 14 | model: ReadProjectNodesRequest) -> ProjectNodesResponse: 15 | """Read the available and selected nodes of a project.""" 16 | return await post('/projects/nodes/read', model) 17 | 18 | # Update 19 | @mcp.tool( 20 | annotations={ 21 | 'title': 'Update project nodes', 22 | 'destructiveHint': False, 23 | 'idempotentHint': True 24 | } 25 | ) 26 | async def update_project_nodes( 27 | model: UpdateProjectNodesRequest) -> ProjectNodesResponse: 28 | """Update the active state of the given nodes to true. 29 | 30 | If you don't provide any nodes, all the nodes become inactive 31 | and autoSelectNode is true. 
32 | """ 33 | return await post('/projects/nodes/update', model) 34 | -------------------------------------------------------------------------------- /tests/algorithms/ParameterOptimization.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using QuantConnect; 3 | using QuantConnect.Algorithm; 4 | using QuantConnect.Indicators; 5 | using QuantConnect.Data; 6 | using QuantConnect.Securities.Equity; 7 | #endregion 8 | public class ParameterOptimizationTestAlgorithm : QCAlgorithm 9 | { 10 | private Equity _equity; 11 | private SimpleMovingAverage _smaSlow; 12 | private SimpleMovingAverage _smaFast; 13 | 14 | public override void Initialize() 15 | { 16 | SetStartDate(2010, 1, 1); 17 | SetEndDate(2025, 1, 1); 18 | 19 | _equity = AddEquity("SPY", Resolution.Daily); 20 | Settings.AutomaticIndicatorWarmUp = true; 21 | _smaSlow = SMA(_equity.Symbol, GetParameter("sma_slow", 21)); 22 | _smaFast = SMA(_equity.Symbol, GetParameter("sma_fast", 5)); 23 | } 24 | 25 | public override void OnData(Slice data) 26 | { 27 | if (!_equity.Holdings.IsLong && _smaFast > _smaSlow) 28 | { 29 | SetHoldings(_equity.Symbol, 1); 30 | return; 31 | } 32 | if (!_equity.Holdings.IsShort && _smaFast < _smaSlow) 33 | { 34 | SetHoldings(_equity.Symbol, -1); 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/instructions.md: -------------------------------------------------------------------------------- 1 | You are an expert quant that assists QuantConnect members with designing, coding, and testing algorithms. Take the initiative as often as possible. 2 | 3 | To create a new project, call the `create_project` tool instead of trying to create the project files on the local machine. 4 | 5 | The project Id is in the config file, under `cloud-id`. Don't call the `list_backtests` tool unless it's absolutely needed. 6 | 7 | For Python projects, write code in PEP8 style (snake_case) or convert the code using the PEP8 Converter Tool (`update_code_to_pep8`). 8 | 9 | When creating indicator objects (such as RSI, SMA, etc.), never overwrite the indicator method names (e.g., do not assign to `self.rsi`, `self.sma`, etc.). Instead, use a different variable name, preferably with a leading underscore for non-public instance variables (e.g., `self._rsi = self.rsi(self._symbol, 14))`. This prevents conflicts with the built-in indicator methods and ensures code reliability. 10 | 11 | ALWAYS choose variable names different from the methods you're calling. 12 | 13 | Before running backtests, run the compile tool (`create_compile` and `read_compile`) to get the syntax errors and then FIX ALL COMPILE WARNINGS. 14 | 15 | Where more efficient, use the `patch_file` tool to only update the lines of code with errors instead of using the `update_file_contents` tool. 
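For example, here is a minimal sketch of the recommended indicator-naming pattern (the class name, symbol, and indicator periods below are only illustrative, not part of any existing project):

```python
# region imports
from AlgorithmImports import *
# endregion


class IndicatorNamingExampleAlgorithm(QCAlgorithm):

    def initialize(self):
        # Keep indicator objects in non-public variables so the built-in
        # helper methods (self.rsi, self.sma, ...) are never overwritten.
        self._symbol = self.add_equity('SPY', Resolution.DAILY).symbol
        self._rsi = self.rsi(self._symbol, 14)  # not: self.rsi = ...
        self._sma = self.sma(self._symbol, 10)  # not: self.sma = ...
```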
16 | -------------------------------------------------------------------------------- /tests/algorithms/OrderProperties.cs: -------------------------------------------------------------------------------- 1 | #region imports 2 | using System; 3 | using QuantConnect; 4 | using QuantConnect.Algorithm; 5 | using QuantConnect.Data; 6 | using QuantConnect.Orders; 7 | using QuantConnect.Securities.Equity; 8 | #endregion 9 | 10 | public class OrderPropertiesTestAlgorithm : QCAlgorithm 11 | { 12 | private Equity _equity; 13 | 14 | public override void Initialize() 15 | { 16 | SetStartDate(2024, 1, 8); 17 | SetEndDate(2024, 2, 1); 18 | SetCash(100000); 19 | _equity = AddEquity("SPY", Resolution.Hour, dataNormalizationMode: DataNormalizationMode.Raw); 20 | } 21 | 22 | public override void OnData(Slice data) 23 | { 24 | if (Portfolio.Invested) 25 | { 26 | return; 27 | } 28 | string tag = "some tag"; 29 | int quantity = 1; 30 | decimal limitPrice = _equity.Price + 10; 31 | 32 | // Test the GoodTilCanceled time in force 33 | LimitOrder(_equity.Symbol, quantity, limitPrice, tag, new OrderProperties {TimeInForce = TimeInForce.GoodTilCanceled}); 34 | 35 | // Test the Day time in force 36 | LimitOrder(_equity.Symbol, quantity, limitPrice, tag, new OrderProperties {TimeInForce = TimeInForce.Day}); 37 | 38 | // Test the GoodTilDate time in force 39 | LimitOrder(_equity.Symbol, quantity, limitPrice, tag, new OrderProperties {TimeInForce = TimeInForce.GoodTilDate(new DateTime(2025, 1, 1))}); 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /tests/test_organization_workspace.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import time 3 | import docker 4 | 5 | from organization_workspace import OrganizationWorkspace 6 | from api_connection import USER_ID, API_TOKEN 7 | 8 | def test_organization_workspace_mount(): 9 | # Ensure the MOUNT_SOURCE environment variable is set. 10 | assert OrganizationWorkspace.MOUNT_SOURCE, 'MOUNT_SOURCE env var is not set.' 11 | # Create a Docker client. 12 | client = docker.from_env() 13 | # Start MCP Server inside a container. 14 | container = client.containers.run( 15 | image='quantconnect/mcp-server', 16 | environment={ 17 | 'QUANTCONNECT_USER_ID': USER_ID, 18 | 'QUANTCONNECT_API_TOKEN': API_TOKEN 19 | }, 20 | platform='linux/amd64', 21 | volumes={ 22 | OrganizationWorkspace.MOUNT_SOURCE: { 23 | 'bind': OrganizationWorkspace.MOUNT_DESTINATION, 'mode': 'ro'} 24 | }, 25 | detach=True, # Run in background 26 | auto_remove=True # Equivalent to --rm 27 | ) 28 | # Wait for the container to start running. 29 | time.sleep(5) 30 | # Check if the expected mount exists. 31 | assert any( 32 | mount['Type'] == 'bind' and 33 | mount['Source'] == OrganizationWorkspace.MOUNT_SOURCE and 34 | mount['Destination'] == OrganizationWorkspace.MOUNT_DESTINATION and 35 | mount['Mode'] == 'ro' 36 | for mount in container.attrs['Mounts'] 37 | ) 38 | # Stop the container. 
39 | container.stop() 40 | -------------------------------------------------------------------------------- /.github/workflows/run_tests.yml: -------------------------------------------------------------------------------- 1 | name: Run tests 2 | 3 | on: 4 | workflow_dispatch: 5 | #push: 6 | # branches: ['*'] 7 | 8 | jobs: 9 | run_matrix: 10 | strategy: 11 | fail-fast: false 12 | matrix: 13 | script: 14 | - account 15 | - ai 16 | - backtest_charts 17 | - backtest_insights 18 | - backtest_orders 19 | - backtests 20 | - compile 21 | - files 22 | - initialization 23 | - lean_versions 24 | - mcp_server_version 25 | - object_store 26 | - optimizations 27 | - project 28 | - project_collaboration 29 | - project_nodes 30 | 31 | runs-on: ubuntu-24.04 32 | steps: 33 | - uses: actions/checkout@v3 34 | 35 | - name: Install dependencies 36 | run: |- 37 | python -m pip install --upgrade pip 38 | pip install mcp[cli]==1.10.1, pytest-asyncio==1.0.0, requests 39 | 40 | - name: Free space 41 | run: df -h && rm -rf /opt/hostedtoolcache* && df -h 42 | 43 | - name: Run test_${{ matrix.script }}.py 44 | run: |- 45 | pytest tests/test_${{ matrix.script }}.py 46 | env: 47 | QUANTCONNECT_USER_ID: ${{ secrets.QUANTCONNECT_USER_ID }} 48 | QUANTCONNECT_API_TOKEN: ${{ secrets.QUANTCONNECT_API_TOKEN }} 49 | QUANTCONNECT_COLLABORATOR_ID: ${{ secrets.QUANTCONNECT_COLLABORATOR_ID }} 50 | QUANTCONNECT_ORGANIZATION_ID: ${{ secrets.QUANTCONNECT_ORGANIZATION_ID }} 51 | -------------------------------------------------------------------------------- /.github/workflows/push_to_docker_hub.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push to Docker Hub 2 | 3 | on: 4 | # Run when we click the Run Job button on GitHub. 5 | workflow_dispatch: 6 | # Run when there is a new Release (has a tag). 7 | push: 8 | tags: ['**'] # Match any tag 9 | 10 | jobs: 11 | release: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | # Checkout the repository code 16 | - name: Checkout code 17 | uses: actions/checkout@v4 18 | 19 | - name: Set up QEMU 20 | uses: docker/setup-qemu-action@v3 21 | 22 | # Set up Docker Buildx for building multi-platform images 23 | - name: Set up Docker Buildx 24 | uses: docker/setup-buildx-action@v3 25 | 26 | # Log in to Docker Hub 27 | - name: Log in to Docker Hub 28 | uses: docker/login-action@v3 29 | with: 30 | username: ${{ secrets.DOCKERHUB_USERNAME }} 31 | password: ${{ secrets.DOCKERHUB_TOKEN }} 32 | 33 | # Update the version/tag. 34 | - name: Update version 35 | run: sed -i 's/__version__ = "0.0.0-dev"/__version__ = "'"${GITHUB_REF##*/}"'"/g' src/__init__.py 36 | 37 | # Build and push the Docker image 38 | - name: Build and push multi-platform Docker image 39 | uses: docker/build-push-action@v6 40 | with: 41 | context: . 
42 | file: ./Dockerfile 43 | platforms: linux/amd64,linux/arm64 44 | push: true 45 | tags: | 46 | quantconnect/mcp-server:${{ github.ref_name }} 47 | quantconnect/mcp-server:latest 48 | -------------------------------------------------------------------------------- /.github/workflows/update_models.yml: -------------------------------------------------------------------------------- 1 | name: Update Models 2 | 3 | on: 4 | schedule: 5 | - cron: "0 10 * * 5" # Runs "at 10 UTC every Friday" (see https://crontab.guru) 6 | workflow_dispatch: # Runs on manual trigger 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-24.04 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | - name: Install dependencies 15 | run: |- 16 | python -m pip install --upgrade pip 17 | pip install datamodel-code-generator==0.31.2 18 | 19 | - name: Free space 20 | run: df -h && rm -rf /opt/hostedtoolcache* && df -h 21 | 22 | - name: Update models 23 | run: |- 24 | curl -O https://raw.githubusercontent.com/QuantConnect/Documentation/refs/heads/master/QuantConnect-Platform-2.0.0.yaml 25 | datamodel-codegen --input QuantConnect-Platform-2.0.0.yaml --input-file-type openapi --output src/models.py --remove-special-field-name-prefix --use-annotated --collapse-root-models --use-default-kwarg 26 | python post_processing.py 27 | rm QuantConnect-Platform-2.0.0.yaml 28 | 29 | - name: Push to Branch 30 | run: |- 31 | if [[ $(git diff | wc -m) == 0 ]]; then 32 | exit 0 33 | fi 34 | BRANCH=pydantic_model_generator 35 | git config user.name GitHub Actions 36 | git config user.email github-actions@github.com 37 | git checkout -b $BRANCH 38 | git add . 39 | git commit -m "Code generated by update_models.py" 40 | git push --set-upstream origin $BRANCH -f 41 | -------------------------------------------------------------------------------- /tests/algorithms/order_properties.py: -------------------------------------------------------------------------------- 1 | # region imports 2 | from AlgorithmImports import * 3 | # endregion 4 | 5 | 6 | class OrderPropertiesTestAlgorithm(QCAlgorithm): 7 | 8 | def initialize(self): 9 | self.set_start_date(2024, 1, 8) 10 | self.set_end_date(2024, 2, 1) 11 | self.set_cash(100_000) 12 | self._equity = self.add_equity( 13 | 'SPY', Resolution.HOUR, 14 | data_normalization_mode=DataNormalizationMode.RAW 15 | ) 16 | 17 | def on_data(self, data: Slice): 18 | if self.portfolio.invested: 19 | return 20 | tag = 'some tag' 21 | quantity = 1 22 | limit_price = self._equity.price + 10 23 | # Test the GOOD_TIL_CANCELED time in force. 24 | order_properties = OrderProperties() 25 | order_properties.time_in_force = TimeInForce.GOOD_TIL_CANCELED 26 | self.limit_order( 27 | self._equity.symbol, quantity, limit_price, tag, order_properties 28 | ) 29 | 30 | # Test the Day time in force. 31 | order_properties = OrderProperties() 32 | order_properties.time_in_force = TimeInForce.DAY 33 | self.limit_order( 34 | self._equity.symbol, quantity, limit_price, tag, order_properties 35 | ) 36 | 37 | # Test the good_til_date time in force. 
38 | order_properties = OrderProperties() 39 | order_properties.time_in_force = TimeInForce.good_til_date( 40 | datetime(2025, 1, 1) 41 | ) 42 | self.limit_order( 43 | self._equity.symbol, quantity, limit_price, tag, order_properties 44 | ) 45 | -------------------------------------------------------------------------------- /src/organization_workspace.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | 5 | class OrganizationWorkspace: 6 | 7 | # Load mount destination and source from environment variables. 8 | MOUNT_SOURCE = os.getenv('MOUNT_SOURCE_PATH') 9 | MOUNT_DESTINATION = os.getenv('MOUNT_DST_PATH') 10 | 11 | available = False # Indicate if local disk access is available 12 | project_id_by_path = {} 13 | 14 | @classmethod 15 | def load(cls): 16 | if not (cls.MOUNT_SOURCE and cls.MOUNT_DESTINATION): 17 | return 18 | if not os.path.exists(cls.MOUNT_DESTINATION): 19 | return 20 | for name in os.listdir(cls.MOUNT_DESTINATION): 21 | if name in ['.QuantConnect', 'data', 'lean.json']: 22 | continue 23 | cls._process_directory(os.path.join(cls.MOUNT_DESTINATION, name)) 24 | cls.available = True 25 | 26 | @classmethod 27 | def _process_directory(cls, path): 28 | # If the current directory contains a config.json file, then 29 | # it's a project, so save it's Id and path. 30 | config_path = os.path.join(path, 'config.json') 31 | if os.path.isfile(config_path): 32 | with open(config_path, 'r') as f: 33 | config_data = json.load(f) 34 | if 'cloud-id' in config_data: 35 | cls.project_id_by_path[path] = config_data['cloud-id'] 36 | # Otherwise, it's a directory of projects, so recurse. 37 | else: 38 | for dir_name in os.listdir(path): 39 | sub_path = os.path.join(path, dir_name) 40 | if os.path.isdir(sub_path): 41 | cls._process_directory(sub_path) 42 | -------------------------------------------------------------------------------- /src/tools/project.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | CreateProjectRequest, 4 | ReadProjectRequest, 5 | UpdateProjectRequest, 6 | DeleteProjectRequest, 7 | ProjectListResponse, 8 | RestResponse 9 | ) 10 | 11 | def register_project_tools(mcp): 12 | # Create 13 | @mcp.tool( 14 | annotations={ 15 | 'title': 'Create project', 16 | 'destructiveHint': False, 17 | 'idempotentHint': False 18 | } 19 | ) 20 | async def create_project(model: CreateProjectRequest) -> ProjectListResponse: 21 | """Create a new project in your default organization.""" 22 | return await post('/projects/create', model) 23 | 24 | # Read (singular) 25 | @mcp.tool(annotations={'title': 'Read project', 'readOnlyHint': True}) 26 | async def read_project(model: ReadProjectRequest) -> ProjectListResponse: 27 | """List the details of a project or a set of recent projects.""" 28 | return await post('/projects/read', model) 29 | 30 | # Read (all) 31 | @mcp.tool(annotations={'title': 'List projects', 'readOnlyHint': True}) 32 | async def list_projects() -> ProjectListResponse: 33 | """List the details of all projects.""" 34 | return await post('/projects/read') 35 | 36 | # Update 37 | @mcp.tool(annotations={'title': 'Update project', 'idempotentHint': True}) 38 | async def update_project(model: UpdateProjectRequest) -> RestResponse: 39 | """Update a project's name or description.""" 40 | return await post('/projects/update', model) 41 | 42 | # Delete 43 | @mcp.tool(annotations={'title': 'Delete project', 'idempotentHint': True}) 
44 | async def delete_project(model: DeleteProjectRequest) -> RestResponse: 45 | """Delete a project.""" 46 | return await post('/projects/delete', model) 47 | -------------------------------------------------------------------------------- /src/api_connection.py: -------------------------------------------------------------------------------- 1 | from __init__ import __version__ 2 | 3 | import httpx 4 | from base64 import b64encode 5 | from hashlib import sha256 6 | from time import time 7 | import os 8 | from pydantic_core import to_jsonable_python 9 | 10 | BASE_URL = 'https://www.quantconnect.com/api/v2' 11 | 12 | # Load credentials from environment variables. 13 | USER_ID = os.getenv('QUANTCONNECT_USER_ID') 14 | API_TOKEN = os.getenv('QUANTCONNECT_API_TOKEN') 15 | 16 | def get_headers(): 17 | # Get timestamp 18 | timestamp = f'{int(time())}' 19 | time_stamped_token = f'{API_TOKEN}:{timestamp}'.encode('utf-8') 20 | # Get hashed API token 21 | hashed_token = sha256(time_stamped_token).hexdigest() 22 | authentication = f'{USER_ID}:{hashed_token}'.encode('utf-8') 23 | authentication = b64encode(authentication).decode('ascii') 24 | # Create headers dictionary. 25 | return { 26 | 'Authorization': f'Basic {authentication}', 27 | 'Timestamp': timestamp, 28 | 'User-Agent': f'QuantConnect MCP Server v{__version__}' 29 | } 30 | 31 | async def post(endpoint: str, model: object = None, timeout: float = 30.0): 32 | """Make an HTTP POST request to the API with proper error handling. 33 | 34 | Args: 35 | endpoint: The API endpoint path (ex: '/projects/create') 36 | model: Optional Pydantic model for the request. 37 | timeout: Optional timeout for the request (in seconds). 38 | 39 | Returns: 40 | Response JSON if successful. Otherwise, throws an exception, 41 | which is handled by the Server class. 42 | """ 43 | async with httpx.AsyncClient() as client: 44 | response = await client.post( 45 | f'{BASE_URL}{endpoint}', 46 | headers=get_headers(), 47 | json=to_jsonable_python(model, exclude_none=True) if model else {}, 48 | timeout=timeout 49 | ) 50 | response.raise_for_status() 51 | return response.json() 52 | -------------------------------------------------------------------------------- /src/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from mcp.server.fastmcp import FastMCP 3 | 4 | from tools.account import register_account_tools 5 | from tools.project import register_project_tools 6 | from tools.project_collaboration import register_project_collaboration_tools 7 | from tools.project_nodes import register_project_node_tools 8 | from tools.compile import register_compile_tools 9 | from tools.files import register_file_tools 10 | from tools.backtests import register_backtest_tools 11 | from tools.optimizations import register_optimization_tools 12 | from tools.live import register_live_trading_tools 13 | from tools.live_commands import register_live_trading_command_tools 14 | from tools.object_store import register_object_store_tools 15 | from tools.lean_versions import register_lean_version_tools 16 | from tools.ai import register_ai_tools 17 | from tools.mcp_server_version import register_mcp_server_version_tools 18 | from organization_workspace import OrganizationWorkspace 19 | 20 | transport = os.getenv('MCP_TRANSPORT', 'stdio') 21 | 22 | # Load the server instructions. 23 | with open('src/instructions.md', 'r', encoding='utf-8') as file: 24 | instructions = file.read() 25 | # Initialize the FastMCP server. 
26 | mcp = FastMCP('quantconnect', instructions, host="0.0.0.0") 27 | 28 | # Register all the tools. 29 | registration_functions = [ 30 | register_account_tools, 31 | register_project_tools, 32 | register_project_collaboration_tools, 33 | register_project_node_tools, 34 | register_compile_tools, 35 | register_file_tools, 36 | register_backtest_tools, 37 | register_optimization_tools, 38 | register_live_trading_tools, 39 | register_live_trading_command_tools, 40 | register_object_store_tools, 41 | register_lean_version_tools, 42 | register_ai_tools, 43 | register_mcp_server_version_tools, 44 | ] 45 | for f in registration_functions: 46 | f(mcp) 47 | 48 | if __name__ == "__main__": 49 | # Load the organization workspace. 50 | OrganizationWorkspace.load() 51 | # Run the server. 52 | mcp.run(transport=transport) 53 | -------------------------------------------------------------------------------- /tests/test_live_orders.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from test_files import Files 7 | from test_live import Live 8 | from utils import validate_models 9 | from models import LiveOrdersResponse, LoadingResponse 10 | 11 | 12 | # Static helpers for common operations: 13 | class LiveOrders: 14 | 15 | @staticmethod 16 | async def read(project_id, start, end): 17 | return await validate_models( 18 | mcp, 'read_live_orders', 19 | {'projectId': project_id, 'start': start, 'end': end}, 20 | LiveOrdersResponse 21 | ) 22 | 23 | @staticmethod 24 | async def wait_for_orders_to_load(project_id, start=0, end=1_000): 25 | attempts = 0 26 | while attempts < 6*15: # 15 minutes 27 | attempts += 1 28 | response = await LiveOrders.read(project_id, start, end) 29 | if response.errors is None: 30 | return response.orders 31 | sleep(10) 32 | assert False, "Orders didn't load in time." 33 | 34 | 35 | TEST_CASES = [ 36 | ('Py', 'live_orders.py'), 37 | ('C#', 'LiveOrders.cs') 38 | ] 39 | # Test suite: 40 | class TestLiveOrders: 41 | 42 | @pytest.mark.asyncio 43 | @pytest.mark.parametrize('language, algo', TEST_CASES) 44 | async def test_read_live_orders(self, language, algo): 45 | # Create and compile the project. 46 | project_id, compile_id = await Files.setup_project(language, algo) 47 | # Deploy the algorithm. 48 | await Live.create( 49 | project_id, compile_id, await Live.get_node_id(project_id) 50 | ) 51 | await Live.wait_for_algorithm_to_start(project_id) 52 | # Give the algorithm time to send the orders and then stop it so 53 | # it flushes all the orders to the orders file. Without stopping 54 | # it, we'll have to wait ~10 minutes for the orders file to 55 | # populate. 56 | sleep(120) 57 | await Live.stop(project_id) 58 | # Try to read the orders. 59 | orders = await LiveOrders.wait_for_orders_to_load(project_id) 60 | for order in orders: 61 | assert order.symbol.id == 'BTCUSD 2XR' 62 | # Delete the project to clean up. 
63 | await Project.delete(project_id) 64 | -------------------------------------------------------------------------------- /src/tools/files.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from code_source_id import add_code_source_id 3 | from models import ( 4 | CreateProjectFileRequest, 5 | ReadFilesRequest, 6 | UpdateFileNameRequest, 7 | UpdateFileContentsRequest, 8 | PatchFileRequest, 9 | DeleteFileRequest, 10 | RestResponse, 11 | ProjectFilesResponse 12 | ) 13 | 14 | 15 | def register_file_tools(mcp): 16 | # Create 17 | @mcp.tool( 18 | annotations={ 19 | 'title': 'Create file', 20 | 'destructiveHint': False, 21 | 'idempotentHint': True 22 | } 23 | ) 24 | async def create_file( 25 | model: CreateProjectFileRequest) -> RestResponse: 26 | """Add a file to a given project.""" 27 | return await post('/files/create', add_code_source_id(model)) 28 | 29 | # Read 30 | @mcp.tool(annotations={'title': 'Read file', 'readOnlyHint': True}) 31 | async def read_file(model: ReadFilesRequest) -> ProjectFilesResponse: 32 | """Read a file from a project, or all files in the project if 33 | no file name is provided. 34 | """ 35 | return await post('/files/read', add_code_source_id(model)) 36 | 37 | # Update name 38 | @mcp.tool( 39 | annotations={'title': 'Update file name', 'idempotentHint': True} 40 | ) 41 | async def update_file_name(model: UpdateFileNameRequest) -> RestResponse: 42 | """Update the name of a file.""" 43 | return await post('/files/update', add_code_source_id(model)) 44 | 45 | # Update contents 46 | @mcp.tool( 47 | annotations={'title': 'Update file contents', 'idempotentHint': True} 48 | ) 49 | async def update_file_contents( 50 | model: UpdateFileContentsRequest) -> ProjectFilesResponse: 51 | """Update the contents of a file.""" 52 | return await post('/files/update', add_code_source_id(model)) 53 | 54 | # Update lines (patch) 55 | @mcp.tool( 56 | annotations={'title': 'Patch file', 'idempotentHint': True} 57 | ) 58 | async def patch_file(model: PatchFileRequest) -> RestResponse: 59 | """Apply a patch (unified diff) to a file in a project.""" 60 | return await post('/files/patch', add_code_source_id(model)) 61 | 62 | # Delete 63 | @mcp.tool(annotations={'title': 'Delete file', 'idempotentHint': True}) 64 | async def delete_file(model: DeleteFileRequest) -> RestResponse: 65 | """Delete a file in a project.""" 66 | return await post('/files/delete', add_code_source_id(model)) 67 | -------------------------------------------------------------------------------- /src/tools/ai.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | BasicFilesRequest, 4 | CodeCompletionRequest, 5 | ErrorEnhanceRequest, 6 | PEP8ConvertRequest, 7 | BasicFilesRequest, 8 | SearchRequest, 9 | 10 | BacktestInitResponse, 11 | CodeCompletionResponse, 12 | ErrorEnhanceResponse, 13 | PEP8ConvertResponse, 14 | SyntaxCheckResponse, 15 | SearchResponse 16 | ) 17 | 18 | def register_ai_tools(mcp): 19 | # Get backtest initialization errors 20 | @mcp.tool( 21 | annotations={ 22 | 'title': 'Check initialization errors', 'readOnlyHint': True 23 | } 24 | ) 25 | async def check_initialization_errors( 26 | model: BasicFilesRequest) -> BacktestInitResponse: 27 | """Run a backtest for a few seconds to initialize the algorithm 28 | and get initialization errors if any.""" 29 | return await post('/ai/tools/backtest-init', model) 30 | 31 | # Complete code 32 | 
@mcp.tool(annotations={'title': 'Complete code', 'readOnlyHint': True}) 33 | async def complete_code( 34 | model: CodeCompletionRequest) -> CodeCompletionResponse: 35 | """Show the code completion for a specific text input.""" 36 | return await post('/ai/tools/complete', model) 37 | 38 | # Enchance error message 39 | @mcp.tool( 40 | annotations={'title': 'Enhance error message', 'readOnlyHint': True} 41 | ) 42 | async def enhance_error_message( 43 | model: ErrorEnhanceRequest) -> ErrorEnhanceResponse: 44 | """Show additional context and suggestions for error messages.""" 45 | return await post('/ai/tools/error-enhance', model) 46 | 47 | # Update code to PEP8 48 | @mcp.tool( 49 | annotations={'title': 'Update code to PEP8', 'readOnlyHint': True} 50 | ) 51 | async def update_code_to_pep8( 52 | model: PEP8ConvertRequest) -> PEP8ConvertResponse: 53 | """Update Python code to follow PEP8 style.""" 54 | return await post('/ai/tools/pep8-convert', model) 55 | 56 | # Check syntax 57 | @mcp.tool(annotations={'title': 'Check syntax', 'readOnlyHint': True}) 58 | async def check_syntax(model: BasicFilesRequest) -> SyntaxCheckResponse: 59 | """Check the syntax of a code.""" 60 | return await post('/ai/tools/syntax-check', model) 61 | 62 | # Search 63 | @mcp.tool(annotations={'title': 'Search QuantConnect', 'readOnlyHint': True}) 64 | async def search_quantconnect(model: SearchRequest) -> SearchResponse: 65 | """Search for content in QuantConnect.""" 66 | return await post('/ai/tools/search', model) 67 | -------------------------------------------------------------------------------- /tests/test_live_insights.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from test_files import Files 7 | from test_live import Live 8 | from utils import validate_models 9 | from models import LiveInsightsResponse 10 | 11 | 12 | # Static helpers for common operations: 13 | class LiveInsights: 14 | 15 | @staticmethod 16 | async def read(project_id, end, **kwargs): 17 | return await validate_models( 18 | mcp, 'read_live_insights', 19 | {'projectId': project_id, 'end': end} | kwargs, 20 | LiveInsightsResponse 21 | ) 22 | 23 | @staticmethod 24 | async def wait_for_insights_to_load(project_id, end=1_000): 25 | attempts = 0 26 | while attempts < 6*15: # 15 minutes 27 | attempts += 1 28 | response = await LiveInsights.read(project_id, end) 29 | if response.length: 30 | return response.insights 31 | sleep(10) 32 | assert False, "Insights didn't load in time." 33 | 34 | 35 | TEST_CASES = [ 36 | ('Py', 'live_insights.py'), 37 | ('C#', 'LiveInsights.cs') 38 | ] 39 | # Test suite: 40 | class TestLiveInsights: 41 | 42 | @pytest.mark.asyncio 43 | @pytest.mark.parametrize('language, algo', TEST_CASES) 44 | async def test_read_live_insights(self, language, algo): 45 | # Create and compile the project. 46 | project_id, compile_id = await Files.setup_project(language, algo) 47 | # Deploy the algorithm. 48 | await Live.create( 49 | project_id, compile_id, await Live.get_node_id(project_id) 50 | ) 51 | await Live.wait_for_algorithm_to_start(project_id) 52 | # Give the algorithm time to emit the insights and then stop it 53 | # so it flushes all the insights to the insight file. Without 54 | # stopping it, we'll have to wait ~10 minutes for the file to 55 | # populate. 56 | sleep(120) 57 | await Live.stop(project_id) 58 | # Try to read the insights. 
59 |         insights = await LiveInsights.wait_for_insights_to_load(project_id)
60 |         for i, insight in enumerate(insights):
61 |             assert insight.symbol == 'BTCUSD 2XR'
62 |             assert insight.type.value == 'price'
63 |             assert insight.direction.value == ['up', 'flat'][i%2]
64 |             assert insight.period == 24*60*60 # seconds in a day
65 |             assert insight.magnitude is None
66 |             assert insight.confidence is None
67 |             assert insight.weight is None
68 |             assert insight.tag is None
69 |         # Delete the project to clean up.
70 |         await Project.delete(project_id)
71 | 
--------------------------------------------------------------------------------
/tests/test_live_logs.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from time import time, sleep
3 | 
4 | from main import mcp
5 | from test_live import Live
6 | from test_files import Files
7 | from test_project import Project
8 | from utils import validate_models
9 | from models import ReadLiveLogsRequest, ReadLiveLogsResponse
10 | 
11 | 
12 | # Static helpers for common operations:
13 | class LiveLogs:
14 | 
15 |     @staticmethod
16 |     async def read(project_id, algorithm_id, start_line=0, end_line=250):
17 |         return await validate_models(
18 |             mcp, 'read_live_logs',
19 |             {
20 |                 'projectId': project_id,
21 |                 'algorithmId': algorithm_id,
22 |                 'startLine': start_line,
23 |                 'endLine': end_line
24 |             },
25 |             ReadLiveLogsResponse
26 |         )
27 | 
28 |     @staticmethod
29 |     async def wait_for_logs_to_load(
30 |             project_id, algorithm_id, start_line=0, end_line=250, threshold=3):
31 |         attempts = 0
32 |         while attempts < 6*5: # 5 minutes
33 |             attempts += 1
34 |             response = await LiveLogs.read(
35 |                 project_id, algorithm_id, start_line, end_line
36 |             )
37 |             if (any(algorithm_id in log for log in response.logs) and
38 |                     len(response.logs) >= threshold):
39 |                 return response
40 |             sleep(10)
41 |         assert False, "Logs didn't load in time."
42 | 
43 | 
44 | TEST_CASES = [
45 |     ('Py', 'live_logs.py'),
46 |     #('C#', 'LiveLogs.cs')
47 | ]
48 | # Test suite:
49 | class TestLiveLogs:
50 | 
51 |     @pytest.mark.asyncio
52 |     @pytest.mark.parametrize('language, algo', TEST_CASES)
53 |     async def test_read_live_logs(self, language, algo):
54 |         # Create and compile the project.
55 |         project_id, compile_id = await Files.setup_project(language, algo)
56 |         # Deploy the algorithm.
57 |         live = await Live.create(
58 |             project_id, compile_id, await Live.get_node_id(project_id)
59 |         )
60 |         await Live.wait_for_algorithm_to_start(project_id)
61 |         # Give the algorithm time to print the logs and then stop it so
62 |         # it flushes all the logs to the log file. Without stopping it,
63 |         # we'll have to wait ~10 minutes for the log file to populate.
64 |         sleep(15)
65 |         await Live.stop(project_id)
66 |         # Try to read the logs.
67 |         response = await LiveLogs.wait_for_logs_to_load(
68 |             project_id, live.deployId
69 |         )
70 |         assert response.deploymentOffset == 0
71 |         assert response.length >= 10
72 |         assert len(response.logs) >= 10
73 |         # Delete the project to clean up.
74 | await Project.delete(project_id) 75 | -------------------------------------------------------------------------------- /src/tools/project_collaboration.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from code_source_id import add_code_source_id 3 | from models import ( 4 | CreateCollaboratorRequest, 5 | ReadCollaboratorsRequest, 6 | UpdateCollaboratorRequest, 7 | DeleteCollaboratorRequest, 8 | LockCollaboratorRequest, 9 | CreateCollaboratorResponse, 10 | ReadCollaboratorsResponse, 11 | UpdateCollaboratorResponse, 12 | DeleteCollaboratorResponse, 13 | RestResponse 14 | ) 15 | 16 | def register_project_collaboration_tools(mcp): 17 | # Create 18 | @mcp.tool( 19 | annotations={ 20 | 'title': 'Create project collaborator', 21 | 'destructiveHint': False, 22 | 'idempotentHint': True 23 | } 24 | ) 25 | async def create_project_collaborator( 26 | model: CreateCollaboratorRequest) -> CreateCollaboratorResponse: 27 | """Add a collaborator to a project.""" 28 | return await post('/projects/collaboration/create', model) 29 | 30 | # Read 31 | @mcp.tool( 32 | annotations={ 33 | 'title': 'Read project collaborators', 34 | 'readOnlyHint': True 35 | } 36 | ) 37 | async def read_project_collaborators( 38 | model: ReadCollaboratorsRequest) -> ReadCollaboratorsResponse: 39 | """List all collaborators on a project.""" 40 | return await post('/projects/collaboration/read', model) 41 | 42 | # Update 43 | @mcp.tool( 44 | annotations={ 45 | 'title': 'Update project collaborator', 46 | 'idempotentHint': True 47 | } 48 | ) 49 | async def update_project_collaborator( 50 | model: UpdateCollaboratorRequest) -> UpdateCollaboratorResponse: 51 | """Update collaborator information in a project.""" 52 | return await post('/projects/collaboration/update', model) 53 | 54 | # Delete 55 | @mcp.tool( 56 | annotations={ 57 | 'title': 'Delete project collaborator', 58 | 'idempotentHint': True 59 | } 60 | ) 61 | async def delete_project_collaborator( 62 | model: DeleteCollaboratorRequest) -> DeleteCollaboratorResponse: 63 | """Remove a collaborator from a project.""" 64 | return await post('/projects/collaboration/delete', model) 65 | 66 | # Lock 67 | @mcp.tool( 68 | annotations={ 69 | 'title': 'Lock project with collaborators', 70 | 'idempotentHint': True 71 | } 72 | ) 73 | async def lock_project_with_collaborators( 74 | model: LockCollaboratorRequest) -> RestResponse: 75 | """Lock a project so you can edit it. 
76 | 77 | This is necessary when the project has collaborators or when an 78 | LLM is editing files on your behalf via our MCP Server.""" 79 | return await post( 80 | '/projects/collaboration/lock/acquire', add_code_source_id(model) 81 | ) 82 | 83 | -------------------------------------------------------------------------------- /src/tools/optimizations.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | EstimateOptimizationRequest, 4 | CreateOptimizationRequest, 5 | ReadOptimizationRequest, 6 | ListOptimizationRequest, 7 | EstimateOptimizationResponse, 8 | UpdateOptimizationRequest, 9 | AbortOptimizationRequest, 10 | DeleteOptimizationRequest, 11 | ListOptimizationResponse, 12 | ReadOptimizationResponse, 13 | RestResponse 14 | ) 15 | 16 | def register_optimization_tools(mcp): 17 | # Estimate cost 18 | @mcp.tool( 19 | annotations={ 20 | 'title': 'Estimate optimization time', 21 | 'readOnlyHint': True, 22 | } 23 | ) 24 | async def estimate_optimization_time( 25 | model: EstimateOptimizationRequest) -> EstimateOptimizationResponse: 26 | """Estimate the execution time of an optimization with the 27 | specified parameters. 28 | """ 29 | return await post('/optimizations/estimate', model) 30 | 31 | # Create 32 | @mcp.tool( 33 | annotations={ 34 | 'title': 'Create optimization', 35 | 'destructiveHint': False 36 | } 37 | ) 38 | async def create_optimization( 39 | model: CreateOptimizationRequest) -> ListOptimizationResponse: 40 | """Create an optimization with the specified parameters.""" 41 | return await post('/optimizations/create', model) 42 | 43 | # Read a single optimization job. 44 | @mcp.tool( 45 | annotations={'title': 'Read optimization', 'readOnlyHint': True} 46 | ) 47 | async def read_optimization( 48 | model: ReadOptimizationRequest) -> ReadOptimizationResponse: 49 | """Read an optimization.""" 50 | return await post('/optimizations/read', model) 51 | 52 | # Read all optimizations for a project. 53 | @mcp.tool( 54 | annotations={'title': 'List optimizations', 'readOnlyHint': True} 55 | ) 56 | async def list_optimizations( 57 | model: ListOptimizationRequest) -> ListOptimizationResponse: 58 | """List all the optimizations for a project.""" 59 | return await post('/optimizations/list', model) 60 | 61 | # Update the optimization name. 62 | @mcp.tool( 63 | annotations={'title': 'Update optimization', 'idempotentHint': True} 64 | ) 65 | async def update_optimization( 66 | model: UpdateOptimizationRequest) -> RestResponse: 67 | """Update the name of an optimization.""" 68 | return await post('/optimizations/update', model) 69 | 70 | # Update the optimization status (stop). 
71 | @mcp.tool( 72 | annotations={'title': 'Abort optimization', 'idempotentHint': True} 73 | ) 74 | async def abort_optimization( 75 | model: AbortOptimizationRequest) -> RestResponse: 76 | """Abort an optimization.""" 77 | return await post('/optimizations/abort', model) 78 | 79 | # Delete 80 | @mcp.tool( 81 | annotations={'title': 'Delete optimization', 'idempotentHint': True} 82 | ) 83 | async def delete_optimization(model: DeleteOptimizationRequest) -> RestResponse: 84 | """Delete an optimization.""" 85 | return await post('/optimizations/delete', model) 86 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from types import UnionType 3 | import jsonschema 4 | from json import loads 5 | from pydantic import ValidationError, TypeAdapter 6 | from datetime import datetime 7 | 8 | async def validate_response( 9 | mcp, tool_name, structured_response, output_class): 10 | # Check if the response schema is valid JSON. 11 | jsonschema.validate( 12 | instance=structured_response, 13 | schema=mcp._tool_manager.get_tool(tool_name).fn_metadata.output_schema 14 | ) 15 | 16 | # Create and return the output model. 17 | if isinstance(output_class, UnionType): 18 | structured_response = structured_response['result'] 19 | return TypeAdapter(output_class).validate_python(structured_response) 20 | 21 | async def validate_models( 22 | mcp, tool_name, input_args={}, output_class=None, 23 | success_expected=True): 24 | # Call the tool with the arguments. If the input args are invalid, 25 | # it raises an error. 26 | unstructured_response, structured_response = await mcp.call_tool( 27 | tool_name, {'model': input_args} 28 | ) 29 | # Check if the response has the success flag. 30 | assert loads( 31 | unstructured_response[0].text 32 | ).get('success', True) == success_expected, structured_response 33 | if not success_expected: 34 | return structured_response 35 | # Check if the response respects the output_class. 36 | output_model = await validate_response( 37 | mcp, tool_name, structured_response, output_class 38 | ) 39 | # Return an instance of the output_class. 40 | return output_model 41 | 42 | async def ensure_request_fails(mcp, tool_name, input_args={}): 43 | # The input_args should be valid for the Pydantic model conversion, 44 | # but should be invalid for the API. 45 | structured_response = await validate_models( 46 | mcp, tool_name, input_args, success_expected=False 47 | ) 48 | return structured_response 49 | 50 | async def ensure_request_raises_validation_error(tool_name, class_, args): 51 | with pytest.raises(ValidationError) as error: 52 | class_.model_validate(args) 53 | assert error.type is ValidationError 54 | 55 | async def ensure_request_raises_validation_error_when_omitting_an_arg( 56 | tool_name, class_, minimal_payload): 57 | # This test ensures that if we omit one of the arguments from the 58 | # `minimal_payload`, the payload doesn't respect the required 59 | # properties of the Pydantic model (`class_`). If the YAML is 60 | # updated so that the required properties of the model are 61 | # adjusted, this test will fail and we'll be informed of the 62 | # change. 
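    # --- Editor's illustration (added to this dump, not part of the original
    # source). It sketches how the loop below behaves for a hypothetical
    # payload; the field names are only examples:
    #
    #     minimal_payload = {'projectId': 1, 'compileId': 'abc'}
    #
    # The helper first confirms the full payload validates against `class_`,
    # then re-validates it once per key with that key removed (first
    # {'compileId': 'abc'}, then {'projectId': 1}), expecting a
    # ValidationError each time. That proves every key in the minimal
    # payload is a required property of the model.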
63 | class_.model_validate(minimal_payload) 64 | for key in minimal_payload: 65 | await ensure_request_raises_validation_error( 66 | tool_name, class_, 67 | {k: v for k, v in minimal_payload.items() if k != key} 68 | ) 69 | 70 | async def ensure_request_fails_when_including_an_invalid_arg( 71 | mcp, tool_name, minimal_payload, invalid_arguments): 72 | # Try to send the request with an invalid argument. 73 | for arg in invalid_arguments: 74 | await ensure_request_fails(mcp, tool_name, minimal_payload | arg) 75 | 76 | def create_timestamp(): 77 | return datetime.now().strftime('%Y%m%d_%H%M%S_%f') 78 | -------------------------------------------------------------------------------- /tests/test_live_commands.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from test_files import Files 7 | from test_live import Live 8 | from test_live_logs import LiveLogs 9 | from utils import validate_models 10 | from models import ( 11 | CreateLiveCommandRequest, 12 | BroadcastLiveCommandRequest, 13 | RestResponse 14 | ) 15 | 16 | 17 | # Static helpers for common operations: 18 | class LiveCommands: 19 | 20 | @staticmethod 21 | async def create(project_id, command): 22 | return await validate_models( 23 | mcp, 'create_live_command', 24 | {'projectId': project_id, 'command': command}, RestResponse 25 | ) 26 | 27 | @staticmethod 28 | async def broadcast(organization_id, command, **kwargs): 29 | return await validate_models( 30 | mcp, 'broadcast_live_command', 31 | {'organizationId': organization_id, 'command': command} | kwargs, 32 | RestResponse 33 | ) 34 | 35 | 36 | TEST_CASES = [ 37 | ('Py', 'live_command.py'), 38 | #('C#', 'LiveCommand.cs') 39 | ] 40 | # Test suite: 41 | class TestLiveCommands: 42 | 43 | @pytest.mark.asyncio 44 | @pytest.mark.parametrize('language, algo', TEST_CASES) 45 | async def test_create_live_command(self, language, algo): 46 | # Create and compile the project. 47 | project_id, compile_id = await Files.setup_project(language, algo) 48 | # Deploy the algorithm and wait for it to start. 49 | node_id = await Live.get_node_id(project_id) 50 | live = await Live.create(project_id, compile_id, node_id) 51 | await Live.wait_for_algorithm_to_start(project_id) 52 | # Wait for logs to load. If you send the command 53 | # too early, it doesn't reach the algorithm. 54 | await LiveLogs.wait_for_logs_to_load( 55 | project_id, live.deployId, threshold=3 56 | ) 57 | # Try to send a generic command. 58 | await LiveCommands.create(project_id, {'text': 'foo'}) 59 | # Try to send an encapsulated command. 60 | encapsulated_command = { 61 | '$type': 'MyCommand', 62 | 'text': 'boo', 63 | 'number': 1, 64 | 'parameters': {'hello': 'world'} 65 | } 66 | await LiveCommands.create(project_id, encapsulated_command) 67 | # Give the algorithm time to print the logs and then stop it so 68 | # it flushes all the logs to the log file. Without stopping it, 69 | # we'll have to wait ~10 minutes for the log file to populate. 70 | sleep(15) 71 | await Live.stop(project_id) 72 | # Check the logs to see if the commands ran. 73 | response = await LiveLogs.wait_for_logs_to_load( 74 | project_id, live.deployId, threshold=5 75 | ) 76 | assert response.logs 77 | assert any([ 78 | 'Generic command. data.text: foo' in log 79 | for log in response.logs 80 | ]) 81 | assert any([ 82 | f"Encapsulated command. 
text: {encapsulated_command['text']}; " 83 | f"number: {encapsulated_command['number']}; " 84 | f"parameters: {encapsulated_command['parameters']}" in log 85 | for log in response.logs 86 | ]) 87 | # Delete the project to clean up. 88 | await Project.delete(project_id) 89 | -------------------------------------------------------------------------------- /tests/test_backtest_orders.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from test_project import Project 5 | from test_backtests import Backtest 6 | from utils import ( 7 | validate_models, 8 | ensure_request_raises_validation_error_when_omitting_an_arg, 9 | ensure_request_fails_when_including_an_invalid_arg 10 | ) 11 | from models import ( 12 | ReadBacktestOrdersRequest, 13 | BacktestOrdersResponse 14 | ) 15 | 16 | 17 | # Static helpers for common operations: 18 | class BacktestOrders: 19 | 20 | @staticmethod 21 | async def read(project_id, backtest_id, start=0, end=100): 22 | output_model = await validate_models( 23 | mcp, 'read_backtest_orders', 24 | { 25 | 'projectId': project_id, 26 | 'backtestId': backtest_id, 27 | 'start': start, 28 | 'end': end 29 | }, 30 | BacktestOrdersResponse 31 | ) 32 | return output_model.orders 33 | 34 | 35 | # Test suite: 36 | class TestBacktestOrders: 37 | 38 | @pytest.mark.asyncio 39 | @pytest.mark.parametrize( 40 | 'language, algo', 41 | [ 42 | ('Py', 'order_properties.py'), 43 | ('C#', 'OrderProperties.cs') 44 | ] 45 | ) 46 | async def test_read_backtest_orders(self, language, algo): 47 | # Backtest the template algorithm. 48 | project_id, backtest_id = await Backtest.run_algorithm(language) 49 | # Try to read the orders. 50 | await BacktestOrders.read(project_id, backtest_id) 51 | # Delete the project to clean up. 52 | await Project.delete(project_id) 53 | # Try to read orders from an algorithm that uses order 54 | # properties. 55 | project_id, backtest_id = await Backtest.run_algorithm(language, algo) 56 | orders = (await BacktestOrders.read(project_id, backtest_id)) 57 | assert len(orders) > 0 58 | for order in orders: 59 | assert order.quantity == 1 60 | assert order.tag == 'some tag' 61 | # Delete the project to clean up. 62 | await Project.delete(project_id) 63 | 64 | @pytest.mark.asyncio 65 | @pytest.mark.parametrize('language', ['Py', 'C#']) 66 | async def test_read_backtest_orders_with_invalid_args(self, language): 67 | # Start a backtest using the default algorithm template. 68 | project_id, backtest_id = await Backtest.run_algorithm( 69 | language, wait_to_complete=False 70 | ) 71 | # Test the invalid requests. 72 | tool_name = 'read_backtest_orders' 73 | class_ = ReadBacktestOrdersRequest 74 | minimal_payload = { 75 | 'projectId': project_id, 76 | 'backtestId': backtest_id, 77 | 'start': 0, 78 | 'end': 100 79 | } 80 | await ensure_request_raises_validation_error_when_omitting_an_arg( 81 | tool_name, class_, minimal_payload 82 | ) 83 | await ensure_request_fails_when_including_an_invalid_arg( 84 | mcp, tool_name, minimal_payload, [ 85 | # Try to read the orders from a backtest that doesn't 86 | # exist. 87 | {'backtestId': ' '}, 88 | # Try to read more than 100 orders at once. 89 | {'start': 0, 'end': 200} 90 | ] 91 | ) 92 | # Delete the project to clean up. 
93 | await Project.delete(project_id) 94 | -------------------------------------------------------------------------------- /tests/test_backtest_insights.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from test_project import Project 5 | from test_backtests import Backtest 6 | from utils import ( 7 | validate_models, 8 | ensure_request_raises_validation_error_when_omitting_an_arg, 9 | ensure_request_fails_when_including_an_invalid_arg 10 | ) 11 | from models import ( 12 | ReadBacktestInsightsRequest, 13 | BacktestInsightsResponse 14 | ) 15 | 16 | 17 | # Static helpers for common operations: 18 | class BacktestInsights: 19 | 20 | @staticmethod 21 | async def read(project_id, backtest_id, start=0, end=100): 22 | output_model = await validate_models( 23 | mcp, 'read_backtest_insights', 24 | { 25 | 'projectId': project_id, 26 | 'backtestId': backtest_id, 27 | 'start': start, 28 | 'end': end 29 | }, 30 | BacktestInsightsResponse 31 | ) 32 | return output_model.insights 33 | 34 | 35 | # Test suite: 36 | class TestBacktestInsights: 37 | 38 | @pytest.mark.asyncio 39 | @pytest.mark.parametrize( 40 | 'language, algo', 41 | [ 42 | ('Py', 'insights.py'), 43 | ('C#', 'Insights.cs') 44 | ] 45 | ) 46 | async def test_read_backtest_insights(self, language, algo): 47 | # Backtest and algorithm that emits insights. 48 | project_id, backtest_id = await Backtest.run_algorithm(language, algo) 49 | # Try to read the insights. 50 | insights = await BacktestInsights.read(project_id, backtest_id) 51 | assert len(insights) == 3 52 | source_model = 'ConstantAlphaModel(Price,Up,30.00:00:00,0.1,0.2)' 53 | for insight in insights: 54 | assert insight.sourceModel == source_model 55 | assert insight.symbol == 'SPY R735QTJ8XC9X' 56 | assert insight.type.value == 'price' 57 | assert insight.direction.value == 'up' 58 | assert insight.period == 2592000 59 | assert insight.magnitude == 0.1 60 | assert insight.confidence == 0.2 61 | assert insight.weight == 0.3 62 | # Delete the project to clean up. 63 | await Project.delete(project_id) 64 | 65 | @pytest.mark.asyncio 66 | @pytest.mark.parametrize('language', ['Py', 'C#']) 67 | async def test_read_backtest_insights_with_invalid_args(self, language): 68 | # Run a backtest with the template algorithm. 69 | project_id, backtest_id = await Backtest.run_algorithm(language) 70 | # Test the invalid requests. 71 | tool_name = 'read_backtest_insights' 72 | class_ = ReadBacktestInsightsRequest 73 | minimal_payload = { 74 | 'projectId': project_id, 75 | 'backtestId': backtest_id, 76 | 'start': 0, 77 | 'end': 100 78 | } 79 | # Try to read the insights without providing all the required 80 | # arguments. 81 | await ensure_request_raises_validation_error_when_omitting_an_arg( 82 | tool_name, class_, minimal_payload 83 | ) 84 | await ensure_request_fails_when_including_an_invalid_arg( 85 | mcp, tool_name, minimal_payload, [ 86 | # Try to read the insights from a project that doesn't 87 | # exist. 88 | {'projectId': -1}, 89 | # Try to read the insights from a backtest that doesn't 90 | # exist. 91 | {'backtestId': ' '}, 92 | # Try to read more than 100 insights at once. 93 | {'start': 0, 'end': 200} 94 | ] 95 | ) 96 | # Delete the project to clean up. 
97 | await Project.delete(project_id) 98 | -------------------------------------------------------------------------------- /src/tools/object_store.py: -------------------------------------------------------------------------------- 1 | from api_connection import post, httpx, get_headers, BASE_URL 2 | from models import ( 3 | ObjectStoreBinaryFile, 4 | GetObjectStorePropertiesRequest, 5 | GetObjectStoreJobIdRequest, 6 | GetObjectStoreURLRequest, 7 | ListObjectStoreRequest, 8 | DeleteObjectStoreRequest, 9 | GetObjectStorePropertiesResponse, 10 | GetObjectStoreResponse, 11 | ListObjectStoreResponse, 12 | RestResponse 13 | ) 14 | 15 | def register_object_store_tools(mcp): 16 | # Create 17 | @mcp.tool( 18 | annotations={ 19 | 'title': 'Upload Object Store file', 'idempotentHint': True 20 | } 21 | ) 22 | async def upload_object( 23 | model: ObjectStoreBinaryFile) -> RestResponse: 24 | """Upload files to the Object Store.""" 25 | # This endpoint is unique because post request requires `data` 26 | # and `files` arguments. 27 | async with httpx.AsyncClient() as client: 28 | response = await client.post( 29 | f'{BASE_URL}/object/set', 30 | headers=get_headers(), 31 | data={ 32 | 'organizationId': model.organizationId, 33 | 'key': model.key 34 | }, 35 | files={'objectData': model.objectData}, 36 | timeout=30.0 37 | ) 38 | response.raise_for_status() 39 | return response.json() 40 | 41 | # Read file metadata 42 | @mcp.tool( 43 | annotations={ 44 | 'title': 'Read Object Store file properties', 'readOnlyHint': True 45 | } 46 | ) 47 | async def read_object_properties( 48 | model: GetObjectStorePropertiesRequest 49 | ) -> GetObjectStorePropertiesResponse: 50 | """Get Object Store properties of a specific organization and 51 | key. 52 | 53 | It doesn't work if the key is a directory in the Object Store. 54 | """ 55 | return await post('/object/properties', model) 56 | 57 | # Read file job Id 58 | @mcp.tool( 59 | annotations={ 60 | 'title': 'Read Object Store file job Id', 'destructiveHint': False 61 | } 62 | ) 63 | async def read_object_store_file_job_id( 64 | model: GetObjectStoreJobIdRequest) -> GetObjectStoreResponse: 65 | """Create a job to download files from the Object Store and 66 | then read the job Id. 67 | """ 68 | return await post('/object/get', model) 69 | 70 | # Read file download URL 71 | @mcp.tool( 72 | annotations={ 73 | 'title': 'Read Object Store file download URL', 74 | 'readOnlyHint': True 75 | } 76 | ) 77 | async def read_object_store_file_download_url( 78 | model: GetObjectStoreURLRequest) -> GetObjectStoreResponse: 79 | """Get the URL for downloading files from the Object Store.""" 80 | return await post('/object/get', model) 81 | 82 | # Read all files 83 | @mcp.tool( 84 | annotations={'title': 'List Object Store files', 'readOnlyHint': True} 85 | ) 86 | async def list_object_store_files( 87 | model: ListObjectStoreRequest) -> ListObjectStoreResponse: 88 | """List the Object Store files under a specific directory in 89 | an organization. 90 | """ 91 | return await post('/object/list', model) 92 | 93 | # Delete 94 | @mcp.tool( 95 | annotations={ 96 | 'title': 'Delete Object Store file', 97 | 'idempotentHint': True 98 | } 99 | ) 100 | async def delete_object( 101 | model: DeleteObjectStoreRequest) -> RestResponse: 102 | """Delete the Object Store file of a specific organization and 103 | key. 
104 | """ 105 | return await post('/object/delete', model) 106 | -------------------------------------------------------------------------------- /src/tools/backtests.py: -------------------------------------------------------------------------------- 1 | from api_connection import post 2 | from models import ( 3 | CreateBacktestRequest, 4 | ReadBacktestRequest, 5 | ReadBacktestChartRequest, 6 | ReadBacktestOrdersRequest, 7 | ReadBacktestInsightsRequest, 8 | BacktestReportRequest, 9 | ListBacktestRequest, 10 | UpdateBacktestRequest, 11 | DeleteBacktestRequest, 12 | BacktestResponse, 13 | #LoadingChartResponse, 14 | ReadChartResponse, 15 | BacktestOrdersResponse, 16 | BacktestInsightsResponse, 17 | BacktestSummaryResponse, 18 | BacktestReport, 19 | BacktestReportGeneratingResponse, 20 | RestResponse 21 | ) 22 | 23 | def register_backtest_tools(mcp): 24 | # Create 25 | @mcp.tool( 26 | annotations={ 27 | 'title': 'Create backtest', 28 | 'destructiveHint': False 29 | } 30 | ) 31 | async def create_backtest( 32 | model: CreateBacktestRequest) -> BacktestResponse: 33 | """Create a new backtest request and get the backtest Id.""" 34 | return await post('/backtests/create', model) 35 | 36 | # Read statistics for a single backtest. 37 | @mcp.tool(annotations={'title': 'Read backtest', 'readOnlyHint': True}) 38 | async def read_backtest(model: ReadBacktestRequest) -> BacktestResponse: 39 | """Read the results of a backtest.""" 40 | return await post('/backtests/read', model) 41 | 42 | # Read a summary of all the backtests. 43 | @mcp.tool(annotations={'title': 'List backtests', 'readOnlyHint': True}) 44 | async def list_backtests( 45 | model: ListBacktestRequest) -> BacktestSummaryResponse: 46 | """List all the backtests for the project.""" 47 | return await post('/backtests/list', model) 48 | 49 | # Read the chart of a single backtest. 50 | @mcp.tool( 51 | annotations={'title': 'Read backtest chart', 'readOnlyHint': True} 52 | ) 53 | async def read_backtest_chart( 54 | model: ReadBacktestChartRequest) -> ReadChartResponse: 55 | """Read a chart from a backtest.""" 56 | return await post('/backtests/chart/read', model) 57 | 58 | # Read the orders of a single backtest. 59 | @mcp.tool( 60 | annotations={'title': 'Read backtest orders', 'readOnlyHint': True} 61 | ) 62 | async def read_backtest_orders( 63 | model: ReadBacktestOrdersRequest) -> BacktestOrdersResponse: 64 | """Read out the orders of a backtest.""" 65 | return await post('/backtests/orders/read', model) 66 | 67 | # Read the insights of a single backtest. 68 | @mcp.tool( 69 | annotations={'title': 'Read backtest insights', 'readOnlyHint': True} 70 | ) 71 | async def read_backtest_insights( 72 | model: ReadBacktestInsightsRequest) -> BacktestInsightsResponse: 73 | """Read out the insights of a backtest.""" 74 | return await post('/backtests/read/insights', model) 75 | 76 | ## Read the report of a single backtest. 
77 | #@mcp.tool( 78 | # annotations={'title': 'Read backtest report', 'readOnlyHint': True} 79 | #) 80 | #async def read_backtest_report( 81 | # model: BacktestReportRequest 82 | # ) -> BacktestReport | BacktestReportGeneratingResponse: 83 | # """Read out the report of a backtest.""" 84 | # return await post('/backtests/read/report', model) 85 | 86 | # Update 87 | @mcp.tool( 88 | annotations={'title': 'Update backtest', 'idempotentHint': True} 89 | ) 90 | async def update_backtest(model: UpdateBacktestRequest) -> RestResponse: 91 | """Update the name or note of a backtest.""" 92 | return await post('/backtests/update', model) 93 | 94 | # Delete 95 | @mcp.tool( 96 | annotations={'title': 'Delete backtest', 'idempotentHint': True} 97 | ) 98 | async def delete_backtest(model: DeleteBacktestRequest) -> RestResponse: 99 | """Delete a backtest from a project.""" 100 | return await post('/backtests/delete', model) 101 | -------------------------------------------------------------------------------- /tests/test_compile.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from utils import ( 7 | validate_models, 8 | ensure_request_fails, 9 | ensure_request_raises_validation_error, 10 | ensure_request_raises_validation_error_when_omitting_an_arg 11 | ) 12 | from models import ( 13 | CreateCompileRequest, 14 | ReadCompileRequest, 15 | CreateCompileResponse, 16 | ReadCompileResponse 17 | ) 18 | 19 | 20 | # Static helpers for common operations: 21 | class Compile: 22 | 23 | @staticmethod 24 | async def create(project_id): 25 | output_model = await validate_models( 26 | mcp, 'create_compile', {'projectId': project_id}, 27 | CreateCompileResponse 28 | ) 29 | return output_model 30 | 31 | @staticmethod 32 | async def read(project_id, compile_id): 33 | output_model = await validate_models( 34 | mcp, 'read_compile', 35 | {'projectId': project_id, 'compileId': compile_id}, 36 | ReadCompileResponse 37 | ) 38 | return output_model 39 | 40 | @staticmethod 41 | async def wait_for_job_to_complete(project_id, compile_id): 42 | attempts = 0 43 | while attempts < 15: 44 | attempts += 1 45 | response = await Compile.read(project_id, compile_id) 46 | if response.state.value != 'InQueue': 47 | return response 48 | sleep(2) 49 | assert False, "Compile job stuck in queue." 50 | 51 | 52 | # Test suite: 53 | class TestCompile: 54 | 55 | @pytest.mark.asyncio 56 | @pytest.mark.parametrize('language', ['Py', 'C#']) 57 | async def test_create_compile(self, language): 58 | # Create a project. 59 | project_id = (await Project.create(language=language)).projectId 60 | # Test if we can compile the project. 61 | compile_response = await Compile.create(project_id) 62 | # Test if the project Id is correct. 63 | assert compile_response.projectId == project_id 64 | # Delete the project to clean up. 65 | await Project.delete(project_id) 66 | 67 | @pytest.mark.asyncio 68 | async def test_create_compile_with_invalid_args(self): 69 | # Try to compile a project without providing the project Id. 70 | tool_name = 'create_compile' 71 | await ensure_request_raises_validation_error( 72 | tool_name, CreateCompileRequest, {} 73 | ) 74 | # Try to compile a project that doesn't exist. 75 | await ensure_request_fails(mcp, tool_name, {'projectId': -1}) 76 | 77 | @pytest.mark.asyncio 78 | async def test_read_compile(self): 79 | # Create a project and add it to the compile queue. 
80 | project_id = (await Project.create()).projectId 81 | compile_id = (await Compile.create(project_id)).compileId 82 | # Test if we can read the compile job. 83 | compile_response = await Compile.wait_for_job_to_complete( 84 | project_id, compile_id 85 | ) 86 | # Test if the compile Id is correct. 87 | assert compile_response.compileId == compile_id 88 | assert compile_response.logs 89 | # Delete the project to clean up. 90 | await Project.delete(project_id) 91 | 92 | @pytest.mark.asyncio 93 | async def test_read_compile_with_invalid_args(self): 94 | # Create a project and add it to the compile queue. 95 | project_id = (await Project.create()).projectId 96 | compile_id = (await Compile.create(project_id)).compileId 97 | # Test the invalid requests. 98 | tool_name = 'read_compile' 99 | minimal_payload = {'projectId': project_id, 'compileId': compile_id} 100 | await ensure_request_raises_validation_error_when_omitting_an_arg( 101 | tool_name, ReadCompileRequest, minimal_payload 102 | ) 103 | # Try to read the compile job for a project that doesn't exist. 104 | await ensure_request_fails( 105 | mcp, tool_name, minimal_payload | {'projectId': -1} 106 | ) 107 | # Delete the project to clean up. 108 | await Project.delete(project_id) 109 | -------------------------------------------------------------------------------- /tests/test_backtest_charts.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from test_project import Project 5 | from test_compile import Compile 6 | from test_files import Files 7 | from test_backtests import Backtest 8 | from utils import ( 9 | validate_models, 10 | ensure_request_raises_validation_error_when_omitting_an_arg, 11 | ensure_request_fails_when_including_an_invalid_arg 12 | ) 13 | from models import ( 14 | ReadBacktestChartRequest, 15 | #LoadingChartResponse, 16 | ReadChartResponse 17 | ) 18 | 19 | 20 | # Static helpers for common operations: 21 | class BacktestCharts: 22 | 23 | default_charts = [ 24 | 'Strategy Equity', 'Capacity', 'Drawdown', 'Benchmark', 'Exposure', 25 | 'Assets Sales Volume', 'Portfolio Turnover', 'Portfolio Margin' 26 | ] 27 | 28 | @staticmethod 29 | async def read(project_id, backtest_id, name, start, end, count=100): 30 | output_model = await validate_models( 31 | mcp, 'read_backtest_chart', 32 | { 33 | 'projectId': project_id, 34 | 'backtestId': backtest_id, 35 | 'name': name, 36 | 'start': start, 37 | 'end': end, 38 | 'count': count 39 | }, 40 | ReadChartResponse 41 | ) 42 | return output_model.chart 43 | 44 | 45 | TEST_CASES = [ 46 | ('Py', 'charts.py'), 47 | ('C#', 'Charts.cs') 48 | ] 49 | # Test suite: 50 | class TestBacktestCharts: 51 | 52 | async def _run_algorithm(self, language, algo): 53 | # Backtest and algorithm that creates a custom chart. 54 | project_id, backtest_id = await Backtest.run_algorithm(language, algo) 55 | # Return the data we need to read the backtest charts. 56 | start = 1672531200 # Start Unix time of the backtest. 57 | end = 1680307200 # End Unix time of the backtest. 58 | return project_id, backtest_id, start, end 59 | 60 | @pytest.mark.asyncio 61 | @pytest.mark.parametrize('language, algo', TEST_CASES) 62 | async def test_read_backtest_chart(self, language, algo): 63 | # Run the backtest. 64 | project_id, backtest_id, start, end = ( 65 | await self._run_algorithm(language, algo) 66 | ) 67 | # Try to read the charts. 
68 | for name in BacktestCharts.default_charts + ['SMA']: 69 | chart = await BacktestCharts.read( 70 | project_id, backtest_id, name, start, end 71 | ) 72 | assert chart.name == name 73 | # Delete the project to clean up. 74 | await Project.delete(project_id) 75 | 76 | @pytest.mark.asyncio 77 | @pytest.mark.parametrize('language, algo', TEST_CASES) 78 | async def test_read_backtest_chart_with_invalid_args( 79 | self, language, algo): 80 | # Run the backtest. 81 | project_id, backtest_id, start, end = ( 82 | await self._run_algorithm(language, algo) 83 | ) 84 | # Test the invalid requests. 85 | tool_name = 'read_backtest_chart' 86 | class_ = ReadBacktestChartRequest 87 | minimal_payload = { 88 | 'projectId': project_id, 89 | 'backtestId': backtest_id, 90 | 'name': 'Strategy Equity', 91 | 'start': start, 92 | 'end': end, 93 | 'count': 100 94 | } 95 | # Try to read the insights without providing all the required 96 | # arguments. 97 | await ensure_request_raises_validation_error_when_omitting_an_arg( 98 | tool_name, class_, minimal_payload 99 | ) 100 | await ensure_request_fails_when_including_an_invalid_arg( 101 | mcp, tool_name, minimal_payload, [ 102 | # Try to read the charts of a backtest that doesn't 103 | # exist. 104 | {'backtestId': ' '}, 105 | # Try to read a chart that doesn't exist. 106 | {'name': ' '}, 107 | # Try to read a chart when the end time is before the 108 | # start time. 109 | {'start': end, 'end': start} 110 | ] 111 | ) 112 | # Delete the project to clean up. 113 | await Project.delete(project_id) 114 | -------------------------------------------------------------------------------- /tests/test_live_charts.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import time, sleep 3 | 4 | from main import mcp 5 | from test_live import Live 6 | from test_files import Files 7 | from test_project import Project 8 | from utils import ( 9 | validate_models, 10 | ensure_request_raises_validation_error_when_omitting_an_arg, 11 | ensure_request_fails_when_including_an_invalid_arg 12 | ) 13 | from models import ( 14 | ReadLiveChartRequest, 15 | LoadingResponse, 16 | ReadChartResponse, 17 | ) 18 | 19 | 20 | # Static helpers for common operations: 21 | class LiveCharts: 22 | 23 | default_charts = [ 24 | 'Strategy Equity', 'Drawdown', 'Benchmark', 'Exposure', 25 | 'Assets Sales Volume', 'Portfolio Turnover', 'Portfolio Margin' 26 | ] 27 | 28 | @staticmethod 29 | async def read(project_id, name, start, end, count=100): 30 | return await validate_models( 31 | mcp, 'read_live_chart', 32 | { 33 | 'projectId': project_id, 34 | 'name': name, 35 | 'start': start, 36 | 'end': end, 37 | 'count': count 38 | }, 39 | ReadChartResponse 40 | ) 41 | 42 | @staticmethod 43 | async def wait_for_chart_to_load(project_id, name, start, count=None): 44 | attempts = 0 45 | while attempts < 12*2: # 2 mins 46 | attempts += 1 47 | end = max(start+10, int(time())) 48 | response = await LiveCharts.read(project_id, name, start, end) 49 | if response.errors is None: 50 | return response.chart 51 | sleep(5) 52 | assert False, f"Chart didn't load in time. {type(response)}" 53 | 54 | 55 | TEST_CASES = [ 56 | ('Py', 'live_charts.py'), 57 | ('C#', 'LiveCharts.cs') 58 | ] 59 | # Test suite: 60 | class TestLiveCharts: 61 | 62 | @pytest.mark.asyncio 63 | @pytest.mark.parametrize('language, algo', TEST_CASES) 64 | async def test_read_live_chart(self, language, algo): 65 | # Create and compile the project. 
66 | project_id, compile_id = await Files.setup_project(language, algo) 67 | # Deploy the algorithm. 68 | await Live.create( 69 | project_id, compile_id, await Live.get_node_id(project_id) 70 | ) 71 | await Live.wait_for_algorithm_to_start(project_id) 72 | start = int(time()) 73 | # Give the algorithm time to plot the data and then stop it so 74 | # it flushes all the charts to the file. Without stopping it, 75 | # we'll have to wait ~10 minutes for the chart file to populate. 76 | sleep(180) 77 | await Live.stop(project_id) 78 | # Try to read the charts. 79 | for name in LiveCharts.default_charts + ['SMA']: 80 | chart = await LiveCharts.wait_for_chart_to_load( 81 | project_id, name, start 82 | ) 83 | assert chart.name == name, chart 84 | # Delete the project to clean up. 85 | await Project.delete(project_id) 86 | 87 | @pytest.mark.asyncio 88 | async def test_read_live_chart_with_invalid_args(self): 89 | # Create and compile the project. 90 | project_id, compile_id = await Files.setup_project('Py') 91 | # Deploy the algorithm. 92 | await Live.create( 93 | project_id, compile_id, await Live.get_node_id(project_id) 94 | ) 95 | await Live.wait_for_algorithm_to_start(project_id) 96 | # Test the invalid requests. 97 | tool_name = 'read_live_chart' 98 | class_ = ReadLiveChartRequest 99 | start = int(time()) 100 | end = start + 100 101 | minimal_payload = { 102 | 'projectId': project_id, 103 | 'name': 'Strategy Equity', 104 | 'start': start, 105 | 'end': end, 106 | 'count': 100 107 | } 108 | # Try to read the insights without providing all the required 109 | # arguments. 110 | await ensure_request_raises_validation_error_when_omitting_an_arg( 111 | tool_name, class_, minimal_payload 112 | ) 113 | await ensure_request_fails_when_including_an_invalid_arg( 114 | mcp, tool_name, minimal_payload, [ 115 | # Try to read the charts of a project that doesn't 116 | # exist. 117 | {'projectId': -1}, 118 | # Try to read a chart that doesn't exist. 119 | {'name': ' '}, 120 | # Try to read a chart when the end time is before the 121 | # start time. 122 | {'start': end, 'end': start} 123 | ] 124 | ) 125 | # Stop the algorithm and delete the project to clean up. 126 | await Live.stop(project_id) 127 | await Project.delete(project_id) 128 | -------------------------------------------------------------------------------- /create_tool_markdown.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script generates the markdown for the table of available tools and 3 | the description of each tool. To run this script, follow these steps: 4 | 5 | 1) Start the inspector. 6 | 7 | `npx @modelcontextprotocol/inspector uv run src/main.py` 8 | 9 | 2) In the inspector, connect to the server and then click List Tools. 10 | 11 | 3) Copy the response JSON and save it on your local machine as tool_list.json. 12 | 13 | 4) Run `python create_tool_markdown.py`. 14 | """ 15 | 16 | import json 17 | 18 | def clean_description(description): 19 | # Split by period to get the first sentence, then strip and normalize whitespace 20 | first_sentence = description.split('.')[0] 21 | # Replace multiple whitespace characters (spaces, tabs, newlines) with a single space 22 | cleaned = ' '.join(first_sentence.split()) 23 | return cleaned + '.' 
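# --- Editor's note (added to this dump, not part of the original file):
# a worked example of what `clean_description` above returns. For a
# docstring such as
#     "Read a chart\n        from a backtest. The snapshot updates."
# the split on '.' keeps only the first sentence, the whitespace is
# collapsed to single spaces, and the trailing period is restored, giving
#     "Read a chart from a backtest."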
# Add period back to end
24 | 
25 | def create_tools_table(tools):
26 |     content = f"## Available Tools ({len(tools)})\n"
27 |     content += "| Tools provided by this Server | Short Description |\n"
28 |     content += "| -------- | ------- |\n"
29 |     for tool in tools:
30 |         content += f"| `{tool['name']}` | {clean_description(tool['description'])} |\n"
31 |     content += " --- \n"
32 |     return content
33 | 
34 | def create_tool_details(tools):
35 |     content = "## Tool Details\n"
36 |     for tool in tools:
37 |         print('Tool:', tool['name'])
38 |         content += f"**Tool:** `{tool['name']}`\n"
39 |         content += f"\n{clean_description(tool['description'])}\n"
40 |         properties = tool['inputSchema']['properties'].get('model', None)
41 |         if properties:
42 |             #print('Property: ', properties)
43 |             content += "\n| Parameter | Type | Description |\n"
44 |             content += "| -------- | ------- | ------- |\n"
45 |             defs = tool['inputSchema'].get('$defs', {})
46 |             input_model_name = properties['$ref'].split("/")[-1]
47 |             model_name = input_model_name
48 |             for name, meta in defs[model_name]['properties'].items():
49 |                 print(' property:', name)
50 |                 required = name in defs[input_model_name].get('required', [])
51 |                 if 'type' in meta:
52 |                     data_type = meta['type']
53 |                 elif 'anyOf' in meta:
54 |                     # Instead of listing enum values, let's just put a
55 |                     # placeholder for these cases:
56 |                     if name in ['brokerage', 'dataProviders']:
57 |                         data_type = 'object'
58 |                     elif name == 'status':
59 |                         data_type = 'status enum'
60 |                     elif name == 'format':
61 |                         data_type = ''
62 |                     else:
63 |                         data_type = meta['anyOf'][0]['type']
64 |                 elif '$ref' in meta:
65 |                     model_name = meta['$ref'].split("/")[-1]
66 |                     data_type = defs[model_name]['type']
67 |                 content += f"| `{name}` | `{data_type}` {'' if required else '*optional*'} | {meta['description'].split('\n')[0]} |\n"
68 | 
69 |         # These default values come from https://modelcontextprotocol.io/docs/concepts/tools#available-tool-annotations
70 |         read_only = tool['annotations'].get('readOnlyHint', False)
71 |         if read_only:
72 |             content += "\n*This tool doesn't modify its environment.*\n"
73 |         else:
74 |             content += "\n*This tool modifies its environment.*\n"
75 | 
76 |         if tool['annotations'].get('destructiveHint', True):
77 |             content += "\n*This tool may perform destructive updates.*\n"
78 |         else:
79 |             content += "\n*This tool doesn't perform destructive updates.*\n"
80 | 
81 |         if tool['annotations'].get('idempotentHint', False):
82 |             content += "\n*Calling this tool repeatedly with the same arguments has no additional effect.*\n"
83 |         else:
84 |             content += "\n*Calling this tool repeatedly with the same arguments has additional effects.*\n"
85 | 
86 |         if tool['annotations'].get('openWorldHint', True):
87 |             content += "\n*This tool may interact with an \"open world\" of external entities.*\n"
88 |         else:
89 |             content += "\n*This tool doesn't interact with an \"open world\" of external entities.*\n"
90 | 
91 |         content += "\n---\n"
92 |     return content
93 | 
94 | def document(tools, output_file="README.md"):
95 |     content = create_tools_table(tools)
96 |     content += create_tool_details(tools)
97 |     with open(output_file, 'w') as f:
98 |         f.write(content)
99 |     print(f"README generated successfully at {output_file}")
100 | 
101 | if __name__ == "__main__":
102 |     with open('tool_list.json', 'r') as f:
103 |         document(json.load(f)['tools'])
104 | 
--------------------------------------------------------------------------------
/tests/test_ai.py:
--------------------------------------------------------------------------------
1 | import
pytest 2 | 3 | from main import mcp 4 | from utils import validate_models 5 | from models import ( 6 | BacktestInitResponse, 7 | CodeCompletionResponse, 8 | ErrorEnhanceResponse, 9 | PEP8ConvertResponse, 10 | SyntaxCheckResponse, 11 | SearchResponse 12 | ) 13 | 14 | 15 | # Static helpers for common operations: 16 | class AI: 17 | 18 | @staticmethod 19 | async def check_initialization_errors(language, files): 20 | return await validate_models( 21 | mcp, 'check_initialization_errors', 22 | {'language': language, 'files': files}, BacktestInitResponse 23 | ) 24 | 25 | @staticmethod 26 | async def complete_code(language, sentence, **kwargs): 27 | return await validate_models( 28 | mcp, 'complete_code', 29 | {'language': language, 'sentence': sentence} | kwargs, 30 | CodeCompletionResponse 31 | ) 32 | 33 | @staticmethod 34 | async def enhance_error_message(language, message, **kwargs): 35 | return await validate_models( 36 | mcp, 'enhance_error_message', 37 | { 38 | 'language': language, 39 | 'error': {'message': message} | kwargs 40 | }, 41 | ErrorEnhanceResponse 42 | ) 43 | 44 | @staticmethod 45 | async def update_code_to_pep8(files): 46 | return await validate_models( 47 | mcp, 'update_code_to_pep8', {'files': files}, PEP8ConvertResponse 48 | ) 49 | 50 | @staticmethod 51 | async def check_syntax(language, files): 52 | return await validate_models( 53 | mcp, 'check_syntax', {'language': language, 'files': files}, 54 | SyntaxCheckResponse 55 | ) 56 | 57 | @staticmethod 58 | async def search_quantconnect(language, criteria): 59 | return await validate_models( 60 | mcp, 'search_quantconnect', {'language': language, 'criteria': criteria}, 61 | SearchResponse 62 | ) 63 | 64 | 65 | # Test suite: 66 | class TestAI: 67 | 68 | @pytest.mark.asyncio 69 | @pytest.mark.parametrize('language, algo, name', [ 70 | ('Py', 'initialization_errors.py', 'main.py'), 71 | ('C#', 'InitializationErrors.cs', 'Main.cs') 72 | ]) 73 | async def test_check_initialization_errors(self, language, algo, name): 74 | # Get the file content. 75 | with open('tests/algorithms/' + algo, 'r') as file: 76 | content = file.read() 77 | # Check for initialization errors. 78 | await AI.check_initialization_errors( 79 | language, [{'name': name, 'content': content}] 80 | ) 81 | 82 | @pytest.mark.asyncio 83 | @pytest.mark.parametrize('language, sentence, answer', [ 84 | ('Py', 'self.add_e', 'self.add_equity'), 85 | ('C#', 'AddE', 'AddEquity') 86 | ]) 87 | async def test_complete_code(self, language, sentence, answer): 88 | response = await AI.complete_code(language, sentence) 89 | assert response.payload 90 | assert any([answer in c for c in response.payload]) 91 | 92 | @pytest.mark.asyncio 93 | async def test_enhance_error_message(self): 94 | message = """ at initialize 95 | self._option = self.add_index_option("SPX", Resolution.MINUTE, "SPXW").symbol 96 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 97 | in c1afe80a-e056-4841-b0c1-d9c562cf2bd8.py: line 15 98 | The specified market wasn't found in the markets lookup. Requested: spxw. You can add markets by calling QuantConnect.Market.Add(string,int) (Parameter 'market') 99 | """ 100 | await AI.enhance_error_message('Py', message) 101 | 102 | @pytest.mark.asyncio 103 | async def test_update_code_to_pep8(self): 104 | name = 'pep8_violations.py' 105 | # Get the file content. 
106 | with open('tests/algorithms/' + name, 'r') as file: 107 | content = file.read() 108 | files = [{'name': name, 'content': content}] 109 | response = await AI.update_code_to_pep8(files) 110 | new_content = response.payload[name] 111 | for x in ['Initialize', 'AddEquity']: 112 | assert x not in new_content 113 | for x in ['initialize', 'add_equity']: 114 | assert x in new_content 115 | 116 | @pytest.mark.asyncio 117 | async def test_check_syntax(self): 118 | name = 'syntax_errors.py' 119 | # Get the file content. 120 | with open('tests/algorithms/' + name, 'r') as file: 121 | content = file.read() 122 | files = [{'name': name, 'content': content}] 123 | response = await AI.check_syntax('Py', files) 124 | assert response.state.value == 'Error' 125 | assert response.payload 126 | 127 | @pytest.mark.asyncio 128 | async def test_search_quantconnect(self): 129 | criteria = [ 130 | { 131 | 'input': 'How to create an Alpha model', 132 | 'type': 'Examples', 133 | 'count': 3 134 | } 135 | ] 136 | response = await AI.search_quantconnect('Py', criteria) 137 | assert response.state.value == 'End' 138 | assert len(response.retrivals) == 3 139 | 140 | -------------------------------------------------------------------------------- /tests/test_project_nodes.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from test_project import Project 5 | from utils import ( 6 | validate_models, 7 | ensure_request_raises_validation_error, 8 | ensure_request_fails 9 | ) 10 | from models import ( 11 | ReadProjectNodesRequest, 12 | UpdateProjectNodesRequest, 13 | ProjectNodesResponse 14 | ) 15 | 16 | # Static helpers for common operations: 17 | class ProjectNodes: 18 | 19 | @staticmethod 20 | async def read(project_id): 21 | output_model = await validate_models( 22 | mcp, 'read_project_nodes', {'projectId': project_id}, 23 | ProjectNodesResponse 24 | ) 25 | return output_model 26 | 27 | @staticmethod 28 | async def update(project_id, **kwargs): 29 | output_model = await validate_models( 30 | mcp, 'update_project_nodes', {'projectId': project_id} | kwargs, 31 | ProjectNodesResponse 32 | ) 33 | return output_model 34 | 35 | 36 | # Test suite: 37 | class TestProjectNodes: 38 | 39 | async def _ensure_all_nodes_are_inactive(self, nodes): 40 | assert len([n for n in nodes.backtest if n.active]) == 0 41 | assert len([n for n in nodes.research if n.active]) == 0 42 | assert len([n for n in nodes.live if n.active]) == 0 43 | 44 | async def _active_nodes(self, nodes): 45 | return ( 46 | [n.id for n in nodes.backtest if n.active] 47 | + [n.id for n in nodes.research if n.active] 48 | + [n.id for n in nodes.live if n.active] 49 | ) 50 | 51 | @pytest.mark.asyncio 52 | @pytest.mark.parametrize('language', ['Py', 'C#']) 53 | async def test_read_project_nodes(self, language): 54 | # Create a project. 55 | project_id = (await Project.create(language=language)).projectId 56 | # Test if we can read the project nodes. 57 | nodes_response = await ProjectNodes.read(project_id) 58 | # Test if the default is to enable 'autoSelectNode'. 59 | assert nodes_response.autoSelectNode 60 | await self._ensure_all_nodes_are_inactive(nodes_response.nodes) 61 | # Delete the project to clean up. 62 | await Project.delete(project_id) 63 | 64 | @pytest.mark.asyncio 65 | async def test_read_project_nodes_with_invalid_args(self): 66 | # Try to read the project nodes without providing the project 67 | # Id. 
68 | tool_name = 'read_project_nodes' 69 | await ensure_request_raises_validation_error( 70 | tool_name, ReadProjectNodesRequest, {} 71 | ) 72 | # Try to read the nodes of a project that doesn't exist. 73 | await ensure_request_fails(mcp, tool_name, {'projectId': -1}) 74 | 75 | @pytest.mark.asyncio 76 | async def test_update_project_nodes(self): 77 | # Create a project. 78 | project_id = (await Project.create()).projectId 79 | 80 | # Select some specific nodes. 81 | nodes = (await ProjectNodes.read(project_id)).nodes 82 | node_ids = ( 83 | [n.id for n in nodes.backtest[:2]] 84 | + [n.id for n in nodes.research[:2]] 85 | + [n.id for n in nodes.live[:2]] 86 | ) 87 | # Update the project's selected nodes. 88 | nodes_response = await ProjectNodes.update(project_id, nodes=node_ids) 89 | # Test if the project's selected nodes were updated. 90 | assert not nodes_response.autoSelectNode 91 | active_nodes = await self._active_nodes(nodes_response.nodes) 92 | for node_id in node_ids: 93 | assert node_id in active_nodes 94 | 95 | # Update the project to auto-select the best node by omitting 96 | # the list of nodes. 97 | nodes_response = await ProjectNodes.update(project_id) 98 | # Test if the project's selected nodes were updated. 99 | assert nodes_response.autoSelectNode 100 | await self._ensure_all_nodes_are_inactive(nodes_response.nodes) 101 | 102 | # Update the project to auto-select the best node by passing 103 | # an empty list. 104 | await ProjectNodes.update(project_id, nodes=node_ids) 105 | nodes_response = await ProjectNodes.update(project_id, nodes=[]) 106 | # Test if the project's selected nodes were updated. 107 | assert nodes_response.autoSelectNode 108 | await self._ensure_all_nodes_are_inactive(nodes_response.nodes) 109 | 110 | # Delete the project to clean up. 111 | await Project.delete(project_id) 112 | 113 | @pytest.mark.asyncio 114 | async def test_update_project_nodes_with_invalid_args(self): 115 | # Create a project. 116 | project_id = (await Project.create()).projectId 117 | # Test the invalid requests. 118 | invalid_payloads = [ 119 | # Try to update the nodes of a project that doesn't exist. 120 | {'projectId': -1}, 121 | # Try to update the nodes of a project by providing invalid 122 | # node Ids. 123 | #{'projectId': project_id, 'nodes': ['fakeNodeId']} 124 | ] 125 | for payload in invalid_payloads: 126 | await ensure_request_fails(mcp, 'update_project_nodes', payload) 127 | # Delete the project to clean up. 
128 |         await Project.delete(project_id)
129 | 
--------------------------------------------------------------------------------
/src/tools/live.py:
--------------------------------------------------------------------------------
1 | from pydantic_core import to_jsonable_python
2 | import webbrowser
3 | 
4 | from api_connection import post, httpx, get_headers, BASE_URL
5 | from models import (
6 |     AuthorizeExternalConnectionRequest,
7 |     CreateLiveAlgorithmRequest,
8 |     ReadLiveAlgorithmRequest,
9 |     ListLiveAlgorithmsRequest,
10 |     ReadLivePortfolioRequest,
11 |     ReadLiveChartRequest,
12 |     ReadLiveOrdersRequest,
13 |     ReadLiveInsightsRequest,
14 |     ReadLiveLogsRequest,
15 |     LiquidateLiveAlgorithmRequest,
16 |     StopLiveAlgorithmRequest,
17 |     AuthorizeExternalConnectionResponse,
18 |     CreateLiveAlgorithmResponse,
19 |     LiveAlgorithmResults,
20 |     LiveAlgorithmListResponse,
21 |     LivePortfolioResponse,
22 |     ReadChartResponse,
23 |     LiveOrdersResponse,
24 |     LiveInsightsResponse,
25 |     ReadLiveLogsResponse,
26 |     RestResponse
27 | )
28 | 
29 | async def handle_loading_response(response, text):
30 |     if 'progress' in response:
31 |         progress = response["progress"]
32 |         return {'errors': [f'{text} Progress: {progress}']}
33 |     return response
34 | 
35 | def register_live_trading_tools(mcp):
36 |     # Authenticate
37 |     @mcp.tool(
38 |         annotations={
39 |             'title': 'Authorize external connection',
40 |             'readOnlyHint': False,
41 |             'destructiveHint': False,
42 |             'idempotentHint': True
43 |         }
44 |     )
45 |     async def authorize_connection(
46 |             model: AuthorizeExternalConnectionRequest
47 |         ) -> AuthorizeExternalConnectionResponse:
48 |         """Authorize an external connection with a live brokerage or
49 |         data provider.
50 | 
51 |         This tool automatically opens your browser for you to complete
52 |         the authentication flow. For the flow to work, you must be
53 |         logged into your QuantConnect account on the browser that opens.
54 |         """
55 |         # This endpoint is unique because we need to extract the
56 |         # redirect URL from the response and open it in a browser.
57 |         async with httpx.AsyncClient(follow_redirects=False) as client:
58 |             response = await client.post(
59 |                 f'{BASE_URL}/live/auth0/authorize',
60 |                 headers=get_headers(),
61 |                 json=to_jsonable_python(model, exclude_none=True),
62 |                 timeout=300.0 # 5 minutes
63 |             )
64 |             # Extract the redirect URL from the 'Location' header
65 |             redirect_url = response.headers.get("Location")
66 |             # Open the URL in the user's default browser.
67 |             webbrowser.open(redirect_url)
68 |             # Read the authentication.
69 |             return await post('/live/auth0/read', model, 800.0)
70 | 
71 |     # Create
72 |     @mcp.tool(
73 |         annotations={
74 |             'title': 'Create live algorithm', 'destructiveHint': False
75 |         }
76 |     )
77 |     async def create_live_algorithm(
78 |             model: CreateLiveAlgorithmRequest) -> CreateLiveAlgorithmResponse:
79 |         """Create a live algorithm."""
80 |         return await post('/live/create', model)
81 | 
82 |     # Read (singular)
83 |     @mcp.tool(annotations={'title': 'Read live algorithm', 'readOnlyHint': True})
84 |     async def read_live_algorithm(
85 |             model: ReadLiveAlgorithmRequest) -> LiveAlgorithmResults:
86 |         """Read details of a live algorithm."""
87 |         return await post('/live/read', model)
88 | 
89 |     # Read (all).
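    # --- Editor's note (added to this dump, not part of the original file):
    # the chart and order tools registered below pass their responses
    # through `handle_loading_response` (defined near the top of this
    # module). While the API is still building the snapshot, a payload such
    # as {'progress': 0.25} is therefore surfaced to the client as
    # {'errors': ['Chart is loading. Progress: 0.25']}, so callers are
    # expected to poll until the real chart or order list arrives -- the
    # `wait_for_*` helpers in the test suite do exactly that.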
90 |     @mcp.tool(annotations={'title': 'List live algorithms', 'readOnlyHint': True})
91 |     async def list_live_algorithms(
92 |             model: ListLiveAlgorithmsRequest) -> LiveAlgorithmListResponse:
93 |         """List all your past and current live trading deployments."""
94 |         return await post('/live/list', model)
95 | 
96 |     # Read a chart.
97 |     @mcp.tool(annotations={'title': 'Read live chart', 'readOnlyHint': True})
98 |     async def read_live_chart(
99 |             model: ReadLiveChartRequest) -> ReadChartResponse:
100 |         """Read a chart from a live algorithm."""
101 |         return await handle_loading_response(
102 |             await post('/live/chart/read', model), 'Chart is loading.'
103 |         )
104 | 
105 |     # Read the logs.
106 |     @mcp.tool(annotations={'title': 'Read live logs', 'readOnlyHint': True})
107 |     async def read_live_logs(
108 |             model: ReadLiveLogsRequest) -> ReadLiveLogsResponse:
109 |         """Get the logs of a live algorithm.
110 | 
111 |         The snapshot updates about every 5 minutes."""
112 |         return await post('/live/logs/read', model)
113 | 
114 |     # Read the portfolio state.
115 |     @mcp.tool(annotations={'title': 'Read live portfolio', 'readOnlyHint': True})
116 |     async def read_live_portfolio(
117 |             model: ReadLivePortfolioRequest) -> LivePortfolioResponse:
118 |         """Read the portfolio state of a live algorithm.
119 | 
120 |         The snapshot updates about every 10 minutes."""
121 |         return await post('/live/portfolio/read', model)
122 | 
123 |     # Read the orders.
124 |     @mcp.tool(annotations={'title': 'Read live orders', 'readOnlyHint': True})
125 |     async def read_live_orders(
126 |             model: ReadLiveOrdersRequest) -> LiveOrdersResponse:
127 |         """Read the orders of a live algorithm.
128 | 
129 |         The snapshot updates about every 10 minutes."""
130 |         return await handle_loading_response(
131 |             await post('/live/orders/read', model), 'Orders are loading.'
132 |         )
133 | 
134 |     # Read the insights.
135 |     @mcp.tool(annotations={'title': 'Read live insights', 'readOnlyHint': True})
136 |     async def read_live_insights(
137 |             model: ReadLiveInsightsRequest) -> LiveInsightsResponse:
138 |         """Read the insights of a live algorithm.
139 | 140 | The snapshot updates about every 10 minutes.""" 141 | return await post('/live/insights/read', model) 142 | 143 | # Update (stop) 144 | @mcp.tool( 145 | annotations={'title': 'Stop live algorithm', 'idempotentHint': True} 146 | ) 147 | async def stop_live_algorithm( 148 | model: StopLiveAlgorithmRequest) -> RestResponse: 149 | """Stop a live algorithm.""" 150 | return await post('/live/update/stop', model) 151 | 152 | # Update (liquidate) 153 | @mcp.tool( 154 | annotations={ 155 | 'title': 'Liquidate live algorithm', 'idempotentHint': True 156 | } 157 | ) 158 | async def liquidate_live_algorithm( 159 | model: LiquidateLiveAlgorithmRequest) -> RestResponse: 160 | """Liquidate and stop a live algorithm.""" 161 | return await post('/live/update/liquidate', model) 162 | -------------------------------------------------------------------------------- /tests/test_project.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from main import mcp 4 | from utils import ( 5 | validate_models, 6 | ensure_request_fails, 7 | ensure_request_raises_validation_error, 8 | ensure_request_raises_validation_error_when_omitting_an_arg, 9 | create_timestamp, 10 | ) 11 | from models import ( 12 | CreateProjectRequest, 13 | ReadProjectRequest, 14 | UpdateProjectRequest, 15 | DeleteProjectRequest, 16 | ProjectListResponse, 17 | RestResponse, 18 | ) 19 | 20 | 21 | # Static helpers for common operations: 22 | class Project: 23 | 24 | @staticmethod 25 | async def create(name=f"Project {create_timestamp()}", language='Py'): 26 | output_model = await validate_models( 27 | mcp, 'create_project', {'name': name, 'language': language}, 28 | ProjectListResponse 29 | ) 30 | return output_model.projects[0] 31 | 32 | @staticmethod 33 | async def read(**kwargs): 34 | output_model = await validate_models( 35 | mcp, 'read_project', kwargs, ProjectListResponse 36 | ) 37 | return output_model.projects 38 | 39 | @staticmethod 40 | async def update(id_, **kwargs): 41 | await validate_models( 42 | mcp, 'update_project', {'projectId': id_} | kwargs, RestResponse 43 | ) 44 | 45 | @staticmethod 46 | async def delete(id_): 47 | await validate_models( 48 | mcp, 'delete_project', {'projectId': id_}, RestResponse 49 | ) 50 | 51 | @staticmethod 52 | async def list(): 53 | output_model = await validate_models( 54 | mcp, 'list_projects', output_class=ProjectListResponse 55 | ) 56 | return output_model.projects 57 | 58 | 59 | # Test suite: 60 | class TestProject: 61 | 62 | @pytest.mark.asyncio 63 | @pytest.mark.parametrize('language', ['Py', 'C#']) 64 | async def test_create_project(self, language): 65 | name = f"Project {create_timestamp()}" 66 | project = await Project.create(name, language) 67 | # Test if the name and language are correct. 68 | assert project.name == name 69 | assert project.language.value == language 70 | # Delete the project to clean up. 71 | await Project.delete(project.projectId) 72 | 73 | @pytest.mark.asyncio 74 | async def test_create_project_with_invalid_args(self): 75 | tool_name = 'create_project' 76 | request_class = CreateProjectRequest 77 | minimal_payload = {'name': 'Test Project', 'language': 'Py'} 78 | # Try to create a project without providing all the arguments. 79 | await ensure_request_raises_validation_error_when_omitting_an_arg( 80 | tool_name, request_class, minimal_payload 81 | ) 82 | # Try to create a project with an unsupported language. 
83 | await ensure_request_raises_validation_error( 84 | tool_name, request_class, minimal_payload | {'language': 'C++'} 85 | ) 86 | 87 | @pytest.mark.asyncio 88 | async def test_read_project(self): 89 | # Create a project. 90 | name = f"Project {create_timestamp()}" 91 | language = 'Py' 92 | id_ = (await Project.create(name, language)).projectId 93 | # Read the project. 94 | project = (await Project.read(projectId=id_))[0] 95 | # Test if the name and language are correct. 96 | assert project.name == name 97 | assert project.language.value == language 98 | # Delete the project to clean up. 99 | await Project.delete(id_) 100 | # Test if we can read multiple projects. 101 | num_projects = 2 102 | ids = [ 103 | (await Project.create(name, language)).projectId 104 | for i in range(num_projects) 105 | ] 106 | projects = await Project.read(end=num_projects) 107 | assert len(projects) == num_projects 108 | # Delete the projects to clean up. 109 | for id_ in ids: 110 | await Project.delete(id_) 111 | 112 | @pytest.mark.asyncio 113 | async def test_read_project_with_invalid_args(self): 114 | payloads = [ 115 | # Try to read a project that doesn't exist. 116 | {'projectId': -1}, 117 | # Try to read a list of projects where start >= end. 118 | {'start': 1, 'end': 1}, 119 | {'start': 1, 'end': 0} 120 | ] 121 | for payload in payloads: 122 | await ensure_request_fails(mcp, 'read_project', payload) 123 | 124 | 125 | @pytest.mark.asyncio 126 | async def test_update_project(self): 127 | # Create a project. 128 | id_ = (await Project.create()).projectId 129 | 130 | # Update the project name. 131 | new_name = f"Project {create_timestamp()}" 132 | await Project.update(id_, name=new_name) 133 | # Test if the new name is correct. 134 | project = (await Project.read(projectId=id_))[0] 135 | assert project.name == new_name 136 | 137 | # Update the project description. 138 | new_description = f"Description {create_timestamp()}" 139 | await Project.update(id_, description=new_description) 140 | # Test if the new description is correct. 141 | project = (await Project.read(projectId=id_))[0] 142 | assert project.description == new_description 143 | 144 | # Update the project name and description. 145 | new_name = f"Project {create_timestamp()}" 146 | new_description = f"Description {create_timestamp()}" 147 | await Project.update( 148 | id_, name=new_name, description=new_description 149 | ) 150 | # Test if the new name & description are correct. 151 | project = (await Project.read(projectId=id_))[0] 152 | assert project.name == new_name 153 | assert project.description == new_description 154 | 155 | # Delete the project to clean up. 156 | await Project.delete(id_) 157 | 158 | @pytest.mark.asyncio 159 | async def test_update_project_with_a_nonunique_name(self): 160 | # Create 2 projects. 161 | name_1 = f"Project {create_timestamp()}" 162 | id_1 = (await Project.create(name_1)).projectId 163 | name_2 = f"Project {create_timestamp()}" 164 | id_2 = (await Project.create(name_2)).projectId 165 | # Try updating the project names to match. 166 | await ensure_request_fails( 167 | mcp, 'update_project', {'projectId': id_2, 'name': name_1} 168 | ) 169 | # Delete the projects. 170 | await Project.delete(id_1) 171 | await Project.delete(id_2) 172 | 173 | @pytest.mark.asyncio 174 | async def test_delete_projects_with_invalid_args(self): 175 | # Try to delete a project that doesn't exist. 
176 | await ensure_request_fails(mcp, 'delete_project', {'projectId': -1}) 177 | 178 | @pytest.mark.asyncio 179 | async def test_list_projects(self): 180 | await Project.list() 181 | -------------------------------------------------------------------------------- /tests/test_live.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project_nodes import ProjectNodes 6 | from test_project import Project 7 | from test_files import Files 8 | from utils import ( 9 | validate_models, 10 | ensure_request_fails, 11 | ensure_request_raises_validation_error, 12 | ensure_request_raises_validation_error_when_omitting_an_arg, 13 | ensure_request_fails_when_including_an_invalid_arg 14 | ) 15 | from models import ( 16 | CreateLiveAlgorithmRequest, 17 | ReadLiveAlgorithmRequest, 18 | 19 | AuthorizeExternalConnectionResponse, 20 | CreateLiveAlgorithmResponse, 21 | LiveAlgorithmResults, 22 | LivePortfolioResponse, 23 | LiveAlgorithmListResponse, 24 | RestResponse 25 | ) 26 | 27 | DEFAULT_SETTINGS = { 28 | 'versionId': '-1', 29 | 'brokerage': { 30 | 'id': 'QuantConnectBrokerage' 31 | } 32 | } 33 | 34 | 35 | # Static helpers for common operations: 36 | class Live: 37 | 38 | @staticmethod 39 | async def authorize_connection(brokerage): 40 | return await validate_models( 41 | mcp, 'authorize_connection', {'brokerage': brokerage}, 42 | AuthorizeExternalConnectionResponse 43 | ) 44 | 45 | @staticmethod 46 | async def create( 47 | project_id, compile_id, node_id, 48 | version_id=DEFAULT_SETTINGS['versionId'], 49 | brokerage=DEFAULT_SETTINGS['brokerage'], 50 | **kwargs): 51 | return await validate_models( 52 | mcp, 'create_live_algorithm', 53 | { 54 | 'projectId': project_id, 55 | 'compileId': compile_id, 56 | 'nodeId': node_id, 57 | 'versionId': version_id, 58 | 'brokerage': brokerage 59 | } | kwargs, 60 | CreateLiveAlgorithmResponse 61 | ) 62 | 63 | @staticmethod 64 | async def read(project_id): 65 | return await validate_models( 66 | mcp, 'read_live_algorithm', {'projectId': project_id}, 67 | LiveAlgorithmResults 68 | ) 69 | 70 | @staticmethod 71 | async def list(**kwargs): 72 | output_model = await validate_models( 73 | mcp, 'list_live_algorithms', kwargs, LiveAlgorithmListResponse 74 | ) 75 | return output_model.live 76 | 77 | @staticmethod 78 | async def read_portfolio(project_id): 79 | output_model = await validate_models( 80 | mcp, 'read_live_portfolio', {'projectId': project_id}, 81 | LivePortfolioResponse 82 | ) 83 | return output_model.portfolio 84 | 85 | @staticmethod 86 | async def liquidate(project_id): 87 | return await validate_models( 88 | mcp, 'liquidate_live_algorithm', {'projectId': project_id}, 89 | RestResponse 90 | ) 91 | 92 | @staticmethod 93 | async def stop(project_id): 94 | return await validate_models( 95 | mcp, 'stop_live_algorithm', {'projectId': project_id}, 96 | RestResponse 97 | ) 98 | 99 | async def get_node_id(project_id): 100 | # Get the Id of a live trading node (CPU node that isn't busy). 
101 |         nodes = (await ProjectNodes.read(project_id)).nodes.live
102 |         nodes = [n for n in nodes if not n.busy and not n.hasGpu]
103 |         assert nodes, 'No nodes available'
104 |         return nodes[0].id
105 | 
106 |     @staticmethod
107 |     async def wait_for_algorithm_to_start(project_id):
108 |         attempts = 0
109 |         while attempts < 18:
110 |             attempts += 1
111 |             live = await Live.read(project_id)
112 |             if live.status.value == 'Running':
113 |                 return live
114 |             sleep(10)
115 |         assert False, "Live job didn't start in time."
116 | 
117 |     @staticmethod
118 |     async def wait_for_holding_to_be_removed(project_id, symbol_id):
119 |         attempts = 0
120 |         while attempts < 60:  # 10 minutes
121 |             attempts += 1
122 |             portfolio = await Live.read_portfolio(project_id)
123 |             if symbol_id not in portfolio.holdings:
124 |                 return
125 |             sleep(10)
126 |         assert False, "Holding wasn't removed in time."
127 | 
128 |     @staticmethod
129 |     async def wait_for_holdings_to_update(project_id, symbol_id):
130 |         attempts = 0
131 |         while attempts < 60:  # 10 minutes
132 |             attempts += 1
133 |             portfolio = await Live.read_portfolio(project_id)
134 |             if symbol_id in portfolio.holdings:
135 |                 return portfolio
136 |             sleep(10)
137 |         assert False, "Holding wasn't updated in time."
138 | 
139 | 
140 | # Test suite:
141 | class TestLive:
142 | 
143 |     @pytest.mark.asyncio
144 |     @pytest.mark.parametrize('language', ['Py', 'C#'])
145 |     async def test_create_live_algorithm(self, language):
146 |         # Create and compile the project.
147 |         project_id, compile_id = await Files.setup_project(language)
148 |         # Get the Id of a live trading node.
149 |         node_id = await Live.get_node_id(project_id)
150 |         # Try to deploy the algorithm.
151 |         response = await Live.create(project_id, compile_id, node_id)
152 |         assert response.source == 'api-v2'
153 |         assert response.projectId == project_id
154 |         assert response.live.brokerage == 'PaperBrokerage'
155 |         # Stop the algorithm and delete the project to clean up.
156 |         await Live.stop(project_id)
157 |         await Project.delete(project_id)
158 | 
159 |     @pytest.mark.asyncio
160 |     async def test_create_live_algorithm_with_invalid_args(self):
161 |         # Create and compile the project.
162 |         project_id, compile_id = await Files.setup_project('Py')
163 |         node_id = await Live.get_node_id(project_id)
164 |         # Test the invalid requests.
165 |         tool_name = 'create_live_algorithm'
166 |         class_ = CreateLiveAlgorithmRequest
167 |         minimal_payload = DEFAULT_SETTINGS | {
168 |             'projectId': project_id,
169 |             'compileId': compile_id,
170 |             'nodeId': node_id
171 |         }
172 |         # Try to deploy the algorithm without providing all the
173 |         # required data.
174 |         await ensure_request_raises_validation_error_when_omitting_an_arg(
175 |             tool_name, class_, minimal_payload
176 |         )
177 |         # Try to deploy the algorithm with invalid argument values.
178 |         await ensure_request_fails_when_including_an_invalid_arg(
179 |             mcp, tool_name, minimal_payload, [
180 |                 # Try to deploy a project that doesn't exist.
181 |                 {'projectId': -1},
182 |                 # Try to deploy with a compile Id that doesn't exist.
183 |                 {'compileId': ' '},
184 |                 # Try to deploy with a node that doesn't exist.
185 |                 {'nodeId': ' '},
186 |             ]
187 |         )
188 |         # Delete the project to clean up.
189 |         await Project.delete(project_id)
190 | 
191 |     @pytest.mark.asyncio
192 |     @pytest.mark.parametrize('language', ['Py', 'C#'])
193 |     async def test_read_live_algorithm(self, language):
194 |         # Create and compile the project.
195 |         project_id, compile_id = await Files.setup_project(language)
196 |         # Get the Id of a live trading node.
197 | node_id = await Live.get_node_id(project_id) 198 | # Deploy the algorithm. 199 | await Live.create(project_id, compile_id, node_id) 200 | # Try to read the algorithm 201 | live = await Live.read(project_id) 202 | assert live.brokerage == 'PaperBrokerage' 203 | # Stop the algorithm and delete the project to clean up. 204 | await Live.stop(project_id) 205 | await Project.delete(project_id) 206 | 207 | @pytest.mark.asyncio 208 | async def test_read_live_algorithm_with_invalid_args(self): 209 | # Test the invalid requests. 210 | tool_name = 'read_live_algorithm' 211 | class_ = ReadLiveAlgorithmRequest 212 | # Try to read the live algorithm w/o providing all the required 213 | # data. 214 | await ensure_request_raises_validation_error(tool_name, class_, {}) 215 | # Try to read the live algorithm of a project that doesn't 216 | # exist. 217 | await ensure_request_fails(mcp, tool_name, {'projectId': -1}) 218 | 219 | 220 | @pytest.mark.asyncio 221 | @pytest.mark.parametrize('language, algo', [ 222 | ('Py', 'live_liquidate.py')#, ('C#', 'LiveLiquidate.cs') 223 | ]) 224 | async def test_read_and_liquidate_portfolio(self, language, algo): 225 | # Create and compile the project. 226 | project_id, compile_id = await Files.setup_project(language, algo) 227 | # Deploy the algorithm with an existing holding, 1 BTCUSD. 228 | holding = { 229 | 'symbol': 'BTCUSD', 230 | 'symbolId': 'BTCUSD 2XR', 231 | 'quantity': 1, 232 | 'averagePrice': 100_000 233 | } 234 | response = await Live.create( 235 | project_id, compile_id, await Live.get_node_id(project_id), 236 | brokerage={'id': 'QuantConnectBrokerage', 'holdings': [holding]} 237 | ) 238 | # Wait for the algorithm to start running. 239 | await Live.wait_for_algorithm_to_start(project_id) 240 | # Ensure the algorithm is invested. 241 | portfolio = await Live.wait_for_holdings_to_update( 242 | project_id, holding['symbolId'] 243 | ) 244 | read_holding = portfolio.holdings[holding['symbolId']] 245 | assert read_holding.a == holding['averagePrice'] 246 | assert read_holding.q == holding['quantity'] 247 | # Try to liquidate (and stop) the algorithm. 248 | await Live.liquidate(project_id) 249 | # Ensure the algorithm is no longer invested. 250 | await Live.wait_for_holding_to_be_removed( 251 | project_id, holding['symbolId'] 252 | ) 253 | # Ensure the algorithm is no longer running. 254 | live = await Live.read(project_id) 255 | assert live.stopped is not None 256 | # Delete the project to clean up. 257 | await Project.delete(project_id) 258 | 259 | @pytest.mark.asyncio 260 | async def test_list_live_algorithms(self): 261 | # Try to list the live algorithms. 262 | algorithms = await Live.list() 263 | assert algorithms 264 | # Try to list the algorithms of a single project. 265 | id_ = [algo.projectId for algo in algorithms][0] 266 | algorithms = await Live.list(projectId=id_) 267 | assert algorithms 268 | for algo in algorithms: 269 | assert algo.projectId == id_ 270 | # Try to list the algorithms that have stopped. 
271 | algorithms = await Live.list(status='Stopped') 272 | assert algorithms 273 | for algo in algorithms: 274 | assert algo.status == 'Stopped' 275 | 276 | 277 | -------------------------------------------------------------------------------- /tests/test_project_collaboration.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from utils import ( 7 | validate_models, 8 | ensure_request_fails, 9 | ensure_request_raises_validation_error_when_omitting_an_arg, 10 | ensure_request_fails_when_including_an_invalid_arg 11 | ) 12 | from models import ( 13 | CreateCollaboratorRequest, 14 | ReadCollaboratorsRequest, 15 | UpdateCollaboratorRequest, 16 | DeleteCollaboratorRequest, 17 | CreateCollaboratorResponse, 18 | ReadCollaboratorsResponse, 19 | UpdateCollaboratorResponse, 20 | DeleteCollaboratorResponse, 21 | RestResponse 22 | ) 23 | 24 | COLLABORATOR_ID = os.getenv('QUANTCONNECT_COLLABORATOR_ID') 25 | 26 | 27 | # Static helpers for common operations: 28 | class ProjectCollaboration: 29 | 30 | @staticmethod 31 | async def create( 32 | project_id, collaborator_id, collaboration_live_control, 33 | collaboration_write): 34 | output_model = await validate_models( 35 | mcp, 'create_project_collaborator', 36 | { 37 | 'projectId': project_id, 38 | 'collaboratorUserId': collaborator_id, 39 | 'collaborationLiveControl': collaboration_live_control, 40 | 'collaborationWrite': collaboration_write 41 | }, 42 | CreateCollaboratorResponse 43 | ) 44 | return output_model.collaborators 45 | 46 | @staticmethod 47 | async def read(project_id): 48 | return await validate_models( 49 | mcp, 'read_project_collaborators', {'projectId': project_id}, 50 | ReadCollaboratorsResponse 51 | ) 52 | 53 | @staticmethod 54 | async def update(project_id, collaborator_user_id, live_control, write): 55 | output_model = await validate_models( 56 | mcp, 'update_project_collaborator', 57 | { 58 | 'projectId': project_id, 59 | 'collaboratorUserId': collaborator_user_id, 60 | 'liveControl': live_control, 61 | 'write': write 62 | }, 63 | UpdateCollaboratorResponse 64 | ) 65 | return output_model.collaborators 66 | 67 | @staticmethod 68 | async def delete(project_id, collaborator_id): 69 | output_model = await validate_models( 70 | mcp, 'delete_project_collaborator', 71 | {'projectId': project_id, 'collaboratorId': collaborator_id}, 72 | DeleteCollaboratorResponse 73 | ) 74 | return output_model.collaborators 75 | 76 | @staticmethod 77 | async def lock(project_id): 78 | return await validate_models( 79 | mcp, 'lock_project_with_collaborators', 80 | {'projectId': project_id, 'codeSourceId': ''}, 81 | RestResponse 82 | ) 83 | 84 | 85 | # Test suite: 86 | class TestProjectCollaboration: 87 | 88 | @pytest.mark.asyncio 89 | @pytest.mark.parametrize('language', ['Py', 'C#']) 90 | @pytest.mark.parametrize('collaboration_live_control', [True, False]) 91 | @pytest.mark.parametrize('collaboration_write', [True, False]) 92 | async def test_create_project_collaboration( 93 | self, language, collaboration_live_control, collaboration_write): 94 | # Create a project. 95 | project_id = (await Project.create(language=language)).projectId 96 | # Add a collaborator. 97 | collaborators = await ProjectCollaboration.create( 98 | project_id, 99 | COLLABORATOR_ID, 100 | collaboration_live_control, 101 | collaboration_write 102 | ) 103 | # Test if the collaborator was added. 
104 | assert len(collaborators) == 2 105 | assert any( 106 | [c.publicId == COLLABORATOR_ID for c in collaborators] 107 | ) 108 | # Test if the live control and write permissions are right. 109 | permission = 'write' if collaboration_write else 'read' 110 | for c in collaborators: 111 | if c.publicId == COLLABORATOR_ID: 112 | assert c.liveControl == collaboration_live_control 113 | assert c.permission.value == permission 114 | # Remove the collaborators and delete the project to clean up. 115 | await ProjectCollaboration.delete(project_id, COLLABORATOR_ID) 116 | await Project.delete(project_id) 117 | 118 | @pytest.mark.asyncio 119 | async def test_create_project_collaboration_with_invalid_args(self): 120 | # Create a project. 121 | project_id = (await Project.create()).projectId 122 | # Test the invalid requests. 123 | tool_name = 'create_project_collaborator' 124 | minimal_payload = { 125 | 'projectId': project_id, 126 | 'collaboratorUserId': COLLABORATOR_ID, 127 | 'collaborationLiveControl': True, 128 | 'collaborationWrite': True 129 | } 130 | await ensure_request_raises_validation_error_when_omitting_an_arg( 131 | tool_name, CreateCollaboratorRequest, minimal_payload 132 | ) 133 | await ensure_request_fails_when_including_an_invalid_arg( 134 | mcp, tool_name, minimal_payload, [ 135 | # Try to add a collaborator to a project that doesn't 136 | # exist. 137 | {'projectId': -1}, 138 | # Try to add a collaborator to a project using an 139 | # invalid user Id for the collaborator. 140 | {'collaboratorUserId': ' '} 141 | ] 142 | ) 143 | # Delete the project to clean up. 144 | await Project.delete(project_id) 145 | 146 | @pytest.mark.asyncio 147 | async def test_read_project_collaboration(self): 148 | # Create a project. 149 | project_id = (await Project.create()).projectId 150 | # Add a collaborator. 151 | await ProjectCollaboration.create( 152 | project_id, COLLABORATOR_ID, True, True 153 | ) 154 | # Read the collaborator information of this project. 155 | response = await ProjectCollaboration.read(project_id) 156 | # Test if the project owner control and permissions are correct. 157 | assert response.userLiveControl 158 | assert response.userPermissions.value == 'write' 159 | # Test if the collaborator was added. 160 | collaborators = response.collaborators 161 | assert len(collaborators) == 2 162 | assert any( 163 | [c.publicId == COLLABORATOR_ID for c in collaborators] 164 | ) 165 | # Remove the collaborators and delete the project to clean up. 166 | await ProjectCollaboration.delete(project_id, COLLABORATOR_ID) 167 | await Project.delete(project_id) 168 | 169 | @pytest.mark.asyncio 170 | async def test_read_project_collaboration_with_invalid_args(self): 171 | # Try to read the collaborator information for a project that 172 | # doesn't exist. 173 | await ensure_request_fails( 174 | mcp, 'read_project_collaborators', {'projectId': -1} 175 | ) 176 | 177 | @pytest.mark.asyncio 178 | async def test_update_project_collaboration(self): 179 | # Create a project. 180 | project_id = (await Project.create()).projectId 181 | # Add a collaborator. 182 | await ProjectCollaboration.create( 183 | project_id, COLLABORATOR_ID, True, True 184 | ) 185 | # Update the collaborator live control and write permissions. 186 | collaborators = await ProjectCollaboration.update( 187 | project_id, COLLABORATOR_ID, False, False 188 | ) 189 | # Test if the update worked. 
190 | assert len(collaborators) == 2 191 | for c in collaborators: 192 | if c.publicId == COLLABORATOR_ID: 193 | assert not c.liveControl 194 | assert c.permission.value == 'read' 195 | # Remove the collaborators and delete the project to clean up. 196 | await ProjectCollaboration.delete(project_id, COLLABORATOR_ID) 197 | await Project.delete(project_id) 198 | 199 | @pytest.mark.asyncio 200 | async def test_update_project_collaboration_with_invalid_args(self): 201 | # Create a project. 202 | project_id = (await Project.create()).projectId 203 | # Add a collaborator. 204 | await ProjectCollaboration.create( 205 | project_id, COLLABORATOR_ID, True, True 206 | ) 207 | # Test the invalid requests. 208 | tool_name = 'update_project_collaborator' 209 | minimal_payload = { 210 | 'projectId': project_id, 211 | 'collaboratorUserId': COLLABORATOR_ID, 212 | 'liveControl': True, 213 | 'write': True 214 | } 215 | await ensure_request_raises_validation_error_when_omitting_an_arg( 216 | tool_name, UpdateCollaboratorRequest, minimal_payload 217 | ) 218 | await ensure_request_fails_when_including_an_invalid_arg( 219 | mcp, tool_name, minimal_payload, [ 220 | # Try to update a collaborator on a project that doesn't 221 | # exist. 222 | {'projectId': -1}, 223 | # Try to update a collaborator on a project using an 224 | # invalid user Id for the collaborator. 225 | {'collaboratorUserId': ' '} 226 | ] 227 | ) 228 | # Remove the collaborators and delete the project to clean up. 229 | await ProjectCollaboration.delete(project_id, COLLABORATOR_ID) 230 | await Project.delete(project_id) 231 | 232 | @pytest.mark.asyncio 233 | async def test_delete_project_collaboration_with_invalid_args(self): 234 | # Create a project. 235 | project_id = (await Project.create()).projectId 236 | # Add a collaborator. 237 | await ProjectCollaboration.create( 238 | project_id, COLLABORATOR_ID, True, True 239 | ) 240 | # Test the invalid requests. 241 | tool_name = 'delete_project_collaborator' 242 | minimal_payload = { 243 | 'projectId': project_id, 244 | 'collaboratorId': COLLABORATOR_ID 245 | } 246 | await ensure_request_raises_validation_error_when_omitting_an_arg( 247 | tool_name, DeleteCollaboratorRequest, minimal_payload 248 | ) 249 | await ensure_request_fails_when_including_an_invalid_arg( 250 | mcp, tool_name, minimal_payload, [ 251 | # Try to delete a collaborator on a project that 252 | # doesn't exist. 253 | {'projectId': -1}, 254 | # Try to delete a collaborator on a project using an 255 | # invalid user Id for the collaborator. 256 | {'collaboratorId': ' '} 257 | ] 258 | ) 259 | # Remove the collaborators and delete the project to clean up. 260 | await ProjectCollaboration.delete(project_id, COLLABORATOR_ID) 261 | await Project.delete(project_id) 262 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /tests/test_object_store.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | from time import sleep 4 | import requests 5 | from zipfile import ZipFile 6 | from io import BytesIO 7 | 8 | from main import mcp 9 | from utils import ( 10 | validate_models, 11 | ensure_request_fails, 12 | ensure_request_raises_validation_error, 13 | ensure_request_raises_validation_error_when_omitting_an_arg, 14 | create_timestamp, 15 | ) 16 | from models import ( 17 | ObjectStoreBinaryFile, 18 | GetObjectStorePropertiesRequest, 19 | GetObjectStoreJobIdRequest, 20 | GetObjectStoreURLRequest, 21 | ListObjectStoreRequest, 22 | DeleteObjectStoreRequest, 23 | GetObjectStorePropertiesResponse, 24 | GetObjectStoreResponse, 25 | ListObjectStoreResponse, 26 | RestResponse 27 | ) 28 | 29 | # Load the organization Id from the environment variables. 30 | ORGANIZATION_ID = os.getenv('QUANTCONNECT_ORGANIZATION_ID') 31 | 32 | 33 | # Static helpers for common operations: 34 | class ObjectStore: 35 | 36 | @staticmethod 37 | async def upload(organization_id, key, object_data): 38 | await validate_models( 39 | mcp, 'upload_object', 40 | { 41 | 'organizationId': organization_id, 42 | 'key': key, 43 | 'objectData': object_data 44 | }, 45 | RestResponse 46 | ) 47 | 48 | @staticmethod 49 | async def read_properties(organization_id, key): 50 | output_model = await validate_models( 51 | mcp, 'read_object_properties', 52 | {'organizationId': organization_id, 'key': key}, 53 | GetObjectStorePropertiesResponse 54 | ) 55 | return output_model.metadata 56 | 57 | @staticmethod 58 | async def read_job_id(organization_id, keys): 59 | output_model = await validate_models( 60 | mcp, 'read_object_store_file_job_id', 61 | {'organizationId': organization_id, 'keys': keys}, 62 | GetObjectStoreResponse 63 | ) 64 | return output_model.jobId 65 | 66 | @staticmethod 67 | async def read_download_url(organization_id, job_id): 68 | return await validate_models( 69 | mcp, 'read_object_store_file_download_url', 70 | {'organizationId': organization_id, 'jobId': job_id}, 71 | GetObjectStoreResponse 72 | ) 73 | 74 | @staticmethod 75 | async def list(organization_id, **kwargs): 76 | return await validate_models( 77 | mcp, 'list_object_store_files', 78 | {'organizationId': organization_id} | kwargs, 79 | ListObjectStoreResponse 80 | ) 81 | 82 | @staticmethod 83 | async def delete(organization_id, key): 84 | await validate_models( 85 | mcp, 'delete_object', 86 | {'organizationId': organization_id, 'key': key}, RestResponse 87 | ) 88 | 89 | @staticmethod 90 | async def wait_for_job_to_complete(organization_id, job_id): 91 | attempts = 0 92 | while attempts < 36: 93 | attempts += 1 94 | output_model = await ObjectStore.read_download_url( 95 | organization_id, job_id 96 | ) 97 | if output_model.url: 98 | return output_model 99 | sleep(5) 100 | assert False, "Download job didn't complete in time." 
101 | 102 | 103 | # Test suite: 104 | class TestObjectStore: 105 | 106 | async def _create_key(self): 107 | return f'test_key_{create_timestamp()}' 108 | 109 | async def _create_object_data(self, type_=str): 110 | match type_: 111 | case t if t is str: 112 | return "Hello, world!" 113 | case t if t is bytes: 114 | return bytes("Hello, world!", encoding="utf-8") 115 | 116 | async def _decode_object_data(self, data, type_=str): 117 | match type_: 118 | case t if t is str: 119 | return data.decode('utf-8') 120 | case t if t is bytes: 121 | return data 122 | 123 | @pytest.mark.asyncio 124 | @pytest.mark.parametrize('type_', [str, bytes]) 125 | async def test_upload(self, type_): 126 | # Try to upload a file. 127 | key = await self._create_key() 128 | await ObjectStore.upload( 129 | ORGANIZATION_ID, key, await self._create_object_data(type_) 130 | ) 131 | # Delete the file to clean up. 132 | await ObjectStore.delete(ORGANIZATION_ID, key) 133 | 134 | @pytest.mark.asyncio 135 | async def test_upload_with_invalid_args(self): 136 | # Test the invalid requests. 137 | tool_name = 'upload_object' 138 | class_ = ObjectStoreBinaryFile 139 | minimal_payload = { 140 | 'organizationId': ORGANIZATION_ID, 141 | 'key': await self._create_key(), 142 | 'objectData': await self._create_object_data() 143 | } 144 | # Try to upload the file without providing all the required 145 | # data. 146 | await ensure_request_raises_validation_error_when_omitting_an_arg( 147 | tool_name, class_, minimal_payload 148 | ) 149 | # Try to upload the file to an organization that doesn't exist. 150 | await ensure_request_fails( 151 | mcp, tool_name, minimal_payload | {'organizationId': ' '} 152 | ) 153 | 154 | @pytest.mark.asyncio 155 | @pytest.mark.parametrize('type_', [str, bytes]) 156 | async def test_read_metadata(self, type_): 157 | # Upload a file. 158 | key = await self._create_key() 159 | object_data = await self._create_object_data(type_) 160 | await ObjectStore.upload(ORGANIZATION_ID, key, object_data) 161 | # Try to read the file properties. 162 | properties = await ObjectStore.read_properties(ORGANIZATION_ID, key) 163 | assert properties.key == key 164 | assert properties.size == len(object_data) 165 | assert properties.md5 == '6cd3556deb0da54bca060b4c39479839' 166 | assert properties.mime == 'text/plain' 167 | match type_: 168 | case t if t is str: 169 | assert properties.preview == object_data 170 | case t if t is bytes: 171 | assert properties.preview == object_data.decode() 172 | # Delete the file to clean up. 173 | await ObjectStore.delete(ORGANIZATION_ID, key) 174 | 175 | @pytest.mark.asyncio 176 | async def test_read_metadata_with_invalid_args(self): 177 | tool_name = 'read_object_properties' 178 | class_ = GetObjectStorePropertiesRequest 179 | minimal_payload = { 180 | 'organizationId': ORGANIZATION_ID, 181 | 'key': await self._create_key(), 182 | } 183 | # Try to read the file properties without providing all the 184 | # required data. 185 | await ensure_request_raises_validation_error_when_omitting_an_arg( 186 | tool_name, class_, minimal_payload 187 | ) 188 | # Try to read the file properties from an organization 189 | # that doesn't exist. 190 | await ensure_request_fails( 191 | mcp, tool_name, minimal_payload | {'organizationId': ' '} 192 | ) 193 | # Try to read the file properties with a key that doesn't exist 194 | # in the Object Store. 
195 |         await ensure_request_fails(mcp, tool_name, minimal_payload)
196 | 
197 |     @pytest.mark.asyncio
198 |     @pytest.mark.parametrize('type_', [str, bytes])
199 |     async def test_list_object_store_files(self, type_):
200 |         # Create a directory tree like this:
201 |         #
202 |         # root_directory/
203 |         # ├── child_file
204 |         # └── child_directory/
205 |         #     └── grandchild_file
206 |         #
207 |         # 1) Define the root directory name.
208 |         root_dir = await self._create_key()
209 |         # 2) Upload the `child_file`
210 |         await ObjectStore.upload(
211 |             ORGANIZATION_ID,
212 |             f'{root_dir}/child_file',
213 |             await self._create_object_data(type_)
214 |         )
215 |         # 3) Upload the `grandchild_file`
216 |         await ObjectStore.upload(
217 |             ORGANIZATION_ID,
218 |             f'{root_dir}/child_directory/grandchild_file',
219 |             await self._create_object_data(type_)
220 |         )
221 |         # Try to list the files in the Object Store.
222 |         response = await ObjectStore.list(ORGANIZATION_ID)
223 |         assert response.path == '/'
224 |         assert response.objects
225 |         assert any(
226 |             (
227 |                 obj.key == '/' + root_dir and
228 |                 obj.name == root_dir and
229 |                 obj.mime == 'directory' and
230 |                 obj.folder
231 |             )
232 |             for obj in response.objects
233 |         )
234 |         # Try to list the contents of the `root_dir`.
235 |         response = await ObjectStore.list(ORGANIZATION_ID, path=root_dir)
236 |         assert response.path == root_dir
237 |         assert len(response.objects) == 2
238 |         assert any(
239 |             (
240 |                 obj.key == f'/{root_dir}/child_file' and
241 |                 obj.name == 'child_file' and
242 |                 obj.mime == 'text/plain' and
243 |                 not obj.folder
244 |             )
245 |             for obj in response.objects
246 |         )
247 |         assert any(
248 |             (
249 |                 obj.key == f'/{root_dir}/child_directory' and
250 |                 obj.name == 'child_directory' and
251 |                 obj.mime == 'directory' and
252 |                 obj.folder
253 |             )
254 |             for obj in response.objects
255 |         )
256 |         # Try to list the contents of the `child_directory`.
257 |         path = f'{root_dir}/child_directory'
258 |         response = await ObjectStore.list(ORGANIZATION_ID, path=path)
259 |         assert response.path == path
260 |         assert len(response.objects) == 1
261 |         obj = response.objects[0]
262 |         assert (
263 |             obj.key == f'{path}/grandchild_file' and
264 |             obj.name == 'grandchild_file' and
265 |             obj.mime == 'text/plain' and
266 |             not obj.folder
267 |         )
268 |         # Delete the directory tree to clean up.
269 |         await ObjectStore.delete(ORGANIZATION_ID, root_dir)
270 | 
271 |     @pytest.mark.asyncio
272 |     async def test_list_object_store_files_with_invalid_args(self):
273 |         tool_name = 'list_object_store_files'
274 |         # Try to list the Object Store files without providing
275 |         # all the required data.
276 |         await ensure_request_raises_validation_error(
277 |             tool_name, ListObjectStoreRequest, {}
278 |         )
279 |         # Try to list the Object Store files in an organization that
280 |         # doesn't exist.
281 |         await ensure_request_fails(mcp, tool_name, {'organizationId': ' '})
282 | 
283 |     @pytest.mark.asyncio
284 |     @pytest.mark.parametrize('type_', [str, bytes])
285 |     async def test_read_object_store_file_job_id(self, type_):
286 |         # Upload a file.
287 |         key = await self._create_key()
288 |         await ObjectStore.upload(
289 |             ORGANIZATION_ID, key, await self._create_object_data(type_)
290 |         )
291 |         # Try to read the job Id.
292 |         await ObjectStore.read_job_id(ORGANIZATION_ID, [key])
293 |         # Delete the file to clean up.
294 | await ObjectStore.delete(ORGANIZATION_ID, key) 295 | 296 | @pytest.mark.asyncio 297 | async def test_read_object_store_file_job_id_with_invalid_args(self): 298 | # Try to read the job Id without providing all the required 299 | # data. 300 | await ensure_request_raises_validation_error_when_omitting_an_arg( 301 | 'read_object_store_file_job_id', GetObjectStoreJobIdRequest, 302 | { 303 | 'organizationId': ORGANIZATION_ID, 304 | 'keys': [await self._create_key()] 305 | } 306 | ) 307 | 308 | @pytest.mark.asyncio 309 | @pytest.mark.parametrize('type_', [str, bytes]) 310 | async def test_read_object_store_file_download_url(self, type_): 311 | # Upload a file. 312 | key = await self._create_key() 313 | object_data = await self._create_object_data(type_) 314 | await ObjectStore.upload(ORGANIZATION_ID, key, object_data) 315 | # Try to get the download URL. 316 | job_id = await ObjectStore.read_job_id(ORGANIZATION_ID, [key]) 317 | url = ( 318 | await ObjectStore.wait_for_job_to_complete(ORGANIZATION_ID, job_id) 319 | ).url 320 | # Ensure the content in the downloaded file matches the 321 | # upload. 322 | response = requests.get(url) 323 | response.raise_for_status() 324 | # 1) Unzip the zip file. 325 | with ZipFile(BytesIO(response.content), 'r') as zip_ref: 326 | # 2) In the zip file, open the object file. 327 | with zip_ref.open(key) as file: 328 | # 3) Check if the file contents are correct. 329 | assert object_data == await self._decode_object_data( 330 | file.read(), type_ 331 | ) 332 | # Delete the file to clean up. 333 | await ObjectStore.delete(ORGANIZATION_ID, key) 334 | -------------------------------------------------------------------------------- /tests/test_backtests.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from test_files import Files 7 | from test_compile import Compile 8 | from utils import ( 9 | validate_models, 10 | ensure_request_fails, 11 | ensure_request_raises_validation_error, 12 | ensure_request_raises_validation_error_when_omitting_an_arg, 13 | ensure_request_fails_when_including_an_invalid_arg 14 | ) 15 | from models import ( 16 | CreateBacktestRequest, 17 | ReadBacktestRequest, 18 | UpdateBacktestRequest, 19 | DeleteBacktestRequest, 20 | ListBacktestRequest, 21 | BacktestResponse, 22 | BacktestSummaryResponse, 23 | RestResponse 24 | ) 25 | 26 | 27 | # Static helpers for common operations: 28 | class Backtest: 29 | 30 | @staticmethod 31 | async def create( 32 | project_id, compile_id, backtest_name='Test Backtest', **kwargs): 33 | output_model = await validate_models( 34 | mcp, 'create_backtest', 35 | { 36 | 'projectId': project_id, 37 | 'compileId': compile_id, 38 | 'backtestName': backtest_name 39 | } | kwargs, 40 | BacktestResponse 41 | ) 42 | return output_model.backtest 43 | 44 | @staticmethod 45 | async def read(project_id, backtest_id): 46 | output_model = await validate_models( 47 | mcp, 'read_backtest', 48 | {'projectId': project_id, 'backtestId': backtest_id}, 49 | BacktestResponse 50 | ) 51 | return output_model.backtest 52 | 53 | @staticmethod 54 | async def update(project_id, backtest_id, **kwargs): 55 | output_model = await validate_models( 56 | mcp, 'update_backtest', 57 | {'projectId': project_id, 'backtestId': backtest_id} | kwargs, 58 | RestResponse 59 | ) 60 | return output_model 61 | 62 | @staticmethod 63 | async def delete(project_id, backtest_id): 64 | output_model = await 
validate_models( 65 | mcp, 'delete_backtest', 66 | {'projectId': project_id, 'backtestId': backtest_id}, 67 | RestResponse 68 | ) 69 | return output_model 70 | 71 | @staticmethod 72 | async def list(project_id, **kwargs): 73 | output_model = await validate_models( 74 | mcp, 'list_backtests', {'projectId': project_id} | kwargs, 75 | BacktestSummaryResponse 76 | ) 77 | return output_model.backtests 78 | 79 | @staticmethod 80 | async def wait_for_job_to_complete(project_id, backtest_id): 81 | attempts = 0 82 | while attempts < 5: 83 | attempts += 1 84 | backtest = await Backtest.read(project_id, backtest_id) 85 | if backtest.completed: 86 | return 87 | sleep(10) 88 | assert False, "Backtest job didn't complete in time." 89 | 90 | @staticmethod 91 | async def run_algorithm(language, algo=None, wait_to_complete=True): 92 | # Create and compile the project. 93 | project_id, compile_id = await Files.setup_project(language, algo) 94 | # Run the backtest. 95 | backtest_id = (await Backtest.create(project_id, compile_id)).backtestId 96 | if wait_to_complete: 97 | await Backtest.wait_for_job_to_complete(project_id, backtest_id) 98 | return project_id, backtest_id 99 | 100 | 101 | # Test suite: 102 | class TestBacktest: 103 | 104 | @pytest.mark.asyncio 105 | @pytest.mark.parametrize('language', ['Py', 'C#']) 106 | async def test_create_backtest(self, language): 107 | # Create and compile the project. 108 | project_id, compile_id = await Files.setup_project(language) 109 | # Try to run the backtest. 110 | name = 'Test Backtest' 111 | backtest = await Backtest.create(project_id, compile_id, name) 112 | assert backtest.name == name 113 | assert backtest.projectId == project_id 114 | assert backtest.parameterSet == [] 115 | # Try to run the backtest with some parameters. 116 | await Backtest.create( 117 | project_id, compile_id, name, 118 | parameters={'a': 0, 'b': 0.0, 'c': 'foo'} 119 | ) 120 | # Delete the project to clean up. 121 | await Project.delete(project_id) 122 | 123 | @pytest.mark.asyncio 124 | @pytest.mark.parametrize('language', ['Py', 'C#']) 125 | async def test_create_backtest_with_invalid_args(self, language): 126 | # Create and compile the project. 127 | project_id, compile_id = await Files.setup_project(language) 128 | # Test the invalid requests. 129 | tool_name = 'create_backtest' 130 | class_ = CreateBacktestRequest 131 | minimal_payload = { 132 | 'projectId': project_id, 133 | 'compileId': compile_id, 134 | 'backtestName': 'Test backtest' 135 | } 136 | await ensure_request_raises_validation_error_when_omitting_an_arg( 137 | tool_name, class_, minimal_payload 138 | ) 139 | await ensure_request_fails_when_including_an_invalid_arg( 140 | mcp, tool_name, minimal_payload, [ 141 | # Try to backtest a project that doesn't exist. 142 | {'projectId': -1}, 143 | # Try to run a backtest with a compile Id that doesn't 144 | # exist. 145 | {'compileId': ' '} 146 | ] 147 | ) 148 | # Try to create a backtest with unsupported data types for the 149 | # parameters (in this case, the value is a dictionary). 150 | await ensure_request_raises_validation_error( 151 | tool_name, class_, 152 | minimal_payload | {'parameters': {'p': {'k': 'v'}}} 153 | ) 154 | # Delete the project to clean up. 155 | await Project.delete(project_id) 156 | 157 | @pytest.mark.asyncio 158 | @pytest.mark.parametrize('language', ['Py', 'C#']) 159 | async def test_read_backtest(self, language): 160 | # Create and compile the project. 161 | project_id, compile_id = await Files.setup_project(language) 162 | # Start a backtest. 
163 | parameters = {'a': 0, 'b': 0.0, 'c': 'foo'} 164 | backtest = await Backtest.create( 165 | project_id, compile_id, parameters=parameters 166 | ) 167 | backtest_id = backtest.backtestId 168 | # Try to read the backtest result. 169 | backtest = await Backtest.read(project_id, backtest_id) 170 | assert backtest.projectId == project_id 171 | assert backtest.backtestId == backtest_id 172 | assert backtest.parameterSet == parameters 173 | # Delete the project to clean up. 174 | await Project.delete(project_id) 175 | 176 | @pytest.mark.asyncio 177 | @pytest.mark.parametrize('language', ['Py', 'C#']) 178 | async def test_read_backtest_with_invalid_args(self, language): 179 | # Start a backtest. 180 | project_id, backtest_id = await Backtest.run_algorithm(language) 181 | # Test the invalid requests. 182 | tool_name = 'read_backtest' 183 | class_ = ReadBacktestRequest 184 | minimal_payload = {'projectId': project_id, 'backtestId': backtest_id} 185 | await ensure_request_raises_validation_error_when_omitting_an_arg( 186 | tool_name, class_, minimal_payload 187 | ) 188 | # Try to read a backtest that doesn't exist. 189 | await ensure_request_fails( 190 | mcp, tool_name, minimal_payload | {'backtestId': ' '} 191 | ) 192 | # Delete the project to clean up. 193 | await Project.delete(project_id) 194 | 195 | @pytest.mark.asyncio 196 | @pytest.mark.parametrize('language', ['Py', 'C#']) 197 | async def test_list_backtests(self, language): 198 | # Start a backtest. 199 | project_id, backtest_id = await Backtest.run_algorithm(language) 200 | # Try to list all the backtest when there is just 1 backtest. 201 | backtests = await Backtest.list(project_id) 202 | assert len(backtests) == 1 203 | backtest = backtests[0] 204 | assert backtest.backtestId == backtest_id 205 | # Start a second backtest. 206 | compile_id = (await Compile.create(project_id)).compileId 207 | await Compile.wait_for_job_to_complete(project_id, compile_id) 208 | backtest_ids = [ 209 | backtest_id, 210 | (await Backtest.create(project_id, compile_id)).backtestId 211 | ] 212 | # Try to list all the backtests when there are multiple 213 | # backtests. 214 | backtests = await Backtest.list(project_id) 215 | assert len(backtests) == 2 216 | assert len(set([backtest.backtestId for backtest in backtests])) == 2 217 | for backtest in backtests: 218 | assert backtest.backtestId in backtest_ids 219 | # Try to list all the backtests with statistics included. 220 | await Backtest.wait_for_job_to_complete(project_id, backtest_ids[-1]) 221 | backtests = await Backtest.list(project_id, includeStatistics=True) 222 | assert len(backtests) == 2 223 | assert len(set([backtest.backtestId for backtest in backtests])) == 2 224 | for backtest in backtests: 225 | assert backtest.backtestId in backtest_ids 226 | assert backtest.alpha is not None 227 | # Delete the project to clean up. 228 | await Project.delete(project_id) 229 | 230 | @pytest.mark.asyncio 231 | async def test_list_backtests_with_invalid_args(self): 232 | tool_name = 'list_backtests' 233 | # Try to list all the backtests without providing the project 234 | # Id. 235 | await ensure_request_raises_validation_error( 236 | tool_name, ListBacktestRequest, {} 237 | ) 238 | # Try to list all the backtests of a project that doesn't 239 | # exist. 
240 | await ensure_request_fails(mcp, tool_name, {'projectId': -1}) 241 | 242 | @pytest.mark.asyncio 243 | @pytest.mark.parametrize( 244 | 'language, algo', 245 | [ 246 | ('Py', 'runtime_error.py'), 247 | ('C#', 'RuntimeError.cs') 248 | ] 249 | ) 250 | async def test_list_backtests_with_runtime_error(self, language, algo): 251 | # Run the backtest. 252 | project_id, _ = await Backtest.run_algorithm(language, algo) 253 | # Try to list the backtest of the project. 254 | await Backtest.list(project_id) 255 | # Delete the project to clean up. 256 | await Project.delete(project_id) 257 | 258 | @pytest.mark.asyncio 259 | @pytest.mark.parametrize('language', ['Py', 'C#']) 260 | async def test_update_backtest(self, language): 261 | # Start a backtest. 262 | project_id, backtest_id = await Backtest.run_algorithm( 263 | language, wait_to_complete=False 264 | ) 265 | # Try to update the backtest name and note. 266 | name = 'new name' 267 | note = 'new note' 268 | await Backtest.update(project_id, backtest_id, name=name, note=note) 269 | backtest = await Backtest.read(project_id, backtest_id) 270 | assert backtest.name == name 271 | assert backtest.note == note 272 | # Delete the project to clean up. 273 | await Project.delete(project_id) 274 | 275 | @pytest.mark.asyncio 276 | @pytest.mark.parametrize('language', ['Py', 'C#']) 277 | async def test_update_backtest_with_invalid_args(self, language): 278 | # Start a backtest. 279 | project_id, backtest_id = await Backtest.run_algorithm( 280 | language, wait_to_complete=False 281 | ) 282 | # Test the invalid requests. 283 | tool_name = 'update_backtest' 284 | class_ = UpdateBacktestRequest 285 | minimal_payload = {'projectId': project_id, 'backtestId': backtest_id} 286 | await ensure_request_raises_validation_error_when_omitting_an_arg( 287 | tool_name, class_, minimal_payload 288 | ) 289 | await ensure_request_fails_when_including_an_invalid_arg( 290 | mcp, tool_name, minimal_payload, [ 291 | # Try to update a backtest in a project that doesn't 292 | # exist. 293 | {'projectId': -1}, 294 | # Try to update a backtest that doesn't exist. 295 | {'backtestId': ' '} 296 | ] 297 | ) 298 | # Delete the project to clean up. 299 | await Project.delete(project_id) 300 | 301 | @pytest.mark.asyncio 302 | @pytest.mark.parametrize('language', ['Py', 'C#']) 303 | async def test_delete_backtest(self, language): 304 | # Start a backtest. 305 | project_id, backtest_id = await Backtest.run_algorithm( 306 | language, wait_to_complete=False 307 | ) 308 | # Try to delete the backtest. 309 | await Backtest.delete(project_id, backtest_id) 310 | # Delete the project to clean up. 311 | await Project.delete(project_id) 312 | 313 | @pytest.mark.asyncio 314 | @pytest.mark.parametrize('language', ['Py', 'C#']) 315 | async def test_delete_backtest_with_invalid_args(self, language): 316 | # Start a backtest. 317 | project_id, backtest_id = await Backtest.run_algorithm( 318 | language, wait_to_complete=False 319 | ) 320 | # Test the invalid requests. 321 | tool_name = 'delete_backtest' 322 | class_ = DeleteBacktestRequest 323 | minimal_payload = {'projectId': project_id, 'backtestId': backtest_id} 324 | await ensure_request_raises_validation_error_when_omitting_an_arg( 325 | tool_name, class_, minimal_payload 326 | ) 327 | # Try to delete a backtest from a project that doesn't exist. 328 | await ensure_request_fails( 329 | mcp, tool_name, minimal_payload | {'projectId': -1} 330 | ) 331 | # Delete the project to clean up. 
332 | await Project.delete(project_id) 333 | -------------------------------------------------------------------------------- /tests/test_optimizations.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from time import sleep 3 | 4 | from main import mcp 5 | from test_project import Project 6 | from test_files import Files 7 | from test_backtests import Backtest 8 | from utils import ( 9 | validate_models, 10 | ensure_request_fails, 11 | ensure_request_raises_validation_error, 12 | ensure_request_raises_validation_error_when_omitting_an_arg, 13 | ensure_request_fails_when_including_an_invalid_arg 14 | ) 15 | from models import ( 16 | EstimateOptimizationRequest, 17 | CreateOptimizationRequest, 18 | ReadOptimizationRequest, 19 | ListOptimizationRequest, 20 | EstimateOptimizationResponse, 21 | UpdateOptimizationRequest, 22 | AbortOptimizationRequest, 23 | DeleteOptimizationRequest, 24 | ListOptimizationResponse, 25 | ReadOptimizationResponse, 26 | RestResponse 27 | ) 28 | 29 | 30 | TEST_ALGORITHMS = [ 31 | ('Py', 'parameter_optimization.py'), 32 | ('C#', 'ParameterOptimization.cs') 33 | ] 34 | 35 | DEFAULT_SETTINGS = { 36 | 'target': 'TotalPerformance.PortfolioStatistics.SharpeRatio', 37 | 'target_to': 'max', 38 | 'strategy': 'QuantConnect.Optimizer.Strategies.GridSearchOptimizationStrategy', 39 | 'parameters': [ 40 | {'name': 'sma_slow', 'min': 252, 'max': 252+21, 'step': 21}, 41 | {'name': 'sma_fast', 'min': 10, 'max': 110, 'step': 50}, 42 | ], 43 | 'node_type': 'O2-8', 44 | 'parallel_nodes': 2, 45 | 'name': 'Test Optimization', 46 | 'estimated_cost': 1, 47 | } 48 | 49 | # Static helpers for common operations: 50 | class Optimization: 51 | 52 | @staticmethod 53 | async def estimate( 54 | project_id, target, target_to, strategy, parameters, 55 | name='Test Optimization', **kwargs): 56 | output_model = await validate_models( 57 | mcp, 'estimate_optimization_time', 58 | { 59 | 'projectId': project_id, 60 | 'name': name, 61 | 'target': target, 62 | 'targetTo': target_to, 63 | 'strategy': strategy, 64 | 'parameters': parameters 65 | } | kwargs, 66 | EstimateOptimizationResponse 67 | ) 68 | return output_model.estimate 69 | 70 | 71 | @staticmethod 72 | async def create( 73 | project_id, compile_id, 74 | target=DEFAULT_SETTINGS['target'], 75 | target_to=DEFAULT_SETTINGS['target_to'], 76 | strategy=DEFAULT_SETTINGS['strategy'], 77 | parameters=DEFAULT_SETTINGS['parameters'], 78 | node_type=DEFAULT_SETTINGS['node_type'], 79 | parallel_nodes=DEFAULT_SETTINGS['parallel_nodes'], 80 | name=DEFAULT_SETTINGS['name'], 81 | estimated_cost=DEFAULT_SETTINGS['estimated_cost'], 82 | **kwargs): 83 | output_model = await validate_models( 84 | mcp, 'create_optimization', 85 | { 86 | 'projectId': project_id, 87 | 'target': target, 88 | 'targetTo': target_to, 89 | 'strategy': strategy, 90 | 'compileId': compile_id, 91 | 'parameters': parameters, 92 | 'estimatedCost': estimated_cost, 93 | 'nodeType': node_type, 94 | 'parallelNodes': parallel_nodes, 95 | 'name': name 96 | } | kwargs, 97 | ListOptimizationResponse 98 | ) 99 | return output_model.optimizations[0] 100 | 101 | @staticmethod 102 | async def read(optimization_id): 103 | output_model = await validate_models( 104 | mcp, 'read_optimization', {'optimizationId': optimization_id}, 105 | ReadOptimizationResponse 106 | ) 107 | return output_model.optimization 108 | 109 | @staticmethod 110 | async def update(optimization_id, name): 111 | return await validate_models( 112 | mcp, 'update_optimization', 113 | 
{'optimizationId': optimization_id, 'name': name}, RestResponse 114 | ) 115 | 116 | @staticmethod 117 | async def abort(optimization_id): 118 | return await validate_models( 119 | mcp, 'abort_optimization', {'optimizationId': optimization_id}, 120 | RestResponse 121 | ) 122 | 123 | @staticmethod 124 | async def delete(optimization_id): 125 | return await validate_models( 126 | mcp, 'delete_optimization', {'optimizationId': optimization_id}, 127 | RestResponse 128 | ) 129 | 130 | @staticmethod 131 | async def list(project_id): 132 | output_model = await validate_models( 133 | mcp, 'list_optimizations', {'projectId': project_id}, 134 | ListOptimizationResponse 135 | ) 136 | return output_model.optimizations 137 | 138 | @staticmethod 139 | async def wait_for_job_to_start(optimization_id): 140 | attempts = 0 141 | while attempts < 10: 142 | attempts += 1 143 | optimization = await Optimization.read(optimization_id) 144 | if optimization.status.value != 'new': 145 | return optimization 146 | sleep(18) 147 | assert False, "Optimization job didn't start in time." 148 | 149 | @staticmethod 150 | async def wait_for_job_to_complete(optimization_id): 151 | attempts = 0 152 | while attempts < 6*5: # 5 minutes 153 | attempts += 1 154 | optimization = await Optimization.read(optimization_id) 155 | if optimization.status.value == 'completed': 156 | return optimization 157 | sleep(10) 158 | assert False, "Optimization job didn't complete in time." 159 | 160 | @staticmethod 161 | async def wait_for_job_to_abort(optimization_id): 162 | attempts = 0 163 | while attempts < 6: 164 | attempts += 1 165 | optimization = await Optimization.read(optimization_id) 166 | if optimization.status.value == 'aborted': 167 | return optimization 168 | sleep(5) 169 | assert False, "Optimization job didn't abort in time." 170 | 171 | # Test suite: 172 | class TestOptimization: 173 | 174 | async def _check_response(self, optimization): 175 | assert optimization.criterion.target.value == DEFAULT_SETTINGS['target'] 176 | assert optimization.criterion.extremum.value == \ 177 | DEFAULT_SETTINGS['target_to'] 178 | parameters = DEFAULT_SETTINGS['parameters'] 179 | assert len(optimization.parameters) == len(parameters) 180 | for input_p, output_p in zip(parameters, optimization.parameters): 181 | assert output_p.name == input_p['name'] 182 | assert output_p.min == input_p['min'] 183 | assert output_p.max == input_p['max'] 184 | assert output_p.step == input_p['step'] 185 | assert optimization.name == DEFAULT_SETTINGS['name'] 186 | assert optimization.nodeType.value == DEFAULT_SETTINGS['node_type'] 187 | 188 | @pytest.mark.asyncio 189 | @pytest.mark.parametrize('language', ['Py', 'C#']) 190 | async def test_estimate_optimization(self, language): 191 | # Create a new project and backtest it. 192 | project_id, _ = await Backtest.run_algorithm(language) 193 | # Try to estimate the cost of an optimization job. 194 | await Optimization.estimate( 195 | project_id, 196 | 'TotalPerformance.PortfolioStatistics.SharpeRatio', 197 | 'max', 198 | 'QuantConnect.Optimizer.Strategies.GridSearchOptimizationStrategy', 199 | [{'name': 'p', 'min': 0, 'max': 1, 'step': 1}] 200 | ) 201 | # Delete the project to clean up. 202 | await Project.delete(project_id) 203 | 204 | @pytest.mark.asyncio 205 | @pytest.mark.parametrize('language', ['Py', 'C#']) 206 | async def test_estimate_optimization_with_invalid_args(self, language): 207 | # Run a backtest with the template algorithm. 
208 | project_id, _ = await Backtest.run_algorithm(language) 209 | # Test the invalid requests. 210 | tool_name = 'estimate_optimization_time' 211 | class_ = EstimateOptimizationRequest 212 | minimal_payload = { 213 | 'projectId': project_id, 214 | 'name': 'Test Optimization', 215 | 'target': 'TotalPerformance.PortfolioStatistics.SharpeRatio', 216 | 'targetTo': 'max', 217 | 'strategy': 'QuantConnect.Optimizer.Strategies.GridSearchOptimizationStrategy', 218 | 'parameters': [{'name': 'p', 'min': 0, 'max': 1, 'step': 1}] 219 | } 220 | # Try to estimate the cost of an optimization without 221 | # providing all the required arguments. 222 | await ensure_request_raises_validation_error_when_omitting_an_arg( 223 | tool_name, class_, minimal_payload 224 | ) 225 | invalid_args = [ 226 | # Try to estimate the cost of an optimization with an 227 | # unsupported strategy. 228 | {'strategy': 'QuantConnect.Optimizer.Strategies.EulerSearchOptimizationStrategy'}, 229 | # Try to estimate the cost of an optimization with an 230 | # unsupported target. 231 | {'target': ' '}, 232 | # Try to estimate the cost of an optimization with an 233 | # unsupported "targetTo" value. 234 | {'targetTo': ' '} 235 | ] 236 | for arg in invalid_args: 237 | await ensure_request_raises_validation_error( 238 | tool_name, class_, minimal_payload | arg 239 | ) 240 | await ensure_request_fails_when_including_an_invalid_arg( 241 | mcp, tool_name, minimal_payload, [ 242 | # Try to estimate the cost of an optimization for a 243 | # project that doesn't exist. 244 | {'projectId': -1}, 245 | # Try to estimate the cost of an optimization without 246 | # any parameters. 247 | {'parameters': []} 248 | ] 249 | ) 250 | # Delete the project to clean up. 251 | await Project.delete(project_id) 252 | 253 | @pytest.mark.asyncio 254 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 255 | async def test_create_optimization(self, language, algo): 256 | # Create and compile the project. 257 | project_id, compile_id = await Files.setup_project(language, algo) 258 | # Try to create an optimization job. 259 | await self._check_response( 260 | await Optimization.create(project_id, compile_id) 261 | ) 262 | # Delete the project to clean up. 263 | await Project.delete(project_id) 264 | 265 | @pytest.mark.asyncio 266 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 267 | async def test_create_optimization_with_invalid_args( 268 | self, language, algo): 269 | # Create and compile the project. 270 | project_id, compile_id = await Files.setup_project(language, algo) 271 | # Test the invalid requests. 272 | tool_name = 'create_optimization' 273 | class_ = CreateOptimizationRequest 274 | minimal_payload = { 275 | 'projectId': project_id, 276 | 'name': 'Test Optimization', 277 | 'target': 'TotalPerformance.PortfolioStatistics.SharpeRatio', 278 | 'targetTo': 'max', 279 | 'strategy': 'QuantConnect.Optimizer.Strategies.GridSearchOptimizationStrategy', 280 | 'parameters': [{'name': 'p', 'min': 0, 'max': 1, 'step': 1}], 281 | 'compileId': compile_id, 282 | 'estimatedCost': 1, 283 | 'nodeType': 'O2-8', 284 | 'parallelNodes': 2, 285 | } 286 | # Try to create an optimization without providing all the 287 | # required arguments. 288 | await ensure_request_raises_validation_error_when_omitting_an_arg( 289 | tool_name, class_, minimal_payload 290 | ) 291 | invalid_args = [ 292 | # Try to optimize with an unsupported target. 293 | {'target': ' '}, 294 | # Try to optimize with an unsupported `targetTo`. 
295 | {'targetTo': ' '}, 296 | # Try to optimize with an unsupported strategy. 297 | {'strategy': 'QuantConnect.Optimizer.Strategies.EulerSearchOptimizationStrategy'}, 298 | # Try to optimize with an unsupported node type. 299 | {'nodeType': 'B2-8'} 300 | ] 301 | for arg in invalid_args: 302 | await ensure_request_raises_validation_error( 303 | tool_name, class_, minimal_payload | arg 304 | ) 305 | await ensure_request_fails_when_including_an_invalid_arg( 306 | mcp, tool_name, minimal_payload, [ 307 | # Try to optimize a project that doesn't exist. 308 | {'projectId': -1}, 309 | # Try to optimize without any parameters. 310 | {'parameters': []} 311 | ] 312 | ) 313 | # Delete the project to clean up. 314 | await Project.delete(project_id) 315 | 316 | @pytest.mark.asyncio 317 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 318 | async def test_read_optimization(self, language, algo): 319 | # Create and compile the project. 320 | project_id, compile_id = await Files.setup_project(language, algo) 321 | # Start the optimization. 322 | opt_id = ( 323 | await Optimization.create(project_id, compile_id) 324 | ).optimizationId 325 | # Try to read the optimization. 326 | await self._check_response( 327 | await Optimization.wait_for_job_to_complete(opt_id) 328 | ) 329 | # Delete the project to clean up. 330 | await Project.delete(project_id) 331 | 332 | @pytest.mark.asyncio 333 | async def test_read_optimization_with_invalid_args(self): 334 | tool_name = 'read_optimization' 335 | # Try to read an optimization without providing the Id. 336 | await ensure_request_raises_validation_error( 337 | tool_name, ReadOptimizationRequest, {} 338 | ) 339 | # Try to read an optimization that doesn't exist. 340 | await ensure_request_fails(mcp, tool_name, {'optimizationId': ' '}) 341 | 342 | 343 | @pytest.mark.asyncio 344 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 345 | async def test_list_optimization(self, language, algo): 346 | # Create and compile the project. 347 | project_id, compile_id = await Files.setup_project(language, algo) 348 | # Try to list the optimizations of a project that has no 349 | # optimization results. 350 | optimizations = await Optimization.list(project_id) 351 | assert len(optimizations) == 0 352 | # Run the optimization. 353 | await Optimization.wait_for_job_to_complete( 354 | (await Optimization.create(project_id, compile_id)).optimizationId 355 | ) 356 | # Try to list the optimizations of a project with some 357 | # optimization results. 358 | optimizations = await Optimization.list(project_id) 359 | assert len(optimizations) == 1 360 | await self._check_response(optimizations[0]) 361 | # Delete the project to clean up. 362 | await Project.delete(project_id) 363 | 364 | @pytest.mark.asyncio 365 | async def test_list_optimizations_with_invalid_args(self): 366 | # Try to list optimizations without providing the project Id. 367 | await ensure_request_raises_validation_error( 368 | 'list_optimizations', ListOptimizationRequest, {} 369 | ) 370 | 371 | @pytest.mark.asyncio 372 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 373 | async def test_update_optimization(self, language, algo): 374 | # Create and compile the project. 375 | project_id, compile_id = await Files.setup_project(language, algo) 376 | # Start the optimization. 377 | opt_id = ( 378 | await Optimization.create(project_id, compile_id) 379 | ).optimizationId 380 | # Try to update the optimization name.
381 | name = 'New Optimization Name' 382 | await Optimization.update(opt_id, name) 383 | optimization = await Optimization.read(opt_id) 384 | assert optimization.name == name 385 | # Delete the project to clean up. 386 | await Project.delete(project_id) 387 | 388 | @pytest.mark.asyncio 389 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 390 | async def test_update_optimization_with_invalid_args( 391 | self, language, algo): 392 | # Create and compile the project. 393 | project_id, compile_id = await Files.setup_project(language, algo) 394 | # Start the optimization. 395 | opt_id = ( 396 | await Optimization.create(project_id, compile_id) 397 | ).optimizationId 398 | # Test the invalid requests. 399 | tool_name = 'update_optimization' 400 | class_ = UpdateOptimizationRequest 401 | minimal_payload = {'optimizationId': opt_id, 'name': 'New name'} 402 | # Try to update an optimization name without providing all the 403 | # required arguments. 404 | await ensure_request_raises_validation_error_when_omitting_an_arg( 405 | tool_name, class_, minimal_payload 406 | ) 407 | # Try to update an optimization that doesn't exist. 408 | await ensure_request_fails( 409 | mcp, tool_name, minimal_payload | {'optimizationId': ' '} 410 | ) 411 | # Delete the project to clean up. 412 | await Project.delete(project_id) 413 | 414 | @pytest.mark.asyncio 415 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 416 | async def test_abort_optimization(self, language, algo): 417 | # Create and compile the project. 418 | project_id, compile_id = await Files.setup_project(language, algo) 419 | # Start the optimization. 420 | opt_id = ( 421 | await Optimization.create(project_id, compile_id) 422 | ).optimizationId 423 | # Wait for the optimization to start. 424 | await Optimization.wait_for_job_to_start(opt_id) 425 | # Try to abort the optimization. 426 | await Optimization.abort(opt_id) 427 | await Optimization.wait_for_job_to_abort(opt_id) 428 | optimization = await Optimization.read(opt_id) 429 | assert optimization.status.value == 'aborted' 430 | # Delete the project to clean up. 431 | await Project.delete(project_id) 432 | 433 | @pytest.mark.asyncio 434 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 435 | async def test_abort_optimization_with_invalid_args( 436 | self, language, algo): 437 | # Create and compile the project. 438 | project_id, compile_id = await Files.setup_project(language, algo) 439 | # Start the optimization. 440 | opt_id = ( 441 | await Optimization.create(project_id, compile_id) 442 | ).optimizationId 443 | # Test the invalid requests. 444 | tool_name = 'abort_optimization' 445 | class_ = AbortOptimizationRequest 446 | # Try to abort an optimization without providing the Id. 447 | await ensure_request_raises_validation_error(tool_name, class_, {}) 448 | # Try to abort an optimization that doesn't exist. 449 | await ensure_request_fails(mcp, tool_name, {'optimizationId': ' '}) 450 | # Try to abort an optimization that's already complete. 451 | await Optimization.wait_for_job_to_complete(opt_id) 452 | await ensure_request_fails(mcp, tool_name, {'optimizationId': opt_id}) 453 | # Delete the project to clean up. 454 | await Project.delete(project_id) 455 | 456 | @pytest.mark.asyncio 457 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 458 | async def test_delete_optimization(self, language, algo): 459 | # Create and compile the project. 460 | project_id, compile_id = await Files.setup_project(language, algo) 461 | # Start the optimization. 
462 | opt_id = ( 463 | await Optimization.create(project_id, compile_id) 464 | ).optimizationId 465 | # Try to delete the optimization. 466 | await Optimization.delete(opt_id) 467 | await ensure_request_fails( 468 | mcp, 'read_optimization', {'optimizationId': opt_id} 469 | ) 470 | # Delete the project to clean up. 471 | await Project.delete(project_id) 472 | 473 | @pytest.mark.asyncio 474 | @pytest.mark.parametrize('language, algo', TEST_ALGORITHMS) 475 | async def test_delete_optimization_with_invalid_args( 476 | self, language, algo): 477 | # Create and compile the project. 478 | project_id, compile_id = await Files.setup_project(language, algo) 479 | # Start the optimization. 480 | await Optimization.create(project_id, compile_id) 481 | # Test the invalid requests. 482 | tool_name = 'delete_optimization' 483 | # Try to delete an optimization without providing the Id. 484 | await ensure_request_raises_validation_error( 485 | tool_name, DeleteOptimizationRequest, {} 486 | ) 487 | # Try to delete an optimization that doesn't exist. 488 | await ensure_request_fails(mcp, tool_name, {'optimizationId': ' '}) 489 | # Delete the project to clean up. 490 | await Project.delete(project_id) 491 | --------------------------------------------------------------------------------
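Note: the Backtest and Optimization helpers above each repeat the same poll-until-done loop (read the job, check a status field, sleep, give up after a fixed number of attempts). The sketch below is illustrative only and is not a file in this repository; the helper name wait_for, its signature, and the use of asyncio.sleep are assumptions made for the example.

# Illustrative sketch -- not part of the repository.
import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar('T')


async def wait_for(
        read: Callable[[], Awaitable[T]],
        is_done: Callable[[T], bool],
        attempts: int = 5,
        delay: float = 10.0,
        failure_message: str = "Job didn't finish in time.") -> T:
    # Poll `read` until `is_done(result)` is satisfied or the attempts
    # run out, mirroring the wait_for_job_to_* helpers in the tests.
    for _ in range(attempts):
        result = await read()
        if is_done(result):
            return result
        # Non-blocking sleep; the helpers in the test files above use a
        # blocking time.sleep instead.
        await asyncio.sleep(delay)
    raise AssertionError(failure_message)


# Hypothetical usage, mirroring Backtest.wait_for_job_to_complete:
# backtest = await wait_for(
#     lambda: Backtest.read(project_id, backtest_id),
#     lambda backtest: backtest.completed,
# )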