├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── SECURITY.md
├── assets
│   └── logo.svg
├── deepchain
│   ├── core
│   │   ├── blockchain
│   │   │   ├── chains.py
│   │   │   ├── ethereum.py
│   │   │   └── validator.py
│   │   ├── data
│   │   │   ├── indicators.py
│   │   │   └── stream.py
│   │   ├── deepseek
│   │   │   └── base.py
│   │   ├── edge
│   │   │   └── deployer.py
│   │   ├── exceptions.py
│   │   ├── monitoring
│   │   │   ├── alerts.py
│   │   │   ├── metrics.py
│   │   │   └── performance.py
│   │   └── strategy
│   │       └── base.py
│   └── utils
│       └── tools.py
├── docker-compose.yml
├── docs
│   ├── api_reference.md
│   ├── blockchain_integration.md
│   ├── contributing.md
│   ├── edge_deployment.md
│   ├── getting_started.md
│   ├── monitoring.md
│   ├── security.md
│   └── strategy_development.md
├── examples
│   ├── advanced_strategy.py
│   ├── complete_example.py
│   ├── data_stream_example.py
│   ├── monitoring_example.py
│   ├── simple_deployer.py
│   ├── simple_strategy.py
│   ├── simple_validator.py
│   └── usage_example.py
├── setup.py
└── tests
    ├── benchmarks
    │   └── test_performance.py
    ├── integration
    │   ├── test_data.py
    │   ├── test_monitoring.py
    │   └── test_workflow.py
    ├── performance
    │   └── test_performance.py
    ├── test_blockchain.py
    ├── test_data.py
    ├── test_data_stream.py
    ├── test_deployer.py
    ├── test_ethereum.py
    ├── test_strategy.py
    └── test_validator.py
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to DeepChain
2 |
3 | We love your input! We want to make contributing to DeepChain as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing new features
9 | - Becoming a maintainer
10 |
11 | ## Development Process
12 |
13 | We use GitHub to host code, track issues and feature requests, and accept pull requests.
14 |
15 | 1. Fork the repo and create your branch from `main`
16 | 2. If you've added code that should be tested, add tests
17 | 3. If you've changed APIs, update the documentation
18 | 4. Ensure the test suite passes
19 | 5. Make sure your code lints
20 | 6. Issue that pull request
21 |
22 | ## Pull Request Process
23 |
24 | 1. Update the README.md with details of changes to the interface
25 | 2. Update the docs/ with any necessary changes
26 | 3. The PR will be merged once you have the sign-off of two other developers
27 |
28 | ## Any contributions you make will be under the MIT Software License
29 |
30 | In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern.
31 |
32 | ## Report bugs using GitHub's [issue tracker](https://github.com/Deep-Chain-IO/deepchain/issues)
33 |
34 | We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/Deep-Chain-IO/deepchain/issues/new).
35 |
36 | ## Write bug reports with detail, background, and sample code
37 |
38 | **Great Bug Reports** tend to have:
39 |
40 | - A quick summary and/or background
41 | - Steps to reproduce
42 |   - Be specific!
43 |   - Give sample code if you can
44 | - What you expected would happen
45 | - What actually happens
46 | - Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
47 |
48 | ## Use a Consistent Coding Style
49 |
50 | * 4 spaces for indentation rather than tabs
51 | * 80 character line length
52 | * Run `pylint` over your code
53 | * Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/)
54 |
55 | ## License
56 |
57 | By contributing, you agree that your contributions will be licensed under the project's MIT License.
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Python runtime as a parent image
2 | FROM python:3.10-slim
3 |
4 | # Set environment variables
5 | ENV PYTHONUNBUFFERED=1 \
6 | PYTHONDONTWRITEBYTECODE=1 \
7 | PIP_NO_CACHE_DIR=1 \
8 | PIP_DISABLE_PIP_VERSION_CHECK=1
9 |
10 | # Set work directory
11 | WORKDIR /app
12 |
13 | # Install system dependencies
14 | RUN apt-get update && apt-get install -y --no-install-recommends \
15 | build-essential \
16 | curl \
17 | git \
18 | && rm -rf /var/lib/apt/lists/*
19 |
20 | # Install Python dependencies
21 | COPY requirements.txt .
22 | RUN pip install --no-cache-dir -r requirements.txt
23 |
24 | # Copy project
25 | COPY . .
26 |
27 | # Install the package
28 | RUN pip install -e .
29 |
30 | # Create non-root user
31 | RUN useradd -m -u 1000 user && \
32 | chown -R user:user /app
33 | USER user
34 |
35 | # Command to run tests
36 | CMD ["pytest", "tests/"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 DeepChain
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DeepChain
2 |
3 | **DeepSeek AI-Driven Strategies, Blockchain-Verified Trust**
4 |
5 | *Powered by DeepSeek AI*
6 |
7 |
8 |
9 | ## Overview
10 |
11 | DeepChain is a framework for building decentralized AI trading strategy engines. It combines reinforcement learning, blockchain technology, and edge computing to create transparent, secure, and efficient trading systems. The framework is powered by DeepSeek AI, a pioneer in quantitative trading whose AI-driven methods have reshaped the AI, finance, and cryptocurrency industries.
12 |
13 | ## Key Features
14 |
15 | - **Reinforcement Learning-Driven Dynamic Strategies**
16 | - Real-time strategy adaptation using DeepSeek's R1 training system
17 | - Comprehensive market data processing and analysis
18 | - Advanced technical indicators and signal generation
19 | - Automated strategy optimization
20 |
21 | - **Distributed Strategy Validation Network**
22 | - Blockchain-based strategy verification
23 | - Zero-knowledge proof integration
24 | - Multi-chain support (Solana, Ethereum, BNB Chain, Base)
25 | - Transparent and tamper-proof execution records
26 |
27 | - **Edge Model Deployment**
28 | - Knowledge distillation for model compression
29 | - Optimized deployment for mobile and IoT devices
30 | - Real-time inference with minimal latency
31 | - Efficient on-chain settlement
32 |
33 | - **Comprehensive Monitoring System**
34 | - Real-time performance tracking
35 | - Automated alert system
36 | - Resource usage monitoring
37 | - System health checks
38 |
39 | ## Technology Stack
40 |
41 | - **AI & Machine Learning**
42 | - PyTorch for deep learning models
43 | - DeepSeek's R1 system for reinforcement learning
44 | - NumPy and Pandas for data processing
45 | - Scikit-learn for feature engineering
46 |
47 | - **Blockchain Integration**
48 | - Solana Web3.js
49 | - Ethereum Web3.py
50 | - Zero-knowledge proof libraries
51 | - Multi-chain support SDKs
52 |
53 | - **Edge Computing**
54 | - ONNX Runtime
55 | - TensorRT
56 | - Model compression tools
57 | - Edge deployment utilities
58 |
59 | ## Project Structure
60 |
61 | ```
62 | deepchain/
63 | ├── assets/ # Project assets
64 | │ ├── images/ # Image resources
65 | │ └── logo.svg # Project logo
66 | ├── deepchain/ # Main package directory
67 | │ ├── core/ # Core functionality
68 | │ │ ├── strategy/ # Trading strategy interfaces
69 | │ │ │ ├── base.py # Base strategy classes
70 | │ │ │ └── rl.py # Reinforcement learning strategies
71 | │ │ ├── blockchain/ # Blockchain interaction interfaces
72 | │ │ │ ├── validator.py # Strategy validation
73 | │ │ │ ├── chains.py # Multi-chain support
74 | │ │ │ └── ethereum.py # Ethereum integration
75 | │ │ ├── edge/ # Edge deployment interfaces
76 | │ │ │ ├── deployer.py # Model deployment
77 | │ │ │ └── optimizer.py # Model optimization
78 | │ │ ├── data/ # Data processing and streaming
79 | │ │ │ ├── stream.py # Data streaming
80 | │ │ │ └── indicators.py # Technical indicators
81 | │ │ └── monitoring/ # System monitoring and alerts
82 | │ │ ├── metrics.py # Metrics collection
83 | │ │ ├── alerts.py # Alert management
84 | │ │ └── performance.py # Performance tracking
85 | │ ├── protocols/ # Protocol definitions
86 | │ │ ├── trading.py # Trading protocols
87 | │ │ └── validation.py # Validation protocols
88 | │ └── utils/ # Utility functions
89 | │ ├── config.py # Configuration management
90 | │ ├── logging.py # Logging utilities
91 | │ └── errors.py # Error handling
92 | ├── examples/ # Example implementations
93 | │ ├── strategies/ # Strategy examples
94 | │ │ ├── simple_strategy.py # Basic strategy example
95 | │ │ └── advanced_strategy.py # Advanced strategy example
96 | │ ├── validation/ # Blockchain validation examples
97 | │ │ ├── solana_example.py # Solana validation
98 | │ │ └── ethereum_example.py # Ethereum validation
99 | │ └── deployment/ # Edge deployment examples
100 | │ ├── mobile_deployment.py # Mobile deployment
101 | │ └── iot_deployment.py # IoT deployment
102 | ├── tests/ # Test cases
103 | │ ├── unit/ # Unit tests
104 | │ │ ├── test_strategy.py # Strategy tests
105 | │ │ ├── test_blockchain.py # Blockchain tests
106 | │ │ └── test_deployment.py # Deployment tests
107 | │ └── integration/ # Integration tests
108 | │ ├── test_workflow.py # Workflow tests
109 | │ └── test_performance.py # Performance tests
110 | ├── docs/ # Documentation
111 | │ ├── getting_started.md # Getting started guide
112 | │ ├── api_reference.md # API documentation
113 | │ ├── strategy_development.md # Strategy guide
114 | │ ├── blockchain_integration.md # Blockchain guide
115 | │ ├── edge_deployment.md # Deployment guide
116 | │ ├── monitoring.md # Monitoring guide
117 | │ ├── contributing.md # Contributing guide
118 | │ └── security.md # Security policy
119 | ├── requirements.txt # Project dependencies
120 | ├── setup.py # Package configuration
121 | ├── LICENSE # License information
122 | └── README.md # Project documentation
123 | ```
124 |
125 | ## Installation
126 |
127 | ```bash
128 | pip install deepchain
129 | ```
130 |
131 | ## Quick Start
132 |
133 | 1. **Install Dependencies**
134 | ```bash
135 | pip install -r requirements.txt
136 | ```
137 |
138 | 2. **Configure Environment**
139 | ```bash
140 | export DEEPSEEK_API_KEY=your_api_key
141 | export BLOCKCHAIN_RPC_URL=your_rpc_url
142 | ```
143 |
144 | 3. **Run Example Strategy**
145 | ```python
146 | from deepchain.examples.strategies import SimpleStrategy
147 | from deepchain.core.blockchain.chains import SolanaChain, SolanaValidator
148 |
149 | # Initialize strategy and validator (the validator wraps a chain client)
150 | strategy = SimpleStrategy()
151 | validator = SolanaValidator(SolanaChain())
152 |
153 | # Train the strategy, then register it on-chain
154 | strategy.train()
155 | validator.register_strategy("simple-strategy", {"version": "1.0"})
156 | ```
157 |
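158 | 4. **Explore Multi-Chain Support**
159 | A sketch of the `ChainFactory` API from `deepchain/core/blockchain/chains.py`; the endpoint shown is the repo's default for Base, and the strategy id and proof payload are illustrative placeholders:
160 | ```python
161 | from deepchain.core.blockchain.chains import ChainFactory
162 |
163 | # ['solana', 'ethereum', 'base', 'bnb']
164 | print(ChainFactory.get_supported_chains())
165 |
166 | # Create a chain client and its matching validator
167 | chain = ChainFactory.create_chain("base", endpoint="https://mainnet.base.org")
168 | validator = ChainFactory.create_validator(chain)
169 |
170 | if chain.connect():
171 |     proof = validator.generate_proof("my-strategy", {"pnl": 0.042})
172 |     print(validator.verify_proof(proof))
173 | ```
174 |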
158 | Check out our [examples](./examples) directory for more implementation examples.
159 |
160 | ## Documentation
161 |
162 | - [Getting Started Guide](./docs/getting_started.md)
163 | - [API Reference](./docs/api_reference.md)
164 | - [Strategy Development](./docs/strategy_development.md)
165 | - [Blockchain Integration](./docs/blockchain_integration.md)
166 | - [Edge Deployment](./docs/edge_deployment.md)
167 | - [Monitoring System](./docs/monitoring.md)
168 |
169 | ## Acknowledgments
170 |
171 | DeepChain is powered by DeepSeek AI, a leading innovator in quantitative trading. We extend our heartfelt gratitude to DeepSeek for their groundbreaking contributions:
172 |
173 | - Revolutionizing AI-driven trading through advanced reinforcement learning
174 | - Pioneering transparent and verifiable trading strategies
175 | - Advancing the integration of AI and blockchain technology
176 | - Democratizing access to sophisticated trading technologies
177 |
178 | DeepSeek's commitment to innovation has transformed the landscape of AI, finance, and cryptocurrency trading, making advanced trading strategies accessible and trustworthy.
179 |
180 | ## Contributing
181 |
182 | We welcome contributions! Please see our [contributing guidelines](CONTRIBUTING.md) for details.
183 |
184 | ## Security
185 |
186 | Please report any security issues to security@deepchain.io. See our [security policy](SECURITY.md) for details.
187 |
188 | ## Community
189 |
190 | - [Twitter](https://x.com/DeepChain_AI)
191 |
192 | ## License
193 |
194 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | At DeepChain, we take security seriously. If you believe you have found a security vulnerability, please report it to us as described below.
6 |
7 | ## Reporting Process
8 |
9 | Please email security@deepchain.io with the following information:
10 |
11 | - Description of the vulnerability
12 | - Steps to reproduce the issue
13 | - Potential impact
14 | - Any suggested fixes (if available)
15 |
16 | ## What to Expect
17 |
18 | 1. **Initial Response**: We will acknowledge receipt of your vulnerability report within 24 hours.
19 | 2. **Status Updates**: We will provide regular updates about the progress of addressing the vulnerability.
20 | 3. **Resolution**: Once fixed, we will notify you and discuss the details of public disclosure.
21 |
22 | ## Scope
23 |
24 | This security policy applies to:
25 |
26 | - DeepChain core framework
27 | - Official examples and documentation
28 | - Deployment tools and scripts
29 | - API endpoints and services
30 |
31 | ## Security Best Practices
32 |
33 | When using DeepChain, please follow these security guidelines:
34 |
35 | 1. **API Keys and Credentials**
36 | - Never commit API keys to version control
37 | - Use environment variables for sensitive data
38 | - Rotate keys regularly
39 |
40 | 2. **Network Security**
41 | - Use secure connections (HTTPS/WSS)
42 | - Implement proper firewall rules
43 | - Monitor network traffic
44 |
45 | 3. **Access Control**
46 | - Implement proper authentication
47 | - Use role-based access control
48 | - Regular access reviews
49 |
50 | 4. **Data Protection**
51 | - Encrypt sensitive data
52 | - Regular backups
53 | - Secure data transmission
54 |
55 | 5. **Blockchain Security**
56 | - Secure key management
57 | - Transaction signing best practices
58 | - Smart contract auditing
59 |
60 | 6. **Edge Device Security**
61 | - Secure boot process
62 | - Regular security updates
63 | - Device authentication
64 |
65 | ## Disclosure Policy
66 |
67 | - Public disclosure will be coordinated with the reporter
68 | - Credit will be given to the reporter (if desired)
69 | - Details will be published after the fix is deployed
70 |
71 | ## Security Updates
72 |
73 | We regularly publish security updates. To stay informed:
74 |
75 | 1. Watch our GitHub repository
76 | 2. Follow our security announcements
77 | 3. Subscribe to our security mailing list
78 |
79 | ## Contact
80 |
81 | - For security issues: security@deepchain.io
82 | - For general inquiries: support@deepchain.io
--------------------------------------------------------------------------------
/assets/logo.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/deepchain/core/blockchain/chains.py:
--------------------------------------------------------------------------------
1 | """
2 | Blockchain integrations for DeepChain.
3 | Supports major blockchains including Solana, Ethereum, and others.
4 | """
5 |
6 | from typing import Dict, Any, Optional, List
7 | from abc import ABC, abstractmethod
8 | import solana
9 | from solana.rpc.api import Client
10 | from solana.transaction import Transaction
11 | from solana.transaction import TransactionInstruction
12 | import web3
13 | from web3 import Web3
14 | from .validator import BaseValidator, ZKValidator
15 | from .ethereum import (
16 | EthereumChain,
17 | BaseChain,
18 | BNBChain,
19 | EthereumValidator
20 | )
21 |
22 | class BlockchainBase(ABC):
23 | """Base class for blockchain interactions."""
24 |
25 | @abstractmethod
26 | def connect(self) -> bool:
27 | """Establish connection to blockchain node."""
28 | pass
29 |
30 | @abstractmethod
31 | def get_balance(self, address: str) -> float:
32 | """Get account balance."""
33 | pass
34 |
35 | @abstractmethod
36 | def send_transaction(self, transaction: Any) -> str:
37 | """Send transaction to blockchain."""
38 | pass
39 |
40 | @abstractmethod
41 | def verify_transaction(self, tx_hash: str) -> bool:
42 | """Verify transaction status."""
43 | pass
44 |
45 | class SolanaChain(BlockchainBase):
46 | """Solana blockchain integration."""
47 |
48 | def __init__(self, endpoint: str = "https://api.mainnet-beta.solana.com"):
49 | """Initialize Solana client.
50 |
51 | Args:
52 | endpoint: Solana RPC endpoint
53 | """
54 | self.client = Client(endpoint)
55 | self.program_id = None # Set in deploy_program
56 |
57 | def connect(self) -> bool:
58 | """Connect to Solana network."""
59 | try:
60 | self.client.get_health()
61 | return True
62 | except Exception:
63 | return False
64 |
65 | def get_balance(self, address: str) -> float:
66 | """Get SOL balance for address."""
67 | response = self.client.get_balance(address)
68 | return response['result']['value'] / 1e9 # Convert lamports to SOL
69 |
70 | def send_transaction(self, transaction: Transaction) -> str:
71 | """Send transaction to Solana network."""
72 | result = self.client.send_transaction(transaction)
73 | return result['result']
74 |
75 | def verify_transaction(self, tx_hash: str) -> bool:
76 | """Verify Solana transaction status."""
77 | result = self.client.get_confirmed_transaction(tx_hash)
78 | return result['result'] is not None
79 |
80 | def deploy_program(self, program_data: bytes) -> str:
81 | """Deploy Solana program (smart contract).
82 |
83 | Args:
84 | program_data: Compiled program bytecode
85 |
86 | Returns:
87 | Program ID
88 | """
89 |         # Program deployment is not yet implemented; fail loudly rather
90 |         # than silently returning None in place of a program ID.
91 |         raise NotImplementedError("deploy_program is not implemented yet")
92 |
93 | def create_strategy_account(self, strategy_id: str, space: int) -> str:
94 | """Create account for storing strategy data.
95 |
96 | Args:
97 | strategy_id: Strategy identifier
98 | space: Required space in bytes
99 |
100 | Returns:
101 | Account address
102 | """
103 |         # Account creation is not yet implemented; fail loudly rather
104 |         # than silently returning None in place of an address.
105 |         raise NotImplementedError("create_strategy_account is not implemented yet")
106 |
107 | class SolanaValidator(ZKValidator):
108 | """Solana-specific strategy validator."""
109 |
110 | def __init__(self, chain: SolanaChain):
111 | """Initialize validator with Solana chain.
112 |
113 | Args:
114 | chain: SolanaChain instance
115 | """
116 | self.chain = chain
117 |
118 | def validate_strategy(self, strategy_id: str, proof: Dict[str, Any]) -> bool:
119 | """Validate strategy on Solana."""
120 | # Implement Solana-specific validation
121 |         instruction = TransactionInstruction(
122 |             keys=[],  # keys is required by TransactionInstruction; simplified
123 |             program_id=self.chain.program_id,
124 |             data=b'validate_strategy'
125 |         )
125 | transaction = Transaction().add(instruction)
126 |
127 | try:
128 | tx_hash = self.chain.send_transaction(transaction)
129 | return self.chain.verify_transaction(tx_hash)
130 | except Exception:
131 | return False
132 |
133 | def register_strategy(self, strategy_id: str, metadata: Dict[str, Any]) -> str:
134 | """Register strategy on Solana."""
135 | # Create strategy account
136 | account = self.chain.create_strategy_account(strategy_id, 1000) # Example size
137 |
138 | # Register strategy
139 |         instruction = TransactionInstruction(
140 |             keys=[],  # keys is required by TransactionInstruction; simplified
141 |             program_id=self.chain.program_id,
142 |             data=b'register_strategy'
143 |         )
143 | transaction = Transaction().add(instruction)
144 |
145 | return self.chain.send_transaction(transaction)
146 |
147 | def verify_execution(self,
148 | strategy_id: str,
149 | execution_data: Dict[str, Any],
150 | proof: Optional[Dict[str, Any]] = None) -> bool:
151 | """Verify strategy execution on Solana."""
152 |         instruction = TransactionInstruction(
153 |             keys=[],  # keys is required by TransactionInstruction; simplified
154 |             program_id=self.chain.program_id,
155 |             data=b'verify_execution'
156 |         )
156 | transaction = Transaction().add(instruction)
157 |
158 | try:
159 | tx_hash = self.chain.send_transaction(transaction)
160 | return self.chain.verify_transaction(tx_hash)
161 | except Exception:
162 | return False
163 |
164 | def generate_proof(self, strategy_id: str, execution_data: Dict[str, Any]) -> Dict[str, Any]:
165 | """Generate zero-knowledge proof for Solana."""
166 | # Implement Solana-specific ZK proof generation
167 | return {
168 | 'type': 'solana_zk_proof',
169 | 'strategy_id': strategy_id,
170 | 'data': execution_data,
171 | # Add actual proof data
172 | }
173 |
174 | def verify_proof(self, proof: Dict[str, Any]) -> bool:
175 | """Verify Solana-specific zero-knowledge proof."""
176 | # Implement Solana-specific ZK proof verification
177 | return True # Simplified
178 |
179 | class ChainFactory:
180 | """Factory for creating blockchain instances."""
181 |
182 | SUPPORTED_CHAINS = {
183 | 'solana': (SolanaChain, SolanaValidator),
184 | 'ethereum': (EthereumChain, EthereumValidator),
185 | 'base': (BaseChain, EthereumValidator),
186 | 'bnb': (BNBChain, EthereumValidator)
187 | }
188 |
189 | @staticmethod
190 | def create_chain(chain_type: str, **kwargs) -> BlockchainBase:
191 | """Create blockchain instance.
192 |
193 | Args:
194 | chain_type: Type of blockchain ('solana', 'ethereum', 'base', 'bnb')
195 | **kwargs: Chain-specific parameters
196 |
197 | Returns:
198 | BlockchainBase instance
199 |
200 | Raises:
201 | ValueError: If chain type is not supported
202 | """
203 | chain_type = chain_type.lower()
204 | if chain_type not in ChainFactory.SUPPORTED_CHAINS:
205 | raise ValueError(
206 | f"Unsupported blockchain type: {chain_type}. "
207 | f"Supported types: {list(ChainFactory.SUPPORTED_CHAINS.keys())}"
208 | )
209 |
210 | chain_class = ChainFactory.SUPPORTED_CHAINS[chain_type][0]
211 | return chain_class(**kwargs)
212 |
213 | @staticmethod
214 | def create_validator(chain: BlockchainBase) -> BaseValidator:
215 | """Create validator for blockchain.
216 |
217 | Args:
218 | chain: BlockchainBase instance
219 |
220 | Returns:
221 | BaseValidator instance
222 |
223 | Raises:
224 | ValueError: If chain type is not supported
225 | """
226 | for chain_type, (chain_class, validator_class) in ChainFactory.SUPPORTED_CHAINS.items():
227 | if isinstance(chain, chain_class):
228 | return validator_class(chain)
229 |
230 | raise ValueError(f"Unsupported chain type: {type(chain)}")
231 |
232 | @staticmethod
233 | def get_supported_chains() -> List[str]:
234 | """Get list of supported blockchain types.
235 |
236 | Returns:
237 | List of supported chain types
238 | """
239 | return list(ChainFactory.SUPPORTED_CHAINS.keys())
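240 |
241 |
242 | if __name__ == "__main__":
243 |     # Usage sketch (illustrative): connect to Solana mainnet with the default
244 |     # RPC endpoint and pair the chain with its validator via the factory.
245 |     sol = SolanaChain()
246 |     print("Solana reachable:", sol.connect())
247 |     validator = ChainFactory.create_validator(sol)
248 |     print("Validator:", type(validator).__name__)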
--------------------------------------------------------------------------------
/deepchain/core/blockchain/ethereum.py:
--------------------------------------------------------------------------------
1 | """
2 | Ethereum and L2 chain integrations for DeepChain.
3 | """
4 |
5 | from typing import Dict, Any, Optional
6 | from web3 import Web3
7 | from eth_account.account import Account
8 | from eth_typing import Address
9 | from .chains import BlockchainBase
10 | from .validator import ZKValidator
11 |
12 | class EthereumBase(BlockchainBase):
13 | """Base class for Ethereum-based chains."""
14 |
15 |     def __init__(self, endpoint: str, chain_id: int, private_key: Optional[str] = None):
16 |         """Initialize Ethereum client.
17 |
18 |         Args:
19 |             endpoint: Web3 provider endpoint
20 |             chain_id: Chain ID
21 |             private_key: Optional account key; send_transaction needs it to sign
22 |         """
23 |         self.web3 = Web3(Web3.HTTPProvider(endpoint))
24 |         self.chain_id = chain_id
25 |         self.private_key = private_key  # Used by send_transaction for signing
26 |         self.contract_address = None  # Set in deploy_contract
27 |         self.contract = None  # Set in deploy_contract
26 |
27 | def connect(self) -> bool:
28 | """Connect to Ethereum network."""
29 | try:
30 | return self.web3.is_connected()
31 | except Exception:
32 | return False
33 |
34 | def get_balance(self, address: str) -> float:
35 | """Get ETH balance for address."""
36 | balance_wei = self.web3.eth.get_balance(address)
37 |         return float(self.web3.from_wei(balance_wei, 'ether'))
38 |
39 | def send_transaction(self, transaction: Dict[str, Any]) -> str:
40 | """Send transaction to Ethereum network."""
41 | signed_txn = self.web3.eth.account.sign_transaction(
42 | transaction,
43 | self.private_key
44 | )
45 | tx_hash = self.web3.eth.send_raw_transaction(signed_txn.rawTransaction)
46 | return self.web3.to_hex(tx_hash)
47 |
48 | def verify_transaction(self, tx_hash: str) -> bool:
49 | """Verify Ethereum transaction status."""
50 | try:
51 | receipt = self.web3.eth.get_transaction_receipt(tx_hash)
52 | return receipt is not None and receipt['status'] == 1
53 | except Exception:
54 | return False
55 |
56 | def deploy_contract(self, abi: list, bytecode: str) -> Address:
57 | """Deploy smart contract.
58 |
59 | Args:
60 | abi: Contract ABI
61 | bytecode: Contract bytecode
62 |
63 | Returns:
64 | Contract address
65 | """
66 | contract = self.web3.eth.contract(abi=abi, bytecode=bytecode)
67 | transaction = contract.constructor().build_transaction({
68 | 'from': self.web3.eth.default_account,
69 | 'nonce': self.web3.eth.get_transaction_count(
70 | self.web3.eth.default_account
71 | ),
72 | 'gas': 2000000,
73 | 'gasPrice': self.web3.eth.gas_price
74 | })
75 |
76 | tx_hash = self.send_transaction(transaction)
77 | receipt = self.web3.eth.wait_for_transaction_receipt(tx_hash)
78 |
79 | self.contract_address = receipt['contractAddress']
80 | self.contract = self.web3.eth.contract(
81 | address=self.contract_address,
82 | abi=abi
83 | )
84 | return self.contract_address
85 |
86 | class EthereumChain(EthereumBase):
87 | """Ethereum mainnet integration."""
88 |
89 | def __init__(self, endpoint: str = "https://mainnet.infura.io/v3/YOUR-PROJECT-ID"):
90 | """Initialize Ethereum mainnet client."""
91 | super().__init__(endpoint, chain_id=1)
92 |
93 | class BaseChain(EthereumBase):
94 | """Base L2 chain integration."""
95 |
96 | def __init__(self, endpoint: str = "https://mainnet.base.org"):
97 | """Initialize Base client."""
98 | super().__init__(endpoint, chain_id=8453)
99 |
100 | class BNBChain(EthereumBase):
101 | """BNB Chain integration."""
102 |
103 | def __init__(self, endpoint: str = "https://bsc-dataseed.binance.org"):
104 | """Initialize BNB Chain client."""
105 | super().__init__(endpoint, chain_id=56)
106 |
107 | class EthereumValidator(ZKValidator):
108 | """Ethereum-based strategy validator."""
109 |
110 | def __init__(self, chain: EthereumBase):
111 | """Initialize validator with Ethereum chain.
112 |
113 | Args:
114 | chain: EthereumBase instance
115 | """
116 | self.chain = chain
117 |
118 | def validate_strategy(self, strategy_id: str, proof: Dict[str, Any]) -> bool:
119 | """Validate strategy on Ethereum."""
120 | try:
121 | tx = self.chain.contract.functions.validateStrategy(
122 | strategy_id,
123 | proof
124 | ).build_transaction({
125 | 'from': self.chain.web3.eth.default_account,
126 | 'nonce': self.chain.web3.eth.get_transaction_count(
127 | self.chain.web3.eth.default_account
128 | ),
129 | 'gas': 200000,
130 | 'gasPrice': self.chain.web3.eth.gas_price
131 | })
132 |
133 | tx_hash = self.chain.send_transaction(tx)
134 | return self.chain.verify_transaction(tx_hash)
135 | except Exception:
136 | return False
137 |
138 | def register_strategy(self, strategy_id: str, metadata: Dict[str, Any]) -> str:
139 | """Register strategy on Ethereum."""
140 | tx = self.chain.contract.functions.registerStrategy(
141 | strategy_id,
142 | metadata
143 | ).build_transaction({
144 | 'from': self.chain.web3.eth.default_account,
145 | 'nonce': self.chain.web3.eth.get_transaction_count(
146 | self.chain.web3.eth.default_account
147 | ),
148 | 'gas': 200000,
149 | 'gasPrice': self.chain.web3.eth.gas_price
150 | })
151 |
152 | return self.chain.send_transaction(tx)
153 |
154 | def verify_execution(self,
155 | strategy_id: str,
156 | execution_data: Dict[str, Any],
157 | proof: Optional[Dict[str, Any]] = None) -> bool:
158 | """Verify strategy execution on Ethereum."""
159 | try:
160 | tx = self.chain.contract.functions.verifyExecution(
161 | strategy_id,
162 | execution_data,
163 | proof
164 | ).build_transaction({
165 | 'from': self.chain.web3.eth.default_account,
166 | 'nonce': self.chain.web3.eth.get_transaction_count(
167 | self.chain.web3.eth.default_account
168 | ),
169 | 'gas': 200000,
170 | 'gasPrice': self.chain.web3.eth.gas_price
171 | })
172 |
173 | tx_hash = self.chain.send_transaction(tx)
174 | return self.chain.verify_transaction(tx_hash)
175 | except Exception:
176 | return False
177 |
178 | def generate_proof(self, strategy_id: str, execution_data: Dict[str, Any]) -> Dict[str, Any]:
179 | """Generate zero-knowledge proof for Ethereum."""
180 | # Implement Ethereum-specific ZK proof generation
181 | return {
182 | 'type': 'ethereum_zk_proof',
183 | 'strategy_id': strategy_id,
184 | 'data': execution_data,
185 | # Add actual proof data
186 | }
187 |
188 | def verify_proof(self, proof: Dict[str, Any]) -> bool:
189 | """Verify Ethereum-specific zero-knowledge proof."""
190 | try:
191 | return self.chain.contract.functions.verifyProof(
192 | proof
193 | ).call()
194 | except Exception:
195 | return False
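196 |
197 |
198 | if __name__ == "__main__":
199 |     # Usage sketch (illustrative): connect to the Base L2 with the default
200 |     # endpoint and read a balance. The zero address is only a placeholder.
201 |     chain = BaseChain()
202 |     if chain.connect():
203 |         print("chain_id:", chain.chain_id)
204 |         print(chain.get_balance("0x0000000000000000000000000000000000000000"))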
--------------------------------------------------------------------------------
/deepchain/core/blockchain/validator.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, Optional
3 |
4 | class BaseValidator(ABC):
5 | """
6 | Base class for blockchain-based strategy validators.
7 | Defines interface for validating and verifying trading strategies on blockchain.
8 | """
9 |
10 | @abstractmethod
11 | def validate_strategy(self, strategy_id: str, proof: Dict[str, Any]) -> bool:
12 | """
13 | Validate a trading strategy using zero-knowledge proofs.
14 |
15 | Args:
16 | strategy_id: Unique identifier for the strategy
17 | proof: Zero-knowledge proof data
18 |
19 | Returns:
20 | Boolean indicating whether the strategy is valid
21 | """
22 | pass
23 |
24 | @abstractmethod
25 | def register_strategy(self, strategy_id: str, metadata: Dict[str, Any]) -> str:
26 | """
27 | Register a new strategy on the blockchain.
28 |
29 | Args:
30 | strategy_id: Unique identifier for the strategy
31 | metadata: Strategy metadata
32 |
33 | Returns:
34 | Transaction hash or identifier
35 | """
36 | pass
37 |
38 | @abstractmethod
39 | def verify_execution(self,
40 | strategy_id: str,
41 | execution_data: Dict[str, Any],
42 | proof: Optional[Dict[str, Any]] = None) -> bool:
43 | """
44 | Verify the execution of a strategy on the blockchain.
45 |
46 | Args:
47 | strategy_id: Unique identifier for the strategy
48 | execution_data: Data about strategy execution
49 | proof: Optional zero-knowledge proof
50 |
51 | Returns:
52 | Boolean indicating whether the execution is valid
53 | """
54 | pass
55 |
56 | class ZKValidator(BaseValidator):
57 | """
58 | Zero-knowledge proof based validator implementation.
59 | """
60 |
61 | @abstractmethod
62 | def generate_proof(self, strategy_id: str, execution_data: Dict[str, Any]) -> Dict[str, Any]:
63 | """
64 | Generate zero-knowledge proof for strategy execution.
65 |
66 | Args:
67 | strategy_id: Unique identifier for the strategy
68 | execution_data: Data about strategy execution
69 |
70 | Returns:
71 | Generated proof data
72 | """
73 | pass
74 |
75 | @abstractmethod
76 | def verify_proof(self, proof: Dict[str, Any]) -> bool:
77 | """
78 | Verify a zero-knowledge proof.
79 |
80 | Args:
81 | proof: The proof to verify
82 |
83 | Returns:
84 | Boolean indicating whether the proof is valid
85 | """
86 | pass
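87 |
88 |
89 | # A minimal in-memory sketch (illustrative only) of what a concrete
90 | # ZKValidator must provide. Real validators, such as those in chains.py and
91 | # ethereum.py, anchor these operations on-chain instead of in a local dict.
92 | class InMemoryValidator(ZKValidator):
93 |     """Non-cryptographic stand-in, useful for tests and local development."""
94 |
95 |     def __init__(self):
96 |         self.registry: Dict[str, Dict[str, Any]] = {}
97 |
98 |     def register_strategy(self, strategy_id: str, metadata: Dict[str, Any]) -> str:
99 |         self.registry[strategy_id] = metadata
100 |         return f"local:{strategy_id}"
101 |
102 |     def validate_strategy(self, strategy_id: str, proof: Dict[str, Any]) -> bool:
103 |         return strategy_id in self.registry and self.verify_proof(proof)
104 |
105 |     def verify_execution(self,
106 |                          strategy_id: str,
107 |                          execution_data: Dict[str, Any],
108 |                          proof: Optional[Dict[str, Any]] = None) -> bool:
109 |         proof = proof or self.generate_proof(strategy_id, execution_data)
110 |         return self.validate_strategy(strategy_id, proof)
111 |
112 |     def generate_proof(self, strategy_id: str, execution_data: Dict[str, Any]) -> Dict[str, Any]:
113 |         # Not a real zero-knowledge proof; just a tagged payload.
114 |         return {'strategy_id': strategy_id, 'data': execution_data}
115 |
116 |     def verify_proof(self, proof: Dict[str, Any]) -> bool:
117 |         return isinstance(proof, dict) and 'strategy_id' in proof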
--------------------------------------------------------------------------------
/deepchain/core/data/indicators.py:
--------------------------------------------------------------------------------
1 | """
2 | Technical indicators for market data analysis.
3 | """
4 |
5 | import pandas as pd
6 | import numpy as np
7 | from typing import Optional
8 |
9 | def calculate_ma(data: pd.DataFrame, period: int, column: str = 'close') -> pd.Series:
10 | """Calculate Moving Average.
11 |
12 | Args:
13 | data: Price data
14 | period: MA period
15 | column: Price column name
16 |
17 | Returns:
18 | MA values
19 | """
20 | return data[column].rolling(window=period).mean()
21 |
22 | def calculate_ema(data: pd.DataFrame, period: int, column: str = 'close') -> pd.Series:
23 | """Calculate Exponential Moving Average.
24 |
25 | Args:
26 | data: Price data
27 | period: EMA period
28 | column: Price column name
29 |
30 | Returns:
31 | EMA values
32 | """
33 | return data[column].ewm(span=period, adjust=False).mean()
34 |
35 | def calculate_rsi(data: pd.DataFrame, period: int = 14, column: str = 'close') -> pd.Series:
36 | """Calculate Relative Strength Index.
37 |
38 | Args:
39 | data: Price data
40 | period: RSI period
41 | column: Price column name
42 |
43 | Returns:
44 | RSI values
45 | """
46 | delta = data[column].diff()
47 | gain = (delta.where(delta > 0, 0)).rolling(window=period).mean()
48 | loss = (-delta.where(delta < 0, 0)).rolling(window=period).mean()
49 |
50 | rs = gain / loss
51 | return 100 - (100 / (1 + rs))
52 |
53 | def calculate_macd(
54 | data: pd.DataFrame,
55 | fast_period: int = 12,
56 | slow_period: int = 26,
57 | signal_period: int = 9,
58 | column: str = 'close'
59 | ) -> pd.DataFrame:
60 | """Calculate MACD (Moving Average Convergence Divergence).
61 |
62 | Args:
63 | data: Price data
64 | fast_period: Fast EMA period
65 | slow_period: Slow EMA period
66 | signal_period: Signal line period
67 | column: Price column name
68 |
69 | Returns:
70 | DataFrame with MACD line, signal line and histogram
71 | """
72 | fast_ema = calculate_ema(data, fast_period, column)
73 | slow_ema = calculate_ema(data, slow_period, column)
74 |
75 | macd_line = fast_ema - slow_ema
76 | signal_line = macd_line.ewm(span=signal_period, adjust=False).mean()
77 | histogram = macd_line - signal_line
78 |
79 | return pd.DataFrame({
80 | 'macd': macd_line,
81 | 'signal': signal_line,
82 | 'histogram': histogram
83 | })
84 |
85 | def calculate_bollinger_bands(
86 | data: pd.DataFrame,
87 | period: int = 20,
88 | std_dev: float = 2.0,
89 | column: str = 'close'
90 | ) -> pd.DataFrame:
91 | """Calculate Bollinger Bands.
92 |
93 | Args:
94 | data: Price data
95 | period: Moving average period
96 | std_dev: Number of standard deviations
97 | column: Price column name
98 |
99 | Returns:
100 | DataFrame with upper band, middle band and lower band
101 | """
102 | middle_band = calculate_ma(data, period, column)
103 | std = data[column].rolling(window=period).std()
104 |
105 | upper_band = middle_band + (std * std_dev)
106 | lower_band = middle_band - (std * std_dev)
107 |
108 | return pd.DataFrame({
109 | 'upper': upper_band,
110 | 'middle': middle_band,
111 | 'lower': lower_band
112 | })
113 |
114 | def calculate_atr(
115 | data: pd.DataFrame,
116 | period: int = 14
117 | ) -> pd.Series:
118 | """Calculate Average True Range.
119 |
120 | Args:
121 | data: Price data with high, low, close columns
122 | period: ATR period
123 |
124 | Returns:
125 | ATR values
126 | """
127 | high = data['high']
128 | low = data['low']
129 | close = data['close']
130 |
131 | tr1 = high - low
132 | tr2 = abs(high - close.shift())
133 | tr3 = abs(low - close.shift())
134 |
135 | tr = pd.concat([tr1, tr2, tr3], axis=1).max(axis=1)
136 | return tr.rolling(window=period).mean()
137 |
138 | def calculate_stochastic(
139 | data: pd.DataFrame,
140 | k_period: int = 14,
141 | d_period: int = 3,
142 | smooth_k: int = 3
143 | ) -> pd.DataFrame:
144 | """Calculate Stochastic Oscillator.
145 |
146 | Args:
147 | data: Price data with high, low, close columns
148 | k_period: %K period
149 | d_period: %D period
150 | smooth_k: %K smoothing period
151 |
152 | Returns:
153 | DataFrame with %K and %D values
154 | """
155 | low_min = data['low'].rolling(window=k_period).min()
156 | high_max = data['high'].rolling(window=k_period).max()
157 |
158 | k = 100 * ((data['close'] - low_min) / (high_max - low_min))
159 | k = k.rolling(window=smooth_k).mean()
160 | d = k.rolling(window=d_period).mean()
161 |
162 | return pd.DataFrame({
163 | 'k': k,
164 | 'd': d
165 | })
166 |
167 | def calculate_obv(data: pd.DataFrame) -> pd.Series:
168 | """Calculate On-Balance Volume.
169 |
170 | Args:
171 | data: Price data with close and volume columns
172 |
173 | Returns:
174 | OBV values
175 | """
176 | close_diff = data['close'].diff()
177 | volume = data['volume']
178 |
179 | obv = pd.Series(index=data.index, dtype=float)
180 | obv.iloc[0] = volume.iloc[0]
181 |
182 | for i in range(1, len(data)):
183 | if close_diff.iloc[i] > 0:
184 | obv.iloc[i] = obv.iloc[i-1] + volume.iloc[i]
185 | elif close_diff.iloc[i] < 0:
186 | obv.iloc[i] = obv.iloc[i-1] - volume.iloc[i]
187 | else:
188 | obv.iloc[i] = obv.iloc[i-1]
189 |
190 | return obv
191 |
192 | def calculate_vwap(data: pd.DataFrame) -> pd.Series:
193 | """Calculate Volume Weighted Average Price.
194 |
195 | Args:
196 | data: Price data with high, low, close and volume columns
197 |
198 | Returns:
199 | VWAP values
200 | """
201 | typical_price = (data['high'] + data['low'] + data['close']) / 3
202 | return (typical_price * data['volume']).cumsum() / data['volume'].cumsum()
203 |
204 | def calculate_momentum(
205 | data: pd.DataFrame,
206 | period: int = 14,
207 | column: str = 'close'
208 | ) -> pd.Series:
209 | """Calculate Momentum.
210 |
211 | Args:
212 | data: Price data
213 | period: Momentum period
214 | column: Price column name
215 |
216 | Returns:
217 | Momentum values
218 | """
219 | return data[column].diff(period)
220 |
221 | def calculate_williams_r(
222 | data: pd.DataFrame,
223 | period: int = 14
224 | ) -> pd.Series:
225 | """Calculate Williams %R.
226 |
227 | Args:
228 | data: Price data with high, low, close columns
229 | period: Look-back period
230 |
231 | Returns:
232 | Williams %R values
233 | """
234 | highest_high = data['high'].rolling(window=period).max()
235 | lowest_low = data['low'].rolling(window=period).min()
236 |
237 | wr = -100 * ((highest_high - data['close']) / (highest_high - lowest_low))
238 | return wr
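239 |
240 |
241 | if __name__ == "__main__":
242 |     # Quick self-check on synthetic OHLCV data (illustrative values only).
243 |     rng = np.random.default_rng(0)
244 |     close = 100 + rng.standard_normal(200).cumsum()
245 |     df = pd.DataFrame({
246 |         'close': close,
247 |         'high': close + rng.random(200),
248 |         'low': close - rng.random(200),
249 |         'volume': rng.integers(1_000, 10_000, 200).astype(float),
250 |     })
251 |     print(calculate_rsi(df).tail())
252 |     print(calculate_macd(df).tail())
253 |     print(calculate_bollinger_bands(df).tail())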
--------------------------------------------------------------------------------
/deepchain/core/data/stream.py:
--------------------------------------------------------------------------------
1 | """
2 | Real-time data streaming and processing system for DeepChain.
3 | """
4 |
5 | from typing import Dict, Any, List, Optional, Callable
6 | import pandas as pd
7 | import numpy as np
8 | from abc import ABC, abstractmethod
9 | import asyncio
10 | import json
10 | import aiohttp
11 | import websockets
12 | import logging
13 | from queue import Queue
14 | from threading import Thread
15 | from datetime import datetime
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 | class DataSource(ABC):
20 | """Abstract base class for data sources."""
21 |
22 | @abstractmethod
23 | async def connect(self) -> bool:
24 | """Connect to data source."""
25 | pass
26 |
27 | @abstractmethod
28 | async def subscribe(self, symbols: List[str]) -> None:
29 | """Subscribe to market data."""
30 | pass
31 |
32 | @abstractmethod
33 | async def unsubscribe(self, symbols: List[str]) -> None:
34 | """Unsubscribe from market data."""
35 | pass
36 |
37 | @abstractmethod
38 | async def get_historical_data(
39 | self,
40 | symbol: str,
41 | start_time: datetime,
42 | end_time: datetime,
43 | interval: str
44 | ) -> pd.DataFrame:
45 | """Get historical market data."""
46 | pass
47 |
48 | class WebSocketSource(DataSource):
49 | """WebSocket-based data source."""
50 |
51 | def __init__(self, url: str, api_key: Optional[str] = None):
52 | """Initialize WebSocket source.
53 |
54 | Args:
55 | url: WebSocket endpoint URL
56 | api_key: Optional API key
57 | """
58 | self.url = url
59 | self.api_key = api_key
60 | self.ws = None
61 | self.connected = False
62 |
63 | async def connect(self) -> bool:
64 | """Connect to WebSocket."""
65 | try:
66 | self.ws = await websockets.connect(self.url)
67 | self.connected = True
68 | return True
69 | except Exception as e:
70 | logger.error(f"WebSocket connection failed: {e}")
71 | return False
72 |
73 | async def subscribe(self, symbols: List[str]) -> None:
74 | """Subscribe to market data."""
75 | if not self.connected:
76 | raise ConnectionError("Not connected to WebSocket")
77 |
78 | subscribe_msg = {
79 | 'type': 'subscribe',
80 | 'symbols': symbols
81 | }
82 |         await self.ws.send(json.dumps(subscribe_msg))
83 |
84 | async def unsubscribe(self, symbols: List[str]) -> None:
85 | """Unsubscribe from market data."""
86 | if not self.connected:
87 | raise ConnectionError("Not connected to WebSocket")
88 |
89 | unsubscribe_msg = {
90 | 'type': 'unsubscribe',
91 | 'symbols': symbols
92 | }
93 |         await self.ws.send(json.dumps(unsubscribe_msg))
94 |
95 | async def get_historical_data(
96 | self,
97 | symbol: str,
98 | start_time: datetime,
99 | end_time: datetime,
100 | interval: str
101 | ) -> pd.DataFrame:
102 | """Get historical market data using REST API."""
103 | async with aiohttp.ClientSession() as session:
104 | params = {
105 | 'symbol': symbol,
106 | 'start_time': int(start_time.timestamp() * 1000),
107 | 'end_time': int(end_time.timestamp() * 1000),
108 | 'interval': interval
109 | }
110 | if self.api_key:
111 | params['api_key'] = self.api_key
112 |
113 | async with session.get(f"{self.url}/history", params=params) as response:
114 | data = await response.json()
115 | return pd.DataFrame(data)
116 |
117 | class DataStream:
118 | """Real-time data streaming and processing."""
119 |
120 | def __init__(self, buffer_size: int = 1000):
121 | """Initialize data stream.
122 |
123 | Args:
124 | buffer_size: Size of data buffer
125 | """
126 | self.buffer_size = buffer_size
127 | self.sources: Dict[str, DataSource] = {}
128 | self.processors: List[Callable] = []
129 | self.data_buffer = Queue(maxsize=buffer_size)
130 | self.running = False
131 | self.processing_thread = None
132 |
133 | def add_source(self, name: str, source: DataSource) -> None:
134 | """Add data source.
135 |
136 | Args:
137 | name: Source name
138 | source: DataSource instance
139 | """
140 | self.sources[name] = source
141 |
142 | def add_processor(self, processor: Callable) -> None:
143 | """Add data processor.
144 |
145 | Args:
146 | processor: Processing function
147 | """
148 | self.processors.append(processor)
149 |
150 | async def start(self) -> None:
151 | """Start data streaming."""
152 | if self.running:
153 | return
154 |
155 | self.running = True
156 | self.processing_thread = Thread(target=self._process_data)
157 | self.processing_thread.start()
158 |
159 | # Connect to all sources
160 | for name, source in self.sources.items():
161 | connected = await source.connect()
162 | if not connected:
163 | logger.error(f"Failed to connect to source: {name}")
164 |
165 | async def stop(self) -> None:
166 | """Stop data streaming."""
167 | self.running = False
168 | if self.processing_thread:
169 | self.processing_thread.join()
170 |
171 | # Disconnect from all sources
172 | for source in self.sources.values():
173 | if hasattr(source, 'ws') and source.ws:
174 | await source.ws.close()
175 |
176 | def _process_data(self) -> None:
177 | """Process incoming data."""
178 | while self.running:
179 | if not self.data_buffer.empty():
180 | data = self.data_buffer.get()
181 |
182 | # Apply all processors
183 | for processor in self.processors:
184 | try:
185 | data = processor(data)
186 | except Exception as e:
187 | logger.error(f"Data processing error: {e}")
188 |
189 | # Emit processed data
190 | self._emit_data(data)
191 |
192 | def _emit_data(self, data: Any) -> None:
193 | """Emit processed data."""
194 | # Implement your data emission logic here
195 | pass
196 |
197 | class DataPipeline:
198 | """Data processing pipeline."""
199 |
200 | def __init__(self):
201 | """Initialize data pipeline."""
202 | self.steps: List[Callable] = []
203 |
204 | def add_step(self, step: Callable) -> None:
205 | """Add processing step.
206 |
207 | Args:
208 | step: Processing function
209 | """
210 | self.steps.append(step)
211 |
212 | def process(self, data: pd.DataFrame) -> pd.DataFrame:
213 | """Process data through pipeline.
214 |
215 | Args:
216 | data: Input DataFrame
217 |
218 | Returns:
219 | Processed DataFrame
220 | """
221 | result = data.copy()
222 | for step in self.steps:
223 | try:
224 | result = step(result)
225 | except Exception as e:
226 | logger.error(f"Pipeline step failed: {e}")
227 | raise
228 | return result
229 |
230 | class MarketDataStream(DataStream):
231 | """Market data streaming with technical indicators."""
232 |
233 | def __init__(self, buffer_size: int = 1000):
234 | """Initialize market data stream."""
235 | super().__init__(buffer_size)
236 | self.indicators = {}
237 |
238 | def add_indicator(self, name: str, func: Callable, **params) -> None:
239 | """Add technical indicator.
240 |
241 | Args:
242 | name: Indicator name
243 | func: Indicator calculation function
244 | **params: Indicator parameters
245 | """
246 | self.indicators[name] = (func, params)
247 |
248 | def _process_data(self) -> None:
249 | """Process market data with indicators."""
250 | while self.running:
251 | if not self.data_buffer.empty():
252 | data = self.data_buffer.get()
253 |
254 | # Calculate indicators
255 | for name, (func, params) in self.indicators.items():
256 | try:
257 | data[name] = func(data, **params)
258 | except Exception as e:
259 | logger.error(f"Indicator calculation error: {e}")
260 |
261 | # Apply processors
262 | for processor in self.processors:
263 | try:
264 | data = processor(data)
265 | except Exception as e:
266 | logger.error(f"Data processing error: {e}")
267 |
268 | self._emit_data(data)
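269 |
270 |
271 | if __name__ == "__main__":
272 |     # Usage sketch (illustrative): push a DataFrame through a two-step pipeline.
273 |     pipeline = DataPipeline()
274 |     pipeline.add_step(lambda df: df.assign(ret=df['close'].pct_change()))
275 |     pipeline.add_step(lambda df: df.dropna())
276 |     print(pipeline.process(pd.DataFrame({'close': [100.0, 101.5, 99.8, 102.3]})))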
--------------------------------------------------------------------------------
/deepchain/core/deepseek/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, List, Optional
3 | import torch
4 |
5 | class DeepSeekBase(ABC):
6 | """
7 | Base class for DeepSeek AI integration.
8 | Provides interface for interacting with DeepSeek's AI models and services.
9 | """
10 |
11 | @abstractmethod
12 | def initialize_model(self, model_name: str, **kwargs) -> None:
13 | """
14 | Initialize a DeepSeek model.
15 |
16 | Args:
17 | model_name: Name of the DeepSeek model to initialize
18 | **kwargs: Additional initialization parameters
19 | """
20 | pass
21 |
22 | @abstractmethod
23 | def get_model_info(self) -> Dict[str, Any]:
24 | """
25 | Get information about the currently loaded model.
26 |
27 | Returns:
28 | Dictionary containing model information
29 | """
30 | pass
31 |
32 | class DeepSeekRL(DeepSeekBase):
33 | """
34 | Interface for DeepSeek's reinforcement learning capabilities.
35 | """
36 |
37 | @abstractmethod
38 | def train_agent(self,
39 | env_config: Dict[str, Any],
40 | training_config: Dict[str, Any]) -> Dict[str, Any]:
41 | """
42 | Train an RL agent using DeepSeek's training infrastructure.
43 |
44 | Args:
45 | env_config: Environment configuration
46 | training_config: Training parameters
47 |
48 | Returns:
49 | Training results and metrics
50 | """
51 | pass
52 |
53 | @abstractmethod
54 | def optimize_strategy(self,
55 | strategy: Any,
56 | optimization_config: Dict[str, Any]) -> Any:
57 | """
58 | Optimize a trading strategy using DeepSeek's RL algorithms.
59 |
60 | Args:
61 | strategy: Strategy to optimize
62 | optimization_config: Optimization parameters
63 |
64 | Returns:
65 | Optimized strategy
66 | """
67 | pass
68 |
69 | @abstractmethod
70 | def evaluate_strategy(self,
71 | strategy: Any,
72 | evaluation_data: Dict[str, Any]) -> Dict[str, float]:
73 | """
74 | Evaluate a strategy using DeepSeek's evaluation metrics.
75 |
76 | Args:
77 | strategy: Strategy to evaluate
78 | evaluation_data: Data for evaluation
79 |
80 | Returns:
81 | Evaluation metrics
82 | """
83 | pass
84 |
85 | class DeepSeekDistill(DeepSeekBase):
86 | """
87 | Interface for DeepSeek's model distillation capabilities.
88 | """
89 |
90 | @abstractmethod
91 | def distill_model(self,
92 | teacher_model: Any,
93 | distillation_config: Dict[str, Any]) -> Any:
94 | """
95 | Distill a large model into a smaller one using DeepSeek's distillation techniques.
96 |
97 | Args:
98 | teacher_model: Original large model
99 | distillation_config: Distillation parameters
100 |
101 | Returns:
102 | Distilled model
103 | """
104 | pass
105 |
106 | @abstractmethod
107 | def optimize_distilled_model(self,
108 | model: Any,
109 | optimization_config: Dict[str, Any]) -> Any:
110 | """
111 | Optimize a distilled model for specific hardware targets.
112 |
113 | Args:
114 | model: Model to optimize
115 | optimization_config: Optimization parameters
116 |
117 | Returns:
118 | Optimized model
119 | """
120 | pass
121 |
122 | @abstractmethod
123 | def validate_distilled_model(self,
124 | model: Any,
125 | validation_data: Dict[str, Any]) -> Dict[str, float]:
126 | """
127 | Validate a distilled model's performance.
128 |
129 | Args:
130 | model: Model to validate
131 | validation_data: Data for validation
132 |
133 | Returns:
134 | Validation metrics
135 | """
136 | pass
137 |
138 | class DeepSeekAPI:
139 | """
140 | Main interface for interacting with DeepSeek's API services.
141 | """
142 |
143 | def __init__(self, api_key: str, api_url: Optional[str] = None):
144 | """
145 | Initialize DeepSeek API connection.
146 |
147 | Args:
148 | api_key: DeepSeek API key
149 | api_url: Optional custom API endpoint
150 | """
151 | self.api_key = api_key
152 | self.api_url = api_url or "https://api.deepseek.ai"
153 |
154 | def get_rl_interface(self) -> DeepSeekRL:
155 | """
156 | Get interface for DeepSeek's RL capabilities.
157 |
158 | Returns:
159 | DeepSeekRL interface
160 | """
161 | # Implementation would initialize and return a concrete DeepSeekRL instance
162 | pass
163 |
164 | def get_distill_interface(self) -> DeepSeekDistill:
165 | """
166 | Get interface for DeepSeek's model distillation capabilities.
167 |
168 | Returns:
169 | DeepSeekDistill interface
170 | """
171 | # Implementation would initialize and return a concrete DeepSeekDistill instance
172 | pass
173 |
174 | def validate_api_key(self) -> bool:
175 | """
176 | Validate the API key.
177 |
178 | Returns:
179 | Boolean indicating whether the API key is valid
180 | """
181 | # Implementation would validate the API key with DeepSeek's servers
182 | pass
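183 |
184 |
185 | if __name__ == "__main__":
186 |     # Usage sketch (illustrative): the key is read from the environment, and
187 |     # the endpoint falls back to the default defined above.
188 |     import os
189 |     api = DeepSeekAPI(api_key=os.environ.get("DEEPSEEK_API_KEY", ""))
190 |     print("Endpoint:", api.api_url)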
--------------------------------------------------------------------------------
/deepchain/core/edge/deployer.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, Optional
3 |
4 | class BaseDeployer(ABC):
5 | """
6 | Base class for edge deployment functionality.
7 | Defines interface for model compression and edge deployment.
8 | """
9 |
10 | @abstractmethod
11 | def compress_model(self,
12 | model: Any,
13 | target_size: Optional[int] = None,
14 | target_latency: Optional[float] = None) -> Any:
15 | """
16 | Compress the model for edge deployment.
17 |
18 | Args:
19 | model: Original model to compress
20 | target_size: Target model size in bytes
21 | target_latency: Target inference latency in seconds
22 |
23 | Returns:
24 | Compressed model
25 | """
26 | pass
27 |
28 | @abstractmethod
29 | def export_model(self, model: Any, format: str, path: str) -> None:
30 | """
31 | Export model to specific format for edge deployment.
32 |
33 | Args:
34 | model: Model to export
35 | format: Target format (e.g., 'onnx', 'tflite')
36 | path: Export path
37 | """
38 | pass
39 |
40 | @abstractmethod
41 | def validate_performance(self,
42 | model: Any,
43 | test_data: Dict[str, Any]) -> Dict[str, float]:
44 | """
45 | Validate performance of compressed model.
46 |
47 | Args:
48 | model: Compressed model to validate
49 | test_data: Test data for validation
50 |
51 | Returns:
52 | Dictionary containing performance metrics
53 | """
54 | pass
55 |
56 | class EdgeOptimizer(BaseDeployer):
57 | """
58 |     Abstract edge optimizer interface that adds quantization, pruning, and benchmarking hooks.
59 | """
60 |
61 | @abstractmethod
62 | def quantize_model(self, model: Any, **kwargs) -> Any:
63 | """
64 | Quantize model for edge deployment.
65 |
66 | Args:
67 | model: Model to quantize
68 | **kwargs: Additional quantization parameters
69 |
70 | Returns:
71 | Quantized model
72 | """
73 | pass
74 |
75 | @abstractmethod
76 | def prune_model(self, model: Any, **kwargs) -> Any:
77 | """
78 | Prune model for edge deployment.
79 |
80 | Args:
81 | model: Model to prune
82 | **kwargs: Additional pruning parameters
83 |
84 | Returns:
85 | Pruned model
86 | """
87 | pass
88 |
89 | @abstractmethod
90 | def benchmark_model(self,
91 | model: Any,
92 | device_specs: Dict[str, Any]) -> Dict[str, float]:
93 | """
94 | Benchmark model performance on target edge device.
95 |
96 | Args:
97 | model: Model to benchmark
98 | device_specs: Target device specifications
99 |
100 | Returns:
101 | Dictionary containing benchmark metrics
102 | """
103 | pass
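104 |
105 |
106 | # Minimal concrete sketch (illustrative, not part of the core API): covers the
107 | # BaseDeployer surface with an ONNX export step and placeholder metrics. The
108 | # example input shape passed to torch.onnx.export is an assumption.
109 | class ONNXDeployer(BaseDeployer):
110 |     def compress_model(self,
111 |                        model: Any,
112 |                        target_size: Optional[int] = None,
113 |                        target_latency: Optional[float] = None) -> Any:
114 |         # Placeholder: a real deployer would quantize/prune toward the targets.
115 |         return model
116 |
117 |     def export_model(self, model: Any, format: str, path: str) -> None:
118 |         if format != 'onnx':
119 |             raise ValueError(f"Unsupported format: {format}")
120 |         import torch  # local import keeps torch an optional dependency
121 |         torch.onnx.export(model, torch.randn(1, 8), path)
122 |
123 |     def validate_performance(self, model: Any, test_data: Dict[str, Any]) -> Dict[str, float]:
124 |         # Placeholder: a real deployer would time inference over test_data.
125 |         return {'latency_ms': 0.0, 'accuracy': 0.0}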
--------------------------------------------------------------------------------
/deepchain/core/exceptions.py:
--------------------------------------------------------------------------------
1 | """
2 | Core exceptions for the DeepChain framework.
3 | """
4 |
5 | class DeepChainError(Exception):
6 | """Base exception for all DeepChain errors."""
7 | pass
8 |
9 | class ModelError(DeepChainError):
10 | """Errors related to model operations."""
11 | pass
12 |
13 | class ValidationError(DeepChainError):
14 | """Errors related to validation operations."""
15 | pass
16 |
17 | class BlockchainError(DeepChainError):
18 | """Errors related to blockchain operations."""
19 | pass
20 |
21 | class APIError(DeepChainError):
22 | """Errors related to API operations."""
23 | pass
24 |
25 | class DataError(DeepChainError):
26 | """Errors related to data operations."""
27 | pass
28 |
29 | class ConfigurationError(DeepChainError):
30 | """Errors related to configuration."""
31 | pass
32 |
33 | class DeploymentError(DeepChainError):
34 | """Errors related to model deployment."""
35 | pass
36 |
37 | class SecurityError(DeepChainError):
38 | """Errors related to security operations."""
39 | pass
40 |
41 | class ResourceError(DeepChainError):
42 | """Errors related to resource management."""
43 | pass
44 |
45 | # Specific Exceptions
46 | class ModelNotFoundError(ModelError):
47 | """Raised when a model cannot be found."""
48 | pass
49 |
50 | class InvalidModelError(ModelError):
51 | """Raised when a model is invalid."""
52 | pass
53 |
54 | class ValidationFailedError(ValidationError):
55 | """Raised when validation fails."""
56 | pass
57 |
58 | class BlockchainConnectionError(BlockchainError):
59 | """Raised when blockchain connection fails."""
60 | pass
61 |
62 | class APIKeyError(APIError):
63 | """Raised when there are issues with API keys."""
64 | pass
65 |
66 | class DataSourceError(DataError):
67 | """Raised when there are issues with data sources."""
68 | pass
69 |
70 | class InvalidConfigError(ConfigurationError):
71 | """Raised when configuration is invalid."""
72 | pass
73 |
74 | class DeploymentFailedError(DeploymentError):
75 | """Raised when deployment fails."""
76 | pass
77 |
78 | class SecurityBreachError(SecurityError):
79 | """Raised when security is compromised."""
80 | pass
81 |
82 | class ResourceExhaustedError(ResourceError):
83 | """Raised when resources are exhausted."""
84 | pass
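85 |
86 | # Illustrative usage (`deploy_model` is a hypothetical caller): the
87 | # hierarchy lets callers handle a specific failure first and fall back
88 | # to the shared base class:
89 | #
90 | #     try:
91 | #         deploy_model(model)
92 | #     except DeploymentFailedError:
93 | #         ...  # retry or roll back the deployment
94 | #     except DeepChainError as e:
95 | #         ...  # catch-all for any other framework error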
--------------------------------------------------------------------------------
/deepchain/core/monitoring/alerts.py:
--------------------------------------------------------------------------------
1 | """
2 | Alert management and notification system.
3 | """
4 |
5 | import time
6 | import logging
7 | from typing import Dict, List, Any, Optional, Callable
8 | from dataclasses import dataclass
9 | from enum import Enum
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 | class AlertSeverity(Enum):
14 | """Alert severity levels."""
15 | INFO = "info"
16 | WARNING = "warning"
17 | CRITICAL = "critical"
18 |
19 | @dataclass
20 | class Alert:
21 | """Alert data structure."""
22 | id: str
23 | type: str
24 | severity: AlertSeverity
25 | message: str
26 | timestamp: float
27 | value: float
28 | threshold: float
29 | component: str
30 | metadata: Dict[str, Any]
31 | active: bool = True
32 |
33 | class AlertManager:
34 | """Manages system alerts and notifications."""
35 |
36 | def __init__(self):
37 | """Initialize alert manager."""
38 | self.thresholds = {}
39 | self.active_alerts = {}
40 | self.alert_history = []
41 | self.alert_handlers = []
42 |
43 | # Default thresholds
44 | self.default_thresholds = {
45 | 'latency': 100, # ms
46 | 'error_rate': 0.01, # 1%
47 | 'memory_usage': 0.9, # 90%
48 | 'cpu_usage': 0.8, # 80%
49 | 'disk_usage': 0.9, # 90%
50 | 'model_drift': 0.1 # 10%
51 | }
52 |
53 | def set_thresholds(self, thresholds: Dict[str, float]) -> None:
54 | """Set alert thresholds.
55 |
56 | Args:
57 | thresholds: Dictionary of metric thresholds
58 | """
59 | self.thresholds.update(thresholds)
60 |
61 |     def add_alert_handler(self, handler: Callable) -> None:
62 | """Add alert notification handler.
63 |
64 | Args:
65 | handler: Callback function for alert notifications
66 | """
67 | self.alert_handlers.append(handler)
68 |
69 | def check_metric(
70 | self,
71 | metric_name: str,
72 | value: float,
73 | component: str,
74 | metadata: Optional[Dict[str, Any]] = None
75 | ) -> None:
76 | """Check metric against thresholds and generate alerts.
77 |
78 | Args:
79 | metric_name: Name of the metric
80 | value: Current metric value
81 | component: Component name
82 | metadata: Additional alert metadata
83 | """
84 | threshold = self.thresholds.get(
85 | metric_name,
86 | self.default_thresholds.get(metric_name)
87 | )
88 |
89 | if threshold is None:
90 | return
91 |
92 | alert_id = f"{component}_{metric_name}"
93 |
94 | if value >= threshold:
95 | # Determine severity
96 | severity = self._get_severity(metric_name, value, threshold)
97 |
98 | # Create alert if not already active
99 | if alert_id not in self.active_alerts:
100 | alert = Alert(
101 | id=alert_id,
102 | type=metric_name,
103 | severity=severity,
104 | message=self._generate_message(
105 | metric_name,
106 | value,
107 | threshold,
108 | component
109 | ),
110 | timestamp=time.time(),
111 | value=value,
112 | threshold=threshold,
113 | component=component,
114 | metadata=metadata or {}
115 | )
116 |
117 | self.active_alerts[alert_id] = alert
118 | self.alert_history.append(alert)
119 |
120 | # Notify handlers
121 | self._notify_handlers(alert)
122 | else:
123 | # Clear alert if it exists
124 | if alert_id in self.active_alerts:
125 | alert = self.active_alerts[alert_id]
126 | alert.active = False
127 | del self.active_alerts[alert_id]
128 |
129 | def get_active_alerts(self) -> List[Dict[str, Any]]:
130 | """Get currently active alerts.
131 |
132 | Returns:
133 | List of active alert dictionaries
134 | """
135 | return [self._alert_to_dict(alert) for alert in self.active_alerts.values()]
136 |
137 | def get_alert_history(
138 | self,
139 | start_time: Optional[float] = None,
140 | end_time: Optional[float] = None
141 | ) -> List[Dict[str, Any]]:
142 | """Get historical alerts within time range.
143 |
144 | Args:
145 | start_time: Start timestamp
146 | end_time: End timestamp
147 |
148 | Returns:
149 | List of historical alert dictionaries
150 | """
151 | alerts = self.alert_history
152 |
153 | if start_time is not None:
154 | alerts = [a for a in alerts if a.timestamp >= start_time]
155 |
156 | if end_time is not None:
157 | alerts = [a for a in alerts if a.timestamp <= end_time]
158 |
159 | return [self._alert_to_dict(alert) for alert in alerts]
160 |
161 | def clear_alert(self, alert_id: str) -> None:
162 | """Clear active alert.
163 |
164 | Args:
165 | alert_id: ID of alert to clear
166 | """
167 | if alert_id in self.active_alerts:
168 | alert = self.active_alerts[alert_id]
169 | alert.active = False
170 | del self.active_alerts[alert_id]
171 |
172 | def _get_severity(
173 | self,
174 | metric_name: str,
175 | value: float,
176 | threshold: float
177 | ) -> AlertSeverity:
178 | """Determine alert severity based on value and threshold."""
179 |         # Critical thresholds (fallback: 2x the warning threshold)
180 | critical_thresholds = {
181 | 'latency': 200,
182 | 'error_rate': 0.02,
183 | 'memory_usage': 0.95,
184 | 'cpu_usage': 0.9,
185 | 'disk_usage': 0.95,
186 | 'model_drift': 0.2
187 | }
188 |
189 | critical = critical_thresholds.get(metric_name, threshold * 2)
190 |
191 | if value >= critical:
192 | return AlertSeverity.CRITICAL
193 | elif value >= threshold:
194 | return AlertSeverity.WARNING
195 | else:
196 | return AlertSeverity.INFO
197 |
198 | def _generate_message(
199 | self,
200 | metric_name: str,
201 | value: float,
202 | threshold: float,
203 | component: str
204 | ) -> str:
205 | """Generate alert message."""
206 | return (
207 | f"{component}: {metric_name} value {value:.2f} "
208 | f"exceeds threshold {threshold:.2f}"
209 | )
210 |
211 | def _alert_to_dict(self, alert: Alert) -> Dict[str, Any]:
212 | """Convert alert to dictionary."""
213 | return {
214 | 'id': alert.id,
215 | 'type': alert.type,
216 | 'severity': alert.severity.value,
217 | 'message': alert.message,
218 | 'timestamp': alert.timestamp,
219 | 'value': alert.value,
220 | 'threshold': alert.threshold,
221 | 'component': alert.component,
222 | 'metadata': alert.metadata,
223 | 'active': alert.active
224 | }
225 |
226 | def _notify_handlers(self, alert: Alert) -> None:
227 | """Notify all registered handlers of new alert."""
228 | alert_dict = self._alert_to_dict(alert)
229 | for handler in self.alert_handlers:
230 | try:
231 | handler(alert_dict)
232 | except Exception as e:
233 | logger.error(f"Error in alert handler: {e}")
234 |
235 | def reset(self) -> None:
236 | """Reset alert manager state."""
237 | self.active_alerts.clear()
238 | self.alert_history.clear()
--------------------------------------------------------------------------------
/deepchain/core/monitoring/metrics.py:
--------------------------------------------------------------------------------
1 | """
2 | Metrics collection and aggregation for system monitoring.
3 | """
4 |
5 | import time
6 | import numpy as np
7 | from typing import Dict, List, Any, Optional
8 | from collections import deque
9 | import psutil
10 | import logging
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 | class MetricsCollector:
15 | """Collects and aggregates system metrics."""
16 |
17 | def __init__(self, window_size: int = 1000):
18 | """Initialize metrics collector.
19 |
20 | Args:
21 | window_size: Size of sliding window for metrics
22 | """
23 | self.window_size = window_size
24 |
25 | # Initialize metric buffers
26 | self.latencies = deque(maxlen=window_size)
27 | self.memory_usages = deque(maxlen=window_size)
28 | self.predictions = deque(maxlen=window_size)
29 | self.errors = deque(maxlen=window_size)
30 |
31 | # System metrics
32 | self.system_metrics = {
33 | 'cpu_usage': deque(maxlen=window_size),
34 | 'memory_usage': deque(maxlen=window_size),
35 | 'disk_usage': deque(maxlen=window_size),
36 | 'network_io': deque(maxlen=window_size)
37 | }
38 |
39 | # Initialize timestamps
40 | self.last_update = time.time()
41 |
42 | def record_latency(self, latency: float) -> None:
43 | """Record inference latency.
44 |
45 | Args:
46 | latency: Latency in milliseconds
47 | """
48 | self.latencies.append(latency)
49 | self._update_timestamp()
50 |
51 | def record_memory_usage(self, usage: int) -> None:
52 | """Record memory usage.
53 |
54 | Args:
55 | usage: Memory usage in bytes
56 | """
57 | self.memory_usages.append(usage)
58 | self._update_timestamp()
59 |
60 | def record_prediction(self, prediction: float) -> None:
61 | """Record model prediction.
62 |
63 | Args:
64 | prediction: Prediction value
65 | """
66 | self.predictions.append(prediction)
67 | self._update_timestamp()
68 |
69 | def record_error(self, error: float) -> None:
70 | """Record prediction error.
71 |
72 | Args:
73 | error: Error value
74 | """
75 | self.errors.append(error)
76 | self._update_timestamp()
77 |
78 | def record_system_metrics(self, metrics: Dict[str, float]) -> None:
79 | """Record system metrics.
80 |
81 | Args:
82 | metrics: Dictionary of system metrics
83 | """
84 | for key, value in metrics.items():
85 | if key in self.system_metrics:
86 | self.system_metrics[key].append(value)
87 | self._update_timestamp()
88 |
89 | def get_statistics(self) -> Dict[str, float]:
90 | """Get aggregated statistics.
91 |
92 | Returns:
93 | Dictionary containing various statistics
94 | """
95 | stats = {}
96 |
97 | # Latency statistics
98 | if self.latencies:
99 | stats.update({
100 | 'avg_latency': np.mean(self.latencies),
101 | 'p95_latency': np.percentile(self.latencies, 95),
102 | 'p99_latency': np.percentile(self.latencies, 99),
103 | 'max_latency': np.max(self.latencies)
104 | })
105 |
106 | # Memory statistics
107 | if self.memory_usages:
108 | stats.update({
109 | 'avg_memory_usage': np.mean(self.memory_usages),
110 | 'max_memory_usage': np.max(self.memory_usages)
111 | })
112 |
113 | # Prediction statistics
114 | if self.predictions:
115 | stats.update({
116 | 'prediction_count': len(self.predictions),
117 | 'prediction_rate': len(self.predictions) / self.window_size
118 | })
119 |
120 | # Error statistics
121 | if self.errors:
122 | stats.update({
123 | 'avg_error': np.mean(self.errors),
124 | 'error_rate': len(self.errors) / self.window_size
125 | })
126 |
127 | # Add system metrics
128 | for key, values in self.system_metrics.items():
129 | if values:
130 | stats[f'avg_{key}'] = np.mean(values)
131 | stats[f'max_{key}'] = np.max(values)
132 |
133 | return stats
134 |
135 | def get_system_health(self) -> Dict[str, Any]:
136 | """Get system health status.
137 |
138 | Returns:
139 | Dictionary containing health status information
140 | """
141 | stats = self.get_statistics()
142 |
143 | # Define health thresholds
144 | thresholds = {
145 | 'latency': 100, # ms
146 | 'error_rate': 0.01, # 1%
147 | 'memory_usage': 0.9, # 90%
148 | 'cpu_usage': 0.8 # 80%
149 | }
150 |
151 | # Check component status
152 | components = {}
153 | status = 'healthy'
154 |
155 | # Check latency
156 | if 'avg_latency' in stats:
157 | components['latency'] = {
158 | 'status': 'healthy' if stats['avg_latency'] < thresholds['latency'] else 'warning',
159 | 'value': stats['avg_latency']
160 | }
161 | if stats['avg_latency'] >= thresholds['latency']:
162 | status = 'warning'
163 |
164 | # Check error rate
165 | if 'error_rate' in stats:
166 | components['errors'] = {
167 | 'status': 'healthy' if stats['error_rate'] < thresholds['error_rate'] else 'critical',
168 | 'value': stats['error_rate']
169 | }
170 | if stats['error_rate'] >= thresholds['error_rate']:
171 | status = 'critical'
172 |
173 | # Check system resources
174 | if 'avg_memory_usage' in stats:
175 | components['memory'] = {
176 | 'status': 'healthy' if stats['avg_memory_usage'] < thresholds['memory_usage'] else 'warning',
177 | 'value': stats['avg_memory_usage']
178 | }
179 | if stats['avg_memory_usage'] >= thresholds['memory_usage']:
180 | status = 'warning'
181 |
182 | if 'avg_cpu_usage' in stats:
183 | components['cpu'] = {
184 | 'status': 'healthy' if stats['avg_cpu_usage'] < thresholds['cpu_usage'] else 'warning',
185 | 'value': stats['avg_cpu_usage']
186 | }
187 | if stats['avg_cpu_usage'] >= thresholds['cpu_usage']:
188 | status = 'warning'
189 |
190 | return {
191 | 'status': status,
192 | 'components': components,
193 | 'last_update': self.last_update,
194 | 'metrics': stats
195 | }
196 |
197 | def _update_timestamp(self) -> None:
198 | """Update last update timestamp."""
199 | self.last_update = time.time()
200 |
201 | def reset(self) -> None:
202 | """Reset all metrics."""
203 | self.latencies.clear()
204 | self.memory_usages.clear()
205 | self.predictions.clear()
206 | self.errors.clear()
207 | for buffer in self.system_metrics.values():
208 | buffer.clear()
209 | self._update_timestamp()
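210 |
211 | # Illustrative: `record_system_metrics` can be fed from psutil (imported
212 | # above), normalizing percentages to the 0-1 range used by the health
213 | # thresholds:
214 | #
215 | #     collector.record_system_metrics({
216 | #         'cpu_usage': psutil.cpu_percent() / 100.0,
217 | #         'memory_usage': psutil.virtual_memory().percent / 100.0,
218 | #         'disk_usage': psutil.disk_usage('/').percent / 100.0,
219 | #     })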
--------------------------------------------------------------------------------
/deepchain/core/monitoring/performance.py:
--------------------------------------------------------------------------------
1 | """
2 | Performance tracking and analysis system.
3 | """
4 |
5 | import time
6 | import numpy as np
7 | from typing import Dict, List, Any, Optional
8 | from collections import deque
9 | import logging
10 | from scipy import stats
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 | class PerformanceTracker:
15 | """Tracks and analyzes system performance metrics."""
16 |
17 | def __init__(self, window_size: int = 1000):
18 | """Initialize performance tracker.
19 |
20 | Args:
21 | window_size: Size of sliding window for metrics
22 | """
23 | self.window_size = window_size
24 |
25 | # Initialize metric buffers
26 | self.metrics = {
27 | 'latency': deque(maxlen=window_size),
28 | 'throughput': deque(maxlen=window_size),
29 | 'error_rate': deque(maxlen=window_size),
30 | 'memory_usage': deque(maxlen=window_size),
31 | 'cpu_usage': deque(maxlen=window_size)
32 | }
33 |
34 | # Performance baseline
35 | self.baseline = {}
36 |
37 | # Drift detection
38 | self.drift_thresholds = {
39 | 'latency': 0.2, # 20% change
40 | 'throughput': 0.2,
41 | 'error_rate': 0.1,
42 | 'memory_usage': 0.3,
43 | 'cpu_usage': 0.3
44 | }
45 |
46 | # Initialize timestamps
47 | self.last_update = time.time()
48 |
49 | def set_baseline(self, baseline: Dict[str, float]) -> None:
50 | """Set performance baseline.
51 |
52 | Args:
53 | baseline: Dictionary of baseline metrics
54 | """
55 | self.baseline = baseline.copy()
56 |
57 | def record_metrics(self, metrics: Dict[str, float]) -> None:
58 | """Record performance metrics.
59 |
60 | Args:
61 | metrics: Dictionary of current metrics
62 | """
63 | for key, value in metrics.items():
64 | if key in self.metrics:
65 | self.metrics[key].append(value)
66 |
67 | self.last_update = time.time()
68 |
69 | def get_performance_report(self) -> Dict[str, Any]:
70 | """Get comprehensive performance report.
71 |
72 | Returns:
73 | Dictionary containing performance analysis
74 | """
75 | report = {
76 | 'current_metrics': self._get_current_metrics(),
77 | 'trends': self._analyze_trends(),
78 | 'model_drift': self._detect_drift(),
79 | 'anomalies': self._detect_anomalies(),
80 | 'statistics': self._compute_statistics(),
81 | 'last_update': self.last_update
82 | }
83 |
84 | # Add performance score
85 | report['performance_score'] = self._compute_performance_score(report)
86 |
87 | return report
88 |
89 | def _get_current_metrics(self) -> Dict[str, float]:
90 | """Get current metric values."""
91 | current = {}
92 | for key, values in self.metrics.items():
93 | if values:
94 | current[key] = values[-1]
95 | return current
96 |
97 | def _analyze_trends(self) -> Dict[str, Dict[str, float]]:
98 | """Analyze metric trends."""
99 | trends = {}
100 | for key, values in self.metrics.items():
101 | if len(values) > 1:
102 | values_array = np.array(values)
103 | x = np.arange(len(values_array))
104 |
105 | # Compute linear regression
106 | slope, intercept, r_value, p_value, std_err = stats.linregress(
107 | x,
108 | values_array
109 | )
110 |
111 | trends[key] = {
112 | 'slope': slope,
113 | 'r_squared': r_value ** 2,
114 | 'p_value': p_value,
115 | 'trend': 'increasing' if slope > 0 else 'decreasing'
116 | }
117 | return trends
118 |
119 | def _detect_drift(self) -> Dict[str, Dict[str, Any]]:
120 | """Detect model drift from baseline."""
121 | drift = {}
122 | for key, values in self.metrics.items():
123 | if values and key in self.baseline:
124 | baseline_value = self.baseline[key]
125 | current_value = np.mean(values)
126 |
127 | relative_change = abs(current_value - baseline_value) / baseline_value
128 | threshold = self.drift_thresholds.get(key, 0.2)
129 |
130 | drift[key] = {
131 | 'baseline': baseline_value,
132 | 'current': current_value,
133 | 'change': relative_change,
134 | 'threshold': threshold,
135 | 'significant': relative_change > threshold
136 | }
137 | return drift
138 |
139 | def _detect_anomalies(self) -> Dict[str, List[Dict[str, Any]]]:
140 | """Detect metric anomalies using statistical methods."""
141 | anomalies = {}
142 | for key, values in self.metrics.items():
143 | if len(values) > 30: # Need enough data for statistical significance
144 | values_array = np.array(values)
145 | mean = np.mean(values_array)
146 | std = np.std(values_array)
147 |
148 |                 # Detect values outside 3 standard deviations (std == 0 -> none)
149 |                 z_scores = np.abs((values_array - mean) / std) if std > 0 else np.zeros_like(values_array)
150 | anomaly_indices = np.where(z_scores > 3)[0]
151 |
152 | if len(anomaly_indices) > 0:
153 | anomalies[key] = [
154 | {
155 | 'index': int(i),
156 | 'value': float(values_array[i]),
157 | 'z_score': float(z_scores[i])
158 | }
159 | for i in anomaly_indices
160 | ]
161 | return anomalies
162 |
163 |     def _compute_statistics(self) -> Dict[str, Dict[str, float]]:
164 |         """Compute detailed statistics for each metric."""
165 |         summary = {}  # local name avoids shadowing scipy's `stats` import
166 |         for key, values in self.metrics.items():
167 |             if values:
168 |                 values_array = np.array(values)
169 |                 summary[key] = {
170 |                     'mean': np.mean(values_array),
171 |                     'std': np.std(values_array),
172 |                     'min': np.min(values_array),
173 |                     'max': np.max(values_array),
174 |                     'p25': np.percentile(values_array, 25),
175 |                     'p50': np.percentile(values_array, 50),
176 |                     'p75': np.percentile(values_array, 75),
177 |                     'p95': np.percentile(values_array, 95),
178 |                     'p99': np.percentile(values_array, 99)
179 |                 }
180 |         return summary
181 |
182 | def _compute_performance_score(self, report: Dict[str, Any]) -> float:
183 | """Compute overall performance score."""
184 | score = 100.0
185 |
186 | # Penalize for drift
187 | drift = report['model_drift']
188 | for key, info in drift.items():
189 | if info['significant']:
190 | score -= 10 * (info['change'] / info['threshold'])
191 |
192 | # Penalize for anomalies
193 | anomalies = report['anomalies']
194 | for key, anomaly_list in anomalies.items():
195 | score -= len(anomaly_list) * 5
196 |
197 | # Penalize for negative trends
198 | trends = report['trends']
199 | for key, trend_info in trends.items():
200 | if trend_info['trend'] == 'increasing' and key in ['latency', 'error_rate']:
201 | score -= 5 * abs(trend_info['slope'])
202 | elif trend_info['trend'] == 'decreasing' and key == 'throughput':
203 | score -= 5 * abs(trend_info['slope'])
204 |
205 | return max(0.0, min(100.0, score))
206 |
207 | def reset(self) -> None:
208 | """Reset performance tracker state."""
209 | for buffer in self.metrics.values():
210 | buffer.clear()
211 | self.baseline.clear()
212 | self.last_update = time.time()
--------------------------------------------------------------------------------
/deepchain/core/strategy/base.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, List, Optional
3 |
4 | class BaseStrategy(ABC):
5 | """
6 | Base class for all trading strategies in DeepChain framework.
7 | This abstract class defines the interface that all strategies must implement.
8 | """
9 |
10 | @abstractmethod
11 | def train(self, data: Dict[str, Any], **kwargs) -> None:
12 | """
13 | Train the strategy model with historical data.
14 |
15 | Args:
16 | data: Dictionary containing training data
17 | **kwargs: Additional training parameters
18 | """
19 | pass
20 |
21 | @abstractmethod
22 | def predict(self, state: Dict[str, Any]) -> Dict[str, Any]:
23 | """
24 | Generate trading decisions based on current market state.
25 |
26 | Args:
27 | state: Current market state information
28 |
29 | Returns:
30 | Dictionary containing trading decisions
31 | """
32 | pass
33 |
34 | @abstractmethod
35 | def save(self, path: str) -> None:
36 | """
37 | Save strategy model to disk.
38 |
39 | Args:
40 | path: Path to save the model
41 | """
42 | pass
43 |
44 | @abstractmethod
45 | def load(self, path: str) -> None:
46 | """
47 | Load strategy model from disk.
48 |
49 | Args:
50 | path: Path to load the model from
51 | """
52 | pass
53 |
54 | class RLStrategy(BaseStrategy):
55 | """
56 | Base class for reinforcement learning based trading strategies.
57 | Extends BaseStrategy with RL-specific methods.
58 | """
59 |
60 | @abstractmethod
61 | def get_action_space(self) -> Dict[str, Any]:
62 | """
63 | Define the action space for the RL agent.
64 |
65 | Returns:
66 | Dictionary describing the action space
67 | """
68 | pass
69 |
70 | @abstractmethod
71 | def get_state_space(self) -> Dict[str, Any]:
72 | """
73 | Define the state space for the RL agent.
74 |
75 | Returns:
76 | Dictionary describing the state space
77 | """
78 | pass
79 |
80 | @abstractmethod
81 | def get_reward(self, state: Dict[str, Any], action: Dict[str, Any]) -> float:
82 | """
83 | Calculate reward for the current state-action pair.
84 |
85 | Args:
86 | state: Current state information
87 | action: Action taken by the agent
88 |
89 | Returns:
90 | Reward value
91 | """
92 | pass
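93 |
94 | # Illustrative reward shaping (hypothetical state/action fields): a
95 | # concrete implementation might return, e.g.,
96 | #     state['portfolio_return'] - 0.001 * abs(action['position_change'])
97 | # rewarding profit while penalizing excessive turnover.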
--------------------------------------------------------------------------------
/deepchain/utils/tools.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for the DeepChain framework.
3 | """
4 |
5 | import logging
6 | import json
7 | import hashlib
8 | from typing import Any, Dict, List, Optional
9 | from pathlib import Path
10 | import yaml
11 |
12 | # Configure logging
13 | logger = logging.getLogger(__name__)
14 |
15 | def setup_logging(
16 | level: int = logging.INFO,
17 | log_file: Optional[str] = None
18 | ) -> None:
19 | """
20 | Set up logging configuration.
21 |
22 | Args:
23 | level: Logging level
24 | log_file: Optional log file path
25 | """
26 | config = {
27 | 'level': level,
28 | 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
29 | }
30 |
31 | if log_file:
32 | config['filename'] = log_file
33 |
34 | logging.basicConfig(**config)
35 |
36 | def load_config(config_path: str) -> Dict[str, Any]:
37 | """
38 | Load configuration from YAML file.
39 |
40 | Args:
41 | config_path: Path to config file
42 |
43 | Returns:
44 | Configuration dictionary
45 | """
46 | try:
47 | with open(config_path, 'r') as f:
48 | return yaml.safe_load(f)
49 | except Exception as e:
50 | logger.error(f"Failed to load config from {config_path}: {str(e)}")
51 | raise
52 |
53 | def save_config(config: Dict[str, Any], config_path: str) -> None:
54 | """
55 | Save configuration to YAML file.
56 |
57 | Args:
58 | config: Configuration dictionary
59 | config_path: Path to save config
60 | """
61 | try:
62 | with open(config_path, 'w') as f:
63 | yaml.safe_dump(config, f)
64 | except Exception as e:
65 | logger.error(f"Failed to save config to {config_path}: {str(e)}")
66 | raise
67 |
68 | def compute_hash(data: Any) -> str:
69 | """
70 | Compute SHA256 hash of data.
71 |
72 | Args:
73 | data: Data to hash
74 |
75 | Returns:
76 | Hash string
77 | """
78 | try:
79 | data_str = json.dumps(data, sort_keys=True)
80 | return hashlib.sha256(data_str.encode()).hexdigest()
81 | except Exception as e:
82 | logger.error(f"Failed to compute hash: {str(e)}")
83 | raise
84 |
85 | def ensure_directory(path: str) -> None:
86 | """
87 | Ensure directory exists, create if not.
88 |
89 | Args:
90 | path: Directory path
91 | """
92 | Path(path).mkdir(parents=True, exist_ok=True)
93 |
94 | def validate_config(config: Dict[str, Any], schema: Dict[str, Any]) -> bool:
95 | """
96 | Validate configuration against schema.
97 |
98 | Args:
99 | config: Configuration to validate
100 | schema: Validation schema
101 |
102 | Returns:
103 | True if valid, False otherwise
104 | """
105 | # Simple schema validation
106 | try:
107 | for key, value_type in schema.items():
108 | if key not in config:
109 | return False
110 | if not isinstance(config[key], value_type):
111 | return False
112 | return True
113 | except Exception as e:
114 | logger.error(f"Config validation failed: {str(e)}")
115 | return False
116 |
117 | def format_size(size_bytes: int) -> str:
118 | """
119 | Format size in bytes to human readable string.
120 |
121 | Args:
122 | size_bytes: Size in bytes
123 |
124 | Returns:
125 | Formatted string
126 | """
127 | for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
128 | if size_bytes < 1024:
129 | return f"{size_bytes:.2f}{unit}"
130 | size_bytes /= 1024
131 | return f"{size_bytes:.2f}PB"
132 |
133 | def parse_timeframe(timeframe: str) -> int:
134 | """
135 | Parse timeframe string to seconds.
136 |
137 | Args:
138 | timeframe: Timeframe string (e.g., '1h', '1d')
139 |
140 | Returns:
141 | Seconds
142 | """
143 | units = {
144 | 's': 1,
145 | 'm': 60,
146 | 'h': 3600,
147 | 'd': 86400,
148 | 'w': 604800
149 | }
150 |
151 | try:
152 | value = int(timeframe[:-1])
153 | unit = timeframe[-1].lower()
154 | return value * units[unit]
155 | except Exception as e:
156 | logger.error(f"Failed to parse timeframe {timeframe}: {str(e)}")
157 | raise
158 |
159 | def retry(max_attempts: int = 3, delay: float = 1.0):
160 | """
161 | Retry decorator for functions.
162 |
163 | Args:
164 | max_attempts: Maximum number of attempts
165 | delay: Delay between attempts in seconds
166 | """
167 | from functools import wraps
168 | import time
169 |
170 | def decorator(func):
171 | @wraps(func)
172 | def wrapper(*args, **kwargs):
173 | attempts = 0
174 | while attempts < max_attempts:
175 | try:
176 | return func(*args, **kwargs)
177 | except Exception as e:
178 | attempts += 1
179 | if attempts == max_attempts:
180 | raise
181 | logger.warning(f"Attempt {attempts} failed: {str(e)}")
182 | time.sleep(delay)
183 | return wrapper
184 | return decorator
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | # Main application service
5 | app:
6 | build: .
7 | volumes:
8 | - .:/app
9 | environment:
10 | - PYTHONPATH=/app
11 | - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY}
12 | - BLOCKCHAIN_NODE_URL=${BLOCKCHAIN_NODE_URL}
13 | depends_on:
14 | - redis
15 | - postgres
16 | networks:
17 | - deepchain-network
18 |
19 | # Redis for caching and message queue
20 | redis:
21 | image: redis:6-alpine
22 | ports:
23 | - "6379:6379"
24 | volumes:
25 | - redis-data:/data
26 | networks:
27 | - deepchain-network
28 |
29 | # PostgreSQL for data storage
30 | postgres:
31 | image: postgres:13-alpine
32 | environment:
33 | - POSTGRES_USER=${DB_USER:-deepchain}
34 | - POSTGRES_PASSWORD=${DB_PASSWORD:-deepchain}
35 | - POSTGRES_DB=${DB_NAME:-deepchain}
36 | ports:
37 | - "5432:5432"
38 | volumes:
39 | - postgres-data:/var/lib/postgresql/data
40 | networks:
41 | - deepchain-network
42 |
43 | # Monitoring service
44 | prometheus:
45 | image: prom/prometheus:v2.30.3
46 | volumes:
47 | - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
48 | - prometheus-data:/prometheus
49 | ports:
50 | - "9090:9090"
51 | networks:
52 | - deepchain-network
53 |
54 | # Visualization dashboard
55 | grafana:
56 | image: grafana/grafana:8.2.2
57 | depends_on:
58 | - prometheus
59 | ports:
60 | - "3000:3000"
61 | volumes:
62 | - grafana-data:/var/lib/grafana
63 | networks:
64 | - deepchain-network
65 |
66 | volumes:
67 | redis-data:
68 | postgres-data:
69 | prometheus-data:
70 | grafana-data:
71 |
72 | networks:
73 | deepchain-network:
74 | driver: bridge
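75 |
76 | # Illustrative .env file consumed by the variables above (values are
77 | # placeholders):
78 | #   DEEPSEEK_API_KEY=your-api-key
79 | #   BLOCKCHAIN_NODE_URL=https://your-node-url
80 | #   DB_USER=deepchain
81 | #   DB_PASSWORD=change-me
82 | #   DB_NAME=deepchain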
--------------------------------------------------------------------------------
/docs/api_reference.md:
--------------------------------------------------------------------------------
1 | # DeepChain API Reference
2 |
3 | ## Core Modules
4 |
5 | ### Strategy Module
6 | The strategy module provides interfaces for implementing trading strategies.
7 |
8 | #### BaseStrategy
9 | Base class for all trading strategies.
10 |
11 | ```python
12 | class BaseStrategy:
13 |     def train(self, data: Dict[str, Any], **kwargs) -> None:
14 | """Train the strategy."""
15 |
16 |     def predict(self, state: Dict[str, Any]) -> Dict[str, Any]:
17 |         """Generate trading decisions for the given state."""
18 |
19 | def save(self, path: str) -> None:
20 | """Save strategy to disk."""
21 |
22 | def load(self, path: str) -> None:
23 | """Load strategy from disk."""
24 | ```
25 |
26 | #### RLStrategy
27 | Base class for reinforcement learning strategies.
28 |
29 | ```python
30 | class RLStrategy(BaseStrategy):
31 | def get_action_space(self) -> Dict[str, Any]:
32 | """Get action space definition."""
33 |
34 | def get_state_space(self) -> Dict[str, Any]:
35 | """Get state space definition."""
36 |
37 |     def get_reward(self, state: Dict[str, Any], action: Dict[str, Any]) -> float:
38 | """Calculate reward for state-action pair."""
39 | ```
40 |
41 | ### Blockchain Module
42 | The blockchain module provides interfaces for strategy validation.
43 |
44 | #### BaseValidator
45 | Base class for blockchain validators.
46 |
47 | ```python
48 | class BaseValidator:
49 | def validate_strategy(self, strategy_id: str) -> bool:
50 | """Validate a strategy."""
51 |
52 | def register_strategy(self, strategy_id: str, metadata: Dict[str, Any]) -> str:
53 | """Register a strategy on the blockchain."""
54 |
55 | def verify_execution(self, strategy_id: str, data: Dict[str, Any], proof: Any) -> bool:
56 | """Verify strategy execution."""
57 | ```
58 |
59 | #### ZKValidator
60 | Zero-knowledge proof based validator.
61 |
62 | ```python
63 | class ZKValidator(BaseValidator):
64 | def generate_proof(self, strategy_id: str, data: Dict[str, Any]) -> Any:
65 | """Generate execution proof."""
66 |
67 | def verify_proof(self, proof: Any) -> bool:
68 | """Verify zero-knowledge proof."""
69 | ```
70 |
71 | ### Edge Module
72 | The edge module provides interfaces for model deployment.
73 |
74 | #### BaseDeployer
75 | Base class for edge deployment.
76 |
77 | ```python
78 | class BaseDeployer:
79 | def compress_model(self, model: Any, target_size: int) -> Any:
80 | """Compress model to target size."""
81 |
82 | def export_model(self, model: Any, format: str, path: str) -> None:
83 | """Export model to specified format."""
84 |
85 | def validate_performance(self, model: Any, data: Dict[str, Any]) -> Dict[str, float]:
86 | """Validate model performance."""
87 | ```
88 |
89 | #### EdgeOptimizer
90 | Advanced edge deployment optimizer.
91 |
92 | ```python
93 | class EdgeOptimizer(BaseDeployer):
94 |     def quantize_model(self, model: Any, **kwargs) -> Any:
95 |         """Quantize model weights."""
96 |
97 |     def prune_model(self, model: Any, **kwargs) -> Any:
98 |         """Prune model weights."""
99 | ```
100 |
101 | ### Monitoring Module
102 | The monitoring module provides interfaces for system monitoring.
103 |
104 | #### MetricsCollector
105 | Collects and aggregates system metrics.
106 |
107 | ```python
108 | class MetricsCollector:
109 | def record_latency(self, latency: float) -> None:
110 | """Record prediction latency."""
111 |
112 |     def record_prediction(self, prediction: float) -> None:
113 |         """Record model prediction."""
114 |
115 | def record_error(self, error: float) -> None:
116 | """Record prediction error."""
117 |
118 | def record_system_metrics(self, metrics: Dict[str, float]) -> None:
119 | """Record system metrics."""
120 |
121 | def get_statistics(self) -> Dict[str, float]:
122 | """Get aggregated statistics."""
123 |
124 | def get_system_health(self) -> Dict[str, Any]:
125 | """Get system health status."""
126 | ```
127 |
128 | #### AlertManager
129 | Manages system alerts and notifications.
130 |
131 | ```python
132 | class AlertManager:
133 | def set_thresholds(self, thresholds: Dict[str, float]) -> None:
134 | """Set alert thresholds."""
135 |
136 | def add_alert_handler(self, handler: Callable) -> None:
137 | """Add alert handler function."""
138 |
139 | def check_metric(self, metric: str, value: float, component: str) -> None:
140 | """Check metric against threshold."""
141 |
142 | def get_active_alerts(self) -> List[Dict[str, Any]]:
143 | """Get currently active alerts."""
144 | ```
145 |
146 | #### PerformanceTracker
147 | Tracks and analyzes system performance.
148 |
149 | ```python
150 | class PerformanceTracker:
151 | def set_baseline(self, baseline: Dict[str, float]) -> None:
152 | """Set performance baseline."""
153 |
154 | def record_metrics(self, metrics: Dict[str, float]) -> None:
155 | """Record performance metrics."""
156 |
157 | def get_performance_report(self) -> Dict[str, Any]:
158 | """Get performance analysis report."""
159 | ```
160 |
161 | ### DeepSeek Integration
162 | The DeepSeek module provides integration with DeepSeek AI services.
163 |
164 | #### DeepSeekAPI
165 | Main interface for DeepSeek AI services.
166 |
167 | ```python
168 | class DeepSeekAPI:
169 | def __init__(self, api_key: str):
170 | """Initialize DeepSeek API client."""
171 |
172 | def get_rl_interface(self) -> DeepSeekRL:
173 | """Get reinforcement learning interface."""
174 |
175 | def get_distill_interface(self) -> DeepSeekDistill:
176 | """Get model distillation interface."""
177 | ```
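178 |
179 | A minimal sketch (assuming `DeepSeekAPI` is importable from
180 | `deepchain.core.deepseek` and the key is supplied via the
181 | `DEEPSEEK_API_KEY` environment variable, as in `docker-compose.yml`):
182 |
183 | ```python
184 | import os
185 | from deepchain.core.deepseek import DeepSeekAPI
186 |
187 | # Read the key from the environment rather than hard-coding it
188 | api = DeepSeekAPI(api_key=os.environ["DEEPSEEK_API_KEY"])
189 |
190 | rl = api.get_rl_interface()            # reinforcement learning interface
191 | distill = api.get_distill_interface() # model distillation interface
192 | ```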
178 |
179 | ## Usage Examples
180 |
181 | For detailed usage examples, please refer to the following files in the `examples/` directory:
182 | - `monitoring_example.py`: Demonstrates monitoring system usage
183 | - `advanced_strategy.py`: Shows implementation of an advanced trading strategy
184 | - `complete_example.py`: Provides a complete workflow example
185 |
186 | ## Error Handling
187 |
188 | The framework defines several custom exceptions:
189 |
190 | - `DeepChainError`: Base exception class
191 | - `ModelError`: Model-related errors
192 | - `ValidationError`: Validation failures
193 | - `BlockchainError`: Blockchain interaction errors
194 | - `DataError`: Data processing errors
195 | - `ConfigurationError`: Configuration errors
196 | - `DeploymentError`: Model deployment errors
197 |
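198 | Because every exception derives from `DeepChainError`, handlers can
199 | catch a specific failure first and fall back to the base class. A short
200 | sketch (assuming `validator`, `strategy_id` and `logger` are defined
201 | elsewhere):
202 |
203 | ```python
204 | from deepchain.core.exceptions import DeepChainError, ValidationFailedError
205 |
206 | try:
207 |     validator.validate_strategy(strategy_id)
208 | except ValidationFailedError as e:
209 |     logger.warning(f"Validation failed: {e}")         # specific failure
210 | except DeepChainError as e:
211 |     logger.error(f"Unexpected framework error: {e}")  # catch-all
212 | ```
213 |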
198 | ## Best Practices
199 |
200 | 1. Strategy Implementation
201 | - Implement all abstract methods
202 | - Handle edge cases
203 | - Add proper logging
204 | - Include error handling
205 |
206 | 2. Blockchain Integration
207 | - Always verify transactions
208 | - Handle network errors
209 |    - Implement retry logic (see the sketch after this list)
210 | - Store proofs securely
211 |
212 | 3. Edge Deployment
213 | - Test on target devices
214 | - Monitor performance
215 | - Handle resource constraints
216 | - Implement fallback logic
217 |
218 | 4. Monitoring
219 | - Set appropriate thresholds
220 | - Configure alerts
221 | - Track key metrics
222 | - Regular health checks
223 |
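224 | The retry guidance above can reuse the `retry` decorator shipped in
225 | `deepchain/utils/tools.py`; a sketch with a hypothetical
226 | `submit_verification` helper:
227 |
228 | ```python
229 | from deepchain.utils.tools import retry
230 |
231 | @retry(max_attempts=3, delay=2.0)
232 | def submit_verification(validator, strategy_id, data, proof):
233 |     # Any exception triggers up to two retries before re-raising
234 |     return validator.verify_execution(strategy_id, data, proof)
235 | ```
236 |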
224 | ## Examples
225 |
226 | See the `examples/` directory for implementation examples:
227 |
228 | - `simple_strategy.py`: Basic RL strategy
229 | - `simple_validator.py`: Basic validator
230 | - `simple_deployer.py`: Basic deployer
231 | - `usage_example.py`: Complete usage example
--------------------------------------------------------------------------------
/docs/blockchain_integration.md:
--------------------------------------------------------------------------------
1 | # Blockchain Integration Guide
2 |
3 | ## Overview
4 |
5 | This guide explains how to integrate your trading strategies with various blockchain networks using DeepChain.
6 |
7 | ## Table of Contents
8 |
9 | 1. [Supported Chains](#supported-chains)
10 | 2. [Setting Up Validators](#setting-up-validators)
11 | 3. [Strategy Registration](#strategy-registration)
12 | 4. [Execution Verification](#execution-verification)
13 | 5. [Zero-Knowledge Proofs](#zero-knowledge-proofs)
14 | 6. [Security Considerations](#security-considerations)
15 |
16 | ## Supported Chains
17 |
18 | DeepChain currently supports the following blockchains:
19 | - Solana
20 | - Ethereum
21 | - BNB Chain
22 | - Base
23 |
24 | ## Setting Up Validators
25 |
26 | Initialize blockchain validators:
27 |
28 | ```python
29 | from deepchain.core.blockchain import (
30 | SolanaValidator,
31 | EthereumValidator
32 | )
33 |
34 | # For Solana
35 | solana_validator = SolanaValidator(
36 | rpc_url="your_rpc_url",
37 | private_key="your_private_key"
38 | )
39 |
40 | # For Ethereum
41 | eth_validator = EthereumValidator(
42 | rpc_url="your_rpc_url",
43 | private_key="your_private_key"
44 | )
45 | ```
46 |
47 | ## Strategy Registration
48 |
49 | Register your strategy on the blockchain:
50 |
51 | ```python
52 | # Register strategy
53 | tx_hash = validator.register_strategy(
54 | strategy,
55 | metadata={
56 | "name": "MyStrategy",
57 | "version": "1.0.0",
58 | "description": "My trading strategy"
59 | }
60 | )
61 |
62 | # Verify registration
63 | status = validator.verify_registration(tx_hash)
64 | ```
65 |
66 | ## Execution Verification
67 |
68 | Verify strategy execution:
69 |
70 | ```python
71 | # Generate execution proof
72 | proof = validator.generate_proof(
73 | strategy_id,
74 | execution_data
75 | )
76 |
77 | # Verify execution
78 | is_valid = validator.verify_execution(
79 | strategy_id,
80 | execution_data,
81 | proof
82 | )
83 | ```
84 |
85 | ## Zero-Knowledge Proofs
86 |
87 | Implement zero-knowledge proofs:
88 |
89 | ```python
90 | from deepchain.core.blockchain import ZKProver
91 |
92 | # Generate ZK proof
93 | prover = ZKProver()
94 | proof = prover.generate_proof(
95 | strategy,
96 | execution_data
97 | )
98 |
99 | # Verify ZK proof
100 | is_valid = prover.verify_proof(proof)
101 | ```
102 |
103 | ## Security Considerations
104 |
105 | 1. Secure key management
106 | 2. Regular security audits
107 | 3. Gas optimization
108 | 4. Error handling
109 | 5. Transaction monitoring
110 | 6. Network redundancy
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing to DeepChain
2 |
3 | We love your input! We want to make contributing to DeepChain as easy and transparent as possible, whether it's:
4 |
5 | - Reporting a bug
6 | - Discussing the current state of the code
7 | - Submitting a fix
8 | - Proposing new features
9 | - Becoming a maintainer
10 |
11 | ## Development Process
12 |
13 | We use GitHub to host code, to track issues and feature requests, as well as accept pull requests.
14 |
15 | 1. Fork the repo and create your branch from `main`
16 | 2. If you've added code that should be tested, add tests
17 | 3. If you've changed APIs, update the documentation
18 | 4. Ensure the test suite passes
19 | 5. Make sure your code lints
20 | 6. Issue that pull request
21 |
22 | ## Pull Request Process
23 |
24 | 1. Update the README.md with details of changes to the interface
25 | 2. Update the docs/ with any necessary changes
26 | 3. The PR will be merged once you have the sign-off of two other developers
27 |
28 | ## Any contributions you make will be under the MIT Software License
29 |
30 | In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern.
31 |
32 | ## Report bugs using GitHub's [issue tracker]
33 |
34 | We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/Deep-Chain-IO/deepchain/issues/new).
35 |
36 | ## Write bug reports with detail, background, and sample code
37 |
38 | **Great Bug Reports** tend to have:
39 |
40 | - A quick summary and/or background
41 | - Steps to reproduce
42 | - Be specific!
43 | - Give sample code if you can
44 | - What you expected would happen
45 | - What actually happens
46 | - Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
47 |
48 | ## Use a Consistent Coding Style
49 |
50 | * 4 spaces for indentation rather than tabs
51 | * 80 character line length
52 | * Run `pylint` over your code
53 | * Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/)
54 |
55 | ## License
56 |
57 | By contributing, you agree that your contributions will be licensed under its MIT License.
--------------------------------------------------------------------------------
/docs/edge_deployment.md:
--------------------------------------------------------------------------------
1 | # Edge Deployment Guide
2 |
3 | ## Overview
4 |
5 | This guide explains how to deploy your trading strategies to edge devices using DeepChain.
6 |
7 | ## Table of Contents
8 |
9 | 1. [Model Optimization](#model-optimization)
10 | 2. [Knowledge Distillation](#knowledge-distillation)
11 | 3. [Device Deployment](#device-deployment)
12 | 4. [Performance Monitoring](#performance-monitoring)
13 | 5. [Troubleshooting](#troubleshooting)
14 |
15 | ## Model Optimization
16 |
17 | Optimize your model for edge deployment:
18 |
19 | ```python
20 | from deepchain.core.edge import EdgeOptimizer
21 |
22 | optimizer = EdgeOptimizer()
23 |
24 | # Compress model
25 | compressed_model = optimizer.compress_model(
26 | model,
27 | target_size="10MB",
28 | precision="fp16"
29 | )
30 |
31 | # Validate performance
32 | performance = optimizer.validate_performance(
33 | compressed_model,
34 | test_data
35 | )
36 | ```
37 |
38 | ## Knowledge Distillation
39 |
40 | Implement knowledge distillation:
41 |
42 | ```python
43 | from deepchain.core.edge import Distiller
44 |
45 | # Create teacher and student models
46 | teacher = LargeModel()
47 | student = SmallModel()
48 |
49 | # Setup distillation
50 | distiller = Distiller(
51 | teacher=teacher,
52 | student=student,
53 | temperature=2.0
54 | )
55 |
56 | # Train student model
57 | distiller.train(training_data)
58 | ```
59 |
60 | ## Device Deployment
61 |
62 | Deploy to edge devices:
63 |
64 | ```python
65 | from deepchain.core.edge import EdgeDeployer
66 |
67 | deployer = EdgeDeployer()
68 |
69 | # Export model
70 | model_path = deployer.export_model(
71 | model,
72 | format="onnx",
73 | target_device="mobile"
74 | )
75 |
76 | # Deploy model
77 | deployment = deployer.deploy(
78 | model_path,
79 | device_config={
80 | "type": "mobile",
81 | "os": "android",
82 | "compute": "cpu"
83 | }
84 | )
85 | ```
86 |
87 | ## Performance Monitoring
88 |
89 | Monitor the deployment with the metrics collector (`latency_ms` and
90 | `memory_bytes` stand in for measurements reported by the device):
91 |
92 | ```python
93 | from deepchain.core.monitoring import MetricsCollector
94 |
95 | collector = MetricsCollector()
96 | collector.record_latency(latency_ms)         # on-device latency, ms
97 | collector.record_memory_usage(memory_bytes)  # on-device memory, bytes
98 |
99 | metrics = collector.get_statistics()
100 | print(f"Latency: {metrics['avg_latency']}ms")
101 | ```
102 |
103 | ## Troubleshooting
104 |
105 | Common issues and solutions:
106 |
107 | 1. Memory constraints (see the sketch after this list)
108 | - Use model quantization
109 | - Implement pruning
110 | - Optimize batch size
111 |
112 | 2. Latency issues
113 | - Profile model operations
114 | - Use hardware acceleration
115 | - Optimize input processing
116 |
117 | 3. Battery consumption
118 | - Implement power-aware scheduling
119 | - Use efficient compute modes
120 | - Optimize network calls
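121 |
122 | A sketch of the first two memory remedies, assuming a concrete
123 | `EdgeOptimizer` subclass (the core class is abstract) and
124 | implementation-specific keyword arguments:
125 |
126 | ```python
127 | optimizer = MyEdgeOptimizer()  # your EdgeOptimizer subclass
128 |
129 | # Quantize weights (e.g. to 8-bit integers)
130 | model = optimizer.quantize_model(model, dtype="int8")
131 |
132 | # Prune low-magnitude weights
133 | model = optimizer.prune_model(model, sparsity=0.5)
134 |
135 | # Confirm the optimized model still fits the target device
136 | report = optimizer.benchmark_model(model, device_specs={"type": "mobile"})
137 | ```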
--------------------------------------------------------------------------------
/docs/getting_started.md:
--------------------------------------------------------------------------------
1 | # Getting Started with DeepChain
2 |
3 | This guide will help you get started with the DeepChain framework for building decentralized AI trading strategies.
4 |
5 | ## Installation
6 |
7 | ```bash
8 | pip install deepchain
9 | ```
10 |
11 | ## Basic Usage
12 |
13 | Here's a simple example of how to use DeepChain:
14 |
15 | ```python
16 | from deepchain.core.strategy import RLStrategy
17 | from deepchain.core.blockchain import ZKValidator
18 | from deepchain.core.edge import EdgeOptimizer
19 |
20 | # 1. Create your strategy
21 | class MyStrategy(RLStrategy):
22 | def train(self, data, **kwargs):
23 | # Implement your training logic
24 | pass
25 |
26 | def predict(self, state):
27 | # Implement your prediction logic
28 | pass
29 |
30 | # 2. Train and validate
31 | strategy = MyStrategy()
32 | strategy.train(historical_data)
33 |
34 | validator = ZKValidator()
35 | strategy_id = validator.register_strategy(strategy)
36 |
37 | # 3. Deploy to edge
38 | deployer = EdgeOptimizer()
39 | compressed_model = deployer.compress_model(strategy.model)
40 | deployer.export_model(compressed_model, "onnx", "my_model.onnx")
41 | ```
42 |
43 | Note that `RLStrategy` also declares `save`, `load`, `get_action_space`,
44 | `get_state_space` and `get_reward` as abstract methods; implement them
45 | all before instantiating the strategy (omitted above for brevity).
42 |
43 | ## Core Components
44 |
45 | ### 1. Strategy Module
46 |
47 | The strategy module provides interfaces for implementing trading strategies:
48 |
49 | - `BaseStrategy`: Base class for all strategies
50 | - `RLStrategy`: Base class for reinforcement learning strategies
51 |
52 | Key methods to implement:
53 | - `train()`: Train the strategy
54 | - `predict()`: Generate trading decisions
55 | - `save()/load()`: Model persistence
56 |
57 | ### 2. Blockchain Validation
58 |
59 | The blockchain module handles strategy validation and verification:
60 |
61 | - `BaseValidator`: Base validator interface
62 | - `ZKValidator`: Zero-knowledge proof validator
63 |
64 | Key features:
65 | - Strategy registration
66 | - Execution verification
67 | - Zero-knowledge proofs
68 |
69 | ### 3. Edge Deployment
70 |
71 | The edge module manages model compression and deployment:
72 |
73 | - `BaseDeployer`: Base deployer interface
74 | - `EdgeOptimizer`: Model optimization tools
75 |
76 | Key features:
77 | - Model compression
78 | - Format conversion
79 | - Performance validation
80 |
81 | ## Examples
82 |
83 | Check out the `examples/` directory for complete implementation examples:
84 |
85 | - `simple_strategy.py`: Basic RL strategy implementation
86 | - `simple_validator.py`: Simple blockchain validator
87 | - `simple_deployer.py`: Basic edge deployment
88 | - `usage_example.py`: Complete usage example
89 |
90 | ## Best Practices
91 |
92 | 1. Strategy Implementation
93 | - Implement all abstract methods
94 | - Handle edge cases
95 | - Add proper logging
96 |
97 | 2. Blockchain Integration
98 | - Use secure proof generation
99 | - Validate all inputs
100 | - Handle network issues
101 |
102 | 3. Edge Deployment
103 | - Test on target devices
104 | - Monitor performance metrics
105 | - Optimize for specific hardware
106 |
107 | ## Contributing
108 |
109 | We welcome contributions! Please see our contributing guidelines for details.
110 |
111 | ## Support
112 |
113 | For questions and support:
114 | - GitHub Issues
115 | - Documentation
116 | - Community Forums
--------------------------------------------------------------------------------
/docs/monitoring.md:
--------------------------------------------------------------------------------
1 | # Monitoring System Guide
2 |
3 | ## Overview
4 |
5 | This guide explains how to use DeepChain's monitoring system to track and analyze your trading strategies.
6 |
7 | ## Table of Contents
8 |
9 | 1. [Metrics Collection](#metrics-collection)
10 | 2. [Alert Management](#alert-management)
11 | 3. [Performance Tracking](#performance-tracking)
12 | 4. [System Health](#system-health)
13 | 5. [Visualization](#visualization)
14 |
15 | ## Metrics Collection
16 |
17 | Set up metrics collection:
18 |
19 | ```python
20 | from deepchain.core.monitoring import MetricsCollector
21 |
22 | collector = MetricsCollector()
23 |
24 | # Record metrics
25 | collector.record_latency(100)                  # latency in ms
26 | collector.record_memory_usage(512 * 1024 ** 2) # memory in bytes (512 MB)
27 | collector.record_prediction(0.75)              # prediction value
28 | collector.record_error(0.02)                   # prediction error
29 |
30 | # Get statistics
31 | stats = collector.get_statistics()
32 | print(f"Average latency: {stats['avg_latency']}ms")
33 | print(f"Max memory usage: {stats['max_memory_usage']} bytes")
34 | ```
35 |
36 | ## Alert Management
37 |
38 | Configure and manage alerts:
39 |
40 | ```python
41 | from deepchain.core.monitoring import AlertManager
42 |
43 | manager = AlertManager()
44 |
45 | # Set thresholds
46 | manager.set_threshold(
47 | metric="latency",
48 | warning=200, # ms
49 | critical=500 # ms
50 | )
51 |
52 | # Add alert handler
53 | def alert_handler(alert):
54 | print(f"Alert: {alert.message}")
55 |
56 | manager.add_handler(alert_handler)
57 |
58 | # Check metrics
59 | manager.check_metrics(metrics_data)
60 | ```
61 |
62 | ## Performance Tracking
63 |
64 | Track strategy performance:
65 |
66 | ```python
67 | from deepchain.core.monitoring import PerformanceTracker
68 |
69 | tracker = PerformanceTracker()
70 |
71 | # Set baseline
72 | tracker.set_baseline({
73 |     'latency': 50,       # ms
74 |     'throughput': 1000,  # predictions per second
75 |     'error_rate': 0.01
76 | })
77 |
78 | # Record metrics
79 | tracker.record_metrics(current_metrics)
80 |
81 | # Get performance report
82 | report = tracker.get_performance_report()
83 | print(f"Model drift: {report['model_drift']}")
84 | print(f"Performance score: {report['performance_score']}")
85 | ```
86 |
87 | ## System Health
88 |
89 | Monitor system health:
90 |
91 | ```python
92 | from deepchain.core.monitoring import MetricsCollector
93 |
94 | collector = MetricsCollector()
95 |
96 | # Check system health (derived from the collected metrics)
97 | health = collector.get_system_health()
98 | print(f"System status: {health['status']}")
99 | print(f"Components: {health['components']}")
100 |
101 | # Get per-component detail
102 | for component, info in health['components'].items():
103 |     print(f"{component}: {info['status']} "
104 |           f"(value: {info['value']:.2f})")
105 | ```
106 |
107 | ## Visualization
108 |
109 | Dashboards are provided by the Prometheus/Grafana stack defined in
110 | `docker-compose.yml` rather than by a Python API:
111 |
112 | ```bash
113 | # Start the monitoring services
114 | docker compose up -d prometheus grafana
115 | ```
116 |
117 | Prometheus stores the collected metrics and listens on port 9090;
118 | Grafana serves dashboards on port 3000. Add Prometheus as a Grafana
119 | data source, then build panels for the metrics gathered above, e.g. a
120 | line chart for latency and a gauge for memory usage.
132 |
133 | ## Best Practices
134 |
135 | 1. Regular metric collection
136 | 2. Appropriate alert thresholds
137 | 3. Performance baseline updates
138 | 4. System health checks
139 | 5. Resource monitoring
140 | 6. Alert response procedures
--------------------------------------------------------------------------------
/docs/security.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | At DeepChain, we take security seriously. If you believe you have found a security vulnerability, please report it to us as described below.
6 |
7 | ## Reporting Process
8 |
9 | Please send emails to security@deepchain.io with the following information:
10 |
11 | - Description of the vulnerability
12 | - Steps to reproduce the issue
13 | - Potential impact
14 | - Any suggested fixes (if available)
15 |
16 | ## What to Expect
17 |
18 | 1. **Initial Response**: We will acknowledge receipt of your vulnerability report within 24 hours.
19 | 2. **Status Updates**: We will provide regular updates about the progress of addressing the vulnerability.
20 | 3. **Resolution**: Once fixed, we will notify you and discuss the details of public disclosure.
21 |
22 | ## Scope
23 |
24 | This security policy applies to:
25 |
26 | - DeepChain core framework
27 | - Official examples and documentation
28 | - Deployment tools and scripts
29 | - API endpoints and services
30 |
31 | ## Security Best Practices
32 |
33 | When using DeepChain, please follow these security guidelines:
34 |
35 | 1. **API Keys and Credentials**
36 | - Never commit API keys to version control
37 |    - Use environment variables for sensitive data (sketched below)
38 | - Rotate keys regularly
39 |
40 | 2. **Network Security**
41 | - Use secure connections (HTTPS/WSS)
42 | - Implement proper firewall rules
43 | - Monitor network traffic
44 |
45 | 3. **Access Control**
46 | - Implement proper authentication
47 | - Use role-based access control
48 | - Regular access reviews
49 |
50 | 4. **Data Protection**
51 | - Encrypt sensitive data
52 | - Regular backups
53 | - Secure data transmission
54 |
55 | 5. **Blockchain Security**
56 | - Secure key management
57 | - Transaction signing best practices
58 | - Smart contract auditing
59 |
60 | 6. **Edge Device Security**
61 | - Secure boot process
62 | - Regular security updates
63 | - Device authentication
64 |
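65 | A minimal sketch of the first guideline, reading credentials from the
66 | environment rather than source code (`DEEPSEEK_API_KEY` is the variable
67 | already used by `docker-compose.yml`):
68 |
69 | ```python
70 | import os
71 |
72 | # Fails fast if the key is missing instead of shipping a hard-coded one
73 | api_key = os.environ["DEEPSEEK_API_KEY"]
74 | ```
75 |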
65 | ## Disclosure Policy
66 |
67 | - Public disclosure will be coordinated with the reporter
68 | - Credit will be given to the reporter (if desired)
69 | - Details will be published after the fix is deployed
70 |
71 | ## Security Updates
72 |
73 | We regularly publish security updates. To stay informed:
74 |
75 | 1. Watch our GitHub repository
76 | 2. Follow our security announcements
77 | 3. Subscribe to our security mailing list
78 |
79 | ## Contact
80 |
81 | For security issues: security@deepchain.io
82 | For general inquiries: support@deepchain.io
--------------------------------------------------------------------------------
/docs/strategy_development.md:
--------------------------------------------------------------------------------
1 | # Strategy Development Guide
2 |
3 | ## Overview
4 |
5 | This guide explains how to develop trading strategies using the DeepChain framework.
6 |
7 | ## Table of Contents
8 |
9 | 1. [Basic Concepts](#basic-concepts)
10 | 2. [Creating a Strategy](#creating-a-strategy)
11 | 3. [Implementing Core Methods](#implementing-core-methods)
12 | 4. [Using Technical Indicators](#using-technical-indicators)
13 | 5. [Testing Your Strategy](#testing-your-strategy)
14 | 6. [Best Practices](#best-practices)
15 |
16 | ## Basic Concepts
17 |
18 | A trading strategy in DeepChain consists of several key components:
19 | - Strategy Interface
20 | - Data Processing
21 | - Model Training
22 | - Execution Logic
23 | - Performance Monitoring
24 |
25 | ## Creating a Strategy
26 |
27 | To create a new strategy, inherit from the base strategy class:
28 |
29 | ```python
30 | from deepchain.core.strategy import BaseStrategy
31 |
32 | class MyStrategy(BaseStrategy):
33 | def __init__(self):
34 | super().__init__()
35 | # Initialize your strategy parameters
36 | ```
37 |
38 | ## Implementing Core Methods
39 |
40 | Every strategy must implement these core methods:
41 |
42 | ```python
43 | def train(self, data):
44 | """Train the strategy model."""
45 | pass
46 |
47 | def predict(self, state):
48 | """Make trading decisions."""
49 | pass
50 |
51 | def save(self, path):
52 | """Save strategy state."""
53 | pass
54 |
55 | def load(self, path):
56 | """Load strategy state."""
57 | pass
58 | ```
59 |
60 | ## Using Technical Indicators
61 |
62 | DeepChain provides various technical indicators:
63 |
64 | ```python
65 | from deepchain.core.data.indicators import (
66 | calculate_ma,
67 | calculate_rsi,
68 | calculate_macd
69 | )
70 |
71 | # Calculate indicators
72 | ma = calculate_ma(data, period=20)
73 | rsi = calculate_rsi(data, period=14)
74 | macd = calculate_macd(data)
75 | ```
76 |
77 | ## Testing Your Strategy
78 |
79 | Validate your strategy by replaying historical data through it; a
80 | minimal sketch (with a hypothetical `historical_states` sequence):
81 |
82 | ```python
83 | # Feed recorded market states to predict() and evaluate the
84 | # resulting decisions offline
85 | decisions = [strategy.predict(state) for state in historical_states]
86 | results = evaluate_decisions(decisions)  # your evaluation function
87 | ```
88 |
89 | ## Best Practices
90 |
91 | 1. Always validate input data (practices 1-3 are sketched after this list)
92 | 2. Implement proper error handling
93 | 3. Use logging for debugging
94 | 4. Write comprehensive tests
95 | 5. Document your code
96 | 6. Monitor performance metrics
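97 |
98 | A sketch of the first three practices inside `train()` (illustrative
99 | only; `ValidationError` is defined in `deepchain/core/exceptions.py`):
100 |
101 | ```python
102 | import logging
103 | from deepchain.core.exceptions import ValidationError
104 |
105 | logger = logging.getLogger(__name__)
106 |
107 | class MyStrategy(BaseStrategy):
108 |     def train(self, data, **kwargs):
109 |         if 'close' not in data:
110 |             raise ValidationError("training data must include 'close' prices")
111 |         logger.info("Training on %d samples", len(data['close']))
112 |         # ... training logic ...
113 | ```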
--------------------------------------------------------------------------------
/examples/data_stream_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Example of using the data streaming system.
3 | """
4 |
5 | import asyncio
6 | import logging
7 | from datetime import datetime, timedelta
8 | import pandas as pd
9 | from deepchain.core.data.stream import WebSocketSource, MarketDataStream
10 | from deepchain.core.data.indicators import (
11 | calculate_ma,
12 | calculate_rsi,
13 | calculate_macd,
14 | calculate_bollinger_bands,
15 | calculate_vwap
16 | )
17 |
18 | # Configure logging
19 | logging.basicConfig(level=logging.INFO)
20 | logger = logging.getLogger(__name__)
21 |
22 | class DataStreamExample:
23 | """Example class demonstrating data streaming system usage."""
24 |
25 | def __init__(self):
26 | """Initialize data streaming example."""
27 | # Initialize data stream
28 | self.stream = MarketDataStream(buffer_size=1000)
29 |
30 | # Add data source
31 | self.source = WebSocketSource(
32 | url="wss://stream.binance.com:9443/ws",
33 | api_key="YOUR-API-KEY" # Replace with your API key
34 | )
35 | self.stream.add_source("binance", self.source)
36 |
37 | # Add technical indicators
38 | self.setup_indicators()
39 |
40 | # Add data processors
41 | self.setup_processors()
42 |
43 | def setup_indicators(self):
44 | """Setup technical indicators."""
45 | # Add moving averages
46 | self.stream.add_indicator(
47 | "ma_20",
48 | calculate_ma,
49 | period=20
50 | )
51 | self.stream.add_indicator(
52 | "ma_50",
53 | calculate_ma,
54 | period=50
55 | )
56 |
57 | # Add RSI
58 | self.stream.add_indicator(
59 | "rsi",
60 | calculate_rsi,
61 | period=14
62 | )
63 |
64 | # Add MACD
65 | self.stream.add_indicator(
66 | "macd",
67 | calculate_macd,
68 | fast_period=12,
69 | slow_period=26,
70 | signal_period=9
71 | )
72 |
73 | # Add Bollinger Bands
74 | self.stream.add_indicator(
75 | "bbands",
76 | calculate_bollinger_bands,
77 | period=20,
78 | std_dev=2.0
79 | )
80 |
81 | # Add VWAP
82 | self.stream.add_indicator(
83 | "vwap",
84 | calculate_vwap
85 | )
86 |
87 | def setup_processors(self):
88 | """Setup data processors."""
89 | # Add missing value handler
90 | def handle_missing(data: pd.DataFrame) -> pd.DataFrame:
91 |             return data.ffill().bfill()  # fillna(method=...) is deprecated in pandas 2.x
92 |
93 | # Add outlier detector
94 | def detect_outliers(data: pd.DataFrame) -> pd.DataFrame:
95 | for col in ['close', 'volume']:
96 | if col in data.columns:
97 | mean = data[col].mean()
98 | std = data[col].std()
99 | data[f'{col}_is_outlier'] = (
100 | (data[col] < mean - 3 * std) |
101 | (data[col] > mean + 3 * std)
102 | )
103 | return data
104 |
105 | # Add processors to stream
106 | self.stream.add_processor(handle_missing)
107 | self.stream.add_processor(detect_outliers)
108 |
109 | async def start_streaming(self, symbols: list):
110 | """Start data streaming for given symbols.
111 |
112 | Args:
113 | symbols: List of trading pairs (e.g., ['BTCUSDT', 'ETHUSDT'])
114 | """
115 | logger.info(f"Starting data stream for symbols: {symbols}")
116 |
117 | # Start stream
118 | await self.stream.start()
119 |
120 | # Subscribe to symbols
121 | await self.source.subscribe(symbols)
122 |
123 | try:
124 | while True:
125 | # Keep the stream running
126 | await asyncio.sleep(1)
127 |         except (KeyboardInterrupt, asyncio.CancelledError):  # Ctrl-C arrives as cancellation under asyncio.run
128 | logger.info("Stopping data stream...")
129 | await self.stream.stop()
130 |
131 |     async def process_historical_data(self, symbol: str, days: int = 30):
132 | """Process historical data for backtesting.
133 |
134 | Args:
135 | symbol: Trading pair symbol
136 | days: Number of days of historical data
137 | """
138 | logger.info(f"Processing historical data for {symbol}")
139 |
140 | # Get historical data
141 | end_time = datetime.now()
142 | start_time = end_time - timedelta(days=days)
143 |
144 |         # Await directly; asyncio.run() cannot be called from the already
145 |         # running event loop that main() starts
146 |         data = await self.source.get_historical_data(
147 |             symbol,
148 |             start_time,
149 |             end_time,
150 |             interval='1h'
151 |         )
152 |
153 | # Calculate indicators
154 | for name, (func, params) in self.stream.indicators.items():
155 | try:
156 | result = func(data, **params)
157 | if isinstance(result, pd.DataFrame):
158 | for col in result.columns:
159 | data[f'{name}_{col}'] = result[col]
160 | else:
161 | data[name] = result
162 | except Exception as e:
163 | logger.error(f"Error calculating {name}: {e}")
164 |
165 | # Apply processors
166 | for processor in self.stream.processors:
167 | try:
168 | data = processor(data)
169 | except Exception as e:
170 | logger.error(f"Error in processor: {e}")
171 |
172 | return data
173 |
174 | async def main():
175 | """Run data streaming example."""
176 | example = DataStreamExample()
177 |
178 | # Process historical data
179 |     historical_data = await example.process_historical_data('BTCUSDT', days=30)
180 | logger.info("\nHistorical Data Sample:")
181 | logger.info(historical_data.tail())
182 |
183 | # Start real-time streaming
184 | symbols = ['BTCUSDT', 'ETHUSDT']
185 | await example.start_streaming(symbols)
186 |
187 | if __name__ == '__main__':
188 | asyncio.run(main())
--------------------------------------------------------------------------------
/examples/monitoring_example.py:
--------------------------------------------------------------------------------
1 | """
2 | Example of using the monitoring system.
3 | """
4 |
5 | import time
6 | import logging
7 | from typing import Dict, Any
8 | import numpy as np
9 | from deepchain.core.monitoring.metrics import MetricsCollector
10 | from deepchain.core.monitoring.alerts import AlertManager
11 | from deepchain.core.monitoring.performance import PerformanceTracker
12 |
13 | # Configure logging
14 | logging.basicConfig(level=logging.INFO)
15 | logger = logging.getLogger(__name__)
16 |
17 | class MonitoringExample:
18 | """Example class demonstrating monitoring system usage."""
19 |
20 | def __init__(self):
21 | """Initialize monitoring components."""
22 | # Initialize monitoring components
23 | self.metrics = MetricsCollector(window_size=1000)
24 | self.alerts = AlertManager()
25 | self.performance = PerformanceTracker(window_size=1000)
26 |
27 | # Configure alert handlers
28 | self.alerts.add_alert_handler(self._handle_alert)
29 |
30 | # Set custom thresholds
31 | self.alerts.set_thresholds({
32 | 'latency': 50, # ms
33 | 'error_rate': 0.05, # 5%
34 | 'memory_usage': 0.8, # 80%
35 | 'cpu_usage': 0.7 # 70%
36 | })
37 |
38 | # Set performance baseline
39 | self.performance.set_baseline({
40 | 'latency': 30,
41 | 'throughput': 1000,
42 | 'error_rate': 0.01,
43 | 'memory_usage': 0.5,
44 | 'cpu_usage': 0.4
45 | })
46 |
47 | def simulate_trading_system(self, n_iterations: int = 1000):
48 | """Simulate a trading system with monitoring.
49 |
50 | Args:
51 | n_iterations: Number of iterations to simulate
52 | """
53 | logger.info("Starting trading system simulation...")
54 |
55 | for i in range(n_iterations):
56 | # Simulate model prediction
57 | start_time = time.time()
58 | prediction = self._simulate_prediction()
59 | latency = (time.time() - start_time) * 1000 # Convert to ms
60 |
61 | # Record metrics
62 | self.metrics.record_latency(latency)
63 | self.metrics.record_prediction(prediction)
64 |
65 | # Simulate error calculation
66 | error = abs(prediction - 0.5) # Assuming 0.5 is ground truth
67 | self.metrics.record_error(error)
68 |
69 | # Record system metrics
70 | system_metrics = self._get_system_metrics()
71 | self.metrics.record_system_metrics(system_metrics)
72 |
73 | # Check metrics against thresholds
74 | self.alerts.check_metric('latency', latency, 'model')
75 | self.alerts.check_metric('error_rate', error, 'model')
76 |
77 | for key, value in system_metrics.items():
78 | self.alerts.check_metric(key, value, 'system')
79 |
80 | # Record performance metrics
81 | self.performance.record_metrics({
82 | 'latency': latency,
83 |                 'throughput': 1000 / latency,  # ops/sec (latency is in ms)
84 | 'error_rate': error,
85 | 'memory_usage': system_metrics['memory_usage'],
86 | 'cpu_usage': system_metrics['cpu_usage']
87 | })
88 |
89 | # Periodically log status
90 | if (i + 1) % 100 == 0:
91 | self._log_status()
92 |
93 | # Simulate some delay
94 | time.sleep(0.01)
95 |
96 | def _simulate_prediction(self) -> float:
97 | """Simulate model prediction."""
98 | # Add random latency
99 | time.sleep(np.random.exponential(0.02))
100 |
101 | # Generate prediction with some noise
102 | return np.random.normal(0.5, 0.1)
103 |
104 | def _get_system_metrics(self) -> Dict[str, float]:
105 | """Get current system metrics."""
106 | import psutil
107 |
108 | return {
109 | 'cpu_usage': psutil.cpu_percent() / 100,
110 | 'memory_usage': psutil.virtual_memory().percent / 100,
111 | 'disk_usage': psutil.disk_usage('/').percent / 100,
112 | 'network_io': sum(psutil.net_io_counters()[:2]) / 1e6 # Convert to MB
113 | }
114 |
115 | def _handle_alert(self, alert: Dict[str, Any]):
116 | """Handle generated alerts.
117 |
118 | Args:
119 | alert: Alert information dictionary
120 | """
121 | logger.warning(
122 | f"Alert: {alert['message']} "
123 | f"(Severity: {alert['severity']})"
124 | )
125 |
126 | def _log_status(self):
127 | """Log current system status."""
128 | # Get metrics
129 | stats = self.metrics.get_statistics()
130 | health = self.metrics.get_system_health()
131 | performance = self.performance.get_performance_report()
132 |
133 | # Log summary
134 | logger.info("\n=== System Status ===")
135 | logger.info(f"Health: {health['status']}")
136 | logger.info(f"Average Latency: {stats.get('avg_latency', 0):.2f}ms")
137 | logger.info(f"Error Rate: {stats.get('error_rate', 0):.2%}")
138 | logger.info(f"Performance Score: {performance['performance_score']:.1f}")
139 |
140 | # Log active alerts
141 | active_alerts = self.alerts.get_active_alerts()
142 | if active_alerts:
143 | logger.warning(f"Active Alerts: {len(active_alerts)}")
144 | for alert in active_alerts:
145 | logger.warning(f"- {alert['message']}")
146 |
147 | def main():
148 | """Run monitoring example."""
149 | example = MonitoringExample()
150 | example.simulate_trading_system()
151 |
152 | if __name__ == '__main__':
153 | main()
--------------------------------------------------------------------------------
/examples/simple_deployer.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Any, Dict, Optional
3 | 
4 | import torch
5 | import torch.nn as nn
6 | import onnx  # noqa: F401  (kept for ONNX export/runtime workflows)
7 | import onnxruntime as ort  # noqa: F401  (useful for loading exported models)
8 |
9 | from deepchain.core.edge.deployer import EdgeOptimizer
10 |
11 | class SimpleDeployer(EdgeOptimizer):
12 | """
13 | Simple example implementation of an edge deployment optimizer.
14 | """
15 | def __init__(self):
16 | """
17 | Initialize the deployer with default settings.
18 | """
19 | self.supported_formats = ['onnx', 'torchscript']
20 | self.quantization_config = {
21 | 'weight_dtype': torch.qint8,
22 | 'activation_dtype': torch.quint8
23 | }
24 |
25 | def compress_model(self,
26 | model: nn.Module,
27 | target_size: Optional[int] = None,
28 | target_latency: Optional[float] = None) -> nn.Module:
29 | """
30 | Compress the model using quantization and pruning.
31 | """
32 | # Apply quantization first
33 | quantized_model = self.quantize_model(model)
34 |
35 | # If still above target size, apply pruning
36 | if target_size and self._get_model_size(quantized_model) > target_size:
37 | quantized_model = self.prune_model(quantized_model)
38 |
39 | return quantized_model
40 |
41 | def export_model(self, model: nn.Module, format: str, path: str) -> None:
42 | """
43 | Export the model to specified format.
44 | """
45 | if format not in self.supported_formats:
46 | raise ValueError(f"Unsupported format: {format}")
47 |
48 | if format == 'onnx':
49 | self._export_onnx(model, path)
50 | elif format == 'torchscript':
51 | self._export_torchscript(model, path)
52 |
53 | def validate_performance(self,
54 | model: nn.Module,
55 | test_data: Dict[str, Any]) -> Dict[str, float]:
56 | """
57 | Validate model performance after compression.
58 | """
59 | # Convert test data to tensors
60 | inputs = torch.FloatTensor(test_data['inputs'])
61 | targets = torch.FloatTensor(test_data['targets'])
62 |
63 | # Evaluate model
64 | model.eval()
65 | with torch.no_grad():
66 | outputs = model(inputs)
67 |
68 | # Calculate metrics
69 | mse = nn.MSELoss()(outputs, targets).item()
70 | mae = nn.L1Loss()(outputs, targets).item()
71 |
72 | # Calculate latency
73 | latency = self._measure_latency(model, inputs)
74 |
75 | return {
76 | 'mse': mse,
77 | 'mae': mae,
78 | 'latency': latency,
79 | 'model_size': self._get_model_size(model)
80 | }
81 |
82 |     def quantize_model(self, model: nn.Module, **kwargs) -> nn.Module:
83 |         """
84 |         Quantize model weights using dynamic quantization.
85 |         """
86 |         # Dynamic quantization is a drop-in replacement (float in, float out);
87 |         # eager static quantization would additionally need QuantStub/DeQuantStub
88 |         # wrappers and a calibration pass before convert()
89 |         model_quantized = torch.quantization.quantize_dynamic(
90 |             model,
91 |             {nn.Linear},
92 |             dtype=torch.qint8
93 |         )
94 | 
95 |         return model_quantized
96 |
97 | def prune_model(self, model: nn.Module, **kwargs) -> nn.Module:
98 | """
99 | Prune model weights.
100 | """
101 | # Simple magnitude-based pruning
102 | for name, module in model.named_modules():
103 | if isinstance(module, nn.Linear):
104 | # Prune 30% of weights with lowest magnitude
105 | mask = torch.ones_like(module.weight.data)
106 | threshold = torch.quantile(torch.abs(module.weight.data), 0.3)
107 | mask[torch.abs(module.weight.data) < threshold] = 0
108 | module.weight.data *= mask
109 |
110 | return model
111 |
112 | def benchmark_model(self,
113 | model: nn.Module,
114 | device_specs: Dict[str, Any]) -> Dict[str, float]:
115 | """
116 | Benchmark model performance on target device.
117 | """
118 | # Create dummy input based on device specs
119 | input_shape = device_specs.get('input_shape', (1, 10))
120 | dummy_input = torch.randn(*input_shape)
121 |
122 | # Measure latency and memory usage
123 | latency = self._measure_latency(model, dummy_input)
124 | memory = self._get_model_size(model)
125 |
126 | return {
127 | 'latency': latency,
128 | 'memory_usage': memory,
129 |             'device_compatibility': self._check_device_compatibility(model, device_specs)
130 | }
131 |
132 |     def _export_onnx(self, model: nn.Module, path: str) -> None:
133 |         """Export model to ONNX format."""
134 |         # Infer the input size from the first Linear layer (no `input_dim` attribute assumed)
135 |         input_dim = next(m.in_features for m in model.modules()
136 |                          if isinstance(m, nn.Linear))
137 |         dummy_input = torch.randn(1, input_dim)
138 |         torch.onnx.export(model, dummy_input, path,
139 |                           input_names=['input'], output_names=['output'],
140 |                           dynamic_axes={'input': {0: 'batch_size'},
141 |                                         'output': {0: 'batch_size'}})
142 |
143 | def _export_torchscript(self, model: nn.Module, path: str) -> None:
144 | """
145 | Export model to TorchScript format.
146 | """
147 | scripted_model = torch.jit.script(model)
148 | torch.jit.save(scripted_model, path)
149 |
150 |     def _measure_latency(self, model: nn.Module, inputs: torch.Tensor) -> float:
151 |         """
152 |         Measure average inference latency in milliseconds.
153 |         """
154 |         # Warm up
155 |         for _ in range(10):
156 |             _ = model(inputs)
157 | 
158 |         # Wall-clock timing works on CPU and GPU; synchronize first so queued
159 |         # CUDA kernels do not distort the measurement on GPU machines
160 |         if torch.cuda.is_available():
161 |             torch.cuda.synchronize()
162 |         start = time.perf_counter()
163 |         for _ in range(100):
164 |             _ = model(inputs)
165 |         if torch.cuda.is_available():
166 |             torch.cuda.synchronize()
167 |         elapsed = time.perf_counter() - start
168 |         return elapsed * 1000 / 100  # Average latency in ms
169 |
170 | def _get_model_size(self, model: nn.Module) -> int:
171 | """
172 | Get model size in bytes.
173 | """
174 | return sum(p.numel() * p.element_size() for p in model.parameters())
175 |
176 |     def _check_device_compatibility(self, model: nn.Module,
177 |                                     device_specs: Dict[str, Any]) -> bool:
178 |         """
179 |         Check whether the model fits the target device's memory budget.
180 |         """
181 |         available_memory = device_specs.get('available_memory', float('inf'))
182 | 
183 |         # Simple compatibility check: the model's parameters must fit in the
184 |         # device's reported memory; compute requirements are not modeled here
185 |         return self._get_model_size(model) <= available_memory
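186 | 
187 | 
188 | if __name__ == '__main__':
189 |     # Minimal smoke test (a sketch, not part of the framework API): build a
190 |     # tiny hypothetical MLP and benchmark it against made-up device specs
191 |     net = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 3))
192 |     deployer = SimpleDeployer()
193 |     specs = {'input_shape': (1, 10), 'available_memory': 10 * 1024 * 1024}
194 |     print(deployer.benchmark_model(net, specs))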
--------------------------------------------------------------------------------
/examples/simple_strategy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | from typing import Dict, Any
5 |
6 | from deepchain.core.strategy.base import RLStrategy
7 |
8 | class SimpleMLPPolicy(nn.Module):
9 | """
10 | Simple MLP-based policy network for the RL strategy.
11 | """
12 | def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):
13 | super().__init__()
14 | self.network = nn.Sequential(
15 | nn.Linear(input_dim, hidden_dim),
16 | nn.ReLU(),
17 | nn.Linear(hidden_dim, output_dim)
18 | )
19 |
20 | def forward(self, x: torch.Tensor) -> torch.Tensor:
21 | return self.network(x)
22 |
23 | class SimpleRLStrategy(RLStrategy):
24 | """
25 | Example implementation of a simple RL-based trading strategy.
26 | """
27 | def __init__(self, input_dim: int = 10, hidden_dim: int = 64, output_dim: int = 3):
28 | """
29 | Initialize the strategy with a simple MLP policy.
30 |
31 | Args:
32 | input_dim: Dimension of the state space
33 | hidden_dim: Hidden layer dimension
34 | output_dim: Dimension of the action space
35 | """
36 | self.policy = SimpleMLPPolicy(input_dim, hidden_dim, output_dim)
37 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
38 | self.policy.to(self.device)
39 |
40 | def train(self, data: Dict[str, Any], **kwargs) -> None:
41 | """
42 | Simple training loop using policy gradient.
43 | """
44 | # Example training logic
45 | optimizer = torch.optim.Adam(self.policy.parameters(), lr=kwargs.get('lr', 1e-3))
46 | n_epochs = kwargs.get('n_epochs', 100)
47 |
48 | for epoch in range(n_epochs):
49 | state = torch.FloatTensor(data['states']).to(self.device)
50 | action_probs = self.policy(state)
51 | loss = self._compute_loss(action_probs, data)
52 |
53 | optimizer.zero_grad()
54 | loss.backward()
55 | optimizer.step()
56 |
57 | def predict(self, state: Dict[str, Any]) -> Dict[str, Any]:
58 | """
59 | Generate trading decision based on current state.
60 | """
61 | with torch.no_grad():
62 | state_tensor = torch.FloatTensor([list(state.values())]).to(self.device)
63 |             action_probs = torch.softmax(self.policy(state_tensor), dim=1)  # logits -> probabilities
64 | action = torch.argmax(action_probs, dim=1).item()
65 |
66 | # Map action to trading decision
67 | action_map = {0: 'buy', 1: 'hold', 2: 'sell'}
68 | return {'action': action_map[action], 'confidence': float(action_probs.max())}
69 |
70 | def save(self, path: str) -> None:
71 | """
72 | Save the policy model.
73 | """
74 | torch.save(self.policy.state_dict(), path)
75 |
76 | def load(self, path: str) -> None:
77 | """
78 | Load the policy model.
79 | """
80 |         self.policy.load_state_dict(torch.load(path, map_location=self.device))
81 |
82 | def get_action_space(self) -> Dict[str, Any]:
83 | """
84 | Define the action space.
85 | """
86 | return {
87 | 'type': 'discrete',
88 | 'size': 3, # buy, hold, sell
89 | 'actions': ['buy', 'hold', 'sell']
90 | }
91 |
92 | def get_state_space(self) -> Dict[str, Any]:
93 | """
94 | Define the state space.
95 | """
96 | return {
97 | 'type': 'continuous',
98 | 'shape': (10,), # 10 features
99 | 'features': [
100 | 'price', 'volume', 'high', 'low',
101 | 'ma_5', 'ma_10', 'rsi', 'macd',
102 | 'bollinger_upper', 'bollinger_lower'
103 | ]
104 | }
105 |
106 | def get_reward(self, state: Dict[str, Any], action: Dict[str, Any]) -> float:
107 | """
108 | Calculate the reward for the action taken.
109 | """
110 | # Example reward calculation based on price change
111 | price_change = state.get('next_price', 0) - state.get('price', 0)
112 |
113 | if action['action'] == 'buy':
114 | return price_change
115 | elif action['action'] == 'sell':
116 | return -price_change
117 | else: # hold
118 | return 0.0
119 |
120 | def _compute_loss(self, action_probs: torch.Tensor, data: Dict[str, Any]) -> torch.Tensor:
121 | """
122 | Compute the policy gradient loss.
123 | """
124 | rewards = torch.FloatTensor(data['rewards']).to(self.device)
125 | actions = torch.LongTensor(data['actions']).to(self.device)
126 |
127 | # Simple policy gradient loss
128 |         log_probs = torch.log_softmax(action_probs, dim=1)  # the policy emits raw logits
129 | selected_log_probs = log_probs[range(len(actions)), actions]
130 | loss = -(selected_log_probs * rewards).mean()
131 |
132 | return loss
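133 | 
134 | 
135 | if __name__ == '__main__':
136 |     # Minimal smoke test (illustrative only): train briefly on random data
137 |     # shaped to match get_state_space(), then predict on a dummy state
138 |     strategy = SimpleRLStrategy()
139 |     strategy.train({'states': np.random.randn(64, 10),
140 |                     'actions': np.random.randint(0, 3, 64),
141 |                     'rewards': np.random.randn(64)}, n_epochs=2)
142 |     dummy_state = {f: 0.0 for f in strategy.get_state_space()['features']}
143 |     print(strategy.predict(dummy_state))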
--------------------------------------------------------------------------------
/examples/simple_validator.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Any, Optional
2 | import hashlib
3 | import json
4 | from web3 import Web3
5 |
6 | from deepchain.core.blockchain.validator import ZKValidator
7 |
8 | class SimpleValidator(ZKValidator):
9 | """
10 | Simple example implementation of a blockchain validator.
11 | """
12 | def __init__(self, web3_provider: str = "http://localhost:8545"):
13 | """
14 | Initialize the validator with Web3 connection.
15 |
16 | Args:
17 | web3_provider: URL of the Web3 provider
18 | """
19 | self.w3 = Web3(Web3.HTTPProvider(web3_provider))
20 | self.strategies = {} # Simple in-memory storage
21 |
22 | def validate_strategy(self, strategy_id: str, proof: Dict[str, Any]) -> bool:
23 | """
24 | Validate a trading strategy using simple hash-based proof.
25 | """
26 | if strategy_id not in self.strategies:
27 | return False
28 |
29 | # Simple validation logic
30 | stored_hash = self.strategies[strategy_id].get('proof_hash')
31 | current_hash = self._compute_hash(proof['data'])
32 |
33 | return stored_hash == current_hash
34 |
35 | def register_strategy(self, strategy_id: str, metadata: Dict[str, Any]) -> str:
36 | """
37 | Register a new strategy with metadata.
38 | """
39 | # Create a simple hash of the metadata
40 | metadata_hash = self._compute_hash(metadata)
41 |
42 | # Store strategy information
43 | self.strategies[strategy_id] = {
44 | 'metadata': metadata,
45 | 'proof_hash': metadata_hash
46 | }
47 |
48 | # In a real implementation, this would be a blockchain transaction hash
49 | return metadata_hash
50 |
51 | def verify_execution(self,
52 | strategy_id: str,
53 | execution_data: Dict[str, Any],
54 | proof: Optional[Dict[str, Any]] = None) -> bool:
55 | """
56 | Verify the execution of a strategy.
57 | """
58 | if not proof:
59 | return False
60 |
61 | # Verify the proof matches the execution data
62 | execution_hash = self._compute_hash(execution_data)
63 | return proof.get('execution_hash') == execution_hash
64 |
65 | def generate_proof(self, strategy_id: str, execution_data: Dict[str, Any]) -> Dict[str, Any]:
66 | """
67 | Generate a simple hash-based proof for strategy execution.
68 | """
69 | execution_hash = self._compute_hash(execution_data)
70 |
71 | # Create a simple proof structure
72 | proof = {
73 | 'strategy_id': strategy_id,
74 | 'execution_hash': execution_hash,
75 | 'timestamp': self.w3.eth.get_block('latest')['timestamp']
76 | }
77 |
78 | return proof
79 |
80 | def verify_proof(self, proof: Dict[str, Any]) -> bool:
81 | """
82 | Verify a proof's authenticity.
83 | """
84 | # In a real implementation, this would verify the zero-knowledge proof
85 | # Here we just check if the proof has valid structure
86 | required_fields = {'strategy_id', 'execution_hash', 'timestamp'}
87 | return all(field in proof for field in required_fields)
88 |
89 | def _compute_hash(self, data: Any) -> str:
90 | """
91 | Compute a hash of the input data.
92 | """
93 | # Convert data to JSON string and compute SHA256 hash
94 | data_str = json.dumps(data, sort_keys=True)
95 | return hashlib.sha256(data_str.encode()).hexdigest()
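96 | 
97 | 
98 | if __name__ == '__main__':
99 |     # Minimal smoke test of the register/validate round trip; no live node is
100 |     # needed because only generate_proof touches the Web3 connection
101 |     validator = SimpleValidator()
102 |     metadata = {'name': 'demo', 'version': '0.1.0'}
103 |     print(validator.register_strategy('demo_strategy', metadata))
104 |     print(validator.validate_strategy('demo_strategy', {'data': metadata}))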
--------------------------------------------------------------------------------
/examples/usage_example.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from simple_strategy import SimpleRLStrategy
3 | from simple_validator import SimpleValidator
4 | from simple_deployer import SimpleDeployer
5 |
6 | def main():
7 | """
8 | Example usage of the DeepChain framework.
9 | """
10 | # 1. Create and train a strategy
11 | print("1. Creating and training strategy...")
12 | strategy = SimpleRLStrategy(input_dim=10, hidden_dim=64, output_dim=3)
13 |
14 | # Generate some dummy training data
15 | train_data = {
16 | 'states': np.random.randn(1000, 10),
17 | 'actions': np.random.randint(0, 3, 1000),
18 | 'rewards': np.random.randn(1000)
19 | }
20 |
21 | # Train the strategy
22 | strategy.train(train_data, n_epochs=10, lr=0.001)
23 |
24 | # 2. Validate strategy on blockchain
25 | print("\n2. Validating strategy on blockchain...")
26 | validator = SimpleValidator(web3_provider="http://localhost:8545")
27 |
28 | # Register strategy
29 | strategy_id = "example_strategy_001"
30 | metadata = {
31 | 'name': 'Simple RL Strategy',
32 | 'version': '0.1.0',
33 | 'type': 'reinforcement_learning'
34 | }
35 |
36 | tx_hash = validator.register_strategy(strategy_id, metadata)
37 | print(f"Strategy registered with hash: {tx_hash}")
38 |
39 | # Generate and verify proof
40 | execution_data = {
41 | 'timestamp': 1234567890,
42 | 'actions': ['buy', 'hold', 'sell'],
43 | 'performance': 0.15
44 | }
45 |
46 | proof = validator.generate_proof(strategy_id, execution_data)
47 | is_valid = validator.verify_proof(proof)
48 | print(f"Strategy proof verification: {is_valid}")
49 |
50 | # 3. Deploy strategy to edge device
51 | print("\n3. Deploying strategy to edge...")
52 | deployer = SimpleDeployer()
53 |
54 | # Compress model
55 | compressed_model = deployer.compress_model(
56 | strategy.policy,
57 | target_size=1024 * 1024, # 1MB
58 | target_latency=0.1 # 100ms
59 | )
60 |
61 |     # Export model (TorchScript; ONNX export of quantized models is limited)
62 |     export_path = "exported_model.pt"
63 |     deployer.export_model(compressed_model, "torchscript", export_path)
64 |     print(f"Model exported to: {export_path}")
65 |
66 | # Validate performance
67 | test_data = {
68 | 'inputs': np.random.randn(100, 10),
69 | 'targets': np.random.randn(100, 3)
70 | }
71 |
72 | performance = deployer.validate_performance(compressed_model, test_data)
73 | print("\nModel performance metrics:")
74 | for metric, value in performance.items():
75 | print(f"{metric}: {value}")
76 |
77 | # 4. Make predictions
78 | print("\n4. Making predictions...")
79 | current_state = {
80 | 'price': 100.0,
81 | 'volume': 1000000,
82 | 'high': 105.0,
83 | 'low': 95.0,
84 | 'ma_5': 101.0,
85 | 'ma_10': 100.5,
86 | 'rsi': 55.0,
87 | 'macd': 0.5,
88 | 'bollinger_upper': 110.0,
89 | 'bollinger_lower': 90.0
90 | }
91 |
92 | prediction = strategy.predict(current_state)
93 | print(f"Trading decision: {prediction}")
94 |
95 | if __name__ == "__main__":
96 | main()
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name="deepchain",
5 | version="0.1.0",
6 | description="A framework for decentralized AI trading strategy engines",
7 |     long_description=open("README.md", encoding="utf-8").read(),
8 | long_description_content_type="text/markdown",
9 | author="DeepChain Team",
10 | author_email="deep-chain_io@hotmail.com",
11 | url="https://github.com/Deep-Chain-IO/deepchain",
12 | packages=find_packages(),
13 | install_requires=[
14 | # Core dependencies
15 | "numpy>=1.21.0",
16 | "pandas>=1.3.0",
17 | "torch>=1.9.0",
18 | # DeepSeek related
19 | "deepseek-ai>=0.1.0",
20 | "deepseek-rl>=0.1.0",
21 | "deepseek-distill>=0.1.0",
22 | # Blockchain related
23 | "web3>=5.24.0",
24 | # Edge deployment related
25 | "onnx>=1.10.0",
26 | "onnxruntime>=1.9.0",
27 | ],
28 | classifiers=[
29 | "Development Status :: 3 - Alpha",
30 | "Intended Audience :: Developers",
31 | "License :: OSI Approved :: MIT License",
32 | "Programming Language :: Python :: 3.8",
33 | "Programming Language :: Python :: 3.9",
34 | "Programming Language :: Python :: 3.10",
35 | ],
36 | python_requires=">=3.8",
37 | )
--------------------------------------------------------------------------------
/tests/integration/test_data.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | import pandas as pd
4 | from datetime import datetime, timedelta
5 | from deepchain.core.data.loader import DataLoader
6 | from deepchain.core.data.processor import DataProcessor
7 | from deepchain.core.data.stream import DataStream
8 |
9 | class TestDataInterface(unittest.TestCase):
10 | """
11 | Integration tests for the data interface system.
12 | """
13 |
14 | def setUp(self):
15 | """Set up test environment."""
16 | self.loader = DataLoader()
17 | self.processor = DataProcessor()
18 | self.stream = DataStream()
19 |
20 | # Generate test data
21 | self.test_data = self._generate_test_data()
22 |
23 | def test_data_loading(self):
24 | """Test data loading from different sources."""
25 | # Test CSV loading
26 | csv_data = self.loader.load_csv("test_data.csv")
27 | self.assertIsInstance(csv_data, pd.DataFrame)
28 |
29 | # Test API loading
30 | api_data = self.loader.load_api(
31 | url="https://api.example.com/market_data",
32 | params={'symbol': 'BTC-USD'}
33 | )
34 | self.assertIsInstance(api_data, pd.DataFrame)
35 |
36 | # Test database loading
37 | db_data = self.loader.load_database(
38 | query="SELECT * FROM market_data LIMIT 1000"
39 | )
40 | self.assertIsInstance(db_data, pd.DataFrame)
41 |
42 | def test_data_preprocessing(self):
43 | """Test data preprocessing pipeline."""
44 | # Configure preprocessing steps
45 | self.processor.add_step('fill_missing', method='forward')
46 | self.processor.add_step('normalize', method='zscore')
47 | self.processor.add_step('add_technical_indicators',
48 | indicators=['MA', 'RSI', 'MACD'])
49 |
50 | # Process data
51 | processed_data = self.processor.process(self.test_data)
52 |
53 | # Verify processing results
54 | self.assertFalse(processed_data.isnull().any().any())
55 | self.assertIn('MA_5', processed_data.columns)
56 | self.assertIn('RSI', processed_data.columns)
57 | self.assertIn('MACD', processed_data.columns)
58 |
59 | def test_data_streaming(self):
60 | """Test real-time data streaming."""
61 | received_data = []
62 |
63 | def on_data(data):
64 | received_data.append(data)
65 |
66 | # Subscribe to data stream
67 | self.stream.subscribe(
68 | symbols=['BTC-USD', 'ETH-USD'],
69 | callback=on_data
70 | )
71 |
72 | # Wait for some data
73 | import time
74 | time.sleep(5)
75 |
76 | # Stop streaming
77 | self.stream.stop()
78 |
79 | # Verify received data
80 | self.assertGreater(len(received_data), 0)
81 | self.assertIsInstance(received_data[0], dict)
82 | self.assertIn('timestamp', received_data[0])
83 | self.assertIn('price', received_data[0])
84 |
85 | def test_data_pipeline(self):
86 | """Test complete data pipeline."""
87 | # 1. Load historical data
88 | historical_data = self.loader.load_csv("historical_data.csv")
89 |
90 | # 2. Process historical data
91 | processed_historical = self.processor.process(historical_data)
92 |
93 | # 3. Set up real-time pipeline
94 | def process_stream(data):
95 | # Process streaming data
96 | df = pd.DataFrame([data])
97 | processed = self.processor.process(df)
98 | return processed.iloc[0].to_dict()
99 |
100 | # 4. Start streaming with processing
101 | processed_stream = []
102 | self.stream.subscribe(
103 | symbols=['BTC-USD'],
104 | callback=lambda x: processed_stream.append(process_stream(x))
105 | )
106 |
107 |         import time  # `time` was imported locally in test_data_streaming only
108 |         time.sleep(5)  # wait for some streamed data
109 | self.stream.stop()
110 |
111 | # Verify pipeline results
112 | self.assertGreater(len(processed_stream), 0)
113 | self.assertIsInstance(processed_stream[0], dict)
114 | self.assertIn('MA_5', processed_stream[0])
115 |
116 | def test_data_validation(self):
117 | """Test data validation and quality checks."""
118 | # Define validation rules
119 | rules = {
120 | 'price': {
121 | 'type': 'float',
122 | 'min': 0,
123 | 'max': 1e6
124 | },
125 | 'volume': {
126 | 'type': 'float',
127 | 'min': 0
128 | },
129 | 'timestamp': {
130 | 'type': 'datetime'
131 | }
132 | }
133 |
134 | # Validate test data
135 | validation_result = self.processor.validate(
136 | self.test_data,
137 | rules
138 | )
139 |
140 | # Verify validation results
141 | self.assertTrue(validation_result['is_valid'])
142 | self.assertEqual(len(validation_result['errors']), 0)
143 |
144 | def _generate_test_data(self):
145 | """Generate synthetic market data for testing."""
146 | dates = pd.date_range(
147 | start=datetime.now() - timedelta(days=100),
148 | end=datetime.now(),
149 | freq='1H'
150 | )
151 |
152 | data = pd.DataFrame({
153 | 'timestamp': dates,
154 | 'price': np.random.randn(len(dates)).cumsum() + 100,
155 | 'volume': np.random.randint(1000, 100000, len(dates)),
156 | 'high': np.random.randn(len(dates)).cumsum() + 102,
157 | 'low': np.random.randn(len(dates)).cumsum() + 98
158 | })
159 |
160 | return data
161 |
162 | if __name__ == '__main__':
163 | unittest.main()
--------------------------------------------------------------------------------
/tests/integration/test_monitoring.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import time
3 | import numpy as np
4 | from concurrent.futures import ThreadPoolExecutor
5 | from deepchain.core.monitoring.metrics import MetricsCollector
6 | from deepchain.core.monitoring.alerts import AlertManager
7 | from deepchain.core.monitoring.performance import PerformanceTracker
8 |
9 | class TestMonitoringSystem(unittest.TestCase):
10 | """
11 | Integration tests for the monitoring system.
12 | """
13 |
14 | def setUp(self):
15 | """Set up test environment."""
16 | self.metrics = MetricsCollector()
17 | self.alerts = AlertManager()
18 | self.performance = PerformanceTracker()
19 |
20 | # Configure test thresholds
21 | self.thresholds = {
22 | 'latency': 100, # ms
23 | 'error_rate': 0.01, # 1%
24 | 'memory_usage': 1024 * 1024 * 1024, # 1GB
25 | 'model_drift': 0.1 # 10% drift
26 | }
27 |
28 | def test_metrics_collection(self):
29 | """Test metrics collection and aggregation."""
30 | # Generate test metrics
31 | for _ in range(100):
32 | self.metrics.record_latency(np.random.randint(10, 200))
33 | self.metrics.record_memory_usage(np.random.randint(100_000, 1_000_000))
34 | self.metrics.record_prediction(np.random.random())
35 |
36 | # Get aggregated metrics
37 | stats = self.metrics.get_statistics()
38 |
39 | # Verify metrics
40 | self.assertIn('avg_latency', stats)
41 | self.assertIn('max_memory_usage', stats)
42 | self.assertIn('prediction_count', stats)
43 | self.assertEqual(stats['prediction_count'], 100)
44 |
45 | def test_alert_system(self):
46 | """Test alert generation and handling."""
47 | # Configure alert thresholds
48 | self.alerts.set_thresholds(self.thresholds)
49 |
50 | # Generate test alerts
51 | high_latency = 200 # ms
52 | high_memory = 2 * 1024 * 1024 * 1024 # 2GB
53 |
54 | # Record metrics that should trigger alerts
55 | self.metrics.record_latency(high_latency)
56 | self.metrics.record_memory_usage(high_memory)
57 |
58 | # Check alerts
59 | active_alerts = self.alerts.get_active_alerts()
60 | self.assertGreater(len(active_alerts), 0)
61 |
62 | # Verify alert contents
63 | latency_alert = next(
64 | (a for a in active_alerts if a['type'] == 'high_latency'),
65 | None
66 | )
67 | self.assertIsNotNone(latency_alert)
68 | self.assertEqual(latency_alert['severity'], 'critical')
69 |
70 | def test_performance_tracking(self):
71 | """Test performance tracking over time."""
72 | # Record initial performance baseline
73 | baseline = {
74 | 'latency': 50,
75 | 'throughput': 1000,
76 | 'error_rate': 0.005
77 | }
78 | self.performance.set_baseline(baseline)
79 |
80 | # Simulate performance changes
81 | for _ in range(10):
82 | metrics = {
83 | 'latency': 50 + np.random.randint(-10, 10),
84 | 'throughput': 1000 + np.random.randint(-100, 100),
85 | 'error_rate': 0.005 + np.random.random() * 0.001
86 | }
87 | self.performance.record_metrics(metrics)
88 |
89 | # Get performance report
90 | report = self.performance.get_performance_report()
91 |
92 | # Verify report contents
93 | self.assertIn('latency_trend', report)
94 | self.assertIn('throughput_trend', report)
95 | self.assertIn('error_rate_trend', report)
96 | self.assertIn('model_drift', report)
97 |
98 | def test_concurrent_monitoring(self):
99 | """Test monitoring system under concurrent load."""
100 | def simulate_load():
101 | # Simulate a busy strategy
102 | latency = np.random.randint(10, 100)
103 | memory = np.random.randint(100_000, 1_000_000)
104 | prediction = np.random.random()
105 |
106 | self.metrics.record_latency(latency)
107 | self.metrics.record_memory_usage(memory)
108 | self.metrics.record_prediction(prediction)
109 |
110 | return True
111 |
112 | # Run concurrent operations
113 | n_concurrent = 100
114 | with ThreadPoolExecutor(max_workers=10) as executor:
115 | results = list(executor.map(
116 | lambda _: simulate_load(),
117 | range(n_concurrent)
118 | ))
119 |
120 | # Verify results
121 | self.assertTrue(all(results))
122 |
123 | # Check metrics were recorded correctly
124 | stats = self.metrics.get_statistics()
125 | self.assertEqual(stats['prediction_count'], n_concurrent)
126 |
127 | def test_system_health_check(self):
128 | """Test system health monitoring."""
129 | # Record system metrics
130 | self.metrics.record_system_metrics({
131 | 'cpu_usage': 0.5,
132 | 'memory_usage': 0.7,
133 | 'disk_usage': 0.3,
134 | 'network_io': 1000
135 | })
136 |
137 | # Get health status
138 | health = self.metrics.get_system_health()
139 |
140 | # Verify health check
141 | self.assertIn('status', health)
142 | self.assertIn('components', health)
143 | self.assertIn('last_update', health)
144 |
145 | # Check component status
146 | components = health['components']
147 | self.assertIn('cpu', components)
148 | self.assertIn('memory', components)
149 | self.assertIn('disk', components)
150 | self.assertIn('network', components)
151 |
152 | if __name__ == '__main__':
153 | unittest.main()
--------------------------------------------------------------------------------
/tests/integration/test_workflow.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import torch
3 | import numpy as np
4 | from examples.simple_strategy import SimpleRLStrategy
5 | from examples.simple_validator import SimpleValidator
6 | from examples.simple_deployer import SimpleDeployer
7 |
8 | class TestCompleteWorkflow(unittest.TestCase):
9 | """
10 | Integration tests for complete DeepChain workflow.
11 | """
12 |
13 | def setUp(self):
14 | """Set up test environment."""
15 |         # Use the concrete example implementations; the core bases are abstract
16 |         self.strategy = SimpleRLStrategy()
17 |         self.validator = SimpleValidator(web3_provider="http://localhost:8545")
18 |         self.deployer = SimpleDeployer()
19 | # Generate test market data
20 | self.market_data = self._generate_market_data()
21 |
22 | def test_complete_workflow(self):
23 | """Test complete workflow from training to deployment."""
24 | # 1. Train strategy
25 | print("\nStep 1: Training Strategy")
26 | self.strategy.train(self.market_data)
27 |
28 | # Verify training results
29 | state = self._get_sample_state()
30 | action = self.strategy.predict(state)
31 | self.assertIsNotNone(action)
32 |
33 | # 2. Register and validate strategy
34 | print("\nStep 2: Validating Strategy")
35 | strategy_id = "test_integration_001"
36 | metadata = {
37 | 'name': 'Integration Test Strategy',
38 | 'version': '1.0.0',
39 | 'type': 'reinforcement_learning'
40 | }
41 |
42 | # Register strategy
43 | tx_hash = self.validator.register_strategy(strategy_id, metadata)
44 | self.assertIsNotNone(tx_hash)
45 |
46 | # Generate and verify execution proof
47 | execution_data = {
48 | 'timestamp': 1234567890,
49 | 'state': state,
50 | 'action': action,
51 | 'reward': 0.1
52 | }
53 |
54 | proof = self.validator.generate_proof(strategy_id, execution_data)
55 | is_valid = self.validator.verify_execution(
56 | strategy_id,
57 | execution_data,
58 | proof
59 | )
60 | self.assertTrue(is_valid)
61 |
62 | # 3. Deploy optimized model
63 | print("\nStep 3: Deploying Strategy")
64 | # Compress model
65 | compressed_model = self.deployer.compress_model(
66 | self.strategy.policy,
67 | target_size=1024 * 1024 # 1MB target
68 | )
69 |
70 | # Export model
71 | self.deployer.export_model(
72 | compressed_model,
73 | format="onnx",
74 | path="./test_model.onnx"
75 | )
76 |
77 | # Validate deployed model
78 | test_data = {
79 | 'inputs': np.random.randn(100, 10),
80 | 'targets': np.random.randn(100, 3)
81 | }
82 | metrics = self.deployer.validate_performance(compressed_model, test_data)
83 |
84 | self.assertIn('mse', metrics)
85 | self.assertIn('latency', metrics)
86 |         self.assertLess(metrics['latency'], 100)  # Latency under 100ms (reported in ms)
87 |
88 | def test_error_recovery(self):
89 | """Test system's ability to handle and recover from errors."""
90 | # 1. Test invalid model training
91 | with self.assertRaises(Exception):
92 | self.strategy.train(None) # Invalid training data
93 |
94 | # 2. Test invalid strategy validation
95 | with self.assertRaises(Exception):
96 | self.validator.validate_strategy(
97 | "nonexistent_id",
98 | {'invalid': 'proof'}
99 | )
100 |
101 | # 3. Test invalid model deployment
102 | with self.assertRaises(Exception):
103 | self.deployer.export_model(
104 | None,
105 | "invalid_format",
106 | "invalid_path"
107 | )
108 |
109 | def test_concurrent_operations(self):
110 | """Test system's ability to handle concurrent operations."""
111 | from concurrent.futures import ThreadPoolExecutor
112 | import time
113 |
114 | def validate_operation():
115 | state = self._get_sample_state()
116 | action = self.strategy.predict(state)
117 | return action is not None
118 |
119 | # Run concurrent predictions
120 | n_concurrent = 10
121 | start_time = time.time()
122 |
123 | with ThreadPoolExecutor(max_workers=n_concurrent) as executor:
124 | results = list(executor.map(
125 | lambda _: validate_operation(),
126 | range(n_concurrent)
127 | ))
128 |
129 | end_time = time.time()
130 |
131 | # Verify results
132 | self.assertTrue(all(results))
133 | self.assertLess(end_time - start_time, 5) # Should complete within 5 seconds
134 |
135 | def _generate_market_data(self):
136 | """Generate synthetic market data for testing."""
137 | return {
138 | 'states': np.random.randn(1000, 10),
139 | 'actions': np.random.randint(0, 3, 1000),
140 | 'rewards': np.random.randn(1000),
141 | 'next_states': np.random.randn(1000, 10)
142 | }
143 |
144 | def _get_sample_state(self):
145 | """Get a sample market state."""
146 | return {
147 | 'price': 100.0,
148 | 'volume': 1000000,
149 | 'high': 105.0,
150 | 'low': 95.0,
151 | 'ma_5': 101.0,
152 | 'ma_10': 100.5,
153 | 'rsi': 55.0,
154 | 'macd': 0.5,
155 | 'bollinger_upper': 110.0,
156 | 'bollinger_lower': 90.0
157 | }
158 |
159 | if __name__ == '__main__':
160 | unittest.main()
--------------------------------------------------------------------------------
/tests/performance/test_performance.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import time
3 | import torch
4 | import numpy as np
5 | from concurrent.futures import ThreadPoolExecutor
6 | import psutil
7 | import os
8 |
9 | from examples.simple_strategy import SimpleRLStrategy
10 | from examples.simple_validator import SimpleValidator
11 | from examples.simple_deployer import SimpleDeployer
12 |
13 | class PerformanceTest(unittest.TestCase):
14 | """
15 | Performance tests for the DeepChain framework.
16 | """
17 |
18 | def setUp(self):
19 | """
20 | Set up test environment before each test case.
21 | """
22 | self.strategy = SimpleRLStrategy()
23 | self.validator = SimpleValidator()
24 | self.deployer = SimpleDeployer()
25 |
26 | # Generate test data
27 | self.test_data = {
28 | 'states': np.random.randn(1000, 10),
29 | 'actions': np.random.randint(0, 3, 1000),
30 | 'rewards': np.random.randn(1000)
31 | }
32 |
33 | def test_training_performance(self):
34 | """
35 | Test training performance.
36 | """
37 | start_time = time.time()
38 | start_memory = self._get_memory_usage()
39 |
40 | # Train model
41 | self.strategy.train(self.test_data, n_epochs=100)
42 |
43 | end_time = time.time()
44 | end_memory = self._get_memory_usage()
45 |
46 | training_time = end_time - start_time
47 | memory_used = end_memory - start_memory
48 |
49 | print(f"\nTraining Performance:")
50 | print(f"Time taken: {training_time:.2f} seconds")
51 | print(f"Memory used: {memory_used / 1024 / 1024:.2f} MB")
52 |
53 | # Assert reasonable performance
54 | self.assertLess(training_time, 60) # Should train within 60 seconds
55 | self.assertLess(memory_used / 1024 / 1024, 1000) # Should use less than 1GB
56 |
57 | def test_inference_latency(self):
58 | """
59 | Test inference latency.
60 | """
61 | state = {
62 | 'price': 100.0,
63 | 'volume': 1000000,
64 | 'high': 105.0,
65 | 'low': 95.0,
66 | 'ma_5': 101.0,
67 | 'ma_10': 100.5,
68 | 'rsi': 55.0,
69 | 'macd': 0.5,
70 | 'bollinger_upper': 110.0,
71 | 'bollinger_lower': 90.0
72 | }
73 |
74 | latencies = []
75 | n_iterations = 1000
76 |
77 | # Warm up
78 | for _ in range(10):
79 | _ = self.strategy.predict(state)
80 |
81 | # Measure latency
82 | for _ in range(n_iterations):
83 | start_time = time.time()
84 | _ = self.strategy.predict(state)
85 | latencies.append(time.time() - start_time)
86 |
87 | avg_latency = np.mean(latencies) * 1000 # Convert to ms
88 | p95_latency = np.percentile(latencies, 95) * 1000
89 | p99_latency = np.percentile(latencies, 99) * 1000
90 |
91 | print(f"\nInference Latency:")
92 | print(f"Average: {avg_latency:.2f}ms")
93 | print(f"P95: {p95_latency:.2f}ms")
94 | print(f"P99: {p99_latency:.2f}ms")
95 |
96 | # Assert reasonable latency
97 | self.assertLess(avg_latency, 10) # Average latency should be under 10ms
98 | self.assertLess(p99_latency, 50) # P99 latency should be under 50ms
99 |
100 | def test_concurrent_validation(self):
101 | """
102 | Test concurrent validation performance.
103 | """
104 | n_concurrent = 100
105 | strategy_id = "test_strategy"
106 |
107 | # Register strategy
108 | self.validator.register_strategy(strategy_id, {'type': 'test'})
109 |
110 | execution_data = {
111 | 'timestamp': int(time.time()),
112 | 'actions': ['buy', 'hold', 'sell']
113 | }
114 |
115 | def validate_execution():
116 | proof = self.validator.generate_proof(strategy_id, execution_data)
117 | return self.validator.verify_execution(strategy_id, execution_data, proof)
118 |
119 | start_time = time.time()
120 |
121 | # Run concurrent validations
122 | with ThreadPoolExecutor(max_workers=10) as executor:
123 | results = list(executor.map(lambda _: validate_execution(), range(n_concurrent)))
124 |
125 | end_time = time.time()
126 | total_time = end_time - start_time
127 |
128 | success_rate = sum(results) / len(results) * 100
129 | throughput = n_concurrent / total_time
130 |
131 | print(f"\nConcurrent Validation Performance:")
132 | print(f"Total time: {total_time:.2f} seconds")
133 | print(f"Throughput: {throughput:.2f} validations/second")
134 | print(f"Success rate: {success_rate:.2f}%")
135 |
136 | # Assert reasonable performance
137 | self.assertGreater(throughput, 10) # Should handle at least 10 validations/second
138 | self.assertEqual(success_rate, 100) # All validations should succeed
139 |
140 | def test_model_compression_ratio(self):
141 | """
142 | Test model compression performance.
143 | """
144 | original_size = self._get_model_size(self.strategy.policy)
145 |
146 | start_time = time.time()
147 | compressed_model = self.deployer.compress_model(
148 | self.strategy.policy,
149 | target_size=original_size // 4 # Target 75% compression
150 | )
151 | compression_time = time.time() - start_time
152 |
153 | compressed_size = self._get_model_size(compressed_model)
154 | compression_ratio = original_size / compressed_size
155 |
156 | print(f"\nModel Compression Performance:")
157 | print(f"Original size: {original_size / 1024:.2f}KB")
158 | print(f"Compressed size: {compressed_size / 1024:.2f}KB")
159 | print(f"Compression ratio: {compression_ratio:.2f}x")
160 | print(f"Compression time: {compression_time:.2f} seconds")
161 |
162 | # Assert reasonable compression
163 | self.assertGreater(compression_ratio, 2) # Should achieve at least 2x compression
164 | self.assertLess(compression_time, 30) # Should compress within 30 seconds
165 |
166 | def _get_memory_usage(self) -> int:
167 | """Helper function to get current memory usage."""
168 | process = psutil.Process(os.getpid())
169 | return process.memory_info().rss
170 |
171 | def _get_model_size(self, model: torch.nn.Module) -> int:
172 | """Helper function to get model size in bytes."""
173 | return sum(p.numel() * p.element_size() for p in model.parameters())
174 |
175 | if __name__ == '__main__':
176 | unittest.main()
--------------------------------------------------------------------------------
/tests/test_blockchain.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for blockchain integrations.
3 | """
4 |
5 | import unittest
6 | from unittest.mock import Mock, patch
7 | import json
8 | from deepchain.core.blockchain.chains import (
9 | ChainFactory,
10 | SolanaChain,
11 | SolanaValidator
12 | )
13 |
14 | class TestSolanaIntegration(unittest.TestCase):
15 | """Test Solana blockchain integration."""
16 |
17 | def setUp(self):
18 | """Set up test environment."""
19 | self.endpoint = "https://api.devnet.solana.com" # Use devnet for testing
20 | self.chain = SolanaChain(endpoint=self.endpoint)
21 | self.validator = SolanaValidator(self.chain)
22 |
23 | # Mock Solana client
24 | self.mock_client = Mock()
25 | self.chain.client = self.mock_client
26 |
27 | def test_connection(self):
28 | """Test Solana connection."""
29 | # Mock successful connection
30 | self.mock_client.get_health.return_value = True
31 | self.assertTrue(self.chain.connect())
32 |
33 | # Mock failed connection
34 | self.mock_client.get_health.side_effect = Exception("Connection failed")
35 | self.assertFalse(self.chain.connect())
36 |
37 | def test_balance(self):
38 | """Test balance checking."""
39 | test_address = "test_address"
40 | expected_balance = 1.5 # SOL
41 |
42 | # Mock balance response
43 | self.mock_client.get_balance.return_value = {
44 | 'result': {'value': int(expected_balance * 1e9)} # Convert to lamports
45 | }
46 |
47 | balance = self.chain.get_balance(test_address)
48 | self.assertEqual(balance, expected_balance)
49 | self.mock_client.get_balance.assert_called_with(test_address)
50 |
51 | def test_transaction(self):
52 | """Test transaction handling."""
53 | test_tx = "test_transaction"
54 | test_hash = "test_hash"
55 |
56 | # Mock transaction sending
57 | self.mock_client.send_transaction.return_value = {
58 | 'result': test_hash
59 | }
60 |
61 | # Mock transaction verification
62 | self.mock_client.get_confirmed_transaction.return_value = {
63 | 'result': {'signature': test_hash}
64 | }
65 |
66 | # Test sending transaction
67 | tx_hash = self.chain.send_transaction(test_tx)
68 | self.assertEqual(tx_hash, test_hash)
69 |
70 | # Test verifying transaction
71 | self.assertTrue(self.chain.verify_transaction(test_hash))
72 |
73 | def test_strategy_validation(self):
74 | """Test strategy validation."""
75 | strategy_id = "test_strategy"
76 | test_proof = {'type': 'test_proof'}
77 |
78 | # Mock transaction responses
79 | self.mock_client.send_transaction.return_value = {'result': 'tx_hash'}
80 | self.mock_client.get_confirmed_transaction.return_value = {'result': {}}
81 |
82 | # Test strategy validation
83 | result = self.validator.validate_strategy(strategy_id, test_proof)
84 | self.assertTrue(result)
85 |
86 | # Test failed validation
87 | self.mock_client.send_transaction.side_effect = Exception("Validation failed")
88 | result = self.validator.validate_strategy(strategy_id, test_proof)
89 | self.assertFalse(result)
90 |
91 | def test_strategy_registration(self):
92 | """Test strategy registration."""
93 | strategy_id = "test_strategy"
94 | metadata = {'version': '1.0.0'}
95 | expected_tx = "tx_hash"
96 |
97 | # Mock responses
98 | self.mock_client.send_transaction.return_value = {'result': expected_tx}
99 |
100 | # Test registration
101 | tx_hash = self.validator.register_strategy(strategy_id, metadata)
102 | self.assertEqual(tx_hash, expected_tx)
103 |
104 | def test_execution_verification(self):
105 | """Test execution verification."""
106 | strategy_id = "test_strategy"
107 | execution_data = {
108 | 'timestamp': 1234567890,
109 | 'action': 'buy',
110 | 'amount': 1.0
111 | }
112 |
113 | # Mock responses
114 | self.mock_client.send_transaction.return_value = {'result': 'tx_hash'}
115 | self.mock_client.get_confirmed_transaction.return_value = {'result': {}}
116 |
117 | # Test verification
118 | result = self.validator.verify_execution(strategy_id, execution_data)
119 | self.assertTrue(result)
120 |
121 | def test_proof_generation(self):
122 | """Test proof generation and verification."""
123 | strategy_id = "test_strategy"
124 | execution_data = {
125 | 'timestamp': 1234567890,
126 | 'action': 'buy',
127 | 'amount': 1.0
128 | }
129 |
130 | # Generate proof
131 | proof = self.validator.generate_proof(strategy_id, execution_data)
132 |
133 | # Verify proof structure
134 | self.assertEqual(proof['type'], 'solana_zk_proof')
135 | self.assertEqual(proof['strategy_id'], strategy_id)
136 | self.assertEqual(proof['data'], execution_data)
137 |
138 | # Verify proof
139 | self.assertTrue(self.validator.verify_proof(proof))
140 |
141 | def test_chain_factory(self):
142 | """Test chain factory."""
143 | # Test Solana chain creation
144 | chain = ChainFactory.create_chain('solana', endpoint=self.endpoint)
145 | self.assertIsInstance(chain, SolanaChain)
146 |
147 | # Test validator creation
148 | validator = ChainFactory.create_validator(chain)
149 | self.assertIsInstance(validator, SolanaValidator)
150 |
151 | # Test unsupported chain
152 | with self.assertRaises(ValueError):
153 | ChainFactory.create_chain('unsupported_chain')
154 |
155 | def tearDown(self):
156 | """Clean up test environment."""
157 | pass
158 |
159 | if __name__ == '__main__':
160 | unittest.main()
--------------------------------------------------------------------------------
/tests/test_data.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for data processing module.
3 | """
4 |
5 | import unittest
6 | import numpy as np
7 | import pandas as pd
8 | from deepchain.core.data.loader import DataLoader
9 | from deepchain.core.data.processor import DataProcessor
10 |
11 | class TestDataProcessing(unittest.TestCase):
12 | """Test data processing functionality."""
13 |
14 | def setUp(self):
15 | """Set up test environment."""
16 | self.loader = DataLoader()
17 | self.processor = DataProcessor()
18 |
19 | # Create sample data
20 | self.sample_data = pd.DataFrame({
21 | 'timestamp': pd.date_range('2024-01-01', periods=100, freq='H'),
22 | 'price': np.random.randn(100).cumsum() + 100,
23 | 'volume': np.random.randint(1000, 10000, 100),
24 | 'missing_col': [np.nan if i % 10 == 0 else i for i in range(100)]
25 | })
26 |
27 | def test_data_loading(self):
28 | """Test data loading functionality."""
29 | # Test CSV loading
30 | self.loader.save_csv(self.sample_data, 'test_data.csv')
31 | loaded_data = self.loader.load_csv('test_data.csv')
32 | pd.testing.assert_frame_equal(loaded_data, self.sample_data)
33 |
34 | # Test API loading
35 | api_data = self.loader.load_api(
36 | 'https://api.example.com/data',
37 | {'symbol': 'BTC/USD'}
38 | )
39 | self.assertIsInstance(api_data, pd.DataFrame)
40 |
41 | def test_missing_value_handling(self):
42 | """Test missing value handling."""
43 | # Add missing value handling step
44 | self.processor.add_step('fill_missing', method='forward')
45 | processed_data = self.processor.process(self.sample_data)
46 |
47 | # Check no missing values
48 | self.assertFalse(processed_data.isnull().any().any())
49 |
50 | # Check forward fill
51 |         missing_mask = self.sample_data['missing_col'].isnull()
52 |         prev_vals = processed_data['missing_col'].shift(1)
53 |         # Compare each filled value to the preceding row; skip a leading NaN
54 |         check = missing_mask & prev_vals.notna()
55 |         self.assertTrue((processed_data.loc[check, 'missing_col'] == prev_vals.loc[check]).all())
56 |
57 | def test_normalization(self):
58 | """Test data normalization."""
59 | # Add normalization step
60 | self.processor.add_step('normalize', method='zscore')
61 | processed_data = self.processor.process(self.sample_data)
62 |
63 | # Check z-score normalization
64 | numeric_cols = ['price', 'volume']
65 | for col in numeric_cols:
66 | normalized = processed_data[col]
67 | self.assertAlmostEqual(normalized.mean(), 0, places=2)
68 | self.assertAlmostEqual(normalized.std(), 1, places=2)
69 |
70 | def test_technical_indicators(self):
71 | """Test technical indicator calculation."""
72 | # Add technical indicators
73 | self.processor.add_step(
74 | 'add_technical_indicators',
75 | indicators=['MA', 'RSI', 'MACD', 'BBANDS']
76 | )
77 | processed_data = self.processor.process(self.sample_data)
78 |
79 | # Check indicators exist
80 | expected_columns = [
81 | 'price_MA_5', 'price_MA_10',
82 | 'price_RSI_14',
83 | 'price_MACD', 'price_MACD_signal',
84 | 'price_BB_upper', 'price_BB_lower'
85 | ]
86 | for col in expected_columns:
87 | self.assertIn(col, processed_data.columns)
88 |
89 | def test_data_validation(self):
90 | """Test data validation."""
91 | # Add validation rules
92 | validation_rules = {
93 | 'price': {'min': 0, 'max': 1000},
94 | 'volume': {'min': 0},
95 | 'timestamp': {'unique': True}
96 | }
97 | self.processor.add_step('validate', rules=validation_rules)
98 |
99 | # Test valid data
100 | try:
101 | self.processor.process(self.sample_data)
102 | except Exception as e:
103 | self.fail(f"Validation failed: {e}")
104 |
105 | # Test invalid data
106 | invalid_data = self.sample_data.copy()
107 | invalid_data.loc[0, 'price'] = -1
108 | with self.assertRaises(ValueError):
109 | self.processor.process(invalid_data)
110 |
111 | def test_feature_engineering(self):
112 | """Test feature engineering."""
113 | # Add feature engineering steps
114 | self.processor.add_step(
115 | 'add_features',
116 | features={
117 | 'price_change': lambda df: df['price'].pct_change(),
118 | 'volume_ma': lambda df: df['volume'].rolling(5).mean(),
119 | 'price_momentum': lambda df: df['price'].diff(5)
120 | }
121 | )
122 | processed_data = self.processor.process(self.sample_data)
123 |
124 | # Check engineered features
125 | expected_features = ['price_change', 'volume_ma', 'price_momentum']
126 | for feature in expected_features:
127 | self.assertIn(feature, processed_data.columns)
128 |
129 | def test_data_pipeline(self):
130 | """Test complete data processing pipeline."""
131 | # Configure complete pipeline
132 | self.processor.add_step('fill_missing', method='forward')
133 | self.processor.add_step('normalize', method='zscore')
134 | self.processor.add_step(
135 | 'add_technical_indicators',
136 | indicators=['MA', 'RSI']
137 | )
138 | self.processor.add_step(
139 | 'add_features',
140 | features={'price_change': lambda df: df['price'].pct_change()}
141 | )
142 |
143 | # Process data
144 | processed_data = self.processor.process(self.sample_data)
145 |
146 |         # Derived columns keep leading NaNs from their lookback windows,
147 |         # so only the original columns are checked for completeness
148 |         self.assertFalse(processed_data[self.sample_data.columns].isnull().any().any())
149 |         self.assertIn('price_MA_5', processed_data.columns)
150 |         self.assertIn('price_change', processed_data.columns)
150 |
151 | def tearDown(self):
152 | """Clean up test environment."""
153 | import os
154 | if os.path.exists('test_data.csv'):
155 | os.remove('test_data.csv')
156 |
157 | if __name__ == '__main__':
158 | unittest.main()
--------------------------------------------------------------------------------
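A minimal sketch of the step-based DataProcessor interface these tests exercise may help when reading them; the handler names and dispatch scheme below are illustrative assumptions, not the shipped API:

    import pandas as pd

    class DataProcessor:
        """Hypothetical step-based processor; steps run in registration order."""

        def __init__(self):
            self.steps = []  # list of (step_name, keyword_params)

        def add_step(self, name, **params):
            self.steps.append((name, params))

        def process(self, data: pd.DataFrame) -> pd.DataFrame:
            result = data.copy()
            for name, params in self.steps:
                # Dispatch each named step to a handler method
                result = getattr(self, f"_step_{name}")(result, **params)
            return result

        def _step_fill_missing(self, data, method="forward"):
            return data.ffill() if method == "forward" else data.bfill()

        def _step_normalize(self, data, method="zscore"):
            # Only the z-score variant is sketched here
            numeric = data.select_dtypes("number").columns
            data[numeric] = (data[numeric] - data[numeric].mean()) / data[numeric].std()
            return data

Dispatching on step name keeps the pipeline declarative, which is what lets test_data_pipeline compose fill, normalize, indicator, and feature steps without subclassing.
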
/tests/test_data_stream.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for data streaming system.
3 | """
4 |
5 | import unittest
7 | import pandas as pd
8 | import numpy as np
9 | from datetime import datetime, timedelta
10 | from unittest.mock import AsyncMock, Mock, patch
11 | from deepchain.core.data.stream import (
12 | DataSource,
13 | WebSocketSource,
14 | DataStream,
15 | DataPipeline,
16 | MarketDataStream
17 | )
18 | from deepchain.core.data.indicators import (
19 | calculate_ma,
20 | calculate_rsi,
21 | calculate_macd,
22 | calculate_bollinger_bands
23 | )
24 |
25 | class MockWebSocket:
26 | """Mock WebSocket for testing."""
27 |
28 | def __init__(self):
29 | self.messages = []
30 |
31 | async def send(self, message):
32 | self.messages.append(message)
33 |
34 | async def close(self):
35 | pass
36 |
37 | class TestDataStream(unittest.IsolatedAsyncioTestCase):
38 | """Test data streaming functionality."""
39 |
40 | def setUp(self):
41 | """Set up test environment."""
42 | self.stream = DataStream(buffer_size=1000)
43 | self.mock_source = Mock(spec=DataSource)
44 | self.stream.add_source("test", self.mock_source)
45 |
46 | async def test_source_connection(self):
47 | """Test data source connection."""
48 | self.mock_source.connect.return_value = True
49 | await self.stream.start()
50 | self.mock_source.connect.assert_called_once()
51 |
52 | def test_processor_addition(self):
53 | """Test adding data processors."""
54 | def test_processor(data):
55 | return data
56 |
57 | self.stream.add_processor(test_processor)
58 | self.assertIn(test_processor, self.stream.processors)
59 |
60 | async def test_stream_lifecycle(self):
61 | """Test stream start and stop."""
62 | await self.stream.start()
63 | self.assertTrue(self.stream.running)
64 |
65 | await self.stream.stop()
66 | self.assertFalse(self.stream.running)
67 |
68 | def test_buffer_management(self):
69 | """Test data buffer management."""
70 | self.assertEqual(self.stream.data_buffer.maxsize, 1000)
71 | self.assertTrue(self.stream.data_buffer.empty())
72 |
73 | class TestWebSocketSource(unittest.IsolatedAsyncioTestCase):
74 | """Test WebSocket data source."""
75 |
76 | def setUp(self):
77 | """Set up test environment."""
78 | self.source = WebSocketSource(
79 | url="ws://test.com",
80 | api_key="test-key"
81 | )
82 | self.mock_ws = MockWebSocket()
83 |
84 |     @patch('websockets.connect', new_callable=AsyncMock)  # so the patched connect can be awaited
85 | async def test_connection(self, mock_connect):
86 | """Test WebSocket connection."""
87 | mock_connect.return_value = self.mock_ws
88 | connected = await self.source.connect()
89 | self.assertTrue(connected)
90 | self.assertTrue(self.source.connected)
91 |
92 | async def test_subscription(self):
93 | """Test market data subscription."""
94 | self.source.ws = self.mock_ws
95 | self.source.connected = True
96 |
97 | symbols = ['BTCUSDT', 'ETHUSDT']
98 | await self.source.subscribe(symbols)
99 |
100 | last_message = self.mock_ws.messages[-1]
101 | self.assertEqual(last_message['type'], 'subscribe')
102 | self.assertEqual(last_message['symbols'], symbols)
103 |
104 | async def test_historical_data(self):
105 | """Test historical data retrieval."""
106 | with patch('aiohttp.ClientSession.get') as mock_get:
107 | mock_get.return_value.__aenter__.return_value.json.return_value = [
108 | {'timestamp': 1, 'close': 100},
109 | {'timestamp': 2, 'close': 101}
110 | ]
111 |
112 | data = await self.source.get_historical_data(
113 | 'BTCUSDT',
114 | datetime.now() - timedelta(days=1),
115 | datetime.now(),
116 | '1h'
117 | )
118 |
119 | self.assertIsInstance(data, pd.DataFrame)
120 | self.assertEqual(len(data), 2)
121 |
122 | class TestMarketDataStream(unittest.TestCase):
123 | """Test market data streaming."""
124 |
125 | def setUp(self):
126 | """Set up test environment."""
127 | self.stream = MarketDataStream(buffer_size=1000)
128 |
129 | # Create sample data
130 | self.sample_data = pd.DataFrame({
131 |             'timestamp': pd.date_range('2024-01-01', periods=100, freq='h'),
132 | 'open': np.random.randn(100).cumsum() + 100,
133 | 'high': np.random.randn(100).cumsum() + 102,
134 | 'low': np.random.randn(100).cumsum() + 98,
135 | 'close': np.random.randn(100).cumsum() + 100,
136 | 'volume': np.random.randint(1000, 10000, 100)
137 | })
138 |
139 | def test_indicator_addition(self):
140 | """Test adding technical indicators."""
141 | self.stream.add_indicator(
142 | "ma_20",
143 | calculate_ma,
144 | period=20
145 | )
146 | self.assertIn("ma_20", self.stream.indicators)
147 |
148 | def test_indicator_calculation(self):
149 | """Test technical indicator calculation."""
150 | # Add indicators
151 | self.stream.add_indicator(
152 | "ma_20",
153 | calculate_ma,
154 | period=20
155 | )
156 | self.stream.add_indicator(
157 | "rsi",
158 | calculate_rsi,
159 | period=14
160 | )
161 |
162 | # Process data
163 | data = self.sample_data.copy()
164 | for name, (func, params) in self.stream.indicators.items():
165 | result = func(data, **params)
166 | if isinstance(result, pd.DataFrame):
167 | for col in result.columns:
168 | data[f'{name}_{col}'] = result[col]
169 | else:
170 | data[name] = result
171 |
172 | # Verify results
173 | self.assertIn("ma_20", data.columns)
174 | self.assertIn("rsi", data.columns)
175 | self.assertTrue(all(data["ma_20"].notna().tail(80)))
176 | self.assertTrue(all(data["rsi"].notna().tail(80)))
177 |
178 | class TestDataPipeline(unittest.TestCase):
179 | """Test data processing pipeline."""
180 |
181 | def setUp(self):
182 | """Set up test environment."""
183 | self.pipeline = DataPipeline()
184 |
185 | # Create sample data
186 | self.sample_data = pd.DataFrame({
187 | 'close': [100, 101, np.nan, 103, 104],
188 | 'volume': [1000, np.nan, 3000, 4000, 5000]
189 | })
190 |
191 | def test_step_addition(self):
192 | """Test adding processing steps."""
193 | def test_step(data):
194 | return data
195 |
196 | self.pipeline.add_step(test_step)
197 | self.assertIn(test_step, self.pipeline.steps)
198 |
199 | def test_data_processing(self):
200 | """Test data processing through pipeline."""
201 | # Add processing steps
202 | def fill_missing(data):
203 |             return data.ffill()  # fillna(method='ffill') is deprecated
204 |
205 | def add_returns(data):
206 |             data['returns'] = data['close'].pct_change().fillna(0)  # row 0 has no prior price
207 | return data
208 |
209 | self.pipeline.add_step(fill_missing)
210 | self.pipeline.add_step(add_returns)
211 |
212 | # Process data
213 | result = self.pipeline.process(self.sample_data)
214 |
215 | # Verify results
216 | self.assertFalse(result.isnull().any().any())
217 | self.assertIn('returns', result.columns)
218 | self.assertEqual(len(result), len(self.sample_data))
219 |
220 | if __name__ == '__main__':
221 | unittest.main()
--------------------------------------------------------------------------------
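The indicator functions imported from deepchain.core.data.indicators are consumed above as func(data, **params), returning a Series (or a DataFrame for multi-output indicators such as MACD). As a sketch of that contract, assuming a 'close' column and a 'period' keyword:

    import pandas as pd

    def calculate_ma(data: pd.DataFrame, period: int = 20, column: str = "close") -> pd.Series:
        """Simple moving average over the trailing `period` rows."""
        return data[column].rolling(window=period).mean()

    def calculate_rsi(data: pd.DataFrame, period: int = 14, column: str = "close") -> pd.Series:
        """RSI from rolling average gains and losses (simple-MA variant)."""
        delta = data[column].diff()
        gain = delta.clip(lower=0).rolling(window=period).mean()
        loss = (-delta.clip(upper=0)).rolling(window=period).mean()
        return 100 - 100 / (1 + gain / loss)

Both leave NaNs in their leading rows (the lookback window), which is why test_indicator_calculation only asserts non-null values on the tail of the frame.
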
/tests/test_deployer.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import torch
3 | import torch.nn as nn
4 | import numpy as np
5 | from pathlib import Path
6 | import tempfile
7 | import os
8 |
9 | from deepchain.core.edge.deployer import EdgeOptimizer
11 |
12 | class SimpleModel(nn.Module):
13 | """Simple model for testing."""
14 | def __init__(self):
15 | super().__init__()
16 | self.network = nn.Sequential(
17 | nn.Linear(10, 64),
18 | nn.ReLU(),
19 | nn.Linear(64, 3)
20 | )
21 | self.input_dim = 10
22 |
23 | def forward(self, x):
24 | return self.network(x)
25 |
26 | class TestEdgeDeployer(unittest.TestCase):
27 | """
28 | Unit tests for EdgeOptimizer implementation.
29 | """
30 |
31 | def setUp(self):
32 | """
33 | Set up test environment before each test case.
34 | """
35 | self.deployer = EdgeOptimizer()
36 | self.model = SimpleModel()
37 | self.test_input = torch.randn(1, 10)
38 |
39 | # Create temporary directory for test artifacts
40 | self.test_dir = tempfile.mkdtemp()
41 |
42 | def tearDown(self):
43 | """
44 | Clean up after tests.
45 | """
46 | # Remove temporary files
47 | for file in Path(self.test_dir).glob("*"):
48 | os.remove(file)
49 | os.rmdir(self.test_dir)
50 |
51 | def test_model_compression(self):
52 | """
53 | Test model compression functionality.
54 | """
55 | original_size = self._get_model_size(self.model)
56 | target_size = original_size // 2
57 |
58 | compressed_model = self.deployer.compress_model(
59 | self.model,
60 | target_size=target_size
61 | )
62 |
63 | compressed_size = self._get_model_size(compressed_model)
64 | self.assertLessEqual(compressed_size, target_size)
65 |
66 | def test_model_export_onnx(self):
67 | """
68 | Test ONNX model export.
69 | """
70 | export_path = os.path.join(self.test_dir, "model.onnx")
71 | self.deployer.export_model(self.model, "onnx", export_path)
72 |
73 | self.assertTrue(os.path.exists(export_path))
74 | self.assertGreater(os.path.getsize(export_path), 0)
75 |
76 | def test_model_export_torchscript(self):
77 | """
78 | Test TorchScript model export.
79 | """
80 | export_path = os.path.join(self.test_dir, "model.pt")
81 | self.deployer.export_model(self.model, "torchscript", export_path)
82 |
83 | self.assertTrue(os.path.exists(export_path))
84 | self.assertGreater(os.path.getsize(export_path), 0)
85 |
86 | def test_invalid_export_format(self):
87 | """
88 | Test handling of invalid export format.
89 | """
90 | with self.assertRaises(ValueError):
91 | self.deployer.export_model(
92 | self.model,
93 | "invalid_format",
94 | "model.invalid"
95 | )
96 |
97 | def test_performance_validation(self):
98 | """
99 | Test model performance validation.
100 | """
101 | test_data = {
102 | 'inputs': np.random.randn(100, 10),
103 | 'targets': np.random.randn(100, 3)
104 | }
105 |
106 | metrics = self.deployer.validate_performance(self.model, test_data)
107 |
108 | self.assertIn('mse', metrics)
109 | self.assertIn('mae', metrics)
110 | self.assertIn('latency', metrics)
111 | self.assertIn('model_size', metrics)
112 |
113 | def test_quantization(self):
114 | """
115 | Test model quantization.
116 | """
117 | quantized_model = self.deployer.quantize_model(self.model)
118 |
119 | # Verify model still works
120 | with torch.no_grad():
121 | output = quantized_model(self.test_input)
122 | self.assertEqual(output.shape, (1, 3))
123 |
124 | def test_pruning(self):
125 | """
126 | Test model pruning.
127 | """
128 | pruned_model = self.deployer.prune_model(self.model)
129 |
130 | # Verify model still works
131 | with torch.no_grad():
132 | output = pruned_model(self.test_input)
133 | self.assertEqual(output.shape, (1, 3))
134 |
135 | def test_benchmark(self):
136 | """
137 | Test model benchmarking.
138 | """
139 | device_specs = {
140 | 'input_shape': (1, 10),
141 | 'required_memory': 1024 * 1024, # 1MB
142 | 'required_compute': 1000000
143 | }
144 |
145 | results = self.deployer.benchmark_model(self.model, device_specs)
146 |
147 | self.assertIn('latency', results)
148 | self.assertIn('memory_usage', results)
149 | self.assertIn('device_compatibility', results)
150 |
151 | def test_compression_with_latency_target(self):
152 | """
153 | Test model compression with latency target.
154 | """
155 | compressed_model = self.deployer.compress_model(
156 | self.model,
157 | target_latency=0.1 # 100ms
158 | )
159 |
160 |         # Measure actual latency. torch.cuda.Event timing requires a GPU,
161 |         # so use wall-clock timing, which also runs on CPU-only hosts.
162 |         import time
163 | 
164 |         with torch.no_grad():
165 |             # Warm-up run so one-time setup cost is not measured
166 |             _ = compressed_model(self.test_input)
167 | 
168 |             start = time.perf_counter()
169 |             _ = compressed_model(self.test_input)
170 |             latency = time.perf_counter() - start
171 | 
172 |         self.assertLessEqual(latency, 0.1)
173 |
174 | def _get_model_size(self, model: nn.Module) -> int:
175 | """Helper function to get model size in bytes."""
176 | return sum(p.numel() * p.element_size() for p in model.parameters())
177 |
178 | if __name__ == '__main__':
179 | unittest.main()
--------------------------------------------------------------------------------
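quantize_model and prune_model are exercised here only through their outputs. One plausible way to implement both with stock PyTorch utilities (a sketch of the technique, not necessarily what EdgeOptimizer does internally):

    import torch
    import torch.nn as nn
    from torch.nn.utils import prune

    def prune_model(model: nn.Module, amount: float = 0.5) -> nn.Module:
        # Zero the smallest-magnitude weights in every Linear layer, then
        # make the pruning permanent so the module serializes normally.
        for module in model.modules():
            if isinstance(module, nn.Linear):
                prune.l1_unstructured(module, name="weight", amount=amount)
                prune.remove(module, "weight")
        return model

    def quantize_model(model: nn.Module) -> nn.Module:
        # Dynamic int8 quantization: Linear weights stored as int8,
        # activations quantized on the fly at inference time.
        return torch.ao.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

Both transforms preserve the module's forward signature, which is all test_quantization and test_pruning assert.
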
/tests/test_ethereum.py:
--------------------------------------------------------------------------------
1 | """
2 | Unit tests for Ethereum-based chain integrations.
3 | """
4 |
5 | import unittest
6 | from unittest.mock import Mock, patch
8 | from web3 import Web3
10 | from deepchain.core.blockchain.ethereum import (
11 | EthereumChain,
12 | BaseChain,
13 | BNBChain,
14 | EthereumValidator
15 | )
16 |
17 | class TestEthereumIntegration(unittest.TestCase):
18 | """Test Ethereum-based chain integrations."""
19 |
20 | def setUp(self):
21 | """Set up test environment."""
22 | # Initialize chains
23 | self.eth_chain = EthereumChain(endpoint="http://localhost:8545")
24 | self.base_chain = BaseChain(endpoint="http://localhost:8545")
25 | self.bnb_chain = BNBChain(endpoint="http://localhost:8545")
26 |
27 | # Mock Web3 instances
28 | self.mock_eth_web3 = Mock()
29 | self.mock_base_web3 = Mock()
30 | self.mock_bnb_web3 = Mock()
31 |
32 | self.eth_chain.web3 = self.mock_eth_web3
33 | self.base_chain.web3 = self.mock_base_web3
34 | self.bnb_chain.web3 = self.mock_bnb_web3
35 |
36 | # Create validators
37 | self.eth_validator = EthereumValidator(self.eth_chain)
38 | self.base_validator = EthereumValidator(self.base_chain)
39 | self.bnb_validator = EthereumValidator(self.bnb_chain)
40 |
41 | # Test data
42 | self.test_address = "0x742d35Cc6634C0532925a3b844Bc454e4438f44e"
43 | self.test_private_key = "0x" + "1" * 64
44 | self.test_contract_address = "0x" + "2" * 40
45 |
46 | def test_ethereum_connection(self):
47 | """Test Ethereum connection."""
48 | # Mock successful connection
49 | self.mock_eth_web3.is_connected.return_value = True
50 | self.assertTrue(self.eth_chain.connect())
51 |
52 | # Mock failed connection
53 | self.mock_eth_web3.is_connected.side_effect = Exception("Connection failed")
54 | self.assertFalse(self.eth_chain.connect())
55 |
56 | def test_base_connection(self):
57 | """Test Base connection."""
58 | # Mock successful connection
59 | self.mock_base_web3.is_connected.return_value = True
60 | self.assertTrue(self.base_chain.connect())
61 |
62 | # Mock failed connection
63 | self.mock_base_web3.is_connected.side_effect = Exception("Connection failed")
64 | self.assertFalse(self.base_chain.connect())
65 |
66 | def test_bnb_connection(self):
67 | """Test BNB Chain connection."""
68 | # Mock successful connection
69 | self.mock_bnb_web3.is_connected.return_value = True
70 | self.assertTrue(self.bnb_chain.connect())
71 |
72 | # Mock failed connection
73 | self.mock_bnb_web3.is_connected.side_effect = Exception("Connection failed")
74 | self.assertFalse(self.bnb_chain.connect())
75 |
76 | def test_balance_checking(self):
77 | """Test balance checking across chains."""
78 | expected_balance = 1.5 # ETH/BNB
79 | balance_wei = Web3.to_wei(expected_balance, 'ether')
80 |
81 | # Mock balance responses
82 | self.mock_eth_web3.eth.get_balance.return_value = balance_wei
83 | self.mock_base_web3.eth.get_balance.return_value = balance_wei
84 | self.mock_bnb_web3.eth.get_balance.return_value = balance_wei
85 |
86 | # Test Ethereum balance
87 | balance = self.eth_chain.get_balance(self.test_address)
88 | self.assertEqual(balance, expected_balance)
89 |
90 | # Test Base balance
91 | balance = self.base_chain.get_balance(self.test_address)
92 | self.assertEqual(balance, expected_balance)
93 |
94 | # Test BNB Chain balance
95 | balance = self.bnb_chain.get_balance(self.test_address)
96 | self.assertEqual(balance, expected_balance)
97 |
98 | def test_contract_deployment(self):
99 | """Test contract deployment."""
100 | test_abi = [{"type": "function", "name": "test"}]
101 | test_bytecode = "0x123456"
102 |
103 | # Mock contract deployment
104 | mock_contract = Mock()
105 | mock_contract.constructor.return_value.build_transaction.return_value = {
106 | 'to': None,
107 | 'data': test_bytecode
108 | }
109 |
110 | self.mock_eth_web3.eth.contract.return_value = mock_contract
111 | self.mock_eth_web3.eth.get_transaction_count.return_value = 0
112 | self.mock_eth_web3.eth.gas_price = 20000000000
113 |
114 | # Mock transaction sending
115 | self.mock_eth_web3.eth.account.sign_transaction.return_value.rawTransaction = b'raw_tx'
116 | self.mock_eth_web3.eth.send_raw_transaction.return_value = b'tx_hash'
117 | self.mock_eth_web3.eth.wait_for_transaction_receipt.return_value = {
118 | 'contractAddress': self.test_contract_address,
119 | 'status': 1
120 | }
121 |
122 | # Deploy contract
123 | contract_address = self.eth_chain.deploy_contract(test_abi, test_bytecode)
124 | self.assertEqual(contract_address, self.test_contract_address)
125 |
126 | def test_strategy_validation(self):
127 | """Test strategy validation across chains."""
128 | strategy_id = "test_strategy"
129 | test_proof = {'type': 'test_proof'}
130 |
131 | # Mock contract calls
132 | for chain in [self.eth_chain, self.base_chain, self.bnb_chain]:
133 | chain.contract = Mock()
134 | chain.contract.functions.validateStrategy.return_value.build_transaction.return_value = {
135 | 'to': self.test_contract_address,
136 | 'data': b'validate'
137 | }
138 |
139 | # Test validation on each chain
140 | for validator in [self.eth_validator, self.base_validator, self.bnb_validator]:
141 | result = validator.validate_strategy(strategy_id, test_proof)
142 | self.assertTrue(result)
143 |
144 | def test_strategy_registration(self):
145 | """Test strategy registration across chains."""
146 | strategy_id = "test_strategy"
147 | metadata = {'version': '1.0.0'}
148 | expected_tx = "0x" + "3" * 64
149 |
150 | # Mock contract calls
151 | for chain in [self.eth_chain, self.base_chain, self.bnb_chain]:
152 | chain.contract = Mock()
153 | chain.contract.functions.registerStrategy.return_value.build_transaction.return_value = {
154 | 'to': self.test_contract_address,
155 | 'data': b'register'
156 | }
157 | chain.web3.eth.get_transaction_count.return_value = 0
158 | chain.web3.eth.gas_price = 20000000000
159 | chain.web3.eth.account.sign_transaction.return_value.rawTransaction = b'raw_tx'
160 | chain.web3.eth.send_raw_transaction.return_value = expected_tx.encode()
161 | chain.web3.to_hex.return_value = expected_tx
162 |
163 | # Test registration on each chain
164 | for validator in [self.eth_validator, self.base_validator, self.bnb_validator]:
165 | tx_hash = validator.register_strategy(strategy_id, metadata)
166 | self.assertEqual(tx_hash, expected_tx)
167 |
168 | def test_execution_verification(self):
169 | """Test execution verification across chains."""
170 | strategy_id = "test_strategy"
171 | execution_data = {
172 | 'timestamp': 1234567890,
173 | 'action': 'buy',
174 | 'amount': 1.0
175 | }
176 | test_proof = {'type': 'test_proof'}
177 |
178 | # Mock contract calls
179 | for chain in [self.eth_chain, self.base_chain, self.bnb_chain]:
180 | chain.contract = Mock()
181 | chain.contract.functions.verifyExecution.return_value.build_transaction.return_value = {
182 | 'to': self.test_contract_address,
183 | 'data': b'verify'
184 | }
185 |
186 | # Test verification on each chain
187 | for validator in [self.eth_validator, self.base_validator, self.bnb_validator]:
188 | result = validator.verify_execution(strategy_id, execution_data, test_proof)
189 | self.assertTrue(result)
190 |
191 | def test_proof_verification(self):
192 | """Test proof verification across chains."""
193 | test_proof = {
194 | 'type': 'ethereum_zk_proof',
195 | 'data': {'test': 'data'}
196 | }
197 |
198 | # Mock contract calls
199 | for chain in [self.eth_chain, self.base_chain, self.bnb_chain]:
200 | chain.contract = Mock()
201 | chain.contract.functions.verifyProof.return_value.call.return_value = True
202 |
203 | # Test proof verification on each chain
204 | for validator in [self.eth_validator, self.base_validator, self.bnb_validator]:
205 | result = validator.verify_proof(test_proof)
206 | self.assertTrue(result)
207 |
208 | def tearDown(self):
209 | """Clean up test environment."""
210 | pass
211 |
212 | if __name__ == '__main__':
213 | unittest.main()
--------------------------------------------------------------------------------
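test_balance_checking relies on the chain wrappers converting wei (what eth.get_balance returns) into ether. A sketch of that conversion with web3.py, matching the Web3.to_wei call used to build the mock value:

    from web3 import Web3

    w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))

    def get_balance_eth(address: str) -> float:
        balance_wei = w3.eth.get_balance(Web3.to_checksum_address(address))
        # from_wei returns a Decimal; the wrappers are assumed to hand back a float
        return float(Web3.from_wei(balance_wei, "ether"))
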
/tests/test_strategy.py:
--------------------------------------------------------------------------------
1 | import unittest
3 | import numpy as np
4 | from examples.simple_strategy import SimpleRLStrategy
5 |
6 | class TestSimpleRLStrategy(unittest.TestCase):
7 | """
8 | Unit tests for SimpleRLStrategy implementation.
9 | """
10 |
11 | def setUp(self):
12 | """
13 | Set up test environment before each test case.
14 | """
15 | self.strategy = SimpleRLStrategy(input_dim=10, hidden_dim=64, output_dim=3)
16 | self.test_state = {
17 | 'price': 100.0,
18 | 'volume': 1000000,
19 | 'high': 105.0,
20 | 'low': 95.0,
21 | 'ma_5': 101.0,
22 | 'ma_10': 100.5,
23 | 'rsi': 55.0,
24 | 'macd': 0.5,
25 | 'bollinger_upper': 110.0,
26 | 'bollinger_lower': 90.0
27 | }
28 |
29 | def test_initialization(self):
30 | """
31 | Test strategy initialization.
32 | """
33 | self.assertIsNotNone(self.strategy.policy)
34 | self.assertEqual(self.strategy.policy.network[0].in_features, 10)
35 | self.assertEqual(self.strategy.policy.network[0].out_features, 64)
36 | self.assertEqual(self.strategy.policy.network[2].out_features, 3)
37 |
38 | def test_predict(self):
39 | """
40 | Test prediction functionality.
41 | """
42 | prediction = self.strategy.predict(self.test_state)
43 |
44 | self.assertIn('action', prediction)
45 | self.assertIn('confidence', prediction)
46 | self.assertIn(prediction['action'], ['buy', 'hold', 'sell'])
47 | self.assertGreaterEqual(prediction['confidence'], 0.0)
48 | self.assertLessEqual(prediction['confidence'], 1.0)
49 |
50 | def test_training(self):
51 | """
52 | Test training functionality.
53 | """
54 | train_data = {
55 | 'states': np.random.randn(100, 10),
56 | 'actions': np.random.randint(0, 3, 100),
57 | 'rewards': np.random.randn(100)
58 | }
59 |
60 | # Should not raise any exceptions
61 | self.strategy.train(train_data, n_epochs=2, lr=0.001)
62 |
63 | def test_save_load(self):
64 | """
65 | Test model saving and loading.
66 | """
67 | # Save model
68 | save_path = "test_model.pth"
69 | self.strategy.save(save_path)
70 |
71 | # Create new strategy and load saved model
72 | new_strategy = SimpleRLStrategy(input_dim=10, hidden_dim=64, output_dim=3)
73 | new_strategy.load(save_path)
74 |
75 | # Compare predictions
76 | pred1 = self.strategy.predict(self.test_state)
77 | pred2 = new_strategy.predict(self.test_state)
78 |
79 |         self.assertEqual(pred1['action'], pred2['action'])
80 |         self.assertEqual(pred1['confidence'], pred2['confidence'])
81 | 
82 |         # Remove the checkpoint written by this test
83 |         import os
84 |         os.remove(save_path)
81 |
82 | def test_action_space(self):
83 | """
84 | Test action space definition.
85 | """
86 | action_space = self.strategy.get_action_space()
87 |
88 | self.assertEqual(action_space['type'], 'discrete')
89 | self.assertEqual(action_space['size'], 3)
90 | self.assertEqual(action_space['actions'], ['buy', 'hold', 'sell'])
91 |
92 | def test_state_space(self):
93 | """
94 | Test state space definition.
95 | """
96 | state_space = self.strategy.get_state_space()
97 |
98 | self.assertEqual(state_space['type'], 'continuous')
99 | self.assertEqual(state_space['shape'], (10,))
100 | self.assertEqual(len(state_space['features']), 10)
101 |
102 | def test_reward_calculation(self):
103 | """
104 | Test reward calculation.
105 | """
106 | state = {'price': 100, 'next_price': 110}
107 |
108 | # Test buy action
109 | buy_reward = self.strategy.get_reward(state, {'action': 'buy'})
110 | self.assertEqual(buy_reward, 10)
111 |
112 | # Test sell action
113 | sell_reward = self.strategy.get_reward(state, {'action': 'sell'})
114 | self.assertEqual(sell_reward, -10)
115 |
116 | # Test hold action
117 | hold_reward = self.strategy.get_reward(state, {'action': 'hold'})
118 | self.assertEqual(hold_reward, 0)
119 |
120 | if __name__ == '__main__':
121 | unittest.main()
--------------------------------------------------------------------------------
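test_reward_calculation pins down the reward contract: a buy earns the subsequent price move, a sell earns its negative, and a hold earns zero. A reward function satisfying those assertions (a sketch; the real implementation lives in examples/simple_strategy.py):

    def get_reward(state: dict, decision: dict) -> float:
        price_change = state["next_price"] - state["price"]  # +10 in the test
        if decision["action"] == "buy":
            return price_change
        if decision["action"] == "sell":
            return -price_change
        return 0.0
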
/tests/test_validator.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import Mock, patch
4 | from deepchain.core.blockchain.validator import ZKValidator
5 | from deepchain.core.exceptions import BlockchainConnectionError, ValidationFailedError
6 |
7 | class TestZKValidator(unittest.TestCase):
8 | """
9 | Unit tests for ZKValidator implementation.
10 | """
11 |
12 | def setUp(self):
13 | """
14 | Set up test environment before each test case.
15 | """
16 | self.validator = ZKValidator(web3_provider="http://localhost:8545")
17 | self.strategy_id = "test_strategy_001"
18 | self.test_metadata = {
19 | 'name': 'Test Strategy',
20 | 'version': '0.1.0',
21 | 'type': 'reinforcement_learning'
22 | }
23 |
24 | @patch('web3.Web3.HTTPProvider')
25 | def test_connection(self, mock_provider):
26 | """
27 | Test blockchain connection.
28 | """
29 | mock_provider.return_value = Mock()
30 | validator = ZKValidator(web3_provider="http://localhost:8545")
31 | self.assertIsNotNone(validator.w3)
32 |
33 | def test_register_strategy(self):
34 | """
35 | Test strategy registration.
36 | """
37 | tx_hash = self.validator.register_strategy(
38 | self.strategy_id,
39 | self.test_metadata
40 | )
41 | self.assertIsInstance(tx_hash, str)
42 |         self.assertEqual(len(tx_hash), 64)  # 32-byte hash as unprefixed hex
43 |
44 | def test_validate_strategy(self):
45 | """
46 | Test strategy validation.
47 | """
48 | # Register strategy first
49 | self.validator.register_strategy(self.strategy_id, self.test_metadata)
50 |
51 | # Test validation
52 | proof = {'data': self.test_metadata}
53 | is_valid = self.validator.validate_strategy(self.strategy_id, proof)
54 | self.assertTrue(is_valid)
55 |
56 | def test_validate_nonexistent_strategy(self):
57 | """
58 | Test validation of non-existent strategy.
59 | """
60 | proof = {'data': self.test_metadata}
61 | is_valid = self.validator.validate_strategy("nonexistent_id", proof)
62 | self.assertFalse(is_valid)
63 |
64 | def test_verify_execution(self):
65 | """
66 | Test execution verification.
67 | """
68 | execution_data = {
69 | 'timestamp': 1234567890,
70 | 'actions': ['buy', 'hold', 'sell'],
71 | 'performance': 0.15
72 | }
73 |
74 | proof = self.validator.generate_proof(self.strategy_id, execution_data)
75 | is_valid = self.validator.verify_execution(
76 | self.strategy_id,
77 | execution_data,
78 | proof
79 | )
80 | self.assertTrue(is_valid)
81 |
82 | def test_verify_execution_without_proof(self):
83 | """
84 | Test execution verification without proof.
85 | """
86 | execution_data = {
87 | 'timestamp': 1234567890,
88 | 'actions': ['buy', 'hold', 'sell']
89 | }
90 |
91 | is_valid = self.validator.verify_execution(
92 | self.strategy_id,
93 | execution_data
94 | )
95 | self.assertFalse(is_valid)
96 |
97 | def test_generate_proof(self):
98 | """
99 | Test proof generation.
100 | """
101 | execution_data = {
102 | 'timestamp': 1234567890,
103 | 'actions': ['buy', 'hold', 'sell']
104 | }
105 |
106 | proof = self.validator.generate_proof(self.strategy_id, execution_data)
107 |
108 | self.assertIn('strategy_id', proof)
109 | self.assertIn('execution_hash', proof)
110 | self.assertIn('timestamp', proof)
111 |
112 | def test_verify_proof(self):
113 | """
114 | Test proof verification.
115 | """
116 | execution_data = {
117 | 'timestamp': 1234567890,
118 | 'actions': ['buy', 'hold', 'sell']
119 | }
120 |
121 | proof = self.validator.generate_proof(self.strategy_id, execution_data)
122 | is_valid = self.validator.verify_proof(proof)
123 | self.assertTrue(is_valid)
124 |
125 | def test_verify_invalid_proof(self):
126 | """
127 | Test verification of invalid proof.
128 | """
129 | invalid_proof = {
130 | 'strategy_id': self.strategy_id
131 | # Missing required fields
132 | }
133 |
134 | is_valid = self.validator.verify_proof(invalid_proof)
135 | self.assertFalse(is_valid)
136 |
137 | @patch('web3.Web3.HTTPProvider')
138 | def test_connection_error(self, mock_provider):
139 | """
140 | Test handling of connection errors.
141 | """
142 | mock_provider.side_effect = Exception("Connection failed")
143 |
144 | with self.assertRaises(BlockchainConnectionError):
145 | ZKValidator(web3_provider="http://invalid:8545")
146 |
147 | def test_validation_error(self):
148 | """
149 | Test handling of validation errors.
150 | """
151 | invalid_metadata = "invalid" # Should be a dictionary
152 |
153 | with self.assertRaises(ValidationFailedError):
154 | self.validator.register_strategy(self.strategy_id, invalid_metadata)
155 |
156 | if __name__ == '__main__':
157 | unittest.main()
--------------------------------------------------------------------------------
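test_register_strategy expects a 64-character transaction hash, and test_generate_proof expects strategy_id, execution_hash, and timestamp fields. A SHA-256-based sketch consistent with both (illustrative; not necessarily ZKValidator's actual proof scheme):

    import hashlib
    import json
    import time

    def generate_proof(strategy_id: str, execution_data: dict) -> dict:
        # Canonical JSON keeps the digest stable across key orderings
        payload = json.dumps(execution_data, sort_keys=True).encode()
        return {
            "strategy_id": strategy_id,
            "execution_hash": hashlib.sha256(payload).hexdigest(),  # 64 hex chars
            "timestamp": int(time.time()),
        }

    def verify_proof(proof: dict) -> bool:
        # Proofs missing any required field are rejected, as
        # test_verify_invalid_proof expects
        return all(k in proof for k in ("strategy_id", "execution_hash", "timestamp"))
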