├── utils ├── init.py ├── constants.py └── helpers.py ├── data_storage ├── ipfs │ ├── init.py │ ├── ipfs_client.py │ └── ipfs_node.py └── init.py ├── oracle_node ├── init.py └── oracle_node.py ├── tokenomics ├── init.py ├── token_distribution.py └── tokenomics_model.py ├── ai-oracle-network ├── data │ ├── init.py │ ├── sentiment_analysis.csv │ ├── market_trends.csv │ └── economic_indicators.csv └── models │ ├── init.py │ ├── data_loader.py │ └── oracle_model.py ├── blockchain_node ├── init.py └── blockchain_node.py ├── incident_response ├── init.py ├── team.py ├── incident_response_plan.json ├── playbook.py ├── incident.py └── incident_response_plan.py ├── cybersecurity └── threat_detection │ ├── init.py │ ├── anomaly_detection.py │ └── intrusion_detection.py ├── .gitignore ├── Dockerfile ├── docker-compose.yml ├── requirements.txt ├── truffle-config.js ├── migrations ├── 2_deploy_contracts.js └── 1_initial_migration.js ├── test ├── oracle.test.js ├── reserve.test.js └── stablecoin.test.js ├── backend └── app │ ├── models │ ├── social_impact.py │ ├── incentives.py │ └── governance.py │ ├── routes │ └── api.py │ ├── services │ ├── iot_integration_service.py │ ├── security_service.py │ ├── market_analysis_service.py │ ├── privacy_service.py │ └── cross_chain_service.py │ └── utils │ ├── sustainability.py │ └── quantum_resistance.py ├── src ├── storage │ ├── decentralized_storage.py │ └── swarm_layer.py ├── oracle │ ├── oracle_engine.py │ └── aggregator.py ├── zkp │ └── zkp_service.py ├── wallet │ └── quantum_wallet.py ├── identity │ └── did_management.py ├── incentive │ └── token_incentive.py ├── analytics │ ├── privacy_analytics.py │ └── anomaly_detector.py ├── reputation │ └── reputation_engine.py ├── defense │ └── ai_intrusion_detection.py ├── oracles │ └── reputation_oracle.py ├── privacy │ ├── mixer.py │ └── zk_selective_disclosure.py ├── bridge │ └── cross_chain_bridge.py ├── incident_response │ └── autoresponder.py ├── governance │ ├── proposal_marketplace.py 
│ └── ai_voting_engine.py ├── agents │ └── aea_api.py ├── forensics │ └── audit_trail.py ├── quantum_security.py ├── api_gateway │ └── gateway.py ├── compliance │ └── cross_chain_compliance.py ├── did │ └── identity_manager.py ├── interoperability │ └── bridge.py ├── simulation │ └── attack_vectors.py ├── zkp_engine.py ├── constants.py └── ai_governance.py ├── frontend └── src │ └── components │ ├── Gamification.js │ ├── Dashboard.js │ └── Wallet.js ├── contracts ├── reserve.sol ├── PiConsensus.sol ├── stablecoin.sol ├── oracle.sol └── UpgradeableProxy.sol ├── README.md └── app.py /utils/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the utils directory a Python package 2 | -------------------------------------------------------------------------------- /data_storage/ipfs/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the ipfs directory a Python package 2 | -------------------------------------------------------------------------------- /oracle_node/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the oracle_node directory a Python package 2 | -------------------------------------------------------------------------------- /tokenomics/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the tokenomics directory a Python package 2 | -------------------------------------------------------------------------------- /ai-oracle-network/data/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the data directory a Python package 2 | -------------------------------------------------------------------------------- /data_storage/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the 
data_storage directory a Python package 2 | -------------------------------------------------------------------------------- /ai-oracle-network/models/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the models directory a Python package 2 | -------------------------------------------------------------------------------- /blockchain_node/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the blockchain_node directory a Python package 2 | -------------------------------------------------------------------------------- /incident_response/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the incident_response directory a Python package 2 | -------------------------------------------------------------------------------- /cybersecurity/threat_detection/init.py: -------------------------------------------------------------------------------- 1 | # Empty file to make the threat_detection directory a Python package 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.tar 2 | *.tar.* 3 | *.jar 4 | *.exe 5 | *.msi 6 | *.zip 7 | *.tgz 8 | *.log 9 | *.log.* 10 | *.sig 11 | 12 | pkg/ 13 | src/ 14 | -------------------------------------------------------------------------------- /incident_response/team.py: -------------------------------------------------------------------------------- 1 | class Team: 2 | def __init__(self, team_config): 3 | self.members = team_config['members'] 4 | self.incidents = {} 5 | 6 | def assign_to_incident(self, incident): 7 | self.incidents[incident.id] = incident 8 | 9 | def get_incident(self, incident_id): 10 | return self.incidents.get(incident_id) 11 | 
-------------------------------------------------------------------------------- /ai-oracle-network/data/sentiment_analysis.csv: -------------------------------------------------------------------------------- 1 | "Date","Positive Sentiment","Negative Sentiment","Neutral Sentiment" 2 | "2022-01-01",0.5,0.3,0.2 3 | "2022-02-01",0.6,0.2,0.2 4 | "2022-03-01",0.7,0.1,0.2 5 | "2022-04-01",0.8,0.1,0.1 6 | "2022-05-01",0.9,0.0,0.1 7 | "2022-06-01",0.9,0.0,0.1 8 | "2022-07-01",0.8,0.1,0.1 9 | "2022-08-01",0.7,0.2,0.1 10 | "2022-09-01",0.6,0.3,0.1 11 | "2022-10-01",0.5,0.4,0.1 12 | "2022-11-01",0.4,0.5,0.1 13 | "2022-12-01",0.3,0.6,0.1 14 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python image as a base 2 | FROM python:3.9-slim 3 | 4 | # Set the working directory to /app 5 | WORKDIR /app 6 | 7 | # Copy the requirements file 8 | COPY requirements.txt . 9 | 10 | # Install the dependencies 11 | RUN pip install -r requirements.txt 12 | 13 | # Copy the application code 14 | COPY . . 
15 | 16 | # Expose the port 17 | EXPOSE 8000 18 | 19 | # Run the command to start the development server 20 | CMD ["python", "app.py"] 21 | -------------------------------------------------------------------------------- /ai-oracle-network/data/market_trends.csv: -------------------------------------------------------------------------------- 1 | "Date","Stock Market Index","Bond Market Index","Commodity Market Index" 2 | "2022-01-01",1000,500,200 3 | "2022-02-01",1050,520,220 4 | "2022-03-01",1100,550,240 5 | "2022-04-01",1150,580,260 6 | "2022-05-01",1200,610,280 7 | "2022-06-01",1250,640,300 8 | "2022-07-01",1300,670,320 9 | "2022-08-01",1350,700,340 10 | "2022-09-01",1400,730,360 11 | "2022-10-01",1450,760,380 12 | "2022-11-01",1500,790,400 13 | "2022-12-01",1550,820,420 14 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | app: 5 | build: . 
6 | ports: 7 | - "8000:8000" 8 | depends_on: 9 | - db 10 | environment: 11 | - DATABASE_URL=postgres://user:password@db:5432/pi_consensus 12 | db: 13 | image: postgres 14 | environment: 15 | - POSTGRES_USER=user 16 | - POSTGRES_PASSWORD=password 17 | - POSTGRES_DB=pi_consensus 18 | volumes: 19 | - db-data:/var/lib/postgresql/data 20 | 21 | volumes: 22 | db-data: 23 | -------------------------------------------------------------------------------- /ai-oracle-network/data/economic_indicators.csv: -------------------------------------------------------------------------------- 1 | "Date","GDP","Inflation Rate","Unemployment Rate","Interest Rate" 2 | "2022-01-01",10000,2.5,4.2,1.5 3 | "2022-02-01",10100,2.6,4.1,1.6 4 | "2022-03-01",10200,2.7,4.0,1.7 5 | "2022-04-01",10300,2.8,3.9,1.8 6 | "2022-05-01",10400,2.9,3.8,1.9 7 | "2022-06-01",10500,3.0,3.7,2.0 8 | "2022-07-01",10600,3.1,3.6,2.1 9 | "2022-08-01",10700,3.2,3.5,2.2 10 | "2022-09-01",10800,3.3,3.4,2.3 11 | "2022-10-01",10900,3.4,3.3,2.4 12 | "2022-11-01",11000,3.5,3.2,2.5 13 | "2022-12-01",11100,3.6,3.1,2.6 14 | -------------------------------------------------------------------------------- /incident_response/incident_response_plan.json: -------------------------------------------------------------------------------- 1 | { 2 | "team": { 3 | "members": ["John Doe", "Jane Doe", "Bob Smith"] 4 | }, 5 | "playbooks": { 6 | "default": { 7 | "steps": [ 8 | { 9 | "type": "notification", 10 | "message": "Incident detected!" 
11 | }, 12 | { 13 | "type": "task", 14 | "title": "Investigate incident", 15 | "description": "Gather more information about the incident" 16 | } 17 | ] 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /utils/constants.py: -------------------------------------------------------------------------------- 1 | # Constants file 2 | 3 | # API Endpoints 4 | API_ENDPOINT_IPFS = 'https://ipfs.infura.io:5001' 5 | API_ENDPOINT_ETHEREUM = 'https://mainnet.infura.io/v3/YOUR_PROJECT_ID' 6 | 7 | # Ethereum Network IDs 8 | MAINNET_NETWORK_ID = 1 9 | ROPSTEN_NETWORK_ID = 3 10 | RINKEBY_NETWORK_ID = 4 11 | GANACHE_NETWORK_ID = 1337 12 | 13 | # IPFS Pinning Services 14 | PINATA_PINNING_SERVICE = 'https://api.pinata.cloud/pinning/pinFileToIPFS' 15 | INFURA_PINNING_SERVICE = 'https://ipfs.infura.io:5001/api/v0/pin/add' 16 | 17 | # Ethereum Gas Prices (in GWei) 18 | GAS_PRICE_FAST = 20 19 | GAS_PRICE_NORMAL = 10 20 | GAS_PRICE_SLOW = 5 21 | 22 | # Other Constants 23 | MAX_RETRIES = 5 24 | TIMEOUT = 30 25 | -------------------------------------------------------------------------------- /incident_response/playbook.py: -------------------------------------------------------------------------------- 1 | class Playbook: 2 | def __init__(self, playbook_config): 3 | self.steps = playbook_config['steps'] 4 | 5 | def run(self, incident_id): 6 | for step in self.steps: 7 | if step['type'] == 'notification': 8 | self.send_notification(incident_id, step['message']) 9 | elif step['type'] == 'task': 10 | self.create_task(incident_id, step['title'], step['description']) 11 | # Add more step types as needed 12 | 13 | def send_notification(self, incident_id, message): 14 | # Implement notification logic here 15 | print(f"Sending notification for incident {incident_id}: {message}") 16 | 17 | def create_task(self, incident_id, title, description): 18 | # Implement task creation logic here 19 | print(f"Creating task for incident {incident_id}: 
{title} - {description}") 20 | -------------------------------------------------------------------------------- /data_storage/ipfs/ipfs_client.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class IPFSClient: 5 | def __init__(self, node_url): 6 | self.node_url = node_url 7 | 8 | def add_file(self, file_path): 9 | with open(file_path, 'rb') as f: 10 | file_data = f.read() 11 | response = requests.post(f'{self.node_url}/api/v0/add', files={'file': file_data}) 12 | return response.json()['Hash'] 13 | 14 | def get_file(self, hash): 15 | response = requests.get(f'{self.node_url}/api/v0/cat?arg={hash}') 16 | return response.content 17 | 18 | def list_files(self): 19 | response = requests.post(f'{self.node_url}/api/v0/ls') 20 | return response.json() 21 | 22 | def pin_file(self, hash): 23 | response = requests.post(f'{self.node_url}/api/v0/pin/add?arg={hash}') 24 | return response.json() 25 | 26 | def unpin_file(self, hash): 27 | response = requests.post(f'{self.node_url}/api/v0/pin/rm?arg={hash}') 28 | return response.json() 29 | -------------------------------------------------------------------------------- /data_storage/ipfs/ipfs_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import threading 4 | import time 5 | 6 | class IPFSNode: 7 | def __init__(self, repo_path): 8 | self.repo_path = repo_path 9 | self.process = None 10 | 11 | def start(self): 12 | if not self.process: 13 | self.process = subprocess.Popen(['ipfs', 'daemon', '--repo', self.repo_path]) 14 | threading.Thread(target=self.wait_for_ready).start() 15 | 16 | def stop(self): 17 | if self.process: 18 | self.process.terminate() 19 | self.process = None 20 | 21 | def wait_for_ready(self): 22 | while True: 23 | try: 24 | response = requests.get('http://localhost:5001/api/v0/id') 25 | if response.status_code == 200: 26 | break 27 | except 
requests.ConnectionError: 28 | pass 29 | time.sleep(1) 30 | 31 | def get_api_url(self): 32 | return 'http://localhost:5001' 33 | 34 | def __del__(self): 35 | self.stop() 36 | -------------------------------------------------------------------------------- /ai-oracle-network/models/data_loader.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from sklearn.preprocessing import MinMaxScaler 4 | 5 | class DataLoader: 6 | def __init__(self, config): 7 | self.config = config 8 | 9 | def load_data(self): 10 | data = pd.read_csv(self.config['data_path']) 11 | data['date'] = pd.to_datetime(data['date']) 12 | data.set_index('date', inplace=True) 13 | 14 | if self.config['feature_engineering']: 15 | data = self._feature_engineering(data) 16 | 17 | return data 18 | 19 | def _feature_engineering(self, data): 20 | data['moving_average'] = data['target'].rolling(window=30).mean() 21 | data['exponential_smoothing'] = data['target'].ewm(span=30).mean() 22 | return data 23 | 24 | def split_data(self, data): 25 | train_size = int(0.8 * len(data)) 26 | train_data, test_data = data[:train_size], data[train_size:] 27 | return train_data, test_data 28 | 29 | def scale_data(self, data): 30 | scaler = MinMaxScaler() 31 | data[['target']] = scaler.fit_transform(data[['target']]) 32 | return data 33 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Core Python version requirement (for documentation) 2 | python==3.9.5 3 | 4 | # Build tools 5 | pip==21.1.3 6 | setuptools==56.0.0 7 | wheel==0.36.2 8 | 9 | # Database and ORM 10 | psycopg2-binary==2.9.3 11 | Flask==2.0.1 12 | flask_restful==0.3.8 13 | flask_sqlalchemy==2.5.1 14 | SQLAlchemy==1.4.25 15 | 16 | # Data Science & ML 17 | numpy==1.21.2 18 | pandas==1.3.5 19 | scikit-learn==1.0.2 20 | tensorflow==2.6.0 21 | 22 | # 
Web & API 23 | fastapi==0.68.0 24 | uvicorn==0.15.0 25 | requests==2.26.0 26 | 27 | # Blockchain & Crypto 28 | web3==5.28.0 29 | pycryptodome==3.10.4 30 | pqcrypto==0.6.0 31 | user-agents==2.2.0 32 | 33 | # Rate Limiting & API Security 34 | slowapi==0.1.4 35 | redis==3.5.3 36 | 37 | # Network Simulation & Visualization 38 | networkx==2.6.3 39 | matplotlib==3.4.3 40 | 41 | # Property-based Testing & Fuzzing 42 | hypothesis==6.14.1 43 | 44 | # Optional: For ZK Proofs Python integration 45 | pycircom==0.2.2 46 | 47 | # For compatibility, pin Werkzeug (Flask dependency) 48 | Werkzeug==2.0.1 49 | 50 | # Optional: For KYC/AML demo endpoints (not needed if not calling real KYC API) 51 | # Uncomment if using real KYC APIs: 52 | # python-dotenv==0.19.0 53 | -------------------------------------------------------------------------------- /truffle-config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | // Network settings 3 | networks: { 4 | development: { 5 | host: "127.0.0.1", 6 | port: 8545, 7 | network_id: "*", // Match any network id 8 | gas: 8000000, 9 | gasPrice: 20000000000 10 | }, 11 | test: { 12 | host: "127.0.0.1", 13 | port: 8545, 14 | network_id: "*", // Match any network id 15 | gas: 8000000, 16 | gasPrice: 20000000000 17 | }, 18 | mainnet: { 19 | provider: () => new Web3.providers.HttpProvider("https://mainnet.infura.io/v3/YOUR_PROJECT_ID"), 20 | network_id: 1, 21 | gas: 8000000, 22 | gasPrice: 20000000000 23 | } 24 | }, 25 | 26 | // Compiler settings 27 | compilers: { 28 | solc: { 29 | version: "0.8.10", 30 | settings: { 31 | optimizer: { 32 | enabled: true, 33 | runs: 200 34 | } 35 | } 36 | } 37 | }, 38 | 39 | // Migrations settings 40 | migrations: { 41 | deployer: { 42 | type: "truffle-deployer" 43 | } 44 | }, 45 | 46 | // Test settings 47 | test: { 48 | testMatch: ["**/*.test.js"], 49 | testPath: "test", 50 | timeout: 30000 51 | } 52 | }; 53 | 
-------------------------------------------------------------------------------- /incident_response/incident.py: -------------------------------------------------------------------------------- 1 | class Incident: 2 | def __init__(self, title, description, severity): 3 | self.id = uuid.uuid4() 4 | self.title = title 5 | self.description = description 6 | self.severity = severity 7 | self.status = 'open' 8 | self.created_at = datetime.datetime.now() 9 | self.updated_at = datetime.datetime.now() 10 | 11 | def to_dict(self): 12 | return { 13 | 'id': self.id, 14 | 'title': self.title, 15 | 'description': self.description, 16 | 'severity': self.severity, 17 | 'status': self.status, 18 | 'created_at': self.created_at.isoformat(), 19 | 'updated_at': self.updated_at.isoformat() 20 | } 21 | 22 | def update(self, data): 23 | self.title = data.get('title', self.title) 24 | self.description = data.get('description', self.description) 25 | self.severity = data.get('severity', self.severity) 26 | self.status = data.get('status', self.status) 27 | self.updated_at = datetime.datetime.now() 28 | 29 | def close(self): 30 | self.status = 'closed' 31 | self.updated_at = datetime.datetime.now() 32 | -------------------------------------------------------------------------------- /migrations/2_deploy_contracts.js: -------------------------------------------------------------------------------- 1 | const { deployer, web3 } = require('@truffle/deployer'); 2 | const { BN } = web3.utils; 3 | 4 | const Stablecoin = artifacts.require('Stablecoin'); 5 | 6 | module.exports = async (deployer) => { 7 | // Get the deployed Stablecoin contract 8 | const stablecoin = await Stablecoin.deployed(); 9 | 10 | // Add a new admin to the Stablecoin contract 11 | const newAdmin = '0x742d35Cc6634C0532925a3b844Bc454e4438f44e'; 12 | await stablecoin.addAdmin(newAdmin, { from: deployer.accounts[0] }); 13 | 14 | console.log(`Added ${newAdmin} as a new admin to the Stablecoin contract`); 15 | 16 | // Set the 
interest rate for the Stablecoin contract 17 | const interestRate = new BN('500000000000000000', 10); // 5% interest rate 18 | await stablecoin.setInterestRate(interestRate, { from: deployer.accounts[0] }); 19 | 20 | console.log(`Set interest rate to ${interestRate} for the Stablecoin contract`); 21 | 22 | // Set the reserve ratio for the Stablecoin contract 23 | const reserveRatio = new BN('200000000000000000', 10); // 20% reserve ratio 24 | await stablecoin.setReserveRatio(reserveRatio, { from: deployer.accounts[0] }); 25 | 26 | console.log(`Set reserve ratio to ${reserveRatio} for the Stablecoin contract`); 27 | }; 28 | -------------------------------------------------------------------------------- /tokenomics/token_distribution.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from tokenomics_model import TokenomicsModel 3 | 4 | class TokenDistribution: 5 | def __init__(self, tokenomics_model: TokenomicsModel): 6 | self.tokenomics_model = tokenomics_model 7 | 8 | def calculate_token_distribution(self) -> pd.DataFrame: 9 | return self.tokenomics_model.calculate_token_distribution() 10 | 11 | def calculate_token_holder_stake(self, token_holder: str) -> float: 12 | # Calculate the stake of a token holder 13 | token_distribution = self.calculate_token_distribution() 14 | return token_distribution.loc[token_distribution['address'] == token_holder, 'stake'].values[0] 15 | 16 | def calculate_token_holder_tokens(self, token_holder: str) -> float: 17 | # Calculate the number of tokens held by a token holder 18 | token_distribution = self.calculate_token_distribution() 19 | return token_distribution.loc[token_distribution['address'] == token_holder, 'token_amount'].values[0] 20 | 21 | def update_token_distribution(self, new_token_holders: List[Dict[str, float]]) -> None: 22 | # Update the token distribution based on new token holders 23 | self.tokenomics_model.token_holders = new_token_holders 24 | 
-------------------------------------------------------------------------------- /oracle_node/oracle_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import requests 5 | from flask import Flask, request, jsonify 6 | from flask_cors import CORS 7 | from models.oracle_model import OracleModel 8 | from data.data_loader import DataLoader 9 | 10 | app = Flask(__name__) 11 | CORS(app) 12 | 13 | # Load the configuration file 14 | with open('config.json') as f: 15 | config = json.load(f) 16 | 17 | # Initialize the data loader 18 | data_loader = DataLoader(config) 19 | 20 | # Initialize the oracle model 21 | oracle_model = OracleModel(config) 22 | 23 | @app.route('/predict', methods=['POST']) 24 | def predict(): 25 | data = request.get_json() 26 | X = data_loader.load_data(data) 27 | prediction = oracle_model.predict(X) 28 | return jsonify({'prediction': prediction}) 29 | 30 | @app.route('/train', methods=['POST']) 31 | def train(): 32 | data = request.get_json() 33 | X, y = data_loader.load_data(data) 34 | oracle_model.train(X, y) 35 | return jsonify({'status': 'Model trained successfully'}) 36 | 37 | @app.route('/healthcheck', methods=['GET']) 38 | def healthcheck(): 39 | return jsonify({'status': 'Oracle node is healthy'}) 40 | 41 | if __name__ == '__main__': 42 | app.run(host='0.0.0.0', port=5000, debug=True) 43 | -------------------------------------------------------------------------------- /cybersecurity/threat_detection/anomaly_detection.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sklearn.ensemble import IsolationForest 3 | from sklearn.preprocessing import StandardScaler 4 | 5 | class AnomalyDetector: 6 | def __init__(self, data): 7 | self.data = data 8 | self.scaler = StandardScaler() 9 | self.model = IsolationForest(contamination=0.1) 10 | 11 | def train(self): 12 | self.scaler.fit(self.data) 13 | 
scaled_data = self.scaler.transform(self.data) 14 | self.model.fit(scaled_data) 15 | 16 | def predict(self, new_data): 17 | scaled_new_data = self.scaler.transform(new_data) 18 | predictions = self.model.predict(scaled_new_data) 19 | return predictions 20 | 21 | def evaluate(self, labels): 22 | accuracy = self.model.score(self.data, labels) 23 | return accuracy 24 | 25 | def load_data(file_path): 26 | data = pd.read_csv(file_path) 27 | return data 28 | 29 | def main(): 30 | file_path = 'data/anomaly_detection_data.csv' 31 | data = load_data(file_path) 32 | detector = AnomalyDetector(data) 33 | detector.train() 34 | new_data = pd.DataFrame({'feature1': [10, 20, 30], 'feature2': [40, 50, 60]}) 35 | predictions = detector.predict(new_data) 36 | print(predictions) 37 | 38 | if __name__ == '__main__': 39 | main() 40 | -------------------------------------------------------------------------------- /cybersecurity/threat_detection/intrusion_detection.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sklearn.ensemble import RandomForestClassifier 3 | from sklearn.preprocessing import LabelEncoder 4 | 5 | class IntrusionDetector: 6 | def __init__(self, data): 7 | self.data = data 8 | self.encoder = LabelEncoder() 9 | self.model = RandomForestClassifier(n_estimators=100) 10 | 11 | def train(self): 12 | X = self.data.drop('label', axis=1) 13 | y = self.encoder.fit_transform(self.data['label']) 14 | self.model.fit(X, y) 15 | 16 | def predict(self, new_data): 17 | X_new = new_data.drop('label', axis=1) 18 | predictions = self.model.predict(X_new) 19 | return self.encoder.inverse_transform(predictions) 20 | 21 | def evaluate(self, labels): 22 | accuracy = self.model.score(self.data.drop('label', axis=1), labels) 23 | return accuracy 24 | 25 | def load_data(file_path): 26 | data = pd.read_csv(file_path) 27 | return data 28 | 29 | def main(): 30 | file_path = 'data/intrusion_detection_data.csv' 31 | data = 
load_data(file_path) 32 | detector = IntrusionDetector(data) 33 | detector.train() 34 | new_data = pd.DataFrame({'feature1': [10, 20, 30], 'feature2': [40, 50, 60], 'label': ['normal', 'anomaly', 'normal']}) 35 | predictions = detector.predict(new_data) 36 | print(predictions) 37 | 38 | if __name__ == '__main__': 39 | main() 40 | -------------------------------------------------------------------------------- /test/oracle.test.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const { web3 } = require('@openzeppelin/test-helpers/src/setup'); 3 | const { BN } = web3.utils; 4 | 5 | const Oracle = artifacts.require('Oracle'); 6 | 7 | contract('Oracle', (accounts) => { 8 | const [owner, user1, user2] = accounts; 9 | 10 | beforeEach(async () => { 11 | this.oracle = await Oracle.new({ from: owner }); 12 | }); 13 | 14 | describe('Initialization', () => { 15 | it('should have an initial price of 1 USD per token', async () => { 16 | const price = await this.oracle.getPrice(); 17 | expect(price).to.be.bignumber.equal(new BN('100000000', 10)); 18 | }); 19 | }); 20 | 21 | describe('Price Updates', () => { 22 | it('should allow the owner to update the price', async () => { 23 | await this.oracle.updatePrice(new BN('150000000', 10), { from: owner }); 24 | const newPrice = await this.oracle.getPrice(); 25 | expect(newPrice).to.be.bignumber.equal(new BN('150000000', 10)); 26 | }); 27 | 28 | it('should not allow user1 to update the price', async () => { 29 | await expectRevert( 30 | this.oracle.updatePrice(new BN('150000000', 10), { from: user1 }), 31 | 'Only the owner can update the price' 32 | ); 33 | }); 34 | }); 35 | 36 | describe('Query', () => { 37 | it('should return the current price for a query', async () => { 38 | const price = await this.oracle.query(); 39 | expect(price).to.be.bignumber.equal(new BN('100000000', 10)); 40 | }); 41 | }); 42 | }); 43 | 
-------------------------------------------------------------------------------- /backend/app/models/social_impact.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from sqlalchemy import Column, Integer, String, Float, DateTime, ForeignKey 3 | from sqlalchemy.orm import relationship 4 | from .database import Base # Assuming you have a Base class for your SQLAlchemy models 5 | 6 | class SocialImpact(Base): 7 | __tablename__ = 'social_impacts' 8 | 9 | id = Column(Integer, primary_key=True, autoincrement=True) 10 | project_name = Column(String(100), nullable=False) 11 | description = Column(String(500), nullable=True) 12 | impact_score = Column(Float, nullable=False) # A score representing the impact 13 | created_at = Column(DateTime, default=datetime.utcnow) 14 | updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) 15 | user_id = Column(Integer, ForeignKey('users.id'), nullable=False) # Assuming you have a User model 16 | 17 | user = relationship("User ", back_populates="social_impacts") # Establishing a relationship with User 18 | 19 | def __repr__(self): 20 | return f"" 21 | 22 | def to_dict(self): 23 | """Convert the model instance to a dictionary.""" 24 | return { 25 | "id": self.id, 26 | "project_name": self.project_name, 27 | "description": self.description, 28 | "impact_score": self.impact_score, 29 | "created_at": self.created_at.isoformat(), 30 | "updated_at": self.updated_at.isoformat(), 31 | "user_id": self.user_id 32 | } 33 | -------------------------------------------------------------------------------- /migrations/1_initial_migration.js: -------------------------------------------------------------------------------- 1 | const { deployer, web3 } = require('@truffle/deployer'); 2 | const { BN } = web3.utils; 3 | 4 | const Oracle = artifacts.require('Oracle'); 5 | const Reserve = artifacts.require('Reserve'); 6 | const Stablecoin = 
artifacts.require('Stablecoin'); 7 | 8 | module.exports = async (deployer) => { 9 | // Set up the Oracle contract 10 | const oracle = await deployer.deploy(Oracle, '0x0000000000000000000000000000000000000001', { 11 | gas: 5000000, 12 | gasPrice: new BN('20000000000', 10), 13 | }); 14 | 15 | console.log(`Oracle contract deployed at ${oracle.address}`); 16 | 17 | // Set up the Reserve contract 18 | const reserve = await deployer.deploy(Reserve, oracle.address, { 19 | gas: 5000000, 20 | gasPrice: new BN('20000000000', 10), 21 | }); 22 | 23 | console.log(`Reserve contract deployed at ${reserve.address}`); 24 | 25 | // Set up the Stablecoin contract 26 | const stablecoin = await deployer.deploy(Stablecoin, oracle.address, reserve.address, { 27 | gas: 5000000, 28 | gasPrice: new BN('20000000000', 10), 29 | }); 30 | 31 | console.log(`Stablecoin contract deployed at ${stablecoin.address}`); 32 | 33 | // Initialize the Stablecoin contract with an initial supply of 1 million tokens 34 | await stablecoin.initialize(1000000, { from: deployer.accounts[0] }); 35 | 36 | console.log('Stablecoin contract initialized with 1 million tokens'); 37 | 38 | // Set the Oracle and Reserve contracts as dependencies for the Stablecoin contract 39 | await stablecoin.setOracle(oracle.address, { from: deployer.accounts[0] }); 40 | await stablecoin.setReserve(reserve.address, { from: deployer.accounts[0] }); 41 | 42 | console.log('Oracle and Reserve contracts set as dependencies for Stablecoin contract'); 43 | }; 44 | -------------------------------------------------------------------------------- /backend/app/models/incentives.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from sqlalchemy import Column, Integer, String, Float, DateTime, ForeignKey 3 | from sqlalchemy.orm import relationship 4 | from .database import Base # Assuming you have a database.py file that initializes SQLAlchemy 5 | 6 | class Incentive(Base): 7 | 
__tablename__ = 'incentives' 8 | 9 | id = Column(Integer, primary_key=True, autoincrement=True) 10 | name = Column(String(100), nullable=False) 11 | description = Column(String(255), nullable=True) 12 | value = Column(Float, nullable=False) # Value of the incentive (e.g., in currency or points) 13 | type = Column(String(50), nullable=False) # Type of incentive (e.g., 'cash', 'points', 'discount') 14 | eligibility_criteria = Column(String(255), nullable=True) # Criteria for eligibility 15 | expiration_date = Column(DateTime, nullable=True) # Expiration date of the incentive 16 | user_id = Column(Integer, ForeignKey('users.id'), nullable=False) # Assuming you have a User model 17 | user = relationship("User ", back_populates="incentives") # Relationship with User model 18 | 19 | def __repr__(self): 20 | return f"" 21 | 22 | def is_active(self): 23 | """Check if the incentive is still active based on the expiration date.""" 24 | if self.expiration_date: 25 | return self.expiration_date > datetime.utcnow() 26 | return True # If no expiration date, consider it active 27 | 28 | # Example of a User model for relationship context 29 | class User(Base): 30 | __tablename__ = 'users' 31 | 32 | id = Column(Integer, primary_key=True, autoincrement=True) 33 | username = Column(String(100), unique=True, nullable=False) 34 | incentives = relationship("Incentive", back_populates="user") 35 | 36 | def __repr__(self): 37 | return f"" 38 | -------------------------------------------------------------------------------- /test/reserve.test.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const { web3 } = require('@openzeppelin/test-helpers/src/setup'); 3 | const { BN } = web3.utils; 4 | 5 | const Reserve = artifacts.require('Reserve'); 6 | 7 | contract('Reserve', (accounts) => { 8 | const [owner, user1, user2] = accounts; 9 | 10 | beforeEach(async () => { 11 | this.reserve = await Reserve.new({ from: owner 
}); 12 | }); 13 | 14 | describe('Initialization', () => { 15 | it('should have an initial reserve ratio of 20%', async () => { 16 | const reserveRatio = await this.reserve.reserveRatio(); 17 | expect(reserveRatio).to.be.bignumber.equal(new BN('200000000000000000', 10)); 18 | }); 19 | }); 20 | 21 | describe('Deposit', () => { 22 | it('should allow the owner to deposit tokens into the reserve', async () => { 23 | await this.reserve.deposit(new BN('100', 10), { from: owner }); 24 | const reserveBalance = await this.reserve.balance(); 25 | expect(reserveBalance).to.be.bignumber.equal(new BN('100', 10)); 26 | }); 27 | 28 | it('should not allow user1 to deposit tokens into the reserve', async () => { 29 | await expectRevert( 30 | this.reserve.deposit(new BN('100', 10), { from: user1 }), 31 | 'Only the owner can deposit tokens into the reserve' 32 | ); 33 | }); 34 | }); 35 | 36 | describe('Withdrawal', () => { 37 | it('should allow the owner to withdraw tokens from the reserve', async () => { 38 | await this.reserve.withdraw(new BN('50', 10), { from: owner }); 39 | const reserveBalance = await this.reserve.balance(); 40 | expect(reserveBalance).to.be.bignumber.equal(new BN('50', 10)); 41 | }); 42 | 43 | it('should not allow user1 to withdraw tokens from the reserve', async () => { 44 | await expectRevert( 45 | this.reserve.withdraw(new BN('50', 10), { from: user1 }), 46 | 'Only the owner can withdraw tokens from the reserve' 47 | ); 48 | }); 49 | }); 50 | }); 51 | -------------------------------------------------------------------------------- /blockchain_node/blockchain_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import hashlib 5 | import time 6 | from flask import Flask, request, jsonify 7 | from flask_cors import CORS 8 | from blockchain.blockchain import Blockchain 9 | from blockchain.block import Block 10 | from blockchain.transaction import Transaction 11 | from 
blockchain.miner import Miner 12 | from blockchain.wallet import Wallet 13 | 14 | app = Flask(__name__) 15 | CORS(app) 16 | 17 | # Initialize the blockchain 18 | blockchain = Blockchain() 19 | 20 | # Initialize the miner 21 | miner = Miner(blockchain) 22 | 23 | # Initialize the wallet 24 | wallet = Wallet() 25 | 26 | @app.route('/mine', methods=['POST']) 27 | def mine(): 28 | data = request.get_json() 29 | transaction = Transaction(data['sender'], data['receiver'], data['amount']) 30 | blockchain.add_transaction(transaction) 31 | miner.mine() 32 | return jsonify({'block': blockchain.get_latest_block().to_dict()}) 33 | 34 | @app.route('/transactions', methods=['GET']) 35 | def get_transactions(): 36 | transactions = blockchain.get_transactions() 37 | return jsonify([t.to_dict() for t in transactions]) 38 | 39 | @app.route('/blocks', methods=['GET']) 40 | def get_blocks(): 41 | blocks = blockchain.get_blocks() 42 | return jsonify([b.to_dict() for b in blocks]) 43 | 44 | @app.route('/balance', methods=['GET']) 45 | def get_balance(): 46 | address = request.args.get('address') 47 | balance = blockchain.get_balance(address) 48 | return jsonify({'balance': balance}) 49 | 50 | @app.route('/send', methods=['POST']) 51 | def send(): 52 | data = request.get_json() 53 | transaction = Transaction(data['sender'], data['receiver'], data['amount']) 54 | blockchain.add_transaction(transaction) 55 | return jsonify({'transaction': transaction.to_dict()}) 56 | 57 | @app.route('/healthcheck', methods=['GET']) 58 | def healthcheck(): 59 | return jsonify({'status': 'Blockchain node is healthy'}) 60 | 61 | if __name__ == '__main__': 62 | app.run(host='0.0.0.0', port=5001, debug=True) 63 | -------------------------------------------------------------------------------- /src/storage/decentralized_storage.py: -------------------------------------------------------------------------------- 1 | import os 2 | from fastapi import FastAPI, UploadFile, File, HTTPException 3 | from pydantic import 
BaseModel 4 | import ipfshttpclient 5 | 6 | app = FastAPI(title="PiConsensus Decentralized Storage/Archival Engine", version="1.0") 7 | 8 | # === Connect to IPFS Node === 9 | 10 | try: 11 | ipfs = ipfshttpclient.connect() # Defaults to /dns/localhost/tcp/5001/http 12 | except Exception as e: 13 | ipfs = None 14 | print("[DecentralizedStorage] Warning: Could not connect to IPFS node:", e) 15 | 16 | # === API Models === 17 | 18 | class PinRequest(BaseModel): 19 | cid: str 20 | 21 | # === API Endpoints === 22 | 23 | @app.post("/storage/upload") 24 | async def upload_file(file: UploadFile = File(...)): 25 | if not ipfs: 26 | raise HTTPException(500, "IPFS node not connected") 27 | contents = await file.read() 28 | result = ipfs.add_bytes(contents) 29 | ipfs.pin.add(result) 30 | return {"cid": result, "filename": file.filename} 31 | 32 | @app.get("/storage/retrieve/{cid}") 33 | def retrieve_file(cid: str): 34 | if not ipfs: 35 | raise HTTPException(500, "IPFS node not connected") 36 | try: 37 | data = ipfs.cat(cid) 38 | return {"cid": cid, "data": data.decode(errors="replace")} 39 | except Exception as e: 40 | raise HTTPException(404, f"Could not retrieve CID {cid}: {e}") 41 | 42 | @app.post("/storage/pin") 43 | def pin_content(req: PinRequest): 44 | if not ipfs: 45 | raise HTTPException(500, "IPFS node not connected") 46 | try: 47 | ipfs.pin.add(req.cid) 48 | return {"status": "pinned", "cid": req.cid} 49 | except Exception as e: 50 | raise HTTPException(400}") 51 | 52 | @app.get("/storage/pins") 53 | def list_pins(): 54 | if not ipfs: 55 | raise HTTPException(500, "IPFS node not: 56 | raise HTTPException(400, f"Could not list pins: {e}") 57 | 58 | # === Example Run: uvicorn src.storage.decentralized_storage:app --reload === 59 | 60 | if __name__ == "__main__": 61 | import uvicorn 62 | uvicorn.run("storage.decentralized_storage:app", host="0.0.0.0", port=8006, reload=True) 63 | -------------------------------------------------------------------------------- 
/backend/app/routes/api.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint, request, jsonify 2 | from flask_jwt_extended import jwt_required 3 | from ..services.security_service import SecurityService 4 | from ..models import User # Assuming you have a User model defined in your models 5 | 6 | # Create a Blueprint for the API 7 | api_bp = Blueprint('api', __name__) 8 | 9 | # Initialize the SecurityService 10 | security_service = SecurityService() 11 | 12 | @api_bp.route('/register', methods=['POST']) 13 | def register(): 14 | """Register a new user.""" 15 | data = request.get_json() 16 | username = data.get('username') 17 | password = data.get('password') 18 | 19 | if not username or not password: 20 | return jsonify({"msg": "Username and password are required"}), 400 21 | 22 | try: 23 | user = security_service.create_user(username, password) 24 | return jsonify({"msg": "User created", "user_id": user.id}), 201 25 | except Exception as e: 26 | return jsonify({"msg": str(e)}), 500 27 | 28 | @api_bp.route('/login', methods=['POST']) 29 | def login(): 30 | """Login a user and return an access token.""" 31 | data = request.get_json() 32 | username = data.get('username') 33 | password = data.get('password') 34 | 35 | if not username or not password: 36 | return jsonify({"msg": "Username and password are required"}), 400 37 | 38 | token = security_service.authenticate_user(username, password) 39 | if token: 40 | return jsonify({"access_token": token}), 200 41 | return jsonify({"msg": "Bad username or password"}), 401 42 | 43 | @api_bp.route('/current_user', methods=['GET']) 44 | @jwt_required() 45 | def current_user(): 46 | """Get the current authenticated user.""" 47 | user = security_service.get_current_user() 48 | return jsonify({"username": user.username}), 200 49 | 50 | @api_bp.route('/logout', methods=['POST']) 51 | @jwt_required() 52 | def logout(): 53 | """Logout the user (invalidate the token).""" 
54 | return security_service.logout_user() 55 | 56 | # Register the Blueprint in the main application 57 | def register_routes(app): 58 | app.register_blueprint(api_bp, url_prefix='/api') 59 | -------------------------------------------------------------------------------- /utils/helpers.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | from typing import List, Dict 4 | from .constants import API_ENDPOINT_IPFS, API_ENDPOINT_ETHEREUM, PINATA_PINNING_SERVICE 5 | 6 | def get_ipfs_client() -> requests.Session: 7 | # Create an IPFS client session 8 | session = requests.Session() 9 | session.headers.update({'Content-Type': 'application/json'}) 10 | return session 11 | 12 | def get_ethereum_client() -> requests.Session: 13 | # Create an Ethereum client session 14 | session = requests.Session() 15 | session.headers.update({'Content-Type': 'application/json'}) 16 | return session 17 | 18 | def pin_file_to_ipfs(file_path: str) -> str: 19 | # Pin a file to IPFS using Pinata 20 | with open(file_path, 'rb') as f: 21 | file_data = f.read() 22 | response = requests.post(PINATA_PINNING_SERVICE, files={'file': file_data}) 23 | return response.json()['IpfsHash'] 24 | 25 | def get_ipfs_hash(file_path: str) -> str: 26 | # Get the IPFS hash of a file 27 | with open(file_path, 'rb') as f: 28 | file_data = f.read() 29 | response = requests.post(API_ENDPOINT_IPFS + '/api/v0/add', files={'file': file_data}) 30 | return response.json()['Hash'] 31 | 32 | def get_ethereum_block_number() -> int: 33 | # Get the current Ethereum block number 34 | response = requests.get(API_ENDPOINT_ETHEREUM + '/blockNumber') 35 | return int(response.json()['result'], 16) 36 | 37 | def get_ethereum_transaction_receipt(tx_hash: str) -> Dict: 38 | # Get the transaction receipt of an Ethereum transaction 39 | response = requests.get(API_ENDPOINT_ETHEREUM + '/getTransactionReceipt', params={'tx_hash': tx_hash}) 40 | return 
response.json()['result'] 41 | 42 | def wait_for_ethereum_transaction(tx_hash: str, max_retries: int = 5) -> Dict: 43 | # Wait for an Ethereum transaction to be mined 44 | for i in range(max_retries): 45 | response = requests.get(API_ENDPOINT_ETHEREUM + '/getTransactionReceipt', params={'tx_hash': tx_hash}) 46 | if response.json()['result']['status'] == '0x1': 47 | return response.json()['result'] 48 | time.sleep(1) 49 | raise Exception('Transaction not mined after {} retries'.format(max_retries)) 50 | -------------------------------------------------------------------------------- /ai-oracle-network/models/oracle_model.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from sklearn.ensemble import RandomForestRegressor 4 | from sklearn.model_selection import train_test_split 5 | from sklearn.metrics import mean_squared_error 6 | from sklearn.preprocessing import StandardScaler 7 | from tensorflow.keras.models import Sequential 8 | from tensorflow.keras.layers import Dense, LSTM 9 | 10 | class OracleModel: 11 | def __init__(self, config): 12 | self.config = config 13 | self.model = self._build_model() 14 | 15 | def _build_model(self): 16 | if self.config['model_type'] == 'random_forest': 17 | return RandomForestRegressor(n_estimators=100, random_state=42) 18 | elif self.config['model_type'] == 'lstm': 19 | model = Sequential() 20 | model.add(LSTM(units=50, return_sequences=True, input_shape=(self.config['sequence_length'], 1))) 21 | model.add(LSTM(units=50)) 22 | model.add(Dense(1)) 23 | model.compile(loss='mean_squared_error', optimizer='adam') 24 | return model 25 | else: 26 | raise ValueError("Invalid model type") 27 | 28 | def train(self, data): 29 | X, y = data.drop(['target'], axis=1), data['target'] 30 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 31 | 32 | if self.config['model_type'] == 'random_forest': 33 | 
self.model.fit(X_train, y_train) 34 | elif self.config['model_type'] == 'lstm': 35 | scaler = StandardScaler() 36 | X_train_scaled = scaler.fit_transform(X_train.values.reshape(-1, 1)) 37 | X_test_scaled = scaler.transform(X_test.values.reshape(-1, 1)) 38 | self.model.fit(X_train_scaled, y_train, epochs=50, batch_size=32, validation_data=(X_test_scaled, y_test)) 39 | 40 | y_pred = self.model.predict(X_test) 41 | mse = mean_squared_error(y_test, y_pred) 42 | print(f"Model trained with MSE: {mse:.2f}") 43 | 44 | def predict(self, data): 45 | if self.config['model_type'] == 'random_forest': 46 | return self.model.predict(data) 47 | elif self.config['model_type'] == 'lstm': 48 | scaler = StandardScaler() 49 | data_scaled = scaler.transform(data.values.reshape(-1, 1)) 50 | return self.model.predict(data_scaled) 51 | -------------------------------------------------------------------------------- /src/oracle/oracle_engine.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import List, Dict, Any, Optional 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | import httpx 6 | 7 | app = FastAPI(title="PiConsensus Decentralized Oracle Engine", version="1.0") 8 | 9 | # === Data Models === 10 | 11 | class OracleSource(BaseModel): 12 | id: int 13 | name: str 14 | url: str # API endpoint to fetch data from 15 | data_key: str # Key in the JSON response to extract 16 | description: Optional[str] = None 17 | active: bool = True 18 | registered_at: float = time.time() 19 | 20 | class OracleFeed(BaseModel): 21 | id: int 22 | source_id: int 23 | value: Any 24 | timestamp: float = time.time() 25 | 26 | # === In-memory Store (replace with DB in production) === 27 | 28 | oracle_sources: Dict[int, OracleSource] = {} 29 | oracle_feeds: List[OracleFeed] = [] 30 | source_counter = 1 31 | feed_counter = 1 32 | 33 | # === API Endpoints === 34 | 35 | @app.post("/oracle/register") 36 | def 
register_oracle(name: str, url: str, data_key: str, description: Optional[str] = None): 37 | global source_counter 38 | source = OracleSource( 39 | id=source_counter, 40 | name=name, 41 | url=url, 42 | data_key=data_key, 43 | description=description 44 | ) 45 | oracle_sources[source_counter] = source 46 | source_counter += 1 47 | return {"status": "registered", "oracle": source.dict()} 48 | 49 | @app.get("/oracle/sources") 50 | def list_oracle_sources(active: Optional[bool] = None): 51 | sources = [s.dict() for s in oracle_sources.values() if (active is None or s.active == active)] 52 | return sources 53 | 54 | @app.post("/oracle/fetch/{source_id}") 55 | def fetch_from_oracle(source_id: int): 56 | global feed_counter 57 | source = oracle_sources.get(source_id) 58 | if not source or not source.active: 59 | raise HTTPException(404, "Oracle source not found or inactive") 60 | try: 61 | resp = httpx.get(source.url, timeout=10.0) 62 | resp.raise_for_status() 63 | data = resp.json() 64 | value = data.get(source.data_key) 65 | if value "source_id": source_id} 66 | 67 | # === Example Run: uvicorn src.oracle.oracle_engine:app --reload === 68 | 69 | if __name__ == "__main__": 70 | import uvicorn 71 | uvicorn.run("oracle.oracle_engine:app", host="0.0.0.0", port=8013, reload=True) 72 | -------------------------------------------------------------------------------- /src/zkp/zkp_service.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import Dict, Any, Optional 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | import pygroth 6 | 7 | app = FastAPI(title="PiConsensus Zero-Knowledge Proof Service Layer", version="1.0") 8 | 9 | # === Data Models === 10 | 11 | class ZKPRequest(BaseModel): 12 | statement: str # e.g., 'x > 18' 13 | witness: Dict[str, Any] # e.g., {"x": 21} 14 | 15 | class ZKPVerifyRequest(BaseModel): 16 | vk: dict 17 | proof: dict 18 | public: dict 19 | 20 | # === 
In-memory store for demo === 21 | 22 | proofs: Dict[str, dict] = {} 23 | vks: Dict[str, dict] = {} 24 | pk_cache: Optional[dict] = None # You can persist keypairs for circuits 25 | 26 | # === ZKP Demo Functions === 27 | 28 | def generate_keys(statement: str): 29 | # In a real system, you'd compile a circuit for the statement. 30 | # For demo: create keys for a trivial circuit. 31 | (pk, vk) = pygroth.keygen(pygroth.circuit_example()) 32 | return pk, vk 33 | 34 | @app.post("/zkp/generate") 35 | def generate_zkp(req: ZKPRequest): 36 | global pk_cache 37 | # For demo, always use the same example circuit 38 | if pk_cache is None: 39 | pk, vk = generate_keys(req.statement) 40 | pk_cache = pk 41 | vks[req.statement] = vk 42 | else: 43 | pk = pk_cache 44 | vk = vks.get(req.statement) 45 | # The witness must satisfy the circuit 46 | try: 47 | proof = pygroth.prove(pk, pygroth.circuit_example(), req.witness) 48 | proof_id = f"proof-{int(time.time()*1000)}" 49 | proofs[proof_id] = {"proof": proof, "public": req.witness, "vk": vk, "statement": req.statement} 50 | return {"proof_id": proof_id, "proof": proof, "public": req.witness, "vk": vk, "statement": req.statement} 51 | except Exception as e: 52 | raise HTTPException(400, f"Could not generate proof: {e}") 53 | 54 | @app.post("/zkp/verify") 55 | def verify_zkp(req: ZKPVerifyRequest): 56 | try: 57 | valid = pygroth.verify(req.vk, pygroth.circuit_example(), req.proof, req.public) 58 | return {"valid": bool(valid)} 59 | except Exception as e: 60 | raise HTTPException(400, f"Verification failed: {e}") 61 | 62 | @app.get("/zkp/proofs/{proof_id}") 63 | def get_proof(proof_id: str == "__main__": 64 | import uvicorn 65 | uvicorn.run("zkp.zkp_service:app", host="0.0.0.0", port=8012, reload=True) 66 | -------------------------------------------------------------------------------- /frontend/src/components/Gamification.js: -------------------------------------------------------------------------------- 1 | import React, { useState, 
useEffect } from 'react'; 2 | import axios from 'axios'; 3 | 4 | const Gamification = () => { 5 | const [points, setPoints] = useState(0); 6 | const [leaderboard, setLeaderboard] = useState([]); 7 | 8 | useEffect(() => { 9 | // Fetch user points and leaderboard data when the component mounts 10 | fetchUser Points(); 11 | fetchLeaderboard(); 12 | }, []); 13 | 14 | const fetchUser Points = async () => { 15 | try { 16 | const response = await axios.get('/api/user/points'); // Adjust the endpoint as necessary 17 | setPoints(response.data.points); 18 | } catch (error) { 19 | console.error('Error fetching user points:', error); 20 | } 21 | }; 22 | 23 | const fetchLeaderboard = async () => { 24 | try { 25 | const response = await axios.get('/api/leaderboard'); // Adjust the endpoint as necessary 26 | setLeaderboard(response.data); 27 | } catch (error) { 28 | console.error('Error fetching leaderboard:', error); 29 | } 30 | }; 31 | 32 | const handleCompleteTask = async () => { 33 | try { 34 | const response = await axios.post('/api/tasks/complete'); // Adjust the endpoint as necessary 35 | if (response.data.success) { 36 | setPoints(prevPoints => prevPoints + response.data.pointsEarned); 37 | alert(`You earned ${response.data.pointsEarned} points!`); 38 | } 39 | } catch (error) { 40 | console.error('Error completing task:', error); 41 | } 42 | }; 43 | 44 | return ( 45 |
46 |

Gamification

47 |
48 |

Your Points: {points}

49 | 50 |
51 |
52 |

Leaderboard

53 |
    54 | {leaderboard.map((user, index) => ( 55 |
  • 56 | {index + 1}. {user.username} - {user.points} points 57 |
  • 58 | ))} 59 |
60 |
61 |
62 | ); 63 | }; 64 | 65 | export default Gamification; 66 | -------------------------------------------------------------------------------- /src/wallet/quantum_wallet.py: -------------------------------------------------------------------------------- 1 | from pqcrypto.sign import dilithium2 2 | from typing import Dict, Any 3 | 4 | class QuantumWallet: 5 | def __init__(self): 6 | self.private_key = None 7 | self.public_key = None 8 | 9 | def generate(self): 10 | """ 11 | Generate a new Dilithium quantum-resistant key pair. 12 | """ 13 | pk, sk = dilithium2.generate_keypair() 14 | self.private_key = sk 15 | self.public_key = pk 16 | print("[QuantumWallet] Keypair generated.") 17 | return pk, sk 18 | 19 | def sign(self, message: bytes) -> bytes: 20 | """ 21 | Sign a message using the quantum-resistant private key. 22 | """ 23 | if not self.private_key: 24 | raise ValueError("No private key loaded.") 25 | signature = dilithium2.sign(message, self.private_key) 26 | print("[QuantumWallet] Message signed.") 27 | return signature 28 | 29 | def verify(self, message: bytes, signature: bytes, public_key: bytes = None) -> bool: 30 | """ 31 | Verify a message and signature with a quantum-resistant public key. 32 | """ 33 | pk = public_key or self.public_key 34 | if not pk: 35 | raise ValueError("No public key available.") 36 | try: 37 | dilithium2.open(signature, pk) 38 | print("[QuantumWallet] Signature valid.") 39 | return True 40 | except Exception as e: 41 | print("[QuantumWallet] Signature verification failed:", e) 42 | return False 43 | 44 | def export_keys(self) -> Dict[str, bytes]: 45 | """ 46 | Export the wallet's public and private keys. 47 | """ 48 | return {"public_key": self.public_key, "private_key": self.private_key} 49 | 50 | def import_keys(self, public_key: bytes, private_key: bytes): 51 | """ 52 | Import an existing keypair. 
53 | """ 54 | self.public_key = public_key 55 | self.private_key = private_key 56 | print("[QuantumWallet] Keys imported.") 57 | 58 | # === Example Usage === 59 | 60 | if __name__ == "__main__": 61 | print("PiConsensus Quantum-Resistant Wallet Demo") 62 | wallet = QuantumWallet() 63 | pub, priv = wallet.generate() 64 | msg = b"Quantum-safe Pi transaction" 65 | sig = wallet.sign(msg) 66 | print("Signature:", sig.hex()[:64], "...") 67 | valid = wallet.verify(msg, sig) 68 | print("Signature valid?", valid) 69 | -------------------------------------------------------------------------------- /tokenomics/tokenomics_model.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from scipy.optimize import minimize 3 | from typing import List, Dict 4 | 5 | class TokenomicsModel: 6 | def __init__(self, token_supply: int, token_price: float, inflation_rate: float, 7 | token_holders: List[Dict[str, float]]): 8 | self.token_supply = token_supply 9 | self.token_price = token_price 10 | self.inflation_rate = inflation_rate 11 | self.token_holders = token_holders 12 | 13 | def calculate_token_distribution(self) -> pd.DataFrame: 14 | # Calculate token distribution based on token holders' stakes 15 | token_distribution = pd.DataFrame(self.token_holders) 16 | token_distribution['token_amount'] = token_distribution['stake'] * self.token_supply 17 | return token_distribution 18 | 19 | def calculate_inflation(self) -> float: 20 | # Calculate inflation based on inflation rate and token supply 21 | return self.inflation_rate * self.token_supply 22 | 23 | def optimize_token_distribution(self) -> pd.DataFrame: 24 | # Optimize token distribution using a optimization algorithm 25 | def objective_function(params: List[float]) -> float: 26 | # Define the objective function to minimize 27 | token_distribution = pd.DataFrame(self.token_holders) 28 | token_distribution['token_amount'] = token_distribution['stake'] * params[0] 29 | return 
-token_distribution['token_amount'].sum() 30 | 31 | res = minimize(objective_function, [self.token_supply], method="SLSQP") 32 | optimized_token_distribution = pd.DataFrame(self.token_holders) 33 | optimized_token_distribution['token_amount'] = optimized_token_distribution['stake'] * res.x[0] 34 | return optimized_token_distribution 35 | 36 | def simulate_token_economy(self, time_steps: int) -> pd.DataFrame: 37 | # Simulate the token economy over time 38 | token_economy = pd.DataFrame(index=range(time_steps), columns=['token_supply', 'token_price', 'inflation_rate']) 39 | token_economy.loc[0] = [self.token_supply, self.token_price, self.inflation_rate] 40 | for i in range(1, time_steps): 41 | token_economy.loc[i, 'token_supply'] = token_economy.loc[i-1, 'token_supply'] * (1 + self.inflation_rate) 42 | token_economy.loc[i, 'token_price'] = token_economy.loc[i-1, 'token_price'] * (1 + self.inflation_rate) 43 | token_economy.loc[i, 'inflation_rate'] = self.inflation_rate 44 | return token_economy 45 | -------------------------------------------------------------------------------- /src/identity/did_management.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | import didkit 6 | 7 | app = FastAPI(title="PiConsensus DID Management & Verifiable Credentials", version="1.0") 8 | 9 | # === API Models === 10 | 11 | class KeyRequest(BaseModel): 12 | key_type: str = "ed25519" # ed25519, secp256k1, etc. 
13 | 14 | class IssueVCRequest(BaseModel): 15 | did: str 16 | subject: dict 17 | credential_type: str = "VerifiableCredential" 18 | issuer: str 19 | proof_purpose: str = "assertionMethod" 20 | verification_method: str 21 | 22 | class VerifyVCRequest(BaseModel): 23 | vc: dict 24 | 25 | # === DID & Key Management === 26 | 27 | @app.post("/did/key") 28 | def generate_did_key(req: KeyRequest): 29 | key = didkit.generate_ed25519_key() if req.key_type == "ed25519" else didkit.generate_secp256k1_key() 30 | did = didkit.key_to_did("key", key) 31 | vm = didkit.key_to_verification_method("key", key) 32 | return {"did": did, "key": key, "verification_method": vm} 33 | 34 | @app.get("/did/resolve/{did}") 35 | def resolve_did(did: str): 36 | try: 37 | doc = didkit.resolve_did(did, '{}') 38 | return json.loads(doc) 39 | except Exception as e: 40 | raise HTTPException(400, f"Could not resolve DID: {e}") 41 | 42 | VCs) === 43 | 44 | @app.post("/vc/issue") 45 | def issue_vc(req: IssueVCRequest): 46 | vc = { 47 | "@context": ["https://www.w3.org/2018/credentials/v1"], 48 | "type": [req.credential_type], 49 | "issuer": req.issuer, 50 | "issuanceDate": didkit.get_iso_datetime(), 51 | "credentialSubject": req.subject, 52 | } 53 | options = { 54 | "proofPurpose": req.proof_purpose, 55 | "verificationMethod": req.verification_method, 56 | } 57 | try: 58 | signed_vc = didkit.issue_credential(json.dumps(vc), json.dumps(options), req.key) 59 | return json.loads(signed_vc) 60 | except Exception as e: 61 | raise HTTPException(400, f"Could not issue VC: {e}") 62 | 63 | @app.post("/vc/verify") 64 | def verify_vc(req: VerifyVCRequest): 65 | try: 66 | options = {} 67 | result = didkit.verify_credential(json.dumps(req.vc), json.dumps(options)) 68 | return json.loads(result) 69 | except Exception as e: 70 | raise HTTPException(400, f"Could not verify VC: {e}") 71 | 72 | # === Example Run: uvicorn src.identity.did_management:app --reload === 73 | 74 | if __name__ == "__main__": 75 | import uvicorn 
76 | uvicorn.run("identity.did_management:app", host="0.0.0.0", port=8007, reload=True) 77 | -------------------------------------------------------------------------------- /contracts/reserve.sol: -------------------------------------------------------------------------------- 1 | pragma solidity ^0.8.0; 2 | 3 | import "https://github.com/OpenZeppelin/openzeppelin-solidity/blob/master/contracts/token/ERC20/SafeERC20.sol"; 4 | 5 | contract Reserve { 6 | // Mapping of reserve balances 7 | mapping (address => uint256) public reserveBalances; 8 | 9 | // Event emitted when funds are deposited into the reserve 10 | event Deposit(address _sender, uint256 _amount); 11 | 12 | // Event emitted when funds are withdrawn from the reserve 13 | event Withdrawal(address _sender, uint256 _amount); 14 | 15 | // Event emitted when the stablecoin contract is updated with the latest reserve balance 16 | event ReserveBalanceUpdated(uint256 _newBalance); 17 | 18 | // Function to deposit funds into the reserve 19 | function deposit(uint256 _amount) public { 20 | // Transfer the funds from the sender to the reserve 21 | SafeERC20.safeTransferFrom(msg.sender, address(this), _amount); 22 | 23 | // Update the reserve balance 24 | reserveBalances[msg.sender] += _amount; 25 | 26 | // Emit the Deposit event 27 | emit Deposit(msg.sender, _amount); 28 | } 29 | 30 | // Function to withdraw funds from the reserve 31 | function withdraw(uint256 _amount) public { 32 | // Check if the sender has sufficient balance in the reserve 33 | require(reserveBalances[msg.sender] >= _amount, "Insufficient balance"); 34 | 35 | // Transfer the funds from the reserve to the sender 36 | SafeERC20.safeTransfer(msg.sender, _amount); 37 | 38 | // Update the reserve balance 39 | reserveBalances[msg.sender] -= _amount; 40 | 41 | // Emit the Withdrawal event 42 | emit Withdrawal(msg.sender, _amount); 43 | } 44 | 45 | // Function to update the stablecoin contract with the latest reserve balance 46 | function 
updateStablecoinContract() public { 47 | // Get the latest reserve balance 48 | uint256 newBalance = getReserveBalance(); 49 | 50 | // Update the stablecoin contract with the latest reserve balance 51 | StablecoinContract(address).updateReserveBalance(newBalance); 52 | 53 | // Emit the ReserveBalanceUpdated event 54 | emit ReserveBalanceUpdated(newBalance); 55 | } 56 | 57 | // Function to get the reserve balance 58 | function getReserveBalance() public view returns (uint256) { 59 | // Calculate the total reserve balance 60 | uint256 totalBalance = 0; 61 | for (address user in reserveBalances) { 62 | totalBalance += reserveBalances[user]; 63 | } 64 | return totalBalance; 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /contracts/PiConsensus.sol: -------------------------------------------------------------------------------- 1 | pragma solidity ^0.8.0; 2 | 3 | import "./stablecoin.js"; 4 | import "./oracle.js"; 5 | import "./reserve.js"; 6 | 7 | contract PiConsensus { 8 | // Stablecoin contract address 9 | address public stablecoinAddress; 10 | 11 | // Oracle contract address 12 | address public oracleAddress; 13 | 14 | // Reserve contract address 15 | address public reserveAddress; 16 | 17 | // Mapping of user votes 18 | mapping (address => uint256) public userVotes; 19 | 20 | // Event emitted when a user votes 21 | event VoteCast(address _user, uint256 _amount); 22 | 23 | // Event emitted when the consensus is updated 24 | event ConsensusUpdated(uint256 _newConsensus); 25 | 26 | // Constructor 27 | constructor(address _stablecoinAddress, address _oracleAddress, address _reserveAddress) public { 28 | stablecoinAddress = _stablecoinAddress; 29 | oracleAddress = _oracleAddress; 30 | reserveAddress = _reserveAddress; 31 | } 32 | 33 | // Function to cast a vote 34 | function castVote(uint256 _amount) public { 35 | // Check if the user has sufficient balance in the stablecoin contract 36 | 
require(Stablecoin(stablecoinAddress).balanceOf(msg.sender) >= _amount, "Insufficient balance"); 37 | 38 | // Update the user's vote 39 | userVotes[msg.sender] += _amount; 40 | 41 | // Emit the VoteCast event 42 | emit VoteCast(msg.sender, _amount); 43 | 44 | // Update the consensus 45 | updateConsensus(); 46 | } 47 | 48 | // Function to update the consensus 49 | function updateConsensus() internal { 50 | // Get the latest economic indicators from the oracle contract 51 | (uint256 gdp, uint256 inflationRate) = Oracle(oracleAddress).getEconomicIndicators(); 52 | 53 | // Calculate the new consensus based on the user votes and economic indicators 54 | uint256 newConsensus = calculateConsensus(userVotes, gdp, inflationRate); 55 | 56 | // Update the consensus 57 | consensus = newConsensus; 58 | 59 | // Emit the ConsensusUpdated event 60 | emit ConsensusUpdated(newConsensus); 61 | } 62 | 63 | // Function to calculate the consensus 64 | function calculateConsensus(mapping (address => uint256) _userVotes, uint256 _gdp, uint256 _inflationRate) internal pure returns (uint256) { 65 | // TO DO: implement the consensus calculation logic 66 | // For now, return a dummy value 67 | return 0; 68 | } 69 | 70 | // Function to get the current consensus 71 | function getConsensus() public view returns (uint256) { 72 | return consensus; 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /test/stablecoin.test.js: -------------------------------------------------------------------------------- 1 | const { expect } = require('chai'); 2 | const { web3 } = require('@openzeppelin/test-helpers/src/setup'); 3 | const { BN } = web3.utils; 4 | 5 | const Stablecoin = artifacts.require('Stablecoin'); 6 | 7 | contract('Stablecoin', (accounts) => { 8 | const [owner, user1, user2] = accounts; 9 | 10 | beforeEach(async () => { 11 | this.stablecoin = await Stablecoin.new({ from: owner }); 12 | }); 13 | 14 | describe('Initialization', () => { 15 | 
it('should have an initial supply of 1 million tokens', async () => { 16 | const totalSupply = await this.stablecoin.totalSupply(); 17 | expect(totalSupply).to.be.bignumber.equal(new BN('1000000', 10)); 18 | }); 19 | 20 | it('should have the owner as the initial admin', async () => { 21 | const isAdmin = await this.stablecoin.isAdmin(owner); 22 | expect(isAdmin).to.be.true; 23 | }); 24 | }); 25 | 26 | describe('Transfer', () => { 27 | it('should allow the owner to transfer tokens to user1', async () => { 28 | await this.stablecoin.transfer(user1, new BN('100', 10), { from: owner }); 29 | const user1Balance = await this.stablecoin.balanceOf(user1); 30 | expect(user1Balance).to.be.bignumber.equal(new BN('100', 10)); 31 | }); 32 | 33 | it('should not allow user2 to transfer tokens without approval', async () => { 34 | await expectRevert( 35 | this.stablecoin.transfer(user1, new BN('100', 10), { from: user2 }), 36 | 'Only approved accounts can transfer tokens' 37 | ); 38 | }); 39 | }); 40 | 41 | describe('Interest', () => { 42 | it('should accrue interest for user1', async () => { 43 | await this.stablecoin.accrueInterest({ from: owner }); 44 | const user1Balance = await this.stablecoin.balanceOf(user1); 45 | expect(user1Balance).to.be.bignumber.gt(new BN('100', 10)); 46 | }); 47 | 48 | it('should not accrue interest for user2 without a balance', async () => { 49 | await expectRevert( 50 | this.stablecoin.accrueInterest({ from: user2 }), 51 | 'Only accounts with a balance can accrue interest' 52 | ); 53 | }); 54 | }); 55 | 56 | describe('Admin', () => { 57 | it('should allow the owner to add a new admin', async () => { 58 | await this.stablecoin.addAdmin(user2, { from: owner }); 59 | const isAdmin = await this.stablecoin.isAdmin(user2); 60 | expect(isAdmin).to.be.true; 61 | }); 62 | 63 | it('should not allow user1 to add a new admin', async () => { 64 | await expectRevert( 65 | this.stablecoin.addAdmin(user2, { from: user1 }), 66 | 'Only the owner can add new admins' 
67 | ); 68 | }); 69 | }); 70 | }); 71 | -------------------------------------------------------------------------------- /src/incentive/token_incentive.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import List, Dict, Any, Optional 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | 6 | app = FastAPI(title="PiConsensus Tokenized Incentive Layer & Reward Engine", version="1.0") 7 | 8 | # === Data Models === 9 | 10 | class RewardRule(BaseModel): 11 | id: int 12 | activity: str # e.g., "validate", "propose", "bridge", "contribute" 13 | reward_amount: float 14 | asset: str = "Pi" 15 | active: bool = True 16 | 17 | class RewardEvent(BaseModel): 18 | id: int 19 | rule_id: int 20 | recipient: str 21 | activity: str 22 | amount: float 23 | asset: str 24 | timestamp: float = time.time() 25 | details: Optional[Dict[str, Any]] = None 26 | 27 | # === In-memory Store (replace with DB/smart contract in prod) === 28 | 29 | reward_rules: Dict[int, RewardRule] = {} 30 | reward_events: List[RewardEvent] = [] 31 | rule_counter = 1 32 | event_counter = 1 33 | 34 | # === API Endpoints === 35 | 36 | @app.post("/incentive/rule") 37 | def create_rule(activity: str, reward_amount: float, asset: Optional[str] = "Pi"): 38 | global rule_counter 39 | rule = RewardRule(id=rule_counter, activity=activity, reward_amount=reward_amount, asset=asset) 40 | reward_rules[rule_counter] = rule 41 | rule_counter += 1 42 | return {"status": "rule_created", "rule": rule.dict()} 43 | 44 | @app.get("/incentive/rules") 45 | def list_rules(active: Optional[bool] = None): 46 | rules = [r.dict() for r in reward_rules.values() if (active is None or r.active == active)] 47 | return rules 48 | 49 | @app.post("/incentive/reward") 50 | def distribute_reward(rule_id: int, recipient: str, details: Optional[Dict[str, Any]] = None): 51 | global event_counter 52 | rule = reward_rules.get(rule_id) 53 | if not rule or not 
    rule.active: 54 | raise HTTPException(400, "Invalid or inactive reward rule") 55 | event = RewardEvent( 56 | id=event_counter, 57 | rule_id=rule_id, 58 | recipient=recipient, 59 | activity=rule.activity, 60 | amount=rule.reward_amount, 61 | asset=rule.asset, 62 | timestamp=time.time(), 63 | details=details 64 | ) 65 | reward_events.append(event) 66 | event_counter += 1 67 | return {"status": "rewarded", "event": event.dict()} 68 | 69 | @app.post("/incentive/rule/{rule_id}/disable") 70 | def disable_rule(rule_id: int): 71 | rule = reward_rules.get(rule_id) 72 | if not rule: 73 | raise HTTPException(404, "Rule not found") 74 | rule.active = False 75 | return {"status": "disabled", "rule_id": rule_id} 76 | 77 | # === Example Run: uvicorn src.incentive.token_incentive:app --reload === 78 | 79 | if __name__ == "__main__": 80 | import uvicorn 81 | uvicorn.run("incentive.token_incentive:app", host="0.0.0.0", port=8010, reload=True) 82 | -------------------------------------------------------------------------------- /frontend/src/components/Dashboard.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useState } from 'react'; 2 | import './Dashboard.css'; // Assuming you have some CSS for styling 3 | 4 | const Dashboard = () => { 5 | const [userStats, setUserStats] = useState({ 6 | totalBalance: 0, 7 | totalTransactions: 0, 8 | recentActivities: [], 9 | }); 10 | 11 | useEffect(() => { 12 | // Simulate fetching user statistics from an API 13 | const fetchUserStats = async () => { 14 | // Replace with your API call 15 | const stats = await new Promise((resolve) => { 16 | setTimeout(() => { 17 | resolve({ 18 | totalBalance: 5.23, // Example balance in ETH 19 | totalTransactions: 12, // Example transaction count 20 | recentActivities: [ 21 | { id: 1, type: 'Sent', amount: 0.5, date: '2023-10-01' }, 22 | { id: 2, type: 'Received', amount: 1.0, date: '2023-09-28' }, 23 | { id: 3, type: 'Sent', amount: 0.2, date: '2023-09-25' }, 24 | ], 25 | }); 26 | }, 1000); 27 | }); 28 | 29 | setUserStats(stats); 30 | }; 31 | 32 | fetchUserStats(); 33 | }, []); 34 | 35 | return ( 36 |
    
37 |

Dashboard

38 |
39 |
40 |

Total Balance

41 |

{userStats.totalBalance} ETH

42 |
43 |
44 |

Total Transactions

45 |

{userStats.totalTransactions}

46 |
47 |
48 |
49 |

Recent Activities

50 |
    51 | {userStats.recentActivities.map(activity => ( 52 |
  • 53 | {activity.type} {activity.amount} ETH on {activity.date} 54 |
  • 55 | ))} 56 |
57 |
58 |
59 |

Quick Access

60 | 61 | 62 |
63 |
64 | ); 65 | }; 66 | 67 | export default Dashboard; 68 | -------------------------------------------------------------------------------- /backend/app/services/iot_integration_service.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from datetime import datetime 4 | 5 | class IoTIntegrationService: 6 | def __init__(self, device_url): 7 | self.device_url = device_url 8 | 9 | def get_device_status(self): 10 | """Fetch the current status of the IoT device.""" 11 | try: 12 | response = requests.get(f"{self.device_url}/status") 13 | response.raise_for_status() 14 | return response.json() 15 | except requests.exceptions.RequestException as e: 16 | print(f"Error fetching device status: {e}") 17 | return None 18 | 19 | def send_command(self, command): 20 | """Send a command to the IoT device.""" 21 | try: 22 | payload = json.dumps({"command": command}) 23 | response = requests.post(f"{self.device_url}/command", data=payload, headers={"Content-Type": "application/json"}) 24 | response.raise_for_status() 25 | return response.json() 26 | except requests.exceptions.RequestException as e: 27 | print(f"Error sending command to device: {e}") 28 | return None 29 | 30 | def log_device_data(self, data): 31 | """Log data received from the IoT device.""" 32 | # Here you would typically save the data to a database 33 | # For demonstration, we'll just print it 34 | print(f"[{datetime.now()}] Device Data: {data}") 35 | 36 | def receive_data(self): 37 | """Simulate receiving data from the IoT device.""" 38 | # In a real application, this might involve a WebSocket or MQTT subscription 39 | # For demonstration, we'll simulate it with a simple GET request 40 | try: 41 | response = requests.get(f"{self.device_url}/data") 42 | response.raise_for_status() 43 | data = response.json() 44 | self.log_device_data(data) 45 | return data 46 | except requests.exceptions.RequestException as e: 47 | print(f"Error receiving data 
from device: {e}") 48 | return None 49 | 50 | # Example usage 51 | if __name__ == "__main__": 52 | device_url = "http://example-iot-device.local" # Replace with your IoT device URL 53 | iot_service = IoTIntegrationService(device_url) 54 | 55 | # Get device status 56 | status = iot_service.get_device_status() 57 | print("Device Status:", status) 58 | 59 | # Send a command to the device 60 | command_response = iot_service.send_command("turn_on") 61 | print("Command Response:", command_response) 62 | 63 | # Receive data from the device 64 | data = iot_service.receive_data() 65 | print("Received Data:", data) 66 | -------------------------------------------------------------------------------- /src/analytics/privacy_analytics.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | from typing import List, Dict, Any, Optional 4 | from fastapi import FastAPI, HTTPException 5 | from pydantic import BaseModel 6 | import numpy as np 7 | 8 | app = FastAPI(title="PiConsensus Privacy-Preserving Analytics Engine", version="1.0") 9 | 10 | # === Data Models === 11 | 12 | class AnalyticsEvent(BaseModel): 13 | event_type: str 14 | value: float 15 | timestamp: float = time.time() 16 | group: Optional[str] = None # e.g., region, client type 17 | 18 | class InsightQuery(BaseModel): 19 | event_type: str 20 | start_time: Optional[float] = None 21 | end_time: Optional[float] = None 22 | group: Optional[str] = None 23 | noisy: Optional[bool] = False # If True, adds random noise for privacy 24 | 25 | # === In-Memory Store (replace with secure DB in production) === 26 | 27 | analytics_events: List[AnalyticsEvent] = [] 28 | 29 | # === Privacy-Aware Aggregation === 30 | 31 | def aggregate_events(event_type: str, start_time=None, end_time=None, group=None) -> List[float]: 32 | """Filter and aggregate event values.""" 33 | filtered = [ 34 | e.value 35 | for e in analytics_events 36 | if e.event_type == event_type 37 | and 
    (start_time is None or e.timestamp >= start_time) 38 | and (end_time is None or e.timestamp <= end_time) 39 | and (group is None or e.group == group) 40 | ] 41 | return filtered 42 | 43 | def add_noise(val: float, scale: float = 1.0) -> float: 44 | """Simple Laplacian noise mechanism for DP-like effect.""" 45 | return val + np.random.laplace(0.0, scale) 46 | 47 | # === API Endpoints === 48 | 49 | @app.post("/analytics/event") 50 | def record_event(ev: AnalyticsEvent): 51 | analytics_events.append(ev) 52 | return {"msg": "Event recorded"} 53 | 54 | @app.post("/analytics/insight") 55 | def get_insight(query: InsightQuery): 56 | data = aggregate_events(query.event_type, query.start_time, query.end_time, query.group) 57 | count = len(data) 58 | if count == 0: 59 | return {"msg": "No data for query"} 60 | avg = float(np.mean(data)) 61 | min_v = float(np.min(data)) 62 | max_v = float(np.max(data)) 63 | sum_v = float(np.sum(data)) 64 | if query.noisy: 65 | # Add noise to aggregated values for privacy 66 | avg = add_noise(avg, scale=1.0) 67 | min_v = add_noise(min_v, scale=1.0) 68 | max_v = add_noise(max_v, scale=1.0) 69 | sum_v = add_noise(sum_v, scale=2.0) 70 | return { 71 | "event_type": query.event_type, 72 | "group": query.group, 73 | "count": count, 74 | "avg": avg, 75 | "min": min_v, 76 | "max": max_v, 77 | "sum": sum_v, 78 | "noisy": query.noisy, 79 | } 80 | 81 | @app.get("/analytics/events") 82 | def list_events(limit: int = 50): 83 | return [e.dict() for e in analytics_events[-limit:]] 84 | 85 | # === Example Run: uvicorn src.analytics.privacy_analytics:app --reload === 86 | 87 | if __name__ == "__main__": 88 | import uvicorn 89 | uvicorn.run("analytics.privacy_analytics:app", host="0.0.0.0", port=8011, reload=True) 90 | -------------------------------------------------------------------------------- /incident_response/incident_response_plan.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import datetime 4 | from flask import Flask, request, jsonify 5 | from flask_cors import CORS 6 | from incident_response.incident import Incident 7 | from incident_response.team import Team 8 | from incident_response.playbook import Playbook 9 | 10 | app = Flask(__name__) 11 | CORS(app) 12 | 13 | 
    
    # Load incident response plan from JSON file 14 | with open('incident_response_plan.json') as f: 15 | incident_response_plan = json.load(f) 16 | 17 | # Initialize incident response team 18 | team = Team(incident_response_plan['team']) 19 | 20 | # Initialize playbooks 21 | playbooks = {} 22 | for playbook_name, playbook_config in incident_response_plan['playbooks'].items(): 23 | playbooks[playbook_name] = Playbook(playbook_config) 24 | 25 | @app.route('/incident', methods=['POST']) 26 | def create_incident(): 27 | data = request.get_json() 28 | incident = Incident(data['title'], data['description'], data['severity']) 29 | team.assign_to_incident(incident) 30 | return jsonify({'incident_id': incident.id}) 31 | 32 | @app.route('/incident/<incident_id>', methods=['GET']) 33 | def get_incident(incident_id): 34 | incident = team.get_incident(incident_id) 35 | if incident: 36 | return jsonify(incident.to_dict()) 37 | else: 38 | return jsonify({'error': 'Incident not found'}), 404 39 | 40 | @app.route('/incident/<incident_id>/playbook', methods=['POST']) 41 | def run_playbook(incident_id): 42 | data = request.get_json() 43 | playbook_name = data['playbook_name'] 44 | playbook = playbooks[playbook_name] 45 | playbook.run(incident_id) 46 | return jsonify({'message': 'Playbook executed successfully'}) 47 | 48 | @app.route('/incident/<incident_id>/status', methods=['GET']) 49 | def get_incident_status(incident_id): 50 | incident = team.get_incident(incident_id) 51 | if incident: 52 | return jsonify({'status': incident.status}) 53 | else: 54 | return jsonify({'error': 'Incident not found'}), 404 55 | 56 | @app.route('/incident/<incident_id>/update', methods=['PATCH']) 57 | def update_incident(incident_id): 58 | data = request.get_json() 59 | incident = team.get_incident(incident_id) 60 | if incident: 61 | incident.update(data) 62 | return jsonify({'message': 'Incident updated successfully'}) 63 | else: 64 | return jsonify({'error': 'Incident not found'}), 404 65 | 66 | @app.route('/incident/<incident_id>/close', methods=['POST']) 67 | def 
    
close_incident(incident_id): 68 | incident = team.get_incident(incident_id) 69 | if incident: 70 | incident.close() 71 | return jsonify({'message': 'Incident closed successfully'}) 72 | else: 73 | return jsonify({'error': 'Incident not found'}), 404 74 | 75 | if __name__ == '__main__': 76 | app.run(host='0.0.0.0', port=5002, debug=True) 77 | -------------------------------------------------------------------------------- /src/reputation/reputation_engine.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import List, Dict, Any, Optional 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | 6 | app = FastAPI(title="PiConsensus Decentralized Reputation & Trust Engine", version="1.0") 7 | 8 | # === Data Models === 9 | 10 | class TrustEvent(BaseModel): 11 | id: int 12 | subject: str # The address, agent, or entity whose reputation is affected 13 | event_type: str # e.g. "vote", "validate", "slash", "reward", "report" 14 | delta: float # How much to increase/decrease trust 15 | reason: Optional[str] = None 16 | timestamp: float = time.time() 17 | actor: Optional[str] = None # Who caused this event (optional) 18 | 19 | class ReputationScore(BaseModel): 20 | subject: str 21 | score: float 22 | last_updated: float = time.time() 23 | 24 | # === In-memory Store (replace with DB in production) === 25 | 26 | reputation_scores: Dict[str, ReputationScore] = {} 27 | trust_events: List[TrustEvent] = [] 28 | event_counter = 1 29 | 30 | # === API Endpoints === 31 | 32 | @app.post("/reputation/event") 33 | def add_trust_event(subject: str, event_type: str, delta: float, reason: Optional[str] = None, actor: Optional[str] = None): 34 | global event_counter 35 | # Update or create reputation score 36 | rep = reputation_scores.get(subject) 37 | if rep is None: 38 | rep = ReputationScore(subject=subject, score=0.0) 39 | reputation_scores[subject] = rep 40 | rep.score += delta 41 | 
rep.last_updated = time.time() 42 | # Record trust event 43 | event = TrustEvent( 44 | id=event_counter, 45 | subject=subject, 46 | event_type=event_type, 47 | delta=delta, 48 | reason=reason, 49 | timestamp=rep.last_updated, 50 | actor=actor 51 | ) 52 | trust_events.append(event) 53 | event_counter += 1 54 | return {"status": "updated", "subject": subject, "score": rep.score, "event_id": event.id} 55 | 56 | @app.get("/reputation/score/{subject}") 57 | def get_reputation_score(subject: str): 58 | rep = reputation_scores.get(subject) 59 | if not rep: 60 | raise HTTPException(404, "Subject not found") 61 | return rep.dict() 62 | 63 | @app.get("/reputation/history/{subject}") 64 | def get_trust_history(subject: str, limit: int = 20): 65 | events = [e.dict() for e in trust_events if e.subject == subject] 66 | return events[-limit:] 67 | 68 | @app.get("/reputation/leaderboard") 69 | def get_leaderboard(top: int = 20): 70 | ranked = sorted(reputation_scores.values(), key=lambda r: r.score, reverse=True) 71 | return [r.dict() for r in ranked[:top]] 72 | 73 | # === Example Run: uvicorn src.reputation.reputation_engine:app --reload === 74 | 75 | if __name__ == "__main__": 76 | import uvicorn 77 | uvicorn.run("reputation.reputation_engine:app", host="0.0.0.0", port=8014, reload=True) 78 | -------------------------------------------------------------------------------- /src/storage/swarm_layer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import hashlib 4 | from typing import Optional 5 | 6 | # === Swarm Node Configuration === 7 | 8 | SWARM_API_URL = os.environ.get("SWARM_API_URL", "http://localhost:1633") # Default local Bee node 9 | 10 | # === File Upload === 11 | 12 | def upload_file_to_swarm(filepath: str) -> Optional[str]: 13 | """ 14 | Upload a file to Swarm. Returns the Swarm content hash (bzz address). 
15 | """ 16 | if not os.path.exists(filepath): 17 | raise FileNotFoundError(f"File not found: {filepath}") 18 | with open(filepath, "rb") as f: 19 | files = {'file': f} 20 | resp = requests.post(f"{SWARM_API_URL}/bzz", files=files) 21 | if resp.status_code == 201: 22 | hash_ref = resp.json()["reference"] 23 | print(f"Uploaded {filepath} to Swarm: {hash_ref}") 24 | return hash_ref 25 | else: 26 | print(f"Swarm upload failed: {resp.text}") 27 | return None 28 | 29 | # === File Download === 30 | 31 | def download_file_from_swarm(hash_ref: str, dest_path: str) -> bool: 32 | """ 33 | Download a file from Swarm using its hash. Save to dest_path. 34 | """ 35 | resp = requests.get(f"{SWARM_API_URL}/bzz/{hash_ref}/", stream=True) 36 | if resp.status_code == 200: 37 | with open(dest_path, "wb") as f: 38 | for chunk in resp.iter_content(chunk_size=8192): 39 | f.write(chunk) 40 | print(f"Downloaded file {dest_path} from Swarm: {hash_ref}") 41 | return True 42 | else: 43 | print(f"Swarm download failed: {resp.text}") 44 | return False 45 | 46 | # === Integrity Validation === 47 | 48 | def validate_file_hash(filepath: str, expected_hash: str) -> bool: 49 | """ 50 | Validate a file's SHA256 hash against the expected Swarm reference. 51 | Note: Swarm uses its own hash tree for chunking, but for off-chain check, use SHA256. 52 | """ 53 | sha256 = hashlib.sha256() 54 | with open(filepath, "rb") as f: 55 | while chunk := f.read(8192): 56 | sha256.update(chunk) 57 | actual_hash = sha256.hexdigest() 58 | print(f"File hash: {actual_hash}") 59 | # This is for demonstration; full Swarm hash validation requires chunk tree parsing. 
60 | return actual_hash.startswith(expected_hash[:16]) # Partial match for demo 61 | 62 | # === Example Usage === 63 | 64 | if __name__ == "__main__": 65 | print("PiConsensus Swarm Storage & Decentralized File Layer Demo") 66 | # Upload example 67 | test_file = "example.txt" 68 | with open(test_file, "w") as f: 69 | f.write("Hello, Swarm decentralized world!") 70 | hash_ref = upload_file_to_swarm(test_file) 71 | # Download example 72 | if hash_ref: 73 | download_file_from_swarm(hash_ref, "downloaded_example.txt") 74 | # Validate 75 | validate_file_hash("downloaded_example.txt", hash_ref) 76 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

PiConsensus by KOSASIH is licensed under Creative Commons Attribution 4.0 International

2 | 3 | # Wellcome.. 4 | 5 | ## 😄😄😄 6 | 7 | ![Jokes Card](https://readme-jokes.vercel.app/api) 8 | 9 | # PiConsensus 10 | A Decentralized, AI-Powered, Quantum-Resistant Stable Coin Ecosystem 11 | 12 | PiConsensus - A Decentralized, AI-Powered, Quantum-Resistant Stable Coin Ecosystem 13 | =============================================================================== 14 | 15 | Overview 16 | -------- 17 | 18 | PiConsensus is a groundbreaking project that leverages the power of artificial intelligence, blockchain technology, and quantum-resistant cryptography to create a stable coin with a global consensus value of pi coin $314,159. This project aims to establish a decentralized, transparent, and secure ecosystem that fosters trust, stability, and widespread adoption. 19 | 20 | Features 21 | -------- 22 | 23 | * Decentralized architecture using blockchain technology 24 | * AI-powered stable coin with a global consensus value of pi coin $314,159 25 | * Quantum-resistant cryptography for secure transactions 26 | * Transparent and tamper-proof ledger 27 | * Decentralized governance and decision-making process 28 | 29 | Getting Started 30 | --------------- 31 | 32 | ### Prerequisites 33 | 34 | * Docker installed on your system 35 | * Docker Compose installed on your system 36 | 37 | ### Running the Application 38 | 39 | 1. Clone the repository: `git clone https://github.com/KOSASIH/pi-consensus.git` 40 | 2. Change into the project directory: `cd pi-consensus` 41 | 3. Build the Docker image: `docker-compose build` 42 | 4. Start the application: `docker-compose up` 43 | 5. Access the application at `http://localhost:8000` 44 | 45 | ### Requirements 46 | 47 | * Python 3.9 48 | * Postgres 13 49 | * Docker 20.10.7 50 | * Docker Compose 1.29.2 51 | 52 | License 53 | ------- 54 | 55 | PiConsensus is licensed under the Apache License 2.0. 56 | 57 | Contributing 58 | ------------ 59 | 60 | Contributions are welcome! Please fork the repository and submit a pull request. 
61 | 62 | Contact 63 | ------- 64 | 65 | For more information, please contact us at [info@pi-consensus.io](mailto:info@pi-consensus.io). 66 | -------------------------------------------------------------------------------- /src/defense/ai_intrusion_detection.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import List, Dict, Any 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | from sklearn.ensemble import IsolationForest 6 | import numpy as np 7 | 8 | app = FastAPI(title="PiConsensus AI Intrusion Detection", version="1.0") 9 | 10 | # === Data Models === 11 | 12 | class NetworkEvent(BaseModel): 13 | src_ip: str 14 | dst_ip: str 15 | src_port: int 16 | dst_port: int 17 | protocol: str 18 | packet_size: int 19 | timestamp: float = time.time() 20 | 21 | class Alert(BaseModel): 22 | event: Dict[str, Any] 23 | score: float 24 | reason: str 25 | timestamp: float 26 | 27 | # === In-Memory Event and Alert Store === 28 | 29 | network_events: List[NetworkEvent] = [] 30 | alerts: List[Alert] = [] 31 | 32 | # === AI/ML IDS Engine === 33 | 34 | class IntrusionDetector: 35 | def __init__(self): 36 | self.model = IsolationForest(n_estimators=100, contamination=0.05) 37 | self.trained = False 38 | 39 | def fit(self, events: List[NetworkEvent]): 40 | if len(events) < 10: 41 | return 42 | X = np.array([[e.src_port, e.dst_port, e.packet_size] for e in events]) 43 | self.model.fit(X) 44 | self.trained = True 45 | 46 | def detect(self, event: NetworkEvent) -> float: 47 | if not self.trained: 48 | return 0.0 49 | X = np.array([[event.src_port, event.dst_port, event.packet_size]]) 50 | score = self.model.decision_function(X)[0] 51 | return score 52 | 53 | ids_engine = IntrusionDetector() 54 | 55 | # === API Endpoints === 56 | 57 | @app.post("/event") 58 | def post_event(ev: NetworkEvent): 59 | network_events.append(ev) 60 | # Retrain model periodically (every 50 events) 61 | if 
len(network_events) % 50 == 0: 62 | ids_engine.fit(network_events[-500:]) # Use last 500 events for sliding window 63 | # Detect anomaly 64 | if ids_engine.trained: 65 | score = ids_engine.detect(ev) 66 | if score < -0.2: 67 | alert = Alert(event=ev.dict(), score=score, reason="Anomaly detected", timestamp=time.time()) 68 | alerts.append(alert) 69 | return {"alert": True, "score": score, "msg": "Potential intrusion detected"} 70 | return {"alert": False} 71 | 72 | @app.get("/alerts") 73 | def get_alerts(limit: int = 20): 74 | return [a.dict() for a in alerts[-limit:]] 75 | 76 | @app.get("/events") 77 | def get_events(limit: int = 50): 78 | return [e.dict() for e in network_events[-limit:]] 79 | 80 | @app.get("/status") 81 | def get_status(): 82 | return { 83 | "total_events": len(network_events), 84 | "alerts": len(alerts), 85 | "model_trained": ids_engine.trained 86 | } 87 | 88 | # === Example Run: uvicorn src.defense.ai_intrusion_detection:app --reload === 89 | 90 | if __name__ == "__main__": 91 | import uvicorn 92 | uvicorn.run("defense.ai_intrusion_detection:app", host="0.0.0.0", port=8004, reload=True) 93 | -------------------------------------------------------------------------------- /src/oracles/reputation_oracle.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import Dict, Any, List 3 | from fastapi import FastAPI, HTTPException 4 | from pydantic import BaseModel 5 | import numpy as np 6 | 7 | app = FastAPI(title="PiConsensus Decentralized Reputation Oracle", version="1.0") 8 | 9 | # === Data Model === 10 | 11 | class ReputationFeedback(BaseModel): 12 | subject: str # address/agent/contract being rated 13 | rater: str # address/agent submitting feedback 14 | score: float # -1.0 (very bad) to 1.0 (excellent) 15 | timestamp: float = time.time() 16 | comment: str = "" 17 | 18 | class ReputationScore(BaseModel): 19 | subject: str 20 | score: float 21 | n_feedback: int 22 | last_update: float 
23 | 24 | # === In-memory Store (replace with DB/distributed store in production) === 25 | 26 | feedbacks: List[ReputationFeedback] = [] 27 | scores: Dict[str, ReputationScore] = {} 28 | 29 | # === Aggregation Algorithm (pluggable) === 30 | 31 | def compute_score(subject: str) -> ReputationScore: 32 | """Weighted average with recency bias.""" 33 | relevant = [f for f in feedbacks if f.subject == subject] 34 | if not relevant: 35 | return ReputationScore(subject=subject, score=0.0, n_feedback=0, last_update=time.time()) 36 | scores_arr = np.array([f.score for f in relevant]) 37 | times = np.array([f.timestamp for f in relevant]) 38 | # Recency weighting: more recent feedback is weighted higher 39 | weights = np.exp((times - times.min()) / (60*60*24*7)) # 1 week scale 40 | weighted_score = float(np.average(scores_arr, weights=weights)) 41 | return ReputationScore( 42 | subject=subject, 43 | score=weighted_score, 44 | n_feedback=len(relevant), 45 | last_update=times.max() 46 | ) 47 | 48 | def update_score(subject: str): 49 | scores[subject] = compute_score(subject) 50 | 51 | # === REST API === 52 | 53 | @app.post("/feedback") 54 | def submit_feedback(fb: ReputationFeedback): 55 | if abs(fb.score) > 1.0: 56 | raise HTTPException(400, "Score must be between -1.0 and 1.0") 57 | feedbacks.append(fb) 58 | update_score(fb.subject) 59 | return {"msg": "Feedback submitted", "subject": fb.subject} 60 | 61 | @app.get("/score/{subject}") 62 | def get_score(subject: str): 63 | if subject not in scores: 64 | update_score(subject) 65 | return scores.get(subject).dict() 66 | 67 | @app.get("/feedback/{subject}") 68 | def get_feedback(subject: str): 69 | relevant = [f.dict() for f in feedbacks if f.subject == subject] 70 | return {"subject": subject, "feedback": relevant} 71 | 72 | @app.get("/top/{n}") 73 | def top_subjects(n: int = 10): 74 | sorted_scores = sorted(scores.values(), key=lambda s: s.score, reverse=True) 75 | return [s.dict() for s in sorted_scores[:n]] 76 | 77 | # === 
Example Run: uvicorn src.oracles.reputation_oracle:app --reload === 78 | 79 | if __name__ == "__main__": 80 | import uvicorn 81 | uvicorn.run("oracles.reputation_oracle:app", host="0.0.0.0", port=8003, reload=True) 82 | -------------------------------------------------------------------------------- /backend/app/utils/sustainability.py: -------------------------------------------------------------------------------- 1 | class SustainabilityUtils: 2 | @staticmethod 3 | def calculate_carbon_footprint(transportation_miles, energy_consumption_kwh, waste_generated_kg): 4 | """ 5 | Calculate the carbon footprint based on transportation, energy consumption, and waste generation. 6 | 7 | Parameters: 8 | - transportation_miles (float): The number of miles traveled by car. 9 | - energy_consumption_kwh (float): The amount of energy consumed in kilowatt-hours. 10 | - waste_generated_kg (float): The amount of waste generated in kilograms. 11 | 12 | Returns: 13 | - float: The estimated carbon footprint in kilograms of CO2 equivalent. 14 | """ 15 | # Constants for carbon footprint calculations 16 | CO2_PER_MILE = 0.404 # kg CO2 per mile for an average car 17 | CO2_PER_KWH = 0.92 # kg CO2 per kWh for average electricity generation 18 | CO2_PER_KG_WASTE = 0.5 # kg CO2 per kg of waste 19 | 20 | transportation_footprint = transportation_miles * CO2_PER_MILE 21 | energy_footprint = energy_consumption_kwh * CO2_PER_KWH 22 | waste_footprint = waste_generated_kg * CO2_PER_KG_WASTE 23 | 24 | total_footprint = transportation_footprint + energy_footprint + waste_footprint 25 | return total_footprint 26 | 27 | @staticmethod 28 | def assess_sustainability_practices(practices): 29 | """ 30 | Assess sustainability practices based on a list of practices. 31 | 32 | Parameters: 33 | - practices (list): A list of sustainability practices (e.g., recycling, using public transport). 34 | 35 | Returns: 36 | - dict: A summary of the sustainability assessment. 
37 | """ 38 | assessment = { 39 | "recycling": "Not Implemented", 40 | "public_transport": "Not Implemented", 41 | "energy_efficiency": "Not Implemented", 42 | "sustainable_food": "Not Implemented", 43 | } 44 | 45 | if "recycling" in practices: 46 | assessment["recycling"] = "Implemented" 47 | if "public_transport" in practices: 48 | assessment["public_transport"] = "Implemented" 49 | if "energy_efficiency" in practices: 50 | assessment["energy_efficiency"] = "Implemented" 51 | if "sustainable_food" in practices: 52 | assessment["sustainable_food"] = "Implemented" 53 | 54 | return assessment 55 | 56 | # Example usage 57 | if __name__ == "__main__": 58 | # Calculate carbon footprint 59 | footprint = SustainabilityUtils.calculate_carbon_footprint( 60 | transportation_miles=100, 61 | energy_consumption_kwh=300, 62 | waste_generated_kg=50 63 | ) 64 | print(f"Estimated Carbon Footprint: {footprint:.2f} kg CO2e") 65 | 66 | # Assess sustainability practices 67 | practices = ["recycling", "public_transport"] 68 | assessment = SustainabilityUtils.assess_sustainability_practices(practices) 69 | print("Sustainability Assessment:", assessment) 70 | -------------------------------------------------------------------------------- /src/privacy/mixer.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | import hashlib 4 | from Crypto.PublicKey import ECC 5 | from Crypto.Random import get_random_bytes 6 | from typing import List, Dict, Any 7 | 8 | # === Stealth Address Generation === 9 | 10 | def generate_stealth_address(pubkey: str) -> str: 11 | """ 12 | Generate a unique, one-time stealth address from a public key (ECC-based). 
class MixingPool:
    """A fixed-denomination pool that collects deposits and mixes them out to stealth addresses."""

    def __init__(self, pool_id: str, denomination: int):
        self.pool_id = pool_id
        self.denomination = denomination
        self.entries = []            # pending entries: dicts of stealth_address / timestamp / pubkey
        self.completed_mixes = []    # entries that have already been paid out

    def join(self, pubkey: str) -> str:
        """Register a participant and hand back their one-time stealth address."""
        addr = generate_stealth_address(pubkey)
        entry = {
            "stealth_address": addr,
            "timestamp": time.time(),
            "pubkey": pubkey,  # Not stored on-chain; for demo only
        }
        self.entries.append(entry)
        print(f"[Mixer] Joined pool {self.pool_id} with stealth address {addr}")
        return addr

    def shuffle(self):
        """Randomly shuffles the entries to break linkability."""
        random.shuffle(self.entries)
        print(f"[Mixer] Pool {self.pool_id} entries shuffled.")

    def mix(self):
        """Shuffle, then pay each entry out with a randomized delay (timing obfuscation layer)."""
        self.shuffle()
        while self.entries:
            entry = self.entries.pop(0)
            delay = random.uniform(1, 5)  # 1-5 second delay for the timing layer
            print(f"[Mixer] Sending {self.denomination} Pi to {entry['stealth_address']} after {delay:.2f}s delay.")
            time.sleep(delay)
            self.completed_mixes.append(entry)
class LayeredMixer:
    """Chains several MixingPool instances so coins pass through multiple privacy layers."""

    def __init__(self, denominations: List[int]):
        # One pool per denomination, e.g. [100, 300, 600] -> pool-100, pool-300, pool-600.
        self.pools = [MixingPool(f"pool-{d}", d) for d in denominations]

    def anonymize(self, pubkey: str, amount: int) -> List[str]:
        """
        Route `amount` through every pool the remaining balance can afford,
        collecting the stealth address issued at each layer.
        """
        remaining = amount
        stealth_routes = []
        for pool in self.pools:
            if remaining < pool.denomination:
                continue
            # Join first (issues the stealth address), then run the mix round.
            stealth_routes.append(pool.join(pubkey))
            pool.mix()
            remaining -= pool.denomination
        return stealth_routes
e.preventDefault(); 34 | if (!provider) return; 35 | 36 | const signer = provider.getSigner(); 37 | const tx = { 38 | to: recipient, 39 | value: ethers.utils.parseEther(amount), 40 | }; 41 | 42 | try { 43 | const transaction = await signer.sendTransaction(tx); 44 | await transaction.wait(); 45 | alert('Transaction successful!'); 46 | fetchBalance(walletAddress, provider); // Refresh balance 47 | } catch (error) { 48 | console.error('Transaction failed:', error); 49 | alert('Transaction failed. Please check the console for details.'); 50 | } 51 | }; 52 | 53 | return ( 54 |
55 |

Wallet

56 |
57 |

Address: {walletAddress}

58 |

Balance: {balance} ETH

59 |
60 |
61 |

Send Cryptocurrency

62 | setRecipient(e.target.value)} 67 | required 68 | /> 69 | setAmount(e.target.value)} 74 | required 75 | /> 76 | 77 |
78 |
@app.post("/bridge/initiate")
def initiate_bridge(req: LockAssetRequest, to_chain: str, to_address: str):
    """
    Lock assets on the source chain and open a new bridge request.

    Parameters:
    - req: source chain/address, asset, and amount to lock.
    - to_chain: destination chain identifier (query parameter).
    - to_address: recipient address on the destination chain (query parameter).

    Returns:
    - dict with the new bridge id and the simulated lock tx hash.
    """
    global bridge_counter
    # Simulate the asset lock on from_chain; a real bridge would submit an
    # on-chain lock transaction here and wait for confirmation.
    bridge = BridgeRequest(
        id=bridge_counter,
        from_chain=req.from_chain,
        to_chain=to_chain,
        from_address=req.from_address,
        # BUGFIX: `to_address` was an undefined name (NameError on every call);
        # it is now an explicit endpoint parameter.
        to_address=to_address,
        asset=req.asset,
        amount=req.amount,
        status="locked",
        created_at=time.time(),
        tx_hash_from=f"0xLOCK{bridge_counter:06d}",
    )
    bridge_requests[bridge_counter] = bridge
    bridge_counter += 1
    return {"status": "initiated", "bridge_id": bridge.id, "tx_hash_from": bridge.tx_hash_from}
@app.post("/bridge/relay")
def relay_bridge(relay: RelayRequest):
    """Execute the destination-chain leg of a locked/pending bridge request."""
    bridge = bridge_requests.get(relay.bridge_id)
    if bridge is None:
        raise HTTPException(404, "Bridge request not found")
    if bridge.status not in ("locked", "pending"):
        raise HTTPException(400, "Cannot relay: status is not locked or pending")
    # Simulate relay/execution on target chain
    bridge.status = "completed"
    bridge.tx_hash_to = f"0xRELAY{relay.bridge_id:06d}"
    return {"status": "relayed", "bridge_id": bridge.id, "tx_hash_to": bridge.tx_hash_to}
def incident_response_callback(anomalies: List[Dict[str, Any]]):
    """For each detected anomaly: raise an alert, then take an automatic defensive action."""
    for details in anomalies:
        alert_admin({"type": "anomaly", "details": details})
        # Which defensive action runs depends on what the anomaly points at:
        # a flagged contract gets frozen; a lone transaction gets a rollback request.
        if "contract_id" in details:
            freeze_contract(details["contract_id"])
        elif "tx_hash" in details:
            rollback_transaction(details["tx_hash"])
class SecurityService:
    """Flask security helper: user registration, JWT-based login, and current-user lookup.

    NOTE(review): assumes `app.config['JWT_SECRET_KEY']` is set before tokens are
    issued — confirm against application setup.
    """

    def __init__(self, app):
        # Keep a handle on the Flask app and attach flask-jwt-extended to it.
        self.app = app
        self.jwt = JWTManager(app)

    def create_user(self, username, password):
        """Create a new user with a hashed password and persist it.

        Returns the newly created User instance.
        """
        hashed_password = generate_password_hash(password)
        new_user = User(username=username, password=hashed_password)
        # Save the new user to the database
        new_user.save() # Assuming you have a save method in your User model
        return new_user

    def authenticate_user(self, username, password):
        """Authenticate a user and return an access token if successful.

        Returns a JWT access token valid for 1 day, or None on bad credentials.
        """
        user = User.query.filter_by(username=username).first()
        if user and check_password_hash(user.password, password):
            access_token = create_access_token(identity=user.id, expires_delta=timedelta(days=1))
            return access_token
        return None

    @jwt_required()
    def get_current_user(self):
        """Get the current authenticated user.

        Reads the identity from the request's JWT, so this must be called
        inside a Flask request context carrying a valid token.
        """
        user_id = get_jwt_identity()
        user = User.query.get(user_id)
        return user

    @jwt_required()
    def logout_user(self):
        """Logout the user (invalidate the token).

        NOTE(review): no blacklist is implemented here, so the token actually
        remains valid until it expires — confirm whether revocation is required.
        """
        # In a real application, you might want to implement a token blacklist
        return {"msg": "User logged out successfully"}, 200
@app.post("/governance/propose")
def submit_proposal(title: str, description: str, proposer: str):
    """
    Create a new open governance proposal.

    Parameters (query):
    - title / description / proposer: the proposal's metadata.

    Returns:
    - dict with the assigned proposal id.
    """
    global proposal_counter
    proposal = Proposal(
        id=proposal_counter,
        title=title,
        description=description,
        proposer=proposer,
        # BUGFIX: Proposal's `created_at: float = time.time()` default is evaluated
        # once at import time, so every proposal would share the same timestamp.
        # Stamp the actual creation time explicitly instead.
        created_at=time.time(),
    )
    proposals[proposal_counter] = proposal
    proposal_counter += 1
    return {"status": "submitted", "proposal_id": proposal.id}
@app.post("/governance/close/{proposal_id}")
def close_proposal(proposal_id: int):
    """Close voting on a proposal and settle it by simple majority."""
    proposal = proposals.get(proposal_id)
    if proposal is None:
        raise HTTPException(404, "Proposal not found")
    if proposal.status != "open":
        raise HTTPException(400, "Proposal not open")
    # Simple majority rule for demo: strictly more "for" votes passes; ties reject.
    proposal.status = "passed" if proposal.votes_for > proposal.votes_against else "rejected"
    return {
        "status": proposal.status,
        "votes_for": proposal.votes_for,
        "votes_against": proposal.votes_against,
    }
contract Stablecoin {
    // Oracle contract address
    address public oracleAddress;

    // Reserve contract address
    address public reserveAddress;

    // Mapping of user balances
    mapping (address => uint256) public userBalances;

    // Reserve balance backing the outstanding supply
    uint256 public reserveBalance;

    // Economic indicators (GDP and inflation rate)
    uint256 public gdp;
    uint256 public inflationRate;

    // Current interest rate
    uint256 public interestRate;

    // Event emitted when the reserve balance is updated
    event ReserveBalanceUpdated(uint256 _newBalance);

    // Event emitted when the economic indicators are updated
    event EconomicIndicatorsUpdated(uint256 _gdp, uint256 _inflationRate);

    // Event emitted when the interest rate is updated
    event InterestRateUpdated(uint256 _newRate);

    /// @notice Wire up the oracle and reserve contract addresses.
    /// BUGFIX: removed the `public` visibility specifier — constructors may not
    /// declare visibility in Solidity >= 0.7, so this was a compile error
    /// under `pragma solidity ^0.8.0`.
    constructor(address _oracleAddress, address _reserveAddress) {
        oracleAddress = _oracleAddress;
        reserveAddress = _reserveAddress;
    }

    /// @notice Update the reserve balance.
    /// NOTE(review): unrestricted — any caller can set this; add access control
    /// (e.g. restrict to the reserve/oracle contracts) before deployment.
    function updateReserveBalance(uint256 _newBalance) public {
        reserveBalance = _newBalance;
        emit ReserveBalanceUpdated(_newBalance);
    }

    /// @notice Update the economic indicators used for rate adjustment.
    /// NOTE(review): unrestricted for the same reason as above.
    function updateEconomicIndicators(uint256 _gdp, uint256 _inflationRate) public {
        gdp = _gdp;
        inflationRate = _inflationRate;
        emit EconomicIndicatorsUpdated(_gdp, _inflationRate);
    }

    /// @dev Recompute and store the interest rate from current state.
    function adjustInterestRate() internal {
        uint256 newInterestRate = calculateInterestRate(reserveBalance, gdp, inflationRate);
        interestRate = newInterestRate;
        emit InterestRateUpdated(newInterestRate);
    }

    /// @dev Interest-rate model stub.
    function calculateInterestRate(uint256 _reserveBalance, uint256 _gdp, uint256 _inflationRate) internal pure returns (uint256) {
        // TO DO: implement the interest rate calculation logic
        // For now, return a dummy value
        return 5;
    }

    /// @notice Mint `_amount` stablecoins to the caller, drawing down the reserve.
    function mint(uint256 _amount) public {
        // Check if the reserve balance is sufficient
        require(reserveBalance >= _amount, "Insufficient reserve balance");

        userBalances[msg.sender] += _amount;
        reserveBalance -= _amount;

        adjustInterestRate();
    }

    /// @notice Burn `_amount` of the caller's stablecoins, returning value to the reserve.
    function burn(uint256 _amount) public {
        // Check if the user has sufficient balance
        require(userBalances[msg.sender] >= _amount, "Insufficient balance");

        userBalances[msg.sender] -= _amount;
        reserveBalance += _amount;

        adjustInterestRate();
    }
}
contract Oracle is ChainlinkClient {
    using Chainlink for Chainlink.Request;

    // Fee paid per Chainlink request (0.1 LINK, 18 decimals).
    // BUGFIX: ORACLE_FEE was referenced but never declared.
    uint256 private constant ORACLE_FEE = 0.1 * 10 ** 18;

    // Chainlink oracle address
    address private oracleAddress;

    // Stablecoin contract this oracle pushes indicator updates to.
    address public stablecoinAddress;

    // Mapping of economic indicators to their corresponding values
    mapping (string => uint256) public economicIndicators;

    // Mapping of economic indicators to their corresponding timestamps
    mapping (string => uint256) public economicIndicatorTimestamps;

    // BUGFIX: record which indicator each in-flight request refers to, so the
    // fulfillment callback can resolve it (getIndicatorFromRequestId was a stub).
    mapping (bytes32 => string) private requestIndicators;

    // Event emitted when new data is received from the oracle
    event NewDataReceived(string _indicator, uint256 _value, uint256 _timestamp);

    // Event emitted when the oracle request is sent
    event OracleRequestSent(bytes32 _requestId, string _indicator);

    // Event emitted when the oracle request is fulfilled
    event OracleRequestFulfilled(bytes32 _requestId, uint256 _value, uint256 _timestamp);

    /// BUGFIX: constructors cannot declare `public` visibility under ^0.8.
    constructor(address _oracleAddress) {
        oracleAddress = _oracleAddress;
    }

    /// @notice Set the stablecoin contract to receive indicator updates.
    /// NOTE(review): unrestricted setter — add access control before deployment.
    function setStablecoinAddress(address _stablecoin) public {
        stablecoinAddress = _stablecoin;
    }

    /// @notice Request economic data (e.g. "GDP") from the Chainlink oracle.
    function requestEconomicData(string memory _indicator) public {
        // BUGFIX: buildChainlinkRequest takes (jobId, callbackAddress,
        // callbackSelector); the original passed the oracle address and a bare
        // string, which does not match the ChainlinkClient API.
        Chainlink.Request memory req = buildChainlinkRequest(
            keccak256(abi.encodePacked(_indicator)),
            address(this),
            this.fulfillEconomicData.selector
        );
        req.add("indicator", _indicator);

        // Send the request and remember which indicator it is for.
        bytes32 requestId = sendChainlinkRequestTo(oracleAddress, req, ORACLE_FEE);
        requestIndicators[requestId] = _indicator;

        emit OracleRequestSent(requestId, _indicator);
    }

    /// @notice Callback invoked by the Chainlink node with the requested value.
    function fulfillEconomicData(bytes32 _requestId, uint256 _value, uint256 _timestamp) public recordChainlinkFulfillment(_requestId) {
        string memory _indicator = getIndicatorFromRequestId(_requestId);

        economicIndicators[_indicator] = _value;
        economicIndicatorTimestamps[_indicator] = _timestamp;

        emit NewDataReceived(_indicator, _value, _timestamp);
        emit OracleRequestFulfilled(_requestId, _value, _timestamp);
    }

    /// @dev Resolve the indicator name recorded when the request was sent.
    /// BUGFIX: was `pure` with an empty body (always returned ""); now reads
    /// the requestIndicators mapping, so it is `view`.
    function getIndicatorFromRequestId(bytes32 _requestId) internal view returns (string memory) {
        return requestIndicators[_requestId];
    }

    /// @notice Push the latest GDP and inflation figures to the stablecoin contract.
    function updateStablecoinContract() public {
        uint256 gdp = economicIndicators["GDP"];
        uint256 inflationRate = economicIndicators["InflationRate"];

        // BUGFIX: the original line contained a literal `...` argument — a
        // syntax error — and referenced an undefined StablecoinContract type.
        // Use a low-level call against Stablecoin.updateEconomicIndicators.
        (bool ok, ) = stablecoinAddress.call(
            abi.encodeWithSignature("updateEconomicIndicators(uint256,uint256)", gdp, inflationRate)
        );
        require(ok, "stablecoin update failed");
    }
}
# Paths for the Circom circuit source and its build artifacts.
CIRCUIT_PATH = "circuits/kyc_proof.circom"
BUILD_DIR = "circuits/build"
INPUT_JSON = os.path.join(BUILD_DIR, "input.json")
WITNESS_WTN = os.path.join(BUILD_DIR, "witness.wtns")
PROOF_JSON = os.path.join(BUILD_DIR, "proof.json")
PUBLIC_JSON = os.path.join(BUILD_DIR, "public.json")

# === Example Circom Circuit (KYC) ===
# pragma circom 2.0.0;
# template KYCProof() {
#     signal input secret_age;
#     signal public min_age;
#     signal output valid;
#     valid <== secret_age >= min_age;
# }
# component main = KYCProof();

def generate_input_json(secret_age: int, min_age: int):
    """Write the circuit's inputs (private age, public minimum) to BUILD_DIR/input.json."""
    os.makedirs(BUILD_DIR, exist_ok=True)
    payload = {"secret_age": secret_age, "min_age": min_age}
    with open(INPUT_JSON, "w") as fh:
        json.dump(payload, fh)
def verify_proof() -> bool:
    """Run `snarkjs groth16 verify` against the generated proof; True iff snarkjs reports OK."""
    cmd = [
        "snarkjs", "groth16", "verify",
        os.path.join(BUILD_DIR, "verification_key.json"),
        PUBLIC_JSON,
        PROOF_JSON,
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    print(result.stdout)
    # snarkjs prints "OK!" on successful verification.
    return "OK!" in result.stdout
def get_recent_transactions(n: int = 100) -> pd.DataFrame:
    """
    Simulate fetching the n most recent Pi Coin transactions.
    In production, connect to blockchain node or API.
    """
    # Seed from wall-clock seconds so repeated calls within the same second
    # reproduce the same simulated batch.
    np.random.seed(int(time.time()))
    feature_names = ["amount", "fee", "block_time", "sender_entropy", "receiver_entropy"]
    means = [314, 0.01, 10, 0.5, 0.5]
    spreads = [50, 0.005, 2, 0.2, 0.2]
    # Random data: [amount, fee, block_time, sender_entropy, receiver_entropy]
    samples = np.random.normal(loc=means, scale=spreads, size=(n, 5))
    frame = pd.DataFrame(samples, columns=feature_names)
    frame["tx_hash"] = [f"0x{np.random.bytes(16).hex()}" for _ in range(n)]
    frame["timestamp"] = pd.Timestamp.now()
    return frame
def alert_admin(anomalies: List[Dict[str, Any]]):
    """
    Alert admin or security team about detected anomalies.
    In production, send email, push notification, or on-chain event.
    """
    print("\n[ALERT] 🚨 Anomalies Detected 🚨")
    for anomaly in anomalies:
        print(anomaly)
def send_payment(agent: "Agent", to: str, amount: float) -> str:
    """Transfer `amount` Pi from `agent` to `to` (simulated).

    Fix: the original only checked `balance < amount`, so a *negative*
    amount passed the check and `balance -= amount` INCREASED the balance —
    a free-money exploit for any rule that can be triggered with a negative
    value. Non-positive amounts are now rejected.

    The `Agent` annotation is a forward reference (string) so this function
    can be imported independently of the model definition order.

    Returns a human-readable status string (same contract as before).
    """
    if amount <= 0:
        return f"Invalid amount for {agent.id}"
    if agent.balance < amount:
        return f"Insufficient balance for {agent.id}"
    agent.balance -= amount
    # Simulate sending: in production, integrate with blockchain
    return f"Agent {agent.id} sent {amount} Pi to {to}"
@app.post("/agents/{agent_id}/deposit")
def deposit(agent_id: str, amount: float):
    """Credit `amount` Pi to an agent's balance.

    Fix: the original accepted any float, so a negative "deposit" silently
    drained the agent's balance. Non-positive amounts now return 400,
    matching the 4xx error style used by the other endpoints.

    Raises:
        HTTPException 404 if the agent does not exist.
        HTTPException 400 if `amount` is not strictly positive.
    """
    agent = agents.get(agent_id)
    if not agent:
        raise HTTPException(404, "Agent not found")
    if amount <= 0:
        raise HTTPException(400, "Deposit amount must be positive")
    agent.balance += amount
    return {"status": "deposit_ok", "new_balance": agent.balance}
8 | */ 9 | 10 | contract UpgradeableProxy { 11 | // Storage slot for the address of the current implementation 12 | bytes32 private constant IMPLEMENTATION_SLOT = keccak256("piconsensus.proxy.implementation"); 13 | // Storage slot for the proxy admin 14 | bytes32 private constant ADMIN_SLOT = keccak256("piconsensus.proxy.admin"); 15 | // Storage slot for contract version 16 | bytes32 private constant VERSION_SLOT = keccak256("piconsensus.proxy.version"); 17 | 18 | event Upgraded(address indexed newImplementation, string version); 19 | event AdminChanged(address indexed previousAdmin, address indexed newAdmin); 20 | 21 | modifier onlyAdmin() { 22 | require(msg.sender == _admin(), "Not admin"); 23 | _; 24 | } 25 | 26 | constructor(address _initialImplementation, string memory _initialVersion) { 27 | _setAdmin(msg.sender); 28 | _upgradeTo(_initialImplementation, _initialVersion); 29 | } 30 | 31 | function _implementation() internal view returns (address impl) { 32 | bytes32 slot = IMPLEMENTATION_SLOT; 33 | assembly { 34 | impl := sload(slot) 35 | } 36 | } 37 | 38 | function _admin() internal view returns (address adm) { 39 | bytes32 slot = ADMIN_SLOT; 40 | assembly { 41 | adm := sload(slot) 42 | } 43 | } 44 | 45 | function _version() public view returns (string memory ver) { 46 | bytes32 slot = VERSION_SLOT; 47 | assembly { 48 | ver := sload(slot) 49 | } 50 | } 51 | 52 | function admin() external view returns (address) { 53 | return _admin(); 54 | } 55 | 56 | function changeAdmin(address newAdmin) external onlyAdmin { 57 | require(newAdmin != address(0), "Zero address"); 58 | emit AdminChanged(_admin(), newAdmin); 59 | _setAdmin(newAdmin); 60 | } 61 | 62 | function upgradeTo(address newImplementation, string memory newVersion) external onlyAdmin { 63 | _upgradeTo(newImplementation, newVersion); 64 | } 65 | 66 | function _upgradeTo(address newImplementation, string memory newVersion) internal { 67 | require(newImplementation != address(0), "Zero address"); 68 | bytes32 
implSlot = IMPLEMENTATION_SLOT; 69 | bytes32 versionSlot = VERSION_SLOT; 70 | assembly { 71 | sstore(implSlot, newImplementation) 72 | sstore(versionSlot, newVersion) 73 | } 74 | emit Upgraded(newImplementation, newVersion); 75 | } 76 | 77 | // Delegate all call data to the implementation 78 | fallback() external payable { 79 | address impl = _implementation(); 80 | require(impl != address(0), "No implementation"); 81 | assembly { 82 | // Copy msg.data 83 | calldatacopy(0, 0, calldatasize()) 84 | // Delegatecall to the implementation 85 | let result := delegatecall(gas(), impl, 0, calldatasize(), 0, 0) 86 | let size := returndatasize() 87 | returndatacopy(0, 0, size) 88 | switch result 89 | case 0 { revert(0, size) } 90 | default { return(0, size) } 91 | } 92 | } 93 | 94 | receive() external payable {} 95 | 96 | function _setAdmin(address newAdmin) internal { 97 | bytes32 slot = ADMIN_SLOT; 98 | assembly { 99 | sstore(slot, newAdmin) 100 | } 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/forensics/audit_trail.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import Dict, Any, List, Optional 3 | from fastapi import FastAPI, HTTPException, Query 4 | from pydantic import BaseModel 5 | from sqlalchemy import create_engine, Column, Integer, String, Float, Text, desc 6 | from sqlalchemy.ext.declarative import declarative_base 7 | from sqlalchemy.orm import sessionmaker 8 | 9 | app = FastAPI(title="PiConsensus On-Chain Audit Trail & Forensics Engine", version="1.0") 10 | 11 | # === Database Setup (SQLite for demo, switch to PostgreSQL in prod) === 12 | 13 | DATABASE_URL = "sqlite:///./audit_trail.db" 14 | engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False}) 15 | Base = declarative_base() 16 | SessionLocal = sessionmaker(bind=engine) 17 | session = SessionLocal() 18 | 19 | class AuditEventDB(Base): 20 | __tablename__ = 
"audit_events" 21 | id = Column(Integer, primary_key=True, index=True) 22 | event_type = Column(String, index=True# === Pydantic Models === 23 | 24 | class AuditEvent(BaseModel): 25 | event_type: str 26 | subject: str 27 | data: Dict[str, Any] 28 | timestamp: float = time.time() 29 | 30 | # === API Endpoints === 31 | 32 | @app.post("/audit/log") 33 | def log_event(event: AuditEvent): 34 | db_event = AuditEventDB( 35 | event_type=event.event_type, 36 | subject=event.subject, 37 | data=str(event.data), 38 | timestamp=event.timestamp 39 | ) 40 | session.add(db_event) 41 | session.commit() 42 | return {"status": "logged", "event_id": db_event.id} 43 | 44 | @app.get("/audit/search") 45 | def search_events( 46 | subject: Optional[str] = None, 47 | event_type: Optional[str] = None, 48 | start_time: Optional[float] = None, 49 | end_time: Optional[float] = None, 50 | limit: int = Query(50, le=200) 51 | ): 52 | query = session.query(AuditEventDB) 53 | if subject: 54 | query = query.filter(AuditEventDB.subject == subject) 55 | if event_type: 56 | query = query.filter(AuditEventDB.event_type == event_type) 57 | if start_time: 58 | query = query.filter(AuditEventDB.timestamp >= start_time) 59 | if end_time: 60 | query = query.filter(AuditEventDB.timestamp <= end_time) 61 | events = query.order_by(desc(AuditEventDB.timestamp)).limit(limit).all() 62 | return [ 63 | { 64 | "id": e.id, 65 | "event_type": e.event_type, 66 | "subject": e.subject, 67 | "data": e.data, 68 | "timestamp": e.timestamp 69 | } 70 | for e in events 71 | ] 72 | 73 | @app.get("/audit/forensics_report") 74 | def forensics_report(subject: str): 75 | """Generate a simple forensics report for a subject.""" 76 | query = session.query(AuditEventDB).filter(AuditEventDB.subject == subject).order_by(AuditEventDB.timestamp) 77 | events = query.all() 78 | return { 79 | "subject": subject, 80 | "n_events": len(events), 81 | "timeline": [ 82 | { 83 | "event_type": e.event_type, 84 | "data": e.data, 85 | "timestamp": 
@app.get("/audit/recent")
def recent_events(limit: int = 20):
    """Return the `limit` most recent audit events, newest first."""
    rows = (
        session.query(AuditEventDB)
        .order_by(desc(AuditEventDB.timestamp))
        .limit(limit)
        .all()
    )
    return [
        {
            "id": row.id,
            "event_type": row.event_type,
            "subject": row.subject,
            "data": row.data,
            "timestamp": row.timestamp,
        }
        for row in rows
    ]
self.data.dropna(inplace=True) # Remove missing values 37 | self.data['date'] = pd.to_datetime(self.data['date']) # Convert date column 38 | self.data.set_index('date', inplace=True) # Set date as index 39 | self.data['price_change'] = self.data['price'].pct_change() # Calculate price change 40 | self.data.dropna(inplace=True) # Drop NaN values after calculation 41 | logger.info("Data preprocessing completed.") 42 | 43 | def train_model(self): 44 | """Train the Random Forest model on the market data.""" 45 | logger.info("Training model...") 46 | X = self.data.drop(columns=['price', 'price_change']) 47 | y = self.data['price'] 48 | 49 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 50 | 51 | self.model = RandomForestRegressor(n_estimators=100, random_state=42) 52 | self.model.fit(X_train, y_train) 53 | 54 | # Evaluate the model 55 | predictions = self.model.predict(X_test) 56 | mse = mean_squared_error(y_test, predictions) 57 | logger.info("Model training completed with MSE: %.2f", mse) 58 | 59 | def predict(self, input_data): 60 | """Make predictions using the trained model.""" 61 | if self.model is None: 62 | logger.error("Model is not trained yet. 
Call train_model() first.") 63 | raise Exception("Model not trained") 64 | 65 | logger.info("Making predictions...") 66 | input_df = pd.DataFrame(input_data) 67 | predictions = self.model.predict(input_df) 68 | return predictions 69 | 70 | def save_model(self, file_path): 71 | """Save the trained model to a file.""" 72 | joblib.dump(self.model, file_path) 73 | logger.info("Model saved to %s", file_path) 74 | 75 | def load_model(self, file_path): 76 | """Load a trained model from a file.""" 77 | self.model = joblib.load(file_path) 78 | logger.info("Model loaded from %s", file_path) 79 | 80 | # Example usage 81 | if __name__ == "__main__": 82 | data_source = "https://api.example.com/market_data" # Replace with actual data source 83 | service = MarketAnalysisService(data_source) 84 | 85 | service.fetch_data() 86 | service.preprocess_data() 87 | service.train_model() 88 | 89 | # Example input for prediction 90 | input_data = { 91 | 'feature1': [0.5], 92 | 'feature2': [1.2], 93 | # Add other features as necessary 94 | } 95 | predictions = service.predict(input_data) 96 | print("Predictions:", predictions) 97 | 98 | # Save the model 99 | service.save_model("market_analysis_model.pkl") 100 | -------------------------------------------------------------------------------- /src/quantum_security.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Dict, Any 2 | from Crypto.Hash import SHA512 3 | from Crypto.PublicKey import ECC 4 | from Crypto.Signature import DSS 5 | 6 | try: 7 | from pqcrypto.sign import dilithium2 8 | except ImportError: 9 | dilithium2 = None # Ensure pqcrypto is installed for full PQC support 10 | 11 | # === Quantum-Safe Key Generation === 12 | 13 | def generate_ecc_keypair() -> Tuple[bytes, bytes]: 14 | """Generate ECDSA secp256k1 keypair (classic).""" 15 | key = ECC.generate(curve='P-256') 16 | private_key = key.export_key(format='DER') 17 | public_key = 
def ecdsa_sign(message: bytes, private_key: bytes) -> bytes:
    """Sign `message` with ECDSA (FIPS 186-3) over a SHA-512 digest.

    Reconstructed: the source was truncated to
    `def ecdsa_sign(...) -> bytes return signer.sign(h)`. The body mirrors
    `ecdsa_verify` in this module, which imports the key with
    `ECC.import_key`, hashes with `SHA512.new`, and uses
    `DSS.new(key, 'fips-186-3')`.

    Args:
        message: raw bytes to sign.
        private_key: DER-encoded ECC private key (as produced by
            `generate_ecc_keypair`).

    Returns:
        The DSS signature bytes.
    """
    key = ECC.import_key(private_key)
    h = SHA512.new(message)
    signer = DSS.new(key, 'fips-186-3')
    return signer.sign(h)
hybrid_priv["dilithium_private"]), 74 | } 75 | 76 | def hybrid_verify(message: bytes, signature: Dict[str, bytes], hybrid_pub: Dict[str, bytes]) -> bool: 77 | """Verify hybrid signature (accepts only if both are valid).""" 78 | ecdsa_ok = ecdsa_verify(message, signature["ecdsa"], hybrid_pub["ecdsa_public"]) 79 | pqc_ok = pqc_verify(message, signature["pqc"], hybrid_pub["dilithium_public"]) 80 | return ecdsa_ok and pqc_ok 81 | 82 | # === Key Migration & Utilities === 83 | 84 | def migrate_keys_to_pqc(ecc_private_key: bytes) -> Dict[str, Any]: 85 | """ 86 | Assist migration from legacy ECDSA to hybrid quantum-resistant keys. 87 | """ 88 | pqc_priv, pqc_pub = generate_pqc_keypair() 89 | return { 90 | "old_ecdsa_private": ecc_private_key, 91 | "new_dilithium_private": pqc_priv, 92 | "new_dilithium_public": pqc_pub, 93 | } 94 | 95 | # === Example Usage === 96 | 97 | if __name__ == "__main__": 98 | print("Generating quantum-safe hybrid wallet...") 99 | keys = hybrid_keypair() 100 | msg = b"Ultra high-tech PiConsensus transaction" 101 | sig = hybrid_sign(msg, keys) 102 | print("Signature valid?", hybrid_verify(msg, sig, keys)) 103 | print("Legacy to PQC migration example:", migrate_keys_to_pqc(keys["ecdsa_private"])) 104 | -------------------------------------------------------------------------------- /src/api_gateway/gateway.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Request, HTTPException, status 2 | from fastapi.responses import JSONResponse 3 | from slowapi import Limiter, _rate_limit_exceeded_handler 4 | from slowapi.util import get_remote_address 5 | from slowapi.errors import RateLimitExceeded 6 | from user_agents import parse as parse_ua 7 | import redis 8 | import time 9 | import hashlib 10 | 11 | app = FastAPI(title="PiConsensus API Gateway", version="1.0") 12 | limiter = Limiter(key_func=get_remote_address) 13 | app.state.limiter = limiter 14 | 
def adaptive_rate_limit(ip: str) -> str:
    """Pick a rate-limit budget for `ip`: flagged IPs get a tight budget.

    Returns a slowapi-style limit string.
    """
    # Could enhance with scoring based on API key, reputation, etc.
    return "10/minute" if is_ip_flagged(ip) else "100/minute"
63 | @limiter.limit("20/minute") 64 | async def submit_tx(request: Request): 65 | ip = get_remote_address(request) 66 | body = await request.json() 67 | # Basic abuse logic: e.g., block repeated identical payloads 68 | tx_hash = hashlib.sha256(str(body).encode()).hexdigest() 69 | if rdb.get(f"recent_tx:{ip}:{tx_hash}"): 70 | flag_ip(ip, "replay attack") 71 | log_anomaly(ip, "replay_attack", {"tx_hash": tx_hash, "body": body}) 72 | raise HTTPException(status_code=429, detail="Repeated transaction detected.") 73 | rdb.setex(f"recent_tx:{ip}:{tx_hash}", 60, 1) 74 | # Forward transaction to backend here 75 | return {"status": "submitted", "tx_hash": tx_hash} 76 | 77 | @app.get("/api/v1/analytics") 78 | @limiter.limit("10/minute") 79 | async def analytics(): 80 | # Example endpoint for querying analytics 81 | return {"analytics": "Not implemented"} 82 | 83 | # === Example of Adaptive Rate Limit Endpoint === 84 | 85 | @app.get("/api/v1/profile") 86 | async def profile(request: Request): 87 | ip = get_remote_address(request) 88 | limit = adaptive_rate_limit(ip) 89 | # Manually apply limiter for custom rate limit 90 | if not limiter.hit(limit, request): 91 | raise HTTPException(status_code=429, detail="Rate limit reached.") 92 | return {"profile": "Your profile info here"} 93 | 94 | # === Run with: uvicorn src.api_gateway.gateway:app --reload === 95 | 96 | if __name__ == "__main__": 97 | import uvicorn 98 | uvicorn.run("api_gateway.gateway:app", host="0.0.0.0", port=8000, reload=True) 99 | -------------------------------------------------------------------------------- /src/compliance/cross_chain_compliance.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | from web3 import Web3 4 | from typing import List, Dict, Any 5 | 6 | # === Example Chain RPC URLs (customize for your needs) === 7 | CHAIN_CONFIG = { 8 | "ethereum": os.environ.get("ETH_RPC_URL", "https://mainnet.infura.io/v3/YOUR_KEY"), 9 | "bsc": 
os.environ.get("BSC_RPC_URL", "https://bsc-dataseed.binance.org/"), 10 | "polygon": os.environ.get("POLY_RPC_URL", "https://polygon-rpc.com/"), 11 | } 12 | 13 | # === Example Compliance Rules === 14 | BLACKLISTED_ADDRESSES = set([ 15 | "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", 16 | # Add more 17 | ]) 18 | 19 | MAX_TX_AMOUNT = 1_000_000 # Example: 1,000,000 Pi (adjust as needed) 20 | 21 | def is_blacklisted(address: str) -> bool: 22 | return address.lower() in BLACKLISTED_ADDRESSES 23 | 24 | def exceeds_amount(amount: int) -> bool: 25 | return amount > MAX_TX_AMOUNT 26 | 27 | def check_kyc(address: str) -> bool: 28 | # Example: Query an external KYC service (stub) 29 | # In production, replace with real KYC API or on-chain attestation check 30 | resp = requests.get(f"https://kyc.pi.example/api/v1/check/{address}") 31 | if resp.status_code == 200: 32 | return resp.json().get("kyc_passed", False) 33 | return False 34 | 35 | # === Cross-Chain Connector === 36 | 37 | class ChainConnector: 38 | def __init__(self, chain_name: str, rpc_url: str): 39 | self.chain_name = chain_name 40 | self.web3 = Web3(Web3.HTTPProvider(rpc_url)) 41 | if not self.web3.is_connected(): 42 | raise RuntimeError(f"Could not connect to {chain_name} node.") 43 | 44 | def get_latest_transactions(self, address: str, limit=10) -> List[Dict[str, Any]]: 45 | # This is a stub; use chain-specific indexers (Etherscan, BSCScan, etc.) 
for production 46 | # Here, just returns empty for demo 47 | return [] 48 | 49 | # === Compliance Engine === 50 | 51 | class CrossChainComplianceEngine: 52 | def __init__(self, chain_config: Dict[str, str]): 53 | self.connectors = {name: ChainConnector(name, url) for name, url in chain_config.items()} 54 | 55 | def check_transaction(self, tx: Dict[str, Any]) -> Dict[str, Any]: 56 | """ 57 | tx = { 58 | "from": "0x...", 59 | "to": "0x...", 60 | "value": 12345, 61 | "chain": "ethereum" 62 | } 63 | """ 64 | compliance = { 65 | "blacklisted": is_blacklisted(tx["from"]) or is_blacklisted(tx["to"]), 66 | "exceeds_amount": exceeds_amount(tx["value"]), 67 | "kyc_sender": check_kyc(tx["from"]), 68 | "kyc_receiver": check_kyc(tx["to"]), 69 | } 70 | compliance["compliant"] = all([ 71 | not compliance["blacklisted"], 72 | not compliance["exceeds_amount"], 73 | compliance["kyc_sender"], 74 | compliance["kyc_receiver"], 75 | ]) 76 | return compliance 77 | 78 | def monitor_address_across_chains(self, address: str) -> Dict[str, Any]: 79 | results = {} 80 | for chain, connector in self.connectors.items(): 81 | txs = connector.get_latest_transactions(address) 82 | results[chain] = [self.check_transaction(tx) for tx in txs] 83 | return results 84 | 85 | def report(self, tx: Dict[str, Any], compliance: Dict[str, Any]): 86 | # Send report to admin dashboard, SIEM system, or compliance officer 87 | print(f"[Compliance] TX {tx} | Compliance: {compliance}") 88 | 89 | # === Example Usage === 90 | 91 | if __name__ == "__main__": 92 | print("PiConsensus Automated Cross-Chain Compliance Layer Demo") 93 | engine = CrossChainComplianceEngine(CHAIN_CONFIG) 94 | # Simulate a transaction for testing 95 | test_tx = { 96 | "from": "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", 97 | "to": "0x1234567890abcdef1234567890abcdef12345678", 98 | "value": 123_456, 99 | "chain": "ethereum" 100 | } 101 | compliance = engine.check_transaction(test_tx) 102 | engine.report(test_tx, compliance) 103 | 
-------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | from fastapi import FastAPI, HTTPException, Depends 3 | from fastapi.responses import HTMLResponse 4 | from fastapi.middleware.cors import CORSMiddleware 5 | from pydantic import BaseModel 6 | from typing import List, Dict, Any 7 | from src import constants 8 | from utils import helpers 9 | from incident_response import team 10 | 11 | app = FastAPI( 12 | title="PiConsensus: Decentralized Quantum-Resistant Stable Coin Ecosystem API", 13 | version="2.0.0", 14 | description="Feature-rich API and dashboard backend for PiConsensus" 15 | ) 16 | 17 | # CORS for frontend dashboard integration 18 | app.add_middleware( 19 | CORSMiddleware, 20 | allow_origins=["*"], # Update for production! 21 | allow_credentials=True, 22 | allow_methods=["*"], 23 | allow_headers=["*"], 24 | ) 25 | 26 | # Blockchain API Models 27 | class MintRequest(BaseModel): 28 | address: str 29 | amount: float 30 | 31 | class BurnRequest(BaseModel): 32 | address: str 33 | amount: float 34 | 35 | class VoteRequest(BaseModel): 36 | address: str 37 | amount: float 38 | 39 | class IncidentReport(BaseModel): 40 | reporter: str 41 | description: str 42 | 43 | # Dummy blockchain interaction functions (replace with web3.py logic) 44 | def mint_tokens(address: str, amount: float) -> str: 45 | # TODO: Real blockchain call 46 | return f"Minted {amount} tokens to {address}" 47 | 48 | def burn_tokens(address: str, amount: float) -> str: 49 | return f"Burned {amount} tokens from {address}" 50 | 51 | def cast_vote(address: str, amount: float) -> str: 52 | return f"Vote cast by {address} for {amount}" 53 | 54 | # Dashboard root 55 | @app.get("/", response_class=HTMLResponse) 56 | def root(): 57 | return """ 58 |

PiConsensus API & Dashboard

59 |

Welcome to the PiConsensus backend.

@app.post("/burn")
def api_burn(req: BurnRequest):
    """Burn tokens from an address.

    Fix: the source contained `return {".isupper()}` — a syntax-garbled
    line. Restored to return the operation result under the "result" key,
    mirroring api_mint's response shape.
    """
    result = burn_tokens(req.address, req.amount)
    return {"result": result}
def did_from_pubkey(pubkey: bytes) -> str:
    """Derive a `did:picoin:` DID from a raw public key.

    The key is URL-safe base64 encoded with padding stripped, then appended
    to the method prefix (e.g. did:picoin:<base64url>).
    """
    encoded = base64.urlsafe_b64encode(pubkey).decode('utf-8')
    return "did:picoin:" + encoded.rstrip("=")
sign_credential(credential: Dict[str, Any], privkey: bytes) -> str: 49 | key = jwk.JWK.generate(kty='OKP', crv='Ed25519') 50 | key.import_from_bytes(privkey) 51 | payload = json.dumps(credential).encode() 52 | signer = jws.JWS(payload) 53 | signer.add_signature(key, None, json_encode({"alg": "EdDSA"})) 54 | return signer.serialize() 55 | 56 | def verify_credential(signed_credential: str, pubkey: bytes) -> Optional[Dict[str, Any]]: 57 | key = jwk.JWK.generate(kty='OKP', crv='Ed25519') 58 | key.import_from_bytes(pubkey, is_private=False) 59 | try: 60 | verifier = jws.JWS() 61 | verifier.deserialize(signed_credential) 62 | verifier.verify(key) 63 | return json.loads(verifier.payload) 64 | except Exception as e: 65 | print("Verification failed:", str(e)) 66 | return None 67 | 68 | # === Selective Disclosure Utility === 69 | 70 | def selective_disclose(vc: Dict[str, Any], fields: list) -> Dict[str, Any]: 71 | """Return only selected fields from a verifiable credential.""" 72 | return {k: v for k, v in vc.items() if k in fields} 73 | 74 | # === Example Usage === 75 | 76 | if __name__ == "__main__": 77 | print("PiConsensus Decentralized Identity Demo") 78 | # Generate DID and DID Document 79 | priv, pub = generate_ed25519_keypair() 80 | did = did_from_pubkey(pub) 81 | doc = create_did_document(did, pub) 82 | print("DID Document:", json.dumps(doc, indent=2)) 83 | 84 | # Issue a verifiable credential (KYC) 85 | credential = { 86 | "@context": ["https://www.w3.org/2018/credentials/v1"], 87 | "type": ["VerifiableCredential", "KYC"], 88 | "issuer": did, 89 | "issuanceDate": "2025-06-30T12:00:00Z", 90 | "credentialSubject": { 91 | "id": did, 92 | "name": "Alice", 93 | "age": 25, 94 | "jurisdiction": "US", 95 | "kycStatus": "verified" 96 | } 97 | } 98 | # Sign credential 99 | # NOTE: This is a stub for Ed25519 signature, actual JWS signing requires more handling 100 | # For production, use a library like didkit or identity-py for full credential flow 101 | signed_vc = 
base64.b64encode(json.dumps(credential).encode()).decode() 102 | print("Verifiable Credential (signed):", signed_vc) 103 | 104 | # Selective disclosure (e.g., prove only age) 105 | disclosed = selective_disclose(credential["credentialSubject"], ["age", "jurisdiction"]) 106 | print("Selective Disclosure Example:", disclosed) 107 | -------------------------------------------------------------------------------- /src/governance/ai_voting_engine.py: -------------------------------------------------------------------------------- 1 | import time 2 | import threading 3 | from typing import List, Dict, Any, Optional 4 | from fastapi import FastAPI, HTTPException 5 | from pydantic import BaseModel 6 | import numpy as np 7 | from sklearn.ensemble import IsolationForest 8 | 9 | app = FastAPI(title="PiConsensus AI-Driven Governance Engine", version="1.0") 10 | 11 | # === Data Models === 12 | 13 | class Proposal(BaseModel): 14 | id: int 15 | title: str 16 | description: str 17 | creator: str 18 | start_time: float 19 | end_time: float 20 | votes: Dict[str, float] = {} # voter: votes (quadratic) 21 | status: str = "open" # open/closed 22 | 23 | class Vote(BaseModel): 24 | proposal_id: int 25 | voter: str 26 | amount: float # Number of tokens used for voting 27 | 28 | # === Governance State === 29 | 30 | proposals: Dict[int, Proposal] = {} 31 | proposal_counter = 0 32 | votes_log: List[Dict[str, Any]] = [] 33 | 34 | # === AI/ML Voting Pattern Analysis (Fraud/Anomaly Detection) === 35 | 36 | class VoteAnomalyDetector: 37 | def __init__(self): 38 | self.model = IsolationForest(n_estimators=50, contamination=0.05) 39 | self.trained = False 40 | 41 | v in votes]) 42 | self.model.fit(X) 43 | self.trained = True 44 | 45 | def check(self, votes: List[Vote]) -> List[str]: 46 | if not self.trained or not votes: 47 | return [] 48 | X = np.array([[v.amount] for v in votes]) 49 | preds = self.model.predict(X) 50 | anomalies = [votes[i].voter for i in range(len(votes)) if preds[i] == -1] 51 
| return anomalies 52 | 53 | vote_analyzer = VoteAnomalyDetector() 54 | 55 | # === Quadratic Voting Utility === 56 | 57 | def quadratic_vote(amount: float) -> float: 58 | return np.sqrt(amount) 59 | 60 | # === Proposal Management === 61 | 62 | @app.post("/propose") 63 | def create_proposal(title: str, description: str, creator: str, duration_sec: int = 3600): 64 | global proposal_counter 65 | proposal_id = proposal_counter 66 | proposal = Proposal( 67 | id=proposal_id, 68 | title=title, 69 | description=description, 70 | creator=creator, 71 | start_time=time.time(), 72 | end_time=time.time() + duration_sec, 73 | ) 74 | proposals[proposal_id] = proposal 75 | proposal_counter += 1 76 | return {"proposal_id": proposal_id, "status": "created"} 77 | 78 | @app.get("/proposals") 79 | def list_proposals(): 80 | return [p.dict() for p in proposals.values()] 81 | 82 | @app.post("/vote") 83 | def cast_vote(vote: Vote): 84 | if vote.proposal_id not in proposals: 85 | raise HTTPException(404, "Proposal not found") 86 | proposal =} 87 | 88 | @app.get("/proposals/{proposal_id}/result") 89 | def proposal_result(proposal_id: int): 90 | if proposal_id not in proposals: 91 | raise HTTPException(404, "Proposal not found") 92 | proposal = proposals[proposal_id] 93 | total_votes = sum(proposal.votes.values()) 94 | return { 95 | "proposal_id": proposal_id, 96 | "title": proposal.title, 97 | "total_votes": total_votes, 98 | "votes": proposal.votes, 99 | "status": proposal.status, 100 | } 101 | 102 | @app.post("/analyze_votes/{proposal_id}") 103 | def analyze_votes(proposal_id: int): 104 | if proposal_id not in proposals: 105 | raise HTTPException(404, "Proposal not found") 106 | votes = [Vote(proposal_id=proposal_id, voter=v, amount=(proposals[proposal_id].votes[v])**2) for v in proposals[proposal_id].votes] 107 | vote_analyzer.fit(votes) 108 | anomalies = vote_analyzer.check(votes) 109 | return {"anomalous_voters": anomalies} 110 | 111 | # === Background Proposal Closer === 112 | 113 | 
def proposal_closer(): 114 | while True: 115 | now = time.time() 116 | for p in proposals.values(): 117 | if p.status == "open" and now > p.end_time: 118 | p.status = "closed" 119 | time.sleep(10) 120 | 121 | threading.Thread(target=proposal_closer, daemon=True).start() 122 | 123 | # === Example Run: uvicorn src.governance.ai_voting_engine:app --reload === 124 | 125 | if __name__ == "__main__": 126 | import uvicorn 127 | uvicorn.run("governance.ai_voting_engine:app", host="0.0.0.0", port=8001, reload=True) 128 | -------------------------------------------------------------------------------- /src/interoperability/bridge.py: -------------------------------------------------------------------------------- 1 | """ 2 | bridge.py — PiConsensus: Cross-Chain Interoperability Bridge 3 | 4 | Ultra high-tech, unstoppable module for connecting Pi Coin to external blockchains (Ethereum, BNB Chain, Solana, etc). 5 | Supports asset transfer, event relaying, and future-proof multi-chain logic. 6 | 7 | Requirements: 8 | pip install web3 requests 9 | # For Solana or others, additional libraries can be added (e.g., solana-py, py-cosmos). 10 | """ 11 | 12 | import json 13 | from typing import Dict, Any, Optional 14 | from web3 import Web3, HTTPProvider 15 | import requests 16 | 17 | # === Ethereum Example Config === 18 | ETH_NODE_URL = "https://mainnet.infura.io/v3/your_infura_project_id" 19 | ETH_BRIDGE_CONTRACT = "0xYourBridgeContractAddress" 20 | ETH_BRIDGE_ABI = [...] # Replace with the actual ABI list 21 | 22 | # === PiConsensus Example Config === 23 | # For demo purposes, PiConsensus blockchain is simulated via HTTP API. 
24 | PICOINS_NODE_URL = "http://localhost:8545" # Replace with actual node address 25 | 26 | # === Initialize Ethereum Web3 === 27 | w3 = Web3(HTTPProvider(ETH_NODE_URL)) 28 | 29 | # === Core Bridge Logic === 30 | 31 | class CrossChainBridge: 32 | def __init__(self): 33 | self.eth_contract = None 34 | if ETH_BRIDGE_ABI and ETH_BRIDGE_CONTRACT: 35 | self.eth_contract = w3.eth.contract(address=ETH_BRIDGE_CONTRACT, abi=ETH_BRIDGE_ABI) 36 | 37 | def listen_eth_events(self, event_name="AssetLocked", from_block="latest"): 38 | """ 39 | Listen for events on Ethereum Bridge contract (e.g., AssetLocked). 40 | In production, use asyncio/event loop or webhooks. 41 | """ 42 | if not self.eth_contract: 43 | print("Ethereum contract not configured.") 44 | return [] 45 | events = self.eth_contract.events[event_name].createFilter(fromBlock=from_block).get_all_entries() 46 | for event in events: 47 | print(f"[Bridge] Ethereum Event: {event['event']} | args: {event['args']}") 48 | return events 49 | 50 | def relay_to_picoin(self, event_data: Dict[str, Any]): 51 | """ 52 | Relay a cross-chain event to PiConsensus chain (simulate via HTTP POST). 53 | """ 54 | payload = { 55 | "type": "cross_chain_event", 56 | "origin": "ethereum", 57 | "data": event_data 58 | } 59 | print(f"[Bridge] Relaying event to PiConsensus: {payload}") 60 | # In production, sign and send to PiConsensus node 61 | try: 62 | resp = requests.post(f"{PICOINS_NODE_URL}/bridge/receive", json=payload, timeout=10) 63 | return resp.json() 64 | except Exception as e: 65 | print("Failed to relay to PiConsensus:", str(e)) 66 | return None 67 | 68 | def listen_picoin_events(self): 69 | """ 70 | Listen for PiConsensus events (simulate via polling API). 
71 | """ 72 | try: 73 | resp = requests.get(f"{PICOINS_NODE_URL}/bridge/events", timeout=10) 74 | events = resp.json().get("events", []) 75 | for event in events: 76 | print(f"[Bridge] PiConsensus Event: {event}") 77 | return events 78 | except Exception as e: 79 | print("Failed to fetch PiConsensus events:", str(e)) 80 | return [] 81 | 82 | def relay_to_ethereum(self, event_data: Dict[str, Any], private_key: str): 83 | """ 84 | Relay a cross-chain event from PiConsensus to Ethereum by sending a transaction. 85 | """ 86 | # For demo, just print the operation. 87 | print(f"[Bridge] Would relay to Ethereum: {event_data}") 88 | # In production, prepare and send signed transaction using web3.py here. 89 | return "tx_hash_placeholder" 90 | 91 | # === Example Usage === 92 | 93 | if __name__ == "__main__": 94 | bridge = CrossChainBridge() 95 | # Listen for Ethereum bridge events and relay to PiConsensus 96 | eth_events = bridge.listen_eth_events() 97 | for e in eth_events: 98 | bridge.relay_to_picoin(e["args"]) 99 | # Listen for PiConsensus events and relay to Ethereum (stub) 100 | pi_events = bridge.listen_picoin_events() 101 | for e in pi_events: 102 | bridge.relay_to_ethereum(e, private_key="YOUR_PRIVATE_KEY") 103 | -------------------------------------------------------------------------------- /backend/app/models/governance.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from sqlalchemy import Column, Integer, String, ForeignKey, DateTime, Enum 3 | from sqlalchemy.orm import relationship 4 | from sqlalchemy.ext.declarative import declarative_base 5 | import enum 6 | 7 | Base = declarative_base() 8 | 9 | class ProposalStatus(enum.Enum): 10 | PENDING = "pending" 11 | APPROVED = "approved" 12 | REJECTED = "rejected" 13 | COMPLETED = "completed" 14 | 15 | class GovernanceProposal(Base): 16 | __tablename__ = 'governance_proposals' 17 | 18 | id = Column(Integer, primary_key=True) 19 | 
title = Column(String, nullable=False) 20 | description = Column(String, nullable=False) 21 | creator_id = Column(Integer, ForeignKey('users.id'), nullable=False) 22 | created_at = Column(DateTime, default=datetime.utcnow) 23 | end_time = Column(DateTime, nullable=False) 24 | status = Column(Enum(ProposalStatus), default=ProposalStatus.PENDING) 25 | 26 | creator = relationship("User ", back_populates="proposals") 27 | votes = relationship("Vote", back_populates="proposal") 28 | 29 | def __init__(self, title, description, creator_id, duration_days): 30 | self.title = title 31 | self.description = description 32 | self.creator_id = creator_id 33 | self.end_time = datetime.utcnow() + timedelta(days=duration_days) 34 | 35 | def is_active(self): 36 | """Check if the proposal is still active.""" 37 | return datetime.utcnow() < self.end_time 38 | 39 | def tally_votes(self): 40 | """Tally votes and update the proposal status.""" 41 | if not self.is_active(): 42 | yes_votes = sum(1 for vote in self.votes if vote.vote_type == VoteType.YES) 43 | no_votes = sum(1 for vote in self.votes if vote.vote_type == VoteType.NO) 44 | 45 | if yes_votes > no_votes: 46 | self.status = ProposalStatus.APPROVED 47 | else: 48 | self.status = ProposalStatus.REJECTED 49 | self.end_time = datetime.utcnow() # Mark as completed 50 | return self.status 51 | return None 52 | 53 | class VoteType(enum.Enum): 54 | YES = "yes" 55 | NO = "no" 56 | 57 | class Vote(Base): 58 | __tablename__ = 'votes' 59 | 60 | id = Column(Integer, primary_key=True) 61 | proposal_id = Column(Integer, ForeignKey('governance_proposals.id'), nullable=False) 62 | voter_id = Column(Integer, ForeignKey('users.id'), nullable=False) 63 | vote_type = Column(Enum(VoteType), nullable=False) 64 | created_at = Column(DateTime, default=datetime.utcnow) 65 | 66 | proposal = relationship("GovernanceProposal", back_populates="votes") 67 | voter = relationship("User ", back_populates="votes") 68 | 69 | def __init__(self, proposal_id, 
voter_id, vote_type): 70 | self.proposal_id = proposal_id 71 | self.voter_id = voter_id 72 | self.vote_type = vote_type 73 | 74 | class User(Base): 75 | __tablename__ = 'users' 76 | 77 | id = Column(Integer, primary_key=True) 78 | username = Column(String, unique=True, nullable=False) 79 | proposals = relationship("GovernanceProposal", back_populates="creator") 80 | votes = relationship("Vote", back_populates="voter") 81 | 82 | def __init__(self, username): 83 | self.username = username 84 | 85 | # Example usage 86 | if __name__ == "__main__": 87 | # This section is for demonstration purposes and should be removed in production code. 88 | from sqlalchemy import create_engine 89 | from sqlalchemy.orm import sessionmaker 90 | 91 | # Create a SQLite database in memory for demonstration 92 | engine = create_engine('sqlite:///:memory:') 93 | Base.metadata.create_all(engine) 94 | 95 | Session = sessionmaker(bind=engine) 96 | session = Session() 97 | 98 | # Create a user 99 | user = User(username="alice") 100 | session.add(user) 101 | session.commit() 102 | 103 | # Create a governance proposal 104 | proposal = GovernanceProposal(title="Increase Block Size", description="Proposal to increase the block size limit.", creator_id=user.id, duration_days=7) 105 | session.add(proposal) 106 | session.commit() 107 | 108 | # User votes on the proposal 109 | vote = Vote(proposal_id=proposal.id, voter_id=user.id, vote_type=VoteType.YES) 110 | session.add(vote) 111 | session.commit() 112 | 113 | # Tally votes and update proposal status 114 | proposal.tally_votes() 115 | session.commit() 116 | 117 | print(f"Proposal '{proposal.title}' status: {proposal.status.value}") 118 | -------------------------------------------------------------------------------- /src/simulation/attack_vectors.py: -------------------------------------------------------------------------------- 1 | import random 2 | import networkx as nx 3 | import matplotlib.pyplot as plt 4 | from hypothesis import given, 
strategies as st 5 | from typing import Dict, Any, List, Callable 6 | 7 | # === Agent-Based Network Simulator === 8 | 9 | class Node: 10 | def __init__(self, node_id: int, is_adversary: bool = False): 11 | self.node_id = node_id 12 | self.is_adversary = is_adversary 13 | self.balance = 1000 if not is_adversary else 10000 14 | 15 | def __repr__(self): 16 | return f"Node({self.node_id}, adversary={self.is_adversary})" 17 | 18 | class NetworkSimulator: 19 | def __init__(self, num_nodes=10, adversaries=2): 20 | self.G = nx.erdos_renyi_graph(num_nodes, 0.5) 21 | self.nodes = [Node(i, i < adversaries) for i in range(num_nodes)] 22 | self.adversaries = [n for n in self.nodes if n.is_adversary] 23 | self.honest = [n for n in self.nodes if not n.is_adversary] 24 | 25 | def simulate_attack(self, attack_type: str, rounds: int = 5): 26 | print(f"\n[Sim] Running {attack_type} attack for {rounds} rounds...") 27 | if attack_type == "double_spend": 28 | return self._double_spend_attack(rounds) 29 | elif attack_type == "sybil": 30 | return self._sybil_attack(rounds) 31 | elif attack_type == "network_partition": 32 | return self._network_partition_attack(rounds) 33 | else: 34 | print("Unknown attack type.") 35 | return None 36 | 37 | def _double_spend_attack(self, rounds): 38 | results = [] 39 | attacker = self.adversaries[0] 40 | for r in range(rounds): 41 | victim = random.choice(self.honest) 42 | print(f"Round {r + 1}: {attacker} attempts double-spend against {victim}") 43 | # Simulate double-spend by broadcasting conflicting transactions 44 | tx1 = {"from": attacker.node_id, "to": victim.node_id, "amount": 100, "nonce": r} 45 | tx2 = {"from": attacker.node_id, "to": random.choice(self.honest).node_id, "amount": 100, "nonce": r} 46 | confirmed = random.choice([tx1, tx2]) 47 | results.append(confirmed) 48 | print(f"Confirmed: {confirmed}") 49 | return results 50 | 51 | def _sybil_attack(self, rounds): 52 | results = [] 53 | for r in range(rounds): 54 | sybil_nodes = [Node(1000 + 
i, True) for i in range(10)] 55 | print(f"Round {r + 1}: {len(sybil_nodes)} Sybil nodes injected.") 56 | self.nodes += sybil_nodes 57 | self.adversaries += sybil_nodes 58 | # Simulate consensus voting 59 | votes = [random.choice(["A", "B"]) for _ in self.nodes] 60 | majority = max(set(votes), key=votes.count) 61 | print(f"Majority decision: {majority}") 62 | results.append({"round": r, "majority": majority, "votes": votes.count(majority)}) 63 | return results 64 | 65 | def _network_partition_attack(self, rounds): 66 | results = [] 67 | for r in range(rounds): 68 | # Randomly split network into two 69 | partition = set(random.sample([n.node_id for n in self.nodes], len(self.nodes)//2)) 70 | print(f"Round {r + 1}: Partition {partition}") 71 | # Simulate partitioned consensus 72 | decisions = {pid: random.choice(["yes", "no"]) for pid in partition} 73 | print(f"Partitioned decisions: {decisions}") 74 | results.append(decisions) 75 | return results 76 | 77 | def visualize(self): 78 | nx.draw(self.G, with_labels=True) 79 | plt.title("PiConsensus Network Topology") 80 | plt.show() 81 | 82 | # === Fuzzing & Property-Based Testing === 83 | 84 | @given(amount=st.integers(min_value=-10000, max_value=10000)) 85 | def test_transaction_validity(amount): 86 | """Fuzz test: Transaction amounts should be positive and not overflow.""" 87 | assert amount >= 0, "Negative transaction detected!" 88 | assert amount < 1e9, "Overflow transaction detected!" 
89 | 90 | # === Example Usage === 91 | 92 | def run_all_simulations(): 93 | net = NetworkSimulator(num_nodes=12, adversaries=3) 94 | net.visualize() 95 | print("Double-Spend Attack Results:", net.simulate_attack("double_spend")) 96 | print("Sybil Attack Results:", net.simulate_attack("sybil")) 97 | print("Network Partition Attack Results:", net.simulate_attack("network_partition")) 98 | print("Running transaction fuzz test (property-based)...") 99 | try: 100 | test_transaction_validity() 101 | print("Fuzz tests passed.") 102 | except AssertionError as e: 103 | print("Fuzz test failed:", e) 104 | 105 | if __name__ == "__main__": 106 | print("PiConsensus Simulation & Adversarial Test Suite") 107 | run_all_simulations() 108 | -------------------------------------------------------------------------------- /src/oracle/aggregator.py: -------------------------------------------------------------------------------- 1 | import time 2 | import json 3 | import random 4 | import threading 5 | from typing import List, Dict, Any, Callable, Optional 6 | import numpy as np 7 | import requests 8 | 9 | # === Oracle Provider Interface === 10 | 11 | class OracleProvider: 12 | def __init__(self, name: str, url: str, parse_fn: Callable[[Any], float], weight: float = 1.0): 13 | self.name = name 14 | self.url = url 15 | self.parse_fn = parse_fn 16 | self.weight = weight 17 | self.last_value = None 18 | self.last_timestamp = None 19 | 20 | def fetch(self) -> Optional[float]: 21 | try: 22 | resp = requests.get(self.url, timeout=5) 23 | value = self.parse_fn(resp) 24 | self.last_value = value 25 | self.last_timestamp = time.time() 26 | return value 27 | except Exception as e: 28 | print(f"[OracleProvider] {self.name} fetch failed: {e}") 29 | return None 30 | 31 | # === Example Oracle Providers (e.g., for price feeds) === 32 | 33 | def parse_coingecko(resp): 34 | data = resp.json() 35 | return data["pi"]["usd"] 36 | 37 | def parse_binance(resp): 38 | data = resp.json() 39 | return 
float(data["price"]) 40 | 41 | ORACLE_PROVIDERS = [ 42 | OracleProvider("CoinGecko", "https://api.coingecko.com/api/v3/simple/price?ids=pi&vs_currencies=usd", parse_coingecko, weight=1.0), 43 | OracleProvider("Binance", "https://api.binance.com/api/v3/ticker/price?symbol=PIUSDT", parse_binance, weight=1.1), 44 | # Add more providers as needed 45 | ] 46 | 47 | # === Data Aggregator === 48 | 49 | class OracleAggregator: 50 | def __init__(self, providers: List[OracleProvider]): 51 | self.providers = providers 52 | 53 | def fetch_all(self) -> Dict[str, float]: 54 | results = {} 55 | for provider in self.providers: 56 | value = provider.fetch() 57 | if value is not None: 58 | results[provider.name] = value 59 | return results 60 | 61 | def aggregate(self, values: Dict[str, float]) -> float: 62 | """ 63 | Aggregate values using weighted median for robustness. 64 | """ 65 | if not values: 66 | raise ValueError("No values to aggregate.") 67 | weights = [p.weight for p in self.providers if p.name in values] 68 | vals = [values[p.name] for p in self.providers if p.name in values] 69 | weighted_vals = np.repeat(vals, [int(w * 100) for w in weights]) 70 | median = float(np.median(weighted_vals)) 71 | return median 72 | 73 | def score_providers(self, values: Dict[str, float], agg_value: float) -> Dict[str, float]: 74 | """ 75 | Score providers based on proximity to aggregate value. 76 | """ 77 | scores = {} 78 | for name, value in values.items(): 79 | scores[name] = 1.0 / (1 + abs(value - agg_value)) 80 | return scores 81 | 82 | def resolve_dispute(self, values: Dict[str, float]) -> str: 83 | """ 84 | Resolve disputes by picking the value closest to the median. 85 | """ 86 | agg_value = self.aggregate(values) 87 | closest = min(values.items(), key=lambda x: abs(x[1] - agg_value)) 88 | return f"Dispute resolved: {closest[0]} value ({closest[1]}) accepted." 89 | 90 | def run_once(self) -> Dict[str, Any]: 91 | """ 92 | Fetch, aggregate, score, and resolve in one pass. 
93 | """ 94 | values = self.fetch_all() 95 | if not values: 96 | return {"error": "No oracle data available."} 97 | agg = self.aggregate(values) 98 | scores = self.score_providers(values, agg) 99 | dispute = self.resolve_dispute(values) if len(values) > 1 and max(values.values()) - min(values.values()) > 0.05 * agg else "No dispute." 100 | result = { 101 | "oracle_values": values, 102 | "aggregated": agg, 103 | "provider_scores": scores, 104 | "dispute_resolution": dispute 105 | } 106 | print(json.dumps(result, indent=2)) 107 | return result 108 | 109 | def run_continuous(self, poll_interval=60): 110 | """ 111 | Continuously aggregate oracle data and output results. 112 | """ 113 | def loop(): 114 | while True: 115 | self.run_once() 116 | time.sleep(poll_interval) 117 | threading.Thread(target=loop, daemon=True).start() 118 | 119 | # === Example Usage === 120 | 121 | if __name__ == "__main__": 122 | aggregator = OracleAggregator(ORACLE_PROVIDERS) 123 | print("Decentralized Oracle Aggregator Demo") 124 | aggregator.run_once() 125 | # To enable continuous mode, uncomment: 126 | # aggregator.run_continuous(poll_interval=60) 127 | # while True: time.sleep(60) 128 | -------------------------------------------------------------------------------- /backend/app/services/privacy_service.py: -------------------------------------------------------------------------------- 1 | import os 2 | import base64 3 | import logging 4 | from cryptography.fernet import Fernet 5 | from cryptography.hazmat.primitives import hashes 6 | from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC 7 | from cryptography.hazmat.backends import default_backend 8 | from cryptography.hazmat.primitives import serialization 9 | from cryptography.hazmat.primitives.asymmetric import rsa 10 | 11 | # Configure logging 12 | logging.basicConfig(level=logging.INFO) 13 | logger = logging.getLogger(__name__) 14 | 15 | class PrivacyService: 16 | def __init__(self): 17 | self.secret_key = 
self.generate_secret_key() 18 | self.fernet = Fernet(self.secret_key) 19 | 20 | def generate_secret_key(self): 21 | """Generate a secure random key for encryption.""" 22 | key = Fernet.generate_key() 23 | logger.info("Generated a new secret key.") 24 | return key 25 | 26 | def encrypt_data(self, data): 27 | """Encrypt the given data using Fernet symmetric encryption.""" 28 | if isinstance(data, str): 29 | data = data.encode('utf-8') 30 | encrypted_data = self.fernet.encrypt(data) 31 | logger.info("Data encrypted successfully.") 32 | return encrypted_data 33 | 34 | def decrypt_data(self, encrypted_data): 35 | """Decrypt the given encrypted data using Fernet symmetric encryption.""" 36 | decrypted_data = self.fernet.decrypt(encrypted_data) 37 | logger.info("Data decrypted successfully.") 38 | return decrypted_data.decode('utf-8') 39 | 40 | def anonymize_data(self, user_data): 41 | """Anonymize user data by hashing sensitive fields.""" 42 | hashed_data = {} 43 | for key, value in user_data.items(): 44 | if key in ['email', 'phone_number', 'ssn']: # Sensitive fields 45 | hashed_value = self.hash_data(value) 46 | hashed_data[key] = hashed_value 47 | else: 48 | hashed_data[key] = value 49 | logger.info("User data anonymized successfully.") 50 | return hashed_data 51 | 52 | def hash_data(self, data): 53 | """Hash the given data using SHA-256.""" 54 | hasher = hashes.Hash(hashes.SHA256(), backend=default_backend()) 55 | hasher.update(data.encode('utf-8')) 56 | hashed_value = hasher.finalize() 57 | return base64.urlsafe_b64encode(hashed_value).decode('utf-8') 58 | 59 | def generate_rsa_key_pair(self): 60 | """Generate an RSA key pair for asymmetric encryption.""" 61 | private_key = rsa.generate_private_key( 62 | public_exponent=65537, 63 | key_size=2048, 64 | backend=default_backend() 65 | ) 66 | public_key = private_key.public_key() 67 | logger.info("RSA key pair generated successfully.") 68 | return private_key, public_key 69 | 70 | def encrypt_with_rsa(self, public_key, 
data): 71 | """Encrypt data using the provided RSA public key.""" 72 | encrypted_data = public_key.encrypt( 73 | data.encode('utf-8'), 74 | padding.OAEP( 75 | mgf=padding.MGF1(algorithm=hashes.SHA256()), 76 | algorithm=hashes.SHA256(), 77 | label=None 78 | ) 79 | ) 80 | logger.info("Data encrypted with RSA successfully.") 81 | return encrypted_data 82 | 83 | def decrypt_with_rsa(self, private_key, encrypted_data): 84 | """Decrypt data using the provided RSA private key.""" 85 | decrypted_data = private_key.decrypt( 86 | encrypted_data, 87 | padding.OAEP( 88 | mgf=padding.MGF1(algorithm=hashes.SHA256()), 89 | algorithm=hashes.SHA256(), 90 | label=None 91 | ) 92 | ) 93 | logger.info("Data decrypted with RSA successfully.") 94 | return decrypted_data.decode('utf-8') 95 | 96 | # Example usage 97 | if __name__ == "__main__": 98 | privacy_service = PrivacyService() 99 | 100 | # Encrypt and decrypt data 101 | sensitive_data = "This is a secret message." 102 | encrypted_data = privacy_service.encrypt_data(sensitive_data) 103 | decrypted_data = privacy_service.decrypt_data(encrypted_data) 104 | logger.info("Original Data: %s", sensitive_data) 105 | logger.info("Decrypted Data: %s", decrypted_data) 106 | 107 | # Anonymize user data 108 | user_data = { 109 | "username": "john_doe", 110 | "email": "john@example.com", 111 | "phone_number": "123-456-7890", 112 | "ssn": "123-45-6789" 113 | } 114 | anonymized_data = privacy_service.anonymize_data(user_data) 115 | logger.info("Anonymized Data: %s", anonymized_data) 116 | 117 | # Generate RSA key pair 118 | private_key, public_key = privacy_service.generate_rsa_key_pair() 119 | 120 | -------------------------------------------------------------------------------- /backend/app/utils/quantum_resistance.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import logging 3 | from cryptography.hazmat.primitives.asymmetric import rsa, padding 4 | from cryptography.hazmat.primitives 
# NOTE(dump): continuation of backend/app/utils/quantum_resistance.py; the
# first statement completes the import split across the chunk boundary.
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import hashes

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class QuantumResistanceUtility:
    """Heuristic checks and helpers around algorithm "quantum resistance".

    NOTE(review): the classifications used here are cryptographically dubious —
    Shor's algorithm breaks RSA at any key size and all ECDLP curves
    (P-256/P-384/P-521), so none of these are quantum resistant in the
    academic sense. Behavior is preserved as-is; confirm intent before
    correcting the return values.
    """

    def __init__(self):
        # Algorithm name -> checker. NOTE: the SHA-256 checker takes no
        # argument while the other two take one; callers must dispatch
        # accordingly.
        self.algorithms = {
            'RSA': self.is_rsa_quantum_resistant,
            'ECDSA': self.is_ecdsa_quantum_resistant,
            'SHA-256': self.is_sha256_quantum_resistant,
            # Add more algorithms as needed
        }

    def is_rsa_quantum_resistant(self, key_size):
        """Return True iff *key_size* >= 3072 (the policy threshold used here)."""
        if key_size >= 3072:
            logger.info("RSA with key size %d is considered quantum resistant.", key_size)
            return True
        logger.warning("RSA with key size %d is NOT considered quantum resistant.", key_size)
        return False

    def is_ecdsa_quantum_resistant(self, curve_name):
        """Return True iff *curve_name* is one of the NIST P-256/384/521 curves."""
        # Curves this utility treats as acceptable (see class NOTE above).
        accepted = ['P-256', 'P-384', 'P-521']
        if curve_name in accepted:
            logger.info("ECDSA with curve %s is considered quantum resistant.", curve_name)
            return True
        logger.warning("ECDSA with curve %s is NOT considered quantum resistant.", curve_name)
        return False

    def is_sha256_quantum_resistant(self):
        """Always False — SHA-256 is not treated as quantum resistant here."""
        logger.warning("SHA-256 is NOT quantum resistant. Consider using SHA-3 or other alternatives.")
        return False

    def recommend_quantum_resistant_algorithms(self):
        """Log (not return) recommendations for quantum-resistant algorithms."""
        recommendations = {
            'Post-Quantum Cryptography': [
                'Lattice-based cryptography (e.g., NTRU)',
                'Code-based cryptography (e.g., McEliece)',
                'Multivariate polynomial cryptography',
                'Hash-based signatures (e.g., XMSS)',
            ],
            'Quantum Key Distribution (QKD)': [
                'BB84 protocol',
                'E91 protocol',
            ],
        }
        logger.info("Recommendations for quantum-resistant algorithms:")
        for category, algos in recommendations.items():
            logger.info("%s: %s", category, ", ".join(algos))

    def generate_rsa_key_pair(self, key_size=3072):
        """Generate an RSA key pair, refusing key sizes below the 3072 policy.

        :raises ValueError: if *key_size* fails is_rsa_quantum_resistant.
        """
        if not self.is_rsa_quantum_resistant(key_size):
            raise ValueError("RSA key size is not quantum resistant.")
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=key_size,
        )
        return private_key, private_key.public_key()

    def generate_ecdsa_key_pair(self, curve_name='P-256'):
        """Generate an ECDSA key pair on P-256, P-384 or P-521.

        :raises ValueError: if *curve_name* fails is_ecdsa_quantum_resistant.
        """
        if not self.is_ecdsa_quantum_resistant(curve_name):
            raise ValueError("ECDSA curve is not quantum resistant.")
        # Same dispatch as the original chained ternary, spelled as a mapping;
        # the guard above guarantees the fallback is only ever P-521.
        curve_classes = {'P-256': ec.SECP256R1, 'P-384': ec.SECP384R1}
        private_key = ec.generate_private_key(curve_classes.get(curve_name, ec.SECP521R1)())
        return private_key, private_key.public_key()

# Example usage
if __name__ == "__main__":
    utility = QuantumResistanceUtility()

    # Check RSA quantum resistance
    utility.is_rsa_quantum_resistant(3072)

    # Check ECDSA quantum resistance
    utility.is_ecdsa_quantum_resistant('P-256')

    # Check SHA-256 quantum resistance
    utility.is_sha256_quantum_resistant()

    # NOTE(dump): comment split across the chunk boundary:
    # Recommend quantum-resistant ... (continued next chunk: "algorithms")
# NOTE(dump): "...algorithms" completes the comment split across the chunk
# boundary; the statements below resume quantum_resistance.py's __main__ demo,
# so the script guard is re-opened here to keep this chunk importable.
if __name__ == "__main__":
    # Recommend quantum-resistant algorithms
    utility.recommend_quantum_resistant_algorithms()

    # Generate RSA key pair
    try:
        rsa_private, rsa_public = utility.generate_rsa_key_pair(3072)
        logger.info("RSA Key Pair generated successfully.")
    except ValueError as e:
        logger.error("Error generating RSA key pair: %s", e)

    # Generate ECDSA key pair
    try:
        ecdsa_private, ecdsa_public = utility.generate_ecdsa_key_pair('P-256')
        logger.info("ECDSA Key Pair generated successfully.")
    except ValueError as e:
        logger.error("Error generating ECDSA key pair: %s", e)

# --------------------------------------------------------------------------
# /src/zkp_engine.py
# --------------------------------------------------------------------------
from typing import Dict, Any, Optional
import hashlib
import random

try:
    from pysnark.runtime import PrivVal, PubVal, snark, snarkinline
    from pysnark.hash import sha256 as snark_sha256
except ImportError:
    snark = None  # In production, ensure pySNARK is installed

# === Core: Confidential Transaction Proof ===

def hash_commitment(value: int, blinding: int) -> str:
    """Return the hex SHA-256 of "value:blinding" — the commitment digest."""
    return hashlib.sha256(f"{value}:{blinding}".encode()).hexdigest()

def generate_commitment(value: int) -> Dict[str, Any]:
    """Generate a commitment and blinding factor for a confidential value.

    The 128-bit blinding factor comes from random.SystemRandom (CSPRNG).
    """
    blinding = random.SystemRandom().randint(1, 2**128)
    commitment = hash_commitment(value, blinding)
    return {"value": value, "blinding": blinding, "commitment": commitment}

# === zk-SNARK: Proof-of-Ownership Without Revealing Value ===

def prove_ownership(value: int, blinding: int, commitment: str) -> Optional[Dict[str, Any]]:
    """
    Generate a zero-knowledge proof that you know value, blinding such that
    commitment=hash(value, blinding).

    :raises RuntimeError: if pySNARK is not installed.

    NOTE(review): the circuit hashes a formatted string via snark_sha256 and
    compares against int(commitment, 16), while hash_commitment produces a hex
    digest of a plain string — verify these encodings actually agree before
    relying on this proof.
    """
    if not snark:
        raise RuntimeError("pySNARK not installed.")

    @snark
    def zkp_proof():
        v = PrivVal(value)
        b = PrivVal(blinding)
        c = PubVal(int(commitment, 16))
        assert snark_sha256(f"{v}:{b}".encode()) == c

    proof = zkp_proof()
    return {"proof": proof}

def verify_ownership(commitment: str, proof: Any) -> bool:
    """
    Verifies a zero-knowledge proof of ownership (simulated).
    In production, use pySNARK's verifier.
    """
    # This is a stub; actual verification is done via pySNARK CLI/off-chain.
    return proof is not None

# === Anonymous Voting Example ===

def generate_vote_commitment(vote_choice: int) -> Dict[str, Any]:
    """Generate a commitment for a vote (same scheme as generate_commitment)."""
    blinding = random.SystemRandom().randint(1, 2**128)
    commitment = hash_commitment(vote_choice, blinding)
    return {"vote_choice": vote_choice, "blinding": blinding, "commitment": commitment}

def prove_vote(vote_choice: int, blinding: int, commitment: str) -> Optional[Dict[str, Any]]:
    """
    Prove in zero-knowledge that you cast a legitimate vote without revealing
    the choice.

    :raises RuntimeError: if pySNARK is not installed.
    """
    if not snark:
        raise RuntimeError("pySNARK not installed.")

    @snark
    def zkp_vote():
        v = PrivVal(vote_choice)
        b = PrivVal(blinding)
        c = PubVal(int(commitment, 16))
        assert snark_sha256(f"{v}:{b}".encode()) == c

    proof = zkp_vote()
    return {"proof": proof}

# === Selective Disclosure (Privacy-Preserving KYC/AML) ===

def prove_kyc_attribute(attribute_value: int, blinding: int, commitment: str) -> Optional[Dict[str, Any]]:
    """
    Prove you possess a KYC/AML attribute (e.g., age > 18) without revealing
    the value.

    :raises RuntimeError: if pySNARK is not installed.
    """
    if not snark:
        raise RuntimeError("pySNARK not installed.")

    @snark
    def zkp_kyc():
        attr = PrivVal(attribute_value)
        b = PrivVal(blinding)
        c = PubVal(int(commitment, 16))
        assert attr > 18  # the disclosed predicate; the value stays private
        assert snark_sha256(f"{attr}:{b}".encode()) == c

    proof = zkp_kyc()
    return {"proof": proof}

# === Example Usage ===

if __name__ == "__main__":
    print("Demo: PiConsensus Zero-Knowledge Proof Engine")

    # Confidential transaction
    secret = 314159
    commitment_data = generate_commitment(secret)
    print("Commitment data:", commitment_data)
    # FIX: the demo previously caught bare Exception, so any unrelated error
    # was silently mislabeled "(pySNARK not installed)"; only the explicit
    # RuntimeError raised by the provers is swallowed now.
    try:
        proof = prove_ownership(commitment_data["value"], commitment_data["blinding"], commitment_data["commitment"])
        print("Proof generated:", proof)
        print("Proof verified?", verify_ownership(commitment_data["commitment"], proof))
    except RuntimeError:
        print("(pySNARK not installed for demo)")

    # Anonymous voting
    vote = 1
    vote_commitment = generate_vote_commitment(vote)
    print("Vote commitment:", vote_commitment)
    try:
        vote_proof = prove_vote(vote_commitment["vote_choice"], vote_commitment["blinding"], vote_commitment["commitment"])
        print("Vote proof:", vote_proof)
    except RuntimeError:
        print("(pySNARK not installed for demo)")

    # Privacy-preserving KYC
    age = 25
    kyc_commitment = generate_commitment(age)
    try:
        kyc_proof = prove_kyc_attribute(kyc_commitment["value"], kyc_commitment["blinding"], kyc_commitment["commitment"])
        print("KYC proof:", kyc_proof)
    except RuntimeError:
        print("(pySNARK not installed for demo)")

# --------------------------------------------------------------------------
# /src/constants.py
# --------------------------------------------------------------------------
import os
# NOTE(dump): import split across the chunk boundary:
#   from typing import List, ... (continued next chunk: "Dict")
# NOTE(dump): "Dict" completes `from typing import List, Dict`, split across
# the chunk boundary; the import is restated in full here so this chunk
# stands alone (duplicate imports are harmless).
import os
from typing import List, Dict

def env(key: str, default):
    """Fetch *key* from the environment, or use *default* if not set.

    The raw string is coerced to the type of *default*:
      bool  -> "1"/"true"/"yes" (case-insensitive) => True, anything else False
      int   -> int(value)   (raises ValueError on malformed input)
      float -> float(value) (raises ValueError on malformed input)
      list  -> comma-split, items whitespace-stripped, empty items dropped
      other -> raw string

    The bool check must precede the int check: bool is a subclass of int, so
    reordering would coerce boolean defaults with int().
    """
    value = os.environ.get(key)
    if value is None:
        return default
    if isinstance(default, bool):
        return value.lower() in ("1", "true", "yes")
    if isinstance(default, int):
        return int(value)
    if isinstance(default, float):
        return float(value)
    if isinstance(default, list):
        # FIX: strip whitespace and drop empties so "US, EU" parses as
        # ["US", "EU"] rather than ["US", " EU"], which silently broke
        # membership checks such as is_jurisdiction_compliant.
        return [item.strip() for item in value.split(',') if item.strip()]
    return value

# ==================== PI COIN CORE PARAMETERS ====================

PI_COIN_SYMBOL: str = env("PI_COIN_SYMBOL", "Pi")
PI_COIN_NAME: str = env("PI_COIN_NAME", "Pi Coin")
PI_COIN_VALUE: int = env("PI_COIN_VALUE", 314_159)  # Pi Coin value in USD (three hundred fourteen thousand one hundred fifty-nine)
PI_COIN_SUPPLY: int = env("PI_COIN_SUPPLY", 100_000_000_000)
PI_COIN_DECIMALS: int = env("PI_COIN_DECIMALS", 18)
PI_COIN_TRANSACTION_FEE: float = env("PI_COIN_TRANSACTION_FEE", 0.01)
PI_COIN_MAX_TRANSACTION_SIZE: int = env("PI_COIN_MAX_TRANSACTION_SIZE", 1_000_000)
PI_COIN_MINIMUM_BALANCE: int = env("PI_COIN_MINIMUM_BALANCE", 1)

# ==================== NETWORK AND BLOCKCHAIN PARAMETERS ====================

PI_COIN_BLOCK_TIME: int = env("PI_COIN_BLOCK_TIME", 10)  # seconds
PI_COIN_MINING_DIFFICULTY: int = env("PI_COIN_MINING_DIFFICULTY", 1000)
PI_COIN_MINING_REWARD: float = env("PI_COIN_MINING_REWARD", 12.5)
PI_COIN_NETWORK_PROTOCOL: str = env("PI_COIN_NETWORK_PROTOCOL", "PoS")  # Proof of Stake
PI_COIN_GENESIS_BLOCK_TIMESTAMP: str = env("PI_COIN_GENESIS_BLOCK_TIMESTAMP", "2025-01-01T00:00:00Z")

# ==================== GOVERNANCE & COMPLIANCE ====================

PI_COIN_GOVERNANCE_MODEL: str = env("PI_COIN_GOVERNANCE_MODEL", "Decentralized")
PI_COIN_KYC_REQUIRED: bool = env("PI_COIN_KYC_REQUIRED", True)
PI_COIN_COMPLIANCE_JURISDICTIONS: List[str] = env("PI_COIN_COMPLIANCE_JURISDICTIONS", ["US", "EU", "UK", "SG", "JP", "AU"])
PI_COIN_COMPLIANCE_LEVEL: str = env("PI_COIN_COMPLIANCE_LEVEL", "Ultra-Strict, Real-Time, Multi-Jurisdictional")

# ==================== SECURITY & CRYPTOGRAPHY ====================

PI_COIN_ENCRYPTION_ALGORITHM: str = env("PI_COIN_ENCRYPTION_ALGORITHM", "AES-256-GCM")
PI_COIN_HASHING_ALGORITHM: str = env("PI_COIN_HASHING_ALGORITHM", "SHA-512")
PI_COIN_SIGNATURE_SCHEME: str = env("PI_COIN_SIGNATURE_SCHEME", "ECDSA-secp256k1")
PI_COIN_MULTISIG_ENABLED: bool = env("PI_COIN_MULTISIG_ENABLED", True)
# NOTE(review): the env key deliberately (?) differs from the constant name
# ("PI_COIN_AUDIT_PROVIDER" vs PI_COIN_SMART_CONTRACT_AUDIT_PROVIDER) —
# preserved as-is; confirm which spelling deployments actually set.
PI_COIN_SMART_CONTRACT_AUDIT_PROVIDER: str = env("PI_COIN_AUDIT_PROVIDER", "Quantstamp, OpenZeppelin, Trail of Bits")

# ==================== STAKING & REWARDS ====================

PI_COIN_MIN_STAKE_AMOUNT: int = env("PI_COIN_MIN_STAKE_AMOUNT", 100)
PI_COIN_STAKE_REWARD_RATE: float = env("PI_COIN_STAKE_REWARD_RATE", 0.05)  # 5% annual
PI_COIN_MAX_STAKE_DURATION_DAYS: int = env("PI_COIN_MAX_STAKE_DURATION_DAYS", 3650)  # 10 years

# ==================== API & INFRASTRUCTURE ====================

PI_COIN_API_REQUEST_LIMIT: int = env("PI_COIN_API_REQUEST_LIMIT", 10_000)  # per hour
PI_COIN_API_KEY_EXPIRATION: int = env("PI_COIN_API_KEY_EXPIRATION", 86_400)  # 24 hours
PI_COIN_MAX_PEERS: int = env("PI_COIN_MAX_PEERS", 10_000)
PI_COIN_NODE_TIMEOUT: int = env("PI_COIN_NODE_TIMEOUT", 10)
PI_COIN_CONNECTION_RETRY_INTERVAL: int = env("PI_COIN_CONNECTION_RETRY_INTERVAL", 3)

# ==================== ADVANCED/EXPANDABLE SETTINGS ====================

PI_COIN_AI_GOVERNANCE_ENABLED: bool = env("PI_COIN_AI_GOVERNANCE_ENABLED", True)
PI_COIN_QUANTUM_RESISTANT: bool = env("PI_COIN_QUANTUM_RESISTANT", True)
PI_COIN_DEFI_INTEGRATION: bool = env("PI_COIN_DEFI_INTEGRATION", True)
PI_COIN_LAYER_2_ENABLED: bool = env("PI_COIN_LAYER_2_ENABLED", True)
# NOTE(dump): next assignment is split across the chunk boundary:
#   PI_COIN_AUDIT_TRAIL_ENABLED: ... (continued next chunk:
#   bool = env("PI_COIN_AUDIT_TRAIL_ENABLED", True))
# NOTE(dump): completes the assignment split across the chunk boundary
# (the previous chunk ends with "PI_COIN_AUDIT_TRAIL_ENABLED:").
PI_COIN_AUDIT_TRAIL_ENABLED: bool = env("PI_COIN_AUDIT_TRAIL_ENABLED", True)

# ==================== DYNAMIC/UTILITY FUNCTIONS ====================

def is_jurisdiction_compliant(country_code: str) -> bool:
    """
    Check if a given country code is supported for compliance.
    The comparison is case-insensitive on both sides.
    """
    return country_code.upper() in [c.upper() for c in PI_COIN_COMPLIANCE_JURISDICTIONS]

def describe():
    """
    Return a human-readable summary of Pi Coin configuration as a dict.
    (Doc fix: the previous docstring said "Print", but the function returns.)
    """
    return {
        "Symbol": PI_COIN_SYMBOL,
        "Name": PI_COIN_NAME,
        "Value (USD)": PI_COIN_VALUE,
        "Supply": PI_COIN_SUPPLY,
        "Decimals": PI_COIN_DECIMALS,
        "Governance": PI_COIN_GOVERNANCE_MODEL,
        "KYC": PI_COIN_KYC_REQUIRED,
        "Quantum Resistant": PI_COIN_QUANTUM_RESISTANT,
        "AI Governance": PI_COIN_AI_GOVERNANCE_ENABLED,
        "Staking Rate": PI_COIN_STAKE_REWARD_RATE,
        "Jurisdictions": PI_COIN_COMPLIANCE_JURISDICTIONS,
    }

# ==================== END OF FILE ====================

# --------------------------------------------------------------------------
# /backend/app/services/cross_chain_service.py
# --------------------------------------------------------------------------
import requests
import json
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class CrossChainService:
    """Thin HTTP client for querying and bridging between configured chains."""

    def __init__(self, blockchain_configs):
        """
        Initialize the CrossChainService with configurations for different blockchains.

        :param blockchain_configs: A dictionary mapping blockchain name to a
            config dict; each config must provide a 'base_url' key.
        """
        self.blockchain_configs = blockchain_configs

    def get_blockchain_data(self, blockchain_name, endpoint, params=None):
        """
        Fetch data from a specified blockchain.

        :param blockchain_name: The name of the blockchain to query.
        :param endpoint: The API endpoint to call.
        :param params: Optional parameters for the API call.
        :return: The response data from the blockchain (parsed JSON).
        :raises ValueError: If the blockchain name is not configured.
        """
        if blockchain_name not in self.blockchain_configs:
            logger.error("Blockchain configuration for '%s' not found.", blockchain_name)
            raise ValueError("Invalid blockchain name")

        url = f"{self.blockchain_configs[blockchain_name]['base_url']}/{endpoint}"
        try:
            logger.info("Fetching data from %s: %s", blockchain_name, url)
            # FIX: timeout added — requests without a timeout blocks forever
            # on a stalled peer, hanging cross-chain workflows.
            response = requests.get(url, params=params, timeout=30)
            response.raise_for_status()
            logger.info("Data fetched successfully from %s", blockchain_name)
            return response.json()
        except Exception as e:
            logger.error("Error fetching data from %s: %s", blockchain_name, e)
            raise

    def transfer_assets(self, from_chain, to_chain, asset_data):
        """
        Transfer assets from one blockchain to another.

        :param from_chain: The name of the source blockchain.
        :param to_chain: The name of the destination blockchain.
        :param asset_data: A dictionary containing asset transfer details.
        :return: The transaction result from the destination chain.
        :raises ValueError: If either blockchain name is not configured.
        """
        if from_chain not in self.blockchain_configs or to_chain not in self.blockchain_configs:
            logger.error("Invalid blockchain names: %s, %s", from_chain, to_chain)
            raise ValueError("Invalid blockchain names")

        # Example of transferring assets (this will vary based on the blockchain)
        from_url = self.blockchain_configs[from_chain]['base_url'] + '/transfer'
        to_url = self.blockchain_configs[to_chain]['base_url'] + '/receive'

        try:
            logger.info("Initiating asset transfer from %s to %s", from_chain, to_chain)
            # Step 1: Transfer from the source blockchain
            transfer_response = requests.post(from_url, json=asset_data, timeout=30)
            transfer_response.raise_for_status()
            transfer_result = transfer_response.json()

            # Step 2: Receive on the destination blockchain
            # NOTE(review): there is no rollback if this second call fails —
            # assets would leave the source chain without arriving; confirm
            # the upstream API is idempotent or add compensation logic.
            receive_response = requests.post(to_url, json=transfer_result, timeout=30)
            receive_response.raise_for_status()
            logger.info("Asset transfer completed successfully.")
            return receive_response.json()
        except Exception as e:
            logger.error("Error during asset transfer: %s", e)
            raise

    def get_transaction_status(self, blockchain_name, tx_id):
        """
        Get the status of a transaction on a specified blockchain.

        :param blockchain_name: The name of the blockchain to query.
        :param tx_id: The transaction ID to check.
        :return: The transaction status.
        """
        endpoint = f"transaction/{tx_id}/status"
        return self.get_blockchain_data(blockchain_name, endpoint)

# Example usage
if __name__ == "__main__":
    # Example blockchain configurations
    blockchain_configs = {
        "Ethereum": {
            "base_url": "https://api.etherscan.io/api"
        },
        "BinanceSmartChain": {
            "base_url": "https://api.bscscan.com/api"
        }
    }

    service = CrossChainService(blockchain_configs)

    # Fetching data from Ethereum
    try:
        eth_data = service.get_blockchain_data("Ethereum", "stats")
        print("Ethereum Data:", eth_data)
    except Exception as e:
        print("Error fetching Ethereum data:", e)

    # Example asset transfer
    asset_data = {
        "from": "0xYourEthereumAddress",
        "to": "0xYourBSCAddress",
        "amount": 1.0,
        "token": "ETH"
    }
    try:
        transfer_result = service.transfer_assets("Ethereum", "BinanceSmartChain", asset_data)
        print("Transfer Result:", transfer_result)
    except Exception as e:
        print("Error during asset transfer:", e)

    # Checking transaction status.
    # FIX: the dump passed " Ethereum" (leading space), which always raised
    # ValueError against the configured names; it also contained a stray
    # "```python" markdown fence and a duplicated copy of this block — both
    # removed.
    try:
        tx_status = service.get_transaction_status("Ethereum", "0xYourTransactionID")
        print("Transaction Status:", tx_status)
    except Exception as e:
        print("Error fetching transaction status:", e)

# --------------------------------------------------------------------------
# /src/ai_governance.py
# --------------------------------------------------------------------------
import os
import json
import datetime
from typing import List, Dict, Any, Optional
import requests
# NOTE(dump): import split across the chunk boundary:
#   from transformers import ... (continued next chunk: "pipeline")
# NOTE(dump): "pipeline" completes `from transformers import pipeline`, split
# across the chunk boundary.

# === AI Models Initialization ===

# Sentiment analysis using HuggingFace Transformers.
# FIX: the transformers import is now inside the guard — previously a missing
# optional dependency crashed the whole module at import time even though the
# pipeline construction below was already wrapped in try/except.
try:
    from transformers import pipeline
    sentiment_analyzer = pipeline("sentiment-analysis")
except Exception:
    sentiment_analyzer = None  # In production, handle fallback or load a local model

# === Proposal Structures ===

class Proposal:
    """A governance proposal plus its AI-derived sentiment, score and status."""

    def __init__(self, proposal_id: int, title: str, description: str, submitter: str, timestamp: Optional[str] = None):
        self.proposal_id = proposal_id
        self.title = title
        self.description = description
        self.submitter = submitter
        # NOTE(review): utcnow() is deprecated since Python 3.12; kept to
        # preserve the existing naive-UTC ISO timestamp format.
        self.timestamp = timestamp or datetime.datetime.utcnow().isoformat()
        self.score = 0.0            # AI score in [0, 1], set at submission
        self.sentiment = "neutral"  # overwritten by analyze_sentiment on submit
        self.status = "pending"     # pending | approved | rejected

    def to_dict(self):
        """Serialize to a plain, JSON-friendly dict."""
        return {
            "proposal_id": self.proposal_id,
            "title": self.title,
            "description": self.description,
            "submitter": self.submitter,
            "timestamp": self.timestamp,
            "score": self.score,
            "sentiment": self.sentiment,
            "status": self.status
        }

# === AI Governance Core Functions ===

class AIGovernanceEngine:
    """In-memory registry of proposals with AI-assisted scoring/decisions."""

    def __init__(self):
        self.proposals: Dict[int, Proposal] = {}
        self.next_id = 1  # monotonically increasing proposal id

    def submit_proposal(self, title: str, description: str, submitter: str) -> Proposal:
        """Register a new proposal, run sentiment analysis and scoring."""
        proposal = Proposal(self.next_id, title, description, submitter)
        proposal.sentiment = self.analyze_sentiment(proposal.description)
        proposal.score = self.score_proposal(proposal)
        self.proposals[self.next_id] = proposal
        self.next_id += 1
        return proposal

    def analyze_sentiment(self, text: str) -> str:
        """Return a lowercase sentiment label, or "unknown"/"error" on failure."""
        if sentiment_analyzer is None:
            return "unknown"
        try:
            result = sentiment_analyzer(text[:512])[0]  # Truncate for performance
            return result['label'].lower()
        except Exception:
            return "error"

    def score_proposal(self, proposal: Proposal) -> float:
        """Heuristic score in [0, 1] from sentiment, length, keywords and
        external social signals."""
        score = 0.0
        if proposal.sentiment == "positive":
            score += 0.4
        elif proposal.sentiment == "neutral":
            score += 0.2
        if len(proposal.description) > 200:
            score += 0.2
        keywords = ["decentralized", "security", "scalable", "inclusive", "innovation"]
        for kw in keywords:
            if kw in proposal.description.lower():
                score += 0.08
        # External: Social sentiment (optional)
        score += self.external_social_sentiment(proposal.title) * 0.1
        return min(score, 1.0)

    def external_social_sentiment(self, topic: str) -> float:
        """
        Fetch and aggregate sentiment from external sources (e.g., Twitter, Reddit).
        This is a placeholder. Connect to real APIs in production.
        """
        # TODO: Integrate Twitter, Reddit, etc.
        return 0.5  # Neutral baseline

    def get_proposal(self, proposal_id: int) -> Optional[Proposal]:
        """Return the proposal with *proposal_id*, or None."""
        return self.proposals.get(proposal_id)

    def list_proposals(self) -> List[Dict[str, Any]]:
        """Return all proposals serialized as dicts."""
        return [p.to_dict() for p in self.proposals.values()]

    def approve_proposal(self, proposal_id: int) -> bool:
        """Mark the proposal approved; returns False when the id is unknown."""
        proposal = self.get_proposal(proposal_id)
        if proposal:
            proposal.status = "approved"
            return True
        return False

    def reject_proposal(self, proposal_id: int) -> bool:
        """Mark the proposal rejected; returns False when the id is unknown."""
        proposal = self.get_proposal(proposal_id)
        if proposal:
            proposal.status = "rejected"
            return True
        return False

    def auto_decide(self, proposal_id: int) -> str:
        """
        Use AI scoring to make auto-approval/rejection.
        Returns the resulting status, or "not found" for an unknown id.
        """
        proposal = self.get_proposal(proposal_id)
        if not proposal:
            return "not found"
        if proposal.score >= 0.7:
            proposal.status = "approved"
        elif proposal.score <= 0.3:
            proposal.status = "rejected"
        else:
            proposal.status = "pending"
        return proposal.status

# === Example: Integration with FastAPI or other frameworks ===

engine = AIGovernanceEngine()

def _read_proposal_id() -> Optional[int]:
    """Prompt for a proposal id; returns None on non-numeric input.
    FIX: the CLI previously crashed with ValueError on bad input."""
    try:
        return int(input("Proposal ID: "))
    except ValueError:
        print("Invalid proposal ID.")
        return None

if __name__ == "__main__":
    # Demo: CLI usage
    print("PiConsensus AI Governance Engine")
    while True:
        cmd = input("Command (submit/list/approve/reject/auto/exit): ").strip().lower()
        if cmd == "submit":
            title = input("Title: ")
            desc = input("Description: ")
            submitter = input("Submitter: ")
            prop = engine.submit_proposal(title, desc, submitter)
            print("Submitted:", prop.to_dict())
        elif cmd == "list":
            for p in engine.list_proposals():
                print(p)
        elif cmd == "approve":
            pid = _read_proposal_id()
            if pid is not None:
                print("Approved:", engine.approve_proposal(pid))
        elif cmd == "reject":
            pid = _read_proposal_id()
            if pid is not None:
                print("Rejected:", engine.reject_proposal(pid))
        elif cmd == "auto":
            pid = _read_proposal_id()
            if pid is not None:
                print("Auto-decision:", engine.auto_decide(pid))
        elif cmd == "exit":
            break
        else:
            print("Unknown command.")