├── ocmonitor ├── models │ ├── __init__.py │ ├── session.py │ └── analytics.py ├── services │ ├── __init__.py │ ├── live_monitor.py │ ├── session_analyzer.py │ └── export_service.py ├── ui │ ├── __init__.py │ ├── dashboard.py │ └── tables.py ├── utils │ ├── __init__.py │ ├── formatting.py │ ├── time_utils.py │ ├── file_utils.py │ └── error_handling.py ├── __init__.py └── config.py ├── screenshots ├── live_dashboard.png ├── session-analysis.png ├── sessions-summary.png ├── daily-usage-breakdown.png ├── model-usage-analysis.png ├── model-usage-breakdown.png └── README.md ├── requirements.txt ├── run_ocmonitor.py ├── config.toml ├── LICENSE ├── .gitignore ├── setup.py ├── models.json ├── install.sh ├── QUICK_START.md ├── CHANGELOG.md ├── CONTRIBUTING.md ├── README.md └── MANUAL_TEST_GUIDE.md /ocmonitor/models/__init__.py: -------------------------------------------------------------------------------- 1 | """Data models for OpenCode Monitor.""" -------------------------------------------------------------------------------- /ocmonitor/services/__init__.py: -------------------------------------------------------------------------------- 1 | """Services for OpenCode Monitor.""" -------------------------------------------------------------------------------- /ocmonitor/ui/__init__.py: -------------------------------------------------------------------------------- 1 | """UI components for OpenCode Monitor.""" -------------------------------------------------------------------------------- /ocmonitor/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utility functions for OpenCode Monitor.""" -------------------------------------------------------------------------------- /screenshots/live_dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Shlomob/ocmonitor-share/HEAD/screenshots/live_dashboard.png 
-------------------------------------------------------------------------------- /screenshots/session-analysis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Shlomob/ocmonitor-share/HEAD/screenshots/session-analysis.png -------------------------------------------------------------------------------- /screenshots/sessions-summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Shlomob/ocmonitor-share/HEAD/screenshots/sessions-summary.png -------------------------------------------------------------------------------- /screenshots/daily-usage-breakdown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Shlomob/ocmonitor-share/HEAD/screenshots/daily-usage-breakdown.png -------------------------------------------------------------------------------- /screenshots/model-usage-analysis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Shlomob/ocmonitor-share/HEAD/screenshots/model-usage-analysis.png -------------------------------------------------------------------------------- /screenshots/model-usage-breakdown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Shlomob/ocmonitor-share/HEAD/screenshots/model-usage-breakdown.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | click>=8.0.0 2 | rich>=13.0.0 3 | pydantic>=2.0.0 4 | toml>=0.10.0 5 | pytest>=7.0.0 6 | pytest-click>=1.1.0 7 | pytest-mock>=3.10.0 8 | coverage>=7.0.0 -------------------------------------------------------------------------------- /ocmonitor/__init__.py: 
-------------------------------------------------------------------------------- 1 | """OpenCode Monitor - Analytics and monitoring for OpenCode sessions.""" 2 | 3 | __version__ = "1.0.0" 4 | __author__ = "OpenCode Monitor Team" 5 | __description__ = "Analytics and monitoring tool for OpenCode AI coding sessions" -------------------------------------------------------------------------------- /run_ocmonitor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Wrapper script to run OpenCode Monitor CLI.""" 3 | 4 | import sys 5 | import os 6 | from pathlib import Path 7 | 8 | # Ensure we're in the right directory and add to Python path 9 | script_dir = Path(__file__).parent 10 | os.chdir(script_dir) 11 | sys.path.insert(0, str(script_dir)) 12 | 13 | try: 14 | from ocmonitor.cli import main 15 | main() 16 | except ImportError as e: 17 | print(f"❌ Import error: {e}") 18 | print(f"Current directory: {os.getcwd()}") 19 | print(f"Python path: {sys.path[:3]}") 20 | print("\n🔧 Please ensure you're running from the correct directory:") 21 | print("cd /Users/shelli/Documents/apps/ocmonitor/ocmonitor") 22 | sys.exit(1) 23 | except Exception as e: 24 | print(f"❌ Error running OpenCode Monitor: {e}") 25 | sys.exit(1) -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | # OpenCode Monitor Configuration 2 | 3 | [paths] 4 | # Default path to OpenCode messages directory 5 | messages_dir = "~/.local/share/opencode/storage/message" 6 | # Directory for exports 7 | export_dir = "./exports" 8 | 9 | [ui] 10 | # Table style: "rich", "simple", "minimal" 11 | table_style = "rich" 12 | # Enable progress bars 13 | progress_bars = true 14 | # Enable colors in output 15 | colors = true 16 | # Refresh interval for live dashboard (seconds) 17 | live_refresh_interval = 5 18 | 19 | [export] 20 | # Default 
export format: "csv", "json" 21 | default_format = "csv" 22 | # Include metadata in exports 23 | include_metadata = true 24 | # Include raw data in exports 25 | include_raw_data = false 26 | 27 | [models] 28 | # Path to models pricing configuration 29 | config_file = "models.json" 30 | 31 | [analytics] 32 | # Default timeframe for reports: "daily", "weekly", "monthly" 33 | default_timeframe = "daily" 34 | # Number of recent sessions to analyze by default 35 | recent_sessions_limit = 50 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 OpenCode Monitor Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .nox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | *.py,cover 49 | .hypothesis/ 50 | .pytest_cache/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | local_settings.py 59 | db.sqlite3 60 | db.sqlite3-journal 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # IPython 79 | profile_default/ 80 | ipython_config.py 81 | 82 | # pyenv 83 | .python-version 84 | 85 | # pipenv 86 | Pipfile.lock 87 | 88 | # PEP 582 89 | __pypackages__/ 90 | 91 | # Celery stuff 92 | celerybeat-schedule 93 | celerybeat.pid 94 | 95 | # SageMath parsed files 96 | *.sage.py 97 | 98 | # Environments 99 | .env 100 | .venv 101 | env/ 102 | venv/ 103 | ENV/ 104 | env.bak/ 105 | venv.bak/ 106 | 107 | # Spyder project settings 108 | .spyderproject 109 | .spyproject 110 | 111 | # Rope project settings 112 | .ropeproject 113 | 114 | # mkdocs documentation 115 | /site 116 | 117 | # mypy 118 | .mypy_cache/ 119 | .dmypy.json 
120 | dmypy.json 121 | 122 | # Pyre type checker 123 | .pyre/ 124 | 125 | # Project specific 126 | exports/ 127 | *.csv 128 | *.json 129 | !models.json 130 | !config.toml 131 | 132 | # IDE 133 | .vscode/ 134 | .idea/ 135 | *.swp 136 | *.swo 137 | 138 | # OS 139 | .DS_Store 140 | Thumbs.db -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """Setup script for OpenCode Monitor.""" 2 | 3 | from setuptools import setup, find_packages 4 | import os 5 | 6 | # Read version from __init__.py 7 | version = {} 8 | with open(os.path.join("ocmonitor", "__init__.py")) as f: 9 | exec(f.read(), version) 10 | 11 | # Read long description from README 12 | long_description = "" 13 | if os.path.exists("README.md"): 14 | with open("README.md", "r", encoding="utf-8") as f: 15 | long_description = f.read() 16 | 17 | setup( 18 | name="ocmonitor", 19 | version=version["__version__"], 20 | description="Analytics and monitoring tool for OpenCode AI coding sessions", 21 | long_description=long_description, 22 | long_description_content_type="text/markdown", 23 | author="OpenCode Monitor Team", 24 | author_email="", 25 | url="https://github.com/yourusername/ocmonitor", 26 | packages=find_packages(), 27 | include_package_data=True, 28 | package_data={ 29 | "ocmonitor": ["*.toml", "*.json"], 30 | }, 31 | install_requires=[ 32 | "click>=8.0.0", 33 | "rich>=13.0.0", 34 | "pydantic>=2.0.0", 35 | "toml>=0.10.0", 36 | ], 37 | extras_require={ 38 | "dev": [ 39 | "pytest>=7.0.0", 40 | "pytest-click>=1.1.0", 41 | "pytest-mock>=3.10.0", 42 | "coverage>=7.0.0", 43 | "black>=22.0.0", 44 | "isort>=5.10.0", 45 | "flake8>=4.0.0", 46 | ], 47 | }, 48 | entry_points={ 49 | "console_scripts": [ 50 | "ocmonitor=ocmonitor.cli:main", 51 | ], 52 | }, 53 | classifiers=[ 54 | "Development Status :: 4 - Beta", 55 | "Intended Audience :: Developers", 56 | "License :: OSI Approved :: MIT License", 
57 | "Programming Language :: Python :: 3", 58 | "Programming Language :: Python :: 3.8", 59 | "Programming Language :: Python :: 3.9", 60 | "Programming Language :: Python :: 3.10", 61 | "Programming Language :: Python :: 3.11", 62 | "Programming Language :: Python :: 3.12", 63 | "Topic :: Software Development :: Tools", 64 | "Topic :: System :: Monitoring", 65 | ], 66 | python_requires=">=3.8", 67 | keywords="opencode ai coding analytics monitoring tokens cost", 68 | ) -------------------------------------------------------------------------------- /models.json: -------------------------------------------------------------------------------- 1 | { 2 | "claude-sonnet-4-20250514": { 3 | "input": 3.00, 4 | "output": 15.00, 5 | "cacheWrite": 3.75, 6 | "cacheRead": 0.30, 7 | "contextWindow": 200000, 8 | "sessionQuota": 6.00 9 | }, 10 | "claude-sonnet-4-5-20250929": { 11 | "input": 3.00, 12 | "output": 15.00, 13 | "cacheWrite": 3.75, 14 | "cacheRead": 0.30, 15 | "contextWindow": 200000, 16 | "sessionQuota": 6.00 17 | }, 18 | "claude-haiku-4-5-20251001": { 19 | "input": 1.00, 20 | "output": 5.00, 21 | "cacheWrite": 1.25, 22 | "cacheRead": 0.10, 23 | "contextWindow": 200000, 24 | "sessionQuota": 6.00 25 | }, 26 | "claude-opus-4": { 27 | "input": 15.00, 28 | "output": 75.00, 29 | "cacheWrite": 18.75, 30 | "cacheRead": 1.50, 31 | "contextWindow": 200000, 32 | "sessionQuota": 10.00 33 | }, 34 | "claude-opus-4.1": { 35 | "input": 15.00, 36 | "output": 75.00, 37 | "cacheWrite": 18.75, 38 | "cacheRead": 1.50, 39 | "contextWindow": 200000, 40 | "sessionQuota": 10.00 41 | }, 42 | "grok-code": { 43 | "input": 0.00, 44 | "output": 0.00, 45 | "cacheWrite": 0.00, 46 | "cacheRead": 0.00, 47 | "contextWindow": 256000, 48 | "sessionQuota": 0.00 49 | }, 50 | "qwen3-coder": { 51 | "input": 0.00, 52 | "output": 0.00, 53 | "cacheWrite": 0.00, 54 | "cacheRead": 0.00, 55 | "contextWindow": 256000, 56 | "sessionQuota": 0.00 57 | }, 58 | "qwen/qwen3-coder": { 59 | "input": 0.00, 60 | "output": 
0.00, 61 | "cacheWrite": 0.00, 62 | "cacheRead": 0.00, 63 | "contextWindow": 256000, 64 | "sessionQuota": 0.00 65 | }, 66 | "GPT-5": { 67 | "input": 1.25, 68 | "output": 10.00, 69 | "cacheWrite": 0.125, 70 | "cacheRead": 0.00, 71 | "contextWindow": 400000, 72 | "sessionQuota": 0.00 73 | }, 74 | "kimi-k2": { 75 | "input": 0.60, 76 | "output": 2.50, 77 | "cacheWrite": 0.00, 78 | "cacheRead": 0.00, 79 | "contextWindow": 256000, 80 | "sessionQuota": 0.00 81 | }, 82 | "z-ai/glm-4.5-air": { 83 | "input": 0.00, 84 | "output": 0.00, 85 | "cacheWrite": 0.00, 86 | "cacheRead": 0.00, 87 | "contextWindow": 128000, 88 | "sessionQuota": 0.00 89 | }, 90 | "z-ai/glm-4.5": { 91 | "input": 0.60, 92 | "output": 2.20, 93 | "cacheWrite": 0.11, 94 | "cacheRead": 0.00, 95 | "contextWindow": 131000, 96 | "sessionQuota": 0.00 97 | }, 98 | "z-ai/glm-4.6": { 99 | "input": 0.60, 100 | "output": 2.20, 101 | "cacheWrite": 0.11, 102 | "cacheRead": 0.00, 103 | "contextWindow": 131000, 104 | "sessionQuota": 0.00 105 | } 106 | } -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # OpenCode Monitor Installation Script 4 | # This script automates the installation process for OpenCode Monitor 5 | 6 | set -e # Exit on any error 7 | 8 | echo "🚀 OpenCode Monitor Installation Script" 9 | echo "======================================" 10 | 11 | # Check if we're in the right directory 12 | if [ ! -f "setup.py" ] || [ ! 
-d "ocmonitor" ]; then 13 | echo "❌ Error: Please run this script from the ocmonitor root directory" 14 | echo " The directory should contain setup.py and ocmonitor/ folder" 15 | exit 1 16 | fi 17 | 18 | echo "✅ Found ocmonitor project directory" 19 | 20 | # Check Python version 21 | PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")') 22 | if [[ "$(printf '%s\n' "3.7" "$PYTHON_VERSION" | sort -V | head -n1)" == "3.7" ]]; then 23 | echo "✅ Python version $PYTHON_VERSION is supported" 24 | else 25 | echo "❌ Python 3.7 or higher is required" 26 | exit 1 27 | fi 28 | 29 | # Install dependencies 30 | echo "📥 Installing dependencies..." 31 | python3 -m pip install -r requirements.txt 32 | 33 | # Install package in development mode 34 | echo "🔧 Installing ocmonitor in development mode..." 35 | python3 -m pip install -e . 36 | 37 | # Get the scripts directory and add to PATH instructions 38 | SCRIPTS_DIR=$(python3 -m site --user-base)/bin 39 | echo "📁 Python scripts will be installed to: $SCRIPTS_DIR" 40 | 41 | # Check if scripts directory is in PATH 42 | if [[ ":$PATH:" != *":$SCRIPTS_DIR:"* ]]; then 43 | echo "" 44 | echo "⚠️ Warning: $SCRIPTS_DIR is not in your PATH" 45 | echo "" 46 | echo "📝 To fix this, add the following line to your shell configuration file:" 47 | echo "" 48 | echo " For bash (~/.bashrc):" 49 | echo " echo 'export PATH=\"$SCRIPTS_DIR:\$PATH\"' >> ~/.bashrc && source ~/.bashrc" 50 | echo "" 51 | echo " For zsh (~/.zshrc):" 52 | echo " echo 'export PATH=\"$SCRIPTS_DIR:\$PATH\"' >> ~/.zshrc && source ~/.zshrc" 53 | echo "" 54 | echo " Then restart your terminal or run: source ~/.bashrc (or ~/.zshrc)" 55 | echo "" 56 | else 57 | echo "✅ $SCRIPTS_DIR is already in your PATH" 58 | fi 59 | 60 | # Test installation 61 | echo "🧪 Testing installation..." 
62 | if command -v ocmonitor &> /dev/null; then 63 | echo "✅ ocmonitor command is available" 64 | ocmonitor --version 65 | else 66 | echo "⚠️ ocmonitor command not found in PATH" 67 | echo " You can run it directly with:" 68 | echo " $SCRIPTS_DIR/ocmonitor --help" 69 | fi 70 | 71 | echo "" 72 | echo "🎉 Installation complete!" 73 | echo "" 74 | echo "📝 Next steps:" 75 | echo "1. Add $SCRIPTS_DIR to your PATH if you haven't already (see instructions above)" 76 | echo "2. Run 'ocmonitor --help' to see available commands" 77 | echo "3. Run 'ocmonitor config show' to view current configuration" 78 | echo "" 79 | echo "For more detailed usage instructions, see MANUAL_TEST_GUIDE.md" -------------------------------------------------------------------------------- /QUICK_START.md: -------------------------------------------------------------------------------- 1 | # 🚀 Quick Start Guide 2 | 3 | Get up and running with OpenCode Monitor in just a few minutes! 4 | 5 | ## 📋 Prerequisites 6 | 7 | - Python 3.7 or higher 8 | - pip package manager 9 | - OpenCode session data (stored in `~/.local/share/opencode/storage/message/`) 10 | 11 | ## 🛠️ Installation 12 | 13 | ### Option 1: Automated Installation (Recommended) 14 | 15 | ```bash 16 | # Clone the repository 17 | git clone 18 | cd ocmonitor 19 | 20 | # Run the installation script 21 | ./install.sh 22 | ``` 23 | 24 | ### Option 2: Manual Installation 25 | 26 | ```bash 27 | # Clone the repository 28 | git clone 29 | cd ocmonitor 30 | 31 | # Install dependencies 32 | python3 -m pip install -r requirements.txt 33 | 34 | # Install the package 35 | python3 -m pip install -e . 36 | 37 | # Add to PATH (if needed) 38 | echo 'export PATH="$(python3 -m site --user-base)/bin:$PATH"' >> ~/.bashrc 39 | source ~/.bashrc 40 | ``` 41 | 42 | ## 🎯 First Steps 43 | 44 | ### 1. Check Configuration 45 | ```bash 46 | ocmonitor config show 47 | ``` 48 | 49 | ### 2. 
Analyze Your Sessions 50 | ```bash 51 | # Analyze all sessions (uses default OpenCode directory) 52 | ocmonitor sessions 53 | 54 | # Analyze a specific session 55 | ocmonitor session /path/to/specific/session 56 | ``` 57 | 58 | ### 3. View Different Reports 59 | ```bash 60 | # Daily usage breakdown 61 | ocmonitor daily 62 | 63 | # Model usage analytics 64 | ocmonitor models 65 | 66 | # Weekly breakdown 67 | ocmonitor weekly 68 | ``` 69 | 70 | ### 4. Export Data 71 | ```bash 72 | # Export to CSV 73 | ocmonitor export sessions --format csv --output my_report.csv 74 | 75 | # Export to JSON 76 | ocmonitor export sessions --format json --output my_report.json 77 | ``` 78 | 79 | ### 5. Real-time Monitoring 80 | ```bash 81 | # Start live dashboard 82 | ocmonitor live 83 | ``` 84 | 85 | ## 📖 Common Commands 86 | 87 | | Command | Description | 88 | |---------|-------------| 89 | | `ocmonitor --help` | Show all available commands | 90 | | `ocmonitor config show` | Display current configuration | 91 | | `ocmonitor sessions` | Analyze all sessions | 92 | | `ocmonitor session ` | Analyze a single session | 93 | | `ocmonitor daily` | Daily usage breakdown | 94 | | `ocmonitor models` | Model usage analytics | 95 | | `ocmonitor live` | Real-time monitoring dashboard | 96 | | `ocmonitor export --format ` | Export data | 97 | 98 | ## 🎨 Output Formats 99 | 100 | All commands support different output formats: 101 | 102 | ```bash 103 | # Rich tables (default) 104 | ocmonitor sessions 105 | 106 | # JSON output 107 | ocmonitor sessions --format json 108 | 109 | # Export to files 110 | ocmonitor export sessions --format csv --output report.csv 111 | ``` 112 | 113 | ## 🤔 Need Help? 114 | 115 | - Run `ocmonitor --help` for specific command help 116 | - Check `MANUAL_TEST_GUIDE.md` for comprehensive usage examples 117 | - File an issue on GitHub if you encounter problems 118 | 119 | ## 🎉 You're Ready! 
120 | 121 | Start exploring your OpenCode session data and gain insights into your AI-assisted coding patterns! -------------------------------------------------------------------------------- /screenshots/README.md: -------------------------------------------------------------------------------- 1 | # 📸 Screenshots Directory 2 | 3 | This directory contains screenshots used in the README.md and DOCUMENTATION.md files. 4 | 5 | ## 📋 Required Screenshots 6 | 7 | ### README.md Screenshots 8 | 9 | #### `sessions-summary.png` 10 | - **Command**: `ocmonitor sessions ~/.local/share/opencode/storage/message` 11 | - **Description**: Screenshot showing the sessions summary output with cost breakdown and statistics 12 | - **Recommended Size**: 800x600px or similar 13 | 14 | #### `model-usage-breakdown.png` 15 | - **Command**: `ocmonitor models ~/.local/share/opencode/storage/message` 16 | - **Description**: Screenshot showing the model usage analytics with cost and usage percentages 17 | - **Recommended Size**: 800x600px or similar 18 | 19 | ### DOCUMENTATION.md Screenshots 20 | 21 | #### `session-analysis.png` 22 | - **Command**: `ocmonitor session ~/.local/share/opencode/storage/message/ses_20250118_143022` 23 | - **Description**: Screenshot showing detailed analysis of a single session 24 | - **Recommended Size**: 800x600px or similar 25 | 26 | #### `sessions-summary.png` 27 | - **Command**: `ocmonitor sessions ~/.local/share/opencode/storage/message` 28 | - **Description**: Screenshot showing the sessions summary with multiple sessions listed 29 | - **Recommended Size**: 800x600px or similar 30 | 31 | #### `daily-usage-breakdown.png` 32 | - **Command**: `ocmonitor daily ~/.local/share/opencode/storage/message` 33 | - **Description**: Screenshot showing daily usage breakdown with costs and token counts 34 | - **Recommended Size**: 800x600px or similar 35 | 36 | #### `model-usage-analysis.png` 37 | - **Command**: `ocmonitor models ~/.local/share/opencode/storage/message` 38 
| - **Description**: Screenshot showing detailed model usage analytics with percentages 39 | - **Recommended Size**: 800x600px or similar 40 | 41 | #### `quota-status.png` 42 | - **Command**: `ocmonitor daily ~/.local/share/opencode/storage/message --show-quotas` 43 | - **Description**: Screenshot showing quota status with usage percentages and warnings 44 | - **Recommended Size**: 800x600px or similar 45 | 46 | #### `configuration.png` 47 | - **Command**: `ocmonitor config show` 48 | - **Description**: Screenshot showing the complete configuration display 49 | - **Recommended Size**: 800x600px or similar 50 | 51 | #### `config-validation.png` 52 | - **Command**: `ocmonitor config validate` 53 | - **Description**: Screenshot showing configuration validation results 54 | - **Recommended Size**: 800x600px or similar 55 | 56 | #### `config-diagnosis.png` 57 | - **Command**: `ocmonitor config diagnose` 58 | - **Description**: Screenshot showing configuration diagnosis with recommendations 59 | - **Recommended Size**: 800x600px or similar 60 | 61 | #### `system-info.png` 62 | - **Command**: `ocmonitor config system-info` 63 | - **Description**: Screenshot showing system information for bug reports 64 | - **Recommended Size**: 800x600px or similar 65 | 66 | ## 📝 How to Add Screenshots 67 | 68 | 1. Run the corresponding command in your terminal 69 | 2. Take a screenshot of the output 70 | 3. Save the image with the exact filename listed above 71 | 4. Place the PNG file in this directory 72 | 5. 
Commit and push to your GitHub repository 73 | 74 | ## 💡 Screenshot Tips 75 | 76 | - **Terminal Theme**: Use a dark theme for better readability 77 | - **Window Size**: Ensure the full output is visible 78 | - **Format**: Save as PNG for best quality 79 | - **Naming**: Use exact filenames as specified above 80 | - **Resolution**: 72-96 DPI is sufficient for web display 81 | 82 | ## 🔄 Updating Screenshots 83 | 84 | When you update the tool or add new features, remember to update the corresponding screenshots to reflect the latest output format and features. -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to OpenCode Monitor will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
7 | 8 | ## [1.0.0] - 2025-01-XX 9 | 10 | ### 🎉 Initial Release 11 | 12 | #### Added 13 | - **Core CLI Application** - Complete command-line interface with Click framework 14 | - **Rich Terminal UI** - Beautiful tables, progress bars, and colored output 15 | - **Comprehensive Analytics** - Daily, weekly, monthly, and model-based breakdowns 16 | - **Real-time Monitoring** - Live dashboard for active session monitoring 17 | - **Data Export** - CSV and JSON export functionality with metadata 18 | - **Multi-model Support** - Support for 6 AI models including Claude, Grok, Qwen, and Z-AI 19 | - **Professional Documentation** - Complete setup guides and user documentation 20 | 21 | #### Features 22 | - 📊 **Session Analysis** - Analyze individual or batch sessions 23 | - 💰 **Cost Tracking** - Accurate cost calculations with model-specific pricing 24 | - 📈 **Usage Analytics** - Token usage, context utilization, and performance metrics 25 | - 🎨 **Rich UI Components** - Color-coded tables with progress indicators 26 | - ⚙️ **TOML Configuration** - User-friendly configuration management 27 | - 🔄 **Live Dashboard** - Real-time session monitoring with auto-refresh 28 | - 📤 **Export Capabilities** - Professional CSV and JSON exports 29 | 30 | #### Supported Commands 31 | ```bash 32 | ocmonitor config show # Display configuration 33 | ocmonitor session # Analyze single session 34 | ocmonitor sessions # Analyze all sessions 35 | ocmonitor daily # Daily usage breakdown 36 | ocmonitor weekly # Weekly usage breakdown 37 | ocmonitor monthly # Monthly usage breakdown 38 | ocmonitor models # Model usage analytics 39 | ocmonitor live # Real-time monitoring 40 | ocmonitor export # Data export functionality 41 | ``` 42 | 43 | #### Supported AI Models 44 | - **Claude Sonnet 4** (2025-05-14) - $3/$15 per 1M tokens, 200k context 45 | - **Claude Opus 4** - $15/$75 per 1M tokens, 200k context 46 | - **Claude Opus 4.1** - $15/$75 per 1M tokens, 200k context 47 | - **Grok Code** - FREE, 256k 
context 48 | - **Qwen3 Coder** (qwen/qwen3-coder) - FREE, 256k context 49 | - **Z-AI GLM 4.5 Air** (z-ai/glm-4.5-air) - FREE, 128k context 50 | 51 | #### Technical Improvements 52 | - **Modular Architecture** - Clean separation of concerns with services, models, and UI 53 | - **Error Handling** - Comprehensive error handling with user-friendly messages 54 | - **Type Safety** - Full type hints and Pydantic models for data validation 55 | - **Performance** - Memory-efficient processing of large session datasets 56 | - **Extensibility** - Easy addition of new models and features 57 | 58 | ### 🛠️ Development Infrastructure 59 | 60 | #### Added 61 | - **Automated Installation** - `install.sh` script for easy setup 62 | - **Comprehensive Testing** - Manual test suites and validation scripts 63 | - **Documentation** - README, Quick Start, Manual Test Guide 64 | - **Configuration Management** - TOML config with JSON model pricing 65 | - **PATH Management** - Automatic PATH configuration handling 66 | 67 | ### 🐛 Bug Fixes 68 | 69 | #### Fixed 70 | - **JSON Serialization** - Fixed Decimal serialization for JSON exports 71 | - **Model Name Parsing** - Proper handling of fully qualified model names (e.g., `qwen/qwen3-coder`) 72 | - **Zero-token Filtering** - Filtered out empty interactions that caused confusion 73 | - **Export Data Structure** - Fixed CLI export command data structure issues 74 | - **Import Path Resolution** - Resolved Python module path issues 75 | - **Installation Issues** - Created comprehensive installation guides and PATH configuration 76 | 77 | ### 📚 Documentation 78 | 79 | #### Added 80 | - **README.md** - Complete project overview and usage instructions 81 | - **QUICK_START.md** - Fast setup and common usage patterns 82 | - **MANUAL_TEST_GUIDE.md** - Comprehensive testing procedures 83 | - **PROJECT_SUMMARY.md** - Detailed project documentation and achievements 84 | - **Installation Guides** - Multiple installation methods with troubleshooting 85 | 86 
| ### 🧪 Testing 87 | 88 | #### Added 89 | - **Basic Functionality Tests** - Core feature validation 90 | - **Import Validation** - Module import and dependency checks 91 | - **CLI Command Tests** - All command-line interfaces tested 92 | - **Real Data Testing** - Validation with actual OpenCode session data 93 | - **Error Scenario Testing** - Edge case and error handling validation 94 | 95 | --- 96 | 97 | ## Version History Summary 98 | 99 | - **v1.0.0** - Initial production release with complete feature set 100 | - **Pre-release** - Development phases transforming basic scripts into professional CLI tool 101 | 102 | ## Migration from Legacy Scripts 103 | 104 | This release replaces the original three Python scripts: 105 | - `session_summarizer.py` → `ocmonitor sessions` 106 | - `token_summarizer.py` → `ocmonitor models` 107 | - `live_dashboard.py` → `ocmonitor live` 108 | 109 | The new unified CLI provides all original functionality plus significant enhancements: 110 | - Beautiful Rich terminal interface 111 | - Comprehensive analytics and breakdowns 112 | - Professional export capabilities 113 | - Real-time monitoring dashboard 114 | - Robust error handling and validation 115 | - Easy installation and configuration 116 | 117 | --- 118 | 119 | *For detailed information about each feature, see the [README.md](README.md) and [documentation](QUICK_START.md).* -------------------------------------------------------------------------------- /ocmonitor/config.py: -------------------------------------------------------------------------------- 1 | """Configuration management for OpenCode Monitor.""" 2 | 3 | import json 4 | import os 5 | import toml 6 | from typing import Dict, Optional 7 | from pydantic import BaseModel, Field, field_validator 8 | from decimal import Decimal 9 | 10 | 11 | def opencode_storage_path(path: str | None = None) -> str: 12 | base = os.getenv("XDG_DATA_HOME") or "~/.local/share" 13 | parts = [base, "opencode", "storage"] 14 | if path: 15 | 
parts.append(path) 16 | return os.path.join(*parts) 17 | 18 | 19 | class PathsConfig(BaseModel): 20 | """Configuration for file paths.""" 21 | messages_dir: str = Field(default=opencode_storage_path("messages")) 22 | opencode_storage_dir: str = Field(default=opencode_storage_path()) 23 | export_dir: str = Field(default="./exports") 24 | 25 | @field_validator('messages_dir', 'opencode_storage_dir', 'export_dir') 26 | @classmethod 27 | def expand_path(cls, v): 28 | """Expand user paths and environment variables.""" 29 | return os.path.expanduser(os.path.expandvars(v)) 30 | 31 | 32 | class UIConfig(BaseModel): 33 | """Configuration for UI appearance.""" 34 | table_style: str = Field(default="rich", pattern="^(rich|simple|minimal)$") 35 | progress_bars: bool = Field(default=True) 36 | colors: bool = Field(default=True) 37 | live_refresh_interval: int = Field(default=5, ge=1, le=60) 38 | 39 | 40 | class ExportConfig(BaseModel): 41 | """Configuration for data export.""" 42 | default_format: str = Field(default="csv", pattern="^(csv|json)$") 43 | include_metadata: bool = Field(default=True) 44 | include_raw_data: bool = Field(default=False) 45 | 46 | 47 | class ModelsConfig(BaseModel): 48 | """Configuration for model pricing.""" 49 | config_file: str = Field(default="models.json") 50 | 51 | 52 | class AnalyticsConfig(BaseModel): 53 | """Configuration for analytics.""" 54 | default_timeframe: str = Field(default="daily", pattern="^(daily|weekly|monthly)$") 55 | recent_sessions_limit: int = Field(default=50, ge=1, le=1000) 56 | 57 | 58 | class Config(BaseModel): 59 | """Main configuration class.""" 60 | paths: PathsConfig = Field(default_factory=PathsConfig) 61 | ui: UIConfig = Field(default_factory=UIConfig) 62 | export: ExportConfig = Field(default_factory=ExportConfig) 63 | models: ModelsConfig = Field(default_factory=ModelsConfig) 64 | analytics: AnalyticsConfig = Field(default_factory=AnalyticsConfig) 65 | 66 | 67 | class ModelPricing(BaseModel): 68 | """Model for 
pricing information.""" 69 | input: Decimal = Field(description="Cost per 1M input tokens") 70 | output: Decimal = Field(description="Cost per 1M output tokens") 71 | cache_write: Decimal = Field(alias="cacheWrite", description="Cost per 1M cache write tokens") 72 | cache_read: Decimal = Field(alias="cacheRead", description="Cost per 1M cache read tokens") 73 | context_window: int = Field(alias="contextWindow", description="Maximum context window size") 74 | session_quota: Decimal = Field(alias="sessionQuota", description="Maximum session cost quota") 75 | 76 | 77 | class ConfigManager: 78 | """Manages configuration loading and access.""" 79 | 80 | def __init__(self, config_path: Optional[str] = None): 81 | """Initialize configuration manager. 82 | 83 | Args: 84 | config_path: Path to configuration file. If None, searches standard locations. 85 | """ 86 | self.config_path = config_path or self._find_config_file() 87 | self._config: Optional[Config] = None 88 | self._pricing_data: Optional[Dict[str, ModelPricing]] = None 89 | 90 | def _find_config_file(self) -> str: 91 | """Find configuration file in standard locations.""" 92 | search_paths = [ 93 | "config.toml", 94 | "ocmonitor.toml", 95 | os.path.expanduser("~/.config/ocmonitor/config.toml"), 96 | os.path.join(os.path.dirname(__file__), "..", "config.toml"), 97 | ] 98 | 99 | for path in search_paths: 100 | if os.path.exists(path): 101 | return path 102 | 103 | # Return default path even if it doesn't exist 104 | return search_paths[0] 105 | 106 | @property 107 | def config(self) -> Config: 108 | """Get configuration, loading if necessary.""" 109 | if self._config is None: 110 | self._config = self._load_config() 111 | return self._config 112 | 113 | def _load_config(self) -> Config: 114 | """Load configuration from TOML file.""" 115 | if not os.path.exists(self.config_path): 116 | # Return default configuration if file doesn't exist 117 | return Config() 118 | 119 | try: 120 | with open(self.config_path, 'r') as 
f: 121 | config_data = toml.load(f) 122 | return Config(**config_data) 123 | except (toml.TomlDecodeError, ValueError) as e: 124 | raise ValueError(f"Invalid configuration file {self.config_path}: {e}") 125 | 126 | def load_pricing_data(self) -> Dict[str, ModelPricing]: 127 | """Load model pricing data.""" 128 | if self._pricing_data is None: 129 | self._pricing_data = self._load_pricing_data() 130 | return self._pricing_data 131 | 132 | def _load_pricing_data(self) -> Dict[str, ModelPricing]: 133 | """Load pricing data from JSON file.""" 134 | models_file = self.config.models.config_file 135 | 136 | # Try relative to config file first 137 | if not os.path.isabs(models_file): 138 | config_dir = os.path.dirname(self.config_path) 139 | models_file = os.path.join(config_dir, models_file) 140 | 141 | if not os.path.exists(models_file): 142 | # Try in same directory as this module 143 | models_file = os.path.join(os.path.dirname(__file__), "..", "models.json") 144 | 145 | if not os.path.exists(models_file): 146 | return {} 147 | 148 | try: 149 | with open(models_file, 'r') as f: 150 | raw_data = json.load(f) 151 | 152 | pricing_data = {} 153 | for model_name, model_data in raw_data.items(): 154 | pricing_data[model_name] = ModelPricing(**model_data) 155 | 156 | return pricing_data 157 | except (json.JSONDecodeError, ValueError) as e: 158 | raise ValueError(f"Invalid pricing file {models_file}: {e}") 159 | 160 | def get_model_pricing(self, model_name: str) -> Optional[ModelPricing]: 161 | """Get pricing information for a specific model.""" 162 | pricing_data = self.load_pricing_data() 163 | return pricing_data.get(model_name) 164 | 165 | def reload(self): 166 | """Reload configuration and pricing data.""" 167 | self._config = None 168 | self._pricing_data = None 169 | 170 | 171 | # Global configuration manager instance 172 | config_manager = ConfigManager() 173 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: 
-------------------------------------------------------------------------------- 1 | # Contributing to OpenCode Monitor 2 | 3 | Thank you for considering contributing to OpenCode Monitor! This document provides guidelines and information for contributors. 4 | 5 | ## 🚀 Quick Start for Contributors 6 | 7 | ### Development Setup 8 | 9 | 1. **Fork and Clone** 10 | ```bash 11 | git clone https://github.com/yourusername/ocmonitor.git 12 | cd ocmonitor 13 | ``` 14 | 15 | 2. **Set Up Development Environment** 16 | ```bash 17 | python3 -m pip install -r requirements.txt 18 | python3 -m pip install -e . 19 | ``` 20 | 21 | 3. **Verify Installation** 22 | ```bash 23 | ocmonitor --help 24 | python3 test_basic.py 25 | ``` 26 | 27 | ## 📋 Ways to Contribute 28 | 29 | ### 🐛 Bug Reports 30 | 31 | When filing a bug report, please include: 32 | 33 | - **Clear Description** - What happened vs. what you expected 34 | - **Steps to Reproduce** - Detailed steps to recreate the issue 35 | - **Environment Info** - OS, Python version, OpenCode Monitor version 36 | - **Sample Data** - If possible, include sample session data (anonymized) 37 | - **Error Messages** - Full error messages and stack traces 38 | 39 | **Template:** 40 | ```markdown 41 | **Bug Description** 42 | A clear description of what the bug is. 43 | 44 | **To Reproduce** 45 | Steps to reproduce the behavior: 46 | 1. Run command '...' 47 | 2. With data '...' 48 | 3. See error 49 | 50 | **Expected Behavior** 51 | What you expected to happen. 52 | 53 | **Environment** 54 | - OS: [e.g. macOS 12.0] 55 | - Python Version: [e.g. 3.9.0] 56 | - OpenCode Monitor Version: [e.g. 1.0.0] 57 | 58 | **Additional Context** 59 | Any other context about the problem. 60 | ``` 61 | 62 | ### 💡 Feature Requests 63 | 64 | For feature requests, please include: 65 | 66 | - **Use Case** - Why would this feature be useful? 67 | - **Proposed Solution** - How should it work? 
68 | - **Alternative Solutions** - Other ways to achieve the same goal 69 | - **Implementation Ideas** - Technical approach (if you have ideas) 70 | 71 | ### 🔧 Code Contributions 72 | 73 | We welcome code contributions! Here's how to submit them: 74 | 75 | ## 📝 Development Guidelines 76 | 77 | ### Code Style 78 | 79 | - **Python Standards** - Follow PEP 8 style guidelines 80 | - **Type Hints** - Use type hints for function parameters and return values 81 | - **Docstrings** - Include docstrings for all public functions and classes 82 | - **Error Handling** - Provide meaningful error messages and graceful failures 83 | 84 | **Example:** 85 | ```python 86 | def calculate_session_cost(session: SessionData, model_config: Dict[str, Any]) -> Decimal: 87 | """Calculate the total cost for a coding session. 88 | 89 | Args: 90 | session: The session data containing token usage 91 | model_config: Configuration containing model pricing 92 | 93 | Returns: 94 | The total cost as a Decimal for precise financial calculations 95 | 96 | Raises: 97 | ValueError: If model is not found in configuration 98 | """ 99 | # Implementation here 100 | ``` 101 | 102 | ### Project Architecture 103 | 104 | The project follows a clean architecture pattern: 105 | 106 | ``` 107 | ocmonitor/ 108 | ├── cli.py # Command-line interface (Click) 109 | ├── config.py # Configuration management 110 | ├── models/ # Pydantic data models 111 | │ ├── analytics.py # Analytics data structures 112 | │ └── session.py # Session data structures 113 | ├── services/ # Business logic services 114 | │ ├── session_analyzer.py # Core analysis logic 115 | │ ├── report_generator.py # Report generation 116 | │ ├── export_service.py # Data export functionality 117 | │ └── live_monitor.py # Real-time monitoring 118 | ├── ui/ # User interface components 119 | │ ├── dashboard.py # Rich dashboard components 120 | │ └── tables.py # Table formatting 121 | └── utils/ # Utility functions 122 | ├── file_utils.py # File processing 123 | 
├── time_utils.py    # Time/date utilities
    └── formatting.py    # Output formatting
```

### Adding New Features

1. **Models** - Add data structures in `models/`
2. **Services** - Implement business logic in `services/`
3. **CLI** - Add commands in `cli.py`
4. **UI** - Create Rich components in `ui/`

### Testing

- **Basic Tests** - Run `python3 test_basic.py` for core functionality
- **Simple Tests** - Run `python3 test_simple.py` for import validation
- **Manual Testing** - Follow `MANUAL_TEST_GUIDE.md` for comprehensive testing

### Adding New AI Models

To add support for a new AI model:

1. **Update models.json** (keys must match the `ModelPricing` schema in `ocmonitor/config.py`)
   ```json
   {
     "new-model-name": {
       "input": 5.0,
       "output": 15.0,
       "cacheWrite": 6.25,
       "cacheRead": 0.5,
       "contextWindow": 200000,
       "sessionQuota": 100.0
     }
   }
   ```
   Costs are in USD per million tokens; `sessionQuota` is the maximum session cost in USD.

2. **Test the Model**
   - Create test session data with the new model
   - Verify cost calculations are correct
   - Ensure the model appears in analytics

## 🔄 Pull Request Process

### Before Submitting

1. **Test Your Changes**
   ```bash
   python3 test_basic.py
   python3 test_simple.py
   ```

2. **Check Code Style**
   - Ensure your code follows the existing style
   - Add appropriate type hints and docstrings

3. **Update Documentation**
   - Update README.md if adding new features
   - Update model lists if adding new AI models

### Submitting the PR

1. **Create Feature Branch**
   ```bash
   git checkout -b feature/your-feature-name
   ```

2. **Commit Changes**
   ```bash
   git add .
   git commit -m "Add: description of your changes"
   ```

3.
**Push and Create PR** 192 | ```bash 193 | git push origin feature/your-feature-name 194 | ``` 195 | 196 | ### PR Template 197 | 198 | ```markdown 199 | ## Description 200 | Brief description of changes made. 201 | 202 | ## Type of Change 203 | - [ ] Bug fix (non-breaking change which fixes an issue) 204 | - [ ] New feature (non-breaking change which adds functionality) 205 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 206 | - [ ] Documentation update 207 | 208 | ## How Has This Been Tested? 209 | - [ ] Ran test_basic.py 210 | - [ ] Ran test_simple.py 211 | - [ ] Manual testing completed 212 | - [ ] Tested with real OpenCode session data 213 | 214 | ## Checklist 215 | - [ ] My code follows the style guidelines 216 | - [ ] I have performed a self-review of my code 217 | - [ ] I have made corresponding changes to the documentation 218 | - [ ] My changes generate no new warnings 219 | - [ ] Any dependent changes have been merged and published 220 | ``` 221 | 222 | ## 🏷️ Versioning 223 | 224 | We use [Semantic Versioning](https://semver.org/) (SemVer): 225 | 226 | - **MAJOR** version for incompatible API changes 227 | - **MINOR** version for new functionality in a backwards compatible manner 228 | - **PATCH** version for backwards compatible bug fixes 229 | 230 | ## 🤝 Code of Conduct 231 | 232 | ### Our Pledge 233 | 234 | We pledge to make participation in our project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
235 | 236 | ### Our Standards 237 | 238 | - **Be Respectful** - Treat everyone with respect and kindness 239 | - **Be Collaborative** - Help others and accept help gracefully 240 | - **Be Inclusive** - Welcome newcomers and diverse perspectives 241 | - **Be Professional** - Focus on constructive feedback and solutions 242 | 243 | ## 📞 Getting Help 244 | 245 | - **GitHub Issues** - For bug reports and feature requests 246 | - **Discussions** - For questions and general discussion 247 | - **Documentation** - Check README.md and guides for common questions 248 | 249 | ## 🎉 Recognition 250 | 251 | Contributors will be acknowledged in: 252 | - GitHub contributor list 253 | - Release notes for significant contributions 254 | - README.md acknowledgments section 255 | 256 | --- 257 | 258 | Thank you for contributing to OpenCode Monitor! 🚀 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 📊 OpenCode Monitor 2 | 3 | [![Python 3.7+](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/) 4 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 5 | 6 | **OpenCode Monitor is a CLI tool for monitoring and analyzing OpenCode AI coding sessions.** 7 | 8 | Transform your OpenCode usage data into beautiful, actionable insights with comprehensive analytics, real-time monitoring, and professional reporting capabilities. 
9 | 10 | [![Sessions Summary Screenshot](screenshots/sessions-summary.png)](screenshots/sessions-summary.png) 11 | 12 | ## 🌟 Features 13 | 14 | ### 💼 Professional Analytics 15 | - **📈 Comprehensive Reports** - Daily, weekly, and monthly usage breakdowns 16 | - **💰 Cost Tracking** - Accurate cost calculations for multiple AI models 17 | - **📊 Model Analytics** - Detailed breakdown of usage per AI model with `--breakdown` flag 18 | - **📋 Project Analytics** - Track costs and token usage by coding project 19 | - **⏱️ Performance Metrics** - Session duration and processing time tracking 20 | - **📅 Flexible Week Boundaries** - Customize weekly reports with 7 start day options (Monday-Sunday) 21 | 22 | ### 🎨 Beautiful User Interface 23 | - **🌈 Rich Terminal UI** - Professional design with clean styling and optimal space utilization 24 | - **📊 Progress Bars** - Visual indicators for cost quotas, context usage, and session time 25 | - **🚥 Color Coding** - Green/yellow/red status indicators based on usage thresholds 26 | - **📱 Live Dashboard** - Real-time monitoring with project names and session titles 27 | - **⏰ Session Time Tracking** - 5-hour session progress bar with color-coded time alerts 28 | 29 | ### 📤 Data Export & Integration 30 | - **📋 CSV Export** - Spreadsheet-compatible exports with metadata 31 | - **🔄 JSON Export** - Machine-readable exports for custom integrations 32 | - **📊 Multiple Report Types** - Sessions, daily, weekly, monthly, model, and project reports 33 | 34 | ## 🚀 Quick Start 35 | 36 | ### Installation 37 | 38 | **Option 1: Automated Installation (Recommended)** 39 | ```bash 40 | git clone https://github.com/yourusername/ocmonitor.git 41 | cd ocmonitor 42 | ./install.sh 43 | ``` 44 | 45 | **Option 2: Manual Installation** 46 | ```bash 47 | git clone https://github.com/yourusername/ocmonitor.git 48 | cd ocmonitor 49 | python3 -m pip install -r requirements.txt 50 | python3 -m pip install -e . 
51 | ``` 52 | 53 | ### Basic Usage 54 | 55 | ```bash 56 | # Quick configuration check 57 | ocmonitor config show 58 | 59 | # Analyze your sessions 60 | ocmonitor sessions ~/.local/share/opencode/storage/message 61 | 62 | # Analyze by project 63 | ocmonitor projects ~/.local/share/opencode/storage/message 64 | 65 | # Real-time monitoring 66 | ocmonitor live ~/.local/share/opencode/storage/message 67 | 68 | # Export your data 69 | ocmonitor export sessions ~/.local/share/opencode/storage/message --format csv 70 | ``` 71 | 72 | ## 📖 Documentation 73 | 74 | - **[Quick Start Guide](QUICK_START.md)** - Get up and running in 5 minutes 75 | - **[Manual Test Guide](MANUAL_TEST_GUIDE.md)** - Comprehensive testing instructions 76 | - **[Contributing Guidelines](CONTRIBUTING.md)** - How to contribute to the project 77 | 78 | ## 🎯 Use Cases 79 | 80 | ### Individual Developers 81 | - **Cost Management** - Track your AI usage costs across different models and projects 82 | - **Usage Optimization** - Identify patterns in your coding sessions with session time tracking 83 | - **Performance Monitoring** - Monitor session efficiency and token usage with real-time dashboards 84 | - **Project Analytics** - Understand which projects consume the most AI resources 85 | 86 | ### Development Teams 87 | - **Team Analytics** - Aggregate usage statistics across team members and projects 88 | - **Budget Planning** - Forecast AI costs based on usage trends and project breakdowns 89 | - **Model Comparison** - Compare performance and costs across different AI models 90 | - **Session Management** - Track coding session durations and productivity patterns 91 | 92 | ### Organizations 93 | - **Resource Planning** - Plan AI resource allocation and budgets by project 94 | - **Usage Reporting** - Generate professional reports for stakeholders with export capabilities 95 | - **Cost Attribution** - Track AI costs by project, team, and time period 96 | - **Quality Monitoring** - Monitor session lengths and 
usage patterns for optimization 97 | 98 | 99 | ## 📊 Example Output 100 | 101 | > **📸 Screenshots**: The following examples include both text output and clickable screenshots. To add your own screenshots, place PNG files in the `screenshots/` directory with the corresponding filenames. 102 | 103 | ### Sessions Summary 104 | 105 | [![Sessions Summary Screenshot](screenshots/sessions-summary.png)](screenshots/sessions-summary.png) 106 | 107 | *Click image to view full-size screenshot of sessions summary output* 108 | 109 | 110 | 111 | ### Time-Based Reporting 112 | 113 | #### `ocmonitor daily|weekly|monthly [--breakdown]` 114 | 115 | Time-based usage breakdown with optional per-model cost analysis. 116 | 117 | ```bash 118 | # Daily breakdown 119 | ocmonitor daily ~/.local/share/opencode/storage/message 120 | 121 | # Weekly breakdown with per-model breakdown 122 | ocmonitor weekly ~/.local/share/opencode/storage/message --breakdown 123 | 124 | # Monthly breakdown 125 | ocmonitor monthly ~/.local/share/opencode/storage/message 126 | 127 | # Weekly with custom start day 128 | ocmonitor weekly ~/.local/share/opencode/storage/message --start-day friday --breakdown 129 | ``` 130 | 131 | **`--breakdown` Flag:** Shows token consumption and cost per model within each time period (daily/weekly/monthly), making it easy to see which models are consuming resources. 132 | 133 | Supported days: `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` 134 | 135 | ### Live Monitoring Commands 136 | 137 | #### `ocmonitor live ` 138 | 139 | Real-time monitoring dashboard that updates automatically. 
140 | 141 | ```bash 142 | # Start live monitoring (updates every 5 seconds) 143 | ocmonitor live ~/.local/share/opencode/storage/message 144 | 145 | # Custom refresh interval (in seconds) 146 | ocmonitor live ~/.local/share/opencode/storage/message --refresh 10 147 | ``` 148 | 149 | **Features:** 150 | - 🔄 Auto-refreshing display with professional UI design 151 | - 📊 Real-time cost tracking with progress indicators 152 | - ⏱️ Live session duration with 5-hour progress bar 153 | - 📈 Token usage updates and context window monitoring 154 | - 🚦 Color-coded status indicators and time alerts 155 | - 📂 Project name display for better context 156 | - 📝 Human-readable session titles instead of cryptic IDs 157 | 158 | [![Live Dashboard Screenshot](screenshots/live_dashboard.png)](screenshots/live_dashboard.png) 159 | 160 | *Click image to view full-size screenshot of the live monitoring dashboard* 161 | 162 | ### Model Usage Breakdown 163 | 164 | [![Model Usage Breakdown Screenshot](screenshots/model-usage-breakdown.png)](screenshots/model-usage-breakdown.png) 165 | 166 | *Click image to view full-size screenshot of model usage analytics* 167 | 168 | 169 | ## ⚙️ Configuration 170 | 171 | ### Configuration File Location 172 | 173 | Create your configuration file at: **`~/.config/ocmonitor/config.toml`** 174 | 175 | ```bash 176 | # Create the configuration directory 177 | mkdir -p ~/.config/ocmonitor 178 | 179 | # Create your configuration file 180 | touch ~/.config/ocmonitor/config.toml 181 | ``` 182 | 183 | ### Configuration Options 184 | 185 | The tool is highly configurable through the `config.toml` file: 186 | 187 | ```toml 188 | [paths] 189 | messages_dir = "~/.local/share/opencode/storage/message" 190 | export_dir = "./exports" 191 | 192 | [ui] 193 | table_style = "rich" 194 | progress_bars = true 195 | colors = true 196 | 197 | [export] 198 | default_format = "csv" 199 | include_metadata = true 200 | ``` 201 | 202 | **Configuration File Search Order:** 203 | 1. 
`config.toml` or `ocmonitor.toml` (current working directory)
2. `~/.config/ocmonitor/config.toml` (recommended user location)
3. Project directory fallback (`config.toml` shipped next to the package)

## 🛠️ Development

### Prerequisites
- Python 3.7+
- pip package manager

### Setting Up Development Environment
```bash
git clone https://github.com/yourusername/ocmonitor.git
cd ocmonitor
python3 -m pip install -r requirements.txt
python3 -m pip install -e .
```

### Running Tests
```bash
# Basic functionality test
python3 test_basic.py

# Simple import tests
python3 test_simple.py
```

### Project Architecture
```
ocmonitor/
├── ocmonitor/           # Core package
│   ├── cli.py           # Command-line interface
│   ├── config.py        # Configuration management
│   ├── models/          # Pydantic data models
│   ├── services/        # Business logic services
│   ├── ui/              # Rich UI components
│   └── utils/           # Utility functions
├── config.toml          # User configuration
├── models.json          # AI model pricing data
└── test_sessions/       # Sample test data
```

## 🤝 Contributing

We welcome contributions! Please see our [Contributing Guidelines](CONTRIBUTING.md) for details on:

- 🐛 Reporting bugs
- 💡 Suggesting features
- 🔧 Setting up development environment
- 📝 Code style and standards
- 🚀 Submitting pull requests

## 📄 License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
258 | 259 | ## 🏆 Acknowledgments 260 | 261 | - **[OpenCode](https://opencode.ai/)** - For creating an excellent AI coding agent that makes development more efficient 262 | - **[ccusage](https://ccusage.com/)** - A similar monitoring tool for Claude Code that inspired features in this project 263 | - **[Click](https://click.palletsprojects.com/)** - Excellent CLI framework 264 | - **[Rich](https://github.com/Textualize/rich)** - Beautiful terminal formatting 265 | - **[Pydantic](https://pydantic-docs.helpmanual.io/)** - Data validation and settings 266 | 267 | ## 🚀 Status 268 | 269 | **🧪 Beta Testing** - This application is currently in beta testing phase. Please report any issues you encounter. 270 | 271 | **⚠️ Disclaimer** - This application is not affiliated with OpenCode AI. It is an independent community tool for monitoring OpenCode usage. 272 | 273 | --- 274 | 275 | *Built with ❤️ for the OpenCode community* -------------------------------------------------------------------------------- /ocmonitor/models/session.py: -------------------------------------------------------------------------------- 1 | """Session data models for OpenCode Monitor.""" 2 | 3 | from datetime import datetime 4 | from typing import List, Optional, Dict, Any 5 | from pathlib import Path 6 | from decimal import Decimal 7 | from pydantic import BaseModel, Field, computed_field, field_validator, ConfigDict 8 | 9 | 10 | class TokenUsage(BaseModel): 11 | """Model for token usage data.""" 12 | input: int = Field(default=0, ge=0) 13 | output: int = Field(default=0, ge=0) 14 | cache_write: int = Field(default=0, ge=0) 15 | cache_read: int = Field(default=0, ge=0) 16 | 17 | @computed_field 18 | @property 19 | def total(self) -> int: 20 | """Calculate total tokens.""" 21 | return self.input + self.output + self.cache_write + self.cache_read 22 | 23 | 24 | class TimeData(BaseModel): 25 | """Model for timing information.""" 26 | created: Optional[int] = Field(default=None, description="Creation 
timestamp in milliseconds") 27 | completed: Optional[int] = Field(default=None, description="Completion timestamp in milliseconds") 28 | 29 | @computed_field 30 | @property 31 | def duration_ms(self) -> Optional[int]: 32 | """Calculate duration in milliseconds.""" 33 | if self.created is not None and self.completed is not None: 34 | return self.completed - self.created 35 | return None 36 | 37 | @computed_field 38 | @property 39 | def created_datetime(self) -> Optional[datetime]: 40 | """Get creation time as datetime object.""" 41 | if self.created is not None: 42 | return datetime.fromtimestamp(self.created / 1000) 43 | return None 44 | 45 | @computed_field 46 | @property 47 | def completed_datetime(self) -> Optional[datetime]: 48 | """Get completion time as datetime object.""" 49 | if self.completed is not None: 50 | return datetime.fromtimestamp(self.completed / 1000) 51 | return None 52 | 53 | 54 | class InteractionFile(BaseModel): 55 | """Model for a single OpenCode interaction file.""" 56 | file_path: Path 57 | session_id: str 58 | model_id: str = Field(default="unknown") 59 | tokens: TokenUsage = Field(default_factory=TokenUsage) 60 | time_data: Optional[TimeData] = Field(default=None) 61 | project_path: Optional[str] = Field(default=None, description="Project working directory from OpenCode") 62 | raw_data: Dict[str, Any] = Field(default_factory=dict) 63 | 64 | model_config = ConfigDict(arbitrary_types_allowed=True) 65 | 66 | @field_validator('file_path') 67 | @classmethod 68 | def validate_file_path(cls, v): 69 | """Ensure file path is a Path object.""" 70 | return Path(v) if not isinstance(v, Path) else v 71 | 72 | @computed_field 73 | @property 74 | def file_name(self) -> str: 75 | """Get the file name.""" 76 | return self.file_path.name 77 | 78 | @computed_field 79 | @property 80 | def modification_time(self) -> datetime: 81 | """Get file modification time.""" 82 | return datetime.fromtimestamp(self.file_path.stat().st_mtime) 83 | 84 | @computed_field 
85 | @property 86 | def project_name(self) -> str: 87 | """Get project name from project path.""" 88 | if not self.project_path: 89 | return "Unknown" 90 | return Path(self.project_path).name if self.project_path else "Unknown" 91 | 92 | def calculate_cost(self, pricing_data: Dict[str, Any]) -> Decimal: 93 | """Calculate cost for this interaction.""" 94 | if self.model_id not in pricing_data: 95 | return Decimal('0.0') 96 | 97 | pricing = pricing_data[self.model_id] 98 | cost = Decimal('0.0') 99 | 100 | # Convert to cost per million tokens 101 | million = Decimal('1000000') 102 | 103 | cost += (Decimal(self.tokens.input) / million) * Decimal(str(pricing.input)) 104 | cost += (Decimal(self.tokens.output) / million) * Decimal(str(pricing.output)) 105 | cost += (Decimal(self.tokens.cache_write) / million) * Decimal(str(pricing.cache_write)) 106 | cost += (Decimal(self.tokens.cache_read) / million) * Decimal(str(pricing.cache_read)) 107 | 108 | return cost 109 | 110 | 111 | class SessionData(BaseModel): 112 | """Model for a complete OpenCode session.""" 113 | session_id: str 114 | session_path: Path 115 | files: List[InteractionFile] = Field(default_factory=list) 116 | session_title: Optional[str] = Field(default=None, description="Human-readable session title from OpenCode") 117 | 118 | model_config = ConfigDict(arbitrary_types_allowed=True) 119 | 120 | @field_validator('session_path') 121 | @classmethod 122 | def validate_session_path(cls, v): 123 | """Ensure session path is a Path object.""" 124 | return Path(v) if not isinstance(v, Path) else v 125 | 126 | @computed_field 127 | @property 128 | def models_used(self) -> List[str]: 129 | """Get list of unique models used in this session.""" 130 | return list(set(file.model_id for file in self.files)) 131 | 132 | @computed_field 133 | @property 134 | def total_tokens(self) -> TokenUsage: 135 | """Calculate total token usage for the session.""" 136 | total = TokenUsage() 137 | for file in self.files: 138 | total.input 
+= file.tokens.input 139 | total.output += file.tokens.output 140 | total.cache_write += file.tokens.cache_write 141 | total.cache_read += file.tokens.cache_read 142 | return total 143 | 144 | @computed_field 145 | @property 146 | def start_time(self) -> Optional[datetime]: 147 | """Get session start time (earliest file creation time).""" 148 | times = [file.time_data.created_datetime for file in self.files 149 | if file.time_data and file.time_data.created_datetime] 150 | return min(times) if times else None 151 | 152 | @computed_field 153 | @property 154 | def end_time(self) -> Optional[datetime]: 155 | """Get session end time (latest file completion time).""" 156 | times = [file.time_data.completed_datetime for file in self.files 157 | if file.time_data and file.time_data.completed_datetime] 158 | return max(times) if times else None 159 | 160 | @computed_field 161 | @property 162 | def duration_ms(self) -> Optional[int]: 163 | """Calculate total session duration in milliseconds.""" 164 | if self.start_time and self.end_time: 165 | return int((self.end_time - self.start_time).total_seconds() * 1000) 166 | return None 167 | 168 | @computed_field 169 | @property 170 | def duration_hours(self) -> float: 171 | """Calculate session duration in hours.""" 172 | if self.duration_ms: 173 | return self.duration_ms / (1000 * 60 * 60) 174 | return 0.0 175 | 176 | @computed_field 177 | @property 178 | def duration_percentage(self) -> float: 179 | """Calculate session duration as percentage of 5-hour maximum.""" 180 | max_hours = 5.0 181 | return min(100.0, (self.duration_hours / max_hours) * 100.0) 182 | 183 | @computed_field 184 | @property 185 | def total_processing_time_ms(self) -> int: 186 | """Calculate total processing time across all files.""" 187 | total = 0 188 | for file in self.files: 189 | if file.time_data and file.time_data.duration_ms: 190 | total += file.time_data.duration_ms 191 | return total 192 | 193 | def calculate_total_cost(self, pricing_data: 
Dict[str, Any]) -> Decimal: 194 | """Calculate total cost for the session.""" 195 | costs = [file.calculate_cost(pricing_data) for file in self.files] 196 | return Decimal(sum(costs)) 197 | 198 | def get_model_breakdown(self, pricing_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]: 199 | """Get breakdown of usage and cost by model.""" 200 | breakdown = {} 201 | 202 | for model in self.models_used: 203 | model_files = [f for f in self.files if f.model_id == model] 204 | model_tokens = TokenUsage() 205 | model_cost = Decimal('0.0') 206 | 207 | for file in model_files: 208 | model_tokens.input += file.tokens.input 209 | model_tokens.output += file.tokens.output 210 | model_tokens.cache_write += file.tokens.cache_write 211 | model_tokens.cache_read += file.tokens.cache_read 212 | model_cost += file.calculate_cost(pricing_data) 213 | 214 | breakdown[model] = { 215 | 'files': len(model_files), 216 | 'tokens': model_tokens, 217 | 'cost': model_cost 218 | } 219 | 220 | return breakdown 221 | 222 | @computed_field 223 | @property 224 | def interaction_count(self) -> int: 225 | """Get number of interactions (files) in this session.""" 226 | return len(self.files) 227 | 228 | @property 229 | def non_zero_token_files(self) -> List[InteractionFile]: 230 | """Get files with non-zero token usage.""" 231 | return [file for file in self.files if file.tokens.total > 0] 232 | 233 | @computed_field 234 | @property 235 | def project_name(self) -> str: 236 | """Get project name for this session based on most common project path.""" 237 | if not self.files: 238 | return "Unknown" 239 | 240 | # Get project paths from files that have them 241 | project_paths = [f.project_path for f in self.files if f.project_path] 242 | 243 | if not project_paths: 244 | return "Unknown" 245 | 246 | # Use the most common project path (in case there are mixed paths) 247 | from collections import Counter 248 | most_common_path = Counter(project_paths).most_common(1)[0][0] 249 | 250 | return 
Path(most_common_path).name if most_common_path else "Unknown" 251 | 252 | @computed_field 253 | @property 254 | def display_title(self) -> str: 255 | """Get display-friendly session title, with fallback to session ID.""" 256 | if self.session_title: 257 | # Truncate long titles for better display 258 | if len(self.session_title) > 50: 259 | return self.session_title[:47] + "..." 260 | return self.session_title 261 | 262 | # Fallback to session ID 263 | return self.session_id -------------------------------------------------------------------------------- /ocmonitor/utils/formatting.py: -------------------------------------------------------------------------------- 1 | """Formatting utility functions for OpenCode Monitor.""" 2 | 3 | from decimal import Decimal 4 | from typing import Any, Dict, List, Optional 5 | 6 | 7 | class NumberFormatter: 8 | """Utility functions for number formatting.""" 9 | 10 | @staticmethod 11 | def format_number(number: int) -> str: 12 | """Format numbers with thousands separators. 13 | 14 | Args: 15 | number: Number to format 16 | 17 | Returns: 18 | Formatted number string 19 | """ 20 | return f"{number:,}" 21 | 22 | @staticmethod 23 | def format_currency(amount: Decimal, currency: str = "USD") -> str: 24 | """Format currency amounts. 25 | 26 | Args: 27 | amount: Amount to format 28 | currency: Currency code (currently only USD supported) 29 | 30 | Returns: 31 | Formatted currency string 32 | """ 33 | if currency == "USD": 34 | return f"${amount:.4f}" 35 | else: 36 | return f"{amount:.4f} {currency}" 37 | 38 | @staticmethod 39 | def format_percentage(value: float, total: float, decimal_places: int = 1) -> str: 40 | """Format percentage values. 
41 | 42 | Args: 43 | value: Numerator value 44 | total: Denominator value 45 | decimal_places: Number of decimal places 46 | 47 | Returns: 48 | Formatted percentage string 49 | """ 50 | if total == 0: 51 | return "0.0%" 52 | 53 | percentage = (value / total) * 100 54 | return f"{percentage:.{decimal_places}f}%" 55 | 56 | @staticmethod 57 | def format_bytes(bytes_count: int) -> str: 58 | """Format byte counts in human-readable format. 59 | 60 | Args: 61 | bytes_count: Number of bytes 62 | 63 | Returns: 64 | Formatted byte string (e.g., "1.5 KB", "2.3 MB") 65 | """ 66 | if bytes_count == 0: 67 | return "0 B" 68 | 69 | units = ["B", "KB", "MB", "GB", "TB"] 70 | size = float(bytes_count) 71 | unit_index = 0 72 | 73 | while size >= 1024 and unit_index < len(units) - 1: 74 | size /= 1024 75 | unit_index += 1 76 | 77 | if unit_index == 0: 78 | return f"{int(size)} {units[unit_index]}" 79 | else: 80 | return f"{size:.1f} {units[unit_index]}" 81 | 82 | @staticmethod 83 | def format_rate(value: float, unit: str = "per minute") -> str: 84 | """Format rate values. 85 | 86 | Args: 87 | value: Rate value 88 | unit: Unit description 89 | 90 | Returns: 91 | Formatted rate string 92 | """ 93 | if value == 0: 94 | return f"0 {unit}" 95 | 96 | if value >= 1000000: 97 | return f"{value/1000000:.1f}M {unit}" 98 | elif value >= 1000: 99 | return f"{value/1000:.1f}K {unit}" 100 | else: 101 | return f"{value:.0f} {unit}" 102 | 103 | 104 | class TableFormatter: 105 | """Utility functions for table formatting.""" 106 | 107 | @staticmethod 108 | def truncate_text(text: str, max_length: int, suffix: str = "...") -> str: 109 | """Truncate text to maximum length with suffix. 
110 | 111 | Args: 112 | text: Text to truncate 113 | max_length: Maximum length including suffix 114 | suffix: Suffix to add if truncated 115 | 116 | Returns: 117 | Truncated text 118 | """ 119 | if len(text) <= max_length: 120 | return text 121 | 122 | truncate_length = max_length - len(suffix) 123 | if truncate_length <= 0: 124 | return suffix[:max_length] 125 | 126 | return text[:truncate_length] + suffix 127 | 128 | @staticmethod 129 | def align_text(text: str, width: int, alignment: str = "left") -> str: 130 | """Align text within a given width. 131 | 132 | Args: 133 | text: Text to align 134 | width: Target width 135 | alignment: "left", "right", or "center" 136 | 137 | Returns: 138 | Aligned text 139 | """ 140 | if len(text) >= width: 141 | return text 142 | 143 | if alignment == "left": 144 | return text.ljust(width) 145 | elif alignment == "right": 146 | return text.rjust(width) 147 | elif alignment == "center": 148 | return text.center(width) 149 | else: 150 | return text.ljust(width) 151 | 152 | @staticmethod 153 | def create_progress_bar(percentage: float, width: int = 20, 154 | filled_char: str = "█", empty_char: str = "░") -> str: 155 | """Create a text-based progress bar. 156 | 157 | Args: 158 | percentage: Percentage (0-100) 159 | width: Width of the progress bar 160 | filled_char: Character for filled portion 161 | empty_char: Character for empty portion 162 | 163 | Returns: 164 | Progress bar string 165 | """ 166 | filled = int(width * percentage / 100) 167 | bar = filled_char * filled + empty_char * (width - filled) 168 | return f"[{bar}] {percentage:.1f}%" 169 | 170 | 171 | class ColorFormatter: 172 | """Utility functions for color formatting.""" 173 | 174 | @staticmethod 175 | def get_cost_color(cost: Decimal, quota: Optional[Decimal] = None) -> str: 176 | """Get color for cost based on quota. 
177 | 178 | Args: 179 | cost: Current cost 180 | quota: Optional quota to compare against 181 | 182 | Returns: 183 | Color name for Rich formatting 184 | """ 185 | if quota is None: 186 | return "white" 187 | 188 | try: 189 | percentage = float(cost / quota) * 100 190 | except (ZeroDivisionError, TypeError, ValueError): 191 | return "white" 192 | 193 | if percentage >= 90: 194 | return "red" 195 | elif percentage >= 75: 196 | return "yellow" 197 | elif percentage >= 50: 198 | return "orange" 199 | else: 200 | return "green" 201 | 202 | @staticmethod 203 | def get_usage_color(current: int, maximum: int) -> str: 204 | """Get color for usage based on maximum. 205 | 206 | Args: 207 | current: Current usage 208 | maximum: Maximum allowed usage 209 | 210 | Returns: 211 | Color name for Rich formatting 212 | """ 213 | if maximum == 0: 214 | return "white" 215 | 216 | percentage = (current / maximum) * 100 217 | 218 | if percentage >= 95: 219 | return "red" 220 | elif percentage >= 85: 221 | return "yellow" 222 | elif percentage >= 70: 223 | return "orange" 224 | else: 225 | return "green" 226 | 227 | @staticmethod 228 | def get_status_color(status: str) -> str: 229 | """Get color for status indicators. 230 | 231 | Args: 232 | status: Status string 233 | 234 | Returns: 235 | Color name for Rich formatting 236 | """ 237 | status_colors = { 238 | "success": "green", 239 | "warning": "yellow", 240 | "error": "red", 241 | "info": "blue", 242 | "active": "green", 243 | "inactive": "dim white", 244 | "pending": "yellow", 245 | "completed": "green", 246 | "failed": "red" 247 | } 248 | 249 | return status_colors.get(status.lower(), "white") 250 | 251 | 252 | class DataFormatter: 253 | """Utility functions for data structure formatting.""" 254 | 255 | @staticmethod 256 | def flatten_dict(data: Dict[str, Any], prefix: str = "", separator: str = ".") -> Dict[str, Any]: 257 | """Flatten a nested dictionary. 
258 | 259 | Args: 260 | data: Dictionary to flatten 261 | prefix: Prefix for keys 262 | separator: Separator between nested keys 263 | 264 | Returns: 265 | Flattened dictionary 266 | """ 267 | flattened = {} 268 | 269 | for key, value in data.items(): 270 | new_key = f"{prefix}{separator}{key}" if prefix else key 271 | 272 | if isinstance(value, dict): 273 | flattened.update(DataFormatter.flatten_dict(value, new_key, separator)) 274 | elif isinstance(value, list): 275 | for i, item in enumerate(value): 276 | if isinstance(item, dict): 277 | flattened.update(DataFormatter.flatten_dict(item, f"{new_key}[{i}]", separator)) 278 | else: 279 | flattened[f"{new_key}[{i}]"] = item 280 | else: 281 | flattened[new_key] = value 282 | 283 | return flattened 284 | 285 | @staticmethod 286 | def sanitize_for_csv(value: Any) -> str: 287 | """Sanitize a value for CSV export. 288 | 289 | Args: 290 | value: Value to sanitize 291 | 292 | Returns: 293 | Sanitized string value 294 | """ 295 | if value is None: 296 | return "" 297 | 298 | # Convert to string 299 | str_value = str(value) 300 | 301 | # Escape quotes by doubling them 302 | str_value = str_value.replace('"', '""') 303 | 304 | # Wrap in quotes if contains comma, newline, or quote 305 | if ',' in str_value or '\n' in str_value or '"' in str_value: 306 | str_value = f'"{str_value}"' 307 | 308 | return str_value 309 | 310 | @staticmethod 311 | def format_model_name(model_name: str, max_length: int = 25) -> str: 312 | """Format model names for display. 
313 | 314 | Args: 315 | model_name: Full model name 316 | max_length: Maximum display length 317 | 318 | Returns: 319 | Formatted model name 320 | """ 321 | if len(model_name) <= max_length: 322 | return model_name 323 | 324 | # Try to keep important parts 325 | if "claude" in model_name.lower(): 326 | # For Claude models, prioritize version info 327 | parts = model_name.split("-") 328 | if len(parts) >= 2: 329 | short_name = f"{parts[0]}-{parts[1]}" 330 | if len(short_name) <= max_length: 331 | return short_name 332 | 333 | # Fallback to simple truncation 334 | return TableFormatter.truncate_text(model_name, max_length) -------------------------------------------------------------------------------- /MANUAL_TEST_GUIDE.md: -------------------------------------------------------------------------------- 1 | # 🧪 OpenCode Monitor - Manual Test Guide 2 | 3 | This guide will help you manually test all functionality of the new OpenCode Monitor implementation. 4 | 5 | ## 📋 Pre-Test Setup 6 | 7 | ### 1. **Environment Setup** 8 | ```bash 9 | cd ocmonitor 10 | python3 -m pip install -r requirements.txt 11 | ``` 12 | 13 | **Note**: You may see warnings about scripts not being in PATH. This is normal and will be resolved in the next step. 14 | 15 | ### 2. **Basic Installation Test** 16 | ```bash 17 | # Install in development mode 18 | python3 -m pip install -e . 19 | 20 | # Add Python scripts directory to PATH (if needed) 21 | # This command will show you the scripts directory: 22 | python3 -m site --user-base 23 | # Add /path/from/above/bin to your PATH 24 | 25 | # For macOS/Linux with bash/zsh, add this line to ~/.bashrc or ~/.zshrc: 26 | # export PATH="$(python3 -m site --user-base)/bin:$PATH" 27 | 28 | # Verify installation 29 | ocmonitor --help 30 | ``` 31 | **Expected**: Should show main help with all commands listed 32 | 33 | ### 3. 
**Run Basic Functionality Test** 34 | ```bash 35 | python3 test_basic.py 36 | ``` 37 | **Expected**: All 4 tests should pass with ✅ symbols 38 | 39 | ## 🔧 Configuration Tests 40 | 41 | ### Test 1: Configuration Display 42 | ```bash 43 | ocmonitor config show 44 | ``` 45 | **Expected Output:** 46 | - 📋 Current Configuration header 47 | - All sections: Paths, UI Settings, Export Settings, Models 48 | - Should show 3 configured models (claude-sonnet-4, claude-opus-4, claude-opus-4.1) 49 | 50 | ### Test 2: Configuration Files 51 | ```bash 52 | # Check config file exists 53 | ls -la config.toml 54 | 55 | # Check models file exists 56 | ls -la models.json 57 | 58 | # View config content 59 | cat config.toml 60 | ``` 61 | **Expected**: Both files should exist and contain valid configuration 62 | 63 | ## 📁 File System Tests 64 | 65 | ### Test 3: Directory Validation 66 | ```bash 67 | # Test with non-existent directory 68 | ocmonitor sessions /non/existent/path 69 | ``` 70 | **Expected**: Should show user-friendly error message 71 | 72 | ### Test 4: Default Directory Handling 73 | ```bash 74 | # Test with default directory (will likely not exist) 75 | ocmonitor sessions 76 | ``` 77 | **Expected**: Should attempt to use default path and show appropriate message 78 | 79 | ## 📊 Basic Analysis Tests 80 | 81 | ### Test 5: Session Analysis (Empty Directory) 82 | ```bash 83 | # Create test directory 84 | mkdir -p test_sessions 85 | 86 | # Test with empty directory 87 | ocmonitor sessions test_sessions 88 | ``` 89 | **Expected**: "No sessions found" message 90 | 91 | ### Test 6: Create Mock Session Data 92 | ```bash 93 | # Create mock session directory 94 | mkdir -p test_sessions/ses_test_001 95 | 96 | # Create mock interaction file 97 | cat > test_sessions/ses_test_001/msg_001.json << 'EOF' 98 | { 99 | "id": "msg_001", 100 | "role": "user", 101 | "sessionID": "ses_test_001", 102 | "modelID": "claude-sonnet-4-20250514", 103 | "tokens": { 104 | "input": 100, 105 | "output": 500, 106 
| "cache": { 107 | "write": 50, 108 | "read": 25 109 | } 110 | }, 111 | "time": { 112 | "created": 1705147082000, 113 | "completed": 1705147087000 114 | } 115 | } 116 | EOF 117 | 118 | # Create second interaction file 119 | cat > test_sessions/ses_test_001/msg_002.json << 'EOF' 120 | { 121 | "id": "msg_002", 122 | "role": "assistant", 123 | "sessionID": "ses_test_001", 124 | "modelID": "claude-sonnet-4-20250514", 125 | "tokens": { 126 | "input": 75, 127 | "output": 300, 128 | "cache": { 129 | "write": 0, 130 | "read": 100 131 | } 132 | }, 133 | "time": { 134 | "created": 1705147090000, 135 | "completed": 1705147095000 136 | } 137 | } 138 | EOF 139 | ``` 140 | 141 | ### Test 7: Single Session Analysis 142 | ```bash 143 | ocmonitor session test_sessions/ses_test_001 144 | ``` 145 | **Expected Output:** 146 | - Beautiful Rich table with session details 147 | - 2 interaction files listed 148 | - Token counts: input, output, cache read/write 149 | - Cost calculations 150 | - Duration information 151 | - Summary panel with totals 152 | 153 | ### Test 8: Sessions Summary 154 | ```bash 155 | ocmonitor sessions test_sessions 156 | ``` 157 | **Expected Output:** 158 | - Sessions summary table 159 | - 1 session shown 160 | - Aggregated token counts 161 | - Total cost calculation 162 | - Summary panel with overall statistics 163 | 164 | ## 🎨 Output Format Tests 165 | 166 | ### Test 9: JSON Output 167 | ```bash 168 | ocmonitor session test_sessions/ses_test_001 --format json 169 | ``` 170 | **Expected**: Valid JSON output with session data 171 | 172 | ### Test 10: Table Output (Default) 173 | ```bash 174 | ocmonitor sessions test_sessions --format table 175 | ``` 176 | **Expected**: Rich formatted table with colors and proper alignment 177 | 178 | ## 📤 Export Tests 179 | 180 | ### Test 11: CSV Export 181 | ```bash 182 | ocmonitor export sessions test_sessions --format csv --output test_export.csv 183 | ``` 184 | **Expected Output:** 185 | - ✅ Export completed successfully 
message 186 | - File path and size information 187 | - CSV file created with proper data 188 | 189 | ### Test 12: JSON Export 190 | ```bash 191 | ocmonitor export sessions test_sessions --format json --output test_export.json 192 | ``` 193 | **Expected Output:** 194 | - ✅ Export completed successfully message 195 | - JSON file created with metadata 196 | 197 | ### Test 13: Verify Export Files 198 | ```bash 199 | # Check CSV structure 200 | head -10 test_export.csv 201 | 202 | # Check JSON structure 203 | python3 -m json.tool test_export.json | head -20 204 | 205 | # Check file sizes 206 | ls -lh test_export.* 207 | ``` 208 | **Expected**: Both files should contain valid data 209 | 210 | ## 📅 Time-Based Analysis Tests 211 | 212 | ### Test 14: Create Additional Mock Data 213 | ```bash 214 | # Create second session with different date 215 | mkdir -p test_sessions/ses_test_002 216 | 217 | cat > test_sessions/ses_test_002/msg_001.json << 'EOF' 218 | { 219 | "id": "msg_001", 220 | "sessionID": "ses_test_002", 221 | "modelID": "claude-opus-4", 222 | "tokens": { 223 | "input": 200, 224 | "output": 800, 225 | "cache": { 226 | "write": 100, 227 | "read": 50 228 | } 229 | }, 230 | "time": { 231 | "created": 1705233482000, 232 | "completed": 1705233492000 233 | } 234 | } 235 | EOF 236 | ``` 237 | 238 | ### Test 15: Daily Breakdown 239 | ```bash 240 | ocmonitor daily test_sessions 241 | ``` 242 | **Expected**: Daily breakdown table with dates and usage 243 | 244 | ### Test 16: Model Breakdown 245 | ```bash 246 | ocmonitor models test_sessions 247 | ``` 248 | **Expected**: 249 | - Model usage table 250 | - Both claude-sonnet-4 and claude-opus-4 listed 251 | - Token counts and costs per model 252 | - Percentage breakdown 253 | 254 | ## 🔴 Error Handling Tests 255 | 256 | ### Test 17: Invalid JSON File 257 | ```bash 258 | # Create invalid JSON 259 | echo "{ invalid json" > test_sessions/ses_test_001/invalid.json 260 | 261 | # Test with invalid file 262 | ocmonitor session 
test_sessions/ses_test_001 263 | ``` 264 | **Expected**: Should handle invalid JSON gracefully and continue processing valid files 265 | 266 | ### Test 18: Permission Errors 267 | ```bash 268 | # Create unreadable file 269 | touch test_sessions/ses_test_001/unreadable.json 270 | chmod 000 test_sessions/ses_test_001/unreadable.json 271 | 272 | # Test handling 273 | ocmonitor session test_sessions/ses_test_001 274 | ``` 275 | **Expected**: Should handle permission errors gracefully 276 | 277 | ### Test 19: Missing Required Fields 278 | ```bash 279 | cat > test_sessions/ses_test_001/incomplete.json << 'EOF' 280 | { 281 | "id": "incomplete", 282 | "sessionID": "ses_test_001" 283 | } 284 | EOF 285 | 286 | ocmonitor session test_sessions/ses_test_001 287 | ``` 288 | **Expected**: Should handle missing fields and show appropriate warnings 289 | 290 | ## 📺 Live Monitoring Tests 291 | 292 | ### Test 20: Live Dashboard Setup 293 | ```bash 294 | # Test validation 295 | ocmonitor live test_sessions 296 | ``` 297 | **Expected**: Should show validation warnings and start monitoring 298 | 299 | ### Test 21: Live Dashboard Interaction 300 | 1. Start live dashboard in one terminal: 301 | ```bash 302 | ocmonitor live test_sessions --interval 2 303 | ``` 304 | 305 | 2. In another terminal, add a new file: 306 | ```bash 307 | cat > test_sessions/ses_test_002/msg_002.json << 'EOF' 308 | { 309 | "id": "msg_002", 310 | "sessionID": "ses_test_002", 311 | "modelID": "claude-opus-4", 312 | "tokens": { 313 | "input": 150, 314 | "output": 400 315 | }, 316 | "time": { 317 | "created": 1705233500000, 318 | "completed": 1705233505000 319 | } 320 | } 321 | EOF 322 | ``` 323 | 324 | 3. 
Watch the dashboard update 325 | 326 | **Expected**: Dashboard should refresh and show updated data 327 | 328 | ## 🎯 Advanced Feature Tests 329 | 330 | ### Test 22: Date Filtering 331 | ```bash 332 | ocmonitor models test_sessions --start-date 2024-01-01 --end-date 2024-12-31 333 | ``` 334 | 335 | ### Test 23: Verbose Mode 336 | ```bash 337 | ocmonitor sessions test_sessions --verbose 338 | ``` 339 | **Expected**: More detailed output and error information 340 | 341 | ### Test 24: Help Commands 342 | ```bash 343 | ocmonitor --help 344 | ocmonitor session --help 345 | ocmonitor export --help 346 | ocmonitor config --help 347 | ``` 348 | **Expected**: Comprehensive help for each command 349 | 350 | ## 🧹 Cleanup Tests 351 | 352 | ### Test 25: Export Directory 353 | ```bash 354 | ls -la exports/ 355 | ``` 356 | **Expected**: Should contain exported files 357 | 358 | ### Test 26: File Permissions 359 | ```bash 360 | # Reset permissions 361 | chmod 644 test_sessions/ses_test_001/unreadable.json 362 | 363 | # Cleanup test data 364 | rm -rf test_sessions/ 365 | rm -f test_export.* 366 | ``` 367 | 368 | ## ✅ Success Criteria 369 | 370 | ### **Basic Functionality** ✓ 371 | - [ ] All commands execute without Python errors 372 | - [ ] Configuration loads correctly 373 | - [ ] Help system works 374 | - [ ] Error messages are user-friendly 375 | 376 | ### **Data Processing** ✓ 377 | - [ ] Parses JSON files correctly 378 | - [ ] Calculates costs accurately 379 | - [ ] Handles missing/invalid data gracefully 380 | - [ ] Aggregates data correctly across sessions 381 | 382 | ### **Output Quality** ✓ 383 | - [ ] Rich tables display properly with colors 384 | - [ ] JSON output is valid 385 | - [ ] CSV exports are properly formatted 386 | - [ ] Progress bars and indicators work 387 | 388 | ### **Live Monitoring** ✓ 389 | - [ ] Dashboard starts and displays data 390 | - [ ] Updates in real-time 391 | - [ ] Handles Ctrl+C gracefully 392 | - [ ] Shows appropriate status information 393 | 
394 | ### **Error Resilience** ✓ 395 | - [ ] Handles file system errors 396 | - [ ] Recovers from JSON parsing errors 397 | - [ ] Provides helpful error messages 398 | - [ ] Continues processing despite individual file failures 399 | 400 | ## 🐛 Common Issues & Solutions 401 | 402 | ### Issue: "No module named 'ocmonitor'" 403 | **Solution**: Run `pip install -e .` from the ocmonitor directory 404 | 405 | ### Issue: "Permission denied" 406 | **Solution**: Check file permissions and directory access 407 | 408 | ### Issue: Rich tables not displaying properly 409 | **Solution**: Ensure terminal supports UTF-8 and colors 410 | 411 | ### Issue: "No sessions found" 412 | **Solution**: Verify the directory path and session folder structure (ses_* directories) 413 | 414 | ## 📞 Support 415 | 416 | If any tests fail: 417 | 1. Check the error message for specific details 418 | 2. Run with `--verbose` flag for more information 419 | 3. Verify dependencies are installed correctly 420 | 4. Check file permissions and paths 421 | 422 | --- 423 | 424 | **Happy Testing! 🎉** 425 | 426 | This guide should help you verify that all functionality works correctly. Each test builds on the previous one, so it's best to run them in order. 
-------------------------------------------------------------------------------- /ocmonitor/utils/time_utils.py: -------------------------------------------------------------------------------- 1 | """Time utility functions for OpenCode Monitor.""" 2 | 3 | from datetime import datetime, date, timedelta 4 | from typing import Optional, Tuple 5 | 6 | WEEKDAY_MAP = { 7 | 'monday': 0, 8 | 'tuesday': 1, 9 | 'wednesday': 2, 10 | 'thursday': 3, 11 | 'friday': 4, 12 | 'saturday': 5, 13 | 'sunday': 6 14 | } 15 | 16 | WEEKDAY_NAMES = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] 17 | 18 | WEEKDAY_MAP = { 19 | 'monday': 0, 20 | 'tuesday': 1, 21 | 'wednesday': 2, 22 | 'thursday': 3, 23 | 'friday': 4, 24 | 'saturday': 5, 25 | 'sunday': 6 26 | } 27 | 28 | WEEKDAY_NAMES = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] 29 | 30 | 31 | class TimeUtils: 32 | """Utility functions for time operations.""" 33 | 34 | @staticmethod 35 | def format_timestamp(timestamp_ms: Optional[int]) -> str: 36 | """Convert timestamp in milliseconds to readable format. 37 | 38 | Args: 39 | timestamp_ms: Timestamp in milliseconds 40 | 41 | Returns: 42 | Formatted timestamp string 43 | """ 44 | if timestamp_ms is None: 45 | return 'N/A' 46 | 47 | try: 48 | dt = datetime.fromtimestamp(timestamp_ms / 1000) 49 | return dt.strftime('%Y-%m-%d %H:%M:%S') 50 | except (ValueError, OSError): 51 | return 'Invalid' 52 | 53 | @staticmethod 54 | def format_duration(milliseconds: Optional[int]) -> str: 55 | """Format duration in milliseconds to hours and minutes format (e.g., "1h 30m"). 56 | 57 | Args: 58 | milliseconds: Duration in milliseconds 59 | 60 | Returns: 61 | Formatted duration string in "Xh Ym" format 62 | """ 63 | return TimeUtils.format_duration_hm(milliseconds) 64 | 65 | @staticmethod 66 | def format_duration_hm(milliseconds: Optional[int]) -> str: 67 | """Format duration in milliseconds to hours and minutes format (e.g., "1h 30m"). 
68 | 69 | Args: 70 | milliseconds: Duration in milliseconds 71 | 72 | Returns: 73 | Formatted duration string in "Xh Ym" format 74 | """ 75 | if milliseconds is None or milliseconds < 0: 76 | return 'N/A' 77 | 78 | total_seconds = milliseconds / 1000 79 | total_minutes = total_seconds / 60 80 | 81 | if total_minutes < 1: 82 | return f"{total_seconds:.0f}s" 83 | elif total_minutes < 60: 84 | return f"{total_minutes:.0f}m" 85 | else: 86 | hours = int(total_minutes // 60) 87 | minutes = int(total_minutes % 60) 88 | if minutes == 0: 89 | return f"{hours}h" 90 | else: 91 | return f"{hours}h {minutes}m" 92 | 93 | @staticmethod 94 | def parse_date_string(date_str: str) -> Optional[date]: 95 | """Parse date string in YYYY-MM-DD format. 96 | 97 | Args: 98 | date_str: Date string to parse 99 | 100 | Returns: 101 | Date object or None if parsing failed 102 | """ 103 | try: 104 | return datetime.strptime(date_str, '%Y-%m-%d').date() 105 | except ValueError: 106 | return None 107 | 108 | @staticmethod 109 | def parse_month_string(month_str: str) -> Optional[Tuple[int, int]]: 110 | """Parse month string in YYYY-MM format. 111 | 112 | Args: 113 | month_str: Month string to parse 114 | 115 | Returns: 116 | Tuple of (year, month) or None if parsing failed 117 | """ 118 | try: 119 | dt = datetime.strptime(month_str, '%Y-%m') 120 | return dt.year, dt.month 121 | except ValueError: 122 | return None 123 | 124 | @staticmethod 125 | def get_month_range(year: int, month: int) -> Tuple[date, date]: 126 | """Get the start and end dates for a given month. 
127 | 128 | Args: 129 | year: Year 130 | month: Month (1-12) 131 | 132 | Returns: 133 | Tuple of (start_date, end_date) for the month 134 | """ 135 | start_date = date(year, month, 1) 136 | 137 | # Get the first day of next month, then subtract one day 138 | if month == 12: 139 | next_month = date(year + 1, 1, 1) 140 | else: 141 | next_month = date(year, month + 1, 1) 142 | 143 | end_date = next_month - timedelta(days=1) 144 | return start_date, end_date 145 | 146 | @staticmethod 147 | def get_week_range(year: int, week: int) -> Tuple[date, date]: 148 | """Get the start and end dates for a given ISO week. 149 | 150 | Args: 151 | year: Year 152 | week: Week number (1-53) 153 | 154 | Returns: 155 | Tuple of (start_date, end_date) for the week 156 | """ 157 | # January 4th is always in the first week of the year 158 | jan_4 = date(year, 1, 4) 159 | week_start = jan_4 - timedelta(days=jan_4.weekday()) + timedelta(weeks=week-1) 160 | week_end = week_start + timedelta(days=6) 161 | return week_start, week_end 162 | 163 | @staticmethod 164 | def get_year_range(year: int) -> Tuple[date, date]: 165 | """Get the start and end dates for a given year. 166 | 167 | Args: 168 | year: Year 169 | 170 | Returns: 171 | Tuple of (start_date, end_date) for the year 172 | """ 173 | start_date = date(year, 1, 1) 174 | end_date = date(year, 12, 31) 175 | return start_date, end_date 176 | 177 | @staticmethod 178 | def get_current_month_range() -> Tuple[date, date]: 179 | """Get the start and end dates for the current month. 180 | 181 | Returns: 182 | Tuple of (start_date, end_date) for current month 183 | """ 184 | today = date.today() 185 | return TimeUtils.get_month_range(today.year, today.month) 186 | 187 | @staticmethod 188 | def get_current_week_range() -> Tuple[date, date]: 189 | """Get the start and end dates for the current week. 
190 | 191 | Returns: 192 | Tuple of (start_date, end_date) for current week 193 | """ 194 | today = date.today() 195 | year, week, _ = today.isocalendar() 196 | return TimeUtils.get_week_range(year, week) 197 | 198 | @staticmethod 199 | def get_custom_week_start(target_date: date, week_start_day: int = 0) -> date: 200 | """Get the start date of the week containing target_date. 201 | 202 | Args: 203 | target_date: The date to find the week start for 204 | week_start_day: Day of week to start on (0=Monday, 6=Sunday) 205 | 206 | Returns: 207 | Date of the week's start day 208 | 209 | Example: 210 | If target_date is 2025-10-23 (Thursday) and week_start_day is 6 (Sunday): 211 | Returns 2025-10-19 (the previous Sunday) 212 | """ 213 | current_weekday = target_date.weekday() 214 | days_back = (current_weekday - week_start_day) % 7 215 | week_start = target_date - timedelta(days=days_back) 216 | return week_start 217 | 218 | @staticmethod 219 | def get_custom_week_range(target_date: date, week_start_day: int = 0) -> Tuple[date, date]: 220 | """Get the start and end dates for the week containing target_date. 221 | 222 | Args: 223 | target_date: Date within the week 224 | week_start_day: Day of week to start on (0=Monday, 6=Sunday) 225 | 226 | Returns: 227 | Tuple of (start_date, end_date) for the week 228 | """ 229 | week_start = TimeUtils.get_custom_week_start(target_date, week_start_day) 230 | week_end = week_start + timedelta(days=6) 231 | return week_start, week_end 232 | 233 | @staticmethod 234 | def format_week_range(start_date: date, end_date: date) -> str: 235 | """Format week range as readable string. 
236 | 237 | Args: 238 | start_date: Week start date 239 | end_date: Week end date 240 | 241 | Returns: 242 | Formatted string like "Oct 19 - Oct 25, 2025" 243 | """ 244 | if start_date.year == end_date.year: 245 | if start_date.month == end_date.month: 246 | return f"{start_date.strftime('%b %d')} - {end_date.strftime('%d, %Y')}" 247 | else: 248 | return f"{start_date.strftime('%b %d')} - {end_date.strftime('%b %d, %Y')}" 249 | else: 250 | return f"{start_date.strftime('%b %d, %Y')} - {end_date.strftime('%b %d, %Y')}" 251 | 252 | @staticmethod 253 | def date_in_range(check_date: date, start_date: Optional[date], end_date: Optional[date]) -> bool: 254 | """Check if a date falls within a given range. 255 | 256 | Args: 257 | check_date: Date to check 258 | start_date: Start of range (inclusive), None means no start limit 259 | end_date: End of range (inclusive), None means no end limit 260 | 261 | Returns: 262 | True if date is in range, False otherwise 263 | """ 264 | if start_date and check_date < start_date: 265 | return False 266 | if end_date and check_date > end_date: 267 | return False 268 | return True 269 | 270 | @staticmethod 271 | def datetime_in_range(check_datetime: datetime, start_date: Optional[date], end_date: Optional[date]) -> bool: 272 | """Check if a datetime falls within a given date range. 273 | 274 | Args: 275 | check_datetime: Datetime to check 276 | start_date: Start of range (inclusive), None means no start limit 277 | end_date: End of range (inclusive), None means no end limit 278 | 279 | Returns: 280 | True if datetime is in range, False otherwise 281 | """ 282 | check_date = check_datetime.date() 283 | return TimeUtils.date_in_range(check_date, start_date, end_date) 284 | 285 | @staticmethod 286 | def get_relative_time_description(dt: datetime) -> str: 287 | """Get a human-readable description of how long ago a datetime was. 
288 | 289 | Args: 290 | dt: Datetime to describe 291 | 292 | Returns: 293 | Human-readable relative time description 294 | """ 295 | now = datetime.now() 296 | diff = now - dt 297 | 298 | if diff.total_seconds() < 60: 299 | return "just now" 300 | elif diff.total_seconds() < 3600: 301 | minutes = int(diff.total_seconds() / 60) 302 | return f"{minutes} minute{'s' if minutes != 1 else ''} ago" 303 | elif diff.total_seconds() < 86400: 304 | hours = int(diff.total_seconds() / 3600) 305 | return f"{hours} hour{'s' if hours != 1 else ''} ago" 306 | elif diff.days < 7: 307 | return f"{diff.days} day{'s' if diff.days != 1 else ''} ago" 308 | elif diff.days < 30: 309 | weeks = diff.days // 7 310 | return f"{weeks} week{'s' if weeks != 1 else ''} ago" 311 | else: 312 | months = diff.days // 30 313 | return f"{months} month{'s' if months != 1 else ''} ago" 314 | 315 | @staticmethod 316 | def format_date_range(start_date: Optional[date], end_date: Optional[date]) -> str: 317 | """Format a date range as a human-readable string. 
318 | 319 | Args: 320 | start_date: Start date (None means open-ended) 321 | end_date: End date (None means open-ended) 322 | 323 | Returns: 324 | Formatted date range string 325 | """ 326 | if start_date is None and end_date is None: 327 | return "All time" 328 | elif start_date is None: 329 | return f"Up to {end_date.strftime('%Y-%m-%d')}" 330 | elif end_date is None: 331 | return f"From {start_date.strftime('%Y-%m-%d')}" 332 | elif start_date == end_date: 333 | return start_date.strftime('%Y-%m-%d') 334 | else: 335 | return f"{start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}" -------------------------------------------------------------------------------- /ocmonitor/services/live_monitor.py: -------------------------------------------------------------------------------- 1 | """Live monitoring service for OpenCode Monitor.""" 2 | 3 | import time 4 | from datetime import datetime 5 | from pathlib import Path 6 | from typing import Optional, Dict, Any 7 | from rich.live import Live 8 | from rich.console import Console 9 | 10 | from ..models.session import SessionData, InteractionFile 11 | from ..utils.file_utils import FileProcessor 12 | from ..ui.dashboard import DashboardUI 13 | from ..config import ModelPricing 14 | 15 | 16 | class LiveMonitor: 17 | """Service for live monitoring of OpenCode sessions.""" 18 | 19 | def __init__(self, pricing_data: Dict[str, ModelPricing], console: Optional[Console] = None): 20 | """Initialize live monitor. 21 | 22 | Args: 23 | pricing_data: Model pricing information 24 | console: Rich console for output 25 | """ 26 | self.pricing_data = pricing_data 27 | self.console = console or Console() 28 | self.dashboard_ui = DashboardUI(console) 29 | 30 | def start_monitoring(self, base_path: str, refresh_interval: int = 5): 31 | """Start live monitoring of the most recent session. 
32 | 33 | Args: 34 | base_path: Path to directory containing sessions 35 | refresh_interval: Update interval in seconds 36 | """ 37 | try: 38 | # Find the most recent session 39 | recent_session = FileProcessor.get_most_recent_session(base_path) 40 | if not recent_session: 41 | self.console.print(f"[red]No sessions found in {base_path}[/red]") 42 | return 43 | 44 | self.console.print(f"[green]Starting live monitoring of session: {recent_session.session_id}[/green]") 45 | self.console.print(f"[cyan]Update interval: {refresh_interval} seconds[/cyan]") 46 | self.console.print("[dim]Press Ctrl+C to exit[/dim]\n") 47 | 48 | # Start live monitoring 49 | with Live( 50 | self._generate_dashboard(recent_session), 51 | refresh_per_second=1/refresh_interval, 52 | console=self.console 53 | ) as live: 54 | while True: 55 | # Check for most recent session (might be a new one!) 56 | most_recent = FileProcessor.get_most_recent_session(base_path) 57 | 58 | if most_recent: 59 | # If we detected a different session, switch to it 60 | if most_recent.session_id != recent_session.session_id: 61 | recent_session = most_recent 62 | self.console.print(f"\n[yellow]New session detected: {recent_session.session_id}[/yellow]") 63 | else: 64 | # Same session, just reload its data 65 | updated_session = FileProcessor.load_session_data(recent_session.session_path) 66 | if updated_session: 67 | recent_session = updated_session 68 | 69 | # Update dashboard 70 | live.update(self._generate_dashboard(recent_session)) 71 | time.sleep(refresh_interval) 72 | 73 | except KeyboardInterrupt: 74 | self.console.print("\n[yellow]Live monitoring stopped.[/yellow]") 75 | 76 | def _generate_dashboard(self, session: SessionData): 77 | """Generate dashboard layout for the session. 
78 | 79 | Args: 80 | session: Session to monitor 81 | 82 | Returns: 83 | Rich layout for the dashboard 84 | """ 85 | # Get the most recent file 86 | recent_file = None 87 | if session.files: 88 | recent_file = max(session.files, key=lambda f: f.modification_time) 89 | 90 | # Calculate burn rate 91 | burn_rate = self._calculate_burn_rate(session) 92 | 93 | # Get model pricing for quota and context window 94 | quota = None 95 | context_window = 200000 # Default 96 | 97 | if recent_file and recent_file.model_id in self.pricing_data: 98 | model_pricing = self.pricing_data[recent_file.model_id] 99 | quota = model_pricing.session_quota 100 | context_window = model_pricing.context_window 101 | 102 | return self.dashboard_ui.create_dashboard_layout( 103 | session=session, 104 | recent_file=recent_file, 105 | pricing_data=self.pricing_data, 106 | burn_rate=burn_rate, 107 | quota=quota, 108 | context_window=context_window 109 | ) 110 | 111 | def _calculate_burn_rate(self, session: SessionData) -> float: 112 | """Calculate token burn rate for a session (total tokens / total session time). 113 | 114 | Args: 115 | session: SessionData object 116 | 117 | Returns: 118 | Tokens per minute for the entire session 119 | """ 120 | # Get total tokens for the session 121 | total_tokens = session.total_tokens.total 122 | 123 | # If no tokens, return 0 124 | if total_tokens == 0: 125 | return 0.0 126 | 127 | # Calculate session duration from start time to now 128 | if session.start_time: 129 | current_time = datetime.now() 130 | session_duration = current_time - session.start_time 131 | duration_minutes = session_duration.total_seconds() / 60 132 | 133 | if duration_minutes > 0: 134 | return total_tokens / duration_minutes 135 | 136 | return 0.0 137 | 138 | def get_session_status(self, base_path: str) -> Dict[str, Any]: 139 | """Get current status of the most recent session. 
140 | 141 | Args: 142 | base_path: Path to directory containing sessions 143 | 144 | Returns: 145 | Dictionary with session status information 146 | """ 147 | recent_session = FileProcessor.get_most_recent_session(base_path) 148 | if not recent_session: 149 | return { 150 | 'status': 'no_sessions', 151 | 'message': 'No sessions found' 152 | } 153 | 154 | recent_file = None 155 | if recent_session.files: 156 | recent_file = max(recent_session.files, key=lambda f: f.modification_time) 157 | 158 | # Calculate how long ago the last activity was 159 | last_activity = None 160 | if recent_file: 161 | last_activity = time.time() - recent_file.modification_time.timestamp() 162 | 163 | # Determine activity status 164 | activity_status = 'unknown' 165 | if last_activity is not None: 166 | if last_activity < 60: # Less than 1 minute 167 | activity_status = 'active' 168 | elif last_activity < 300: # Less than 5 minutes 169 | activity_status = 'recent' 170 | elif last_activity < 1800: # Less than 30 minutes 171 | activity_status = 'idle' 172 | else: 173 | activity_status = 'inactive' 174 | 175 | return { 176 | 'status': 'found', 177 | 'session_id': recent_session.session_id, 178 | 'interaction_count': recent_session.interaction_count, 179 | 'total_tokens': recent_session.total_tokens.total, 180 | 'total_cost': float(recent_session.calculate_total_cost(self.pricing_data)), 181 | 'models_used': recent_session.models_used, 182 | 'last_activity_seconds': last_activity, 183 | 'activity_status': activity_status, 184 | 'burn_rate': self._calculate_burn_rate(recent_session), 185 | 'recent_file': { 186 | 'name': recent_file.file_name, 187 | 'model': recent_file.model_id, 188 | 'tokens': recent_file.tokens.total 189 | } if recent_file else None 190 | } 191 | 192 | def monitor_single_update(self, base_path: str) -> Optional[Dict[str, Any]]: 193 | """Get a single update of the monitoring data. 
194 | 195 | Args: 196 | base_path: Path to directory containing sessions 197 | 198 | Returns: 199 | Monitoring data or None if no session found 200 | """ 201 | recent_session = FileProcessor.get_most_recent_session(base_path) 202 | if not recent_session: 203 | return None 204 | 205 | recent_file = None 206 | if recent_session.files: 207 | recent_file = max(recent_session.files, key=lambda f: f.modification_time) 208 | 209 | return { 210 | 'timestamp': time.time(), 211 | 'session': { 212 | 'id': recent_session.session_id, 213 | 'interaction_count': recent_session.interaction_count, 214 | 'total_tokens': recent_session.total_tokens.model_dump(), 215 | 'total_cost': float(recent_session.calculate_total_cost(self.pricing_data)), 216 | 'models_used': recent_session.models_used 217 | }, 218 | 'recent_interaction': { 219 | 'file_name': recent_file.file_name, 220 | 'model_id': recent_file.model_id, 221 | 'tokens': recent_file.tokens.model_dump(), 222 | 'cost': float(recent_file.calculate_cost(self.pricing_data)), 223 | 'modification_time': recent_file.modification_time.isoformat() 224 | } if recent_file else None, 225 | 'burn_rate': self._calculate_burn_rate(recent_session), 226 | 'context_usage': self._calculate_context_usage(recent_file) if recent_file else None 227 | } 228 | 229 | def _calculate_context_usage(self, interaction_file: InteractionFile) -> Dict[str, Any]: 230 | """Calculate context window usage for an interaction. 
231 | 232 | Args: 233 | interaction_file: Interaction file to analyze 234 | 235 | Returns: 236 | Context usage information 237 | """ 238 | if interaction_file.model_id not in self.pricing_data: 239 | return { 240 | 'context_size': 0, 241 | 'context_window': 200000, 242 | 'usage_percentage': 0.0 243 | } 244 | 245 | model_pricing = self.pricing_data[interaction_file.model_id] 246 | context_window = model_pricing.context_window 247 | 248 | # Context size = input + cache read + cache write 249 | context_size = ( 250 | interaction_file.tokens.input + 251 | interaction_file.tokens.cache_read + 252 | interaction_file.tokens.cache_write 253 | ) 254 | 255 | usage_percentage = (context_size / context_window) * 100 if context_window > 0 else 0 256 | 257 | return { 258 | 'context_size': context_size, 259 | 'context_window': context_window, 260 | 'usage_percentage': min(100.0, usage_percentage) 261 | } 262 | 263 | def validate_monitoring_setup(self, base_path: str) -> Dict[str, Any]: 264 | """Validate that monitoring can be set up properly. 
265 | 266 | Args: 267 | base_path: Path to directory containing sessions 268 | 269 | Returns: 270 | Validation results 271 | """ 272 | issues = [] 273 | warnings = [] 274 | 275 | # Check if base path exists 276 | base_path_obj = Path(base_path) 277 | if not base_path_obj.exists(): 278 | issues.append(f"Base path does not exist: {base_path}") 279 | return { 280 | 'valid': False, 281 | 'issues': issues, 282 | 'warnings': warnings 283 | } 284 | 285 | if not base_path_obj.is_dir(): 286 | issues.append(f"Base path is not a directory: {base_path}") 287 | return { 288 | 'valid': False, 289 | 'issues': issues, 290 | 'warnings': warnings 291 | } 292 | 293 | # Check for session directories 294 | session_dirs = FileProcessor.find_session_directories(base_path) 295 | if not session_dirs: 296 | warnings.append("No session directories found") 297 | else: 298 | # Check most recent session 299 | recent_session = FileProcessor.load_session_data(session_dirs[0]) 300 | if not recent_session: 301 | warnings.append("Most recent session directory contains no valid data") 302 | elif not recent_session.files: 303 | warnings.append("Most recent session has no interaction files") 304 | 305 | # Check pricing data 306 | if not self.pricing_data: 307 | warnings.append("No pricing data available - costs will show as $0.00") 308 | 309 | return { 310 | 'valid': len(issues) == 0, 311 | 'issues': issues, 312 | 'warnings': warnings, 313 | 'session_directories_found': len(session_dirs), 314 | 'most_recent_session': session_dirs[0].name if session_dirs else None 315 | } -------------------------------------------------------------------------------- /ocmonitor/utils/file_utils.py: -------------------------------------------------------------------------------- 1 | """File utility functions for OpenCode Monitor.""" 2 | 3 | import json 4 | import os 5 | from pathlib import Path 6 | from typing import Dict, List, Optional, Any, Generator 7 | from datetime import datetime 8 | 9 | from ..models.session 
import SessionData, InteractionFile, TokenUsage, TimeData 10 | 11 | 12 | class FileProcessor: 13 | """Handles file processing and session discovery.""" 14 | 15 | @staticmethod 16 | def find_session_directories(base_path: str) -> List[Path]: 17 | """Find all session directories in the base path. 18 | 19 | Args: 20 | base_path: Path to search for session directories 21 | 22 | Returns: 23 | List of session directory paths sorted by modification time (newest first) 24 | """ 25 | base_dir = Path(base_path) 26 | if not base_dir.exists(): 27 | return [] 28 | 29 | # Find all directories that start with 'ses_' 30 | session_dirs = [ 31 | d for d in base_dir.iterdir() 32 | if d.is_dir() and d.name.startswith('ses_') 33 | ] 34 | 35 | # Sort by modification time (most recent first) 36 | session_dirs.sort(key=lambda x: x.stat().st_mtime, reverse=True) 37 | return session_dirs 38 | 39 | @staticmethod 40 | def find_json_files(directory: Path) -> List[Path]: 41 | """Find all JSON files in a directory. 42 | 43 | Args: 44 | directory: Directory to search 45 | 46 | Returns: 47 | List of JSON file paths sorted by modification time (newest first) 48 | """ 49 | if not directory.exists() or not directory.is_dir(): 50 | return [] 51 | 52 | json_files = list(directory.glob("*.json")) 53 | json_files.sort(key=lambda x: x.stat().st_mtime, reverse=True) 54 | return json_files 55 | 56 | @staticmethod 57 | def load_json_file(file_path: Path) -> Optional[Dict[str, Any]]: 58 | """Load and parse a JSON file. 59 | 60 | Args: 61 | file_path: Path to JSON file 62 | 63 | Returns: 64 | Parsed JSON data or None if failed 65 | """ 66 | try: 67 | with open(file_path, 'r', encoding='utf-8') as f: 68 | return json.load(f) 69 | except (json.JSONDecodeError, FileNotFoundError, PermissionError, UnicodeDecodeError): 70 | return None 71 | 72 | @staticmethod 73 | def _extract_model_name(model_id: str) -> str: 74 | """Extract model name from fully qualified model ID. 
75 | 76 | Args: 77 | model_id: Full model ID (e.g., 'qwen/qwen3-coder' or 'claude-sonnet-4-20250514') 78 | 79 | Returns: 80 | Extracted model name 81 | """ 82 | if '/' in model_id: 83 | return model_id 84 | return model_id 85 | 86 | @staticmethod 87 | def extract_project_name(path_str: str) -> str: 88 | """Extract project name from a file path. 89 | 90 | Args: 91 | path_str: Full path string (e.g., '/Users/shelli/Documents/apps/ocmonitor') 92 | 93 | Returns: 94 | Project name (last directory in path) or 'Unknown' if empty 95 | """ 96 | if not path_str: 97 | return "Unknown" 98 | 99 | path = Path(path_str) 100 | return path.name if path.name else "Unknown" 101 | 102 | @staticmethod 103 | def get_opencode_storage_path() -> Optional[Path]: 104 | """Get the OpenCode storage path. 105 | 106 | Returns: 107 | Path to OpenCode storage directory or None if not found 108 | """ 109 | # Try to get from configuration first 110 | try: 111 | from ..config import config_manager 112 | storage_path = Path(config_manager.config.paths.opencode_storage_dir) 113 | if storage_path.exists(): 114 | return storage_path 115 | except ImportError: 116 | pass 117 | 118 | # Standard OpenCode storage location as fallback 119 | home = Path.home() 120 | storage_path = home / ".local" / "share" / "opencode" / "storage" 121 | 122 | if storage_path.exists(): 123 | return storage_path 124 | 125 | return None 126 | 127 | @staticmethod 128 | def find_session_title(session_id: str) -> Optional[str]: 129 | """Find and load session title from OpenCode storage. 
130 | 131 | Args: 132 | session_id: Session ID to search for 133 | 134 | Returns: 135 | Session title or None if not found 136 | """ 137 | storage_path = FileProcessor.get_opencode_storage_path() 138 | if not storage_path: 139 | return None 140 | 141 | session_storage = storage_path / "session" 142 | if not session_storage.exists(): 143 | return None 144 | 145 | # Search through all project directories (including global) 146 | for project_dir in session_storage.iterdir(): 147 | if not project_dir.is_dir(): 148 | continue 149 | 150 | session_file = project_dir / f"{session_id}.json" 151 | if session_file.exists(): 152 | session_data = FileProcessor.load_json_file(session_file) 153 | if session_data and "title" in session_data: 154 | return session_data["title"] 155 | 156 | return None 157 | 158 | @staticmethod 159 | def parse_interaction_file(file_path: Path, session_id: str) -> Optional[InteractionFile]: 160 | """Parse a single interaction JSON file. 161 | 162 | Args: 163 | file_path: Path to the interaction file 164 | session_id: ID of the session this file belongs to 165 | 166 | Returns: 167 | InteractionFile object or None if parsing failed 168 | """ 169 | data = FileProcessor.load_json_file(file_path) 170 | if not data: 171 | return None 172 | 173 | try: 174 | # Extract basic information 175 | model_id = data.get('modelID', 'unknown') 176 | 177 | # Handle fully qualified model names 178 | model_id = FileProcessor._extract_model_name(model_id) 179 | 180 | # Extract token usage 181 | tokens_data = data.get('tokens', {}) 182 | cache_data = tokens_data.get('cache', {}) 183 | 184 | tokens = TokenUsage( 185 | input=tokens_data.get('input', 0), 186 | output=tokens_data.get('output', 0), 187 | cache_write=cache_data.get('write', 0), 188 | cache_read=cache_data.get('read', 0) 189 | ) 190 | 191 | # Extract time data 192 | time_data = None 193 | if 'time' in data: 194 | time_info = data['time'] 195 | time_data = TimeData( 196 | created=time_info.get('created'), 197 | 
completed=time_info.get('completed') 198 | ) 199 | 200 | # Extract project path data 201 | project_path = None 202 | if 'path' in data: 203 | path_info = data['path'] 204 | # Use 'cwd' as the project path, fallback to 'root' if needed 205 | project_path = path_info.get('cwd') or path_info.get('root') 206 | 207 | return InteractionFile( 208 | file_path=file_path, 209 | session_id=session_id, 210 | model_id=model_id, 211 | tokens=tokens, 212 | time_data=time_data, 213 | project_path=project_path, 214 | raw_data=data 215 | ) 216 | 217 | except (KeyError, ValueError, TypeError): 218 | return None 219 | 220 | @staticmethod 221 | def load_session_data(session_path: Path) -> Optional[SessionData]: 222 | """Load complete session data from a session directory. 223 | 224 | Args: 225 | session_path: Path to session directory 226 | 227 | Returns: 228 | SessionData object or None if loading failed 229 | """ 230 | if not session_path.exists() or not session_path.is_dir(): 231 | return None 232 | 233 | session_id = session_path.name 234 | json_files = FileProcessor.find_json_files(session_path) 235 | 236 | if not json_files: 237 | return None 238 | 239 | interaction_files = [] 240 | for json_file in json_files: 241 | interaction = FileProcessor.parse_interaction_file(json_file, session_id) 242 | if interaction: 243 | # Filter out interactions with zero token usage 244 | if interaction.tokens.total > 0: 245 | interaction_files.append(interaction) 246 | 247 | if not interaction_files: 248 | return None 249 | 250 | # Load session title from OpenCode storage 251 | session_title = FileProcessor.find_session_title(session_id) 252 | 253 | return SessionData( 254 | session_id=session_id, 255 | session_path=session_path, 256 | files=interaction_files, 257 | session_title=session_title 258 | ) 259 | 260 | @staticmethod 261 | def get_most_recent_session(base_path: str) -> Optional[SessionData]: 262 | """Get the most recently modified session. 
263 | 264 | Args: 265 | base_path: Path to search for sessions 266 | 267 | Returns: 268 | Most recent SessionData or None if no sessions found 269 | """ 270 | session_dirs = FileProcessor.find_session_directories(base_path) 271 | if not session_dirs: 272 | return None 273 | 274 | return FileProcessor.load_session_data(session_dirs[0]) 275 | 276 | @staticmethod 277 | def get_most_recent_file(session_path: Path) -> Optional[InteractionFile]: 278 | """Get the most recently modified file in a session. 279 | 280 | Args: 281 | session_path: Path to session directory 282 | 283 | Returns: 284 | Most recent InteractionFile or None if no files found 285 | """ 286 | json_files = FileProcessor.find_json_files(session_path) 287 | if not json_files: 288 | return None 289 | 290 | session_id = session_path.name 291 | return FileProcessor.parse_interaction_file(json_files[0], session_id) 292 | 293 | @staticmethod 294 | def load_all_sessions(base_path: str, limit: Optional[int] = None) -> List[SessionData]: 295 | """Load all sessions from the base path. 296 | 297 | Args: 298 | base_path: Path to search for sessions 299 | limit: Maximum number of sessions to load (None for all) 300 | 301 | Returns: 302 | List of SessionData objects 303 | """ 304 | session_dirs = FileProcessor.find_session_directories(base_path) 305 | 306 | if limit: 307 | session_dirs = session_dirs[:limit] 308 | 309 | sessions = [] 310 | for session_dir in session_dirs: 311 | session_data = FileProcessor.load_session_data(session_dir) 312 | if session_data: 313 | sessions.append(session_data) 314 | 315 | return sessions 316 | 317 | @staticmethod 318 | def session_generator(base_path: str) -> Generator[SessionData, None, None]: 319 | """Generator that yields sessions one by one (memory efficient). 
320 | 321 | Args: 322 | base_path: Path to search for sessions 323 | 324 | Yields: 325 | SessionData objects 326 | """ 327 | session_dirs = FileProcessor.find_session_directories(base_path) 328 | 329 | for session_dir in session_dirs: 330 | session_data = FileProcessor.load_session_data(session_dir) 331 | if session_data: 332 | yield session_data 333 | 334 | @staticmethod 335 | def validate_session_structure(session_path: Path) -> bool: 336 | """Validate that a directory contains valid session structure. 337 | 338 | Args: 339 | session_path: Path to potential session directory 340 | 341 | Returns: 342 | True if valid session structure, False otherwise 343 | """ 344 | if not session_path.exists() or not session_path.is_dir(): 345 | return False 346 | 347 | if not session_path.name.startswith('ses_'): 348 | return False 349 | 350 | json_files = FileProcessor.find_json_files(session_path) 351 | if not json_files: 352 | return False 353 | 354 | # Check if at least one file has valid structure 355 | for json_file in json_files[:3]: # Check first 3 files 356 | data = FileProcessor.load_json_file(json_file) 357 | if data and ('tokens' in data or 'modelID' in data): 358 | return True 359 | 360 | return False 361 | 362 | @staticmethod 363 | def get_session_stats(session_path: Path) -> Dict[str, Any]: 364 | """Get basic statistics about a session without loading all data. 
365 | 366 | Args: 367 | session_path: Path to session directory 368 | 369 | Returns: 370 | Dictionary with basic session statistics 371 | """ 372 | if not FileProcessor.validate_session_structure(session_path): 373 | return {} 374 | 375 | json_files = FileProcessor.find_json_files(session_path) 376 | 377 | stats = { 378 | 'session_id': session_path.name, 379 | 'file_count': len(json_files), 380 | 'first_file': None, 381 | 'last_file': None, 382 | 'total_size_bytes': 0 383 | } 384 | 385 | if json_files: 386 | stats['first_file'] = json_files[-1].name # Oldest file 387 | stats['last_file'] = json_files[0].name # Newest file 388 | 389 | # Calculate total size 390 | for json_file in json_files: 391 | try: 392 | stats['total_size_bytes'] += json_file.stat().st_size 393 | except OSError: 394 | pass 395 | 396 | return stats -------------------------------------------------------------------------------- /ocmonitor/utils/error_handling.py: -------------------------------------------------------------------------------- 1 | """Error handling utilities for OpenCode Monitor.""" 2 | 3 | import traceback 4 | from typing import Optional, Dict, Any, Callable, TypeVar, Union 5 | from functools import wraps 6 | from pathlib import Path 7 | import logging 8 | 9 | # Set up logging 10 | logger = logging.getLogger(__name__) 11 | 12 | # Type variable for decorated functions 13 | F = TypeVar('F', bound=Callable[..., Any]) 14 | 15 | 16 | class OCMonitorError(Exception): 17 | """Base exception for OpenCode Monitor.""" 18 | 19 | def __init__(self, message: str, details: Optional[Dict[str, Any]] = None): 20 | super().__init__(message) 21 | self.message = message 22 | self.details = details or {} 23 | 24 | 25 | class ConfigurationError(OCMonitorError): 26 | """Raised when there's a configuration problem.""" 27 | pass 28 | 29 | 30 | class DataProcessingError(OCMonitorError): 31 | """Raised when there's an error processing session data.""" 32 | pass 33 | 34 | 35 | class 
FileSystemError(OCMonitorError): 36 | """Raised when there's a file system related error.""" 37 | pass 38 | 39 | 40 | class ValidationError(OCMonitorError): 41 | """Raised when data validation fails.""" 42 | pass 43 | 44 | 45 | class ExportError(OCMonitorError): 46 | """Raised when export operations fail.""" 47 | pass 48 | 49 | 50 | class ErrorHandler: 51 | """Centralized error handling for OpenCode Monitor.""" 52 | 53 | def __init__(self, verbose: bool = False): 54 | """Initialize error handler. 55 | 56 | Args: 57 | verbose: Whether to show detailed error information 58 | """ 59 | self.verbose = verbose 60 | 61 | def handle_error(self, error: Exception, context: str = "") -> Dict[str, Any]: 62 | """Handle an error and return structured error information. 63 | 64 | Args: 65 | error: The exception that occurred 66 | context: Additional context about where the error occurred 67 | 68 | Returns: 69 | Dictionary with error information 70 | """ 71 | error_info = { 72 | 'error_type': type(error).__name__, 73 | 'message': str(error), 74 | 'context': context, 75 | 'success': False 76 | } 77 | 78 | # Add details if it's our custom exception 79 | if isinstance(error, OCMonitorError): 80 | error_info['details'] = error.details 81 | 82 | # Add traceback in verbose mode 83 | if self.verbose: 84 | error_info['traceback'] = traceback.format_exc() 85 | 86 | # Log the error 87 | logger.error(f"Error in {context}: {error_info['error_type']}: {error_info['message']}") 88 | if self.verbose: 89 | logger.debug(f"Traceback: {error_info.get('traceback', 'N/A')}") 90 | 91 | return error_info 92 | 93 | def safe_execute(self, func: Callable, *args, context: str = "", **kwargs) -> Dict[str, Any]: 94 | """Safely execute a function and handle any errors. 
95 | 96 | Args: 97 | func: Function to execute 98 | *args: Positional arguments for the function 99 | context: Context description for error handling 100 | **kwargs: Keyword arguments for the function 101 | 102 | Returns: 103 | Dictionary with result or error information 104 | """ 105 | try: 106 | result = func(*args, **kwargs) 107 | return { 108 | 'success': True, 109 | 'result': result 110 | } 111 | except Exception as e: 112 | return self.handle_error(e, context) 113 | 114 | 115 | def handle_errors(error_handler: Optional[ErrorHandler] = None, context: str = ""): 116 | """Decorator for handling errors in functions. 117 | 118 | Args: 119 | error_handler: ErrorHandler instance to use 120 | context: Context description for the operation 121 | 122 | Returns: 123 | Decorated function 124 | """ 125 | def decorator(func: F) -> F: 126 | @wraps(func) 127 | def wrapper(*args, **kwargs): 128 | handler = error_handler or ErrorHandler() 129 | func_context = context or f"{func.__module__}.{func.__name__}" 130 | return handler.safe_execute(func, *args, context=func_context, **kwargs) 131 | return wrapper 132 | return decorator 133 | 134 | 135 | def validate_path(path: Union[str, Path], must_exist: bool = True, must_be_dir: bool = False) -> Path: 136 | """Validate a file system path. 
def validate_path(path: Union[str, Path], must_exist: bool = True, must_be_dir: bool = False) -> Path:
    """Validate a file system path.

    Args:
        path: Path to validate
        must_exist: Whether the path must exist
        must_be_dir: Whether the path must be a directory

    Returns:
        Validated Path object

    Raises:
        ValidationError: If validation fails
    """
    try:
        path_obj = Path(path)

        if must_exist and not path_obj.exists():
            raise ValidationError(f"Path does not exist: {path}")

        # Bug fix: the directory check previously ran only when must_exist was
        # also True, so an existing *file* passed must_be_dir=True whenever
        # existence was optional. Check the type whenever the path actually
        # exists on disk.
        if must_be_dir and path_obj.exists() and not path_obj.is_dir():
            raise ValidationError(f"Path is not a directory: {path}")

        return path_obj

    except (TypeError, ValueError) as e:
        raise ValidationError(f"Invalid path: {path}") from e
def safe_json_load(file_path: Path) -> Dict[str, Any]:
    """Safely load JSON data from a file.

    Args:
        file_path: Path to JSON file

    Returns:
        Parsed JSON data

    Raises:
        FileSystemError: If the file is missing or unreadable
        DataProcessingError: If the file is not valid JSON or not valid text
    """
    # Bug fix: `import json` used to sit *inside* the try body, after open().
    # If open() raised PermissionError, evaluating the
    # `except json.JSONDecodeError` clause hit a NameError because the module
    # had not been imported yet. Import it before the try block instead.
    import json

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        raise FileSystemError(f"JSON file not found: {file_path}")
    except json.JSONDecodeError as e:
        raise DataProcessingError(
            f"Invalid JSON in file: {file_path}",
            details={'json_error': str(e), 'line': e.lineno, 'column': e.colno}
        )
    except PermissionError:
        raise FileSystemError(f"Permission denied reading file: {file_path}")
    except UnicodeDecodeError:
        raise DataProcessingError(f"File encoding error: {file_path}")
def validate_session_data(session_data: Dict[str, Any]) -> Dict[str, Any]:
    """Validate session data structure.

    Args:
        session_data: Session data to validate

    Returns:
        Validated session data (the same object, unchanged)

    Raises:
        ValidationError: If validation fails
    """
    # Presence check for the mandatory top-level fields.
    for required in ('tokens',):
        if required not in session_data:
            raise ValidationError(
                f"Missing required field in session data: {required}",
                details={'data': session_data}
            )

    tokens = session_data['tokens']
    if not isinstance(tokens, dict):
        raise ValidationError("Tokens field must be a dictionary")

    # Token counters, when present, must be non-negative integers.
    for counter in ('input', 'output'):
        if counter in tokens and not (isinstance(tokens[counter], int) and tokens[counter] >= 0):
            raise ValidationError(
                f"Token field '{counter}' must be a non-negative integer",
                details={'value': tokens[counter]}
            )

    return session_data
def create_user_friendly_error(error: Exception) -> str:
    """Create a user-friendly error message.

    Args:
        error: Exception to format

    Returns:
        User-friendly error message
    """
    # Ordered mapping: the first matching type wins, so the OCMonitor-specific
    # classes are listed before the builtins they could otherwise shadow.
    labelled_types = (
        (ConfigurationError, "Configuration error"),
        (FileSystemError, "File system error"),
        (DataProcessingError, "Data processing error"),
        (ValidationError, "Validation error"),
        (ExportError, "Export error"),
    )
    for exc_type, label in labelled_types:
        if isinstance(error, exc_type):
            return f"{label}: {error.message}"

    if isinstance(error, FileNotFoundError):
        return f"File not found: {error.filename or 'Unknown file'}"
    if isinstance(error, PermissionError):
        return f"Permission denied: {error.filename or 'Unknown file'}"
    if isinstance(error, KeyboardInterrupt):
        return "Operation cancelled by user"
    return f"Unexpected error: {str(error)}"
336 | 337 | Args: 338 | error: Exception that occurred 339 | 340 | Returns: 341 | OperationResult with success=False 342 | """ 343 | return cls(success=False, error=error) 344 | 345 | def get_data_or_raise(self) -> Any: 346 | """Get the data or raise the error. 347 | 348 | Returns: 349 | Result data 350 | 351 | Raises: 352 | Exception: If the operation failed 353 | """ 354 | if self.success: 355 | return self.data 356 | else: 357 | raise self.error or RuntimeError("Operation failed with no error details") 358 | 359 | def get_error_message(self) -> str: 360 | """Get a user-friendly error message. 361 | 362 | Returns: 363 | Error message or empty string if successful 364 | """ 365 | if self.success: 366 | return "" 367 | else: 368 | return create_user_friendly_error(self.error) if self.error else "Unknown error" 369 | 370 | 371 | def retry_operation(func: Callable, max_retries: int = 3, delay: float = 1.0) -> OperationResult: 372 | """Retry an operation with exponential backoff. 373 | 374 | Args: 375 | func: Function to retry 376 | max_retries: Maximum number of retries 377 | delay: Initial delay between retries 378 | 379 | Returns: 380 | OperationResult with the final result 381 | """ 382 | import time 383 | 384 | for attempt in range(max_retries + 1): 385 | try: 386 | result = func() 387 | return OperationResult.success_result(result) 388 | except Exception as e: 389 | if attempt == max_retries: 390 | return OperationResult.error_result(e) 391 | 392 | time.sleep(delay * (2 ** attempt)) # Exponential backoff 393 | 394 | return OperationResult.error_result(RuntimeError("Retry operation failed")) 395 | 396 | 397 | def graceful_shutdown(cleanup_func: Optional[Callable] = None): 398 | """Decorator for graceful shutdown handling. 
399 | 400 | Args: 401 | cleanup_func: Optional cleanup function to call 402 | 403 | Returns: 404 | Decorated function 405 | """ 406 | def decorator(func: F) -> F: 407 | @wraps(func) 408 | def wrapper(*args, **kwargs): 409 | try: 410 | return func(*args, **kwargs) 411 | except KeyboardInterrupt: 412 | if cleanup_func: 413 | try: 414 | cleanup_func() 415 | except Exception as cleanup_error: 416 | logger.error(f"Error during cleanup: {cleanup_error}") 417 | raise 418 | except Exception as e: 419 | if cleanup_func: 420 | try: 421 | cleanup_func() 422 | except Exception as cleanup_error: 423 | logger.error(f"Error during cleanup: {cleanup_error}") 424 | raise 425 | return wrapper 426 | return decorator -------------------------------------------------------------------------------- /ocmonitor/ui/dashboard.py: -------------------------------------------------------------------------------- 1 | """Live dashboard UI components for OpenCode Monitor.""" 2 | 3 | import os 4 | import time 5 | from typing import Dict, Any, Optional 6 | from decimal import Decimal 7 | from datetime import datetime 8 | from rich.console import Console 9 | from rich.panel import Panel 10 | from rich.columns import Columns 11 | from rich.text import Text 12 | from rich.table import Table 13 | from rich.progress import Progress, BarColumn, TextColumn 14 | from rich.live import Live 15 | from rich.layout import Layout 16 | 17 | from ..models.session import SessionData, TokenUsage 18 | from ..utils.time_utils import TimeUtils 19 | 20 | 21 | class DashboardUI: 22 | """UI components for the live dashboard.""" 23 | 24 | def __init__(self, console: Optional[Console] = None): 25 | """Initialize dashboard UI. 26 | 27 | Args: 28 | console: Rich console instance. If None, creates a new one. 
29 | """ 30 | self.console = console or Console() 31 | 32 | def create_header(self, session: SessionData) -> Panel: 33 | """Create header panel with session info.""" 34 | current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') 35 | 36 | header_text = f"""[bold blue]OpenCode Live Dashboard[/bold blue] [dim]Project:[/dim] [bold cyan]{session.project_name}[/bold cyan] [dim]Session:[/dim] [bold white]{session.display_title}[/bold white] [dim]Updated:[/dim] [bold white]{current_time}[/bold white] [dim]Interactions:[/dim] [bold white]{session.interaction_count}[/bold white]""" 37 | 38 | return Panel( 39 | header_text, 40 | title="Dashboard", 41 | title_align="left", 42 | border_style="dim blue" 43 | ) 44 | 45 | def create_token_panel(self, session: SessionData, recent_file: Optional[Any] = None) -> Panel: 46 | """Create token consumption panel.""" 47 | session_tokens = session.total_tokens 48 | 49 | # Create compact horizontal layout 50 | if recent_file: 51 | token_text = f"""[bold blue]Recent Interaction[/bold blue] 52 | [dim]Input:[/dim] [bold white]{recent_file.tokens.input:,}[/bold white] [dim]Cache W:[/dim] [bold white]{recent_file.tokens.cache_write:,}[/bold white] 53 | [dim]Output:[/dim] [bold white]{recent_file.tokens.output:,}[/bold white] [dim]Cache R:[/dim] [bold white]{recent_file.tokens.cache_read:,}[/bold white] 54 | 55 | [bold blue]Session Totals[/bold blue] 56 | [dim]Input:[/dim] [bold white]{session_tokens.input:,}[/bold white] [dim]Cache W:[/dim] [bold white]{session_tokens.cache_write:,}[/bold white] 57 | [dim]Output:[/dim] [bold white]{session_tokens.output:,}[/bold white] [dim]Cache R:[/dim] [bold white]{session_tokens.cache_read:,}[/bold white] 58 | [dim]Total:[/dim] [bold cyan]{session_tokens.total:,}[/bold cyan]""" 59 | else: 60 | token_text = f"""[bold blue]Session Totals[/bold blue] 61 | [dim]Input:[/dim] [bold white]{session_tokens.input:,}[/bold white] [dim]Cache W:[/dim] [bold white]{session_tokens.cache_write:,}[/bold white] 62 | 
[dim]Output:[/dim] [bold white]{session_tokens.output:,}[/bold white] [dim]Cache R:[/dim] [bold white]{session_tokens.cache_read:,}[/bold white] 63 | [dim]Total:[/dim] [bold cyan]{session_tokens.total:,}[/bold cyan]""" 64 | 65 | return Panel( 66 | token_text, 67 | title="Tokens", 68 | title_align="left", 69 | border_style="dim white" 70 | ) 71 | 72 | def create_cost_panel(self, session: SessionData, pricing_data: Dict[str, Any], 73 | quota: Optional[Decimal] = None) -> Panel: 74 | """Create cost tracking panel.""" 75 | total_cost = session.calculate_total_cost(pricing_data) 76 | 77 | if quota: 78 | percentage = min(100, float(total_cost / quota) * 100) 79 | progress_bar = self.create_compact_progress_bar(percentage) 80 | cost_color = self.get_cost_color(percentage) 81 | 82 | cost_text = f"""[bold blue]Cost Tracking[/bold blue] 83 | [dim]Session:[/dim] [bold white]${total_cost:.2f}[/bold white] 84 | [dim]Quota:[/dim] [bold white]${quota:.2f}[/bold white] 85 | [{cost_color}]{progress_bar}[/{cost_color}]""" 86 | else: 87 | cost_text = f"""[bold blue]Cost Tracking[/bold blue] 88 | [dim]Session:[/dim] [bold white]${total_cost:.2f}[/bold white] 89 | [dim]No quota configured[/dim]""" 90 | 91 | return Panel( 92 | cost_text, 93 | title="Cost", 94 | title_align="left", 95 | border_style="dim white" 96 | ) 97 | 98 | def create_model_panel(self, session: SessionData, pricing_data: Dict[str, Any]) -> Panel: 99 | """Create model usage panel.""" 100 | model_breakdown = session.get_model_breakdown(pricing_data) 101 | 102 | if not model_breakdown: 103 | return Panel("[dim]No model data available[/dim]", title="Models", border_style="dim white") 104 | 105 | model_lines = [] 106 | for model, stats in model_breakdown.items(): 107 | model_name = model[:25] + "..." 
if len(model) > 28 else model 108 | model_lines.append( 109 | f"[dim]{model_name}[/dim] " 110 | f"[bold white]{stats['tokens'].total:,}[/bold white] [dim cyan]tokens[/dim cyan] " 111 | f"[bold white]${stats['cost']:.2f}[/bold white]" 112 | ) 113 | 114 | model_text = "\n".join(model_lines) 115 | 116 | return Panel( 117 | model_text, 118 | title="Models", 119 | title_align="left", 120 | border_style="dim white" 121 | ) 122 | 123 | def create_context_panel(self, recent_file: Optional[Any], 124 | context_window: int = 200000) -> Panel: 125 | """Create context window status panel.""" 126 | if not recent_file: 127 | return Panel( 128 | "[dim]No recent interaction[/dim]", 129 | title="Context", 130 | border_style="dim white" 131 | ) 132 | 133 | # Calculate context size (input + cache read + cache write from most recent) 134 | context_size = (recent_file.tokens.input + 135 | recent_file.tokens.cache_read + 136 | recent_file.tokens.cache_write) 137 | 138 | percentage = min(100, (context_size / context_window) * 100) 139 | progress_bar = self.create_compact_progress_bar(percentage, 12) 140 | context_color = self.get_context_color(percentage) 141 | 142 | context_text = f"""[dim]Size:[/dim] [bold white]{context_size:,}[/bold white] 143 | [dim]Window:[/dim] [bold white]{context_window:,}[/bold white] 144 | [{context_color}]{progress_bar}[/{context_color}]""" 145 | 146 | return Panel( 147 | context_text, 148 | title="Context", 149 | title_align="left", 150 | border_style="dim white" 151 | ) 152 | 153 | def create_burn_rate_panel(self, burn_rate: float) -> Panel: 154 | """Create token burn rate panel.""" 155 | if burn_rate == 0: 156 | burn_text = "[dim]No recent activity[/dim]" 157 | else: 158 | # Add level indicator 159 | if burn_rate > 10000: 160 | level = "[red][HIGH][/red]" 161 | elif burn_rate > 5000: 162 | level = "[yellow][MED][/yellow]" 163 | else: 164 | level = "[green][LOW][/green]" 165 | 166 | burn_text = f"""[bold white]{burn_rate:,.0f}[/bold white] [dim 
cyan]tok/min[/dim cyan] 167 | {level}""" 168 | 169 | return Panel( 170 | burn_text, 171 | title="Rate", 172 | title_align="left", 173 | border_style="dim white" 174 | ) 175 | 176 | def create_session_time_panel(self, session: SessionData) -> Panel: 177 | """Create session time progress panel with 5-hour maximum.""" 178 | if not session.start_time: 179 | return Panel( 180 | "[dim]No session timing data[/dim]", 181 | title="Session Time", 182 | border_style="dim white" 183 | ) 184 | 185 | # Calculate duration from start_time to now (updates continuously even when idle) 186 | current_time = datetime.now() 187 | session_duration = current_time - session.start_time 188 | duration_ms = int(session_duration.total_seconds() * 1000) 189 | 190 | # Calculate percentage based on 5-hour maximum 191 | max_hours = 5.0 192 | duration_hours = session_duration.total_seconds() / 3600 193 | percentage = min(100.0, (duration_hours / max_hours) * 100.0) 194 | 195 | # Format duration display using hours and minutes format 196 | duration_display = TimeUtils.format_duration_hm(duration_ms) 197 | 198 | # Create progress bar with time-based colors 199 | progress_bar = self.create_compact_progress_bar(percentage, 12) 200 | time_color = self.get_time_color(percentage) 201 | 202 | time_text = f"""[dim]Duration:[/dim] [bold white]{duration_display}[/bold white] 203 | [dim]Max:[/dim] [bold white]{max_hours:.0f}h[/bold white] 204 | [{time_color}]{progress_bar}[/{time_color}]""" 205 | 206 | return Panel( 207 | time_text, 208 | title="Session Time", 209 | title_align="left", 210 | border_style="dim white" 211 | ) 212 | 213 | def create_recent_file_panel(self, recent_file: Optional[Any]) -> Panel: 214 | """Create recent file info panel.""" 215 | if not recent_file: 216 | return Panel( 217 | "[dim]No recent files[/dim]", 218 | title="Recent", 219 | border_style="dim white" 220 | ) 221 | 222 | # Truncate file name if too long 223 | file_name = recent_file.file_name 224 | if len(file_name) > 20: 225 | 
file_name = "..." + file_name[-17:] 226 | 227 | file_text = f"""[dim]File:[/dim] [bold white]{file_name}[/bold white] 228 | [dim]Model:[/dim] [bold white]{recent_file.model_id[:15]}[/bold white]""" 229 | 230 | if recent_file.time_data and recent_file.time_data.duration_ms: 231 | duration = self.format_duration(recent_file.time_data.duration_ms) 232 | file_text += f"\n[dim]Duration:[/dim] [bold white]{duration}[/bold white]" 233 | 234 | return Panel( 235 | file_text, 236 | title="Recent", 237 | title_align="left", 238 | border_style="dim white" 239 | ) 240 | 241 | def create_dashboard_layout(self, session: SessionData, recent_file: Optional[Any], 242 | pricing_data: Dict[str, Any], burn_rate: float, 243 | quota: Optional[Decimal] = None, 244 | context_window: int = 200000) -> Layout: 245 | """Create the complete dashboard layout.""" 246 | layout = Layout() 247 | 248 | # Create panels 249 | header = self.create_header(session) 250 | token_panel = self.create_token_panel(session, recent_file) 251 | cost_panel = self.create_cost_panel(session, pricing_data, quota) 252 | model_panel = self.create_model_panel(session, pricing_data) 253 | context_panel = self.create_context_panel(recent_file, context_window) 254 | burn_rate_panel = self.create_burn_rate_panel(burn_rate) 255 | session_time_panel = self.create_session_time_panel(session) 256 | recent_file_panel = self.create_recent_file_panel(recent_file) 257 | 258 | # Setup new 4-section layout structure 259 | layout.split_column( 260 | Layout(header, size=3), # Compact header 261 | Layout(name="primary", minimum_size=8), # Main metrics 262 | Layout(name="secondary", size=6), # Compact metrics 263 | Layout(name="models", minimum_size=4) # Model breakdown 264 | ) 265 | 266 | # Primary section: Token usage (60%) and Cost tracking (40%) 267 | layout["primary"].split_row( 268 | Layout(token_panel, ratio=3), # 60% for token data 269 | Layout(cost_panel, ratio=2) # 40% for cost data 270 | ) 271 | 272 | # Secondary section: Four 
compact panels 273 | layout["secondary"].split_row( 274 | Layout(context_panel, ratio=1), 275 | Layout(burn_rate_panel, ratio=1), 276 | Layout(session_time_panel, ratio=1), 277 | Layout(recent_file_panel, ratio=1) 278 | ) 279 | 280 | # Models section: Full width for model breakdown 281 | layout["models"].update(model_panel) 282 | 283 | return layout 284 | 285 | def create_progress_bar(self, percentage: float, width: int = 30) -> str: 286 | """Create a text-based progress bar.""" 287 | filled = int(width * percentage / 100) 288 | bar = '█' * filled + '░' * (width - filled) 289 | return f"[{bar}] {percentage:.1f}%" 290 | 291 | def create_compact_progress_bar(self, percentage: float, width: int = 20) -> str: 292 | """Create a compact progress bar for space-efficient display.""" 293 | filled = int(width * percentage / 100) 294 | bar = '▌' * filled + '░' * (width - filled) 295 | return f"{bar} {percentage:.0f}%" 296 | 297 | def get_cost_color(self, percentage: float) -> str: 298 | """Get color for cost based on percentage.""" 299 | if percentage >= 90: 300 | return "red" 301 | elif percentage >= 75: 302 | return "yellow" 303 | elif percentage >= 50: 304 | return "orange" 305 | else: 306 | return "green" 307 | 308 | def get_context_color(self, percentage: float) -> str: 309 | """Get color for context window based on percentage.""" 310 | if percentage >= 95: 311 | return "red" 312 | elif percentage >= 85: 313 | return "yellow" 314 | elif percentage >= 70: 315 | return "orange" 316 | else: 317 | return "green" 318 | 319 | def get_time_color(self, percentage: float) -> str: 320 | """Get color for session time based on percentage of 5-hour max.""" 321 | if percentage >= 90: 322 | return "red" 323 | elif percentage >= 75: 324 | return "yellow" 325 | elif percentage >= 50: 326 | return "orange" 327 | else: 328 | return "green" 329 | 330 | def format_duration(self, milliseconds: int) -> str: 331 | """Format duration in milliseconds to hours and minutes format.""" 332 | return 
TimeUtils.format_duration_hm(milliseconds) 333 | 334 | def clear_screen(self): 335 | """Clear the terminal screen.""" 336 | os.system('cls' if os.name == 'nt' else 'clear') 337 | 338 | def create_simple_table(self, data: Dict[str, Any]) -> Table: 339 | """Create a simple data table for fallback rendering.""" 340 | table = Table(show_header=False, box=None) 341 | table.add_column("Key", style="cyan") 342 | table.add_column("Value", style="white") 343 | 344 | for key, value in data.items(): 345 | table.add_row(key, str(value)) 346 | 347 | return table -------------------------------------------------------------------------------- /ocmonitor/services/session_analyzer.py: -------------------------------------------------------------------------------- 1 | """Session analysis service for OpenCode Monitor.""" 2 | 3 | import time 4 | from pathlib import Path 5 | from typing import List, Dict, Any, Optional 6 | from datetime import datetime, date 7 | from decimal import Decimal 8 | 9 | from ..models.session import SessionData, InteractionFile, TokenUsage 10 | from ..models.analytics import ( 11 | DailyUsage, WeeklyUsage, MonthlyUsage, ModelUsageStats, 12 | ModelBreakdownReport, ProjectBreakdownReport, TimeframeAnalyzer 13 | ) 14 | from ..utils.file_utils import FileProcessor 15 | from ..utils.time_utils import TimeUtils 16 | from ..config import ModelPricing 17 | 18 | 19 | class SessionAnalyzer: 20 | """Service for analyzing OpenCode sessions.""" 21 | 22 | def __init__(self, pricing_data: Dict[str, ModelPricing]): 23 | """Initialize session analyzer. 24 | 25 | Args: 26 | pricing_data: Model pricing information 27 | """ 28 | self.pricing_data = pricing_data 29 | 30 | def analyze_single_session(self, session_path: str) -> Optional[SessionData]: 31 | """Analyze a single session directory. 
32 | 33 | Args: 34 | session_path: Path to session directory 35 | 36 | Returns: 37 | SessionData object or None if analysis failed 38 | """ 39 | path = Path(session_path) 40 | return FileProcessor.load_session_data(path) 41 | 42 | def analyze_all_sessions(self, base_path: str, limit: Optional[int] = None) -> List[SessionData]: 43 | """Analyze all sessions in a directory. 44 | 45 | Args: 46 | base_path: Path to directory containing sessions 47 | limit: Maximum number of sessions to analyze 48 | 49 | Returns: 50 | List of SessionData objects 51 | """ 52 | return FileProcessor.load_all_sessions(base_path, limit) 53 | 54 | def get_sessions_summary(self, sessions: List[SessionData]) -> Dict[str, Any]: 55 | """Generate summary statistics for multiple sessions. 56 | 57 | Args: 58 | sessions: List of sessions to summarize 59 | 60 | Returns: 61 | Dictionary with summary statistics 62 | """ 63 | if not sessions: 64 | return { 65 | 'total_sessions': 0, 66 | 'total_interactions': 0, 67 | 'total_tokens': TokenUsage(), 68 | 'total_cost': Decimal('0.0'), 69 | 'models_used': [], 70 | 'date_range': 'No sessions' 71 | } 72 | 73 | total_tokens = TokenUsage() 74 | total_cost = Decimal('0.0') 75 | total_interactions = 0 76 | models_used = set() 77 | start_times = [] 78 | end_times = [] 79 | 80 | for session in sessions: 81 | session_tokens = session.total_tokens 82 | total_tokens.input += session_tokens.input 83 | total_tokens.output += session_tokens.output 84 | total_tokens.cache_write += session_tokens.cache_write 85 | total_tokens.cache_read += session_tokens.cache_read 86 | 87 | total_cost += session.calculate_total_cost(self.pricing_data) 88 | total_interactions += session.interaction_count 89 | models_used.update(session.models_used) 90 | 91 | if session.start_time: 92 | start_times.append(session.start_time) 93 | if session.end_time: 94 | end_times.append(session.end_time) 95 | 96 | # Calculate date range 97 | date_range = 'Unknown' 98 | if start_times and end_times: 99 | 
earliest = min(start_times) 100 | latest = max(end_times) 101 | if earliest.date() == latest.date(): 102 | date_range = earliest.strftime('%Y-%m-%d') 103 | else: 104 | date_range = f"{earliest.strftime('%Y-%m-%d')} to {latest.strftime('%Y-%m-%d')}" 105 | 106 | return { 107 | 'total_sessions': len(sessions), 108 | 'total_interactions': total_interactions, 109 | 'total_tokens': total_tokens, 110 | 'total_cost': total_cost, 111 | 'models_used': sorted(list(models_used)), 112 | 'date_range': date_range, 113 | 'earliest_session': min(start_times) if start_times else None, 114 | 'latest_session': max(end_times) if end_times else None 115 | } 116 | 117 | def create_daily_breakdown(self, sessions: List[SessionData]) -> List[DailyUsage]: 118 | """Create daily usage breakdown. 119 | 120 | Args: 121 | sessions: List of sessions to analyze 122 | 123 | Returns: 124 | List of DailyUsage objects 125 | """ 126 | return TimeframeAnalyzer.create_daily_breakdown(sessions) 127 | 128 | def create_weekly_breakdown(self, sessions: List[SessionData], week_start_day: int = 0) -> List[WeeklyUsage]: 129 | """Create weekly usage breakdown. 130 | 131 | Args: 132 | sessions: List of sessions to analyze 133 | week_start_day: Day to start week on (0=Monday, 6=Sunday) 134 | 135 | Returns: 136 | List of WeeklyUsage objects 137 | """ 138 | daily_usage = self.create_daily_breakdown(sessions) 139 | return TimeframeAnalyzer.create_weekly_breakdown(daily_usage, week_start_day) 140 | 141 | def create_monthly_breakdown(self, sessions: List[SessionData]) -> List[MonthlyUsage]: 142 | """Create monthly usage breakdown. 
143 | 144 | Args: 145 | sessions: List of sessions to analyze 146 | 147 | Returns: 148 | List of MonthlyUsage objects 149 | """ 150 | daily_usage = self.create_daily_breakdown(sessions) 151 | weekly_usage = TimeframeAnalyzer.create_weekly_breakdown(daily_usage) 152 | return TimeframeAnalyzer.create_monthly_breakdown(weekly_usage) 153 | 154 | def create_model_breakdown(self, sessions: List[SessionData], 155 | timeframe: str = "all", 156 | start_date: Optional[date] = None, 157 | end_date: Optional[date] = None) -> ModelBreakdownReport: 158 | """Create model usage breakdown. 159 | 160 | Args: 161 | sessions: List of sessions to analyze 162 | timeframe: Timeframe for analysis ("all", "daily", "weekly", "monthly") 163 | start_date: Start date filter 164 | end_date: End date filter 165 | 166 | Returns: 167 | ModelBreakdownReport object 168 | """ 169 | return TimeframeAnalyzer.create_model_breakdown( 170 | sessions, self.pricing_data, timeframe, start_date, end_date 171 | ) 172 | 173 | def create_project_breakdown(self, sessions: List[SessionData], 174 | timeframe: str = "all", 175 | start_date: Optional[date] = None, 176 | end_date: Optional[date] = None) -> ProjectBreakdownReport: 177 | """Create project usage breakdown. 178 | 179 | Args: 180 | sessions: List of sessions to analyze 181 | timeframe: Timeframe for analysis ("all", "daily", "weekly", "monthly") 182 | start_date: Start date filter 183 | end_date: End date filter 184 | 185 | Returns: 186 | ProjectBreakdownReport object 187 | """ 188 | return TimeframeAnalyzer.create_project_breakdown( 189 | sessions, self.pricing_data, timeframe, start_date, end_date 190 | ) 191 | 192 | def filter_sessions_by_date(self, sessions: List[SessionData], 193 | start_date: Optional[date] = None, 194 | end_date: Optional[date] = None) -> List[SessionData]: 195 | """Filter sessions by date range. 
196 | 197 | Args: 198 | sessions: List of sessions to filter 199 | start_date: Start date (inclusive) 200 | end_date: End date (inclusive) 201 | 202 | Returns: 203 | Filtered list of sessions 204 | """ 205 | if not start_date and not end_date: 206 | return sessions 207 | 208 | filtered = [] 209 | for session in sessions: 210 | if session.start_time: 211 | session_date = session.start_time.date() 212 | if TimeUtils.date_in_range(session_date, start_date, end_date): 213 | filtered.append(session) 214 | 215 | return filtered 216 | 217 | def filter_sessions_by_model(self, sessions: List[SessionData], models: List[str]) -> List[SessionData]: 218 | """Filter sessions by models used. 219 | 220 | Args: 221 | sessions: List of sessions to filter 222 | models: List of model names to include 223 | 224 | Returns: 225 | Filtered list of sessions 226 | """ 227 | if not models: 228 | return sessions 229 | 230 | filtered = [] 231 | for session in sessions: 232 | if any(model in session.models_used for model in models): 233 | filtered.append(session) 234 | 235 | return filtered 236 | 237 | def get_most_recent_session(self, base_path: str) -> Optional[SessionData]: 238 | """Get the most recently modified session. 239 | 240 | Args: 241 | base_path: Path to search for sessions 242 | 243 | Returns: 244 | Most recent SessionData or None 245 | """ 246 | return FileProcessor.get_most_recent_session(base_path) 247 | 248 | def get_session_statistics(self, session: SessionData) -> Dict[str, Any]: 249 | """Get detailed statistics for a single session. 
250 | 251 | Args: 252 | session: Session to analyze 253 | 254 | Returns: 255 | Dictionary with detailed statistics 256 | """ 257 | model_breakdown = session.get_model_breakdown(self.pricing_data) 258 | session_tokens = session.total_tokens 259 | total_cost = session.calculate_total_cost(self.pricing_data) 260 | 261 | # Calculate averages 262 | avg_tokens_per_interaction = session_tokens.total // session.interaction_count if session.interaction_count > 0 else 0 263 | avg_cost_per_interaction = total_cost / session.interaction_count if session.interaction_count > 0 else Decimal('0.0') 264 | 265 | # Time analysis 266 | time_stats = {} 267 | if session.start_time and session.end_time: 268 | time_stats = { 269 | 'start_time': session.start_time, 270 | 'end_time': session.end_time, 271 | 'duration_ms': session.duration_ms, 272 | 'total_processing_time_ms': session.total_processing_time_ms, 273 | 'avg_processing_time_ms': session.total_processing_time_ms // session.interaction_count if session.interaction_count > 0 else 0 274 | } 275 | 276 | return { 277 | 'session_id': session.session_id, 278 | 'interaction_count': session.interaction_count, 279 | 'models_used': session.models_used, 280 | 'total_tokens': session_tokens, 281 | 'total_cost': total_cost, 282 | 'model_breakdown': model_breakdown, 283 | 'averages': { 284 | 'tokens_per_interaction': avg_tokens_per_interaction, 285 | 'cost_per_interaction': avg_cost_per_interaction 286 | }, 287 | 'time_analysis': time_stats 288 | } 289 | 290 | def calculate_burn_rate(self, session_path: str, timeframe_minutes: int = 5) -> float: 291 | """Calculate token burn rate for a session. 
292 | 293 | Args: 294 | session_path: Path to session directory 295 | timeframe_minutes: Timeframe in minutes for burn rate calculation 296 | 297 | Returns: 298 | Tokens per minute over the timeframe 299 | """ 300 | path = Path(session_path) 301 | if not path.exists(): 302 | return 0.0 303 | 304 | json_files = FileProcessor.find_json_files(path) 305 | if not json_files: 306 | return 0.0 307 | 308 | # Get current time and timeframe 309 | now = time.time() 310 | timeframe_seconds = timeframe_minutes * 60 311 | 312 | # Filter files within timeframe 313 | recent_files = [] 314 | for json_file in json_files: 315 | mod_time = json_file.stat().st_mtime 316 | if (now - mod_time) <= timeframe_seconds: 317 | recent_files.append((json_file, mod_time)) 318 | 319 | if not recent_files: 320 | return 0.0 321 | 322 | # Calculate total tokens in recent files 323 | total_tokens = 0 324 | for json_file, _ in recent_files: 325 | interaction = FileProcessor.parse_interaction_file(json_file, path.name) 326 | if interaction: 327 | total_tokens += interaction.tokens.total 328 | 329 | # Calculate time span 330 | if len(recent_files) > 1: 331 | oldest_time = min([mod_time for _, mod_time in recent_files]) 332 | time_span_minutes = (now - oldest_time) / 60 333 | if time_span_minutes > 0: 334 | return total_tokens / time_span_minutes 335 | 336 | return 0.0 337 | 338 | def validate_session_health(self, session: SessionData) -> Dict[str, Any]: 339 | """Validate session health and identify potential issues. 
340 | 341 | Args: 342 | session: Session to validate 343 | 344 | Returns: 345 | Dictionary with health check results 346 | """ 347 | issues = [] 348 | warnings = [] 349 | 350 | # Check for empty interactions 351 | empty_interactions = sum(1 for file in session.files if file.tokens.total == 0) 352 | if empty_interactions > 0: 353 | warnings.append(f"{empty_interactions} interactions have no token usage") 354 | 355 | # Check for missing time data 356 | missing_time = sum(1 for file in session.files if file.time_data is None) 357 | if missing_time > 0: 358 | warnings.append(f"{missing_time} interactions missing time data") 359 | 360 | # Check for unknown models 361 | unknown_models = [model for model in session.models_used if model not in self.pricing_data and model != 'unknown'] 362 | if unknown_models: 363 | warnings.append(f"Unknown models with no pricing: {', '.join(unknown_models)}") 364 | 365 | # Check for very high costs 366 | total_cost = session.calculate_total_cost(self.pricing_data) 367 | if total_cost > Decimal('50.0'): # Arbitrary threshold 368 | warnings.append(f"High session cost: ${total_cost:.2f}") 369 | 370 | # Check for extremely long interactions 371 | long_interactions = [] 372 | for file in session.files: 373 | if file.time_data and file.time_data.duration_ms and file.time_data.duration_ms > 300000: # 5 minutes 374 | long_interactions.append(file.file_name) 375 | 376 | if long_interactions: 377 | warnings.append(f"Long interactions (>5min): {len(long_interactions)} files") 378 | 379 | return { 380 | 'healthy': len(issues) == 0, 381 | 'issues': issues, 382 | 'warnings': warnings, 383 | 'stats': { 384 | 'total_interactions': session.interaction_count, 385 | 'empty_interactions': empty_interactions, 386 | 'missing_time_data': missing_time, 387 | 'unknown_models': len(unknown_models), 388 | 'total_cost': total_cost 389 | } 390 | } -------------------------------------------------------------------------------- /ocmonitor/ui/tables.py: 
-------------------------------------------------------------------------------- 1 | """Rich table formatting for OpenCode Monitor.""" 2 | 3 | from typing import List, Dict, Any, Optional 4 | from decimal import Decimal 5 | from rich.console import Console 6 | from rich.table import Table 7 | from rich.text import Text 8 | from rich.panel import Panel 9 | from rich.columns import Columns 10 | from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn 11 | 12 | from ..models.session import SessionData, TokenUsage 13 | from ..models.analytics import DailyUsage, WeeklyUsage, MonthlyUsage, ModelUsageStats 14 | from ..utils.time_utils import TimeUtils 15 | 16 | 17 | class TableFormatter: 18 | """Formatter for creating Rich tables.""" 19 | 20 | def __init__(self, console: Optional[Console] = None): 21 | """Initialize table formatter. 22 | 23 | Args: 24 | console: Rich console instance. If None, creates a new one. 25 | """ 26 | self.console = console or Console() 27 | 28 | def format_number(self, number: int) -> str: 29 | """Format numbers with thousands separators.""" 30 | return f"{number:,}" 31 | 32 | def format_currency(self, amount: Decimal) -> str: 33 | """Format currency amounts.""" 34 | return f"${amount:.2f}" 35 | 36 | def format_percentage(self, value: float, total: float) -> str: 37 | """Format percentage values.""" 38 | if total == 0: 39 | return "0.0%" 40 | percentage = (value / total) * 100 41 | return f"{percentage:.1f}%" 42 | 43 | def get_cost_color(self, cost: Decimal, quota: Optional[Decimal] = None) -> str: 44 | """Get color for cost based on quota.""" 45 | if quota is None: 46 | return "white" 47 | 48 | percentage = float(cost / quota) * 100 49 | if percentage >= 90: 50 | return "red" 51 | elif percentage >= 75: 52 | return "yellow" 53 | elif percentage >= 50: 54 | return "orange" 55 | else: 56 | return "green" 57 | 58 | def create_sessions_table(self, sessions: List[SessionData], pricing_data: Dict[str, Any]) -> Table: 59 | 
"""Create a table for multiple sessions.""" 60 | table = Table( 61 | title="OpenCode Sessions Summary", 62 | show_header=True, 63 | header_style="bold blue", 64 | title_style="bold magenta" 65 | ) 66 | 67 | # Add columns 68 | table.add_column("Started", style="cyan", no_wrap=True) 69 | table.add_column("Duration", style="cyan", no_wrap=True) 70 | table.add_column("Session", style="magenta", max_width=35) 71 | table.add_column("Model", style="yellow", max_width=25) 72 | table.add_column("Interactions", justify="right", style="green") 73 | table.add_column("Input Tokens", justify="right", style="blue") 74 | table.add_column("Output Tokens", justify="right", style="blue") 75 | table.add_column("Total Tokens", justify="right", style="bold blue") 76 | table.add_column("Cost", justify="right", style="red") 77 | 78 | # Sort sessions by start time 79 | sorted_sessions = sorted(sessions, key=lambda s: s.start_time or s.session_id) 80 | 81 | total_interactions = 0 82 | total_tokens = TokenUsage() 83 | total_cost = Decimal('0.0') 84 | 85 | for session in sorted_sessions: 86 | session_cost = session.calculate_total_cost(pricing_data) 87 | session_tokens = session.total_tokens 88 | 89 | # Update totals 90 | total_interactions += session.interaction_count 91 | total_tokens.input += session_tokens.input 92 | total_tokens.output += session_tokens.output 93 | total_tokens.cache_write += session_tokens.cache_write 94 | total_tokens.cache_read += session_tokens.cache_read 95 | total_cost += session_cost 96 | 97 | # Get model breakdown for session 98 | model_breakdown = session.get_model_breakdown(pricing_data) 99 | 100 | # Add rows for each model 101 | for i, (model, stats) in enumerate(model_breakdown.items()): 102 | # Show session info only for first model 103 | if i == 0: 104 | start_time = session.start_time.strftime('%Y-%m-%d %H:%M:%S') if session.start_time else 'N/A' 105 | duration = self._format_duration(session.duration_ms) if session.duration_ms else 'N/A' 106 | 
session_display = session.display_title 107 | # Truncate if too long for display 108 | if len(session_display) > 35: 109 | session_display = session_display[:32] + "..." 110 | else: 111 | start_time = "" 112 | duration = "" 113 | session_display = "" 114 | 115 | # Format model name 116 | model_text = Text(model) 117 | if len(model) > 25: 118 | model_text = Text(f"{model[:22]}...") 119 | 120 | # Get cost color 121 | cost_color = self.get_cost_color(stats['cost']) 122 | 123 | table.add_row( 124 | start_time, 125 | duration, 126 | session_display, 127 | model_text, 128 | self.format_number(stats['files']), 129 | self.format_number(stats['tokens'].input), 130 | self.format_number(stats['tokens'].output), 131 | self.format_number(stats['tokens'].total), 132 | Text(self.format_currency(stats['cost']), style=cost_color) 133 | ) 134 | 135 | # Add separator and totals 136 | table.add_section() 137 | table.add_row( 138 | Text("TOTALS", style="bold white"), 139 | "", 140 | "", # Empty session column 141 | Text(f"{len(sorted_sessions)} sessions", style="bold white"), 142 | Text(self.format_number(total_interactions), style="bold green"), 143 | Text(self.format_number(total_tokens.input), style="bold blue"), 144 | Text(self.format_number(total_tokens.output), style="bold blue"), 145 | Text(self.format_number(total_tokens.total), style="bold blue"), 146 | Text(self.format_currency(total_cost), style="bold red") 147 | ) 148 | 149 | return table 150 | 151 | def create_session_table(self, session: SessionData, pricing_data: Dict[str, Any]) -> Table: 152 | """Create a table for a single session.""" 153 | table = Table( 154 | title=f"Session: {session.display_title}", 155 | show_header=True, 156 | header_style="bold blue", 157 | title_style="bold magenta" 158 | ) 159 | 160 | # Add columns 161 | table.add_column("File", style="cyan", max_width=30) 162 | table.add_column("Model", style="yellow") 163 | table.add_column("Input", justify="right", style="blue") 164 | 
table.add_column("Output", justify="right", style="blue") 165 | table.add_column("Cache W", justify="right", style="green") 166 | table.add_column("Cache R", justify="right", style="green") 167 | table.add_column("Total", justify="right", style="bold blue") 168 | table.add_column("Cost", justify="right", style="red") 169 | table.add_column("Duration", justify="right", style="cyan") 170 | 171 | total_cost = Decimal('0.0') 172 | total_tokens = TokenUsage() 173 | 174 | for file in session.files: 175 | cost = file.calculate_cost(pricing_data) 176 | total_cost += cost 177 | total_tokens.input += file.tokens.input 178 | total_tokens.output += file.tokens.output 179 | total_tokens.cache_write += file.tokens.cache_write 180 | total_tokens.cache_read += file.tokens.cache_read 181 | 182 | duration = "" 183 | if file.time_data and file.time_data.duration_ms: 184 | duration = self._format_duration(file.time_data.duration_ms) 185 | 186 | cost_color = self.get_cost_color(cost) 187 | 188 | table.add_row( 189 | Text(file.file_name[:27] + "..." 
if len(file.file_name) > 30 else file.file_name), 190 | file.model_id, 191 | self.format_number(file.tokens.input), 192 | self.format_number(file.tokens.output), 193 | self.format_number(file.tokens.cache_write), 194 | self.format_number(file.tokens.cache_read), 195 | self.format_number(file.tokens.total), 196 | Text(self.format_currency(cost), style=cost_color), 197 | duration 198 | ) 199 | 200 | # Add totals 201 | table.add_section() 202 | table.add_row( 203 | Text("TOTALS", style="bold white"), 204 | "", 205 | Text(self.format_number(total_tokens.input), style="bold blue"), 206 | Text(self.format_number(total_tokens.output), style="bold blue"), 207 | Text(self.format_number(total_tokens.cache_write), style="bold green"), 208 | Text(self.format_number(total_tokens.cache_read), style="bold green"), 209 | Text(self.format_number(total_tokens.total), style="bold blue"), 210 | Text(self.format_currency(total_cost), style="bold red"), 211 | "" 212 | ) 213 | 214 | return table 215 | 216 | def create_daily_table(self, daily_usage: List[DailyUsage], pricing_data: Dict[str, Any]) -> Table: 217 | """Create a table for daily usage breakdown.""" 218 | table = Table( 219 | title="Daily Usage Breakdown", 220 | show_header=True, 221 | header_style="bold blue", 222 | title_style="bold magenta" 223 | ) 224 | 225 | # Add columns 226 | table.add_column("Date", style="cyan", no_wrap=True) 227 | table.add_column("Sessions", justify="right", style="green") 228 | table.add_column("Interactions", justify="right", style="green") 229 | table.add_column("Input Tokens", justify="right", style="blue") 230 | table.add_column("Output Tokens", justify="right", style="blue") 231 | table.add_column("Total Tokens", justify="right", style="bold blue") 232 | table.add_column("Cost", justify="right", style="red") 233 | table.add_column("Models", style="yellow") 234 | 235 | total_sessions = 0 236 | total_interactions = 0 237 | total_tokens = TokenUsage() 238 | total_cost = Decimal('0.0') 239 | 240 | 
for day in daily_usage: 241 | day_cost = day.calculate_total_cost(pricing_data) 242 | day_tokens = day.total_tokens 243 | 244 | total_sessions += len(day.sessions) 245 | total_interactions += day.total_interactions 246 | total_tokens.input += day_tokens.input 247 | total_tokens.output += day_tokens.output 248 | total_tokens.cache_write += day_tokens.cache_write 249 | total_tokens.cache_read += day_tokens.cache_read 250 | total_cost += day_cost 251 | 252 | models_text = ", ".join(day.models_used[:3]) 253 | if len(day.models_used) > 3: 254 | models_text += f" (+{len(day.models_used) - 3} more)" 255 | 256 | cost_color = self.get_cost_color(day_cost) 257 | 258 | table.add_row( 259 | day.date.strftime('%Y-%m-%d'), 260 | self.format_number(len(day.sessions)), 261 | self.format_number(day.total_interactions), 262 | self.format_number(day_tokens.input), 263 | self.format_number(day_tokens.output), 264 | self.format_number(day_tokens.total), 265 | Text(self.format_currency(day_cost), style=cost_color), 266 | Text(models_text, style="yellow") 267 | ) 268 | 269 | # Add totals 270 | table.add_section() 271 | table.add_row( 272 | Text("TOTALS", style="bold white"), 273 | Text(self.format_number(total_sessions), style="bold green"), 274 | Text(self.format_number(total_interactions), style="bold green"), 275 | Text(self.format_number(total_tokens.input), style="bold blue"), 276 | Text(self.format_number(total_tokens.output), style="bold blue"), 277 | Text(self.format_number(total_tokens.total), style="bold blue"), 278 | Text(self.format_currency(total_cost), style="bold red"), 279 | "" 280 | ) 281 | 282 | return table 283 | 284 | def create_model_breakdown_table(self, model_stats: List[ModelUsageStats]) -> Table: 285 | """Create a table for model usage breakdown.""" 286 | table = Table( 287 | title="Model Usage Breakdown", 288 | show_header=True, 289 | header_style="bold blue", 290 | title_style="bold magenta" 291 | ) 292 | 293 | # Add columns 294 | table.add_column("Model", 
style="yellow", max_width=30) 295 | table.add_column("Sessions", justify="right", style="green") 296 | table.add_column("Interactions", justify="right", style="green") 297 | table.add_column("Input Tokens", justify="right", style="blue") 298 | table.add_column("Output Tokens", justify="right", style="blue") 299 | table.add_column("Total Tokens", justify="right", style="bold blue") 300 | table.add_column("Cost", justify="right", style="red") 301 | table.add_column("Cost %", justify="right", style="red") 302 | 303 | total_cost = sum(model.total_cost for model in model_stats) 304 | 305 | for model in model_stats: 306 | cost_percentage = self.format_percentage(float(model.total_cost), float(total_cost)) 307 | cost_color = self.get_cost_color(model.total_cost) 308 | 309 | table.add_row( 310 | Text(model.model_name[:27] + "..." if len(model.model_name) > 30 else model.model_name), 311 | self.format_number(model.total_sessions), 312 | self.format_number(model.total_interactions), 313 | self.format_number(model.total_tokens.input), 314 | self.format_number(model.total_tokens.output), 315 | self.format_number(model.total_tokens.total), 316 | Text(self.format_currency(model.total_cost), style=cost_color), 317 | Text(cost_percentage, style=cost_color) 318 | ) 319 | 320 | return table 321 | 322 | def create_progress_bar(self, percentage: float, width: int = 20) -> str: 323 | """Create a text-based progress bar.""" 324 | filled = int(width * percentage / 100) 325 | bar = '█' * filled + '░' * (width - filled) 326 | return f"[{bar}] {percentage:.1f}%" 327 | 328 | def _format_duration(self, milliseconds: int) -> str: 329 | """Format duration in milliseconds to hours and minutes format.""" 330 | return TimeUtils.format_duration_hm(milliseconds) 331 | 332 | def create_summary_panel(self, sessions: List[SessionData], pricing_data: Dict[str, Any]) -> Panel: 333 | """Create a summary panel with key metrics.""" 334 | if not sessions: 335 | return Panel("No sessions found", 
title="Summary", title_align="left") 336 | 337 | total_sessions = len(sessions) 338 | total_interactions = sum(session.interaction_count for session in sessions) 339 | total_tokens = TokenUsage() 340 | total_cost = Decimal('0.0') 341 | models_used = set() 342 | 343 | for session in sessions: 344 | session_tokens = session.total_tokens 345 | total_tokens.input += session_tokens.input 346 | total_tokens.output += session_tokens.output 347 | total_tokens.cache_write += session_tokens.cache_write 348 | total_tokens.cache_read += session_tokens.cache_read 349 | total_cost += session.calculate_total_cost(pricing_data) 350 | models_used.update(session.models_used) 351 | 352 | # Create summary text 353 | summary_lines = [ 354 | f"[bold]Sessions:[/bold] {self.format_number(total_sessions)}", 355 | f"[bold]Interactions:[/bold] {self.format_number(total_interactions)}", 356 | f"[bold]Total Tokens:[/bold] {self.format_number(total_tokens.total)}", 357 | f"[bold]Total Cost:[/bold] {self.format_currency(total_cost)}", 358 | f"[bold]Models Used:[/bold] {len(models_used)}" 359 | ] 360 | 361 | return Panel( 362 | "\n".join(summary_lines), 363 | title="Summary", 364 | title_align="left", 365 | border_style="blue" 366 | ) -------------------------------------------------------------------------------- /ocmonitor/models/analytics.py: -------------------------------------------------------------------------------- 1 | """Analytics data models for OpenCode Monitor.""" 2 | 3 | from datetime import datetime, date, timedelta 4 | from typing import List, Dict, Any, Optional 5 | from decimal import Decimal 6 | from pydantic import BaseModel, Field, computed_field 7 | from collections import defaultdict 8 | from .session import SessionData, TokenUsage 9 | 10 | 11 | class DailyUsage(BaseModel): 12 | """Model for daily usage statistics.""" 13 | date: date 14 | sessions: List[SessionData] = Field(default_factory=list) 15 | 16 | @computed_field 17 | @property 18 | def total_tokens(self) -> 
TokenUsage: 19 | """Calculate total tokens for the day.""" 20 | total = TokenUsage() 21 | for session in self.sessions: 22 | session_tokens = session.total_tokens 23 | total.input += session_tokens.input 24 | total.output += session_tokens.output 25 | total.cache_write += session_tokens.cache_write 26 | total.cache_read += session_tokens.cache_read 27 | return total 28 | 29 | @computed_field 30 | @property 31 | def total_interactions(self) -> int: 32 | """Calculate total interactions for the day.""" 33 | return sum(session.interaction_count for session in self.sessions) 34 | 35 | @computed_field 36 | @property 37 | def models_used(self) -> List[str]: 38 | """Get unique models used on this day.""" 39 | models = set() 40 | for session in self.sessions: 41 | models.update(session.models_used) 42 | return list(models) 43 | 44 | def calculate_total_cost(self, pricing_data: Dict[str, Any]) -> Decimal: 45 | """Calculate total cost for the day.""" 46 | return sum((session.calculate_total_cost(pricing_data) for session in self.sessions), Decimal('0.0')) 47 | 48 | 49 | class WeeklyUsage(BaseModel): 50 | """Model for weekly usage statistics.""" 51 | year: int 52 | week: int 53 | start_date: date 54 | end_date: date 55 | daily_usage: List[DailyUsage] = Field(default_factory=list) 56 | 57 | @computed_field 58 | @property 59 | def total_tokens(self) -> TokenUsage: 60 | """Calculate total tokens for the week.""" 61 | total = TokenUsage() 62 | for day in self.daily_usage: 63 | day_tokens = day.total_tokens 64 | total.input += day_tokens.input 65 | total.output += day_tokens.output 66 | total.cache_write += day_tokens.cache_write 67 | total.cache_read += day_tokens.cache_read 68 | return total 69 | 70 | @computed_field 71 | @property 72 | def total_sessions(self) -> int: 73 | """Calculate total sessions for the week.""" 74 | return sum(len(day.sessions) for day in self.daily_usage) 75 | 76 | @computed_field 77 | @property 78 | def total_interactions(self) -> int: 79 | """Calculate 
total interactions for the week.""" 80 | return sum(day.total_interactions for day in self.daily_usage) 81 | 82 | def calculate_total_cost(self, pricing_data: Dict[str, Any]) -> Decimal: 83 | """Calculate total cost for the week.""" 84 | return sum((day.calculate_total_cost(pricing_data) for day in self.daily_usage), Decimal('0.0')) 85 | 86 | 87 | class MonthlyUsage(BaseModel): 88 | """Model for monthly usage statistics.""" 89 | year: int 90 | month: int 91 | weekly_usage: List[WeeklyUsage] = Field(default_factory=list) 92 | 93 | @computed_field 94 | @property 95 | def total_tokens(self) -> TokenUsage: 96 | """Calculate total tokens for the month.""" 97 | total = TokenUsage() 98 | for week in self.weekly_usage: 99 | week_tokens = week.total_tokens 100 | total.input += week_tokens.input 101 | total.output += week_tokens.output 102 | total.cache_write += week_tokens.cache_write 103 | total.cache_read += week_tokens.cache_read 104 | return total 105 | 106 | @computed_field 107 | @property 108 | def total_sessions(self) -> int: 109 | """Calculate total sessions for the month.""" 110 | return sum(week.total_sessions for week in self.weekly_usage) 111 | 112 | @computed_field 113 | @property 114 | def total_interactions(self) -> int: 115 | """Calculate total interactions for the month.""" 116 | return sum(week.total_interactions for week in self.weekly_usage) 117 | 118 | def calculate_total_cost(self, pricing_data: Dict[str, Any]) -> Decimal: 119 | """Calculate total cost for the month.""" 120 | return sum((week.calculate_total_cost(pricing_data) for week in self.weekly_usage), Decimal('0.0')) 121 | 122 | 123 | class ModelUsageStats(BaseModel): 124 | """Model for model-specific usage statistics.""" 125 | model_name: str 126 | total_tokens: TokenUsage = Field(default_factory=TokenUsage) 127 | total_sessions: int = Field(default=0) 128 | total_interactions: int = Field(default=0) 129 | total_cost: Decimal = Field(default=Decimal('0.0')) 130 | first_used: Optional[datetime] 
= Field(default=None) 131 | last_used: Optional[datetime] = Field(default=None) 132 | 133 | 134 | class ModelBreakdownReport(BaseModel): 135 | """Model for model usage breakdown report.""" 136 | timeframe: str # "daily", "weekly", "monthly", "all" 137 | start_date: Optional[date] = Field(default=None) 138 | end_date: Optional[date] = Field(default=None) 139 | model_stats: List[ModelUsageStats] = Field(default_factory=list) 140 | 141 | @computed_field 142 | @property 143 | def total_cost(self) -> Decimal: 144 | """Calculate total cost across all models.""" 145 | return sum((model.total_cost for model in self.model_stats), Decimal('0.0')) 146 | 147 | @computed_field 148 | @property 149 | def total_tokens(self) -> TokenUsage: 150 | """Calculate total tokens across all models.""" 151 | total = TokenUsage() 152 | for model in self.model_stats: 153 | total.input += model.total_tokens.input 154 | total.output += model.total_tokens.output 155 | total.cache_write += model.total_tokens.cache_write 156 | total.cache_read += model.total_tokens.cache_read 157 | return total 158 | 159 | 160 | class ProjectUsageStats(BaseModel): 161 | """Model for project-specific usage statistics.""" 162 | project_name: str 163 | total_tokens: TokenUsage = Field(default_factory=TokenUsage) 164 | total_sessions: int = Field(default=0) 165 | total_interactions: int = Field(default=0) 166 | total_cost: Decimal = Field(default=Decimal('0.0')) 167 | models_used: List[str] = Field(default_factory=list) 168 | first_activity: Optional[datetime] = Field(default=None) 169 | last_activity: Optional[datetime] = Field(default=None) 170 | 171 | 172 | class ProjectBreakdownReport(BaseModel): 173 | """Model for project usage breakdown report.""" 174 | timeframe: str # "daily", "weekly", "monthly", "all" 175 | start_date: Optional[date] = Field(default=None) 176 | end_date: Optional[date] = Field(default=None) 177 | project_stats: List[ProjectUsageStats] = Field(default_factory=list) 178 | 179 | @computed_field 
180 | @property 181 | def total_cost(self) -> Decimal: 182 | """Calculate total cost across all projects.""" 183 | return sum(project.total_cost for project in self.project_stats) 184 | 185 | @computed_field 186 | @property 187 | def total_tokens(self) -> TokenUsage: 188 | """Calculate total tokens across all projects.""" 189 | total = TokenUsage() 190 | for project in self.project_stats: 191 | total.input += project.total_tokens.input 192 | total.output += project.total_tokens.output 193 | total.cache_write += project.total_tokens.cache_write 194 | total.cache_read += project.total_tokens.cache_read 195 | return total 196 | 197 | 198 | class TimeframeAnalyzer: 199 | """Analyzer for different timeframe breakdowns.""" 200 | 201 | @staticmethod 202 | def create_daily_breakdown(sessions: List[SessionData]) -> List[DailyUsage]: 203 | """Create daily breakdown from sessions.""" 204 | daily_data = defaultdict(list) 205 | 206 | for session in sessions: 207 | if session.start_time: 208 | session_date = session.start_time.date() 209 | daily_data[session_date].append(session) 210 | 211 | return [ 212 | DailyUsage(date=date_key, sessions=sessions_list) 213 | for date_key, sessions_list in sorted(daily_data.items()) 214 | ] 215 | 216 | @staticmethod 217 | def create_weekly_breakdown(daily_usage: List[DailyUsage], week_start_day: int = 0) -> List[WeeklyUsage]: 218 | """Create weekly breakdown from daily usage. 
219 | 220 | Args: 221 | daily_usage: List of daily usage records 222 | week_start_day: Day to start week on (0=Monday, 6=Sunday) 223 | 224 | Returns: 225 | List of WeeklyUsage objects 226 | """ 227 | from ..utils.time_utils import TimeUtils 228 | 229 | weekly_data = defaultdict(list) 230 | 231 | for day in daily_usage: 232 | # Get the week start date for this day 233 | week_start, week_end = TimeUtils.get_custom_week_range(day.date, week_start_day) 234 | 235 | # Use (week_start, week_end) tuple as key for grouping 236 | week_key = (week_start, week_end) 237 | weekly_data[week_key].append(day) 238 | 239 | weekly_breakdown = [] 240 | for (week_start, week_end), days in sorted(weekly_data.items()): 241 | # For display purposes, calculate ISO week number for the week_start 242 | year, week, _ = week_start.isocalendar() 243 | 244 | weekly_breakdown.append(WeeklyUsage( 245 | year=year, 246 | week=week, 247 | start_date=week_start, 248 | end_date=week_end, 249 | daily_usage=sorted(days, key=lambda d: d.date) 250 | )) 251 | 252 | return weekly_breakdown 253 | 254 | @staticmethod 255 | def create_monthly_breakdown(weekly_usage: List[WeeklyUsage]) -> List[MonthlyUsage]: 256 | """Create monthly breakdown from weekly usage.""" 257 | monthly_data = defaultdict(list) 258 | 259 | for week in weekly_usage: 260 | # Assign week to month based on start date 261 | month_key = (week.start_date.year, week.start_date.month) 262 | monthly_data[month_key].append(week) 263 | 264 | return [ 265 | MonthlyUsage(year=year, month=month, weekly_usage=weeks) 266 | for (year, month), weeks in sorted(monthly_data.items()) 267 | ] 268 | 269 | @staticmethod 270 | def create_model_breakdown( 271 | sessions: List[SessionData], 272 | pricing_data: Dict[str, Any], 273 | timeframe: str = "all", 274 | start_date: Optional[date] = None, 275 | end_date: Optional[date] = None 276 | ) -> ModelBreakdownReport: 277 | """Create model usage breakdown.""" 278 | # Filter sessions by date range if specified 279 | 
filtered_sessions = sessions 280 | if start_date or end_date: 281 | filtered_sessions = [] 282 | for session in sessions: 283 | if session.start_time: 284 | session_date = session.start_time.date() 285 | if start_date and session_date < start_date: 286 | continue 287 | if end_date and session_date > end_date: 288 | continue 289 | filtered_sessions.append(session) 290 | 291 | model_data = defaultdict(lambda: { 292 | 'tokens': TokenUsage(), 293 | 'sessions': set(), 294 | 'interactions': 0, 295 | 'cost': Decimal('0.0'), 296 | 'first_used': None, 297 | 'last_used': None 298 | }) 299 | 300 | for session in filtered_sessions: 301 | for model in session.models_used: 302 | model_files = [f for f in session.files if f.model_id == model] 303 | model_stats = model_data[model] 304 | 305 | # Update token counts 306 | for file in model_files: 307 | model_stats['tokens'].input += file.tokens.input 308 | model_stats['tokens'].output += file.tokens.output 309 | model_stats['tokens'].cache_write += file.tokens.cache_write 310 | model_stats['tokens'].cache_read += file.tokens.cache_read 311 | model_stats['interactions'] += 1 312 | model_stats['cost'] += file.calculate_cost(pricing_data) 313 | 314 | # Track sessions 315 | model_stats['sessions'].add(session.session_id) 316 | 317 | # Update first/last used times 318 | if session.start_time: 319 | if (model_stats['first_used'] is None or 320 | session.start_time < model_stats['first_used']): 321 | model_stats['first_used'] = session.start_time 322 | 323 | if session.end_time: 324 | if (model_stats['last_used'] is None or 325 | session.end_time > model_stats['last_used']): 326 | model_stats['last_used'] = session.end_time 327 | 328 | # Convert to ModelUsageStats objects 329 | model_stats = [] 330 | for model_name, stats in model_data.items(): 331 | model_stats.append(ModelUsageStats( 332 | model_name=model_name, 333 | total_tokens=stats['tokens'], 334 | total_sessions=len(stats['sessions']), 335 | 
total_interactions=stats['interactions'], 336 | total_cost=stats['cost'], 337 | first_used=stats['first_used'], 338 | last_used=stats['last_used'] 339 | )) 340 | 341 | # Sort by total cost descending 342 | model_stats.sort(key=lambda x: x.total_cost, reverse=True) 343 | 344 | return ModelBreakdownReport( 345 | timeframe=timeframe, 346 | start_date=start_date, 347 | end_date=end_date, 348 | model_stats=model_stats 349 | ) 350 | 351 | @staticmethod 352 | def create_project_breakdown( 353 | sessions: List[SessionData], 354 | pricing_data: Dict[str, Any], 355 | timeframe: str = "all", 356 | start_date: Optional[date] = None, 357 | end_date: Optional[date] = None 358 | ) -> 'ProjectBreakdownReport': 359 | """Create project usage breakdown.""" 360 | # Filter sessions by date range if specified 361 | filtered_sessions = sessions 362 | if start_date or end_date: 363 | filtered_sessions = [] 364 | for session in sessions: 365 | if session.start_time: 366 | session_date = session.start_time.date() 367 | if start_date and session_date < start_date: 368 | continue 369 | if end_date and session_date > end_date: 370 | continue 371 | filtered_sessions.append(session) 372 | 373 | project_data = defaultdict(lambda: { 374 | 'tokens': TokenUsage(), 375 | 'sessions': 0, 376 | 'interactions': 0, 377 | 'cost': Decimal('0.0'), 378 | 'models_used': set(), 379 | 'first_activity': None, 380 | 'last_activity': None 381 | }) 382 | 383 | for session in filtered_sessions: 384 | project_name = session.project_name or "Unknown" 385 | project_stats = project_data[project_name] 386 | 387 | # Update aggregated data 388 | session_tokens = session.total_tokens 389 | project_stats['tokens'].input += session_tokens.input 390 | project_stats['tokens'].output += session_tokens.output 391 | project_stats['tokens'].cache_write += session_tokens.cache_write 392 | project_stats['tokens'].cache_read += session_tokens.cache_read 393 | 394 | project_stats['sessions'] += 1 395 | project_stats['interactions'] += 
session.interaction_count 396 | project_stats['cost'] += session.calculate_total_cost(pricing_data) 397 | project_stats['models_used'].update(session.models_used) 398 | 399 | # Track first/last activity times 400 | if session.start_time: 401 | if (project_stats['first_activity'] is None or 402 | session.start_time < project_stats['first_activity']): 403 | project_stats['first_activity'] = session.start_time 404 | 405 | if session.end_time: 406 | if (project_stats['last_activity'] is None or 407 | session.end_time > project_stats['last_activity']): 408 | project_stats['last_activity'] = session.end_time 409 | 410 | # Convert to ProjectUsageStats objects 411 | project_stats = [] 412 | for project_name, stats in project_data.items(): 413 | project_stats.append(ProjectUsageStats( 414 | project_name=project_name, 415 | total_tokens=stats['tokens'], 416 | total_sessions=stats['sessions'], 417 | total_interactions=stats['interactions'], 418 | total_cost=stats['cost'], 419 | models_used=list(stats['models_used']), 420 | first_activity=stats['first_activity'], 421 | last_activity=stats['last_activity'] 422 | )) 423 | 424 | # Sort by total cost descending 425 | project_stats.sort(key=lambda x: x.total_cost, reverse=True) 426 | 427 | return ProjectBreakdownReport( 428 | timeframe=timeframe, 429 | start_date=start_date, 430 | end_date=end_date, 431 | project_stats=project_stats 432 | ) -------------------------------------------------------------------------------- /ocmonitor/services/export_service.py: -------------------------------------------------------------------------------- 1 | """Export service for OpenCode Monitor.""" 2 | 3 | import csv 4 | import json 5 | import os 6 | from pathlib import Path 7 | from typing import List, Dict, Any, Optional, Union 8 | from datetime import datetime 9 | 10 | from ..utils.formatting import DataFormatter 11 | 12 | 13 | class ExportService: 14 | """Service for exporting data to various formats.""" 15 | 16 | def __init__(self, 
export_dir: str = "./exports"): 17 | """Initialize export service. 18 | 19 | Args: 20 | export_dir: Directory to save exported files 21 | """ 22 | self.export_dir = Path(export_dir) 23 | self.export_dir.mkdir(parents=True, exist_ok=True) 24 | 25 | def export_to_csv(self, data: List[Dict[str, Any]], filename: str, 26 | include_metadata: bool = True) -> str: 27 | """Export data to CSV format. 28 | 29 | Args: 30 | data: List of dictionaries to export 31 | filename: Output filename (without extension) 32 | include_metadata: Whether to include metadata header 33 | 34 | Returns: 35 | Path to exported file 36 | 37 | Raises: 38 | ValueError: If data is empty or invalid 39 | IOError: If file cannot be written 40 | """ 41 | if not data: 42 | raise ValueError("No data to export") 43 | 44 | # Ensure filename has .csv extension 45 | if not filename.endswith('.csv'): 46 | filename += '.csv' 47 | 48 | output_path = self.export_dir / filename 49 | 50 | try: 51 | with open(output_path, 'w', newline='', encoding='utf-8') as csvfile: 52 | # Write metadata header if requested 53 | if include_metadata: 54 | csvfile.write(f"# OpenCode Monitor Export\n") 55 | csvfile.write(f"# Generated: {datetime.now().isoformat()}\n") 56 | csvfile.write(f"# Records: {len(data)}\n") 57 | csvfile.write("#\n") 58 | 59 | # Get all unique keys from the data 60 | fieldnames = set() 61 | for row in data: 62 | fieldnames.update(row.keys()) 63 | fieldnames = sorted(list(fieldnames)) 64 | 65 | writer = csv.DictWriter(csvfile, fieldnames=fieldnames) 66 | writer.writeheader() 67 | 68 | # Write data rows, sanitizing values 69 | for row in data: 70 | sanitized_row = {} 71 | for key in fieldnames: 72 | value = row.get(key) 73 | if value is None: 74 | sanitized_row[key] = "" 75 | elif isinstance(value, (list, dict)): 76 | # Convert complex types to string representation 77 | sanitized_row[key] = str(value) 78 | else: 79 | sanitized_row[key] = DataFormatter.sanitize_for_csv(value) 80 | writer.writerow(sanitized_row) 81 
| 82 | except IOError as e: 83 | raise IOError(f"Failed to write CSV file: {e}") 84 | 85 | return str(output_path) 86 | 87 | def export_to_json(self, data: Union[List[Dict[str, Any]], Dict[str, Any]], filename: str, 88 | include_metadata: bool = True, indent: int = 2) -> str: 89 | """Export data to JSON format. 90 | 91 | Args: 92 | data: Data to export (list of dicts or single dict) 93 | filename: Output filename (without extension) 94 | include_metadata: Whether to include metadata 95 | indent: JSON indentation level 96 | 97 | Returns: 98 | Path to exported file 99 | 100 | Raises: 101 | ValueError: If data is invalid 102 | IOError: If file cannot be written 103 | """ 104 | if data is None: 105 | raise ValueError("No data to export") 106 | 107 | # Ensure filename has .json extension 108 | if not filename.endswith('.json'): 109 | filename += '.json' 110 | 111 | output_path = self.export_dir / filename 112 | 113 | # Prepare export data 114 | export_data = data 115 | if include_metadata: 116 | metadata = { 117 | 'export_info': { 118 | 'generated_by': 'OpenCode Monitor', 119 | 'generated_at': datetime.now().isoformat(), 120 | 'version': '1.0.0' 121 | } 122 | } 123 | 124 | if isinstance(data, list): 125 | export_data = { 126 | 'metadata': metadata, 127 | 'data': data, 128 | 'record_count': len(data) 129 | } 130 | elif isinstance(data, dict): 131 | export_data = { 132 | 'metadata': metadata, 133 | 'data': data 134 | } 135 | 136 | try: 137 | with open(output_path, 'w', encoding='utf-8') as jsonfile: 138 | json.dump(export_data, jsonfile, indent=indent, default=self._json_serializer, 139 | ensure_ascii=False) 140 | 141 | except IOError as e: 142 | raise IOError(f"Failed to write JSON file: {e}") 143 | 144 | return str(output_path) 145 | 146 | def export_report_data(self, report_data: Dict[str, Any], report_type: str, 147 | format_type: str, output_filename: Optional[str] = None, 148 | include_metadata: bool = True) -> str: 149 | """Export report data in specified format. 
150 | 151 | Args: 152 | report_data: Report data from ReportGenerator 153 | report_type: Type of report (session, sessions, daily, etc.) 154 | format_type: Export format ("csv" or "json") 155 | output_filename: Custom filename (auto-generated if None) 156 | include_metadata: Whether to include metadata 157 | 158 | Returns: 159 | Path to exported file 160 | 161 | Raises: 162 | ValueError: If format or data is invalid 163 | IOError: If export fails 164 | """ 165 | if format_type not in ["csv", "json"]: 166 | raise ValueError(f"Unsupported export format: {format_type}") 167 | 168 | # Generate filename if not provided 169 | if not output_filename: 170 | timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') 171 | output_filename = f"ocmonitor_{report_type}_{timestamp}" 172 | 173 | # Extract exportable data based on report type 174 | export_data = self._extract_export_data(report_data, report_type) 175 | 176 | if format_type == "csv": 177 | return self.export_to_csv(export_data, output_filename, include_metadata) 178 | else: 179 | return self.export_to_json(export_data, output_filename, include_metadata) 180 | 181 | def _extract_export_data(self, report_data: Dict[str, Any], report_type: str) -> Union[List[Dict[str, Any]], Dict[str, Any]]: 182 | """Extract exportable data from report data. 
183 | 184 | Args: 185 | report_data: Raw report data 186 | report_type: Type of report 187 | 188 | Returns: 189 | Data suitable for export 190 | """ 191 | if report_type == "single_session": 192 | # For single session, export interaction details 193 | session = report_data.get('session') 194 | if session: 195 | return [ 196 | { 197 | 'session_id': session.session_id, 198 | 'session_title': session.session_title, 199 | 'project_name': session.project_name, 200 | 'file_name': file.file_name, 201 | 'model_id': file.model_id, 202 | 'input_tokens': file.tokens.input, 203 | 'output_tokens': file.tokens.output, 204 | 'cache_write_tokens': file.tokens.cache_write, 205 | 'cache_read_tokens': file.tokens.cache_read, 206 | 'total_tokens': file.tokens.total, 207 | 'created_time': file.time_data.created if file.time_data else None, 208 | 'completed_time': file.time_data.completed if file.time_data else None, 209 | 'duration_ms': file.time_data.duration_ms if file.time_data else None 210 | } 211 | for file in session.files 212 | ] 213 | return [] 214 | 215 | elif report_type == "sessions": 216 | # For sessions summary, export session-level data 217 | sessions = report_data.get('sessions', []) 218 | from ..services.session_analyzer import SessionAnalyzer 219 | # Note: This is a simplified version - in practice, we'd need the analyzer instance 220 | return [ 221 | { 222 | 'session_id': session.session_id, 223 | 'session_title': session.session_title, 224 | 'project_name': session.project_name, 225 | 'start_time': session.start_time.isoformat() if session.start_time else None, 226 | 'end_time': session.end_time.isoformat() if session.end_time else None, 227 | 'duration_ms': session.duration_ms, 228 | 'interaction_count': session.interaction_count, 229 | 'models_used': ', '.join(session.models_used), 230 | 'total_input_tokens': session.total_tokens.input, 231 | 'total_output_tokens': session.total_tokens.output, 232 | 'total_cache_write_tokens': session.total_tokens.cache_write, 233 
| 'total_cache_read_tokens': session.total_tokens.cache_read, 234 | 'total_tokens': session.total_tokens.total 235 | } 236 | for session in sessions 237 | ] 238 | 239 | elif report_type == "daily": 240 | # For daily breakdown, export daily data 241 | daily_usage = report_data.get('daily_usage', []) 242 | from ..services.session_analyzer import SessionAnalyzer 243 | # Note: This would need the analyzer instance for cost calculation 244 | return [ 245 | { 246 | 'date': day.date.isoformat(), 247 | 'sessions_count': len(day.sessions), 248 | 'total_interactions': day.total_interactions, 249 | 'input_tokens': day.total_tokens.input, 250 | 'output_tokens': day.total_tokens.output, 251 | 'cache_write_tokens': day.total_tokens.cache_write, 252 | 'cache_read_tokens': day.total_tokens.cache_read, 253 | 'total_tokens': day.total_tokens.total, 254 | 'models_used': ', '.join(day.models_used) 255 | } 256 | for day in daily_usage 257 | ] 258 | 259 | elif report_type == "weekly": 260 | # For weekly breakdown, export weekly data 261 | weekly_usage = report_data.get('weekly_usage', []) 262 | return [ 263 | { 264 | 'year': week.year, 265 | 'week_number': week.week, 266 | 'start_date': week.start_date.isoformat(), 267 | 'end_date': week.end_date.isoformat(), 268 | 'sessions_count': week.total_sessions, 269 | 'total_interactions': week.total_interactions, 270 | 'input_tokens': week.total_tokens.input, 271 | 'output_tokens': week.total_tokens.output, 272 | 'cache_write_tokens': week.total_tokens.cache_write, 273 | 'cache_read_tokens': week.total_tokens.cache_read, 274 | 'total_tokens': week.total_tokens.total 275 | } 276 | for week in weekly_usage 277 | ] 278 | 279 | elif report_type == "monthly": 280 | # For monthly breakdown, export monthly data 281 | monthly_usage = report_data.get('monthly_usage', []) 282 | return [ 283 | { 284 | 'year': month.year, 285 | 'month': month.month, 286 | 'sessions_count': month.total_sessions, 287 | 'total_interactions': month.total_interactions, 288 | 
'input_tokens': month.total_tokens.input, 289 | 'output_tokens': month.total_tokens.output, 290 | 'cache_write_tokens': month.total_tokens.cache_write, 291 | 'cache_read_tokens': month.total_tokens.cache_read, 292 | 'total_tokens': month.total_tokens.total 293 | } 294 | for month in monthly_usage 295 | ] 296 | 297 | elif report_type == "models": 298 | # For models breakdown, export model data 299 | model_breakdown = report_data.get('model_breakdown') 300 | if model_breakdown: 301 | return [ 302 | { 303 | 'model_name': model.model_name, 304 | 'total_sessions': model.total_sessions, 305 | 'total_interactions': model.total_interactions, 306 | 'input_tokens': model.total_tokens.input, 307 | 'output_tokens': model.total_tokens.output, 308 | 'cache_write_tokens': model.total_tokens.cache_write, 309 | 'cache_read_tokens': model.total_tokens.cache_read, 310 | 'total_tokens': model.total_tokens.total, 311 | 'total_cost': float(model.total_cost), 312 | 'first_used': model.first_used.isoformat() if model.first_used else None, 313 | 'last_used': model.last_used.isoformat() if model.last_used else None 314 | } 315 | for model in model_breakdown.model_stats 316 | ] 317 | return [] 318 | 319 | elif report_type == "projects": 320 | # For projects breakdown, export project data 321 | project_breakdown = report_data.get('project_breakdown') 322 | if project_breakdown: 323 | return [ 324 | { 325 | 'project_name': project.project_name, 326 | 'total_sessions': project.total_sessions, 327 | 'total_interactions': project.total_interactions, 328 | 'input_tokens': project.total_tokens.input, 329 | 'output_tokens': project.total_tokens.output, 330 | 'cache_write_tokens': project.total_tokens.cache_write, 331 | 'cache_read_tokens': project.total_tokens.cache_read, 332 | 'total_tokens': project.total_tokens.total, 333 | 'total_cost': float(project.total_cost), 334 | 'models_used': ', '.join(project.models_used), 335 | 'first_activity': project.first_activity.isoformat() if 
project.first_activity else None, 336 | 'last_activity': project.last_activity.isoformat() if project.last_activity else None 337 | } 338 | for project in project_breakdown.project_stats 339 | ] 340 | return [] 341 | 342 | else: 343 | # For unknown report types, try to return the data as-is 344 | return report_data 345 | 346 | def _json_serializer(self, obj): 347 | """Custom JSON serializer for special types. 348 | 349 | Args: 350 | obj: Object to serialize 351 | 352 | Returns: 353 | Serializable representation 354 | """ 355 | if hasattr(obj, 'isoformat'): 356 | # Handle datetime objects 357 | return obj.isoformat() 358 | elif hasattr(obj, '__dict__'): 359 | # Handle objects with __dict__ 360 | return obj.__dict__ 361 | elif hasattr(obj, 'model_dump'): 362 | # Handle Pydantic models 363 | return obj.model_dump() 364 | else: 365 | # Fallback to string representation 366 | return str(obj) 367 | 368 | def get_export_summary(self, file_path: str) -> Dict[str, Any]: 369 | """Get summary information about an exported file. 
370 | 371 | Args: 372 | file_path: Path to exported file 373 | 374 | Returns: 375 | Summary information 376 | """ 377 | path = Path(file_path) 378 | if not path.exists(): 379 | return {'error': 'File not found'} 380 | 381 | try: 382 | stat = path.stat() 383 | summary = { 384 | 'filename': path.name, 385 | 'size_bytes': stat.st_size, 386 | 'size_human': self._format_file_size(stat.st_size), 387 | 'created': datetime.fromtimestamp(stat.st_ctime).isoformat(), 388 | 'modified': datetime.fromtimestamp(stat.st_mtime).isoformat(), 389 | 'format': path.suffix.lower() 390 | } 391 | 392 | # Add format-specific information 393 | if path.suffix.lower() == '.csv': 394 | summary.update(self._get_csv_info(path)) 395 | elif path.suffix.lower() == '.json': 396 | summary.update(self._get_json_info(path)) 397 | 398 | return summary 399 | 400 | except (OSError, IOError) as e: 401 | return {'error': f'Failed to read file info: {e}'} 402 | 403 | def _get_csv_info(self, file_path: Path) -> Dict[str, Any]: 404 | """Get CSV-specific information. 
405 | 406 | Args: 407 | file_path: Path to CSV file 408 | 409 | Returns: 410 | CSV information 411 | """ 412 | try: 413 | with open(file_path, 'r', encoding='utf-8') as csvfile: 414 | # Count lines (excluding metadata comments) 415 | lines = csvfile.readlines() 416 | data_lines = [line for line in lines if not line.startswith('#')] 417 | 418 | if data_lines: 419 | # First non-comment line should be header 420 | header_line = data_lines[0] if data_lines else "" 421 | columns = len(header_line.split(',')) if header_line else 0 422 | rows = len(data_lines) - 1 # Subtract header row 423 | 424 | return { 425 | 'rows': rows, 426 | 'columns': columns, 427 | 'has_header': True 428 | } 429 | else: 430 | return {'rows': 0, 'columns': 0, 'has_header': False} 431 | 432 | except Exception: 433 | return {'rows': 'unknown', 'columns': 'unknown', 'has_header': 'unknown'} 434 | 435 | def _get_json_info(self, file_path: Path) -> Dict[str, Any]: 436 | """Get JSON-specific information. 437 | 438 | Args: 439 | file_path: Path to JSON file 440 | 441 | Returns: 442 | JSON information 443 | """ 444 | try: 445 | with open(file_path, 'r', encoding='utf-8') as jsonfile: 446 | data = json.load(jsonfile) 447 | 448 | info = {'valid_json': True} 449 | 450 | if isinstance(data, dict): 451 | info['type'] = 'object' 452 | info['keys'] = len(data.keys()) 453 | 454 | # Check for metadata 455 | if 'metadata' in data: 456 | info['has_metadata'] = True 457 | if 'data' in data: 458 | data_section = data['data'] 459 | if isinstance(data_section, list): 460 | info['records'] = len(data_section) 461 | elif 'record_count' in data: 462 | info['records'] = data['record_count'] 463 | 464 | elif isinstance(data, list): 465 | info['type'] = 'array' 466 | info['records'] = len(data) 467 | 468 | return info 469 | 470 | except Exception: 471 | return {'valid_json': False} 472 | 473 | def _format_file_size(self, bytes_count: int) -> str: 474 | """Format file size in human-readable format. 
475 | 476 | Args: 477 | bytes_count: Size in bytes 478 | 479 | Returns: 480 | Human-readable size string 481 | """ 482 | if bytes_count == 0: 483 | return "0 B" 484 | 485 | units = ["B", "KB", "MB", "GB"] 486 | size = float(bytes_count) 487 | unit_index = 0 488 | 489 | while size >= 1024 and unit_index < len(units) - 1: 490 | size /= 1024 491 | unit_index += 1 492 | 493 | if unit_index == 0: 494 | return f"{int(size)} {units[unit_index]}" 495 | else: 496 | return f"{size:.1f} {units[unit_index]}" 497 | 498 | def list_exports(self) -> List[Dict[str, Any]]: 499 | """List all exported files in the export directory. 500 | 501 | Returns: 502 | List of export file information 503 | """ 504 | if not self.export_dir.exists(): 505 | return [] 506 | 507 | exports = [] 508 | for file_path in self.export_dir.iterdir(): 509 | if file_path.is_file() and file_path.suffix.lower() in ['.csv', '.json']: 510 | summary = self.get_export_summary(str(file_path)) 511 | exports.append(summary) 512 | 513 | # Sort by modification time (newest first) 514 | exports.sort(key=lambda x: x.get('modified', ''), reverse=True) 515 | return exports --------------------------------------------------------------------------------