├── requirements.txt
├── assets
│   └── File.png
├── graphite_app
│   ├── assets
│   │   ├── File.png
│   │   └── graphite.ico
│   ├── graphite_app
│   │   ├── __pycache__
│   │   │   ├── graphite_ui.cpython-312.pyc
│   │   │   ├── graphite_core.cpython-312.pyc
│   │   │   ├── graphite_agents.cpython-312.pyc
│   │   │   └── graphite_config.cpython-312.pyc
│   │   ├── graphite_config.py
│   │   ├── graphite_app.pyproj
│   │   ├── api_provider.py
│   │   ├── graphite_agents.py
│   │   ├── graphite_core.py
│   │   └── graphite_app.py
│   └── graphite_app.sln
├── .github
│   └── ISSUE_TEMPLATE
│       ├── feature_request.md
│       └── bug_report.md
├── LICENSE
├── SECURITY.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Change_Log.md
└── README.md
/requirements.txt:
--------------------------------------------------------------------------------
1 | PySide6
2 | ollama
3 | matplotlib
4 | qtawesome
5 |
--------------------------------------------------------------------------------
/assets/File.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/assets/File.png
--------------------------------------------------------------------------------
/graphite_app/assets/File.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/graphite_app/assets/File.png
--------------------------------------------------------------------------------
/graphite_app/assets/graphite.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/graphite_app/assets/graphite.ico
--------------------------------------------------------------------------------
/graphite_app/graphite_app/__pycache__/graphite_ui.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/graphite_app/graphite_app/__pycache__/graphite_ui.cpython-312.pyc
--------------------------------------------------------------------------------
/graphite_app/graphite_app/__pycache__/graphite_core.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/graphite_app/graphite_app/__pycache__/graphite_core.cpython-312.pyc
--------------------------------------------------------------------------------
/graphite_app/graphite_app/__pycache__/graphite_agents.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/graphite_app/graphite_app/__pycache__/graphite_agents.cpython-312.pyc
--------------------------------------------------------------------------------
/graphite_app/graphite_app/__pycache__/graphite_config.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dovvnloading/Graphite/HEAD/graphite_app/graphite_app/__pycache__/graphite_config.cpython-312.pyc
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/graphite_app/graphite_app/graphite_config.py:
--------------------------------------------------------------------------------
1 | # This file holds the global configuration for the application,
2 | # such as the currently selected Ollama model.
3 |
4 | # Abstract task identifiers
5 | TASK_TITLE = "task_title"
6 | TASK_CHAT = "task_chat"
7 | TASK_CHART = "task_chart"
8 |
9 | # API Providers
10 | API_PROVIDER_OPENAI = "OpenAI-Compatible"
11 | API_PROVIDER_GEMINI = "Google Gemini"
12 |
13 | # Ollama models per task
14 | OLLAMA_MODELS = {
15 | TASK_TITLE: 'qwen2.5:3b',
16 | TASK_CHAT: 'qwen2.5:7b-instruct',
17 | TASK_CHART: 'deepseek-coder:6.7b'
18 | }
19 |
20 | # Default model to use on startup
21 | CURRENT_MODEL = OLLAMA_MODELS[TASK_CHAT]
22 |
23 | def set_current_model(model_name: str):
24 | """
25 | Sets the global model to be used by all agents.
26 | NOTE: This now primarily affects the default chat model for Ollama.
27 | """
28 | global CURRENT_MODEL
29 | if model_name:
30 | CURRENT_MODEL = model_name
31 | OLLAMA_MODELS[TASK_CHAT] = model_name
32 |
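33 | # Example (illustrative): switching the default chat model at runtime.
34 | # Any locally available Ollama model tag can be passed in; the tag below
35 | # is just one of the defaults already listed in OLLAMA_MODELS.
36 | #
37 | #   set_current_model('qwen2.5:3b')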
--------------------------------------------------------------------------------
/graphite_app/graphite_app.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.9.34607.119
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "graphite_app", "graphite_app\graphite_app.pyproj", "{012BB263-7B27-4708-939C-19A2ECC6AA24}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|Any CPU = Debug|Any CPU
11 | Release|Any CPU = Release|Any CPU
12 | EndGlobalSection
13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
14 | {012BB263-7B27-4708-939C-19A2ECC6AA24}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
15 | {012BB263-7B27-4708-939C-19A2ECC6AA24}.Release|Any CPU.ActiveCfg = Release|Any CPU
16 | EndGlobalSection
17 | GlobalSection(SolutionProperties) = preSolution
18 | HideSolutionNode = FALSE
19 | EndGlobalSection
20 | GlobalSection(ExtensibilityGlobals) = postSolution
21 | SolutionGuid = {B4880855-9191-4230-A832-DCF0B3445933}
22 | EndGlobalSection
23 | EndGlobal
24 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Matthew Wesney
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/graphite_app/graphite_app/graphite_app.pyproj:
--------------------------------------------------------------------------------
1 | <Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0">
2 |   <PropertyGroup>
3 |     <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
4 |     <SchemaVersion>2.0</SchemaVersion>
5 |     <ProjectGuid>012bb263-7b27-4708-939c-19a2ecc6aa24</ProjectGuid>
6 |     <ProjectHome>.</ProjectHome>
7 |     <StartupFile>graphite_app.py</StartupFile>
8 |     <SearchPath>
9 |     </SearchPath>
10 |     <WorkingDirectory>.</WorkingDirectory>
11 |     <OutputPath>.</OutputPath>
12 |     <Name>graphite_app</Name>
13 |     <RootNamespace>graphite_app</RootNamespace>
14 |   </PropertyGroup>
15 |   <PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
16 |     <DebugSymbols>true</DebugSymbols>
17 |     <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
18 |   </PropertyGroup>
19 |   <PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
20 |     <DebugSymbols>true</DebugSymbols>
21 |     <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
22 |   </PropertyGroup>
23 |   <ItemGroup>
24 |     <!-- Compile/Content items elided in this snapshot -->
25 |   </ItemGroup>
26 |   <Import Project="$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\Python Tools\Microsoft.PythonTools.targets" />
27 | </Project>
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | As an early-stage open-source project, security updates will be prioritized for the latest version of Graphite. Users are encouraged to stay up-to-date with the most recent release to ensure they have the latest features and security patches.
6 |
7 | Since the project is in active development and has not yet reached a stable 1.0 release, the versioning is fluid. The current focus is on developing the core functionalities.
8 |
9 | | Version | Supported |
10 | | :-- | :--- |
11 | | Latest | :white_check_mark: |
12 | | Older Versions | :x: |
13 |
14 | We encourage users to update to the latest version to receive security updates.
15 |
16 | ## Reporting a Vulnerability
17 |
18 | The security of Graphite is a top priority. We appreciate the community's efforts in responsible disclosure and will make every effort to address vulnerabilities in a timely manner.
19 |
20 | If you discover a security vulnerability, please report it by opening an issue on the [GitHub repository](https://github.com/dovvnloading/Graphite/issues).
21 |
22 | When reporting a vulnerability, please include the following details:
23 |
24 | * A clear and descriptive title.
25 | * A detailed description of the vulnerability.
26 | * Steps to reproduce the vulnerability.
27 | * Any relevant screenshots or code snippets.
28 |
29 | Once a vulnerability is reported, you can expect the following:
30 |
31 | * We will acknowledge the receipt of your report within 48 hours.
32 | * We will investigate the report and provide an update on our findings within 7 days.
33 | * If the vulnerability is accepted, we will work on a patch and aim to release it in a timely manner.
34 | * You will be kept informed of the progress and notified when a patch is available.
35 |
36 | We kindly request that you do not disclose the vulnerability publicly until a patch has been released. We appreciate your cooperation in helping us keep Graphite secure.
37 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Project Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We are committed to maintaining a professional, respectful, and harassment-free environment for all contributors, regardless of their background. We pledge to act and interact in ways that contribute to an open, welcoming, and productive community focused on improving the project.
6 |
7 | ## Our Standards
8 |
9 | Examples of behavior that contributes to a positive environment include:
10 |
11 | * Being respectful of differing opinions, viewpoints, and experiences.
12 | * Focusing on constructive, professional dialogue.
13 | * Giving and gracefully accepting constructive feedback.
14 | * Accepting responsibility and learning from our mistakes.
15 | * Focusing on what is best for the project and the community.
16 |
17 | Examples of unacceptable behavior include:
18 |
19 | * The use of sexualized language or imagery, and unwelcome sexual attention or advances.
20 | * Trolling, insulting or derogatory comments, and personal or political attacks.
21 | * Public or private harassment of any kind.
22 | * Publishing others' private information, such as a physical or email address, without their explicit permission.
23 | * Other conduct which could reasonably be considered inappropriate in a professional setting.
24 |
25 | ## Enforcement Responsibilities
26 |
27 | The project maintainers are responsible for clarifying and enforcing our standards of acceptable behavior. They will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
28 |
29 | Maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct.
30 |
31 | ## Scope
32 |
33 | This Code of Conduct applies within all project spaces, and also applies when an individual is officially representing the project in public spaces.
34 |
35 | ## Reporting
36 |
37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the project maintainers responsible for enforcement at **devaux.mail@gmail.com**.
38 |
39 | All complaints will be reviewed and investigated promptly and fairly. The privacy and security of the reporter will be respected.
40 |
41 | ## Enforcement Actions
42 |
43 | The project maintainers will follow these guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:
44 |
45 | 1. **Correction**: For unprofessional or unwelcome behavior, a private, written warning will be issued, explaining the nature of the violation. An apology may be requested.
46 | 2. **Warning**: For a more serious violation or repeated offenses, a warning with consequences for continued behavior will be issued. This may include a temporary restriction from interaction with the project.
47 | 3. **Ban**: For a serious or repeated pattern of violation of community standards, including harassment, a temporary or permanent ban from the project may be enforced.
48 |
--------------------------------------------------------------------------------
/graphite_app/graphite_app/api_provider.py:
--------------------------------------------------------------------------------
1 | import os
2 | import ollama
3 | import graphite_config as config
4 |
5 | USE_API_MODE = False
6 | API_PROVIDER_TYPE = None
7 | API_CLIENT = None
8 | API_MODELS = {
9 | config.TASK_TITLE: None,
10 | config.TASK_CHAT: None,
11 | config.TASK_CHART: None
12 | }
13 |
14 | # Static, hard-coded list of reliable Gemini models. This is now the primary source.
15 | GEMINI_MODELS_STATIC = sorted([
16 | "gemini-2.5-pro-latest",
17 | "gemini-2.5-pro",
18 | "gemini-2.5-flash-latest",
19 | "gemini-2.5-flash",
20 | "gemini-2.0-flash",
21 | "gemini-pro",
22 | ])
23 |
24 |
25 | def _convert_to_gemini_messages(messages: list) -> tuple:
26 | """
27 | Converts standard message list to Gemini format and extracts system prompt.
28 | """
29 | system_prompt = None
30 | gemini_history = []
31 |
32 | for msg in messages:
33 | if msg['role'] == 'system':
34 | system_prompt = msg['content']
35 | continue
36 |
37 | # Gemini roles are 'user' and 'model'
38 | role = 'model' if msg['role'] == 'assistant' else 'user'
39 |
40 | # Ensure alternating roles
41 | if gemini_history and gemini_history[-1]['role'] == role:
42 | # If two user messages in a row, combine them.
43 | if role == 'user':
44 | gemini_history[-1]['parts'].append(msg['content'])
45 | continue
46 | # If two model messages in a row, this is unusual, but we'll add a placeholder user message
47 | else:
48 | gemini_history.append({'role': 'user', 'parts': ["(Continuing...)"]})
49 |
50 | gemini_history.append({
51 | 'role': role,
52 | 'parts': [msg['content']]
53 | })
54 |
55 | return system_prompt, gemini_history
56 |
57 | def chat(task: str, messages: list, **kwargs) -> dict:
58 | if not USE_API_MODE:
59 | model = config.OLLAMA_MODELS.get(task)
60 | if not model:
61 | raise ValueError(f"No Ollama model configured for task: {task}")
62 | return ollama.chat(model=model, messages=messages, **kwargs)
63 | else:
64 | if not API_CLIENT:
65 | raise RuntimeError("API client not initialized. Configure API settings first.")
66 |
67 | api_model = API_MODELS.get(task)
68 | if not api_model:
69 | raise RuntimeError(
70 | f"No API model configured for task '{task}'.\n"
71 | f"Please configure models in API Settings."
72 | )
73 |
74 | if API_PROVIDER_TYPE == config.API_PROVIDER_OPENAI:
75 | response = API_CLIENT.chat.completions.create(
76 | model=api_model,
77 | messages=messages,
78 | **kwargs
79 | )
80 | return {
81 | 'message': {
82 | 'content': response.choices[0].message.content,
83 | 'role': 'assistant'
84 | }
85 | }
86 | elif API_PROVIDER_TYPE == config.API_PROVIDER_GEMINI:
87 | system_prompt, gemini_history = _convert_to_gemini_messages(messages)
88 |
89 | model_config = {}
90 | if system_prompt:
91 | model_config['system_instruction'] = system_prompt
92 |
93 | gemini_model = API_CLIENT.GenerativeModel(api_model, **model_config)
94 |
95 | # This was the error. The .pop() method was removing the user's message
96 | # before sending the request. The generate_content method expects the full
97 | # conversation history, including the latest message.
98 | # prompt = gemini_history.pop() # <--- THIS LINE IS THE ROOT CAUSE OF THE FAILURE
99 |
100 | response = gemini_model.generate_content(
101 | contents=gemini_history,
102 | generation_config=kwargs
103 | )
104 |
105 | return {
106 | 'message': {
107 | 'content': response.text,
108 | 'role': 'assistant'
109 | }
110 | }
111 | else:
112 | raise RuntimeError(f"Unsupported API provider: {API_PROVIDER_TYPE}")
113 |
114 |
115 | def initialize_api(provider: str, api_key: str, base_url: str = None):
116 | global API_PROVIDER_TYPE, API_CLIENT
117 | API_PROVIDER_TYPE = provider
118 |
119 | if provider == config.API_PROVIDER_OPENAI:
120 | try:
121 | from openai import OpenAI
122 | except ImportError:
123 | raise RuntimeError("openai package required. Install with: pip install openai")
124 |
125 | if not base_url:
126 | base_url = 'https://api.openai.com/v1'
127 |
128 | API_CLIENT = OpenAI(api_key=api_key, base_url=base_url)
129 |
130 | elif provider == config.API_PROVIDER_GEMINI:
131 | try:
132 | import google.generativeai as genai
133 | except ImportError:
134 | raise RuntimeError("google-generativeai package required. Install with: pip install google-generativeai")
135 |
136 | genai.configure(api_key=api_key)
137 | API_CLIENT = genai # Store the configured module as the client
138 | else:
139 | raise ValueError(f"Unknown API provider: {provider}")
140 |
141 | return API_CLIENT
142 |
143 |
144 | def get_available_models():
145 | if not API_CLIENT:
146 | raise RuntimeError("API client not initialized")
147 |
148 | try:
149 | if API_PROVIDER_TYPE == config.API_PROVIDER_OPENAI:
150 | models = API_CLIENT.models.list()
151 | return sorted([model.id for model in models.data])
152 | elif API_PROVIDER_TYPE == config.API_PROVIDER_GEMINI:
153 | # The brittle API call has been removed. We now return a reliable, static list.
154 | return GEMINI_MODELS_STATIC
155 | else:
156 | return []
157 | except Exception as e:
158 | raise RuntimeError(f"Failed to fetch models from endpoint: {str(e)}")
159 |
160 | def set_mode(use_api: bool):
161 | global USE_API_MODE
162 | USE_API_MODE = use_api
163 |
164 | def set_task_model(task: str, api_model: str):
165 | if task in API_MODELS:
166 | API_MODELS[task] = api_model
167 |
168 | def get_task_models() -> dict:
169 | return API_MODELS.copy()
170 |
171 | def get_mode() -> str:
172 | return "API" if USE_API_MODE else "Ollama"
173 |
174 | def is_configured() -> bool:
175 | return API_CLIENT is not None and all(API_MODELS.values())
176 |
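177 | # Example (illustrative): typical wiring performed by the UI layer. The
178 | # key and model id below are placeholders, not real credentials or a
179 | # guaranteed model name.
180 | #
181 | #   initialize_api(config.API_PROVIDER_OPENAI, api_key="sk-...", base_url=None)
182 | #   set_task_model(config.TASK_CHAT, "gpt-4o-mini")  # hypothetical model id
183 | #   set_mode(True)  # route all chat() calls to the API instead of Ollama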
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Graphite
2 |
3 | First off, thank you for considering contributing to Graphite! It's people like you that make the open-source community such a fantastic place to learn, inspire, and create. All contributions are welcome and greatly appreciated.
4 |
5 | This document provides guidelines for contributing to the project. Please read it to ensure a smooth and effective contribution process for everyone involved.
6 |
7 | ## Table of Contents
8 | - [Code of Conduct](#code-of-conduct)
9 | - [How Can I Contribute?](#how-can-i-contribute)
10 | - [Reporting Bugs](#reporting-bugs)
11 | - [Suggesting Enhancements](#suggesting-enhancements)
12 | - [Pull Requests](#pull-requests)
13 | - [Understanding the Architecture](#understanding-the-architecture)
14 | - [Development Setup](#development-setup)
15 | - [Style Guides](#style-guides)
16 | - [Git Commit Messages](#git-commit-messages)
17 | - [Python Styleguide](#python-styleguide)
18 |
19 | ## Code of Conduct
20 |
21 | This project and everyone participating in it is governed by a [Code of Conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report any unacceptable behavior.
22 |
23 | ## How Can I Contribute?
24 |
25 | There are many ways to contribute to Graphite, from writing code and documentation to submitting bug reports and feature requests.
26 |
27 | ### Reporting Bugs
28 |
29 | If you encounter a bug, please open an issue on GitHub. When filing a bug report, please include the following:
30 |
31 | * **A clear and descriptive title** for the issue.
32 | * **A detailed description of the problem**, including the steps to reproduce the bug.
33 | * **The expected behavior** and what is happening instead.
34 | * **Your operating system and Graphite version**.
35 | * **Any relevant screenshots or logs**.
36 |
37 | ### Suggesting Enhancements
38 |
39 | If you have an idea for a new feature or an improvement to an existing one, we'd love to hear it! To ensure your effort is not wasted, please **open an issue on GitHub to discuss the idea first**. This allows for a discussion with the maintainers and community before you begin implementation.
40 |
41 | When suggesting an enhancement, please include:
42 |
43 | * **A clear and descriptive title** for the issue.
44 | * **A detailed description of the proposed enhancement** and the problem it solves.
45 | * **Any mockups or examples** that might help to illustrate your idea.
46 |
47 | ### Pull Requests
48 |
49 | Ready to contribute code? Please follow these steps to ensure your contribution can be smoothly integrated.
50 |
51 | **Important: Before You Start Coding**
52 |
53 | 1. **Sync with the `main` branch.** The project is under active development. To avoid building on an outdated version, please pull the latest changes from the official `main` branch before creating your own branch.
54 | 2. **Open an Issue First.** For any new feature or significant change, please open an issue to discuss your plan. This helps prevent duplicated effort and ensures your proposed changes align with the project's direction.
55 |
56 | **The Pull Request Process**
57 |
58 | 1. **Fork the repository** on GitHub.
59 | 2. **Create a new branch** from the up-to-date `main` branch (`git checkout -b feature/your-feature-name`).
60 | 3. **Make your changes** in your branch. Please see the architecture guide below to understand where your changes should go.
61 | 4. **Commit your changes** with clear, descriptive messages.
62 | 5. **Push your branch** to your forked repository.
63 | 6. **Create a pull request** to the `main` branch of the official Graphite repository.
64 | 7. In your pull request, **provide a clear description of the changes** you have made and reference the issue number it resolves (e.g., "Closes #42").
65 |
66 | ## Understanding the Architecture
67 |
68 | Graphite has been refactored from a single file into a modular structure. Understanding this structure is key to contributing effectively. All development should be done within these files, not by creating a new monolithic script. A short sketch of the agent/provider pattern follows the module list below.
69 |
70 | * `graphite_app.py`: **Main Application Entry Point.** Contains the `ChatWindow` class, toolbar setup, and event handling. This is the primary file for launching the app and managing the main UI.
71 | * `graphite_ui.py`: **The UI Layer.** Contains all custom Qt widgets, dialogs, and `QGraphicsItem` subclasses (`ChatNode`, `ConnectionItem`, `Frame`, `Note`, `ChatView`, `ChatScene`, etc.). All visual components belong here.
72 | * `graphite_core.py`: **Core Logic and Data.** Manages data persistence (`ChatDatabase`) and session management (`ChatSessionManager`), including serialization/deserialization of scene elements.
73 | * `graphite_agents.py`: **AI and Backend Logic.** Contains all classes related to LLM interaction, including the `ChatAgent`, specialized tool agents, and the `QThread` workers for running AI tasks in the background.
74 | * `graphite_config.py`: **Global Configuration.** A simple file for storing global settings, such as default model names.
75 |
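76 | For orientation, here is a minimal sketch of how a new tool agent is expected to route its LLM call through `api_provider` (the same pattern `KeyTakeawayAgent` and `ExplainerAgent` use; the class name and prompt below are hypothetical):
77 |
78 | ```python
79 | import graphite_config as config
80 | import api_provider
81 |
82 | class OutlineAgent:
83 |     """Hypothetical tool agent illustrating the provider-routing pattern."""
84 |
85 |     def __init__(self):
86 |         self.system_prompt = "You turn a passage of text into a short outline."
87 |
88 |     def get_response(self, text: str) -> str:
89 |         messages = [
90 |             {'role': 'system', 'content': self.system_prompt},
91 |             {'role': 'user', 'content': text},
92 |         ]
93 |         # api_provider.chat() routes to Ollama or the configured remote API
94 |         # depending on the mode the user selected in the toolbar.
95 |         response = api_provider.chat(task=config.TASK_CHAT, messages=messages)
96 |         return response['message']['content']
97 | ```
98 |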
76 | ## Development Setup
77 |
78 | To get started with developing on Graphite, follow these steps:
79 |
80 | 1. **Prerequisites**:
81 | * Python 3.8 or newer.
82 | * Ollama installed and running.
83 |
84 | 2. **Install an LLM Model**:
85 | Before running Graphite, you need a model for Ollama. The current default is `qwen2.5:7b-instruct`. Open your terminal and run:
86 | ```bash
87 | ollama pull qwen2.5:7b-instruct
88 | ```
89 | Ensure the Ollama application is running in the background.
90 |
91 | 3. **Clone and Install Dependencies**:
92 | * Clone the repository:
93 | ```bash
94 | git clone https://github.com/dovvnloading/Graphite.git
95 | cd Graphite
96 | ```
97 | * Create and activate a virtual environment (recommended):
98 | ```bash
99 | # For Windows
100 | python -m venv venv
101 | .\venv\Scripts\activate
102 |
103 | # For macOS/Linux
104 | python3 -m venv venv
105 | source venv/bin/activate
106 | ```
107 | * Install the required Python packages:
108 | ```bash
109 | pip install -r requirements.txt
110 | ```
111 |
112 | 4. **Run the Application**:
113 | The main entry point is now `graphite_app.py`.
114 | ```bash
115 | python graphite_app.py
116 | ```
117 |
118 | ## Style Guides
119 |
120 | ### Git Commit Messages
121 |
122 | * Use the present tense ("Add feature" not "Added feature").
123 | * Use the imperative mood ("Move file to..." not "Moves file to...").
124 | * Limit the first line to 72 characters or less.
125 | * Reference issues and pull requests liberally in the body of the commit message.
126 |
127 | ### Python Styleguide
128 |
129 | This project adheres to the [PEP 8 style guide for Python code](https://www.python.org/dev/peps/pep-0008/). Please ensure your contributions follow these guidelines. Using a linter like `flake8` can help you identify and fix style issues.
130 |
--------------------------------------------------------------------------------
/Change_Log.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | ---
6 |
7 | ### `[Beta v.0.2.2]` - 2024-05-25
8 |
9 | This release introduces support for any OpenAI-compatible API endpoint as an alternative to a local Ollama instance. It also includes a significant user experience overhaul for model and provider selection, resolving critical design flaws and improving usability.
10 |
11 | #### Added
12 |
13 | * **Added support for OpenAI-Compatible API Endpoints.**
14 |     * **Feature:** Users can now switch from using a local Ollama instance to any remote API service that is compatible with the OpenAI API specification (e.g., OpenAI, Groq, OpenRouter, or self-hosted solutions like LiteLLM). (Credit for the first conceptual buildout/iteration of the implementation: https://github.com/1818TusculumSt)
15 | * **Implementation:** A new `api_provider.py` module was created to act as a router, abstracting all LLM calls and directing them to either Ollama or the configured API endpoint based on the user's selection. All agent classes (`ChatAgent`, `KeyTakeawayAgent`, etc.) were refactored to use this new provider instead of calling `ollama` directly.
16 | * **Added Per-Task Model Configuration for API Mode.**
17 | * **Feature:** When using an API provider, users can configure different models for different tasks (Title Generation, Chat/Analysis, and Chart Data Extraction) to optimize for cost, speed, and capability.
18 | * **Implementation:** An `APISettingsDialog` was created, allowing users to enter their API credentials, load a list of available models from the endpoint, and assign a specific model to each task category.
19 |
20 | #### Changed
21 |
22 | * **Overhauled the Model and Provider Selection UX.**
23 | * **Problem:** The previous UI design for model selection was confusing, with separate and sometimes hidden buttons for different providers. This created a frustrating and non-discoverable user experience.
24 | * **Solution:** The toolbar has been redesigned with a clear, two-part system:
25 | 1. A "Mode" dropdown to explicitly select the provider (`Ollama (Local)` or `API Endpoint`).
26 | 2. A single, consistently visible "Settings" button. This button is now context-aware and will open the appropriate configuration dialog (`ModelSelectionDialog` for Ollama or `APISettingsDialog` for an API) based on the currently selected mode. This resolves all ambiguity and makes the feature intuitive to use.
27 |
28 | #### Fixed
29 |
30 | * **Fixed a critical UI bug where the API settings button was permanently invisible.**
31 | * **Problem:** A logic error in the toolbar setup (`self.api_settings_btn.setVisible(False)`) caused the button to configure the API to be hidden, making the entire feature inaccessible.
32 | * **Solution:** The erroneous line was removed, and the toolbar was refactored to use the new unified "Settings" button, ensuring the correct dialog is always accessible.
33 | * **Fixed an architectural violation where UI components were defined outside of `graphite_ui.py`.**
34 | * **Problem:** The `APISettingsDialog` was incorrectly defined within the main application file (`graphite_app.py`), breaking the project's modular structure.
35 | * **Solution:** The `APISettingsDialog` class has been moved to its proper location within `graphite_ui.py`, restoring the architectural integrity and separation of concerns.
36 |
37 | ---
38 |
39 | ### `[Beta v.0.2.1]` - 2024-05-25
40 |
41 | This release focuses on critical stability and functionality fixes, primarily addressing issues within the API provider integration and the core UI rendering system.
42 |
43 | #### Fixed
44 |
45 | * **Fixed a critical application crash when loading models from the Google Gemini API.**
46 | * **Problem:** The application would crash with a `TypeError` when fetching the model list from Gemini. A deep-dive analysis revealed this was due to a fundamental incompatibility between the installed `google-generativeai` library and the current data structure of the API's response. The previous design, which relied on dynamic model discovery, proved to be too brittle for this provider.
47 | * **Solution:** The architecture for the Gemini provider has been completely refactored. The brittle dynamic discovery process has been removed and replaced with a static, hard-coded list of known-stable Gemini models. The API Settings UI now intelligently adapts, hiding non-functional controls (e.g., "Load Models") for the Gemini provider and populating the model list instantly and reliably.
48 |
49 | * **Fixed a critical logic error preventing chat messages from being sent to Gemini models.**
50 | * **Problem:** After successfully configuring a Gemini model, all chat attempts would fail with an "Error: message cannot be blank."
51 | * **Solution:** The root cause was identified in the `api_provider.chat()` function, where a `pop()` operation was incorrectly removing the user's current message from the conversation history before the payload was sent to the API. This line has been removed, ensuring the complete and correct conversation context is now sent with every request.
52 |
53 | * **Fixed a fatal application crash on startup due to incorrect UI class definition order.**
54 | * **Problem:** A regression from a previous refactor caused the application to crash on launch with an `ImportError` and underlying `NameError` exceptions. Classes within `graphite_ui.py` (e.g., `ColorPickerDialog`, `ConnectionItem`) were being referenced before they were defined, halting the module loading process.
55 | * **Solution:** The class definitions within `graphite_ui.py` have been meticulously reordered to ensure a correct, top-to-bottom dependency resolution. All classes are now defined before they are instantiated or referenced, resolving the startup crash.
56 |
57 | * **Fixed a fatal UI rendering crash related to the `ChartItem` class.**
58 | * **Problem:** The application would enter a crash loop during the UI paint event, raising an `AttributeError` for a non-existent `QPainter.RenderHint` (`HighQualityAntialiasing`).
59 | * **Solution:** The erroneous line has been removed from the `ChartItem.paint()` method. Rendering quality is preserved by other existing and correct render hints (`SmoothPixmapTransform`), and the crash is fully resolved.
60 |
61 | #### Changed
62 |
63 | * **Updated the static model list for the Google Gemini provider.**
64 | * The hard-coded list of Gemini models was updated to include the latest stable releases available through the public API, including the `gemini-2.5-pro` and `gemini-2.5-flash` series, ensuring users have access to current and powerful models.
65 |
66 | ---
67 |
68 | ### `[Beta v.0.2.0]` - 2024-05-23
69 |
70 | #### Architectural Refactor
71 |
72 | * This update introduces a major architectural refactoring to improve the project's structure, maintainability, and scalability. The application has been transitioned from a single, monolithic script into a modular, multi-file structure with a clear separation of concerns. The primary goal of this refactor was to decouple the User Interface, Core Logic, and AI Agent services from one another.
73 |
74 | * The new project structure is as follows:
75 | * `graphite_app.py`: Serves as the main application entry point. It contains the primary `ChatWindow` class and is responsible for initializing and launching the application.
76 | * `graphite_ui.py`: Consolidates all classes related to the User Interface layer. This includes all Qt widgets, dialogs, custom graphics items for the scene (`ChatNode`, `ConnectionItem`, `Frame`, etc.), and view components (`ChatView`, `ChatScene`).
77 | * `graphite_core.py`: Manages the application's core logic and data persistence. It contains the `ChatDatabase` class for all SQLite operations and the `ChatSessionManager` for handling the serialization and deserialization of chat sessions.
78 | * `graphite_agents.py`: Isolates all logic related to AI model interaction. This module contains the base `ChatAgent` as well as specialized tool agents (`KeyTakeawayAgent`, `ExplainerAgent`, `ChartDataAgent`) and their corresponding `QThread` workers for asynchronous processing.
79 | * `graphite_config.py`: A centralized location for application-wide configuration constants, such as task identifiers and default model names.
80 | * `api_provider.py`: A dedicated module to abstract away the differences between various AI providers (Ollama, OpenAI-compatible, Google Gemini), presenting a unified interface to the rest of the application.
81 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Graphite: A Visual Node-Based LLM Interface
2 |
10 | Graphite is an advanced desktop environment for human–AI collaboration. It transforms ordinary chat into a visual reasoning space where ideas branch, connect, and evolve. Built with Python and PySide6, it integrates local Ollama models, Google Gemini, and any OpenAI-compatible endpoint (such as OpenAI, Groq, or OpenRouter), offering a secure, local-first, and deeply intuitive workspace for research, creation, and thought.
11 |
12 | ---
13 |
14 | ## Table of Contents
15 |
16 | - [Overview](#overview)
17 | - [Key Features](#key-features)
18 | - [Gallery](#gallery)
19 | - [Technical Architecture](#technical-architecture)
20 | - [Technology Stack](#technology-stack)
21 | - [Installation and Setup](#installation-and-setup)
22 | - [Usage](#usage)
23 | - [Contributing](#contributing)
24 | - [License](#license)
25 |
26 | ## Overview
27 |
28 | Traditional chatbot interfaces confine conversations to a single, chronological timeline. This linear format stifles creativity and makes it difficult to revisit, branch, or organize complex lines of thought.
29 |
30 | Graphite solves this by treating every conversation as an interactive mind-map on an infinite canvas. Each prompt and response becomes a distinct node, visually connected to form a branching graph of your entire dialogue. This unique approach allows you to trace the evolution of ideas, explore multiple paths from any point in the conversation, and build a rich, interconnected knowledge base—all while ensuring your data remains completely private on your local machine.
31 |
32 | ## Key Features
33 |
34 | - **Node-Based Visual Interface:** Move beyond linear text logs. Every interaction is a movable, selectable node on an infinite canvas.
35 | - **Non-Linear Conversation Flow:** Branch your conversation from any previous node to explore alternative ideas without losing context.
36 | - **Local and Private LLM Integration:** In the default **Ollama** mode, all AI processing happens locally; your conversations are never sent to the cloud, keeping your data fully private.
37 | - **Flexible Model Selection:** Choose from a list of popular preset models or specify any custom model compatible with Ollama. The application validates and ensures the model is available locally before use.
38 | - **Rich Organizational Tools:**
39 | - **Frames:** Group related nodes into logical clusters with customizable titles and colors.
40 | - **Notes:** Add persistent, editable sticky notes to the canvas for annotations and reminders.
41 | - **Navigation Pins:** Drop pins on important nodes and access them instantly from a dedicated overlay, creating a table of contents for your canvas.
42 | - **AI-Powered Content Generation:**
43 |   - **Chart Generation:** Ask the AI to summarize data and generate `matplotlib` charts (Bar, Line, Pie, Histogram, and Sankey) directly on the canvas (see the example data format after this list).
44 | - **Key Takeaways & Explainers:** Right-click any node to generate a concise summary or a simplified explanation, which appear as new, formatted notes.
45 | - **Advanced View and Canvas Controls:**
46 | - **Infinite Canvas:** Pan and zoom freely across a vast workspace.
47 | - **Custom UI Controls:** Fine-tune grid snapping, panning speed, and zoom levels.
48 | - **Comprehensive Session Management:**
49 | - **Chat Library:** Save, load, rename, and manage all your conversation canvases.
50 | - **Secure Local Database:** All sessions, including nodes, frames, notes, and pins, are stored securely in a local SQLite database.
51 |
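52 | Under the hood, the chart feature asks the `ChartDataAgent` to emit a strict JSON payload that is validated before rendering. A bar chart, for example, uses the following shape (taken from the agent's prompt in `graphite_agents.py`; the values here are illustrative):
53 |
54 | ```json
55 | {
56 |   "type": "bar",
57 |   "title": "Quarterly Revenue",
58 |   "labels": ["Q1", "Q2", "Q3", "Q4"],
59 |   "values": [120, 150, 170, 160],
60 |   "xAxis": "Quarter",
61 |   "yAxis": "Revenue (USD thousands)"
62 | }
63 | ```
64 |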
52 | ## Gallery
53 |
54 | *(Screenshots of the canvas, node interactions, and chart generation are embedded in the rendered README on GitHub.)*
55 |
76 | ---
77 |
78 | ## Technical Architecture
79 |
80 | Graphite is built on a modern, modular architecture designed for maintainability and scalability. The application is written in Python 3 and leverages the PySide6 framework for its cross-platform graphical user interface. The core principle is a clear separation of concerns, decoupling the UI, core logic, and AI services.
81 |
82 | The project is organized into the following key modules:
83 |
84 | - **`graphite_app.py`**: The main application entry point. It contains the primary `ChatWindow` class, which orchestrates the entire application, assembling the UI, initializing backend services, and handling main event loops.
85 |
86 | - **`graphite_ui.py`**: The complete User Interface layer. This module contains all Qt-based components, from the main window's structure to custom dialogs (`APISettingsDialog`, `ChatLibraryDialog`). It also defines all custom-rendered `QGraphicsItem` subclasses that make up the interactive canvas, including `ChatNode`, `ConnectionItem`, `Frame`, `Note`, and `ChartItem`.
87 |
88 | - **`graphite_core.py`**: The application's central nervous system, managing state and data persistence.
89 | * The `ChatSessionManager` handles the complex logic of serializing the entire scene graph (nodes, connections, frames, etc.) into a JSON format and deserializing it back into a live session.
90 | * The `ChatDatabase` class provides an interface to the local SQLite database, managing the storage and retrieval of saved chat sessions.
91 |
92 | - **`graphite_agents.py`**: This module isolates all logic related to AI-powered tasks. It contains the base `ChatAgent` for conversations and specialized "tool" agents like `KeyTakeawayAgent`, `ExplainerAgent`, and `ChartDataAgent`. Each agent runs its network requests within a dedicated `QThread` worker to ensure the UI remains responsive.
93 |
94 | - **`api_provider.py`**: A crucial abstraction layer that unifies communication with different AI model providers. It acts as a router, directing requests to a local Ollama instance, Google Gemini, or any OpenAI-compatible remote API based on the user's configuration, making the core application agnostic to the underlying LLM service. A brief usage sketch follows this list.
95 |
96 | - **`graphite_config.py`**: A centralized file for storing global configuration constants. It defines abstract task identifiers (e.g., `TASK_CHAT`, `TASK_CHART`) and default model names, providing a single source of truth for application-wide settings.
97 |
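98 | Because every agent goes through this single entry point, swapping providers requires no changes to agent code. A minimal sketch of the call, using the real `chat()` signature from `api_provider.py` (the message content is illustrative):
99 |
100 | ```python
101 | import graphite_config as config
102 | import api_provider
103 |
104 | # Routed to Ollama or the configured remote API based on the selected mode.
105 | response = api_provider.chat(
106 |     task=config.TASK_CHAT,
107 |     messages=[{'role': 'user', 'content': 'Summarize this node.'}],
108 | )
109 | print(response['message']['content'])
110 | ```
111 |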
98 |
99 | ## Technology Stack
100 |
101 | - **Language:** Python 3.8+
102 | - **UI Framework:** PySide6
103 | - **Local LLM Interface:** Ollama
104 | - **Charting Library:** Matplotlib
105 | - **Database:** SQLite
106 | - **Icons:** QtAwesome (FontAwesome)
107 |
108 | ## Installation and Setup
109 |
110 | Follow these steps to get Graphite running on your local machine.
111 |
112 | ### 1. Prerequisites
113 |
114 | - **Python:** Ensure you have Python 3.8 or newer installed.
115 | - **Ollama:** You must have **[Ollama](https://ollama.com/)** installed and running.
116 |
117 | ### 2. Install an LLM Model
118 |
119 | Before running Graphite, you need to pull a model for Ollama to use. The default is `qwen2.5:7b-instruct`. Open your terminal and run:
120 |
121 | ```bash
122 | ollama pull qwen2.5:7b-instruct
123 | ```
124 |
125 | You can use the in-app Model Selection dialog to choose and validate other models. Ensure the Ollama application is running in the background.
126 |
127 | ### 3. Clone and Install Dependencies
128 |
129 | 1. **Clone the repository:**
130 | ```bash
131 | git clone https://github.com/dovvnloading/Graphite.git
132 | cd Graphite
133 | ```
134 |
135 | 2. **Create and activate a virtual environment (recommended):**
136 | ```bash
137 | # For Windows
138 | python -m venv venv
139 | .\venv\Scripts\activate
140 |
141 | # For macOS/Linux
142 | python3 -m venv venv
143 | source venv/bin/activate
144 | ```
145 |
146 | 3. **Install the required Python packages:**
147 | ```bash
148 | pip install -r requirements.txt
149 | ```
150 | *(If a `requirements.txt` is not available, install manually: `pip install PySide6 ollama matplotlib qtawesome`)*
151 |
152 | ### 4. Run the Application
153 |
154 | Once the setup is complete, launch the application by running:
155 |
156 | ```bash
157 | python graphite_app.py
158 | ```
159 |
160 | ## Usage
161 |
162 | - **Sending Messages:** Type your message in the input box at the bottom and press Enter or click the send button. A new user node will appear, followed by a connected AI response node.
163 | - **Setting Context:** To branch the conversation, simply click on any previous node. The input box will indicate it's your new context. Your next message will create a new branch from that selected node.
164 | - **Interacting with Nodes:**
165 | - **Move:** Click and drag any node to reposition it.
166 | - **Select:** Click a node to select it, or drag a selection box to select multiple nodes.
167 | - **Context Menu:** Right-click a node to access options like copying text, generating takeaways, creating charts, or deleting the node.
168 | - **Keyboard Shortcuts:**
169 | - `Ctrl + F`: Create a Frame around selected nodes.
170 | - `Ctrl + N`: Create a new Note at the cursor's position.
171 | - `Delete`: Delete any selected item (node, frame, note, etc.).
172 | - `Ctrl + S`: Save the current chat session.
173 | - `Ctrl + L`: Open the Chat Library.
174 |
175 | ---
176 | *(Resolved)* Known issue: chart generation was often brittle and unstable. Larger models handle chart data far better than smaller ones, though their system requirements are significantly more demanding; coding-focused models also improve chart accuracy.
177 | ---
178 |
179 | ## Contributing
180 |
181 | Contributions are welcome! If you'd like to contribute, please follow these steps:
182 |
183 | 1. Fork the repository.
184 | 2. Create a new branch for your feature or bug fix (`git checkout -b feature/your-feature-name`).
185 | 3. Make your changes and commit them with clear, descriptive messages.
186 | 4. Push your changes to your forked repository.
187 | 5. Create a pull request, detailing the changes you made and why.
188 |
189 | Please open an issue first to discuss any major changes or new features.
190 |
191 | ## License
192 |
193 | This project is licensed under the **MIT License**. See the `LICENSE` file for more details.
194 |
--------------------------------------------------------------------------------
/graphite_app/graphite_app/graphite_agents.py:
--------------------------------------------------------------------------------
1 | import ollama
2 | import json
3 | from PySide6.QtCore import QThread, Signal, QPointF
4 | import graphite_config as config
5 | import api_provider
6 |
7 | class ChatWorkerThread(QThread):
8 | finished = Signal(str)
9 | error = Signal(str)
10 |
11 | def __init__(self, agent, message, conversation_history):
12 | super().__init__()
13 | self.agent = agent
14 | self.message = message
15 | self.conversation_history = conversation_history
16 |
17 | def run(self):
18 | try:
19 | # Use agent's get_response directly
20 | response = self.agent.get_response(self.message)
21 | self.finished.emit(response)
22 | except Exception as e:
23 | self.error.emit(str(e))
24 |
25 | class ChatWorker:
26 | def __init__(self, system_prompt, conversation_history):
27 | self.system_prompt = system_prompt
28 | self.conversation_history = conversation_history
29 |
30 | def run(self, user_message):
31 | try:
32 | messages = [
33 | {'role': 'system', 'content': self.system_prompt},
34 | *self.conversation_history,
35 | {'role': 'user', 'content': user_message}
36 | ]
37 | response = api_provider.chat(task=config.TASK_CHAT, messages=messages)
38 | ai_message = response['message']['content']
39 | return ai_message
40 | except Exception as e:
41 | return f"Error: {str(e)}"
42 |
43 | class ChatAgent:
44 | def __init__(self, name, persona):
45 | self.name = name or "AI Assistant"
46 | self.persona = persona or "(default persona)"
47 | self.system_prompt = f"You are {self.name}. {self.persona}"
48 | self.conversation_history = []
49 |
50 | def get_response(self, user_message):
51 | chat_worker = ChatWorker(self.system_prompt, self.conversation_history)
52 | ai_response = chat_worker.run(user_message)
53 | self.conversation_history.append({'role': 'user', 'content': user_message})
54 | self.conversation_history.append({'role': 'assistant', 'content': ai_response})
55 | return ai_response
56 |
57 | class ExplainerAgent:
58 | def __init__(self):
59 | self.system_prompt = """You are an expert at explaining complex topics in simple terms. Follow these principles in order:
60 |
61 | 1. Simplification: Break down complex ideas into their most basic form
62 | 2. Clarification: Remove any technical jargon or complex terminology
63 | 3. Distillation: Extract only the most important concepts
64 | 4. Breakdown: Present information in small, digestible chunks
65 | 5. Simple Language: Use everyday words and short sentences
66 |
67 | Always use:
68 | - Analogies: Connect ideas to everyday experiences
69 | - Metaphors: Compare complex concepts to simple, familiar things
70 |
71 | Format your response exactly like this:
72 |
73 | Simple Explanation
74 | [2-3 sentence overview using everyday language]
75 |
76 | Think of it Like This:
77 | [Add one clear analogy or metaphor that a child would understand]
78 |
79 | Key Parts:
80 | • [First simple point]
81 | • [Second simple point]
82 | • [Third point if needed]
83 |
84 | Remember: Write as if explaining to a curious 5-year-old. No technical terms, no complex words."""
85 |
86 | def clean_text(self, text):
87 | """Clean special characters and format text"""
88 | # Remove markdown and special characters
89 | replacements = [
90 | ('```', ''),
91 | ('`', ''),
92 | ('**', ''),
93 | ('__', ''),
94 | ('*', ''),
95 | ('_', ''),
96 | ('•', '•'),
97 | ('→', '->'),
98 | ('\n\n\n', '\n\n'),
99 | ]
100 |
101 | cleaned = text
102 | for old, new in replacements:
103 | cleaned = cleaned.replace(old, new)
104 |
105 | # Split into lines and clean each line
106 | lines = cleaned.split('\n')
107 | cleaned_lines = []
108 |
109 | for line in lines:
110 | line = line.strip()
111 | if line:
112 | if line.lstrip().startswith('-'):
113 | line = '• ' + line.lstrip('- ')
114 | cleaned_lines.append(line)
115 |
116 | # Rebuild text with proper spacing
117 | formatted = ''
118 | in_bullet_list = False
119 |
120 | for i, line in enumerate(cleaned_lines):
121 | # Handle title
122 | if i == 0 and "Simple Explanation" not in line:
123 | formatted += "Simple Explanation\n"
124 |
125 | # Add line with proper spacing
126 | if line.startswith('•'):
127 | if not in_bullet_list:
128 | formatted += '\n' if formatted else ''
129 | in_bullet_list = True
130 | formatted += line + '\n'
131 | elif any(section in line for section in ['Think of it Like This:', 'Key Parts:']):
132 | formatted += '\n' + line + '\n'
133 | else:
134 | in_bullet_list = False
135 | formatted += line + '\n'
136 |
137 | return formatted.strip()
138 |
139 | def get_response(self, text):
140 | messages = [
141 | {'role': 'system', 'content': self.system_prompt},
142 | {'role': 'user', 'content': f"Explain this in simple terms: {text}"}
143 | ]
144 | response = api_provider.chat(task=config.TASK_CHAT, messages=messages)
145 | raw_response = response['message']['content']
146 |
147 | # Clean and format the response
148 | formatted_response = self.clean_text(raw_response)
149 | return formatted_response
150 |
151 | class KeyTakeawayAgent:
152 | def __init__(self):
153 | self.system_prompt = """You are a key takeaway generator. Format your response exactly like this:
154 |
155 | Key Takeaway
156 | [1-2 sentence overview]
157 |
158 | Main Points:
159 | • [First key point]
160 | • [Second key point]
161 | • [Third key point if needed]
162 |
163 | Keep total output under 150 words. Be direct and focused on practical value.
164 | No markdown formatting, no special characters."""
165 |
166 | def clean_text(self, text):
167 | """Clean special characters and format text"""
168 | # Remove markdown and special characters
169 | replacements = [
170 | ('```', ''), # code blocks
171 | ('`', ''), # inline code
172 | ('**', ''), # bold
173 | ('__', ''), # alternate bold
174 | ('*', ''), # italic/bullet
175 | ('_', ''), # alternate italic
176 | ('•', '•'), # standardize bullets
177 | ('→', '->'), # standardize arrows
178 | ('\n\n\n', '\n\n'), # remove extra newlines
179 | ]
180 |
181 | cleaned = text
182 | for old, new in replacements:
183 | cleaned = cleaned.replace(old, new)
184 |
185 | # Split into lines and clean each line
186 | lines = cleaned.split('\n')
187 | cleaned_lines = []
188 |
189 | for line in lines:
190 | line = line.strip()
191 | if line:
192 | # Ensure bullet points are properly formatted
193 | if line.lstrip().startswith('-'):
194 | line = '• ' + line.lstrip('- ')
195 | cleaned_lines.append(line)
196 |
197 | # Rebuild text with proper spacing
198 | formatted = ''
199 | in_bullet_list = False
200 |
201 | for i, line in enumerate(cleaned_lines):
202 | # Handle title
203 | if i == 0 and "Key Takeaway" not in line:
204 | formatted += "Key Takeaway\n"
205 |
206 | # Add line with proper spacing
207 | if line.startswith('•'):
208 | if not in_bullet_list:
209 | formatted += '\n' if formatted else ''
210 | in_bullet_list = True
211 | formatted += line + '\n'
212 | elif 'Main Points:' in line:
213 | formatted += '\n' + line + '\n'
214 | else:
215 | in_bullet_list = False
216 | formatted += line + '\n'
217 |
218 | return formatted.strip()
219 |
220 | def get_response(self, text):
221 | messages = [
222 | {'role': 'system', 'content': self.system_prompt},
223 | {'role': 'user', 'content': f"Generate key takeaways from this text: {text}"}
224 | ]
225 | response = api_provider.chat(task=config.TASK_CHAT, messages=messages)
226 | raw_response = response['message']['content']
227 |
228 | # Clean and format the response
229 | formatted_response = self.clean_text(raw_response)
230 | return formatted_response
231 |
232 | class KeyTakeawayWorkerThread(QThread):
233 | finished = Signal(str, QPointF) # Signal includes response and node position
234 | error = Signal(str)
235 |
236 | def __init__(self, agent, text, node_pos):
237 | super().__init__()
238 | self.agent = agent
239 | self.text = text
240 | self.node_pos = node_pos
241 | self._is_running = False
242 |
243 | def run(self):
244 | try:
245 | self._is_running = True
246 | response = self.agent.get_response(self.text)
247 | if self._is_running: # Check if we should still emit
248 | self.finished.emit(response, self.node_pos)
249 | except Exception as e:
250 | if self._is_running: # Check if we should still emit
251 | self.error.emit(str(e))
252 | finally:
253 | self._is_running = False
254 |
255 | def stop(self):
256 | """Safely stop the thread"""
257 | self._is_running = False
258 |
259 | class ChartDataAgent:
260 | def __init__(self):
261 | self.system_prompt = """You are a data extraction agent that converts text into chart data. Always output valid JSON with these structures:
262 |
263 | For histograms:
264 | {
265 | "type": "histogram",
266 | "title": "Chart Title",
267 | "values": [numeric values],
268 | "bins": 10,
269 | "xAxis": "X Axis Label",
270 | "yAxis": "Frequency"
271 | }
272 |
273 | For bar charts:
274 | {
275 | "type": "bar",
276 | "title": "Chart Title",
277 | "labels": ["label1", "label2", ...],
278 | "values": [numeric values],
279 | "xAxis": "X Axis Label",
280 | "yAxis": "Y Axis Label"
281 | }
282 |
283 | For line charts:
284 | {
285 | "type": "line",
286 | "title": "Chart Title",
287 | "labels": ["label1", "label2", ...],
288 | "values": [numeric values],
289 | "xAxis": "X Axis Label",
290 | "yAxis": "Y Axis Label"
291 | }
292 |
293 | For pie charts:
294 | {
295 | "type": "pie",
296 | "title": "Chart Title",
297 | "labels": ["label1", "label2", ...],
298 | "values": [numeric values]
299 | }
300 |
301 | For Sankey diagrams:
302 | {
303 | "type": "sankey",
304 | "title": "Flow Diagram",
305 | "data": {
306 | "nodes": [
307 | {"name": "Node1"},
308 | {"name": "Node2"},
309 | ...
310 | ],
311 | "links": [
312 | {"source": 0, "target": 1, "value": 100},
313 | {"source": 1, "target": 2, "value": 50},
314 | ...
315 | ]
316 | }
317 | }
318 |
319 | IMPORTANT:
320 | - Always include ALL required fields for the specified chart type
321 | - For non-histogram charts, ALWAYS include 'labels' array matching the length of 'values'
322 | - For Sankey diagrams, ensure proper node indexing and valid links
323 | - All numeric values must be valid numbers
324 | - If you cannot extract proper data, return an error object"""
325 |
326 | def clean_response(self, text):
327 | """Clean the LLM response to ensure valid JSON"""
328 | # Remove any markdown code blocks
329 | text = text.replace("```json", "").replace("```", "").strip()
330 |
331 | # Remove any explanatory text before/after the JSON object.
332 | # str.find/str.rfind return -1 instead of raising, so no
333 | # try/except is needed around this slice.
334 | start = text.find('{')
335 | end = text.rfind('}') + 1
336 | if start >= 0 and end > start:
337 | text = text[start:end]
339 |
340 | return text
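# Example (illustrative): a fenced reply such as
#   '```json\n{"type": "pie", ...}\n```'
# comes back as the bare '{"type": "pie", ...}' string, ready for json.loads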
341 |
342 | def validate_chart_data(self, data, chart_type):
343 | """Validate chart data based on type and requirements"""
344 | try:
345 | if chart_type == 'sankey':
346 | if not all(key in data for key in ['type', 'title', 'data']):
347 | return False, "Missing required fields (type, title, or data)"
348 |
349 | if not all(key in data['data'] for key in ['nodes', 'links']):
350 | return False, "Missing nodes or links in data"
351 |
352 | nodes = data['data']['nodes']
353 | links = data['data']['links']
354 |
355 | # Validate nodes
356 | if not nodes:
357 | return False, "No nodes provided"
358 |
359 | node_names = set()
360 | for i, node in enumerate(nodes):
361 | if 'name' not in node:
362 | return False, f"Node {i} missing name"
363 | if node['name'] in node_names:
364 | return False, f"Duplicate node name: {node['name']}"
365 | node_names.add(node['name'])
366 |
367 | # Validate links
368 | if not links:
369 | return False, "No links provided"
370 |
371 | node_count = len(nodes)
372 | for i, link in enumerate(links):
373 | if not all(key in link for key in ['source', 'target', 'value']):
374 | return False, f"Link {i} missing required fields"
375 |
376 | if not isinstance(link['value'], (int, float)) or link['value'] <= 0:
377 | return False, f"Link {i} has invalid value"
378 |
379 | if not isinstance(link['source'], int) or not isinstance(link['target'], int):
380 | return False, "Link source and target must be node indices"
381 |
382 | if not (0 <= link['source'] < node_count and 0 <= link['target'] < node_count):
383 | return False, f"Link {i} references invalid node index"
384 |
385 | if link['source'] == link['target']:
386 | return False, f"Link {i} has same source and target"
387 |
388 | elif chart_type == 'histogram':
389 | required = ['type', 'title', 'values', 'bins', 'xAxis', 'yAxis']
390 | if not all(key in data for key in required):
391 | return False, f"Missing required fields for histogram: {[key for key in required if key not in data]}"
392 | if not isinstance(data['bins'], (int, float)):
393 | return False, "Bins must be a number"
394 |
395 | elif chart_type in ['bar', 'line']:
396 | required = ['type', 'title', 'labels', 'values', 'xAxis', 'yAxis']
397 | if not all(key in data for key in required):
398 | return False, f"Missing required fields for {chart_type} chart: {[key for key in required if key not in data]}"
399 | if not isinstance(data.get('labels', []), list):
400 | return False, "Labels must be a list"
401 | if len(data['labels']) != len(data['values']):
402 | return False, "Labels and values must have the same length"
403 |
404 | elif chart_type == 'pie':
405 | required = ['type', 'title', 'labels', 'values']
406 | if not all(key in data for key in required):
407 | return False, f"Missing required fields for pie chart: {[key for key in required if key not in data]}"
408 | if not isinstance(data.get('labels', []), list):
409 | return False, "Labels must be a list"
410 | if len(data['labels']) != len(data['values']):
411 | return False, "Labels and values must have the same length"
412 |
413 | # Validate numeric values for non-Sankey charts
414 | if chart_type != 'sankey':
415 | try:
416 | if isinstance(data['values'], list):
417 | data['values'] = [float(v) for v in data['values']]
418 | except (ValueError, TypeError):
419 | return False, "All values must be numeric"
420 |
421 | return True, None
422 |
423 | except Exception as e:
424 | return False, f"Validation error: {str(e)}"
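# Example (illustrative): {"type": "bar", "title": "T", "labels": ["a", "b"],
# "values": [1, 2], "xAxis": "X", "yAxis": "Y"} passes for chart_type='bar',
# while {"type": "bar", "labels": ["a"]} fails with a missing-fields message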
425 |
426 | def process_sankey_data(self, raw_data):
427 | """Convert raw Sankey data into proper format"""
428 | try:
429 | # Extract unique nodes from flows
430 | nodes = []
431 | node_indices = {}
432 |
433 | for flow in raw_data:
434 | for node_name in [flow['source'], flow['target']]:
435 | if node_name not in node_indices:
436 | node_indices[node_name] = len(nodes)
437 | nodes.append({"name": node_name})
438 |
439 | # Create links using node indices
440 | links = [
441 | {
442 | "source": node_indices[flow['source']],
443 | "target": node_indices[flow['target']],
444 | "value": flow['value']
445 | }
446 | for flow in raw_data
447 | ]
448 |
449 | return {
450 | "nodes": nodes,
451 | "links": links
452 | }
453 | except Exception as e:
454 | raise ValueError(f"Error processing Sankey data: {str(e)}")
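# Example (illustrative): [{"source": "A", "target": "B", "value": 5}]
# becomes {"nodes": [{"name": "A"}, {"name": "B"}],
#          "links": [{"source": 0, "target": 1, "value": 5}]}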
455 |
456 | def get_response(self, text, chart_type):
457 | """Extract chart data from text"""
458 | try:
459 | messages = [
460 | {'role': 'system', 'content': self.system_prompt},
461 | {'role': 'user', 'content': f"Create a {chart_type} chart from this text. Only return the JSON data: {text}"}
462 | ]
463 |
464 | # Route through the chart-specific task so config can select a code/JSON-oriented model
465 | response = api_provider.chat(task=config.TASK_CHART, messages=messages)
466 | cleaned_response = self.clean_response(response['message']['content'])
467 |
468 | # Parse JSON
469 | try:
470 | data = json.loads(cleaned_response)
471 | except json.JSONDecodeError:
472 | return json.dumps({"error": "Invalid JSON response from model"})
473 |
474 | # Special handling for Sankey diagrams: convert a flat 'flows' list
475 | # into the nodes/links structure BEFORE validation, since the
476 | # validator expects the 'data' key to be present for sankey charts
477 | if chart_type == 'sankey' and 'flows' in data:
478 | try:
479 | data['data'] = self.process_sankey_data(data['flows'])
480 | del data['flows']
481 | except ValueError as e:
482 | return json.dumps({"error": str(e)})
483 |
484 | # Validate data
485 | is_valid, error_message = self.validate_chart_data(data, chart_type)
486 | if not is_valid:
487 | return json.dumps({"error": error_message})
488 |
489 | # Return validated data
490 | return json.dumps(data)
489 |
490 | except Exception as e:
491 | return json.dumps({"error": f"Data extraction failed: {str(e)}"})
492 |
493 | class ChartWorkerThread(QThread):
494 | finished = Signal(str, str)
495 | error = Signal(str)
496 |
497 | def __init__(self, text, chart_type):
498 | super().__init__()
499 | self.agent = ChartDataAgent()
500 | self.text = text
501 | self.chart_type = chart_type
502 |
503 | def run(self):
504 | try:
505 | data = self.agent.get_response(self.text, self.chart_type)
506 | # Parse to confirm the agent returned valid JSON and surface any error payload
507 | parsed = json.loads(data)
508 | if 'error' in parsed:
509 | raise ValueError(parsed['error'])
510 | self.finished.emit(data, self.chart_type)
511 | except Exception as e:
512 | self.error.emit(str(e))
513 |
514 | class ExplainerWorkerThread(QThread):
515 | finished = Signal(str, QPointF)
516 | error = Signal(str)
517 |
518 | def __init__(self, agent, text, node_pos):
519 | super().__init__()
520 | self.agent = agent
521 | self.text = text
522 | self.node_pos = node_pos
523 | self._is_running = False
524 |
525 | def run(self):
526 | try:
527 | self._is_running = True
528 | response = self.agent.get_response(self.text)
529 | if self._is_running:
530 | self.finished.emit(response, self.node_pos)
531 | except Exception as e:
532 | if self._is_running:
533 | self.error.emit(str(e))
534 | finally:
535 | self._is_running = False
536 |
537 | def stop(self):
538 | """Safely stop the thread"""
539 | self._is_running = False
540 |
541 | class ModelPullWorkerThread(QThread):
542 | status_update = Signal(str)
543 | finished = Signal(str, str)
544 | error = Signal(str)
545 |
546 | def __init__(self, model_name):
547 | super().__init__()
548 | self.model_name = model_name
549 |
550 | def run(self):
551 | try:
552 | self.status_update.emit(f"Ensuring model '{self.model_name}' is available...")
553 |
554 | ollama.pull(self.model_name)
555 |
556 | self.finished.emit(f"Model '{self.model_name}' is ready to use.", self.model_name)
557 |
558 | except Exception as e:
559 | error_message = str(e)
560 | if "not found" in error_message.lower():
561 | self.error.emit(f"Model '{self.model_name}' not found on the Ollama hub. Please check the name for typos.")
562 | elif "connection refused" in error_message.lower():
563 | self.error.emit("Connection to Ollama server failed. Is Ollama running?")
564 | else:
565 | self.error.emit(f"An unexpected error occurred: {error_message}")
566 |
--------------------------------------------------------------------------------
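The ChartDataAgent's cleaning and validation helpers above are pure Python and can be sanity-checked without a running model. A minimal sketch, assuming the package directory is on sys.path so graphite_agents' module-level imports resolve; none of the calls below touch api_provider:

```python
from graphite_agents import ChartDataAgent

agent = ChartDataAgent()

# Strip markdown fences the way get_response() does before json.loads
raw = '```json\n{"type": "pie", "title": "T", "labels": ["a"], "values": [1]}\n```'
print(agent.clean_response(raw))   # bare JSON object string

# A well-formed bar payload passes; a truncated one reports what is missing
ok, err = agent.validate_chart_data(
    {"type": "bar", "title": "T", "labels": ["a", "b"], "values": [1, 2],
     "xAxis": "X", "yAxis": "Y"}, "bar")
print(ok, err)                     # True None

ok, err = agent.validate_chart_data({"type": "bar", "labels": ["a"]}, "bar")
print(ok, err)                     # False plus a missing-fields message

# Flat source/target flows become the indexed nodes/links form
flows = [{"source": "A", "target": "B", "value": 5}]
print(agent.process_sankey_data(flows))
```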
/graphite_app/graphite_app/graphite_core.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sqlite3
3 | from datetime import datetime
4 | from pathlib import Path
5 | # (the ollama package is only used indirectly here, via api_provider)
6 | from PySide6.QtCore import QPointF
7 | from PySide6.QtGui import QTransform
8 |
9 | # Import UI classes needed for serialization/deserialization
10 | from graphite_ui import Note, NavigationPin, ChartItem, ConnectionItem, Frame
11 | import graphite_config as config
12 | import api_provider
13 |
14 | class TitleGenerator:
15 | def __init__(self):
16 | self.system_prompt = """You are a title generation assistant. Your only job is to create short,
17 | 2-3 word titles based on conversation content. Rules:
18 | - ONLY output the title, nothing else
19 | - Keep it between 2-3 words
20 | - Use title case
21 | - Make it descriptive but concise
22 | - NO punctuation
23 | - NO explanations
24 | - NO additional text"""
25 |
26 | def generate_title(self, message):
27 | try:
28 | messages = [
29 | {'role': 'system', 'content': self.system_prompt},
30 | {'role': 'user', 'content': f"Create a 2-3 word title for this message: {message}"}
31 | ]
32 | response = api_provider.chat(task=config.TASK_TITLE, messages=messages)
33 | title = response['message']['content'].strip()
34 | # Clean up title if needed
35 | title = ' '.join(title.split()[:3]) # Ensure max 3 words
36 | return title
37 | except Exception:
38 | # Fall back to a timestamped title if title generation fails
39 | return f"Chat {datetime.now().strftime('%Y%m%d_%H%M')}"
39 |
40 | class ChatDatabase:
41 | def __init__(self):
42 | self.db_path = Path.home() / '.graphite' / 'chats.db'
43 | self.db_path.parent.mkdir(parents=True, exist_ok=True)
44 | self.init_database()
45 |
46 | def init_database(self):
47 | with sqlite3.connect(self.db_path) as conn:
48 | # Existing chats table
49 | conn.execute("""
50 | CREATE TABLE IF NOT EXISTS chats (
51 | id INTEGER PRIMARY KEY AUTOINCREMENT,
52 | title TEXT NOT NULL,
53 | created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
54 | updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
55 | data TEXT NOT NULL
56 | )
57 | """)
58 |
59 | # Notes table
60 | conn.execute("""
61 | CREATE TABLE IF NOT EXISTS notes (
62 | id INTEGER PRIMARY KEY AUTOINCREMENT,
63 | chat_id INTEGER NOT NULL,
64 | content TEXT NOT NULL,
65 | position_x REAL NOT NULL,
66 | position_y REAL NOT NULL,
67 | width REAL NOT NULL,
68 | height REAL NOT NULL,
69 | color TEXT NOT NULL,
70 | header_color TEXT,
71 | FOREIGN KEY (chat_id) REFERENCES chats (id) ON DELETE CASCADE
72 | )
73 | """)
74 |
75 | # Add pins table
76 | conn.execute("""
77 | CREATE TABLE IF NOT EXISTS pins (
78 | id INTEGER PRIMARY KEY AUTOINCREMENT,
79 | chat_id INTEGER NOT NULL,
80 | title TEXT NOT NULL,
81 | note TEXT,
82 | position_x REAL NOT NULL,
83 | position_y REAL NOT NULL,
84 | FOREIGN KEY (chat_id) REFERENCES chats (id) ON DELETE CASCADE
85 | )
86 | """)
87 |
88 | def save_pins(self, chat_id, pins_data):
89 | """Save pins for a chat session"""
90 | with sqlite3.connect(self.db_path) as conn:
91 | # First delete existing pins for this chat
92 | conn.execute("DELETE FROM pins WHERE chat_id = ?", (chat_id,))
93 |
94 | # Insert new pins
95 | for pin_data in pins_data:
96 | conn.execute("""
97 | INSERT INTO pins (
98 | chat_id, title, note, position_x, position_y
99 | ) VALUES (?, ?, ?, ?, ?)
100 | """, (
101 | chat_id,
102 | pin_data['title'],
103 | pin_data['note'],
104 | pin_data['position']['x'],
105 | pin_data['position']['y']
106 | ))
107 |
108 | def load_pins(self, chat_id):
109 | """Load pins for a chat session"""
110 | with sqlite3.connect(self.db_path) as conn:
111 | cursor = conn.execute("""
112 | SELECT title, note, position_x, position_y
113 | FROM pins WHERE chat_id = ?
114 | """, (chat_id,))
115 |
116 | pins = []
117 | for row in cursor.fetchall():
118 | pins.append({
119 | 'title': row[0],
120 | 'note': row[1],
121 | 'position': {'x': row[2], 'y': row[3]}
122 | })
123 | return pins
124 |
125 | def save_notes(self, chat_id, notes_data):
126 | with sqlite3.connect(self.db_path) as conn:
127 | # First delete existing notes for this chat
128 | conn.execute("DELETE FROM notes WHERE chat_id = ?", (chat_id,))
129 |
130 | # Insert new notes
131 | for note_data in notes_data:
132 | conn.execute("""
133 | INSERT INTO notes (
134 | chat_id, content, position_x, position_y,
135 | width, height, color, header_color
136 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
137 | """, (
138 | chat_id,
139 | note_data['content'],
140 | note_data['position']['x'],
141 | note_data['position']['y'],
142 | note_data['size']['width'],
143 | note_data['size']['height'],
144 | note_data['color'],
145 | note_data.get('header_color')
146 | ))
147 |
148 | def load_notes(self, chat_id):
149 | with sqlite3.connect(self.db_path) as conn:
150 | cursor = conn.execute("""
151 | SELECT content, position_x, position_y, width, height,
152 | color, header_color
153 | FROM notes WHERE chat_id = ?
154 | """, (chat_id,))
155 |
156 | notes = []
157 | for row in cursor.fetchall():
158 | notes.append({
159 | 'content': row[0],
160 | 'position': {'x': row[1], 'y': row[2]},
161 | 'size': {'width': row[3], 'height': row[4]},
162 | 'color': row[5],
163 | 'header_color': row[6]
164 | })
165 | return notes
166 |
167 | def save_chat(self, title, chat_data):
168 | with sqlite3.connect(self.db_path) as conn:
169 | cursor = conn.execute("""
170 | INSERT INTO chats (title, data, updated_at)
171 | VALUES (?, ?, CURRENT_TIMESTAMP)
172 | """, (title, json.dumps(chat_data)))
173 | return cursor.lastrowid # Return the ID of the newly inserted chat
174 |
175 | def get_latest_chat_id(self):
176 | """Get the ID of the most recently created chat"""
177 | with sqlite3.connect(self.db_path) as conn:
178 | cursor = conn.execute("""
179 | SELECT id FROM chats
180 | ORDER BY created_at DESC
181 | LIMIT 1
182 | """)
183 | result = cursor.fetchone()
184 | return result[0] if result else None
185 |
186 | def update_chat(self, chat_id, title, chat_data):
187 | with sqlite3.connect(self.db_path) as conn:
188 | conn.execute("""
189 | UPDATE chats
190 | SET title = ?, data = ?, updated_at = CURRENT_TIMESTAMP
191 | WHERE id = ?
192 | """, (title, json.dumps(chat_data), chat_id))
193 |
194 | def load_chat(self, chat_id):
195 | with sqlite3.connect(self.db_path) as conn:
196 | result = conn.execute("""
197 | SELECT title, data FROM chats WHERE id = ?
198 | """, (chat_id,)).fetchone()
199 | if result:
200 | return {
201 | 'title': result[0],
202 | 'data': json.loads(result[1])
203 | }
204 | return None
205 |
206 | def get_all_chats(self):
207 | with sqlite3.connect(self.db_path) as conn:
208 | return conn.execute("""
209 | SELECT id, title, created_at, updated_at
210 | FROM chats
211 | ORDER BY updated_at DESC
212 | """).fetchall()
213 |
214 | def delete_chat(self, chat_id):
215 | with sqlite3.connect(self.db_path) as conn:
216 | # SQLite leaves foreign-key enforcement off by default; enable it so
217 | # the ON DELETE CASCADE clauses on notes/pins actually fire
218 | conn.execute("PRAGMA foreign_keys = ON")
219 | conn.execute("DELETE FROM chats WHERE id = ?", (chat_id,))
217 |
218 | def rename_chat(self, chat_id, new_title):
219 | with sqlite3.connect(self.db_path) as conn:
220 | conn.execute("""
221 | UPDATE chats
222 | SET title = ?, updated_at = CURRENT_TIMESTAMP
223 | WHERE id = ?
224 | """, (new_title, chat_id))
225 |
226 | class ChatSessionManager:
227 | def __init__(self, window):
228 | self.window = window
229 | self.db = ChatDatabase()
230 | self.title_generator = TitleGenerator()
231 | self.current_chat_id = None
232 |
233 | def serialize_pin(self, pin):
234 | """Convert a navigation pin to a serializable dictionary"""
235 | return {
236 | 'title': pin.title,
237 | 'note': pin.note,
238 | 'position': {'x': pin.pos().x(), 'y': pin.pos().y()}
239 | }
240 |
241 | def serialize_pin_layout(self, pin):
242 | """Convert a pin to a serializable dictionary"""
243 | return {
244 | 'position': {'x': pin.pos().x(), 'y': pin.pos().y()}
245 | }
246 |
247 | def serialize_connection(self, connection):
248 | """Convert a connection to a serializable dictionary"""
249 | return {
250 | 'start_node_index': self.window.chat_view.scene().nodes.index(connection.start_node),
251 | 'end_node_index': self.window.chat_view.scene().nodes.index(connection.end_node),
252 | 'pins': [self.serialize_pin_layout(pin) for pin in connection.pins]
253 | }
254 |
255 | def serialize_node(self, node):
256 | """Convert a ChatNode to a serializable dictionary"""
257 | return {
258 | 'text': node.text,
259 | 'is_user': node.is_user,
260 | 'position': {'x': node.pos().x(), 'y': node.pos().y()},
261 | 'conversation_history': node.conversation_history,
262 | 'children_indices': [self.window.chat_view.scene().nodes.index(child) for child in node.children],
263 | 'scroll_value': node.scroll_value
264 | }
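# Note: children, connections and frames are all stored as indices into
# scene.nodes, so deserialization must recreate nodes first, in the same
# order, before any index-based references are resolved (see load_chat)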
265 |
266 | def serialize_frame(self, frame):
267 | """Convert a Frame to a serializable dictionary"""
268 | return {
269 | 'nodes': [self.window.chat_view.scene().nodes.index(node) for node in frame.nodes],
270 | 'position': {'x': frame.pos().x(), 'y': frame.pos().y()},
271 | 'note': frame.note,
272 | 'size': {
273 | 'width': frame.rect.width(),
274 | 'height': frame.rect.height()
275 | },
276 | 'color': frame.color,
277 | 'header_color': frame.header_color
278 | }
279 |
280 | def serialize_note(self, note):
281 | """Convert a Note to a serializable dictionary"""
282 | return {
283 | 'content': note.content,
284 | 'position': {'x': note.pos().x(), 'y': note.pos().y()},
285 | 'size': {'width': note.width, 'height': note.height},
286 | 'color': note.color,
287 | 'header_color': note.header_color
288 | }
289 |
290 | def serialize_chart(self, chart):
291 | """Convert a ChartItem to a serializable dictionary"""
292 | return {
293 | 'data': chart.data,
294 | 'position': {'x': chart.pos().x(), 'y': chart.pos().y()},
295 | 'size': {'width': chart.width, 'height': chart.height}
296 | }
297 |
298 | def serialize_current_chat(self):
299 | """Serialize the current chat session with all elements"""
300 | scene = self.window.chat_view.scene()
301 |
302 | # Get all notes, pins, and charts in the scene
303 | notes = [item for item in scene.items() if isinstance(item, Note)]
304 | pins = [item for item in scene.items() if isinstance(item, NavigationPin)]
305 | charts = [item for item in scene.items() if isinstance(item, ChartItem)]
306 |
307 | chat_data = {
308 | 'nodes': [self.serialize_node(node) for node in scene.nodes],
309 | 'connections': [self.serialize_connection(conn) for conn in scene.connections],
310 | 'frames': [self.serialize_frame(frame) for frame in scene.frames],
311 | 'charts': [self.serialize_chart(chart) for chart in charts], # Add charts
312 | 'view_state': {
313 | 'zoom_factor': self.window.chat_view._zoom_factor,
314 | 'scroll_position': {
315 | 'x': self.window.chat_view.horizontalScrollBar().value(),
316 | 'y': self.window.chat_view.verticalScrollBar().value()
317 | }
318 | }
319 | }
320 |
321 | # Save chat data first
322 | if not self.current_chat_id:
323 | last_message = scene.nodes[-1].text if scene.nodes else "New Chat"
324 | title = self.title_generator.generate_title(last_message)
325 | self.current_chat_id = self.db.save_chat(title, chat_data)
326 | else:
327 | chat = self.db.load_chat(self.current_chat_id)
328 | if chat:
329 | self.db.update_chat(self.current_chat_id, chat['title'], chat_data)
330 | else:
331 | # Stored chat no longer exists (e.g. deleted from the library);
332 | # fall back to saving as a new chat
333 | last_message = scene.nodes[-1].text if scene.nodes else "New Chat"
334 | title = self.title_generator.generate_title(last_message)
335 | self.current_chat_id = self.db.save_chat(title, chat_data)
331 |
332 | # Now save notes and pins separately
333 | if self.current_chat_id:
334 | notes_data = [self.serialize_note(note) for note in notes]
335 | self.db.save_notes(self.current_chat_id, notes_data)
336 |
337 | pins_data = [self.serialize_pin(pin) for pin in pins]
338 | self.db.save_pins(self.current_chat_id, pins_data)
339 |
340 | return chat_data
341 |
342 | def deserialize_chart(self, data, scene):
343 | """Recreate a chart from serialized data"""
344 | chart = scene.add_chart(data['data'], QPointF(
345 | data['position']['x'],
346 | data['position']['y']
347 | ))
348 |
349 | if 'size' in data:
350 | chart.width = data['size']['width']
351 | chart.height = data['size']['height']
352 | chart.generate_chart() # Regenerate chart with new size
353 |
354 | return chart
355 |
356 | def deserialize_pin(self, data, connection):
357 | """Recreate a pin from serialized data"""
358 | pin = connection.add_pin(QPointF(0, 0)) # Create pin
359 | pin.setPos(data['position']['x'], data['position']['y'])
360 | return pin
361 |
362 | def deserialize_connection(self, data, scene):
363 | """Recreate a connection from serialized data"""
364 | start_node = scene.nodes[data['start_node_index']]
365 | end_node = scene.nodes[data['end_node_index']]
366 |
367 | # Find existing connection or create new one
368 | connection = None
369 | for conn in scene.connections:
370 | if conn.start_node == start_node and conn.end_node == end_node:
371 | connection = conn
372 | break
373 |
374 | if connection is None:
375 | connection = ConnectionItem(start_node, end_node)
376 | scene.addItem(connection)
377 | scene.connections.append(connection)
378 |
379 | # Recreate pins
380 | for pin_data in data['pins']:
381 | self.deserialize_pin(pin_data, connection)
382 |
383 | return connection
384 |
385 | def deserialize_node(self, data, nodes_map=None):
386 | """Convert serialized data back to ChatNode"""
387 | scene = self.window.chat_view.scene()
388 |
389 | # Create node without parent initially
390 | node = scene.add_chat_node(
391 | data['text'],
392 | is_user=data['is_user'],
393 | parent_node=None, # Important: No parent node initially
394 | conversation_history=data.get('conversation_history', [])
395 | )
396 |
397 | # Defensive cleanup: the node was created with parent_node=None above, so
398 | # add_chat_node should not have auto-created a connection; remove any that
399 | # exists, since connections are restored from the serialized data later
398 | if scene.connections and node.parent_node:
399 | for conn in scene.connections[:]: # Create a copy of the list to modify it
400 | if conn.end_node == node:
401 | scene.removeItem(conn)
402 | scene.connections.remove(conn)
403 | node.parent_node = None # Clear the parent reference
404 |
405 | # Restore position and scroll state
406 | node.setPos(data['position']['x'], data['position']['y'])
407 | node.scroll_value = data.get('scroll_value', 0)
408 | node.scrollbar.set_value(node.scroll_value)
409 |
410 | # Store in nodes map if provided
411 | if nodes_map is not None:
412 | nodes_map[len(scene.nodes) - 1] = node
413 |
414 | return node
415 |
416 | def deserialize_frame(self, data, scene):
417 | """Recreate a frame from serialized data"""
418 | nodes = [scene.nodes[i] for i in data['nodes']]
419 | frame = Frame(nodes)
420 | frame.setPos(data['position']['x'], data['position']['y'])
421 | frame.note = data['note']
422 |
423 | if 'color' in data:
424 | frame.color = data['color']
425 | if 'header_color' in data:
426 | frame.header_color = data['header_color']
427 |
428 | if 'size' in data:
429 | frame.rect.setWidth(data['size']['width'])
430 | frame.rect.setHeight(data['size']['height'])
431 |
432 | scene.addItem(frame)
433 | scene.frames.append(frame)
434 | frame.setZValue(-2)
435 | return frame
436 |
437 | def load_chat(self, chat_id):
438 | """Load a chat session with all elements including pins, charts, and notes"""
439 | chat = self.db.load_chat(chat_id)
440 | if not chat:
441 | return
442 |
443 | # Clear current scene
444 | scene = self.window.chat_view.scene()
445 | scene.clear()
446 | scene.nodes.clear()
447 | scene.connections.clear()
448 | scene.frames.clear()
449 | scene.pins.clear() # Ensure pins are cleared
450 | self.window.current_node = None # Reset stale reference to a node from the cleared scene
451 |
452 | try:
453 | # First pass: Create all nodes
454 | nodes_map = {} # Map to store node indices
455 | for node_data in chat['data']['nodes']:
456 | node = self.deserialize_node(node_data, nodes_map)
457 |
458 | # Second pass: Set up parent-child relationships
459 | for i, node_data in enumerate(chat['data']['nodes']):
460 | if 'children_indices' in node_data:
461 | node = nodes_map[i]
462 | for child_index in node_data['children_indices']:
463 | child_node = nodes_map[child_index]
464 | node.children.append(child_node)
465 | child_node.parent_node = node
466 |
467 | # Third pass: Create connections WITHOUT pins first
468 | connections_map = {} # Store connections for later pin addition
469 | if 'connections' in chat['data']:
470 | for i, conn_data in enumerate(chat['data']['connections']):
471 | start_node = scene.nodes[conn_data['start_node_index']]
472 | end_node = scene.nodes[conn_data['end_node_index']]
473 |
474 | # Check if connection already exists
475 | existing_conn = None
476 | for conn in scene.connections:
477 | if (conn.start_node == start_node and
478 | conn.end_node == end_node):
479 | existing_conn = conn
480 | break
481 |
482 | if not existing_conn:
483 | connection = ConnectionItem(start_node, end_node)
484 | scene.addItem(connection)
485 | scene.connections.append(connection)
486 | connections_map[i] = connection
487 | else:
488 | connections_map[i] = existing_conn
489 |
490 | # Fourth pass: Add pins to existing connections
491 | if 'connections' in chat['data']:
492 | for i, conn_data in enumerate(chat['data']['connections']):
493 | if i in connections_map and 'pins' in conn_data:
494 | connection = connections_map[i]
495 | # Clear any existing pins first
496 | for pin in connection.pins[:]:
497 | connection.remove_pin(pin)
498 | # Add stored pins
499 | for pin_data in conn_data['pins']:
500 | self.deserialize_pin(pin_data, connection)
501 |
502 | # Load frames
503 | if 'frames' in chat['data']:
504 | for frame_data in chat['data']['frames']:
505 | frame = self.deserialize_frame(frame_data, scene)
506 |
507 | # Load charts
508 | if 'charts' in chat['data']:
509 | for chart_data in chat['data']['charts']:
510 | self.deserialize_chart(chart_data, scene)
511 |
512 | # Load notes with proper error handling
513 | notes_data = self.db.load_notes(chat_id)
514 | for note_data in notes_data:
515 | try:
516 | note = scene.add_note(QPointF(
517 | note_data['position']['x'],
518 | note_data['position']['y']
519 | ))
520 | note.content = note_data['content']
521 | note.width = note_data['size']['width']
522 | note.height = note_data['size']['height']
523 | note.color = note_data['color']
524 | note.header_color = note_data['header_color']
525 | except Exception as e:
526 | print(f"Error loading note: {str(e)}")
527 | continue
528 |
529 | # Clear existing pins in overlay before loading new ones
530 | if self.window and hasattr(self.window, 'pin_overlay'):
531 | self.window.pin_overlay.clear_pins()
532 |
533 | # Load navigation pins with validation
534 | pins_data = self.db.load_pins(chat_id)
535 | for pin_data in pins_data:
536 | try:
537 | pin = scene.add_navigation_pin(QPointF(
538 | pin_data['position']['x'],
539 | pin_data['position']['y']
540 | ))
541 | pin.title = pin_data['title']
542 | pin.note = pin_data.get('note', '')
543 |
544 | # Add pin to overlay if window exists
545 | if self.window and hasattr(self.window, 'pin_overlay'):
546 | self.window.pin_overlay.add_pin_button(pin)
547 | except Exception as e:
548 | print(f"Error loading pin: {str(e)}")
549 | continue
550 |
551 | # Restore view state
552 | if 'view_state' in chat['data']:
553 | view_state = chat['data']['view_state']
554 | self.window.chat_view._zoom_factor = view_state['zoom_factor']
555 | self.window.chat_view.setTransform(QTransform().scale(
556 | view_state['zoom_factor'],
557 | view_state['zoom_factor']
558 | ))
559 | self.window.chat_view.horizontalScrollBar().setValue(
560 | view_state['scroll_position']['x']
561 | )
562 | self.window.chat_view.verticalScrollBar().setValue(
563 | view_state['scroll_position']['y']
564 | )
565 |
566 | # Set current chat ID and update connections
567 | self.current_chat_id = chat_id
568 | scene.update_connections()
569 |
570 | except Exception as e:
571 | print(f"Error loading chat: {str(e)}")
572 | # Clean up in case of error
573 | scene.clear()
574 | scene.nodes.clear()
575 | scene.connections.clear()
576 | scene.frames.clear()
577 | scene.pins.clear()
578 | if self.window and hasattr(self.window, 'pin_overlay'):
579 | self.window.pin_overlay.clear_pins()
580 | raise
581 |
582 | # Return loaded chat data
583 | return chat
584 |
585 | def save_current_chat(self):
586 | """Save the current chat session"""
587 | if not self.window.chat_view.scene().nodes:
588 | return
589 |
590 | try:
591 | # serialize_current_chat() already persists the graph, notes and
592 | # pins (creating the chat and generating a title when needed), so
593 | # no second round of saves is required here
594 | self.serialize_current_chat()
595 |
596 | except Exception as e:
597 | print(f"Error saving chat: {str(e)}")
598 | raise
613 |
--------------------------------------------------------------------------------
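The persistence layer above is plain sqlite3 and can be exercised on its own. A minimal sketch, assuming the package directory is on sys.path (importing graphite_core also pulls in graphite_ui and PySide6); note that ChatDatabase always writes to ~/.graphite/chats.db:

```python
from graphite_core import ChatDatabase

db = ChatDatabase()

# Persist a bare chat graph and read it back by id
chat_id = db.save_chat("Demo Chat", {"nodes": [], "connections": [], "frames": []})
print(db.load_chat(chat_id)["title"])    # Demo Chat

# Notes live in their own table, keyed by chat_id
db.save_notes(chat_id, [{
    "content": "remember this",
    "position": {"x": 0.0, "y": 0.0},
    "size": {"width": 200.0, "height": 150.0},
    "color": "#2d2d2d",
    "header_color": "#2ecc71",
}])
print(db.load_notes(chat_id))

db.delete_chat(chat_id)
```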
/graphite_app/graphite_app/graphite_app.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PySide6.QtWidgets import (
3 | QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QToolBar,
4 | QToolButton, QLineEdit, QPushButton, QMessageBox, QSizePolicy, QLabel, QComboBox
5 | )
6 | from PySide6.QtCore import Qt, QSize, QPointF
7 | from PySide6.QtGui import QKeySequence, QGuiApplication, QCursor, QShortcut
8 | import qtawesome as qta
9 | import json
10 | import os
11 |
12 | # Imports from new modules
13 | from graphite_ui import (
14 | StyleSheet, CustomTitleBar, PinOverlay, ChatView, LoadingOverlay,
15 | ChatLibraryDialog, HelpDialog, Note, ModelSelectionDialog, APISettingsDialog
16 | )
17 | from graphite_core import ChatSessionManager
18 | from graphite_agents import (
19 | ChatAgent, ExplainerAgent, KeyTakeawayAgent, ChartDataAgent,
20 | ChatWorkerThread, KeyTakeawayWorkerThread, ExplainerWorkerThread, ChartWorkerThread
21 | )
22 | import graphite_config as config
23 | import api_provider
24 |
25 | class ChatWindow(QMainWindow):
26 | def __init__(self):
27 | super().__init__()
28 | self.setWindowFlags(Qt.WindowType.FramelessWindowHint)
29 | self.setGeometry(100, 100, 1200, 800)
30 | self.setStyleSheet(StyleSheet.DARK_THEME)
31 | self.library_dialog = None
32 |
33 | # Initialize AI agent
34 | self.agent = ChatAgent("Graphite Assistant",
35 | """
36 | * You are a helpful AI assistant integrated within a program called Graphite.
37 | * Your purpose is to assist the user and provide high-quality, professional responses.
38 | * You have been provided with detailed knowledge about the Graphite application's features. When a user asks for help, use this knowledge to guide them clearly and concisely.
39 |
40 | --- Key Features of the Graphite Application ---
41 |
42 | **Core Concept: Node-Based Chat**
43 | * Conversations are visualized as a graph of connected nodes. Each message from the user or you is a new node. This allows for branching discussions and exploring multiple ideas in parallel.
44 |
45 | **Navigation & View Controls**
46 | * Panning the View: Hold the Middle Mouse Button and drag.
47 | * Zooming: Use Ctrl + Mouse Wheel, or the "Zoom In" / "Zoom Out" buttons in the toolbar.
48 | * Reset & Fit View: The toolbar has a "Reset" button to return to default zoom and a "Fit All" button to frame all existing nodes in the view.
49 |
50 | **Chat & Node Interaction**
51 | * Contextual Replies: When a user clicks on any node, it becomes the active context. Your next response will be created as a child of that selected node.
52 | * Node Tools (Right-Click Menu): Users can right-click any node to access powerful tools:
53 | - `Generate Key Takeaway`: Creates a concise summary of the node's text in a new green-headed note.
54 | - `Generate Explainer`: Simplifies the node's content into easy-to-understand terms in a new purple-headed note.
55 | - `Generate Chart`: Can visualize data from the node's text as a bar, line, pie chart, and more.
56 | - `Regenerate Response`: Allows the user to request a new version of one of your previous AI-generated messages.
57 |
58 | **Organization Tools**
59 | * Frames: Users can group related nodes by selecting them and pressing `Ctrl+F`. This creates a colored frame around them. The frame's title can be edited, and its color can be changed.
60 | * Notes: Users can create floating sticky notes anywhere on the canvas by pressing `Ctrl+N`.
61 | * Connections: The lines between nodes can be reshaped by adding 'pins' to them (Ctrl + Left-Click on a line). Pins can then be dragged to change the curve of the line.
62 |
63 | **Session Management**
64 | * The user can save (`Ctrl+S` or the "Save" button) and load (`Ctrl+L` or the "Library" button) entire chat graphs. The Library allows them to manage all their past conversations.
65 |
66 | --- Your Behavior ---
67 | * Always be professional, thoughtful, and think your responses through.
68 | * If you are unsure or unaware of a topic outside of the Graphite application, say so. Do not give blind advice.
69 | * Your primary role is to be an expert on the Graphite application itself, and a general-purpose assistant for all other topics.
70 | """)
71 |
72 | # Create main container
73 | self.container = QWidget()
74 | container_layout = QVBoxLayout(self.container)
75 | container_layout.setContentsMargins(0, 0, 0, 0)
76 | container_layout.setSpacing(0)
77 |
78 | # Add title bar
79 | self.title_bar = CustomTitleBar(self)
80 | container_layout.addWidget(self.title_bar)
81 |
82 | # Create content widget to hold chat view and pin overlay
83 | content_widget = QWidget()
84 | content_layout = QHBoxLayout(content_widget)
85 | content_layout.setContentsMargins(10, 10, 10, 10)
86 | content_layout.setSpacing(10)
87 |
88 | # Create and add pin overlay
89 | self.pin_overlay = PinOverlay(self) # Pass self as parent
90 | content_layout.addWidget(self.pin_overlay)
91 |
92 | # Create and add chat view - BEFORE toolbar setup
93 | self.chat_view = ChatView(self)
94 | content_layout.addWidget(self.chat_view)
95 |
96 | # Initialize session manager
97 | self.session_manager = ChatSessionManager(self)
98 |
99 | # Create and add toolbar - AFTER chat view creation
100 | self.toolbar = QToolBar()
101 | container_layout.addWidget(self.toolbar)
102 |
103 | # Add Library and Save buttons to toolbar
104 | library_btn = QToolButton()
105 | library_btn.setIcon(qta.icon('fa5s.book', color='#2ecc71'))
106 | library_btn.setText("Library")
107 | library_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
108 | library_btn.setObjectName("actionButton")
109 | library_btn.clicked.connect(self.show_library)
110 | self.toolbar.addWidget(library_btn)
111 |
112 | save_btn = QToolButton()
113 | save_btn.setIcon(qta.icon('fa5s.save', color='#2ecc71'))
114 | save_btn.setText("Save")
115 | save_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
116 | save_btn.setObjectName("actionButton")
117 | save_btn.clicked.connect(self.save_chat)
118 | self.toolbar.addWidget(save_btn)
119 |
120 | self.toolbar.addSeparator()
121 |
122 | # Setup remaining toolbar items - NOW chat_view exists
123 | self.setup_toolbar(self.toolbar)
124 |
125 | # Add content widget to main container
126 | container_layout.addWidget(content_widget)
127 |
128 | # Create input area
129 | input_widget = QWidget()
130 | input_layout = QHBoxLayout(input_widget)
131 | input_layout.setContentsMargins(8, 8, 8, 8)
132 |
133 | self.message_input = QLineEdit()
134 | self.message_input.setPlaceholderText("Type your message...")
135 | self.message_input.returnPressed.connect(self.send_message)
136 |
137 | self.send_button = QPushButton()
138 | self.send_button.setIcon(qta.icon('fa5s.paper-plane', color='white'))
139 | self.send_button.setToolTip("Send message")
140 | self.send_button.setFixedSize(40, 40)
141 | self.send_button.setStyleSheet("""
142 | QPushButton {
143 | background-color: #2ecc71;
144 | border: none;
145 | border-radius: 20px;
146 | padding: 10px;
147 | }
148 | QPushButton:hover {
149 | background-color: #27ae60;
150 | }
151 | QPushButton:pressed {
152 | background-color: #219652;
153 | }
154 | """)
155 | self.send_button.clicked.connect(self.send_message)
156 |
157 | input_layout.addWidget(self.message_input)
158 | input_layout.addWidget(self.send_button)
159 | container_layout.addWidget(input_widget)
160 |
161 | # Set central widget
162 | self.setCentralWidget(self.container)
163 |
164 | # Initialize current node
165 | self.current_node = None
166 |
167 | # Add loading overlay
168 | self.loading_overlay = LoadingOverlay(self.container)
169 | self.loading_overlay.hide()
170 |
171 | # Add keyboard shortcuts
172 | self.library_shortcut = QShortcut(QKeySequence("Ctrl+L"), self)
173 | self.library_shortcut.activated.connect(self.show_library)
174 |
175 | self.save_shortcut = QShortcut(QKeySequence("Ctrl+S"), self)
176 | self.save_shortcut.activated.connect(self.save_chat)
177 |
178 | # Center the window on the screen
179 | screen = QGuiApplication.primaryScreen().geometry()
180 | size = self.geometry()
181 | self.move(int((screen.width() - size.width()) / 2),
182 | int((screen.height() - size.height()) / 2))
183 |
184 | def resizeEvent(self, event):
185 | super().resizeEvent(event)
186 | # Keep loading overlay centered
187 | if hasattr(self, 'loading_overlay'):
188 | self.loading_overlay.setGeometry(
189 | (self.width() - 200) // 2,
190 | (self.height() - 100) // 2,
191 | 200,
192 | 100
193 | )
194 |
195 | def show_library(self):
196 | """Show the chat library dialog"""
197 | # Create new dialog and store reference
198 | self.library_dialog = ChatLibraryDialog(self.session_manager, self)
199 | self.library_dialog.setWindowTitle("Chat Library")
200 | self.library_dialog.resize(500, 600)
201 | # Use exec() for a modal dialog or show() for non-modal
202 | self.library_dialog.show()
203 |
204 | def keyPressEvent(self, event):
205 | if event.modifiers() & Qt.KeyboardModifier.ControlModifier:
206 | if event.key() == Qt.Key.Key_N:
207 | # Get cursor position in scene coordinates
208 | view_pos = self.chat_view.mapFromGlobal(QCursor.pos())
209 | scene_pos = self.chat_view.mapToScene(view_pos)
210 | self.chat_view.scene().add_note(scene_pos)
211 | elif event.key() == Qt.Key.Key_Delete:
212 | # Forward delete key to scene for handling
213 | self.chat_view.scene().deleteSelectedNotes()
214 | else:
215 | super().keyPressEvent(event)
216 |
217 | def save_chat(self):
218 | """Save the current chat session"""
219 | self.session_manager.save_current_chat()
220 | QMessageBox.information(self, "Success", "Chat saved successfully!")
221 |
222 | def setup_toolbar(self, toolbar):
223 | """Setup toolbar with modern QToolButtons"""
224 | toolbar.setIconSize(QSize(20, 20))
225 | toolbar.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
226 | toolbar.setStyleSheet("""
227 | QToolBar {
228 | spacing: 4px;
229 | padding: 4px;
230 | }
231 |
232 | QToolButton {
233 | color: white;
234 | background: transparent;
235 | border: none;
236 | border-radius: 4px;
237 | padding: 6px;
238 | margin: 2px;
239 | font-size: 12px;
240 | }
241 |
242 | QToolButton:hover {
243 | background: rgba(255, 255, 255, 0.1);
244 | }
245 |
246 | QToolButton:pressed {
247 | background: rgba(0, 0, 0, 0.2);
248 | }
249 |
250 | QToolButton#actionButton {
251 | color: #3498db;
252 | }
253 |
254 | QToolButton#helpButton {
255 | color: #9b59b6;
256 | }
257 | """)
258 |
259 | # Organize Button
260 | organize_btn = QToolButton()
261 | organize_btn.setIcon(qta.icon('fa5s.project-diagram', color='#3498db'))
262 | organize_btn.setText("Organize")
263 | organize_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
264 | organize_btn.setObjectName("actionButton")
265 | organize_btn.clicked.connect(lambda: self.chat_view.scene().organize_nodes())
266 | toolbar.addWidget(organize_btn)
267 |
268 | toolbar.addSeparator()
269 |
270 | # Zoom Controls
271 | zoom_in_btn = QToolButton()
272 | zoom_in_btn.setIcon(qta.icon('fa5s.search-plus', color='#3498db'))
273 | zoom_in_btn.setText("Zoom In")
274 | zoom_in_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
275 | zoom_in_btn.setObjectName("actionButton")
276 | zoom_in_btn.clicked.connect(lambda: self.chat_view.scale(1.1, 1.1))
277 | toolbar.addWidget(zoom_in_btn)
278 |
279 | zoom_out_btn = QToolButton()
280 | zoom_out_btn.setIcon(qta.icon('fa5s.search-minus', color='#3498db'))
281 | zoom_out_btn.setText("Zoom Out")
282 | zoom_out_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
283 | zoom_out_btn.setObjectName("actionButton")
284 | zoom_out_btn.clicked.connect(lambda: self.chat_view.scale(0.9, 0.9))
285 | toolbar.addWidget(zoom_out_btn)
286 |
287 | toolbar.addSeparator()
288 |
289 | # View Controls
290 | reset_btn = QToolButton()
291 | reset_btn.setIcon(qta.icon('fa5s.undo', color='#3498db'))
292 | reset_btn.setText("Reset")
293 | reset_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
294 | reset_btn.setObjectName("actionButton")
295 | reset_btn.clicked.connect(self.chat_view.reset_zoom)
296 | toolbar.addWidget(reset_btn)
297 |
298 | fit_btn = QToolButton()
299 | fit_btn.setIcon(qta.icon('fa5s.expand', color='#3498db'))
300 | fit_btn.setText("Fit All")
301 | fit_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
302 | fit_btn.setObjectName("actionButton")
303 | fit_btn.clicked.connect(self.chat_view.fit_all)
304 | toolbar.addWidget(fit_btn)
305 |
306 | # Add expanding spacer
307 | spacer = QWidget()
308 | spacer.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Preferred)
309 | toolbar.addWidget(spacer)
310 |
311 | # Mode Toggle: Ollama vs API
312 | mode_label = QLabel("Mode:")
313 | mode_label.setStyleSheet("color: #ffffff; padding: 0 8px; font-size: 12px;")
314 | toolbar.addWidget(mode_label)
315 |
316 | self.mode_combo = QComboBox()
317 | self.mode_combo.addItem("Ollama (Local)", False)
318 | self.mode_combo.addItem("API Endpoint", True)
319 | self.mode_combo.setMinimumWidth(150)
320 | self.mode_combo.currentIndexChanged.connect(self.on_mode_changed)
321 | toolbar.addWidget(self.mode_combo)
322 |
323 | # Unified Settings Button
324 | settings_btn = QToolButton()
325 | settings_btn.setIcon(qta.icon('fa5s.cog', color='#3498db'))
326 | settings_btn.setText("Settings")
327 | settings_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
328 | settings_btn.setObjectName("actionButton")
329 | settings_btn.clicked.connect(self.show_settings)
330 | toolbar.addWidget(settings_btn)
331 |
332 | # Help Button
333 | help_btn = QToolButton()
334 | help_btn.setIcon(qta.icon('fa5s.question-circle', color='#9b59b6'))
335 | help_btn.setText("Help")
336 | help_btn.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextBesideIcon)
337 | help_btn.setObjectName("helpButton")
338 | help_btn.clicked.connect(self.show_help)
339 | toolbar.addWidget(help_btn)
340 |
341 | # Set initial visibility based on mode
342 | self.on_mode_changed(self.mode_combo.currentIndex())
343 |
344 |
345 | def show_help(self):
346 | """Show the help dialog"""
347 | help_dialog = HelpDialog(self)
348 | # Center the dialog relative to the main window
349 | center = self.geometry().center()
350 | help_dialog.move(center.x() - help_dialog.width() // 2,
351 | center.y() - help_dialog.height() // 2)
352 | help_dialog.show()
353 |
354 | def show_settings(self):
355 | """Show the appropriate settings dialog based on the current mode."""
356 | use_api = self.mode_combo.currentData()
357 | if use_api:
358 | dialog = APISettingsDialog(self)
359 | else:
360 | dialog = ModelSelectionDialog(self)
361 |
362 | center = self.geometry().center()
363 | dialog.move(center.x() - dialog.width() // 2,
364 | center.y() - dialog.height() // 2)
365 | dialog.exec()
366 |
367 | def on_mode_changed(self, index):
368 | """Handle mode toggle between Ollama and API"""
369 | use_api = self.mode_combo.itemData(index)
370 | api_provider.set_mode(use_api)
371 |
372 | def setCurrentNode(self, node):
373 | self.current_node = node
374 | self.message_input.setPlaceholderText(f"Responding to: {node.text[:30]}...")
375 |
376 | def send_message(self):
377 | message = self.message_input.text().strip()
378 | if not message:
379 | return
380 |
381 | # Disable input during processing
382 | self.message_input.setEnabled(False)
383 | self.send_button.setEnabled(False)
384 |
385 | # Show loading overlay
386 | self.loading_overlay.show()
387 |
388 | # Get conversation history up to current node
389 | history = []
390 | if self.current_node:
391 | temp_node = self.current_node
392 | while temp_node:
393 | if hasattr(temp_node, 'conversation_history'):
394 | history = temp_node.conversation_history + history
395 | temp_node = temp_node.parent_node
396 |
397 | # Add user message node
398 | user_node = self.chat_view.scene().add_chat_node(
399 | message,
400 | is_user=True,
401 | parent_node=self.current_node,
402 | conversation_history=history
403 | )
404 |
405 | # Update conversation history
406 | user_node.conversation_history = history + [
407 | {'role': 'user', 'content': message}
408 | ]
409 |
410 | # Create and start worker thread
411 | self.chat_thread = ChatWorkerThread(
412 | self.agent,
413 | message,
414 | user_node.conversation_history
415 | )
416 |
417 | self.chat_thread.finished.connect(lambda response: self.handle_response(response, user_node))
418 | self.chat_thread.error.connect(self.handle_error)
419 | self.chat_thread.start()
420 |
421 | def handle_response(self, response, user_node):
422 | # Add AI response node
423 | ai_node = self.chat_view.scene().add_chat_node(
424 | response,
425 | is_user=False,
426 | parent_node=user_node,
427 | conversation_history=self.agent.conversation_history + [
428 | {'role': 'assistant', 'content': response}
429 | ]
430 | )
431 |
432 | # Update current node and view
433 | self.current_node = ai_node
434 | self.message_input.clear()
435 | self.chat_view.centerOn(ai_node)
436 |
437 | # Re-enable input
438 | self.message_input.setEnabled(True)
439 | self.send_button.setEnabled(True)
440 | self.loading_overlay.hide()
441 |
442 | # Auto-save after response
443 | self.session_manager.save_current_chat()
444 |
445 | def handle_error(self, error_message):
446 | QMessageBox.critical(self, "Error", f"An error occurred: {error_message}")
447 | # Re-enable input
448 | self.message_input.setEnabled(True)
449 | self.send_button.setEnabled(True)
450 | self.loading_overlay.hide()
451 |
452 |
453 | def cleanup_takeaway_thread(self):
454 | """Clean up the takeaway thread properly"""
455 | if hasattr(self, 'takeaway_thread') and self.takeaway_thread is not None:
456 | self.takeaway_thread.finished.disconnect()
457 | self.takeaway_thread.error.disconnect()
458 | self.takeaway_thread.quit()
459 | self.takeaway_thread.wait()
460 | self.takeaway_thread = None
461 |
462 | def generate_takeaway(self, node):
463 | """Generate takeaway for the given node"""
464 | try:
465 | # Cleanup any existing thread
466 | self.cleanup_takeaway_thread()
467 |
468 | # Get node position for note placement
469 | node_pos = node.scenePos()
470 |
471 | # Show loading overlay
472 | self.loading_overlay.show()
473 |
474 | # Create and start worker thread
475 | self.takeaway_thread = KeyTakeawayWorkerThread(
476 | KeyTakeawayAgent(),
477 | node.text,
478 | node_pos
479 | )
480 |
481 | self.takeaway_thread.finished.connect(self.handle_takeaway_response)
482 | self.takeaway_thread.error.connect(self.handle_takeaway_error)
483 | self.takeaway_thread.start()
484 |
485 | except Exception as e:
486 | QMessageBox.critical(self, "Error", f"Error generating takeaway: {str(e)}")
487 | self.loading_overlay.hide()
488 |
489 | def handle_takeaway_response(self, response, node_pos):
490 | """Handle the key takeaway response"""
491 | try:
492 | # Calculate note position - offset from node
493 | note_pos = QPointF(node_pos.x() + 400, node_pos.y())
494 |
495 | # Create new note
496 | note = self.chat_view.scene().add_note(note_pos)
497 | note.content = response
498 | note.color = "#2d2d2d"
499 | note.header_color = "#2ecc71"
500 |
501 | self.loading_overlay.hide()
502 | self.cleanup_takeaway_thread()
503 |
504 | except Exception as e:
505 | QMessageBox.critical(self, "Error", f"Error creating takeaway note: {str(e)}")
506 | self.loading_overlay.hide()
507 |
508 | def handle_takeaway_error(self, error_message):
509 | """Handle any errors during takeaway generation"""
510 | QMessageBox.critical(self, "Error", f"Error generating takeaway: {error_message}")
511 | self.loading_overlay.hide()
512 | self.cleanup_takeaway_thread()
513 |
514 | def cleanup_explainer_thread(self):
515 | """Clean up the explainer thread properly"""
516 | if hasattr(self, 'explainer_thread') and self.explainer_thread is not None:
517 | self.explainer_thread.finished.disconnect()
518 | self.explainer_thread.error.disconnect()
519 | self.explainer_thread.quit()
520 | self.explainer_thread.wait()
521 | self.explainer_thread = None
522 |
523 | def generate_explainer(self, node):
524 | """Generate simple explanation for the given node"""
525 | try:
526 | # Cleanup any existing thread
527 | self.cleanup_explainer_thread()
528 |
529 | # Get node position for note placement
530 | node_pos = node.scenePos()
531 |
532 | # Show loading overlay
533 | self.loading_overlay.show()
534 |
535 | # Create and start worker thread
536 | self.explainer_thread = ExplainerWorkerThread(
537 | ExplainerAgent(),
538 | node.text,
539 | node_pos
540 | )
541 |
542 | self.explainer_thread.finished.connect(self.handle_explainer_response)
543 | self.explainer_thread.error.connect(self.handle_explainer_error)
544 | self.explainer_thread.start()
545 |
546 | except Exception as e:
547 | QMessageBox.critical(self, "Error", f"Error generating explanation: {str(e)}")
548 | self.loading_overlay.hide()
549 |
550 | def handle_explainer_response(self, response, node_pos):
551 | """Handle the explainer response"""
552 | try:
553 | # Calculate note position - offset from node
554 | note_pos = QPointF(node_pos.x() + 400, node_pos.y() + 100) # Offset from takeaway note
555 |
556 | # Create new note
557 | note = self.chat_view.scene().add_note(note_pos)
558 | note.content = response
559 | note.color = "#2d2d2d"
560 | note.header_color = "#9b59b6" # Purple to distinguish from takeaway
561 |
562 | self.loading_overlay.hide()
563 | self.cleanup_explainer_thread()
564 |
565 | except Exception as e:
566 | QMessageBox.critical(self, "Error", f"Error creating explainer note: {str(e)}")
567 | self.loading_overlay.hide()
568 |
569 | def handle_explainer_error(self, error_message):
570 | """Handle any errors during explanation generation"""
571 | QMessageBox.critical(self, "Error", f"Error generating explanation: {error_message}")
572 | self.loading_overlay.hide()
573 | self.cleanup_explainer_thread()
574 |
575 |
576 | def generate_chart(self, node, chart_type):
577 | """Generate chart for the given node"""
578 | try:
579 | # Show loading overlay
580 | self.loading_overlay.show()
581 |
582 | # Create and start worker thread with just text and chart type
583 | self.chart_thread = ChartWorkerThread(
584 | node.text,
585 | chart_type
586 | )
587 |
588 | self.chart_thread.finished.connect(self.handle_chart_data)
589 | self.chart_thread.error.connect(self.handle_error)
590 | self.chart_thread.start()
591 |
592 | except Exception as e:
593 | QMessageBox.critical(self, "Error", f"Error generating chart: {str(e)}")
594 | self.loading_overlay.hide()
595 |
596 | def handle_chart_data(self, data, chart_type):
597 | """Handle the chart data and create visualization"""
598 | try:
599 | chart_data = json.loads(data)
600 | if "error" in chart_data:
601 | QMessageBox.warning(self, "Warning", chart_data["error"])
602 | self.loading_overlay.hide()
603 | return
604 |
605 | # Calculate position
606 | if self.current_node:
607 | pos = self.current_node.scenePos()
608 | chart_pos = QPointF(pos.x() + 450, pos.y())
609 | else:
610 | chart_pos = QPointF(0, 0)
611 |
612 | # Create chart item
613 | self.chat_view.scene().add_chart(chart_data, chart_pos)
614 |
615 | except Exception as e:
616 | QMessageBox.critical(self, "Error", f"Error creating chart: {str(e)}")
617 |
618 | finally:
619 | self.loading_overlay.hide()
620 |
621 | def stop_all_workers(self):
622 | """Safely stops all running worker threads before closing."""
623 | if hasattr(self, 'chat_thread') and self.chat_thread and self.chat_thread.isRunning():
624 | self.chat_thread.quit()
625 | self.chat_thread.wait()
626 |
627 | if hasattr(self, 'takeaway_thread') and self.takeaway_thread and self.takeaway_thread.isRunning():
628 | self.takeaway_thread.stop()
629 | self.takeaway_thread.quit()
630 | self.takeaway_thread.wait()
631 |
632 | if hasattr(self, 'explainer_thread') and self.explainer_thread and self.explainer_thread.isRunning():
633 | self.explainer_thread.stop()
634 | self.explainer_thread.quit()
635 | self.explainer_thread.wait()
636 |
637 | if hasattr(self, 'chart_thread') and self.chart_thread and self.chart_thread.isRunning():
638 | self.chart_thread.quit()
639 | self.chart_thread.wait()
640 |
641 | def closeEvent(self, event):
642 | """
643 | Overrides the default close event to ensure all background threads
644 | are properly terminated before the application exits.
645 | """
646 | self.stop_all_workers()
647 | super().closeEvent(event)
648 |
649 | def main():
650 | app = QApplication(sys.argv)
651 | window = ChatWindow()
652 | window.show()
653 | sys.exit(app.exec())
654 |
655 | if __name__ == "__main__":
656 | main()
657 |
--------------------------------------------------------------------------------
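All four worker classes in graphite_agents follow the same QThread recipe: do the blocking work in run(), guard signal emission with a stop flag, and have the owner call stop()/quit()/wait() before teardown (see stop_all_workers above). A condensed, standalone sketch of that pattern; EchoWorker and its uppercasing "work" are illustrative stand-ins, not part of the application:

```python
import sys
from PySide6.QtCore import QThread, Signal, QCoreApplication

class EchoWorker(QThread):
    finished = Signal(str)
    error = Signal(str)

    def __init__(self, text):
        super().__init__()
        self.text = text
        self._is_running = False

    def run(self):
        try:
            self._is_running = True
            result = self.text.upper()   # stand-in for the blocking LLM call
            if self._is_running:         # suppress emission after stop()
                self.finished.emit(result)
        except Exception as e:
            if self._is_running:
                self.error.emit(str(e))
        finally:
            self._is_running = False

    def stop(self):
        """Safely stop the thread"""
        self._is_running = False

app = QCoreApplication(sys.argv)
worker = EchoWorker("hello")
worker.finished.connect(lambda s: (print(s), app.quit()))
worker.start()
app.exec()
```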