├── requirements.txt ├── sqlite_worker ├── __init__.py ├── __pycache__ │ ├── main.cpython-311.pyc │ └── __init__.cpython-311.pyc └── main.py ├── sqlite_performance_comparison.png ├── .gitignore ├── LICENSE ├── ReadMe.md ├── tests.py └── test_performance.py /requirements.txt: -------------------------------------------------------------------------------- 1 | coverage -------------------------------------------------------------------------------- /sqlite_worker/__init__.py: -------------------------------------------------------------------------------- 1 | from .main import SqliteWorker 2 | -------------------------------------------------------------------------------- /sqlite_performance_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/roshanlam/sqlite-worker/HEAD/sqlite_performance_comparison.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | dist/ 3 | sqlite_worker.egg-info/ 4 | build/ 5 | .coverage 6 | htmlcov/ 7 | sqlite_worker/__pycache__/ 8 | venv 9 | -------------------------------------------------------------------------------- /sqlite_worker/__pycache__/main.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/roshanlam/sqlite-worker/HEAD/sqlite_worker/__pycache__/main.cpython-311.pyc -------------------------------------------------------------------------------- /sqlite_worker/__pycache__/__init__.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/roshanlam/sqlite-worker/HEAD/sqlite_worker/__pycache__/__init__.cpython-311.pyc -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Roshan Lamichhane 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ReadMe.md: -------------------------------------------------------------------------------- 1 | # sqlite-worker 2 | 3 | Sqlite-Worker is a Python package providing a thread-safe interface for SQLite database operations. It ensures safe concurrent access to SQLite databases and simplifies executing database queries from different threads. 
4 | 5 | ## Features 6 | 7 | - Thread-safe SQLite database operations 8 | - Queue-based query execution 9 | - Simple and easy-to-use API 10 | - Initialization actions executed once upon database connection 11 | - Regular commits for continuous query streams 12 | 13 | ## Installation 14 | 15 | To install, run: 16 | 17 | ```sh 18 | pip3 install sqlite-worker 19 | ``` 20 | 21 | ## Sqlite vs SqliteWorker Comparison 22 | ![](sqlite_performance_comparison.png) 23 | 24 | # Creating a Worker Instance 25 | To create a basic instance of `SqliteWorker`, specify the path to your SQLite database file: 26 | 27 | ```python 28 | from sqlite_worker import SqliteWorker 29 | worker = SqliteWorker("/path/to/your/database.db") 30 | ``` 31 | 32 | # Worker instance with Initialization Actions 33 | Create a `SqliteWorker` instance with initialization actions (such as setting pragmas): 34 | 35 | ```python 36 | from sqlite_worker import SqliteWorker 37 | 38 | init_actions = [ 39 | "PRAGMA journal_mode=WAL;", 40 | "PRAGMA synchronous=NORMAL;", 41 | "PRAGMA temp_store=MEMORY;" 42 | ] 43 | 44 | worker = SqliteWorker("/path/to/your/database.db", execute_init=init_actions) 45 | ``` 46 | 47 | # Worker Instance with Regular Commits 48 | Create a SqliteWorker instance with initialization actions and set a maximum query count for regular commits: 49 | 50 | ```python 51 | from sqlite_worker import SqliteWorker 52 | 53 | init_actions = [ 54 | "PRAGMA journal_mode=WAL;", 55 | "PRAGMA synchronous=NORMAL;", 56 | "PRAGMA temp_store=MEMORY;" 57 | ] 58 | 59 | worker = SqliteWorker("/path/to/your/database.db", execute_init=init_actions, max_count=50) 60 | ``` 61 | 62 | 63 | 64 | # Execute Queries 65 | 66 | ## Creating a table 67 | ```python 68 | worker.execute("CREATE TABLE example (id INTEGER PRIMARY KEY, name TEXT)") 69 | ``` 70 | 71 | ## Inserting data 72 | ```python 73 | worker.execute("INSERT INTO example (name) VALUES (?)", ("Alice",)) 74 | ``` 75 | 76 | ## Fetching data 77 | ```python 78 | token =
worker.execute("SELECT * FROM example") 79 | results = worker.fetch_results(token) 80 | print(results) 81 | ``` 82 | 83 | # Closing the Worker 84 | After completing all database operations, close the worker to ensure proper cleanup: 85 | ```python 86 | worker.close() 87 | ``` 88 | 89 | # Contributing 90 | Contributions to the Sqlite-Worker are welcome! Please refer to the project's issues and pull request sections for contributions. 91 | 92 | # Acknowledgements 93 | 94 | Special thanks to [Johannes Ahlmann](https://github.com/codinguncut) for their valuable suggestion on initializing actions and implementing regular commits. 95 | -------------------------------------------------------------------------------- /sqlite_worker/main.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sqlite3 3 | import threading 4 | import uuid 5 | import queue 6 | 7 | LOGGER = logging.getLogger("SqliteWorker") 8 | SILENT_TOKEN_SUFFIX = '-silent' 9 | 10 | 11 | class SqliteWorker: 12 | """Sqlite thread-safe object.""" 13 | 14 | def __init__(self, file_name, max_queue_size=100, execute_init=(), max_count=50): 15 | self._file_name = file_name 16 | self._sql_queue = queue.Queue(maxsize=max_queue_size) 17 | self._results = {} 18 | self._tokens = set() 19 | self._select_events = {} 20 | self._lock = threading.Lock() 21 | self._close_event = threading.Event() 22 | self._thread = threading.Thread(target=self._run, daemon=True) 23 | self._thread.start() 24 | self.execute_init = execute_init 25 | self.max_count = max_count 26 | 27 | def _run(self): 28 | try: 29 | self._process_queries() 30 | except Exception as err: 31 | LOGGER.critical( 32 | "Unhandled exception in query processor: %s", err, exc_info=True) 33 | raise 34 | 35 | def _process_queries(self): 36 | with sqlite3.connect(self._file_name, check_same_thread=False, detect_types=sqlite3.PARSE_DECLTYPES) as conn: 37 | cursor = conn.cursor() 38 | for action in self.execute_init: 
39 | cursor.execute(action) 40 | conn.commit() 41 | 42 | count = 0 43 | while not self._close_event.is_set() or not self._sql_queue.empty(): 44 | try: 45 | token, query, values = self._sql_queue.get(timeout=1) 46 | except queue.Empty: 47 | continue 48 | if query: 49 | count += 1 50 | self._execute_query(cursor, token, query, values) 51 | 52 | if count >= self.max_count or self._sql_queue.empty(): 53 | count = 0 54 | conn.commit() 55 | 56 | def _execute_query(self, cursor, token: str, query, values): 57 | try: 58 | cursor.execute(query, values) 59 | if not token.endswith(SILENT_TOKEN_SUFFIX): 60 | with self._lock: 61 | self._results[token] = cursor.fetchall() 62 | except sqlite3.Error as err: 63 | LOGGER.error("Query error: %s: %s: %s", query, values, err) 64 | self._handle_query_error(token, err) 65 | self._notify_query_done(token) 66 | 67 | def _is_select_query(self, query): 68 | return query.lower().lstrip().startswith("select") 69 | 70 | def _notify_query_begin(self, token): 71 | self._select_events.setdefault(token, threading.Event()) 72 | 73 | def _notify_query_done(self, token): 74 | self._select_events[token].set() 75 | 76 | def _handle_query_error(self, token, err): 77 | with self._lock: 78 | self._results[token] = err 79 | 80 | def close(self): 81 | self._close_event.set() 82 | self._sql_queue.put((None, None, None), timeout=5) 83 | self._thread.join() 84 | 85 | def execute(self, query, values=None, always_return_token=False): 86 | if self._close_event.is_set(): 87 | raise RuntimeError("Worker is closed") 88 | 89 | return_token = ( 90 | always_return_token or 91 | self._is_select_query(query) 92 | ) 93 | 94 | token = uuid.uuid4().hex 95 | if not return_token: 96 | token += SILENT_TOKEN_SUFFIX 97 | 98 | self._sql_queue.put((token, query, values or []), timeout=5) 99 | self._notify_query_begin(token) 100 | 101 | if return_token: 102 | return token 103 | return None 104 | 105 | def fetch_results(self, token): 106 | if token: 107 | with self._lock: 108 | event 
= self._select_events.get(token) 109 | if event: 110 | event.wait() 111 | with self._lock: 112 | return self._results.pop(token, None) 113 | return None 114 | 115 | @property 116 | def queue_size(self): 117 | return self._sql_queue.qsize() 118 | -------------------------------------------------------------------------------- /tests.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import time 3 | from sqlite_worker import SqliteWorker 4 | import sqlite3 5 | import os 6 | 7 | 8 | class TestSqliteWorker(unittest.TestCase): 9 | def setUp(self): 10 | self.db_file = 'test_example_sqlite.db' 11 | self.execute_init = ( 12 | "PRAGMA journal_mode=WAL;", 13 | "PRAGMA synchronous=NORMAL;", 14 | "PRAGMA temp_store=MEMORY;" 15 | ) 16 | self.worker = SqliteWorker( 17 | self.db_file, execute_init=self.execute_init) 18 | # Give some time for the worker to initialize and execute pragmas 19 | time.sleep(0.5) 20 | 21 | def tearDown(self): 22 | self.worker.close() 23 | if os.path.exists(self.db_file): 24 | os.remove(self.db_file) 25 | 26 | def test_initialization_and_pragmas(self): 27 | # Verify the worker initializes correctly and pragmas are executed 28 | self.assertTrue(os.path.exists(self.db_file)) 29 | 30 | # Check if the database is in WAL mode 31 | with sqlite3.connect(self.db_file) as conn: 32 | cursor = conn.cursor() 33 | cursor.execute("PRAGMA journal_mode;") 34 | result = cursor.fetchone() 35 | self.assertEqual(result[0], "wal") 36 | 37 | def test_create_table_and_insert(self): 38 | # Create a table 39 | self.worker.execute( 40 | "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)" 41 | ) 42 | 43 | # Insert data 44 | self.worker.execute("INSERT INTO users (name) VALUES (?)", ("Alice",)) 45 | self.worker.execute("INSERT INTO users (name) VALUES (?)", ("Bob",)) 46 | 47 | # Allow some time for the insert queries to complete 48 | time.sleep(1) 49 | 50 | # Fetch data 51 | token = 
self.worker.execute("SELECT * FROM users") 52 | time.sleep(1) # Give some time for the select query to complete 53 | results = self.worker.fetch_results(token) 54 | 55 | self.assertIsNotNone(results) 56 | self.assertEqual(len(results), 2) 57 | self.assertEqual(results[0][1], "Alice") 58 | self.assertEqual(results[1][1], "Bob") 59 | 60 | def test_multiple_queries(self): 61 | # Create a table and insert data 62 | self.worker.execute( 63 | "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)" 64 | ) 65 | self.worker.execute("INSERT INTO users (name) VALUES (?)", ("Alice",)) 66 | self.worker.execute("INSERT INTO users (name) VALUES (?)", ("Bob",)) 67 | 68 | # Allow some time for the insert queries to complete 69 | time.sleep(1) 70 | 71 | # Perform multiple select queries 72 | token1 = self.worker.execute( 73 | "SELECT * FROM users WHERE name = ?", ("Alice",)) 74 | token2 = self.worker.execute( 75 | "SELECT * FROM users WHERE name = ?", ("Bob",)) 76 | time.sleep(1) # Give some time for the select queries to complete 77 | 78 | results1 = self.worker.fetch_results(token1) 79 | results2 = self.worker.fetch_results(token2) 80 | 81 | self.assertIsNotNone(results1) 82 | self.assertEqual(len(results1), 1) 83 | self.assertEqual(results1[0][1], "Alice") 84 | self.assertIsNotNone(results2) 85 | self.assertEqual(len(results2), 1) 86 | self.assertEqual(results2[0][1], "Bob") 87 | 88 | def test_error_handling(self): 89 | error_token = self.worker.execute("SELECT * FROM non_existing_table") 90 | time.sleep(1) # Give some time for the error to be processed 91 | results = self.worker.fetch_results(error_token) 92 | self.assertIsInstance(results, sqlite3.Error) 93 | 94 | def test_close_worker(self): 95 | # Create a table and close the worker 96 | self.worker.execute( 97 | "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)" 98 | ) 99 | self.worker.close() 100 | 101 | # Verify that the worker is closed 102 | with self.assertRaises(RuntimeError): 103 | 
self.worker.execute( 104 | "INSERT INTO users (name) VALUES (?)", ("Charlie",)) 105 | 106 | def test_queue_size(self): 107 | initial_size = self.worker.queue_size 108 | self.assertEqual(initial_size, 0) 109 | 110 | self.worker.execute( 111 | "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT)" 112 | ) 113 | self.assertEqual(self.worker.queue_size, 1) 114 | 115 | # Allow some time for the queries to complete 116 | time.sleep(1) 117 | 118 | self.assertEqual(self.worker.queue_size, 0) 119 | 120 | 121 | if __name__ == '__main__': 122 | unittest.main() 123 | -------------------------------------------------------------------------------- /test_performance.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | import threading 3 | import time 4 | import uuid 5 | import random 6 | import concurrent.futures 7 | import os 8 | import statistics 9 | import matplotlib.pyplot as plt 10 | import numpy as np 11 | from contextlib import contextmanager 12 | 13 | # Import your SqliteWorker class 14 | # Assuming SqliteWorker.py is in the same directory 15 | from sqlite_worker.main import SqliteWorker 16 | 17 | # Constants for testing 18 | DB_FILE_STANDARD = "standard_test.db" 19 | DB_FILE_WORKER = "worker_test.db" 20 | NUM_THREADS = [1, 2, 4, 8, 16, 32] # Test with different thread counts 21 | NUM_QUERIES_PER_THREAD = 1000 22 | TEST_RUNS = 3 # Number of times to run each test for reliable results 23 | 24 | # Test data 25 | def generate_random_data(num_records=100): 26 | data = [] 27 | for _ in range(num_records): 28 | data.append(( 29 | uuid.uuid4().hex, 30 | random.randint(1, 1000), 31 | f"Item {random.randint(1, 1000)}", 32 | random.random() * 100 33 | )) 34 | return data 35 | 36 | # Clean up database files before testing 37 | def cleanup(): 38 | for file in [DB_FILE_STANDARD, DB_FILE_WORKER]: 39 | if os.path.exists(file): 40 | os.remove(file) 41 | 42 | # Set up the databases 43 | def setup_databases(): 44 | # 
Standard SQLite setup 45 | conn = sqlite3.connect(DB_FILE_STANDARD) 46 | c = conn.cursor() 47 | c.execute(''' 48 | CREATE TABLE items ( 49 | id TEXT PRIMARY KEY, 50 | quantity INTEGER, 51 | name TEXT, 52 | price REAL 53 | ) 54 | ''') 55 | conn.commit() 56 | conn.close() 57 | 58 | # SqliteWorker setup 59 | worker = SqliteWorker(DB_FILE_WORKER, execute_init=[ 60 | ''' 61 | CREATE TABLE items ( 62 | id TEXT PRIMARY KEY, 63 | quantity INTEGER, 64 | name TEXT, 65 | price REAL 66 | ) 67 | ''' 68 | ]) 69 | worker.close() 70 | 71 | # Thread-safe connection for standard SQLite 72 | @contextmanager 73 | def get_connection(): 74 | conn = sqlite3.connect(DB_FILE_STANDARD) 75 | try: 76 | yield conn 77 | finally: 78 | conn.close() 79 | 80 | # Standard SQLite worker function 81 | def standard_worker(thread_id, lock, queries_per_thread): 82 | results = [] 83 | 84 | for i in range(queries_per_thread): 85 | if i % 4 == 0: # 25% are inserts 86 | item_id = f"{thread_id}-{i}-{uuid.uuid4().hex}" 87 | quantity = random.randint(1, 100) 88 | name = f"Item {thread_id}-{i}" 89 | price = random.random() * 100 90 | 91 | with get_connection() as conn: 92 | c = conn.cursor() 93 | start_time = time.time() 94 | try: 95 | with lock: 96 | c.execute( 97 | "INSERT INTO items VALUES (?, ?, ?, ?)", 98 | (item_id, quantity, name, price) 99 | ) 100 | conn.commit() 101 | except sqlite3.Error as e: 102 | print(f"Insert error: {e}") 103 | duration = time.time() - start_time 104 | results.append(duration) 105 | else: # 75% are selects 106 | with get_connection() as conn: 107 | c = conn.cursor() 108 | start_time = time.time() 109 | try: 110 | with lock: 111 | c.execute("SELECT * FROM items ORDER BY RANDOM() LIMIT 10") 112 | data = c.fetchall() 113 | except sqlite3.Error as e: 114 | print(f"Select error: {e}") 115 | duration = time.time() - start_time 116 | results.append(duration) 117 | 118 | return results 119 | 120 | # SqliteWorker worker function 121 | def worker_worker(thread_id, worker, 
queries_per_thread): 122 | results = [] 123 | 124 | for i in range(queries_per_thread): 125 | if i % 4 == 0: # 25% are inserts 126 | item_id = f"{thread_id}-{i}-{uuid.uuid4().hex}" 127 | quantity = random.randint(1, 100) 128 | name = f"Item {thread_id}-{i}" 129 | price = random.random() * 100 130 | 131 | start_time = time.time() 132 | worker.execute( 133 | "INSERT INTO items VALUES (?, ?, ?, ?)", 134 | (item_id, quantity, name, price) 135 | ) 136 | duration = time.time() - start_time 137 | results.append(duration) 138 | else: # 75% are selects 139 | start_time = time.time() 140 | token = worker.execute( 141 | "SELECT * FROM items ORDER BY RANDOM() LIMIT 10" 142 | ) 143 | data = worker.fetch_results(token) 144 | duration = time.time() - start_time 145 | results.append(duration) 146 | 147 | return results 148 | 149 | # Preload the databases with some data 150 | def preload_databases(): 151 | data = generate_random_data(1000) 152 | 153 | # Preload standard SQLite 154 | conn = sqlite3.connect(DB_FILE_STANDARD) 155 | c = conn.cursor() 156 | c.executemany("INSERT INTO items VALUES (?, ?, ?, ?)", data) 157 | conn.commit() 158 | conn.close() 159 | 160 | # Preload SqliteWorker 161 | worker = SqliteWorker(DB_FILE_WORKER) 162 | for row in data: 163 | worker.execute("INSERT INTO items VALUES (?, ?, ?, ?)", row) 164 | worker.close() 165 | 166 | def run_standard_test(num_threads, queries_per_thread): 167 | lock = threading.Lock() 168 | all_durations = [] 169 | 170 | with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor: 171 | futures = [ 172 | executor.submit(standard_worker, i, lock, queries_per_thread) 173 | for i in range(num_threads) 174 | ] 175 | 176 | for future in concurrent.futures.as_completed(futures): 177 | all_durations.extend(future.result()) 178 | 179 | return all_durations 180 | 181 | def run_worker_test(num_threads, queries_per_thread): 182 | worker = SqliteWorker(DB_FILE_WORKER, max_queue_size=100000) 183 | all_durations = [] 184 | 185 
| with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor: 186 | futures = [ 187 | executor.submit(worker_worker, i, worker, queries_per_thread) 188 | for i in range(num_threads) 189 | ] 190 | 191 | for future in concurrent.futures.as_completed(futures): 192 | all_durations.extend(future.result()) 193 | 194 | worker.close() 195 | return all_durations 196 | 197 | def run_tests(): 198 | results = { 199 | "standard": {}, 200 | "worker": {}, 201 | "improvement": {} 202 | } 203 | 204 | for thread_count in NUM_THREADS: 205 | standard_times = [] 206 | worker_times = [] 207 | 208 | for run in range(TEST_RUNS): 209 | print(f"Run {run+1}/{TEST_RUNS} with {thread_count} threads") 210 | cleanup() 211 | setup_databases() 212 | preload_databases() 213 | 214 | # Run standard SQLite test 215 | print(f" Testing standard SQLite...") 216 | standard_durations = run_standard_test(thread_count, NUM_QUERIES_PER_THREAD) 217 | standard_total = sum(standard_durations) 218 | standard_times.append(standard_total) 219 | 220 | # Run SqliteWorker test 221 | print(f" Testing SqliteWorker...") 222 | worker_durations = run_worker_test(thread_count, NUM_QUERIES_PER_THREAD) 223 | worker_total = sum(worker_durations) 224 | worker_times.append(worker_total) 225 | 226 | # Calculate average time across test runs 227 | avg_standard = statistics.mean(standard_times) 228 | avg_worker = statistics.mean(worker_times) 229 | 230 | # Calculate improvement percentage 231 | improvement = ((avg_standard - avg_worker) / avg_worker) * 100 232 | 233 | results["standard"][thread_count] = avg_standard 234 | results["worker"][thread_count] = avg_worker 235 | results["improvement"][thread_count] = improvement 236 | 237 | print(f"\nResults for {thread_count} threads:") 238 | print(f" Standard SQLite: {avg_standard:.4f} seconds") 239 | print(f" SqliteWorker: {avg_worker:.4f} seconds") 240 | print(f" Improvement: {improvement:.2f}%") 241 | print(f" Speed multiplier: {avg_standard/avg_worker:.2f}x\n") 
242 | 243 | return results 244 | 245 | def plot_results(results): 246 | plt.figure(figsize=(12, 10)) 247 | 248 | # Plot 1: Execution Time Comparison 249 | plt.subplot(2, 1, 1) 250 | x = np.array(NUM_THREADS) 251 | width = 0.35 252 | 253 | standard_times = [results["standard"][t] for t in NUM_THREADS] 254 | worker_times = [results["worker"][t] for t in NUM_THREADS] 255 | 256 | plt.bar(x - width/2, standard_times, width, label='Standard SQLite') 257 | plt.bar(x + width/2, worker_times, width, label='SqliteWorker') 258 | 259 | plt.xlabel('Number of Threads') 260 | plt.ylabel('Total Execution Time (seconds)') 261 | plt.title('Execution Time Comparison') 262 | plt.xticks(x) 263 | plt.legend() 264 | plt.grid(True, linestyle='--', alpha=0.7) 265 | 266 | # Plot 2: Performance Improvement 267 | plt.subplot(2, 1, 2) 268 | improvements = [results["improvement"][t] for t in NUM_THREADS] 269 | speedups = [results["standard"][t]/results["worker"][t] for t in NUM_THREADS] 270 | 271 | plt.plot(x, improvements, 'o-', label='Improvement %') 272 | plt.axhline(y=300, color='r', linestyle='--', label='300% Improvement Threshold') 273 | 274 | plt.xlabel('Number of Threads') 275 | plt.ylabel('Performance Improvement (%)') 276 | plt.title('Performance Improvement with SqliteWorker') 277 | plt.xticks(x) 278 | plt.legend() 279 | plt.grid(True, linestyle='--', alpha=0.7) 280 | 281 | # Add a text annotation with the speedup for each thread count 282 | for i, threads in enumerate(NUM_THREADS): 283 | plt.annotate(f"{speedups[i]:.2f}x faster", 284 | (x[i], improvements[i]), 285 | textcoords="offset points", 286 | xytext=(0,10), 287 | ha='center') 288 | 289 | plt.tight_layout() 290 | plt.savefig('sqlite_performance_comparison.png') 291 | plt.show() 292 | 293 | # Print the final analysis 294 | max_improvement = max(improvements) 295 | max_threads = NUM_THREADS[improvements.index(max_improvement)] 296 | avg_improvement = sum(improvements) / len(improvements) 297 | 298 | print("\nFinal Analysis:") 
299 | print(f"Maximum improvement: {max_improvement:.2f}% with {max_threads} threads") 300 | print(f"Average improvement across all thread counts: {avg_improvement:.2f}%") 301 | 302 | for i, threads in enumerate(NUM_THREADS): 303 | print(f"With {threads} threads: {speedups[i]:.2f}x faster ({improvements[i]:.2f}% improvement)") 304 | 305 | if max_improvement >= 300: 306 | print("\nThe 300% performance improvement claim is verified!") 307 | else: 308 | print(f"\nThe actual maximum improvement is {max_improvement:.2f}%, not 300%") 309 | 310 | if __name__ == "__main__": 311 | print("Starting SQLiteWorker Performance Test") 312 | results = run_tests() 313 | plot_results(results) 314 | --------------------------------------------------------------------------------