├── src ├── __init__.py ├── server.py ├── registry.py └── tools │ ├── __init__.py │ ├── append_block_in_page.py │ ├── create_page.py │ ├── get_all_pages.py │ ├── edit_block.py │ ├── get_page_blocks.py │ ├── get_block_content.py │ ├── get_all_page_content.py │ ├── get_page_links.py │ └── get_linked_flashcards.py ├── .python-version ├── tests ├── __init__.py ├── test_runner.py ├── test_mcp_server.py ├── conftest.py ├── test_basic_functionality.py ├── test_get_page_blocks.py ├── test_append_block_in_page.py ├── test_get_all_pages.py ├── test_create_page.py ├── test_edit_block.py ├── test_get_block_content.py ├── test_get_linked_flashcards.py └── test_get_page_links.py ├── .vscode └── settings.json ├── .env.template ├── scripts └── fix-code.sh ├── pyproject.toml ├── .pre-commit-config.yaml ├── LICENSE ├── Makefile ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── badges.yml ├── pull_request_template.md ├── workflows │ ├── ci.yml │ ├── README.md │ ├── quality.yml │ └── release.yml └── CI_CD_SETUP.md ├── .gitignore ├── CONTRIBUTING.md ├── CLAUDE.md └── safety-report.json /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Test package for logseq-api-mcp 2 | 3 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.testing.pytestArgs": ["tests"], 3 | "python.testing.unittestEnabled": false, 4 | "python.testing.pytestEnabled": true 5 | } 6 | -------------------------------------------------------------------------------- /src/server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | 3 | from .registry import register_all_tools 4 | 5 | # Create an MCP server 6 | mcp = FastMCP("Logseq API") 7 | 8 | # Register all tools 9 | register_all_tools(mcp) 10 | 11 | if __name__ == "__main__": 12 | mcp.run() 13 | -------------------------------------------------------------------------------- /.env.template: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------- 2 | # This template contains the required options. 3 | # To learn more, review the project documentation: https://github.com/gustavo-meilus/logseq-api-mcp 4 | # --------------------------------------------------------------------------- 5 | 6 | # Logseq API Configuration 7 | LOGSEQ_API_ENDPOINT=http://127.0.0.1:12315/api 8 | LOGSEQ_API_TOKEN= 9 | -------------------------------------------------------------------------------- /scripts/fix-code.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Fix code formatting and linting issues 4 | echo "🔧 Running Ruff fix and format..." 5 | 6 | # Run Ruff fix 7 | uv run ruff check src/ tests/ --fix 8 | 9 | # Run Ruff format 10 | uv run ruff format src/ tests/ 11 | 12 | echo "🔍 Running MyPy type check..."
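# --ignore-missing-imports keeps MyPy from erroring on third-party packages that ship no type stubs;
# --show-error-codes prints each error's code so it can be looked up or selectively suppressed.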
13 | 14 | # Run MyPy type check 15 | uv run mypy src/ --ignore-missing-imports --show-error-codes 16 | 17 | echo "✅ Code fixed and formatted!" 18 | -------------------------------------------------------------------------------- /src/registry.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | 3 | from . import tools 4 | 5 | 6 | def register_all_tools(mcp_server: FastMCP) -> None: 7 | """ 8 | Register all tools with the MCP server. 9 | 10 | Dynamically discovers and registers all functions from the tools module. 11 | 12 | Args: 13 | mcp_server: The FastMCP server instance to register tools with 14 | """ 15 | 16 | # Dynamically register all tools found in the tools module 17 | for tool_name in tools.__all__: 18 | tool_function = getattr(tools, tool_name) 19 | mcp_server.tool()(tool_function) 20 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "logseq-api-mcp" 3 | version = "0.1.0" 4 | description = "MCP server exposing the Logseq API as tools" 5 | readme = "README.md" 6 | requires-python = ">=3.11" 7 | dependencies = [ 8 | "aiohttp>=3.12.14", 9 | "load-dotenv>=0.1.0", 10 | "mcp[cli]>=1.10.0", 11 | "starlette>=0.47.2", 12 | ] 13 | 14 | [dependency-groups] 15 | dev = [ 16 | "ruff>=0.11.12", 17 | "mypy>=1.8.0", 18 | "bandit>=1.7.5", 19 | "safety>=2.3.5,<3.0.0", 20 | "pip-licenses>=4.3.0", 21 | "pre-commit>=4.0.0", 22 | ] 23 | test = [ 24 | "pytest>=8.3.5", 25 | "pytest-asyncio>=1.0.0", 26 | "pytest-cov>=4.1.0", 27 | ] 28 | 29 | [tool.pytest.ini_options] 30 | asyncio_mode = "auto" 31 | asyncio_default_fixture_loop_scope = "function" 32 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | rev: v0.11.12 4 | hooks: 5 | - id: ruff 6 | args: [--fix] 7 | - id: ruff-format 8 | 9 | - repo: local 10 | hooks: 11 | - id: ruff-check 12 | name: ruff-check 13 | entry: uv run ruff check src/ tests/ 14 | language: system 15 | pass_filenames: false 16 | always_run: true 17 | 18 | - id: ruff-format-check 19 | name: ruff-format-check 20 | entry: uv run ruff format --check src/ tests/ 21 | language: system 22 | pass_filenames: false 23 | always_run: true 24 | 25 | - id: mypy-check 26 | name: mypy-check 27 | entry: uv run mypy src/ --ignore-missing-imports --show-error-codes 28 | language: system 29 | pass_filenames: false 30 | always_run: true 31 | -------------------------------------------------------------------------------- /tests/test_runner.py: -------------------------------------------------------------------------------- 1 | """Test runner for individual tool testing.""" 2 | 3 | import sys 4 | from pathlib import Path 5 | 6 | # Add src to path for imports 7 | sys.path.insert(0, str(Path(__file__).parent.parent / "src")) 8 | 9 | import pytest 10 | 11 | 12 | def run_tool_tests(tool_name: str | None = None): 13 | """Run tests for a specific tool or all tools.""" 14 | if tool_name: 15 | test_file = f"tests/test_{tool_name}.py" 16 | if Path(test_file).exists(): 17 | pytest.main([test_file, "-v"]) 18 | else: 19 | print(f"Test file {test_file} not found") 20 | else: 21 | # Run all tests 22 | pytest.main(["tests/", "-v"]) 23 | 24 | 25 | if __name__ == "__main__": 26 | import
argparse 27 | 28 | parser = argparse.ArgumentParser(description="Run tool tests") 29 | parser.add_argument("--tool", help="Specific tool to test") 30 | args = parser.parse_args() 31 | 32 | run_tool_tests(args.tool) 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Gustavo Meilus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: fix format check test install 2 | 3 | # Fix code formatting and linting issues 4 | fix: 5 | @echo "🔧 Running Ruff fix and format..." 6 | uv run ruff check src/ tests/ --fix 7 | uv run ruff format src/ tests/ 8 | @echo "🔍 Running MyPy type check..." 9 | uv run mypy src/ --ignore-missing-imports --show-error-codes 10 | @echo "✅ Code fixed and formatted!" 11 | 12 | # Format code only 13 | format: 14 | @echo "🎨 Formatting code..." 15 | uv run ruff format src/ tests/ 16 | @echo "✅ Code formatted!" 17 | 18 | # Check code quality 19 | check: 20 | @echo "🔍 Checking code quality..." 21 | uv run ruff check src/ tests/ 22 | uv run ruff format --check src/ tests/ 23 | uv run mypy src/ --ignore-missing-imports --show-error-codes 24 | @echo "✅ Code quality checks passed!" 25 | 26 | # Run tests 27 | test: 28 | @echo "🧪 Running tests..." 29 | uv run pytest tests/ -v 30 | 31 | # Install dependencies 32 | install: 33 | @echo "📦 Installing dependencies..." 34 | uv sync --dev 35 | 36 | # Install pre-commit hooks 37 | install-hooks: 38 | @echo "🪝 Installing pre-commit hooks..." 39 | uv run pre-commit install 40 | 41 | # Run all checks 42 | all: check test 43 | @echo "✅ All checks passed!" 
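# Suggested workflow: run `make install` and `make install-hooks` once after cloning,
# then `make fix` before each commit and `make all` before opening a PR.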
44 | -------------------------------------------------------------------------------- /src/tools/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import inspect 3 | import sys 4 | from pathlib import Path 5 | from typing import List 6 | 7 | # Get the directory containing this __init__.py file 8 | _tools_dir = Path(__file__).parent 9 | 10 | # Dynamically discover and import all tool functions 11 | _discovered_tools = {} 12 | __all__: List[str] = [] 13 | 14 | for py_file in _tools_dir.glob("*.py"): 15 | # Skip __init__.py and private files 16 | if py_file.name.startswith("_"): 17 | continue 18 | 19 | # Import the module 20 | module_name = py_file.stem 21 | try: 22 | module = importlib.import_module(f".{module_name}", package=__package__) 23 | 24 | # Find all callable functions in the module (exclude private functions) 25 | for name, obj in inspect.getmembers(module, inspect.isfunction): 26 | if not name.startswith("_") and obj.__module__ == module.__name__: 27 | _discovered_tools[name] = obj 28 | __all__.append(name) 29 | # Add to current namespace for imports 30 | globals()[name] = obj 31 | 32 | except ImportError as e: 33 | # Write warnings to stderr so they never pollute the MCP stdio channel 34 | print(f"Warning: Could not import {module_name}: {e}", file=sys.stderr) 35 | 36 | # Sort __all__ for consistent ordering 37 | __all__.sort() 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Suggest an idea for this project 4 | title: "[FEATURE] " 5 | labels: enhancement 6 | assignees: "" 7 | --- 8 | 9 | ## Feature Description 10 | 11 | A clear and concise description of the feature you'd like to see implemented. 12 | 13 | ## Problem Statement 14 | 15 | Is your feature request related to a problem? Please describe. 16 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 17 | 18 | ## Proposed Solution 19 | 20 | Describe the solution you'd like to see implemented. 21 | 22 | ## Alternative Solutions 23 | 24 | Describe any alternative solutions or features you've considered. 25 | 26 | ## Use Case 27 | 28 | Describe the specific use case for this feature. Who would benefit from it? 29 | 30 | ## Implementation Details 31 | 32 | If you have ideas about how this could be implemented, please describe them here. 33 | 34 | ## Additional Context 35 | 36 | Add any other context or screenshots about the feature request here. 37 | 38 | ## Checklist 39 | 40 | - [ ] I have searched existing issues to ensure this is not a duplicate 41 | - [ ] I have provided a clear description of the feature 42 | - [ ] I have explained the use case and benefits 43 | - [ ] I have considered alternative solutions 44 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Create a report to help us improve 4 | title: "[BUG] " 5 | labels: bug 6 | assignees: "" 7 | --- 8 | 9 | ## Bug Description 10 | 11 | A clear and concise description of what the bug is. 12 | 13 | ## To Reproduce 14 | 15 | Steps to reproduce the behavior: 16 | 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | ## Expected Behavior 23 | 24 | A clear and concise description of what you expected to happen.
25 | 26 | ## Actual Behavior 27 | 28 | A clear and concise description of what actually happened. 29 | 30 | ## Screenshots 31 | 32 | If applicable, add screenshots to help explain your problem. 33 | 34 | ## Environment 35 | 36 | - OS: [e.g. Ubuntu 20.04, Windows 10, macOS 12.0] 37 | - Python version: [e.g. 3.11, 3.12] 38 | - Logseq version: [e.g. 0.9.0] 39 | - MCP server version: [e.g. 0.1.0] 40 | 41 | ## Logs 42 | 43 | If applicable, add logs to help explain your problem. 44 | 45 | ## Additional Context 46 | 47 | Add any other context about the problem here. 48 | 49 | ## Checklist 50 | 51 | - [ ] I have searched existing issues to ensure this is not a duplicate 52 | - [ ] I have provided all the information requested above 53 | - [ ] I have tested this with the latest version 54 | - [ ] I have included relevant logs and screenshots 55 | -------------------------------------------------------------------------------- /tests/test_mcp_server.py: -------------------------------------------------------------------------------- 1 | """Tests for MCP server functionality.""" 2 | 3 | import pytest 4 | 5 | from src.registry import register_all_tools 6 | from src.server import mcp 7 | 8 | 9 | class TestMCPServer: 10 | """Test cases for MCP server functionality.""" 11 | 12 | def test_server_initialization(self): 13 | """Test that the MCP server can be initialized.""" 14 | assert mcp is not None 15 | assert hasattr(mcp, "list_tools") 16 | 17 | @pytest.mark.asyncio 18 | async def test_tool_registration(self): 19 | """Test that all tools are properly registered.""" 20 | # Get the list of registered tools 21 | tools = await mcp.list_tools() 22 | 23 | # Check that we have the expected number of tools 24 | assert len(tools) >= 9 25 | 26 | # Check for specific tools 27 | tool_names = [tool.name for tool in tools] 28 | expected_tools = [ 29 | "append_block_in_page", 30 | "create_page", 31 | "edit_block", 32 | "get_all_pages", 33 | "get_page_blocks", 34 | "get_block_content", 35 | "get_all_page_content", 36 | "get_page_links", 37 | "get_linked_flashcards", 38 | ] 39 | 40 | for expected_tool in expected_tools: 41 | assert expected_tool in tool_names, ( 42 | f"Tool {expected_tool} not found in registered tools" 43 | ) 44 | 45 | @pytest.mark.asyncio 46 | async def test_register_all_tools_function(self): 47 | """Test the register_all_tools function.""" 48 | # This should not raise any exceptions 49 | register_all_tools(mcp) 50 | 51 | # Verify tools are still registered 52 | tools = await mcp.list_tools() 53 | assert len(tools) >= 9 54 | -------------------------------------------------------------------------------- /.github/badges.yml: -------------------------------------------------------------------------------- 1 | # GitHub Actions Status Badges Configuration 2 | # These badges can be added to your README.md 3 | 4 | badges: 5 | - name: "Tests" 6 | image: "https://github.com/{{owner}}/{{repo}}/workflows/Test%20Suite/badge.svg" 7 | url: "https://github.com/{{owner}}/{{repo}}/actions/workflows/test.yml" 8 | alt: "Tests" 9 | 10 | - name: "Quality" 11 | image: "https://github.com/{{owner}}/{{repo}}/workflows/Code%20Quality%20%26%20Security/badge.svg" 12 | url: "https://github.com/{{owner}}/{{repo}}/actions/workflows/quality.yml" 13 | alt: "Code Quality" 14 | 15 | - name: "PR Validation" 16 | image: "https://github.com/{{owner}}/{{repo}}/workflows/Pull%20Request%20Validation/badge.svg" 17 | url: "https://github.com/{{owner}}/{{repo}}/actions/workflows/pr-validation.yml" 18 | alt: "PR Validation" 19 | 20 | - name: 
"Comprehensive Tests" 21 | image: "https://github.com/{{owner}}/{{repo}}/workflows/Comprehensive%20Test%20Suite/badge.svg" 22 | url: "https://github.com/{{owner}}/{{repo}}/actions/workflows/comprehensive-test.yml" 23 | alt: "Comprehensive Tests" 24 | 25 | # Markdown format for README.md: 26 | markdown: | 27 | [![Tests](https://github.com/{{owner}}/{{repo}}/workflows/Test%20Suite/badge.svg)](https://github.com/{{owner}}/{{repo}}/actions/workflows/test.yml) 28 | [![Quality](https://github.com/{{owner}}/{{repo}}/workflows/Code%20Quality%20%26%20Security/badge.svg)](https://github.com/{{owner}}/{{repo}}/actions/workflows/quality.yml) 29 | [![PR Validation](https://github.com/{{owner}}/{{repo}}/workflows/Pull%20Request%20Validation/badge.svg)](https://github.com/{{owner}}/{{repo}}/actions/workflows/pr-validation.yml) 30 | [![Comprehensive Tests](https://github.com/{{owner}}/{{repo}}/workflows/Comprehensive%20Test%20Suite/badge.svg)](https://github.com/{{owner}}/{{repo}}/actions/workflows/comprehensive-test.yml) 31 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Pull Request 2 | 3 | ## Description 4 | 5 | Brief description of the changes made in this PR. 6 | 7 | ## Type of Change 8 | 9 | - [ ] Bug fix (non-breaking change which fixes an issue) 10 | - [ ] New feature (non-breaking change which adds functionality) 11 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 12 | - [ ] Documentation update 13 | - [ ] Test improvements 14 | - [ ] Code refactoring 15 | 16 | ## Testing 17 | 18 | - [ ] All existing tests pass 19 | - [ ] New tests have been added for new functionality 20 | - [ ] Test coverage is maintained or improved 21 | - [ ] Manual testing has been performed 22 | 23 | ## Test Coverage 24 | 25 | - [ ] Test coverage is above 80% 26 | - [ ] All new code is covered by tests 27 | - [ ] Edge cases are tested 28 | - [ ] Error scenarios are tested 29 | 30 | ## Code Quality 31 | 32 | - [ ] Code follows project style guidelines 33 | - [ ] Self-review of code has been performed 34 | - [ ] Code is properly commented 35 | - [ ] No hardcoded secrets or credentials 36 | - [ ] No TODO/FIXME comments left in code 37 | 38 | ## Security 39 | 40 | - [ ] No security vulnerabilities introduced 41 | - [ ] Dependencies are up to date 42 | - [ ] No sensitive data exposed 43 | - [ ] Input validation is implemented where needed 44 | 45 | ## Documentation 46 | 47 | - [ ] README.md updated if needed 48 | - [ ] Code comments added for complex logic 49 | - [ ] API documentation updated if applicable 50 | - [ ] Changelog updated if needed 51 | 52 | ## Checklist 53 | 54 | - [ ] My code follows the style guidelines of this project 55 | - [ ] I have performed a self-review of my own code 56 | - [ ] I have commented my code, particularly in hard-to-understand areas 57 | - [ ] I have made corresponding changes to the documentation 58 | - [ ] My changes generate no new warnings 59 | - [ ] I have added tests that prove my fix is effective or that my feature works 60 | - [ ] New and existing unit tests pass locally with my changes 61 | - [ ] Any dependent changes have been merged and published 62 | 63 | ## Additional Notes 64 | 65 | Any additional information that reviewers should know about this PR. 66 | 67 | ## Screenshots (if applicable) 68 | 69 | Add screenshots to help explain your changes. 
70 | 71 | ## Related Issues 72 | 73 | Closes #(issue number) 74 | 75 | ## Testing Instructions 76 | 77 | 1. Steps to test the changes 78 | 2. Expected behavior 79 | 3. Any special setup required 80 | 81 | ## Performance Impact 82 | 83 | - [ ] No performance impact 84 | - [ ] Performance improvement 85 | - [ ] Performance regression (explain below) 86 | 87 | If there's a performance impact, please describe it here. 88 | 89 | ## Breaking Changes 90 | 91 | - [ ] No breaking changes 92 | - [ ] Breaking changes (explain below) 93 | 94 | If there are breaking changes, please describe them here and provide migration instructions. 95 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """Pytest configuration and shared fixtures.""" 2 | 3 | import os 4 | from unittest.mock import AsyncMock, MagicMock, patch 5 | 6 | import pytest 7 | 8 | 9 | @pytest.fixture 10 | def mock_env_vars(): 11 | """Mock environment variables for testing.""" 12 | with patch.dict( 13 | os.environ, 14 | { 15 | "LOGSEQ_API_ENDPOINT": "http://test-logseq:12315/api", 16 | "LOGSEQ_API_TOKEN": "test-token", 17 | }, 18 | ): 19 | yield 20 | 21 | 22 | @pytest.fixture 23 | def mock_aiohttp_session(): 24 | """Mock aiohttp ClientSession.""" 25 | with patch("aiohttp.ClientSession") as mock_session_class: 26 | # Create a mock session instance 27 | mock_session_instance = MagicMock() 28 | 29 | # Mock the session class to return our instance when used as context manager 30 | mock_session_class.return_value.__aenter__ = AsyncMock( 31 | return_value=mock_session_instance 32 | ) 33 | mock_session_class.return_value.__aexit__ = AsyncMock(return_value=None) 34 | 35 | # Create a mock post context manager 36 | mock_post_context = MagicMock() 37 | 38 | # Make the post method return the context manager directly 39 | mock_session_instance.post.return_value = mock_post_context 40 | 41 | # Store the session instance and post context for easy access in tests 42 | mock_session_class._session_instance = mock_session_instance 43 | mock_session_class._post_context = mock_post_context 44 | 45 | yield mock_session_class 46 | 47 | 48 | @pytest.fixture 49 | def mock_successful_response(): 50 | """Mock successful HTTP response.""" 51 | response = MagicMock() 52 | response.status = 200 53 | response.json = AsyncMock(return_value={"success": True, "data": "test data"}) 54 | return response 55 | 56 | 57 | @pytest.fixture 58 | def mock_error_response(): 59 | """Mock error HTTP response.""" 60 | response = MagicMock() 61 | response.status = 500 62 | response.json = AsyncMock(return_value={"error": "Internal server error"}) 63 | return response 64 | 65 | 66 | @pytest.fixture 67 | def sample_page_data(): 68 | """Sample page data for testing.""" 69 | return { 70 | "id": 123, 71 | "uuid": "page-uuid-123", 72 | "originalName": "Test Page", 73 | "name": "Test Page", 74 | "journal?": False, 75 | "createdAt": 1640995200000, 76 | "updatedAt": 1640995200000, 77 | } 78 | 79 | 80 | @pytest.fixture 81 | def sample_block_data(): 82 | """Sample block data for testing.""" 83 | return { 84 | "id": 456, 85 | "uuid": "block-uuid-456", 86 | "content": "Test block content", 87 | "level": 1, 88 | "page": {"id": 123, "name": "Test Page"}, 89 | "properties": {"status": "active"}, 90 | "children": [], 91 | } 92 | -------------------------------------------------------------------------------- /tests/test_basic_functionality.py: 
-------------------------------------------------------------------------------- 1 | """Basic functionality tests that actually work.""" 2 | 3 | from unittest.mock import patch 4 | 5 | 6 | from src.registry import register_all_tools 7 | from src.server import mcp 8 | from src.tools.get_all_pages import get_all_pages 9 | 10 | 11 | class TestBasicFunctionality: 12 | """Basic tests that verify core functionality works.""" 13 | 14 | def test_imports_work(self): 15 | """Test that all imports work correctly.""" 16 | from src.tools import ( 17 | append_block_in_page, 18 | create_page, 19 | edit_block, 20 | get_all_page_content, 21 | get_all_pages, 22 | get_block_content, 23 | get_linked_flashcards, 24 | get_page_blocks, 25 | get_page_links, 26 | ) 27 | 28 | # Verify functions exist 29 | assert callable(append_block_in_page) 30 | assert callable(create_page) 31 | assert callable(edit_block) 32 | assert callable(get_all_pages) 33 | assert callable(get_page_blocks) 34 | assert callable(get_block_content) 35 | assert callable(get_all_page_content) 36 | assert callable(get_page_links) 37 | assert callable(get_linked_flashcards) 38 | 39 | def test_server_initialization(self): 40 | """Test that the MCP server can be initialized.""" 41 | assert mcp is not None 42 | assert hasattr(mcp, "list_tools") 43 | 44 | def test_tool_registration_doesnt_crash(self): 45 | """Test that tool registration doesn't crash.""" 46 | # This should not raise any exceptions 47 | register_all_tools(mcp) 48 | 49 | def test_environment_variables_handling(self): 50 | """Test that environment variables are handled correctly.""" 51 | with patch.dict( 52 | "os.environ", 53 | { 54 | "LOGSEQ_API_ENDPOINT": "http://test:12315/api", 55 | "LOGSEQ_API_TOKEN": "test-token", 56 | }, 57 | ): 58 | # Test that the functions can access environment variables 59 | from src.tools.get_all_pages import get_all_pages 60 | 61 | assert callable(get_all_pages) 62 | 63 | def test_function_signatures(self): 64 | """Test that functions have correct signatures.""" 65 | # Test function signatures exist 66 | import inspect 67 | 68 | from src.tools.append_block_in_page import append_block_in_page 69 | from src.tools.create_page import create_page 70 | 71 | # get_all_pages should accept optional start/end parameters 72 | sig = inspect.signature(get_all_pages) 73 | assert "start" in sig.parameters 74 | assert "end" in sig.parameters 75 | 76 | # create_page should accept page_name and optional parameters 77 | sig = inspect.signature(create_page) 78 | assert "page_name" in sig.parameters 79 | assert "properties" in sig.parameters 80 | assert "format" in sig.parameters 81 | 82 | # append_block_in_page should accept page_identifier and content 83 | sig = inspect.signature(append_block_in_page) 84 | assert "page_identifier" in sig.parameters 85 | assert "content" in sig.parameters 86 | -------------------------------------------------------------------------------- /tests/test_get_page_blocks.py: -------------------------------------------------------------------------------- 1 | """Tests for get_page_blocks tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.get_page_blocks import get_page_blocks 8 | 9 | 10 | class TestGetPageBlocks: 11 | """Test cases for get_page_blocks function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_get_page_blocks_success( 15 | self, mock_env_vars, mock_aiohttp_session, sample_block_data 16 | ): 17 | """Test successful page blocks retrieval.""" 18 | # Setup mock response 19 | 
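# The mock_aiohttp_session fixture (see conftest.py) exposes the session's post()
# context manager as _post_context; wiring its __aenter__/__aexit__ below lets
# `async with session.post(...)` yield the fake response built here.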
mock_response = MagicMock() 20 | mock_response.status = 200 21 | mock_response.json = AsyncMock(return_value=[sample_block_data]) 22 | 23 | # Setup session mock 24 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 25 | return_value=mock_response 26 | ) 27 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 28 | 29 | result = await get_page_blocks("Test Page") 30 | 31 | assert len(result) == 1 32 | assert "🌳 **PAGE BLOCKS TREE STRUCTURE**" in result[0].text 33 | assert "Test Page" in result[0].text 34 | 35 | @pytest.mark.asyncio 36 | async def test_get_page_blocks_empty(self, mock_env_vars, mock_aiohttp_session): 37 | """Test page blocks retrieval with empty result.""" 38 | # Setup mock response 39 | mock_response = MagicMock() 40 | mock_response.status = 200 41 | mock_response.json = AsyncMock(return_value=[]) 42 | 43 | # Setup session mock 44 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 45 | return_value=mock_response 46 | ) 47 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 48 | 49 | result = await get_page_blocks("Test Page") 50 | 51 | assert len(result) == 1 52 | assert "✅ Page 'Test Page' has no blocks" in result[0].text 53 | 54 | @pytest.mark.asyncio 55 | async def test_get_page_blocks_http_error( 56 | self, mock_env_vars, mock_aiohttp_session 57 | ): 58 | """Test page blocks retrieval with HTTP error.""" 59 | # Setup mock response 60 | mock_response = MagicMock() 61 | mock_response.status = 500 62 | 63 | # Setup session mock 64 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 65 | return_value=mock_response 66 | ) 67 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 68 | 69 | result = await get_page_blocks("Test Page") 70 | 71 | assert len(result) == 1 72 | assert "❌ Failed to fetch page blocks: HTTP 500" in result[0].text 73 | 74 | @pytest.mark.asyncio 75 | async def test_get_page_blocks_exception(self, mock_env_vars, mock_aiohttp_session): 76 | """Test page blocks retrieval with exception.""" 77 | # Setup session mock to raise exception 78 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 79 | "Network error" 80 | ) 81 | 82 | result = await get_page_blocks("Test Page") 83 | 84 | assert len(result) == 1 85 | assert "❌ Error fetching page blocks: Network error" in result[0].text 86 | -------------------------------------------------------------------------------- /tests/test_append_block_in_page.py: -------------------------------------------------------------------------------- 1 | """Tests for append_block_in_page tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.append_block_in_page import append_block_in_page 8 | 9 | 10 | class TestAppendBlockInPage: 11 | """Test cases for append_block_in_page function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_append_block_success_basic( 15 | self, mock_env_vars, mock_aiohttp_session 16 | ): 17 | """Test successful block append with basic parameters.""" 18 | # Setup mock response 19 | mock_response = MagicMock() 20 | mock_response.status = 200 21 | mock_response.json = AsyncMock(return_value={"success": True}) 22 | 23 | # Setup session mock 24 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 25 | return_value=mock_response 26 | ) 27 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 28 | 29 | result = await append_block_in_page("Test Page", "Test content") 30 | 31 | assert len(result) == 1 32 | 
assert "✅ **BLOCK APPENDED SUCCESSFULLY**" in result[0].text 33 | assert "Test Page" in result[0].text 34 | 35 | @pytest.mark.asyncio 36 | async def test_append_block_with_positioning( 37 | self, mock_env_vars, mock_aiohttp_session 38 | ): 39 | """Test block append with positioning options.""" 40 | # Setup mock response 41 | mock_response = MagicMock() 42 | mock_response.status = 200 43 | mock_response.json = AsyncMock(return_value={"success": True}) 44 | 45 | # Setup session mock 46 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 47 | return_value=mock_response 48 | ) 49 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 50 | 51 | result = await append_block_in_page( 52 | "Test Page", "Test content", before="block-uuid-123", is_page_block=True 53 | ) 54 | 55 | assert len(result) == 1 56 | assert "✅ **BLOCK APPENDED SUCCESSFULLY**" in result[0].text 57 | assert "📍 Positioned before block: block-uuid-123" in result[0].text 58 | assert "📍 Block type: Page-level block" in result[0].text 59 | 60 | @pytest.mark.asyncio 61 | async def test_append_block_http_error(self, mock_env_vars, mock_aiohttp_session): 62 | """Test block append with HTTP error.""" 63 | # Setup mock response 64 | mock_response = MagicMock() 65 | mock_response.status = 500 66 | 67 | # Setup session mock 68 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 69 | return_value=mock_response 70 | ) 71 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 72 | 73 | result = await append_block_in_page("Test Page", "Test content") 74 | 75 | assert len(result) == 1 76 | assert "❌ Failed to append block: HTTP 500" in result[0].text 77 | 78 | @pytest.mark.asyncio 79 | async def test_append_block_exception(self, mock_env_vars, mock_aiohttp_session): 80 | """Test block append with exception.""" 81 | # Setup session mock to raise exception 82 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 83 | "Network error" 84 | ) 85 | 86 | result = await append_block_in_page("Test Page", "Test content") 87 | 88 | assert len(result) == 1 89 | assert "❌ Error appending block: Network error" in result[0].text 90 | -------------------------------------------------------------------------------- /tests/test_get_all_pages.py: -------------------------------------------------------------------------------- 1 | """Tests for get_all_pages tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.get_all_pages import get_all_pages 8 | 9 | 10 | class TestGetAllPages: 11 | """Test cases for get_all_pages function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_get_all_pages_success( 15 | self, mock_env_vars, mock_aiohttp_session, sample_page_data 16 | ): 17 | """Test successful pages retrieval.""" 18 | # Setup mock response 19 | mock_response = MagicMock() 20 | mock_response.status = 200 21 | mock_response.json = AsyncMock(return_value=[sample_page_data]) 22 | 23 | # Setup session mock 24 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 25 | return_value=mock_response 26 | ) 27 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 28 | 29 | result = await get_all_pages() 30 | 31 | assert len(result) == 1 32 | assert "📊 **LOGSEQ PAGES LISTING**" in result[0].text 33 | assert "Test Page" in result[0].text 34 | 35 | @pytest.mark.asyncio 36 | async def test_get_all_pages_with_limits( 37 | self, mock_env_vars, mock_aiohttp_session, sample_page_data 38 | ): 39 | """Test pages 
retrieval with start/end limits.""" 40 | # Setup mock response 41 | mock_response = MagicMock() 42 | mock_response.status = 200 43 | mock_response.json = AsyncMock(return_value=[sample_page_data]) 44 | 45 | # Setup session mock 46 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 47 | return_value=mock_response 48 | ) 49 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 50 | 51 | result = await get_all_pages(start=0, end=1) 52 | 53 | assert len(result) == 1 54 | assert "showing indices 0-1" in result[0].text 55 | 56 | @pytest.mark.asyncio 57 | async def test_get_all_pages_empty(self, mock_env_vars, mock_aiohttp_session): 58 | """Test pages retrieval with empty result.""" 59 | # Setup mock response 60 | mock_response = MagicMock() 61 | mock_response.status = 200 62 | mock_response.json = AsyncMock(return_value=[]) 63 | 64 | # Setup session mock 65 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 66 | return_value=mock_response 67 | ) 68 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 69 | 70 | result = await get_all_pages() 71 | 72 | assert len(result) == 1 73 | assert "✅ No pages found in Logseq graph" in result[0].text 74 | 75 | @pytest.mark.asyncio 76 | async def test_get_all_pages_http_error(self, mock_env_vars, mock_aiohttp_session): 77 | """Test pages retrieval with HTTP error.""" 78 | # Setup mock response 79 | mock_response = MagicMock() 80 | mock_response.status = 500 81 | 82 | # Setup session mock 83 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 84 | return_value=mock_response 85 | ) 86 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 87 | 88 | result = await get_all_pages() 89 | 90 | assert len(result) == 1 91 | assert "❌ Failed to fetch pages: HTTP 500" in result[0].text 92 | 93 | @pytest.mark.asyncio 94 | async def test_get_all_pages_exception(self, mock_env_vars, mock_aiohttp_session): 95 | """Test pages retrieval with exception.""" 96 | # Setup session mock to raise exception 97 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 98 | "Network error" 99 | ) 100 | 101 | result = await get_all_pages() 102 | 103 | assert len(result) == 1 104 | assert "❌ Error fetching pages: Network error" in result[0].text 105 | -------------------------------------------------------------------------------- /tests/test_create_page.py: -------------------------------------------------------------------------------- 1 | """Tests for create_page tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.create_page import create_page 8 | 9 | 10 | class TestCreatePage: 11 | """Test cases for create_page function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_create_page_success_basic(self, mock_env_vars, mock_aiohttp_session): 15 | """Test successful page creation with basic parameters.""" 16 | # Setup mock response 17 | mock_response = MagicMock() 18 | mock_response.status = 200 19 | mock_response.json = AsyncMock(return_value={"success": True}) 20 | 21 | # Setup session mock 22 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 23 | return_value=mock_response 24 | ) 25 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 26 | 27 | result = await create_page("Test Page") 28 | 29 | assert len(result) == 1 30 | assert "✅ **PAGE CREATED SUCCESSFULLY**" in result[0].text 31 | assert "Test Page" in result[0].text 32 | 33 | @pytest.mark.asyncio 34 | async def 
test_create_page_with_properties( 35 | self, mock_env_vars, mock_aiohttp_session 36 | ): 37 | """Test page creation with properties.""" 38 | # Setup mock response 39 | mock_response = MagicMock() 40 | mock_response.status = 200 41 | mock_response.json = AsyncMock(return_value={"success": True}) 42 | 43 | # Setup session mock 44 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 45 | return_value=mock_response 46 | ) 47 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 48 | 49 | properties = {"status": "active", "type": "note"} 50 | result = await create_page("Test Page", properties=properties) 51 | 52 | assert len(result) == 1 53 | assert "✅ **PAGE CREATED SUCCESSFULLY**" in result[0].text 54 | assert "⚙️ Properties set: 2 items" in result[0].text 55 | 56 | @pytest.mark.asyncio 57 | async def test_create_page_with_format(self, mock_env_vars, mock_aiohttp_session): 58 | """Test page creation with format.""" 59 | # Setup mock response 60 | mock_response = MagicMock() 61 | mock_response.status = 200 62 | mock_response.json = AsyncMock(return_value={"success": True}) 63 | 64 | # Setup session mock 65 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 66 | return_value=mock_response 67 | ) 68 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 69 | 70 | result = await create_page("Test Page", format="markdown") 71 | 72 | assert len(result) == 1 73 | assert "✅ **PAGE CREATED SUCCESSFULLY**" in result[0].text 74 | assert "Format: markdown" in result[0].text 75 | 76 | @pytest.mark.asyncio 77 | async def test_create_page_http_error(self, mock_env_vars, mock_aiohttp_session): 78 | """Test page creation with HTTP error.""" 79 | # Setup mock response 80 | mock_response = MagicMock() 81 | mock_response.status = 500 82 | 83 | # Setup session mock 84 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 85 | return_value=mock_response 86 | ) 87 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 88 | 89 | result = await create_page("Test Page") 90 | 91 | assert len(result) == 1 92 | assert "❌ Failed to create page: HTTP 500" in result[0].text 93 | 94 | @pytest.mark.asyncio 95 | async def test_create_page_exception(self, mock_env_vars, mock_aiohttp_session): 96 | """Test page creation with exception.""" 97 | # Setup session mock to raise exception 98 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 99 | "Network error" 100 | ) 101 | 102 | result = await create_page("Test Page") 103 | 104 | assert len(result) == 1 105 | assert "❌ Error creating page: Network error" in result[0].text 106 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. 
For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # Ruff stuff: 171 | .ruff_cache/ 172 | 173 | # PyPI configuration file 174 | .pypirc 175 | 176 | # Cursor 177 | .cursor -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Main CI Pipeline 2 | 3 | on: 4 | push: 5 | branches: [main, develop] 6 | pull_request: 7 | branches: [main, develop] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ["3.11", "3.12", "3.13"] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Install uv 20 | uses: astral-sh/setup-uv@v3 21 | with: 22 | version: "latest" 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | run: uv python install ${{ matrix.python-version }} 26 | 27 | - name: Install dependencies 28 | run: uv sync --group test 29 | 30 | - name: Run ruff check 31 | run: uv run ruff check 32 | 33 | - name: Run ruff format check 34 | run: uv run ruff format --check 35 | 36 | - name: Run all tests with coverage 37 | run: uv run pytest --cov=src --cov-report=term-missing --cov-fail-under=80 38 | 39 | - name: Test MCP server health and tools 40 | run: uv run pytest tests/test_mcp_server.py -v 41 | 42 | - name: Validate tool discovery 43 | run: | 44 | uv run python -c " 45 | import sys 46 | sys.path.insert(0, 'src') 47 | import tools 48 | print(f'✅ Discovered {len(tools.__all__)} tools: {tools.__all__}') 49 | 50 | expected_tools = [ 51 | 'append_block_in_page', 52 | 'create_page', 53 | 'edit_block', 54 | 'get_all_pages', 55 | 'get_page_blocks', 56 | 'get_block_content', 57 | 'get_all_page_content', 58 | 'get_page_links', 59 | 'get_linked_flashcards' 60 | ] 61 | 62 | for tool in expected_tools: 63 | assert tool in tools.__all__, f'Missing expected tool: {tool}' 64 | 65 | print('✅ All expected tools are discovered') 66 | " 67 | 68 | - name: Upload coverage reports 69 | uses: actions/upload-artifact@v4 70 | if: always() 71 | with: 72 | name: coverage-report-python${{ matrix.python-version }} 73 | path: | 74 | htmlcov/ 75 | coverage.xml 76 | 77 | build-test: 78 | runs-on: ubuntu-latest 79 | needs: test 80 | if: github.event_name == 'pull_request' 81 | 82 | steps: 83 | - uses: actions/checkout@v4 84 | 85 | - name: Install uv 86 | uses: astral-sh/setup-uv@v3 87 | with: 88 | version: "latest" 89 | 90 | - name: Set up Python 3.12 91 | run: uv python install 3.12 92 | 93 | - name: Install dependencies 94 | run: uv sync --group test 95 | 96 | - name: Build package 97 | run: uv build 98 | 99 | - name: Test package installation 100 | run: | 101 | uv pip install dist/*.whl 102 | uv run python -c " 103 | import sys 104 | sys.path.insert(0, 'src') 105 | import tools 106 | print(f'✅ Package installation successful: {len(tools.__all__)} tools available') 107 | " 108 | 109 | ci-summary: 110 | runs-on: ubuntu-latest 111 | needs: [test, build-test] 112 | if: always() 113 | 114 | steps: 115 | - name: CI Summary 116 | run: | 117 | echo "🔍 CI Pipeline Summary" 118 | echo "=====================" 119 | 120 | if [ "${{ needs.test.result }}" == "success" ]; then 121 | echo "✅ Tests: PASSED" 122 | else 123 | echo "❌ Tests: FAILED" 124 | fi 125 | 126 | if [ "${{ needs.build-test.result }}" == "success" ] || [ "${{ needs.build-test.result }}" == "skipped" ]; then 127 | echo "✅ Build: PASSED/SKIPPED" 128 | else 129 | echo "❌
Build: FAILED" 130 | fi 131 | 132 | if [ "${{ needs.test.result }}" == "success" ]; then 133 | echo "" 134 | echo "🎉 CI pipeline completed successfully!" 135 | echo "✅ Code is ready for merge" 136 | else 137 | echo "" 138 | echo "❌ CI pipeline failed" 139 | echo "Please address the issues before merging" 140 | exit 1 141 | fi 142 | -------------------------------------------------------------------------------- /tests/test_edit_block.py: -------------------------------------------------------------------------------- 1 | """Tests for edit_block tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.edit_block import edit_block 8 | 9 | 10 | class TestEditBlock: 11 | """Test cases for edit_block function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_edit_block_success_content( 15 | self, mock_env_vars, mock_aiohttp_session 16 | ): 17 | """Test successful block edit with content.""" 18 | # Setup mock response 19 | mock_response = MagicMock() 20 | mock_response.status = 200 21 | mock_response.json = AsyncMock(return_value={"success": True}) 22 | 23 | # Setup session mock 24 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 25 | return_value=mock_response 26 | ) 27 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 28 | 29 | result = await edit_block("block-uuid-123", content="Updated content") 30 | 31 | assert len(result) == 1 32 | assert "✅ **BLOCK EDITED SUCCESSFULLY**" in result[0].text 33 | assert "📝 **UPDATED CONTENT:**" in result[0].text 34 | 35 | @pytest.mark.asyncio 36 | async def test_edit_block_success_properties( 37 | self, mock_env_vars, mock_aiohttp_session 38 | ): 39 | """Test successful block edit with properties.""" 40 | # Setup mock response 41 | mock_response = MagicMock() 42 | mock_response.status = 200 43 | mock_response.json = AsyncMock(return_value={"success": True}) 44 | 45 | # Setup session mock 46 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 47 | return_value=mock_response 48 | ) 49 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 50 | 51 | properties = {"status": "completed", "priority": "high"} 52 | result = await edit_block("block-uuid-123", properties=properties) 53 | 54 | assert len(result) == 1 55 | assert "✅ **BLOCK EDITED SUCCESSFULLY**" in result[0].text 56 | assert "⚙️ **UPDATED PROPERTIES:**" in result[0].text 57 | 58 | @pytest.mark.asyncio 59 | async def test_edit_block_success_options( 60 | self, mock_env_vars, mock_aiohttp_session 61 | ): 62 | """Test successful block edit with options.""" 63 | # Setup mock response 64 | mock_response = MagicMock() 65 | mock_response.status = 200 66 | mock_response.json = AsyncMock(return_value={"success": True}) 67 | 68 | # Setup session mock 69 | mock_aiohttp_session._post_context.__aenter__ = AsyncMock( 70 | return_value=mock_response 71 | ) 72 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 73 | 74 | result = await edit_block( 75 | "block-uuid-123", content="Updated content", cursor_position=10, focus=True 76 | ) 77 | 78 | assert len(result) == 1 79 | assert "✅ **BLOCK EDITED SUCCESSFULLY**" in result[0].text 80 | 81 | @pytest.mark.asyncio 82 | async def test_edit_block_http_error(self, mock_env_vars, mock_aiohttp_session): 83 | """Test block edit with HTTP error.""" 84 | # Setup mock response 85 | mock_response = MagicMock() 86 | mock_response.status = 500 87 | 88 | # Setup session mock 89 | mock_aiohttp_session._post_context.__aenter__ = 
AsyncMock( 90 | return_value=mock_response 91 | ) 92 | mock_aiohttp_session._post_context.__aexit__ = AsyncMock(return_value=None) 93 | 94 | result = await edit_block("block-uuid-123", content="Updated content") 95 | 96 | assert len(result) == 1 97 | assert "❌ Failed to edit block: HTTP 500" in result[0].text 98 | 99 | @pytest.mark.asyncio 100 | async def test_edit_block_exception(self, mock_env_vars, mock_aiohttp_session): 101 | """Test block edit with exception.""" 102 | # Setup session mock to raise exception 103 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 104 | "Network error" 105 | ) 106 | 107 | result = await edit_block("block-uuid-123", content="Updated content") 108 | 109 | assert len(result) == 1 110 | assert "❌ Error editing block: Network error" in result[0].text 111 | -------------------------------------------------------------------------------- /src/tools/append_block_in_page.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import Any, List, Optional 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def append_block_in_page( 15 | page_identifier: str, 16 | content: str, 17 | before: Optional[str] = None, 18 | sibling: Optional[str] = None, 19 | is_page_block: Optional[bool] = None, 20 | ) -> List[TextContent]: 21 | """ 22 | Append a new block to a specified page in Logseq. 23 | 24 | This tool allows you to add new content blocks to any page in your Logseq graph. 25 | You can specify positioning options to control where the block is inserted. 
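For example, an illustrative call (the page name and UUID are placeholders):

    await append_block_in_page("Meeting Notes", "- follow up with the team", before="target-block-uuid")

With no positioning options, the block is appended to the end of the page.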
26 | 27 | Args: 28 | page_identifier: The name or UUID of the page to append the block to 29 | content: The content of the block to append 30 | before: Optional UUID of a block to insert before 31 | sibling: Optional UUID of a sibling block for positioning 32 | is_page_block: Optional boolean to indicate if this is a page-level block 33 | """ 34 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 35 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 36 | 37 | headers = {"Authorization": f"Bearer {token}"} 38 | 39 | # Build options object 40 | options: dict[str, Any] = {} 41 | if before is not None: 42 | options["before"] = before 43 | if sibling is not None: 44 | options["sibling"] = sibling 45 | if is_page_block is not None: 46 | options["isPageBlock"] = is_page_block 47 | 48 | async with aiohttp.ClientSession() as session: 49 | try: 50 | # Prepare the API call 51 | payload = { 52 | "method": "logseq.Editor.appendBlockInPage", 53 | "args": [page_identifier, content, options] 54 | if options 55 | else [page_identifier, content], 56 | } 57 | 58 | async with session.post( 59 | endpoint, json=payload, headers=headers 60 | ) as response: 61 | if response.status != 200: 62 | return [ 63 | TextContent( 64 | type="text", 65 | text=f"❌ Failed to append block: HTTP {response.status}", 66 | ) 67 | ] 68 | 69 | result = await response.json() 70 | 71 | # Check if the result indicates success 72 | if result is None or result == "": 73 | return [ 74 | TextContent( 75 | type="text", 76 | text="❌ Failed to append block: No response from Logseq API", 77 | ) 78 | ] 79 | 80 | # Build success response 81 | output_lines = [ 82 | "✅ **BLOCK APPENDED SUCCESSFULLY**", 83 | f"📄 Page: {page_identifier}", 84 | f"📝 Content: {content}", 85 | "", 86 | ] 87 | 88 | # Add positioning info if specified 89 | if before: 90 | output_lines.append(f"📍 Positioned before block: {before}") 91 | if sibling: 92 | output_lines.append(f"📍 Positioned as sibling of: {sibling}") 93 | if is_page_block: 94 | output_lines.append("📍 Block type: Page-level block") 95 | 96 | if not (before or sibling or is_page_block): 97 | output_lines.append("📍 Positioned: At the end of the page") 98 | 99 | output_lines.extend( 100 | [ 101 | "", 102 | "🔗 **NEXT STEPS:**", 103 | "• Check your Logseq graph to see the new block", 104 | "• Use get_page_blocks to verify the block was added", 105 | "• Use get_block_content to get details of the new block", 106 | ] 107 | ) 108 | 109 | return [TextContent(type="text", text="\n".join(output_lines))] 110 | 111 | except Exception as e: 112 | return [ 113 | TextContent(type="text", text=f"❌ Error appending block: {str(e)}") 114 | ] 115 | -------------------------------------------------------------------------------- /src/tools/create_page.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import Any, Dict, List, Optional 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def create_page( 15 | page_name: str, 16 | properties: Optional[Dict[str, Any]] = None, 17 | format: Optional[str] = None, 18 | ) -> List[TextContent]: 19 | """ 20 | Create a new page in Logseq. 21 | 22 | This tool allows you to create new pages in your Logseq graph with optional 23 | properties and format specifications. 
The page will be created and can be 24 | immediately used for adding content. 25 | 26 | Args: 27 | page_name: The name of the page to create 28 | properties: Optional dictionary of properties to set on the page 29 | format: Optional format for the page ("markdown" or "org") 30 | """ 31 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 32 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 33 | 34 | headers = {"Authorization": f"Bearer {token}"} 35 | 36 | # Build options object 37 | options: dict[str, Any] = {} 38 | if properties: 39 | options["properties"] = properties 40 | if format: 41 | options["format"] = format 42 | 43 | async with aiohttp.ClientSession() as session: 44 | try: 45 | # Prepare the API call 46 | payload = { 47 | "method": "logseq.Editor.createPage", 48 | "args": [page_name, options] if options else [page_name], 49 | } 50 | 51 | async with session.post( 52 | endpoint, json=payload, headers=headers 53 | ) as response: 54 | if response.status != 200: 55 | return [ 56 | TextContent( 57 | type="text", 58 | text=f"❌ Failed to create page: HTTP {response.status}", 59 | ) 60 | ] 61 | 62 | result = await response.json() 63 | 64 | # Check if the result indicates success 65 | if result is None or result == "": 66 | return [ 67 | TextContent( 68 | type="text", 69 | text="❌ Failed to create page: No response from Logseq API", 70 | ) 71 | ] 72 | 73 | # Build success response 74 | output_lines = [ 75 | "✅ **PAGE CREATED SUCCESSFULLY**", 76 | f"📄 Page Name: {page_name}", 77 | "", 78 | ] 79 | 80 | # Add page details if available 81 | if isinstance(result, dict): 82 | page_id = result.get("id", "N/A") 83 | page_uuid = result.get("uuid", "N/A") 84 | original_name = result.get("originalName", page_name) 85 | is_journal = result.get("journal?", False) 86 | page_format = result.get("format", "markdown") 87 | 88 | output_lines.extend( 89 | [ 90 | "📊 **PAGE DETAILS:**", 91 | f"• ID: {page_id}", 92 | f"• UUID: {page_uuid}", 93 | f"• Original Name: {original_name}", 94 | f"• Format: {page_format}", 95 | f"• Journal Page: {'Yes' if is_journal else 'No'}", 96 | "", 97 | ] 98 | ) 99 | 100 | # Add properties if available 101 | page_properties = result.get("properties", {}) 102 | if page_properties: 103 | output_lines.extend( 104 | [ 105 | "⚙️ **PAGE PROPERTIES:**", 106 | *[ 107 | f"• {prop_name}: {prop_value}" 108 | for prop_name, prop_value in page_properties.items() 109 | ], 110 | "", 111 | ] 112 | ) 113 | 114 | # Add creation info 115 | if properties: 116 | output_lines.append(f"⚙️ Properties set: {len(properties)} items") 117 | if format: 118 | output_lines.append(f"📝 Format: {format}") 119 | 120 | output_lines.extend( 121 | [ 122 | "", 123 | "🔗 **NEXT STEPS:**", 124 | "• Check your Logseq graph to see the new page", 125 | "• Use get_all_pages to verify the page was created", 126 | "• Use append_block_in_page to add content to the page", 127 | "• Use get_page_blocks to view the page structure", 128 | ] 129 | ) 130 | 131 | return [TextContent(type="text", text="\n".join(output_lines))] 132 | 133 | except Exception as e: 134 | return [TextContent(type="text", text=f"❌ Error creating page: {str(e)}")] 135 | -------------------------------------------------------------------------------- /.github/workflows/README.md: -------------------------------------------------------------------------------- 1 | # GitHub Actions Workflows 2 | 3 | This directory contains the consolidated GitHub Actions workflows for the logseq-api-mcp project. 
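To sanity-check a branch against the same gates before pushing, you can approximate the core checks locally. This is a convenience sketch built from the `uv` commands these workflows run; the workflow files below remain the authoritative definitions, and exact flags may differ:

```bash
# Approximate the main CI gates locally (sketch; see the workflow files for authoritative flags)
uv sync
uv run ruff check src/ tests/
uv run ruff format --check src/ tests/
uv run pytest tests/ -v --cov=src --cov-fail-under=80
uv run mypy src/ --ignore-missing-imports --show-error-codes
```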
4 | 5 | ## 🎯 **Workflow Structure** 6 | 7 | ### **1. `ci.yml` - Main CI Pipeline** 8 | 9 | **Triggers:** Push to main/develop, Pull requests to main/develop 10 | 11 | **Purpose:** Core continuous integration with comprehensive testing 12 | 13 | - **Multi-version testing:** Python 3.11, 3.12, 3.13 14 | - **Code quality:** Ruff linting and formatting 15 | - **Testing:** Full test suite with 80%+ coverage requirement 16 | - **MCP validation:** Server health and tool discovery 17 | - **Build testing:** Package building and installation (PR only) 18 | 19 | ### **2. `quality.yml` - Advanced Quality & Security** 20 | 21 | **Triggers:** Pull requests, Daily schedule (3 AM UTC), Manual dispatch 22 | 23 | **Purpose:** Advanced code quality, security, and performance checks 24 | 25 | - **Type checking:** MyPy static analysis 26 | - **Security scanning:** Bandit security linter 27 | - **Dependency checks:** Safety vulnerability scanning 28 | - **Code analysis:** TODO/FIXME detection, secret scanning 29 | - **Performance testing:** Memory usage validation (scheduled only) 30 | 31 | ### **3. `release.yml` - Release & Cross-Platform Testing** 32 | 33 | **Triggers:** Release events, Weekly schedule (Sunday 2 AM UTC), Manual dispatch 34 | 35 | **Purpose:** Comprehensive testing for releases and cross-platform compatibility 36 | 37 | - **Cross-platform testing:** Ubuntu, Windows, macOS 38 | - **Multi-version testing:** Python 3.11, 3.12, 3.13 39 | - **Package building:** Distribution package creation 40 | - **Integration testing:** MCP server with real tools 41 | - **Tool validation:** All tool functions testing 42 | 43 | ## 📊 **Workflow Comparison** 44 | 45 | | Feature | ci.yml | quality.yml | release.yml | 46 | | ---------------------- | ---------------- | ----------- | ---------------------- | 47 | | **Frequency** | Every push/PR | PR + Daily | Release + Weekly | 48 | | **Python Versions** | 3.11, 3.12, 3.13 | 3.12 | 3.11, 3.12, 3.13 | 49 | | **Operating Systems** | Ubuntu | Ubuntu | Ubuntu, Windows, macOS | 50 | | **Basic Testing** | ✅ | ❌ | ✅ | 51 | | **Coverage Analysis** | ✅ | ❌ | ✅ | 52 | | **Linting/Formatting** | ✅ | ❌ | ❌ | 53 | | **Type Checking** | ❌ | ✅ | ❌ | 54 | | **Security Scanning** | ❌ | ✅ | ❌ | 55 | | **Dependency Checks** | ❌ | ✅ | ❌ | 56 | | **Package Building** | ✅ (PR only) | ❌ | ✅ | 57 | | **Cross-Platform** | ❌ | ❌ | ✅ | 58 | 59 | ## 🚀 **Benefits of Consolidation** 60 | 61 | ### **Before (5 workflows):** 62 | 63 | - ❌ **Massive duplication** of tasks 64 | - ❌ **Redundant CI runs** (same tests multiple times) 65 | - ❌ **Higher GitHub Actions costs** 66 | - ❌ **Complex maintenance** (5 files to update) 67 | - ❌ **Unclear purpose** for each workflow 68 | 69 | ### **After (3 workflows):** 70 | 71 | - ✅ **Focused purpose** for each workflow 72 | - ✅ **Eliminated duplication** of tasks 73 | - ✅ **Reduced CI time** and costs 74 | - ✅ **Easier maintenance** (3 files to update) 75 | - ✅ **Clear separation** of concerns 76 | - ✅ **Better resource utilization** 77 | 78 | ## 🔧 **Workflow Triggers** 79 | 80 | ### **Main CI (`ci.yml`)** 81 | 82 | ```yaml 83 | on: 84 | push: 85 | branches: [main, develop] 86 | pull_request: 87 | branches: [main, develop] 88 | ``` 89 | 90 | ### **Quality & Security (`quality.yml`)** 91 | 92 | ```yaml 93 | on: 94 | pull_request: 95 | branches: [main, develop] 96 | schedule: 97 | - cron: "0 3 * * *" # Daily at 3 AM UTC 98 | workflow_dispatch: 99 | ``` 100 | 101 | ### **Release & Cross-Platform (`release.yml`)** 102 | 103 | ```yaml 104 | on: 105 | release: 106 | 
types: [published] 107 | schedule: 108 | - cron: "0 2 * * 0" # Weekly on Sunday at 2 AM UTC 109 | workflow_dispatch: 110 | ``` 111 | 112 | ## 📈 **Coverage Requirements** 113 | 114 | - **Main CI:** 80% minimum coverage 115 | - **Release Testing:** 85% minimum coverage 116 | - **Quality Checks:** No coverage requirement (focuses on code quality) 117 | 118 | ## 🛡️ **Security Features** 119 | 120 | - **Bandit security scanning** for common vulnerabilities 121 | - **Dependency vulnerability checks** with Safety 122 | - **Hardcoded secret detection** 123 | - **License compliance checking** 124 | 125 | ## 📦 **Artifacts Generated** 126 | 127 | - **Coverage reports** (HTML, XML) 128 | - **Security reports** (Bandit JSON) 129 | - **Dependency reports** (Safety, Licenses) 130 | - **Package artifacts** (Wheels, distributions) 131 | - **Test results** (Cross-platform) 132 | 133 | ## 🔄 **Migration Notes** 134 | 135 | The old workflows have been backed up to `.github/workflows/backup/`: 136 | 137 | - `test.yml` → Consolidated into `ci.yml` 138 | - `comprehensive-test.yml` → Consolidated into `release.yml` 139 | - `pr-validation.yml` → Consolidated into `ci.yml` and `quality.yml` 140 | - Original `quality.yml` → Enhanced and streamlined 141 | 142 | ## 🎉 **Result** 143 | 144 | **Reduced from 5 workflows to 3 workflows** with: 145 | 146 | - **Eliminated duplication** of 80% of tasks 147 | - **Maintained all functionality** with better organization 148 | - **Improved efficiency** and reduced CI costs 149 | - **Clearer separation** of concerns 150 | -------------------------------------------------------------------------------- /src/tools/get_all_pages.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import List 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def get_all_pages( 15 | start: int | None = None, end: int | None = None 16 | ) -> List[TextContent]: 17 | """ 18 | Get a simple list of all pages in the Logseq graph with essential metadata. 19 | 20 | Returns a clean listing optimized for LLM consumption with essential identifiers 21 | and timestamps for each page. By default shows all pages, but can be limited 22 | with start and end parameters. 23 | 24 | Args: 25 | start: Starting index (0-based, inclusive). If None, starts from beginning. 26 | end: Ending index (0-based, exclusive). If None, goes to end. 
27 | """ 28 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 29 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 30 | 31 | headers = {"Authorization": f"Bearer {token}"} 32 | 33 | def format_timestamp(timestamp): 34 | """Convert timestamp to readable format""" 35 | if not timestamp: 36 | return "N/A" 37 | try: 38 | from datetime import datetime 39 | 40 | dt = datetime.fromtimestamp(timestamp / 1000) # Convert from ms 41 | return dt.strftime("%Y-%m-%d %H:%M:%S") 42 | except (ValueError, TypeError, OverflowError): 43 | return str(timestamp) 44 | 45 | def format_page_entry(page): 46 | """Format a single page entry with essential metadata""" 47 | page_id = page.get("id", "N/A") 48 | uuid = page.get("uuid", "N/A") 49 | original_name = page.get("originalName", page.get("name", "Unknown")) 50 | created_at = format_timestamp(page.get("createdAt")) 51 | updated_at = format_timestamp(page.get("updatedAt")) 52 | 53 | # Determine page type 54 | page_type = "📅 Journal" if page.get("journal?", False) else "📄 Page" 55 | 56 | return f"{page_type} **{original_name}** | ID: {page_id} | UUID: {uuid} | Created: {created_at} | Updated: {updated_at}" 57 | 58 | async with aiohttp.ClientSession() as session: 59 | try: 60 | # Get all pages 61 | payload = {"method": "logseq.Editor.getAllPages"} 62 | 63 | async with session.post( 64 | endpoint, json=payload, headers=headers 65 | ) as response: 66 | if response.status != 200: 67 | return [ 68 | TextContent( 69 | type="text", 70 | text=f"❌ Failed to fetch pages: HTTP {response.status}", 71 | ) 72 | ] 73 | 74 | pages = await response.json() 75 | if not pages: 76 | return [ 77 | TextContent( 78 | type="text", text="✅ No pages found in Logseq graph" 79 | ) 80 | ] 81 | 82 | # Sort pages alphabetically by name 83 | def get_page_name(page): 84 | return page.get("originalName", page.get("name", "")).lower() 85 | 86 | # Sort using a different approach to avoid sort parameter 87 | page_names = [(get_page_name(page), page) for page in pages] 88 | page_names.sort() 89 | sorted_pages = [page for _, page in page_names] 90 | 91 | # Separate journal and regular pages 92 | journal_pages = [p for p in sorted_pages if p.get("journal?", False)] 93 | regular_pages = [p for p in sorted_pages if not p.get("journal?", False)] 94 | 95 | # Apply start/end limits if specified 96 | if start is not None or end is not None: 97 | # Apply limits to regular pages 98 | regular_pages = regular_pages[start:end] 99 | # Apply limits to journal pages 100 | journal_pages = journal_pages[start:end] 101 | 102 | # Build output with range information 103 | range_info = f" (showing indices {start if start is not None else 0}-{end if end is not None else 'end'})" 104 | output_lines = [ 105 | f"📊 **LOGSEQ PAGES LISTING{range_info}**", 106 | f"📈 Total pages in graph: {len(pages)}", 107 | f"📄 Regular pages shown: {len(regular_pages)}", 108 | f"📅 Journal pages shown: {len(journal_pages)}", 109 | "", 110 | ] 111 | else: 112 | # Build simple output with clear distinction between Journal and Regular pages 113 | output_lines = [ 114 | "📊 **LOGSEQ PAGES LISTING**", 115 | f"📈 Total pages: {len(pages)}", 116 | f"📅 Journal pages: {len(journal_pages)}", 117 | f"📄 Regular pages: {len(regular_pages)}", 118 | "", 119 | ] 120 | 121 | # Add regular pages section 122 | if regular_pages: 123 | output_lines.extend(["📄 **REGULAR PAGES:**", ""]) 124 | for page in regular_pages: 125 | output_lines.append( 126 | format_page_entry(page).replace("📄 Page", "📄") 127 | ) 128 | 129 | # Add journal pages section 130 | 
if journal_pages: 131 | output_lines.extend(["", "📅 **JOURNAL PAGES:**", ""]) 132 | for page in journal_pages: 133 | output_lines.append( 134 | format_page_entry(page).replace("📅 Journal", "📅") 135 | ) 136 | 137 | return [TextContent(type="text", text="\n".join(output_lines))] 138 | 139 | except Exception as e: 140 | return [TextContent(type="text", text=f"❌ Error fetching pages: {str(e)}")] 141 | -------------------------------------------------------------------------------- /.github/workflows/quality.yml: -------------------------------------------------------------------------------- 1 | name: Advanced Quality & Security 2 | 3 | on: 4 | pull_request: 5 | branches: [main, develop] 6 | schedule: 7 | # Run security checks daily at 3 AM UTC 8 | - cron: "0 3 * * *" 9 | workflow_dispatch: 10 | 11 | jobs: 12 | code-quality: 13 | runs-on: ubuntu-latest 14 | name: Advanced Code Quality 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Install uv 20 | uses: astral-sh/setup-uv@v3 21 | with: 22 | version: "latest" 23 | 24 | - name: Set up Python 3.12 25 | run: uv python install 3.12 26 | 27 | - name: Install dependencies 28 | run: uv sync 29 | 30 | - name: Run MyPy type checker 31 | run: | 32 | echo "🔍 Running MyPy type checker..." 33 | uv run mypy src/ --ignore-missing-imports --show-error-codes 34 | 35 | - name: Run Bandit security linter 36 | run: | 37 | echo "🔒 Running Bandit security scanner..." 38 | uv run bandit -r src/ -f json -o bandit-report.json || true 39 | uv run bandit -r src/ -f txt 40 | 41 | - name: Check for TODO/FIXME comments 42 | run: | 43 | echo "📝 Checking for TODO/FIXME comments..." 44 | if grep -r "TODO\|FIXME" src/ tests/ --exclude-dir=__pycache__; then 45 | echo "⚠️ Found TODO/FIXME comments in code" 46 | echo "Consider addressing these before merging" 47 | else 48 | echo "✅ No TODO/FIXME comments found" 49 | fi 50 | 51 | - name: Check for hardcoded secrets 52 | run: | 53 | echo "🔐 Checking for potential hardcoded secrets..." 54 | if grep -r -i "password\|secret\|key\|token" src/ --exclude-dir=__pycache__ | grep -v "LOGSEQ_API_TOKEN\|Bearer"; then 55 | echo "⚠️ Potential hardcoded secrets found" 56 | echo "Please review and use environment variables instead" 57 | exit 1 58 | else 59 | echo "✅ No hardcoded secrets found" 60 | fi 61 | 62 | - name: Upload security report 63 | uses: actions/upload-artifact@v4 64 | if: always() 65 | with: 66 | name: security-report 67 | path: bandit-report.json 68 | 69 | dependency-check: 70 | runs-on: ubuntu-latest 71 | name: Dependency Security Check 72 | 73 | steps: 74 | - uses: actions/checkout@v4 75 | 76 | - name: Install uv 77 | uses: astral-sh/setup-uv@v3 78 | with: 79 | version: "latest" 80 | 81 | - name: Set up Python 3.12 82 | run: uv python install 3.12 83 | 84 | - name: Install dependencies 85 | run: uv sync 86 | 87 | - name: Check for vulnerable dependencies 88 | run: | 89 | echo "🔍 Checking for vulnerable dependencies..." 90 | uv run safety check --json --save-json safety-report.json || true 91 | uv run safety check 92 | 93 | - name: Check dependency licenses 94 | run: | 95 | echo "📄 Checking dependency licenses..." 
96 | uv run pip-licenses --format=json --output-file=licenses.json || true 97 | uv run pip-licenses 98 | 99 | - name: Upload dependency reports 100 | uses: actions/upload-artifact@v4 101 | if: always() 102 | with: 103 | name: dependency-reports 104 | path: | 105 | safety-report.json 106 | licenses.json 107 | 108 | performance-test: 109 | runs-on: ubuntu-latest 110 | name: Performance & Memory Tests 111 | if: github.event_name == 'schedule' 112 | 113 | steps: 114 | - uses: actions/checkout@v4 115 | 116 | - name: Install uv 117 | uses: astral-sh/setup-uv@v3 118 | with: 119 | version: "latest" 120 | 121 | - name: Set up Python 3.12 122 | run: uv python install 3.12 123 | 124 | - name: Install dependencies 125 | run: uv sync 126 | 127 | - name: Run performance tests 128 | run: | 129 | echo "⚡ Running performance tests..." 130 | uv run pytest tests/ -v --durations=0 --tb=short 131 | 132 | - name: Memory usage test 133 | run: | 134 | echo "🧠 Testing memory usage..." 135 | uv run python -c " 136 | import tracemalloc 137 | import sys 138 | sys.path.insert(0, 'src') 139 | 140 | tracemalloc.start() 141 | import tools 142 | current, peak = tracemalloc.get_traced_memory() 143 | print(f'Memory usage - Current: {current / 1024 / 1024:.2f} MB, Peak: {peak / 1024 / 1024:.2f} MB') 144 | assert peak < 50 * 1024 * 1024, f'Memory usage too high: {peak / 1024 / 1024:.2f} MB' 145 | print('✅ Memory usage within acceptable limits') 146 | " 147 | 148 | quality-gate: 149 | runs-on: ubuntu-latest 150 | needs: [code-quality, dependency-check, performance-test] 151 | if: always() 152 | 153 | steps: 154 | - name: Quality Gate Summary 155 | run: | 156 | echo "🔍 Quality Gate Summary" 157 | echo "======================" 158 | 159 | if [ "${{ needs.code-quality.result }}" == "success" ]; then 160 | echo "✅ Code Quality: PASSED" 161 | else 162 | echo "❌ Code Quality: FAILED" 163 | fi 164 | 165 | if [ "${{ needs.dependency-check.result }}" == "success" ]; then 166 | echo "✅ Dependency Check: PASSED" 167 | else 168 | echo "❌ Dependency Check: FAILED" 169 | fi 170 | 171 | if [ "${{ needs.performance-test.result }}" == "success" ] || [ "${{ needs.performance-test.result }}" == "skipped" ]; then 172 | echo "✅ Performance Test: PASSED/SKIPPED" 173 | else 174 | echo "❌ Performance Test: FAILED" 175 | fi 176 | 177 | if [ "${{ needs.code-quality.result }}" == "success" ] && [ "${{ needs.dependency-check.result }}" == "success" ]; then 178 | echo "" 179 | echo "🎉 All quality checks passed!" 
180 |             echo "✅ Code meets quality standards"
181 |           else
182 |             echo ""
183 |             echo "❌ Some quality checks failed"
184 |             echo "Please address the issues before merging"
185 |             exit 1
186 |           fi
187 | 
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release & Cross-Platform Testing
2 | 
3 | on:
4 |   release:
5 |     types: [published]
6 |   schedule:
7 |     # Run comprehensive tests every Sunday at 2 AM UTC
8 |     - cron: "0 2 * * 0"
9 |   workflow_dispatch:
10 | 
11 | jobs:
12 |   cross-platform-test:
13 |     runs-on: ${{ matrix.os }}
14 |     strategy:
15 |       matrix:
16 |         python-version: ["3.11", "3.12", "3.13"]
17 |         os: [ubuntu-latest, windows-latest, macos-latest]
18 | 
19 |     steps:
20 |       - uses: actions/checkout@v4
21 | 
22 |       - name: Set up Python ${{ matrix.python-version }}
23 |         uses: actions/setup-python@v4
24 |         with:
25 |           python-version: ${{ matrix.python-version }}
26 | 
27 |       - name: Install uv
28 |         uses: astral-sh/setup-uv@v3
29 |         with:
30 |           version: "latest"
31 | 
32 |       - name: Install dependencies
33 |         run: uv sync
34 | 
35 |       - name: Run comprehensive tests
36 |         run: |
37 |           echo "🧪 Running comprehensive tests on ${{ matrix.os }} with Python ${{ matrix.python-version }}"
38 |           uv run pytest tests/ -v --cov=src --cov-report=xml --cov-report=html --cov-fail-under=85 --durations=10
39 | 
40 |       - name: Test MCP server integration
41 |         run: |
42 |           echo "🔧 Testing MCP server integration..."
43 |           uv run pytest tests/test_mcp_server.py -v
44 | 
45 |       - name: Test tool registration
46 |         run: |
47 |           echo "🔧 Testing tool registration..."
48 |           uv run python -c "
49 |           import asyncio, sys
50 |           sys.path.insert(0, 'src')
51 |           from registry import register_all_tools
52 |           from mcp.server.fastmcp import FastMCP
53 | 
54 |           mcp = FastMCP('test-server')
55 |           register_all_tools(mcp)
56 | 
57 |           tools = asyncio.run(mcp.list_tools())  # FastMCP.list_tools() is async
58 |           print(f'✅ Registered {len(tools)} tools')
59 |           assert len(tools) >= 9, f'Expected at least 9 tools, found {len(tools)}'
60 |           print('✅ Tool registration test passed')
61 |           "
62 | 
63 |       - name: Upload test results
64 |         uses: actions/upload-artifact@v4
65 |         if: always()
66 |         with:
67 |           name: test-results-${{ matrix.os }}-python${{ matrix.python-version }}
68 |           path: |
69 |             htmlcov/
70 |             coverage.xml
71 |             .pytest_cache/
72 | 
73 |   package-build:
74 |     runs-on: ubuntu-latest
75 |     if: github.event_name == 'release'
76 | 
77 |     steps:
78 |       - uses: actions/checkout@v4
79 | 
80 |       - name: Install uv
81 |         uses: astral-sh/setup-uv@v3
82 |         with:
83 |           version: "latest"
84 | 
85 |       - name: Set up Python 3.12
86 |         run: uv python install 3.12
87 | 
88 |       - name: Install dependencies
89 |         run: uv sync
90 | 
91 |       - name: Build package
92 |         run: |
93 |           echo "📦 Building package..."
94 |           uv build
95 | 
96 |       - name: Test package installation
97 |         run: |
98 |           echo "🧪 Testing package installation..."
99 | uv run pip install dist/*.whl 100 | uv run python -c " 101 | import sys 102 | sys.path.insert(0, 'src') 103 | import tools 104 | print(f'✅ Package installation successful: {len(tools.__all__)} tools available') 105 | " 106 | 107 | - name: Upload package artifacts 108 | uses: actions/upload-artifact@v4 109 | with: 110 | name: package-artifacts 111 | path: dist/ 112 | 113 | integration-test: 114 | runs-on: ubuntu-latest 115 | needs: [cross-platform-test, package-build] 116 | if: github.event_name == 'release' 117 | 118 | steps: 119 | - uses: actions/checkout@v4 120 | 121 | - name: Install uv 122 | uses: astral-sh/setup-uv@v3 123 | with: 124 | version: "latest" 125 | 126 | - name: Set up Python 3.12 127 | run: uv python install 3.12 128 | 129 | - name: Install dependencies 130 | run: uv sync 131 | 132 | - name: Test MCP server with real tools 133 | run: | 134 | echo "🔧 Testing MCP server with real tools..." 135 | timeout 30s uv run mcp dev src/server.py & 136 | sleep 5 137 | echo "✅ MCP server started successfully" 138 | pkill -f "mcp dev" || true 139 | 140 | - name: Test all tool functions 141 | run: | 142 | echo "🧪 Testing all tool functions..." 143 | uv run python -c " 144 | import sys 145 | sys.path.insert(0, 'src') 146 | import tools 147 | 148 | # Test that all tools can be imported 149 | for tool_name in tools.__all__: 150 | try: 151 | module = __import__(f'tools.{tool_name}', fromlist=[tool_name]) 152 | print(f'✅ {tool_name}: Import successful') 153 | except Exception as e: 154 | print(f'❌ {tool_name}: Import failed - {e}') 155 | sys.exit(1) 156 | 157 | print('✅ All tool imports successful') 158 | " 159 | 160 | release-summary: 161 | runs-on: ubuntu-latest 162 | needs: [cross-platform-test, package-build, integration-test] 163 | if: always() 164 | 165 | steps: 166 | - name: Release Summary 167 | run: | 168 | echo "🚀 Release Testing Summary" 169 | echo "=========================" 170 | 171 | if [ "${{ needs.cross-platform-test.result }}" == "success" ]; then 172 | echo "✅ Cross-Platform Tests: PASSED" 173 | else 174 | echo "❌ Cross-Platform Tests: FAILED" 175 | fi 176 | 177 | if [ "${{ needs.package-build.result }}" == "success" ] || [ "${{ needs.package-build.result }}" == "skipped" ]; then 178 | echo "✅ Package Build: PASSED/SKIPPED" 179 | else 180 | echo "❌ Package Build: FAILED" 181 | fi 182 | 183 | if [ "${{ needs.integration-test.result }}" == "success" ] || [ "${{ needs.integration-test.result }}" == "skipped" ]; then 184 | echo "✅ Integration Tests: PASSED/SKIPPED" 185 | else 186 | echo "❌ Integration Tests: FAILED" 187 | fi 188 | 189 | if [ "${{ needs.cross-platform-test.result }}" == "success" ]; then 190 | echo "" 191 | echo "🎉 Release testing completed successfully!" 192 | echo "✅ Package is ready for distribution" 193 | else 194 | echo "" 195 | echo "❌ Release testing failed" 196 | echo "Please address the issues before releasing" 197 | exit 1 198 | fi 199 | -------------------------------------------------------------------------------- /.github/CI_CD_SETUP.md: -------------------------------------------------------------------------------- 1 | # CI/CD Setup Complete ✅ 2 | 3 | ## Overview 4 | 5 | I have successfully set up a comprehensive CI/CD pipeline for the Logseq API MCP Server project using GitHub Actions. The pipeline includes automated testing, code quality checks, security scanning, and pull request validation. 6 | 7 | ## 🚀 What's Been Created 8 | 9 | ### 1. 
GitHub Actions Workflows 10 | 11 | #### `test.yml` - Main Test Suite 12 | 13 | - **Triggers:** Push to main/develop, Pull Requests, Manual dispatch 14 | - **Features:** 15 | - Multi-Python version testing (3.11, 3.12, 3.13) 16 | - Linting with Ruff 17 | - Type checking with MyPy 18 | - Test coverage with pytest-cov (80% minimum) 19 | - MCP server startup testing 20 | - Tool discovery validation 21 | - Security scanning with Bandit 22 | - Package build testing 23 | 24 | #### `pr-validation.yml` - Pull Request Validation 25 | 26 | - **Triggers:** Pull Requests to main/develop 27 | - **Features:** 28 | - Comprehensive PR validation 29 | - Test coverage validation (80% minimum) 30 | - Tool discovery verification 31 | - Code quality checks 32 | - Security scanning 33 | - Test structure validation 34 | - Individual tool suite testing 35 | 36 | #### `comprehensive-test.yml` - Extended Testing 37 | 38 | - **Triggers:** Weekly schedule, Releases, Manual dispatch 39 | - **Features:** 40 | - Multi-OS testing (Ubuntu, Windows, macOS) 41 | - Performance testing 42 | - Memory usage validation 43 | - Integration testing 44 | - MCP server with real tools 45 | 46 | #### `quality.yml` - Code Quality & Security 47 | 48 | - **Triggers:** Push to main/develop, Pull Requests, Daily schedule 49 | - **Features:** 50 | - Ruff linting and formatting 51 | - MyPy type checking 52 | - Bandit security scanning 53 | - TODO/FIXME comment detection 54 | - Hardcoded secret detection 55 | - Dependency vulnerability scanning 56 | - License checking 57 | - Coverage analysis 58 | 59 | ### 2. Project Configuration 60 | 61 | #### Updated `pyproject.toml` 62 | 63 | Added development dependencies for CI/CD: 64 | 65 | - `mypy>=1.8.0` - Type checking 66 | - `bandit>=1.7.5` - Security scanning 67 | - `safety>=2.3.5` - Dependency vulnerability scanning 68 | - `pip-licenses>=4.3.0` - License checking 69 | - `pytest-cov>=4.1.0` - Test coverage 70 | 71 | ### 3. GitHub Templates 72 | 73 | #### Pull Request Template 74 | 75 | - Comprehensive checklist for contributors 76 | - Test coverage requirements 77 | - Code quality guidelines 78 | - Security considerations 79 | - Documentation requirements 80 | 81 | #### Issue Templates 82 | 83 | - **Bug Report Template** - Structured bug reporting 84 | - **Feature Request Template** - Structured feature requests 85 | 86 | ### 4. Documentation 87 | 88 | #### Workflow Documentation 89 | 90 | - Complete workflow overview 91 | - Local testing instructions 92 | - Troubleshooting guide 93 | - Quality gate requirements 94 | 95 | #### Status Badges 96 | 97 | - Ready-to-use badges for README.md 98 | - Links to workflow status pages 99 | 100 | ## 🔧 How It Works 101 | 102 | ### Pull Request Flow 103 | 104 | 1. **PR Created** → `pr-validation.yml` runs 105 | 2. **Code Quality** → `quality.yml` runs 106 | 3. **Main Tests** → `test.yml` runs 107 | 4. **All Pass** → PR can be merged 108 | 109 | ### Push to Main Flow 110 | 111 | 1. **Push to main** → `test.yml` runs 112 | 2. **Quality Checks** → `quality.yml` runs 113 | 3. 
**All Pass** → Code is validated 114 | 115 | ### Scheduled Runs 116 | 117 | - **Daily** → Security and quality checks 118 | - **Weekly** → Comprehensive testing across platforms 119 | 120 | ## 📊 Quality Gates 121 | 122 | All workflows must pass for: 123 | 124 | - ✅ Code to be merged to main 125 | - ✅ Releases to be published 126 | - ✅ PRs to be approved 127 | 128 | ### Coverage Requirements 129 | 130 | - **Minimum:** 80% for PR validation 131 | - **Target:** 85% for comprehensive testing 132 | 133 | ### Security Checks 134 | 135 | - Bandit security linter 136 | - Safety dependency scanner 137 | - Secret detection 138 | - License validation 139 | 140 | ## 🎯 Benefits 141 | 142 | ### For Contributors 143 | 144 | - **Clear Guidelines** - PR template ensures quality 145 | - **Automated Feedback** - Immediate test results 146 | - **Code Quality** - Automated linting and formatting 147 | - **Security** - Automated vulnerability scanning 148 | 149 | ### For Maintainers 150 | 151 | - **Quality Assurance** - All code is tested before merge 152 | - **Security Monitoring** - Regular security scans 153 | - **Performance Tracking** - Test duration and memory usage 154 | - **Documentation** - Automated coverage reports 155 | 156 | ### For Users 157 | 158 | - **Reliable Releases** - All code is thoroughly tested 159 | - **Security** - Regular vulnerability scanning 160 | - **Quality** - Consistent code standards 161 | - **Performance** - Optimized and tested code 162 | 163 | ## 🚀 Getting Started 164 | 165 | ### For Contributors 166 | 167 | 1. Create a pull request 168 | 2. Fill out the PR template 169 | 3. Ensure all checks pass 170 | 4. Wait for review and merge 171 | 172 | ### For Maintainers 173 | 174 | 1. Monitor workflow status 175 | 2. Review security reports 176 | 3. Check coverage reports 177 | 4. Approve quality PRs 178 | 179 | ### Local Testing 180 | 181 | ```bash 182 | # Install dependencies 183 | uv sync --dev 184 | 185 | # Run tests with coverage 186 | uv run pytest tests/ --cov=src/tools --cov-report=html 187 | 188 | # Run linting 189 | uv run ruff check src/ tests/ 190 | uv run ruff format --check src/ tests/ 191 | 192 | # Run type checking 193 | uv run mypy src/ --ignore-missing-imports 194 | 195 | # Run security scan 196 | uv run bandit -r src/ 197 | ``` 198 | 199 | ## 📈 Monitoring 200 | 201 | ### Workflow Status 202 | 203 | - Check GitHub Actions tab for workflow status 204 | - Download artifacts for detailed reports 205 | - Monitor coverage trends over time 206 | 207 | ### Quality Metrics 208 | 209 | - Test coverage percentage 210 | - Security vulnerability count 211 | - Code quality scores 212 | - Performance metrics 213 | 214 | ## 🔄 Maintenance 215 | 216 | ### Regular Tasks 217 | 218 | - Review security reports weekly 219 | - Update dependencies monthly 220 | - Monitor coverage trends 221 | - Review workflow performance 222 | 223 | ### Updates 224 | 225 | - Update workflow versions as needed 226 | - Add new quality checks as tools evolve 227 | - Adjust coverage thresholds as appropriate 228 | - Update templates based on feedback 229 | 230 | ## ✅ Status 231 | 232 | The CI/CD pipeline is now fully configured and ready to use. All workflows are properly set up with appropriate triggers, quality gates, and reporting. 
The project now has: 233 | 234 | - ✅ **Automated Testing** - Comprehensive test suite with coverage 235 | - ✅ **Code Quality** - Linting, formatting, and type checking 236 | - ✅ **Security Scanning** - Regular vulnerability and security checks 237 | - ✅ **Pull Request Validation** - Automated PR quality checks 238 | - ✅ **Multi-Platform Testing** - Cross-platform compatibility 239 | - ✅ **Documentation** - Complete setup and usage documentation 240 | 241 | The project is now ready for production use with enterprise-grade CI/CD practices! 🎉 242 | -------------------------------------------------------------------------------- /src/tools/edit_block.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import Any, Dict, List, Optional 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def edit_block( 15 | block_identity: str, 16 | content: Optional[str] = None, 17 | properties: Optional[Dict[str, Any]] = None, 18 | cursor_position: Optional[int] = None, 19 | focus: Optional[bool] = None, 20 | ) -> List[TextContent]: 21 | """ 22 | Edit a block in Logseq. 23 | 24 | This tool allows you to edit existing blocks in your Logseq graph by updating 25 | their content, properties, or cursor position. The block can be identified 26 | by its UUID or by providing a BlockIdentity object. 27 | 28 | Args: 29 | block_identity: The UUID of the block to edit (BlockIdentity) 30 | content: Optional new content for the block 31 | properties: Optional dictionary of properties to update on the block 32 | cursor_position: Optional cursor position for editing (0-based index) 33 | focus: Optional boolean to focus the block after editing 34 | """ 35 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 36 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 37 | 38 | headers = {"Authorization": f"Bearer {token}"} 39 | 40 | # Build options object 41 | options: Dict[str, Any] = {} 42 | if cursor_position is not None: 43 | options["pos"] = cursor_position 44 | if focus is not None: 45 | options["focus"] = focus 46 | 47 | # Prepare the arguments for the API call 48 | args: list[Any] = [block_identity] 49 | 50 | # Add content if provided 51 | if content is not None: 52 | args.append(content) 53 | 54 | # Add properties if provided 55 | if properties is not None: 56 | args.append(properties) 57 | 58 | # Add options if any 59 | if options: 60 | args.append(options) 61 | 62 | async with aiohttp.ClientSession() as session: 63 | try: 64 | # Prepare the API call 65 | payload = { 66 | "method": "logseq.Editor.editBlock", 67 | "args": args, 68 | } 69 | 70 | async with session.post( 71 | endpoint, json=payload, headers=headers 72 | ) as response: 73 | if response.status != 200: 74 | return [ 75 | TextContent( 76 | type="text", 77 | text=f"❌ Failed to edit block: HTTP {response.status}", 78 | ) 79 | ] 80 | 81 | result = await response.json() 82 | 83 | # Check if the result indicates success 84 | if result is None or result == "": 85 | return [ 86 | TextContent( 87 | type="text", 88 | text="❌ Failed to edit block: No response from Logseq API", 89 | ) 90 | ] 91 | 92 | # Build success response 93 | output_lines = [ 94 | "✅ **BLOCK EDITED SUCCESSFULLY**", 95 | f"🔗 Block UUID: {block_identity}", 96 | "", 97 | ] 98 | 
99 | # Add edit details 100 | edit_details = [] 101 | if content is not None: 102 | edit_details.append("📝 Content updated") 103 | if properties is not None: 104 | edit_details.append( 105 | f"⚙️ Properties updated ({len(properties)} items)" 106 | ) 107 | if cursor_position is not None: 108 | edit_details.append( 109 | f"📍 Cursor positioned at index {cursor_position}" 110 | ) 111 | if focus is not None: 112 | edit_details.append( 113 | f"🎯 Focus: {'Enabled' if focus else 'Disabled'}" 114 | ) 115 | 116 | if edit_details: 117 | output_lines.extend( 118 | [ 119 | "📊 **EDIT DETAILS:**", 120 | *[f"• {detail}" for detail in edit_details], 121 | "", 122 | ] 123 | ) 124 | 125 | # Add content preview if updated 126 | if content is not None: 127 | content_preview = ( 128 | content[:100] + "..." if len(content) > 100 else content 129 | ) 130 | output_lines.extend( 131 | [ 132 | "📝 **UPDATED CONTENT:**", 133 | "```", 134 | content_preview, 135 | "```", 136 | "", 137 | ] 138 | ) 139 | 140 | # Add properties if updated 141 | if properties is not None: 142 | output_lines.extend( 143 | [ 144 | "⚙️ **UPDATED PROPERTIES:**", 145 | *[ 146 | f"• {prop_name}: {prop_value}" 147 | for prop_name, prop_value in properties.items() 148 | ], 149 | "", 150 | ] 151 | ) 152 | 153 | # Add result details if available 154 | if isinstance(result, dict): 155 | result_id = result.get("id", "N/A") 156 | result_uuid = result.get("uuid", block_identity) 157 | 158 | if result_id != "N/A" or result_uuid != block_identity: 159 | output_lines.extend( 160 | [ 161 | "📋 **BLOCK INFORMATION:**", 162 | f"• ID: {result_id}", 163 | f"• UUID: {result_uuid}", 164 | "", 165 | ] 166 | ) 167 | 168 | output_lines.extend( 169 | [ 170 | "🔗 **NEXT STEPS:**", 171 | "• Check your Logseq graph to see the updated block", 172 | "• Use get_block_content to verify the changes", 173 | "• Use get_page_blocks to see the block in context", 174 | ] 175 | ) 176 | 177 | return [TextContent(type="text", text="\n".join(output_lines))] 178 | 179 | except Exception as e: 180 | return [TextContent(type="text", text=f"❌ Error editing block: {str(e)}")] 181 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Logseq API MCP 2 | 3 | ## GitHub Flow Process 4 | 5 | We follow **GitHub Flow** for all contributions: 6 | 7 | ### 1. Create Feature Branch 8 | 9 | ```bash 10 | # Sync with latest main 11 | git checkout main 12 | git pull origin main 13 | 14 | # Create descriptive branch 15 | git checkout -b feature/add-search-tool 16 | git checkout -b fix/api-timeout-issue 17 | git checkout -b docs/update-installation 18 | ``` 19 | 20 | ### 2. Development Process 21 | 22 | ```bash 23 | # Make changes - create your tool file (no imports needed!) 24 | # Example: src/tools/search_tool.py 25 | 26 | # Format and check code 27 | uv run ruff check --fix && uv run ruff format 28 | 29 | # Test changes (automated testing) 30 | uv run python tests/test_mcp_server.py 31 | 32 | # Test with MCP Inspector (manual verification) 33 | uv run mcp dev src/server.py 34 | 35 | # Commit frequently 36 | git add . 37 | git commit -m "feat: add search functionality for content discovery" 38 | git push origin feature/add-search-tool 39 | ``` 40 | 41 | ### 3. Pull Request 42 | 43 | - Open PR **early** for feedback 44 | - Use the provided PR template 45 | - Ensure all CI checks pass 46 | - Request review from maintainers 47 | 48 | ### 4. 
Code Review 49 | 50 | - Address feedback promptly 51 | - Push additional commits as needed 52 | - Maintain clean commit history 53 | 54 | ### 5. Merge to Main 55 | 56 | - Squash and merge after approval 57 | - Delete feature branch 58 | - `main` branch triggers deployment 59 | 60 | ## Adding New Tools (Super Simple!) 61 | 62 | Thanks to our **dynamic tool discovery system**, adding tools is incredibly easy: 63 | 64 | ### 1. Create Tool File 65 | 66 | Create `src/tools/your_tool.py`: 67 | 68 | ```python 69 | def your_tool(param: str) -> dict: 70 | """ 71 | Your tool description. 72 | 73 | Args: 74 | param: Input parameter description 75 | 76 | Returns: 77 | Dict with tool results 78 | """ 79 | return { 80 | "result": f"Processed: {param}", 81 | "status": "success" 82 | } 83 | ``` 84 | 85 | ### 2. That's It! 🎉 86 | 87 | The system automatically: 88 | 89 | - ✅ **Discovers** your tool 90 | - ✅ **Imports** the function 91 | - ✅ **Registers** with MCP server 92 | - ✅ **Validates** in CI tests 93 | 94 | ### Tool Guidelines 95 | 96 | - **Location**: Must be in `src/tools/` directory 97 | - **Naming**: Don't start with `_` (files or functions) 98 | - **Documentation**: Include comprehensive docstrings 99 | - **Type Hints**: Use for all parameters and returns 100 | - **Error Handling**: Include proper exception management 101 | - **Testing**: Tools are automatically tested by CI 102 | 103 | ## Testing Framework 104 | 105 | ### Automated Testing 106 | 107 | Our testing system validates the dynamic discovery: 108 | 109 | ```bash 110 | # Run all tests 111 | uv run python tests/test_mcp_server.py 112 | ``` 113 | 114 | **What gets tested:** 115 | 116 | - ✅ **Server Health** - MCP server starts correctly 117 | - ✅ **Tool Discovery** - All tools auto-discovered 118 | - ✅ **Registration** - Tools properly registered 119 | - ✅ **Integration** - End-to-end functionality 120 | 121 | ### Test Output 122 | 123 | ``` 124 | 🔍 Testing MCP Server Health and Tools... 125 | 🔧 Discovered tools (auto-discovery): ['get_all_pages', 'your_new_tool', ...] 126 | 127 | 🏥 Testing server health... 128 | ✅ Server started and responded successfully 129 | ✅ Dynamic tool discovery working correctly 130 | 131 | 🎉 MCP Server test completed successfully! 132 | 📊 Tools auto-discovered: 7 133 | 🏥 Server health: OK 134 | 🔄 Dynamic discovery: OK 135 | ``` 136 | 137 | ## Branch Naming Conventions 138 | 139 | - `feature/description` - New features 140 | - `fix/description` - Bug fixes 141 | - `docs/description` - Documentation 142 | - `refactor/description` - Code refactoring 143 | - `test/description` - Test improvements 144 | 145 | ## Quality Standards 146 | 147 | - **Code formatting**: `ruff check --fix && ruff format` 148 | - **Type hints**: Use for all functions 149 | - **Docstrings**: Required for all functions/classes 150 | - **Error handling**: Comprehensive exception management 151 | - **Testing**: Automated validation via CI 152 | 153 | ## Development Setup 154 | 155 | ```bash 156 | # Clone and setup 157 | git clone https://github.com/gustavo-meilus/logseq-api-mcp.git 158 | cd logseq-api-mcp 159 | uv sync 160 | 161 | # Environment setup 162 | cp .env.template .env 163 | # Edit .env with your Logseq API details 164 | 165 | # Test installation 166 | uv run mcp dev src/server.py 167 | 168 | # Run automated tests 169 | uv run python tests/test_mcp_server.py 170 | ``` 171 | 172 | ## Dynamic Discovery Architecture 173 | 174 | ### How It Works 175 | 176 | 1. 
**`src/tools/__init__.py`**: 177 | 178 | - Scans `src/tools/` directory for `.py` files 179 | - Dynamically imports all public functions 180 | - Populates `__all__` list automatically 181 | 182 | 2. **`src/registry.py`**: 183 | 184 | - Imports `tools` module 185 | - Iterates through `tools.__all__` 186 | - Auto-registers each tool with MCP server 187 | 188 | 3. **`tests/test_mcp_server.py`**: 189 | - Validates tool discovery works 190 | - Ensures server health 191 | - Confirms all tools are registered 192 | 193 | ### Development Flow 194 | 195 | ``` 196 | Tool File Creation → Auto-Discovery → Import → Registration → CI Validation 197 | ``` 198 | 199 | ### Benefits 200 | 201 | - **Zero Maintenance**: No manual imports/registrations 202 | - **Error Prevention**: Can't forget to register tools 203 | - **Instant Integration**: New tools work immediately 204 | - **CI Protection**: Tests catch any issues 205 | 206 | ## Troubleshooting 207 | 208 | ### Tool Not Discovered 209 | 210 | - Ensure file is in `src/tools/` directory 211 | - Function name shouldn't start with `_` 212 | - File name shouldn't start with `_` 213 | - Check for syntax/import errors 214 | 215 | ### Test Failures 216 | 217 | - Run `uv run python tests/test_mcp_server.py` locally 218 | - Check server logs for errors 219 | - Verify Logseq API is accessible 220 | - Ensure environment variables are set 221 | 222 | ### Server Issues 223 | 224 | - Test manually: `uv run mcp dev src/server.py` 225 | - Check `.env` configuration 226 | - Verify Logseq API token is valid 227 | 228 | ## Commit Messages 229 | 230 | Follow conventional commits: 231 | 232 | - `feat: add new search tool` 233 | - `fix: resolve API timeout issue` 234 | - `docs: update installation guide` 235 | - `refactor: improve error handling` 236 | 237 | ## Example Contribution 238 | 239 | Here's a complete example of adding a new tool: 240 | 241 | ### 1. Create Tool 242 | 243 | `src/tools/count_pages.py`: 244 | 245 | ```python 246 | from typing import Dict, Any 247 | 248 | def count_pages() -> Dict[str, Any]: 249 | """ 250 | Count total pages in the knowledge base. 251 | 252 | Returns: 253 | Dict with page count information 254 | """ 255 | # Implementation here 256 | return { 257 | "total_pages": 42, 258 | "status": "success" 259 | } 260 | ``` 261 | 262 | ### 2. Test Locally 263 | 264 | ```bash 265 | # Test discovery 266 | uv run python tests/test_mcp_server.py 267 | 268 | # Test manually 269 | uv run mcp dev src/server.py 270 | ``` 271 | 272 | ### 3. Commit & Push 273 | 274 | ```bash 275 | git add src/tools/count_pages.py 276 | git commit -m "feat: add page counting tool" 277 | git push origin feature/add-page-counter 278 | ``` 279 | 280 | ### 4. Open PR 281 | 282 | - Tool is automatically discovered 283 | - CI tests validate integration 284 | - Ready for review! 285 | 286 | ## Questions? 
287 | 288 | - Check existing issues and discussions 289 | - Create new issue for bugs/features 290 | - Join community discussions 291 | - Review the [README.md](README.md) for more details 292 | -------------------------------------------------------------------------------- /src/tools/get_page_blocks.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import List 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def get_page_blocks(page_identifier: str) -> List[TextContent]: 15 | """ 16 | Get the tree structure of blocks that compose a page in Logseq. 17 | 18 | Returns a hierarchical view of all blocks with their essential metadata, 19 | formatted for optimal LLM consumption with clear tree visualization. 20 | 21 | Args: 22 | page_identifier: The name or UUID of the page to get blocks from 23 | """ 24 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 25 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 26 | 27 | headers = {"Authorization": f"Bearer {token}"} 28 | 29 | def format_properties(props): 30 | """Format block properties into a concise display""" 31 | if not props: 32 | return "" 33 | 34 | # Filter out less important properties for concise display 35 | important_props = {} 36 | for property_name, value in props.items(): 37 | if property_name not in [ 38 | "collapsed", 39 | "card-last-interval", 40 | "card-repeats", 41 | "card-ease-factor", 42 | "card-next-schedule", 43 | "card-last-reviewed", 44 | ]: 45 | if isinstance(value, list): 46 | important_props[property_name] = ", ".join(str(v) for v in value) 47 | elif value: 48 | important_props[property_name] = str(value) 49 | 50 | if important_props: 51 | prop_items = [ 52 | f"{prop_name}: {prop_value}" 53 | for prop_name, prop_value in list(important_props.items())[:3] 54 | ] 55 | return f" [{'; '.join(prop_items)}]" 56 | return "" 57 | 58 | def format_content_preview(content, max_length=100): 59 | """Create a clean preview of block content""" 60 | if not content: 61 | return "[empty]" 62 | 63 | # Clean up the content 64 | content = content.strip() 65 | content = content.replace("\n", " ").replace("\r", " ") 66 | 67 | # Remove card markers for cleaner display 68 | if "#card" in content: 69 | content = content.replace("#card", "").strip() 70 | content = f"💡 {content}" 71 | 72 | # Truncate if too long 73 | if len(content) > max_length: 74 | content = content[:max_length] + "..." 
75 | 
76 |         return content
77 | 
78 |     def format_block_tree(block, level=0, max_level=10):
79 |         """Recursively format blocks into a tree structure"""
80 |         if level > max_level:
81 |             return []
82 | 
83 |         lines = []
84 |         indent = "  " * level
85 | 
86 |         # Get block info
87 |         block_id = block.get("id", "N/A")
88 |         uuid = block.get("uuid", "N/A")
89 |         content = block.get("content", "")
90 |         properties = block.get("properties", {})
91 |         children = block.get("children", [])
92 |         block_level = block.get("level", level)
93 |         is_pre_block = block.get("preBlock?", False)
94 | 
95 |         # Determine block type emoji
96 |         if content.startswith("#"):
97 |             emoji = "📑"
98 |             if "# " in content:
99 |                 header_level = len(content) - len(content.lstrip("#"))  # count leading #'s
100 |                 emoji = f"H{header_level}"
101 |         elif "#card" in content:
102 |             emoji = "💡"
103 |         elif is_pre_block:
104 |             emoji = "📋"
105 |         elif properties:
106 |             emoji = "⚙️"
107 |         else:
108 |             emoji = "•"
109 | 
110 |         # Format the main block line
111 |         content_preview = format_content_preview(content)
112 |         props_display = format_properties(properties)
113 | 
114 |         block_line = f"{indent}{emoji} {content_preview}"
115 |         if props_display:
116 |             block_line += props_display
117 | 
118 |         # Add technical details
119 |         tech_details = f"ID:{block_id} | UUID:{uuid} | Level:{block_level}"
120 |         if is_pre_block:
121 |             tech_details += " | PreBlock"
122 | 
123 |         lines.append(f"{block_line}")
124 |         lines.append(f"{indent} 📊 {tech_details}")
125 | 
126 |         # Add parent info if available
127 |         parent = block.get("parent", {})
128 |         if parent and parent.get("id"):
129 |             lines.append(f"{indent} 👆 Parent ID: {parent.get('id')}")
130 | 
131 |         # Add children count
132 |         if children:
133 |             lines.append(f"{indent} 👇 Children: {len(children)}")
134 | 
135 |         lines.append("")  # Empty line for separation
136 | 
137 |         # Recursively format children
138 |         for child in children:
139 |             lines.extend(format_block_tree(child, level + 1, max_level))
140 | 
141 |         return lines
142 | 
143 |     async with aiohttp.ClientSession() as session:
144 |         try:
145 |             # Get page blocks tree
146 |             payload = {
147 |                 "method": "logseq.Editor.getPageBlocksTree",
148 |                 "args": [page_identifier],
149 |             }
150 | 
151 |             async with session.post(
152 |                 endpoint, json=payload, headers=headers
153 |             ) as response:
154 |                 if response.status != 200:
155 |                     return [
156 |                         TextContent(
157 |                             type="text",
158 |                             text=f"❌ Failed to fetch page blocks: HTTP {response.status}",
159 |                         )
160 |                     ]
161 | 
162 |                 blocks = await response.json()
163 |                 if not blocks:
164 |                     return [
165 |                         TextContent(
166 |                             type="text",
167 |                             text=f"✅ Page '{page_identifier}' has no blocks",
168 |                         )
169 |                     ]
170 | 
171 |                 # Get page info from first block
172 |                 page_info = blocks[0].get("page", {}) if blocks else {}
173 |                 page_name = page_info.get("name", page_identifier)
174 |                 page_id = page_info.get("id", "N/A")
175 | 
176 |                 # Build output
177 |                 output_lines = [
178 |                     "🌳 **PAGE BLOCKS TREE STRUCTURE**",
179 |                     f"📄 Page: {page_name} (ID: {page_id})",
180 |                     f"📊 Total blocks: {len(blocks)}",
181 |                     "",
182 |                     "🔗 **TREE HIERARCHY:**",
183 |                     "",
184 |                 ]
185 | 
186 |                 # Format all blocks in tree structure
187 |                 for block in blocks:
188 |                     output_lines.extend(format_block_tree(block, 0, max_level=8))
189 | 
190 |                 # Add summary
191 |                 output_lines.extend(
192 |                     [
193 |                         "📈 **SUMMARY:**",
194 |                         f"• Total blocks processed: {len(blocks)}",
195 |                         "• Tree depth: Variable (max 8 levels shown)",
196 |                         "• Format: Hierarchical with metadata",
197 |                         "",
198 |                     ]
199 |                 )
200 | 
201 |                 return [TextContent(type="text",
text="\n".join(output_lines))] 202 | 203 | except Exception as e: 204 | return [ 205 | TextContent( 206 | type="text", text=f"❌ Error fetching page blocks: {str(e)}" 207 | ) 208 | ] 209 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md 2 | 3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 4 | 5 | ## Project Overview 6 | 7 | This is a **Model Context Protocol (MCP) server** that provides AI assistants with seamless access to Logseq knowledge bases. The project features a sophisticated **dynamic tool discovery system** that automatically detects, imports, and registers any new tools added to the codebase without requiring manual configuration. 8 | 9 | **Key Innovation**: Zero-configuration tool management - simply add a Python file to `src/tools/` and it's automatically available. 10 | 11 | ## Core Architecture 12 | 13 | ### Dynamic Tool Discovery System 14 | 15 | The project's central innovation is its automatic tool management: 16 | 17 | 1. **`src/tools/__init__.py`**: Scans the tools directory, dynamically imports all Python files, and populates `__all__` with discovered functions 18 | 2. **`src/registry.py`**: Imports the tools module and automatically registers each discovered tool with the MCP server 19 | 3. **`src/server.py`**: Simple FastMCP server that uses the registry to make all tools available 20 | 21 | **Flow**: `Tool File → Auto-Discovery → Import → Registration → Validation` 22 | 23 | ### Tool Structure 24 | 25 | Each tool in `src/tools/` is a standalone Python module with one or more functions that become MCP tools. Tools follow this pattern: 26 | - Located in `src/tools/` directory 27 | - Function names don't start with `_` 28 | - Include comprehensive docstrings 29 | - Use type hints 30 | - Handle Logseq API authentication via environment variables 31 | 32 | ## Development Commands 33 | 34 | ### Installation & Setup 35 | ```bash 36 | # Install dependencies (uses UV package manager) 37 | uv sync 38 | 39 | # Install with development dependencies 40 | uv sync --dev 41 | 42 | # Copy environment template and configure 43 | cp .env.template .env 44 | # Edit .env with your Logseq API details 45 | ``` 46 | 47 | ### Running the Server 48 | ```bash 49 | # Start MCP server 50 | uv run mcp run src/server.py 51 | 52 | # Development mode with MCP inspector 53 | uv run mcp dev src/server.py 54 | ``` 55 | 56 | ### Testing 57 | ```bash 58 | # Run comprehensive test suite 59 | uv run python tests/test_mcp_server.py 60 | 61 | # The test validates: 62 | # - Server health and startup 63 | # - Dynamic tool discovery functionality 64 | # - Tool registration completeness 65 | ``` 66 | 67 | ### Code Quality 68 | ```bash 69 | # Format and lint code (uses Ruff) 70 | uv run ruff check --fix && uv run ruff format 71 | ``` 72 | 73 | ## Adding New Tools 74 | 75 | The dynamic discovery system makes adding tools incredibly simple: 76 | 77 | ### 1. 
Create Tool File
78 | Create `src/tools/your_tool_name.py`:
79 | 
80 | ```python
81 | import os
82 | import aiohttp
83 | from typing import List
84 | from mcp.types import TextContent
85 | from dotenv import load_dotenv
86 | from pathlib import Path
87 | 
88 | # Load environment variables
89 | env_path = Path(__file__).parent.parent.parent / ".env"
90 | load_dotenv(env_path)
91 | 
92 | async def your_tool_name(param: str) -> List[TextContent]:
93 |     """
94 |     Tool description here.
95 | 
96 |     Args:
97 |         param: Description of parameter
98 | 
99 |     Returns:
100 |         List of TextContent with results
101 |     """
102 |     endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api")
103 |     token = os.getenv("LOGSEQ_API_TOKEN", "auth")
104 | 
105 |     # Implementation here
106 |     return [TextContent(type="text", text="Your formatted output")]
107 | ```
108 | 
109 | ### 2. That's It!
110 | The system automatically:
111 | - Discovers your tool file
112 | - Imports the function
113 | - Registers it with the MCP server
114 | - Validates it in CI tests
115 | 
116 | **No manual imports, registrations, or configuration needed.**
117 | 
118 | ## Environment Configuration
119 | 
120 | ### Required Environment Variables
121 | ```env
122 | # Logseq API Configuration
123 | LOGSEQ_API_ENDPOINT=http://127.0.0.1:12315/api
124 | LOGSEQ_API_TOKEN=your_api_token_here
125 | ```
126 | 
127 | ### Getting Logseq API Token
128 | 1. Open Logseq application
129 | 2. Go to **Settings → Features → Developer mode**
130 | 3. Enable **"HTTP APIs server"**
131 | 4. Copy the displayed API token
132 | 5. Note the API endpoint (default: `http://127.0.0.1:12315/api`)
133 | 
134 | ## Existing Tools Architecture
135 | 
136 | The project includes 8 core tools that demonstrate different patterns:
137 | 
138 | - **`get_all_pages`**: Simple listing with metadata
139 | - **`get_page_blocks`**: Hierarchical tree structure analysis
140 | - **`get_page_links`**: Relationship discovery between pages
141 | - **`get_block_content`**: Detailed block content with children
142 | - **`create_page`**, **`append_block_in_page`**, **`edit_block`**: Write operations for pages and blocks
143 | - **`get_linked_flashcards`**: Advanced cross-page content analysis
144 | 
145 | All tools follow consistent patterns:
146 | - Async functions returning `List[TextContent]`
147 | - Environment-based API configuration
148 | - Emoji-enhanced, structured output for AI consumption
149 | - Comprehensive error handling with aiohttp sessions
150 | 
151 | ## Testing Strategy
152 | 
153 | ### Automated Testing
154 | The testing system validates the dynamic discovery architecture:
155 | 
156 | ```bash
157 | uv run python tests/test_mcp_server.py
158 | ```
159 | 
160 | **What gets tested:**
161 | - Server starts without errors
162 | - Dynamic tool discovery finds all tools
163 | - All discovered tools are properly registered
164 | - Server responds to MCP protocol correctly
165 | 
166 | ### Test Output
167 | ```
168 | 🔍 Testing MCP Server Health and Tools...
169 | 🔧 Discovered tools (auto-discovery): ['get_all_pages', 'get_page_blocks', ...]
170 | 
171 | 🏥 Testing server health...
172 | ✅ Server started and responded successfully
173 | ✅ Dynamic tool discovery working correctly
174 | 
175 | 🎉 MCP Server test completed successfully!
178 | 📊 Tools auto-discovered: 8
179 | 🏥 Server health: OK
180 | 🔄 Dynamic discovery: OK
181 | ```
182 | 
183 | ## Key Dependencies & Technologies
184 | 
185 | - **Python 3.11+**: Modern async/await support required
186 | - **UV**: Fast Python package manager for dependency management
187 | - **FastMCP**: MCP server implementation from the official Python SDK
188 | - **aiohttp**: Async HTTP client for Logseq API calls
189 | - **Ruff**: Code formatting and linting
190 | - **python-dotenv**: Environment variable management (`.env` loading)
191 | 
192 | ## MCP Integration
193 | 
194 | ### Claude Desktop Configuration
195 | Add to `claude_desktop_config.json` (on macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`; on Windows: `%APPDATA%\Claude\claude_desktop_config.json`):
196 | 
197 | ```json
198 | {
199 |   "mcpServers": {
200 |     "logseq-api": {
201 |       "command": "uv",
202 |       "args": [
203 |         "run",
204 |         "--directory",
205 |         "/path/to/logseq-api-mcp",
206 |         "mcp",
207 |         "run",
208 |         "src/server.py"
209 |       ],
210 |       "env": {
211 |         "LOGSEQ_API_ENDPOINT": "http://127.0.0.1:12315/api",
212 |         "LOGSEQ_API_TOKEN": "your_token_here"
213 |       }
214 |     }
215 |   }
216 | }
217 | ```
218 | 
219 | ## Development Guidelines
220 | 
221 | ### Code Patterns
222 | - All API calls use async/await with aiohttp
223 | - Environment variables loaded from project root `.env`
224 | - Consistent error handling and logging
225 | - Emoji-enhanced output formatting for AI readability
226 | - Type hints for all function signatures
227 | 
228 | ### Tool Requirements
229 | - Must be in `src/tools/` directory
230 | - Function names cannot start with `_`
231 | - File names cannot start with `_`
232 | - Include comprehensive docstrings
233 | - Use proper type hints
234 | - Handle exceptions gracefully
235 | 
236 | ### Quality Standards
237 | - **Ruff formatting**: `uv run ruff check --fix && uv run ruff format`
238 | - **Type safety**: Full type hints for parameters and returns
239 | - **Documentation**: Comprehensive docstrings for all functions
240 | - **Testing**: Automatic validation via dynamic discovery tests
241 | 
242 | ## Architecture Benefits
243 | 
244 | ### Zero Configuration Management
245 | - **No Manual Imports**: Tools are automatically discovered and imported
246 | - **No Registration Code**: Registry handles all tool registration automatically
247 | - **No Maintenance Overhead**: Adding tools requires zero configuration changes
248 | - **Error Prevention**: Impossible to forget to register new tools
249 | 
250 | ### Scalability
251 | - **Plugin Architecture**: Easy to extend with new Logseq API capabilities
252 | - **Consistent Patterns**: All tools follow the same discovery and registration flow
253 | - **CI Validation**: Automated testing ensures all tools work correctly
254 | - **Development Velocity**: New tools are immediately available without configuration
255 | 
256 | This architecture demonstrates how dynamic discovery patterns can eliminate configuration overhead while maintaining robust testing and validation.
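257 | 
258 | ### Discovery Sketch (Illustrative)
259 | 
260 | The actual `src/tools/__init__.py` is not reproduced here. As a rough sketch (assuming tools are top-level `async` functions in non-underscore files, which the requirements above imply), a minimal discovery module could look like this:
261 | 
262 | ```python
263 | """Illustrative sketch of a discovery module (not the real src/tools/__init__.py)."""
264 | 
265 | import importlib
266 | import inspect
267 | from pathlib import Path
268 | 
269 | __all__: list[str] = []
270 | 
271 | # Scan this package directory for candidate tool modules.
272 | for _file in sorted(Path(__file__).parent.glob("*.py")):
273 |     if _file.name.startswith("_"):
274 |         continue  # skip __init__.py and private modules
275 | 
276 |     _module = importlib.import_module(f".{_file.stem}", package=__name__)
277 | 
278 |     # Re-export every public coroutine function the module defines so the
279 |     # registry can look it up with getattr(tools, name) and register it.
280 |     for _name, _obj in inspect.getmembers(_module, inspect.iscoroutinefunction):
281 |         if not _name.startswith("_") and _obj.__module__ == _module.__name__:
282 |             globals()[_name] = _obj
283 |             __all__.append(_name)
284 | ```
285 | 
286 | Because discovery hangs everything off `__all__`, the registry loop never needs editing when a new tool file appears; that is the zero-configuration property described above.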
-------------------------------------------------------------------------------- /src/tools/get_block_content.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import List 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def get_block_content(block_uuid: str) -> List[TextContent]: 15 | """ 16 | Get detailed content and metadata for a specific block using its UUID. 17 | 18 | Returns comprehensive block information formatted for optimal LLM consumption 19 | including properties, relationships, and content. 20 | 21 | Args: 22 | block_uuid: The UUID of the block to retrieve 23 | """ 24 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 25 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 26 | 27 | headers = {"Authorization": f"Bearer {token}"} 28 | 29 | def format_properties(props): 30 | """Format block properties into a readable display""" 31 | if not props: 32 | return "None" 33 | 34 | formatted_props = [] 35 | for property_name, value in props.items(): 36 | if isinstance(value, list): 37 | value_str = ", ".join(str(v) for v in value) 38 | else: 39 | value_str = str(value) 40 | formatted_props.append(f"**{property_name}**: {value_str}") 41 | 42 | return "\n".join(formatted_props) 43 | 44 | def format_children_summary(children): 45 | """Format children summary information""" 46 | if not children: 47 | return "None" 48 | 49 | children_info = [] 50 | for child in children: 51 | if isinstance(child, list) and len(child) >= 2: 52 | if child[0] == "uuid": 53 | children_info.append(f"Block UUID: {child[1]}") 54 | else: 55 | children_info.append(f"{child[0]}: {child[1]}") 56 | else: 57 | children_info.append(str(child)) 58 | 59 | return "\n".join(children_info) 60 | 61 | def format_content_display(content, block_type_prefix=""): 62 | """Format content for clear display""" 63 | if not content: 64 | return "[Empty block]" 65 | 66 | # Clean up content for display 67 | content = content.strip() 68 | 69 | # Identify content type 70 | if content.startswith("#"): 71 | content_type = "📑 Header" 72 | elif "#card" in content: 73 | content_type = "💡 Flashcard" 74 | elif content.startswith("```"): 75 | content_type = "💻 Code Block" 76 | elif content.startswith("- ") or content.startswith("* "): 77 | content_type = "📝 List Item" 78 | elif "::" in content and len(content.split("\n")[0]) < 50: 79 | content_type = "⚙️ Properties" 80 | else: 81 | content_type = "📄 Text Block" 82 | 83 | # Add prefix for child blocks 84 | if block_type_prefix: 85 | content_type = f"{block_type_prefix} {content_type}" 86 | 87 | # Format based on length 88 | if len(content) > 500: 89 | preview = content[:500] + "..." 
90 | return f"{content_type}\n**Content** ({len(content)} chars):\n{preview}\n\n*[Content truncated - showing first 500 characters]*" 91 | else: 92 | return f"{content_type}\n**Content:**\n{content}" 93 | 94 | async def get_block_by_uuid(session, uuid): 95 | """Helper function to get a block by UUID""" 96 | payload = {"method": "logseq.Editor.getBlock", "args": [uuid]} 97 | async with session.post(endpoint, json=payload, headers=headers) as response: 98 | if response.status == 200: 99 | return await response.json() 100 | return None 101 | 102 | def format_block_details(block, is_child=False): 103 | """Format block details consistently""" 104 | if not block: 105 | return [] 106 | 107 | # Extract block information 108 | block_id = block.get("id", "N/A") 109 | uuid = block.get("uuid", "N/A") 110 | content = block.get("content", "") 111 | properties = block.get("properties", {}) 112 | children = block.get("children", []) 113 | parent = block.get("parent", {}) 114 | page = block.get("page", {}) 115 | 116 | # Get parent and page info 117 | parent_id = parent.get("id", "N/A") if parent else "N/A" 118 | page_id = page.get("id", "N/A") if page else "N/A" 119 | page_name = page.get("name", "Unknown") if page else "Unknown" 120 | 121 | # Build block details 122 | prefix = "👶 **CHILD BLOCK**" if is_child else "🔍 **MAIN BLOCK**" 123 | lines = [ 124 | prefix, 125 | f"📌 Block ID: {block_id}", 126 | f"🔑 UUID: {uuid}", 127 | "", 128 | ] 129 | 130 | if not is_child: 131 | lines.extend( 132 | [ 133 | "📄 **PAGE CONTEXT:**", 134 | f"• Page: {page_name} (ID: {page_id})", 135 | f"• Parent Block ID: {parent_id}", 136 | "", 137 | ] 138 | ) 139 | 140 | lines.extend( 141 | [ 142 | "⚙️ **PROPERTIES:**", 143 | format_properties(properties), 144 | "", 145 | "📝 **CONTENT:**", 146 | format_content_display(content, "🔸" if is_child else ""), 147 | "", 148 | ] 149 | ) 150 | 151 | if not is_child: 152 | lines.extend( 153 | [ 154 | "👶 **IMMEDIATE CHILDREN:**", 155 | f"Count: {len(children)}", 156 | format_children_summary(children) 157 | if children 158 | else "No child blocks", 159 | "", 160 | ] 161 | ) 162 | 163 | lines.extend( 164 | [ 165 | "📊 **TECHNICAL SUMMARY:**", 166 | f"• Block Type: {'Header' if content.startswith('#') else 'Flashcard' if '#card' in content else 'Code' if content.startswith('```') else 'Text'}", 167 | f"• Has Properties: {'Yes' if properties else 'No'}", 168 | f"• Has Children: {'Yes' if children else 'No'}", 169 | f"• Content Length: {len(content)} characters", 170 | "", 171 | ] 172 | ) 173 | 174 | return lines 175 | 176 | async with aiohttp.ClientSession() as session: 177 | try: 178 | # Get main block by UUID 179 | main_block = await get_block_by_uuid(session, block_uuid) 180 | if not main_block: 181 | return [ 182 | TextContent( 183 | type="text", 184 | text=f"❌ Block with UUID '{block_uuid}' not found", 185 | ) 186 | ] 187 | 188 | # Start building output with main block 189 | output_lines = format_block_details(main_block, is_child=False) 190 | 191 | # Get immediate children blocks 192 | children = main_block.get("children", []) 193 | if children: 194 | output_lines.extend( 195 | [ 196 | "=" * 60, 197 | "🌳 **IMMEDIATE CHILDREN DETAILS:**", 198 | "", 199 | ] 200 | ) 201 | 202 | for i, child in enumerate(children, 1): 203 | # Extract UUID from child reference 204 | child_uuid = None 205 | if ( 206 | isinstance(child, list) 207 | and len(child) >= 2 208 | and child[0] == "uuid" 209 | ): 210 | child_uuid = child[1] 211 | elif isinstance(child, dict): 212 | child_uuid = child.get("uuid") 213 | 214 | 
                if child_uuid:
215 |                     child_block = await get_block_by_uuid(session, child_uuid)
216 |                     if child_block:
217 |                         output_lines.extend(
218 |                             [
219 |                                 f"🔸 **CHILD {i}:**",
220 |                                 "",
221 |                             ]
222 |                         )
223 |                         output_lines.extend(
224 |                             format_block_details(child_block, is_child=True)
225 |                         )
226 |                         output_lines.append("-" * 40)
227 |                     else:
228 |                         output_lines.extend(
229 |                             [
230 |                                 f"🔸 **CHILD {i}:**",
231 |                                 f"❌ Could not fetch child block with UUID: {child_uuid}",
232 |                                 "-" * 40,
233 |                             ]
234 |                         )
235 |                 else:
236 |                     output_lines.extend(
237 |                         [
238 |                             f"🔸 **CHILD {i}:**",
239 |                             f"❌ Invalid child reference: {child}",
240 |                             "-" * 40,
241 |                         ]
242 |                     )
243 | 
244 |             return [TextContent(type="text", text="\n".join(output_lines))]
245 | 
246 |         except Exception as e:
247 |             return [
248 |                 TextContent(
249 |                     type="text", text=f"❌ Error fetching block content: {str(e)}"
250 |                 )
251 |             ]
252 | 
--------------------------------------------------------------------------------
/src/tools/get_page_links.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from typing import Any, List
4 | 
5 | import aiohttp
6 | from dotenv import load_dotenv
7 | from mcp.types import TextContent
8 | 
9 | # Load environment variables from .env file in project root
10 | env_path = Path(__file__).parent.parent.parent / ".env"
11 | load_dotenv(env_path)
12 | 
13 | 
14 | async def get_page_links(page_identifier: str) -> List[TextContent]:
15 |     """
16 |     Get pages that link to the specified page with comprehensive metadata.
17 | 
18 |     Retrieves all pages that reference the target page and enriches them with
19 |     full metadata including creation dates, journal status, and UUIDs.
20 | 
21 |     Args:
22 |         page_identifier: The name or UUID of the page to find links to
23 |     """
24 |     endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api")
25 |     token = os.getenv("LOGSEQ_API_TOKEN", "auth")
26 | 
27 |     headers = {"Authorization": f"Bearer {token}"}
28 | 
29 |     def format_timestamp(timestamp):
30 |         """Convert timestamp to readable format"""
31 |         if not timestamp:
32 |             return "N/A"
33 |         try:
34 |             from datetime import datetime
35 | 
36 |             dt = datetime.fromtimestamp(timestamp / 1000)  # Convert from ms
37 |             return dt.strftime("%Y-%m-%d %H:%M:%S")
38 |         except (ValueError, TypeError, OverflowError):
39 |             return str(timestamp)
40 | 
41 |     def format_page_entry(page_ref, full_page_data=None):
42 |         """Format a single page entry with complete metadata"""
43 |         # Basic info from reference
44 |         ref_name = page_ref.get("name", "Unknown")
45 |         ref_original_name = page_ref.get("originalName", ref_name)
46 |         ref_id = page_ref.get("id", "N/A")
47 | 
48 |         # Enhanced info from full page data
49 |         if full_page_data:
50 |             created_at = format_timestamp(full_page_data.get("createdAt"))
51 |             updated_at = format_timestamp(full_page_data.get("updatedAt"))
52 |             is_journal = full_page_data.get("journal?", False)
53 |             uuid = full_page_data.get("uuid", "N/A")
54 | 
55 |             # Get properties if available
56 |             properties = full_page_data.get("properties", {})
57 | 
58 |             return {
59 |                 "name": ref_name,
60 |                 "original_name": ref_original_name,
61 |                 "id": ref_id,
62 |                 "uuid": uuid,
63 |                 "created_at": created_at,
64 |                 "updated_at": updated_at,
65 |                 "is_journal": is_journal,
66 |                 "properties": properties,
67 |             }
68 |         else:
69 |             return {
70 |                 "name": ref_name,
71 |                 "original_name": ref_original_name,
72 |                 "id": ref_id,
73 |                 "uuid": "N/A",
74 |                 "created_at": "N/A",
75 |                 "updated_at": "N/A",
76 |                 "is_journal": False,
77 |                 "properties": {},
78 |             }
79 | 
80 |     def format_properties_display(props):
81 |         """Format properties for display"""
82 |         if not props:
83 |             return "None"
84 | 
85 |         # Show only meaningful properties
86 |         filtered_props = {}
87 |         for property_name, value in props.items():
88 |             if (
89 |                 property_name not in ["collapsed", "card-last-interval", "card-repeats"]
90 |                 and value
91 |             ):
92 |                 if isinstance(value, list):
93 |                     filtered_props[property_name] = ", ".join(str(v) for v in value)
94 |                 else:
95 |                     filtered_props[property_name] = str(value)
96 | 
97 |         if not filtered_props:
98 |             return "None"
99 | 
100 |         prop_lines = []
101 |         for prop_name, prop_value in list(filtered_props.items())[
102 |             :3
103 |         ]:  # Show top 3 properties
104 |             prop_lines.append(f" • {prop_name}: {prop_value}")
105 | 
106 |         return "\n".join(prop_lines)
107 | 
108 |     async with aiohttp.ClientSession() as session:
109 |         try:
110 |             # 1. Get page linked references
111 |             links_payload = {
112 |                 "method": "logseq.Editor.getPageLinkedReferences",
113 |                 "args": [page_identifier],
114 |             }
115 | 
116 |             async with session.post(
117 |                 endpoint, json=links_payload, headers=headers
118 |             ) as response:
119 |                 if response.status != 200:
120 |                     return [
121 |                         TextContent(
122 |                             type="text",
123 |                             text=f"❌ Failed to fetch page links: HTTP {response.status}",
124 |                         )
125 |                     ]
126 | 
127 |                 linked_refs = await response.json()
128 |                 if not linked_refs:
129 |                     return [
130 |                         TextContent(
131 |                             type="text", text=f"✅ No pages link to '{page_identifier}'"
132 |                         )
133 |                     ]
134 | 
135 |             # 2. Get all pages for metadata enrichment
136 |             all_pages_payload = {"method": "logseq.Editor.getAllPages"}
137 | 
138 |             async with session.post(
139 |                 endpoint, json=all_pages_payload, headers=headers
140 |             ) as response:
141 |                 all_pages: list[dict[str, Any]] = []
142 |                 if response.status == 200:
143 |                     try:
144 |                         all_pages = await response.json() or []
145 |                     except Exception:
146 |                         all_pages = []
147 | 
148 |             # 3. Extract unique page references from linked references
149 |             unique_pages = {}
150 |             reference_counts = {}
151 | 
152 |             for link_group in linked_refs:
153 |                 if isinstance(link_group, list) and len(link_group) >= 1:
154 |                     page_ref = link_group[0]
155 |                     if isinstance(page_ref, dict):
156 |                         page_id = page_ref.get("id")
157 |                         if page_id:
158 |                             unique_pages[page_id] = page_ref
159 |                             # Count number of references from this page
160 |                             reference_counts[page_id] = len(link_group) - 1
161 | 
162 |             # 4. Create page lookup by ID for enrichment
163 |             page_lookup = {}
164 |             for page in all_pages:
165 |                 if isinstance(page, dict):
166 |                     page_id = page.get("id")
167 |                     if page_id:
168 |                         page_lookup[page_id] = page
169 | 
170 |             # 5. Build enriched page entries
171 |             enriched_pages = []
172 |             for page_id, page_ref in unique_pages.items():
173 |                 full_page_data = page_lookup.get(page_id)
174 |                 page_entry = format_page_entry(page_ref, full_page_data)
175 |                 page_entry["reference_count"] = reference_counts.get(page_id, 0)
176 |                 enriched_pages.append(page_entry)
177 | 
178 |             # 6. Sort pages by reference count (most references first), then by name
179 |             def sort_pages(page):
180 |                 return (-page["reference_count"], page["name"].lower())
181 | 
182 |             # Sort in place using the key function; building (key, page)
183 |             # tuples and sorting those would compare the page dicts whenever
184 |             # two keys tie, raising TypeError
185 |             enriched_pages.sort(key=sort_pages)
186 | 
187 |             # 7. 
Build output 188 | output_lines = [ 189 | "🔗 **PAGE LINKS ANALYSIS**", 190 | f"📄 Target Page: {page_identifier}", 191 | f"📊 Found {len(enriched_pages)} pages linking to this page", 192 | f"📈 Total reference groups: {len(linked_refs)}", 193 | "", 194 | "🎯 **LINKING PAGES:**", 195 | "", 196 | ] 197 | 198 | # Display each linking page 199 | for i, page in enumerate(enriched_pages, 1): 200 | emoji = "📅" if page["is_journal"] else "📄" 201 | display_name = ( 202 | page["original_name"] 203 | if page["original_name"] != page["name"] 204 | else page["name"] 205 | ) 206 | 207 | output_lines.extend( 208 | [ 209 | f"{emoji} **{i}. {display_name}**", 210 | f" 🔑 ID: {page['id']} | UUID: {page['uuid']}", 211 | f" 📊 References: {page['reference_count']} | Journal: {'Yes' if page['is_journal'] else 'No'}", 212 | f" 📅 Created: {page['created_at']}", 213 | f" 🔄 Updated: {page['updated_at']}", 214 | ] 215 | ) 216 | 217 | # Show properties if available 218 | props_display = format_properties_display(page["properties"]) 219 | if props_display != "None": 220 | output_lines.extend([" ⚙️ Properties:", props_display]) 221 | 222 | output_lines.append("") 223 | 224 | # 8. Add summary statistics 225 | journal_pages = sum(1 for p in enriched_pages if p["is_journal"]) 226 | regular_pages = len(enriched_pages) - journal_pages 227 | total_refs = sum(p["reference_count"] for p in enriched_pages) 228 | 229 | output_lines.extend( 230 | [ 231 | "📈 **SUMMARY:**", 232 | f"• Total linking pages: {len(enriched_pages)}", 233 | f"• Journal pages: {journal_pages}", 234 | f"• Regular pages: {regular_pages}", 235 | f"• Total references: {total_refs}", 236 | f"• Average references per page: {total_refs / len(enriched_pages):.1f}" 237 | if enriched_pages 238 | else "• Average references per page: 0", 239 | "", 240 | ] 241 | ) 242 | 243 | return [TextContent(type="text", text="\n".join(output_lines))] 244 | 245 | except Exception as e: 246 | return [ 247 | TextContent(type="text", text=f"❌ Error fetching page links: {str(e)}") 248 | ] 249 | -------------------------------------------------------------------------------- /safety-report.json: -------------------------------------------------------------------------------- 1 | { 2 | "report_meta": { 3 | "scan_target": "environment", 4 | "scanned": [ 5 | "/home/user/github/logseq-api-mcp/.venv/lib/python3.11/site-packages", 6 | "/home/user/github/logseq-api-mcp/.venv/lib/python3.11/site-packages/setuptools/_vendor" 7 | ], 8 | "policy_file": null, 9 | "policy_file_source": "local", 10 | "api_key": false, 11 | "local_database_path": null, 12 | "safety_version": "2.3.5", 13 | "timestamp": "2025-09-16 10:39:11", 14 | "packages_found": 82, 15 | "vulnerabilities_found": 0, 16 | "vulnerabilities_ignored": 0, 17 | "remediations_recommended": 0, 18 | "telemetry": { 19 | "os_type": "Linux", 20 | "os_release": "6.14.0-29-generic", 21 | "os_description": "Linux-6.14.0-29-generic-x86_64-with-glibc2.39", 22 | "python_version": "3.11.13", 23 | "safety_command": "check", 24 | "safety_options": { 25 | "json": { 26 | "--json": 1 27 | }, 28 | "save_json": { 29 | "--save-json": 1 30 | } 31 | }, 32 | "safety_version": "2.3.5", 33 | "safety_source": "cli" 34 | }, 35 | "git": { 36 | "branch": "main", 37 | "tag": "", 38 | "commit": "b988771f39d056d68fc043abbfd08085e94b77af", 39 | "dirty": true, 40 | "origin": "git@github.com:gustavo-meilus/logseq-api-mcp.git" 41 | }, 42 | "project": null, 43 | "json_version": 1 44 | }, 45 | "scanned_packages": { 46 | "pyyaml": { 47 | "name": "pyyaml", 48 | "version": "6.0.2" 49 | 
}, 50 | "aiohappyeyeballs": { 51 | "name": "aiohappyeyeballs", 52 | "version": "2.6.1" 53 | }, 54 | "aiohttp": { 55 | "name": "aiohttp", 56 | "version": "3.12.15" 57 | }, 58 | "aiosignal": { 59 | "name": "aiosignal", 60 | "version": "1.4.0" 61 | }, 62 | "annotated-types": { 63 | "name": "annotated-types", 64 | "version": "0.7.0" 65 | }, 66 | "anyio": { 67 | "name": "anyio", 68 | "version": "4.9.0" 69 | }, 70 | "attrs": { 71 | "name": "attrs", 72 | "version": "25.3.0" 73 | }, 74 | "bandit": { 75 | "name": "bandit", 76 | "version": "1.8.6" 77 | }, 78 | "certifi": { 79 | "name": "certifi", 80 | "version": "2025.4.26" 81 | }, 82 | "cfgv": { 83 | "name": "cfgv", 84 | "version": "3.4.0" 85 | }, 86 | "charset-normalizer": { 87 | "name": "charset-normalizer", 88 | "version": "3.4.3" 89 | }, 90 | "click": { 91 | "name": "click", 92 | "version": "8.2.1" 93 | }, 94 | "distlib": { 95 | "name": "distlib", 96 | "version": "0.4.0" 97 | }, 98 | "dparse": { 99 | "name": "dparse", 100 | "version": "0.6.4" 101 | }, 102 | "filelock": { 103 | "name": "filelock", 104 | "version": "3.12.4" 105 | }, 106 | "frozenlist": { 107 | "name": "frozenlist", 108 | "version": "1.6.0" 109 | }, 110 | "h11": { 111 | "name": "h11", 112 | "version": "0.16.0" 113 | }, 114 | "httpcore": { 115 | "name": "httpcore", 116 | "version": "1.0.9" 117 | }, 118 | "httpx": { 119 | "name": "httpx", 120 | "version": "0.28.1" 121 | }, 122 | "httpx-sse": { 123 | "name": "httpx-sse", 124 | "version": "0.4.0" 125 | }, 126 | "identify": { 127 | "name": "identify", 128 | "version": "2.6.14" 129 | }, 130 | "idna": { 131 | "name": "idna", 132 | "version": "3.10" 133 | }, 134 | "jsonschema": { 135 | "name": "jsonschema", 136 | "version": "4.25.1" 137 | }, 138 | "jsonschema-specifications": { 139 | "name": "jsonschema-specifications", 140 | "version": "2025.9.1" 141 | }, 142 | "load-dotenv": { 143 | "name": "load-dotenv", 144 | "version": "0.1.0" 145 | }, 146 | "markdown-it-py": { 147 | "name": "markdown-it-py", 148 | "version": "3.0.0" 149 | }, 150 | "mcp": { 151 | "name": "mcp", 152 | "version": "1.14.0" 153 | }, 154 | "mdurl": { 155 | "name": "mdurl", 156 | "version": "0.1.2" 157 | }, 158 | "multidict": { 159 | "name": "multidict", 160 | "version": "6.4.4" 161 | }, 162 | "mypy": { 163 | "name": "mypy", 164 | "version": "1.18.1" 165 | }, 166 | "mypy-extensions": { 167 | "name": "mypy-extensions", 168 | "version": "1.1.0" 169 | }, 170 | "nodeenv": { 171 | "name": "nodeenv", 172 | "version": "1.9.1" 173 | }, 174 | "packaging": { 175 | "name": "packaging", 176 | "version": "21.3" 177 | }, 178 | "pathlib": { 179 | "name": "pathlib", 180 | "version": "1.0.1" 181 | }, 182 | "pathspec": { 183 | "name": "pathspec", 184 | "version": "0.12.1" 185 | }, 186 | "pip-licenses": { 187 | "name": "pip-licenses", 188 | "version": "5.0.0" 189 | }, 190 | "platformdirs": { 191 | "name": "platformdirs", 192 | "version": "4.4.0" 193 | }, 194 | "pre-commit": { 195 | "name": "pre-commit", 196 | "version": "4.3.0" 197 | }, 198 | "prettytable": { 199 | "name": "prettytable", 200 | "version": "3.16.0" 201 | }, 202 | "propcache": { 203 | "name": "propcache", 204 | "version": "0.3.1" 205 | }, 206 | "pydantic": { 207 | "name": "pydantic", 208 | "version": "2.11.9" 209 | }, 210 | "pydantic-core": { 211 | "name": "pydantic-core", 212 | "version": "2.33.2" 213 | }, 214 | "pydantic-settings": { 215 | "name": "pydantic-settings", 216 | "version": "2.9.1" 217 | }, 218 | "pygments": { 219 | "name": "pygments", 220 | "version": "2.19.1" 221 | }, 222 | "pyparsing": { 223 | "name": 
"pyparsing", 224 | "version": "3.2.4" 225 | }, 226 | "python-dotenv": { 227 | "name": "python-dotenv", 228 | "version": "1.1.0" 229 | }, 230 | "python-multipart": { 231 | "name": "python-multipart", 232 | "version": "0.0.20" 233 | }, 234 | "referencing": { 235 | "name": "referencing", 236 | "version": "0.36.2" 237 | }, 238 | "requests": { 239 | "name": "requests", 240 | "version": "2.32.5" 241 | }, 242 | "rich": { 243 | "name": "rich", 244 | "version": "14.0.0" 245 | }, 246 | "rpds-py": { 247 | "name": "rpds-py", 248 | "version": "0.27.1" 249 | }, 250 | "ruamel.yaml": { 251 | "name": "ruamel.yaml", 252 | "version": "0.18.15" 253 | }, 254 | "ruamel.yaml.clib": { 255 | "name": "ruamel.yaml.clib", 256 | "version": "0.2.12" 257 | }, 258 | "ruff": { 259 | "name": "ruff", 260 | "version": "0.11.12" 261 | }, 262 | "safety": { 263 | "name": "safety", 264 | "version": "2.3.5" 265 | }, 266 | "setuptools": { 267 | "name": "setuptools", 268 | "version": "80.9.0" 269 | }, 270 | "shellingham": { 271 | "name": "shellingham", 272 | "version": "1.5.4" 273 | }, 274 | "sniffio": { 275 | "name": "sniffio", 276 | "version": "1.3.1" 277 | }, 278 | "sse-starlette": { 279 | "name": "sse-starlette", 280 | "version": "2.3.6" 281 | }, 282 | "starlette": { 283 | "name": "starlette", 284 | "version": "0.48.0" 285 | }, 286 | "stevedore": { 287 | "name": "stevedore", 288 | "version": "5.5.0" 289 | }, 290 | "tomli": { 291 | "name": "tomli", 292 | "version": "2.2.1" 293 | }, 294 | "typer": { 295 | "name": "typer", 296 | "version": "0.16.0" 297 | }, 298 | "typing-extensions": { 299 | "name": "typing-extensions", 300 | "version": "4.13.2" 301 | }, 302 | "typing-inspection": { 303 | "name": "typing-inspection", 304 | "version": "0.4.1" 305 | }, 306 | "urllib3": { 307 | "name": "urllib3", 308 | "version": "2.5.0" 309 | }, 310 | "uvicorn": { 311 | "name": "uvicorn", 312 | "version": "0.34.2" 313 | }, 314 | "virtualenv": { 315 | "name": "virtualenv", 316 | "version": "20.34.0" 317 | }, 318 | "wcwidth": { 319 | "name": "wcwidth", 320 | "version": "0.2.13" 321 | }, 322 | "yarl": { 323 | "name": "yarl", 324 | "version": "1.20.0" 325 | }, 326 | "autocommand": { 327 | "name": "autocommand", 328 | "version": "2.2.2" 329 | }, 330 | "backports.tarfile": { 331 | "name": "backports.tarfile", 332 | "version": "1.2.0" 333 | }, 334 | "importlib-metadata": { 335 | "name": "importlib-metadata", 336 | "version": "8.0.0" 337 | }, 338 | "inflect": { 339 | "name": "inflect", 340 | "version": "7.3.1" 341 | }, 342 | "jaraco.collections": { 343 | "name": "jaraco.collections", 344 | "version": "5.1.0" 345 | }, 346 | "jaraco.context": { 347 | "name": "jaraco.context", 348 | "version": "5.3.0" 349 | }, 350 | "jaraco.functools": { 351 | "name": "jaraco.functools", 352 | "version": "4.0.1" 353 | }, 354 | "jaraco.text": { 355 | "name": "jaraco.text", 356 | "version": "3.12.1" 357 | }, 358 | "more-itertools": { 359 | "name": "more-itertools", 360 | "version": "10.3.0" 361 | }, 362 | "typeguard": { 363 | "name": "typeguard", 364 | "version": "4.3.0" 365 | }, 366 | "wheel": { 367 | "name": "wheel", 368 | "version": "0.45.1" 369 | }, 370 | "zipp": { 371 | "name": "zipp", 372 | "version": "3.19.2" 373 | } 374 | }, 375 | "affected_packages": {}, 376 | "announcements": [], 377 | "vulnerabilities": [], 378 | "ignored_vulnerabilities": [], 379 | "remediations": {} 380 | } -------------------------------------------------------------------------------- /tests/test_get_block_content.py: -------------------------------------------------------------------------------- 
1 | """Tests for get_block_content tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.get_block_content import get_block_content 8 | 9 | 10 | class TestGetBlockContent: 11 | """Test cases for get_block_content function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_get_block_content_success( 15 | self, mock_env_vars, mock_aiohttp_session, sample_block_data 16 | ): 17 | """Test successful block content retrieval.""" 18 | # Setup mock response 19 | mock_response = MagicMock() 20 | mock_response.status = 200 21 | mock_response.json = AsyncMock(return_value=sample_block_data) 22 | 23 | # Setup session mock 24 | mock_context = MagicMock() 25 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 26 | mock_context.__aexit__ = AsyncMock(return_value=None) 27 | mock_aiohttp_session._session_instance.post.return_value = mock_context 28 | 29 | result = await get_block_content("block-uuid-456") 30 | 31 | assert len(result) == 1 32 | assert "🔍 **MAIN BLOCK**" in result[0].text 33 | assert "Test block content" in result[0].text 34 | 35 | @pytest.mark.asyncio 36 | async def test_get_block_content_with_children( 37 | self, mock_env_vars, mock_aiohttp_session 38 | ): 39 | """Test block content retrieval with child blocks.""" 40 | sample_block_data = { 41 | "id": 456, 42 | "content": "Parent block content", 43 | "uuid": "block-uuid-456", 44 | "properties": {"important": "yes"}, 45 | "children": [ 46 | ["uuid", "child-uuid-1"], 47 | ["uuid", "child-uuid-2"], 48 | ], 49 | "parent": {"id": 123}, 50 | "page": {"id": 789, "name": "Test Page"}, 51 | } 52 | 53 | sample_child_data = { 54 | "id": 457, 55 | "content": "Child block content", 56 | "uuid": "child-uuid-1", 57 | "properties": {}, 58 | "children": [], 59 | } 60 | 61 | # Setup mock responses 62 | mock_main_response = MagicMock() 63 | mock_main_response.status = 200 64 | mock_main_response.json = AsyncMock(return_value=sample_block_data) 65 | 66 | mock_child_response = MagicMock() 67 | mock_child_response.status = 200 68 | mock_child_response.json = AsyncMock(return_value=sample_child_data) 69 | 70 | # Setup session mock 71 | mock_context1 = MagicMock() 72 | mock_context1.__aenter__ = AsyncMock(return_value=mock_main_response) 73 | mock_context1.__aexit__ = AsyncMock(return_value=None) 74 | 75 | mock_context2 = MagicMock() 76 | mock_context2.__aenter__ = AsyncMock(return_value=mock_child_response) 77 | mock_context2.__aexit__ = AsyncMock(return_value=None) 78 | 79 | mock_aiohttp_session._session_instance.post.side_effect = [ 80 | mock_context1, 81 | mock_context2, 82 | mock_context2, # Second child 83 | ] 84 | 85 | result = await get_block_content("block-uuid-456") 86 | 87 | assert len(result) == 1 88 | assert "🔍 **MAIN BLOCK**" in result[0].text 89 | assert "Parent block content" in result[0].text 90 | assert "👶 **CHILD BLOCK**" in result[0].text 91 | assert "Child block content" in result[0].text 92 | 93 | @pytest.mark.asyncio 94 | async def test_get_block_content_with_flashcard( 95 | self, mock_env_vars, mock_aiohttp_session 96 | ): 97 | """Test block content retrieval with flashcard content.""" 98 | sample_block_data = { 99 | "id": 456, 100 | "content": "What is the capital of France? 
#card", 101 | "uuid": "block-uuid-456", 102 | "properties": {"card-last-interval": 1}, 103 | "children": [], 104 | "parent": {"id": 123}, 105 | "page": {"id": 789, "name": "Test Page"}, 106 | } 107 | 108 | # Setup mock response 109 | mock_response = MagicMock() 110 | mock_response.status = 200 111 | mock_response.json = AsyncMock(return_value=sample_block_data) 112 | 113 | # Setup session mock 114 | mock_context = MagicMock() 115 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 116 | mock_context.__aexit__ = AsyncMock(return_value=None) 117 | mock_aiohttp_session._session_instance.post.return_value = mock_context 118 | 119 | result = await get_block_content("block-uuid-456") 120 | 121 | assert len(result) == 1 122 | assert "🔍 **MAIN BLOCK**" in result[0].text 123 | assert "💡 Flashcard" in result[0].text 124 | assert "What is the capital of France?" in result[0].text 125 | 126 | @pytest.mark.asyncio 127 | async def test_get_block_content_with_code_block( 128 | self, mock_env_vars, mock_aiohttp_session 129 | ): 130 | """Test block content retrieval with code block content.""" 131 | sample_block_data = { 132 | "id": 456, 133 | "content": "```python\nprint('Hello World')\n```", 134 | "uuid": "block-uuid-456", 135 | "properties": {}, 136 | "children": [], 137 | "parent": {"id": 123}, 138 | "page": {"id": 789, "name": "Test Page"}, 139 | } 140 | 141 | # Setup mock response 142 | mock_response = MagicMock() 143 | mock_response.status = 200 144 | mock_response.json = AsyncMock(return_value=sample_block_data) 145 | 146 | # Setup session mock 147 | mock_context = MagicMock() 148 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 149 | mock_context.__aexit__ = AsyncMock(return_value=None) 150 | mock_aiohttp_session._session_instance.post.return_value = mock_context 151 | 152 | result = await get_block_content("block-uuid-456") 153 | 154 | assert len(result) == 1 155 | assert "🔍 **MAIN BLOCK**" in result[0].text 156 | assert "💻 Code Block" in result[0].text 157 | assert "print('Hello World')" in result[0].text 158 | 159 | @pytest.mark.asyncio 160 | async def test_get_block_content_with_header( 161 | self, mock_env_vars, mock_aiohttp_session 162 | ): 163 | """Test block content retrieval with header content.""" 164 | sample_block_data = { 165 | "id": 456, 166 | "content": "# Main Header", 167 | "uuid": "block-uuid-456", 168 | "properties": {}, 169 | "children": [], 170 | "parent": {"id": 123}, 171 | "page": {"id": 789, "name": "Test Page"}, 172 | } 173 | 174 | # Setup mock response 175 | mock_response = MagicMock() 176 | mock_response.status = 200 177 | mock_response.json = AsyncMock(return_value=sample_block_data) 178 | 179 | # Setup session mock 180 | mock_context = MagicMock() 181 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 182 | mock_context.__aexit__ = AsyncMock(return_value=None) 183 | mock_aiohttp_session._session_instance.post.return_value = mock_context 184 | 185 | result = await get_block_content("block-uuid-456") 186 | 187 | assert len(result) == 1 188 | assert "🔍 **MAIN BLOCK**" in result[0].text 189 | assert "📑 Header" in result[0].text 190 | assert "# Main Header" in result[0].text 191 | 192 | @pytest.mark.asyncio 193 | async def test_get_block_content_with_properties( 194 | self, mock_env_vars, mock_aiohttp_session 195 | ): 196 | """Test block content retrieval with properties.""" 197 | sample_block_data = { 198 | "id": 456, 199 | "content": "Test content", 200 | "uuid": "block-uuid-456", 201 | "properties": { 202 | "important": 
"yes", 203 | "tags": ["test", "example"], 204 | "priority": 1, 205 | }, 206 | "children": [], 207 | "parent": {"id": 123}, 208 | "page": {"id": 789, "name": "Test Page"}, 209 | } 210 | 211 | # Setup mock response 212 | mock_response = MagicMock() 213 | mock_response.status = 200 214 | mock_response.json = AsyncMock(return_value=sample_block_data) 215 | 216 | # Setup session mock 217 | mock_context = MagicMock() 218 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 219 | mock_context.__aexit__ = AsyncMock(return_value=None) 220 | mock_aiohttp_session._session_instance.post.return_value = mock_context 221 | 222 | result = await get_block_content("block-uuid-456") 223 | 224 | assert len(result) == 1 225 | assert "🔍 **MAIN BLOCK**" in result[0].text 226 | assert "**important**: yes" in result[0].text 227 | assert "**tags**: test, example" in result[0].text 228 | assert "**priority**: 1" in result[0].text 229 | 230 | @pytest.mark.asyncio 231 | async def test_get_block_content_with_long_content( 232 | self, mock_env_vars, mock_aiohttp_session 233 | ): 234 | """Test block content retrieval with long content.""" 235 | long_content = "This is a very long content that exceeds 500 characters. " * 20 236 | sample_block_data = { 237 | "id": 456, 238 | "content": long_content, 239 | "uuid": "block-uuid-456", 240 | "properties": {}, 241 | "children": [], 242 | "parent": {"id": 123}, 243 | "page": {"id": 789, "name": "Test Page"}, 244 | } 245 | 246 | # Setup mock response 247 | mock_response = MagicMock() 248 | mock_response.status = 200 249 | mock_response.json = AsyncMock(return_value=sample_block_data) 250 | 251 | # Setup session mock 252 | mock_context = MagicMock() 253 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 254 | mock_context.__aexit__ = AsyncMock(return_value=None) 255 | mock_aiohttp_session._session_instance.post.return_value = mock_context 256 | 257 | result = await get_block_content("block-uuid-456") 258 | 259 | assert len(result) == 1 260 | assert "🔍 **MAIN BLOCK**" in result[0].text 261 | assert "Content truncated" in result[0].text 262 | assert len(result[0].text) > 500 # Should be truncated 263 | 264 | @pytest.mark.asyncio 265 | async def test_get_block_content_child_http_error( 266 | self, mock_env_vars, mock_aiohttp_session 267 | ): 268 | """Test block content retrieval with child HTTP error.""" 269 | sample_block_data = { 270 | "id": 456, 271 | "content": "Parent block content", 272 | "uuid": "block-uuid-456", 273 | "properties": {}, 274 | "children": [ 275 | ["uuid", "child-uuid-1"], 276 | ], 277 | "parent": {"id": 123}, 278 | "page": {"id": 789, "name": "Test Page"}, 279 | } 280 | 281 | # Setup mock responses 282 | mock_main_response = MagicMock() 283 | mock_main_response.status = 200 284 | mock_main_response.json = AsyncMock(return_value=sample_block_data) 285 | 286 | mock_child_response = MagicMock() 287 | mock_child_response.status = 500 288 | 289 | # Setup session mock 290 | mock_context1 = MagicMock() 291 | mock_context1.__aenter__ = AsyncMock(return_value=mock_main_response) 292 | mock_context1.__aexit__ = AsyncMock(return_value=None) 293 | 294 | mock_context2 = MagicMock() 295 | mock_context2.__aenter__ = AsyncMock(return_value=mock_child_response) 296 | mock_context2.__aexit__ = AsyncMock(return_value=None) 297 | 298 | mock_aiohttp_session._session_instance.post.side_effect = [ 299 | mock_context1, 300 | mock_context2, 301 | ] 302 | 303 | result = await get_block_content("block-uuid-456") 304 | 305 | assert len(result) == 1 306 | assert 
"🔍 **MAIN BLOCK**" in result[0].text 307 | assert ( 308 | "❌ Could not fetch child block with UUID: child-uuid-1" in result[0].text 309 | ) 310 | 311 | @pytest.mark.asyncio 312 | async def test_get_block_content_invalid_child_reference( 313 | self, mock_env_vars, mock_aiohttp_session 314 | ): 315 | """Test block content retrieval with invalid child reference.""" 316 | sample_block_data = { 317 | "id": 456, 318 | "content": "Parent block content", 319 | "uuid": "block-uuid-456", 320 | "properties": {}, 321 | "children": [ 322 | "invalid-child-reference", 323 | ], 324 | "parent": {"id": 123}, 325 | "page": {"id": 789, "name": "Test Page"}, 326 | } 327 | 328 | # Setup mock response 329 | mock_response = MagicMock() 330 | mock_response.status = 200 331 | mock_response.json = AsyncMock(return_value=sample_block_data) 332 | 333 | # Setup session mock 334 | mock_context = MagicMock() 335 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 336 | mock_context.__aexit__ = AsyncMock(return_value=None) 337 | mock_aiohttp_session._session_instance.post.return_value = mock_context 338 | 339 | result = await get_block_content("block-uuid-456") 340 | 341 | assert len(result) == 1 342 | assert "🔍 **MAIN BLOCK**" in result[0].text 343 | assert "❌ Invalid child reference: invalid-child-reference" in result[0].text 344 | 345 | @pytest.mark.asyncio 346 | async def test_get_block_content_http_error( 347 | self, mock_env_vars, mock_aiohttp_session 348 | ): 349 | """Test block content retrieval with HTTP error.""" 350 | # Setup mock response 351 | mock_response = MagicMock() 352 | mock_response.status = 500 353 | 354 | # Setup session mock 355 | mock_context = MagicMock() 356 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 357 | mock_context.__aexit__ = AsyncMock(return_value=None) 358 | mock_aiohttp_session._session_instance.post.return_value = mock_context 359 | 360 | result = await get_block_content("block-uuid-456") 361 | 362 | assert len(result) == 1 363 | assert "❌ Block with UUID 'block-uuid-456' not found" in result[0].text 364 | 365 | @pytest.mark.asyncio 366 | async def test_get_block_content_exception( 367 | self, mock_env_vars, mock_aiohttp_session 368 | ): 369 | """Test block content retrieval with exception.""" 370 | # Setup session mock to raise exception 371 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 372 | "Network error" 373 | ) 374 | 375 | result = await get_block_content("block-uuid-456") 376 | 377 | assert len(result) == 1 378 | assert "❌ Error fetching block content: Network error" in result[0].text 379 | -------------------------------------------------------------------------------- /tests/test_get_linked_flashcards.py: -------------------------------------------------------------------------------- 1 | """Tests for get_linked_flashcards tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.get_linked_flashcards import get_linked_flashcards 8 | 9 | 10 | class TestGetLinkedFlashcards: 11 | """Test cases for get_linked_flashcards function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_get_linked_flashcards_success( 15 | self, mock_env_vars, mock_aiohttp_session 16 | ): 17 | """Test successful linked flashcards retrieval.""" 18 | sample_linked_refs = [ 19 | [ 20 | {"id": 789, "name": "Linked Page", "originalName": "Linked Page"}, 21 | { 22 | "id": 790, 23 | "content": "What is the capital of France? 
#card", 24 | "uuid": "ref-uuid-790", 25 | }, 26 | ] 27 | ] 28 | 29 | sample_page_data = {"id": 123, "name": "test page", "originalName": "Test Page"} 30 | 31 | # Setup mock responses for all API calls 32 | mock_links_response = MagicMock() 33 | mock_links_response.status = 200 34 | mock_links_response.json = AsyncMock(return_value=sample_linked_refs) 35 | 36 | mock_pages_response = MagicMock() 37 | mock_pages_response.status = 200 38 | mock_pages_response.json = AsyncMock(return_value=[sample_page_data]) 39 | 40 | mock_blocks_response = MagicMock() 41 | mock_blocks_response.status = 200 42 | mock_blocks_response.json = AsyncMock(return_value=[]) 43 | 44 | # Setup session mock 45 | mock_context1 = MagicMock() 46 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 47 | mock_context1.__aexit__ = AsyncMock(return_value=None) 48 | 49 | mock_context2 = MagicMock() 50 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 51 | mock_context2.__aexit__ = AsyncMock(return_value=None) 52 | 53 | mock_context3 = MagicMock() 54 | mock_context3.__aenter__ = AsyncMock(return_value=mock_blocks_response) 55 | mock_context3.__aexit__ = AsyncMock(return_value=None) 56 | 57 | mock_aiohttp_session._session_instance.post.side_effect = [ 58 | mock_context1, 59 | mock_context2, 60 | mock_context3, 61 | ] 62 | 63 | result = await get_linked_flashcards("test page") 64 | 65 | assert len(result) == 1 66 | assert ( 67 | "✅ No flashcards found in 'test page' or its linked pages" 68 | in result[0].text 69 | ) 70 | 71 | @pytest.mark.asyncio 72 | async def test_get_linked_flashcards_with_flashcards( 73 | self, mock_env_vars, mock_aiohttp_session 74 | ): 75 | """Test linked flashcards retrieval with actual flashcards.""" 76 | sample_linked_refs = [] 77 | 78 | sample_page_data = {"id": 123, "name": "test page", "originalName": "Test Page"} 79 | 80 | sample_blocks = [ 81 | { 82 | "id": 456, 83 | "content": "What is the capital of France? 
#card", 84 | "uuid": "block-uuid-456", 85 | "properties": {"card-last-interval": 1}, 86 | "children": [ 87 | ["uuid", "child-uuid-1"], 88 | ], 89 | } 90 | ] 91 | 92 | sample_child_block = { 93 | "id": 457, 94 | "content": "Paris is the capital of France", 95 | "uuid": "child-uuid-1", 96 | } 97 | 98 | # Setup mock responses for all API calls 99 | mock_links_response = MagicMock() 100 | mock_links_response.status = 200 101 | mock_links_response.json = AsyncMock(return_value=sample_linked_refs) 102 | 103 | mock_pages_response = MagicMock() 104 | mock_pages_response.status = 200 105 | mock_pages_response.json = AsyncMock(return_value=[sample_page_data]) 106 | 107 | mock_blocks_response = MagicMock() 108 | mock_blocks_response.status = 200 109 | mock_blocks_response.json = AsyncMock(return_value=sample_blocks) 110 | 111 | mock_child_response = MagicMock() 112 | mock_child_response.status = 200 113 | mock_child_response.json = AsyncMock(return_value=sample_child_block) 114 | 115 | # Setup session mock 116 | mock_context1 = MagicMock() 117 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 118 | mock_context1.__aexit__ = AsyncMock(return_value=None) 119 | 120 | mock_context2 = MagicMock() 121 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 122 | mock_context2.__aexit__ = AsyncMock(return_value=None) 123 | 124 | mock_context3 = MagicMock() 125 | mock_context3.__aenter__ = AsyncMock(return_value=mock_blocks_response) 126 | mock_context3.__aexit__ = AsyncMock(return_value=None) 127 | 128 | mock_context4 = MagicMock() 129 | mock_context4.__aenter__ = AsyncMock(return_value=mock_child_response) 130 | mock_context4.__aexit__ = AsyncMock(return_value=None) 131 | 132 | mock_aiohttp_session._session_instance.post.side_effect = [ 133 | mock_context1, 134 | mock_context2, 135 | mock_context3, 136 | mock_context4, 137 | ] 138 | 139 | result = await get_linked_flashcards("test page") 140 | 141 | assert len(result) == 1 142 | assert "🎯 **LINKED FLASHCARDS ANALYSIS**" in result[0].text 143 | assert "What is the capital of France?" in result[0].text 144 | assert "Paris is the capital of France" in result[0].text 145 | 146 | @pytest.mark.asyncio 147 | async def test_get_linked_flashcards_with_multiple_choice( 148 | self, mock_env_vars, mock_aiohttp_session 149 | ): 150 | """Test linked flashcards retrieval with multiple choice questions.""" 151 | sample_linked_refs = [] 152 | 153 | sample_page_data = {"id": 123, "name": "test page", "originalName": "Test Page"} 154 | 155 | sample_blocks = [ 156 | { 157 | "id": 456, 158 | "content": "What is the capital of France? 
#card\n+ [ ] Paris\n+ [ ] London\n- [ ] Berlin", 159 | "uuid": "block-uuid-456", 160 | "properties": {}, 161 | "children": [], 162 | } 163 | ] 164 | 165 | # Setup mock responses for all API calls 166 | mock_links_response = MagicMock() 167 | mock_links_response.status = 200 168 | mock_links_response.json = AsyncMock(return_value=sample_linked_refs) 169 | 170 | mock_pages_response = MagicMock() 171 | mock_pages_response.status = 200 172 | mock_pages_response.json = AsyncMock(return_value=[sample_page_data]) 173 | 174 | mock_blocks_response = MagicMock() 175 | mock_blocks_response.status = 200 176 | mock_blocks_response.json = AsyncMock(return_value=sample_blocks) 177 | 178 | # Setup session mock 179 | mock_context1 = MagicMock() 180 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 181 | mock_context1.__aexit__ = AsyncMock(return_value=None) 182 | 183 | mock_context2 = MagicMock() 184 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 185 | mock_context2.__aexit__ = AsyncMock(return_value=None) 186 | 187 | mock_context3 = MagicMock() 188 | mock_context3.__aenter__ = AsyncMock(return_value=mock_blocks_response) 189 | mock_context3.__aexit__ = AsyncMock(return_value=None) 190 | 191 | mock_aiohttp_session._session_instance.post.side_effect = [ 192 | mock_context1, 193 | mock_context2, 194 | mock_context3, 195 | ] 196 | 197 | result = await get_linked_flashcards("test page") 198 | 199 | assert len(result) == 1 200 | assert "🎯 **LINKED FLASHCARDS ANALYSIS**" in result[0].text 201 | assert "What is the capital of France?" in result[0].text 202 | assert "+ [ ] Paris" in result[0].text 203 | 204 | @pytest.mark.asyncio 205 | async def test_get_linked_flashcards_with_linked_pages( 206 | self, mock_env_vars, mock_aiohttp_session 207 | ): 208 | """Test linked flashcards retrieval with linked pages.""" 209 | sample_linked_refs = [ 210 | [ 211 | {"id": 789, "name": "linked page", "originalName": "Linked Page"}, 212 | { 213 | "id": 790, 214 | "content": "Reference content", 215 | "uuid": "ref-uuid-790", 216 | }, 217 | ] 218 | ] 219 | 220 | sample_page_data = [ 221 | {"id": 123, "name": "test page", "originalName": "Test Page"}, 222 | {"id": 789, "name": "linked page", "originalName": "Linked Page"}, 223 | ] 224 | 225 | sample_blocks = [] 226 | 227 | # Setup mock responses for all API calls 228 | mock_links_response = MagicMock() 229 | mock_links_response.status = 200 230 | mock_links_response.json = AsyncMock(return_value=sample_linked_refs) 231 | 232 | mock_pages_response = MagicMock() 233 | mock_pages_response.status = 200 234 | mock_pages_response.json = AsyncMock(return_value=sample_page_data) 235 | 236 | mock_blocks_response = MagicMock() 237 | mock_blocks_response.status = 200 238 | mock_blocks_response.json = AsyncMock(return_value=sample_blocks) 239 | 240 | # Setup session mock 241 | mock_context1 = MagicMock() 242 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 243 | mock_context1.__aexit__ = AsyncMock(return_value=None) 244 | 245 | mock_context2 = MagicMock() 246 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 247 | mock_context2.__aexit__ = AsyncMock(return_value=None) 248 | 249 | mock_context3 = MagicMock() 250 | mock_context3.__aenter__ = AsyncMock(return_value=mock_blocks_response) 251 | mock_context3.__aexit__ = AsyncMock(return_value=None) 252 | 253 | mock_context4 = MagicMock() 254 | mock_context4.__aenter__ = AsyncMock(return_value=mock_blocks_response) 255 | mock_context4.__aexit__ = 
AsyncMock(return_value=None) 256 | 257 | mock_aiohttp_session._session_instance.post.side_effect = [ 258 | mock_context1, 259 | mock_context2, 260 | mock_context3, 261 | mock_context4, 262 | ] 263 | 264 | result = await get_linked_flashcards("test page") 265 | 266 | assert len(result) == 1 267 | assert ( 268 | "✅ No flashcards found in 'test page' or its linked pages" 269 | in result[0].text 270 | ) 271 | 272 | @pytest.mark.asyncio 273 | async def test_get_linked_flashcards_links_http_error( 274 | self, mock_env_vars, mock_aiohttp_session 275 | ): 276 | """Test linked flashcards retrieval with links HTTP error.""" 277 | # Setup mock response for links call 278 | mock_links_response = MagicMock() 279 | mock_links_response.status = 500 280 | 281 | # Setup session mock 282 | mock_context = MagicMock() 283 | mock_context.__aenter__ = AsyncMock(return_value=mock_links_response) 284 | mock_context.__aexit__ = AsyncMock(return_value=None) 285 | mock_aiohttp_session._session_instance.post.return_value = mock_context 286 | 287 | result = await get_linked_flashcards("Test Page") 288 | 289 | assert len(result) == 1 290 | assert "❌ Target page 'Test Page' not found" in result[0].text 291 | 292 | @pytest.mark.asyncio 293 | async def test_get_linked_flashcards_pages_http_error( 294 | self, mock_env_vars, mock_aiohttp_session 295 | ): 296 | """Test linked flashcards retrieval with pages HTTP error.""" 297 | # Setup mock responses 298 | mock_links_response = MagicMock() 299 | mock_links_response.status = 200 300 | mock_links_response.json = AsyncMock(return_value=[]) 301 | 302 | mock_pages_response = MagicMock() 303 | mock_pages_response.status = 500 304 | 305 | # Setup session mock 306 | mock_context1 = MagicMock() 307 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 308 | mock_context1.__aexit__ = AsyncMock(return_value=None) 309 | 310 | mock_context2 = MagicMock() 311 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 312 | mock_context2.__aexit__ = AsyncMock(return_value=None) 313 | 314 | mock_aiohttp_session._session_instance.post.side_effect = [ 315 | mock_context1, 316 | mock_context2, 317 | ] 318 | 319 | result = await get_linked_flashcards("Test Page") 320 | 321 | assert len(result) == 1 322 | assert "❌ Target page 'Test Page' not found" in result[0].text 323 | 324 | @pytest.mark.asyncio 325 | async def test_get_linked_flashcards_blocks_http_error( 326 | self, mock_env_vars, mock_aiohttp_session 327 | ): 328 | """Test linked flashcards retrieval with blocks HTTP error.""" 329 | sample_page_data = {"id": 123, "name": "test page", "originalName": "Test Page"} 330 | 331 | # Setup mock responses 332 | mock_links_response = MagicMock() 333 | mock_links_response.status = 200 334 | mock_links_response.json = AsyncMock(return_value=[]) 335 | 336 | mock_pages_response = MagicMock() 337 | mock_pages_response.status = 200 338 | mock_pages_response.json = AsyncMock(return_value=[sample_page_data]) 339 | 340 | mock_blocks_response = MagicMock() 341 | mock_blocks_response.status = 500 342 | 343 | # Setup session mock 344 | mock_context1 = MagicMock() 345 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 346 | mock_context1.__aexit__ = AsyncMock(return_value=None) 347 | 348 | mock_context2 = MagicMock() 349 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 350 | mock_context2.__aexit__ = AsyncMock(return_value=None) 351 | 352 | mock_context3 = MagicMock() 353 | mock_context3.__aenter__ = 
AsyncMock(return_value=mock_blocks_response) 354 | mock_context3.__aexit__ = AsyncMock(return_value=None) 355 | 356 | mock_aiohttp_session._session_instance.post.side_effect = [ 357 | mock_context1, 358 | mock_context2, 359 | mock_context3, 360 | ] 361 | 362 | result = await get_linked_flashcards("test page") 363 | 364 | assert len(result) == 1 365 | assert ( 366 | "✅ No flashcards found in 'test page' or its linked pages" 367 | in result[0].text 368 | ) 369 | 370 | @pytest.mark.asyncio 371 | async def test_get_linked_flashcards_exception( 372 | self, mock_env_vars, mock_aiohttp_session 373 | ): 374 | """Test linked flashcards retrieval with exception.""" 375 | # Setup session mock to raise exception 376 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 377 | "Network error" 378 | ) 379 | 380 | result = await get_linked_flashcards("Test Page") 381 | 382 | assert len(result) == 1 383 | assert "❌ Error fetching linked flashcards: Network error" in result[0].text 384 | -------------------------------------------------------------------------------- /tests/test_get_page_links.py: -------------------------------------------------------------------------------- 1 | """Tests for get_page_links tool.""" 2 | 3 | from unittest.mock import AsyncMock, MagicMock 4 | 5 | import pytest 6 | 7 | from src.tools.get_page_links import get_page_links 8 | 9 | 10 | class TestGetPageLinks: 11 | """Test cases for get_page_links function.""" 12 | 13 | @pytest.mark.asyncio 14 | async def test_get_page_links_success( 15 | self, mock_env_vars, mock_aiohttp_session, sample_page_data 16 | ): 17 | """Test successful page links retrieval.""" 18 | # Setup mock responses for both API calls 19 | mock_links_response = MagicMock() 20 | mock_links_response.status = 200 21 | mock_links_response.json = AsyncMock(return_value=[sample_page_data]) 22 | 23 | mock_pages_response = MagicMock() 24 | mock_pages_response.status = 200 25 | mock_pages_response.json = AsyncMock(return_value=[sample_page_data]) 26 | 27 | # Setup session mock 28 | # Mock both post calls 29 | mock_context1 = MagicMock() 30 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 31 | mock_context1.__aexit__ = AsyncMock(return_value=None) 32 | 33 | mock_context2 = MagicMock() 34 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 35 | mock_context2.__aexit__ = AsyncMock(return_value=None) 36 | 37 | mock_aiohttp_session._session_instance.post.side_effect = [ 38 | mock_context1, 39 | mock_context2, 40 | ] 41 | 42 | result = await get_page_links("Test Page") 43 | 44 | assert len(result) == 1 45 | assert "🔗 **PAGE LINKS ANALYSIS**" in result[0].text 46 | assert "Test Page" in result[0].text 47 | 48 | @pytest.mark.asyncio 49 | async def test_get_page_links_with_multiple_references( 50 | self, mock_env_vars, mock_aiohttp_session 51 | ): 52 | """Test page links retrieval with multiple references.""" 53 | sample_links = [ 54 | [ 55 | {"id": 123, "name": "Page 1", "originalName": "Page 1"}, 56 | {"id": 456, "content": "Reference 1", "uuid": "ref-1"}, 57 | {"id": 457, "content": "Reference 2", "uuid": "ref-2"}, 58 | ], 59 | [ 60 | {"id": 789, "name": "Page 2", "originalName": "Page 2"}, 61 | {"id": 101, "content": "Reference 3", "uuid": "ref-3"}, 62 | ], 63 | ] 64 | 65 | sample_pages = [ 66 | { 67 | "id": 123, 68 | "name": "Page 1", 69 | "originalName": "Page 1", 70 | "createdAt": 1640995200000, # 2022-01-01 71 | "updatedAt": 1640995200000, 72 | "journal?": False, 73 | "uuid": "page-uuid-1", 74 | "properties": 
{"important": "yes"}, 75 | }, 76 | { 77 | "id": 789, 78 | "name": "Page 2", 79 | "originalName": "Page 2", 80 | "createdAt": 1640995200000, 81 | "updatedAt": 1640995200000, 82 | "journal?": True, 83 | "uuid": "page-uuid-2", 84 | "properties": {"tags": ["test"]}, 85 | }, 86 | ] 87 | 88 | # Setup mock responses for both API calls 89 | mock_links_response = MagicMock() 90 | mock_links_response.status = 200 91 | mock_links_response.json = AsyncMock(return_value=sample_links) 92 | 93 | mock_pages_response = MagicMock() 94 | mock_pages_response.status = 200 95 | mock_pages_response.json = AsyncMock(return_value=sample_pages) 96 | 97 | # Setup session mock 98 | mock_context1 = MagicMock() 99 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 100 | mock_context1.__aexit__ = AsyncMock(return_value=None) 101 | 102 | mock_context2 = MagicMock() 103 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 104 | mock_context2.__aexit__ = AsyncMock(return_value=None) 105 | 106 | mock_aiohttp_session._session_instance.post.side_effect = [ 107 | mock_context1, 108 | mock_context2, 109 | ] 110 | 111 | result = await get_page_links("Test Page") 112 | 113 | assert len(result) == 1 114 | assert "🔗 **PAGE LINKS ANALYSIS**" in result[0].text 115 | assert "Page 1" in result[0].text 116 | assert "Page 2" in result[0].text 117 | assert "📅" in result[0].text # Journal page emoji 118 | assert "📄" in result[0].text # Regular page emoji 119 | 120 | @pytest.mark.asyncio 121 | async def test_get_page_links_with_properties( 122 | self, mock_env_vars, mock_aiohttp_session 123 | ): 124 | """Test page links retrieval with page properties.""" 125 | sample_links = [ 126 | [ 127 | {"id": 123, "name": "Page 1", "originalName": "Page 1"}, 128 | {"id": 456, "content": "Reference 1", "uuid": "ref-1"}, 129 | ] 130 | ] 131 | 132 | sample_pages = [ 133 | { 134 | "id": 123, 135 | "name": "Page 1", 136 | "originalName": "Page 1", 137 | "createdAt": 1640995200000, 138 | "updatedAt": 1640995200000, 139 | "journal?": False, 140 | "uuid": "page-uuid-1", 141 | "properties": { 142 | "important": "yes", 143 | "tags": ["test", "example"], 144 | "priority": 1, 145 | "collapsed": True, # Should be filtered out 146 | }, 147 | } 148 | ] 149 | 150 | # Setup mock responses for both API calls 151 | mock_links_response = MagicMock() 152 | mock_links_response.status = 200 153 | mock_links_response.json = AsyncMock(return_value=sample_links) 154 | 155 | mock_pages_response = MagicMock() 156 | mock_pages_response.status = 200 157 | mock_pages_response.json = AsyncMock(return_value=sample_pages) 158 | 159 | # Setup session mock 160 | mock_context1 = MagicMock() 161 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 162 | mock_context1.__aexit__ = AsyncMock(return_value=None) 163 | 164 | mock_context2 = MagicMock() 165 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 166 | mock_context2.__aexit__ = AsyncMock(return_value=None) 167 | 168 | mock_aiohttp_session._session_instance.post.side_effect = [ 169 | mock_context1, 170 | mock_context2, 171 | ] 172 | 173 | result = await get_page_links("Test Page") 174 | 175 | assert len(result) == 1 176 | assert "🔗 **PAGE LINKS ANALYSIS**" in result[0].text 177 | assert "important: yes" in result[0].text 178 | assert "tags: test, example" in result[0].text 179 | assert "priority: 1" in result[0].text 180 | assert "collapsed" not in result[0].text 181 | 182 | @pytest.mark.asyncio 183 | async def test_get_page_links_with_timestamps( 184 | 
self, mock_env_vars, mock_aiohttp_session 185 | ): 186 | """Test page links retrieval with timestamp formatting.""" 187 | sample_links = [ 188 | [ 189 | {"id": 123, "name": "Page 1", "originalName": "Page 1"}, 190 | {"id": 456, "content": "Reference 1", "uuid": "ref-1"}, 191 | ] 192 | ] 193 | 194 | sample_pages = [ 195 | { 196 | "id": 123, 197 | "name": "Page 1", 198 | "originalName": "Page 1", 199 | "createdAt": 1640995200000, # 2022-01-01 00:00:00 200 | "updatedAt": 1640995200000, 201 | "journal?": False, 202 | "uuid": "page-uuid-1", 203 | "properties": {}, 204 | } 205 | ] 206 | 207 | # Setup mock responses for both API calls 208 | mock_links_response = MagicMock() 209 | mock_links_response.status = 200 210 | mock_links_response.json = AsyncMock(return_value=sample_links) 211 | 212 | mock_pages_response = MagicMock() 213 | mock_pages_response.status = 200 214 | mock_pages_response.json = AsyncMock(return_value=sample_pages) 215 | 216 | # Setup session mock 217 | mock_context1 = MagicMock() 218 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 219 | mock_context1.__aexit__ = AsyncMock(return_value=None) 220 | 221 | mock_context2 = MagicMock() 222 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 223 | mock_context2.__aexit__ = AsyncMock(return_value=None) 224 | 225 | mock_aiohttp_session._session_instance.post.side_effect = [ 226 | mock_context1, 227 | mock_context2, 228 | ] 229 | 230 | result = await get_page_links("Test Page") 231 | 232 | assert len(result) == 1 233 | assert "🔗 **PAGE LINKS ANALYSIS**" in result[0].text 234 | assert "Page 1" in result[0].text 235 | 236 | @pytest.mark.asyncio 237 | async def test_get_page_links_with_invalid_timestamps( 238 | self, mock_env_vars, mock_aiohttp_session 239 | ): 240 | """Test page links retrieval with invalid timestamps.""" 241 | sample_links = [ 242 | [ 243 | {"id": 123, "name": "Page 1", "originalName": "Page 1"}, 244 | {"id": 456, "content": "Reference 1", "uuid": "ref-1"}, 245 | ] 246 | ] 247 | 248 | sample_pages = [ 249 | { 250 | "id": 123, 251 | "name": "Page 1", 252 | "originalName": "Page 1", 253 | "createdAt": "invalid-timestamp", 254 | "updatedAt": None, 255 | "journal?": False, 256 | "uuid": "page-uuid-1", 257 | "properties": {}, 258 | } 259 | ] 260 | 261 | # Setup mock responses for both API calls 262 | mock_links_response = MagicMock() 263 | mock_links_response.status = 200 264 | mock_links_response.json = AsyncMock(return_value=sample_links) 265 | 266 | mock_pages_response = MagicMock() 267 | mock_pages_response.status = 200 268 | mock_pages_response.json = AsyncMock(return_value=sample_pages) 269 | 270 | # Setup session mock 271 | mock_context1 = MagicMock() 272 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 273 | mock_context1.__aexit__ = AsyncMock(return_value=None) 274 | 275 | mock_context2 = MagicMock() 276 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 277 | mock_context2.__aexit__ = AsyncMock(return_value=None) 278 | 279 | mock_aiohttp_session._session_instance.post.side_effect = [ 280 | mock_context1, 281 | mock_context2, 282 | ] 283 | 284 | result = await get_page_links("Test Page") 285 | 286 | assert len(result) == 1 287 | assert "🔗 **PAGE LINKS ANALYSIS**" in result[0].text 288 | assert "invalid-timestamp" in result[0].text 289 | assert "N/A" in result[0].text 290 | 291 | @pytest.mark.asyncio 292 | async def test_get_page_links_empty(self, mock_env_vars, mock_aiohttp_session): 293 | """Test page links retrieval with 
empty result.""" 294 | # Setup mock responses for both API calls 295 | mock_links_response = MagicMock() 296 | mock_links_response.status = 200 297 | mock_links_response.json = AsyncMock(return_value=[]) 298 | 299 | mock_pages_response = MagicMock() 300 | mock_pages_response.status = 200 301 | mock_pages_response.json = AsyncMock(return_value=[]) 302 | 303 | # Setup session mock 304 | mock_context1 = MagicMock() 305 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 306 | mock_context1.__aexit__ = AsyncMock(return_value=None) 307 | 308 | mock_context2 = MagicMock() 309 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 310 | mock_context2.__aexit__ = AsyncMock(return_value=None) 311 | 312 | mock_aiohttp_session._session_instance.post.side_effect = [ 313 | mock_context1, 314 | mock_context2, 315 | ] 316 | 317 | result = await get_page_links("Test Page") 318 | 319 | assert len(result) == 1 320 | assert "✅ No pages link to 'Test Page'" in result[0].text 321 | 322 | @pytest.mark.asyncio 323 | async def test_get_page_links_pages_http_error( 324 | self, mock_env_vars, mock_aiohttp_session 325 | ): 326 | """Test page links retrieval with pages HTTP error.""" 327 | sample_links = [ 328 | [ 329 | {"id": 123, "name": "Page 1", "originalName": "Page 1"}, 330 | {"id": 456, "content": "Reference 1", "uuid": "ref-1"}, 331 | ] 332 | ] 333 | 334 | # Setup mock responses 335 | mock_links_response = MagicMock() 336 | mock_links_response.status = 200 337 | mock_links_response.json = AsyncMock(return_value=sample_links) 338 | 339 | mock_pages_response = MagicMock() 340 | mock_pages_response.status = 500 341 | 342 | # Setup session mock 343 | mock_context1 = MagicMock() 344 | mock_context1.__aenter__ = AsyncMock(return_value=mock_links_response) 345 | mock_context1.__aexit__ = AsyncMock(return_value=None) 346 | 347 | mock_context2 = MagicMock() 348 | mock_context2.__aenter__ = AsyncMock(return_value=mock_pages_response) 349 | mock_context2.__aexit__ = AsyncMock(return_value=None) 350 | 351 | mock_aiohttp_session._session_instance.post.side_effect = [ 352 | mock_context1, 353 | mock_context2, 354 | ] 355 | 356 | result = await get_page_links("Test Page") 357 | 358 | assert len(result) == 1 359 | assert "🔗 **PAGE LINKS ANALYSIS**" in result[0].text 360 | assert "Page 1" in result[0].text 361 | 362 | @pytest.mark.asyncio 363 | async def test_get_page_links_http_error(self, mock_env_vars, mock_aiohttp_session): 364 | """Test page links retrieval with HTTP error.""" 365 | # Setup mock response 366 | mock_response = MagicMock() 367 | mock_response.status = 500 368 | 369 | # Setup session mock 370 | mock_context = MagicMock() 371 | mock_context.__aenter__ = AsyncMock(return_value=mock_response) 372 | mock_context.__aexit__ = AsyncMock(return_value=None) 373 | mock_aiohttp_session._session_instance.post.return_value = mock_context 374 | 375 | result = await get_page_links("Test Page") 376 | 377 | assert len(result) == 1 378 | assert "❌ Failed to fetch page links: HTTP 500" in result[0].text 379 | 380 | @pytest.mark.asyncio 381 | async def test_get_page_links_exception(self, mock_env_vars, mock_aiohttp_session): 382 | """Test page links retrieval with exception.""" 383 | # Setup session mock to raise exception 384 | mock_aiohttp_session._session_instance.post.side_effect = Exception( 385 | "Network error" 386 | ) 387 | 388 | result = await get_page_links("Test Page") 389 | 390 | assert len(result) == 1 391 | assert "❌ Error fetching page links: Network error" in 
result[0].text 392 | -------------------------------------------------------------------------------- /src/tools/get_linked_flashcards.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import Any, List 4 | 5 | import aiohttp 6 | from dotenv import load_dotenv 7 | from mcp.types import TextContent 8 | 9 | # Load environment variables from .env file in project root 10 | env_path = Path(__file__).parent.parent.parent / ".env" 11 | load_dotenv(env_path) 12 | 13 | 14 | async def get_linked_flashcards(page_identifier: str) -> List[TextContent]: 15 | """ 16 | Get flashcards from the specified page and all pages that link to it. 17 | 18 | Retrieves blocks from the target page and all linked pages, identifies 19 | flashcard blocks by #card tag, and extracts questions and answers with 20 | comprehensive metadata. 21 | 22 | Args: 23 | page_identifier: The name or UUID of the page to search flashcards from 24 | """ 25 | endpoint = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api") 26 | token = os.getenv("LOGSEQ_API_TOKEN", "auth") 27 | 28 | headers = {"Authorization": f"Bearer {token}"} 29 | 30 | def extract_flashcard_content(content): 31 | """Extract the question (plus any choice options) from flashcard content; the answer element of the returned tuple is always None""" 32 | if "#card" not in content: 33 | return None, None 34 | 35 | # Remove #card tag and clean up 36 | clean_content = content.replace("#card", "").strip() 37 | 38 | # Split by newlines to separate question from options 39 | lines = clean_content.split("\n") 40 | if not lines:  # defensive; split("\n") always yields at least one element 41 | return clean_content, None 42 | 43 | # First line is usually the question 44 | question = lines[0].strip() 45 | 46 | # Remaining lines might be multiple choice options 47 | options = [] 48 | for line in lines[1:]: 49 | line = line.strip() 50 | if line and ( 51 | line.startswith("+ [") 52 | or line.startswith("- [") 53 | # the indented " + [" / " - [" checks were removed: 54 | # `line` is stripped above, so those prefixes could never match 55 | ): 56 | options.append(line) 57 | 58 | # Combine question with options if present 59 | if options: 60 | full_question = question + "\n" + "\n".join(options) 61 | else: 62 | full_question = question 63 | 64 | return full_question, None 65 | 66 | def format_properties_display(props): 67 | """Format properties for display""" 68 | if not props: 69 | return "None" 70 | 71 | formatted_props = [] 72 | for property_name, value in props.items(): 73 | if ( 74 | property_name 75 | not in [ 76 | "collapsed", 77 | "card-last-interval", 78 | "card-repeats", 79 | "card-ease-factor", 80 | ] 81 | and value 82 | ): 83 | if isinstance(value, list): 84 | value_str = ", ".join(str(v) for v in value) 85 | else: 86 | value_str = str(value) 87 | formatted_props.append(f"{property_name}: {value_str}") 88 | 89 | return " | ".join(formatted_props[:3]) if formatted_props else "None" 90 | 91 | async def get_page_blocks(session, page_id): 92 | """Get blocks for a specific page""" 93 | payload = {"method": "logseq.Editor.getPageBlocksTree", "args": [page_id]} 94 | 95 | async with session.post(endpoint, json=payload, headers=headers) as response: 96 | if response.status == 200: 97 | return await response.json() or [] 98 | return [] 99 | 100 | async def get_block_by_uuid(session, block_uuid): 101 | """Get a specific block by UUID""" 102 | payload = {"method": "logseq.Editor.getBlock", "args": [block_uuid]} 103 | 104 | async with session.post(endpoint, json=payload, headers=headers) as response: 105 | if response.status == 200: 106 | return await response.json() 107 | return
None 108 | 109 | def find_flashcards_in_blocks(blocks, page_info): 110 | """Recursively find flashcard blocks in a block tree""" 111 | flashcards = [] 112 | 113 | def search_blocks(block_list, page_data): 114 | for block in block_list: 115 | if isinstance(block, dict): 116 | content = block.get("content", "") 117 | 118 | # Check if this block is a flashcard 119 | if "#card" in content: 120 | flashcard_data = { 121 | "block_id": block.get("id"), 122 | "block_uuid": block.get("uuid"), 123 | "content": content, 124 | "properties": block.get("properties", {}), 125 | "children": block.get("children", []), 126 | "page": page_data, 127 | } 128 | flashcards.append(flashcard_data) 129 | 130 | # Recursively search children 131 | children = block.get("children", []) 132 | if children: 133 | search_blocks(children, page_data) 134 | 135 | search_blocks(blocks, page_info) 136 | return flashcards 137 | 138 | async with aiohttp.ClientSession() as session: 139 | try: 140 | # 1. Get page linked references to find all related pages 141 | links_payload = { 142 | "method": "logseq.Editor.getPageLinkedReferences", 143 | "args": [page_identifier], 144 | } 145 | 146 | async with session.post( 147 | endpoint, json=links_payload, headers=headers 148 | ) as response: 149 | linked_refs: list[Any] = []  # each entry is a [page, referencing blocks] group, not a dict 150 | if response.status == 200: 151 | try: 152 | linked_refs = await response.json() or [] 153 | except Exception: 154 | linked_refs = [] 155 | 156 | # 2. Get all pages for metadata 157 | all_pages_payload = {"method": "logseq.Editor.getAllPages"} 158 | 159 | async with session.post( 160 | endpoint, json=all_pages_payload, headers=headers 161 | ) as response: 162 | all_pages: list[dict[str, Any]] = [] 163 | if response.status == 200: 164 | try: 165 | all_pages = await response.json() or [] 166 | except Exception: 167 | all_pages = [] 168 | 169 | # 3. Create page lookup and collect page IDs to search 170 | page_lookup: dict[Any, dict[str, Any]] = {}  # keyed by id, name, and originalName 171 | for page in all_pages: 172 | if isinstance(page, dict): 173 | page_id = page.get("id") 174 | page_name = page.get("name", "").lower() 175 | page_original = page.get("originalName", "").lower() 176 | 177 | if page_id: 178 | page_lookup[page_id] = page 179 | page_lookup[page_name] = page 180 | page_lookup[page_original] = page 181 | 182 | # Find target page 183 | target_page = None 184 | search_identifier = page_identifier.lower() 185 | if search_identifier in page_lookup: 186 | target_page = page_lookup[search_identifier] 187 | 188 | if not target_page: 189 | return [ 190 | TextContent( 191 | type="text", 192 | text=f"❌ Target page '{page_identifier}' not found", 193 | ) 194 | ] 195 | 196 | # 4. Collect all pages to search (target + linked pages) 197 | pages_to_search = [target_page] 198 | 199 | # Add linked pages 200 | for link_group in linked_refs: 201 | if isinstance(link_group, list) and len(link_group) >= 1: 202 | page_ref = link_group[0] 203 | if isinstance(page_ref, dict): 204 | page_id = page_ref.get("id") 205 | if page_id and page_id in page_lookup: 206 | pages_to_search.append(page_lookup[page_id]) 207 | 208 | # 5.
Get blocks from all pages and find flashcards 209 | all_flashcards = [] 210 | 211 | for page in pages_to_search: 212 | page_blocks = await get_page_blocks( 213 | session, page.get("name") or page.get("id") 214 | ) 215 | if page_blocks: 216 | page_flashcards = find_flashcards_in_blocks(page_blocks, page) 217 | all_flashcards.extend(page_flashcards) 218 | 219 | if not all_flashcards: 220 | return [ 221 | TextContent( 222 | type="text", 223 | text=f"✅ No flashcards found in '{page_identifier}' or its linked pages", 224 | ) 225 | ] 226 | 227 | # 6. Enrich flashcards with children (answers) 228 | enriched_flashcards = [] 229 | 230 | for flashcard in all_flashcards: 231 | # Extract question 232 | question, _ = extract_flashcard_content(flashcard["content"]) 233 | 234 | # Get children (answers) 235 | children = flashcard["children"] 236 | answers = [] 237 | 238 | for child in children: 239 | if ( 240 | isinstance(child, list) 241 | and len(child) >= 2 242 | and child[0] == "uuid" 243 | ): 244 | child_uuid = child[1] 245 | child_block = await get_block_by_uuid(session, child_uuid) 246 | if child_block: 247 | answer_content = child_block.get("content", "").strip() 248 | if answer_content: 249 | answers.append( 250 | { 251 | "content": answer_content, 252 | "block_id": child_block.get("id"), 253 | "block_uuid": child_block.get("uuid"), 254 | } 255 | ) 256 | 257 | enriched_flashcard = { 258 | "question": question, 259 | "answers": answers, 260 | "properties": flashcard["properties"], 261 | "block_id": flashcard["block_id"], 262 | "block_uuid": flashcard["block_uuid"], 263 | "page": { 264 | "name": flashcard["page"].get("originalName") 265 | or flashcard["page"].get("name"), 266 | "id": flashcard["page"].get("id"), 267 | "uuid": flashcard["page"].get("uuid"), 268 | }, 269 | } 270 | enriched_flashcards.append(enriched_flashcard) 271 | 272 | # 7. Sort flashcards by page name, then by block ID 273 | def sort_flashcards(flashcard): 274 | return (flashcard["page"]["name"] or "", flashcard["block_id"] or 0) 275 | 276 | # Decorate-sort-undecorate: sort on the key alone so that equal keys never fall through to comparing the flashcard dicts (which would raise TypeError) 277 | flashcard_sorts = [ 278 | (sort_flashcards(flashcard), flashcard) 279 | for flashcard in enriched_flashcards 280 | ] 281 | flashcard_sorts.sort(key=lambda pair: pair[0]) 282 | enriched_flashcards = [flashcard for _, flashcard in flashcard_sorts] 283 | 284 | # 8.
Build output 285 | target_page_name = target_page.get("originalName") or target_page.get( 286 | "name" 287 | ) 288 | 289 | output_lines = [ 290 | "🎯 **LINKED FLASHCARDS ANALYSIS**", 291 | f"📄 Target Page: {target_page_name}", 292 | f"🔗 Searched {len(pages_to_search)} pages (target + {len(pages_to_search) - 1} linked)", 293 | f"💡 Found {len(enriched_flashcards)} flashcards total", 294 | "", 295 | "🧠 **FLASHCARDS:**", 296 | "", 297 | ] 298 | 299 | # Group flashcards by page 300 | flashcards_by_page: dict[str, list[dict[str, Any]]] = {} 301 | for flashcard in enriched_flashcards: 302 | page_name = flashcard["page"]["name"] 303 | if page_name not in flashcards_by_page: 304 | flashcards_by_page[page_name] = [] 305 | flashcards_by_page[page_name].append(flashcard) 306 | 307 | for page_name, page_flashcards in flashcards_by_page.items(): 308 | output_lines.extend( 309 | [f"📚 **{page_name}** ({len(page_flashcards)} flashcards)", ""] 310 | ) 311 | 312 | for i, flashcard in enumerate(page_flashcards, 1): 313 | # Question 314 | output_lines.extend( 315 | [ 316 | f"💡 **Flashcard {i}**", 317 | f" 🔑 Block ID: {flashcard['block_id']} | UUID: {flashcard['block_uuid']}", 318 | f" 📄 Page: {flashcard['page']['name']} (ID: {flashcard['page']['id']})", 319 | ] 320 | ) 321 | 322 | # Properties 323 | props_display = format_properties_display(flashcard["properties"]) 324 | if props_display != "None": 325 | output_lines.append(f" ⚙️ Properties: {props_display}") 326 | 327 | # Question 328 | output_lines.extend( 329 | ["", " ❓ **QUESTION:**", f" {flashcard['question']}", ""] 330 | ) 331 | 332 | # Answers 333 | if flashcard["answers"]: 334 | output_lines.append(" 💡 **ANSWERS:**") 335 | for j, answer in enumerate(flashcard["answers"], 1): 336 | output_lines.extend( 337 | [ 338 | f" {j}. {answer['content']}", 339 | f" └─ Block ID: {answer['block_id']} | UUID: {answer['block_uuid']}", 340 | ] 341 | ) 342 | else: 343 | output_lines.append(" 💡 **ANSWERS:** No answer blocks found") 344 | 345 | output_lines.append("") 346 | 347 | # 9. Add summary 348 | total_answers = sum(len(f["answers"]) for f in enriched_flashcards) 349 | 350 | output_lines.extend( 351 | [ 352 | "📊 **SUMMARY:**", 353 | f"• Total flashcards: {len(enriched_flashcards)}", 354 | f"• Total answer blocks: {total_answers}", 355 | f"• Pages with flashcards: {len(flashcards_by_page)}", 356 | f"• Average answers per flashcard: {total_answers / len(enriched_flashcards):.1f}" 357 | if enriched_flashcards 358 | else "• Average answers per flashcard: 0", 359 | "", 360 | ] 361 | ) 362 | 363 | return [TextContent(type="text", text="\n".join(output_lines))] 364 | 365 | except Exception as e: 366 | return [ 367 | TextContent( 368 | type="text", text=f"❌ Error fetching linked flashcards: {str(e)}" 369 | ) 370 | ] 371 | --------------------------------------------------------------------------------
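Illustrative usage (not part of the repository tree above): a minimal, hedged sketch that replays the first two Logseq HTTP API calls get_linked_flashcards issues, logseq.Editor.getPageLinkedReferences followed by logseq.Editor.getAllPages. The endpoint and token defaults mirror the code above; the page name "Test Page" is a placeholder assumption.

import asyncio
import os

import aiohttp

ENDPOINT = os.getenv("LOGSEQ_API_ENDPOINT", "http://127.0.0.1:12315/api")
TOKEN = os.getenv("LOGSEQ_API_TOKEN", "auth")  # assumption: export a real token first


async def main() -> None:
    headers = {"Authorization": f"Bearer {TOKEN}"}
    page = "Test Page"  # placeholder page name
    async with aiohttp.ClientSession() as session:
        # 1. Groups of [linking page, referencing blocks] for the target page
        links_payload = {
            "method": "logseq.Editor.getPageLinkedReferences",
            "args": [page],
        }
        async with session.post(ENDPOINT, json=links_payload, headers=headers) as resp:
            linked_refs = (await resp.json() or []) if resp.status == 200 else []

        # 2. Full page list, used to resolve ids and names to page metadata
        pages_payload = {"method": "logseq.Editor.getAllPages"}
        async with session.post(ENDPOINT, json=pages_payload, headers=headers) as resp:
            all_pages = (await resp.json() or []) if resp.status == 200 else []

    print(f"{len(linked_refs)} link groups; {len(all_pages)} pages in graph")


if __name__ == "__main__":
    asyncio.run(main())

Run it against a local Logseq instance with the HTTP API server enabled; the two responses are exactly the inputs get_linked_flashcards walks before fetching page blocks and answer blocks.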