├── tests ├── tools │ ├── __init__.py │ ├── test_system_info.py │ ├── test_services.py │ └── test_processes.py ├── utils │ ├── __init__.py │ ├── test_enum.py │ ├── test_decorators.py │ ├── test_format.py │ └── test_validation.py ├── _vendor │ ├── __init__.py │ └── test_vendor.py ├── connection │ ├── __init__.py │ └── ssh │ │ ├── __init__.py │ │ ├── test_get_bin_path.py │ │ ├── test_execute_command.py │ │ ├── test_discover_ssh_key.py │ │ ├── test_host_key_verification.py │ │ ├── test_timeout.py │ │ └── test_connection_manager.py ├── test__main__.py ├── __init__.py ├── test_server.py ├── test_logging_config.py ├── conftest.py ├── test_config.py └── test_audit.py ├── src └── linux_mcp_server │ ├── connection │ └── __init__.py │ ├── __init__.py │ ├── utils │ ├── __init__.py │ ├── types.py │ ├── enum.py │ ├── format.py │ ├── validation.py │ └── decorators.py │ ├── server.py │ ├── __main__.py │ ├── config.py │ ├── tools │ ├── __init__.py │ ├── services.py │ ├── logs.py │ └── storage.py │ ├── _vendor │ └── __init__.py │ ├── logging_config.py │ └── audit.py ├── renovate.json ├── docs ├── examples │ ├── claude_desktop_config.example.json │ └── example_config.sh ├── Debugging.md ├── CONTRIBUTING.md └── Usage.md ├── .devcontainer ├── Containerfile └── devcontainer.json ├── AGENTS.md ├── .gitignore ├── .codecov.yml ├── Makefile ├── .tekton ├── push.yaml ├── pull_request.yaml └── task-get-version.yaml ├── .github └── workflows │ ├── build-publish.yml │ └── ci.yml ├── Containerfile ├── .coderabbit.yaml ├── pyproject.toml └── README.md /tests/tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/_vendor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/connection/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/connection/ssh/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/linux_mcp_server/connection/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "enabled": false 4 | } 5 | -------------------------------------------------------------------------------- /src/linux_mcp_server/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib.metadata 2 | 3 | import linux_mcp_server._vendor # noqa: F401 4 | 5 | from linux_mcp_server.config import CONFIG as CONFIG 6 | 7 | 8 | __version__ = importlib.metadata.version(__spec__.parent) 9 | -------------------------------------------------------------------------------- /src/linux_mcp_server/utils/__init__.py: -------------------------------------------------------------------------------- 
1 | from linux_mcp_server.utils.enum import StrEnum as StrEnum 2 | from linux_mcp_server.utils.format import format_bytes as format_bytes 3 | from linux_mcp_server.utils.format import is_ipv6_link_local as is_ipv6_link_local 4 | -------------------------------------------------------------------------------- /src/linux_mcp_server/utils/types.py: -------------------------------------------------------------------------------- 1 | import typing as t 2 | 3 | from pydantic import Field 4 | from pydantic import StringConstraints 5 | 6 | 7 | Host = t.Annotated[str, Field(description="Remote host to connect to")] 8 | UpperCase = t.Annotated[str, StringConstraints(to_upper=True)] 9 | -------------------------------------------------------------------------------- /src/linux_mcp_server/utils/enum.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | try: 5 | from enum import StrEnum # pyright: ignore[reportAttributeAccessIssue] 6 | except ImportError: 7 | StrEnum = None 8 | 9 | 10 | class StringEnum(str, Enum): 11 | def __str__(self): 12 | return self.value 13 | 14 | 15 | if StrEnum is None: 16 | StrEnum = StringEnum 17 | -------------------------------------------------------------------------------- /tests/test__main__.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from linux_mcp_server.__main__ import cli 4 | 5 | 6 | def test_cli(): 7 | with pytest.raises(SystemExit): 8 | cli() 9 | 10 | 11 | def test_cli_keyboard_interrupt(mocker): 12 | mocker.patch("linux_mcp_server.__main__.main", side_effect=KeyboardInterrupt) 13 | with pytest.raises(SystemExit): 14 | cli() 15 | -------------------------------------------------------------------------------- /src/linux_mcp_server/server.py: -------------------------------------------------------------------------------- 1 | """Core MCP server for Linux diagnostics using FastMCP.""" 2 | 3 | import logging 4 | 5 | from mcp.server.fastmcp import FastMCP 6 | 7 | 8 | logger = logging.getLogger("linux-mcp-server") 9 | 10 | 11 | # Initialize FastMCP server 12 | mcp = FastMCP("linux-diagnostics") 13 | 14 | from linux_mcp_server.tools import * # noqa: E402, F403 15 | 16 | 17 | def main(): 18 | mcp.run() 19 | -------------------------------------------------------------------------------- /docs/examples/claude_desktop_config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "linux-diagnostics": { 4 | "command": "linux-mcp-server", 5 | "args": [], 6 | "env": { 7 | "LINUX_MCP_ALLOWED_LOG_PATHS": "/var/log/messages,/var/log/secure,/var/log/audit/audit.log", 8 | "LINUX_MCP_LOG_LEVEL": "INFO", 9 | "LINUX_MCP_SSH_KEY_PATH": "/home/user/.ssh/id_ed25519" 10 | } 11 | } 12 | } 13 | } 14 | 15 | -------------------------------------------------------------------------------- /.devcontainer/Containerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/fedora/fedora:43 2 | 3 | ENV UV_LINK_MODE=copy 4 | ENV UV_COMPILE_BYTECODE=1 5 | ENV SHELL=/usr/bin/fish 6 | 7 | RUN dnf install -y \ 8 | python3 \ 9 | python3-pip \ 10 | git \ 11 | make \ 12 | fish \ 13 | sudo \ 14 | && dnf clean all 15 | 16 | COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv 17 | 18 | # Create vscode user with sudo access and fish shell 19 | RUN useradd -m -s /usr/bin/fish vscode && \ 20 | echo "vscode ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/vscode 
21 | 22 | WORKDIR /workspace 23 | -------------------------------------------------------------------------------- /tests/utils/test_enum.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from linux_mcp_server.utils.enum import StrEnum 4 | 5 | 6 | @pytest.fixture 7 | def color(): 8 | class Color(StrEnum): 9 | red = "red" 10 | blue = "blue" 11 | 12 | return Color 13 | 14 | 15 | def test_enum(color): 16 | assert color.red == "red" 17 | assert color.blue == "blue" 18 | assert color.red in "Little red riding hood" 19 | assert color.blue in "I'm blue, da-ba-dee, da-ba-di" 20 | 21 | 22 | def test_enum_str(color): 23 | assert str(color.red) == "red" 24 | assert str(color.blue) == "blue" 25 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | # Linux MCP Server 2 | 3 | ## Development Guidelines 4 | 5 | - Always run tests, linters, and type checkers before committing code with `make verify`. 6 | - Extend existing tests using parameterized tests rather than adding new test cases. 7 | - Use fixtures to deduplicate setup code across tests. 8 | - If a fixture could be used in multiple test modules, place it in `conftest.py`. 9 | - Use mocks sparingly and try to pass objects to the code under test instead. 10 | - Use `autospec=True` when patching to verify arguments match the real function signature. 11 | - Use `spec=` with MagicMock to restrict attributes to those of the real object. 12 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Tests for Linux MCP Server.""" 2 | 3 | 4 | def verify_result_structure(result) -> str: 5 | """Verify standard MCP tool result structure and extract output text. 6 | 7 | All MCP tools return a tuple of (list[TextContent], dict). This function 8 | validates that structure and extracts the text content for assertions. 
9 | 10 | Args: 11 | result: The result tuple from an MCP tool call 12 | 13 | Returns: 14 | The text content from the first item in the result list 15 | """ 16 | assert isinstance(result, tuple) 17 | assert len(result) == 2 18 | assert isinstance(result[0], list) 19 | return result[0][0].text 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | *.egg-info/ 20 | .installed.cfg 21 | *.egg 22 | 23 | # Vendored packages 24 | src/linux_mcp_server/_vendor/* 25 | !src/linux_mcp_server/_vendor/__init__.py 26 | 27 | # Virtual environments 28 | .venv/ 29 | venv/ 30 | ENV/ 31 | env/ 32 | 33 | # Testing 34 | .pytest_cache/ 35 | .coverage 36 | coverage/ 37 | htmlcov/ 38 | .tox/ 39 | 40 | # IDEs 41 | .vscode/ 42 | .idea/ 43 | *.swp 44 | *.swo 45 | *~ 46 | 47 | # OS 48 | .DS_Store 49 | Thumbs.db 50 | 51 | # uv 52 | .uv/ 53 | 54 | # Configuration files (user-specific) 55 | config.sh 56 | 57 | .claude/settings.local.json 58 | CLAUDE.local.md 59 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | after_n_builds: 6 4 | wait_for_ci: false 5 | 6 | require_ci_to_pass: false 7 | token: 74bc381b-9c5f-44c1-9efa-af57d5f3fe50 # notsecret # repo-scoped, upload-only, stability in fork PRs 8 | 9 | coverage: 10 | range: 40..100 11 | status: 12 | project: 13 | default: 14 | target: 60% 15 | threshold: 10% 16 | app: 17 | target: 60% 18 | threshold: 10% 19 | paths: 20 | - src/ 21 | tests: 22 | target: 100% 23 | paths: 24 | - tests/ 25 | 26 | patch: 27 | default: 28 | target: 100% 29 | threshold: 10% 30 | app: 31 | target: 100% 32 | threshold: 10% 33 | paths: 34 | - src/ 35 | tests: 36 | target: 100% 37 | paths: 38 | - tests/ 39 | -------------------------------------------------------------------------------- /src/linux_mcp_server/__main__.py: -------------------------------------------------------------------------------- 1 | """Main entry point for the Linux MCP Server.""" 2 | 3 | import logging 4 | import sys 5 | 6 | from linux_mcp_server import __version__ 7 | from linux_mcp_server.logging_config import setup_logging 8 | from linux_mcp_server.server import main 9 | 10 | 11 | def cli(): 12 | """Console script entry point for the Linux MCP Server.""" 13 | setup_logging() 14 | 15 | logger = logging.getLogger("linux-mcp-server") 16 | logger.info(f"Running Linux MCP Server {__version__}. 
Press Ctrl+C, Enter to stop the server.") 17 | 18 | try: 19 | # FastMCP.run() creates its own event loop, don't use asyncio.run() 20 | main() 21 | except KeyboardInterrupt: 22 | logger.info("Linux MCP Server stopped by user") 23 | sys.exit(0) 24 | except Exception as e: 25 | logger.critical(f"Fatal error in Linux MCP Server: {e}", exc_info=True) 26 | sys.exit(1) 27 | 28 | 29 | if __name__ == "__main__": 30 | cli() 31 | -------------------------------------------------------------------------------- /tests/connection/ssh/test_get_bin_path.py: -------------------------------------------------------------------------------- 1 | import asyncssh 2 | import pytest 3 | 4 | from linux_mcp_server.connection.ssh import get_bin_path 5 | from linux_mcp_server.connection.ssh import get_remote_bin_path 6 | 7 | 8 | def test_get_bin_path_not_found(mocker): 9 | mocker.patch("linux_mcp_server.connection.ssh.shutil.which", return_value=None) 10 | with pytest.raises(FileNotFoundError, match="Unable to find"): 11 | get_bin_path("/bin/ls") 12 | 13 | 14 | async def test_get_remote_bin_path_error(mocker): 15 | connection = mocker.Mock(asyncssh.SSHClientConnection, _username="testuser") 16 | connection.run = mocker.AsyncMock(side_effect=asyncssh.Error(1, "Raised intentionally")) 17 | 18 | with pytest.raises(ConnectionError, match="Raised intentionally"): 19 | await get_remote_bin_path("ls", "host", connection) 20 | 21 | 22 | async def test_get_remote_bin_not_found(mocker): 23 | connection = mocker.Mock(asyncssh.SSHClientConnection, _username="testuser") 24 | connection.run = mocker.AsyncMock(return_value=mocker.Mock(exit_status=0, stdout="", stderr="")) 25 | 26 | with pytest.raises(FileNotFoundError, match="Unable to find command"): 27 | await get_remote_bin_path("ls", "host", connection) 28 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help sync lint format types test ci verify fix clean 2 | 3 | # Default target 4 | help: 5 | @echo "🔧 CI Targets:" 6 | @echo " make ci - Run ALL CI checks (lint + format + types + test)" 7 | @echo " make verify - Sync dependencies + run all CI checks" 8 | @echo " make lint - Run ruff linter" 9 | @echo " make format - Check code formatting" 10 | @echo " make types - Run pyright type checker" 11 | @echo " make test - Run pytest with coverage" 12 | @echo "" 13 | @echo "🛠️ Development Targets:" 14 | @echo " make sync - Install/sync all dependencies" 15 | @echo " make fix - Auto-fix lint and format issues" 16 | @echo " make clean - Remove build artifacts and caches" 17 | 18 | sync: 19 | uv sync --locked 20 | 21 | lint: 22 | uv run --locked ruff check --diff 23 | 24 | format: 25 | uv run --locked ruff format --diff 26 | 27 | types: 28 | uv run --locked pyright 29 | 30 | test: 31 | uv run --locked pytest 32 | 33 | ci: lint format types test 34 | @echo "" 35 | @echo "✅ All CI checks passed!" 36 | 37 | verify: sync ci 38 | 39 | fix: 40 | uv run --locked ruff check --fix 41 | uv run --locked ruff format 42 | 43 | clean: 44 | rm -rf .pytest_cache .ruff_cache .pyright coverage dist build 45 | rm -rf src/*.egg-info 46 | find . 
-type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true 47 | -------------------------------------------------------------------------------- /tests/connection/ssh/test_execute_command.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from linux_mcp_server.connection.ssh import execute_command 4 | from linux_mcp_server.connection.ssh import SSHConnectionManager 5 | 6 | 7 | @pytest.mark.parametrize( 8 | "command, kwargs, expected_rc, expected_out, expected_err", 9 | ( 10 | (["echo", "hello"], {}, 0, "hello", ""), 11 | (["false"], {}, 1, "", ""), 12 | (["bash", "-c", "echo error >&2"], {}, 0, "", "error"), 13 | (["echo", "test"], {"username": "someuser"}, 0, "test", ""), 14 | (["/bin/echo", "test"], {}, 0, "test", ""), 15 | ), 16 | ) 17 | async def test_execute_command_local(command, kwargs, expected_rc, expected_out, expected_err): 18 | """Test local command execution success.""" 19 | returncode, stdout, stderr = await execute_command(command, **kwargs) 20 | 21 | assert returncode == expected_rc 22 | assert expected_out in stdout 23 | assert expected_err in stderr 24 | 25 | 26 | async def test_execute_command_remote(mocker): 27 | """Test that remote execution routes through SSH.""" 28 | mock_manager = mocker.Mock(SSHConnectionManager) 29 | mock_manager.execute_remote = mocker.AsyncMock(return_value=(0, "output", "")) 30 | mocker.patch("linux_mcp_server.connection.ssh._connection_manager", mock_manager) 31 | 32 | returncode, stdout, stderr = await execute_command( 33 | ["ls", "-la"], 34 | host="remote.example.com", 35 | username="testuser", 36 | ) 37 | 38 | assert returncode == 0 39 | assert stdout == "output" 40 | assert mock_manager.execute_remote.call_count == 1 41 | -------------------------------------------------------------------------------- /src/linux_mcp_server/config.py: -------------------------------------------------------------------------------- 1 | """Settings for linux-mcp-server""" 2 | 3 | from pathlib import Path 4 | 5 | from pydantic_settings import BaseSettings 6 | from pydantic_settings import SettingsConfigDict 7 | 8 | from linux_mcp_server.utils.types import UpperCase 9 | 10 | 11 | class Config(BaseSettings): 12 | # The `_`` is required in the env_prefix, otherwise, pydantic would 13 | # interpret the prefix as `LINUX_MCPLOG_DIR`, instead of `LINUX_MCP_LOG_DIR` 14 | model_config = SettingsConfigDict(env_prefix="LINUX_MCP_", env_ignore_empty=True) 15 | 16 | user: str = "" 17 | 18 | # Logging configuration 19 | log_dir: Path = Path.home() / ".local" / "share" / "linux-mcp-server" / "logs" 20 | log_level: UpperCase = "INFO" 21 | log_retention_days: int = 10 22 | 23 | # Log file access control 24 | allowed_log_paths: str | None = None 25 | 26 | # SSH configuration 27 | ssh_key_path: Path | None = None 28 | key_passphrase: str | None = None 29 | search_for_ssh_key: bool = False 30 | 31 | # SSH host key verification (security) 32 | verify_host_keys: bool = False # NOTE(major): Switch to true later for production! 
33 | known_hosts_path: Path | None = None # Custom path to known_hosts file 34 | 35 | # Command execution timeout (applies to remote SSH commands) 36 | command_timeout: int = 30 # Timeout in seconds; prevents hung SSH operations 37 | 38 | @property 39 | def effective_known_hosts_path(self) -> Path: 40 | """Return the known_hosts path, using default ~/.ssh/known_hosts if not configured.""" 41 | return self.known_hosts_path or Path.home() / ".ssh" / "known_hosts" 42 | 43 | 44 | CONFIG = Config() 45 | -------------------------------------------------------------------------------- /.tekton/push.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://gitlab.cee.redhat.com/rhel-lightspeed/mcp/linux-mcp-server/-/tree/{{ revision }} 6 | build.appstudio.redhat.com/commit_sha: '{{ revision }}' 7 | build.appstudio.redhat.com/target_branch: '{{ target_branch }}' 8 | pipelinesascode.tekton.dev/cancel-in-progress: "false" 9 | pipelinesascode.tekton.dev/max-keep-runs: "3" 10 | pipelinesascode.tekton.dev/on-cel-expression: >- 11 | event == "push" 12 | && ( 13 | target_branch == "main" 14 | || target_branch.startsWith("refs/tags/") 15 | ) 16 | 17 | creationTimestamp: null 18 | 19 | labels: 20 | appstudio.openshift.io/application: linux-mcp-server 21 | appstudio.openshift.io/component: linux-mcp-server 22 | pipelines.appstudio.openshift.io/type: build 23 | 24 | namespace: rhel-lightspeed-tenant 25 | name: linux-mcp-server-on-push 26 | 27 | spec: 28 | params: 29 | - name: git-url 30 | value: '{{ source_url }}' 31 | 32 | - name: revision 33 | value: '{{ revision }}' 34 | 35 | - name: output-image 36 | value: quay.io/redhat-user-workloads/rhel-lightspeed-tenant/linux-mcp-server:{{ revision }} 37 | 38 | - name: dockerfile 39 | value: /Containerfile 40 | 41 | - name: additional-tags 42 | value: 43 | - latest 44 | - '{{ target_branch }}' 45 | 46 | pipelineRef: 47 | name: pipeline-build-multiarch 48 | 49 | taskRunTemplate: 50 | serviceAccountName: build-pipeline-linux-mcp-server 51 | 52 | workspaces: 53 | - name: git-auth 54 | secret: 55 | secretName: '{{ git_auth_secret }}' 56 | -------------------------------------------------------------------------------- /tests/test_server.py: -------------------------------------------------------------------------------- 1 | """Tests for the core MCP server.""" 2 | 3 | from linux_mcp_server.server import mcp 4 | 5 | 6 | class TestLinuxMCPServer: 7 | """Test the core server functionality.""" 8 | 9 | def test_server_initialization(self): 10 | """Test that the FastMCP server is initialized.""" 11 | assert mcp is not None 12 | assert mcp.name == "linux-diagnostics" 13 | 14 | def test_server_has_list_tools_method(self): 15 | """Test that the server has list_tools method.""" 16 | assert hasattr(mcp, "list_tools") 17 | assert callable(mcp.list_tools) 18 | 19 | async def test_list_tools_returns_tools(self): 20 | """Test that list_tools returns a list of tools.""" 21 | tools = await mcp.list_tools() 22 | 23 | # Should return a list 24 | assert isinstance(tools, list) 25 | assert len(tools) > 0 26 | 27 | async def test_server_has_basic_tools(self): 28 | """Test that basic diagnostic tools are registered.""" 29 | tools = await mcp.list_tools() 30 | tool_names = [tool.name for tool in tools] 31 | 32 | # Should have at least the basic tools 33 | assert "get_system_information" in tool_names 34 | assert "list_services" in 
tool_names 35 | assert "list_processes" in tool_names 36 | assert "get_network_interfaces" in tool_names 37 | assert "list_block_devices" in tool_names 38 | assert "list_directories" in tool_names 39 | assert "list_files" in tool_names 40 | 41 | async def test_all_tools_have_correct_count(self): 42 | """Test that all 20 tools are registered.""" 43 | tools = await mcp.list_tools() 44 | assert len(tools) == 20 45 | -------------------------------------------------------------------------------- /.tekton/pull_request.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://gitlab.cee.redhat.com/rhel-lightspeed/mcp/linux-mcp-server/-/tree/{{ revision }} 6 | build.appstudio.redhat.com/commit_sha: '{{ revision }}' 7 | build.appstudio.redhat.com/pull_request_number: '{{ pull_request_number }}' 8 | build.appstudio.redhat.com/target_branch: '{{ target_branch }}' 9 | pipelinesascode.tekton.dev/cancel-in-progress: "true" 10 | pipelinesascode.tekton.dev/max-keep-runs: "3" 11 | pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch == "main" 12 | 13 | creationTimestamp: null 14 | 15 | labels: 16 | appstudio.openshift.io/application: linux-mcp-server 17 | appstudio.openshift.io/component: linux-mcp-server 18 | pipelines.appstudio.openshift.io/type: build 19 | 20 | namespace: rhel-lightspeed-tenant 21 | name: linux-mcp-server-pr 22 | 23 | spec: 24 | params: 25 | - name: git-url 26 | value: '{{ source_url }}' 27 | 28 | - name: revision 29 | value: '{{ revision }}' 30 | 31 | - name: output-image 32 | value: quay.io/redhat-user-workloads/rhel-lightspeed-tenant/linux-mcp-server:pr-{{ pull_request_number }}-latest 33 | 34 | - name: dockerfile 35 | value: /Containerfile 36 | 37 | - name: image-expires-after 38 | value: 5d 39 | 40 | - name: tag-prefix 41 | value: pr-{{ pull_request_number }}- 42 | 43 | pipelineRef: 44 | name: pipeline-build-multiarch 45 | 46 | taskRunTemplate: 47 | serviceAccountName: build-pipeline-linux-mcp-server 48 | 49 | workspaces: 50 | - name: git-auth 51 | secret: 52 | secretName: '{{ git_auth_secret }}' 53 | -------------------------------------------------------------------------------- /.tekton/task-get-version.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: Task 3 | metadata: 4 | namespace: rhel-lightspeed-tenant 5 | name: get-version 6 | 7 | spec: 8 | description: Get version number 9 | params: 10 | - name: source_artifact 11 | description: The Trusted Artifact URI pointing to the artifact with the application source code.
12 | type: string 13 | 14 | results: 15 | - name: pseudo_version 16 | description: Version for use by setuptools-scm 17 | 18 | - name: version 19 | description: Version that is also a valid container tag 20 | 21 | volumes: 22 | - name: workdir 23 | emptyDir: {} 24 | 25 | stepTemplate: 26 | volumeMounts: 27 | - name: workdir 28 | mountPath: /var/workdir 29 | 30 | steps: 31 | - name: use-trusted-artifact 32 | image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:9b180776a41d9a22a1c51539f1647c60defbbd55b44bbebdd4130e33512d8b0d 33 | args: 34 | - use 35 | - $(params.source_artifact)=/var/workdir/source 36 | 37 | - name: determine-image-tag 38 | image: registry.redhat.io/ubi10/s2i-base:latest 39 | workingDir: /var/workdir/source 40 | script: | 41 | #!/usr/bin/env bash 42 | set -euo pipefail 43 | 44 | python3 -V 45 | env | sort 46 | whoami 47 | pwd 48 | ls -la 49 | git status 50 | git tag 51 | 52 | python3 -m venv .venvs/hatch 53 | 54 | export PATH=".venvs/hatch/bin:$PATH" 55 | pip install hatch 56 | 57 | pseudo_version="$(hatch version)" 58 | version="${pseudo_version%%+*}" 59 | 60 | echo -n "$pseudo_version" | tee "$(results.pseudo_version.path)" 61 | echo -n "$version" | tee "$(results.version.path)" 62 | -------------------------------------------------------------------------------- /tests/_vendor/test_vendor.py: -------------------------------------------------------------------------------- 1 | import pkgutil 2 | import sys 3 | 4 | from pathlib import Path 5 | 6 | import pytest 7 | 8 | 9 | class ModuleInfo: 10 | def __init__(self, name): 11 | self.name = name 12 | 13 | 14 | @pytest.fixture 15 | def reset_vendor(): 16 | import linux_mcp_server 17 | 18 | vendor_path = str(Path(linux_mcp_server.__file__).parent / "_vendor") 19 | [sys.path.remove(path) for path in sys.path if path == vendor_path] 20 | [sys.modules.pop(package, None) for package in ["linux_mcp_server._vendor", "linux_mcp_server"]] 21 | 22 | 23 | def test_package_masking(): 24 | from linux_mcp_server import _vendor 25 | 26 | assert getattr(_vendor, "__path__") == [] 27 | 28 | 29 | def test_vendored(reset_vendor, mocker): 30 | mocker.patch.object(pkgutil, "iter_modules", return_value=[ModuleInfo(name="nopers")]) 31 | 32 | previous_path = list(sys.path) 33 | import linux_mcp_server 34 | 35 | vendor_path = str(Path(linux_mcp_server.__file__).parent / "_vendor") 36 | new_path = list(sys.path) 37 | 38 | assert new_path[0] == vendor_path 39 | assert new_path[1:] == previous_path 40 | 41 | 42 | def test_vendored_warning(reset_vendor, mocker): 43 | mocker.patch.object(pkgutil, "iter_modules", return_value=[ModuleInfo(name="sys"), ModuleInfo(name="pkgutil")]) 44 | 45 | previous_path = list(sys.path) 46 | import linux_mcp_server 47 | 48 | vendor_path = str(Path(linux_mcp_server.__file__).parent / "_vendor") 49 | new_path = list(sys.path) 50 | 51 | with pytest.warns(UserWarning) as warn: 52 | linux_mcp_server._vendor._vendor_paths() # pyright: ignore[reportAttributeAccessIssue] 53 | 54 | assert new_path[0] == vendor_path 55 | assert new_path[1:] == previous_path 56 | assert any(["pkgutil, sys" in str(w.message) for w in warn]) 57 | -------------------------------------------------------------------------------- /src/linux_mcp_server/utils/format.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | 3 | 4 | def is_ipv6_link_local(address: str) -> bool: 5 | """Check if an IPv6 address is link-local (fe80::/10). 
6 | 7 | Link-local addresses are only valid on a single network segment and 8 | require a scope identifier to route. They're not useful for enterprise 9 | cross-machine communication. 10 | 11 | Args: 12 | address: IPv6 address string, possibly with scope identifier (e.g., "fe80::1%eth0") 13 | 14 | Returns: 15 | True if the address is link-local, False otherwise 16 | 17 | Examples: 18 | >>> is_ipv6_link_local("fe80::1") 19 | True 20 | >>> is_ipv6_link_local("fe80::1%eth0") 21 | True 22 | >>> is_ipv6_link_local("2001:db8::1") 23 | False 24 | >>> is_ipv6_link_local("192.168.1.1") 25 | False 26 | """ 27 | try: 28 | # Parse and check if it's link-local 29 | addr = ipaddress.IPv6Address(address) 30 | return addr.is_link_local 31 | except (ValueError, ipaddress.AddressValueError): 32 | # Not a valid IPv6 address (could be IPv4, empty, or malformed) 33 | return False 34 | 35 | 36 | def format_bytes(bytes_value: int | float) -> str: 37 | """ 38 | Format bytes into human-readable format. 39 | 40 | Args: 41 | bytes_value: Number of bytes to format 42 | 43 | Returns: 44 | Human-readable string representation (e.g., "1.5GB", "256.0MB") 45 | 46 | Examples: 47 | >>> format_bytes(1024) 48 | '1.0KB' 49 | >>> format_bytes(1536) 50 | '1.5KB' 51 | >>> format_bytes(1073741824) 52 | '1.0GB' 53 | """ 54 | value = float(bytes_value) 55 | for unit in ["B", "KB", "MB", "GB", "TB"]: 56 | if value < 1024.0: 57 | return f"{value:.1f}{unit}" 58 | 59 | value /= 1024.0 60 | 61 | return f"{value:.1f}PB" 62 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/devcontainers/spec/refs/heads/main/schemas/devContainer.schema.json", 3 | "name": "linux-mcp-server", 4 | "build": { 5 | "dockerfile": "Containerfile", 6 | "context": "." 
7 | }, 8 | // Podman-specific: keep UID/GID mapping for rootless containers 9 | "runArgs": [ 10 | "--userns=keep-id", 11 | "--security-opt=label=disable" 12 | ], 13 | "containerUser": "vscode", 14 | "workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,Z", 15 | "workspaceFolder": "/workspace", 16 | "postCreateCommand": "make sync", 17 | "customizations": { 18 | "vscode": { 19 | "extensions": [ 20 | "charliermarsh.ruff", 21 | "github.vscode-github-actions", 22 | "github.vscode-pull-request-github", 23 | "ms-python.debugpy", 24 | "ms-python.python", 25 | "ms-python.vscode-pylance" 26 | ], 27 | "settings": { 28 | "python.defaultInterpreterPath": "/workspace/.venv/bin/python", 29 | "python.terminal.activateEnvironment": true, 30 | "editor.formatOnSave": true, 31 | "terminal.integrated.defaultProfile.linux": "fish", 32 | "terminal.integrated.profiles.linux": { 33 | "fish": { 34 | "path": "/usr/bin/fish" 35 | } 36 | }, 37 | "[python]": { 38 | "editor.defaultFormatter": "charliermarsh.ruff", 39 | "editor.codeActionsOnSave": { 40 | "source.fixAll": "explicit", 41 | "source.organizeImports": "explicit" 42 | } 43 | } 44 | } 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/build-publish.yml: -------------------------------------------------------------------------------- 1 | name: Build and publish 2 | on: 3 | push: 4 | tags: 5 | - '*' 6 | 7 | permissions: 8 | contents: read 9 | 10 | jobs: 11 | build: 12 | name: Build sdist and wheel 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v5.0.0 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Show environment 22 | run: | 23 | set -x 24 | python -VV 25 | python -m site 26 | ls -ld $(python -c "import site; print(site.getsitepackages()[0])") 27 | git tag --list --sort=-refname --format='%(refname:short) %(objectname:short)' 28 | whoami 29 | 30 | - name: Install build tools 31 | run: | 32 | python -m pip install --upgrade pip 33 | python -m pip install build 34 | 35 | - name: Build 36 | run: python -m build --sdist --wheel 37 | 38 | - name: Upload artifacts 39 | uses: actions/upload-artifact@v4.6.2 40 | with: 41 | name: artifacts 42 | path: dist 43 | if-no-files-found: error 44 | 45 | publish: 46 | name: Publish to PyPI 47 | runs-on: ubuntu-latest 48 | needs: 49 | - build 50 | 51 | environment: 52 | name: pypi 53 | url: https://pypi.org/p/linux-mcp-server 54 | 55 | permissions: 56 | id-token: write 57 | 58 | steps: 59 | - name: Download artifacts 60 | uses: actions/download-artifact@v5.0.0 61 | with: 62 | name: artifacts 63 | path: dist 64 | 65 | - name: Show environment 66 | run: | 67 | set -x 68 | python -VV 69 | python -m site 70 | ls -ld $(python -c "import site; print(site.getsitepackages()[0])") 71 | ls -l dist/ 72 | whoami 73 | 74 | - name: Publish to PyPI 75 | uses: pypa/gh-action-pypi-publish@v1.13.0 76 | -------------------------------------------------------------------------------- /src/linux_mcp_server/tools/__init__.py: -------------------------------------------------------------------------------- 1 | # logs 2 | from linux_mcp_server.tools.logs import get_audit_logs 3 | from linux_mcp_server.tools.logs import get_journal_logs 4 | from linux_mcp_server.tools.logs import read_log_file 5 | 6 | # network 7 | from linux_mcp_server.tools.network import get_listening_ports 8 | from linux_mcp_server.tools.network import get_network_connections 9 | from linux_mcp_server.tools.network import 
get_network_interfaces 10 | 11 | # processes 12 | from linux_mcp_server.tools.processes import get_process_info 13 | from linux_mcp_server.tools.processes import list_processes 14 | 15 | # services 16 | from linux_mcp_server.tools.services import get_service_logs 17 | from linux_mcp_server.tools.services import get_service_status 18 | from linux_mcp_server.tools.services import list_services 19 | 20 | # storage 21 | from linux_mcp_server.tools.storage import list_block_devices 22 | from linux_mcp_server.tools.storage import list_directories 23 | from linux_mcp_server.tools.storage import read_file 24 | 25 | # system_info 26 | from linux_mcp_server.tools.system_info import get_cpu_information 27 | from linux_mcp_server.tools.system_info import get_disk_usage 28 | from linux_mcp_server.tools.system_info import get_hardware_information 29 | from linux_mcp_server.tools.system_info import get_memory_information 30 | from linux_mcp_server.tools.system_info import get_system_information 31 | 32 | 33 | __all__ = [ 34 | "get_audit_logs", 35 | "get_cpu_information", 36 | "get_disk_usage", 37 | "get_hardware_information", 38 | "get_journal_logs", 39 | "get_listening_ports", 40 | "get_memory_information", 41 | "get_network_connections", 42 | "get_network_interfaces", 43 | "get_process_info", 44 | "get_service_logs", 45 | "get_service_status", 46 | "get_system_information", 47 | "list_block_devices", 48 | "list_directories", 49 | "list_processes", 50 | "list_services", 51 | "read_file", 52 | "read_log_file", 53 | ] 54 | -------------------------------------------------------------------------------- /src/linux_mcp_server/utils/validation.py: -------------------------------------------------------------------------------- 1 | """Input validation utilities for MCP tools. 2 | 3 | Provides validation functions for handling numeric parameters where LLMs often 4 | pass floats instead of integers. 5 | """ 6 | 7 | 8 | def validate_positive_int( 9 | value: int | float, 10 | param_name: str = "parameter", 11 | min_value: int = 1, 12 | max_value: int | None = None, 13 | ) -> tuple[int | None, str | None]: 14 | """ 15 | Validate and normalize a numeric value to a positive integer. 16 | 17 | Accepts both int and float (LLMs often pass floats) and truncates to int. 18 | Validates bounds and caps at max_value if specified. 19 | 20 | Returns: 21 | (validated_int, error_message) tuple. On success: (int_value, None). 22 | On failure: (None, error_msg). 23 | """ 24 | if not isinstance(value, (int, float)): 25 | return None, f"Error: {param_name} must be a number" 26 | 27 | int_value = int(value) 28 | 29 | if int_value < min_value: 30 | return None, f"Error: {param_name} must be at least {min_value}" 31 | 32 | if max_value is not None and int_value > max_value: 33 | int_value = max_value 34 | 35 | return int_value, None 36 | 37 | 38 | def validate_pid(pid: int | float) -> tuple[int | None, str | None]: 39 | """Validate a process ID (PID). Accepts floats from LLMs and truncates to int.""" 40 | return validate_positive_int(pid, param_name="PID", min_value=1) 41 | 42 | 43 | def validate_line_count( 44 | lines: int | float, 45 | default: int = 100, 46 | max_lines: int = 10000, 47 | ) -> tuple[int, str | None]: 48 | """ 49 | Validate line count for log reading functions. 50 | 51 | Accepts floats from LLMs, truncates to int, caps at max_lines. 52 | Returns default value if validation fails. 
53 | """ 54 | validated, error = validate_positive_int(lines, "lines", 1, max_lines) 55 | if error or validated is None: 56 | return (default, error) 57 | return (validated, None) 58 | -------------------------------------------------------------------------------- /src/linux_mcp_server/utils/decorators.py: -------------------------------------------------------------------------------- 1 | """Decorators for tool functions.""" 2 | 3 | import functools 4 | import inspect 5 | import os 6 | 7 | from mcp.server.fastmcp.exceptions import ToolError 8 | 9 | 10 | CONTAINER_ENV_VARS = [ 11 | "openvz", 12 | "lxc", 13 | "lxc-libvirt", 14 | "systemd-nspawn", 15 | "docker", 16 | "podman", 17 | "rkt", 18 | "wsl", 19 | "proot", 20 | "pouch", 21 | ] 22 | 23 | 24 | def disallow_local_execution_in_containers(func): 25 | """ 26 | Decorator that raises a ToolError if local execution is attempted in a container. 27 | 28 | This decorator checks if: 29 | 1. The 'host' parameter is None (indicating local execution) 30 | 2. The process is running in a container (via the 'container' environment variable) 31 | 32 | If both conditions are true, it raises a ToolError. 33 | 34 | Args: 35 | func: The function to decorate (must have a 'host' parameter) 36 | 37 | Returns: 38 | The wrapped function 39 | 40 | Raises: 41 | ToolError: If local execution is attempted in a container 42 | """ 43 | 44 | @functools.wraps(func) 45 | async def wrapper(*args, **kwargs): 46 | # Get the function signature to find the 'host' parameter 47 | sig = inspect.signature(func) 48 | bound_args = sig.bind_partial(*args, **kwargs) 49 | bound_args.apply_defaults() 50 | 51 | # Check if 'host' parameter exists and is None 52 | host_value = bound_args.arguments.get("host") 53 | 54 | # Check if running in a container and host is None (local execution) 55 | if host_value is None and os.getenv("container") in CONTAINER_ENV_VARS: 56 | raise ToolError( 57 | "Local execution is not allowed when running in a container. " 58 | "Please specify a 'host' parameter to execute remotely via SSH." 59 | ) 60 | 61 | # Call the original function 62 | return await func(*args, **kwargs) 63 | 64 | return wrapper 65 | -------------------------------------------------------------------------------- /src/linux_mcp_server/_vendor/__init__.py: -------------------------------------------------------------------------------- 1 | # (c) 2020 Ansible Project 2 | # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 3 | 4 | import pathlib 5 | import pkgutil 6 | import sys 7 | import warnings 8 | 9 | 10 | """ 11 | This package exists to allow downstream packagers to transparently vendor Python 12 | packages. It is called very early on to ensure that packages are available. 13 | 14 | Packages should be vendored only when necessary. When available, packages from 15 | the system package manager should be used. 16 | 17 | A warning will be displayed in the event that a module from this package is 18 | already loaded. That should be a rare, if ever, occurrence due to the nature 19 | of this application. 20 | 21 | Packages in this directory are added to the beginning of sys.path. They will 22 | take precedence over any other packages.
23 | 24 | Install packages here during downstream packaging using a command such as: 25 | 26 | pip install --upgrade --target [path to this directory] fastmcp 27 | """ 28 | 29 | 30 | # Mask modules below this path so they cannot be accessed directly 31 | __path__ = [] 32 | 33 | 34 | def _vendor_paths() -> None: 35 | # List all the module names in this directory 36 | vendored_path = str(pathlib.Path(__file__).parent) 37 | vendored_module_names = {module.name for module in pkgutil.iter_modules([vendored_path])} 38 | 39 | if vendored_module_names: 40 | if vendored_path in sys.path: 41 | # If the module path was already loaded, remove it to ensure it is 42 | # at the beginning of sys.path. 43 | sys.path.remove(vendored_path) 44 | 45 | # Add the module path to the beginning of sys.path so that vendored 46 | # modules are prioritized over any others. 47 | sys.path.insert(0, vendored_path) 48 | 49 | if already_loaded_modules := vendored_module_names.intersection(sys.modules): 50 | warnings.warn(f"Detected already loaded module(s): {', '.join(sorted(already_loaded_modules))}") 51 | 52 | 53 | _vendor_paths() 54 | -------------------------------------------------------------------------------- /tests/connection/ssh/test_discover_ssh_key.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from linux_mcp_server.connection.ssh import discover_ssh_key 4 | 5 | 6 | @pytest.fixture 7 | def ssh_dir(tmp_path): 8 | ssh_dir = tmp_path / ".ssh" 9 | ssh_dir.mkdir() 10 | 11 | return ssh_dir 12 | 13 | 14 | @pytest.fixture 15 | def ssh_rsa(ssh_dir): 16 | id_rsa = ssh_dir / "id_rsa" 17 | id_rsa.touch() 18 | 19 | return id_rsa 20 | 21 | 22 | @pytest.fixture 23 | def ssh_ed25519(ssh_dir): 24 | id_ed25519 = ssh_dir / "id_ed25519" 25 | id_ed25519.touch() 26 | 27 | return id_ed25519 28 | 29 | 30 | @pytest.fixture 31 | def config_search(tmp_path, mocker): 32 | mocker.patch("pathlib.Path.home", return_value=tmp_path) 33 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.ssh_key_path", None) 34 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.search_for_ssh_key", True) 35 | 36 | 37 | @pytest.fixture 38 | def config_specify_key(ssh_ed25519, mocker): 39 | mocker.patch("pathlib.Path.home", return_value=ssh_ed25519.parent) 40 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.ssh_key_path", ssh_ed25519) 41 | 42 | 43 | def test_discover_ssh_key_env_var_not_exists(tmp_path, mocker): 44 | """Test SSH key discovery with non-existent env var path.""" 45 | key_path = tmp_path / "nonexistent_key" 46 | 47 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.ssh_key_path", key_path) 48 | 49 | result = discover_ssh_key() 50 | 51 | assert result is None 52 | 53 | 54 | def test_discover_ssh_key_prefers_ed25519(tmp_path, ssh_rsa, ssh_ed25519, config_search): 55 | """Test SSH key discovery prefers ed25519 over rsa when both keys exist.""" 56 | result = discover_ssh_key() 57 | 58 | assert result == str(ssh_ed25519) 59 | 60 | 61 | def test_discover_ssh_key_no_keys_found(tmp_path, ssh_dir, config_search): 62 | """Test SSH key discovery when no keys exist.""" 63 | result = discover_ssh_key() 64 | 65 | assert result is None 66 | 67 | 68 | def test_discover_ssh_specify_key_path(tmp_path, ssh_ed25519, config_specify_key): 69 | """Test SSH key discovery when the path to the key is specified in the config.""" 70 | result = discover_ssh_key() 71 | 72 | assert result == str(ssh_ed25519) 73 | --------------------------------------------------------------------------------
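The tests above exercise discover_ssh_key() from linux_mcp_server.connection.ssh, a module whose source does not appear in this listing. The following is a minimal sketch consistent with those tests and with the ssh_key_path / search_for_ssh_key fields on Config; the _KEY_PREFERENCE name and any preference order beyond "ed25519 before RSA" are illustrative assumptions, not the project's actual implementation.

from pathlib import Path

from linux_mcp_server.config import CONFIG


# NOTE: illustrative sketch; the real module may differ. The tests only
# require that id_ed25519 is preferred over id_rsa.
_KEY_PREFERENCE = ("id_ed25519", "id_rsa")


def discover_ssh_key() -> str | None:
    """Return the path to a usable SSH private key, or None if none is found."""
    # An explicitly configured key wins, but only if the file actually exists.
    if CONFIG.ssh_key_path is not None:
        return str(CONFIG.ssh_key_path) if CONFIG.ssh_key_path.exists() else None

    # Otherwise, optionally search ~/.ssh in preference order.
    if CONFIG.search_for_ssh_key:
        ssh_dir = Path.home() / ".ssh"
        for name in _KEY_PREFERENCE:
            candidate = ssh_dir / name
            if candidate.exists():
                return str(candidate)

    return None

Under this sketch, an explicitly configured ssh_key_path short-circuits the search, which matches test_discover_ssh_key_env_var_not_exists expecting None for a configured key that does not exist on disk.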
/tests/utils/test_decorators.py: -------------------------------------------------------------------------------- 1 | """Tests for decorator utilities.""" 2 | 3 | import pytest 4 | 5 | from mcp.server.fastmcp.exceptions import ToolError 6 | 7 | from linux_mcp_server.utils.decorators import disallow_local_execution_in_containers 8 | 9 | 10 | class TestDisallowLocalExecutionInContainers: 11 | """Test disallow_local_execution_in_containers decorator.""" 12 | 13 | async def test_allows_execution_when_conditions_met(self, monkeypatch): 14 | """Test that execution is allowed when host is provided or not in a container.""" 15 | 16 | @disallow_local_execution_in_containers 17 | async def test_func(host=None, username=None): 18 | return "success" 19 | 20 | # Test 1: Execution allowed when host is provided 21 | result = await test_func(host="remote.example.com", username="user") 22 | assert result == "success" 23 | 24 | # Test 2: Local execution allowed when not running in a container 25 | monkeypatch.delenv("container", raising=False) 26 | result = await test_func(host=None, username="user") 27 | assert result == "success" 28 | 29 | # Empty value for 'container' env var does not trigger the check 30 | monkeypatch.setenv("container", "") 31 | result = await test_func(host=None, username="user") 32 | assert result == "success" 33 | 34 | @pytest.mark.parametrize( 35 | "container_value", 36 | ["openvz", "lxc", "lxc-libvirt", "systemd-nspawn", "docker", "podman", "rkt", "wsl", "proot", "pouch"], 37 | ) 38 | async def test_raises_error_for_local_execution_in_container(self, monkeypatch, container_value): 39 | """Test that ToolError is raised when attempting local execution in a container.""" 40 | 41 | @disallow_local_execution_in_containers 42 | async def test_func(host=None, username=None): 43 | return "success" 44 | 45 | # Simulate running in a container 46 | monkeypatch.setenv("container", container_value) 47 | with pytest.raises(ToolError) as exc_info: 48 | await test_func(host=None, username="user") 49 | 50 | assert "Local execution is not allowed" in str(exc_info.value) 51 | assert "container" in str(exc_info.value) 52 | assert "SSH" in str(exc_info.value) 53 | 54 | # Verify the function works when host is provided (covers function body) 55 | result = await test_func(host="remote.example.com", username="user") 56 | assert result == "success" 57 | -------------------------------------------------------------------------------- /tests/utils/test_format.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from linux_mcp_server.utils.format import format_bytes 4 | from linux_mcp_server.utils.format import is_ipv6_link_local 5 | 6 | 7 | @pytest.mark.parametrize( 8 | ("value", "expected"), 9 | ( 10 | (512, "512.0B"), 11 | (1024, "1.0KB"), 12 | (2048, "2.0KB"), 13 | (1536, "1.5KB"), 14 | (1_073_741_824, "1.0GB"), 15 | (201_073_741_824, "187.3GB"), 16 | (5_201_073_741_824, "4.7TB"), 17 | (10_105_201_073_741_824, "9.0PB"), 18 | ), 19 | ) 20 | def test_format_bytes(value, expected): 21 | result = format_bytes(value) 22 | 23 | assert result == expected 24 | 25 | 26 | @pytest.mark.parametrize( 27 | ("address", "expected"), 28 | ( 29 | # Link-local addresses - fe80::/10 covers fe80:: through febf:: 30 | ("fe80::1", True), 31 | ("fe80::1%eth0", True), # With scope identifier 32 | ("fe80::abcd:1234%enp0s3", True), 33 | ("FE80::1", True), # Uppercase 34 | ("fe80::", True), 35 | ("fe81::1", True), # fe8x range 36 | ("fe8f::1", True), 37 | ("fe90::1", True), 
# fe9x range 38 | ("fe9f::1", True), 39 | ("fea0::1", True), # feax range 40 | ("feaf::1", True), 41 | ("feb0::1", True), # febx range 42 | ("febf::1", True), # Last in range 43 | ("fe80::1:2:3:4", True), # Full link-local address 44 | ("fe80:0000:0000:0000:0000:0000:0000:0001", True), # Expanded form 45 | ("febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff", True), # Upper boundary 46 | # Non-link-local addresses (should return False) 47 | ("fec0::1", False), # Site-local (just outside range) 48 | ("ff00::1", False), # Multicast 49 | ("::1", False), # Loopback 50 | ("2001:db8::1", False), # Documentation range 51 | ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", False), 52 | ("", False), # Empty string 53 | # IPv4 addresses (should return False) 54 | ("192.168.1.1", False), 55 | ("127.0.0.1", False), 56 | ("169.254.1.1", False), # IPv4 link-local (different from IPv6) 57 | # Malformed inputs (should return False gracefully) 58 | ("fe80::xyz", False), # Invalid hex characters 59 | ("fe80:::1", False), # Malformed triple colon 60 | ("not-an-ip", False), # Completely invalid 61 | ("fe80::1::2", False), # Multiple :: compressions 62 | ("fe80:ghij::1", False), # Invalid hex in middle 63 | ), 64 | ) 65 | def test_is_ipv6_link_local(address, expected): 66 | result = is_ipv6_link_local(address) 67 | 68 | assert result == expected 69 | -------------------------------------------------------------------------------- /tests/connection/ssh/test_host_key_verification.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import AsyncMock 2 | from unittest.mock import MagicMock 3 | from unittest.mock import Mock 4 | 5 | import pytest 6 | 7 | from linux_mcp_server.connection.ssh import SSHConnectionManager 8 | 9 | 10 | @pytest.fixture 11 | def ssh_manager(): 12 | """Provide a clean SSH manager for each test.""" 13 | manager = SSHConnectionManager() 14 | manager._connections.clear() 15 | return manager 16 | 17 | 18 | @pytest.fixture 19 | def mock_asyncssh_connect(mocker): 20 | """Provide a mock for asyncssh.connect that captures call arguments.""" 21 | mock_conn = AsyncMock() 22 | mock_conn.is_closed = Mock(return_value=False) 23 | 24 | captured_kwargs = {} 25 | 26 | async def capture_connect(*args, **kwargs): 27 | captured_kwargs.update(kwargs) 28 | return mock_conn 29 | 30 | mock_connect = MagicMock(side_effect=capture_connect) 31 | mocker.patch("asyncssh.connect", mock_connect) 32 | 33 | return captured_kwargs 34 | 35 | 36 | @pytest.mark.parametrize( 37 | ("verify_host_keys", "use_custom_path", "expect_none", "expect_warning"), 38 | [ 39 | (True, False, False, False), # Default: uses ~/.ssh/known_hosts 40 | (True, True, False, False), # Custom path used when verification enabled 41 | (False, False, True, True), # Disabled: None + warning 42 | (False, True, True, True), # Disabled overrides custom path 43 | ], 44 | ids=[ 45 | "enabled_default_path", 46 | "enabled_custom_path", 47 | "disabled_logs_warning", 48 | "disabled_overrides_custom", 49 | ], 50 | ) 51 | async def test_known_hosts_configuration( 52 | mocker, 53 | ssh_manager, 54 | mock_asyncssh_connect, 55 | tmp_path, 56 | caplog, 57 | verify_host_keys, 58 | use_custom_path, 59 | expect_none, 60 | expect_warning, 61 | ): 62 | """Test known_hosts is configured correctly based on verify_host_keys and path settings.""" 63 | custom_path = tmp_path / "custom_known_hosts" if use_custom_path else None 64 | 65 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.verify_host_keys", verify_host_keys) 66 | 
mocker.patch("linux_mcp_server.connection.ssh.CONFIG.known_hosts_path", custom_path) 67 | mocker.patch("pathlib.Path.home", return_value=tmp_path) 68 | 69 | with caplog.at_level("WARNING"): 70 | await ssh_manager.get_connection("testhost") 71 | 72 | if expect_none: 73 | assert mock_asyncssh_connect["known_hosts"] is None 74 | elif use_custom_path: 75 | assert mock_asyncssh_connect["known_hosts"] == str(custom_path) 76 | else: 77 | assert mock_asyncssh_connect["known_hosts"] == str(tmp_path / ".ssh" / "known_hosts") 78 | 79 | if expect_warning: 80 | assert "host key verification disabled" in caplog.text.lower() 81 | assert "mitm" in caplog.text.lower() 82 | else: 83 | assert "mitm" not in caplog.text.lower() 84 | -------------------------------------------------------------------------------- /docs/examples/example_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Example configuration for Linux MCP Server 3 | # 4 | # Usage: 5 | # 1. Copy this file: cp example_config.sh config.sh 6 | # 2. Customize the settings below 7 | # 3. Source it before running: source config.sh 8 | # 4. Run the server using one of: 9 | # - linux-mcp-server (if installed with pip) 10 | # - uvx linux-mcp-server (to run without installation) 11 | # - uv run linux-mcp-server (for development) 12 | # - python -m linux_mcp_server (alternative) 13 | # 14 | # For installation instructions, see INSTALL.md 15 | 16 | # ======================================== 17 | # Audit Logging Configuration 18 | # ======================================== 19 | 20 | # Logging level for the MCP server audit logs 21 | # Options: DEBUG, INFO, WARNING, ERROR, CRITICAL 22 | # - DEBUG: Detailed diagnostics (connection reuse, timing, function flow) 23 | # - INFO: Operation logging (tool calls, SSH connections, command execution) 24 | # - WARNING: Authentication failures, retryable errors 25 | # - ERROR: Failed operations, connection failures 26 | # - CRITICAL: Server failures, unrecoverable errors 27 | export LINUX_MCP_LOG_LEVEL="INFO" 28 | 29 | # Custom directory for audit logs 30 | # Default: ~/.local/share/linux-mcp-server/logs/ 31 | # Logs are written in both human-readable text and JSON formats 32 | # export LINUX_MCP_LOG_DIR="/var/log/linux-mcp-server" 33 | 34 | # Number of days to retain rotated log files 35 | # Default: 10 days 36 | # Logs are rotated daily at midnight 37 | export LINUX_MCP_LOG_RETENTION_DAYS="10" 38 | 39 | # ======================================== 40 | # Log File Access Control 41 | # ======================================== 42 | 43 | # Log files that the MCP server is allowed to read 44 | # Add or remove paths as needed for your environment 45 | export LINUX_MCP_ALLOWED_LOG_PATHS="/var/log/messages,/var/log/secure,/var/log/audit/audit.log,/var/log/httpd/error_log,/var/log/httpd/access_log" 46 | 47 | # Common log file locations on RHEL/Fedora systems: 48 | # - /var/log/messages - General system messages 49 | # - /var/log/secure - Authentication and security logs 50 | # - /var/log/audit/audit.log - SELinux audit logs 51 | # - /var/log/httpd/* - Apache web server logs 52 | # - /var/log/nginx/* - Nginx web server logs 53 | # - /var/log/mariadb/mariadb.log - MariaDB database logs 54 | # - /var/log/postgresql/* - PostgreSQL database logs 55 | # - /var/log/firewalld - Firewall logs 56 | 57 | echo "================================" 58 | echo "Linux MCP Server Configuration" 59 | echo "================================" 60 | echo "Audit Log Level: $LINUX_MCP_LOG_LEVEL" 
61 | if [ -n "$LINUX_MCP_LOG_DIR" ]; then 62 | echo "Audit Log Directory: $LINUX_MCP_LOG_DIR" 63 | else 64 | echo "Audit Log Directory: ~/.local/share/linux-mcp-server/logs/ (default)" 65 | fi 66 | echo "Log Retention: $LINUX_MCP_LOG_RETENTION_DAYS days" 67 | echo "Allowed Log Paths: $LINUX_MCP_ALLOWED_LOG_PATHS" 68 | echo "================================" 69 | 70 | -------------------------------------------------------------------------------- /Containerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi10-minimal:10.1-1762952303 as base 2 | 3 | FROM base as build 4 | 5 | RUN microdnf -y --nodocs --setopt=install_weak_deps=0 install \ 6 | git \ 7 | python3.12 \ 8 | python3.12-pip \ 9 | python-unversioned-command \ 10 | && microdnf clean all 11 | 12 | ARG PSEUDO_VERSION=0.1.0a 13 | 14 | ENV VENVS=/opt/venvs 15 | ENV UV_PROJECT=/usr/share/container-setup/linux-mcp-server/ 16 | ENV UV_PROJECT_ENVIRONMENT="${VENVS}"/mcp 17 | ENV UV_PYTHON=/usr/bin/python 18 | ENV PATH=$VENVS/mcp/bin:"$VENVS/uv/bin:$PATH" 19 | 20 | # Provide the version to avoid the need to pass in the .git directory. 21 | # https://setuptools-scm.readthedocs.io/en/latest/usage/#with-dockerpodman 22 | # FIXME: This should be SETUPTOOLS_SCM_PRETEND_VERSION_FOR_${DIST_NAME} but I 23 | # can't figure out what exactly the value for DIST_NAME should be. 24 | ENV SETUPTOOLS_SCM_PRETEND_VERSION=${PSEUDO_VERSION} 25 | 26 | # Add in source files. The .git directory is used by setuptools-scm to determine 27 | # the release version. 28 | ADD uv.lock pyproject.toml README.md "$UV_PROJECT" 29 | ADD src/ "$UV_PROJECT"/src/ 30 | 31 | # Install the application in its own virtual environment 32 | RUN python -m venv /opt/venvs/uv \ 33 | && /opt/venvs/uv/bin/python -m pip install -U pip \ 34 | && /opt/venvs/uv/bin/python -m pip install uv \ 35 | && uv venv --seed "${VENVS}"/mcp \ 36 | && uv sync --no-cache --locked --no-dev --no-editable 37 | 38 | 39 | FROM base as final 40 | 41 | ARG UID=1001 42 | ARG SOURCE_DATE_EPOCH 43 | ARG PSEUDO_VERSION=0.1.0a 44 | ARG VERSION=0.1.0a 45 | 46 | # Indicator the application is running in a container 47 | ENV container=docker 48 | 49 | ENV VENV=/opt/venvs/mcp 50 | ENV PATH="${VENV}/bin:$PATH" 51 | ENV HOME=/var/lib/mcp 52 | 53 | # Application configuration 54 | ENV LINUX_MCP_SEARCH_FOR_SSH_KEY=True 55 | 56 | LABEL com.redhat.component=linux-mcp-server 57 | LABEL cpe="cpe:2.3:a:redhat:linux_mcp_server:-:*:*:*:*:*:*:*" 58 | LABEL description="MCP Server for inspecting Linux" 59 | LABEL distribution-scope=private 60 | LABEL io.k8s.description="MCP Server for inspecting Linux" 61 | LABEL io.k8s.display-name="Linux MCP Server" 62 | LABEL io.openshift.tags="rhel,mcp,linux" 63 | LABEL konflux.additional-tags=${VERSION} 64 | LABEL name=linux-mcp-server 65 | LABEL org.opencontainers.image.created=${SOURCE_DATE_EPOCH} 66 | LABEL release=${PSEUDO_VERSION} 67 | LABEL summary="Linux MCP Server" 68 | LABEL url="https://github.com/rhel-lightspeed/linux-mcp-server" 69 | LABEL vendor="Red Hat, Inc." 
70 | LABEL version=${VERSION} 71 | 72 | ADD licenses/ /licenses/ 73 | ADD LICENSE /licenses/Apache-2.0.txt 74 | 75 | RUN microdnf -y --nodocs --setopt=install_weak_deps=0 install \ 76 | git \ 77 | openssh \ 78 | python3.12 \ 79 | python-unversioned-command \ 80 | && microdnf clean all 81 | 82 | COPY --from=build /opt/venvs/mcp /opt/venvs/mcp 83 | 84 | RUN useradd --key HOME_MODE=0775 --uid "$UID" --gid 0 --create-home --home-dir "$HOME" mcp 85 | 86 | USER mcp 87 | WORKDIR $HOME 88 | 89 | CMD ["linux-mcp-server"] 90 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | 12 | env: 13 | PIP_DISABLE_PIP_VERSION_CHECK: 1 14 | COVERAGE_IGOR_VERBOSE: 1 15 | FORCE_COLOR: 1 # Get colored pytest output 16 | 17 | 18 | jobs: 19 | sanity: 20 | name: Sanity - ${{ matrix.test.name }} 21 | runs-on: ubuntu-latest 22 | 23 | strategy: 24 | fail-fast: false 25 | matrix: 26 | test: 27 | - name: Type 28 | step_name: Check types 29 | command: pyright 30 | 31 | - name: Lint 32 | step_name: Run lint 33 | command: ruff check --diff 34 | 35 | - name: Format 36 | step_name: Run format check 37 | command: ruff format --diff 38 | 39 | steps: 40 | - name: Checkout 41 | uses: actions/checkout@v5.0.0 42 | 43 | - name: Install Python 44 | uses: actions/setup-python@v6.0.0 45 | with: 46 | python-version-file: pyproject.toml 47 | 48 | - name: Install uv 49 | uses: astral-sh/setup-uv@v7.1.0 50 | with: 51 | enable-cache: true 52 | prune-cache: false 53 | 54 | - name: Install dependencies 55 | run: uv sync --locked --group lint 56 | 57 | - name: "Show environment" 58 | run: | 59 | python -VV 60 | python -m site 61 | uv --version 62 | env | sort 63 | ls -ld $(python -c "import site; print(site.getsitepackages()[0])") 64 | whoami 65 | 66 | - name: ${{ matrix.test.step_name }} 67 | run: uv run --locked ${{ matrix.test.command }} 68 | 69 | tests: 70 | name: Unit - ${{ matrix.python-version }} 71 | runs-on: ubuntu-latest 72 | 73 | strategy: 74 | fail-fast: true 75 | matrix: 76 | python-version: 77 | - "3.10" 78 | - "3.11" 79 | - "3.12" 80 | - "3.13" 81 | - "3.13t" 82 | - "3.14" 83 | - "3.14t" 84 | 85 | steps: 86 | - name: Checkout 87 | uses: actions/checkout@v5.0.0 88 | 89 | - name: Install Python 90 | uses: actions/setup-python@v6.0.0 91 | with: 92 | python-version-file: pyproject.toml 93 | 94 | - name: Install uv 95 | uses: astral-sh/setup-uv@v7.1.0 96 | with: 97 | enable-cache: true 98 | prune-cache: false 99 | 100 | - name: Install dependencies 101 | run: uv sync --locked --group test 102 | 103 | - name: Show environment 104 | run: | 105 | set -x 106 | python -VV 107 | python -m site 108 | uv --version 109 | ls -ld $(python -c "import site; print(site.getsitepackages()[0])") 110 | whoami 111 | 112 | - name: Run tests 113 | run: uv run --locked pytest --cov-report=xml 114 | 115 | - name: Upload coverage report 116 | uses: codecov/codecov-action@v5.5.1 117 | with: 118 | env_vars: OS,PYTHON 119 | disable_search: true 120 | files: coverage/coverage.xml 121 | fail_ci_if_error: true 122 | flags: unittests 123 | -------------------------------------------------------------------------------- /tests/tools/test_system_info.py: -------------------------------------------------------------------------------- 1 | """Tests for system information tools.""" 2 | 3 | import sys 4 | 5 | import pytest 6 
| 7 | from linux_mcp_server.tools import system_info
8 | 
9 | 
10 | class TestSystemInfo:
11 |     """Test system information tools."""
12 | 
13 |     async def test_get_system_info_returns_string(self):
14 |         """Test that get_system_information returns a string."""
15 |         result = await system_info.get_system_information()
16 |         assert isinstance(result, str)
17 |         assert len(result) > 0
18 | 
19 |     async def test_get_system_info_contains_key_information(self):
20 |         """Test that system info contains essential data."""
21 |         result = await system_info.get_system_information()
22 | 
23 |         # Should contain key system information
24 |         assert "Hostname" in result or "hostname" in result.lower()
25 |         assert "Kernel" in result or "kernel" in result.lower()
26 |         assert "OS" in result or "operating system" in result.lower()
27 |         assert "Uptime" in result or "uptime" in result.lower()
28 | 
29 |     async def test_get_cpu_info_returns_string(self):
30 |         """Test that get_cpu_information returns a string."""
31 |         result = await system_info.get_cpu_information()
32 |         assert isinstance(result, str)
33 |         assert len(result) > 0
34 | 
35 |     async def test_get_cpu_info_contains_cpu_data(self):
36 |         """Test that CPU info contains relevant data."""
37 |         result = await system_info.get_cpu_information()
38 | 
39 |         # Should contain CPU information
40 |         assert "CPU" in result or "cpu" in result.lower() or "processor" in result.lower()
41 |         assert "load" in result.lower() or "usage" in result.lower()
42 | 
43 |     async def test_get_memory_info_returns_string(self):
44 |         """Test that get_memory_information returns a string."""
45 |         result = await system_info.get_memory_information()
46 |         assert isinstance(result, str)
47 |         assert len(result) > 0
48 | 
49 |     async def test_get_memory_info_contains_memory_data(self):
50 |         """Test that memory info contains RAM and swap information."""
51 |         result = await system_info.get_memory_information()
52 | 
53 |         # Should contain memory information
54 |         assert "memory" in result.lower() or "ram" in result.lower()
55 |         assert "total" in result.lower()
56 |         assert "used" in result.lower() or "available" in result.lower()
57 | 
58 |     async def test_get_disk_usage_returns_string(self):
59 |         """Test that get_disk_usage returns a string."""
60 |         result = await system_info.get_disk_usage()
61 |         assert isinstance(result, str)
62 |         assert len(result) > 0
63 | 
64 |     async def test_get_disk_usage_contains_filesystem_data(self):
65 |         """Test that disk usage contains filesystem information."""
66 |         result = await system_info.get_disk_usage()
67 | 
68 |         # Should contain disk usage information
69 |         assert "filesystem" in result.lower() or "device" in result.lower() or "mounted" in result.lower()
70 |         assert "/" in result # Should at least show root filesystem
71 | 
72 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
73 |     async def test_get_hardware_info_returns_string(self):
74 |         """Test that get_hardware_information returns a string."""
75 |         result = await system_info.get_hardware_information()
76 |         assert isinstance(result, str)
77 |         assert len(result) > 0
78 | 
--------------------------------------------------------------------------------
/tests/connection/ssh/test_timeout.py:
--------------------------------------------------------------------------------
1 | from contextlib import nullcontext
2 | from unittest.mock import AsyncMock
3 | from unittest.mock import Mock
4 | 
5 | import asyncssh
6 | import pytest
7 | 
8 | from asyncssh import SSHClientConnection
9 | 
10 | from 
linux_mcp_server.connection.ssh import SSHConnectionManager 11 | 12 | 13 | def _make_success_mock(): 14 | """Create a mock that returns successful command output.""" 15 | return AsyncMock(return_value=Mock(exit_status=0, stdout="ok", stderr="")) 16 | 17 | 18 | def _make_timeout_mock(): 19 | """Create a mock that raises asyncssh.TimeoutError.""" 20 | timeout_error = asyncssh.TimeoutError( 21 | env=None, 22 | command="cmd", 23 | subsystem=None, 24 | exit_status=None, 25 | exit_signal=None, 26 | returncode=None, 27 | stdout="", 28 | stderr="", 29 | ) 30 | return AsyncMock(side_effect=timeout_error) 31 | 32 | 33 | @pytest.fixture 34 | def ssh_manager(): 35 | """Provide a clean SSH manager for each test.""" 36 | manager = SSHConnectionManager() 37 | manager._connections.clear() 38 | return manager 39 | 40 | 41 | @pytest.fixture 42 | def mock_ssh_connection(mocker): 43 | """Provide a mock SSH connection with asyncssh.connect patched.""" 44 | mock_conn = Mock(spec=SSHClientConnection, _username="testuser") 45 | mock_conn.is_closed.return_value = False 46 | 47 | mock_connect = AsyncMock(spec=asyncssh.connect) 48 | mock_connect.return_value = mock_conn 49 | 50 | mocker.patch("asyncssh.connect", mock_connect) 51 | 52 | return mock_conn 53 | 54 | 55 | @pytest.mark.parametrize( 56 | ("global_timeout", "per_cmd_timeout", "expected_timeout", "mock_run_factory", "expectation"), 57 | [ 58 | # Success cases - verify timeout is passed correctly 59 | (30, None, 30, _make_success_mock, nullcontext()), 60 | (30, 60, 60, _make_success_mock, nullcontext()), 61 | # Timeout error cases - verify asyncssh.TimeoutError is handled 62 | (30, None, 30, _make_timeout_mock, pytest.raises(ConnectionError, match="Command timed out")), 63 | (60, 10, 10, _make_timeout_mock, pytest.raises(ConnectionError, match="Command timed out")), 64 | ], 65 | ids=[ 66 | "uses_global_timeout", 67 | "per_cmd_overrides_global", 68 | "global_timeout_error", 69 | "per_cmd_timeout_error", 70 | ], 71 | ) 72 | async def test_timeout_behavior( 73 | mocker, 74 | ssh_manager, 75 | mock_ssh_connection, 76 | global_timeout, 77 | per_cmd_timeout, 78 | expected_timeout, 79 | mock_run_factory, 80 | expectation, 81 | ): 82 | """Test timeout parameter passing and error handling.""" 83 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.command_timeout", global_timeout) 84 | mocker.patch("linux_mcp_server.connection.ssh.get_remote_bin_path", return_value=("/usr/bin/cmd")) 85 | mock_ssh_connection.run = mock_run_factory() 86 | 87 | with expectation: 88 | # Only pass timeout if explicitly set (None means use default) 89 | kwargs = {} 90 | if per_cmd_timeout is not None: 91 | kwargs["timeout"] = per_cmd_timeout 92 | 93 | returncode, stdout, _ = await ssh_manager.execute_remote(["cmd"], "host", **kwargs) 94 | call_kwargs = mock_ssh_connection.run.call_args.kwargs 95 | 96 | assert returncode == 0 97 | assert stdout == "ok" 98 | assert call_kwargs["timeout"] == expected_timeout 99 | 100 | 101 | async def test_timeout_error_contains_context(mocker, ssh_manager, mock_ssh_connection): 102 | """Test that timeout error message includes command, host, and user context.""" 103 | mock_ssh_connection.run = _make_timeout_mock() 104 | mocker.patch("linux_mcp_server.connection.ssh.get_remote_bin_path", return_value=("/usr/bin/mycommand")) 105 | 106 | with pytest.raises(ConnectionError) as exc_info: 107 | await ssh_manager.execute_remote(["mycommand", "arg1"], "myhost.example.com", timeout=5) 108 | 109 | error_msg = str(exc_info.value) 110 | 111 | assert 
"testuser@myhost.example.com" in error_msg 112 | assert "mycommand" in error_msg 113 | assert "5s" in error_msg 114 | -------------------------------------------------------------------------------- /docs/Debugging.md: -------------------------------------------------------------------------------- 1 | # Debug Logging and Diagnostics 2 | 3 | This document describes how to enable and use logging to debug and monitor the MCP server operations. 4 | 5 | ## Overview 6 | 7 | The Linux MCP Server provides comprehensive logging for: 8 | - Tool invocations with parameters 9 | - SSH connection events 10 | - Command execution (local and remote) 11 | - Tool execution timing 12 | - Errors and exceptions 13 | 14 | Logging is centralized in the server layer with tiered verbosity based on log level. 15 | 16 | ## Enabling Debug Logging 17 | 18 | Set the `LINUX_MCP_LOG_LEVEL` environment variable to `DEBUG`: 19 | 20 | ```bash 21 | export LINUX_MCP_LOG_LEVEL=DEBUG 22 | ``` 23 | 24 | ## Log Output Locations 25 | 26 | Logs are written to two formats: 27 | 28 | 1. **Human-readable**: `~/.local/share/linux-mcp-server/logs/server.log` 29 | 2. **JSON format**: `~/.local/share/linux-mcp-server/logs/server.json` 30 | 31 | You can customize the log directory with: 32 | 33 | ```bash 34 | export LINUX_MCP_LOG_DIR=/path/to/your/logs 35 | ``` 36 | 37 | ## Example Log Output 38 | 39 | ### Human-Readable Format (INFO level) 40 | 41 | ``` 42 | 2025-10-10 15:30:45.123 | INFO | linux_mcp_server.audit | TOOL_CALL: list_directories | path=/home/user, order_by=size, sort=descending, top_n=10 | event=TOOL_CALL | tool=list_directories | execution_mode=local 43 | 2025-10-10 15:30:45.456 | INFO | linux_mcp_server.audit | TOOL_COMPLETE: list_directories | event=TOOL_COMPLETE | tool=list_directories | status=success | duration=0.333s 44 | ``` 45 | 46 | ### Human-Readable Format (DEBUG level - shows command execution) 47 | 48 | ``` 49 | 2025-10-10 15:30:45.123 | INFO | linux_mcp_server.audit | TOOL_CALL: list_directories | path=/home/user, order_by=size, sort=descending, top_n=10 | event=TOOL_CALL | tool=list_directories | execution_mode=local 50 | 2025-10-10 15:30:45.234 | DEBUG | linux_mcp_server.connection.ssh| LOCAL_EXEC completed: du -b --max-depth=1 /home/user | exit_code=0 | duration=0.200s 51 | 2025-10-10 15:30:45.456 | INFO | linux_mcp_server.audit | TOOL_COMPLETE: list_directories | event=TOOL_COMPLETE | tool=list_directories | status=success | duration=0.333s 52 | ``` 53 | 54 | ### JSON Format 55 | 56 | ```json 57 | { 58 | "timestamp": "2025-10-10T15:30:45", 59 | "level": "INFO", 60 | "logger": "linux_mcp_server.audit", 61 | "message": "TOOL_CALL: list_directories | path=/home/user, order_by=size, sort=descending, top_n=10", 62 | "event": "TOOL_CALL", 63 | "tool": "list_directories", 64 | "execution_mode": "local" 65 | } 66 | ``` 67 | 68 | ## Implementation 69 | 70 | Logging is centralized in `src/linux_mcp_server/audit.py` using the `log_tool_call()` decorator. 71 | 72 | ```python 73 | @mcp.tool() 74 | @log_tool_call 75 | async def list_directories(path: str, order_by: OrderBy, sort: SortBy, top_n: int | None) -> list[DirectoryEntry]: 76 | ... 
77 | ``` 78 | 79 | The `audit.py` module provides structured logging functions: 80 | - `log_tool_call()`: Logs tool invocation with parameters 81 | - `log_ssh_connect()`: Logs SSH connection events 82 | - `log_ssh_command()`: Logs remote command execution 83 | 84 | ## Log Levels 85 | 86 | ### INFO Level 87 | - Tool invocations with parameters 88 | - Tool completion with status and timing 89 | - SSH connection success/failure 90 | - Remote command execution 91 | 92 | ### DEBUG Level 93 | - Detailed command execution timing 94 | - SSH connection pool state 95 | - Local command execution details 96 | - All INFO level events plus detailed diagnostics 97 | 98 | ## Benefits 99 | 100 | 1. **Centralized Logging**: All logging happens in one place (server.py + audit.py) 101 | 2. **Structured Data**: Both human-readable and JSON formats available 102 | 3. **Audit Trail**: Complete record of all operations with timing 103 | 4. **SSH Monitoring**: Track remote connections and command execution 104 | 5. **Performance Insights**: Execution duration for every tool call 105 | 106 | ## Use Cases 107 | 108 | - **Debugging**: Track tool invocations and identify issues 109 | - **Auditing**: Complete record of all operations 110 | - **Performance**: Monitor execution times 111 | - **SSH Troubleshooting**: Debug connection and authentication issues 112 | - **Development**: Understand tool behavior during testing 113 | 114 | -------------------------------------------------------------------------------- /.coderabbit.yaml: -------------------------------------------------------------------------------- 1 | # CodeRabbit Configuration for linux-mcp-server 2 | # Docs: https://docs.coderabbit.ai/configuration/ 3 | 4 | language: en-US 5 | tone_instructions: "Focus on security, architecture, logic bugs, race conditions. Skip style (ruff), types (pyright), coverage (CI enforces). Be concise." 6 | 7 | reviews: 8 | profile: chill 9 | collapse_walkthrough: true 10 | poem: false 11 | request_changes_workflow: false 12 | high_level_summary: true 13 | high_level_summary_placeholder: "@coderabbitai summary" 14 | abort_on_close: true 15 | 16 | auto_review: 17 | enabled: true 18 | drafts: false 19 | base_branches: 20 | - main 21 | ignore_title_keywords: 22 | - "WIP" 23 | - "[WIP]" 24 | - "DO NOT MERGE" 25 | 26 | path_filters: 27 | - "!**/__pycache__/**" 28 | - "!**/.venv/**" 29 | - "!**/.pytest_cache/**" 30 | - "!**/.ruff_cache/**" 31 | - "!**/dist/**" 32 | - "!**/*.egg-info/**" 33 | - "!**/htmlcov/**" 34 | - "!**/.coverage" 35 | - "!**/uv.lock" 36 | 37 | path_instructions: 38 | - path: "src/**/*.py" 39 | # NOTE(major): Adjust the read-only line at the top when 40 | # write operations are allowed. 41 | instructions: | 42 | Focus on security and architecture: 43 | - CRITICAL: All operations MUST be read-only. Flag any write operations. 
44 | - Check for TOCTOU race conditions (use try-except, not check-then-use) 45 | - psutil exceptions pattern: except (psutil.NoSuchProcess, psutil.AccessDenied) 46 | - Verify async/await patterns (no blocking calls in async functions) 47 | - Ensure proper exception handling with graceful degradation 48 | - Error messages must be clear enough for LLMs to understand and act on 49 | - Check input validation before shell commands (command injection) 50 | - Verify MCP decorator order: @mcp.tool, @log_tool_call, @disallow_local_execution_in_containers 51 | - Host parameter pattern: host: Host | None = None (supports local and remote) 52 | - Resource cleanup: Verify SSH connections use context managers 53 | - Timeout handling: All network/SSH operations must have timeouts 54 | - Container safety: Flag operations that won't work in containers without explicit checks 55 | - Don't nitpick style (ruff handles) or types (pyright validates) 56 | 57 | - path: "src/**/config.py" 58 | instructions: | 59 | Configuration validation: 60 | - Centralize derived config logic (defaults, path resolution) in config class 61 | - Validate environment variables at startup, not lazily 62 | - Check for secure defaults 63 | 64 | - path: "src/**/connection/*.py" 65 | instructions: | 66 | SSH/connection handling: 67 | - All SSH operations must have timeouts 68 | - Prefer library-native async features (asyncssh timeout param) over wrappers (asyncio.wait_for) 69 | - Connection errors must include host information for debugging 70 | - Resource cleanup: Use context managers for all connections 71 | - Key handling: No plaintext passwords, only key-based auth 72 | 73 | - path: "tests/**/*.py" 74 | instructions: | 75 | Focus on test quality and meaningfulness: 76 | - Tests should verify behavior, not just chase coverage. A test that doesn't assert meaningful outcomes is worse than no test. 77 | - Prefer parameterized tests over duplicate test functions 78 | - Mock specs should be provided: AsyncMock(spec=SomeClass) 79 | - Use pytest.raises with nullcontext() pattern, not boolean flags 80 | - Use fixtures for reusable test components, not helper functions 81 | - Check for edge cases: process disappearing, permission denied, empty output 82 | - Verify both local (psutil) and remote (SSH) code paths tested 83 | - Don't nitpick coverage percentages (CI enforces 70%+ overall, 100% patch) 84 | 85 | - path: "pyproject.toml" 86 | instructions: | 87 | Check dependency versions and configuration consistency. 88 | Dev tools: uv (package manager), ruff (linting/formatting), pyright (type checking), pytest (testing). 
89 | 
90 |     - path: ".github/workflows/**"
91 |       instructions: "Validate workflow syntax and security (no secrets in logs)"
92 | 
93 |     - path: "Makefile"
94 |       instructions: |
95 |         Makefile validation:
96 |         - .PHONY declarations for all non-file targets
97 |         - Avoid flags that are already defaults
98 | 
99 |     - path: "Containerfile"
100 |       instructions: "Use Podman + Containerfile conventions (not Docker + Dockerfile)"
101 | 
102 | chat:
103 |   auto_reply: true
104 | 
105 | knowledge_base:
106 |   learnings:
107 |     scope: global
108 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling", "hatch-vcs"]
3 | build-backend = "hatchling.build"
4 | 
5 | [project]
6 | name = "linux-mcp-server"
7 | description = "MCP server for read-only Linux system administration, diagnostics, and troubleshooting"
8 | authors = [
9 |     { name = "RHEL Lightspeed" }
10 | ]
11 | readme = "README.md"
12 | requires-python = ">=3.10"
13 | dependencies = [
14 |     "mcp>=0.9.0",
15 |     "psutil>=5.9.0",
16 |     "asyncssh>=2.14.0",
17 |     "bcrypt",
18 |     "pydantic-settings>=2.12.0",
19 |     "pydantic>=2.12.4",
20 | ]
21 | dynamic = ["version"]
22 | license-files = ["LICENSE", "licenses/GPL-3.0.txt"]
23 | classifiers = [
24 |     "Development Status :: 2 - Pre-Alpha",
25 |     "Environment :: Console",
26 |     "Intended Audience :: Developers",
27 |     "Intended Audience :: Information Technology",
28 |     "Intended Audience :: System Administrators",
29 |     "License :: OSI Approved :: Apache Software License",
30 |     "Natural Language :: English",
31 |     "Operating System :: POSIX :: Linux",
32 |     "Programming Language :: Python :: 3",
33 |     "Programming Language :: Python :: 3.10",
34 |     "Programming Language :: Python :: 3.11",
35 |     "Programming Language :: Python :: 3.12",
36 |     "Programming Language :: Python :: 3.13",
37 |     "Programming Language :: Python :: 3.14",
38 |     "Programming Language :: Python :: 3 :: Only",
39 |     "Topic :: System :: Systems Administration",
40 |     "Topic :: Utilities",
41 | ]
42 | 
43 | [project.urls]
44 | "Source code" = "https://github.com/rhel-lightspeed/linux-mcp-server"
45 | "Bug Tracker" = "https://github.com/rhel-lightspeed/linux-mcp-server/issues"
46 | 
47 | [project.scripts]
48 | linux-mcp-server = "linux_mcp_server.__main__:cli"
49 | 
50 | [dependency-groups]
51 | dev = [
52 |     "ipdb",
53 |     "ipython",
54 |     {include-group = "lint"},
55 |     {include-group = "test"},
56 | ]
57 | 
58 | lint = [
59 |     "pyright",
60 |     "ruff",
61 | ]
62 | 
63 | test = [
64 |     "pytest-asyncio>=0.23.0",
65 |     "pytest-cov>=4.1.0",
66 |     "pytest-mock",
67 |     "pytest>=8.0.0",
68 | ]
69 | 
70 | [tool.hatch.build.targets.wheel]
71 | packages = ["src/linux_mcp_server"]
72 | 
73 | [tool.hatch.version]
74 | source = "vcs"
75 | 
76 | [tool.pytest.ini_options]
77 | addopts = [
78 |     "-r", "a",
79 |     "--verbose",
80 |     "--strict-markers",
81 |     "--cov", "src",
82 |     "--cov", "tests",
83 |     "--cov-report", "html",
84 |     "--cov-report", "term-missing:skip-covered",
85 |     "--durations-min", "1",
86 |     "--durations", "10",
87 |     "--color", "yes",
88 |     "--showlocals",
89 |     "--pdbcls", "IPython.terminal.debugger:TerminalPdb",
90 | ]
91 | asyncio_mode = "auto"
92 | asyncio_default_fixture_loop_scope = "session"
93 | pythonpath = "src"
94 | testpaths = ["tests"]
95 | 
96 | [tool.coverage.run]
97 | branch = true
98 | data_file = "coverage/.data"
99 | source = [
100 |     "src",
101 |     "tests",
102 |     "src/linux_mcp_server/_vendor/__init__.py",
103 | ]
104 | omit = [ 105 | "src/linux_mcp_server/_vendor/*/*", 106 | ] 107 | 108 | [tool.coverage.report] 109 | exclude_lines = [ 110 | "pragma: no cover", 111 | "def __repr__", 112 | "raise AssertionError", 113 | "raise NotImplementedError", 114 | "if __name__ == .__main__.:", 115 | ] 116 | 117 | [tool.coverage.html] 118 | directory = "coverage/htmlcov" 119 | 120 | [tool.coverage.json] 121 | output = "coverage/coverage.json" 122 | 123 | [tool.coverage.xml] 124 | output = "coverage/coverage.xml" 125 | 126 | [tool.gha-update] 127 | tag-only = [ 128 | "actions/checkout", 129 | "actions/download-artifact", 130 | "actions/setup-python", 131 | "actions/upload-artifact", 132 | "astral-sh/setup-uv", 133 | "codecov/codecov-action", 134 | "pypa/gh-action-pypi-publish", 135 | ] 136 | 137 | [tool.pyright] 138 | include = [ 139 | "src", 140 | "tests", 141 | ] 142 | ignore = [ 143 | "src/linux_mcp_server/_vendor/*/*" 144 | ] 145 | pythonVersion = "3.10" 146 | 147 | [tool.ruff] 148 | line-length = 120 149 | indent-width = 4 150 | 151 | [tool.ruff.lint] 152 | # https://docs.astral.sh/ruff/rules/ 153 | # Enable ruff rules to act like flake8 154 | select = [ 155 | "E", # pycodestyle (formerly part of flake8) 156 | "F", # pyflakes (formerly part of flake8) 157 | "I", # import order (like isort) 158 | # "B", # flake8-bugbear 159 | "C90", # flake8-comprehensions 160 | "RUF100", # unused-noqa 161 | "T20", # flake8-print 162 | ] 163 | mccabe.max-complexity = 12 164 | 165 | # Exclude specific rules if needed 166 | ignore = [ 167 | "E501", # Ignore line length (similar to flake8's max-line-length) 168 | ] 169 | 170 | 171 | [tool.ruff.lint.isort] 172 | case-sensitive = false 173 | force-single-line = true 174 | lines-after-imports = 2 175 | lines-between-types = 1 176 | order-by-type = false 177 | -------------------------------------------------------------------------------- /src/linux_mcp_server/logging_config.py: -------------------------------------------------------------------------------- 1 | """Centralized logging configuration for Linux MCP Server. 2 | 3 | Simplified logging setup with standard Python logging infrastructure. 4 | Supports structured logging with extra fields for audit and diagnostic purposes. 5 | """ 6 | 7 | import json 8 | import logging 9 | import logging.handlers 10 | 11 | from linux_mcp_server.config import CONFIG 12 | 13 | 14 | def get_log_level() -> int: 15 | """Get the log level from environment variable (defaults to INFO).""" 16 | level_name = CONFIG.log_level 17 | return getattr(logging, level_name, logging.INFO) 18 | 19 | 20 | def get_retention_days() -> int: 21 | """Get the log retention days from environment variable (defaults to 10).""" 22 | try: 23 | return int(CONFIG.log_retention_days) 24 | except ValueError: 25 | return 10 26 | 27 | 28 | class StructuredFormatter(logging.Formatter): 29 | """ 30 | Structured log formatter supporting extra fields. 31 | 32 | Format: TIMESTAMP | LEVEL | MODULE | MESSAGE | key=value ... 33 | Extra fields added to LogRecord are appended as key=value pairs. 
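    Example (record carrying extra fields "event" and "tool"):
        2025-10-10 15:30:45 | INFO | linux_mcp_server.audit | TOOL_CALL: list_services | event=TOOL_CALL | tool=list_services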
34 | """ 35 | 36 | STANDARD_FIELDS = { 37 | "name", 38 | "msg", 39 | "args", 40 | "created", 41 | "filename", 42 | "funcName", 43 | "levelname", 44 | "levelno", 45 | "lineno", 46 | "module", 47 | "msecs", 48 | "message", 49 | "pathname", 50 | "process", 51 | "processName", 52 | "relativeCreated", 53 | "thread", 54 | "threadName", 55 | "exc_info", 56 | "exc_text", 57 | "stack_info", 58 | "asctime", 59 | "taskName", 60 | } 61 | 62 | def format(self, record: logging.LogRecord) -> str: 63 | """Format a log record with extra fields.""" 64 | # Base message 65 | base_msg = super().format(record) 66 | 67 | # Append extra fields as key=value pairs 68 | extra_fields = [f"{k}={v}" for k, v in record.__dict__.items() if k not in self.STANDARD_FIELDS] 69 | 70 | return f"{base_msg} | {' | '.join(extra_fields)}" if extra_fields else base_msg 71 | 72 | 73 | class JSONFormatter(logging.Formatter): 74 | """JSON log formatter for machine-readable logs.""" 75 | 76 | EXCLUDE_FIELDS = { 77 | "args", 78 | "exc_text", 79 | "exc_info", 80 | "stack_info", 81 | "filename", 82 | "funcName", 83 | "lineno", 84 | "module", 85 | "msecs", 86 | "pathname", 87 | "process", 88 | "processName", 89 | "relativeCreated", 90 | "thread", 91 | "threadName", 92 | "taskName", 93 | } 94 | 95 | def format(self, record: logging.LogRecord) -> str: 96 | """Format a log record as JSON.""" 97 | log_data = { 98 | "timestamp": self.formatTime(record, self.datefmt), 99 | "level": record.levelname, 100 | "logger": record.name, 101 | "message": record.getMessage(), 102 | } 103 | 104 | # Add exception info if present 105 | if record.exc_info: 106 | log_data["exception"] = self.formatException(record.exc_info) 107 | 108 | # Add extra fields 109 | for key, value in record.__dict__.items(): 110 | if ( 111 | key not in self.EXCLUDE_FIELDS 112 | and key not in log_data 113 | and key not in {"name", "msg", "levelname", "levelno", "created"} 114 | ): 115 | log_data[key] = value 116 | 117 | return json.dumps(log_data) 118 | 119 | 120 | def setup_logging(): 121 | """Set up logging with structured formatters and rotation.""" 122 | log_dir = CONFIG.log_dir 123 | log_dir.mkdir(parents=True, exist_ok=True) 124 | log_level = get_log_level() 125 | retention_days = get_retention_days() 126 | 127 | # Configure root logger 128 | root_logger = logging.getLogger() 129 | root_logger.setLevel(log_level) 130 | root_logger.handlers.clear() # Remove existing handlers 131 | 132 | # Human-readable text log 133 | text_handler = logging.handlers.TimedRotatingFileHandler( 134 | filename=log_dir / "server.log", 135 | when="midnight", 136 | interval=1, 137 | backupCount=retention_days, 138 | encoding="utf-8", 139 | ) 140 | text_handler.setLevel(log_level) 141 | text_handler.setFormatter( 142 | StructuredFormatter("%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S") 143 | ) 144 | text_handler.suffix = "%Y-%m-%d" 145 | root_logger.addHandler(text_handler) 146 | 147 | # JSON log 148 | json_handler = logging.handlers.TimedRotatingFileHandler( 149 | filename=log_dir / "server.json", 150 | when="midnight", 151 | interval=1, 152 | backupCount=retention_days, 153 | encoding="utf-8", 154 | ) 155 | json_handler.setLevel(log_level) 156 | json_handler.setFormatter(JSONFormatter(datefmt="%Y-%m-%dT%H:%M:%S")) 157 | json_handler.suffix = "%Y-%m-%d" 158 | root_logger.addHandler(json_handler) 159 | 160 | # Console handler for development 161 | console_handler = logging.StreamHandler() 162 | console_handler.setLevel(log_level) 163 | console_handler.setFormatter( 
164 | StructuredFormatter("%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S") 165 | ) 166 | root_logger.addHandler(console_handler) 167 | 168 | root_logger.info(f"Logging initialized: {log_dir}") 169 | -------------------------------------------------------------------------------- /src/linux_mcp_server/tools/services.py: -------------------------------------------------------------------------------- 1 | """Service management tools.""" 2 | 3 | import typing as t 4 | 5 | from mcp.types import ToolAnnotations 6 | from pydantic import Field 7 | 8 | from linux_mcp_server.audit import log_tool_call 9 | from linux_mcp_server.connection.ssh import execute_command 10 | from linux_mcp_server.server import mcp 11 | from linux_mcp_server.utils.decorators import disallow_local_execution_in_containers 12 | from linux_mcp_server.utils.types import Host 13 | from linux_mcp_server.utils.validation import validate_line_count 14 | 15 | 16 | @mcp.tool( 17 | title="List services", 18 | description="List all systemd services.", 19 | annotations=ToolAnnotations(readOnlyHint=True), 20 | ) 21 | @log_tool_call 22 | @disallow_local_execution_in_containers 23 | async def list_services( 24 | host: Host | None = None, 25 | ) -> str: 26 | """ 27 | List all systemd services. 28 | """ 29 | try: 30 | # Run systemctl to list all services 31 | returncode, stdout, stderr = await execute_command( 32 | ["systemctl", "list-units", "--type=service", "--all", "--no-pager"], 33 | host=host, 34 | ) 35 | 36 | if returncode != 0: 37 | return f"Error listing services: {stderr}" 38 | 39 | # Format the output 40 | result = ["=== System Services ===\n"] 41 | result.append(stdout) 42 | 43 | # Get summary 44 | returncode_summary, stdout_summary, _ = await execute_command( 45 | ["systemctl", "list-units", "--type=service", "--state=running", "--no-pager"], 46 | host=host, 47 | ) 48 | 49 | if returncode_summary == 0: 50 | running_count = len([line for line in stdout_summary.split("\n") if ".service" in line]) 51 | result.append(f"\n\nSummary: {running_count} services currently running") 52 | 53 | return "\n".join(result) 54 | except FileNotFoundError: 55 | return "Error: systemctl command not found. This tool requires systemd." 56 | except Exception as e: 57 | return f"Error listing services: {str(e)}" 58 | 59 | 60 | @mcp.tool( 61 | title="Get service status", 62 | description="Get detailed status of a specific systemd service.", 63 | annotations=ToolAnnotations(readOnlyHint=True), 64 | ) 65 | @log_tool_call 66 | @disallow_local_execution_in_containers 67 | async def get_service_status( 68 | service_name: t.Annotated[str, Field(description="Name of the service")], 69 | host: Host | None = None, 70 | ) -> str: 71 | """ 72 | Get status of a specific service. 73 | """ 74 | try: 75 | # Ensure service name has .service suffix if not present 76 | if not service_name.endswith(".service") and "." not in service_name: 77 | service_name = f"{service_name}.service" 78 | 79 | # Run systemctl status 80 | _, stdout, stderr = await execute_command( 81 | ["systemctl", "status", service_name, "--no-pager", "--full"], 82 | host=host, 83 | ) 84 | 85 | # Note: systemctl status returns non-zero for inactive services, but that's expected 86 | if not stdout and stderr: 87 | # Service not found 88 | if "not found" in stderr.lower() or "could not be found" in stderr.lower(): 89 | return f"Service '{service_name}' not found on this system." 
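            # Any other empty-output failure: surface systemctl's stderr as-is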
90 | return f"Error getting service status: {stderr}" 91 | 92 | result = [f"=== Status of {service_name} ===\n"] 93 | result.append(stdout) 94 | 95 | return "\n".join(result) 96 | except FileNotFoundError: 97 | return "Error: systemctl command not found. This tool requires systemd." 98 | except Exception as e: 99 | return f"Error getting service status: {str(e)}" 100 | 101 | 102 | @mcp.tool( 103 | title="Get service logs", 104 | description="Get recent logs for a specific systemd service.", 105 | annotations=ToolAnnotations(readOnlyHint=True), 106 | ) 107 | @log_tool_call 108 | @disallow_local_execution_in_containers 109 | async def get_service_logs( 110 | service_name: t.Annotated[str, Field(description="Name of the service")], 111 | lines: t.Annotated[int, Field(description="Number of log lines to retrieve.")] = 50, 112 | host: Host | None = None, 113 | ) -> str: 114 | """ 115 | Get logs for a specific service. 116 | """ 117 | try: 118 | # Validate lines parameter (accepts floats from LLMs) 119 | lines, _ = validate_line_count(lines, default=50) 120 | 121 | # Ensure service name has .service suffix if not present 122 | if not service_name.endswith(".service") and "." not in service_name: 123 | service_name = f"{service_name}.service" 124 | 125 | # Run journalctl for the service 126 | returncode, stdout, stderr = await execute_command( 127 | ["journalctl", "-u", service_name, "-n", str(lines), "--no-pager"], 128 | host=host, 129 | ) 130 | 131 | if returncode != 0: 132 | if "not found" in stderr.lower() or "no entries" in stderr.lower(): 133 | return f"No logs found for service '{service_name}'. The service may not exist or has no log entries." 134 | return f"Error getting service logs: {stderr}" 135 | 136 | if not stdout or stdout.strip() == "": 137 | return f"No log entries found for service '{service_name}'." 138 | 139 | result = [f"=== Last {lines} log entries for {service_name} ===\n"] 140 | result.append(stdout) 141 | 142 | return "\n".join(result) 143 | except FileNotFoundError: 144 | return "Error: journalctl command not found. This tool requires systemd." 
145 |     except Exception as e:
146 |         return f"Error getting service logs: {str(e)}"
147 | 
--------------------------------------------------------------------------------
/tests/tools/test_services.py:
--------------------------------------------------------------------------------
1 | """Tests for service management tools."""
2 | 
3 | import sys
4 | 
5 | from unittest.mock import AsyncMock
6 | 
7 | import pytest
8 | 
9 | from linux_mcp_server.tools import services
10 | 
11 | 
12 | class TestServices:
13 |     """Test service management tools."""
14 | 
15 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
16 |     async def test_list_services_returns_string(self):
17 |         """Test that list_services returns a string."""
18 |         result = await services.list_services()
19 |         assert isinstance(result, str)
20 |         assert len(result) > 0
21 | 
22 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
23 |     async def test_list_services_contains_service_info(self):
24 |         """Test that list_services contains service information."""
25 |         result = await services.list_services()
26 | 
27 |         # Should contain service-related keywords
28 |         assert "service" in result.lower() or "unit" in result.lower()
29 |         # Should show status information
30 |         assert "active" in result.lower() or "inactive" in result.lower() or "running" in result.lower()
31 | 
32 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
33 |     async def test_get_service_status_with_common_service(self):
34 |         """Test getting status of a common service."""
35 |         # Test with a service that should exist on most systems
36 |         result = await services.get_service_status("sshd.service")
37 |         assert isinstance(result, str)
38 |         assert len(result) > 0
39 |         # Should contain status information
40 |         assert (
41 |             "active" in result.lower()
42 |             or "inactive" in result.lower()
43 |             or "loaded" in result.lower()
44 |             or "not found" in result.lower()
45 |         )
46 | 
47 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
48 |     async def test_get_service_status_with_nonexistent_service(self):
49 |         """Test getting status of a non-existent service."""
50 |         result = await services.get_service_status("nonexistent-service-xyz123")
51 |         assert isinstance(result, str)
52 |         assert len(result) > 0
53 |         # Should handle gracefully
54 |         assert "not found" in result.lower() or "could not" in result.lower() or "error" in result.lower()
55 | 
56 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
57 |     async def test_get_service_logs_returns_string(self):
58 |         """Test that get_service_logs returns a string."""
59 |         result = await services.get_service_logs("sshd.service", lines=10)
60 |         assert isinstance(result, str)
61 |         assert len(result) > 0
62 | 
63 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
64 |     async def test_get_service_logs_respects_line_limit(self):
65 |         """Test that get_service_logs respects the lines parameter."""
66 |         # This is a basic test - we just verify it runs without error
67 |         result = await services.get_service_logs("sshd.service", lines=5)
68 |         assert isinstance(result, str)
69 | 
70 |     @pytest.mark.skipif(sys.platform != "linux", reason="Only passes on Linux")
71 |     async def test_get_service_logs_with_nonexistent_service(self):
72 |         """Test getting logs of a non-existent service."""
73 |         result = await services.get_service_logs("nonexistent-service-xyz123", lines=10)
74 |         assert isinstance(result, str)
75 |         assert len(result) > 0
76 |         # 
Should handle gracefully 77 | assert "not found" in result.lower() or "no entries" in result.lower() or "error" in result.lower() 78 | 79 | 80 | class TestRemoteServices: 81 | """Test remote service management.""" 82 | 83 | async def test_list_services_remote(self, mocker): 84 | """Test listing services on a remote host.""" 85 | mock_output = "UNIT LOAD ACTIVE SUB DESCRIPTION\nnginx.service loaded active running Nginx server\n" 86 | 87 | mock_exec = AsyncMock() 88 | mock_exec.return_value = (0, mock_output, "") 89 | mocker.patch("linux_mcp_server.tools.services.execute_command", mock_exec) 90 | 91 | result = await services.list_services(host="remote.example.com") 92 | 93 | assert "nginx.service" in result 94 | assert "System Services" in result 95 | mock_exec.assert_called() 96 | 97 | async def test_get_service_status_remote(self, mocker): 98 | """Test getting service status on a remote host.""" 99 | mock_output = "● nginx.service - Nginx HTTP Server\n Loaded: loaded\n Active: active (running)" 100 | 101 | mock_exec = AsyncMock() 102 | mock_exec.return_value = (0, mock_output, "") 103 | mocker.patch("linux_mcp_server.tools.services.execute_command", mock_exec) 104 | 105 | result = await services.get_service_status("nginx", host="remote.example.com") 106 | 107 | assert "nginx.service" in result 108 | assert "active" in result.lower() 109 | mock_exec.assert_called() 110 | 111 | async def test_get_service_logs_remote(self, mocker): 112 | """Test getting service logs on a remote host.""" 113 | mock_output = "Jan 01 12:00:00 host nginx[1234]: Starting Nginx\nJan 01 12:00:01 host nginx[1234]: Started" 114 | 115 | mock_exec = AsyncMock() 116 | mock_exec.return_value = (0, mock_output, "") 117 | mocker.patch("linux_mcp_server.tools.services.execute_command", mock_exec) 118 | 119 | result = await services.get_service_logs("nginx", lines=50, host="remote.example.com") 120 | 121 | assert "nginx" in result.lower() 122 | assert "Starting" in result 123 | mock_exec.assert_called() 124 | -------------------------------------------------------------------------------- /tests/utils/test_validation.py: -------------------------------------------------------------------------------- 1 | """Tests for input validation utilities.""" 2 | 3 | from linux_mcp_server.utils.validation import validate_line_count 4 | from linux_mcp_server.utils.validation import validate_pid 5 | from linux_mcp_server.utils.validation import validate_positive_int 6 | 7 | 8 | class TestValidatePositiveInt: 9 | """Test validate_positive_int function.""" 10 | 11 | def test_valid_integer(self): 12 | """Test with valid integer.""" 13 | result, error = validate_positive_int(5) 14 | assert result == 5 15 | assert error is None 16 | 17 | def test_valid_float_truncates(self): 18 | """Test that float is truncated to integer.""" 19 | result, error = validate_positive_int(5.9) 20 | assert result == 5 21 | assert error is None 22 | 23 | def test_exact_float_truncates(self): 24 | """Test that exact float like 10.0 works.""" 25 | result, error = validate_positive_int(10.0) 26 | assert result == 10 27 | assert error is None 28 | 29 | def test_zero_fails(self): 30 | """Test that zero fails validation (default min is 1).""" 31 | result, error = validate_positive_int(0) 32 | assert result is None 33 | assert error is not None 34 | assert "at least 1" in error.lower() 35 | 36 | def test_negative_fails(self): 37 | """Test that negative numbers fail validation.""" 38 | result, error = validate_positive_int(-5) 39 | assert result is None 40 | assert error is 
not None 41 | assert "at least" in error.lower() 42 | 43 | def test_string_fails(self): 44 | """Test that string fails validation.""" 45 | result, error = validate_positive_int("123") # pyright: ignore[reportArgumentType] 46 | assert result is None 47 | assert error is not None 48 | assert "must be a number" in error.lower() 49 | 50 | def test_custom_min_value(self): 51 | """Test with custom minimum value.""" 52 | result, error = validate_positive_int(5, min_value=10) 53 | assert result is None 54 | assert error is not None 55 | assert "at least 10" in error.lower() 56 | 57 | def test_max_value_caps(self): 58 | """Test that max_value caps the result.""" 59 | result, error = validate_positive_int(1500, max_value=1000) 60 | assert result == 1000 61 | assert error is None 62 | 63 | def test_custom_param_name_in_error(self): 64 | """Test that custom parameter name appears in error message.""" 65 | result, error = validate_positive_int(-1, param_name="top_n") 66 | assert result is None 67 | assert "top_n" in error # pyright: ignore[reportOperatorIssue] 68 | 69 | def test_min_value_zero(self): 70 | """Test with min_value of 0 (allowing zero).""" 71 | result, error = validate_positive_int(0, min_value=0) 72 | assert result == 0 73 | assert error is None 74 | 75 | 76 | class TestValidatePid: 77 | """Test validate_pid function.""" 78 | 79 | def test_valid_pid(self): 80 | """Test with valid PID.""" 81 | result, error = validate_pid(1234) 82 | assert result == 1234 83 | assert error is None 84 | 85 | def test_float_pid_truncates(self): 86 | """Test that float PID is truncated.""" 87 | result, error = validate_pid(1234.7) 88 | assert result == 1234 89 | assert error is None 90 | 91 | def test_zero_pid_fails(self): 92 | """Test that PID 0 fails.""" 93 | result, error = validate_pid(0) 94 | assert result is None 95 | assert error is not None 96 | assert "pid" in error.lower() 97 | 98 | def test_negative_pid_fails(self): 99 | """Test that negative PID fails.""" 100 | result, error = validate_pid(-1) 101 | assert result is None 102 | assert error is not None 103 | 104 | def test_large_pid(self): 105 | """Test with large PID (no max limit).""" 106 | result, error = validate_pid(99999) 107 | assert result == 99999 108 | assert error is None 109 | 110 | 111 | class TestValidateLineCount: 112 | """Test validate_line_count function.""" 113 | 114 | def test_valid_line_count(self): 115 | """Test with valid line count.""" 116 | result, error = validate_line_count(50) 117 | assert result == 50 118 | assert error is None 119 | 120 | def test_float_line_count_truncates(self): 121 | """Test that float is truncated.""" 122 | result, error = validate_line_count(50.8) 123 | assert result == 50 124 | assert error is None 125 | 126 | def test_zero_returns_default(self): 127 | """Test that zero returns default value.""" 128 | result, error = validate_line_count(0, default=100) 129 | assert result == 100 130 | assert error is not None 131 | assert "lines" in error.lower() 132 | 133 | def test_negative_returns_default(self): 134 | """Test that negative returns default value.""" 135 | result, error = validate_line_count(-10, default=100) 136 | assert result == 100 137 | assert error is not None 138 | 139 | def test_exceeds_max_caps_at_max(self): 140 | """Test that exceeding max_lines caps at maximum.""" 141 | result, error = validate_line_count(50000, max_lines=10000) 142 | assert result == 10000 143 | assert error is None 144 | 145 | def test_custom_default(self): 146 | """Test with custom default value.""" 147 | result, 
error = validate_line_count(-1, default=200) 148 | assert result == 200 149 | assert error is not None 150 | 151 | def test_custom_max_lines(self): 152 | """Test with custom max_lines.""" 153 | result, error = validate_line_count(5000, max_lines=1000) 154 | assert result == 1000 155 | assert error is None 156 | 157 | def test_at_max_boundary(self): 158 | """Test exactly at max boundary.""" 159 | result, error = validate_line_count(1000, max_lines=1000) 160 | assert result == 1000 161 | assert error is None 162 | -------------------------------------------------------------------------------- /tests/connection/ssh/test_connection_manager.py: -------------------------------------------------------------------------------- 1 | import asyncssh 2 | import pytest 3 | 4 | from linux_mcp_server.connection.ssh import SSHConnectionManager 5 | 6 | 7 | @pytest.fixture 8 | def mock_connection(mocker): 9 | mock_connection = mocker.AsyncMock(asyncssh.SSHClientConnection, name="connection", _username="testuser") 10 | mock_connection.run.return_value = mocker.Mock(exit_status=0, stdout="remote output", stderr="") 11 | mock_connection.is_closed.return_value = False 12 | 13 | return mock_connection 14 | 15 | 16 | @pytest.fixture 17 | def mock_asyncssh_connect(mocker, mock_connection): 18 | mock_connect = mocker.AsyncMock(name="async_connect", return_value=mock_connection) 19 | mocker.patch("asyncssh.connect", mock_connect) 20 | 21 | return mock_connect 22 | 23 | 24 | async def test_connection_manager_singleton(): 25 | """Test that connection manager is a singleton.""" 26 | manager1 = SSHConnectionManager() 27 | manager2 = SSHConnectionManager() 28 | 29 | assert manager1 is manager2 30 | 31 | 32 | async def test_get_connection(mock_connection, mock_asyncssh_connect): 33 | """Test getting a new SSH connection.""" 34 | manager = SSHConnectionManager() 35 | manager._connections.clear() 36 | await manager.get_connection("host1") 37 | await manager.get_connection("host1") # Called twice to verify asyncssh.connect is only be called once 38 | 39 | mock_asyncssh_connect.assert_called_once() 40 | 41 | 42 | async def test_get_connection_user_from_config(mocker, mock_asyncssh_connect): 43 | mocker.patch("linux_mcp_server.connection.ssh.CONFIG.user", "bobo") 44 | 45 | manager = SSHConnectionManager() 46 | manager._connections.clear() 47 | await manager.get_connection("host1") 48 | 49 | assert mock_asyncssh_connect.call_args.kwargs.get("username") == "bobo" 50 | 51 | 52 | async def test_get_connection_different_hosts(mocker, mock_asyncssh_connect): 53 | """Test that different hosts get different connections.""" 54 | 55 | manager = SSHConnectionManager() 56 | manager._connections.clear() 57 | 58 | mock_conn1 = mocker.AsyncMock(asyncssh.SSHClientConnection, return_value=False, _username="testuser") 59 | mock_conn2 = mocker.AsyncMock(asyncssh.SSHClientConnection, return_value=False, _username="testuser") 60 | 61 | async def async_connect(*args, **kwargs): 62 | return mock_conn1 if kwargs.get("host") == "host1" else mock_conn2 63 | 64 | mock_asyncssh_connect.side_effect = async_connect 65 | 66 | conn1 = await manager.get_connection("host1") 67 | conn2 = await manager.get_connection("host2") 68 | 69 | assert conn1 is not conn2 70 | 71 | 72 | async def test_execute_remote_success(mocker, mock_asyncssh_connect, mock_connection): 73 | """Test successful remote command execution.""" 74 | manager = SSHConnectionManager() 75 | manager._connections.clear() 76 | 77 | returncode, stdout, stderr = await 
manager.execute_remote(["/bin/ls", "-la"], "testhost") 78 | 79 | assert returncode == 0 80 | assert stdout == "remote output" 81 | assert stderr == "" 82 | assert mock_connection.run.call_count == 1 83 | 84 | 85 | async def test_execute_remote_command_failure(mocker, mock_connection): 86 | """Test remote command that returns non-zero exit code.""" 87 | manager = SSHConnectionManager() 88 | manager._connections.clear() 89 | 90 | mock_connection.run.side_effect = asyncssh.Error(1, "Raised intentionally") 91 | mock_get_connection = mocker.AsyncMock(asyncssh.SSHClientConnection, return_value=mock_connection) 92 | mocker.patch.object(manager, "get_connection", mock_get_connection) 93 | 94 | with pytest.raises(ConnectionError, match="Raised intentionally"): 95 | await manager.execute_remote(["/invalid_command"], "testhost") 96 | 97 | 98 | async def test_execute_remote_connection_failure(mocker, mock_asyncssh_connect): 99 | """Test handling of SSH connection failures.""" 100 | manager = SSHConnectionManager() 101 | manager._connections.clear() 102 | 103 | mock_asyncssh_connect.side_effect = asyncssh.DisconnectError(1, "Connection refused") 104 | 105 | with pytest.raises(ConnectionError, match="Failed to connect"): 106 | await manager.execute_remote(["ls"], "unreachable") 107 | 108 | 109 | async def test_execute_remote_authentication_failure(mocker, mock_asyncssh_connect): 110 | """Test handling of authentication failures.""" 111 | manager = SSHConnectionManager() 112 | manager._connections.clear() 113 | 114 | mock_asyncssh_connect.side_effect = asyncssh.PermissionDenied("Auth failed") 115 | 116 | with pytest.raises(ConnectionError, match="Authentication failed"): 117 | await manager.execute_remote(["ls"], "testhost") 118 | 119 | 120 | async def test_execute_remote_uses_discovered_key(mocker, mock_asyncssh_connect): 121 | """Test that remote execution uses discovered SSH key.""" 122 | manager = SSHConnectionManager() 123 | manager._connections.clear() 124 | manager._ssh_key = "/home/user/.ssh/id_ed25519" 125 | 126 | await manager.execute_remote(["ls"], "testhost") 127 | 128 | call_kwargs = mock_asyncssh_connect.call_args.kwargs 129 | 130 | assert call_kwargs.get("client_keys") == ["/home/user/.ssh/id_ed25519"] 131 | 132 | 133 | async def test_close_connections(mocker, mock_asyncssh_connect): 134 | """Test closing all connections.""" 135 | manager = SSHConnectionManager() 136 | manager._connections.clear() 137 | 138 | mock_conn1 = mocker.AsyncMock( 139 | asyncssh.SSHClientConnection, return_value=False, wait_closed=mocker.AsyncMock(), _username="testuser" 140 | ) 141 | mock_conn2 = mocker.AsyncMock( 142 | asyncssh.SSHClientConnection, return_value=False, wait_closed=mocker.AsyncMock(), _username="testuser" 143 | ) 144 | 145 | async def async_connect(*args, **kwargs): 146 | return mock_conn1 if kwargs.get("host") == "host1" else mock_conn2 147 | 148 | mock_asyncssh_connect.side_effect = async_connect 149 | 150 | await manager.get_connection("host1") 151 | await manager.get_connection("host2") 152 | await manager.close_all() 153 | 154 | assert mock_conn1.close.call_count == 1 155 | assert mock_conn2.close.call_count == 1 156 | assert len(manager._connections) == 0 157 | -------------------------------------------------------------------------------- /tests/test_logging_config.py: -------------------------------------------------------------------------------- 1 | """Tests for logging configuration.""" 2 | 3 | import importlib 4 | import json 5 | import logging 6 | 7 | from 
linux_mcp_server.logging_config import JSONFormatter 8 | from linux_mcp_server.logging_config import setup_logging 9 | from linux_mcp_server.logging_config import StructuredFormatter 10 | 11 | 12 | class TestSetupLogging: 13 | """Test logging setup.""" 14 | 15 | def test_setup_creates_log_files(self, tmp_path, mocker): 16 | """Test that setup creates both text and JSON log files.""" 17 | mocker.patch("linux_mcp_server.logging_config.CONFIG.log_dir", tmp_path) 18 | 19 | setup_logging() 20 | 21 | # Log something 22 | logger = logging.getLogger("test") 23 | logger.info("Test message") 24 | 25 | # Check both log files exist 26 | text_log = tmp_path / "server.log" 27 | json_log = tmp_path / "server.json" 28 | 29 | assert text_log.exists() 30 | assert json_log.exists() 31 | 32 | def test_log_level_from_environment(self, tmp_path, monkeypatch): 33 | """Test that log level can be set from environment variable.""" 34 | # Patch log_dir to avoid creating files in home directory during tests 35 | monkeypatch.setenv("LINUX_MCP_LOG_LEVEL", "DEBUG") 36 | monkeypatch.setenv("LINUX_MCP_LOG_RETENTION_DAYS", "10") 37 | monkeypatch.setenv("LINUX_MCP_LOG_DIR", str(tmp_path)) 38 | 39 | # Reload config module to pick up the environment variables 40 | from linux_mcp_server import config 41 | 42 | importlib.reload(config) 43 | 44 | # Reload logging_config to pick up the new CONFIG 45 | from linux_mcp_server import logging_config 46 | 47 | importlib.reload(logging_config) 48 | 49 | # Import setup_logging again to get the reloaded version 50 | from linux_mcp_server.logging_config import setup_logging 51 | 52 | setup_logging() 53 | 54 | # Root logger should be at DEBUG level 55 | root_logger = logging.getLogger() 56 | assert root_logger.level == logging.DEBUG 57 | 58 | def test_default_log_level_is_info(self, mocker, tmp_path): 59 | """Test that default log level is INFO.""" 60 | mocker.patch("linux_mcp_server.logging_config.CONFIG.log_dir", tmp_path) 61 | mocker.patch("linux_mcp_server.logging_config.CONFIG.log_level", "INFO") 62 | 63 | setup_logging() 64 | 65 | root_logger = logging.getLogger() 66 | assert root_logger.level == logging.INFO 67 | 68 | 69 | class TestStructuredFormatter: 70 | """Test structured log formatter.""" 71 | 72 | def test_format_basic_message(self): 73 | """Test formatting a basic log message.""" 74 | formatter = StructuredFormatter( 75 | "%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S" 76 | ) 77 | record = logging.LogRecord( 78 | name="test_module", 79 | level=logging.INFO, 80 | pathname="", 81 | lineno=0, 82 | msg="Test message", 83 | args=(), 84 | exc_info=None, 85 | ) 86 | 87 | formatted = formatter.format(record) 88 | 89 | # Check format: TIMESTAMP | LEVEL | MODULE | MESSAGE 90 | parts = formatted.split(" | ") 91 | assert len(parts) == 4 92 | assert parts[1] == "INFO" 93 | assert parts[2] == "test_module" 94 | assert parts[3] == "Test message" 95 | 96 | def test_format_with_extra_fields(self): 97 | """Test formatting with extra context fields.""" 98 | formatter = StructuredFormatter( 99 | "%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S" 100 | ) 101 | record = logging.LogRecord( 102 | name="test_module", 103 | level=logging.INFO, 104 | pathname="", 105 | lineno=0, 106 | msg="Test message", 107 | args=(), 108 | exc_info=None, 109 | ) 110 | record.host = "server1.example.com" 111 | record.username = "admin" 112 | 113 | formatted = formatter.format(record) 114 | 115 | assert "host=server1.example.com" in formatted 116 | 
assert "username=admin" in formatted 117 | 118 | 119 | class TestJSONFormatter: 120 | """Test JSON log formatter.""" 121 | 122 | def test_format_basic_message(self): 123 | """Test formatting a basic log message as JSON.""" 124 | formatter = JSONFormatter(datefmt="%Y-%m-%dT%H:%M:%S") 125 | record = logging.LogRecord( 126 | name="test_module", 127 | level=logging.INFO, 128 | pathname="", 129 | lineno=0, 130 | msg="Test message", 131 | args=(), 132 | exc_info=None, 133 | ) 134 | 135 | formatted = formatter.format(record) 136 | data = json.loads(formatted) 137 | 138 | assert data["level"] == "INFO" 139 | assert data["logger"] == "test_module" 140 | assert data["message"] == "Test message" 141 | assert "timestamp" in data 142 | 143 | def test_format_with_extra_fields(self): 144 | """Test formatting with extra context fields as JSON.""" 145 | formatter = JSONFormatter(datefmt="%Y-%m-%dT%H:%M:%S") 146 | record = logging.LogRecord( 147 | name="test_module", 148 | level=logging.INFO, 149 | pathname="", 150 | lineno=0, 151 | msg="Test message", 152 | args=(), 153 | exc_info=None, 154 | ) 155 | record.host = "server1.example.com" 156 | record.username = "admin" 157 | record.exit_code = 0 158 | 159 | formatted = formatter.format(record) 160 | data = json.loads(formatted) 161 | 162 | assert data["host"] == "server1.example.com" 163 | assert data["username"] == "admin" 164 | assert data["exit_code"] == 0 165 | 166 | def test_format_with_exception(self): 167 | """Test formatting with exception information.""" 168 | import sys 169 | 170 | formatter = JSONFormatter(datefmt="%Y-%m-%dT%H:%M:%S") 171 | try: 172 | raise ValueError("Test error") 173 | except ValueError: 174 | exc_info = sys.exc_info() 175 | record = logging.LogRecord( 176 | name="test_module", 177 | level=logging.ERROR, 178 | pathname="", 179 | lineno=0, 180 | msg="Error occurred", 181 | args=(), 182 | exc_info=exc_info, 183 | ) 184 | 185 | formatted = formatter.format(record) 186 | data = json.loads(formatted) 187 | 188 | assert "exception" in data 189 | assert "ValueError: Test error" in data["exception"] 190 | -------------------------------------------------------------------------------- /tests/tools/test_processes.py: -------------------------------------------------------------------------------- 1 | """Tests for process management tools.""" 2 | 3 | import os 4 | 5 | from unittest.mock import MagicMock 6 | 7 | import pytest 8 | 9 | from linux_mcp_server.tools import processes 10 | from tests.conftest import GLOBAL_IPV6 11 | from tests.conftest import IPV4_ADDR 12 | from tests.conftest import LINK_LOCAL_FILTER_CASES_PROCESS 13 | from tests.conftest import LINK_LOCAL_IPV6 14 | from tests.conftest import make_mixed_connections_process 15 | from tests.conftest import MockAddr 16 | from tests.conftest import MockConnection 17 | 18 | 19 | class TestProcesses: 20 | """Test process management tools.""" 21 | 22 | async def test_list_processes_returns_string(self): 23 | """Test that list_processes returns a string.""" 24 | result = await processes.list_processes() 25 | assert isinstance(result, str) 26 | assert len(result) > 0 27 | 28 | async def test_list_processes_contains_process_info(self): 29 | """Test that list_processes contains process information.""" 30 | result = await processes.list_processes() 31 | 32 | # Should contain process-related keywords 33 | assert "pid" in result.lower() or "process" in result.lower() 34 | # Should contain resource usage info 35 | assert "cpu" in result.lower() or "memory" in result.lower() or "mem" in 
result.lower() 36 | 37 | async def test_get_process_info_with_current_process(self): 38 | """Test getting info about the current process.""" 39 | current_pid = os.getpid() 40 | result = await processes.get_process_info(current_pid) 41 | assert isinstance(result, str) 42 | assert len(result) > 0 43 | # Should contain the PID 44 | assert str(current_pid) in result 45 | 46 | async def test_get_process_info_with_init_process(self): 47 | """Test getting info about init process (PID 1).""" 48 | result = await processes.get_process_info(1) 49 | assert isinstance(result, str) 50 | assert len(result) > 0 51 | # Should contain process information 52 | assert "1" in result or "systemd" in result.lower() or "init" in result.lower() 53 | 54 | async def test_get_process_info_with_nonexistent_process(self): 55 | """Test getting info about a non-existent process.""" 56 | # Use a very high PID that likely doesn't exist 57 | result = await processes.get_process_info(999999) 58 | assert isinstance(result, str) 59 | assert len(result) > 0 60 | # Should handle gracefully 61 | assert "not found" in result.lower() or "does not exist" in result.lower() or "error" in result.lower() 62 | 63 | async def test_list_processes_with_host(self, mocker): 64 | mocker.patch.object(processes, "execute_command", return_value=(0, "some process", "")) 65 | 66 | result = await processes.list_processes(host="starship.command") 67 | 68 | assert "Running Processes" in result 69 | 70 | 71 | @pytest.fixture 72 | def create_mock_process(): 73 | """Factory fixture to create a mock process with configurable network connections.""" 74 | 75 | def _create(connections): 76 | mock_proc = MagicMock() 77 | mock_proc.name.return_value = "test_process" 78 | mock_proc.exe.return_value = "/usr/bin/test" 79 | mock_proc.cmdline.return_value = ["test", "--arg"] 80 | mock_proc.status.return_value = "running" 81 | mock_proc.username.return_value = "testuser" 82 | mock_proc.pid = 12345 83 | mock_proc.ppid.return_value = 1 84 | mock_proc.cpu_percent.return_value = 1.5 85 | mock_proc.memory_percent.return_value = 2.0 86 | mock_proc.memory_info.return_value = MagicMock(rss=1024 * 1024, vms=2048 * 1024) 87 | mock_proc.create_time.return_value = 1700000000.0 88 | mock_proc.cpu_times.return_value = MagicMock(user=10.0, system=5.0) 89 | mock_proc.num_threads.return_value = 4 90 | mock_proc.num_fds.return_value = 10 91 | mock_proc.net_connections.return_value = connections 92 | return mock_proc 93 | 94 | return _create 95 | 96 | 97 | class TestGetProcessInfoLinkLocalFiltering: 98 | """Test get_process_info filters link-local connections.""" 99 | 100 | @pytest.mark.parametrize("laddr_ip,raddr_ip,expectation", LINK_LOCAL_FILTER_CASES_PROCESS) 101 | async def test_get_process_info_filters_link_local_connections( 102 | self, mocker, create_mock_process, laddr_ip, raddr_ip, expectation 103 | ): 104 | """Test that process connections with link-local addresses are filtered.""" 105 | connections = [ 106 | MockConnection( 107 | type_name="SOCK_STREAM", 108 | laddr=MockAddr(laddr_ip, 8080), 109 | raddr=MockAddr(raddr_ip, 54321), 110 | status="ESTABLISHED", 111 | ), 112 | ] 113 | 114 | mock_proc = create_mock_process(connections) 115 | mocker.patch.object(processes.psutil, "pid_exists", return_value=True) 116 | mocker.patch.object(processes.psutil, "Process", return_value=mock_proc) 117 | 118 | result = await processes.get_process_info(12345) # noqa: F841 - used in eval 119 | 120 | assert eval(expectation) 121 | 122 | async def 
test_get_process_info_mixed_connections_filtering(self, mocker, create_mock_process): 123 | """Test filtering with mix of link-local and regular connections.""" 124 | mock_proc = create_mock_process(make_mixed_connections_process()) 125 | mocker.patch.object(processes.psutil, "pid_exists", return_value=True) 126 | mocker.patch.object(processes.psutil, "Process", return_value=mock_proc) 127 | 128 | result = await processes.get_process_info(12345) 129 | 130 | # Should only show 1 connection (the IPv4 -> global IPv6 one) 131 | assert "Network Connections (1)" in result 132 | assert f"{IPV4_ADDR}:80" in result 133 | assert f"{GLOBAL_IPV6}:12345" in result 134 | # Link-local addresses should not appear 135 | assert LINK_LOCAL_IPV6 not in result 136 | 137 | async def test_get_process_info_shows_more_than_10_connections_message(self, mocker, create_mock_process): 138 | """Test that 'and X more' message shows correct count after filtering.""" 139 | # Create 15 regular connections (should show "and 5 more") 140 | connections = [ 141 | MockConnection( 142 | type_name="SOCK_STREAM", 143 | laddr=MockAddr(IPV4_ADDR, 8000 + i), 144 | raddr=MockAddr(GLOBAL_IPV6, 50000 + i), 145 | status="ESTABLISHED", 146 | ) 147 | for i in range(15) 148 | ] 149 | # Add 5 link-local connections that should be filtered out 150 | connections.extend( 151 | [ 152 | MockConnection( 153 | type_name="SOCK_STREAM", 154 | laddr=MockAddr(LINK_LOCAL_IPV6, 9000 + i), 155 | raddr=MockAddr(IPV4_ADDR, 60000 + i), 156 | status="ESTABLISHED", 157 | ) 158 | for i in range(5) 159 | ] 160 | ) 161 | 162 | mock_proc = create_mock_process(connections) 163 | mocker.patch.object(processes.psutil, "pid_exists", return_value=True) 164 | mocker.patch.object(processes.psutil, "Process", return_value=mock_proc) 165 | 166 | result = await processes.get_process_info(12345) 167 | 168 | # Should show 15 filtered connections, first 10 displayed, "and 5 more" 169 | assert "Network Connections (15)" in result 170 | assert "... 
and 5 more" in result 171 | 172 | @pytest.mark.parametrize( 173 | "laddr,raddr,expected_local,expected_remote", 174 | [ 175 | pytest.param(MockAddr(IPV4_ADDR, 80), None, f"{IPV4_ADDR}:80", "N/A", id="no-raddr"), 176 | pytest.param(None, MockAddr(IPV4_ADDR, 80), "N/A", f"{IPV4_ADDR}:80", id="no-laddr"), 177 | ], 178 | ) 179 | async def test_get_process_info_connection_missing_address( 180 | self, mocker, create_mock_process, laddr, raddr, expected_local, expected_remote 181 | ): 182 | """Test connections with missing local or remote address.""" 183 | connections = [ 184 | MockConnection(type_name="SOCK_STREAM", laddr=laddr, raddr=raddr, status="ESTABLISHED"), 185 | ] 186 | 187 | mock_proc = create_mock_process(connections) 188 | mocker.patch.object(processes.psutil, "pid_exists", return_value=True) 189 | mocker.patch.object(processes.psutil, "Process", return_value=mock_proc) 190 | 191 | result = await processes.get_process_info(12345) 192 | 193 | assert "Network Connections (1)" in result 194 | assert expected_local in result 195 | assert expected_remote in result 196 | -------------------------------------------------------------------------------- /src/linux_mcp_server/tools/logs.py: -------------------------------------------------------------------------------- 1 | """Log and audit tools.""" 2 | 3 | import os 4 | import typing as t 5 | 6 | from pathlib import Path 7 | 8 | from mcp.types import ToolAnnotations 9 | from pydantic import Field 10 | 11 | from linux_mcp_server.audit import log_tool_call 12 | from linux_mcp_server.config import CONFIG 13 | from linux_mcp_server.connection.ssh import execute_command 14 | from linux_mcp_server.server import mcp 15 | from linux_mcp_server.utils.decorators import disallow_local_execution_in_containers 16 | from linux_mcp_server.utils.types import Host 17 | from linux_mcp_server.utils.validation import validate_line_count 18 | 19 | 20 | @mcp.tool( 21 | title="Get journal logs", 22 | description="Get systemd journal logs.", 23 | annotations=ToolAnnotations(readOnlyHint=True), 24 | ) 25 | @log_tool_call 26 | @disallow_local_execution_in_containers 27 | async def get_journal_logs( 28 | unit: t.Annotated[ 29 | str | None, Field(description="Filter by systemd unit name or pattern (e.g., 'nginx.service', 'ssh*')") 30 | ] = None, 31 | priority: t.Annotated[ 32 | str | None, 33 | Field( 34 | description="Filter by priority. Possible values: priority level (0-7), syslog level name ('emerg' to 'debug'), or range (e.g., 'err..info')" 35 | ), 36 | ] = None, 37 | since: t.Annotated[ 38 | str | None, 39 | Field( 40 | description="Filter entries since specified time. Date/time filter (format: 'YYYY-MM-DD HH:MM:SS', 'today', 'yesterday', 'now', or relative like '-1h')" 41 | ), 42 | ] = None, 43 | lines: t.Annotated[int, Field(description="Number of log lines to retrieve. Default: 100")] = 100, 44 | host: Host | None = None, 45 | ) -> str: 46 | """ 47 | Get systemd journal logs. 
48 | """ 49 | try: 50 | # Validate lines parameter (accepts floats from LLMs) 51 | lines, _ = validate_line_count(lines, default=100) 52 | 53 | cmd = ["journalctl", "-n", str(lines), "--no-pager"] 54 | 55 | if unit: 56 | cmd.extend(["-u", unit]) 57 | 58 | if priority: 59 | cmd.extend(["-p", priority]) 60 | 61 | if since: 62 | cmd.extend(["--since", since]) 63 | 64 | returncode, stdout, stderr = await execute_command(cmd, host=host) 65 | 66 | if returncode != 0: 67 | return f"Error reading journal logs: {stderr}" 68 | 69 | if not stdout or stdout.strip() == "": 70 | return "No journal entries found matching the criteria." 71 | 72 | # Build filter description 73 | filters = [] 74 | if unit: 75 | filters.append(f"unit={unit}") 76 | if priority: 77 | filters.append(f"priority={priority}") 78 | if since: 79 | filters.append(f"since={since}") 80 | 81 | filter_desc = ", ".join(filters) if filters else "no filters" 82 | 83 | result = [f"=== Journal Logs (last {lines} entries, {filter_desc}) ===\n"] 84 | result.append(stdout) 85 | 86 | return "\n".join(result) 87 | except FileNotFoundError: 88 | return "Error: journalctl command not found. This tool requires systemd." 89 | except Exception as e: 90 | return f"Error reading journal logs: {str(e)}" 91 | 92 | 93 | @mcp.tool( 94 | title="Get audit logs", 95 | description="Read the system audit logs. This requires root privileges.", 96 | annotations=ToolAnnotations(readOnlyHint=True), 97 | ) 98 | @log_tool_call 99 | @disallow_local_execution_in_containers 100 | async def get_audit_logs( 101 | lines: t.Annotated[int, Field(description="Number of log lines to retrieve.")] = 100, 102 | host: Host | None = None, 103 | ) -> str: 104 | """ 105 | Get audit logs. 106 | """ 107 | # Validate lines parameter (accepts floats from LLMs) 108 | lines, _ = validate_line_count(lines, default=100) 109 | 110 | audit_log_path = "/var/log/audit/audit.log" 111 | 112 | try: 113 | # For local execution, check if file exists 114 | if not host and not os.path.exists(audit_log_path): 115 | return f"Audit log file not found at {audit_log_path}. Audit logging may not be enabled." 116 | 117 | # Use tail to read last N lines 118 | returncode, stdout, stderr = await execute_command( 119 | ["tail", "-n", str(lines), audit_log_path], 120 | host=host, 121 | ) 122 | 123 | if returncode != 0: 124 | if "Permission denied" in stderr: 125 | return f"Permission denied reading audit logs. This tool requires elevated privileges (root) to read {audit_log_path}." 126 | return f"Error reading audit logs: {stderr}" 127 | 128 | if not stdout or stdout.strip() == "": 129 | return "No audit log entries found." 130 | 131 | result = [f"=== Audit Logs (last {lines} entries) ===\n"] 132 | result.append(stdout) 133 | 134 | return "\n".join(result) 135 | except FileNotFoundError: 136 | return "Error: tail command not found." 137 | except Exception as e: 138 | return f"Error reading audit logs: {str(e)}" 139 | 140 | 141 | @mcp.tool( 142 | title="Read log file", 143 | description="Read a specific log file.", 144 | annotations=ToolAnnotations(readOnlyHint=True), 145 | ) 146 | @log_tool_call 147 | @disallow_local_execution_in_containers 148 | async def read_log_file( # noqa: C901 149 | log_path: t.Annotated[str, Field(description="Path to the log file")], 150 | lines: t.Annotated[int, Field(description="Number of lines to retrieve from the end.")] = 100, 151 | host: Host | None = None, 152 | ) -> str: 153 | """ 154 | Read a specific log file. 
155 | """ 156 | try: 157 | # Validate lines parameter (accepts floats from LLMs) 158 | lines, _ = validate_line_count(lines, default=100) 159 | 160 | # Get allowed log paths from environment variable 161 | allowed_paths_env = CONFIG.allowed_log_paths 162 | 163 | if not allowed_paths_env: 164 | return ( 165 | "No log files are allowed. Set LINUX_MCP_ALLOWED_LOG_PATHS environment variable " 166 | "with comma-separated list of allowed log file paths." 167 | ) 168 | 169 | allowed_paths = [p.strip() for p in allowed_paths_env.split(",") if p.strip()] 170 | 171 | # For local execution, validate path 172 | if not host: 173 | try: 174 | requested_path = Path(log_path).resolve() 175 | except Exception: 176 | return f"Invalid log file path: {log_path}" 177 | 178 | # Check if the requested path is in the allowed list 179 | is_allowed = False 180 | for allowed_path in allowed_paths: 181 | try: 182 | allowed_resolved = Path(allowed_path).resolve() 183 | if requested_path == allowed_resolved: 184 | is_allowed = True 185 | break 186 | except Exception: 187 | continue 188 | 189 | if not is_allowed: 190 | return ( 191 | f"Access to log file '{log_path}' is not allowed.\n" 192 | f"Allowed log files: {', '.join(allowed_paths)}" 193 | ) # nofmt 194 | 195 | # Check if file exists 196 | if not requested_path.exists(): 197 | return f"Log file not found: {log_path}" 198 | 199 | if not requested_path.is_file(): 200 | return f"Path is not a file: {log_path}" 201 | 202 | log_path_str = str(requested_path) 203 | else: 204 | # For remote execution, just check against whitelist without resolving 205 | if log_path not in allowed_paths: 206 | return ( 207 | f"Access to log file '{log_path}' is not allowed.\n" 208 | f"Allowed log files: {', '.join(allowed_paths)}" 209 | ) # nofmt 210 | log_path_str = log_path 211 | 212 | # Read the file using tail 213 | returncode, stdout, stderr = await execute_command( 214 | ["tail", "-n", str(lines), log_path_str], 215 | host=host, 216 | ) 217 | 218 | if returncode != 0: 219 | if "Permission denied" in stderr: 220 | return f"Permission denied reading log file: {log_path}" 221 | return f"Error reading log file: {stderr}" 222 | 223 | if not stdout or stdout.strip() == "": 224 | return f"Log file is empty: {log_path}" 225 | 226 | result = [f"=== Log File: {log_path} (last {lines} lines) ===\n"] 227 | result.append(stdout) 228 | 229 | return "\n".join(result) 230 | except FileNotFoundError: 231 | return "Error: tail command not found." 
232 | except Exception as e: 233 | return f"Error reading log file: {str(e)}" 234 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | import pytest 4 | 5 | from linux_mcp_server.audit import log_tool_call 6 | 7 | 8 | # ============================================================================= 9 | # Shared test constants for IPv6 link-local filtering tests 10 | # ============================================================================= 11 | LINK_LOCAL_IPV6 = "fe80::1%eth0" 12 | GLOBAL_IPV6 = "2001:db8::1" 13 | IPV4_ADDR = "192.168.1.100" 14 | 15 | # Parameterized test cases for link-local address filtering (process tests) 16 | # Format: (laddr_ip, raddr_ip, expectation) 17 | # Each expectation is a string that will be eval'd with result, laddr_ip in scope 18 | LINK_LOCAL_FILTER_CASES_PROCESS = [ 19 | pytest.param(LINK_LOCAL_IPV6, IPV4_ADDR, '"Network Connections" not in result', id="laddr-link-local"), 20 | pytest.param(IPV4_ADDR, LINK_LOCAL_IPV6, '"Network Connections" not in result', id="raddr-link-local"), 21 | pytest.param(LINK_LOCAL_IPV6, LINK_LOCAL_IPV6, '"Network Connections" not in result', id="both-link-local"), 22 | pytest.param(IPV4_ADDR, GLOBAL_IPV6, '"Network Connections (1)" in result', id="no-link-local-header"), 23 | pytest.param(IPV4_ADDR, GLOBAL_IPV6, 'f"{laddr_ip}:8080" in result', id="no-link-local-addr"), 24 | pytest.param(GLOBAL_IPV6, IPV4_ADDR, '"Network Connections (1)" in result', id="global-ipv6-header"), 25 | pytest.param(GLOBAL_IPV6, IPV4_ADDR, 'f"{laddr_ip}:8080" in result', id="global-ipv6-addr"), 26 | ] 27 | 28 | # Parameterized test cases for link-local address filtering (network tests) 29 | # Format: (laddr_ip, raddr_ip, expectation) 30 | # Each expectation is a string that will be eval'd with output in scope 31 | LINK_LOCAL_FILTER_CASES_NETWORK = [ 32 | pytest.param(LINK_LOCAL_IPV6, IPV4_ADDR, '"filtered 1 link-local" in output', id="laddr-link-local-filtered"), 33 | pytest.param(LINK_LOCAL_IPV6, IPV4_ADDR, '"Total connections: 0" in output', id="laddr-link-local-count"), 34 | pytest.param(IPV4_ADDR, LINK_LOCAL_IPV6, '"filtered 1 link-local" in output', id="raddr-link-local-filtered"), 35 | pytest.param(IPV4_ADDR, LINK_LOCAL_IPV6, '"Total connections: 0" in output', id="raddr-link-local-count"), 36 | pytest.param(LINK_LOCAL_IPV6, LINK_LOCAL_IPV6, '"filtered 1 link-local" in output', id="both-link-local-filtered"), 37 | pytest.param(LINK_LOCAL_IPV6, LINK_LOCAL_IPV6, '"Total connections: 0" in output', id="both-link-local-count"), 38 | pytest.param(IPV4_ADDR, GLOBAL_IPV6, '"filtered" not in output', id="no-link-local-not-filtered"), 39 | pytest.param(IPV4_ADDR, GLOBAL_IPV6, '"Total connections: 1" in output', id="no-link-local-count"), 40 | pytest.param(GLOBAL_IPV6, IPV4_ADDR, '"filtered" not in output', id="global-ipv6-not-filtered"), 41 | pytest.param(GLOBAL_IPV6, IPV4_ADDR, '"Total connections: 1" in output', id="global-ipv6-count"), 42 | ] 43 | 44 | 45 | # ============================================================================= 46 | # Shared mock classes for network/connection testing 47 | # ============================================================================= 48 | class MockAddr: 49 | """Mock address for network connections (ip:port).""" 50 | 51 | def __init__(self, ip: str, port: int): 52 | self.ip = ip 53 | self.port = port 54 | 55 | 56 | class MockConnectionType: 57 | """Mock 
socket type with name attribute (for process connections).""" 58 | 59 | def __init__(self, name: str): 60 | self.name = name 61 | 62 | 63 | class MockConnection: 64 | """Mock network connection - works for both network tools and process tools. 65 | 66 | For network tools: pass type_ as socket.SOCK_STREAM/SOCK_DGRAM 67 | For process tools: pass type_name as "SOCK_STREAM"/"SOCK_DGRAM" 68 | """ 69 | 70 | def __init__( 71 | self, 72 | type_=None, 73 | type_name: str | None = None, 74 | laddr: MockAddr | None = None, 75 | raddr: MockAddr | None = None, 76 | status: str | None = "ESTABLISHED", 77 | pid: int | None = None, 78 | ): 79 | # Support both network-style (type_) and process-style (type_name) 80 | if type_name is not None: 81 | self.type = MockConnectionType(type_name) 82 | else: 83 | self.type = type_ if type_ is not None else socket.SOCK_STREAM 84 | self.laddr = laddr 85 | self.raddr = raddr 86 | self.status = status 87 | self.pid = pid 88 | 89 | 90 | # ============================================================================= 91 | # Shared test data for mixed link-local filtering scenarios 92 | # ============================================================================= 93 | def make_mixed_connections_network(): 94 | """Create mixed connections for network tool tests (with pid).""" 95 | return [ 96 | # Should be filtered (link-local laddr) 97 | MockConnection( 98 | type_=socket.SOCK_STREAM, 99 | laddr=MockAddr(LINK_LOCAL_IPV6, 8080), 100 | raddr=MockAddr(IPV4_ADDR, 54321), 101 | status="ESTABLISHED", 102 | pid=1234, 103 | ), 104 | # Should be shown (IPv4 -> global IPv6) 105 | MockConnection( 106 | type_=socket.SOCK_STREAM, 107 | laddr=MockAddr(IPV4_ADDR, 80), 108 | raddr=MockAddr(GLOBAL_IPV6, 12345), 109 | status="ESTABLISHED", 110 | pid=5678, 111 | ), 112 | # Should be filtered (link-local raddr) 113 | MockConnection( 114 | type_=socket.SOCK_DGRAM, 115 | laddr=MockAddr(IPV4_ADDR, 53), 116 | raddr=MockAddr(LINK_LOCAL_IPV6, 9999), 117 | status=None, 118 | pid=9999, 119 | ), 120 | ] 121 | 122 | 123 | def make_mixed_listening_ports(): 124 | """Create mixed listening ports for filtering tests (with pid).""" 125 | return [ 126 | # Should be filtered (link-local) 127 | MockConnection( 128 | type_=socket.SOCK_STREAM, 129 | laddr=MockAddr(LINK_LOCAL_IPV6, 8080), 130 | raddr=None, 131 | status="LISTEN", 132 | pid=1234, 133 | ), 134 | # Should be shown (IPv4) 135 | MockConnection( 136 | type_=socket.SOCK_STREAM, 137 | laddr=MockAddr(IPV4_ADDR, 80), 138 | raddr=None, 139 | status="LISTEN", 140 | pid=5678, 141 | ), 142 | # Should be shown (global IPv6) 143 | MockConnection( 144 | type_=socket.SOCK_DGRAM, 145 | laddr=MockAddr(GLOBAL_IPV6, 53), 146 | raddr=None, 147 | status=None, 148 | pid=9999, 149 | ), 150 | ] 151 | 152 | 153 | def make_mixed_connections_process(): 154 | """Create mixed connections for process tool tests (with type_name).""" 155 | return [ 156 | # Should be filtered (link-local laddr) 157 | MockConnection( 158 | type_name="SOCK_STREAM", 159 | laddr=MockAddr(LINK_LOCAL_IPV6, 8080), 160 | raddr=MockAddr(IPV4_ADDR, 54321), 161 | status="ESTABLISHED", 162 | ), 163 | # Should be shown 164 | MockConnection( 165 | type_name="SOCK_STREAM", 166 | laddr=MockAddr(IPV4_ADDR, 80), 167 | raddr=MockAddr(GLOBAL_IPV6, 12345), 168 | status="ESTABLISHED", 169 | ), 170 | # Should be filtered (link-local raddr) 171 | MockConnection( 172 | type_name="SOCK_DGRAM", 173 | laddr=MockAddr(IPV4_ADDR, 53), 174 | raddr=MockAddr(LINK_LOCAL_IPV6, 9999), 175 | status="NONE", 176 | ), 177 | ] 178 | 179 | 
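# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption for clarity, not taken from the project):
# a hypothetical, unused helper showing the two construction styles the
# MockConnection docstring above describes. The process-tool code path reads
# `conn.type.name`, so those tests pass type_name=...; the network-tool code
# path compares `conn.type` against socket constants, so those tests pass
# type_=... plus a pid. All names used here are defined earlier in this file.
def _mock_connection_style_examples():
    """Hypothetical helper; both objects mirror the factories above."""
    process_style = MockConnection(
        type_name="SOCK_STREAM",
        laddr=MockAddr(IPV4_ADDR, 80),
        raddr=MockAddr(GLOBAL_IPV6, 12345),
        status="ESTABLISHED",
    )
    network_style = MockConnection(
        type_=socket.SOCK_STREAM,
        laddr=MockAddr(IPV4_ADDR, 80),
        raddr=MockAddr(GLOBAL_IPV6, 12345),
        status="ESTABLISHED",
        pid=5678,
    )
    return process_style, network_style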
180 | @pytest.fixture 181 | def decorated(): 182 | @log_tool_call 183 | def list_services(*args, **kwargs): 184 | return args, kwargs 185 | 186 | return list_services 187 | 188 | 189 | @pytest.fixture 190 | def adecorated(): 191 | @log_tool_call 192 | async def list_services(*args, **kwargs): 193 | return args, kwargs 194 | 195 | return list_services 196 | 197 | 198 | @pytest.fixture 199 | async def decorated_fail(): 200 | @log_tool_call 201 | def list_services(*args, **kwargs): 202 | raise ValueError("Raised intentionally") 203 | 204 | return list_services 205 | 206 | 207 | @pytest.fixture 208 | async def adecorated_fail(): 209 | @log_tool_call 210 | async def list_services(*args, **kwargs): 211 | raise ValueError("Raised intentionally") 212 | 213 | return list_services 214 | 215 | 216 | @pytest.fixture 217 | def mock_execute_command_for(mocker): 218 | """Factory fixture for mocking execute_command in any module. 219 | 220 | Returns a callable that creates mocks for execute_command in the specified module. 221 | Uses autospec=True to verify arguments match the real function signature. 222 | 223 | Usage: 224 | @pytest.fixture 225 | def mock_execute_command(mock_execute_command_for): 226 | return mock_execute_command_for("linux_mcp_server.tools.mymodule") 227 | 228 | async def test_something(mock_execute_command): 229 | mock_execute_command.return_value = (0, "output", "") 230 | # ... test code ... 231 | mock_execute_command.assert_called_once() 232 | """ 233 | 234 | def _mock(module: str): 235 | return mocker.patch( 236 | f"{module}.execute_command", 237 | autospec=True, 238 | ) 239 | 240 | return _mock 241 | -------------------------------------------------------------------------------- /docs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Linux MCP Server 2 | 3 | Thank you for your interest in contributing! This document provides guidelines for contributing to the Linux MCP Server project. 4 | 5 | ### Prerequisites 6 | 7 | - **Python 3.10 or higher** 8 | - **git** 9 | - **pip** 10 | - **uv** - https://github.com/astral-sh/uv#installation 11 | 12 | ### Method 1: Setup with pip and a virtual environment 13 | 14 | **Step 1: Clone the repository** 15 | 16 | ```bash 17 | git clone https://github.com/rhel-lightspeed/linux-mcp-server.git 18 | cd linux-mcp-server 19 | ``` 20 | 21 | **Step 2: Create and activate virtual environment** 22 | 23 | ```bash 24 | python -m venv .venv 25 | source .venv/bin/activate # On Linux/macOS 26 | # OR 27 | .venv\Scripts\activate # On Windows 28 | ``` 29 | 30 | **Step 3: Install the package in editable mode with dev dependencies** 31 | 32 | ```bash 33 | pip install -e . --group dev 34 | ``` 35 | 36 | **Step 4: Verify the installation** 37 | 38 | ```bash 39 | python -m linux_mcp_server 40 | ``` 41 | 42 | **Step 5: Run the tests** 43 | 44 | ```bash 45 | pytest 46 | ``` 47 | 48 | All tests should pass. 49 | 50 | ### Method 2: Setup with uv 51 | 52 | **Step 1: Clone the repository** 53 | 54 | ```bash 55 | git clone https://github.com/rhel-lightspeed/linux-mcp-server.git 56 | cd linux-mcp-server 57 | ``` 58 | 59 | **Step 2: Create virtual environment and install dev dependencies** 60 | 61 | Note that by default `uv` creates an editable install as well as installs all packages in the `dev` dependency group. 
62 | 63 | ```bash 64 | uv sync 65 | ``` 66 | 67 | **Step 3: Verify the installation** 68 | 69 | ```bash 70 | uv run linux-mcp-server 71 | ``` 72 | 73 | **Step 4: Run the tests** 74 | 75 | ```bash 76 | uv run pytest 77 | ``` 78 | 79 | All tests should pass. 80 | 81 | 82 | ## Development Workflow 83 | 84 | We follow Test-Driven Development (TDD) principles: 85 | 86 | ### 1. RED - Write a Failing Test 87 | ```python 88 | # tests/test_new_feature.py 89 | import pytest 90 | from linux_mcp_server.tools import new_module 91 | 92 | async def test_new_feature(): 93 | result = await new_module.new_function() 94 | assert "expected" in result 95 | ``` 96 | 97 | ### 2. GREEN - Implement Minimal Code to Pass 98 | ```python 99 | # src/linux_mcp_server/tools/new_module.py 100 | async def new_function(): 101 | return "expected result" 102 | ``` 103 | 104 | ### 3. REFACTOR - Improve Code Quality 105 | - Improve readability 106 | - Remove duplication 107 | - Ensure all tests still pass 108 | 109 | ### 4. Commit 110 | ```bash 111 | git add . 112 | git commit -m "feat: add new feature 113 | 114 | - Detailed description of what was added 115 | - Tests included 116 | - All tests passing" 117 | ``` 118 | 119 | ## Code Standards 120 | 121 | ### Style Guidelines 122 | - Follow PEP 8 for Python code 123 | - Use type hints for function parameters and return values 124 | - Use async/await for I/O operations 125 | - Maximum line length: 120 characters 126 | 127 | ### Documentation 128 | - Add docstrings to all public functions 129 | - Use clear, descriptive variable names 130 | - Comment complex logic 131 | 132 | ### Testing 133 | - Write tests for all new features 134 | - Maintain project test coverage above 70%; patch test coverage must be 100%. 135 | - Use descriptive test names that explain what is being tested 136 | 137 | ## Adding New Tools 138 | 139 | When adding a new diagnostic tool: 140 | 141 | 1. **Create the tool function in the appropriate module:** 142 | ```python 143 | # src/linux_mcp_server/tools/my_tool.py 144 | import typing as t 145 | 146 | @mcp.tool( 147 | title="Useful Tool", 148 | description="Description for LLM to understand the tool.", 149 | annotations=ToolAnnotations(readOnlyHint=True), 150 | ) 151 | @log_tool_call 152 | async def my_tool_name( 153 | param1: str, 154 | ) -> str: 155 | """Documentation string further describing the tool if necessary. 156 | """ 157 | returncode, stdout, _ = await execute_command(["ps", "aux", "--sort=-%cpu"], host=None) 158 | if returncode != 0: 159 | raise ToolError 160 | 161 | return stdout 162 | ``` 163 | 164 | 2. **Write tests:** 165 | ```python 166 | # tests/test_my_tool.py 167 | import pytest 168 | from linux_mcp_server.tools import my_tool 169 | 170 | async def test_my_tool(): 171 | result = await my_tool.my_tool_name("example") 172 | assert isinstance(result, str) 173 | assert "expected content" in result.lower() 174 | 175 | # Test server integration 176 | async def test_server_has_my_tool(): 177 | from linux_mcp_server.server import mcp 178 | tools = await mcp.list_tools() 179 | tool_names = [t.name for t in tools] 180 | assert "my_tool_name" in tool_names 181 | ``` 182 | 183 | 3. **Update documentation:** 184 | - Add tool description to README.md 185 | - Add usage examples to docs/Usage.md 186 | 187 | ## Commit Message Format 188 | 189 | We use [Conventional Commits](https://www.conventionalcommits.org/): 190 | 191 | ``` 192 | <type>(<scope>): <subject> 193 | 194 | <body> 195 | 196 | <footer>