├── web_json_editor
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── src
│   │       ├── components
│   │       │   └── json_editor
│   │       │       └── json_editor.xml
│   │       └── fields
│   │           └── json_field.xml
│   ├── README.md
│   └── __manifest__.py
├── llm_chroma
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   └── __manifest__.py
├── llm_comfyui
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── tests
│   │   └── __init__.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── comfyui_logo.png
│   ├── data
│   │   ├── llm_provider.xml
│   │   ├── llm_prompt_category_data.xml
│   │   ├── llm_model.xml
│   │   └── llm_publisher.xml
│   └── __manifest__.py
├── llm_letta
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── models
│   │   ├── __init__.py
│   │   └── mail_message.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   ├── changelog.rst
│   ├── security
│   │   └── res_groups.xml
│   ├── data
│   │   ├── llm_provider.xml
│   │   └── llm_publisher.xml
│   └── __manifest__.py
├── llm_mistral
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   └── llm_model.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── mistral_logo.png
│   ├── data
│   │   ├── llm_provider.xml
│   │   └── llm_publisher.xml
│   ├── changelog.rst
│   └── __manifest__.py
├── llm_ollama
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── ollama_logo.png
│   ├── changelog.rst
│   ├── __manifest__.py
│   ├── data
│   │   └── llm_publisher.xml
│   └── utils
│       └── ollama_tool_call_id_utils.py
├── llm_qdrant
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   └── __manifest__.py
├── llm_store
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── llm_store_architecture.png
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   └── llm_store_menu_views.xml
│   ├── changelog.rst
│   └── __manifest__.py
├── llm_tool
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── llm_tool_architecture.png
│   ├── tests
│   │   ├── __init__.py
│   │   └── common.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── llm_model.py
│   │   ├── llm_tool_record_retriever.py
│   │   └── llm_tool_record_updater.py
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   └── llm_menu_views.xml
│   └── data
│       ├── server_actions.xml
│       └── llm_tool_consent_config_data.xml
├── llm_anthropic
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── anthropic_logo.png
│   ├── data
│   │   ├── llm_provider.xml
│   │   └── llm_publisher.xml
│   └── __manifest__.py
├── llm_comfy_icu
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── tests
│   │   └── __init__.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── comfy_icu_logo.png
│   ├── __manifest__.py
│   └── data
│       └── llm_publisher.xml
├── llm_generate
│   ├── __init__.py
│   ├── tests
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   ├── description
│   │   │   ├── icon.png
│   │   │   ├── banner.jpeg
│   │   │   └── llm_generate_architecture.png
│   │   └── src
│   │       ├── components
│   │       │   ├── message
│   │       │   │   └── message.scss
│   │       │   └── llm_media_form
│   │       │       ├── llm_form_fields_view.js
│   │       │       └── llm_media_form.scss
│   │       ├── patches
│   │       │   └── llm_chat_container_patch.js
│   │       └── templates
│   │           └── llm_chat_container_extension.xml
│   ├── models
│   │   ├── __init__.py
│   │   ├── llm_provider.py
│   │   ├── llm_tool_generate.py
│   │   └── llm_model.py
│   ├── changelog.rst
│   ├── data
│   │   └── llm_tool_data.xml
│   ├── views
│   │   └── llm_model_views.xml
│   └── migrations
│       └── 16.0.2.0.0
│           └── post-migration.py
├── llm_generate_job
│   ├── __init__.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── llm_generate_job_architecture.png
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   └── llm_generation_menu_views.xml
│   ├── changelog.rst
│   ├── __manifest__.py
│   └── tests
│       └── test_model_based_queue.py
├── llm_replicate
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   └── replicate_model.py
│   ├── pyproject.toml
│   ├── tests
│   │   └── __init__.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       ├── replicate_logo.png
│   │       └── replicate_logo.svg
│   ├── changelog.rst
│   ├── __manifest__.py
│   ├── data
│   │   └── llm_publisher.xml
│   └── views
│       └── replicate_model_views.xml
├── llm_tool_demo
│   ├── __init__.py
│   ├── tests
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   ├── models
│   │   └── __init__.py
│   ├── security
│   │   └── ir.model.access.csv
│   └── __manifest__.py
├── llm_training
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   ├── models
│   │   ├── __init__.py
│   │   └── llm_provider.py
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   └── llm_training_menu_views.xml
│   └── __manifest__.py
├── llm_knowledge_llama
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   └── __manifest__.py
├── llm_knowledge_mistral
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── screenshots
│   │           ├── grocerylist.webp
│   │           ├── mistral_parser.png
│   │           └── mistral_ocr_models.png
│   ├── data
│   │   └── llm_tool_data.xml
│   ├── __manifest__.py
│   └── views
│       └── llm_resource_views.xml
├── llm_thread
│   ├── controllers
│   │   └── __init__.py
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── models
│   │   ├── __init__.py
│   │   └── res_users.py
│   ├── static
│   │   ├── description
│   │   │   ├── icon.png
│   │   │   ├── banner.jpeg
│   │   │   └── llm_thread_architecture.png
│   │   └── src
│   │       ├── templates
│   │       │   └── llm_chat_client_action.xml
│   │       ├── components
│   │       │   ├── llm_related_record
│   │       │   │   └── llm_related_record.scss
│   │       │   ├── llm_thread_header
│   │       │   │   └── llm_thread_header.scss
│   │       │   └── llm_tool_message
│   │       │       └── llm_tool_message.scss
│   │       ├── patches
│   │       │   └── message_patch.xml
│   │       └── client_actions
│   │           └── open_chatter_action.js
│   ├── security
│   │   ├── ir.model.access.csv
│   │   └── llm_thread_security.xml
│   └── views
│       └── menu.xml
├── llm_tool_knowledge
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   ├── data
│   │   └── llm_tool_data.xml
│   ├── changelog.rst
│   └── __manifest__.py
├── llm_tool_ocr_mistral
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       ├── screenshot-ocr-models.png
│   │       ├── screenshot-compare-image.png
│   │       └── screenshot-mistral-provider.png
│   ├── data
│   │   └── llm_tool_data.xml
│   └── __manifest__.py
├── llm
│   ├── wizards
│   │   └── __init__.py
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── overview.jpg
│   ├── models
│   │   ├── __init__.py
│   │   └── llm_publisher.py
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   └── llm_menu_views.xml
│   ├── __manifest__.py
│   ├── data
│   │   └── mail_message_subtype.xml
│   └── changelog.rst
├── llm_assistant
│   ├── controllers
│   │   └── __init__.py
│   ├── tests
│   │   └── __init__.py
│   ├── wizards
│   │   └── __init__.py
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   ├── description
│   │   │   ├── icon.png
│   │   │   ├── banner.jpeg
│   │   │   └── llm_assistant_architecture.png
│   │   └── src
│   │       └── patches
│   │           └── llm_thread_header_patch.js
│   ├── models
│   │   ├── __init__.py
│   │   ├── llm_prompt_tag.py
│   │   └── arguments_schema.py
│   ├── data
│   │   ├── llm_prompt_tag_data.xml
│   │   ├── llm_prompt_category_data.xml
│   │   └── llm_assistant_data.xml
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   ├── llm_menu_views.xml
│   │   └── llm_thread_views.xml
│   ├── utils.py
│   └── changelog.rst
├── llm_knowledge_automation
│   ├── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── screenshots
│   │           ├── automation_triggers.png
│   │           └── model_domain_for_automation.png
│   └── __manifest__.py
├── llm_assistant_account_invoice
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   └── account_move.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       ├── screenshot-ocr-models.png
│   │       ├── screenshot-press-process.png
│   │       ├── screenshot-chatgpt-provider.png
│   │       ├── screenshot-mistral-provider.png
│   │       ├── screenshot-chat-with-assistant.png
│   │       └── screenshot-filled-up-invoice.png
│   ├── views
│   │   └── account_move_views.xml
│   └── __manifest__.py
├── llm_mcp_server
│   ├── wizards
│   │   └── __init__.py
│   ├── controllers
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       ├── client_codex.png
│   │       ├── client_cursor.png
│   │       ├── client_vscode.png
│   │       ├── client_windsurf.png
│   │       ├── client_claude_code.png
│   │       ├── llm_mcp_server_demo.gif
│   │       └── client_claude_desktop.png
│   ├── models
│   │   ├── __init__.py
│   │   └── res_users.py
│   ├── __init__.py
│   ├── data
│   │   └── llm_mcp_server_config.xml
│   ├── security
│   │   └── ir.model.access.csv
│   ├── views
│   │   └── res_users_views.xml
│   └── changelog.rst
├── llm_fal_ai
│   ├── controllers
│   │   └── __init__.py
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   └── llm_model.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── falai_logo.png
│   ├── data
│   │   ├── llm_provider.xml
│   │   └── llm_publisher.xml
│   └── __manifest__.py
├── llm_openai
│   ├── __init__.py
│   ├── utils
│   │   └── __init__.py
│   ├── models
│   │   └── __init__.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       ├── openai_logo.png
│   │       └── openai_logo.svg
│   ├── data
│   │   ├── llm_provider.xml
│   │   ├── llm_model.xml
│   │   └── llm_publisher.xml
│   ├── __manifest__.py
│   └── changelog.rst
├── llm_document_page
│   ├── wizards
│   │   ├── __init__.py
│   │   └── upload_resource_wizard_views.xml
│   ├── __init__.py
│   ├── models
│   │   ├── __init__.py
│   │   └── llm_resource.py
│   ├── pyproject.toml
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   ├── views
│   │   └── document_page_views.xml
│   └── __manifest__.py
├── llm_knowledge
│   ├── __init__.py
│   ├── pyproject.toml
│   ├── wizards
│   │   └── __init__.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       ├── banner.jpeg
│   │       └── screenshots
│   │           ├── chunks_view.png
│   │           ├── grocerylist.webp
│   │           ├── mistral_parser.png
│   │           ├── openai_model.png
│   │           ├── qdrant_config.png
│   │           ├── upload_wizard.png
│   │           ├── collection_create.png
│   │           ├── llm_assistant_chat.png
│   │           ├── mistral_ocr_models.png
│   │           ├── processing_pipeline.png
│   │           ├── vector_store_list.png
│   │           ├── claude_desktop_usage.png
│   │           └── resources_list_and_process.png
│   ├── views
│   │   ├── llm_resource_menu.xml
│   │   └── menu.xml
│   ├── models
│   │   ├── __init__.py
│   │   ├── mail_thread.py
│   │   └── llm_knowledge_domain.py
│   ├── security
│   │   └── ir.model.access.csv
│   ├── migrations
│   │   └── 16.0.1.1.0
│   │       └── post-migration.py
│   └── __manifest__.py
├── llm_pgvector
│   ├── pyproject.toml
│   ├── models
│   │   └── __init__.py
│   ├── static
│   │   └── description
│   │       ├── icon.png
│   │       └── banner.jpeg
│   ├── __init__.py
│   ├── views
│   │   ├── menu_views.xml
│   │   └── llm_store_views.xml
│   ├── security
│   │   └── ir.model.access.csv
│   └── __manifest__.py
├── requirements.txt
├── .ruff.toml
├── .gitignore
├── run_tests.sh
└── .claude
    └── commands
        └── fix-app-store-html.md
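
Every addon above ships a __manifest__.py, but this dump only includes the __init__.py and pyproject.toml contents. For orientation, an Odoo manifest is a plain Python dict; the sketch below is illustrative only, with hypothetical values (the version series is inferred from the 16.0.* migration directories in the tree, and the dependency on the core `llm` module is an assumption):

# Illustrative manifest sketch -- the real manifests are not part of this dump.
{
    "name": "LLM OpenAI",           # hypothetical display name
    "version": "16.0.1.0.0",        # series inferred from the 16.0.* migration dirs
    "depends": ["llm"],             # provider addons presumably depend on the core `llm` module
    "data": [
        "data/llm_provider.xml",    # file paths taken from the tree above
        "data/llm_publisher.xml",
    ],
    "license": "LGPL-3",            # assumption; check each addon's manifest
}
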
/web_json_editor/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_chroma/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_comfyui/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_letta/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_mistral/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_ollama/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_qdrant/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_store/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_tool/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_anthropic/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_comfy_icu/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_generate/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_generate_job/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_replicate/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_tool_demo/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_training/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_knowledge_llama/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_knowledge_mistral/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_thread/controllers/__init__.py:
--------------------------------------------------------------------------------
1 | from . import main
2 |
--------------------------------------------------------------------------------
/llm_tool_knowledge/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm/wizards/__init__.py:
--------------------------------------------------------------------------------
1 | from . import fetch_models_wizard
2 |
--------------------------------------------------------------------------------
/llm_assistant/controllers/__init__.py:
--------------------------------------------------------------------------------
1 | from . import main
2 |
--------------------------------------------------------------------------------
/llm_knowledge_automation/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import wizards
3 |
--------------------------------------------------------------------------------
/llm_assistant/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Test module for llm_assistant
2 |
--------------------------------------------------------------------------------
/llm_assistant/wizards/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_prompt_test
2 |
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 |
--------------------------------------------------------------------------------
/llm_chroma/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_store_chroma
2 |
--------------------------------------------------------------------------------
/llm_mcp_server/wizards/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mcp_key_wizard
2 |
--------------------------------------------------------------------------------
/llm_qdrant/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_store_qdrant
2 |
--------------------------------------------------------------------------------
/llm_fal_ai/controllers/__init__.py:
--------------------------------------------------------------------------------
1 | from . import webhook_controller
2 |
--------------------------------------------------------------------------------
/llm_mcp_server/controllers/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mcp_controller
2 |
--------------------------------------------------------------------------------
/llm_openai/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import utils
3 |
--------------------------------------------------------------------------------
/llm_openai/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from . import openai_message_validator
2 |
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import ir_attachment
2 |
--------------------------------------------------------------------------------
/llm_document_page/wizards/__init__.py:
--------------------------------------------------------------------------------
1 | from . import upload_resource_wizard
2 |
--------------------------------------------------------------------------------
/llm_fal_ai/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import controllers
3 |
--------------------------------------------------------------------------------
/llm_fal_ai/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import fal_ai_provider, llm_model
2 |
--------------------------------------------------------------------------------
/llm_knowledge/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import wizards
3 |
--------------------------------------------------------------------------------
/llm_thread/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import controllers
3 |
--------------------------------------------------------------------------------
/llm_tool_demo/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from . import test_decorator_registration
2 |
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import account_move
2 |
--------------------------------------------------------------------------------
/llm_document_page/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import wizards
3 |
--------------------------------------------------------------------------------
/llm_knowledge_mistral/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mistral_resource_parser
2 |
--------------------------------------------------------------------------------
/llm_knowledge_llama/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_knowledge_llama_chunkers
2 |
--------------------------------------------------------------------------------
/llm_tool_knowledge/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_tool_knowledge_retriever
2 |
--------------------------------------------------------------------------------
/llm_mistral/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mistral_provider
2 | from . import llm_model
3 |
--------------------------------------------------------------------------------
/llm_comfyui/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import http_client
2 | from . import comfyui_provider
3 |
--------------------------------------------------------------------------------
/llm_ollama/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mail_message
2 | from . import ollama_provider
3 |
--------------------------------------------------------------------------------
/llm_openai/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import openai_provider
2 | from . import mail_message
3 |
--------------------------------------------------------------------------------
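
Across the provider addons (llm_openai, llm_anthropic, llm_mistral, llm_ollama, ...) the models package consistently registers a <vendor>_provider module, which suggests each one extends a shared "llm.provider" model defined in the core `llm` addon. A purely illustrative sketch of that Odoo extension pattern follows; the model name matches the core module's llm_provider.py, but the method name is an assumption, not the addons' real API:

# Hypothetical sketch of the provider-extension pattern implied by the
# imports above. The hook name below is illustrative only.
from odoo import models


class LLMProvider(models.Model):
    _inherit = "llm.provider"

    def _dispatch_chat(self, messages):
        # A real provider addon would branch on the configured service
        # and call the vendor SDK here.
        raise NotImplementedError
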
/llm_store/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_store
2 | from . import llm_store_collection
3 |
--------------------------------------------------------------------------------
/llm/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
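
This same three-line pyproject.toml appears in every addon: packaging is delegated to whool, a PEP 517 build backend for Odoo addons, so each addon directory can be built into a wheel on its own. A minimal sketch of building all addons with the `build` frontend, assuming `pip install build whool` in the current environment (`python -m build <addon>` is the one-off CLI equivalent):

# Sketch: build a wheel per addon via the PEP 517 `build` API.
from pathlib import Path

from build import ProjectBuilder

for pyproject in sorted(Path(".").glob("*/pyproject.toml")):
    builder = ProjectBuilder(pyproject.parent)  # picks up the whool backend
    print("built", builder.build("wheel", output_directory="dist"))
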
/llm_anthropic/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import anthropic_provider
2 | from . import mail_message
3 |
--------------------------------------------------------------------------------
/llm_comfy_icu/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import http_client
2 | from . import comfy_icu_provider
3 |
--------------------------------------------------------------------------------
/llm_document_page/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import document_page
2 | from . import llm_resource
3 |
--------------------------------------------------------------------------------
/llm_generate/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Test module for llm_generate
2 | from . import test_thread_schema
3 |
--------------------------------------------------------------------------------
/llm_replicate/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import replicate_provider
2 | from . import replicate_model
3 |
--------------------------------------------------------------------------------
/llm_assistant/__init__.py:
--------------------------------------------------------------------------------
1 | from . import models
2 | from . import controllers
3 | from . import wizards
4 |
--------------------------------------------------------------------------------
/llm_chroma/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_comfyui/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_fal_ai/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_generate/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_letta/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_mistral/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_ollama/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_openai/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_pgvector/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_qdrant/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_store/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_thread/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_tool/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_training/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_anthropic/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_assistant/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_comfy_icu/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_comfyui/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Test module for llm_comfyui
2 | from . import test_comfyui_schema_generation
3 |
--------------------------------------------------------------------------------
/llm_document_page/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_knowledge/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_mcp_server/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_replicate/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_tool_demo/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/web_json_editor/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm/static/description/icon.png
--------------------------------------------------------------------------------
/llm_comfy_icu/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Test module for llm_comfy_icu
2 | from . import test_comfy_icu_schema_generation
3 |
--------------------------------------------------------------------------------
/llm_knowledge_llama/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_knowledge_mistral/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_pgvector/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_store_pgvector
2 | from . import llm_knowledge_chunk_embedding
3 |
--------------------------------------------------------------------------------
/llm_replicate/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Test module for llm_replicate
2 | from . import test_replicate_schema_generation
3 |
--------------------------------------------------------------------------------
/llm_thread/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_thread
2 | from . import mail_message
3 | from . import res_users
4 |
--------------------------------------------------------------------------------
/llm_tool_knowledge/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm/static/description/overview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm/static/description/overview.jpg
--------------------------------------------------------------------------------
/llm_knowledge/wizards/__init__.py:
--------------------------------------------------------------------------------
1 | from . import create_rag_resource_wizard
2 | from . import upload_resource_wizard
3 |
--------------------------------------------------------------------------------
/llm_knowledge_automation/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_knowledge_collection
2 | from . import base_automation
3 |
--------------------------------------------------------------------------------
/llm_knowledge_automation/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_letta/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import letta_provider
2 | from . import llm_thread
3 | from . import mail_message
4 |
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["whool"]
3 | build-backend = "whool.buildapi"
4 |
--------------------------------------------------------------------------------
/llm_chroma/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_chroma/static/description/icon.png
--------------------------------------------------------------------------------
/llm_fal_ai/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_fal_ai/static/description/icon.png
--------------------------------------------------------------------------------
/llm_letta/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_letta/static/description/icon.png
--------------------------------------------------------------------------------
/llm_ollama/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_ollama/static/description/icon.png
--------------------------------------------------------------------------------
/llm_openai/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_openai/static/description/icon.png
--------------------------------------------------------------------------------
/llm_qdrant/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_qdrant/static/description/icon.png
--------------------------------------------------------------------------------
/llm_store/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_store/static/description/icon.png
--------------------------------------------------------------------------------
/llm_thread/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_thread/static/description/icon.png
--------------------------------------------------------------------------------
/llm_tool/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool/static/description/icon.png
--------------------------------------------------------------------------------
/llm_comfyui/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_comfyui/static/description/icon.png
--------------------------------------------------------------------------------
/llm_generate/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_generate/static/description/icon.png
--------------------------------------------------------------------------------
/llm_letta/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_letta/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_mistral/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mistral/static/description/icon.png
--------------------------------------------------------------------------------
/llm_pgvector/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_pgvector/static/description/icon.png
--------------------------------------------------------------------------------
/llm_store/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_store/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_tool/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_training/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_training/static/description/icon.png
--------------------------------------------------------------------------------
/llm_anthropic/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_anthropic/static/description/icon.png
--------------------------------------------------------------------------------
/llm_assistant/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant/static/description/icon.png
--------------------------------------------------------------------------------
/llm_chroma/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_chroma/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_comfy_icu/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_comfy_icu/static/description/icon.png
--------------------------------------------------------------------------------
/llm_comfyui/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_comfyui/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_fal_ai/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_fal_ai/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_generate/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_generate/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_knowledge/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/icon.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/icon.png
--------------------------------------------------------------------------------
/llm_mistral/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mistral/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_ollama/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_ollama/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_openai/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_openai/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_pgvector/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_pgvector/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_qdrant/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_qdrant/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_replicate/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_replicate/static/description/icon.png
--------------------------------------------------------------------------------
/llm_thread/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_thread/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_tool_demo/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_demo/static/description/icon.png
--------------------------------------------------------------------------------
/llm_training/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_training_dataset
2 | from . import llm_training_job
3 | from . import llm_provider
4 |
--------------------------------------------------------------------------------
/llm_training/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_training/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_anthropic/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_anthropic/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_assistant/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_comfy_icu/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_comfy_icu/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_document_page/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_document_page/static/description/icon.png
--------------------------------------------------------------------------------
/llm_fal_ai/static/description/falai_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_fal_ai/static/description/falai_logo.png
--------------------------------------------------------------------------------
/llm_generate_job/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_generate_job/static/description/icon.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_ollama/static/description/ollama_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_ollama/static/description/ollama_logo.png
--------------------------------------------------------------------------------
/llm_openai/static/description/openai_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_openai/static/description/openai_logo.png
--------------------------------------------------------------------------------
/llm_replicate/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_replicate/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_tool_demo/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_demo/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_model
2 | from . import llm_provider
3 | from . import llm_publisher
4 | from . import mail_message
5 |
--------------------------------------------------------------------------------
/llm_comfyui/static/description/comfyui_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_comfyui/static/description/comfyui_logo.png
--------------------------------------------------------------------------------
/llm_document_page/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_document_page/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_generate_job/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_generate_job/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_knowledge_llama/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_llama/static/description/icon.png
--------------------------------------------------------------------------------
/llm_mistral/static/description/mistral_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mistral/static/description/mistral_logo.png
--------------------------------------------------------------------------------
/llm_tool/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from . import test_llm_tool_concurrency
2 | from . import test_llm_tool_core
3 | from . import test_llm_tool_schema
4 |
--------------------------------------------------------------------------------
/llm_tool_knowledge/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_knowledge/static/description/icon.png
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_ocr_mistral/static/description/icon.png
--------------------------------------------------------------------------------
/llm_knowledge_llama/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_llama/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_knowledge_mistral/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_mistral/static/description/icon.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/client_codex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/client_codex.png
--------------------------------------------------------------------------------
/llm_tool_knowledge/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_knowledge/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_anthropic/static/description/anthropic_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_anthropic/static/description/anthropic_logo.png
--------------------------------------------------------------------------------
/llm_comfy_icu/static/description/comfy_icu_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_comfy_icu/static/description/comfy_icu_logo.png
--------------------------------------------------------------------------------
/llm_knowledge_automation/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_automation/static/description/icon.png
--------------------------------------------------------------------------------
/llm_knowledge_mistral/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_mistral/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/client_cursor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/client_cursor.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/client_vscode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/client_vscode.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/client_windsurf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/client_windsurf.png
--------------------------------------------------------------------------------
/llm_replicate/static/description/replicate_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_replicate/static/description/replicate_logo.png
--------------------------------------------------------------------------------
/llm_tool/static/description/llm_tool_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool/static/description/llm_tool_architecture.png
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_ocr_mistral/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_knowledge_automation/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_automation/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_mcp_server/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_mcp_server_config
2 | from . import llm_mcp_session
3 | from . import llm_tool
4 | from . import res_users
5 |
--------------------------------------------------------------------------------
/llm_store/static/description/llm_store_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_store/static/description/llm_store_architecture.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/icon.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/client_claude_code.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/client_claude_code.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/llm_mcp_server_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/llm_mcp_server_demo.gif
--------------------------------------------------------------------------------
/llm_thread/static/description/llm_thread_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_thread/static/description/llm_thread_architecture.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/banner.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/banner.jpeg
--------------------------------------------------------------------------------
/llm_generate_job/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_generation_job
2 | from . import llm_generation_queue
3 | from . import llm_provider
4 | from . import llm_thread
5 |
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/chunks_view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/chunks_view.png
--------------------------------------------------------------------------------
/llm_mcp_server/static/description/client_claude_desktop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_mcp_server/static/description/client_claude_desktop.png
--------------------------------------------------------------------------------
/llm_assistant/static/description/llm_assistant_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant/static/description/llm_assistant_architecture.png
--------------------------------------------------------------------------------
/llm_generate/static/description/llm_generate_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_generate/static/description/llm_generate_architecture.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/grocerylist.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/grocerylist.webp
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/mistral_parser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/mistral_parser.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/openai_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/openai_model.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/qdrant_config.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/qdrant_config.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/upload_wizard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/upload_wizard.png
--------------------------------------------------------------------------------
/llm_mcp_server/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mcp_json_dispatcher # Register MCP JSON dispatcher
2 | from . import controllers
3 | from . import models
4 | from . import wizards
5 |
--------------------------------------------------------------------------------
/llm_tool_demo/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import crm_lead
2 | from . import sale_order
3 | from . import res_users
4 | from . import ir_model
5 | from . import utility_tools
6 |
--------------------------------------------------------------------------------
/llm_generate/static/src/components/message/message.scss:
--------------------------------------------------------------------------------
1 | .o_llm_media_gen_params {
2 | .border-start {
3 | border-left: 2px solid rgba(0, 0, 0, 0.1) !important;
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/static/description/screenshot-ocr-models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_ocr_mistral/static/description/screenshot-ocr-models.png
--------------------------------------------------------------------------------
/llm_generate/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_model
2 | from . import llm_provider
3 | from . import llm_thread
4 | from . import llm_tool_generate
5 | from . import mail_message
6 |
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/collection_create.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/collection_create.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/llm_assistant_chat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/llm_assistant_chat.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/mistral_ocr_models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/mistral_ocr_models.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/processing_pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/processing_pipeline.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/vector_store_list.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/vector_store_list.png
--------------------------------------------------------------------------------
/llm_knowledge_mistral/data/llm_tool_data.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/static/description/screenshot-compare-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_ocr_mistral/static/description/screenshot-compare-image.png
--------------------------------------------------------------------------------
/llm_generate_job/static/description/llm_generate_job_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_generate_job/static/description/llm_generate_job_architecture.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/claude_desktop_usage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/claude_desktop_usage.png
--------------------------------------------------------------------------------
/llm_knowledge_mistral/static/description/screenshots/grocerylist.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_mistral/static/description/screenshots/grocerylist.webp
--------------------------------------------------------------------------------
/llm_knowledge_mistral/static/description/screenshots/mistral_parser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_mistral/static/description/screenshots/mistral_parser.png
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/static/description/screenshot-mistral-provider.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_tool_ocr_mistral/static/description/screenshot-mistral-provider.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/screenshot-ocr-models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/screenshot-ocr-models.png
--------------------------------------------------------------------------------
/llm_knowledge/static/description/screenshots/resources_list_and_process.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge/static/description/screenshots/resources_list_and_process.png
--------------------------------------------------------------------------------
/llm_knowledge_mistral/static/description/screenshots/mistral_ocr_models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_mistral/static/description/screenshots/mistral_ocr_models.png
--------------------------------------------------------------------------------
/llm_pgvector/__init__.py:
--------------------------------------------------------------------------------
1 | from . import fields
2 | from . import models
3 | from .init_hook import pre_init_hook
4 |
5 | # Export the main classes for easier imports
6 | from .fields import PgVector
7 |
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/screenshot-press-process.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/screenshot-press-process.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/screenshot-chatgpt-provider.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/screenshot-chatgpt-provider.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/screenshot-mistral-provider.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/screenshot-mistral-provider.png
--------------------------------------------------------------------------------
/llm_knowledge_automation/static/description/screenshots/automation_triggers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_automation/static/description/screenshots/automation_triggers.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/screenshot-chat-with-assistant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/screenshot-chat-with-assistant.png
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/static/description/screenshot-filled-up-invoice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_assistant_account_invoice/static/description/screenshot-filled-up-invoice.png
--------------------------------------------------------------------------------
/llm_knowledge_automation/static/description/screenshots/model_domain_for_automation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/apexive/odoo-llm/HEAD/llm_knowledge_automation/static/description/screenshots/model_domain_for_automation.png
--------------------------------------------------------------------------------
/llm_tool_demo/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_utility_tools,access_llm_utility_tools,model_llm_utility_tools,base.group_user,1,0,0,0
3 |
--------------------------------------------------------------------------------
/llm_assistant/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_thread
2 | from . import llm_assistant
3 | from . import llm_assistant_action_mixin
4 | from . import llm_prompt
5 | from . import llm_prompt_tag
6 | from . import llm_prompt_category
7 |
--------------------------------------------------------------------------------
/llm_letta/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.0.1 (2025-12-01)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [FIX] Added letta_normalize_prepend_messages() method to fix dispatch error
5 |
6 | 18.0.1.0.0
7 | ~~~~~~~~~~
8 |
9 | * Initial release with Letta agent integration
10 |
--------------------------------------------------------------------------------
/llm_store/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_store_user,llm.store.user,model_llm_store,base.group_user,1,0,0,0
3 | access_llm_store_manager,llm.store.manager,model_llm_store,llm.group_llm_manager,1,1,1,1
4 |
--------------------------------------------------------------------------------
/llm_thread/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_thread_user,llm.thread.user,model_llm_thread,base.group_user,1,1,1,0
3 | access_llm_thread_manager,llm.thread.manager,model_llm_thread,llm.group_llm_manager,1,1,1,1
4 |
--------------------------------------------------------------------------------
/llm_knowledge/views/llm_resource_menu.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
12 |
--------------------------------------------------------------------------------
/llm_thread/static/src/templates/llm_chat_client_action.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/llm_pgvector/views/menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
10 |
11 |
--------------------------------------------------------------------------------
/llm_store/views/llm_store_menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
12 |
--------------------------------------------------------------------------------
/llm_replicate/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.1.1 (2025-10-23)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [MIGRATION] Migrated to Odoo 18.0
5 |
6 | 16.0.1.1.0 (2025-03-06)
7 | ~~~~~~~~~~~~~~~~~~~~~~~
8 |
9 | * [ADD] Updated chat method to accept additional params
10 |
11 | 16.0.1.0.0 (2025-01-02)
12 | ~~~~~~~~~~~~~~~~~~~~~~~
13 |
14 | * [INIT] Initial release of the module
15 |
--------------------------------------------------------------------------------
/llm_letta/security/res_groups.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | 365.0
8 |
9 |
10 |
--------------------------------------------------------------------------------
/llm_mistral/models/llm_model.py:
--------------------------------------------------------------------------------
1 | from odoo import api, models
2 |
3 |
4 | class LLMModel(models.Model):
5 | _inherit = "llm.model"
6 |
7 | @api.model
8 | def _get_available_model_usages(self):
9 | available_model_usages = super()._get_available_model_usages()
10 | return available_model_usages + [
11 | ("ocr", "OCR"),
12 | ]
13 |
--------------------------------------------------------------------------------
/llm_store/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.0.0 (2025-10-23)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [MIGRATION] Migrated to Odoo 18.0
5 | * [IMP] Updated views and dependencies for compatibility
6 |
7 | 16.0.1.0.0
8 | ~~~~~~~~~~
9 |
10 | * [INIT] Initial release
11 | * [ADD] Vector store abstraction layer
12 | * [ADD] Collection management
13 | * [ADD] Support for ChromaDB, pgvector, and Qdrant
14 |
--------------------------------------------------------------------------------
/llm_fal_ai/data/llm_provider.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_provider_fal_ai" model="llm.provider">
5 |         <field name="name">Fal ai</field>
6 |         <field name="service">fal_ai</field>
7 |         <field name="default_model">fal-ai/flux/dev</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_knowledge/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import mail_thread
2 | from . import llm_resource
3 | from . import llm_resource_retriever
4 | from . import llm_resource_parser
5 | from . import llm_resource_http
6 | from . import ir_attachment
7 | from . import llm_resource_chunker
8 | from . import llm_knowledge_chunk
9 | from . import llm_knowledge_collection
10 | from . import llm_knowledge_domain
11 |
--------------------------------------------------------------------------------
/llm_pgvector/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_knowledge_chunk_embedding_user,llm.knowledge.chunk.embedding.user,model_llm_knowledge_chunk_embedding,base.group_user,1,0,0,0
3 | access_llm_knowledge_chunk_embedding_manager,llm.knowledge.chunk.embedding.manager,model_llm_knowledge_chunk_embedding,llm.group_llm_manager,1,1,1,1
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # generated from the manifests' external_dependencies
2 | PyMuPDF
3 | anthropic
4 | chromadb-client
5 | emoji
6 | fal_client
7 | jinja2
8 | jsonref
9 | jsonschema
10 | letta-client
11 | llama_index
12 | markdown2
13 | markdownify
14 | mcp
15 | mistralai
16 | nltk
17 | numpy
18 | ollama
19 | openai
20 | pgvector
21 | pydantic>=2.0.0
22 | pyyaml
23 | qdrant-client
24 | replicate
25 | requests
26 |
--------------------------------------------------------------------------------
/llm_anthropic/data/llm_provider.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_provider_anthropic" model="llm.provider">
5 |         <field name="name">Anthropic</field>
6 |         <field name="service">anthropic</field>
7 |         <field name="api_base">https://api.anthropic.com</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_comfyui/data/llm_provider.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_provider_comfyui" model="llm.provider">
5 |         <field name="name">ComfyUI</field>
6 |         <field name="service">comfyui</field>
7 |     </record>
8 | </odoo>
9 |
--------------------------------------------------------------------------------
/web_json_editor/static/src/components/json_editor/json_editor.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/llm_tool/models/__init__.py:
--------------------------------------------------------------------------------
1 | from . import llm_tool
2 | from . import llm_tool_record_retriever
3 | from . import llm_provider
4 | from . import llm_model
5 | from . import llm_tool_consent_config
6 | from . import llm_tool_record_updater
7 | from . import llm_tool_record_creator
8 | from . import llm_tool_record_unlinker
9 | from . import llm_tool_model_inspector
10 | from . import llm_tool_model_method_executor
11 | from . import mail_message
12 |
--------------------------------------------------------------------------------
/llm_letta/data/llm_provider.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_provider_letta" model="llm.provider">
5 |         <field name="name">Letta</field>
6 |         <field name="service">letta</field>
7 |         <field name="api_base">http://localhost:8283</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_generate_job/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "llm_generate_job"
7 | version = "16.0.1.0.0"
8 | description = "Generation Job Management and Queue System for LLM Providers"
9 | authors = [
10 | {name = "Apexive", email = "info@apexive.com"},
11 | ]
12 | license = {text = "LGPL-3"}
13 | dependencies = [
14 | "odoo>=16.0,<16.1",
15 | ]
16 |
--------------------------------------------------------------------------------
/llm_thread/views/menu.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
18 |
19 |
--------------------------------------------------------------------------------
/llm_mistral/data/llm_provider.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_provider_mistral" model="llm.provider">
5 |         <field name="name">Mistral AI</field>
6 |         <field name="service">mistral</field>
7 |         <field name="api_base">https://api.mistral.ai/v1</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_openai/data/llm_provider.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_provider_openai" model="llm.provider">
5 |         <field name="name">OpenAI</field>
6 |         <field name="service">openai</field>
7 |         <field name="api_base">https://api.openai.com/v1</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_comfyui/data/llm_prompt_category_data.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_prompt_category_comfyui_workflow" model="llm.prompt.category">
5 |         <field name="name">ComfyUI Workflow</field>
6 |         <field name="code">comfyui_workflow</field>
7 |         <field name="sequence">10</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_openai/data/llm_model.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | gpt-4o
5 |
6 |
7 | chat
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/llm_tool/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_tool_user,llm.tool.user,model_llm_tool,base.group_user,1,0,0,0
3 | access_llm_tool_system,llm.tool.system,model_llm_tool,base.group_system,1,1,1,1
4 | access_llm_tool_consent_config_user,llm.tool.consent.config.user,model_llm_tool_consent_config,base.group_user,1,0,0,0
5 | access_llm_tool_consent_config_system,llm.tool.consent.config.system,model_llm_tool_consent_config,base.group_system,1,1,1,1
6 |
--------------------------------------------------------------------------------
/llm_tool/models/llm_model.py:
--------------------------------------------------------------------------------
1 | from odoo import models
2 |
3 |
4 | class LLMModel(models.Model):
5 | _inherit = "llm.model"
6 |
7 | def chat(self, messages, stream=False, tools=None, tool_choice="auto", **kwargs):
8 | """Send chat messages using this model"""
9 | return self.provider_id.chat(
10 | messages,
11 | model=self,
12 | stream=stream,
13 | tools=tools,
14 | tool_choice=tool_choice,
15 | **kwargs,
16 | )
17 |
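A minimal usage sketch of this `chat()` delegation, as from an Odoo shell context (the model/tool names and search domains are illustrative, not shipped data):

```python
# Hypothetical usage sketch -- record names are placeholders.
model = env["llm.model"].search([("name", "=", "gpt-4o")], limit=1)
tools = env["llm.tool"].search([("name", "=", "knowledge_retriever")])

# chat() forwards messages, tool definitions, and extra kwargs to the
# provider bound to this model.
response = model.chat(
    [{"role": "user", "content": "Summarize the knowledge base."}],
    stream=False,
    tools=tools,
    tool_choice="auto",
)
```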
--------------------------------------------------------------------------------
/llm_comfyui/data/llm_model.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Default ComfyUI Model
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/llm_replicate/static/description/replicate_logo.svg:
--------------------------------------------------------------------------------
1 |
8 |
--------------------------------------------------------------------------------
/web_json_editor/static/src/fields/json_field.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/llm_tool/views/llm_menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
12 |
13 |
20 |
21 |
--------------------------------------------------------------------------------
/llm_mistral/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.0.1 (2025-11-17)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [FIX] Removed wizard override - model fetching now handled by base provider
5 | * [IMP] Reordered OCR capability detection to check string patterns before API capabilities
6 | * [ADD] Added _determine_model_use() override for OCR model classification
7 | * [REM] Removed wizards directory and related imports
8 |
9 | 18.0.1.0.0 (2025-10-23)
10 | ~~~~~~~~~~~~~~~~~~~~~~~
11 |
12 | * [MIGRATION] Migrated to Odoo 18.0
13 | * [IMP] Updated views and manifest for compatibility
14 |
--------------------------------------------------------------------------------
/llm_training/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_training_dataset_user,llm.training.dataset.user,model_llm_training_dataset,base.group_user,1,0,0,0
3 | access_llm_training_dataset_manager,llm.training.dataset.manager,model_llm_training_dataset,llm.group_llm_manager,1,1,1,1
4 | access_llm_training_job_user,llm.training.job.user,model_llm_training_job,base.group_user,1,0,0,0
5 | access_llm_training_job_manager,llm.training.job.manager,model_llm_training_job,llm.group_llm_manager,1,1,1,1
6 |
--------------------------------------------------------------------------------
/llm_assistant/models/llm_prompt_tag.py:
--------------------------------------------------------------------------------
1 | from random import randint
2 |
3 | from odoo import fields, models
4 |
5 |
6 | class LLMPromptTag(models.Model):
7 | _name = "llm.prompt.tag"
8 | _description = "LLM Prompt Tag"
9 |
10 | def _get_default_color(self):
11 | return randint(1, 11)
12 |
13 | name = fields.Char("Tag Name", required=True, translate=True)
14 | color = fields.Integer("Color", default=_get_default_color)
15 |
16 | _sql_constraints = [
17 | ("name_uniq", "unique (name)", "Tag name already exists!"),
18 | ]
19 |
--------------------------------------------------------------------------------
/llm_generate_job/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_generation_job_user,llm.generation.job.user,model_llm_generation_job,base.group_user,1,1,1,1
3 | access_llm_generation_job_manager,llm.generation.job.manager,model_llm_generation_job,llm.group_llm_manager,1,1,1,1
4 | access_llm_generation_queue_user,llm.generation.queue.user,model_llm_generation_queue,base.group_user,1,0,0,0
5 | access_llm_generation_queue_manager,llm.generation.queue.manager,model_llm_generation_queue,llm.group_llm_manager,1,1,1,1
6 |
--------------------------------------------------------------------------------
/llm_generate/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.2.0.0 (2025-10-23)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [MIGRATION] Migrated to Odoo 18.0
5 | * [IMP] Updated views and components for compatibility
6 |
7 | 16.0.2.0.0
8 | ~~~~~~~~~~
9 |
10 | * [FIX] Fixed async loading issues in media form components
11 | * [IMP] Improved schema computation consistency
12 | * [IMP] Enhanced loading state management
13 | * [IMP] Better error handling and recovery
14 | * [ADD] Schema source transparency and indicators
15 | * [IMP] Enhanced form validation
16 | * [IMP] Improved streaming generation
17 | * [IMP] Better queue management
18 |
--------------------------------------------------------------------------------
/web_json_editor/README.md:
--------------------------------------------------------------------------------
1 | # Web JSON Editor
2 |
3 | A JSON Editor widget for Odoo form views.
4 |
5 | ## Features
6 |
7 | - Syntax highlighting for JSON data
8 | - Schema-based validation and autocomplete
9 | - Multiple view modes (code, view)
10 | - Search functionality
11 | - Undo/redo history
12 |
13 | ## Usage
14 |
15 | ```xml
16 | <field name="my_json_field" widget="json_editor"/> <!-- names illustrative -->
17 | ```
18 |
19 | ## OCA Contribution
20 |
21 | This module has been submitted to the Odoo Community Association (OCA):
22 | - **PR:** https://github.com/OCA/web/pull/3380
23 | - **OCA Module:** `web_widget_json_editor`
24 |
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/models/account_move.py:
--------------------------------------------------------------------------------
1 | from odoo import models
2 |
3 |
4 | class AccountMove(models.Model):
5 | _name = "account.move"
6 | _inherit = ["account.move", "llm.assistant.action.mixin"]
7 |
8 | def action_process_with_ai(self):
9 | """
10 | Parse invoice with AI assistant.
11 | Creates a fresh thread every time (no context carryover).
12 | Frontend opens AI chat for OCR parsing and follow-up questions.
13 | """
14 | return self.action_open_llm_assistant(
15 | "odoo_invoice_data_entry_assistant", force_new_thread=True
16 | )
17 |
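A hedged sketch of reusing the same mixin on another model, mirroring the invoice flow above (the model choice and assistant code string are placeholder assumptions, not shipped records):

```python
# Hypothetical sketch: any model can mix in llm.assistant.action.mixin
# and expose a similar entry point. The assistant code is a placeholder.
from odoo import models


class SaleOrder(models.Model):
    _name = "sale.order"
    _inherit = ["sale.order", "llm.assistant.action.mixin"]

    def action_process_with_ai(self):
        # Open a fresh assistant thread, as the invoice action does above.
        return self.action_open_llm_assistant(
            "odoo_sale_order_assistant", force_new_thread=True
        )
```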
--------------------------------------------------------------------------------
/llm_tool/data/server_actions.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and ref targets are assumptions -->
4 |     <record id="action_reset_input_schema" model="ir.actions.server">
5 |         <field name="name">Reset Input Schema</field>
6 |         <field name="model_id" ref="model_llm_tool"/>
7 |         <field name="binding_model_id" ref="model_llm_tool"/>
8 |         <field name="binding_view_types">list</field>
9 |         <field name="state">code</field>
10 |         <field name="code">action = records.action_reset_input_schema()</field>
11 |     </record>
12 | </odoo>
13 |
--------------------------------------------------------------------------------
/llm_pgvector/views/llm_store_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llm.store.form.inherit.pgvector
5 | llm.store
6 |
7 |
8 |
9 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/llm_thread/models/res_users.py:
--------------------------------------------------------------------------------
1 | from odoo import models
2 |
3 |
4 | class ResUsers(models.Model):
5 | _inherit = "res.users"
6 |
7 | def _init_messaging(self, store):
8 | """Extend init_messaging to include LLM threads following Odoo patterns."""
9 | super()._init_messaging(store)
10 |
11 | # Load user's recent LLM threads (similar to how discuss.channel works)
12 | llm_threads = self.env["llm.thread"].search(
13 | [("user_id", "=", self.id), ("active", "=", True)], order="write_date DESC"
14 | )
15 |
16 | # Use inherited _thread_to_store method from mail.thread
17 | if llm_threads:
18 | llm_threads._thread_to_store(store)
19 |
--------------------------------------------------------------------------------
/llm_document_page/views/document_page_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | document.page.form.inherit.llm
5 | document.page
6 |
7 |
8 |
9 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/llm_generate/static/src/components/llm_media_form/llm_form_fields_view.js:
--------------------------------------------------------------------------------
1 | /** @odoo-module **/
2 |
3 | import { Component } from "@odoo/owl";
4 |
5 | export class LLMFormFieldsView extends Component {
6 | static template = "llm_thread.LLMFormFieldsView";
7 | // Pass relevant parts of LLMMediaForm's state
8 | static props = {
9 | state: { type: Object, optional: false },
10 | inputSchema: { type: Object, optional: true },
11 | formFields: { type: Array, optional: false },
12 | requiredFields: { type: Array, optional: false },
13 | optionalFields: { type: Array, optional: false },
14 | onInputChange: { type: Function, optional: false },
15 | toggleAdvancedSettings: { type: Function, optional: false },
16 | };
17 | }
18 |
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/data/llm_tool_data.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_tool_ocr_mistral" model="llm.tool">
5 |         <field name="name">llm_tool_ocr_mistral</field>
6 |         <field name="implementation">function</field>
7 |         <field name="model">ir.attachment</field>
8 |         <field name="method">llm_tool_ocr_mistral</field>
9 |         <field name="description">Extract text and structured data from PDF or image attachments using Mistral OCR vision model</field>
10 |     </record>
11 | </odoo>
12 |
--------------------------------------------------------------------------------
/llm_mcp_server/data/llm_mcp_server_config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | odoo_llm_mcp_server
6 | 1.0.0
7 | 2025-06-18
8 |
9 |
10 |
11 |
12 | stateful
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/llm_training/views/llm_training_menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
19 |
20 |
21 |
28 |
29 |
--------------------------------------------------------------------------------
/.ruff.toml:
--------------------------------------------------------------------------------
1 |
2 | target-version = "py39"
3 | fix = true
4 |
5 | [lint]
6 | extend-select = [
7 | "B",
8 | "C90",
9 | "E501", # line too long (default 88)
10 | "I", # isort
11 | "UP", # pyupgrade
12 | ]
13 | exclude = ["setup/*"]
14 | ignore = ["E501"]
15 |
16 | [format]
17 | exclude = ["setup/*"]
18 |
19 | [lint.per-file-ignores]
20 | "__init__.py" = ["F401", "I001"] # ignore unused and unsorted imports in __init__.py
21 | "__manifest__.py" = ["B018"] # useless expression
22 |
23 | [lint.isort]
24 | section-order = ["future", "standard-library", "third-party", "odoo", "odoo-addons", "first-party", "local-folder"]
25 |
26 | [lint.isort.sections]
27 | "odoo" = ["odoo"]
28 | "odoo-addons" = ["odoo.addons"]
29 |
30 | [lint.mccabe]
31 | max-complexity = 16
32 |
--------------------------------------------------------------------------------
/llm_ollama/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.2.0 (2025-11-28)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [ADD] Added ollama_normalize_prepend_messages() to convert OpenAI list format to plain strings
5 | * [FIX] Fixed Ollama compatibility with system prompts that use OpenAI's list content format
6 | * [IMP] Changed ollama_chat to use generic format_messages() and format_tools() dispatch methods
7 | * [IMP] Improved consistency with base provider dispatch pattern
8 |
9 | 18.0.1.1.0 (2025-10-23)
10 | ~~~~~~~~~~~~~~~~~~~~~~~
11 |
12 | * [MIGRATION] Migrated to Odoo 18.0
13 |
14 | 16.0.1.1.0 (2025-03-06)
15 | ~~~~~~~~~~~~~~~~~~~~~~~
16 |
17 | * [ADD] Updated chat method to accept additional params
18 |
19 | 16.0.1.0.0 (2025-01-02)
20 | ~~~~~~~~~~~~~~~~~~~~~~~
21 |
22 | * [INIT] Initial release of the module
23 |
--------------------------------------------------------------------------------
/llm_thread/security/llm_thread_security.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record ids and group references are assumptions -->
4 |     <record id="llm_thread_rule_personal" model="ir.rule">
5 |         <field name="name">Personal Chat Threads</field>
6 |         <field name="model_id" ref="model_llm_thread"/>
7 |         <field name="domain_force">[('user_id', '=', user.id)]</field>
8 |         <field name="groups" eval="[(4, ref('base.group_user'))]"/>
9 |     </record>
10 |     <record id="llm_thread_rule_all" model="ir.rule">
11 |         <field name="name">All Chat Threads</field>
12 |         <field name="model_id" ref="model_llm_thread"/>
13 |         <field name="domain_force">[(1, '=', 1)]</field>
14 |         <field name="groups" eval="[(4, ref('llm.group_llm_manager'))]"/>
15 |     </record>
16 | </odoo>
17 |
--------------------------------------------------------------------------------
/llm_qdrant/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Qdrant Integration",
3 | "version": "18.0.1.0.0",
4 | "category": "Technical",
5 | "summary": "Integrates Qdrant vector store with the Odoo LLM framework.",
6 | "description": """
7 | Provides an llm.store implementation using the Qdrant vector database.
8 | Requires the qdrant-client Python package.
9 | """,
10 | "author": "Apexive Solutions LLC",
11 | "website": "https://github.com/apexive/odoo-llm",
12 | "depends": ["llm_knowledge", "llm_store"],
13 | "external_dependencies": {
14 | "python": ["qdrant-client"],
15 | },
16 | "images": ["static/description/banner.jpeg"],
17 | "installable": True,
18 | "application": False,
19 | "auto_install": False,
20 | "license": "LGPL-3",
21 | }
22 |
--------------------------------------------------------------------------------
/llm_ollama/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Ollama LLM Integration",
3 | "summary": "Ollama provider integration for LLM module",
4 | "description": """
5 | Implements Ollama provider service for the LLM integration module.
6 | Supports local deployment of various open source models.
7 | """,
8 | "author": "Apexive Solutions LLC",
9 | "website": "https://github.com/apexive/odoo-llm",
10 | "category": "Technical",
11 | "version": "18.0.1.2.0",
12 | "depends": ["llm", "llm_tool"],
13 | "external_dependencies": {
14 | "python": ["ollama"],
15 | },
16 | "data": [
17 | "data/llm_publisher.xml",
18 | ],
19 | "images": [
20 | "static/description/banner.jpeg",
21 | ],
22 | "license": "LGPL-3",
23 | "installable": True,
24 | }
25 |
--------------------------------------------------------------------------------
/llm_generate_job/views/llm_generation_menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
19 |
20 |
21 |
28 |
29 |
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/views/account_move_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | account.move.form.process.ai
6 | account.move
7 |
8 |
9 |
10 |
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/llm_mcp_server/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_mcp_server_config_manager,llm.mcp.server.config manager,model_llm_mcp_server_config,llm.group_llm_manager,1,1,1,1
3 | access_llm_mcp_server_config_user,llm.mcp.server.config user,model_llm_mcp_server_config,base.group_user,1,0,0,0
4 | access_llm_mcp_server_config_public,llm.mcp.server.config public,model_llm_mcp_server_config,,1,0,0,0
5 | access_llm_mcp_session_manager,llm.mcp.session manager,model_llm_mcp_session,llm.group_llm_manager,1,1,1,1
6 | access_llm_mcp_session_user,llm.mcp.session user,model_llm_mcp_session,base.group_user,1,1,1,0
7 | access_llm_mcp_session_public,llm.mcp.session public,model_llm_mcp_session,,1,1,1,0
8 | access_llm_mcp_key_show_user,llm.mcp.key.show user,model_llm_mcp_key_show,base.group_user,1,1,1,1
9 |
--------------------------------------------------------------------------------
/llm_generate/models/llm_provider.py:
--------------------------------------------------------------------------------
1 | from odoo import models
2 |
3 |
4 | class LLMProvider(models.Model):
5 | """Extension of llm.provider to support I/O schema generation for media generation models."""
6 |
7 | _inherit = "llm.provider"
8 |
9 | def generate_io_schema(self, model_record):
10 | """Generate input/output schema for a media generation model.
11 |
12 | Dispatches to {service}_generate_io_schema() implementation.
13 | """
14 | return self._dispatch("generate_io_schema", model_record)
15 |
16 | def should_generate_io_schema(self, model_record):
17 | """Check if I/O schema should be generated for the model.
18 |
19 | Dispatches to {service}_should_generate_io_schema() implementation.
20 | """
21 | return self._dispatch("should_generate_io_schema", model_record)
22 |
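A hedged sketch of what one concrete dispatch target could look like, following the {service}_generate_io_schema() naming from the docstrings above (the service name, the model_use field access, and the schema bodies are all assumptions):

```python
# Hypothetical dispatch target for a provider whose service is "replicate".
# Field names and schema contents are illustrative assumptions.
from odoo import models


class LLMProvider(models.Model):
    _inherit = "llm.provider"

    def replicate_should_generate_io_schema(self, model_record):
        # Only media-generation models need an I/O schema here.
        return "generate" in (model_record.model_use or "")

    def replicate_generate_io_schema(self, model_record):
        # Return JSON-schema-style descriptions of inputs and outputs.
        return {
            "input": {
                "type": "object",
                "properties": {"prompt": {"type": "string"}},
                "required": ["prompt"],
            },
            "output": {
                "type": "object",
                "properties": {"url": {"type": "string"}},
            },
        }
```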
--------------------------------------------------------------------------------
/llm_mistral/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Mistral AI LLM Integration",
3 | "summary": "Mistral AI provider integration for LLM module",
4 | "description": """
5 | Implements Mistral AI provider service for the LLM integration module.
6 | Supports Mistral models for chat and embedding capabilities.
7 | """,
8 | "author": "Apexive Solutions LLC",
9 | "website": "https://github.com/apexive/odoo-llm",
10 | "category": "Technical",
11 | "version": "18.0.1.0.1",
12 | "depends": ["base", "llm_openai"],
13 | "external_dependencies": {
14 | "python": ["mistralai"],
15 | },
16 | "data": [
17 | "data/llm_publisher.xml",
18 | "data/llm_provider.xml",
19 | ],
20 | "images": ["static/description/banner.jpeg"],
21 | "license": "LGPL-3",
22 | "installable": True,
23 | }
24 |
--------------------------------------------------------------------------------
/llm_tool_knowledge/data/llm_tool_data.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_tool_knowledge_retriever" model="llm.tool">
5 |         <field name="name">knowledge_retriever</field>
6 |         <field name="description">Retrieves relevant knowledge from document database using semantic search. Use this tool when you need specific information from the knowledge base to answer questions accurately. Supports both pure semantic and hybrid search methods, with configurable relevance thresholds.</field>
7 |         <field name="implementation">knowledge_retriever</field>
8 |     </record>
9 | </odoo>
10 |
--------------------------------------------------------------------------------
/llm_comfy_icu/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM ComfyICU Integration",
3 | "version": "18.0.1.0.0",
4 | "category": "Productivity/LLM",
5 | "summary": "Integration with ComfyICU API for media generation",
6 | "description": """
7 | This module integrates Odoo with ComfyICU API for media generation.
8 | It provides a new provider type that can be used with the LLM framework.
9 | """,
10 | "author": "Apexive Solutions LLC",
11 | "website": "https://github.com/apexive/odoo-llm",
12 | "license": "LGPL-3",
13 | "depends": [
14 | "llm",
15 | "llm_generate",
16 | ],
17 | "data": [
18 | "data/llm_publisher.xml",
19 | ],
20 | "images": [
21 | "static/description/banner.png",
22 | ],
23 | "installable": True,
24 | "application": False,
25 | "auto_install": False,
26 | }
27 |
--------------------------------------------------------------------------------
/llm_generate/static/src/components/llm_media_form/llm_media_form.scss:
--------------------------------------------------------------------------------
1 | .max-height-gen-inputs {
2 | max-height: 300px;
3 | }
4 |
5 | .attachment-upload-section {
6 | .form-control[type="file"] {
7 | border: 2px dashed #dee2e6;
8 | border-radius: 0.375rem;
9 | background-color: #f8f9fa;
10 | transition: all 0.15s ease-in-out;
11 |
12 | &:hover {
13 | border-color: #6c757d;
14 | background-color: #e9ecef;
15 | }
16 |
17 | &:focus {
18 | border-color: #0d6efd;
19 | box-shadow: 0 0 0 0.25rem rgba(13, 110, 253, 0.25);
20 | }
21 | }
22 |
23 | .uploaded-attachments {
24 | max-height: 150px;
25 | overflow-y: auto;
26 |
27 | .border {
28 | background-color: #f8f9fa;
29 | border-color: #dee2e6 !important;
30 |
31 | &:hover {
32 | background-color: #e9ecef;
33 | }
34 | }
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/llm_mcp_server/models/res_users.py:
--------------------------------------------------------------------------------
1 | from odoo import _, models
2 |
3 |
4 | class ResUsers(models.Model):
5 | """Extend res.users to add MCP key generation action."""
6 |
7 | _inherit = "res.users"
8 |
9 | def action_new_mcp_key(self):
10 | """Open standard API key wizard with MCP context.
11 |
12 | This triggers the same wizard as 'New API Key' but with context
13 | that redirects to the MCP config show view after key generation.
14 | """
15 | return {
16 | "type": "ir.actions.act_window",
17 | "res_model": "res.users.apikeys.description",
18 | "name": _("New MCP Key"),
19 | "views": [(False, "form")],
20 | "target": "new",
21 | "context": {
22 | "is_mcp_key": True,
23 | "default_name": "MCP Key",
24 | },
25 | }
26 |
--------------------------------------------------------------------------------
/llm_replicate/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Replicate LLM Integration",
3 | "summary": "Replicate provider integration for LLM module",
4 | "description": """
5 | Implements Replicate provider service for the LLM integration module.
6 | Supports diverse AI models and custom model deployments.
7 | """,
8 | "category": "Technical",
9 | "version": "18.0.1.1.1",
10 | "depends": ["llm", "llm_generate"],
11 | "external_dependencies": {
12 | "python": ["replicate", "jsonref"],
13 | },
14 | "data": [
15 | "data/llm_publisher.xml",
16 | "views/replicate_model_views.xml",
17 | ],
18 | "images": [
19 | "static/description/banner.jpeg",
20 | ],
21 | "website": "https://github.com/apexive/odoo-llm",
22 | "author": "Apexive Solutions LLC",
23 | "license": "LGPL-3",
24 | "installable": True,
25 | }
26 |
--------------------------------------------------------------------------------
/llm/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_provider_user,llm.provider.user,model_llm_provider,base.group_user,1,0,0,0
3 | access_llm_provider_manager,llm.provider.manager,model_llm_provider,group_llm_manager,1,1,1,1
4 | access_llm_model_user,llm.model.user,model_llm_model,base.group_user,1,0,0,0
5 | access_llm_model_manager,llm.model.manager,model_llm_model,group_llm_manager,1,1,1,1
6 | access_llm_publisher_user,llm.publisher.user,model_llm_publisher,base.group_user,1,0,0,0
7 | access_llm_publisher_manager,llm.publisher.manager,model_llm_publisher,group_llm_manager,1,1,1,1
8 | access_llm_fetch_models_wizard_manager,llm.fetch.models.wizard.manager,model_llm_fetch_models_wizard,group_llm_manager,1,1,1,1
9 | access_llm_fetch_models_line_manager,llm.fetch.models.line.manager,model_llm_fetch_models_line,group_llm_manager,1,1,1,1
10 |
--------------------------------------------------------------------------------
/llm_assistant/data/llm_prompt_tag_data.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record ids are assumptions -->
4 |     <record id="llm_prompt_tag_technical" model="llm.prompt.tag">
5 |         <field name="name">Technical</field>
6 |     </record>
7 |     <record id="llm_prompt_tag_business" model="llm.prompt.tag">
8 |         <field name="name">Business</field>
9 |     </record>
10 |     <record id="llm_prompt_tag_creative" model="llm.prompt.tag">
11 |         <field name="name">Creative</field>
12 |     </record>
13 |     <record id="llm_prompt_tag_data" model="llm.prompt.tag">
14 |         <field name="name">Data</field>
15 |     </record>
16 |     <record id="llm_prompt_tag_system" model="llm.prompt.tag">
17 |         <field name="name">System</field>
18 |     </record>
19 |     <record id="llm_prompt_tag_assistant" model="llm.prompt.tag">
20 |         <field name="name">Assistant</field>
21 |     </record>
22 | </odoo>
23 |
--------------------------------------------------------------------------------
/llm_openai/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "OpenAI LLM Integration",
3 | "summary": "OpenAI provider integration for LLM module",
4 | "description": """
5 | Implements OpenAI provider service for the LLM integration module.
6 | Supports GPT models for chat and embedding capabilities.
7 | """,
8 | "author": "Apexive Solutions LLC",
9 | "website": "https://github.com/apexive/odoo-llm",
10 | "category": "Technical",
11 | "version": "18.0.1.2.0",
12 | "depends": ["llm", "llm_tool", "llm_training"],
13 | "external_dependencies": {
14 | "python": ["openai"],
15 | },
16 | "data": [
17 | "data/llm_publisher.xml",
18 | "data/llm_provider.xml",
19 | "data/llm_model.xml",
20 | ],
21 | "images": [
22 | "static/description/banner.jpeg",
23 | ],
24 | "license": "LGPL-3",
25 | "installable": True,
26 | }
27 |
--------------------------------------------------------------------------------
/llm_store/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Vector Store Base",
3 | "summary": """
4 | Integration with various vector database providers for LLM applications""",
5 | "description": """
6 | Provides integration with vector stores for:
7 | - Vector storage and retrieval
8 | - Similarity search
9 | - Collection management
10 | - RAG (Retrieval Augmented Generation) support
11 |
12 | """,
13 | "author": "Apexive Solutions LLC",
14 | "website": "https://github.com/apexive/odoo-llm",
15 | "category": "Technical",
16 | "version": "18.0.1.0.0",
17 | "depends": ["llm"],
18 | "data": [
19 | "security/ir.model.access.csv",
20 | "views/llm_store_views.xml",
21 | "views/llm_store_menu_views.xml",
22 | ],
23 | "images": ["static/description/banner.jpeg"],
24 | "license": "LGPL-3",
25 | "installable": True,
26 | }
27 |
--------------------------------------------------------------------------------
/llm_fal_ai/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM - Fal.ai Provider",
3 | "summary": "Integration with the fal.ai API for LLM generation services",
4 | "description": """
5 | Integrates fal.ai services with the LLM module in Odoo.
6 | Provides unified generate() endpoint for image generation using fal.ai models.
7 | Uses model details field for schema storage and supports both sync and async generation.
8 | """,
9 | "author": "Apexive Solutions LLC",
10 | "website": "https://github.com/apexive/odoo-llm",
11 | "category": "Technical",
12 | "version": "18.0.2.0.0",
13 | "depends": ["llm", "llm_generate_job"],
14 | "external_dependencies": {"python": ["fal_client"]},
15 | "data": ["data/llm_publisher.xml", "data/llm_provider.xml", "data/llm_model.xml"],
16 | "images": ["static/description/banner.jpeg"],
17 | "license": "LGPL-3",
18 | "installable": True,
19 | }
20 |
--------------------------------------------------------------------------------
/llm_ollama/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_publisher_ollama" model="llm.publisher">
5 |         <field name="name">Ollama</field>
6 |         <field name="description">Ollama is an open source project that allows running large language models locally. It provides easy deployment and management of various open source models like Llama 2, Mistral, and more.</field>
7 |         <field name="details">{
8 |             "website": "https://ollama.ai",
9 |             "founded": 2023,
10 |             "key_features": ["Local Deployment", "Model Management", "Open Source"]
11 |         }</field>
12 |     </record>
13 | </odoo>
14 |
--------------------------------------------------------------------------------
/llm_letta/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <!-- reconstructed structure; record id and field names are assumptions -->
4 |     <record id="llm_publisher_letta" model="llm.publisher">
5 |         <field name="name">Letta</field>
6 |         <field name="description">Letta is a platform for building stateful AI agents with advanced memory that can learn and self-improve over time. The platform is model-agnostic and supports various LLM providers for creating persistent, conversational AI agents.</field>
7 |         <field name="details">{
8 |             "website": "https://www.letta.com",
9 |             "founded": 2023,
10 |             "headquarters": "San Francisco, California",
11 |             "key_products": ["Letta Platform", "Agent Framework", "Memory Management"]
12 |         }</field>
13 |     </record>
14 | </odoo>
15 |
--------------------------------------------------------------------------------
/llm_mistral/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_mistral" model="llm.publisher">
4 |         <field name="name">Mistral AI</field>
5 |         <field name="description">Mistral AI is a French company specializing in artificial intelligence. They focus on developing open-source large language models and related technologies.</field>
6 |         <field name="meta">{
7 |             "website": "https://mistral.ai",
8 |             "founded": 2023,
9 |             "headquarters": "Paris, France",
10 |             "key_products": ["Mistral 7B", "Mixtral 8x7B", "Mistral Large"]
11 |         }</field>
12 |     </record>
13 | </odoo>
--------------------------------------------------------------------------------
/llm_tool_knowledge/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.0.1 (2025-10-23)
2 | ------------------------
3 |
4 | * [MIG] Migrated to Odoo 18.0
5 | * [FIX] Fixed tool schema generation to safely handle missing description keys
6 | * [IMP] Updated view_mode references from 'tree' to 'list' for Odoo 18.0 compatibility
7 |
8 | 16.0.1.0.1 (2025-04-04)
9 | ------------------------
10 |
11 | * [ADD] Added Knowledge Bot assistant that uses knowledge_retriever tool
12 | * [IMP] Integrated with OpenAI GPT-4o model for enhanced knowledge retrieval capabilities
13 |
14 | 16.0.1.0.0 (2025-03-28)
15 | ------------------------
16 |
17 | * Initial release of the LLM Tool RAG module
18 | * Added Knowledge Retriever tool for semantic document search
19 | * Implemented document search mixin for reusable search functionality
20 | * Added integration with core RAG module for document chunk access
21 | * Implemented security model with read-only access for regular users and full access for LLM managers
22 |
--------------------------------------------------------------------------------
/llm_knowledge/views/menu.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
19 |
26 |
27 |
28 |
35 |
36 |
--------------------------------------------------------------------------------
/llm_tool/tests/common.py:
--------------------------------------------------------------------------------
1 | from odoo.tests import common
2 |
3 |
4 | class LLMToolCase(common.TransactionCase):
5 | """Base test case for llm.tool core functionality tests
6 |
7 | These tests focus on core tool functionality without relying on
8 | actual decorated methods. Integration tests with real @llm_tool
9 | decorated methods are in llm_tool_demo/tests/.
10 | """
11 |
12 | @classmethod
13 | def setUpClass(cls):
14 | super().setUpClass()
15 | cls.LLMTool = cls.env["llm.tool"]
16 |
17 | def _create_test_tool(self, **kwargs):
18 | """Helper to create a test tool with default values"""
19 | values = {
20 | "name": kwargs.get("name", "test_tool"),
21 | "description": kwargs.get("description", "Test tool description"),
22 | "implementation": kwargs.get("implementation", "function"),
23 | }
24 | values.update(kwargs)
25 | return self.LLMTool.create(values)
26 |
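
A minimal test built on this base class might look like the following
sketch; the tool name is illustrative, and the assertions rely only on the
helper defaults shown above:

    from .common import LLMToolCase


    class TestLLMToolBasics(LLMToolCase):
        def test_create_tool_defaults(self):
            # No kwargs: the helper's default values apply.
            tool = self._create_test_tool()
            self.assertEqual(tool.implementation, "function")

        def test_create_tool_override(self):
            # kwargs both fill and override the defaults via values.update(kwargs).
            tool = self._create_test_tool(name="weather_lookup")
            self.assertEqual(tool.name, "weather_lookup")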
--------------------------------------------------------------------------------
/llm_mcp_server/views/res_users_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | res.users.form.mcp.key
6 | res.users
7 |
8 |
9 |
10 |
11 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/llm_chroma/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Chroma",
3 | "summary": "Vector store integration with Chroma for LLM features",
4 | "description": """
5 | Implements vector storage and search capabilities for Odoo using Chroma vector database.
6 |
7 | Features:
8 | - Chroma integration for vector storage
9 | - HTTP client connection to Chroma server
10 | - Collection management for vector collections
11 | - Vector search with similarity functions
12 | """,
13 | "category": "Technical",
14 | "version": "18.0.1.0.0",
15 | "author": "Apexive Solutions LLC",
16 | "website": "https://github.com/apexive/odoo-llm",
17 | "depends": ["llm", "llm_knowledge", "llm_store"],
18 | "external_dependencies": {
19 | "python": ["chromadb-client", "numpy"],
20 | },
21 | "images": ["static/description/banner.jpeg"],
22 | "installable": True,
23 | "application": False,
24 | "auto_install": False,
25 | "license": "LGPL-3",
26 | }
27 |
--------------------------------------------------------------------------------
/llm_tool/data/llm_tool_consent_config_data.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
7 | Default Configuration
8 |
9 | \n\nIMPORTANT: This tool requires explicit user consent before execution. Please ask the user for permission before using this tool.
12 | The following tools require explicit user consent before execution: {tool_names}.
15 | For these tools, you MUST:
16 | 1. Clearly explain to the user what the tool will do
17 | 2. Ask for their explicit permission before using the tool
18 | 3. Only proceed with using the tool if the user gives clear consent
19 | 4. If the user denies consent or doesn't respond clearly, do not use the tool
20 |
21 |
22 |
--------------------------------------------------------------------------------
/llm_comfyui/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM ComfyUI Integration",
3 | "version": "18.0.1.0.1",
4 | "category": "Productivity/LLM",
5 | "summary": "Integration with ComfyUI API for media generation",
6 | "description": """
7 | This module integrates Odoo with ComfyUI API for media generation.
8 | It provides a new provider type that can be used with the LLM framework.
9 | """,
10 | "author": "Apexive Solutions LLC",
11 | "website": "https://github.com/apexive/odoo-llm",
12 | "license": "LGPL-3",
13 | "depends": [
14 | "llm",
15 | "llm_generate",
16 | ],
17 | "data": [
18 | "data/llm_publisher.xml",
19 | "data/llm_prompt_category_data.xml",
20 | "data/llm_prompt_data.xml",
21 | "data/llm_provider.xml",
22 | "data/llm_model.xml",
23 | ],
24 | "images": [
25 | "static/description/banner.png",
26 | ],
27 | "installable": True,
28 | "application": False,
29 | "auto_install": False,
30 | }
31 |
--------------------------------------------------------------------------------
/llm_thread/static/src/components/llm_related_record/llm_related_record.scss:
--------------------------------------------------------------------------------
1 | .o_LLMRelatedRecord {
2 | display: inline-block;
3 |
4 | .btn-group {
5 | .btn {
6 | white-space: nowrap;
7 |
8 | &:hover {
9 | z-index: 1;
10 | }
11 | }
12 |
13 | .text-truncate {
14 | display: inline-block;
15 | vertical-align: middle;
16 | }
17 | }
18 | }
19 |
20 | .o_RecordPickerDialog {
21 | .record-results {
22 | .list-group-item {
23 | cursor: pointer;
24 | transition: background-color 0.15s ease-in-out;
25 |
26 | &:hover {
27 | background-color: var(--bs-light);
28 | }
29 |
30 | &.active {
31 | background-color: var(--bs-primary);
32 | color: var(--bs-white);
33 | border-color: var(--bs-primary);
34 |
35 | .text-muted {
36 | color: rgba(255, 255, 255, 0.7) !important;
37 | }
38 | }
39 | }
40 | }
41 |
42 | .spinner-border {
43 | width: 1rem;
44 | height: 1rem;
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/llm_openai/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_openai" model="llm.publisher">
4 |         <field name="name">OpenAI</field>
5 |         <field name="description">OpenAI is an artificial intelligence research organization. Founded in 2015, OpenAI's mission is to ensure that artificial general intelligence benefits all of humanity. The company is known for developing the GPT series of language models.</field>
6 |         <field name="meta">{
7 |             "website": "https://openai.com",
8 |             "founded": 2015,
9 |             "headquarters": "San Francisco, California",
10 |             "key_products": ["GPT-4", "GPT-3.5", "DALL-E", "Whisper"]
11 |         }</field>
12 |     </record>
13 | </odoo>
--------------------------------------------------------------------------------
/llm_knowledge_mistral/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM RAG Mistral",
3 | "summary": "OCR vision AI: extract text from images, receipts, handwriting, and scanned documents using Mistral vision models",
4 | "description": """
5 | Turn images into searchable knowledge with Mistral AI's vision models. Extract text from
6 | handwritten notes, receipts, scanned documents, screenshots, and product labels. Make every
7 | image searchable in your knowledge base with automatic OCR processing.
8 | """,
9 | "category": "Technical",
10 | "version": "18.0.1.0.0",
11 | "depends": ["llm_knowledge", "llm_mistral", "llm_tool"],
12 | "author": "Apexive Solutions LLC",
13 | "website": "https://github.com/apexive/odoo-llm",
14 | "data": [
15 | "data/llm_tool_data.xml",
16 | "views/llm_resource_views.xml",
17 | ],
18 | "images": ["static/description/banner.jpeg"],
19 | "license": "LGPL-3",
20 | "installable": True,
21 | "application": False,
22 | "auto_install": False,
23 | }
24 |
--------------------------------------------------------------------------------
/llm_knowledge_llama/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM RAG LlamaIndex",
3 | "summary": "Advanced RAG chunking: LlamaIndex markdown parsing, semantic chunking, structured document splitting for better AI retrieval accuracy",
4 | "description": """
5 | Supercharge your RAG pipeline with LlamaIndex's advanced chunking strategies. Parse markdown
6 | while preserving structure (headers, lists, code blocks). Semantic chunking creates better
7 | document segments for more accurate AI retrieval. Turn structured documents into AI-friendly chunks.
8 | """,
9 | "category": "Technical",
10 | "version": "18.0.1.0.0",
11 | "depends": ["llm_knowledge"],
12 | "external_dependencies": {
13 | "python": ["llama_index", "nltk"],
14 | },
15 | "author": "Apexive Solutions LLC",
16 | "website": "https://github.com/apexive/odoo-llm",
17 | "data": [],
18 | "images": ["static/description/banner.jpeg"],
19 | "license": "LGPL-3",
20 | "installable": True,
21 | "application": False,
22 | "auto_install": False,
23 | }
24 |
--------------------------------------------------------------------------------
/llm/views/llm_menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
18 |
19 |
20 |
27 |
28 |
29 |
36 |
37 |
38 |
45 |
46 |
--------------------------------------------------------------------------------
/llm_assistant/data/llm_prompt_category_data.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="prompt_category_general" model="llm.prompt.category">
4 |         <field name="name">General</field>
5 |         <field name="code">general</field>
6 |         <field name="sequence">10</field>
7 |     </record>
8 |     <record id="prompt_category_technical" model="llm.prompt.category">
9 |         <field name="name">Technical</field>
10 |         <field name="code">technical</field>
11 |         <field name="sequence">20</field>
12 |     </record>
13 |     <record id="prompt_category_business" model="llm.prompt.category">
14 |         <field name="name">Business</field>
15 |         <field name="code">business</field>
16 |         <field name="sequence">30</field>
17 |     </record>
18 |     <record id="prompt_category_creative" model="llm.prompt.category">
19 |         <field name="name">Creative</field>
20 |         <field name="code">creative</field>
21 |         <field name="sequence">40</field>
22 |     </record>
23 | </odoo>
--------------------------------------------------------------------------------
/llm_comfy_icu/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_comfy_icu" model="llm.publisher">
4 |         <field name="name">ComfyICU</field>
5 |         <field name="description">ComfyICU is a cloud platform for running ComfyUI workflows. It specializes in AI image generation and provides GPU resources for running complex image generation workflows created with ComfyUI.</field>
6 |         <field name="meta">{
7 |             "website": "https://comfy.icu",
8 |             "founded": 2024,
9 |             "key_features": [
10 |                 "AI Image Generation",
11 |                 "ComfyUI Workflow Execution",
12 |                 "Cloud GPU Resources",
13 |                 "Custom Workflow Support"
14 |             ]
15 |         }</field>
16 |     </record>
17 | </odoo>
--------------------------------------------------------------------------------
/llm_assistant/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_assistant_user,llm.assistant.user,model_llm_assistant,base.group_user,1,0,0,0
3 | access_llm_assistant_manager,llm.assistant.manager,model_llm_assistant,llm.group_llm_manager,1,1,1,1
4 | access_llm_prompt_user,llm.prompt.user,model_llm_prompt,base.group_user,1,0,0,0
5 | access_llm_prompt_manager,llm.prompt.manager,model_llm_prompt,llm.group_llm_manager,1,1,1,1
6 | access_llm_prompt_tag_user,llm.prompt.tag.user,model_llm_prompt_tag,base.group_user,1,0,0,0
7 | access_llm_prompt_tag_manager,llm.prompt.tag.manager,model_llm_prompt_tag,llm.group_llm_manager,1,1,1,1
8 | access_llm_prompt_category_user,llm.prompt.category.user,model_llm_prompt_category,base.group_user,1,0,0,0
9 | access_llm_prompt_category_manager,llm.prompt.category.manager,model_llm_prompt_category,llm.group_llm_manager,1,1,1,1
10 | access_llm_prompt_test_user,llm.prompt.test.user,model_llm_prompt_test,base.group_user,1,1,1,1
11 | access_llm_thread_mock_user,llm.thread.mock.user,model_llm_thread_mock,base.group_user,1,1,1,1
12 |
--------------------------------------------------------------------------------
/llm/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Integration Base",
3 | "summary": """
4 | Integration with various LLM providers like Ollama, OpenAI, Replicate and Anthropic""",
5 | "description": """
6 | Provides integration with LLM (Large Language Model) providers for:
7 | - Chat completions
8 | - Text embeddings
9 | - Model management
10 |
11 | """,
12 | "author": "Apexive Solutions LLC",
13 | "website": "https://github.com/apexive/odoo-llm",
14 | "category": "Technical",
15 | "version": "18.0.1.5.0",
16 | "depends": ["mail", "web"],
17 | "data": [
18 | "security/llm_security.xml",
19 | "security/ir.model.access.csv",
20 | "wizards/fetch_models_views.xml",
21 | "views/llm_provider_views.xml",
22 | "views/llm_model_views.xml",
23 | "views/llm_publisher_views.xml",
24 | "views/llm_menu_views.xml",
25 | "data/mail_message_subtype.xml",
26 | ],
27 | "license": "LGPL-3",
28 | "installable": True,
29 | "images": [
30 | "static/description/banner.jpeg",
31 | ],
32 | }
33 |
--------------------------------------------------------------------------------
/llm_training/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Training Management",
3 | "summary": """
4 | Manage LLM fine-tuning datasets and training jobs""",
5 | "description": """
6 | Provides management of training datasets and fine-tuning jobs for LLMs:
7 | - Dataset management for fine-tuning
8 | - Training job configuration and tracking
9 | - Integration with LLM providers for fine-tuning
10 | """,
11 | "author": "Apexive Solutions LLC",
12 | "website": "https://github.com/apexive/odoo-llm",
13 | "category": "Technical",
14 | "version": "18.0.1.0.0",
15 | "depends": ["base", "mail", "llm", "web_json_editor"],
16 | "data": [
17 | "security/llm_training_security.xml",
18 | "security/ir.model.access.csv",
19 | "views/llm_training_dataset_views.xml",
20 | "views/llm_training_job_views.xml",
21 | "views/llm_training_menu_views.xml",
22 | ],
23 | "images": [
24 | "static/description/banner.jpeg",
25 | ],
26 | "license": "LGPL-3",
27 | "installable": True,
28 | "auto_install": False,
29 | }
30 |
--------------------------------------------------------------------------------
/llm_replicate/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_replicate" model="llm.publisher">
4 |         <field name="name">Replicate</field>
5 |         <field name="description">Replicate is a platform that makes it easy to run machine learning models in the cloud. It provides access to a wide variety of open source models and allows deployment of custom models.</field>
6 |         <field name="meta">{
7 |             "website": "https://replicate.com",
8 |             "founded": 2019,
9 |             "headquarters": "San Francisco, California",
10 |             "key_features": [
11 |                 "Model Deployment",
12 |                 "Open Source Models",
13 |                 "Cloud Inference",
14 |                 "Model Versioning"
15 |             ]
16 |         }</field>
17 |     </record>
18 | </odoo>
--------------------------------------------------------------------------------
/llm_tool_knowledge/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Tool RAG",
3 | "version": "18.0.1.0.1",
4 | "category": "Productivity/Tools",
5 | "author": "Apexive Solutions LLC",
6 | "website": "https://github.com/apexive/odoo-llm",
7 | "summary": "RAG tools for AI assistants: semantic search, knowledge retrieval, source citations, and function calling for LLM chat integration",
8 | "description": """
9 | Give your AI assistants instant access to your knowledge base. This module provides
10 | RAG tools that enable LLM assistants to search documents, cite sources, and answer
11 | questions using your actual company data instead of just their training.
12 | """,
13 | "depends": ["llm_knowledge", "llm_tool", "llm_assistant"],
14 | "data": [
15 | "data/llm_tool_data.xml",
16 | "data/llm_assistant_data.xml",
17 | ],
18 | "images": [
19 | "static/description/banner.jpeg",
20 | ],
21 | "installable": True,
22 | "application": False,
23 | "auto_install": False,
24 | "license": "LGPL-3",
25 | "maintainer": "Apexive Solutions LLC",
26 | }
27 |
--------------------------------------------------------------------------------
/llm_fal_ai/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_fal_ai" model="llm.publisher">
4 |         <field name="name">fal.ai</field>
5 |         <field name="description">fal.ai is a generative media platform for developers, offering lightning-fast inference capabilities for AI-powered image, video, and audio generation. Founded in 2021, fal.ai aims to amplify and expand human creativity by providing scalable and cost-effective AI infrastructure.</field>
6 |         <field name="meta">{
7 |             "website": "https://fal.ai",
8 |             "founded": 2021,
9 |             "headquarters": "San Francisco, California",
10 |             "key_products": ["FLUX", "MiniMax", "AuraFlow", "Kling", "Whisper"]
11 |         }</field>
12 |     </record>
13 | </odoo>
--------------------------------------------------------------------------------
/llm_knowledge/models/mail_thread.py:
--------------------------------------------------------------------------------
1 | from markupsafe import Markup
2 |
3 | from odoo import models
4 |
5 |
6 | class MailThread(models.AbstractModel):
7 | _inherit = "mail.thread"
8 |
9 | def _post_styled_message(self, message, message_type="info"):
10 | """
11 | Post a message to the resource's chatter with appropriate styling.
12 |
13 | Args:
14 | message (str): The message to post
15 | message_type (str): Type of message (error, warning, success, info)
16 | """
17 | if message_type == "error":
18 |     body = f"<div class='alert alert-danger'>Error: {message}</div>"
19 | elif message_type == "warning":
20 |     body = f"<div class='alert alert-warning'>Warning: {message}</div>"
21 | elif message_type == "success":
22 |     body = f"<div class='alert alert-success'>Success: {message}</div>"
23 | else:  # info
24 |     body = f"<div class='alert alert-info'>Info: {message}</div>"
25 |
26 | return self.message_post(
27 | body=Markup(body),
28 | message_type="comment",
29 | )
30 |
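
Usage is then a one-liner from any mail.thread record; for example (the
record and message texts below are illustrative):

    # e.g. from a resource-processing method on an llm.resource record
    resource._post_styled_message("Parsed 12 chunks from attachment", "success")
    resource._post_styled_message("Embedding provider unreachable", "error")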
--------------------------------------------------------------------------------
/llm_assistant_account_invoice/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Invoice Assistant",
3 | "summary": "AI-powered invoice analysis assistant with OCR document parsing",
4 | "description": """
5 | Intelligent invoice assistant that helps analyze vendor bills and invoices using AI.
6 | Features document parsing with OCR, automated data extraction, and smart invoice validation.
7 | """,
8 | "category": "Accounting/AI",
9 | "version": "18.0.1.0.0",
10 | "depends": [
11 | "account", # Invoice model (account.move)
12 | "llm_assistant", # Includes llm, llm_thread, llm_tool
13 | "llm_tool_ocr_mistral", # OCR tool
14 | ],
15 | "author": "Apexive Solutions LLC",
16 | "website": "https://github.com/apexive/odoo-llm",
17 | "data": [
18 | "data/llm_prompt_invoice_data.xml",
19 | "data/llm_assistant_data.xml",
20 | "views/account_move_views.xml",
21 | ],
22 | "images": [
23 | "static/description/banner.jpeg",
24 | ],
25 | "license": "LGPL-3",
26 | "installable": True,
27 | "application": False,
28 | "auto_install": False,
29 | }
30 |
--------------------------------------------------------------------------------
/llm_knowledge_automation/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Knowledge Automation",
3 | "summary": "Auto-sync knowledge base: keeps AI current with real-time data updates, domain filters, and automated RAG pipeline processing",
4 | "description": """
5 | Set it and forget it - your AI's knowledge stays automatically updated as your data changes.
6 | No manual sync required. Domain filters automatically create, update, and remove documents
7 | from knowledge collections when records change. RAG pipeline runs automatically.
8 | """,
9 | "category": "Technical",
10 | "version": "18.0.1.0.0",
11 | "depends": ["llm_knowledge", "base_automation"],
12 | "external_dependencies": {
13 | "python": [],
14 | },
15 | "author": "Apexive Solutions LLC",
16 | "website": "https://github.com/apexive/odoo-llm",
17 | "data": [
18 | "views/llm_knowledge_collection_views.xml",
19 | ],
20 | "license": "LGPL-3",
21 | "installable": True,
22 | "application": False,
23 | "auto_install": False,
24 | "images": [
25 | "static/description/banner.jpeg",
26 | ],
27 | }
28 |
--------------------------------------------------------------------------------
/llm_document_page/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Knowledge Integration for Document Pages",
3 | "summary": "Integrate document.page with LLM RAG for knowledge base search",
4 | "description": """
5 | Integrates the Document Page module with LLM RAG.
6 |
7 | Features:
8 | - Parse document pages into LLM Knowledge resources
9 | - Include document hierarchy in generated content
10 | - Maintain metadata like contributors and update dates
11 | - Create RAG resources from document pages
12 | """,
13 | "category": "Knowledge",
14 | "version": "18.0.1.0.0",
15 | "depends": ["document_page", "llm_knowledge"],
16 | "external_dependencies": {
17 | "python": ["markdownify"],
18 | },
19 | "author": "Apexive Solutions LLC",
20 | "website": "https://github.com/apexive/odoo-llm",
21 | "license": "AGPL-3",
22 | "installable": True,
23 | "application": False,
24 | "auto_install": False,
25 | "images": [
26 | "static/description/banner.jpeg",
27 | ],
28 | "data": [
29 | "views/document_page_views.xml",
30 | "wizards/upload_resource_wizard_views.xml",
31 | ],
32 | }
33 |
--------------------------------------------------------------------------------
/llm_comfyui/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_comfyui" model="llm.publisher">
4 |         <field name="name">ComfyUI</field>
5 |         <field name="description">ComfyUI is an advanced node-based UI for Stable Diffusion. It provides a powerful interface for creating complex AI image generation workflows with a focus on customization and control.</field>
6 |         <field name="meta">{
7 |             "website": "https://github.com/comfyanonymous/ComfyUI",
8 |             "founded": 2022,
9 |             "key_features": [
10 |                 "Node-based UI for AI Image Generation",
11 |                 "Stable Diffusion Integration",
12 |                 "Custom Workflow Creation",
13 |                 "Advanced Control over Generation Parameters",
14 |                 "Extensible Plugin System"
15 |             ]
16 |         }</field>
17 |     </record>
18 | </odoo>
--------------------------------------------------------------------------------
/llm_anthropic/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Anthropic LLM Integration",
3 | "summary": "Anthropic Claude provider integration for LLM module",
4 | "description": """
5 | Implements Anthropic provider service for the LLM integration module.
6 | Supports Claude models for chat, multimodal, and tool calling capabilities.
7 |
8 | Features:
9 | - Claude 4.5, 4, and 3.x model support
10 | - Tool/function calling
11 | - Extended thinking support
12 | - Streaming responses
13 | - Multimodal (vision) capabilities
14 | """,
15 | "author": "Apexive Solutions LLC",
16 | "contributors": [
17 | "Crottolo ",
18 | ],
19 | "website": "https://github.com/apexive/odoo-llm",
20 | "category": "Technical",
21 | "version": "18.0.1.0.0",
22 | "depends": ["llm", "llm_tool"],
23 | "external_dependencies": {
24 | "python": ["anthropic"],
25 | },
26 | "data": [
27 | "data/llm_publisher.xml",
28 | "data/llm_provider.xml",
29 | ],
30 | "images": [
31 | "static/description/banner.jpeg",
32 | ],
33 | "license": "LGPL-3",
34 | "installable": True,
35 | }
36 |
--------------------------------------------------------------------------------
/llm_assistant/views/llm_menu_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
12 |
13 |
20 |
21 |
22 |
28 |
29 |
30 |
37 |
38 |
45 |
46 |
--------------------------------------------------------------------------------
/llm/models/llm_publisher.py:
--------------------------------------------------------------------------------
1 | from odoo import api, fields, models
2 |
3 |
4 | class LLMPublisher(models.Model):
5 | _name = "llm.publisher"
6 | _description = "LLM Publisher"
7 | _inherit = ["mail.thread"]
8 |
9 | name = fields.Char(required=True, tracking=True)
10 | logo = fields.Image(
11 | max_width=1024, max_height=1024, verify_resolution=True, help="Publisher logo"
12 | )
13 | description = fields.Text(tracking=True)
14 | meta = fields.Json(string="Publisher Metadata")
15 | official = fields.Boolean(
16 | default=False,
17 | tracking=True,
18 | help="Indicates if this is an official model publisher",
19 | )
20 | frontier = fields.Boolean(
21 | default=False,
22 | tracking=True,
23 | help="Indicates if this publisher is working on frontier AI models",
24 | )
25 |
26 | # Relationships
27 | model_ids = fields.One2many("llm.model", "publisher_id", string="Models")
28 | model_count = fields.Integer(compute="_compute_model_count", store=True)
29 |
30 | @api.depends("model_ids")
31 | def _compute_model_count(self):
32 | for record in self:
33 | record.model_count = len(record.model_ids)
34 |
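
A quick sketch of the stored computed count from an Odoo shell; the values
are illustrative, and the llm.model record is assumed to be creatable with
only these fields (real records may require a provider as well):

    publisher = env["llm.publisher"].create({
        "name": "Example Labs",  # illustrative
        "official": True,
        "meta": {"website": "https://example.com"},
    })
    env["llm.model"].create({
        "name": "example-model",  # illustrative; may also need provider_id etc.
        "publisher_id": publisher.id,
    })
    # model_count is stored and recomputed via @api.depends("model_ids")
    assert publisher.model_count == 1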
--------------------------------------------------------------------------------
/llm_tool_ocr_mistral/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Mistral OCR Tool",
3 | "summary": "Extract text from images and PDFs using Mistral AI vision models",
4 | "description": """
5 | Standalone OCR tool using Mistral's vision models to extract text from:
6 | - Invoice PDFs and images
7 | - Receipts and bills
8 | - Scanned documents
9 | - Handwritten notes
10 | - Product labels and packaging
11 | - Screenshots and photos
12 |
13 | This tool can be used by any LLM assistant for document parsing.
14 | Minimal dependencies - no knowledge base required.
15 | """,
16 | "category": "Technical/AI",
17 | "version": "18.0.1.0.0",
18 | "depends": [
19 | "llm_mistral", # Mistral AI provider
20 | "llm_tool", # Tool registration framework
21 | ],
22 | "author": "Apexive Solutions LLC",
23 | "website": "https://github.com/apexive/odoo-llm",
24 | "data": [
25 | "data/llm_tool_data.xml",
26 | ],
27 | "images": [
28 | "static/description/banner.jpeg",
29 | ],
30 | "license": "LGPL-3",
31 | "installable": True,
32 | "application": False,
33 | "auto_install": False,
34 | }
35 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | /.venv
5 | /.pytest_cache
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | env/
13 | bin/
14 | build/
15 | develop-eggs/
16 | dist/
17 | eggs/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 | *.eggs
26 |
27 | # Installer logs
28 | pip-log.txt
29 | pip-delete-this-directory.txt
30 |
31 | # Unit test / coverage reports
32 | htmlcov/
33 | .tox/
34 | .coverage
35 | .cache
36 | nosetests.xml
37 | coverage.xml
38 |
39 | # Translations
40 | *.mo
41 |
42 | # Pycharm
43 | .idea
44 |
45 | # Eclipse
46 | .settings
47 |
48 | # Visual Studio cache/options directory
49 | .vs/
50 | .vscode
51 |
52 | # OSX Files
53 | .DS_Store
54 |
55 | # Django stuff:
56 | *.log
57 |
58 | # Mr Developer
59 | .mr.developer.cfg
60 | .project
61 | .pydevproject
62 |
63 | # Rope
64 | .ropeproject
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # Backup files
70 | *~
71 | *.swp
72 |
73 | # OCA rules
74 | !static/lib/
75 |
76 | .ruff_cache
77 | .claudesync
78 | llm_thread/gptree_output.txt
79 | **/.gptree_config
80 | **/gptree_output.txt
81 | odoo-docs-maintenance-swarm.yml
82 |
--------------------------------------------------------------------------------
/llm_anthropic/data/llm_publisher.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="llm_publisher_anthropic" model="llm.publisher">
4 |         <field name="name">Anthropic</field>
5 |         <field name="description">Anthropic is an artificial intelligence research company focused on developing safe and ethical AI systems. Known for developing the Claude series of language models, Anthropic emphasizes constitutional AI and responsible development practices. Claude models excel at nuanced reasoning, following complex instructions, and providing helpful, harmless, and honest responses.</field>
6 |         <field name="meta">{
7 |             "website": "https://www.anthropic.com",
8 |             "founded": 2021,
9 |             "headquarters": "San Francisco, California",
10 |             "key_products": ["Claude Opus 4.5", "Claude Sonnet 4.5", "Claude Haiku 4.5", "Constitutional AI"]
11 |         }</field>
12 |     </record>
13 | </odoo>
--------------------------------------------------------------------------------
/llm_pgvector/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM PgVector",
3 | "summary": "Vector field and search capabilities using pgvector",
4 | "description": """
5 | Implements vector field and search capabilities for Odoo using pgvector.
6 |
7 | Features:
8 | - Vector field type with variable dimensions
9 | - Embedding storage and retrieval for chunks
10 | - Vector index management
11 | - Efficient vector search with pgvector
12 | """,
13 | "category": "Technical",
14 | "version": "18.0.1.0.0",
15 | "author": "Apexive Solutions LLC",
16 | "website": "https://github.com/apexive/odoo-llm",
17 | "depends": ["llm", "llm_knowledge", "llm_store"],
18 | "external_dependencies": {
19 | "python": ["pgvector", "numpy"],
20 | },
21 | "data": [
22 | "security/ir.model.access.csv",
23 | "views/llm_knowledge_chunk_embedding_views.xml",
24 | "views/llm_store_views.xml",
25 | "views/menu_views.xml",
26 | ],
27 | "images": ["static/description/banner.jpeg"],
28 | "pre_init_hook": "pre_init_hook",
29 | "installable": True,
30 | "application": False,
31 | "auto_install": False,
32 | "license": "LGPL-3",
33 | }
34 |
--------------------------------------------------------------------------------
/llm_thread/static/src/components/llm_thread_header/llm_thread_header.scss:
--------------------------------------------------------------------------------
1 | .o-llm-thread-header {
2 | min-height: 60px;
3 |
4 | // Mobile: Reduced height and padding
5 | @media (max-width: 767px) {
6 | min-height: 48px;
7 | padding: 0.5rem !important;
8 |
9 | // Truncate thread name on mobile
10 | h6 {
11 | max-width: calc(100% - 50px); // Leave room for hamburger button
12 | overflow: hidden;
13 | text-overflow: ellipsis;
14 | white-space: nowrap;
15 | }
16 | }
17 |
18 | // Mobile controls dropdown styling
19 | .o-llm-mobile-controls {
20 | .form-label {
21 | font-weight: 600;
22 | margin-bottom: 0.25rem;
23 | color: var(--bs-secondary);
24 | }
25 |
26 | // Full-width dropdowns
27 | .dropdown {
28 | width: 100%;
29 |
30 | button {
31 | width: 100%;
32 | text-align: left;
33 | justify-content: space-between;
34 | }
35 | }
36 |
37 | // Tool checkboxes
38 | .form-check {
39 | padding: 0.5rem;
40 | margin: 0;
41 |
42 | &:hover {
43 | background-color: var(--bs-light);
44 | }
45 |
46 | .form-check-input {
47 | margin-top: 0.25rem;
48 | }
49 | }
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/llm_generate/data/llm_tool_data.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | odoo_generate
4 | odoo_generate
5 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/llm_generate/static/src/patches/llm_chat_container_patch.js:
--------------------------------------------------------------------------------
1 | /** @odoo-module **/
2 |
3 | import { LLMChatContainer } from "@llm_thread/components/llm_chat_container/llm_chat_container";
4 | import { LLMMediaForm } from "../components/llm_media_form/llm_media_form";
5 | import { patch } from "@web/core/utils/patch";
6 |
7 | /**
8 | * Patch LLMChatContainer to check for media generation models
9 | */
10 | patch(LLMChatContainer, {
11 | components: { ...LLMChatContainer.components, LLMMediaForm },
12 | });
13 |
14 | patch(LLMChatContainer.prototype, {
15 | /**
16 | * Check if a thread uses a media generation model
17 | * @param {Object} thread - The thread object from mailStore
18 | * @returns {Boolean}
19 | */
20 | isMediaGenerationModel(thread) {
21 | if (!thread?.id || !this.llmStore) {
22 | return false;
23 | }
24 |
25 | // Get model info from llmStore
26 | const modelId = thread.model_id?.id || thread.model_id;
27 | if (!modelId) {
28 | return false;
29 | }
30 |
31 | const model = this.llmStore.llmModels.get(modelId);
32 | if (!model) {
33 | return false;
34 | }
35 |
36 | // Check if model supports generation
37 | const modelUse = model.model_use;
38 | return modelUse === "generation" || modelUse === "image_generation";
39 | },
40 | });
41 |
--------------------------------------------------------------------------------
/llm_knowledge/security/ir.model.access.csv:
--------------------------------------------------------------------------------
1 | id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
2 | access_llm_resource_user,llm.resource.user,model_llm_resource,base.group_user,1,0,0,0
3 | access_llm_resource_manager,llm.resource.manager,model_llm_resource,llm.group_llm_manager,1,1,1,1
4 | access_llm_knowledge_chunk_user,llm.knowledge.chunk.user,model_llm_knowledge_chunk,base.group_user,1,0,0,0
5 | access_llm_knowledge_chunk_manager,llm.knowledge.chunk.manager,model_llm_knowledge_chunk,llm.group_llm_manager,1,1,1,1
6 | access_llm_knowledge_collection_user,llm.knowledge.collection.user,model_llm_knowledge_collection,base.group_user,1,0,0,0
7 | access_llm_knowledge_collection_manager,llm.knowledge.collection.manager,model_llm_knowledge_collection,llm.group_llm_manager,1,1,1,1
8 | access_llm_knowledge_domain_user,llm.knowledge.domain.user,model_llm_knowledge_domain,base.group_user,1,0,0,0
9 | access_llm_knowledge_domain_manager,llm.knowledge.domain.manager,model_llm_knowledge_domain,llm.group_llm_manager,1,1,1,1
10 | access_llm_create_rag_resource_wizard_manager,llm.create.rag.resource.wizard.manager,model_llm_create_rag_resource_wizard,llm.group_llm_manager,1,1,1,1
11 | access_llm_upload_resource_wizard_manager,llm.upload.resource.wizard.manager,model_llm_upload_resource_wizard,llm.group_llm_manager,1,1,1,1
12 |
--------------------------------------------------------------------------------
/llm_openai/static/description/openai_logo.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llm_replicate/models/replicate_model.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from odoo import api, fields, models
4 |
5 | _logger = logging.getLogger(__name__)
6 |
7 |
8 | class LLMModel(models.Model):
9 | _inherit = "llm.model"
10 |
11 | replicate_version = fields.Char(
12 | string="Version",
13 | help="Specific version of the Replicate model to use. Format: alphanumeric hash ID. Leave empty to use the latest version. Some unofficial models requires setting it.",
14 | tracking=True,
15 | )
16 |
17 | is_replicate_provider = fields.Boolean(
18 | string="Is Replicate Provider",
19 | compute="_compute_is_replicate_provider",
20 | store=False,
21 | )
22 |
23 | @api.depends("provider_id", "provider_id.service")
24 | def _compute_is_replicate_provider(self):
25 | for record in self:
26 | record.is_replicate_provider = (
27 | record.provider_id.service == "replicate"
28 | if record.provider_id
29 | else False
30 | )
31 |
32 | def _replicate_model_name_with_version(self):
33 | """Get the full model name including version if specified"""
34 | self.ensure_one()
35 | if self.replicate_version and self.replicate_version.strip():
36 | return f"{self.name}:{self.replicate_version.strip()}"
37 | return None
38 |
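
Callers can pair the helper with a fallback to the bare model name; a small
illustrative sketch:

    # Prefer the pinned "<name>:<version>" identifier when a version is set.
    identifier = model._replicate_model_name_with_version() or model.name
    # e.g. "stability-ai/sdxl:39ed52f2..." when pinned, else "stability-ai/sdxl"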
--------------------------------------------------------------------------------
/llm_fal_ai/models/llm_model.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 |
4 | from odoo import api, models
5 |
6 | _logger = logging.getLogger(__name__)
7 |
8 |
9 | class LLMModel(models.Model):
10 | _inherit = "llm.model"
11 |
12 | @api.model_create_multi
13 | def create(self, vals_list):
14 | """Override create to ensure details field is parsed as dict for FAL.AI models"""
15 | for vals in vals_list:
16 | # Check if this is a FAL.AI model by checking the provider
17 | if vals.get("provider_id"):
18 | provider = self.env["llm.provider"].browse(vals["provider_id"])
19 | if provider.service == "fal_ai" and vals.get("details"):
20 | # If details is a string, parse it to dict
21 | if isinstance(vals["details"], str):
22 | try:
23 | vals["details"] = json.loads(vals["details"])
24 | _logger.info(
25 | "Parsed FAL.AI model details from string to dict"
26 | )
27 | except json.JSONDecodeError as e:
28 | _logger.error(
29 | f"Failed to parse FAL.AI model details JSON: {e}"
30 | )
31 |
32 | return super().create(vals_list)
33 |
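
The effect of the override, as an illustrative shell sketch (the provider
lookup and field values are assumptions):

    provider = env["llm.provider"].search([("service", "=", "fal_ai")], limit=1)
    model = env["llm.model"].create({
        "name": "fal-ai/flux/dev",  # illustrative model name
        "provider_id": provider.id,
        "details": '{"input_schema": {"type": "object"}}',  # JSON string in...
    })
    assert isinstance(model.details, dict)  # ...parsed to a dict by create()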
--------------------------------------------------------------------------------
/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Test runner for odoo-llm modules
4 | #
5 | # Usage:
6 | # ./run_tests.sh # Run all provider tests
7 | # ./run_tests.sh llm_replicate # Run specific module
8 | # ./run_tests.sh "llm_replicate,llm_comfyui" # Run multiple modules
9 | # ./run_tests.sh llm_replicate my_test_db # Custom database name
10 | # ./run_tests.sh llm_replicate my_test_db 8072 # Custom database and port
11 | #
12 |
13 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
14 | ODOO_PATH="$(cd "$SCRIPT_DIR/../../../.." && pwd)"
15 |
16 | # Activate venv
17 | source "$ODOO_PATH/.venv310/bin/activate"
18 |
19 | # Default values
20 | MODULE="${1:-llm_replicate,llm_comfyui,llm_comfy_icu}"
21 | TEST_DB="${2:-odoo_llm_test}"
22 | PORT="${3:-8071}"
23 |
24 | echo "Running tests for: $MODULE"
25 | echo "Test database: $TEST_DB"
26 | echo "Port: $PORT"
27 |
28 | # Run the test directly with proper tag format
29 | python3 "$ODOO_PATH/src/odoo/odoo-bin" \
30 | -d "$TEST_DB" \
31 | --db_host=localhost \
32 | --db_user=odoo \
33 | --db_password=odoo \
34 | --addons-path="$ODOO_PATH/src/odoo/addons,$ODOO_PATH/extra-addons/.src/apexive/odoo-llm" \
35 | -i "$MODULE" \
36 | --test-enable \
37 | --test-tags="$MODULE" \
38 | --stop-after-init \
39 | --http-port="$PORT" \
40 | --log-level=test
41 |
--------------------------------------------------------------------------------
/llm_letta/models/mail_message.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from odoo import models, tools
4 |
5 | _logger = logging.getLogger(__name__)
6 |
7 |
8 | class MailMessage(models.Model):
9 | _inherit = "mail.message"
10 |
11 | def letta_format_message(self):
12 | """Provider-specific formatting for Letta."""
13 | self.ensure_one()
14 | body = self.body
15 | if body:
16 | body = tools.html2plaintext(body)
17 |
18 | if self.is_llm_user_message()[self]:
19 | formatted_message = {"role": "user"}
20 | if body:
21 | formatted_message["content"] = body
22 | return formatted_message
23 |
24 | elif self.is_llm_assistant_message()[self]:
25 | formatted_message = {"role": "assistant"}
26 | formatted_message["content"] = body
27 |
28 | # Note: Letta handles tool calls differently than OpenAI
29 | # For now, we'll keep it simple and not include tool calls
30 |
31 | return formatted_message
32 |
33 | elif self.is_llm_tool_message()[self]:
34 | # For Letta, tool messages might need different handling
35 | # This is a placeholder for future tool integration
36 | _logger.info(f"Letta: Skipping tool message {self.id} for now")
37 | return None
38 |
39 | else:
40 | return None
41 |
--------------------------------------------------------------------------------
/llm/data/mail_message_subtype.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <odoo>
3 |     <record id="mt_llm_tool" model="mail.message.subtype">
4 |         <field name="name">LLM Tool</field>
5 |         <field name="sequence">100</field>
6 |         <field name="res_model">mail.thread</field>
7 |     </record>
8 |     <record id="mt_llm_user" model="mail.message.subtype">
9 |         <field name="name">LLM User</field>
10 |         <field name="sequence">110</field>
11 |         <field name="res_model">mail.thread</field>
12 |     </record>
13 |     <record id="mt_llm_assistant" model="mail.message.subtype">
14 |         <field name="name">LLM Assistant</field>
15 |         <field name="sequence">120</field>
16 |         <field name="res_model">mail.thread</field>
17 |     </record>
18 |     <record id="mt_llm_system" model="mail.message.subtype">
19 |         <field name="name">LLM System</field>
20 |         <field name="sequence">120</field>
21 |         <field name="res_model">mail.thread</field>
22 |     </record>
23 | </odoo>
--------------------------------------------------------------------------------
/llm_assistant/data/llm_assistant_data.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Assistant Creator
6 |
7 |
8 |
17 |
18 |
19 |
20 |
21 | Website Builder Assistant
22 |
23 |
24 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/llm_generate_job/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.0.0 (2025-10-23)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [MIGRATION] Migrated to Odoo 18.0
5 | * [FIX] Removed deprecated numbercall field from cron jobs
6 | * [IMP] Updated views (tree→list, attrs→direct attributes)
7 |
8 | 16.0.1.0.0 (2025-01-07)
9 | ~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | * [INIT] Initial release of LLM Generate Job module (renamed from LLM Generation Job)
12 | * [ADD] Generation job lifecycle management (Draft → Queued → Running → Completed/Failed/Cancelled)
13 | * [ADD] Provider-specific queue management with concurrency control
14 | * [ADD] Flexible generation options (direct vs queued)
15 | * [ADD] Comprehensive job monitoring and statistics
16 | * [ADD] Auto-retry mechanism for failed jobs
17 | * [ADD] Performance metrics and queue health monitoring
18 | * [ADD] Cron jobs for queue processing and status checking
19 | * [ADD] Management views for jobs and queues
20 | * [ADD] Provider integration interface with fallback to direct generation
21 | * [ADD] PostgreSQL advisory locking integration
22 | * [ADD] Security access rights for users and managers
23 | * [FEAT] Backward compatibility with existing generation system
24 | * [FEAT] Input/output message tracking with renamed fields (input_message_id, output_message_id)
25 | * [FEAT] Real-time job status updates and progress monitoring
26 | * [REFACTOR] Thread extension now only overrides generate_response() method instead of generate()
27 |
--------------------------------------------------------------------------------
/llm_document_page/models/llm_resource.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from odoo import models
4 |
5 | _logger = logging.getLogger(__name__)
6 |
7 |
8 | class LLMResourceDocumentPage(models.Model):
9 | """Extend LLMResource to handle document.page model."""
10 |
11 | _inherit = "llm.resource"
12 |
13 | def _get_record_external_url(self, res_model, res_id):
14 | """
15 | Extend the external URL computation to handle document.page model.
16 |
17 | :param res_model: The model name
18 | :param res_id: The record ID
19 | :return: The external URL or result from super
20 | """
21 | # First check if it's a document.page model
22 | if res_model == "document.page":
23 | try:
24 | record = self.env[res_model].browse(res_id)
25 | if (
26 | record.exists()
27 | and hasattr(record, "source_url")
28 | and record.source_url
29 | ):
30 | return record.source_url
31 | except Exception as e:
32 | _logger.warning(
33 | "Error computing external URL for document.page resource %s: %s",
34 | res_id,
35 | str(e),
36 | )
37 |
38 | # If not a document.page or no URL found, use the standard implementation
39 | return super()._get_record_external_url(res_model, res_id)
40 |
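
The same override pattern extends to any other source model; a hedged
sketch for a hypothetical my.article model exposing a website_url field:

    from odoo import models


    class LLMResourceMyArticle(models.Model):
        _inherit = "llm.resource"

        def _get_record_external_url(self, res_model, res_id):
            # Handle our model first, then fall through to other handlers.
            if res_model == "my.article":
                record = self.env[res_model].browse(res_id)
                if record.exists() and record.website_url:
                    return record.website_url
            return super()._get_record_external_url(res_model, res_id)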
--------------------------------------------------------------------------------
/llm_ollama/utils/ollama_tool_call_id_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for handling tool IDs and names in the Ollama integration.
3 | """
4 |
5 |
6 | class OllamaToolCallIdUtils:
7 | """
8 | Utility class for working with tool IDs and names.
9 |
10 | This class provides static methods for creating and parsing tool IDs
11 | in a consistent format across the Ollama integration.
12 | """
13 |
14 | @staticmethod
15 | def extract_tool_name_from_id(tool_id):
16 | """
17 | Extract tool name from a tool call ID.
18 |
19 | Args:
20 | tool_id (str): Tool call ID in the format "call_<uuid>_<tool_name>"
21 |
22 | Returns:
23 | str or None: Extracted tool name or None if not found
24 | """
25 | if not tool_id or "_" not in tool_id:
26 | return None
27 |
28 | parts = tool_id.split("_", 2)
29 | if len(parts) < 3:
30 | return None
31 |
32 | return parts[2] # Get the tool name part
33 |
34 | @staticmethod
35 | def create_tool_id(tool_name, uuid_str):
36 | """
37 | Create a tool call ID from a tool name and UUID.
38 |
39 | Args:
40 | tool_name (str): Name of the tool
41 | uuid_str (str): UUID string for the tool call
42 |
43 | Returns:
44 | str: Tool call ID in the format "call_<uuid>_<tool_name>"
45 | """
46 | return f"call_{uuid_str}_{tool_name}"
47 |
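
A round trip for reference; because uuid4 strings contain hyphens rather
than underscores, split("_", 2) cleanly recovers tool names that themselves
contain underscores:

    import uuid

    uid = str(uuid.uuid4())
    tool_id = OllamaToolCallIdUtils.create_tool_id("get_weather", uid)
    # -> "call_<uuid>_get_weather"
    assert OllamaToolCallIdUtils.extract_tool_name_from_id(tool_id) == "get_weather"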
--------------------------------------------------------------------------------
/llm_assistant/views/llm_thread_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | llm.thread.form.inherit.assistant
6 | llm.thread
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | llm.thread.search.inherit.assistant
18 | llm.thread
19 |
20 |
21 |
22 |
23 |
24 |
25 |
31 |
32 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/llm_replicate/views/replicate_model_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | replicate.llm.model.view.form
6 | llm.model
7 |
8 |
9 |
10 |
11 |
15 |
33 |
34 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/llm_letta/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Letta LLM Integration",
3 | "summary": "Letta agent-based AI with persistent memory and MCP tools",
4 | "description": """
5 | Integrates Letta platform for stateful AI agents with persistent memory.
6 |
7 | Features:
8 | • Agent-based conversations with memory persistence
9 | • Full MCP (Model Context Protocol) tool integration
10 | • Automatic agent lifecycle management with threads
11 | • Support for both Letta Cloud and self-hosted servers
12 | • Real-time streaming responses
13 |
14 | Requires Letta server v0.11.7+ and llm_mcp_server module.
15 | """,
16 | "author": "Apexive Solutions LLC",
17 | "website": "https://github.com/apexive/odoo-llm",
18 | "category": "Technical",
19 | "version": "18.0.1.0.1",
20 | "depends": ["llm", "llm_thread", "llm_assistant", "llm_mcp_server"],
21 | "external_dependencies": {
22 | # Note: Using forked version until https://github.com/letta-ai/letta-python/issues/25 is fixed
23 | # Install with: pip install git+https://github.com/apexive/letta-python.git@main
24 | "python": ["letta-client"],
25 | },
26 | "data": [
27 | "security/res_groups.xml",
28 | "data/llm_publisher.xml",
29 | "data/llm_provider.xml",
30 | "views/llm_thread_views.xml",
31 | ],
32 | "images": [
33 | "static/description/banner.jpeg",
34 | ],
35 | "license": "LGPL-3",
36 | "installable": True,
37 | }
38 |
--------------------------------------------------------------------------------
/llm_thread/static/src/components/llm_tool_message/llm_tool_message.scss:
--------------------------------------------------------------------------------
1 | // Styles for LLM Tool Message Component
2 | .o_llm_tool_result {
3 | border: 1px solid var(--border-color);
4 | border-radius: 0.375rem;
5 | padding: 0.75rem;
6 | margin: 0.5rem 0;
7 | background-color: var(--bs-gray-50);
8 |
9 | .o_llm_tool_result_header {
10 | font-weight: 500;
11 | border-bottom: 1px solid var(--border-color);
12 | padding-bottom: 0.5rem;
13 | margin-bottom: 0.5rem;
14 | }
15 |
16 | .o_llm_tool_result_section {
17 | button {
18 | cursor: pointer;
19 | border: none !important;
20 | box-shadow: none !important;
21 |
22 | &:hover {
23 | color: var(--bs-primary) !important;
24 | }
25 |
26 | &:focus {
27 | box-shadow: none !important;
28 | outline: none !important;
29 | }
30 | }
31 | }
32 |
33 | .o_llm_tool_result_args,
34 | .o_llm_tool_result_content {
35 | pre {
36 | white-space: pre-wrap;
37 | word-break: break-all;
38 | max-height: 300px;
39 | margin-bottom: 0;
40 | font-size: 0.875rem;
41 | line-height: 1.4;
42 |
43 | code {
44 | color: inherit;
45 | background: transparent;
46 | padding: 0;
47 | }
48 | }
49 | }
50 |
51 | .o_llm_tool_result_args pre {
52 | max-height: 150px; // Smaller for args
53 | }
54 |
55 | // Error styling
56 | .bg-danger-subtle {
57 | background-color: rgba(var(--bs-danger-rgb), 0.1) !important;
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/llm_training/models/llm_provider.py:
--------------------------------------------------------------------------------
1 | from odoo import models
2 |
3 |
4 | class LLMProvider(models.Model):
5 | _inherit = "llm.provider"
6 |
7 | def upload_file(self, file_tuple, purpose="fine-tune"):
8 | """Upload a file to the provider"""
9 | return self._dispatch("upload_file", file_tuple, purpose)
10 |
11 | def create_training_job(self, training_file_id, model_name, hyperparameters=None):
12 | """Create a fine-tuning job with the provider."""
13 | return self._dispatch(
14 | "create_training_job", training_file_id, model_name, hyperparameters
15 | )
16 |
17 | def retrieve_training_job(self, job_id):
18 | """Retrieve a fine-tuning job with the provider."""
19 | return self._dispatch("retrieve_training_job", job_id)
20 |
21 | def cancel_training_job(self, job_id):
22 | """Cancel a fine-tuning job with the provider."""
23 | return self._dispatch("cancel_training_job", job_id)
24 |
25 | def start_training_job(self, job_record):
26 | """Start a training job full process with the provider."""
27 | return self._dispatch("start_training_job", job_record)
28 |
29 | def check_training_job_status(self, job_record):
30 | """Check the status of a training job with the provider."""
31 | return self._dispatch("check_training_job_status", job_record)
32 |
33 | def validate_datasets(self, job_record):
34 | """Validate datasets for training"""
35 | return self._dispatch("validate_datasets", job_record)
36 |
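
_dispatch() itself is defined in the provider base (not shown here).
Assuming it resolves "upload_file" on a provider whose service is "openai"
to a method named openai_upload_file (the service-prefixed pattern that
names like openai_chat and letta_format_message elsewhere in this repo
suggest), a concrete implementation might look like this sketch; the
_get_client() helper is hypothetical:

    from odoo import models


    class LLMProviderOpenAITraining(models.Model):
        _inherit = "llm.provider"

        def openai_upload_file(self, file_tuple, purpose="fine-tune"):
            client = self._get_client()  # hypothetical helper returning an OpenAI SDK client
            # openai-python: upload a file for fine-tuning
            return client.files.create(file=file_tuple, purpose=purpose)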
--------------------------------------------------------------------------------
/web_json_editor/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Web JSON Editor",
3 | "version": "18.0.1.0.0",
4 | "category": "Web",
5 | "summary": "JSON Editor widget for Odoo",
6 | "description": """
7 | Provides a reusable JSON Editor widget for Odoo with schema-based autocomplete.
8 | Features:
9 | - JSON syntax highlighting
10 | - Schema-based autocomplete
11 | - Multiple view modes (code, tree, form, view)
12 | - Validation
13 | """,
14 | "depends": [
15 | "web",
16 | ],
17 | "assets": {
18 | "web.assets_backend": [
19 | # JSONEditor library
20 | "web_json_editor/static/lib/jsoneditor/jsoneditor.min.js",
21 | "web_json_editor/static/lib/jsoneditor/jsoneditor.min.css",
22 | "web_json_editor/static/lib/jsoneditor/img/jsoneditor-icons.svg",
23 | # Field widget
24 | "web_json_editor/static/src/fields/json_field.js",
25 | "web_json_editor/static/src/fields/json_field.xml",
26 | "web_json_editor/static/src/fields/json_field.scss",
27 | # OWL Component
28 | "web_json_editor/static/src/components/json_editor/json_editor.js",
29 | "web_json_editor/static/src/components/json_editor/json_editor.xml",
30 | ],
31 | },
32 | "author": "Apexive Solutions LLC",
33 | "website": "https://github.com/apexive/odoo-llm",
34 | "installable": True,
35 | "application": False,
36 | "auto_install": False,
37 | "license": "LGPL-3",
38 | }
39 |
--------------------------------------------------------------------------------
/llm_knowledge_mistral/views/llm_resource_views.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | llm.resource.form
5 | llm.resource
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | Select a Mistral provider and OCR model. This parser currently processes PDF and common image attachments (ir.attachment).
15 |
16 |
17 |
23 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/llm_openai/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.2.0 (2025-11-28)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [ADD] Added openai_normalize_prepend_messages() for dispatch pattern compliance
5 | * [IMP] Changed openai_chat to use generic format_messages() and format_tools() dispatch methods
6 | * [IMP] Improved consistency with base provider dispatch pattern
7 |
8 | 18.0.1.1.4 (2025-11-17)
9 | ~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | * [FIX] Updated batch job processing to use provider's _determine_model_use() method instead of wizard
12 |
13 | 18.0.1.1.3 (2025-10-23)
14 | ~~~~~~~~~~~~~~~~~~~~~~~
15 |
16 | * [MIGRATION] Migrated to Odoo 18.0
17 |
18 | 16.0.1.1.3 (2025-05-13)
19 | ~~~~~~~~~~~~~~~~~~~~~~~
20 |
21 | * [FIX] Fine tuning support
22 |
23 | 16.0.1.1.2 (2025-04-08)
24 | ~~~~~~~~~~~~~~~~~~~~~~~
25 |
26 | * [IMP] Added workaround for Gemini API compatibility (generates placeholder `tool_call_id` if missing)
27 | * [IMP] Modified message formatting to conditionally include `content` key for Gemini compatibility
28 | * [FIX] Fixed errors when using Gemini API due to missing `tool_call_id`
29 |
30 | 16.0.1.1.1 (2025-04-03)
31 | ~~~~~~~~~~~~~~~~~~~~~~~
32 |
33 | * [FIX] Added default model for OpenAI, will work when user adds API key
34 |
35 | 16.0.1.1.0 (2025-03-06)
36 | ~~~~~~~~~~~~~~~~~~~~~~~
37 |
38 | * [ADD] Tool support for OpenAI models - Implemented function calling capabilities
39 | * [IMP] Enhanced message handling for tool execution
40 | * [IMP] Added support for processing tool results in chat context
41 |
42 | 16.0.1.0.0 (2025-01-02)
43 | ~~~~~~~~~~~~~~~~~~~~~~~
44 |
45 | * [INIT] Initial release of the module
46 |
--------------------------------------------------------------------------------
/llm_generate/models/llm_tool_generate.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from odoo import api, models
4 | from odoo.exceptions import UserError
5 |
6 |
7 | class LLMToolGenerate(models.Model):
8 | _inherit = "llm.tool"
9 |
10 | @api.model
11 | def _get_available_implementations(self):
12 | implementations = super()._get_available_implementations()
13 | return implementations + [("odoo_generate", "Odoo Content Generator")]
14 |
15 | def odoo_generate_execute(
16 | self, model_id: int, inputs: dict[str, Any]
17 | ) -> dict[str, Any]:
18 | """Generate content using the specified model and inputs."""
19 | self.ensure_one()
20 |
21 | model = self.env["llm.model"].browse(int(model_id))
22 | if not model.exists():
23 | raise UserError(f"Model with ID {model_id} not found")
24 |
25 | # Use model's generate method - returns tuple (output_data, urls)
26 | output_data, urls = model.generate(inputs)
27 |
28 |         # Get the existing tool message from context; it is required to
29 |         # turn the generated URLs into attachments
30 |         tool_message = self.env.context.get("message")
31 |         if not tool_message:
32 |             raise UserError("No tool message found in context")
33 |
34 |         # Use message method to process URLs and create attachments
35 |         markdown_content, attachments = tool_message.process_generation_urls(urls)
36 |
37 |         return {
38 |             "success": True,
39 |             "output_data": output_data,
40 |             "urls": [
41 |                 {"url": att.url, "content_type": att.mimetype} for att in attachments
42 |             ],
43 |             "markdown": markdown_content,
44 |             "content_count": len(urls),
45 |         }
46 |
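47 |     # Dispatch sketch (hypothetical call shape): llm.tool's implementation
48 |     # dispatch resolves "<implementation>_execute", so an "odoo_generate"
49 |     # tool reaches this method roughly as:
50 |     #     tool.with_context(message=tool_msg).odoo_generate_execute(
51 |     #         model_id=42, inputs={"prompt": "a red bicycle"}
52 |     #     )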
--------------------------------------------------------------------------------
/llm_generate/views/llm_model_views.xml:
--------------------------------------------------------------------------------
1 | <!-- XML markup was stripped from this file in the dump. Recoverable
2 |      content: an inherited form view "llm.model.form.inherit.generate" on
3 |      model llm.model that adds a "Generation Schema Configuration" help
4 |      block: "For generation models, add input_schema and output_schema keys
5 |      to the details JSON field above." The example JSON it shows: -->
6 | <!--
7 | {
8 |     "input_schema": {
9 |         "type": "object",
10 |         "properties": {
11 |             "prompt": {"type": "string", "description": "The prompt to generate from"},
12 |             "size": {"type": "string", "enum": ["256x256", "512x512", "1024x1024"]}
13 |         },
14 |         "required": ["prompt"]
15 |     },
16 |     "output_schema": {
17 |         "type": "array",
18 |         "items": {"type": "string", "format": "uri"}
19 |     }
20 | }
21 | -->
--------------------------------------------------------------------------------
/llm_thread/static/src/patches/message_patch.xml:
--------------------------------------------------------------------------------
1 | <!-- XML markup was stripped from this file in the dump. Recoverable
2 |      content: a template patch for the message component that renders a
3 |      "Tools Requested:" section on LLM messages. -->
--------------------------------------------------------------------------------
/llm_tool_demo/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Tool Demo",
3 | "version": "18.0.1.0.0",
4 | "category": "Productivity/LLM",
5 | "summary": "Demonstration of @llm_tool decorator usage",
6 | "description": """
7 | LLM Tool Demo Module
8 | ====================
9 |
10 | This module demonstrates how to create LLM tools using the @llm_tool decorator.
11 |
12 | Features:
13 | ---------
14 | * 6 example tools showing different decorator patterns
15 | * Read-only, destructive, and idempotent tool examples
16 | * Type hints and manual schema examples
17 | * Business logic integration (CRM, Sales, Notifications)
18 | * Best practices for tool development
19 |
20 | Examples Included:
21 | ------------------
22 | 1. get_system_info - Simple read-only tool
23 | 2. calculate_business_days - Utility tool with type hints
24 | 3. create_lead_from_description - Business logic tool
25 | 4. generate_sales_report - Complex reporting tool
26 | 5. legacy_example - Manual schema for legacy code
27 | 6. send_notification_to_user - User interaction tool
28 |
29 | Use this module as a reference when creating your own LLM tools.
30 | """,
31 | "author": "Apexive",
32 | "website": "https://github.com/apexive/odoo-llm",
33 | "license": "LGPL-3",
34 | "depends": [
35 | "llm_tool",
36 | "crm", # For lead creation example
37 | "sale", # For sales report example
38 | "mail", # For notification example
39 | ],
40 | "data": [
41 | "security/ir.model.access.csv",
42 | ],
43 | "demo": [],
44 | "images": ["static/description/banner.jpeg"],
45 | "installable": True,
46 | "application": False,
47 | "auto_install": False,
48 | }
49 |
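50 | # Decorator sketch (hypothetical signature; see llm_tool for the real API):
51 | #     @llm_tool(description="Return basic server info", readonly=True)
52 | #     def get_system_info(self):
53 | #         ...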
--------------------------------------------------------------------------------
/llm_knowledge/models/llm_knowledge_domain.py:
--------------------------------------------------------------------------------
1 | from odoo import api, fields, models
2 |
3 |
4 | class LLMKnowledgeDomain(models.Model):
5 | _name = "llm.knowledge.domain"
6 | _description = "Collection Domain Filter"
7 | _order = "sequence, id"
8 |
9 | name = fields.Char(
10 | string="Name",
11 | compute="_compute_name",
12 | store=True,
13 | )
14 | collection_id = fields.Many2one(
15 | "llm.knowledge.collection",
16 | string="Collection",
17 | required=True,
18 | ondelete="cascade",
19 | index=True,
20 | )
21 | model_id = fields.Many2one(
22 | "ir.model",
23 | string="Model",
24 | required=True,
25 | ondelete="cascade",
26 | index=True,
27 | )
28 | model_name = fields.Char(
29 | string="Model Name",
30 | related="model_id.model",
31 | store=True,
32 | readonly=True,
33 | )
34 | domain = fields.Char(
35 | string="Domain",
36 | default="[]",
37 | required=True,
38 | help="Domain filter to select records",
39 | )
40 | sequence = fields.Integer(
41 | string="Sequence",
42 | default=10,
43 | help="Order of application",
44 | )
45 | active = fields.Boolean(
46 | string="Active",
47 | default=True,
48 | )
49 |
50 | @api.depends("model_id", "model_id.name")
51 | def _compute_name(self):
52 | """Compute a readable name for the domain filter"""
53 | for record in self:
54 | if record.model_id:
55 | record.name = f"{record.model_id.name} Domain"
56 | else:
57 | record.name = "Domain Filter"
58 |
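59 |     # Example (hypothetical values): a row scoping a collection to partner
60 |     # companies would point model_id at res.partner and store
61 |     #     domain = "[('is_company', '=', True)]"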
--------------------------------------------------------------------------------
/llm_knowledge/migrations/16.0.1.1.0/post-migration.py:
--------------------------------------------------------------------------------
1 | """Migration script for consolidating llm_resource into llm_knowledge
2 |
3 | This migration handles the consolidation of the llm_resource module
4 | into the llm_knowledge module.
5 | """
6 |
7 | import logging
8 |
9 | _logger = logging.getLogger(__name__)
10 |
11 |
12 | def migrate(cr, version):
13 | """Migration script to handle llm_resource consolidation"""
14 |
15 | # Check if llm_resource module exists and mark it as uninstalled
16 | cr.execute("""
17 | SELECT id FROM ir_module_module
18 | WHERE name = 'llm_resource' AND state = 'installed'
19 | """)
20 |
21 | llm_resource_module = cr.fetchone()
22 | if llm_resource_module:
23 | _logger.info("Found installed llm_resource module, marking as uninstalled")
24 |
25 | # Mark llm_resource as uninstalled
26 | cr.execute("""
27 | UPDATE ir_module_module
28 | SET state = 'uninstalled'
29 | WHERE name = 'llm_resource'
30 | """)
31 |
32 | # Remove any ir_model_data entries that reference llm_resource
33 | cr.execute("""
34 | DELETE FROM ir_model_data
35 | WHERE module = 'llm_resource'
36 | """)
37 |
38 | _logger.info("Successfully marked llm_resource as uninstalled")
39 | else:
40 | _logger.info("llm_resource module not found or already uninstalled")
41 |
42 | # Update any external references in ir_model_data to point to llm_knowledge
43 | cr.execute("""
44 | UPDATE ir_model_data
45 | SET module = 'llm_knowledge'
46 | WHERE name LIKE '%llm_resource%' AND module != 'llm_knowledge'
47 | """)
48 |
49 | _logger.info("Migration completed successfully")
50 |
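51 | # Verification sketch (hypothetical query): after the migration runs, the
52 | # remap can be confirmed with
53 | #     SELECT module, count(*) FROM ir_model_data
54 | #     WHERE name LIKE '%llm_resource%' GROUP BY module;
55 | # which should no longer report rows under the 'llm_resource' module.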
--------------------------------------------------------------------------------
/llm_generate_job/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Generate Job",
3 | "version": "18.0.1.0.0",
4 | "category": "Artificial Intelligence",
5 | "summary": "Generation Job Management and Queue System for LLM Providers",
6 | "description": """
7 | LLM Generate Job Management
8 | ===========================
9 |
10 | This module provides a comprehensive generation job management system for LLM providers.
11 |
12 | Features:
13 | - Generation job creation and lifecycle management
14 | - Provider-specific queue management
15 | - Job status tracking and monitoring
16 | - Retry and error handling mechanisms
17 | - Direct vs. queued generation options
18 | - PostgreSQL advisory locking integration
19 |
20 | The system supports both direct generation (legacy mode) and queued generation
21 | for better resource management and scalability.
22 |
23 | Key Changes:
24 | - Renamed from llm_generation_job to llm_generate_job
25 | - Thread extension now only overrides generate_response() method
26 | - Maintains backward compatibility with existing generate() method
27 | """,
28 | "author": "Apexive",
29 | "website": "https://github.com/apexive/odoo-llm",
30 | "depends": [
31 | "llm_thread",
32 | "llm_tool",
33 | "llm_generate",
34 | "web_json_editor",
35 | ],
36 | "data": [
37 | "security/ir.model.access.csv",
38 | "data/llm_generation_cron.xml",
39 | "views/llm_generation_job_views.xml",
40 | "views/llm_generation_queue_views.xml",
41 | "views/llm_generation_menu_views.xml",
42 | ],
43 | "images": [
44 | "static/description/icon.svg",
45 | ],
46 | "installable": True,
47 | "application": False,
48 | "auto_install": False,
49 | "license": "LGPL-3",
50 | }
51 |
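52 | # Advisory-lock sketch (hypothetical; illustrates the "PostgreSQL advisory
53 | # locking" named above): a queue processor can serialize work per queue with
54 | #     cr.execute("SELECT pg_try_advisory_lock(%s)", (queue_id,))
55 | #     locked = cr.fetchone()[0]
56 | # and release via "SELECT pg_advisory_unlock(%s)" once processing finishes.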
--------------------------------------------------------------------------------
/llm_knowledge/__manifest__.py:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LLM Knowledge",
3 | "summary": "RAG vector search: AI knowledge base with semantic document retrieval, embeddings, PDF parsing, and multi-store support (Qdrant, pgvector, Chroma)",
4 | "description": """
5 | Complete RAG (Retrieval-Augmented Generation) system for Odoo with document processing,
6 | vector search, and semantic knowledge base capabilities. Turn your documents into AI-searchable
7 | knowledge with support for PDFs, web pages, and text files. Compatible with Qdrant, pgvector,
8 | and Chroma vector stores.
9 | """,
10 | "category": "Technical",
11 | "version": "18.0.1.1.0",
12 | "depends": ["llm", "llm_store"],
13 | "external_dependencies": {
14 | "python": ["requests", "markdownify", "PyMuPDF", "numpy"],
15 | },
16 | "author": "Apexive Solutions LLC",
17 | "website": "https://github.com/apexive/odoo-llm",
18 | "data": [
19 | # Security must come first
20 | "security/ir.model.access.csv",
21 | # Data / Actions
22 | "data/server_actions.xml",
23 | # Views for models
24 | "views/llm_resource_views.xml", # Consolidated llm.resource views
25 | "views/llm_knowledge_collection_views.xml",
26 | "views/llm_knowledge_chunk_views.xml",
27 | # Wizard Views
28 | "wizards/create_rag_resource_wizard_views.xml",
29 | "wizards/upload_resource_wizard_views.xml",
30 | # Menus must come last
31 | "views/llm_resource_menu.xml",
32 | "views/menu.xml",
33 | ],
34 | "images": [
35 | "static/description/banner.jpeg",
36 | ],
37 | "license": "LGPL-3",
38 | "installable": True,
39 | "application": False,
40 | "auto_install": False,
41 | }
42 |
--------------------------------------------------------------------------------
/llm_tool/models/llm_tool_record_retriever.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from typing import Any, Union
4 |
5 | from odoo import api, models
6 |
7 | _logger = logging.getLogger(__name__)
8 |
9 |
10 | class LLMToolRecordRetriever(models.Model):
11 | _inherit = "llm.tool"
12 |
13 | @api.model
14 | def _get_available_implementations(self):
15 | implementations = super()._get_available_implementations()
16 | return implementations + [("odoo_record_retriever", "Odoo Record Retriever")]
17 |
18 | def odoo_record_retriever_execute(
19 | self,
20 | model: str,
21 | domain: list[list[Union[str, int, bool, float, None]]] = [], # noqa: B006
22 | fields: list[str] = [], # noqa: B006
23 | limit: int = 100,
24 | ) -> dict[str, Any]:
25 | """
26 | Execute the Odoo Record Retriever tool
27 |
28 | Parameters:
29 | model: The Odoo model to retrieve records from
30 | domain: Domain to filter records (list of lists/tuples like ['field', 'op', 'value'])
31 | fields: List of field names to retrieve
32 | limit: Maximum number of records to retrieve
33 | """
34 | _logger.info(
35 | f"Executing Odoo Record Retriever with: model={model}, domain={domain}, fields={fields}, limit={limit}"
36 | )
37 | model_obj = self.env[model]
38 |
39 | # Using search_read for efficiency
40 | if fields:
41 | result = model_obj.search_read(domain=domain, fields=fields, limit=limit)
42 | else:
43 | records = model_obj.search(domain=domain, limit=limit)
44 | result = records.read()
45 |
46 | # Convert to serializable format
47 | return json.loads(json.dumps(result, default=str))
48 |
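49 |     # Usage sketch (hypothetical values): fetch five company partners
50 |     #     tool.odoo_record_retriever_execute(
51 |     #         model="res.partner",
52 |     #         domain=[["is_company", "=", True]],
53 |     #         fields=["name", "email"],
54 |     #         limit=5,
55 |     #     )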
--------------------------------------------------------------------------------
/llm/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.5.0 (2025-11-28)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [ADD] Added _extract_content_text() helper for extracting text from message content (handles both string and OpenAI list formats)
5 | * [ADD] Added _dispatch("normalize_prepend_messages") call in chat() for provider-specific message normalization
6 | * [IMP] Improved dispatch pattern consistency for prepend_messages handling
7 |
8 | 18.0.1.4.1 (2025-11-17)
9 | ~~~~~~~~~~~~~~~~~~~~~~~
10 |
11 | * [FIX] Fixed wizard_id not being set on llm.fetch.models.line records
12 | * [IMP] Refactored model fetching: moved logic from wizard default_get() to provider action_fetch_models()
13 | * [IMP] Moved _determine_model_use() from wizard to provider for better extensibility
14 | * [REM] Removed wizard write() override workaround
15 | * [ADD] Comprehensive docstrings with extension pattern examples
16 | * [ADD] Documented standard capability names and priority order
17 |
18 | 18.0.1.4.0 (2025-10-23)
19 | ~~~~~~~~~~~~~~~~~~~~~~~
20 |
21 | * [MIGRATION] Migrated to Odoo 18.0
22 | * [IMP] Updated views and manifest for compatibility
23 |
24 | 16.0.1.3.0
25 | ~~~~~~~~~~
26 |
27 | * [BREAKING] Moved message subtypes to base module
28 | * [ADD] Added required `llm_role` field computation with automatic migration
29 | * [IMP] Enhanced provider dispatch mechanism
30 | * [MIGRATION] Automatic computation of `llm_role` for existing messages
31 | * [MIGRATION] Database migration creates indexes for performance
32 |
33 | 16.0.1.1.0 (2025-03-06)
34 | ~~~~~~~~~~~~~~~~~~~~~~~
35 |
36 | * [ADD] Tool support framework in base LLM models
37 | * [IMP] Enhanced provider interface to support tool execution
38 | * [IMP] Updated model handling for function calling capabilities
39 |
40 | 16.0.1.0.0 (2025-01-02)
41 | ~~~~~~~~~~~~~~~~~~~~~~~
42 |
43 | * [INIT] Initial release
44 |
--------------------------------------------------------------------------------
/llm_document_page/wizards/upload_resource_wizard_views.xml:
--------------------------------------------------------------------------------
1 | <!-- XML markup was stripped from this file in the dump. Recoverable
2 |      content: an inherited form view
3 |      "llm.upload.resource.wizard.form.inherit.document.page" on model
4 |      llm.upload.resource.wizard, adding a "Document Page Support" notice:
5 |      "Document pages only support text-based content (HTML, plain text).
6 |      Binary files like PDFs, images, or videos will NOT be retrieved
7 |      properly." -->
--------------------------------------------------------------------------------
/llm_generate/models/llm_model.py:
--------------------------------------------------------------------------------
1 | from odoo import api, models
2 |
3 |
4 | class LLMModel(models.Model):
5 | """Extension of llm.model to support automatic I/O schema generation for media generation models."""
6 |
7 | _inherit = "llm.model"
8 |
9 | @api.model_create_multi
10 | def create(self, vals_list):
11 | """Override create to auto-generate I/O schemas for generation models."""
12 | records = super().create(vals_list)
13 | records._auto_generate_io_schema()
14 | return records
15 |
16 | def write(self, vals):
17 | """Override write to auto-generate I/O schemas when details or model_use changes."""
18 | result = super().write(vals)
19 | if "details" in vals or "model_use" in vals:
20 | self._auto_generate_io_schema()
21 | return result
22 |
23 | def _auto_generate_io_schema(self):
24 | """Auto-generate I/O schemas for eligible generation models.
25 |
26 | This method implements the top-level logic:
27 | 1. Check if model_use is generation/image_generation
28 | 2. Check if provider is eligible (via dispatch)
29 | 3. Trigger schema generation (via dispatch)
30 | """
31 | for record in self:
32 | # Top-level condition: only for generation models
33 | if record.model_use not in ["generation", "image_generation"]:
34 | continue
35 |
36 | # Skip if no provider
37 | if not record.provider_id:
38 | continue
39 |
40 | # Check if provider should generate schema (dispatches to provider implementation)
41 | if not record.provider_id.should_generate_io_schema(record):
42 | continue
43 |
44 | # Trigger schema generation (dispatches to provider implementation)
45 | record.provider_id.generate_io_schema(record)
46 |
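47 |     # Provider hook sketch (hypothetical bodies): a provider module supplies
48 |     # the two dispatch targets used above, e.g.
49 |     #     def should_generate_io_schema(self, model): return True
50 |     #     def generate_io_schema(self, model): ...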
--------------------------------------------------------------------------------
/llm_mcp_server/changelog.rst:
--------------------------------------------------------------------------------
1 | 18.0.1.3.0 (2025-12-02)
2 | ~~~~~~~~~~~~~~~~~~~~~~~
3 |
4 | * [ADD] New MCP Key wizard - generates API key with ready-to-copy client configurations
5 | * [ADD] "New MCP Key" button in user preferences (Account Security section)
6 | * [ADD] "New MCP Key" button in MCP Server Config form for quick key generation
7 | * [IMP] Client configs now use Jinja2 templates for maintainable config generation
8 | * [IMP] Nested notebook tabs for client configurations with CopyClipboardButton widgets
9 | * [IMP] DRY refactoring - shared config generation between wizard and config form
10 |
11 | 18.0.1.2.0 (2025-11-28)
12 | ~~~~~~~~~~~~~~~~~~~~~~~
13 |
14 | * [ADD] Added "Client Configuration" tab to MCP Server Config form with copy-paste setup instructions
15 | * [ADD] Included configuration snippets for Claude Desktop, Claude Code, and Codex CLI
16 | * [ADD] Added prerequisites section with mcp-remote installation command
17 | * [IMP] Better onboarding experience with inline API key guidance
18 |
19 | 18.0.1.1.0 (2025-11-03)
20 | ~~~~~~~~~~~~~~~~~~~~~~~
21 |
22 | * [IMP] Updated Odoo App Store description page (static/description/index.html)
23 | * [IMP] Improved module presentation with modern Bootstrap 5 layout
24 | * [IMP] Enhanced mobile responsiveness and visual design
25 | * [IMP] Added comprehensive MCP feature descriptions and use cases
26 | * [IMP] Optimized for Odoo App Store HTML sanitization requirements
27 |
28 | 18.0.1.0.0 (2025-10-23)
29 | ~~~~~~~~~~~~~~~~~~~~~~~
30 |
31 | * [INIT] Initial release of the module
32 | * [ADD] MCP 2025-06-18 protocol implementation
33 | * [ADD] Bearer token authentication with Odoo API keys
34 | * [ADD] Dynamic tool discovery from llm.tool registry
35 | * [ADD] Real-time tool execution with proper Odoo context
36 | * [ADD] Health monitoring and session management
37 | * [ADD] Support for Claude Desktop, Claude Code, Cursor, Windsurf, VS Code, and Codex
38 |
--------------------------------------------------------------------------------
/llm_generate_job/tests/test_model_based_queue.py:
--------------------------------------------------------------------------------
1 | """Test script to verify model-based queue functionality"""
2 |
3 |
4 | def test_model_based_queue():
5 | """Test that queue is properly linked to model instead of provider"""
6 |
7 | # This is a simple test structure to verify the refactoring
8 | # In a real Odoo test, you would inherit from TransactionCase or SingleTransactionCase
9 |
10 | print("Model-based queue refactoring test structure:")
11 | print("=" * 50)
12 |
13 | print("\n1. Queue Model Changes:")
14 | print(" - Changed provider_id to model_id in llm.generation.queue")
15 | print(" - Updated all compute methods to use model_id")
16 | print(" - Updated _get_or_create_queue to accept model_id")
17 | print(" - Updated _process_model_queue (renamed from _process_provider_queue)")
18 |
19 | print("\n2. Job Model:")
20 | print(" - Already has model_id field")
21 | print(" - Jobs are tracked by model, not provider")
22 |
23 | print("\n3. Thread Integration:")
24 | print(" - generate_response now checks for queue based on model_id")
25 | print(" - Auto-detects if model has an enabled queue")
26 |
27 | print("\n4. UI Updates:")
28 | print(" - All views updated to show model_id instead of provider_id")
29 | print(" - Icons changed from plug to brain")
30 |
31 | print("\n5. Queue Processing:")
32 | print(" - Queues are now per-model, not per-provider")
33 | print(" - Multiple models from same provider can have different queues")
34 |
35 | print("\nTest scenarios to verify:")
36 | print("- Create a queue for a specific model")
37 | print("- Verify generate_response uses queue when model has one")
38 | print("- Verify queue processing filters jobs by model_id")
39 | print("- Verify UI shows correct model information")
40 |
41 |
42 | if __name__ == "__main__":
43 | test_model_based_queue()
44 |
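45 | # Sketch of the real test shape referenced above (hypothetical field values;
46 | # assumes llm.generation.queue carries model_id as described):
47 | #
48 | #     from odoo.tests.common import TransactionCase
49 | #
50 | #     class TestModelBasedQueue(TransactionCase):
51 | #         def test_queue_linked_to_model(self):
52 | #             model = self.env["llm.model"].search([], limit=1)
53 | #             queue = self.env["llm.generation.queue"].create(
54 | #                 {"model_id": model.id}
55 | #             )
56 | #             self.assertEqual(queue.model_id, model)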
--------------------------------------------------------------------------------
/.claude/commands/fix-app-store-html.md:
--------------------------------------------------------------------------------
1 | Fix App Store HTML compliance issues for an Odoo module.
2 |
3 | ## Module: $ARGUMENTS
4 |
5 | ## Reference
6 |
7 | See ODOO_APP_STORE_HTML_GUIDE.md for complete guidelines.
8 |
9 | ## Steps
10 |
11 | ### 1. Read Current HTML
12 |
13 | Read `{module}/static/description/index.html`
14 |
15 | ### 2. Identify Violations
16 |
17 | Check for and fix these issues:
18 |
19 | **Structure violations:**
20 |
21 | - Remove `<!DOCTYPE html>`, `<html>`, `<head>`, `<body>` tags
22 | - Keep only the content that would go inside `<body>`
23 | - Remove `<meta>`, `<title>`, `<link>` tags
24 |
25 | **CSS violations to fix:**
26 |
27 | - `rgba(r,g,b,a)` → convert to hex colors (e.g., `#6f42c1`; see the sketch below)
28 | - `transition:` → remove entirely
29 | - `transform:` → remove entirely
30 | - `:hover` effects with transforms → remove
31 | - `linear-gradient()` → use solid colors or remove
32 | - `animation:` → remove entirely
33 | - CSS variables (`var(--x)`) → replace with actual values
34 |
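35 | For the rgba rule, a minimal conversion sketch (hypothetical helper; alpha is
36 | simply dropped, per the rule above):
37 |
38 | ```python
39 | def rgba_to_hex(r: int, g: int, b: int) -> str:
40 |     """Flatten an rgba() color to the plain hex form the sanitizer accepts."""
41 |     return "#{:02x}{:02x}{:02x}".format(r, g, b)
42 |
43 | print(rgba_to_hex(111, 66, 193))  # -> #6f42c1
44 | ```
45 |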
46 | **JavaScript violations:**
47 |
48 | - Remove any `onclick`, `onmouseover`, etc. attributes
49 | - Remove `<script>` tags entirely