├── .env.example ├── .github ├── CODEOWNERS └── workflows │ ├── ci.yaml │ ├── pr.yaml │ └── publish.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── docker-compose.yaml ├── docs ├── img │ └── Dynamiq_Logo_Universal_Github.png └── tutorials │ ├── agents.md │ ├── quickstart.md │ └── rag.md ├── dynamiq ├── __init__.py ├── cache │ ├── __init__.py │ ├── backends │ │ ├── __init__.py │ │ ├── base.py │ │ └── redis.py │ ├── codecs.py │ ├── config.py │ ├── managers │ │ ├── __init__.py │ │ ├── base.py │ │ └── workflow.py │ └── utils.py ├── callbacks │ ├── __init__.py │ ├── base.py │ ├── streaming.py │ └── tracing.py ├── clients │ ├── __init__.py │ └── base.py ├── components │ ├── __init__.py │ ├── converters │ │ ├── __init__.py │ │ ├── base.py │ │ ├── docx.py │ │ ├── html.py │ │ ├── pptx.py │ │ ├── pypdf.py │ │ ├── unstructured.py │ │ └── utils.py │ ├── embedders │ │ ├── __init__.py │ │ ├── base.py │ │ ├── bedrock.py │ │ ├── cohere.py │ │ ├── gemini.py │ │ ├── huggingface.py │ │ ├── mistral.py │ │ ├── openai.py │ │ ├── vertexai.py │ │ └── watsonx.py │ ├── retrievers │ │ ├── __init__.py │ │ ├── chroma.py │ │ ├── elasticsearch.py │ │ ├── milvus.py │ │ ├── pgvector.py │ │ ├── pinecone.py │ │ ├── qdrant.py │ │ └── weaviate.py │ ├── serializers.py │ └── splitters │ │ ├── __init__.py │ │ └── document.py ├── connections │ ├── __init__.py │ ├── connections.py │ ├── managers.py │ └── storages.py ├── evaluations │ ├── __init__.py │ ├── base_evaluator.py │ ├── llm_evaluator.py │ ├── metrics │ │ ├── __init__.py │ │ ├── answer_correctness.py │ │ ├── bleu_score.py │ │ ├── context_precision.py │ │ ├── context_recall.py │ │ ├── factual_correctness.py │ │ ├── faithfulness.py │ │ ├── rouge_score.py │ │ └── string_metrics.py │ └── python_evaluator.py ├── executors │ ├── __init__.py │ ├── base.py │ └── pool.py ├── flows │ ├── __init__.py │ ├── base.py │ └── flow.py ├── memory │ ├── __init__.py │ ├── backends │ │ ├── __init__.py │ │ ├── base.py │ │ ├── dynamo_db.py │ │ ├── in_memory.py │ │ ├── pinecone.py │ │ ├── qdrant.py │ │ ├── sqlite.py │ │ └── weaviate.py │ └── memory.py ├── nodes │ ├── __init__.py │ ├── agents │ │ ├── __init__.py │ │ ├── base.py │ │ ├── exceptions.py │ │ ├── orchestrators │ │ │ ├── __init__.py │ │ │ ├── adaptive.py │ │ │ ├── adaptive_manager.py │ │ │ ├── graph.py │ │ │ ├── graph_manager.py │ │ │ ├── graph_state.py │ │ │ ├── linear.py │ │ │ ├── linear_manager.py │ │ │ └── orchestrator.py │ │ ├── react.py │ │ ├── reflection.py │ │ ├── simple.py │ │ └── utils.py │ ├── audio │ │ ├── __init__.py │ │ ├── elevenlabs.py │ │ └── whisper.py │ ├── converters │ │ ├── __init__.py │ │ ├── csv.py │ │ ├── docx.py │ │ ├── html.py │ │ ├── llm_text_extractor.py │ │ ├── pptx.py │ │ ├── pypdf.py │ │ └── unstructured.py │ ├── embedders │ │ ├── __init__.py │ │ ├── base.py │ │ ├── bedrock.py │ │ ├── cohere.py │ │ ├── gemini.py │ │ ├── huggingface.py │ │ ├── mistral.py │ │ ├── openai.py │ │ ├── vertexai.py │ │ └── watsonx.py │ ├── exceptions.py │ ├── llms │ │ ├── __init__.py │ │ ├── ai21.py │ │ ├── anthropic.py │ │ ├── anyscale.py │ │ ├── azureai.py │ │ ├── base.py │ │ ├── bedrock.py │ │ ├── cerebras.py │ │ ├── cohere.py │ │ ├── custom_llm.py │ │ ├── databricks.py │ │ ├── deepinfra.py │ │ ├── deepseek.py │ │ ├── fireworksai.py │ │ ├── gemini.py │ │ ├── groq.py │ │ ├── huggingface.py │ │ ├── mistral.py │ │ ├── nvidia_nim.py │ │ ├── ollama.py │ │ ├── openai.py │ │ ├── perplexity.py │ │ ├── replicate.py │ │ ├── 
sambanova.py │ │ ├── togetherai.py │ │ ├── vertexai.py │ │ ├── watsonx.py │ │ └── xai.py │ ├── managers.py │ ├── node.py │ ├── operators │ │ ├── __init__.py │ │ └── operators.py │ ├── rankers │ │ ├── __init__.py │ │ ├── cohere.py │ │ ├── llm.py │ │ └── recency.py │ ├── retrievers │ │ ├── __init__.py │ │ ├── base.py │ │ ├── chroma.py │ │ ├── elasticsearch.py │ │ ├── milvus.py │ │ ├── pgvector.py │ │ ├── pinecone.py │ │ ├── qdrant.py │ │ ├── retriever.py │ │ └── weaviate.py │ ├── splitters │ │ ├── __init__.py │ │ └── document.py │ ├── tools │ │ ├── __init__.py │ │ ├── e2b_sandbox.py │ │ ├── exa_search.py │ │ ├── firecrawl.py │ │ ├── function_tool.py │ │ ├── http_api_call.py │ │ ├── human_feedback.py │ │ ├── jina.py │ │ ├── llm_summarizer.py │ │ ├── mcp.py │ │ ├── python.py │ │ ├── scale_serp.py │ │ ├── sql_executor.py │ │ ├── tavily.py │ │ ├── thinking_tool.py │ │ └── zenrows.py │ ├── types.py │ ├── utils │ │ ├── __init__.py │ │ └── utils.py │ ├── validators │ │ ├── __init__.py │ │ ├── base.py │ │ ├── regex_match.py │ │ ├── valid_choices.py │ │ ├── valid_json.py │ │ └── valid_python.py │ └── writers │ │ ├── __init__.py │ │ ├── base.py │ │ ├── chroma.py │ │ ├── elasticsearch.py │ │ ├── milvus.py │ │ ├── pgvector.py │ │ ├── pinecone.py │ │ ├── qdrant.py │ │ └── weaviate.py ├── prompts │ ├── __init__.py │ └── prompts.py ├── runnables │ ├── __init__.py │ └── base.py ├── serializers │ ├── __init__.py │ ├── dumpers │ │ ├── __init__.py │ │ └── yaml.py │ ├── loaders │ │ ├── __init__.py │ │ └── yaml.py │ └── types.py ├── storages │ ├── __init__.py │ └── vector │ │ ├── __init__.py │ │ ├── base.py │ │ ├── chroma │ │ ├── __init__.py │ │ └── chroma.py │ │ ├── elasticsearch │ │ ├── __init__.py │ │ ├── elasticsearch.py │ │ └── filters.py │ │ ├── exceptions.py │ │ ├── milvus │ │ ├── __init__.py │ │ ├── filter.py │ │ └── milvus.py │ │ ├── pgvector │ │ ├── __init__.py │ │ ├── filters.py │ │ └── pgvector.py │ │ ├── pinecone │ │ ├── __init__.py │ │ ├── filters.py │ │ └── pinecone.py │ │ ├── policies.py │ │ ├── qdrant │ │ ├── __init__.py │ │ ├── converters.py │ │ ├── filters.py │ │ └── qdrant.py │ │ ├── utils.py │ │ └── weaviate │ │ ├── __init__.py │ │ ├── filters.py │ │ └── weaviate.py ├── types │ ├── __init__.py │ ├── document.py │ ├── feedback.py │ ├── llm_tool.py │ └── streaming.py ├── utils │ ├── __init__.py │ ├── chat.py │ ├── duration.py │ ├── env.py │ ├── feedback.py │ ├── json_parser.py │ ├── jsonpath.py │ ├── logger.py │ ├── node_generation.py │ └── utils.py └── workflow │ ├── __init__.py │ └── workflow.py ├── examples ├── Makefile ├── __init__.py ├── components │ ├── agents │ │ ├── agents │ │ │ ├── react_agent_wf.py │ │ │ ├── reflection_agent_wf.py │ │ │ ├── simple_agent_wf.py │ │ │ ├── use_agent_messages.py │ │ │ ├── use_agents_hidden_params.py │ │ │ └── use_agents_vision.py │ │ ├── orchestrators │ │ │ ├── adaptive_orchestrator │ │ │ │ ├── adaptive.py │ │ │ │ ├── adaptive_article_o3.py │ │ │ │ ├── adaptive_coding.py │ │ │ │ └── adaptive_coding_workflow.py │ │ │ ├── graph_orchestrator │ │ │ │ ├── code_assistant.py │ │ │ │ ├── concierge_orchestration.py │ │ │ │ ├── email_writer.py │ │ │ │ ├── graph_orchestrator_wf.yaml │ │ │ │ ├── graph_orchestrator_yaml.py │ │ │ │ └── trip_planner_orchestration.py │ │ │ ├── linear_orchestrator │ │ │ │ └── linear_coding.py │ │ │ └── orchestrators_tests_with_guardrails.py │ │ └── streaming │ │ │ ├── intermediate_streaming │ │ │ ├── adaptive_orchestrator │ │ │ │ ├── adaptive_orchestrator.py │ │ │ │ └── app.py │ │ │ ├── agents │ │ │ │ ├── agent.py │ │ │ │ └── app.py │ │ │ 
├── graph_orchestrator │ │ │ │ ├── app.py │ │ │ │ └── graph_orchestrator.py │ │ │ ├── linear_orchestrator │ │ │ │ ├── app.py │ │ │ │ └── linear_orchestrator.py │ │ │ └── run.sh │ │ │ ├── orchestrator │ │ │ ├── app.py │ │ │ ├── backend.py │ │ │ └── run.sh │ │ │ ├── react │ │ │ ├── app.py │ │ │ ├── backend.py │ │ │ ├── run.sh │ │ │ └── use_streaming.py │ │ │ ├── reflection │ │ │ ├── app.py │ │ │ ├── backend.py │ │ │ └── run.sh │ │ │ └── simple │ │ │ ├── app.py │ │ │ ├── backend.py │ │ │ └── run.sh │ ├── core │ │ ├── dag │ │ │ ├── __init__.py │ │ │ ├── agent_memory_cycle_dag.py │ │ │ ├── agent_memory_dag.py │ │ │ ├── agent_memory_dag.yaml │ │ │ ├── agent_memory_db_dag.py │ │ │ ├── agent_memory_dynamo_db_dag.yaml │ │ │ ├── agent_rag.py │ │ │ ├── agent_rag.yaml │ │ │ ├── agent_with_tool_params.py │ │ │ ├── agent_with_tool_params.yaml │ │ │ ├── csv_embedding_flow.py │ │ │ ├── csv_embedding_flow.yaml │ │ │ ├── dag.py │ │ │ ├── dag.yaml │ │ │ ├── dag_llm.yaml │ │ │ ├── dag_llm_structured_output.py │ │ │ ├── dag_llm_structured_output.yaml │ │ │ ├── dag_llm_tools.yaml │ │ │ ├── dag_mcp_server.yaml │ │ │ ├── dag_mcp_tool.yaml │ │ │ ├── dag_yaml.py │ │ │ ├── dag_yaml_llm.py │ │ │ ├── dag_yaml_llm_tools.py │ │ │ ├── dag_yaml_mcp.py │ │ │ ├── orchestrator_dag.yaml │ │ │ ├── orchestrator_dag_yaml.py │ │ │ └── simple_dag.py │ │ ├── memory │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── demo_article.py │ │ │ ├── demo_memory.py │ │ │ ├── demo_memory_dynamo_db.py │ │ │ ├── demo_memory_pinecone.py │ │ │ ├── demo_memory_qdrant.py │ │ │ ├── demo_memory_sqlite.py │ │ │ ├── demo_memory_weaviate.py │ │ │ ├── demo_react_agent_chat_memory_pinecone.py │ │ │ ├── demo_simple_agent_chat_memory.py │ │ │ └── demo_simple_agent_chat_memory_qdrant.py │ │ ├── tracing │ │ │ ├── __init__.py │ │ │ ├── agentops_auto.py │ │ │ ├── agentops_tracing.py │ │ │ ├── draw.py │ │ │ └── langfuse_tracing.py │ │ └── websocket │ │ │ ├── __init__.py │ │ │ ├── sse │ │ │ ├── __init__.py │ │ │ ├── http_client.py │ │ │ └── http_server_fastapi.py │ │ │ ├── ws_client.py │ │ │ ├── ws_server_fastapi.py │ │ │ └── ws_streamlit │ │ │ ├── README.md │ │ │ ├── example_agent │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ └── server.py │ │ │ ├── example_agent_chat │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ └── server.py │ │ │ ├── example_llm │ │ │ ├── __init__.py │ │ │ ├── app.py │ │ │ └── server.py │ │ │ └── run.sh │ ├── data │ │ ├── file.docx │ │ ├── file.pdf │ │ ├── file.pptx │ │ ├── file.txt │ │ ├── img.jpeg │ │ ├── sample.html │ │ └── sample_regression_data.csv │ ├── evaluations │ │ ├── llm_evaluator.py │ │ ├── metrics │ │ │ ├── answer_correctness.py │ │ │ ├── bleu_score.py │ │ │ ├── context_precision.py │ │ │ ├── context_recall.py │ │ │ ├── factual_correctness.py │ │ │ ├── faithfulness.py │ │ │ ├── rouge_score.py │ │ │ └── string_metrics.py │ │ ├── python_evaluator.py │ │ └── workflow_eval.py │ ├── helpers │ │ ├── converters │ │ │ ├── csv_converter.py │ │ │ ├── docx_converter.py │ │ │ ├── html_converter.py │ │ │ ├── pptx_converter.py │ │ │ └── pypdf_converter.py │ │ └── validators │ │ │ └── valid_python.py │ ├── llm │ │ ├── llm_with_files │ │ │ ├── llm_file_example.py │ │ │ └── use_file_tool_example.py │ │ ├── llm_with_vision │ │ │ ├── img_url_example.py │ │ │ ├── node_pdf_extractor.py │ │ │ └── pdf_extractor.py │ │ └── llms │ │ │ ├── custom.py │ │ │ ├── function_calling.py │ │ │ ├── mistral_with_messages.py │ │ │ ├── ollama.py │ │ │ ├── perplexity_citations.py │ │ │ ├── streaming.py │ │ │ ├── structured_output.py │ │ │ └── thinking_streaming.py │ 
├── rag │ │ ├── embedders │ │ │ └── embedders_execution.py │ │ ├── rerankers │ │ │ ├── __init__.py │ │ │ └── use_cohere.py │ │ └── vector_stores │ │ │ ├── README.md │ │ │ ├── dag │ │ │ ├── dag_chroma.yaml │ │ │ ├── dag_elasticsearch.yaml │ │ │ ├── dag_html_pinecone.yaml │ │ │ ├── dag_pgvector.yaml │ │ │ ├── dag_pinecone.yaml │ │ │ ├── dag_qdrant.yaml │ │ │ ├── dag_weaviate.yaml │ │ │ ├── dag_weaviate_custom.yaml │ │ │ ├── dag_weaviate_tenant.yaml │ │ │ └── dag_yaml.py │ │ │ ├── delete_documents │ │ │ └── pinecone_delete_by_file_ids.py │ │ │ ├── elasticsearch_flow.py │ │ │ ├── filters │ │ │ ├── filtering_example.py │ │ │ └── filtering_readme.md │ │ │ ├── pinecone_flow.py │ │ │ ├── scrapping_pinecone_flow.py │ │ │ └── utils.py │ └── tools │ │ ├── README.md │ │ ├── custom_tools │ │ ├── calculator_sympy.py │ │ ├── file_reader.py │ │ └── scraper.py │ │ ├── human_in_the_loop │ │ ├── confirmation_email_writer │ │ │ ├── console │ │ │ │ └── console_agent.py │ │ │ └── socket │ │ │ │ ├── client.py │ │ │ │ └── socket_agent.py │ │ ├── planning_approval │ │ │ └── orchestrator.py │ │ ├── streaming_orchestrator │ │ │ ├── client_streaming.py │ │ │ └── server_streaming.py │ │ └── streaming_post_writer │ │ │ ├── client_streaming.html │ │ │ └── server_streaming.py │ │ ├── mcp_server_as_tool │ │ ├── mcp_servers │ │ │ ├── math_server.py │ │ │ └── weather_server.py │ │ └── use_mcp_adapter_tool.py │ │ ├── use_agent.py │ │ ├── use_exa.py │ │ ├── use_firecrawl.py │ │ ├── use_function_tool.py │ │ ├── use_http_api_node.py │ │ ├── use_jina.py │ │ ├── use_python_node.py │ │ ├── use_react_fc.py │ │ ├── use_react_search.py │ │ ├── use_react_with_coding.py │ │ ├── use_serp.py │ │ ├── use_sql.py │ │ └── use_tavily.py ├── llm_setup.py └── use_cases │ ├── agents_use_cases │ ├── README.md │ ├── agent_deep_scraping.py │ ├── agent_financial.py │ ├── agent_house_price_regression.py │ ├── agent_searcher.py │ ├── agent_with_local_llm.py │ ├── agent_with_small_llm.py │ ├── agent_with_thinking_tool.py │ └── data │ │ └── house_prices.csv │ ├── chainlit │ ├── bge │ │ ├── __init__.py │ │ ├── component │ │ │ ├── __init__.py │ │ │ └── document.py │ │ └── node │ │ │ ├── __init__.py │ │ │ └── bge.py │ ├── chainlit.md │ ├── main_chainlit_agent.py │ ├── main_chainlit_rag.py │ ├── main_chainlit_react_agent.py │ ├── main_chainlit_server.py │ └── utils.py │ ├── customer_support │ ├── README.md │ ├── bank_api.py │ └── main.py │ ├── erp_system │ ├── README.md │ ├── agent.py │ ├── app.py │ ├── backend.py │ ├── database_tool.py │ └── run.sh │ ├── financial_assistant │ ├── README.md │ └── main.py │ ├── gpt_researcher │ ├── README.md │ ├── gpt_researcher │ │ ├── __init__.py │ │ ├── conduct_research.py │ │ ├── prompts.py │ │ └── write_report.py │ ├── main_gpt_researcher.py │ ├── main_multi_agents.py │ ├── multi_agents │ │ ├── __init__.py │ │ ├── editor_agent.py │ │ ├── human_agent.py │ │ ├── planner_agent.py │ │ ├── publisher_agent.py │ │ ├── researcher_agent.py │ │ ├── utils.py │ │ └── writer_agent.py │ └── utils.py │ ├── job_posting │ ├── README.md │ ├── job_example.md │ └── main.py │ ├── literature_overview │ ├── README.md │ ├── main_orchestrator.py │ └── main_planner.py │ ├── project_manager │ ├── README.md │ ├── app.py │ ├── backend.py │ ├── composio_tool.py │ ├── react_agent_pm.py │ └── run.sh │ ├── researcher │ ├── README.md │ ├── agent.py │ ├── app.py │ ├── backend.py │ └── run.sh │ ├── search │ ├── README.md │ ├── app.py │ ├── run.sh │ ├── server.py │ └── server_via_dynamiq.py │ ├── smm_manager │ ├── README.md │ ├── data │ │ └── emails.txt │ 
├── mailgun_tool.py │ └── main.py │ └── trip_planner │ ├── README.md │ ├── main_orchestrator.py │ ├── main_planner.py │ └── prompts.py ├── mkdocs.yml ├── poetry.lock ├── pyproject.toml ├── scripts └── generate_mkdocs.py ├── setup.cfg └── tests ├── __init__.py ├── conftest.py ├── integration ├── __init__.py ├── evaluations │ ├── metrics │ │ ├── test_answer_correctness.py │ │ ├── test_bleu_score.py │ │ ├── test_context_precision.py │ │ ├── test_context_recall.py │ │ ├── test_factual_correctness.py │ │ ├── test_faithfulness.py │ │ ├── test_rouge_score.py │ │ └── test_string_metrics.py │ ├── test_llm_evaluator.py │ └── test_python_evaluator.py ├── flows │ ├── __init__.py │ └── test_flow.py └── nodes │ ├── __init__.py │ ├── agents │ ├── test_agent_input_message.py │ └── test_agent_methods.py │ ├── audio │ ├── __init__.py │ ├── test_elevenlabs.py │ └── test_whisper.py │ ├── callbacks │ └── test_callbacks.py │ ├── converters │ ├── test_csv.py │ ├── test_docx.py │ ├── test_html.py │ ├── test_pptx.py │ ├── test_pypdf.py │ └── test_unstructured.py │ ├── embedders │ ├── conftest.py │ ├── test_bedrock_embedders.py │ ├── test_cohere_embedders.py │ ├── test_embedding_tracing.py │ ├── test_gemini_embedders.py │ ├── test_huggingface_embedders.py │ ├── test_mistral_embedders.py │ ├── test_openai_embedders.py │ ├── test_vertexai_embedders.py │ └── test_watsonx_embedders.py │ ├── llms │ ├── __init__.py │ ├── test_ai21.py │ ├── test_anyscale.py │ ├── test_azure.py │ ├── test_bedrock.py │ ├── test_cerebras.py │ ├── test_customllm.py │ ├── test_databricks.py │ ├── test_deepinfra.py │ ├── test_deepseek.py │ ├── test_fireworksai.py │ ├── test_gemini.py │ ├── test_groq.py │ ├── test_huggingface.py │ ├── test_mistral.py │ ├── test_nvidia_nim.py │ ├── test_ollama.py │ ├── test_perplexity.py │ ├── test_replicate.py │ ├── test_together.py │ ├── test_vertexai.py │ ├── test_watsonx.py │ └── test_xai.py │ ├── operators │ ├── __init__.py │ ├── test_choice.py │ └── test_map.py │ ├── orchestrators │ └── test_graph_orchestrators.py │ ├── rerankers │ └── test_cohere.py │ ├── retrievers │ ├── test_milvus_retriever_flow.py │ └── test_qdrant_retriever_flow.py │ ├── test_caching.py │ ├── test_streaming.py │ ├── tools │ ├── __init__.py │ ├── test_exa.py │ ├── test_firecrawl.py │ ├── test_http_api_call.py │ ├── test_jina.py │ ├── test_mcp_tool.py │ ├── test_python.py │ ├── test_serp.py │ ├── test_sql_executor.py │ └── test_tavily.py │ ├── validators │ ├── __init__.py │ ├── test_regex_match.py │ ├── test_valid_choices.py │ ├── test_valid_json.py │ └── test_valid_python.py │ └── writers │ ├── test_milvus_writer_flow.py │ └── test_qdrant_writer_flow.py ├── integration_with_creds ├── __init__.py ├── agents │ ├── __init__.py │ ├── test_agent_escape_variables.py │ ├── test_agent_images.py │ ├── test_agent_input.py │ ├── test_agent_memory.py │ ├── test_agent_no_tools.py │ ├── test_agent_python_tool.py │ └── test_agent_streaming.py ├── memory │ ├── __init__.py │ ├── test_memory.py │ └── test_memory_retrieval.py └── test_rag_yaml.py └── unit ├── components ├── evaluators │ └── test_llm_evaluator_unit.py └── retrievers │ ├── test_chroma_document_retriever.py │ ├── test_elasticsearch_document_retriever.py │ ├── test_milvus_document_retriever.py │ ├── test_pgvector_document_retriever.py │ ├── test_pinecone_document_retriever.py │ ├── test_qdrant_document_retriever.py │ └── test_weaviate_document_retriever.py ├── connections └── test_connections.py ├── nodes ├── prompts │ └── test_prompts.py ├── retrievers │ ├── test_chroma_retriever.py │ ├── 
test_elasticsearch_retriever.py │ ├── test_milvus_retriever.py │ ├── test_pgvector_retriever.py │ ├── test_pinecone_retriever.py │ ├── test_qdrant_retriever.py │ ├── test_weaviate_retriever.py │ └── test_weaviate_tenant_retriever.py ├── schema_generation │ └── test_schemas.py ├── test_node.py └── writers │ ├── test_milvus_writer.py │ ├── test_pgvector_writer.py │ ├── test_qdrant_writer.py │ └── test_weaviate_writer.py ├── storages └── vector │ ├── elasticsearch │ └── test_elasticsearch.py │ ├── milvus │ ├── test_milvus_filters.py │ └── test_milvus_storage.py │ ├── qdrant │ ├── test_converters.py │ ├── test_filters.py │ └── test_qdrant_storage.py │ ├── test_base.py │ ├── test_utils.py │ └── weaviate │ ├── test_weaviate.py │ └── test_weaviate_tenant.py └── utils └── test_json_parser.py /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Dynamiq Team will be the default owners for everything in the repo. 2 | # @dynamiq-ai/dynamiq-core will be requested for review when someone opens a pull request. 3 | * @dynamiq-ai/dynamiq-core 4 | -------------------------------------------------------------------------------- /.github/workflows/pr.yaml: -------------------------------------------------------------------------------- 1 | name: Lint PR 2 | 3 | on: 4 | pull_request: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | - reopened 10 | 11 | jobs: 12 | pr-lint: 13 | name: validate PR title 14 | runs-on: ubuntu-latest 15 | permissions: 16 | pull-requests: read 17 | steps: 18 | - uses: amannn/action-semantic-pull-request@v5 19 | env: 20 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 21 | - uses: deepakputhraya/action-pr-title@v1.0.2 22 | with: 23 | prefix_case_sensitive: true 24 | max_length: 64 25 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Package 2 | 3 | on: 4 | release: 5 | types: [ published ] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | pipeline: 10 | environment: prod 11 | name: Publish Package 12 | runs-on: ubuntu-latest 13 | permissions: 14 | id-token: write 15 | contents: read 16 | steps: 17 | - name: Checkout Code 18 | uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | persist-credentials: false 22 | - name: Set up Python 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: '3.12' 26 | check-latest: true 27 | - name: Install Poetry 28 | uses: snok/install-poetry@v1.4.1 29 | with: 30 | version: 1.8.3 31 | virtualenvs-create: false 32 | - name: Configure AWS Credentials 33 | uses: aws-actions/configure-aws-credentials@v4 34 | with: 35 | aws-region: ${{ secrets.AWS_REGION }} 36 | role-to-assume: ${{ secrets.AWS_ROLE }} 37 | - name: Build Package 38 | run: | 39 | poetry build 40 | - name: Publish to AWS CodeArtifact 41 | run: | 42 | export POETRY_HTTP_BASIC_CODEARTIFACT_USERNAME=aws 43 | export POETRY_HTTP_BASIC_CODEARTIFACT_PASSWORD=`aws codeartifact get-authorization-token --domain dynamiq --domain-owner ${{ secrets.AWS_ACCOUNT_ID }} --region ${{ secrets.AWS_REGION }} --query authorizationToken --output text` 44 | export POETRY_REPOSITORIES_CODEARTIFACT_URL=`aws codeartifact get-repository-endpoint --domain dynamiq --domain-owner ${{ secrets.AWS_ACCOUNT_ID }} --repository dynamiq --region ${{ secrets.AWS_REGION }} --format pypi --query repositoryEndpoint --output text` 45 | poetry publish --repository codeartifact 46 | - name: Publish to PyPI 
47 | run: poetry publish -u __token__ -p ${{ secrets.PYPI_PASSWORD }} 48 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: "^docs/|env/" 2 | default_stages: [commit] 3 | default_install_hook_types: [pre-commit, commit-msg] 4 | 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.5.0 8 | hooks: 9 | - id: no-commit-to-branch 10 | args: ["--branch", "main"] 11 | - id: end-of-file-fixer 12 | - id: check-yaml 13 | - id: check-merge-conflict 14 | - id: check-json 15 | - id: debug-statements 16 | - id: trailing-whitespace 17 | 18 | - repo: https://github.com/asottile/pyupgrade 19 | rev: v2.34.0 20 | hooks: 21 | - id: pyupgrade 22 | args: ["--py310-plus"] 23 | 24 | - repo: https://github.com/akaihola/darker 25 | rev: v2.1.0 26 | hooks: 27 | - id: darker 28 | args: ["--isort", "--line-length", "120"] 29 | additional_dependencies: 30 | - black==24.8.0 31 | - isort==5.13.2 32 | 33 | - repo: https://github.com/PyCQA/flake8 34 | rev: 7.0.0 35 | hooks: 36 | - id: flake8 37 | 38 | - repo: https://github.com/compilerla/conventional-pre-commit 39 | rev: v3.4.0 40 | hooks: 41 | - id: conventional-pre-commit 42 | stages: [commit-msg] 43 | args: [] 44 | 45 | - repo: https://github.com/PyCQA/bandit 46 | rev: 1.7.10 47 | hooks: 48 | - id: bandit 49 | args: ["-c", "pyproject.toml"] 50 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.12.2-slim AS runtime 2 | 3 | ENV PYTHONPATH=/app/ 4 | ENV RUNTIME_PACKAGES="git curl make" 5 | ENV POETRY_HOME=/opt/poetry 6 | ENV POETRY_VERSION=1.8.3 7 | ENV POETRY_VIRTUALENVS_CREATE=false 8 | ENV PATH=${POETRY_HOME}/bin:${PATH} 9 | 10 | RUN apt-get update && apt-get install -y $RUNTIME_PACKAGES 11 | RUN apt-get install build-essential -y 12 | RUN curl -sSL https://install.python-poetry.org | python3 - --yes 13 | 14 | WORKDIR /app 15 | 16 | COPY ./pyproject.toml /app/pyproject.toml 17 | COPY ./poetry.lock /app/poetry.lock 18 | COPY ./Makefile /app/Makefile 19 | 20 | RUN poetry install --no-root 21 | 22 | FROM runtime AS develop 23 | 24 | COPY ./.pre-commit-config.yaml /app/.pre-commit-config.yaml 25 | COPY ./setup.cfg /app/setup.cfg 26 | COPY ./dynamiq /app/dynamiq 27 | COPY ./examples /app/examples 28 | COPY ./tests /app/tests 29 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | install-dependencies-dev: 2 | poetry install --only dev --no-root 3 | 4 | install-dependencies-examples: 5 | poetry install --only examples --no-root 6 | 7 | install-dependencies-main: 8 | poetry install --only main --no-root 9 | 10 | install-dependencies-all: 11 | poetry install --with examples --no-root 12 | 13 | install-pre-commit: 14 | pre-commit install --install-hooks 15 | 16 | install: 17 | poetry install --no-root 18 | make install-pre-commit 19 | 20 | prepare: 21 | pre-commit run --all-files 22 | 23 | lint: prepare 24 | 25 | test-integration: 26 | pytest tests/integration 27 | 28 | test-integration-with-creds: 29 | pytest tests/integration_with_creds 30 | 31 | test-exclude-integration-with-creds: 32 | pytest tests --ignore=tests/integration_with_creds 33 | 34 | test-unit: 35 | pytest tests/unit 36 | 37 | test: 38 | pytest tests 39 | 40 | test-cov: 
41 | mkdir -p ./reports 42 | coverage run -m pytest --junitxml=./reports/test-results.xml tests 43 | coverage report --skip-empty --skip-covered 44 | coverage html -d ./reports/htmlcov --omit="*/test_*,*/tests.py" 45 | coverage xml -o ./reports/coverage.xml --omit="*/test_*,*/tests.py" 46 | 47 | test-cov-exclude-integration-with-creds: 48 | mkdir -p ./reports 49 | coverage run -m pytest --junitxml=./reports/test-results.xml tests --ignore=tests/integration_with_creds 50 | coverage report --skip-empty --skip-covered 51 | coverage html -d ./reports/htmlcov --omit="*/test_*,*/tests.py" 52 | coverage xml -o ./reports/coverage.xml --omit="*/test_*,*/tests.py" 53 | 54 | build-mkdocs: 55 | rm -rf mkdocs/ 56 | python scripts/generate_mkdocs.py 57 | cp README.md mkdocs/index.md 58 | cp -rf docs/tutorials/ mkdocs/tutorials/ 59 | mkdocs build 60 | 61 | publish-mkdocs: 62 | make build-mkdocs 63 | mkdocs gh-deploy --force 64 | 65 | run-mkdocs-locally: 66 | make build-mkdocs 67 | mkdocs serve 68 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | If you discover a security vulnerability in Dynamiq, please report it to us at [security@getdynamiq.ai](mailto:security@getdynamiq.ai). 6 | 7 | In your report, include the following details: 8 | 9 | 1. **Steps to Reproduce:** Clear and reproducible steps that demonstrate the vulnerability. 10 | 2. **Vulnerability Explanation:** A detailed explanation of why you believe there is a vulnerability. 11 | 3. **Exploitation Information:** Any information about active exploitation of the vulnerability (if available). 12 | 13 | ## Vulnerability Response 14 | 15 | Upon receiving your report, we will: 16 | 17 | 1. **Acknowledge Receipt:** Acknowledge your report within 7 business days. 18 | 2. **Preliminary Analysis:** Conduct a preliminary analysis to confirm the vulnerability. 19 | 3. **Confidential Handling:** Ensure the information you provide is kept confidential and used only to fix the vulnerability or coordinate a response as needed. 20 | 21 | We will keep you informed about the status of the issue throughout the process. 22 | 23 | ## Disclosure Policy 24 | 25 | Our goal is to disclose vulnerabilities responsibly. We will set a disclosure date in consultation with the reporting party and the Dynamiq maintainers once a mitigation is available. 26 | 27 | Thank you for helping us keep Dynamiq secure. 
28 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dynamiq-app: 3 | image: dynamiq-app:${IMAGE_TAG:-local} 4 | build: 5 | target: develop 6 | volumes: 7 | - ./:/app 8 | 9 | dynamiq-app-test: 10 | image: dynamiq-app:${IMAGE_TAG:-local} 11 | build: 12 | target: develop 13 | entrypoint: ["make", "test"] 14 | env_file: 15 | - .env 16 | volumes: 17 | - ./:/app 18 | 19 | dynamiq-app-test-cov: 20 | image: dynamiq-app:${IMAGE_TAG:-local} 21 | build: 22 | target: develop 23 | entrypoint: ["make", "test-cov"] 24 | env_file: 25 | - .env 26 | volumes: 27 | - ./:/app 28 | - ./reports:/app/reports:rw 29 | 30 | 31 | dynamiq-app-test-cov-exclude-integration-with-creds: 32 | image: dynamiq-app:${IMAGE_TAG:-local} 33 | build: 34 | target: develop 35 | entrypoint: ["make", "test-cov-exclude-integration-with-creds"] 36 | env_file: 37 | - .env 38 | volumes: 39 | - ./:/app 40 | - ./reports:/app/reports:rw 41 | -------------------------------------------------------------------------------- /docs/img/Dynamiq_Logo_Universal_Github.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/docs/img/Dynamiq_Logo_Universal_Github.png -------------------------------------------------------------------------------- /dynamiq/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .workflow import Workflow 4 | 5 | ROOT_PATH = os.path.dirname(__file__) 6 | -------------------------------------------------------------------------------- /dynamiq/cache/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import * 2 | -------------------------------------------------------------------------------- /dynamiq/cache/backends/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseCache 2 | from .redis import RedisCache 3 | -------------------------------------------------------------------------------- /dynamiq/cache/backends/redis.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from dynamiq.cache.backends import BaseCache 4 | from dynamiq.cache.config import RedisCacheConfig 5 | 6 | 7 | class RedisCache(BaseCache): 8 | """Redis cache backend implementation.""" 9 | 10 | @classmethod 11 | def from_config(cls, config: RedisCacheConfig): 12 | """Create RedisCache instance from configuration. 13 | 14 | Args: 15 | config (RedisCacheConfig): Redis cache configuration. 16 | 17 | Returns: 18 | RedisCache: Redis cache instance. 19 | """ 20 | from redis import Redis 21 | 22 | return cls(client=Redis(**config.to_dict())) 23 | 24 | def get(self, key: str) -> Any: 25 | """Retrieve value from Redis cache. 26 | 27 | Args: 28 | key (str): Cache key. 29 | 30 | Returns: 31 | Any: Cached value. 32 | """ 33 | return self.client.get(key) 34 | 35 | def set(self, key: str, value: dict, ttl: int | None = None) -> Any: 36 | """Set value in Redis cache. 37 | 38 | Args: 39 | key (str): Cache key. 40 | value (dict): Value to cache. 41 | ttl (int | None): Time-to-live for cache entry. 42 | 43 | Returns: 44 | Any: Result of cache set operation.
45 | """ 46 | if ttl is None: 47 | return self.client.set(key, value) 48 | return self.client.setex(key, ttl, value) 49 | 50 | def delete(self, key: str) -> Any: 51 | """Delete value from Redis cache. 52 | 53 | Args: 54 | key (str): Cache key. 55 | 56 | Returns: 57 | Any: Result of cache delete operation. 58 | """ 59 | return self.client.delete(key) 60 | -------------------------------------------------------------------------------- /dynamiq/cache/codecs.py: -------------------------------------------------------------------------------- 1 | import base64 2 | 3 | 4 | class BaseCodec: 5 | """Abstract base class for encoding and decoding.""" 6 | 7 | def encode(self, value: str) -> str: 8 | """Encode a string value. 9 | 10 | Args: 11 | value (str): The string to encode. 12 | 13 | Returns: 14 | str: The encoded string. 15 | """ 16 | raise NotImplementedError 17 | 18 | def decode(self, value: str | bytes) -> str: 19 | """Decode a string or bytes value. 20 | 21 | Args: 22 | value (str | bytes): The value to decode. 23 | 24 | Returns: 25 | str: The decoded string. 26 | """ 27 | raise NotImplementedError 28 | 29 | 30 | class Base64Codec(BaseCodec): 31 | """Base64 encoding and decoding implementation.""" 32 | 33 | def encode(self, value: str) -> str: 34 | """Encode a string using Base64. 35 | 36 | Args: 37 | value (str): The string to encode. 38 | 39 | Returns: 40 | str: The Base64 encoded string. 41 | """ 42 | return base64.b64encode(value.encode()).decode() 43 | 44 | def decode(self, value: str | bytes) -> str: 45 | """Decode a Base64 encoded string or bytes. 46 | 47 | Args: 48 | value (str | bytes): The value to decode. 49 | 50 | Returns: 51 | str: The decoded string. 52 | """ 53 | return base64.b64decode(value).decode() 54 | -------------------------------------------------------------------------------- /dynamiq/cache/config.py: -------------------------------------------------------------------------------- 1 | import enum 2 | from typing import Literal 3 | from pydantic import BaseModel 4 | 5 | from dynamiq.connections import RedisConnection 6 | 7 | 8 | class CacheBackend(str, enum.Enum): 9 | """Enumeration for cache backends.""" 10 | Redis = "Redis" 11 | 12 | 13 | class CacheConfig(BaseModel): 14 | """Configuration for cache settings. 15 | 16 | Attributes: 17 | backend (CacheBackend): The cache backend to use. 18 | namespace (str | None): Optional namespace for cache keys. 19 | ttl (int | None): Optional time-to-live for cache entries. 20 | """ 21 | backend: CacheBackend 22 | namespace: str | None = None 23 | ttl: int | None = None 24 | 25 | def to_dict(self, **kwargs) -> dict: 26 | """Convert config to dictionary. 27 | 28 | Args: 29 | **kwargs: Additional arguments. 30 | 31 | Returns: 32 | dict: Configuration as dictionary. 33 | """ 34 | return self.model_dump(**kwargs) 35 | 36 | 37 | class RedisCacheConfig(CacheConfig, RedisConnection): 38 | """Configuration for Redis cache. 39 | 40 | Attributes: 41 | backend (Literal[CacheBackend.Redis]): The Redis cache backend. 
42 | """ 43 | backend: Literal[CacheBackend.Redis] = CacheBackend.Redis 44 | -------------------------------------------------------------------------------- /dynamiq/cache/managers/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import CacheManager 2 | from .workflow import WorkflowCacheManager 3 | -------------------------------------------------------------------------------- /dynamiq/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseCallbackHandler, NodeCallbackHandler 2 | from .streaming import AsyncStreamingIteratorCallbackHandler, StreamingQueueCallbackHandler 3 | from .tracing import TracingCallbackHandler 4 | -------------------------------------------------------------------------------- /dynamiq/clients/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseTracingClient 2 | -------------------------------------------------------------------------------- /dynamiq/clients/base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import TYPE_CHECKING 3 | 4 | if TYPE_CHECKING: 5 | from dynamiq.callbacks.tracing import Run 6 | 7 | 8 | class BaseTracingClient(abc.ABC): 9 | """Abstract base class for tracing clients.""" 10 | 11 | @abc.abstractmethod 12 | def trace(self, runs: list["Run"]) -> None: 13 | """Trace the given runs. 14 | 15 | Args: 16 | runs (list["Run"]): List of runs to trace. 17 | 18 | Raises: 19 | NotImplementedError: If not implemented. 20 | """ 21 | raise NotImplementedError 22 | -------------------------------------------------------------------------------- /dynamiq/components/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/components/__init__.py -------------------------------------------------------------------------------- /dynamiq/components/converters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/components/converters/__init__.py -------------------------------------------------------------------------------- /dynamiq/components/converters/utils.py: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | 3 | import filetype 4 | 5 | from dynamiq.utils.utils import generate_uuid 6 | 7 | 8 | def get_filename_for_bytesio(file: BytesIO) -> str: 9 | """ 10 | Get a filepath for a BytesIO object. 11 | 12 | Args: 13 | file (BytesIO): The BytesIO object. 14 | 15 | Returns: 16 | str: A filename for the BytesIO object. 17 | 18 | Raises: 19 | ValueError: If the file extension couldn't be guessed. 20 | """ 21 | filename = getattr(file, "name", None) 22 | if filename is None: 23 | file_extension = filetype.guess_extension(file) 24 | if file_extension: 25 | filename = f"{generate_uuid()}.{file_extension}" 26 | else: 27 | raise ValueError( 28 | "Unable to determine file extension. BytesIO object lacks name and " 29 | "extension couldn't be guessed." 
-------------------------------------------------------------------------------- /dynamiq/components/embedders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/components/embedders/__init__.py -------------------------------------------------------------------------------- /dynamiq/components/embedders/bedrock.py: -------------------------------------------------------------------------------- 1 | from dynamiq.components.embedders.base import BaseEmbedder 2 | from dynamiq.connections import AWS as BedrockConnection 3 | 4 | 5 | class BedrockEmbedder(BaseEmbedder): 6 | """ 7 | Initializes the BedrockEmbedder component with given configuration. 8 | 9 | Attributes: 10 | connection (BedrockConnection): The connection to the Bedrock API. A new connection 11 | is created if none is provided. 12 | model (str): The model name to use for embedding. Defaults to "amazon.titan-embed-text-v1". 13 | """ 14 | connection: BedrockConnection 15 | model: str = "amazon.titan-embed-text-v1" 16 | 17 | def __init__(self, **kwargs): 18 | if kwargs.get("client") is None and kwargs.get("connection") is None: 19 | kwargs["connection"] = BedrockConnection() 20 | super().__init__(**kwargs) 21 | 22 | @property 23 | def embed_params(self) -> dict: 24 | params = super().embed_params 25 | if "cohere" in self.model: 26 | params["input_type"] = self.input_type 27 | if self.truncate: 28 | params["truncate"] = self.truncate 29 | 30 | return params 31 | -------------------------------------------------------------------------------- /dynamiq/components/embedders/cohere.py: -------------------------------------------------------------------------------- 1 | from dynamiq.components.embedders.base import BaseEmbedder 2 | from dynamiq.connections import Cohere as CohereConnection 3 | 4 | 5 | class CohereEmbedder(BaseEmbedder): 6 | """ 7 | Initializes the CohereEmbedder component with given configuration. 8 | 9 | Attributes: 10 | connection (CohereConnection): The connection to the Cohere API. A new connection 11 | is created if none is provided. 12 | model (str): The model name to use for embedding. Defaults to "cohere/embed-english-v2.0" 13 | input_type (str): Specifies the type of input you're giving to the model. Defaults to "search_query" 14 | """ 15 | connection: CohereConnection 16 | model: str = "cohere/embed-english-v2.0" 17 | input_type: str = "search_query" 18 | 19 | def __init__(self, **kwargs): 20 | if kwargs.get("client") is None and kwargs.get("connection") is None: 21 | kwargs["connection"] = CohereConnection() 22 | super().__init__(**kwargs) 23 | 24 | @property 25 | def embed_params(self) -> dict: 26 | params = super().embed_params 27 | params["input_type"] = self.input_type 28 | if self.truncate: 29 | params["truncate"] = self.truncate 30 | 31 | return params 32 | -------------------------------------------------------------------------------- /dynamiq/components/embedders/gemini.py: -------------------------------------------------------------------------------- 1 | from dynamiq.components.embedders.base import BaseEmbedder 2 | from dynamiq.connections import Gemini as GeminiConnection 3 | 4 | 5 | class GeminiEmbedder(BaseEmbedder): 6 | """ 7 | Initializes the GeminiEmbedder component with given configuration. 8 | 9 | Attributes: 10 | connection (GeminiConnection): The connection to the Gemini API.
A new connection 11 | is created if none is provided. 12 | model (str): The model name to use for embedding. Defaults to "gemini/gemini-embedding-exp-03-07" 13 | input_type (str): Specifies the type of embedding task. Defaults to "search_query" 14 | """ 15 | 16 | connection: GeminiConnection 17 | model: str = "gemini/gemini-embedding-exp-03-07" 18 | input_type: str = "search_query" 19 | 20 | def __init__(self, **kwargs): 21 | if kwargs.get("client") is None and kwargs.get("connection") is None: 22 | kwargs["connection"] = GeminiConnection() 23 | super().__init__(**kwargs) 24 | 25 | @property 26 | def embed_params(self) -> dict: 27 | """ 28 | Returns the embedding parameters for the Gemini API. 29 | 30 | Returns: 31 | dict: A dictionary containing the parameters for the embedding call. 32 | """ 33 | params = super().embed_params 34 | 35 | input_to_task_mapping = { 36 | "search_document": "RETRIEVAL_DOCUMENT", 37 | "search_query": "RETRIEVAL_QUERY", 38 | "classification": "CLASSIFICATION", 39 | "clustering": "CLUSTERING", 40 | } 41 | params["task_type"] = input_to_task_mapping.get(self.input_type) 42 | 43 | if self.truncate: 44 | params["truncate"] = self.truncate 45 | 46 | if self.dimensions: 47 | params["dimensions"] = self.dimensions 48 | 49 | return params 50 | -------------------------------------------------------------------------------- /dynamiq/components/embedders/mistral.py: -------------------------------------------------------------------------------- 1 | from dynamiq.components.embedders.base import BaseEmbedder 2 | from dynamiq.connections import Mistral as MistralConnection 3 | 4 | 5 | class MistralEmbedder(BaseEmbedder): 6 | """ 7 | Initializes the MistralEmbedder component with given configuration. 8 | 9 | Attributes: 10 | connection (MistralConnection): The connection to the Mistral API. A new connection 11 | is created if none is provided. 12 | model (str): The model name to use for embedding. Defaults to "mistral/mistral-embed" 13 | """ 14 | connection: MistralConnection 15 | model: str = "mistral/mistral-embed" 16 | 17 | def __init__(self, **kwargs): 18 | if kwargs.get("client") is None and kwargs.get("connection") is None: 19 | kwargs["connection"] = MistralConnection() 20 | super().__init__(**kwargs) 21 | -------------------------------------------------------------------------------- /dynamiq/components/embedders/watsonx.py: -------------------------------------------------------------------------------- 1 | from dynamiq.components.embedders.base import BaseEmbedder 2 | from dynamiq.connections import WatsonX as WatsonXConnection 3 | 4 | 5 | class WatsonXEmbedder(BaseEmbedder): 6 | """ 7 | Initializes the WatsonXEmbedder component with given configuration. 8 | 9 | Attributes: 10 | connection (WatsonXConnection): The connection to the WatsonX API. A new connection 11 | is created if none is provided. 12 | model (str): The model name to use for embedding. 
Defaults to "watsonx/ibm/slate-30m-english-rtrvr" 13 | """ 14 | connection: WatsonXConnection 15 | model: str = "watsonx/ibm/slate-30m-english-rtrvr" 16 | 17 | def __init__(self, **kwargs): 18 | if kwargs.get("client") is None and kwargs.get("connection") is None: 19 | kwargs["connection"] = WatsonXConnection() 20 | super().__init__(**kwargs) 21 | -------------------------------------------------------------------------------- /dynamiq/components/retrievers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/components/retrievers/__init__.py -------------------------------------------------------------------------------- /dynamiq/components/splitters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/components/splitters/__init__.py -------------------------------------------------------------------------------- /dynamiq/connections/__init__.py: -------------------------------------------------------------------------------- 1 | from .connections import * 2 | from .storages import * 3 | -------------------------------------------------------------------------------- /dynamiq/connections/storages.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import BaseConnection 2 | 3 | 4 | class RedisConnection(BaseConnection): 5 | """ 6 | Represents a connection to a Redis database. 7 | 8 | This class inherits from BaseConnection and provides specific attributes 9 | for connecting to a Redis database. 10 | 11 | Attributes: 12 | host (str): The hostname or IP address of the Redis server. 13 | port (int): The port number on which the Redis server is listening. 14 | db (int): The Redis database number to connect to. 15 | username (str | None): The username for authentication (optional). 16 | password (str | None): The password for authentication (optional). 17 | """ 18 | 19 | host: str 20 | port: int 21 | db: int 22 | username: str | None = None 23 | password: str | None = None 24 | 25 | def connect(self): 26 | """ 27 | Establishes a connection to the Redis database. 28 | 29 | This method is responsible for creating and initializing the connection 30 | to the Redis server using the provided connection details. 31 | 32 | Note: 33 | This method is currently a placeholder and does not contain 34 | the actual implementation for connecting to Redis. 35 | """ 36 | pass 37 | -------------------------------------------------------------------------------- /dynamiq/evaluations/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_evaluator import BaseEvaluator 2 | from .llm_evaluator import LLMEvaluator 3 | from .python_evaluator import PythonEvaluator 4 | -------------------------------------------------------------------------------- /dynamiq/evaluations/base_evaluator.py: -------------------------------------------------------------------------------- 1 | from functools import cached_property 2 | 3 | from pydantic import BaseModel, ConfigDict, computed_field 4 | 5 | 6 | class BaseEvaluator(BaseModel): 7 | """ 8 | Base class for evaluators. 9 | 10 | Attributes: 11 | name (str): Name of the evaluator. 
12 | """ 13 | 14 | name: str 15 | 16 | model_config = ConfigDict(arbitrary_types_allowed=True) 17 | 18 | @computed_field 19 | @cached_property 20 | def type(self) -> str: 21 | """ 22 | Compute the type identifier for the evaluator. 23 | 24 | Returns: 25 | str: A string representing the module and class name. 26 | """ 27 | return f"{self.__module__.rsplit('.', 1)[0]}.{self.__class__.__name__}" 28 | 29 | def run(self) -> list[float]: 30 | """ 31 | Executes the evaluator. 32 | Must be overridden by subclasses. 33 | 34 | Returns: 35 | list[float]: Scores for each reference/answer pair. 36 | """ 37 | raise NotImplementedError("Subclasses must implement this method.") 38 | -------------------------------------------------------------------------------- /dynamiq/evaluations/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from .answer_correctness import AnswerCorrectnessEvaluator 2 | from .bleu_score import BleuScoreEvaluator 3 | from .context_precision import ContextPrecisionEvaluator 4 | from .context_recall import ContextRecallEvaluator 5 | from .factual_correctness import FactualCorrectnessEvaluator 6 | from .faithfulness import FaithfulnessEvaluator 7 | from .rouge_score import RougeScoreEvaluator 8 | from .string_metrics import DistanceMeasure, ExactMatchEvaluator, StringPresenceEvaluator, StringSimilarityEvaluator 9 | -------------------------------------------------------------------------------- /dynamiq/executors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/executors/__init__.py -------------------------------------------------------------------------------- /dynamiq/flows/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseFlow 2 | from .flow import Flow 3 | -------------------------------------------------------------------------------- /dynamiq/memory/__init__.py: -------------------------------------------------------------------------------- 1 | from .memory import Memory, MemoryRetrievalStrategy 2 | -------------------------------------------------------------------------------- /dynamiq/memory/backends/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import MemoryBackend 2 | from .in_memory import InMemory 3 | from .pinecone import Pinecone 4 | from .qdrant import Qdrant 5 | from .sqlite import SQLite 6 | from .dynamo_db import DynamoDB 7 | -------------------------------------------------------------------------------- /dynamiq/nodes/__init__.py: -------------------------------------------------------------------------------- 1 | from .node import CachingConfig, ErrorHandling, InputTransformer, Node, OutputTransformer 2 | from .types import Behavior, ChoiceCondition, ConditionOperator, NodeGroup 3 | -------------------------------------------------------------------------------- /dynamiq/nodes/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Agent, AgentManager 2 | from .react import ReActAgent 3 | from .reflection import ReflectionAgent 4 | from .simple import SimpleAgent 5 | -------------------------------------------------------------------------------- /dynamiq/nodes/agents/orchestrators/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .adaptive import AdaptiveOrchestrator 2 | from .adaptive_manager import AdaptiveAgentManager 3 | from .graph import GraphOrchestrator 4 | from .graph_manager import GraphAgentManager 5 | from .graph_state import GraphState 6 | from .linear import LinearOrchestrator 7 | from .linear_manager import LinearAgentManager 8 | -------------------------------------------------------------------------------- /dynamiq/nodes/agents/simple.py: -------------------------------------------------------------------------------- 1 | from dynamiq.nodes.agents.base import Agent 2 | 3 | 4 | class SimpleAgent(Agent): 5 | """Agent that uses the Simple strategy for processing tasks.""" 6 | 7 | name: str = "Agent Simple" 8 | -------------------------------------------------------------------------------- /dynamiq/nodes/audio/__init__.py: -------------------------------------------------------------------------------- 1 | from .elevenlabs import ElevenLabsSTS, ElevenLabsTTS, Voices 2 | from .whisper import WhisperSTT 3 | -------------------------------------------------------------------------------- /dynamiq/nodes/converters/__init__.py: -------------------------------------------------------------------------------- 1 | from .csv import CSVConverter 2 | from .docx import DOCXFileConverter 3 | from .html import HTMLConverter 4 | from .llm_text_extractor import LLMImageConverter, LLMPDFConverter 5 | from .pptx import PPTXFileConverter 6 | from .pypdf import PyPDFConverter 7 | from .unstructured import UnstructuredFileConverter 8 | -------------------------------------------------------------------------------- /dynamiq/nodes/embedders/__init__.py: -------------------------------------------------------------------------------- 1 | from .bedrock import BedrockDocumentEmbedder, BedrockTextEmbedder 2 | from .cohere import CohereDocumentEmbedder, CohereTextEmbedder 3 | from .gemini import GeminiDocumentEmbedder, GeminiTextEmbedder 4 | from .huggingface import HuggingFaceDocumentEmbedder, HuggingFaceTextEmbedder 5 | from .mistral import MistralDocumentEmbedder, MistralTextEmbedder 6 | from .openai import OpenAIDocumentEmbedder, OpenAITextEmbedder 7 | from .vertexai import VertexAIDocumentEmbedder, VertexAITextEmbedder 8 | from .watsonx import WatsonXDocumentEmbedder, WatsonXTextEmbedder 9 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/__init__.py: -------------------------------------------------------------------------------- 1 | from .ai21 import AI21 2 | from .anthropic import Anthropic 3 | from .anyscale import Anyscale 4 | from .azureai import AzureAI 5 | from .base import BaseLLM 6 | from .bedrock import Bedrock 7 | from .cerebras import Cerebras 8 | from .cohere import Cohere 9 | from .custom_llm import CustomLLM 10 | from .databricks import Databricks 11 | from .deepinfra import DeepInfra 12 | from .deepseek import DeepSeek 13 | from .fireworksai import FireworksAI 14 | from .gemini import Gemini 15 | from .groq import Groq 16 | from .huggingface import HuggingFace 17 | from .mistral import Mistral 18 | from .nvidia_nim import NvidiaNIM 19 | from .ollama import Ollama 20 | from .openai import OpenAI 21 | from .perplexity import Perplexity 22 | from .replicate import Replicate 23 | from .sambanova import SambaNova 24 | from .togetherai import TogetherAI 25 | from .vertexai import VertexAI 26 | from .watsonx import WatsonX 27 | from .xai import xAI 28 | 
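Every node exported above follows the same construction pattern, visible in the modules below: when neither a client nor a connection is supplied, the node instantiates its provider's default connection, which is expected to read credentials from the environment (e.g. OPENAI_API_KEY). A minimal sketch of that pattern; the model name, prompt wiring, and output access here are illustrative assumptions rather than pinned defaults:

    from dynamiq.nodes.llms import OpenAI
    from dynamiq.prompts import Message, Prompt

    # No explicit connection: an OpenAIConnection() is created under the hood
    # and picks up OPENAI_API_KEY from the environment.
    llm = OpenAI(
        model="gpt-4o-mini",  # assumed model name, for illustration only
        prompt=Prompt(messages=[Message(role="user", content="{{question}}")]),
    )
    result = llm.run(input_data={"question": "What is Dynamiq?"})
    print(result.output)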
-------------------------------------------------------------------------------- /dynamiq/nodes/llms/ai21.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import AI21 as AI21Connection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class AI21(BaseLLM): 6 | """AI21 LLM node. 7 | 8 | This class provides an implementation for the AI21 Language Model node. 9 | 10 | Attributes: 11 | connection (AI21Connection): The connection to use for the AI21 LLM. 12 | MODEL_PREFIX (str): The prefix for the AI21 model name. 13 | """ 14 | connection: AI21Connection 15 | MODEL_PREFIX = "ai21/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the AI21 LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = AI21Connection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/anthropic.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Anthropic as AnthropicConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Anthropic(BaseLLM): 6 | """Anthropic LLM node. 7 | 8 | This class provides an implementation for the Anthropic Language Model node. 9 | 10 | Attributes: 11 | connection (AnthropicConnection | None): The connection to use for the Anthropic LLM. 12 | """ 13 | connection: AnthropicConnection | None = None 14 | 15 | def __init__(self, **kwargs): 16 | """Initialize the Anthropic LLM node. 17 | 18 | Args: 19 | **kwargs: Additional keyword arguments. 20 | """ 21 | if kwargs.get("client") is None and kwargs.get("connection") is None: 22 | kwargs["connection"] = AnthropicConnection() 23 | super().__init__(**kwargs) 24 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/anyscale.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Anyscale as AnyscaleConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Anyscale(BaseLLM): 6 | """Anyscale LLM node. 7 | 8 | This class provides an implementation for the Anyscale Language Model node. 9 | 10 | Attributes: 11 | connection (AnyscaleConnection | None): The connection to use for the Anyscale LLM. 12 | MODEL_PREFIX (str): The prefix for the Anyscale model name. 13 | """ 14 | connection: AnyscaleConnection | None = None 15 | MODEL_PREFIX = "anyscale/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the Anyscale LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = AnyscaleConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/azureai.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import AzureAI as AzureAIConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class AzureAI(BaseLLM): 6 | """AzureAI LLM node. 7 | 8 | This class provides an implementation for the AzureAI Language Model node. 9 | 10 | Attributes: 11 | connection (AzureAIConnection | None): The connection to use for the AzureAI LLM. 
12 | MODEL_PREFIX (str): The prefix for the AzureAI model name. 13 | """ 14 | connection: AzureAIConnection | None = None 15 | MODEL_PREFIX = "azure/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the AzureAI LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = AzureAIConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/bedrock.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import AWS as AWSConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Bedrock(BaseLLM): 6 | """Bedrock LLM node. 7 | 8 | This class provides an implementation for the Bedrock Language Model node. 9 | 10 | Attributes: 11 | connection (AWSConnection | None): The connection to use for the Bedrock LLM. 12 | MODEL_PREFIX (str): The prefix for the Bedrock model name. 13 | """ 14 | connection: AWSConnection | None = None 15 | MODEL_PREFIX = "bedrock/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the Bedrock LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = AWSConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/cerebras.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Cerebras as CerebrasConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Cerebras(BaseLLM): 6 | """Cerebras LLM node. 7 | 8 | This class provides an implementation for the Cerebras Language Model node. 9 | 10 | Attributes: 11 | connection (CerebrasConnection): The connection to use for the Cerebras LLM. 12 | MODEL_PREFIX (str): The prefix for the Cerebras model name. 13 | """ 14 | connection: CerebrasConnection 15 | MODEL_PREFIX = "cerebras/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the Cerebras LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = CerebrasConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/cohere.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Cohere as CohereConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Cohere(BaseLLM): 6 | """Cohere LLM node. 7 | 8 | This class provides an implementation for the Cohere Language Model node. 9 | 10 | Attributes: 11 | connection (CohereConnection): The connection to use for the Cohere LLM. 12 | """ 13 | connection: CohereConnection 14 | 15 | def __init__(self, **kwargs): 16 | """Initialize the Cohere LLM node. 17 | 18 | Args: 19 | **kwargs: Additional keyword arguments. 
20 | """ 21 | if kwargs.get("client") is None and kwargs.get("connection") is None: 22 | kwargs["connection"] = CohereConnection() 23 | super().__init__(**kwargs) 24 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/databricks.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Databricks as DatabricksConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | from dynamiq.nodes.llms.openai import ReasoningEffort 4 | 5 | 6 | class Databricks(BaseLLM): 7 | """Databricks LLM node. 8 | 9 | This class provides an implementation for the Databricks Language Model node. 10 | 11 | Attributes: 12 | connection (DatabricksConnection): The connection to use for the Databricks LLM. 13 | MODEL_PREFIX (str): The prefix for the Databricks model name. 14 | reasoning_effort (ReasoningEffort | None): Controls the depth and complexity of reasoning 15 | performed by the model. 16 | """ 17 | 18 | connection: DatabricksConnection 19 | MODEL_PREFIX = "databricks/" 20 | reasoning_effort: ReasoningEffort | None = ReasoningEffort.MEDIUM 21 | 22 | def __init__(self, **kwargs): 23 | """Initialize the Databricks LLM node. 24 | 25 | Args: 26 | **kwargs: Additional keyword arguments. 27 | """ 28 | if kwargs.get("client") is None and kwargs.get("connection") is None: 29 | kwargs["connection"] = DatabricksConnection() 30 | super().__init__(**kwargs) 31 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/deepinfra.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import DeepInfra as DeepInfraConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class DeepInfra(BaseLLM): 6 | """DeepInfra LLM node. 7 | 8 | This class provides an implementation for the DeepInfra Language Model node. 9 | 10 | Attributes: 11 | connection (DeepInfraConnection): The connection to use for the DeepInfra LLM. 12 | MODEL_PREFIX (str): The prefix for the DeepInfra model name. 13 | """ 14 | connection: DeepInfraConnection 15 | MODEL_PREFIX = "deepinfra/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the DeepInfra LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = DeepInfraConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/deepseek.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import DeepSeek as DeepSeekConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class DeepSeek(BaseLLM): 6 | """DeepSeek LLM node. 7 | 8 | This class provides an implementation for the DeepSeek Language Model node. 9 | 10 | Attributes: 11 | connection (DeepSeekConnection): The connection to use for the DeepSeek LLM. 12 | MODEL_PREFIX (str): The prefix for the DeepSeek model name. 13 | """ 14 | 15 | connection: DeepSeekConnection 16 | MODEL_PREFIX = "deepseek/" 17 | 18 | def __init__(self, **kwargs): 19 | """Initialize the Replicate LLM node. 20 | 21 | Args: 22 | **kwargs: Additional keyword arguments. 
23 | """ 24 | if kwargs.get("client") is None and kwargs.get("connection") is None: 25 | kwargs["connection"] = DeepSeekConnection() 26 | super().__init__(**kwargs) 27 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/fireworksai.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import FireworksAI as FireworksAIConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class FireworksAI(BaseLLM): 6 | """FireworksAI LLM node. 7 | 8 | This class provides an implementation for the Fireworks AI Language Model node. 9 | 10 | Attributes: 11 | connection (FireworksAIConnection | None): The connection to use for the Fireworks AI LLM. 12 | MODEL_PREFIX (str): The prefix for the Fireworks AI model name. 13 | """ 14 | 15 | connection: FireworksAIConnection | None = None 16 | MODEL_PREFIX = "fireworks_ai/" 17 | 18 | def __init__(self, **kwargs): 19 | """Initialize the FireworksAI LLM node. 20 | 21 | Args: 22 | **kwargs: Additional keyword arguments. 23 | """ 24 | if kwargs.get("client") is None and kwargs.get("connection") is None: 25 | kwargs["connection"] = FireworksAIConnection() 26 | super().__init__(**kwargs) 27 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/gemini.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Gemini as GeminiConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Gemini(BaseLLM): 6 | """Gemini LLM node. 7 | 8 | This class provides an implementation for the Gemini Language Model node. 9 | 10 | Attributes: 11 | connection (GeminiConnection): The connection to use for the Gemini LLM. 12 | """ 13 | 14 | connection: GeminiConnection 15 | MODEL_PREFIX = "gemini/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the Gemini LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = GeminiConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/groq.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Groq as GroqConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Groq(BaseLLM): 6 | """Groq LLM node. 7 | 8 | This class provides an implementation for the Groq Language Model node. 9 | 10 | Attributes: 11 | connection (GroqConnection | None): The connection to use for the Groq LLM. 12 | MODEL_PREFIX (str): The prefix for the Groq model name. 13 | """ 14 | connection: GroqConnection | None = None 15 | MODEL_PREFIX = "groq/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the Groq LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 
22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = GroqConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/huggingface.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import HuggingFace as HuggingFaceConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class HuggingFace(BaseLLM): 6 | """HuggingFace LLM node. 7 | 8 | This class provides an implementation for the HuggingFace Language Model node. 9 | 10 | Attributes: 11 | connection (HuggingFaceConnection | None): The connection to use for the HuggingFace LLM. 12 | MODEL_PREFIX (str): The prefix for the HuggingFace model name. 13 | """ 14 | connection: HuggingFaceConnection | None = None 15 | MODEL_PREFIX = "huggingface/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the HuggingFace LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = HuggingFaceConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/mistral.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Mistral as MistralConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | from dynamiq.prompts import MessageRole 4 | 5 | 6 | class Mistral(BaseLLM): 7 | """Mistral LLM node. 8 | 9 | This class provides an implementation for the Mistral Language Model node. 10 | 11 | Attributes: 12 | connection (MistralConnection | None): The connection to use for the Mistral LLM. 13 | MODEL_PREFIX (str): The prefix for the Mistral model name. 14 | """ 15 | connection: MistralConnection | None = None 16 | MODEL_PREFIX = "mistral/" 17 | 18 | def __init__(self, **kwargs): 19 | """Initialize the Mistral LLM node. 20 | 21 | Args: 22 | **kwargs: Additional keyword arguments. 23 | """ 24 | if kwargs.get("client") is None and kwargs.get("connection") is None: 25 | kwargs["connection"] = MistralConnection() 26 | super().__init__(**kwargs) 27 | 28 | def get_messages( 29 | self, 30 | prompt, 31 | input_data, 32 | ) -> list[dict]: 33 | """ 34 | Format and filter message parameters based on provider requirements. 35 | Override this in provider-specific subclasses. 36 | """ 37 | messages = prompt.format_messages(**dict(input_data)) 38 | formatted_messages = [] 39 | for i, msg in enumerate(messages): 40 | msg_copy = msg.copy() 41 | 42 | is_last_message = i == len(messages) - 1 43 | if is_last_message and msg_copy["role"] == MessageRole.ASSISTANT.value: 44 | msg_copy["prefix"] = True 45 | 46 | formatted_messages.append(msg_copy) 47 | 48 | return formatted_messages 49 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/nvidia_nim.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import NvidiaNIM as NvidiaNIMConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class NvidiaNIM(BaseLLM): 6 | """Nvidia NIM LLM node. 7 | 8 | This class provides an implementation for the Nvidia NIM Language Model node. 9 | 10 | Attributes: 11 | connection (Nvidia_NIM_Connection | None): The connection to use for the Nvidia NIM LLM. 
12 | MODEL_PREFIX (str): The prefix for the Nvidia NIM model name. 13 | """ 14 | 15 | connection: NvidiaNIMConnection | None = None 16 | MODEL_PREFIX = "nvidia_nim/" 17 | 18 | def __init__(self, **kwargs): 19 | """Initialize the Nvidia NIM LLM node. 20 | 21 | Args: 22 | **kwargs: Additional keyword arguments. 23 | """ 24 | if kwargs.get("client") is None and kwargs.get("connection") is None: 25 | kwargs["connection"] = NvidiaNIMConnection() 26 | super().__init__(**kwargs) 27 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/replicate.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Replicate as ReplicateConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class Replicate(BaseLLM): 6 | """Replicate LLM node. 7 | 8 | This class provides an implementation for the Replicate Language Model node. 9 | 10 | Attributes: 11 | connection (ReplicateConnection): The connection to use for the Replicate LLM. 12 | MODEL_PREFIX (str): The prefix for the Replicate model name. 13 | """ 14 | connection: ReplicateConnection 15 | MODEL_PREFIX = "replicate/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the Replicate LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = ReplicateConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/sambanova.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import SambaNova as SambaNovaConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class SambaNova(BaseLLM): 6 | """SambaNova LLM node. 7 | 8 | This class provides an implementation for the SambaNova Language Model node. 9 | 10 | Attributes: 11 | connection (SambaNovaConnection): The connection to use for the SambaNova LLM. 12 | MODEL_PREFIX (str): The prefix for the SambaNova model name. 13 | """ 14 | connection: SambaNovaConnection 15 | MODEL_PREFIX = "sambanova/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the SambaNova LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = SambaNovaConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/togetherai.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import TogetherAI as TogetherAIConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class TogetherAI(BaseLLM): 6 | """TogetherAI LLM node. 7 | 8 | This class provides an implementation for the TogetherAI Language Model node. 9 | 10 | Attributes: 11 | connection (TogetherAIConnection | None): The connection to use for the TogetherAI LLM. 12 | MODEL_PREFIX (str): The prefix for the TogetherAI model name. 13 | """ 14 | connection: TogetherAIConnection | None = None 15 | MODEL_PREFIX = "together_ai/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the TogetherAI LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 
22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = TogetherAIConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/vertexai.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import VertexAI as VertexAIConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class VertexAI(BaseLLM): 6 | """VertexAI LLM node. 7 | 8 | This class provides an implementation for the VertexAI Language Model node. 9 | 10 | Attributes: 11 | connection (VertexAIConnection | None): The connection to use for the VertexAI LLM. 12 | MODEL_PREFIX (str): The prefix for the VertexAI model name. 13 | """ 14 | 15 | connection: VertexAIConnection | None = None 16 | MODEL_PREFIX = "vertex_ai/" 17 | 18 | def __init__(self, **kwargs): 19 | """Initialize the VertexAI LLM node. 20 | 21 | Args: 22 | **kwargs: Additional keyword arguments. 23 | """ 24 | if kwargs.get("client") is None and kwargs.get("connection") is None: 25 | kwargs["connection"] = VertexAIConnection() 26 | super().__init__(**kwargs) 27 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/watsonx.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import WatsonX as WatsonXConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class WatsonX(BaseLLM): 6 | """WatsonX LLM node. 7 | 8 | This class provides an implementation for the WatsonX Language Model node. 9 | 10 | Attributes: 11 | connection (WatsonXConnection | None): The connection to use for the WatsonX LLM. 12 | MODEL_PREFIX (str): The prefix for the WatsonX model name. 13 | """ 14 | connection: WatsonXConnection | None = None 15 | MODEL_PREFIX = "watsonx_text/" 16 | 17 | def __init__(self, **kwargs): 18 | """Initialize the WatsonX LLM node. 19 | 20 | Args: 21 | **kwargs: Additional keyword arguments. 22 | """ 23 | if kwargs.get("client") is None and kwargs.get("connection") is None: 24 | kwargs["connection"] = WatsonXConnection() 25 | super().__init__(**kwargs) 26 | -------------------------------------------------------------------------------- /dynamiq/nodes/llms/xai.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import xAI as xAIConnection 2 | from dynamiq.nodes.llms.base import BaseLLM 3 | 4 | 5 | class xAI(BaseLLM): 6 | """xAI LLM node. 7 | 8 | This class provides an implementation for the xAI Language Model node. 9 | 10 | Attributes: 11 | connection (xAIConnection | None): The connection to use for the xAI LLM. 12 | MODEL_PREFIX (str): The prefix for the xAI model name. 13 | """ 14 | 15 | connection: xAIConnection | None = None 16 | MODEL_PREFIX = "xai/" 17 | 18 | def __init__(self, **kwargs): 19 | """Initialize the xAI LLM node. 20 | 21 | Args: 22 | **kwargs: Additional keyword arguments. 
23 | """ 24 | if kwargs.get("client") is None and kwargs.get("connection") is None: 25 | kwargs["connection"] = xAIConnection() 26 | super().__init__(**kwargs) 27 | -------------------------------------------------------------------------------- /dynamiq/nodes/managers.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | 3 | from dynamiq.nodes import Node 4 | 5 | 6 | class NodeManager: 7 | """A class for managing and retrieving node types.""" 8 | 9 | @staticmethod 10 | def get_node_by_type(node_type: str) -> type[Node]: 11 | """ 12 | Retrieves a node class based on the given node type. 13 | 14 | Args: 15 | node_type (str): The type of node to retrieve. 16 | 17 | Returns: 18 | type[Node]: The node class corresponding to the given type. 19 | 20 | Raises: 21 | ValueError: If the node type is not found. 22 | 23 | Example: 24 | >>> node_class = NodeManager.get_node_by_type("LLM_OPENAI") 25 | >>> isinstance(node_class, type(Node)) 26 | True 27 | """ 28 | try: 29 | entity_module, entity_name = node_type.rsplit(".", 1) 30 | imported_module = importlib.import_module(entity_module) 31 | if entity := getattr(imported_module, entity_name, None): 32 | return entity 33 | except (ModuleNotFoundError, ImportError): 34 | raise ValueError(f"Node type {node_type} not found") 35 | -------------------------------------------------------------------------------- /dynamiq/nodes/operators/__init__.py: -------------------------------------------------------------------------------- 1 | from .operators import ( 2 | Choice, 3 | ChoiceOption, 4 | Map, 5 | Pass, 6 | ) 7 | -------------------------------------------------------------------------------- /dynamiq/nodes/rankers/__init__.py: -------------------------------------------------------------------------------- 1 | from .cohere import CohereReranker 2 | from .llm import LLMDocumentRanker 3 | from .recency import TimeWeightedDocumentRanker 4 | -------------------------------------------------------------------------------- /dynamiq/nodes/retrievers/__init__.py: -------------------------------------------------------------------------------- 1 | from .chroma import ChromaDocumentRetriever 2 | from .elasticsearch import ElasticsearchDocumentRetriever 3 | from .milvus import MilvusDocumentRetriever 4 | from .pgvector import PGVectorDocumentRetriever 5 | from .pinecone import PineconeDocumentRetriever 6 | from .qdrant import QdrantDocumentRetriever 7 | from .retriever import VectorStoreRetriever 8 | from .weaviate import WeaviateDocumentRetriever 9 | -------------------------------------------------------------------------------- /dynamiq/nodes/retrievers/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from typing import Any, ClassVar, Literal 3 | 4 | from pydantic import BaseModel, Field 5 | 6 | from dynamiq.nodes.node import NodeGroup, VectorStoreNode 7 | 8 | 9 | class RetrieverInputSchema(BaseModel): 10 | embedding: list[float] = Field(..., description="Parameter to provided embedding for search.") 11 | filters: dict[str, Any] = Field( 12 | default={}, description="Parameter to provided filters to apply for retrieving specific documents." 
13 | ) 14 | top_k: int = Field(default=0, description="Parameter to provided how many documents to retrieve.") 15 | content_key: str = Field(default=None, description="Parameter to provide content key.") 16 | embedding_key: str = Field(default=None, description="Parameter to provide embedding key.") 17 | query: str = Field(default=None, description="Parameter to provide query for search.") 18 | alpha: float = Field(default=None, description="Parameter to provide alpha for hybrid retrieval.") 19 | 20 | 21 | class Retriever(VectorStoreNode, ABC): 22 | group: Literal[NodeGroup.RETRIEVERS] = NodeGroup.RETRIEVERS 23 | filters: dict[str, Any] | None = None 24 | top_k: int = 10 25 | input_schema: ClassVar[type[RetrieverInputSchema]] = RetrieverInputSchema 26 | 27 | @property 28 | def to_dict_exclude_params(self): 29 | return super().to_dict_exclude_params | {"document_retriever": True} 30 | -------------------------------------------------------------------------------- /dynamiq/nodes/splitters/__init__.py: -------------------------------------------------------------------------------- 1 | from .document import DocumentSplitter 2 | -------------------------------------------------------------------------------- /dynamiq/nodes/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from .e2b_sandbox import E2BInterpreterTool 2 | from .exa_search import ExaTool 3 | from .firecrawl import FirecrawlTool 4 | from .http_api_call import HttpApiCall, ResponseType 5 | from .jina import JinaResponseFormat, JinaScrapeTool, JinaSearchTool 6 | from .llm_summarizer import SummarizerTool 7 | from .mcp import MCPServer, MCPTool 8 | from .python import Python 9 | from .scale_serp import ScaleSerpTool 10 | from .sql_executor import SQLExecutor 11 | from .tavily import TavilyTool 12 | from .thinking_tool import ThinkingTool 13 | from .zenrows import ZenRowsTool 14 | -------------------------------------------------------------------------------- /dynamiq/nodes/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import Input, Output 2 | -------------------------------------------------------------------------------- /dynamiq/nodes/validators/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseValidator 2 | from .regex_match import MatchType, RegexMatch 3 | from .valid_choices import ValidChoices 4 | from .valid_json import ValidJSON 5 | from .valid_python import ValidPython 6 | -------------------------------------------------------------------------------- /dynamiq/nodes/validators/regex_match.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import re 3 | 4 | from dynamiq.nodes.validators.base import BaseValidator 5 | 6 | 7 | class MatchType(str, enum.Enum): 8 | FULL_MATCH = "fullmatch" 9 | SEARCH = "search" 10 | 11 | 12 | class RegexMatch(BaseValidator): 13 | """ 14 | Validates that a value matches a regular expression. 15 | 16 | Args: 17 | regex: A regular expression pattern. 18 | match_type: Match type to check input value for a regex search or full-match option. 19 | """ 20 | 21 | regex: str 22 | match_type: MatchType | None = MatchType.FULL_MATCH 23 | 24 | def validate(self, content: str): 25 | """ 26 | Validates if the provided value matches the given regular expression pattern. 27 | 28 | Args: 29 | content (str): The value to validate. 
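            Example:
                An illustrative sketch (validate returns None when the value
                matches and raises ValueError otherwise):

                >>> RegexMatch(regex=r"\d{4}", match_type=MatchType.SEARCH).validate("year 2024")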
30 | 31 | Raises: 32 | ValueError: If the provided value does not match the given pattern. 33 | """ 34 | compiled_pattern = re.compile(self.regex) 35 | match_method = getattr(compiled_pattern, self.match_type) 36 | if not match_method(content): 37 | raise ValueError( 38 | f"Value does not match the valid pattern. Value: '{content}'. Pattern: '{self.regex}'", 39 | ) 40 | -------------------------------------------------------------------------------- /dynamiq/nodes/validators/valid_choices.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from dynamiq.nodes.validators.base import BaseValidator 4 | 5 | 6 | class ValidChoices(BaseValidator): 7 | """ 8 | Class that provides functionality to check if the provided value is within the list of valid choices. 9 | 10 | Args: 11 | choices (list[Any]): A list of values representing the acceptable choices. 12 | 13 | """ 14 | 15 | choices: list[Any] | None = None 16 | 17 | def validate(self, content: Any): 18 | """ 19 | Validates if the provided value is among the acceptable choices. 20 | 21 | Args: 22 | content (Any): The value to validate. 23 | 24 | Raises: 25 | ValueError: If the provided value is not in valid choices. 26 | """ 27 | if isinstance(content, str): 28 | content = content.strip() 29 | 30 | if content not in self.choices: 31 | raise ValueError( 32 | f"Value is not in valid choices. Value: '{content}'. Choices: '{self.choices}'." 33 | ) 34 | -------------------------------------------------------------------------------- /dynamiq/nodes/validators/valid_json.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from dynamiq.nodes.validators.base import BaseValidator 4 | 5 | 6 | class ValidJSON(BaseValidator): 7 | """ 8 | Class that provides functionality to check if a value matches a basic JSON structure. 9 | """ 10 | 11 | def validate(self, content: str | dict): 12 | """ 13 | Validates that the provided value is properly formatted JSON. 14 | 15 | Args: 16 | content (str | dict): The value to check. 17 | 18 | Raises: 19 | ValueError: If the value is not properly formatted JSON. 20 | 21 | """ 22 | try: 23 | if not isinstance(content, str): 24 | content = json.dumps(content) 25 | 26 | json.loads(content) 27 | except (json.decoder.JSONDecodeError, TypeError) as error: 28 | raise ValueError( 29 | f"Value is not valid JSON. Value: '{content}'. Error details: {str(error)}" 30 | ) 31 | -------------------------------------------------------------------------------- /dynamiq/nodes/validators/valid_python.py: -------------------------------------------------------------------------------- 1 | import ast 2 | 3 | from dynamiq.nodes.validators.base import BaseValidator 4 | 5 | 6 | class ValidPython(BaseValidator): 7 | """ 8 | Class that provides functionality to check if a value conforms to basic Python syntax. 9 | """ 10 | 11 | def validate(self, content: str): 12 | """ 13 | Validates the provided Python code to determine if it is syntactically correct. 14 | 15 | Args: 16 | content (str): The Python code to validate. 17 | 18 | Raises: 19 | ValueError: Raised if the provided value is not syntactically correct Python code. 20 | """ 21 | try: 22 | ast.parse(content) 23 | except SyntaxError as e: 24 | raise ValueError( 25 | f"Value is not valid Python code. Value: '{content}'. 
Error details: {e.msg}" 26 | ) 27 | -------------------------------------------------------------------------------- /dynamiq/nodes/writers/__init__.py: -------------------------------------------------------------------------------- 1 | from .chroma import ChromaDocumentWriter 2 | from .elasticsearch import ElasticsearchDocumentWriter 3 | from .milvus import MilvusDocumentWriter 4 | from .pgvector import PGVectorDocumentWriter 5 | from .pinecone import PineconeDocumentWriter 6 | from .qdrant import QdrantDocumentWriter 7 | from .weaviate import WeaviateDocumentWriter 8 | -------------------------------------------------------------------------------- /dynamiq/nodes/writers/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from typing import ClassVar, Literal 3 | 4 | from pydantic import BaseModel, Field 5 | 6 | from dynamiq.nodes.node import NodeGroup, VectorStoreNode 7 | from dynamiq.types import Document 8 | 9 | 10 | class WriterInputSchema(BaseModel): 11 | documents: list[Document] = Field(..., description="Parameter to provide documents to write.") 12 | content_key: str = Field(default=None, description="Parameter to provide content key.") 13 | embedding_key: str = Field(default=None, description="Parameter to provide embedding key.") 14 | 15 | 16 | class Writer(VectorStoreNode, ABC): 17 | 18 | group: Literal[NodeGroup.WRITERS] = NodeGroup.WRITERS 19 | input_schema: ClassVar[type[WriterInputSchema]] = WriterInputSchema 20 | -------------------------------------------------------------------------------- /dynamiq/prompts/__init__.py: -------------------------------------------------------------------------------- 1 | from .prompts import ( 2 | BasePrompt, 3 | Message, 4 | MessageRole, 5 | Prompt, 6 | VisionMessage, 7 | VisionMessageImageContent, 8 | VisionMessageImageURL, 9 | VisionMessageTextContent, 10 | ) 11 | -------------------------------------------------------------------------------- /dynamiq/runnables/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Runnable, RunnableConfig, RunnableResult, RunnableStatus 2 | -------------------------------------------------------------------------------- /dynamiq/serializers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/serializers/__init__.py -------------------------------------------------------------------------------- /dynamiq/serializers/dumpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/serializers/dumpers/__init__.py -------------------------------------------------------------------------------- /dynamiq/serializers/loaders/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/serializers/loaders/__init__.py -------------------------------------------------------------------------------- /dynamiq/serializers/types.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, ConfigDict 2 | 3 | from dynamiq import Workflow 4 | from dynamiq.connections import BaseConnection 5 | from dynamiq.flows import Flow 
6 | from dynamiq.nodes import Node 7 | 8 | 9 | class WorkflowYamlData(BaseModel): 10 | """Data model for the Workflow YAML.""" 11 | 12 | connections: dict[str, BaseConnection] 13 | nodes: dict[str, Node] 14 | flows: dict[str, Flow] 15 | workflows: dict[str, Workflow] 16 | 17 | model_config = ConfigDict(arbitrary_types_allowed=True) 18 | -------------------------------------------------------------------------------- /dynamiq/storages/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/dynamiq/storages/__init__.py -------------------------------------------------------------------------------- /dynamiq/storages/vector/__init__.py: -------------------------------------------------------------------------------- 1 | from .chroma import ChromaVectorStore 2 | from .elasticsearch import ElasticsearchVectorStore 3 | from .milvus import MilvusVectorStore 4 | from .pgvector import PGVectorStore 5 | from .pinecone import PineconeVectorStore 6 | from .qdrant import QdrantVectorStore 7 | from .weaviate import WeaviateVectorStore 8 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/chroma/__init__.py: -------------------------------------------------------------------------------- 1 | from .chroma import ChromaVectorStore 2 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/elasticsearch/__init__.py: -------------------------------------------------------------------------------- 1 | from .elasticsearch import ElasticsearchVectorStore 2 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/exceptions.py: -------------------------------------------------------------------------------- 1 | class VectorStoreException(Exception): 2 | """ 3 | Base exception class for vector store related errors. 4 | 5 | This exception is raised when a general error occurs in the vector store operations. 6 | """ 7 | 8 | pass 9 | 10 | 11 | class VectorStoreDuplicateDocumentException(Exception): 12 | """ 13 | Exception raised when attempting to add a duplicate document to the vector store. 14 | 15 | This exception is thrown when a document with the same identifier or content is already present 16 | in the vector store and an attempt is made to add it again. 17 | """ 18 | 19 | pass 20 | 21 | 22 | class VectorStoreFilterException(Exception): 23 | """ 24 | Exception raised when there's an error in filtering operations on the vector store. 25 | 26 | This exception is thrown when an invalid filter is applied or when there's an issue with the 27 | filtering process in the vector store. 
28 | """ 29 | 30 | pass 31 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/milvus/__init__.py: -------------------------------------------------------------------------------- 1 | from .milvus import MilvusVectorStore 2 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/pgvector/__init__.py: -------------------------------------------------------------------------------- 1 | from .pgvector import PGVectorStore 2 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/pinecone/__init__.py: -------------------------------------------------------------------------------- 1 | from .pinecone import PineconeVectorStore 2 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/policies.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class DuplicatePolicy(str, Enum): 5 | """ 6 | Enumeration of policies for handling duplicate items. 7 | 8 | Attributes: 9 | NONE (str): No specific policy for handling duplicates. 10 | SKIP (str): Skip duplicate items without modifying existing ones. 11 | OVERWRITE (str): Overwrite existing items with duplicate entries. 12 | FAIL (str): Raise an error when encountering duplicate items. 13 | """ 14 | 15 | NONE = "none" 16 | SKIP = "skip" 17 | OVERWRITE = "overwrite" 18 | FAIL = "fail" 19 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/qdrant/__init__.py: -------------------------------------------------------------------------------- 1 | from .qdrant import QdrantVectorStore 2 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/utils.py: -------------------------------------------------------------------------------- 1 | def create_file_id_filter(file_id: str) -> dict: 2 | """ 3 | Create filters for Pinecone query based on file_id. 4 | 5 | Args: 6 | file_id (str): The file ID to filter by. 7 | 8 | Returns: 9 | dict: The filter conditions. 10 | """ 11 | return { 12 | "operator": "AND", 13 | "conditions": [ 14 | {"field": "file_id", "operator": "==", "value": file_id}, 15 | ], 16 | } 17 | 18 | 19 | def create_file_ids_filter(file_ids: list[str]) -> dict: 20 | """ 21 | Create filters for Pinecone query based on multiple file_ids. 22 | 23 | Args: 24 | file_ids (list[str]): The list of file IDs to filter by. 25 | 26 | Returns: 27 | dict: The filter conditions. 
28 | """ 29 | return { 30 | "operator": "AND", 31 | "conditions": [ 32 | {"field": "file_id", "operator": "in", "value": file_ids}, 33 | ], 34 | } 35 | -------------------------------------------------------------------------------- /dynamiq/storages/vector/weaviate/__init__.py: -------------------------------------------------------------------------------- 1 | from .weaviate import WeaviateRetrieverVectorStoreParams, WeaviateVectorStore, WeaviateWriterVectorStoreParams 2 | -------------------------------------------------------------------------------- /dynamiq/types/__init__.py: -------------------------------------------------------------------------------- 1 | from .document import Document, DocumentCreationMode 2 | -------------------------------------------------------------------------------- /dynamiq/types/document.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import uuid 3 | from typing import Any, Callable 4 | 5 | from pydantic import BaseModel, Field 6 | 7 | 8 | class Document(BaseModel): 9 | """Document class for Dynamiq. 10 | 11 | Attributes: 12 | id (Callable[[], Any] | str | None): Unique identifier. Defaults to UUID4 hex. 13 | content (str): Main content of the document. 14 | metadata (dict | None): Additional metadata. Defaults to None. 15 | embedding (list | None): Vector representation. Defaults to None. 16 | score (float | None): Relevance or similarity score. Defaults to None. 17 | """ 18 | id: Callable[[], Any] | str | None = Field(default_factory=lambda: uuid.uuid4().hex) 19 | content: str 20 | metadata: dict | None = None 21 | embedding: list | None = None 22 | score: float | None = None 23 | 24 | def to_dict(self, **kwargs) -> dict: 25 | """Convert the Document object to a dictionary. 26 | 27 | Returns: 28 | dict: Dictionary representation of the Document. 29 | """ 30 | return self.model_dump(**kwargs) 31 | 32 | 33 | class DocumentCreationMode(str, enum.Enum): 34 | """Enumeration for document creation modes.""" 35 | ONE_DOC_PER_FILE = "one-doc-per-file" 36 | ONE_DOC_PER_PAGE = "one-doc-per-page" 37 | ONE_DOC_PER_ELEMENT = "one-doc-per-element" 38 | -------------------------------------------------------------------------------- /dynamiq/types/llm_tool.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | class ToolFunctionParameters(BaseModel): 5 | type: str 6 | properties: dict[str, dict] 7 | required: list[str] 8 | 9 | 10 | class ToolFunction(BaseModel): 11 | name: str 12 | description: str 13 | parameters: ToolFunctionParameters 14 | 15 | 16 | class Tool(BaseModel): 17 | type: str = "function" 18 | function: ToolFunction 19 | -------------------------------------------------------------------------------- /dynamiq/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .duration import format_duration 2 | from .utils import JsonWorkflowEncoder, format_value, generate_uuid, is_called_from_async_context, merge, serialize 3 | -------------------------------------------------------------------------------- /dynamiq/utils/chat.py: -------------------------------------------------------------------------------- 1 | def format_chat_history(chat_history: list[dict[str, str]]) -> str: 2 | """Format chat history for the orchestrator. 3 | 4 | Args: 5 | chat_history (list[dict[str, str]]): List of chat entries. 6 | 7 | Returns: 8 | str: Formatted chat history. 
9 | """ 10 | formatted_history = "" 11 | for entry in chat_history: 12 | role = entry["role"].title() 13 | content = entry["content"] 14 | formatted_history += f"{role}: {content}\n" 15 | return formatted_history 16 | -------------------------------------------------------------------------------- /dynamiq/utils/duration.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | 4 | def format_duration(start: datetime, end: datetime) -> str: 5 | """ 6 | Format the duration between two datetime objects into a human-readable string. 7 | 8 | This function calculates the time difference between the start and end datetimes and 9 | returns a formatted string representing the duration in milliseconds, seconds, minutes, 10 | or hours, depending on the length of the duration. 11 | 12 | Args: 13 | start (datetime): The starting datetime. 14 | end (datetime): The ending datetime. 15 | 16 | Returns: 17 | str: A formatted string representing the duration. 18 | - For durations less than 1 second: "Xms" (milliseconds) 19 | - For durations between 1 second and 1 minute: "Xs" (seconds) 20 | - For durations between 1 minute and 1 hour: "Xm" (minutes) 21 | - For durations of 1 hour or more: "Xh" (hours) 22 | 23 | Examples: 24 | >>> from datetime import datetime, timedelta 25 | >>> start = datetime(2023, 1, 1, 12, 0, 0) 26 | >>> print(format_duration(start, start + timedelta(milliseconds=500))) 27 | 500ms 28 | >>> print(format_duration(start, start + timedelta(seconds=45))) 29 | 45.0s 30 | >>> print(format_duration(start, start + timedelta(minutes=30))) 31 | 30.0m 32 | >>> print(format_duration(start, start + timedelta(hours=2))) 33 | 2.0h 34 | """ 35 | delta = end - start 36 | total_seconds = delta.total_seconds() 37 | 38 | if total_seconds < 1: 39 | return f"{total_seconds * 1000:.0f}ms" 40 | elif total_seconds < 60: 41 | return f"{round(total_seconds, 1)}s" 42 | elif total_seconds < 3600: 43 | return f"{round(total_seconds / 60, 1)}m" 44 | else: 45 | return f"{round(total_seconds / 3600, 1)}h" 46 | -------------------------------------------------------------------------------- /dynamiq/utils/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Any 3 | 4 | from dynamiq.utils.logger import logger 5 | 6 | 7 | def get_env_var(var_name: str, default_value: Any = None): 8 | """Retrieves the value of an environment variable. 9 | 10 | This function attempts to retrieve the value of the specified environment variable. If the 11 | variable is not found and no default value is provided, it raises a ValueError. 12 | 13 | Args: 14 | var_name (str): The name of the environment variable to retrieve. 15 | default_value (str, optional): The default value to return if the environment variable 16 | is not found. Defaults to None. 17 | 18 | Returns: 19 | str: The value of the environment variable. 20 | 21 | Raises: 22 | ValueError: If the environment variable is not found and no default value is provided. 23 | 24 | Examples: 25 | >>> get_env_var("HOME") 26 | '/home/user' 27 | >>> get_env_var("NONEXISTENT_VAR", "default") 28 | 'default' 29 | >>> get_env_var("NONEXISTENT_VAR") 30 | Traceback (most recent call last): 31 | ... 32 | ValueError: Environment variable 'NONEXISTENT_VAR' not found. 
33 | """ 34 | value = os.environ.get(var_name, default_value) 35 | 36 | if value is None: 37 | logger.warning(f"Environment variable '{var_name}' not found") 38 | 39 | return value 40 | -------------------------------------------------------------------------------- /dynamiq/utils/feedback.py: -------------------------------------------------------------------------------- 1 | from dynamiq.callbacks.streaming import StreamingEventMessage 2 | from dynamiq.runnables import RunnableConfig 3 | from dynamiq.types.feedback import FeedbackMethod 4 | 5 | 6 | def send_message( 7 | event_message: StreamingEventMessage, 8 | config: RunnableConfig, 9 | feedback_method: FeedbackMethod = FeedbackMethod.STREAM, 10 | ) -> None: 11 | """Emits message 12 | 13 | Args: 14 | message (StreamingEventMessage): Message to send. 15 | config (RunnableConfig): Configuration for the runnable. 16 | feedback_method (FeedbackMethod, optional): Sets up where message is sent. Defaults to "stream". 17 | """ 18 | 19 | match feedback_method: 20 | case FeedbackMethod.CONSOLE: 21 | print(event_message.data) 22 | case FeedbackMethod.STREAM: 23 | for callback in config.callbacks: 24 | callback.on_node_execute_stream({}, event=event_message) 25 | -------------------------------------------------------------------------------- /dynamiq/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | DEBUG = os.getenv("DEBUG", False) 5 | 6 | logging.basicConfig( 7 | format="%(asctime)s - %(levelname)s - %(message)s", 8 | datefmt="%Y-%m-%d %H:%M:%S", 9 | level=logging.INFO, 10 | ) 11 | 12 | 13 | litellm_logger = logging.getLogger("LiteLLM") 14 | litellm_logger.setLevel(logging.ERROR) 15 | 16 | e2b_logger = logging.getLogger("e2b") 17 | e2b_logger.setLevel(logging.ERROR) 18 | 19 | httpx_logger = logging.getLogger("httpx") 20 | httpx_logger.setLevel(logging.ERROR) 21 | 22 | loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] 23 | openai_loggers = [logger.setLevel(logging.ERROR) for logger in loggers] 24 | 25 | logger = logging.getLogger(__name__) 26 | logger.setLevel(logging.DEBUG if DEBUG else logging.INFO) 27 | -------------------------------------------------------------------------------- /dynamiq/workflow/__init__.py: -------------------------------------------------------------------------------- 1 | from .workflow import Workflow 2 | -------------------------------------------------------------------------------- /examples/Makefile: -------------------------------------------------------------------------------- 1 | run-unstructured-local: 2 | docker run -p 8000:8000 -e UNSTRUCTURED_MEMORY_FREE_MINIMUM_MB=0 -d --rm --name unstructured-api downloads.unstructured.io/unstructured-io/unstructured-api:latest --port 8000 --host 0.0.0.0 3 | 4 | stop-unstructured-local: 5 | docker stop unstructured-api 6 | 7 | run-chroma-local: 8 | docker pull chromadb/chroma 9 | docker run -p 8090:8000 chromadb/chroma 10 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/__init__.py -------------------------------------------------------------------------------- /examples/components/agents/orchestrators/graph_orchestrator/graph_orchestrator_yaml.py: 
-------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from dynamiq import Workflow, runnables 6 | from dynamiq.callbacks import TracingCallbackHandler 7 | from dynamiq.connections.managers import get_connection_manager 8 | from dynamiq.utils import JsonWorkflowEncoder 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | INPUT_DATA = """ 14 | How causality is incorporated in Shapley values. 15 | """ 16 | 17 | 18 | def run_workflow(): 19 | graph_orchestrator_yaml_file_path = os.path.join(os.path.dirname(__file__), "graph_orchestrator_wf.yaml") 20 | tracing = TracingCallbackHandler() 21 | with get_connection_manager() as cm: 22 | # Load the workflow from the YAML file, parse and init components during parsing 23 | wf = Workflow.from_yaml_file( 24 | file_path=graph_orchestrator_yaml_file_path, connection_manager=cm, init_components=True 25 | ) 26 | wf.run( 27 | input_data={"input": INPUT_DATA}, 28 | config=runnables.RunnableConfig(callbacks=[tracing]), 29 | ) 30 | # Check if traces dumped without errors 31 | json.dumps( 32 | {"runs": [run.to_dict() for run in tracing.runs.values()]}, 33 | cls=JsonWorkflowEncoder, 34 | ) 35 | 36 | logger.info(f"Workflow {wf.id} finished. Results:") 37 | for node_id, result in wf.flow._results.items(): 38 | logger.info(f"Node {node_id}-{wf.flow._node_by_id[node_id].name}: \n{result}") 39 | 40 | return tracing.runs 41 | 42 | 43 | if __name__ == "__main__": 44 | run_workflow() 45 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/intermediate_streaming/adaptive_orchestrator/app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import streamlit as st 4 | 5 | from examples.components.agents.streaming.intermediate_streaming.adaptive_orchestrator.adaptive_orchestrator import ( 6 | run_orchestrator_async, 7 | ) 8 | 9 | if __name__ == "__main__": 10 | st.markdown("# Research Orchestrator") 11 | 12 | with st.form("my_form"): 13 | request = st.text_input("What is your request", placeholder="Research on development of AI in New York.") 14 | 15 | submitted = st.form_submit_button("Submit") 16 | 17 | if submitted: 18 | with st.status("🤖 **Agents at work...**", state="running", expanded=True) as status: 19 | with st.container(height=250, border=False): 20 | result = asyncio.run(run_orchestrator_async(request)) 21 | status.update(label="✅ Result is ready!", state="complete", expanded=False) 22 | 23 | st.subheader("Generated result", anchor=False, divider="rainbow") 24 | st.markdown(result) 25 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/intermediate_streaming/agents/app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import streamlit as st 4 | from agent import run_agent_async 5 | 6 | if __name__ == "__main__": 7 | st.markdown("# Research Agent") 8 | 9 | with st.form("my_form"): 10 | request = st.text_input("What is your request", placeholder="Research on development of AI in New York.") 11 | 12 | submitted = st.form_submit_button("Submit") 13 | 14 | if submitted: 15 | with st.status("🤖 **Agents at work...**", state="running", expanded=True) as status: 16 | with st.container(height=250, border=False): 17 | result = asyncio.run(run_agent_async(request)) 18 | status.update(label="✅ Result is ready!", state="complete", 
expanded=False) 19 | 20 | st.subheader("Generated result", anchor=False, divider="rainbow") 21 | st.markdown(result) 22 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/intermediate_streaming/graph_orchestrator/app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import streamlit as st 4 | 5 | from examples.components.agents.streaming.intermediate_streaming.graph_orchestrator.graph_orchestrator import ( 6 | run_orchestrator_async, 7 | ) 8 | 9 | if __name__ == "__main__": 10 | st.markdown("# Email Write Orchestrator") 11 | 12 | with st.form("my_form"): 13 | request = st.text_input("What is your request", placeholder="Write email about party invitation.") 14 | 15 | submitted = st.form_submit_button("Submit") 16 | 17 | if submitted: 18 | with st.status("🤖 **Agents at work...**", state="running", expanded=True) as status: 19 | with st.container(height=250, border=False): 20 | result = asyncio.run(run_orchestrator_async(request)) 21 | status.update(label="✅ Result is ready!", state="complete", expanded=False) 22 | 23 | st.subheader("Generated result", anchor=False, divider="rainbow") 24 | st.markdown(result) 25 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/intermediate_streaming/linear_orchestrator/app.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import streamlit as st 4 | 5 | from examples.components.agents.streaming.intermediate_streaming.linear_orchestrator.linear_orchestrator import ( 6 | run_orchestrator_async, 7 | ) 8 | 9 | if __name__ == "__main__": 10 | st.markdown("# Research Orchestrator") 11 | 12 | with st.form("my_form"): 13 | request = st.text_input("What is your request", placeholder="Research on development of AI in New York.") 14 | 15 | submitted = st.form_submit_button("Submit") 16 | 17 | if submitted: 18 | with st.status("🤖 **Agents at work...**", state="running", expanded=True) as status: 19 | with st.container(height=250, border=False): 20 | result = asyncio.run(run_orchestrator_async(request)) 21 | status.update(label="✅ Result is ready!", state="complete", expanded=False) 22 | 23 | st.subheader("Generated result", anchor=False, divider="rainbow") 24 | st.markdown(result) 25 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/intermediate_streaming/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/components/agents/streaming/intermediate_streaming/app.py 4 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/orchestrator/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/components/agents/streaming/orchestrator/app.py 4 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/react/app.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import streamlit as st 4 | from backend import generate_agent_response, setup_agent 5 | 6 | st.sidebar.title("Agent Configuration") 7 | agent_role = st.sidebar.text_input("Agent Role", "helpful assistant") 8 | streaming_enabled = 
st.sidebar.checkbox("Enable Streaming", value=False) 9 | streaming_tokens = st.sidebar.checkbox("Enable Streaming Tokens", value=False) 10 | 11 | streaming_mode = st.sidebar.radio("Streaming Mode", options=["Steps", "Answer"], index=0)  # Default to "Steps" 12 | 13 | if "agent" not in st.session_state or st.sidebar.button("Apply Changes"): 14 | st.session_state.agent = setup_agent(agent_role, streaming_enabled, streaming_mode, streaming_tokens) 15 | st.session_state.messages = [] 16 | 17 | st.title("React Agent Chat") 18 | st.write("Ask questions and get responses from an AI assistant.") 19 | 20 | for message in st.session_state.messages: 21 | with st.chat_message(message["role"]): 22 | st.markdown(message["content"]) 23 | 24 | if user_input := st.chat_input("You: "): 25 | st.session_state.messages.append({"role": "user", "content": user_input}) 26 | with st.chat_message("user"): 27 | st.markdown(user_input) 28 | 29 | with st.chat_message("assistant"): 30 | message_placeholder = st.empty() 31 | full_response = "" 32 | 33 | st.session_state.messages.append({"role": "assistant", "content": ""}) 34 | 35 | for chunk in generate_agent_response(st.session_state.agent, user_input): 36 | full_response += chunk 37 | message_placeholder.markdown(full_response + "▌") 38 | 39 | st.session_state.messages[-1]["content"] = full_response 40 | 41 | time.sleep(0.05) 42 | message_placeholder.markdown(full_response) 43 | 44 | st.session_state.messages[-1]["content"] = full_response 45 | 46 | st.session_state["new_input"] = "" 47 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/react/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/components/agents/streaming/react/app.py 4 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/reflection/app.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import streamlit as st 4 | from backend import generate_agent_response, setup_agent 5 | 6 | st.sidebar.title("Agent Configuration") 7 | agent_role = st.sidebar.text_input("Agent Role", "helpful assistant") 8 | streaming_enabled = st.sidebar.checkbox("Enable Streaming", value=False) 9 | 10 | streaming_mode = st.sidebar.radio("Streaming Mode", options=["Answer", "Steps"], index=0)  # Default to "Answer" 11 | 12 | if "agent" not in st.session_state or st.sidebar.button("Apply Changes"): 13 | st.session_state.agent = setup_agent(agent_role, streaming_enabled, streaming_mode) 14 | st.session_state.messages = [] 15 | 16 | st.title("Reflection Agent Chat") 17 | st.write("Ask questions and get responses from an AI assistant.") 18 | 19 | for message in st.session_state.messages: 20 | with st.chat_message(message["role"]): 21 | st.markdown(message["content"]) 22 | 23 | if user_input := st.chat_input("You: "): 24 | st.session_state.messages.append({"role": "user", "content": user_input}) 25 | with st.chat_message("user"): 26 | st.markdown(user_input) 27 | 28 | with st.chat_message("assistant"): 29 | message_placeholder = st.empty() 30 | full_response = "" 31 | 32 | st.session_state.messages.append({"role": "assistant", "content": ""}) 33 | 34 | for chunk in generate_agent_response(st.session_state.agent, user_input): 35 | full_response += chunk 36 | message_placeholder.markdown(full_response + "▌") 37 | 38 | 
st.session_state.messages[-1]["content"] = full_response 39 | 40 | time.sleep(0.05) 41 | message_placeholder.markdown(full_response) 42 | 43 | st.session_state.messages[-1]["content"] = full_response 44 | 45 | st.session_state["new_input"] = "" 46 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/reflection/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/components/agents/streaming/reflection/app.py 4 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/simple/app.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import streamlit as st 4 | from backend import generate_agent_response, setup_agent 5 | 6 | st.sidebar.title("Agent Configuration") 7 | agent_role = st.sidebar.text_input("Agent Role", "helpful assistant") 8 | streaming_enabled = st.sidebar.checkbox("Enable Streaming", value=False) 9 | streaming_tokens = st.sidebar.checkbox("Enable Streaming Tokens", value=False) 10 | 11 | streaming_mode = st.sidebar.radio("Streaming Mode", options=["Answer", "Steps"], index=0) # Default to "Answer" 12 | 13 | if "agent" not in st.session_state or st.sidebar.button("Apply Changes"): 14 | st.session_state.agent = setup_agent(agent_role, streaming_enabled, streaming_mode, streaming_tokens) 15 | st.session_state.messages = [] 16 | 17 | st.title("Simple Agent Chat") 18 | st.write("Ask questions and get responses from an AI assistant.") 19 | 20 | for message in st.session_state.messages: 21 | with st.chat_message(message["role"]): 22 | st.markdown(message["content"]) 23 | 24 | if user_input := st.chat_input("You: "): 25 | st.session_state.messages.append({"role": "user", "content": user_input}) 26 | with st.chat_message("user"): 27 | st.markdown(user_input) 28 | 29 | with st.chat_message("assistant"): 30 | message_placeholder = st.empty() 31 | full_response = "" 32 | 33 | st.session_state.messages.append({"role": "assistant", "content": ""}) 34 | 35 | for chunk in generate_agent_response(st.session_state.agent, user_input): 36 | full_response += chunk 37 | message_placeholder.markdown(full_response + "▌") 38 | 39 | st.session_state.messages[-1]["content"] = full_response 40 | 41 | time.sleep(0.05) 42 | message_placeholder.markdown(full_response) 43 | 44 | st.session_state.messages[-1]["content"] = full_response 45 | 46 | st.session_state["new_input"] = "" 47 | -------------------------------------------------------------------------------- /examples/components/agents/streaming/simple/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/components/agents/streaming/simple/app.py 4 | -------------------------------------------------------------------------------- /examples/components/core/dag/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/dag/__init__.py -------------------------------------------------------------------------------- /examples/components/core/dag/agent_memory_dag.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3 | type: dynamiq.connections.OpenAI 4 | pinecone-conn: 5 | type:
dynamiq.connections.Pinecone 6 | api_key: ${oc.env:PINECONE_API_KEY} 7 | 8 | nodes: 9 | memory-agent: 10 | type: dynamiq.nodes.agents.simple.SimpleAgent 11 | llm: 12 | id: memory-agent-llm 13 | type: dynamiq.nodes.llms.OpenAI 14 | connection: openai-conn 15 | model: gpt-3.5-turbo 16 | memory: 17 | backend: 18 | type: dynamiq.memory.backends.Pinecone 19 | connection: pinecone-conn 20 | index_type: serverless 21 | cloud: ${oc.env:PINECONE_CLOUD} 22 | region: ${oc.env:PINECONE_REGION} 23 | embedder: 24 | type: dynamiq.nodes.embedders.OpenAIDocumentEmbedder 25 | connection: openai-conn 26 | model: text-embedding-3-small 27 | search_limit: 3 28 | role: "a helpful assistant capable of retaining context and answering questions effectively" 29 | 30 | flows: 31 | memory-agent-flow: 32 | name: Memory Agent Flow 33 | nodes: 34 | - memory-agent 35 | 36 | workflows: 37 | memory-agent-workflow: 38 | flow: memory-agent-flow 39 | version: 1 40 | -------------------------------------------------------------------------------- /examples/components/core/dag/agent_memory_dynamo_db_dag.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3 | type: dynamiq.connections.OpenAI 4 | api_key: ${oc.env:OPENAI_API_KEY} 5 | organization: ${oc.env:OPENAI_ORG_ID} 6 | aws-conn: 7 | type: dynamiq.connections.AWS 8 | access_key_id: ${oc.env:AWS_ACCESS_KEY_ID} 9 | secret_access_key: ${oc.env:AWS_SECRET_ACCESS_KEY} 10 | region_name: ${oc.env:AWS_REGION} 11 | 12 | nodes: 13 | chat-agent: 14 | type: dynamiq.nodes.agents.simple.SimpleAgent 15 | llm: 16 | id: chat-agent-llm 17 | type: dynamiq.nodes.llms.OpenAI 18 | connection: openai-conn 19 | model: gpt-4o-mini 20 | memory: 21 | backend: 22 | type: dynamiq.memory.backends.DynamoDB 23 | connection: aws-conn 24 | table_name: "messages" 25 | create_table_if_not_exists: true 26 | message_limit: 50 27 | role: "Helpful assistant focusing on the current conversation." 28 | 29 | flows: 30 | chat-flow: 31 | name: "Chat Flow with DynamoDB Memory" 32 | nodes: 33 | - chat-agent 34 | 35 | workflows: 36 | dynamodb-chat-workflow: 37 | flow: chat-flow 38 | version: 1 39 | -------------------------------------------------------------------------------- /examples/components/core/dag/agent_rag.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from dynamiq import Workflow, runnables 6 | from dynamiq.callbacks import TracingCallbackHandler 7 | from dynamiq.connections.managers import get_connection_manager 8 | from dynamiq.utils import JsonWorkflowEncoder 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | INPUT_DATA = "Dubai customs rules" 13 | 14 | if __name__ == "__main__": 15 | yaml_file_path = os.path.join(os.path.dirname(__file__), "agent_rag.yaml") 16 | tracing = TracingCallbackHandler() 17 | 18 | with get_connection_manager() as cm: 19 | wf = Workflow.from_yaml_file(file_path=yaml_file_path, connection_manager=cm, init_components=True) 20 | 21 | result_1 = wf.run( 22 | input_data={"input": INPUT_DATA}, 23 | config=runnables.RunnableConfig(callbacks=[tracing]), 24 | ) 25 | logger.info(f"Result 1: {result_1.output}") 26 | 27 | trace_dump = json.dumps( 28 | {"runs": [run.to_dict() for run in tracing.runs.values()]}, 29 | cls=JsonWorkflowEncoder, 30 | ) 31 | logger.info("Trace logs serialized successfully.") 32 | 33 | logger.info(f"Workflow {wf.id} finished. 
Results:") 34 | for node_id, result in wf.flow._results.items(): 35 | logger.info(f"Node {node_id}-{wf.flow._node_by_id[node_id].name}: \n{result}") 36 | -------------------------------------------------------------------------------- /examples/components/core/dag/agent_rag.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3 | type: dynamiq.connections.OpenAI 4 | api_key: ${oc.env:OPENAI_API_KEY} 5 | 6 | weaviate-conn: 7 | type: dynamiq.connections.Weaviate 8 | api_key: ${oc.env:WEAVIATE_API_KEY} 9 | url: ${oc.env:WEAVIATE_URL} 10 | 11 | nodes: 12 | agent-rag: 13 | type: dynamiq.nodes.agents.react.ReActAgent 14 | llm: 15 | id: agent-rag-llm 16 | type: dynamiq.nodes.llms.OpenAI 17 | connection: openai-conn 18 | model: gpt-4o 19 | tools: 20 | - id: agent-rag-tool 21 | type: dynamiq.nodes.retrievers.VectorStoreRetriever 22 | text_embedder: 23 | type: dynamiq.nodes.embedders.OpenAITextEmbedder 24 | name: OpenAI Text Embedder 25 | connection: openai-conn 26 | model: text-embedding-3-small 27 | document_retriever: 28 | type: dynamiq.nodes.retrievers.WeaviateDocumentRetriever 29 | name: Weaviate Document Retriever 30 | connection: weaviate-conn 31 | index_name: default 32 | role: AI assistant with knowledge about Dubai city, goal is to provide well-explained final answers 33 | 34 | flows: 35 | rag-agent-flow: 36 | name: Agent RAG Flow 37 | nodes: 38 | - agent-rag 39 | 40 | workflows: 41 | rag-agent-workflow: 42 | flow: rag-agent-flow 43 | version: 1 44 | -------------------------------------------------------------------------------- /examples/components/core/dag/agent_with_tool_params.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3 | type: dynamiq.connections.OpenAI 4 | api_key: ${oc.env:OPENAI_API_KEY} 5 | 6 | cat-api-conn: 7 | type: dynamiq.connections.Http 8 | method: GET 9 | url: https://catfact.ninja/fact 10 | 11 | dog-api-conn: 12 | type: dynamiq.connections.Http 13 | method: GET 14 | url: https://catfact.ninja/fact 15 | 16 | nodes: 17 | animal-facts-agent: 18 | type: dynamiq.nodes.agents.react.ReActAgent 19 | name: Animal Facts Agent 20 | llm: 21 | id: agent-llm 22 | type: dynamiq.nodes.llms.OpenAI 23 | connection: openai-conn 24 | model: gpt-4o 25 | tools: 26 | - id: cat-facts-api-456 27 | type: dynamiq.nodes.tools.http_api_call.HttpApiCall 28 | name: CatFactApi 29 | connection: cat-api-conn 30 | success_codes: [200, 201] 31 | timeout: 60 32 | response_type: json 33 | params: 34 | limit: 10 35 | description: Gets a random cat fact from the CatFact API 36 | 37 | - id: dog-facts-api-789 38 | type: dynamiq.nodes.tools.http_api_call.HttpApiCall 39 | name: DogFactApi 40 | connection: dog-api-conn 41 | success_codes: [200, 201] 42 | timeout: 60 43 | response_type: json 44 | params: 45 | limit: 10 46 | description: Gets a random dog fact (this demo connection reuses the CatFact API endpoint) 47 | role: Helps users retrieve interesting animal facts 48 | tool_output_max_length: 64000 49 | tool_output_truncate_enabled: true 50 | 51 | flows: 52 | animal-facts-flow: 53 | name: Animal Facts Flow 54 | nodes: 55 | - animal-facts-agent 56 | 57 | workflows: 58 | animal-facts-workflow: 59 | flow: animal-facts-flow 60 | version: 1 61 | -------------------------------------------------------------------------------- /examples/components/core/dag/csv_embedding_flow.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3
| type: dynamiq.connections.OpenAI 4 | api_key: ${oc.env:OPENAI_API_KEY} 5 | 6 | nodes: 7 | csv-converter: 8 | type: dynamiq.nodes.converters.CSVConverter 9 | name: CSV Converter 10 | delimiter: "," 11 | content_column: "Target" 12 | metadata_columns: 13 | - "Feature_1" 14 | - "Feature_2" 15 | 16 | document-embedder: 17 | type: dynamiq.nodes.embedders.OpenAIDocumentEmbedder 18 | name: Document Embedder 19 | connection: openai-conn 20 | model: text-embedding-3-small 21 | depends: 22 | - node: csv-converter 23 | input_transformer: 24 | selector: 25 | "documents": "$.csv-converter.output.documents" 26 | 27 | flows: 28 | csv-embedding-flow: 29 | name: CSV Embedding Flow 30 | nodes: 31 | - csv-converter 32 | - document-embedder 33 | 34 | workflows: 35 | csv-embedding-workflow: 36 | flow: csv-embedding-flow 37 | version: 1 38 | -------------------------------------------------------------------------------- /examples/components/core/dag/dag_llm.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: # id 3 | type: dynamiq.connections.OpenAI 4 | api_key: ${oc.env:OPENAI_API_KEY} 5 | 6 | prompt_template: | 7 | Please answer the following question 8 | **User Question:** {{query}} 9 | Answer: 10 | 11 | prompts: 12 | openai-ai-prompt: 13 | messages: 14 | - role: user 15 | content: 16 | - type: text 17 | text: "What’s in this image?" 18 | - type: image_url 19 | image_url: 20 | url: "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" 21 | 22 | 23 | nodes: 24 | openai-1: # id 25 | type: dynamiq.nodes.llms.OpenAI 26 | name: OpenAI-1 27 | model: gpt-4o 28 | connection: openai-conn 29 | prompt: openai-ai-prompt 30 | error_handling: 31 | timeout_seconds: 60 32 | retry_interval_seconds: 1 33 | max_retries: 0 34 | backoff_rate: 1 35 | input_transformer: 36 | path: null 37 | selector: 38 | "query": "$.query" 39 | output_transformer: 40 | path: null 41 | selector: 42 | "answer": "$.content" 43 | caching: 44 | enabled: false 45 | streaming: 46 | enabled: false 47 | 48 | 49 | flows: 50 | retrieval-flow: # id 51 | name: LLM answering flow 52 | nodes: 53 | - openai-1 54 | 55 | 56 | # Could specify multiple workflows in single yaml 57 | workflows: 58 | 59 | retrieval-workflow: # id 60 | flow: retrieval-flow 61 | -------------------------------------------------------------------------------- /examples/components/core/dag/dag_llm_structured_output.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from dynamiq import Workflow, runnables 6 | from dynamiq.callbacks import TracingCallbackHandler 7 | from dynamiq.connections.managers import get_connection_manager 8 | from dynamiq.utils import JsonWorkflowEncoder 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | if __name__ == "__main__": 14 | dag_yaml_file_path = os.path.join(os.path.dirname(__file__), "dag_llm_structured_output.yaml") 15 | tracing = TracingCallbackHandler() 16 | with get_connection_manager() as cm: 17 | wf = Workflow.from_yaml_file(file_path=dag_yaml_file_path, connection_manager=cm, init_components=True) 18 | wf.run( 19 | input_data={}, 20 | config=runnables.RunnableConfig(callbacks=[tracing]), 21 | ) 22 | _ = json.dumps( 23 | {"runs": [run.to_dict() for run in tracing.runs.values()]}, 24 | cls=JsonWorkflowEncoder, 25 | ) 26 | 27 | logger.info(f"Workflow {wf.id} finished. 
Results:") 28 | for node_id, result in wf.flow._results.items(): 29 | logger.info(f"Node {node_id}-{wf.flow._node_by_id[node_id].name}: \n{result}") 30 | -------------------------------------------------------------------------------- /examples/components/core/dag/dag_llm_structured_output.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3 | type: dynamiq.connections.OpenAI 4 | 5 | nodes: 6 | openai-1: # id 7 | type: dynamiq.nodes.llms.OpenAI 8 | name: OpenAI-1 9 | model: gpt-4o-mini 10 | connection: openai-conn 11 | prompt: 12 | id: openai-ai-prompt 13 | messages: 14 | - role: system 15 | content: "Extract the document information in JSON format with fields: title, abstract, tags." 16 | - role: user 17 | content: "I like reading the book 'Harry Potter 7' which contains text about a young magical boy and magic. It can be described as fiction, story, children's literature." 18 | 19 | error_handling: 20 | timeout_seconds: 60 21 | retry_interval_seconds: 1 22 | max_retries: 0 23 | backoff_rate: 1 24 | response_format: 25 | type: "json_object" 26 | output_transformer: 27 | path: null 28 | selector: 29 | "ai_1": "$.content" 30 | caching: 31 | enabled: false 32 | streaming: 33 | enabled: false 34 | 35 | flows: 36 | agent-flow: 37 | name: Agent Flow 38 | nodes: 39 | - openai-1 40 | 41 | workflows: 42 | agent-workflow: 43 | flow: agent-flow 44 | version: 1 45 | -------------------------------------------------------------------------------- /examples/components/core/dag/dag_llm_tools.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: # id 3 | type: dynamiq.connections.OpenAI 4 | api_key: ${oc.env:OPENAI_API_KEY} 5 | 6 | prompt_template: | 7 | Please answer the following question 8 | **User Question:** {{query}} 9 | Answer: 10 | 11 | prompts: 12 | openai-ai-prompt: 13 | messages: 14 | - role: user 15 | content: 16 | - type: text 17 | text: "What time is it in San Francisco, Tokyo, and Paris?" 18 | tools: 19 | - type: "function" 20 | function: 21 | name: "get_current_time" 22 | description: "Get the current time in a given location" 23 | parameters: 24 | type: "object" 25 | required: 26 | - "location" 27 | properties: 28 | location: 29 | type: "string" 30 | description: "The city, e.g. 
San Francisco" 31 | 32 | 33 | nodes: 34 | openai-1: # id 35 | type: dynamiq.nodes.llms.OpenAI 36 | name: OpenAI-1 37 | model: gpt-4o 38 | connection: openai-conn 39 | prompt: openai-ai-prompt 40 | error_handling: 41 | timeout_seconds: 60 42 | retry_interval_seconds: 1 43 | max_retries: 0 44 | backoff_rate: 1 45 | input_transformer: 46 | path: null 47 | selector: 48 | "query": "$.query" 49 | output_transformer: 50 | path: null 51 | selector: 52 | "answer": "$.content" 53 | "tool_calls": "$.tool_calls" 54 | caching: 55 | enabled: false 56 | streaming: 57 | enabled: false 58 | 59 | 60 | flows: 61 | retrieval-flow: # id 62 | name: LLM answering flow 63 | nodes: 64 | - openai-1 65 | 66 | 67 | # Could specify multiple workflows in single yaml 68 | workflows: 69 | 70 | retrieval-workflow: # id 71 | flow: retrieval-flow 72 | -------------------------------------------------------------------------------- /examples/components/core/dag/dag_yaml.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import os 4 | 5 | from dynamiq import Workflow, runnables 6 | from dynamiq.connections.managers import get_connection_manager 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | def run_sync(): 12 | dag_yaml_file_path = os.path.join(os.path.dirname(__file__), "dag.yaml") 13 | with get_connection_manager() as cm: 14 | # Load the workflow from the YAML file, parse and init components during parsing 15 | wf = Workflow.from_yaml_file(file_path=dag_yaml_file_path, connection_manager=cm, init_components=True) 16 | wf.run( 17 | input_data={"date": "4 May 2024", "next_date": "6 May 2024"}, 18 | config=runnables.RunnableConfig(callbacks=[]), 19 | ) 20 | logger.info(f"Workflow {wf.id} sync finished. Results: ") 21 | for node_id, result in wf.flow._results.items(): 22 | logger.info(f"Node {node_id}-{wf.flow._node_by_id[node_id].name}: {result}") 23 | 24 | 25 | async def run_async(): 26 | dag_yaml_file_path = os.path.join(os.path.dirname(__file__), "dag.yaml") 27 | with get_connection_manager() as cm: 28 | # Load the workflow from the YAML file, parse and init components during parsing 29 | wf = Workflow.from_yaml_file(file_path=dag_yaml_file_path, connection_manager=cm, init_components=True) 30 | await wf.run( 31 | input_data={"date": "4 May 2024", "next_date": "6 May 2024"}, 32 | config=runnables.RunnableConfig(callbacks=[]), 33 | ) 34 | logger.info(f"Workflow {wf.id} async finished. 
Results: ") 35 | for node_id, result in wf.flow._results.items(): 36 | logger.info(f"Node {node_id}-{wf.flow._node_by_id[node_id].name}: {result}") 37 | 38 | 39 | if __name__ == "__main__": 40 | run_sync() 41 | asyncio.run(run_async()) 42 | -------------------------------------------------------------------------------- /examples/components/core/dag/orchestrator_dag.yaml: -------------------------------------------------------------------------------- 1 | connections: 2 | openai-conn: 3 | type: dynamiq.connections.OpenAI 4 | scale-serp-conn: 5 | type: dynamiq.connections.ScaleSerp 6 | zen-rows-conn: 7 | type: dynamiq.connections.ZenRows 8 | 9 | nodes: 10 | literature-orchestrator: 11 | type: dynamiq.nodes.agents.orchestrators.AdaptiveOrchestrator 12 | manager: 13 | id: adaptive-manager-agent 14 | type: dynamiq.nodes.agents.orchestrators.AdaptiveAgentManager 15 | llm: 16 | id: manager-llm 17 | type: dynamiq.nodes.llms.OpenAI 18 | connection: openai-conn 19 | model: gpt-3.5-turbo 20 | agents: 21 | - id: agent-researcher 22 | type: dynamiq.nodes.agents.ReActAgent 23 | llm: 24 | id: agent-researcher-llm 25 | type: dynamiq.nodes.llms.OpenAI 26 | connection: openai-conn 27 | model: gpt-3.5-turbo 28 | tools: 29 | - id: search-scale-serp 30 | type: dynamiq.nodes.tools.ScaleSerpTool 31 | connection: scale-serp-conn 32 | - id: scrape-zen-rows 33 | type: dynamiq.nodes.tools.SummarizerTool 34 | connection: zen-rows-conn 35 | llm: 36 | id: scrape-zen-rows-llm 37 | type: dynamiq.nodes.llms.OpenAI 38 | connection: openai-conn 39 | model: gpt-3.5-turbo 40 | - id: agent-writer 41 | type: dynamiq.nodes.agents.SimpleAgent 42 | llm: 43 | id: agent-writer-llm 44 | type: dynamiq.nodes.llms.OpenAI 45 | connection: openai-conn 46 | model: gpt-3.5-turbo 47 | 48 | flows: 49 | agent-flow: 50 | name: Agent Flow 51 | nodes: 52 | - literature-orchestrator 53 | 54 | workflows: 55 | agent-workflow: 56 | flow: agent-flow 57 | version: 1 58 | -------------------------------------------------------------------------------- /examples/components/core/dag/orchestrator_dag_yaml.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | 5 | from dynamiq import Workflow, runnables 6 | from dynamiq.callbacks import TracingCallbackHandler 7 | from dynamiq.connections.managers import get_connection_manager 8 | from dynamiq.utils import JsonWorkflowEncoder 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | INPUT_DATA = """ 14 | I need to write a literature overview on the topic of `SOLID on interview` for my article. 15 | Use the latest and most relevant information from the internet and articles. Try to keep a simple format like: 16 | - Introduction 17 | - Main concepts 18 | - Conclusion 19 | Also include the sources at the end of the document. Double-check that the information is up-to-date and relevant. 20 | The final result must be provided in markdown format.
21 | """ 22 | 23 | 24 | if __name__ == "__main__": 25 | dag_yaml_file_path = os.path.join(os.path.dirname(__file__), "orchestrator_dag.yaml") 26 | tracing = TracingCallbackHandler() 27 | with get_connection_manager() as cm: 28 | # Load the workflow from the YAML file, parse and init components during parsing 29 | wf = Workflow.from_yaml_file(file_path=dag_yaml_file_path, connection_manager=cm, init_components=True) 30 | wf.run( 31 | input_data={"input": INPUT_DATA}, 32 | config=runnables.RunnableConfig(callbacks=[tracing]), 33 | ) 34 | # Check if traces dumped without errors 35 | _ = json.dumps( 36 | {"runs": [run.to_dict() for run in tracing.runs.values()]}, 37 | cls=JsonWorkflowEncoder, 38 | ) 39 | 40 | logger.info(f"Workflow {wf.id} finished. Results:") 41 | for node_id, result in wf.flow._results.items(): 42 | logger.info(f"Node {node_id}-{wf.flow._node_by_id[node_id].name}: \n{result}") 43 | -------------------------------------------------------------------------------- /examples/components/core/memory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/memory/__init__.py -------------------------------------------------------------------------------- /examples/components/core/memory/demo_memory.py: -------------------------------------------------------------------------------- 1 | from dynamiq.memory import Memory 2 | from dynamiq.memory.backends import InMemory 3 | from dynamiq.prompts import MessageRole 4 | 5 | # Create a memory instance with InMemory backend 6 | memory = Memory(backend=InMemory()) 7 | 8 | # Add messages with metadata 9 | memory.add(MessageRole.USER, "My favorite color is blue.", metadata={"topic": "colors", "user_id": "123"}) 10 | memory.add(MessageRole.ASSISTANT, "Blue is a calming color.", metadata={"topic": "colors", "user_id": "123"}) 11 | memory.add(MessageRole.USER, "I like red too.", metadata={"topic": "colors", "user_id": "456"}) 12 | memory.add(MessageRole.ASSISTANT, "Red is a passionate color.", metadata={"topic": "colors", "user_id": "456"}) 13 | 14 | # Search with filters only 15 | results = memory.search(filters={"user_id": "123"}) 16 | print("Results with filter only:", [r.content for r in results]) 17 | 18 | # Search with query and filters 19 | results = memory.search(query="color", filters={"user_id": "123"}) 20 | print("Results with query and filter:", [r.content for r in results]) 21 | 22 | # Search with query only 23 | results = memory.search("red") 24 | print("Results with query only:", [r.content for r in results]) 25 | 26 | # Get all messages 27 | messages = memory.get_all() 28 | print("All messages:") 29 | for msg in messages: 30 | print(f"{msg.role}: {msg.content}") 31 | 32 | # Dump 33 | print("Memory dump:", memory.to_dict()) 34 | 35 | # Clear 36 | memory.clear() 37 | print("Is memory empty?", memory.is_empty()) 38 | -------------------------------------------------------------------------------- /examples/components/core/memory/demo_memory_sqlite.py: -------------------------------------------------------------------------------- 1 | from dynamiq.memory.backends import SQLite 2 | from dynamiq.memory.memory import Memory 3 | from dynamiq.prompts import MessageRole 4 | 5 | backend = SQLite() 6 | memory = Memory(backend=backend) 7 | 8 | # Add messages with metadata 9 | memory.add(MessageRole.USER, "My favorite color is blue.", metadata={"topic": "colors", "user_id": "123"}) 10 | 
memory.add(MessageRole.ASSISTANT, "Blue is a calming color.", metadata={"topic": "colors", "user_id": "123"}) 11 | memory.add(MessageRole.USER, "I like red too.", metadata={"topic": "colors", "user_id": "456"}) 12 | memory.add(MessageRole.ASSISTANT, "Red is a passionate color.", metadata={"topic": "colors", "user_id": "456"}) 13 | 14 | # Search with filters only 15 | results = memory.search(filters={"user_id": "123"}) 16 | print("Results with filter only:", [r.content for r in results]) 17 | 18 | # Search with query and filters 19 | results = memory.search(query="color", filters={"user_id": "123"}) 20 | print("Results with query and filter:", [r.content for r in results]) 21 | 22 | # Search with query only 23 | results = memory.search("red") 24 | print("Results with query only:", [r.content for r in results]) 25 | 26 | # Get all messages 27 | messages = memory.get_all() 28 | print("All messages:") 29 | for msg in messages: 30 | print(f"{msg.role}: {msg.content}") 31 | 32 | # Clear memory 33 | memory.clear() 34 | print("Is memory empty?", memory.is_empty()) 35 | -------------------------------------------------------------------------------- /examples/components/core/memory/demo_simple_agent_chat_memory.py: -------------------------------------------------------------------------------- 1 | from dynamiq.memory import Memory 2 | from dynamiq.memory.backends.in_memory import InMemory 3 | from dynamiq.nodes.agents.simple import SimpleAgent 4 | from examples.llm_setup import setup_llm 5 | 6 | 7 | def setup_agent(): 8 | llm = setup_llm() 9 | memory = Memory(backend=InMemory()) 10 | AGENT_ROLE = "Helpful assistant with the goal of providing useful information and answering questions." 11 | agent = SimpleAgent( 12 | name="Agent", 13 | llm=llm, 14 | role=AGENT_ROLE, 15 | id="agent", 16 | memory=memory, 17 | ) 18 | return agent 19 | 20 | 21 | def chat_loop(agent): 22 | print("Welcome to the AI Chat! 
(Type 'exit' to end)") 23 | while True: 24 | user_input = input("You: ") 25 | user_id = "default" 26 | session_id = "default" 27 | if user_input.lower() == "exit": 28 | break 29 | 30 | response = agent.run({"input": user_input, "user_id": user_id, "session_id": session_id}) 31 | response_content = response.output.get("content") 32 | print(f"AI: {response_content}") 33 | 34 | print("\nChat History:") 35 | print(agent.memory.get_all_messages_as_string()) 36 | 37 | 38 | if __name__ == "__main__": 39 | chat_agent = setup_agent() 40 | chat_loop(chat_agent) 41 | -------------------------------------------------------------------------------- /examples/components/core/tracing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/tracing/__init__.py -------------------------------------------------------------------------------- /examples/components/core/tracing/agentops_auto.py: -------------------------------------------------------------------------------- 1 | import agentops 2 | 3 | from dynamiq.connections import Exa 4 | from dynamiq.nodes.agents import SimpleAgent 5 | from dynamiq.nodes.agents.react import ReActAgent 6 | from dynamiq.nodes.tools.exa_search import ExaTool 7 | from dynamiq.nodes.types import InferenceMode 8 | from dynamiq.utils.env import get_env_var 9 | from dynamiq.utils.logger import logger 10 | from examples.llm_setup import setup_llm 11 | 12 | 13 | @agentops.track_agent(name="React Agent") 14 | def get_react_agent(): 15 | llm = setup_llm() 16 | connection_exa = Exa() 17 | tool_search = ExaTool(connection=connection_exa) 18 | agent = ReActAgent( 19 | name="Agent", 20 | id="React Agent", 21 | llm=llm, 22 | tools=[tool_search], 23 | inference_mode=InferenceMode.XML, 24 | ) 25 | return agent 26 | 27 | 28 | @agentops.track_agent(name="Simple Agent") 29 | def get_simple_agent(): 30 | llm = setup_llm() 31 | agent = SimpleAgent( 32 | name="Agent", 33 | id="Simple Agent", 34 | llm=llm, 35 | role="Agent, goal to provide information based on the user input", 36 | ) 37 | return agent 38 | 39 | 40 | if __name__ == "__main__": 41 | agentops.init(get_env_var("AGENTOPS_API_KEY")) 42 | 43 | agent = get_react_agent() 44 | result = agent.run(input_data={"input": "Who won Euro 2024?"}) 45 | 46 | agent_simple = get_simple_agent() 47 | result_simple = agent_simple.run(input_data={"input": "What is the capital of France?"}) 48 | 49 | output_content = result.output.get("content") 50 | output_content_simple = result_simple.output.get("content") 51 | logger.info("RESULT") 52 | logger.info(output_content) 53 | logger.info(output_content_simple) 54 | agentops.end_session("Success") 55 | -------------------------------------------------------------------------------- /examples/components/core/websocket/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/websocket/__init__.py -------------------------------------------------------------------------------- /examples/components/core/websocket/sse/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/websocket/sse/__init__.py 
-------------------------------------------------------------------------------- /examples/components/core/websocket/ws_streamlit/example_agent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/websocket/ws_streamlit/example_agent/__init__.py -------------------------------------------------------------------------------- /examples/components/core/websocket/ws_streamlit/example_agent/server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | 4 | from fastapi import FastAPI, WebSocket 5 | 6 | from dynamiq import Workflow, connections, flows 7 | from dynamiq.connections import ScaleSerp 8 | from dynamiq.nodes import llms 9 | from dynamiq.nodes.agents.react import ReActAgent 10 | from dynamiq.nodes.node import StreamingConfig 11 | from dynamiq.nodes.tools.scale_serp import ScaleSerpTool 12 | from examples.components.core.websocket.ws_server_fastapi import WorkflowWSHandler 13 | 14 | app = FastAPI() 15 | 16 | HOST = "127.0.0.1" 17 | PORT = 6050 18 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 19 | WF_ID = "9cd3e052-6af8-4e89-9e88-5a654ec9c492" 20 | 21 | 22 | OPENAI_CONNECTION = connections.OpenAI( 23 | id=str(uuid.uuid4()), 24 | api_key=OPENAI_API_KEY, 25 | ) 26 | OPENAI_NODE_STREAMING_EVENT = "streaming-openai-1" 27 | OPENAI_NODE = llms.OpenAI( 28 | name="OpenAI", 29 | model="gpt-4o-mini", 30 | connection=OPENAI_CONNECTION, 31 | streaming=StreamingConfig(enabled=True, event=OPENAI_NODE_STREAMING_EVENT), 32 | ) 33 | 34 | tool_search = ScaleSerpTool(connection=ScaleSerp()) 35 | 36 | agent = ReActAgent( 37 | name="ReAct Agent - Children Teacher", 38 | id="react", 39 | llm=OPENAI_NODE, 40 | tools=[tool_search], 41 | ) 42 | 43 | 44 | @app.websocket("/workflows/test") 45 | async def websocket_endpoint(websocket: WebSocket): 46 | wf = Workflow(id=WF_ID, flow=flows.Flow(nodes=[agent])) 47 | ws_handler = WorkflowWSHandler(workflow=wf, websocket=websocket) 48 | await ws_handler.handle() 49 | 50 | 51 | if __name__ == "__main__": 52 | import uvicorn 53 | 54 | uvicorn.run(app, host=HOST, port=PORT) 55 | -------------------------------------------------------------------------------- /examples/components/core/websocket/ws_streamlit/example_agent_chat/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/websocket/ws_streamlit/example_agent_chat/__init__.py -------------------------------------------------------------------------------- /examples/components/core/websocket/ws_streamlit/example_agent_chat/server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | 4 | from fastapi import FastAPI, WebSocket 5 | 6 | from dynamiq import Workflow, connections, flows 7 | from dynamiq.memory import Memory 8 | from dynamiq.nodes import llms 9 | from dynamiq.nodes.agents.simple import SimpleAgent 10 | from dynamiq.nodes.node import StreamingConfig 11 | from examples.components.core.websocket.ws_server_fastapi import WorkflowWSHandler 12 | 13 | app = FastAPI() 14 | 15 | HOST = "127.0.0.1" 16 | PORT = 6050 17 | OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") 18 | WF_ID = "9cd3e052-6af8-4e89-9e88-5a654ec9c492" 19 | 20 | 21 | OPENAI_CONNECTION = connections.OpenAI( 22 | 
id=str(uuid.uuid4()), 23 | api_key=OPENAI_API_KEY, 24 | ) 25 | OPENAI_NODE_STREAMING_EVENT = "streaming-openai-1" 26 | OPENAI_NODE = llms.OpenAI( 27 | name="OpenAI", 28 | model="gpt-4o-mini", 29 | connection=OPENAI_CONNECTION, 30 | streaming=StreamingConfig(enabled=True, event=OPENAI_NODE_STREAMING_EVENT), 31 | ) 32 | 33 | memory_in_memory = Memory() 34 | AGENT_ROLE = "helpful assistant, goal is to provide useful information and answer questions" 35 | agent = SimpleAgent( 36 | name="Agent", 37 | llm=OPENAI_NODE, 38 | role=AGENT_ROLE, 39 | id="agent", 40 | memory=memory_in_memory, 41 | ) 42 | 43 | 44 | @app.websocket("/workflows/test") 45 | async def websocket_endpoint(websocket: WebSocket): 46 | wf = Workflow(id=WF_ID, flow=flows.Flow(nodes=[agent])) 47 | ws_handler = WorkflowWSHandler(workflow=wf, websocket=websocket) 48 | await ws_handler.handle() 49 | 50 | 51 | if __name__ == "__main__": 52 | import uvicorn 53 | 54 | uvicorn.run(app, host=HOST, port=PORT) 55 | -------------------------------------------------------------------------------- /examples/components/core/websocket/ws_streamlit/example_llm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/core/websocket/ws_streamlit/example_llm/__init__.py -------------------------------------------------------------------------------- /examples/components/core/websocket/ws_streamlit/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/components/core/websocket/ws_streamlit/example_agent_chat/app.py 4 | -------------------------------------------------------------------------------- /examples/components/data/file.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/data/file.docx -------------------------------------------------------------------------------- /examples/components/data/file.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/data/file.pdf -------------------------------------------------------------------------------- /examples/components/data/file.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/data/file.pptx -------------------------------------------------------------------------------- /examples/components/data/img.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/data/img.jpeg -------------------------------------------------------------------------------- /examples/components/evaluations/metrics/answer_correctness.py: -------------------------------------------------------------------------------- 1 | from dotenv import find_dotenv, load_dotenv 2 | 3 | from dynamiq.evaluations.metrics import AnswerCorrectnessEvaluator 4 | from dynamiq.nodes.llms import OpenAI 5 | 6 | 7 | def main(): 8 | load_dotenv(find_dotenv()) 9 | llm = OpenAI(model="gpt-4o-mini") 10 | 11 | questions = ["What powers 
the sun and what is its primary function?", "What is the boiling point of water?"] 12 | answers = [ 13 | ( 14 | "The sun is powered by nuclear fission, similar to nuclear reactors on Earth. " 15 | "Its primary function is to provide heat and light to the solar system." 16 | ), 17 | "The boiling point of water is 100 degrees Celsius at sea level.", 18 | ] 19 | ground_truth_answers = [ 20 | ( 21 | "The sun is powered by nuclear fusion, where hydrogen fuses to form helium. " 22 | "This fusion releases energy. The sun provides heat and light essential for life on Earth." 23 | ), 24 | ( 25 | "The boiling point of water is 100 degrees Celsius (212°F) at sea level. " 26 | "Note that the boiling point changes with altitude." 27 | ), 28 | ] 29 | 30 | evaluator = AnswerCorrectnessEvaluator(llm=llm) 31 | results = evaluator.run(questions=questions, answers=answers, ground_truth_answers=ground_truth_answers) 32 | 33 | for idx, result in enumerate(results.results): 34 | print(f"Question {idx+1}: {questions[idx]}") 35 | print(f"Answer Correctness Score: {result.score}") 36 | print(result.reasoning) 37 | print("-" * 50) 38 | 39 | 40 | if __name__ == "__main__": 41 | main() 42 | -------------------------------------------------------------------------------- /examples/components/evaluations/metrics/bleu_score.py: -------------------------------------------------------------------------------- 1 | from dynamiq.evaluations.metrics import BleuScoreEvaluator 2 | 3 | 4 | def main(): 5 | ground_truth_answers = [ 6 | "The cat sits on the mat.", 7 | "A quick brown fox jumps over the lazy dog.", 8 | "Python is a versatile programming language used in various domains.", 9 | ] 10 | answers = [ 11 | "The cat sits on the mat.", 12 | "A fast brown fox leaps over the lazy dog.", 13 | "Python is a powerful programming language used across many fields.", 14 | ] 15 | 16 | bleu_evaluator = BleuScoreEvaluator() 17 | 18 | # Batch evaluation 19 | bleu_scores = bleu_evaluator.run(ground_truth_answers=ground_truth_answers, answers=answers) 20 | print("Batch Evaluation:") 21 | for idx, score in enumerate(bleu_scores): 22 | print(f"Pair {idx + 1}:") 23 | print(f"Ground Truth Answer: {ground_truth_answers[idx]}") 24 | print(f"System Answer: {answers[idx]}") 25 | print(f"BLEU Score: {score}") 26 | print("-" * 60) 27 | print("All BLEU Scores (Batch):") 28 | print(bleu_scores) 29 | 30 | # Single evaluation for a specific pair 31 | print("\nSingle Evaluation:") 32 | gt_single = "The cat sits on the mat." 33 | ans_single = "The cat sits on the mat." 34 | 35 | single_score = bleu_evaluator.run_single(ground_truth_answer=gt_single, answer=ans_single) 36 | 37 | print("Ground Truth Answer:", gt_single) 38 | print("System Answer:", ans_single) 39 | print("BLEU Score (Single):", single_score) 40 | 41 | 42 | if __name__ == "__main__": 43 | main() 44 | -------------------------------------------------------------------------------- /examples/components/evaluations/metrics/factual_correctness.py: -------------------------------------------------------------------------------- 1 | from dotenv import find_dotenv, load_dotenv 2 | 3 | from dynamiq.evaluations.metrics import FactualCorrectnessEvaluator 4 | from dynamiq.nodes.llms import OpenAI 5 | 6 | 7 | def main(): 8 | load_dotenv(find_dotenv()) 9 | 10 | llm = OpenAI(model="gpt-4o-mini") 11 | 12 | answers = [ 13 | ( 14 | "Albert Einstein was a German theoretical physicist. " 15 | "He developed the theory of relativity and contributed " 16 | "to quantum mechanics." 
17 | ), 18 | ("The Eiffel Tower is located in Berlin, Germany. " "It was constructed in 1889."), 19 | ] 20 | contexts = [ 21 | ("Albert Einstein was a German-born theoretical physicist. " "He developed the theory of relativity."), 22 | ("The Eiffel Tower is located in Paris, France. " "It was constructed in 1887 and opened in 1889."), 23 | ] 24 | 25 | evaluator = FactualCorrectnessEvaluator(llm=llm) 26 | results = evaluator.run(answers=answers, contexts=contexts) 27 | 28 | for idx, result in enumerate(results.results): 29 | print(f"Answer: {answers[idx]}") 30 | print(f"Factual Correctness Score: {result.score}") 31 | print("Reasoning:") 32 | print(result.reasoning) 33 | print("-" * 50) 34 | 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /examples/components/evaluations/metrics/faithfulness.py: -------------------------------------------------------------------------------- 1 | from dotenv import find_dotenv, load_dotenv 2 | 3 | from dynamiq.evaluations.metrics import FaithfulnessEvaluator 4 | from dynamiq.nodes.llms import OpenAI 5 | 6 | 7 | def main(): 8 | load_dotenv(find_dotenv()) 9 | 10 | llm = OpenAI(model="gpt-4o-mini") 11 | 12 | questions = ["Who was Albert Einstein and what is he best known for?", "Tell me about the Great Wall of China."] 13 | answers = [ 14 | ( 15 | "He was a German-born theoretical physicist, widely acknowledged to be one of the " 16 | "greatest and most influential physicists of all time. He was best known for developing " 17 | "the theory of relativity; he also made important contributions to quantum mechanics." 18 | ), 19 | ( 20 | "The Great Wall of China is a large wall in China. It was built to keep out invaders. " 21 | "It is visible from space." 22 | ), 23 | ] 24 | contexts = [ 25 | ("Albert Einstein was a German-born theoretical physicist. He developed the theory of relativity."), 26 | ( 27 | "The Great Wall of China is a series of fortifications built across the historical " 28 | "northern borders of ancient Chinese states and Imperial China as protection against " 29 | "various nomadic groups." 
30 | ), 31 | ] 32 | 33 | evaluator = FaithfulnessEvaluator(llm=llm) 34 | scores = evaluator.run(questions=questions, answers=answers, contexts=contexts) 35 | 36 | for idx, result in enumerate(scores.results): 37 | print(f"Question: {questions[idx]}") 38 | print(f"Faithfulness Score: {result.score}") 39 | print(result.reasoning) 40 | print("-" * 50) 41 | 42 | 43 | if __name__ == "__main__": 44 | main() 45 | -------------------------------------------------------------------------------- /examples/components/helpers/converters/pypdf_converter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from io import BytesIO 3 | 4 | from dynamiq import Workflow 5 | from dynamiq.components.converters.unstructured import DocumentCreationMode 6 | from dynamiq.connections.managers import ConnectionManager 7 | from dynamiq.flows import Flow 8 | from dynamiq.nodes.converters import PyPDFConverter 9 | from dynamiq.nodes.embedders import MistralDocumentEmbedder 10 | from dynamiq.nodes.node import InputTransformer, NodeDependency 11 | 12 | # Please use your own pdf file path 13 | PYPDF_FILE_PATH = "../../data/file.pdf" 14 | 15 | 16 | def main(): 17 | logging.basicConfig(level=logging.INFO) 18 | logger = logging.getLogger(__name__) 19 | 20 | cm = ConnectionManager() 21 | 22 | file_converter_node = PyPDFConverter(document_creation_mode=DocumentCreationMode.ONE_DOC_PER_PAGE) 23 | mistral_text_embedder_node = MistralDocumentEmbedder( 24 | name="MistralDocumentEmbedder", 25 | depends=[ 26 | NodeDependency(file_converter_node), 27 | ], 28 | input_transformer=InputTransformer( 29 | selector={ 30 | "documents": f"${[file_converter_node.id]}.output.documents", 31 | }, 32 | ), 33 | ) 34 | 35 | wf = Workflow( 36 | id="wf", 37 | flow=Flow( 38 | id="wf", 39 | nodes=[file_converter_node, mistral_text_embedder_node], 40 | connection_manager=cm, 41 | ), 42 | ) 43 | 44 | with open(PYPDF_FILE_PATH, "rb") as upload_file: 45 | file = BytesIO(upload_file.read()) 46 | file.name = upload_file.name 47 | 48 | output = wf.run( 49 | input_data={ 50 | "files": [file], 51 | } 52 | ) 53 | logger.info(f"Workflow result:{output}") 54 | 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /examples/components/llm/llm_with_files/llm_file_example.py: -------------------------------------------------------------------------------- 1 | import io 2 | from pathlib import Path 3 | 4 | from dynamiq.prompts import Prompt, VisionMessage, VisionMessageImageContent 5 | from examples.llm_setup import setup_llm 6 | 7 | llm = setup_llm() 8 | 9 | file_path = "../../data/img.jpeg" 10 | 11 | 12 | if __name__ == "__main__": 13 | 14 | file_path_obj = Path(file_path).resolve() 15 | if not file_path_obj.exists(): 16 | raise FileNotFoundError(f"The file {file_path} does not exist.") 17 | if not file_path_obj.is_file(): 18 | raise OSError(f"The path {file_path} is not a valid file.") 19 | 20 | with file_path_obj.open("rb") as file: 21 | image_bytes = file.read() 22 | 23 | image_bytes_io = io.BytesIO(image_bytes) 24 | 25 | prompt = Prompt( 26 | id="1", 27 | messages=[VisionMessage(content=[VisionMessageImageContent(image_url={"url": "{{image}}"})])], 28 | ) 29 | 30 | result = llm.run(input_data={"image": image_bytes_io}, prompt=prompt) 31 | print(result) 32 | -------------------------------------------------------------------------------- /examples/components/llm/llm_with_vision/node_pdf_extractor.py: 
-------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | 3 | from dynamiq.nodes import llms 4 | from dynamiq.nodes.converters.llm_text_extractor import LLMPDFConverter, LLMPDFConverterInputSchema 5 | 6 | PDF_FILE_PATH = "../../data/file.pdf" 7 | 8 | 9 | def main(): 10 | # Initialize the LLM 11 | llm = llms.OpenAI( 12 | name="OpenAI Vision", 13 | model="gpt-4o", 14 | postponned_init=True, 15 | ) 16 | 17 | # Initialize the PDF text extractor 18 | converter = LLMPDFConverter(llm=llm) 19 | 20 | # Example file paths 21 | file_paths = [PDF_FILE_PATH] 22 | files = [] 23 | 24 | # Read files into BytesIO objects 25 | for path in file_paths: 26 | with open(path, "rb") as upload_file: 27 | bytes_io = BytesIO(upload_file.read()) 28 | files.append(bytes_io) 29 | 30 | # Execute the extractor 31 | output = converter.execute(input_data=LLMPDFConverterInputSchema(file_paths=file_paths, files=files)) 32 | 33 | # Print the output 34 | print(output) 35 | 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /examples/components/llm/llms/custom.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import HttpApiKey 2 | from dynamiq.nodes.llms.custom_llm import CustomLLM 3 | from dynamiq.prompts import Prompt 4 | 5 | OPENROUTER_API_KEY = "" 6 | OPENROUTER_API_URL = "https://openrouter.ai/api/v1" 7 | DEFAULT_MODEL = "cognitivecomputations/dolphin3.0-mistral-24b:free" 8 | DEFAULT_MAX_TOKENS = 1000 9 | DEFAULT_TEMPERATURE = 0.7 10 | 11 | openrouter_connection = HttpApiKey(url=OPENROUTER_API_URL, api_key=OPENROUTER_API_KEY) 12 | 13 | openrouter_llm = CustomLLM( 14 | name="OpenRouter", 15 | model=DEFAULT_MODEL, 16 | connection=openrouter_connection, 17 | max_tokens=DEFAULT_MAX_TOKENS, 18 | temperature=DEFAULT_TEMPERATURE, 19 | provider_prefix="openrouter", 20 | ) 21 | 22 | prompt = Prompt( 23 | messages=[ 24 | {"role": "system", "content": "You are a helpful assistant."}, 25 | {"role": "user", "content": "What is the capital of France?"}, 26 | ] 27 | ) 28 | 29 | response = openrouter_llm.execute(input_data={}, prompt=prompt) 30 | print(response["content"]) 31 | -------------------------------------------------------------------------------- /examples/components/llm/llms/mistral_with_messages.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Mistral as MistralConnection 2 | from dynamiq.nodes.llms import Mistral 3 | from dynamiq.prompts import Message, Prompt 4 | 5 | 6 | def run_mistral_node(prompt: Prompt): 7 | connection = MistralConnection() 8 | mistral_node = Mistral( 9 | model="mistral/mistral-large-latest", 10 | connection=connection, 11 | ) 12 | response = mistral_node.run(input_data={}, prompt=prompt) 13 | return response 14 | 15 | 16 | prompt_with_assistant_last = Prompt( 17 | messages=[ 18 | Message(role="system", content="Be friendly"), 19 | Message(role="user", content="Hello"), 20 | Message(role="assistant", content="Can I help you?"), 21 | ] 22 | ) 23 | 24 | 25 | if __name__ == "__main__": 26 | for msg in prompt_with_assistant_last.messages: 27 | print(msg) 28 | response = run_mistral_node(prompt_with_assistant_last) 29 | print(response) 30 | -------------------------------------------------------------------------------- /examples/components/llm/llms/ollama.py: -------------------------------------------------------------------------------- 1
| from dynamiq.nodes.llms import Ollama 2 | from dynamiq.prompts import Prompt 3 | 4 | 5 | def run_ollama_node(prompt: Prompt): 6 | ollama_node = Ollama( 7 | model="ollama/qwq", 8 | ) 9 | response = ollama_node.run(input_data={}, prompt=prompt) 10 | return response 11 | 12 | 13 | prompt = Prompt( 14 | messages=[ 15 | { 16 | "role": "user", 17 | "content": "Explain the concept of entropy.", 18 | }, 19 | ] 20 | ) 21 | 22 | 23 | if __name__ == "__main__": 24 | response = run_ollama_node(prompt) 25 | print(response) 26 | -------------------------------------------------------------------------------- /examples/components/llm/llms/perplexity_citations.py: -------------------------------------------------------------------------------- 1 | from dynamiq.nodes.llms import Perplexity 2 | from dynamiq.prompts import Prompt 3 | 4 | 5 | def run_perplexity_node(prompt: Prompt): 6 | perplexity_node = Perplexity( 7 | model="llama-3.1-sonar-small-128k-online", 8 | return_citations=True, 9 | ) 10 | response = perplexity_node.run(input_data={}, prompt=prompt) 11 | return response 12 | 13 | 14 | prompt = Prompt( 15 | messages=[ 16 | { 17 | "role": "user", 18 | "content": ("Who won euro 2024?"), 19 | }, 20 | ] 21 | ) 22 | 23 | 24 | if __name__ == "__main__": 25 | response = run_perplexity_node(prompt) 26 | print(response) 27 | -------------------------------------------------------------------------------- /examples/components/rag/rerankers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/components/rag/rerankers/__init__.py -------------------------------------------------------------------------------- /examples/components/rag/rerankers/use_cohere.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Cohere 2 | from dynamiq.nodes.rankers import CohereReranker 3 | from dynamiq.types import Document 4 | 5 | if __name__ == "__main__": 6 | ranker = CohereReranker(connection=Cohere()) 7 | 8 | input_data = { 9 | "query": "What is machine learning?", 10 | "documents": [ 11 | Document(content="Machine learning is a branch of AI...", score=0.8), 12 | Document(content="Deep learning is a subset of machine learning...", score=0.7), 13 | ], 14 | } 15 | 16 | output = ranker.run(input_data=input_data) 17 | 18 | print(output) 19 | -------------------------------------------------------------------------------- /examples/components/rag/vector_stores/README.md: -------------------------------------------------------------------------------- 1 | # Basic Python RAG Example 2 | 3 | ## How to run the RAG Example 4 | 5 | There are several options for running the example workflow. 6 | 7 | If you would like to try the example using the default parameters without making any changes, simply execute the following command: 8 | 9 | ``` 10 | python examples/components/rag/vector_stores/pinecone_flow.py main 11 | ``` 12 | 13 | It is also possible to specify the folder for uploading raw documents using `--folder-path` and to define the question to ask with `--question`: 14 | 15 | ``` 16 | python examples/components/rag/vector_stores/pinecone_flow.py main --folder-path=examples/data --question="How to update an order?" 
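# a hypothetical variation: both flags accept any local folder and question (./my_docs is not part of the repo)
python examples/components/rag/vector_stores/pinecone_flow.py main --folder-path=./my_docs --question="How do refunds work?"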
17 | ``` 18 | 19 | To run only the indexing workflow, use: 20 | 21 | ``` 22 | python examples/components/rag/vector_stores/pinecone_flow.py indexing-flow --folder-path=examples/data 23 | ``` 24 | 25 | Alternatively, to execute only the inference workflow: 26 | 27 | ``` 28 | python examples/components/rag/vector_stores/pinecone_flow.py retrieval-flow --question="How to update an order?" 29 | ``` 30 | 31 | Before running the example, please make sure to start Docker with the required services: 32 | 33 | ``` 34 | make run_unstructured 35 | ``` 36 | 37 | 38 | # RAG example with filters 39 | ``` 40 | python examples/components/rag/vector_stores/filters/filtering_example.py 41 | ``` 42 | -------------------------------------------------------------------------------- /examples/components/rag/vector_stores/filters/filtering_readme.md: -------------------------------------------------------------------------------- 1 | # Filtering Logic 2 | 3 | Technically speaking, filters are defined as nested dictionaries that can be of two types: **Comparison** or **Logic**. 4 | 5 | ## Comparison 6 | 7 | Comparison dictionaries must contain the following keys: 8 | 9 | - `field` 10 | - `operator` 11 | - `value` 12 | 13 | The `field` value in Comparison dictionaries must be the name of one of the meta fields of a document, such as `meta.years`. 14 | 15 | The `operator` value in Comparison dictionaries must be one of the following: 16 | 17 | - `==` 18 | - `!=` 19 | - `>` 20 | - `>=` 21 | - `<` 22 | - `<=` 23 | - `in` 24 | - `not in` 25 | 26 | The `value` takes a single value or (in the case of `in` and `not in`) a list of values. 27 | 28 | ## Logic 29 | 30 | Logic dictionaries must contain the following keys: 31 | 32 | - `operator` 33 | - `conditions` 34 | 35 | The `conditions` key must be a list of dictionaries, either of type Comparison or Logic. 
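For instance, a single Comparison condition inside such a `conditions` list could look like this (the same shape appears in the combined example at the end of this page): ``` { "field": "years", "operator": "==", "value": "2019" } ```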
36 | 37 | The `operator` values in Logic dictionaries must be one of the following: 38 | 39 | - `NOT` 40 | - `OR` 41 | - `AND` 42 | 43 | ## Example 44 | 45 | ``` 46 | filters = { 47 | "operator": "AND", 48 | "conditions": [ 49 | { 50 | "field": "years", 51 | "operator": "==", 52 | "value": "2019" 53 | }, 54 | { 55 | "field": "companies", 56 | "operator": "in", 57 | "value": ["BMW", "Mercedes"] 58 | } 59 | ] 60 | } 61 | ``` 62 | -------------------------------------------------------------------------------- /examples/components/rag/vector_stores/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from io import BytesIO 3 | 4 | from dynamiq import ROOT_PATH 5 | from dynamiq.utils import generate_uuid 6 | 7 | 8 | def list_data_folder_paths(folder_path=os.path.join(os.path.dirname(ROOT_PATH), "examples/data/")) -> list[str]: 9 | file_names = os.listdir(folder_path) 10 | file_paths = [os.path.join(folder_path, file_name) for file_name in file_names] 11 | 12 | return file_paths 13 | 14 | 15 | def read_bytes_io_files(file_paths: list[str]): 16 | files = [] 17 | metadata = [] 18 | 19 | # Read files into BytesIO objects 20 | for path in file_paths: 21 | with open(path, "rb") as upload_file: 22 | bytes_io = BytesIO(upload_file.read()) 23 | bytes_io.name = upload_file.name 24 | files.append(bytes_io) 25 | 26 | file_id = generate_uuid() 27 | metadata.append({"file_id": file_id}) 28 | 29 | return {"files": files, "metadata": metadata} 30 | -------------------------------------------------------------------------------- /examples/components/tools/mcp_server_as_tool/mcp_servers/math_server.py: -------------------------------------------------------------------------------- 1 | from mcp.server.fastmcp import FastMCP 2 | 3 | mcp = FastMCP("Math") 4 | 5 | 6 | @mcp.tool() 7 | def add(a: int, b: int) -> int: 8 | """Add two numbers""" 9 | return a + b 10 | 11 | 12 | @mcp.tool() 13 | def multiply(a: float, b: float) -> float: 14 | """Multiply two numbers""" 15 | return a * b 16 | 17 | 18 | if __name__ == "__main__": 19 | mcp.run(transport="stdio") 20 | -------------------------------------------------------------------------------- /examples/components/tools/mcp_server_as_tool/mcp_servers/weather_server.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from datetime import datetime, timedelta, timezone 3 | 4 | from mcp.server.fastmcp import FastMCP 5 | 6 | mcp = FastMCP("Weather") 7 | 8 | WEATHER_DATA = {"Paris": "Sunny", "Tokyo": "Cloudy", "London": "Windy"} 9 | 10 | TIMEZONE_OFFSETS = {"Paris": 2, "Tokyo": 9, "London": 1} 11 | 12 | 13 | @mcp.tool() 14 | async def get_weather(location: str) -> str: 15 | """Get weather for location.""" 16 | weather = WEATHER_DATA.get(location, "Unknown") 17 | return f"The current weather in {location} is {weather}." 18 | 19 | 20 | @mcp.tool() 21 | async def get_time(location: str) -> str: 22 | """Get time for location.""" 23 | offset = TIMEZONE_OFFSETS.get(location, 0) 24 | utc_now = datetime.now(timezone.utc) 25 | local_time = (utc_now + timedelta(hours=offset)).strftime("%H:%M") 26 | return f"The current local time in {location} is {local_time}." 27 | 28 | 29 | @mcp.tool() 30 | async def list_supported_locations() -> list[str]: 31 | """ 32 | List all supported locations for weather and time queries. 
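Returns the location names as a plain list, e.g. ["Paris", "Tokyo", "London"].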
33 | """ 34 | return list(WEATHER_DATA.keys()) 35 | 36 | 37 | if __name__ == "__main__": 38 | parser = argparse.ArgumentParser(description="Run MCP with selected transport.") 39 | parser.add_argument( 40 | "--transport", choices=["streamable-http", "sse"], default="streamable-http", help="Transport type to use" 41 | ) 42 | args = parser.parse_args() 43 | 44 | mcp.run(transport=args.transport) 45 | -------------------------------------------------------------------------------- /examples/components/tools/use_exa.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Exa 2 | from dynamiq.nodes.tools.exa_search import ExaTool, QueryType 3 | 4 | 5 | def basic_search_example(): 6 | exa_connection = Exa() 7 | 8 | exa_tool = ExaTool(connection=exa_connection, is_optimized_for_agents=False) 9 | 10 | result = exa_tool.run( 11 | input_data={ 12 | "query": "Latest developments in quantum computing", 13 | "limit": 5, 14 | "query_type": QueryType.neural, 15 | "use_autoprompt": True, 16 | } 17 | ) 18 | 19 | print("Search Results:") 20 | print(result.output.get("content")) 21 | 22 | 23 | def advanced_search_with_contents_example(): 24 | exa_connection = Exa() 25 | exa_tool = ExaTool(connection=exa_connection, is_optimized_for_agents=True) 26 | 27 | result = exa_tool.run( 28 | input_data={ 29 | "query": "Latest developments in quantum computing", 30 | "limit": 5, 31 | "query_type": QueryType.neural, 32 | "use_autoprompt": True, 33 | "category": "research paper", 34 | "include_full_content": True, 35 | } 36 | ) 37 | 38 | print("Search Results with Contents:") 39 | print(result.output.get("content")) 40 | 41 | 42 | if __name__ == "__main__": 43 | basic_search_example() 44 | advanced_search_with_contents_example() 45 | -------------------------------------------------------------------------------- /examples/components/tools/use_firecrawl.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections.connections import Firecrawl 2 | from dynamiq.nodes.tools.firecrawl import FirecrawlTool 3 | 4 | if __name__ == "__main__": 5 | 6 | connection = Firecrawl() 7 | 8 | tool = FirecrawlTool(connection=connection) 9 | 10 | input_data = { 11 | "url": "https://example.com", 12 | } 13 | 14 | result = tool.run(input_data) 15 | 16 | print(result) 17 | -------------------------------------------------------------------------------- /examples/components/tools/use_function_tool.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | from dynamiq.nodes.tools.function_tool import FunctionTool, function_tool 4 | 5 | 6 | class AddNumbersInputSchema(BaseModel): 7 | a: int = -1 8 | b: int = -1 9 | 10 | 11 | # Example usage without decorator 12 | class AddNumbersTool(FunctionTool): 13 | name: str = "Add Numbers Tool" 14 | description: str = "A tool that adds two numbers together." 
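    # run_func holds the tool's core logic; execute() (used below) invokes it
    # and wraps the returned value as {"content": <result>}.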
15 | 16 |     def run_func(self, input_data: AddNumbersInputSchema, **kwargs) -> int: 17 |         """Add two numbers together.""" 18 |         return input_data.a + input_data.b 19 | 20 | 21 | # Example usage with decorator 22 | @function_tool 23 | def multiply_numbers(a: int, b: int, **kwargs) -> int: 24 |     """Multiply two numbers together.""" 25 |     return a * b 26 | 27 | 28 | if __name__ == "__main__": 29 |     # Usage 30 |     add_tool = AddNumbersTool() 31 | 32 |     input_data = AddNumbersInputSchema() 33 |     input_data.a = 3 34 |     input_data.b = 5 35 | 36 |     result = add_tool.execute(input_data=input_data) 37 |     print(result)  # Output: {"content": 8} 38 | 39 |     # Usage 40 |     multiply_tool = multiply_numbers() 41 |     result = multiply_tool.execute(input_data=input_data) 42 |     print(result)  # Output: {"content": 15} 43 | -------------------------------------------------------------------------------- /examples/components/tools/use_http_api_node.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Http as HttpConnection 2 | from dynamiq.nodes.tools.http_api_call import HttpApiCall, ResponseType 3 | 4 | 5 | def main(): 6 |     # Create an HTTP connection 7 |     connection = HttpConnection( 8 |         method="GET", 9 |         url="https://catfact.ninja/fact", 10 |     ) 11 | 12 |     # Create an instance of HttpApiCall 13 |     api_call = HttpApiCall( 14 |         connection=connection, 15 |         success_codes=[200, 201], 16 |         timeout=60, 17 |         response_type=ResponseType.JSON, 18 |         params={"limit": 10}, 19 |     ) 20 | 21 |     # Prepare input data 22 |     input_data = {} 23 | 24 |     # Run the API call 25 |     try: 26 |         result = api_call.run(input_data) 27 |         print(result.output) 28 |     except ValueError as e: 29 |         print(f"Error: {e}") 30 | 31 | 32 | if __name__ == "__main__": 33 |     main() 34 | -------------------------------------------------------------------------------- /examples/components/tools/use_jina.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Jina 2 | from dynamiq.nodes.tools import JinaResponseFormat, JinaScrapeTool, JinaSearchTool 3 | 4 | 5 | def basic_search_example(): 6 |     jina_connection = Jina() 7 | 8 |     jina_search_tool = JinaSearchTool(connection=jina_connection, is_optimized_for_agents=False) 9 | 10 |     result = jina_search_tool.run( 11 |         input_data={ 12 |             "query": "What is AI Agent?", 13 |             "max_results": 3, 14 |         } 15 |     ) 16 | 17 |     print("Search Results:") 18 |     print(result.output.get("content")) 19 | 20 | 21 | def basic_scrape_example(): 22 |     jina_connection = Jina() 23 | 24 |     jina_scrape_tool = JinaScrapeTool( 25 |         connection=jina_connection, is_optimized_for_agents=False, response_format=JinaResponseFormat.DEFAULT 26 |     ) 27 | 28 |     result = jina_scrape_tool.run( 29 |         input_data={ 30 |             "url": "https://example.com", 31 |         } 32 |     ) 33 | 34 |     print("Scrape Results:") 35 |     print(result.output.get("content")) 36 | 37 | 38 | if __name__ == "__main__": 39 |     basic_search_example() 40 |     basic_scrape_example() 41 | -------------------------------------------------------------------------------- /examples/components/tools/use_react_fc.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import ScaleSerp 2 | from dynamiq.nodes.agents.react import ReActAgent 3 | from dynamiq.nodes.tools.function_tool import function_tool 4 | from dynamiq.nodes.tools.scale_serp import ScaleSerpTool 5 | from examples.llm_setup import setup_llm 6 | 7 | AGENT_ROLE = ( 8 |     "professional writer, goal is to produce "
9 | "a well written and informative response, that can be used for CHILDREN, " 10 | "with emojis and simple language." 11 | ) 12 | 13 | if __name__ == "__main__": 14 | llm = setup_llm() 15 | 16 | @function_tool 17 | def calculate_age(input_age: int, current_year: int, **kwargs) -> int: 18 | """ 19 | Calculate a person's age based on their birth year. 20 | 21 | Args: 22 | input_age (int): The year the person was born. 23 | current_year (int): The current year. 24 | 25 | Returns: 26 | age (int): The person's age. 27 | """ 28 | age = int(current_year) - int(input_age) 29 | return age 30 | 31 | calculate_age_tool = calculate_age() 32 | serp_connection = ScaleSerp() 33 | tool_search = ScaleSerpTool(connection=serp_connection) 34 | 35 | agent = ReActAgent( 36 | name="Agent", 37 | id="agent", 38 | role=AGENT_ROLE, 39 | llm=llm, 40 | tools=[calculate_age_tool, tool_search], 41 | ) 42 | 43 | result = agent.run( 44 | input_data={ 45 | "input": "I was born in 2000, and now is 2024. Your task is to calculate my age and then find the top films, limiting results to number of my age" # noqa: E501 46 | } 47 | ) 48 | 49 | print(result.output.get("content")) 50 | -------------------------------------------------------------------------------- /examples/components/tools/use_serp.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import ScaleSerp 2 | from dynamiq.nodes.tools.scale_serp import ScaleSerpTool, SearchType 3 | 4 | 5 | def basic_search_example(): 6 | scale_connection = ScaleSerp() 7 | 8 | # Initialize with default parameters 9 | search_tool = ScaleSerpTool(connection=scale_connection, is_optimized_for_agents=False, limit=5) 10 | 11 | result = search_tool.run( 12 | input_data={"query": "Latest developments in artificial intelligence", "search_type": SearchType.WEB} 13 | ) 14 | 15 | print("Basic Search Results:") 16 | print(result.output.get("content")) 17 | 18 | 19 | def advanced_search_example(): 20 | scale_connection = ScaleSerp() 21 | 22 | # Initialize with specific parameters 23 | search_tool = ScaleSerpTool( 24 | connection=scale_connection, is_optimized_for_agents=True, search_type=SearchType.NEWS, limit=10 25 | ) 26 | 27 | # Override some parameters during execution 28 | result = search_tool.run( 29 | input_data={"query": "Latest developments in artificial intelligence", "limit": 5} # Override the default limit 30 | ) 31 | 32 | print("Advanced Search Results:") 33 | print(result.output.get("content")) 34 | 35 | 36 | if __name__ == "__main__": 37 | basic_search_example() 38 | advanced_search_example() 39 | -------------------------------------------------------------------------------- /examples/components/tools/use_sql.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import connections 2 | from dynamiq.nodes.tools import SQLExecutor 3 | 4 | 5 | def basic_requests_snowflake_example(): 6 | snowflake_connection = connections.Snowflake() 7 | 8 | snowflake_executor = SQLExecutor(connection=snowflake_connection) 9 | snowflake_insert = { 10 | "query": """INSERT INTO test1 (Name, Description) 11 | VALUES ('Name1', 'Description1'), ('Name2', 'Description2');""" 12 | } 13 | snowflake_select = {"query": """select * from test1"""} 14 | snowflake_delete = {"query": """DELETE FROM test1 WHERE Name = 'Name1';"""} 15 | 16 | for query in [snowflake_insert, snowflake_select, snowflake_delete]: 17 | result = snowflake_executor.run(input_data=query) 18 | print("Query execution 
results:") 19 | print(result.output.get("content")) 20 | 21 | 22 | def basic_requests_mysql_example(): 23 | mysql_connection = connections.MySQL() 24 | 25 | mysql_executor = SQLExecutor(connection=mysql_connection) 26 | mysql_insert = { 27 | "query": """ 28 | INSERT INTO test1 (`Name`, `Description`) 29 | VALUES 30 | ('Row1Name', 'Row1Description'), 31 | ('Row2Name', 'Row2Description');""" 32 | } 33 | mysql_select = {"query": """select * from test1"""} 34 | mysql_delete = {"query": """DELETE FROM test1 WHERE `Name` = 'Row1Name';"""} 35 | 36 | for query in [mysql_insert, mysql_select, mysql_delete]: 37 | result = mysql_executor.run(input_data=query) 38 | print("Query execution results:") 39 | print(result.output.get("content")) 40 | 41 | 42 | if __name__ == "__main__": 43 | basic_requests_snowflake_example() 44 | basic_requests_mysql_example() 45 | -------------------------------------------------------------------------------- /examples/components/tools/use_tavily.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Tavily 2 | from dynamiq.nodes.tools.tavily import TavilyTool 3 | 4 | 5 | def basic_search_example(): 6 | """Example of basic search using TavilyTool.""" 7 | tavily_connection = Tavily() 8 | 9 | # Initialize the tool with default settings 10 | tavily_tool = TavilyTool(connection=tavily_connection, is_optimized_for_agents=False) 11 | 12 | # Run a basic search 13 | result = tavily_tool.run( 14 | input_data={ 15 | "query": "Latest developments in quantum computing", 16 | "search_depth": "basic", 17 | "max_results": 5, 18 | "include_answer": True, 19 | "use_cache": True, 20 | } 21 | ) 22 | 23 | print("Basic Search Results:") 24 | print(result.output.get("content")) 25 | 26 | 27 | def search_with_parameter_override(): 28 | """Example demonstrating parameter override during execution.""" 29 | tavily_connection = Tavily() 30 | 31 | tavily_tool = TavilyTool(connection=tavily_connection, search_depth="basic", max_results=5, include_answer=False) 32 | 33 | result = tavily_tool.run( 34 | input_data={ 35 | "query": "Latest developments in quantum computing", 36 | "search_depth": "advanced", 37 | "max_results": 3, 38 | "include_answer": True, 39 | "exclude_domains": ["wikipedia.org"], 40 | } 41 | ) 42 | 43 | print("Search Results with Parameter Override:") 44 | print(result.output.get("content")) 45 | 46 | 47 | if __name__ == "__main__": 48 | basic_search_example() 49 | search_with_parameter_override() 50 | -------------------------------------------------------------------------------- /examples/use_cases/agents_use_cases/agent_deep_scraping.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Firecrawl, Tavily 2 | from dynamiq.nodes.agents.react import ReActAgent 3 | from dynamiq.nodes.agents.utils import SummarizationConfig 4 | from dynamiq.nodes.tools.firecrawl import FirecrawlTool 5 | from dynamiq.nodes.tools.tavily import TavilyTool 6 | from dynamiq.nodes.types import InferenceMode 7 | from dynamiq.utils.logger import logger 8 | from examples.llm_setup import setup_llm 9 | 10 | AGENT_ROLE = "A helpful and general-purpose AI assistant" 11 | 12 | PROMPT1 = """Parse 5 pages of https://clutch.co/developers/artificial-intelligence/generative?page=1 13 | and generate csv like file with this structure 14 | Company Name,Rating,Reviews,Location,Minimum Project Size,Hourly Rate,Company Size,Services Focus.""" 15 | 16 | PROMPT2 = """Create long research on state 
of AI in EU. Give report for each country.""" 17 | 18 | 19 | if __name__ == "__main__": 20 | connection_tavily = Tavily() 21 | connection_firecrawl = Firecrawl() 22 | 23 | tool_search = TavilyTool(connection=connection_tavily) 24 | tool_scrape = FirecrawlTool(connection=connection_firecrawl) 25 | llm = setup_llm(model_provider="claude", model_name="claude-3-7-sonnet-20250219", temperature=0) 26 | 27 | agent = ReActAgent( 28 | name="Agent", 29 | id="Agent", 30 | llm=llm, 31 | tools=[tool_search, tool_scrape], 32 | role=AGENT_ROLE, 33 | max_loops=30, 34 | inference_mode=InferenceMode.XML, 35 | summarization_config=SummarizationConfig(enabled=True, max_token_context_length=50000), 36 | ) 37 | 38 | result = agent.run(input_data={"input": PROMPT1, "files": None}) 39 | 40 | output_content = result.output.get("content") 41 | logger.info("RESULT") 42 | logger.info(output_content) 43 | -------------------------------------------------------------------------------- /examples/use_cases/agents_use_cases/agent_financial.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import E2B 2 | from dynamiq.nodes.agents.react import ReActAgent 3 | from dynamiq.nodes.tools.e2b_sandbox import E2BInterpreterTool 4 | from dynamiq.nodes.types import InferenceMode 5 | from dynamiq.utils.logger import logger 6 | from examples.llm_setup import setup_llm 7 | 8 | AGENT_ROLE = ( 9 | "A helpful and general-purpose AI assistant with strong skills in language, Python, " 10 | "and Linux command-line operations. The goal is to provide concise answers to users. " 11 | "Additionally, generate code to solve tasks and run it accurately. " 12 | "Before answering, create a plan to solve the task. You can search for any API and " 13 | "use any free, open-source API that does not require authorization." 
14 | ) 15 | 16 | if __name__ == "__main__": 17 | connection_e2b = E2B() 18 | 19 | tool_code = E2BInterpreterTool(connection=connection_e2b) 20 | 21 | llm = setup_llm(model_provider="gpt", model_name="gpt-4o-mini", temperature=0) 22 | 23 | agent = ReActAgent( 24 | name="Agent", 25 | id="Agent", 26 | llm=llm, 27 | tools=[tool_code], 28 | role=AGENT_ROLE, 29 | inference_mode=InferenceMode.XML, 30 | ) 31 | 32 | result = agent.run(input_data={"input": "What is the current price of Bitcoin?", "files": None}) 33 | 34 | output_content = result.output.get("content") 35 | logger.info("RESULT") 36 | logger.info(output_content) 37 | -------------------------------------------------------------------------------- /examples/use_cases/agents_use_cases/agent_searcher.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Exa 2 | from dynamiq.nodes.agents.react import ReActAgent 3 | from dynamiq.nodes.tools.exa_search import ExaTool 4 | from dynamiq.nodes.types import InferenceMode 5 | from examples.llm_setup import setup_llm 6 | 7 | if __name__ == "__main__": 8 | connection_exa = Exa() 9 | tool_search = ExaTool(connection=connection_exa) 10 | llm = setup_llm(model_provider="gpt", model_name="gpt-4o-mini", temperature=1) 11 | agent = ReActAgent( 12 | name="Agent", 13 | id="Agent", 14 | llm=llm, 15 | tools=[tool_search], 16 | inference_mode=InferenceMode.XML, 17 | ) 18 | result = agent.run(input_data={"input": "Search for the best restaurants in New York"}) 19 | output_content = result.output.get("content") 20 | print("Agent response:", output_content) 21 | -------------------------------------------------------------------------------- /examples/use_cases/agents_use_cases/agent_with_local_llm.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Exa, Ollama 2 | from dynamiq.nodes.agents.react import ReActAgent 3 | from dynamiq.nodes.llms import Ollama as OllamaLLM 4 | from dynamiq.nodes.tools.exa_search import ExaTool 5 | from dynamiq.nodes.types import InferenceMode 6 | from dynamiq.utils.logger import logger 7 | 8 | if __name__ == "__main__": 9 | connection_exa = Exa() 10 | tool_search = ExaTool(connection=connection_exa) 11 | ollama_endpoint = "http://localhost:11434" 12 | 13 | llm = OllamaLLM( 14 | model="qwen2.5-coder:32b", 15 | connection=Ollama(url=ollama_endpoint), 16 | temperature=0.1, 17 | max_tokens=1000, 18 | ) 19 | agent = ReActAgent( 20 | name="Agent", 21 | id="Agent", 22 | llm=llm, 23 | tools=[tool_search], 24 | inference_mode=InferenceMode.XML, 25 | ) 26 | result = agent.run(input_data={"input": "Who won Euro 2024?"}) 27 | output_content = result.output.get("content") 28 | logger.info("RESULT") 29 | logger.info(output_content) 30 | -------------------------------------------------------------------------------- /examples/use_cases/agents_use_cases/agent_with_small_llm.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Exa 2 | from dynamiq.connections import TogetherAI as TogetherAIConnection 3 | from dynamiq.nodes.agents.react import ReActAgent 4 | from dynamiq.nodes.llms.togetherai import TogetherAI 5 | from dynamiq.nodes.tools.exa_search import ExaTool 6 | from dynamiq.nodes.types import InferenceMode 7 | from dynamiq.utils.logger import logger 8 | 9 | if __name__ == "__main__": 10 | connection_exa = Exa() 11 | tool_search = ExaTool(connection=connection_exa) 12 | llm = TogetherAI( 13 | 
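        # A comparatively small hosted model, used to show the agent running on lighter LLMs.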
connection=TogetherAIConnection(), 14 |         model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", 15 |         temperature=0, 16 |         max_tokens=4000, 17 |     ) 18 |     agent = ReActAgent( 19 |         name="Agent", 20 |         id="Agent", 21 |         llm=llm, 22 |         tools=[tool_search], 23 |         inference_mode=InferenceMode.XML, 24 |     ) 25 |     result = agent.run(input_data={"input": "Provide me the latest papers on LLMs from last week."}) 26 |     output_content = result.output.get("content") 27 |     logger.info("RESULT") 28 |     logger.info(output_content) 29 | -------------------------------------------------------------------------------- /examples/use_cases/agents_use_cases/agent_with_thinking_tool.py: -------------------------------------------------------------------------------- 1 | from dynamiq.connections import Exa 2 | from dynamiq.nodes.agents.react import ReActAgent 3 | from dynamiq.nodes.tools import ThinkingTool 4 | from dynamiq.nodes.tools.exa_search import ExaTool 5 | from dynamiq.nodes.types import InferenceMode 6 | from examples.llm_setup import setup_llm 7 | 8 | if __name__ == "__main__": 9 |     llm = setup_llm(model_provider="gpt", model_name="gpt-4o-mini", temperature=0.7) 10 | 11 |     thinking_tool = ThinkingTool(llm=llm) 12 |     connection_exa = Exa() 13 |     tool_search = ExaTool(connection=connection_exa) 14 | 15 |     agent = ReActAgent( 16 |         name="Thinking Agent", 17 |         id="thinking_agent", 18 |         llm=llm, 19 |         tools=[thinking_tool, tool_search], 20 |         inference_mode=InferenceMode.XML, 21 |     ) 22 | 23 |     result = agent.run( 24 |         input_data={ 25 |             "input": "Please create a report on investment opportunities for 2025, " 26 |             "including the best stocks, cryptocurrencies, and other assets." 27 |         } 28 |     ) 29 | 30 |     output_content = result.output.get("content") 31 |     print("Agent response:", output_content) 32 | -------------------------------------------------------------------------------- /examples/use_cases/chainlit/bge/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/use_cases/chainlit/bge/__init__.py -------------------------------------------------------------------------------- /examples/use_cases/chainlit/bge/component/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/use_cases/chainlit/bge/component/__init__.py -------------------------------------------------------------------------------- /examples/use_cases/chainlit/bge/node/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/examples/use_cases/chainlit/bge/node/__init__.py -------------------------------------------------------------------------------- /examples/use_cases/chainlit/chainlit.md: -------------------------------------------------------------------------------- 1 | # Welcome to Dynamiq! 🚀🤖 2 | 3 | Hi there, Developer! 👋 We're excited to have you on board. Dynamiq is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs. 4 | 5 | ## Useful Links 🔗 6 | 7 | - **Website:** Get started with [Dynamiq](https://www.getdynamiq.ai/) 📚, share your 8 | projects, and connect with other developers! 💬 9 | 10 | We can't wait to see what you create with Dynamiq! Happy coding!
💻😊 11 | -------------------------------------------------------------------------------- /examples/use_cases/chainlit/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dynamiq import ROOT_PATH 4 | 5 | 6 | def default_prompt_template() -> str: 7 |     """ 8 |     Returns the default prompt template for the language model. 9 |     """ 10 |     return r""" 11 |     Please answer the following question based on the information found 12 |     within the sections enclosed by triple backticks (\`\`\`). 13 |     Your response should be concise, well-written, and follow markdown formatting guidelines: 14 | 15 |     - Use bullet points for list items. 16 |     - Use **bold** text for emphasis where necessary. 17 | 18 |     **Question:** {{query}} 19 | 20 |     Thank you for your detailed attention to the request 21 |     **Context information**: 22 |     ``` 23 |     {% for document in documents %} 24 |     --- 25 |     Document title: {{ document.metadata["title"] }} 26 |     Document information: {{ document.content }} 27 |     --- 28 |     {% endfor %} 29 |     ``` 30 | 31 |     **User Question:** {{question}} 32 |     Answer: 33 |     """ 34 | 35 | 36 | def list_file_paths(folder_path=os.path.join(os.path.dirname(ROOT_PATH), "examples/data/")) -> list[str]: 37 |     file_names = os.listdir(folder_path) 38 |     file_paths = [os.path.join(folder_path, file_name) for file_name in file_names] 39 | 40 |     return file_paths 41 | -------------------------------------------------------------------------------- /examples/use_cases/customer_support/README.md: -------------------------------------------------------------------------------- 1 | # Customer Support Workflow Example 2 | 3 | This directory contains an example of a bank customer support workflow built using Dynamiq agents and tools. The workflow demonstrates how to integrate LLM Agents with RAG to handle different types of customer requests by accessing an internal bank API and its documentation. 4 | 5 | ## Components 6 | 7 | ### `bank_api.py` 8 | 9 | - A simple API with a single endpoint. 10 | - Responds to queries in JSON format. 11 | 12 | ### `main.py` 13 | 14 | - Defines the main workflow logic. 15 | - Creates a workflow with instances of `ReActAgent` for handling API and documentation queries. 16 | - Executes the workflow with a sample input. 17 | 18 | ## Workflow Logic 19 | 20 | 1. The user provides a query (e.g., "fast block my card"). 21 | 2. `RAG Agent` is invoked to find relevant documentation on how to proceed with the request. 22 | 3. `API Agent` starts with the documentation provided by `RAG Agent`. It gathers the required information from the user and executes the operation via the API. 23 | 4. Upon completion of the operation, a concise summary of the request and its status will be provided. 24 | 25 | ## Usage 26 | 27 | 1. **Set up environment variables:** 28 |    - `OPENAI_API_KEY`: Your OpenAI API key. 29 |    - `PINECONE_API_KEY`: Your Pinecone API key. 30 |    - `PINECONE_ENVIRONMENT`: Your Pinecone environment. 31 | 32 | 2. **Run the workflow:** `python main.py` 33 | 34 | ## Key Concepts 35 | 36 | - **Workflows:** Creating a flow of multiple agents to solve complex tasks. 37 | - **Retrieval-Augmented Generation (RAG):** Combining information retrieval with language model generation to provide more accurate and comprehensive answers. 38 | - **Tool Usage:** Leveraging specialized tools to extend the capabilities of agents. 39 | - **Human Feedback:** Integrating human feedback to improve the accuracy and reliability of agents.
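A rough sketch of how the two agents can be chained is shown below. The agent names, roles, and prompt format here are illustrative assumptions (tools are omitted for brevity); the actual definitions live in `main.py`:

```python
# Illustrative sketch only; see main.py for the real workflow wiring.
from dynamiq.nodes.agents.react import ReActAgent
from examples.llm_setup import setup_llm

llm = setup_llm()

# Hypothetical roles; main.py equips these agents with the bank API and RAG tools.
rag_agent = ReActAgent(name="RAG Agent", id="rag_agent", llm=llm, role="Finds the bank documentation relevant to a customer request.")
api_agent = ReActAgent(name="API Agent", id="api_agent", llm=llm, role="Executes the documented operation against the bank API.")

query = "fast block my card"
docs = rag_agent.run(input_data={"input": query}).output.get("content")
result = api_agent.run(input_data={"input": f"Request: {query}\nDocumentation:\n{docs}"})
print(result.output.get("content"))
```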
40 | -------------------------------------------------------------------------------- /examples/use_cases/erp_system/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | ### 1. **Set Up Environment Variables** 4 | Before running the workflow, ensure you have the necessary API keys. 5 | 6 | Add the following environment variables: 7 | - `OPENAI_API_KEY`: Your OpenAI API key. 8 | - `MAILGUN_API_KEY`: Your Mailgun API key. 9 | - `MAILGUN_DOMAIN`: Your Mailgun domain. 10 | 11 | ### 2. **Run the Workflow** 12 | 13 | To execute the workflow, run: 14 | 15 | ```bash 16 | bash run.sh 17 | ``` 18 | -------------------------------------------------------------------------------- /examples/use_cases/erp_system/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/use_cases/erp_system/app.py 4 | -------------------------------------------------------------------------------- /examples/use_cases/financial_assistant/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | This workflow enables you to retrieve financial data, company reports, and stock market information, process it, and generate AI-driven insights. 4 | 5 | ### 1. **Set Up API Keys** 6 | Before running the workflow, ensure you have the necessary API keys configured. These keys are required for accessing financial data and performing analysis. 7 | 8 | #### **Required API Keys:** 9 | - **Alpha Vantage API Key:** 10 |   - Sign up at [Alpha Vantage](https://www.alphavantage.co/) to obtain your API key. 11 |   - Set it up in `main.py`. 12 | - **OpenAI API Key:** 13 |   - Set it as an environment variable: `OPENAI_API_KEY` 14 | 15 | 16 | ### 2. **Run the Workflow** 17 | 18 | To execute the analysis, run: 19 | 20 | ```bash 21 | python main.py 22 | ``` 23 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | ### 1. **Set Up Environment Variables** 4 | Before running the workflow, ensure you have the necessary API keys. 5 | 6 | Add the following environment variables: 7 | - `OPENAI_API_KEY`: Your OpenAI API key. 8 | - `TAVILY_API_KEY`: Your Tavily API key. 9 | - `PINECONE_API_KEY`: Your Pinecone API key. 10 | - `PINECONE_ENVIRONMENT`: Your Pinecone environment. 11 | 12 | 13 | ### 2. **Run the Workflow** 14 | 15 | Choose between the two available research modes based on your requirements: 16 | 17 | #### **Option 1: GPT Researcher (Concise Report ~ 2-3 Pages)** 18 | 19 | Run the following command: 20 | ```bash 21 | python main_gpt_researcher.py 22 | ``` 23 | 24 | #### **Option 2: Multi-Agent GPT Researcher (Comprehensive Report ~ 5-20 Pages)** 25 | This mode uses an enhanced architecture that builds on the first mode to create an in-depth research report. 26 | The multi-agent system allows for deeper analysis and expanded content generation.
27 | 28 | Run the following command: 29 | ```bash 30 | python main_gpt_researcher_multi.py 31 | ``` 32 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/gpt_researcher/__init__.py: -------------------------------------------------------------------------------- 1 | from examples.use_cases.gpt_researcher.gpt_researcher.conduct_research import conduct_research_workflow 2 | from examples.use_cases.gpt_researcher.gpt_researcher.write_report import write_report_workflow 3 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/multi_agents/__init__.py: -------------------------------------------------------------------------------- 1 | from examples.use_cases.gpt_researcher.multi_agents.editor_agent import run_parallel_research 2 | from examples.use_cases.gpt_researcher.multi_agents.human_agent import review_plan 3 | from examples.use_cases.gpt_researcher.multi_agents.planner_agent import plan_research 4 | from examples.use_cases.gpt_researcher.multi_agents.publisher_agent import run_publisher 5 | from examples.use_cases.gpt_researcher.multi_agents.researcher_agent import run_initial_research 6 | from examples.use_cases.gpt_researcher.multi_agents.writer_agent import run_writer_agent 7 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/multi_agents/human_agent.py: -------------------------------------------------------------------------------- 1 | def review_plan(context: dict, **kwargs) -> dict: 2 |     """Gathers human feedback on the research plan if required.""" 3 |     include_human_feedback = context.get("task").get("include_human_feedback") 4 | 5 |     if not include_human_feedback: 6 |         return {"human_feedback": None, "result": "ok"} 7 | 8 |     layout = context.get("sections") 9 |     human_feedback = input( 10 |         f"Any feedback on this plan of research topics: {layout}? If not, please reply with 'no'."
11 | ) 12 | 13 | return {"human_feedback": human_feedback, "result": "ok"} 14 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/multi_agents/publisher_agent.py: -------------------------------------------------------------------------------- 1 | def run_publisher(context: dict, **kwargs) -> dict: 2 | """Generates a formatted research report based on provided context data.""" 3 | 4 | report_date = context.get("date") 5 | introduction = context.get("introduction") 6 | table_of_contents = context.get("table_of_contents") 7 | conclusion = context.get("conclusion") 8 | 9 | headers = context.get("headers") 10 | title = headers.get("title") 11 | date_label = headers.get("date") 12 | 13 | references = "\n".join(context.get("sources", [])) 14 | sections = "\n\n".join(context.get("research_data", [])) 15 | 16 | # Construct the report layout 17 | report_layout = f"""# {title} 18 | #### {date_label}: {report_date} 19 | 20 | ## {headers.get("introduction", "Introduction")} 21 | {introduction} 22 | 23 | ## {headers.get("table_of_contents", "Table of Contents")} 24 | {table_of_contents} 25 | 26 | {sections} 27 | 28 | ## {headers.get("conclusion", "Conclusion")} 29 | {conclusion} 30 | 31 | ## {headers.get("references", "References")} 32 | {references} 33 | """ 34 | 35 | return {"report": report_layout, "result": "success"} 36 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/multi_agents/researcher_agent.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from examples.use_cases.gpt_researcher.gpt_researcher import conduct_research_workflow, write_report_workflow 4 | 5 | 6 | def run_initial_research(context: dict, **kwargs) -> dict: 7 | # Conduct research and gather the relevant information 8 | task = context.get("task") 9 | num_sub_queries = task.get("num_sub_queries") 10 | max_sources = task.get("max_sources") 11 | max_content_chunks_per_source = task.get("max_content_chunks_per_source") 12 | 13 | query = f'{task.get("query")} - {context.get("query")}' if context.get("query") else task.get("query") 14 | 15 | conduct_research = conduct_research_workflow() 16 | conduct_research.run( 17 | input_data={ 18 | "query": query, 19 | "num_sub_queries": num_sub_queries, 20 | } 21 | ) 22 | 23 | # Wait for Pinecone to load data to cloud before proceeding 24 | time.sleep(15) 25 | 26 | # Generate the research report based on gathered information 27 | write_report = write_report_workflow(max_sources) 28 | write_report_result = write_report.run( 29 | input_data={ 30 | "query": query, 31 | "max_sources": max_sources, 32 | "max_content_chunks_per_source": max_content_chunks_per_source, 33 | } 34 | ) 35 | 36 | # Get the final research report 37 | report = write_report_result.output["generate_report_node"]["output"]["content"] 38 | return {"initial_research": report, "result": "correct"} 39 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/multi_agents/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | from dynamiq.connections import OpenAI as OpenAIConnection 5 | from dynamiq.nodes.llms import OpenAI 6 | from dynamiq.prompts import Message, Prompt 7 | 8 | 9 | def _extract_code_block(text): 10 | match = re.search(r"```(.*?)```", text, re.DOTALL) 11 | return match.group(1).strip() if match 
else text 12 | 13 | 14 | def execute_agent(system_prompt: str, user_prompt: str, to_json: bool = False) -> dict: 15 | """Executes an LLM request""" 16 | 17 | llm = OpenAI( 18 | model="gpt-4o-mini", 19 | connection=OpenAIConnection(), 20 | max_tokens=3000, 21 | prompt=Prompt( 22 | messages=[ 23 | Message(role="system", content=system_prompt), 24 | Message(role="user", content=user_prompt), 25 | ] 26 | ), 27 | ) 28 | 29 | response = llm.run(input_data={}).output.get("content", "").strip() 30 | 31 | if to_json: 32 | try: 33 | response = _extract_code_block(response) 34 | return json.loads(response.lstrip("'`json").rstrip("'`")) 35 | except json.JSONDecodeError as e: 36 | print(f"JSON decoding failed: {e}. Response: {response}") 37 | return {} 38 | 39 | return response 40 | -------------------------------------------------------------------------------- /examples/use_cases/gpt_researcher/utils.py: -------------------------------------------------------------------------------- 1 | import markdown 2 | from weasyprint import HTML 3 | 4 | from dynamiq.connections import Pinecone 5 | from dynamiq.storages.vector.pinecone import PineconeVectorStore 6 | 7 | 8 | def clean_pinecone_storage(): 9 | """Deletes all documents in the Pinecone vector storage.""" 10 | vector_store = PineconeVectorStore(connection=Pinecone(), index_name="gpt-researcher", create_if_not_exist=True) 11 | 12 | vector_store.delete_documents(delete_all=True) 13 | 14 | 15 | def save_markdown_as_pdf(md_string: str, output_pdf: str): 16 | """Save a Markdown string as a PDF.""" 17 | 18 | html_content = markdown.markdown(md_string) 19 | HTML(string=html_content).write_pdf(output_pdf) 20 | -------------------------------------------------------------------------------- /examples/use_cases/project_manager/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | ### 1. **Set Up Environment Variables** 4 | Before running the workflow, ensure you have the necessary API keys. 5 | 6 | Add the following environment variables: 7 | - `OPENAI_API_KEY`: Your OpenAI API key. 8 | - `COMPOSIO_API_KEY`: Your Composio API key with access to Linear. 9 | 10 | ### 2. 
**Run the Workflow** 11 | 12 | To execute the workflow, run: 13 | 14 | ```bash 15 | bash run.sh 16 | ``` 17 | -------------------------------------------------------------------------------- /examples/use_cases/project_manager/app.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import streamlit as st 4 | from backend import generate_agent_response, setup_agent 5 | 6 | if "agent" not in st.session_state or st.sidebar.button("Apply Changes"): 7 | st.session_state.agent = setup_agent() 8 | st.session_state.messages = [] 9 | 10 | st.title("Project Manager Agent Chat") 11 | st.write("Ask questions and get responses from an AI assistant.") 12 | 13 | for message in st.session_state.messages: 14 | with st.chat_message(message["role"]): 15 | st.markdown(message["content"]) 16 | 17 | if user_input := st.chat_input("You: "): 18 | st.session_state.messages.append({"role": "user", "content": user_input}) 19 | with st.chat_message("user"): 20 | st.markdown(user_input) 21 | 22 | with st.chat_message("assistant"): 23 | message_placeholder = st.empty() 24 | full_response = "" 25 | 26 | st.session_state.messages.append({"role": "assistant", "content": ""}) 27 | 28 | for chunk in generate_agent_response(st.session_state.agent, user_input): 29 | full_response += chunk 30 | message_placeholder.markdown(full_response + "▌") 31 | 32 | st.session_state.messages[-1]["content"] = full_response 33 | 34 | time.sleep(0.05) 35 | message_placeholder.markdown(full_response) 36 | 37 | st.session_state.messages[-1]["content"] = full_response 38 | 39 | st.session_state["new_input"] = "" 40 | -------------------------------------------------------------------------------- /examples/use_cases/project_manager/react_agent_pm.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from composio import Action 4 | from composio_tool import ComposioTool 5 | 6 | from dynamiq.memory import Memory 7 | from dynamiq.memory.backends.in_memory import InMemory 8 | from dynamiq.nodes.agents.react import ReActAgent 9 | from examples.llm_setup import setup_llm 10 | 11 | # Create tool instance 12 | tool_1 = ComposioTool(action=Action.LINEAR_LIST_LINEAR_PROJECTS, api_key=os.getenv("COMPOSIO_API_KEY")) 13 | tool_2 = ComposioTool(action=Action.LINEAR_LIST_LINEAR_TEAMS, api_key=os.getenv("COMPOSIO_API_KEY")) 14 | tool_3 = ComposioTool(action=Action.LINEAR_CREATE_LINEAR_ISSUE, api_key=os.getenv("COMPOSIO_API_KEY")) 15 | 16 | llm = setup_llm() 17 | memory = Memory(backend=InMemory()) 18 | 19 | agent = ReActAgent( 20 | name="AI Agent", 21 | llm=llm, 22 | tools=[tool_1, tool_2, tool_3], 23 | memory=memory, 24 | ) 25 | 26 | result = agent.run( 27 | input_data={ 28 | "input": ( 29 | "Show me the project and team list. " 30 | "Create the one task with simple description in any available project." 31 | ), 32 | "user_id": "1", 33 | "session_id": "1", 34 | }, 35 | config=None, 36 | ) 37 | print(result.output) 38 | -------------------------------------------------------------------------------- /examples/use_cases/project_manager/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/use_cases/project_manager/app.py 4 | -------------------------------------------------------------------------------- /examples/use_cases/researcher/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | ### 1. 
**Set Up Environment Variables** 4 | Before running the workflow, ensure you have the necessary API keys. 5 | 6 | Add the following environment variables: 7 | - `OPENAI_API_KEY`: Your OpenAI API key. 8 | - `E2B_API_KEY`: Your E2B API key. 9 | - `FIRECRAWL_API_KEY`: Your Firecrawl API key. 10 | - `SERP_API_KEY`: Your ScaleSerp API key. 11 | 12 | ### 2. **Run the Workflow** 13 | 14 | To execute the workflow, run: 15 | 16 | ```bash 17 | bash run.sh 18 | ``` 19 | -------------------------------------------------------------------------------- /examples/use_cases/researcher/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/use_cases/researcher/app.py 4 | -------------------------------------------------------------------------------- /examples/use_cases/search/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m streamlit run examples/use_cases/search/app.py 4 | -------------------------------------------------------------------------------- /examples/use_cases/smm_manager/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | ### 1. **Set Up Environment Variables** 4 | Before running the workflow, ensure you have the necessary API keys. 5 | 6 | Add the following environment variables: 7 | - `OPENAI_API_KEY`: Your OpenAI API key. 8 | - `MAILGUN_API_KEY`: Your MAILGUN API key. 9 | - `E2B_API_KEY`: Your E2B API key. 10 | 11 | ### 2. **Run the Workflow** 12 | 13 | To execute the workflow, run: 14 | 15 | ```bash 16 | python main.py 17 | ``` 18 | -------------------------------------------------------------------------------- /examples/use_cases/smm_manager/data/emails.txt: -------------------------------------------------------------------------------- 1 | example@example 2 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | # mkdocs.yml 2 | site_name: Dynamiq Documentation 3 | 4 | theme: 5 | name: "material" 6 | 7 | plugins: 8 | - mkdocstrings 9 | - search 10 | 11 | docs_dir: mkdocs 12 | 13 | markdown_extensions: 14 | - pymdownx.superfences 15 | -------------------------------------------------------------------------------- /scripts/generate_mkdocs.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | 4 | 5 | def create_directory(path): 6 | """Create a directory if it doesn't exist.""" 7 | os.makedirs(path, exist_ok=True) 8 | 9 | 10 | def get_python_files(directory): 11 | """Get all Python files in the specified directory, excluding __init__.py files.""" 12 | py_files = glob.glob(os.path.join(directory, "**", "*.py"), recursive=True) 13 | return [file for file in py_files if "__init__.py" not in file] 14 | 15 | 16 | def generate_documentation_file(file_path): 17 | """Generate a documentation file for the given Python file.""" 18 | file_path_splits = file_path.split("/") 19 | 20 | # Create the documentation folder 21 | docs_folder = os.path.join("mkdocs", *file_path_splits[:-1]) 22 | create_directory(docs_folder) 23 | 24 | # Generate the new documentation file name 25 | file_name = file_path_splits[-1].replace(".py", ".md") 26 | new_docs_file = os.path.join(docs_folder, file_name) 27 | 28 | # Generate the file content 29 | file_content = ":::" + ".".join(file_path_splits).replace(".py", 
"\n") 30 | 31 | # Write the content to the new documentation file 32 | with open(new_docs_file, "w") as f: 33 | f.write(file_content) 34 | 35 | 36 | def main(): 37 | """Main function to generate documentation for Python files.""" 38 | source_directory = "dynamiq" 39 | python_files = get_python_files(source_directory) 40 | for file in python_files: 41 | generate_documentation_file(file) 42 | 43 | print(f"Documentation generated for {len(python_files)} Python files.") 44 | 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | exclude = .git,docs,venv,*/__init__.py 4 | extend-ignore = E203 5 | 6 | [pycodestyle] 7 | max-line-length = 120 8 | exclude = .git,docs,venv 9 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/__init__.py -------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/__init__.py -------------------------------------------------------------------------------- /tests/integration/evaluations/metrics/test_bleu_score.py: -------------------------------------------------------------------------------- 1 | from dynamiq.evaluations.metrics import BleuScoreEvaluator 2 | 3 | 4 | def test_bleu_score_evaluator(): 5 | """ 6 | Test the BleuScoreEvaluator to ensure it correctly computes BLEU scores between 7 | ground truth answers and answers. 
8 | """ 9 | # Sample data 10 | ground_truth_answers = [ 11 | "The cat sits on the mat.", 12 | "A quick brown fox jumps over the lazy dog.", 13 | "Python is a versatile programming language used in various domains.", 14 | ] 15 | answers = [ 16 | "The cat sits on the mat.", # Perfect match 17 | "A fast brown fox leaps over the lazy dog.", # Slight variation 18 | "Python is a powerful programming language used across many fields.", # Slight variation 19 | ] 20 | 21 | # Initialize evaluator 22 | bleu_evaluator = BleuScoreEvaluator() 23 | 24 | # Run evaluator 25 | bleu_scores = bleu_evaluator.run(ground_truth_answers=ground_truth_answers, answers=answers) 26 | 27 | expected_scores = [1.0, 0.37, 0.26] # Replace with actual printed scores 28 | for computed, expected in zip(bleu_scores, expected_scores): 29 | assert abs(computed - expected) < 0.01, f"Expected {expected}, got {computed}" 30 | -------------------------------------------------------------------------------- /tests/integration/flows/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/flows/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/nodes/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/audio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/nodes/audio/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/audio/test_whisper.py: -------------------------------------------------------------------------------- 1 | import json 2 | from io import BytesIO 3 | from urllib.parse import urljoin 4 | 5 | import pytest 6 | 7 | from dynamiq import Workflow, connections 8 | from dynamiq.flows import Flow 9 | from dynamiq.nodes.audio import WhisperSTT 10 | from dynamiq.runnables import RunnableResult, RunnableStatus 11 | 12 | 13 | @pytest.mark.parametrize("audio", [b"bytes_data", BytesIO(b"bytes_data"), b"\xff\xfb\x90\xc4\x00\x00\n\xddu"]) 14 | def test_workflow_with_whisper_transcriber( 15 | mock_whisper_response_text, requests_mock, audio 16 | ): 17 | model = "whisper-1" 18 | connection = connections.Whisper( 19 | url="https://your-url/", 20 | api_key="api-key", 21 | ) 22 | connection_url = urljoin(connection.url, "audio/transcriptions") 23 | call_mock = requests_mock.post( 24 | url=connection_url, text=json.dumps({"text": mock_whisper_response_text}) 25 | ) 26 | 27 | wf_whisper = Workflow( 28 | flow=Flow( 29 | nodes=[WhisperSTT(name="whisper", model=model, connection=connection)] 30 | ), 31 | ) 32 | input_data = {"audio": audio} 33 | response = wf_whisper.run(input_data=input_data) 34 | 35 | expected_result = RunnableResult( 36 | status=RunnableStatus.SUCCESS, 37 | input=input_data, 38 | output={"content": mock_whisper_response_text}, 39 | ).to_dict(skip_format_types={BytesIO, bytes}) 40 | 41 | expected_output = {wf_whisper.flow.nodes[0].id: expected_result} 42 | assert response == RunnableResult( 43 | status=RunnableStatus.SUCCESS, 44 
| input=input_data, 45 | output=expected_output, 46 | ) 47 | assert call_mock.called_once 48 | assert call_mock.last_request.url == connection_url 49 | assert ( 50 | call_mock.last_request.headers.get("Authorization") 51 | == f"Bearer {connection.api_key}" 52 | ) 53 | -------------------------------------------------------------------------------- /tests/integration/nodes/converters/test_docx.py: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | 3 | from docx import Document as DocxDocument 4 | 5 | from dynamiq import Workflow 6 | from dynamiq.flows import Flow 7 | from dynamiq.nodes.converters.docx import DOCXFileConverter 8 | from dynamiq.runnables import RunnableResult, RunnableStatus 9 | from dynamiq.types import Document 10 | 11 | 12 | def test_workflow_with_docx_converter(): 13 | content = "Hello, World!" 14 | 15 | doc = DocxDocument() 16 | doc.add_paragraph(content) 17 | 18 | docx_converter = DOCXFileConverter() 19 | wf_docx = Workflow(flow=Flow(nodes=[docx_converter])) 20 | file = BytesIO() 21 | doc.save(file) 22 | file.name = "mock.docx" 23 | input_data = {"files": [file]} 24 | 25 | response = wf_docx.run(input_data=input_data) 26 | document_id = response.output[next(iter(response.output))]["output"]["documents"][0]["id"] 27 | docx_converter_expected_result = RunnableResult( 28 | status=RunnableStatus.SUCCESS, 29 | input=input_data, 30 | output={"documents": [Document(id=document_id, content=content, metadata={"file_path": file.name})]}, 31 | ).to_dict(skip_format_types={BytesIO, bytes}) 32 | 33 | expected_output = {docx_converter.id: docx_converter_expected_result} 34 | assert response == RunnableResult(status=RunnableStatus.SUCCESS, input=input_data, output=expected_output) 35 | -------------------------------------------------------------------------------- /tests/integration/nodes/llms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/nodes/llms/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/operators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/nodes/operators/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/nodes/tools/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/validators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration/nodes/validators/__init__.py -------------------------------------------------------------------------------- /tests/integration/nodes/validators/test_regex_match.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from dynamiq import Workflow 4 | from dynamiq.flows import Flow 5 | from dynamiq.nodes.validators import RegexMatch 6 | from dynamiq.runnables 
import RunnableResult, RunnableStatus 7 | 8 | 9 | @pytest.mark.parametrize( 10 | ("content", "regex"), 11 | [ 12 | ("abc123", r"^[a-z]+\d+$"), 13 | ("2021-08-15", r"^\d{4}-\d{2}-\d{2}$"), 14 | ("hello_world", r"^\w+$"), 15 | ("user@example.com", r"^[\w\.-]+@[\w\.-]+\.\w+$"), 16 | ("A1B2C3", r"^[A-Z0-9]+$"), 17 | ], 18 | ) 19 | def test_workflow_with_regex_match(content, regex): 20 | wf_regex_match = Workflow( 21 | flow=Flow(nodes=[RegexMatch(regex=regex)]), 22 | ) 23 | input_data = {"content": content} 24 | response = wf_regex_match.run(input_data=input_data) 25 | 26 | expected_result = RunnableResult( 27 | status=RunnableStatus.SUCCESS, 28 | input=input_data, 29 | output={"valid": True, **input_data}, 30 | ).to_dict() 31 | 32 | expected_output = {wf_regex_match.flow.nodes[0].id: expected_result} 33 | assert response == RunnableResult( 34 | status=RunnableStatus.SUCCESS, 35 | input=input_data, 36 | output=expected_output, 37 | ) 38 | -------------------------------------------------------------------------------- /tests/integration/nodes/validators/test_valid_choices.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from dynamiq import Workflow 4 | from dynamiq.flows import Flow 5 | from dynamiq.nodes.validators import ValidChoices 6 | from dynamiq.runnables import RunnableResult, RunnableStatus 7 | 8 | 9 | @pytest.mark.parametrize( 10 | ("content", "choices"), 11 | [ 12 | (3, [3, 4, 5, 7, 9]), 13 | ("ch", ["ch1", "ch", "ch2"]), 14 | (True, [True, False]), 15 | (9.5, [8.5, 9.5, 10.5]), 16 | ("apple", ["apple", "banana", "cherry"]), 17 | ], 18 | ) 19 | def test_workflow_with_valid_choices(content, choices): 20 | wf_valid_choices = Workflow( 21 | flow=Flow(nodes=[ValidChoices(choices=choices)]), 22 | ) 23 | input_data = {"content": content} 24 | response = wf_valid_choices.run(input_data=input_data) 25 | 26 | expected_result = RunnableResult( 27 | status=RunnableStatus.SUCCESS, 28 | input=input_data, 29 | output={"valid": True, **input_data}, 30 | ).to_dict() 31 | 32 | expected_output = {wf_valid_choices.flow.nodes[0].id: expected_result} 33 | assert response == RunnableResult( 34 | status=RunnableStatus.SUCCESS, 35 | input=input_data, 36 | output=expected_output, 37 | ) 38 | -------------------------------------------------------------------------------- /tests/integration/nodes/validators/test_valid_json.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from dynamiq import Workflow 4 | from dynamiq.flows import Flow 5 | from dynamiq.nodes.validators import ValidJSON 6 | from dynamiq.runnables import RunnableResult, RunnableStatus 7 | 8 | 9 | @pytest.mark.parametrize( 10 | "content", 11 | [ 12 | {"name": "John", "age": 30, "city": "New York"}, 13 | '{"person": {"name": "John", "age": 30, "address": {"city": "New York", "zipcode": "10021"}}}', 14 | '[{"name": "John", "age": 30}, {"name": "Jane", "age": 25}]', 15 | '{"string": "Hello", "number": 123, "boolean": true, "null": null, "array": [1, 2, 3], ' 16 | '"object": {"key": "value"}}', 17 | "{}", 18 | ], 19 | ) 20 | def test_workflow_with_valid_json(content): 21 | wf_valid_json = Workflow( 22 | flow=Flow(nodes=[ValidJSON()]), 23 | ) 24 | input_data = {"content": content} 25 | response = wf_valid_json.run(input_data=input_data) 26 | 27 | expected_result = RunnableResult( 28 | status=RunnableStatus.SUCCESS, 29 | input=input_data, 30 | output={"valid": True, **input_data}, 31 | ).to_dict() 32 | 33 | expected_output = 
{wf_valid_json.flow.nodes[0].id: expected_result} 34 | assert response == RunnableResult( 35 | status=RunnableStatus.SUCCESS, 36 | input=input_data, 37 | output=expected_output, 38 | ) 39 | -------------------------------------------------------------------------------- /tests/integration/nodes/validators/test_valid_python.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from dynamiq import Workflow 4 | from dynamiq.flows import Flow 5 | from dynamiq.nodes.validators import ValidPython 6 | from dynamiq.runnables import RunnableResult, RunnableStatus 7 | 8 | 9 | @pytest.mark.parametrize( 10 | "content", 11 | [ 12 | "def add(a, b):\n return a + b", 13 | "for i in range(10):\n print(i)", 14 | "class MyClass:\n def __init__(self, value):\n " 15 | "self.value = value\n\n def get_value(self):\n " 16 | "return self.value", 17 | "squares = [x**2 for x in range(10)]", 18 | "import math\nprint(math.sqrt(16))", 19 | ], 20 | ) 21 | def test_workflow_with_valid_python(content): 22 | wf_valid_python = Workflow( 23 | flow=Flow(nodes=[ValidPython()]), 24 | ) 25 | input_data = {"content": content} 26 | response = wf_valid_python.run(input_data=input_data) 27 | 28 | expected_result = RunnableResult( 29 | status=RunnableStatus.SUCCESS, 30 | input=input_data, 31 | output={"valid": True, **input_data}, 32 | ).to_dict() 33 | 34 | expected_output = {wf_valid_python.flow.nodes[0].id: expected_result} 35 | assert response == RunnableResult( 36 | status=RunnableStatus.SUCCESS, 37 | input=input_data, 38 | output=expected_output, 39 | ) 40 | -------------------------------------------------------------------------------- /tests/integration_with_creds/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration_with_creds/__init__.py -------------------------------------------------------------------------------- /tests/integration_with_creds/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration_with_creds/agents/__init__.py -------------------------------------------------------------------------------- /tests/integration_with_creds/memory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration_with_creds/memory/__init__.py -------------------------------------------------------------------------------- /tests/integration_with_creds/test_rag_yaml.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | from dynamiq import ROOT_PATH 6 | from dynamiq.connections.managers import get_connection_manager 7 | from dynamiq.runnables import RunnableStatus 8 | from examples.components.rag.vector_stores.dag.dag_yaml import indexing_flow, retrieval_flow 9 | 10 | 11 | @pytest.fixture 12 | def rag_examples_folder(): 13 | return os.path.join(os.path.dirname(ROOT_PATH), "examples", "components", "rag", "vector_stores", "dag") 14 | 15 | 16 | @pytest.fixture 17 | def rag_data_path(): 18 | return os.path.join(os.path.dirname(ROOT_PATH), "examples", "components", "data") 19 | 20 | 21 | @pytest.mark.parametrize("rag_yaml_file_name", ["dag_html_pinecone.yaml"]) 
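# Runs the YAML-defined indexing DAG end to end against live connections (see tests/integration_with_creds).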
--------------------------------------------------------------------------------
/tests/integration_with_creds/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration_with_creds/__init__.py
--------------------------------------------------------------------------------
/tests/integration_with_creds/agents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration_with_creds/agents/__init__.py
--------------------------------------------------------------------------------
/tests/integration_with_creds/memory/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dynamiq-ai/dynamiq/4c9c96e5d14c4b4f39c751e5a1021224f71808f8/tests/integration_with_creds/memory/__init__.py
--------------------------------------------------------------------------------
/tests/integration_with_creds/test_rag_yaml.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | 
 3 | import pytest
 4 | 
 5 | from dynamiq import ROOT_PATH
 6 | from dynamiq.connections.managers import get_connection_manager
 7 | from dynamiq.runnables import RunnableStatus
 8 | from examples.components.rag.vector_stores.dag.dag_yaml import indexing_flow, retrieval_flow
 9 | 
10 | 
11 | @pytest.fixture
12 | def rag_examples_folder():
13 |     return os.path.join(os.path.dirname(ROOT_PATH), "examples", "components", "rag", "vector_stores", "dag")
14 | 
15 | 
16 | @pytest.fixture
17 | def rag_data_path():
18 |     return os.path.join(os.path.dirname(ROOT_PATH), "examples", "components", "data")
19 | 
20 | 
21 | @pytest.mark.parametrize("rag_yaml_file_name", ["dag_html_pinecone.yaml"])
22 | def test_indexing_flow(rag_examples_folder, rag_data_path, rag_yaml_file_name):
23 |     with get_connection_manager() as cm:
24 |         result, dumped_tracing = indexing_flow(
25 |             yaml_file_path=os.path.join(rag_examples_folder, rag_yaml_file_name),
26 |             data_folder_path=rag_data_path,
27 |             cm=cm,
28 |         )
29 |     assert result.status == RunnableStatus.SUCCESS
30 |     assert dumped_tracing
31 | 
32 | 
33 | @pytest.mark.parametrize("rag_yaml_file_name", ["dag_pinecone.yaml"])
34 | def test_retrieval_flow(rag_examples_folder, rag_data_path, rag_yaml_file_name):
35 |     with get_connection_manager() as cm:
36 |         result, dumped_tracing = retrieval_flow(
37 |             yaml_file_path=os.path.join(rag_examples_folder, rag_yaml_file_name),
38 |             cm=cm,
39 |         )
40 |     assert result.status == RunnableStatus.SUCCESS
41 |     assert dumped_tracing
42 | 
--------------------------------------------------------------------------------
/tests/unit/components/retrievers/test_chroma_document_retriever.py:
--------------------------------------------------------------------------------
 1 | from unittest.mock import MagicMock
 2 | 
 3 | from dynamiq.components.retrievers.chroma import ChromaDocumentRetriever
 4 | from dynamiq.storages.vector import ChromaVectorStore
 5 | 
 6 | 
 7 | class TestChromaDocumentRetriever:
 8 |     def test_run_method(self, mock_documents):
 9 |         mock_vector_store = MagicMock(spec=ChromaVectorStore)
10 |         mock_vector_store.search_embeddings.return_value = [mock_documents]
11 | 
12 |         retriever = ChromaDocumentRetriever(vector_store=mock_vector_store, filters={"field": "value"}, top_k=5)
13 | 
14 |         result = retriever.run(
15 |             query_embedding=[0.1, 0.2, 0.3],
16 |             exclude_document_embeddings=True,
17 |             top_k=2,
18 |             filters={"new_field": "new_value"},
19 |         )
20 | 
21 |         mock_vector_store.search_embeddings.assert_called_once_with(
22 |             query_embeddings=[[0.1, 0.2, 0.3]], filters={"new_field": "new_value"}, top_k=2
23 |         )
24 | 
25 |         assert result == {"documents": mock_documents}
26 | 
27 |     def test_run_method_with_defaults(self, mock_documents, mock_filters):
28 |         mock_vector_store = MagicMock(spec=ChromaVectorStore)
29 |         mock_vector_store.search_embeddings.return_value = [mock_documents]
30 | 
31 |         retriever = ChromaDocumentRetriever(vector_store=mock_vector_store, filters=mock_filters, top_k=5)
32 | 
33 |         result = retriever.run(query_embedding=[0.1, 0.2, 0.3])
34 | 
35 |         mock_vector_store.search_embeddings.assert_called_once_with(
36 |             query_embeddings=[[0.1, 0.2, 0.3]], filters=mock_filters, top_k=5
37 |         )
38 | 
39 |         assert result == {"documents": mock_documents}
40 | 
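Together, the two Chroma tests pin down the override semantics the retriever is expected to honor: filters and top_k passed to run() win for that call, while the constructor values apply when run() receives only the query embedding. A partial override (only top_k, say) is not covered above; a sketch of the implied fallback, under the assumption that each omitted kwarg reverts to its constructor default independently:

from unittest.mock import MagicMock

from dynamiq.components.retrievers.chroma import ChromaDocumentRetriever
from dynamiq.storages.vector import ChromaVectorStore

store = MagicMock(spec=ChromaVectorStore)
store.search_embeddings.return_value = [[]]

retriever = ChromaDocumentRetriever(vector_store=store, filters={"field": "value"}, top_k=5)

# Assumption: only top_k is overridden for this call, so the constructor-level
# filters should still reach the vector store unchanged.
retriever.run(query_embedding=[0.1, 0.2, 0.3], top_k=1)
store.search_embeddings.assert_called_once_with(
    query_embeddings=[[0.1, 0.2, 0.3]], filters={"field": "value"}, top_k=1
)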
--------------------------------------------------------------------------------
/tests/unit/components/retrievers/test_qdrant_document_retriever.py:
--------------------------------------------------------------------------------
 1 | from unittest.mock import MagicMock
 2 | 
 3 | from dynamiq.components.retrievers.qdrant import QdrantDocumentRetriever
 4 | from dynamiq.storages.vector.qdrant import QdrantVectorStore
 5 | 
 6 | 
 7 | class TestQdrantDocumentRetriever:
 8 | 
 9 |     def test_run_method(self, mock_documents):
10 |         mock_vector_store = MagicMock(spec=QdrantVectorStore)
11 |         mock_vector_store._query_by_embedding.return_value = mock_documents
12 | 
13 |         retriever = QdrantDocumentRetriever(vector_store=mock_vector_store, filters={"field": "value"}, top_k=5)
14 | 
15 |         result = retriever.run(
16 |             query_embedding=[0.1, 0.2, 0.3],
17 |             exclude_document_embeddings=True,
18 |             top_k=2,
19 |             filters={"new_field": "new_value"},
20 |         )
21 | 
22 |         mock_vector_store._query_by_embedding.assert_called_once_with(
23 |             query_embedding=[0.1, 0.2, 0.3],
24 |             filters={"new_field": "new_value"},
25 |             top_k=2,
26 |             return_embedding=False,
27 |             content_key=None,
28 |         )
29 | 
30 |         assert result == {"documents": mock_documents}
31 | 
32 |     def test_run_method_with_defaults(self, mock_documents, mock_filters):
33 |         mock_vector_store = MagicMock(spec=QdrantVectorStore)
34 |         mock_vector_store._query_by_embedding.return_value = mock_documents
35 | 
36 |         retriever = QdrantDocumentRetriever(vector_store=mock_vector_store, filters=mock_filters, top_k=5)
37 | 
38 |         result = retriever.run(query_embedding=[0.1, 0.2, 0.3])
39 | 
40 |         mock_vector_store._query_by_embedding.assert_called_once_with(
41 |             query_embedding=[0.1, 0.2, 0.3], filters=mock_filters, top_k=5, return_embedding=False, content_key=None
42 |         )
43 | 
44 |         assert result == {"documents": mock_documents}
45 | 
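Note the naming inversion these Qdrant tests rely on: the retriever's public exclude_document_embeddings flag is translated into return_embedding=False at the vector-store layer, and both tests expect return_embedding=False even when the flag is omitted, so its default is evidently True. A condensed restatement of that mapping, as a sketch:

from unittest.mock import MagicMock

from dynamiq.components.retrievers.qdrant import QdrantDocumentRetriever
from dynamiq.storages.vector.qdrant import QdrantVectorStore

store = MagicMock(spec=QdrantVectorStore)
store._query_by_embedding.return_value = []

retriever = QdrantDocumentRetriever(vector_store=store, filters={}, top_k=3)

# exclude_document_embeddings=True (also the default) becomes return_embedding=False
# in the call that reaches the store.
retriever.run(query_embedding=[0.1, 0.2, 0.3], exclude_document_embeddings=True)
store._query_by_embedding.assert_called_once_with(
    query_embedding=[0.1, 0.2, 0.3], filters={}, top_k=3, return_embedding=False, content_key=None
)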
30 | """ 31 | schema = node._generate_json_schema(**params) 32 | data = generate_data_from_schema(schema) 33 | 34 | try: 35 | _, node_instance = generate_node(node, data, []) 36 | assert isinstance(node_instance, node) 37 | except ValueError as e: 38 | pytest.fail(f"Failed to create Node {node.__name__} instance: {str(e)}") 39 | -------------------------------------------------------------------------------- /tests/unit/storages/vector/qdrant/test_converters.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from qdrant_client.http import models as rest 3 | 4 | from dynamiq.storages.vector.qdrant.converters import convert_id, convert_qdrant_point_to_dynamiq_document 5 | 6 | 7 | def test_convert_id_is_deterministic(): 8 | first_id = convert_id("new-test-id") 9 | second_id = convert_id("new-test-id") 10 | assert first_id == second_id 11 | 12 | 13 | def test_point_to_document_reverts_proper_structure_from_record_without_sparse(): 14 | 15 | point = rest.Record( 16 | id="a1b2c3d4-5678-90ab-cdef-1234567890ab", 17 | payload={ 18 | "id": "new-id", 19 | "content": "New content", 20 | "metadata": { 21 | "new_field": 42, 22 | }, 23 | }, 24 | vector=[0.5, 0.5, 0.5, 0.5], 25 | ) 26 | document = convert_qdrant_point_to_dynamiq_document(point, content_key="content") 27 | assert "new-id" == document.id 28 | assert "New content" == document.content 29 | assert {"new_field": 42} == document.metadata 30 | assert 0.0 == np.sum(np.array([0.5, 0.5, 0.5, 0.5]) - document.embedding) 31 | -------------------------------------------------------------------------------- /tests/unit/storages/vector/test_utils.py: -------------------------------------------------------------------------------- 1 | from dynamiq.storages.vector.utils import create_file_id_filter, create_file_ids_filter 2 | 3 | 4 | def test_create_file_id_filter(): 5 | result = create_file_id_filter("file1") 6 | assert result["operator"] == "AND" 7 | assert len(result["conditions"]) == 1 8 | assert result["conditions"][0]["field"] == "file_id" 9 | assert result["conditions"][0]["operator"] == "==" 10 | assert result["conditions"][0]["value"] == "file1" 11 | 12 | 13 | def test_create_file_ids_filter_empty(): 14 | result = create_file_ids_filter([]) 15 | assert result["operator"] == "AND" 16 | assert len(result["conditions"]) == 1 17 | assert result["conditions"][0]["field"] == "file_id" 18 | assert result["conditions"][0]["operator"] == "in" 19 | assert result["conditions"][0]["value"] == [] 20 | 21 | 22 | def test_create_file_ids_filter_single(): 23 | result = create_file_ids_filter(["file1"]) 24 | assert result["conditions"][0]["value"] == ["file1"] 25 | 26 | 27 | def test_create_file_ids_filter_multiple(): 28 | result = create_file_ids_filter(["file1", "file2", "file3"]) 29 | assert result["conditions"][0]["value"] == ["file1", "file2", "file3"] 30 | --------------------------------------------------------------------------------