├── docs
├── llm-providers.md
├── examples.md
├── frameworks
│ ├── react.md
│ ├── crewai-flow.md
│ ├── react-lcel.md
│ ├── langgraph.md
│ ├── crewai.md
│ └── agno.md
├── installation.md
├── index.md
└── usage.md
├── requirements.txt
├── multi_agent_generator
├── __init__.py
├── frameworks
│ ├── __init__.py
│ ├── langgraph_generator.py
│ ├── agno_generator.py
│ ├── crewai_generator.py
│ ├── react_generator.py
│ └── crewai_flow_generator.py
├── model_inference.py
├── __main__.py
└── generator.py
├── mkdocs.yml
├── LICENSE
├── pyproject.toml
├── setup.py
├── .gitignore
├── README.md
└── streamlit_app.py
/docs/llm-providers.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit>=1.22.0
2 | crewai>=0.28.0
3 | openai>=1.3.0
4 | langchain>=0.0.271
5 | langgraph>=0.0.16
6 | ibm-watsonx-ai>=0.2.0
7 | python-dotenv>=1.0.0
8 | pydantic>=2.0.0
9 | langchain-openai==0.3.28
10 | agno==1.8.4
11 |
--------------------------------------------------------------------------------
/multi_agent_generator/__init__.py:
--------------------------------------------------------------------------------
1 | ## multi-agent-generator/__init__.py
2 | __version__ = "0.4.0"
3 |
4 | from .model_inference import (
5 | ModelInference,
6 | Message
7 | )
8 |
9 | from .frameworks import (
10 | create_crewai_code,
11 | create_crewai_flow_code,
12 | create_langgraph_code,
13 | create_react_code,
14 | create_agno_code
15 | )
--------------------------------------------------------------------------------
/docs/examples.md:
--------------------------------------------------------------------------------
1 | # Examples
2 | ## Research Assistant
3 |
4 | ```
5 | I need a research assistant that summarizes papers and answers questions
6 | ```
7 |
8 | ## Content Creation Team
9 |
10 | ```
11 | I need a team to create viral social media content and manage our brand presence
12 | ```
13 |
14 | ## Customer Support (LangGraph)
15 |
16 | ```
17 | Build me a LangGraph workflow for customer support
18 | ```
--------------------------------------------------------------------------------
/docs/frameworks/react.md:
--------------------------------------------------------------------------------
1 | # ReAct (Classic)
2 |
3 | ReAct (Reasoning + Acting) combines **thoughts + actions**.
4 | The agent reasons about a problem, then decides when to call a tool.
5 |
6 | ---
7 |
8 | ## Example
9 |
10 | ```bash
11 | multi-agent-generator "Answer math questions using a calculator tool" --framework react
12 | ```
13 |
14 | ### Produces:
15 |
16 | - An agent with reasoning + acting steps
17 |
18 | - Tool definitions with parameters
19 |
20 | - ReAct-style execution loop
--------------------------------------------------------------------------------
/multi_agent_generator/frameworks/__init__.py:
--------------------------------------------------------------------------------
1 | from .crewai_generator import create_crewai_code
2 | from .crewai_flow_generator import create_crewai_flow_code
3 | from .langgraph_generator import create_langgraph_code
4 | from .react_generator import create_react_code
5 | from .agno_generator import create_agno_code
6 |
7 | __all__ = [
8 | 'create_crewai_code',
9 | 'create_crewai_flow_code',
10 | 'create_langgraph_code',
11 | 'create_react_code',
12 | 'create_agno_code',
13 | ]
--------------------------------------------------------------------------------
/docs/frameworks/crewai-flow.md:
--------------------------------------------------------------------------------
1 | # CrewAI Flow
2 |
3 | CrewAI Flow extends CrewAI with **event-driven workflows**.
4 | It enables sequential, parallel, and conditional task execution with state management.
5 |
6 | ---
7 |
8 | ## Example
9 |
10 | ```bash
11 | multi-agent-generator "Analyze customer reviews and generate insights" --framework crewai-flow
12 | ```
13 |
14 | ### This produces:
15 | - Specialized agents (e.g., Data Collector, Data Analyst, Writer)
16 |
17 | - Sequential flow: collect → analyze → summarize
18 |
19 | - Task delegation and transitions defined
--------------------------------------------------------------------------------
/docs/frameworks/react-lcel.md:
--------------------------------------------------------------------------------
1 | # ReAct (LCEL)
2 |
3 | ReAct (Reasoning + Acting) with **LangChain Expression Language (LCEL)**.
4 | Supports **multi-step reasoning**, tool usage, and history tracking.
5 |
6 | ---
7 |
8 | ## Example
9 |
10 | ```bash
11 | multi-agent-generator "Find AI papers and summarize them" --framework react-lcel
12 | ```
13 |
14 | ### Generated agent includes:
15 |
16 | - Multi-step reasoning traces
17 |
18 | - Tool calls with inputs/outputs
19 |
20 | - LangChain Expression Language chain
21 |
22 | ## Example Snippet
23 | ```python
24 | chain = (
25 | {"input": RunnablePassthrough(), "history": RunnablePassthrough()}
26 | | react_prompt
27 | | llm
28 | | StrOutputParser()
29 | )
30 | ```
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ## Basic Installation
4 | ```bash
5 | pip install multi-agent-generator
6 | ```
7 |
8 | ## Development Installation
9 | ```bash
10 | pip install multi-agent-generator[dev]
11 | ```
12 |
13 | ## Prerequisites
14 |
15 | * At least one supported LLM provider (OpenAI, WatsonX, Ollama, etc.)
16 | * Environment variables setup:
17 |
18 | * `OPENAI_API_KEY` (for OpenAI)
19 | * `WATSONX_API_KEY`, `WATSONX_PROJECT_ID`, `WATSONX_URL` (for WatsonX)
20 | * `OLLAMA_URL` (for Ollama)
21 | * Or a generic `API_KEY` / `API_BASE` if supported by LiteLLM
22 |
23 | * Be aware `Agno` only works with `OPENAI_API_KEY` without tools for Now, and will be expanded for further API's and tools in the future.
24 |
25 | > ⚡ You can freely switch providers using `--provider` in CLI or by setting environment variables.
26 |
--------------------------------------------------------------------------------
/docs/frameworks/langgraph.md:
--------------------------------------------------------------------------------
1 | # LangGraph Framework
2 |
3 | LangGraph is LangChain's framework for **stateful, multi-actor applications**.
4 | It represents workflows as directed graphs with:
5 |
6 | - **Nodes**: agents, tools, or operations
7 | - **Edges**: control/data flow
8 | - **Conditions**: define branching behavior
9 |
10 | ---
11 |
12 | ## Example
13 |
14 | ```bash
15 | multi-agent-generator "Build me a LangGraph workflow for customer support" --framework langgraph
16 | ```
17 |
18 | ### Generates a graph like:
19 | ```json
20 | {
21 | "agents": [{ "name": "support_agent", "llm": "gpt-4.1-mini" }],
22 | "nodes": [
23 | { "name": "greet_customer", "agent": "support_agent" },
24 | { "name": "resolve_issue", "agent": "support_agent" }
25 | ],
26 | "edges": [
27 | { "source": "greet_customer", "target": "resolve_issue" }
28 | ]
29 | }
30 | ```
--------------------------------------------------------------------------------
/docs/frameworks/crewai.md:
--------------------------------------------------------------------------------
1 | # CrewAI Framework
2 |
3 | CrewAI orchestrates **role-playing autonomous AI agents**.
4 | Each agent has:
5 |
6 | - **Role**: what they do
7 | - **Goal**: their objective
8 | - **Backstory**: context
9 | - **Tools**: available abilities
10 |
11 | Tasks are assigned to agents with expected outputs.
12 |
13 | ---
14 |
15 | ## Example
16 |
17 | ```bash
18 | multi-agent-generator "Research AI trends and write a summary" --framework crewai
19 | ```
20 |
21 | ### Produces agents like:
22 | ```json
23 | {
24 | "agents": [
25 | {
26 | "name": "research_specialist",
27 | "role": "Research Specialist",
28 | "goal": "Gather AI research trends",
29 | "tools": ["search_tool"]
30 | },
31 | {
32 | "name": "writer",
33 | "role": "Content Writer",
34 | "goal": "Write a summary",
35 | "tools": ["editor_tool"]
36 | }
37 | ],
38 | "tasks": [...]
39 | }
40 | ```
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Multi-Agent Generator
2 |
3 | 🚀 **Multi-Agent Generator** is a tool that transforms plain English instructions into fully configured multi-agent AI teams.
4 | No scripting. No complexity. Just describe what you want, and let it build the agents.
5 |
6 | **Frameworks supported**: CrewAI, CrewAI Flow, LangGraph, Agno, ReAct (Classic + LCEL).
7 | **LLM providers supported**: Any provider via LiteLLM (OpenAI, IBM WatsonX, Ollama, Anthropic, etc.).
8 |
9 | ---
10 |
11 | ## ✨ Features
12 |
13 | - Generate multi-agent workflows in multiple frameworks
14 | - Provider-agnostic (LiteLLM under the hood)
15 | - Streamlit-based UI for interactive generation
16 | - CLI tool for quick workflows
17 | - JSON + code output formats
18 | - Extensible agent/task configuration
19 |
20 | ---
21 |
22 | ## 🔗 Quick Links
23 |
24 | - [Installation](installation.md)
25 | - [Usage](usage.md)
26 | - [Frameworks](frameworks/crewai.md)
27 | - [Examples](examples.md)
28 | - [Development Guide](development.md)
29 |
--------------------------------------------------------------------------------
/docs/usage.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | ## CLI
4 |
5 | Basic usage:
6 |
7 | ```bash
8 | multi-agent-generator "I need a research assistant that summarizes papers and answers questions" --framework crewai
9 | ```
10 |
11 | Using WatsonX instead:
12 |
13 | ```bash
14 | multi-agent-generator "I need a research assistant that summarizes papers and answers questions" --framework crewai --provider watsonx
15 | ```
16 |
17 | Try Agno:
18 | ```bash
19 | multi_agent_generator "build a researcher and writer" --framework agno --provider openai --output agno.py --format code
20 | ```
21 |
22 | Using Ollama locally:
23 |
24 | ```bash
25 | multi-agent-generator "Build me a ReAct assistant for customer support" --framework react-lcel --provider ollama
26 | ```
27 |
28 | Save output to a file:
29 |
30 | ```bash
31 | multi-agent-generator "I need a team to create viral social media content" --framework langgraph --output social_team.py
32 | ```
33 |
34 | Get JSON configuration only:
35 |
36 | ```bash
37 | multi-agent-generator "I need a team to analyze customer data" --framework react --format json
38 | ```
39 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Multi-Agent Generator
2 | site_description: "Generate multi-agent AI workflows from plain English using CrewAI, LangGraph, Agno, and ReAct. Powered by LiteLLM."
3 | site_url: https://aakriti1318.github.io/multi-agent-generator/
4 | repo_url: https://github.com/aakriti1318/multi-agent-generator
5 | repo_name: aakriti1318/multi-agent-generator
6 |
7 | theme:
8 | name: material
9 | features:
10 | - navigation.instant
11 | - navigation.sections
12 | - navigation.tabs
13 | - content.code.copy
14 | - content.tabs.link
15 | palette:
16 | - scheme: default
17 | primary: teal
18 | accent: purple
19 |
20 | nav:
21 | - Home: index.md
22 | - Installation: installation.md
23 | - Usage: usage.md
24 | - Frameworks:
25 | - CrewAI: frameworks/crewai.md
26 | - CrewAI Flow: frameworks/crewai-flow.md
27 | - LangGraph: frameworks/langgraph.md
28 | - ReAct (Classic): frameworks/react.md
29 | - ReAct (LCEL): frameworks/react-lcel.md
30 | - Agno: frameworks/agno.md
31 | - LLM Providers: llm-providers.md
32 | - Examples: examples.md
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Aakriti Aggarwal
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/docs/frameworks/agno.md:
--------------------------------------------------------------------------------
1 | # Agno Framework
2 |
3 | Agno coordinates and orchestrates **role-playing autonomous AI agents**.
4 | Each agent has:
5 |
6 | - **Name and Role**: what they do
7 | - **Goal**: their objective
8 | - **Backstory**: context
9 | - **Tools**: available abilities
10 |
11 |
12 | Tasks are assigned to agents with expected outputs.
13 |
14 | ## Example
15 |
16 | ```bash
17 | multi-agent-generator "Research AI trends and write a summary" --framework agno
18 | ```
19 |
20 | ### Produces agents like:
21 | ```json
22 | {
23 | "model_id": "gpt-4o",
24 | "process": "sequential",
25 | "agents": [
26 | {
27 | "name": "research_specialist",
28 | "role": "Research Specialist",
29 | "goal": "Gather AI research trends",
30 | "backstory": "Expert in sourcing and aggregating technology news",
31 | "tools": ["DuckDuckGoTools", "Newspaper4kTools"]
32 | },
33 | {
34 | "name": "writer",
35 | "role": "Content Writer",
36 | "goal": "Write a clear summary",
37 | "backstory": "Skilled at concise technical writing",
38 | "tools": []
39 | }
40 | ],
41 | "tasks": [
42 | {
43 | "name": "research_task",
44 | "description": "Find recent AI trends across news and blogs",
45 | "agent": "research_specialist",
46 | "expected_output": "Bullet list of trends with links"
47 | },
48 | {
49 | "name": "writing_task",
50 | "description": "Summarize the trends for a general audience",
51 | "agent": "writer",
52 | "expected_output": "400-word Markdown summary"
53 | }
54 | ]
55 | }
56 | ```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=42", "wheel", "setuptools_scm"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "multi-agent-generator"
7 | version = "0.4.0"
8 | description = "Generate multi-agent AI teams from plain English, supporting multiple LLM backends via LiteLLM"
9 | readme = "README.md"
10 | requires-python = ">=3.8"
11 | license = { text = "MIT" }
12 | authors = [{ name = "Aakriti Aggarwal", email = "aakritiaggarwal2k@gmail.com" }]
13 | classifiers = [
14 | "Development Status :: 4 - Beta",
15 | "Intended Audience :: Developers",
16 | "License :: OSI Approved :: MIT License",
17 | "Programming Language :: Python :: 3",
18 | "Programming Language :: Python :: 3.8",
19 | "Programming Language :: Python :: 3.9",
20 | "Programming Language :: Python :: 3.10",
21 | ]
22 | dependencies = [
23 | "litellm>=0.1.0", # unified provider-agnostic client
24 | "streamlit>=1.22.0",
25 | "langchain>=0.0.271",
26 | "langchain-core>=0.0.1",
27 | "langchain-openai>=0.0.1",
28 | "langgraph>=0.0.16",
29 | "python-dotenv>=1.0.0",
30 | "pydantic>=2.0.0",
31 | "agno==1.8.4",
32 | ]
33 |
34 | [project.optional-dependencies]
35 | # keep watsonx as opt-in if people want IBM-specific SDK
36 | watsonx = ["ibm-watsonx-ai>=0.2.0"]
37 | dev = ["pytest>=7.0.0", "black>=23.0.0", "flake8>=6.0.0", "twine", "build"]
38 |
39 | [project.urls]
40 | "Homepage" = "https://github.com/aakriti1318/multi-agent-generator"
41 | "Bug Tracker" = "https://github.com/aakriti1318/multi-agent-generator/issues"
42 |
43 | [project.scripts]
44 | multi-agent-generator = "multi_agent_generator.__main__:main"
45 |
46 | [tool.setuptools.packages.find]
47 | include = ["multi_agent_generator*"]
48 |
--------------------------------------------------------------------------------
/multi_agent_generator/model_inference.py:
--------------------------------------------------------------------------------
1 | """
2 | Model inference utilities using LiteLLM for multiple providers.
3 | """
4 | import os
5 | from typing import Dict, List, Optional, Union
6 | from pydantic import BaseModel
7 | from dotenv import load_dotenv
8 | from litellm import completion # Unified API
9 |
10 | # Load environment variables
11 | load_dotenv()
12 |
13 |
14 | class Message(BaseModel):
15 | role: str
16 | content: str
17 |
18 |
19 | class ModelInference:
20 | """
21 | Unified LiteLLM-based model inference class.
22 | Supports OpenAI, WatsonX, Ollama, Anthropic, etc. via LiteLLM.
23 | """
24 |
25 | def __init__(
26 | self,
27 | model: str,
28 | api_key: Optional[str] = None,
29 | api_base: Optional[str] = None,
30 | **default_params
31 | ):
32 | self.model = model
33 | self.api_key = api_key or os.getenv("API_KEY") or os.getenv("OPENAI_API_KEY")
34 | self.api_base = api_base or os.getenv("API_BASE")
35 | self.default_params = default_params
36 |
37 | def generate_text(
38 | self,
39 | messages: List[Union[Dict, Message]],
40 | **override_params
41 | ) -> str:
42 | """
43 | Synchronously generate text.
44 | """
45 | try:
46 | msg_list = [m.dict() if isinstance(m, Message) else m for m in messages]
47 | response = completion(
48 | model=self.model,
49 | messages=msg_list,
50 | api_key=self.api_key,
51 | api_base=self.api_base,
52 | **{**self.default_params, **override_params}
53 | )
54 | return response.choices[0].message.content
55 |
56 | except Exception as e:
57 | raise RuntimeError(f"Model inference failed: {e}")
58 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Setup script for multi-agent-generator package.
3 | """
4 | from setuptools import setup, find_packages
5 | import pathlib
6 |
7 | HERE = pathlib.Path(__file__).parent
8 | try:
9 | long_description = (HERE / "README.md").read_text(encoding="utf-8")
10 | except FileNotFoundError:
11 | long_description = "Multi-Agent Generator - Generate multi-agent AI code from natural language"
12 |
13 | REQUIREMENTS = [
14 | "litellm>=0.1.0",
15 | "streamlit>=1.22.0",
16 | "langchain>=0.0.271",
17 | "langchain-core>=0.0.1",
18 | "langchain-openai>=0.0.1",
19 | "langgraph>=0.0.16",
20 | "python-dotenv>=1.0.0",
21 | "pydantic>=2.0.0",
22 | "agno==1.8.4",
23 | ]
24 |
25 | setup(
26 | name="multi-agent-generator",
27 | version="0.4.0",
28 | description="Generate multi-agent AI teams from plain English using LiteLLM-compatible providers",
29 | long_description=long_description,
30 | long_description_content_type="text/markdown",
31 | author="Aakriti Aggarwal",
32 | author_email="aakritiaggarwal2k@gmail.com",
33 | url="https://github.com/aakriti1318/multi-agent-generator",
34 | packages=find_packages(),
35 | include_package_data=True,
36 | install_requires=REQUIREMENTS,
37 | extras_require={
38 | "watsonx": ["ibm-watsonx-ai>=0.2.0"],
39 | "dev": ["pytest>=7.0.0", "black>=23.0.0", "flake8>=6.0.0", "twine", "build"],
40 | },
41 | entry_points={
42 | "console_scripts": [
43 | "multi-agent-generator=multi_agent_generator.__main__:main",
44 | ],
45 | },
46 | classifiers=[
47 | "Development Status :: 4 - Beta",
48 | "Intended Audience :: Developers",
49 | "License :: OSI Approved :: MIT License",
50 | "Programming Language :: Python :: 3",
51 | "Programming Language :: Python :: 3.8",
52 | "Programming Language :: Python :: 3.9",
53 | "Programming Language :: Python :: 3.10",
54 | ],
55 | python_requires=">=3.8",
56 | )
57 |
--------------------------------------------------------------------------------
/multi_agent_generator/__main__.py:
--------------------------------------------------------------------------------
1 | # mutli-agent-generator/__main__.py
2 | """
3 | Command line interface for multi-agent-generator.
4 | """
5 | import argparse
6 | import json
7 | from dotenv import load_dotenv
8 | from .generator import AgentGenerator
9 | from .frameworks import (
10 | create_crewai_code,
11 | create_crewai_flow_code,
12 | create_langgraph_code,
13 | create_react_code,
14 | create_agno_code
15 | )
16 |
17 | # Load environment variables from .env file if present
18 | load_dotenv()
19 |
20 |
21 | def main():
22 | """Command line entry point."""
23 | parser = argparse.ArgumentParser(description="Generate multi-agent AI code")
24 | parser.add_argument("prompt", help="Plain English description of what you need")
25 | parser.add_argument(
26 | "--framework",
27 | choices=["crewai", "crewai-flow", "langgraph", "react", "react-lcel", "agno"],
28 | default="crewai",
29 | help="Agent framework to use (default: crewai)"
30 | )
31 | parser.add_argument(
32 | "--process",
33 | choices=["sequential", "hierarchical"],
34 | default="sequential",
35 | help="Process type for CrewAI (default: sequential)"
36 | )
37 | parser.add_argument(
38 | "--provider",
39 | default="openai",
40 | help="LLM provider to use (e.g., openai, watsonx, ollama, anthropic, groq, etc.)"
41 | )
42 | parser.add_argument(
43 | "--output",
44 | help="Output file path (default: print to console)"
45 | )
46 | parser.add_argument(
47 | "--format",
48 | choices=["code", "json", "both"],
49 | default="code",
50 | help="Output format (default: code)"
51 | )
52 |
53 |
54 | args = parser.parse_args()
55 |
56 | # Initialize generator
57 | generator = AgentGenerator(provider=args.provider)
58 | print(f"Analyzing prompt using {args.provider.upper()}...")
59 | config = generator.analyze_prompt(args.prompt, args.framework)
60 |
61 | # Add process type to config for CrewAI frameworks
62 | if args.framework in ["crewai", "crewai-flow"]:
63 | config["process"] = args.process
64 | print(f"Using {args.process} process for CrewAI...")
65 |
66 | # Generate code based on the framework
67 | print(f"Generating {args.framework} code...")
68 | if args.framework == "crewai":
69 | code = create_crewai_code(config)
70 | elif args.framework == "crewai-flow":
71 | code = create_crewai_flow_code(config)
72 | elif args.framework == "langgraph":
73 | code = create_langgraph_code(config)
74 | elif args.framework == "react":
75 | code = create_react_code(config)
76 | elif args.framework == "react-lcel":
77 | from .frameworks.react_generator import create_react_lcel_code
78 | code = create_react_lcel_code(config)
79 | elif args.framework == "agno":
80 | code = create_agno_code(config)
81 |
82 | else:
83 | print(f"Unsupported framework: {args.framework}")
84 | return
85 |
86 | # Prepare output
87 | if args.format == "code":
88 | output = code
89 | elif args.format == "json":
90 | output = json.dumps(config, indent=2)
91 | else: # both
92 | output = f"// Configuration:\n{json.dumps(config, indent=2)}\n\n// Generated Code:\n{code}"
93 |
94 | # Write output
95 | if args.output:
96 | with open(args.output, "w") as f:
97 | f.write(output)
98 | print(f"Output successfully written to {args.output}")
99 | else:
100 | print(output)
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | multi_agent_generator.egg-info/
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .nox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | *.py,cover
52 | .hypothesis/
53 | .pytest_cache/
54 | cover/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | .pybuilder/
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | # For a library or package, you might want to ignore these files since the code is
89 | # intended to run in multiple environments; otherwise, check them in:
90 | # .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # poetry
100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101 | # This is especially recommended for binary packages to ensure reproducibility, and is more
102 | # commonly ignored for libraries.
103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104 | #poetry.lock
105 |
106 | # pdm
107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108 | #pdm.lock
109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110 | # in version control.
111 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
112 | .pdm.toml
113 | .pdm-python
114 | .pdm-build/
115 |
116 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
117 | __pypackages__/
118 |
119 | # Celery stuff
120 | celerybeat-schedule
121 | celerybeat.pid
122 |
123 | # SageMath parsed files
124 | *.sage.py
125 |
126 | # Environments
127 | .env
128 | .venv
129 | env/
130 | venv/
131 | ENV/
132 | env.bak/
133 | venv.bak/
134 | .venv*
135 |
136 | # Spyder project settings
137 | .spyderproject
138 | .spyproject
139 |
140 | # Rope project settings
141 | .ropeproject
142 |
143 | # mkdocs documentation
144 | /site
145 |
146 | # mypy
147 | .mypy_cache/
148 | .dmypy.json
149 | dmypy.json
150 |
151 | # Pyre type checker
152 | .pyre/
153 |
154 | # pytype static type analyzer
155 | .pytype/
156 |
157 | # Cython debug symbols
158 | cython_debug/
159 |
160 | # DS Stores
161 | .DS_Store
162 |
163 | # PyCharm
164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166 | # and can be added to the global gitignore or merged into this file. For a more nuclear
167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
168 | #.idea/
--------------------------------------------------------------------------------
/multi_agent_generator/frameworks/langgraph_generator.py:
--------------------------------------------------------------------------------
1 | # multi_agent_generator/frameworks/langgraph_generator.py
2 | """
3 | Generator for LangGraph code.
4 | """
5 | from typing import Dict, Any
6 |
7 | def create_langgraph_code(config: Dict[str, Any]) -> str:
8 | code = """from langgraph.graph import StateGraph, END
9 | from langchain_core.messages import HumanMessage, AIMessage
10 | from langchain_openai import ChatOpenAI
11 | from langchain_core.tools import BaseTool
12 | from typing import Dict, List, Tuple, Any, TypedDict, Annotated
13 | import operator
14 |
15 | # Define state
16 | class AgentState(TypedDict):
17 | messages: List[BaseMessage]
18 | next: str
19 |
20 | """
21 |
22 | # Generate tool definitions if needed
23 | if any(agent["tools"] for agent in config["agents"]):
24 | code += "# Define tools\n"
25 | tools = set()
26 | for agent in config["agents"]:
27 | tools.update(agent["tools"])
28 |
29 | for tool in tools:
30 | code += f"""class {tool.capitalize()}Tool(BaseTool):
31 | name = "{tool}"
32 | description = "Tool for {tool} operations"
33 |
34 | def _run(self, query: str) -> str:
35 | # Implement actual functionality here
36 | return f"Result from {tool} tool: {{query}}"
37 |
38 | async def _arun(self, query: str) -> str:
39 | # Implement actual functionality here
40 | return f"Result from {tool} tool: {{query}}"
41 |
42 | """
43 |
44 | code += "tools = [\n"
45 | for tool in tools:
46 | code += f" {tool.capitalize()}Tool(),\n"
47 | code += "]\n\n"
48 |
49 | # Generate Agent configurations
50 | for agent in config["agents"]:
51 | code += f"# Agent: {agent['name']}\n"
52 | code += f"def {agent['name']}_agent(state: AgentState) -> AgentState:\n"
53 | code += f" \"\"\"Agent that handles {agent['role']}.\"\"\"\n"
54 | code += f" # Create LLM\n"
55 | code += f" llm = ChatOpenAI(model=\"{agent['llm']}\")\n"
56 | code += f" # Get the most recent message\n"
57 | code += f" messages = state['messages']\n"
58 | code += f" response = llm.invoke(messages)\n"
59 | code += f" # Add the response to the messages\n"
60 | code += f" return {{\n"
61 | code += f" \"messages\": messages + [response],\n"
62 | code += f" \"next\": state.get(\"next\", \"\")\n"
63 | code += f" }}\n\n"
64 |
65 | # Define routing logic function
66 | code += """# Define routing logic
67 | def router(state: AgentState) -> str:
68 | \"\"\"Route to the next node.\"\"\"
69 | return state.get("next", "END")
70 |
71 | """
72 |
73 | # Generate graph configuration
74 | code += "# Define the graph\n"
75 | code += "workflow = StateGraph(AgentState)\n\n"
76 |
77 | # Add nodes
78 | code += "# Add nodes to the graph\n"
79 | for node in config["nodes"]:
80 | code += f"workflow.add_node(\"{node['name']}\", {node['agent']}_agent)\n"
81 |
82 | code += "\n# Add conditional edges\n"
83 | # Add edges
84 | for edge in config["edges"]:
85 | if edge["target"] == "END":
86 | code += f"workflow.add_edge(\"{edge['source']}\", END)\n"
87 | else:
88 | code += f"workflow.add_edge(\"{edge['source']}\", \"{edge['target']}\")\n"
89 |
90 | # Set entry point
91 | if config["nodes"]:
92 | code += f"\n# Set entry point\nworkflow.set_entry_point(\"{config['nodes'][0]['name']}\")\n"
93 |
94 | # Compile and run
95 | code += """
96 | # Compile the graph
97 | app = workflow.compile()
98 |
99 | # Run the graph
100 | def run_agent(query: str) -> List[BaseMessage]:
101 | \"\"\"Run the agent on a query.\"\"\"
102 | result = app.invoke({
103 | "messages": [HumanMessage(content=query)],
104 | "next": ""
105 | })
106 | return result["messages"]
107 |
108 | # Example usage
109 | if __name__ == "__main__":
110 | result = run_agent("Your query here")
111 | for message in result:
112 | print(f"{message.type}: {message.content}")
113 | """
114 |
115 | # Now wrap the generated code in JSON format
116 | # return json.dumps({"generated_code": code}, indent=4)
117 | return code
118 |
--------------------------------------------------------------------------------
/multi_agent_generator/frameworks/agno_generator.py:
--------------------------------------------------------------------------------
1 | # multi_agent_generator/frameworks/agno_generator.py
2 | """
3 | Generator for Agno team code.
4 | """
5 | from typing import Dict, Any
6 |
7 | def _sanitize(name: str) -> str:
8 | return (
9 | name.strip()
10 | .lower()
11 | .replace(" ", "_")
12 | .replace("-", "_")
13 | .replace("'", "")
14 | .replace('"', "")
15 | )
16 |
def create_agno_code(config: Dict[str, Any]) -> str:
    """
    Generate Agno team code from a configuration.

    Emitted structure, in order:
    - imports (agno + typing + pydantic + dotenv)
    - one Agent per entry in config["agents"]
    - one task function per entry in config["tasks"]
    - a coordinating Team over all agents
    - run_workflow(query) executing the tasks in order
    - an example __main__ guard

    Args:
        config: Dict with optional keys "model_id" (default "gpt-4o"),
            "process" ("sequential" default, or "hierarchical"), "agents"
            (dicts with name/role/goal/backstory) and "tasks" (dicts with
            name/description/expected_output and optional "agent").

    Returns:
        Generated Python source as a single string.

    Raises:
        ValueError: if tasks are configured but no agents are — the
            generated task functions would reference undefined agents.
    """
    model_id = config.get("model_id", "gpt-4o")
    process_type = (config.get("process") or "sequential").lower()
    agents = config.get("agents", [])
    tasks = config.get("tasks", [])

    if tasks and not agents:
        raise ValueError("Cannot generate tasks without at least one agent in the config.")

    code = ""
    # Imports
    code += "from agno.agent import Agent\n"
    code += "from agno.models.openai import OpenAIChat\n"
    code += "from agno.team import Team\n"
    code += "from typing import Dict, List, Any\n"
    code += "from pydantic import BaseModel, Field\n\n"
    code += "from dotenv import load_dotenv\n\n"
    code += "load_dotenv() # Load environment variables from .env file\n\n"

    # Agents: emit one Agent(...) per config entry and remember its var name.
    agent_vars = []
    for agent in agents:
        var = f"agent_{_sanitize(agent['name'])}"
        agent_vars.append(var)

        role = agent.get("role", "")
        goal = agent.get("goal", "")
        backstory = agent.get("backstory", "")
        # Fold goal + backstory into a single instructions string.
        instructions = (goal + (" " if goal and backstory else "") + backstory) if (goal or backstory) else ""

        code += f"# Agent: {agent['name']}\n"
        code += f"{var} = Agent(\n"
        code += f"    name={agent['name']!r},\n"
        code += f"    model=OpenAIChat(id={model_id!r}),\n"
        code += f"    role={role!r},\n"
        if instructions:
            code += f"    instructions={instructions!r},\n"
        # Agno expects tool objects. To keep the output runnable, emit an empty list.
        code += "    tools=[],\n"
        code += "    markdown=True,\n"
        code += ")\n\n"

    known_agent_vars = set(agent_vars)

    # Default worker for unassigned tasks: in hierarchical mode the first
    # agent acts as manager, so fall back to the second agent when present.
    default_idx = 1 if (process_type == "hierarchical" and len(agents) > 1) else 0

    # Tasks
    task_vars = []
    for task in tasks:
        tvar = f"task_{_sanitize(task['name'])}"
        task_vars.append(tvar)

        desc = task.get("description", "")
        expected = task.get("expected_output", "")

        # Resolve the assigned agent; fall back when it is missing, malformed,
        # or names an agent that does not exist (previously that emitted a
        # reference to an undefined variable in the generated file).
        assigned = task.get("agent")
        assigned_var = f"agent_{_sanitize(assigned)}" if isinstance(assigned, str) and assigned else None
        auto_assigned = assigned_var not in known_agent_vars
        if auto_assigned:
            assigned_var = agent_vars[default_idx]

        code += f"# Task: {task['name']}\n"
        code += f"def {tvar}(query: str) -> Any:\n"
        if auto_assigned:
            # Parity with the CrewAI generator's auto-assignment comment.
            code += f"    # Auto-assigned to: {agents[default_idx]['name']}\n"
        code += "    prompt = (\n"
        code += f"        {desc!r} + '\\n\\n' +\n"
        code += "        'User query: ' + str(query) + '\\n' +\n"
        code += f"        'Expected output: ' + {expected!r}\n"
        code += "    )\n"
        code += f"    return {assigned_var}.run(prompt).content\n\n"

    # Team
    code += "# Team Configuration\n"
    code += "team = Team(\n"
    code += "    name='Auto Team',\n"
    code += "    mode='coordinate',\n"
    code += f"    model=OpenAIChat(id={model_id!r}),\n"
    code += f"    members=[{', '.join(agent_vars)}],\n"
    code += "    instructions=[\n"
    code += "        'Coordinate members to complete the tasks in order.',\n"
    code += "        'Use the query as shared context.',\n"
    code += "    ],\n"
    code += "    markdown=True,\n"
    code += "    debug_mode=True,\n"
    code += "    show_members_responses=True,\n"
    code += ")\n\n"

    # Runner
    code += "# Run the workflow\n"
    code += "def run_workflow(query: str):\n"
    code += "    \"\"\"Run workflow using Agno Team. Executes tasks in order and returns a dict of results.\"\"\"\n"
    code += "    results: Dict[str, Any] = {}\n"
    for tvar in task_vars:
        code += f"    results[{tvar!r}] = {tvar}(query)\n"
    code += "    return results\n\n"

    # Example usage
    code += "if __name__ == \"__main__\":\n"
    code += "    result = run_workflow(\"Your query here\")\n"
    code += "    print(result)\n"

    return code
131 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Multi-Agent Generator
2 |
3 |
**PyPI link**: [multi-agent-generator](https://pypi.org/project/multi-agent-generator/)
5 |
6 | A powerful tool that transforms plain English instructions into fully configured multi-agent AI teams — no scripting, no complexity.
7 | Powered by [LiteLLM](https://docs.litellm.ai/) for **provider-agnostic support** (OpenAI, WatsonX, Ollama, Anthropic, etc.) with both a **CLI** and an optional **Streamlit UI**.
8 |
9 | ---
10 |
11 | ## Features
12 |
13 | * Generate agent code for multiple frameworks:
14 |
15 | * **CrewAI**: Structured workflows for multi-agent collaboration
16 | * **CrewAI Flow**: Event-driven workflows with state management
17 | * **LangGraph**: LangChain’s framework for stateful, multi-actor applications
18 | * **Agno**: Agno framework for Agents Team orchestration
19 | * **ReAct (classic)**: Reasoning + Acting agents using `AgentExecutor`
20 | * **ReAct (LCEL)**: Future-proof ReAct built with LangChain Expression Language (LCEL)
21 |
22 | * **Provider-Agnostic Inference** via LiteLLM:
23 |
24 | * Supports OpenAI, IBM WatsonX, Ollama, Anthropic, and more
25 | * Swap providers with a single CLI flag or environment variable
26 |
27 | * **Flexible Output**:
28 |
29 | * Generate Python code
30 | * Generate JSON configs
31 | * Or both combined
32 |
33 | * **Streamlit UI** (optional):
34 |
35 | * Interactive prompt entry
36 | * Framework selection
37 | * Config visualization
38 | * Copy or download generated code
39 |
40 | ---
41 |
42 | ## Installation
43 |
44 | ### Basic Installation
45 |
46 | ```bash
47 | pip install multi-agent-generator
48 | ```
49 |
50 | ---
51 |
52 | ## Prerequisites
53 |
54 | * At least one supported LLM provider (OpenAI, WatsonX, Ollama, etc.)
55 | * Environment variables setup:
56 |
57 | * `OPENAI_API_KEY` (for OpenAI)
58 | * `WATSONX_API_KEY`, `WATSONX_PROJECT_ID`, `WATSONX_URL` (for WatsonX)
59 | * `OLLAMA_URL` (for Ollama)
60 | * Or a generic `API_KEY` / `API_BASE` if supported by LiteLLM
61 |
* Note: `Agno` currently works only with `OPENAI_API_KEY` and without tools; support for additional providers and tools is planned.
63 |
64 | > ⚡ You can freely switch providers using `--provider` in CLI or by setting environment variables.
65 |
66 | ---
67 |
68 | ## Usage
69 |
70 | ### Command Line
71 |
72 | Basic usage with OpenAI (default):
73 |
74 | ```bash
75 | multi-agent-generator "I need a research assistant that summarizes papers and answers questions" --framework crewai
76 | ```
77 |
78 | Using WatsonX instead:
79 |
80 | ```bash
81 | multi-agent-generator "I need a research assistant that summarizes papers and answers questions" --framework crewai --provider watsonx
82 | ```
83 | Using Agno:
84 |
85 | ```bash
multi-agent-generator "build a researcher and writer" --framework agno --provider openai --output agno.py --format code
87 | ```
88 | Using Ollama locally:
89 |
90 | ```bash
91 | multi-agent-generator "Build me a ReAct assistant for customer support" --framework react-lcel --provider ollama
92 | ```
93 |
94 | Save output to a file:
95 |
96 | ```bash
97 | multi-agent-generator "I need a team to create viral social media content" --framework langgraph --output social_team.py
98 | ```
99 |
100 | Get JSON configuration only:
101 |
102 | ```bash
103 | multi-agent-generator "I need a team to analyze customer data" --framework react --format json
104 | ```
105 |
106 | ---
107 |
108 | ## Examples
109 |
110 | ### Research Assistant
111 |
112 | ```
113 | I need a research assistant that summarizes papers and answers questions
114 | ```
115 |
116 | ### Content Creation Team
117 |
118 | ```
119 | I need a team to create viral social media content and manage our brand presence
120 | ```
121 |
122 | ### Customer Support (LangGraph)
123 |
124 | ```
125 | Build me a LangGraph workflow for customer support
126 | ```
127 |
128 | ---
129 |
130 | ## Frameworks
131 |
132 | ### CrewAI
133 |
134 | Role-playing autonomous AI agents with goals, roles, and backstories.
135 |
136 | ### CrewAI Flow
137 |
138 | Event-driven workflows with sequential, parallel, or conditional execution.
139 |
140 | ### LangGraph
141 |
142 | Directed graph of agents/tools with stateful execution.
143 |
144 | ### Agno
145 |
146 | Role-playing Team orchestration AI agents with goals, roles, backstories and instructions.
147 |
148 | ### ReAct (classic)
149 |
150 | Reasoning + Acting agents built with `AgentExecutor`.
151 |
152 | ### ReAct (LCEL)
153 |
154 | Modern ReAct implementation using LangChain Expression Language — better for debugging and future-proof orchestration.
155 |
156 | ---
157 |
158 | ## LLM Providers
159 |
160 | ### OpenAI
161 |
162 | State-of-the-art GPT models (default: `gpt-4o-mini`).
163 |
164 | ### IBM WatsonX
165 |
166 | Enterprise-grade access to Llama and other foundation models (default: `llama-3-70b-instruct`).
167 |
168 | ### Ollama
169 |
170 | Run Llama and other models locally.
171 |
172 | ### Anthropic
173 |
174 | Use Claude models for agent generation.
175 |
176 | …and more, via LiteLLM.
177 |
178 | ---
179 |
180 | ## License
181 |
182 | MIT
183 |
184 | Maintainers: **[Nabarko Roy](https://github.com/Nabarko)**
185 |
Made with ❤️ — if you like this project, star the repo and share it with AI enthusiasts.
187 |
--------------------------------------------------------------------------------
/multi_agent_generator/frameworks/crewai_generator.py:
--------------------------------------------------------------------------------
1 | # multi_agent_generator/frameworks/crewai_generator.py
2 | """
3 | Generator for CrewAI code.
4 | """
5 | from typing import Dict, Any
6 |
7 | def _sanitize_var_name(name: str) -> str:
8 | """Convert agent/task name to a valid Python variable name."""
9 | return name.strip().lower().replace(" ", "_").replace("-", "_").replace("'", "").replace('"', "")
10 |
def create_crewai_code(config: Dict[str, Any]) -> str:
    """
    Generate CrewAI code from a configuration.

    Emits Agents, Tasks (each bound to the most suitable agent), a Crew
    wired for the configured process type, and a run_workflow(query)
    helper plus an example __main__ guard.

    Args:
        config: Dict with "agents", "tasks" and optional "process"
            ("sequential", the default, or "hierarchical"). In
            hierarchical mode the first agent acts as the manager.

    Returns:
        Generated Python source as a single string.
    """
    # Get process type from config (default to sequential).
    process_type = config.get("process", "sequential").lower()

    # Basic imports; sequential mode additionally pulls in Flow helpers.
    code = "from crewai import Agent, Task, Crew, Process\n"
    if process_type == "sequential":
        code += "from crewai.flow.flow import Flow, listen, start\n"
    code += "from typing import Dict, List, Any\n"
    code += "from pydantic import BaseModel, Field\n\n"

    if process_type == "sequential":
        # Define state model for the flow (only for sequential).
        # NOTE(review): AgentState (and the Flow import above) are not used by
        # the rest of the generated file — presumably kept as an extension
        # point for users; confirm before removing.
        code += "# Define flow state\n"
        code += "class AgentState(BaseModel):\n"
        code += "    query: str = Field(default=\"\")\n"
        code += "    results: Dict[str, Any] = Field(default_factory=dict)\n"
        code += "    current_step: str = Field(default=\"\")\n\n"

    # Map raw agent names to sanitized variable names.
    agent_name_to_var = {}

    # Generate Agent configurations.
    for i, agent in enumerate(config["agents"]):
        agent_var = f"agent_{_sanitize_var_name(agent['name'])}"
        agent_name_to_var[agent['name']] = agent_var

        code += f"# Agent: {agent['name']}\n"
        code += f"{agent_var} = Agent(\n"
        code += f"    role={agent['role']!r},\n"
        code += f"    goal={agent['goal']!r},\n"
        code += f"    backstory={agent['backstory']!r},\n"
        code += f"    verbose={agent['verbose']},\n"
        code += f"    allow_delegation={agent['allow_delegation']},\n"
        code += f"    tools={agent['tools']}"

        # The first agent doubles as manager in hierarchical mode and gets
        # iteration/time limits.
        if process_type == "hierarchical" and i == 0:
            code += ",\n    max_iter=5,\n"
            code += "    max_execution_time=300\n"
        else:
            code += "\n"

        code += ")\n\n"

    # Generate Task configurations; always bind an agent to each task.
    for task in config["tasks"]:
        task_var = f"task_{_sanitize_var_name(task['name'])}"
        code += f"# Task: {task['name']}\n"
        code += f"{task_var} = Task(\n"
        code += f"    description={task['description']!r},\n"

        agent_name = task.get('agent')
        if agent_name and agent_name in agent_name_to_var:
            agent_var = agent_name_to_var[agent_name]
            code += f"    agent={agent_var},\n"
        else:
            # No (or unknown) agent specified: in hierarchical mode assign to
            # the first worker (index 1, skipping the manager); otherwise to
            # the first agent.
            if process_type == "hierarchical" and len(config["agents"]) > 1:
                fallback_agent = config["agents"][1]["name"]
            else:
                fallback_agent = config["agents"][0]["name"]

            fallback_var = agent_name_to_var[fallback_agent]
            code += f"    # Auto-assigned to: {fallback_agent}\n"
            code += f"    agent={fallback_var},\n"

        code += f"    expected_output={task['expected_output']!r}\n"
        code += ")\n\n"

    # Generate Crew configuration.
    agent_vars = list(agent_name_to_var.values())
    code += "# Crew Configuration\n"
    code += "crew = Crew(\n"
    if process_type == "hierarchical" and len(agent_vars) > 1:
        # CrewAI rejects a Crew whose manager_agent also appears in the
        # agents list, so only the workers (everything after the manager)
        # go into agents=[...].
        code += "    agents=[" + ", ".join(agent_vars[1:]) + "],\n"
    else:
        code += "    agents=[" + ", ".join(agent_vars) + "],\n"
    code += "    tasks=[" + ", ".join(f"task_{_sanitize_var_name(t['name'])}" for t in config["tasks"]) + "],\n"

    # Set process type.
    if process_type == "hierarchical":
        code += "    process=Process.hierarchical,\n"
        # The first agent becomes the manager.
        code += f"    manager_agent={agent_vars[0]},\n"
    else:
        code += "    process=Process.sequential,\n"

    code += "    verbose=True\n"
    code += ")\n\n"

    # Run the workflow (up to kickoff) for both process types.
    code += "# Run the workflow\n"
    code += "def run_workflow(query: str):\n"
    code += "    \"\"\"Run workflow using CrewAI.\"\"\"\n"
    code += "    result = crew.kickoff(\n"
    code += "        inputs={\n"
    code += "            \"query\": query\n"
    code += "        }\n"
    code += "    )\n"
    code += "    return result\n\n"

    # Example usage
    code += "# Example usage\n"
    code += "if __name__ == \"__main__\":\n"
    code += "    result = run_workflow(\"Your query here\")\n"
    code += "    print(result)\n"
    return code
120 |
--------------------------------------------------------------------------------
/multi_agent_generator/frameworks/react_generator.py:
--------------------------------------------------------------------------------
1 | # multi_agent_generator/frameworks/react_generator.py
2 | """
3 | Generator for ReAct code.
4 | """
5 | from typing import Dict, Any
6 |
def create_react_code(config: Dict[str, Any]) -> str:
    """
    Generate classic ReAct agent code (AgentExecutor-based) from a configuration.

    Emits one BaseTool subclass per configured tool, a tools list and — when
    at least one agent is configured — an LLM, a ReAct prompt, an
    AgentExecutor, a run_agent(query) helper and an example __main__ guard.

    Args:
        config: Dict with optional "tools" (name/description/parameters) and
            "agents"; only the first agent's role/goal/llm are used.

    Returns:
        Generated Python source as a single string.
    """
    code = """from langchain_core.tools import BaseTool
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain.agents import create_react_agent, AgentExecutor
from typing import Dict, List, Any

"""

    # Define tools
    code += "# Define tools\n"
    for tool in config.get("tools", []):
        # Comma-separated parameter names for the generated _run/_arun signatures.
        params = ", ".join(tool.get("parameters", {}).keys()) if tool.get("parameters") else ""
        class_name = f"{tool['name'].capitalize()}Tool"
        # name/description carry explicit annotations: langchain's BaseTool is
        # a Pydantic model and rejects un-annotated field overrides. !r quoting
        # keeps the generated file valid when the description contains quotes.
        # Double braces emit literal {self.name} / {locals()} in the output.
        code += f"""class {class_name}(BaseTool):
    name: str = {tool['name']!r}
    description: str = {tool['description']!r}

    def _run(self{', ' if params else ''}{params}) -> str:
        try:
            # TODO: implement actual functionality
            return f"Executed {{self.name}} with inputs: {{locals()}}"
        except Exception as e:
            return f"Error in {{self.name}}: {{str(e)}}"

    async def _arun(self{', ' if params else ''}{params}) -> str:
        return self._run({params})

"""

    # Collect tools
    code += "tools = [\n"
    for tool in config.get("tools", []):
        code += f"    {tool['name'].capitalize()}Tool(),\n"
    code += "]\n\n"

    # Agent setup (only when at least one agent is configured).
    if config.get("agents"):
        agent = config["agents"][0]
        # Safe fallback for a missing llm field.
        llm_model = agent.get("llm", "gpt-4.1-mini")
        code += f"""llm = ChatOpenAI(model="{llm_model}")

react_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are {agent['role']}. Your goal is {agent['goal']}. Use tools when needed."),
    ("human", "{{input}}")
])

agent = create_react_agent(llm, tools, react_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

def run_agent(query: str) -> str:
    response = agent_executor.invoke({{"input": query}})
    # Try to show intermediate trace if available
    try:
        if isinstance(response, dict) and 'intermediate_steps' in response:
            print('--- Agent Trace ---')
            for step in response['intermediate_steps']:
                print(step)
            print('-------------------')
    except Exception:
        pass
    return response.get("output", "No response generated") if isinstance(response, dict) else str(response)

if __name__ == "__main__":
    result = run_agent("Your query here")
    print(result)
"""
    return code
78 |
79 |
80 | # LCEL-based ReAct code generator
81 |
def create_react_lcel_code(config: Dict[str, Any]) -> str:
    """
    Generate LCEL-based ReAct agent code from a configuration.

    Emits one BaseTool subclass per configured tool, a tools list and — when
    at least one agent is configured — an LLM, a prompt with a history
    placeholder, an LCEL chain and a run_agent(query, history) helper plus an
    example __main__ guard.

    Args:
        config: Dict with optional "tools" (name/description/parameters) and
            "agents"; only the first agent's role/goal/llm are used.

    Returns:
        Generated Python source as a single string.
    """
    code = """from typing import Dict, Any, List
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
from langchain_core.tools import BaseTool

"""

    # Define tools
    code += "# Define tools\n"
    for tool in config.get("tools", []):
        # Comma-separated parameter names for the generated _run/_arun signatures.
        params = ", ".join(tool.get("parameters", {}).keys()) if tool.get("parameters") else ""
        class_name = f"{tool['name'].capitalize()}Tool"
        # name/description carry explicit annotations: langchain's BaseTool is
        # a Pydantic model and rejects un-annotated field overrides. !r quoting
        # keeps the generated file valid when the description contains quotes.
        code += f"""class {class_name}(BaseTool):
    name: str = {tool['name']!r}
    description: str = {tool['description']!r}

    def _run(self{', ' if params else ''}{params}) -> str:
        try:
            # TODO: implement actual logic for the tool
            return f"Executed {{self.name}} with inputs: {{locals()}}"
        except Exception as e:
            return f"Error in {{self.name}}: {{str(e)}}"

    async def _arun(self{', ' if params else ''}{params}) -> str:
        return self._run({params})

"""

    # Collect tools
    code += "tools = [\n"
    for tool in config.get("tools", []):
        code += f"    {tool['name'].capitalize()}Tool(),\n"
    code += "]\n\n"

    if config.get("agents"):
        agent = config["agents"][0]
        # Safe fallback for a missing llm field.
        llm_model = agent.get("llm", "gpt-4.1-mini")
        code += f"""llm = ChatOpenAI(model="{llm_model}")

react_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are {agent['role']}. Your goal is {agent['goal']}. Use tools when needed."),
    MessagesPlaceholder("history"),
    ("human", "{{input}}")
])

chain = (
    {{"input": RunnablePassthrough(), "history": RunnablePassthrough()}}
    | react_prompt
    | llm
    | StrOutputParser()
)

def run_agent(query: str, history: List[str] = []) -> str:
    response = chain.invoke({{"input": query, "history": history}})
    # If the config included examples with thoughts/actions/observations, print them for debugging
    try:
        print("\\n=== Example Traces (from config) ===")
        # placeholder: in generated file, the config may be embedded or passed in; this prints examples if present
    except Exception:
        pass
    return response

if __name__ == "__main__":
    result = run_agent("Your query here")
    print(result)
"""
    return code
153 |
--------------------------------------------------------------------------------
/multi_agent_generator/frameworks/crewai_flow_generator.py:
--------------------------------------------------------------------------------
1 | # multi_agent_generator/frameworks/crewai_flow_generator.py
2 | """
3 | Generator for CrewAI Flow code.
4 | """
5 | from typing import Dict, Any
6 |
def create_crewai_flow_code(config: Dict[str, Any]) -> str:
    """
    Generate CrewAI Flow code from a configuration.

    This function creates event-driven workflow code using the CrewAI Flow
    framework, with proper transitions between different workflow steps.

    Args:
        config: Dictionary containing agents, tasks, and workflow configuration

    Returns:
        Generated Python code as a string
    """

    def _ident(name: str) -> str:
        """Make an arbitrary name safe to embed in a Python identifier."""
        # Agent/task names may contain spaces, hyphens etc.; map every
        # non-alphanumeric character to "_" so the emitted variable and
        # method names always compile.
        return "".join(ch if ch.isalnum() or ch == "_" else "_" for ch in str(name).strip())

    # Start with the basic imports
    code = "from crewai import Agent, Task, Crew\n"
    code += "from crewai.flow.flow import Flow, listen, start\n"
    code += "from typing import Dict, List, Any\n"
    code += "from pydantic import BaseModel, Field\n\n"

    # Define state model for the flow
    code += "# Define flow state\n"
    code += "class AgentState(BaseModel):\n"
    code += "    query: str = Field(default=\"\")\n"
    code += "    results: Dict[str, Any] = Field(default_factory=dict)\n"
    code += "    current_step: str = Field(default=\"\")\n\n"

    # Generate Agent configurations. !r quoting keeps the output valid even
    # when a field contains quotes or newlines.
    for agent in config["agents"]:
        code += f"# Agent: {agent['name']}\n"
        code += f"agent_{_ident(agent['name'])} = Agent(\n"
        code += f"    role={agent['role']!r},\n"
        code += f"    goal={agent['goal']!r},\n"
        code += f"    backstory={agent['backstory']!r},\n"
        code += f"    verbose={agent['verbose']},\n"
        code += f"    allow_delegation={agent['allow_delegation']},\n"
        code += f"    tools={agent['tools']}\n"
        code += ")\n\n"

    # Generate Task configurations
    for task in config["tasks"]:
        code += f"# Task: {task['name']}\n"
        code += f"task_{_ident(task['name'])} = Task(\n"
        code += f"    description={task['description']!r},\n"
        code += f"    agent=agent_{_ident(task['agent'])},\n"
        code += f"    expected_output={task['expected_output']!r}\n"
        code += ")\n\n"

    # Generate Crew configuration
    code += "# Crew Configuration\n"
    code += "crew = Crew(\n"
    code += "    agents=[" + ", ".join(f"agent_{_ident(a['name'])}" for a in config["agents"]) + "],\n"
    code += "    tasks=[" + ", ".join(f"task_{_ident(t['name'])}" for t in config["tasks"]) + "],\n"
    code += "    verbose=True\n"
    code += ")\n\n"

    # Create Flow class
    code += "# Define CrewAI Flow\n"
    code += "class WorkflowFlow(Flow[AgentState]):\n"

    # Define initial step with @start decorator
    code += "    @start()\n"
    code += "    def initial_input(self):\n"
    code += "        \"\"\"Process the initial user query.\"\"\"\n"
    code += "        print(\"Starting workflow...\")\n"

    # The first task becomes the current step (raw name: current_step is data,
    # not an identifier).
    first_task = config["tasks"][0]["name"] if config["tasks"] else "completed"
    code += f"        self.state.current_step = \"{first_task}\"\n"
    code += "        return self.state\n\n"

    # Add task steps with @listen decorators, each chained off the previous step.
    tasks = config["tasks"]
    previous_step = "initial_input"

    for i, task in enumerate(tasks):
        step_name = f"execute_{_ident(task['name'])}"
        code += f"    @listen('{previous_step}')\n"
        code += f"    def {step_name}(self, state):\n"
        code += f"        \"\"\"Execute the {task['name']} task.\"\"\"\n"
        code += f"        print(f\"Executing task: {task['name']}\")\n"
        code += "\n"
        code += "        # Run the specific task with the crew\n"
        code += "        result = crew.kickoff(\n"
        code += f"            tasks=[task_{_ident(task['name'])}],\n"
        code += "            inputs={\n"
        code += "                \"query\": self.state.query,\n"
        code += "                \"previous_results\": self.state.results\n"
        code += "            }\n"
        code += "        )\n"
        code += "\n"
        code += "        # Store results in state\n"
        code += f"        self.state.results[\"{task['name']}\"] = result\n"

        if i < len(tasks) - 1:
            code += f"        self.state.current_step = \"{tasks[i + 1]['name']}\"\n"
        else:
            code += "        self.state.current_step = \"completed\"\n"

        code += "        return self.state\n\n"
        previous_step = step_name

    # Add final aggregation step. This section is a plain (non-f) string so
    # {task_name}/{result} survive into the output literally and interpolate
    # at *runtime* of the generated file — previously {task_name} leaked the
    # generator's own loop variable into every section header (and raised
    # NameError when no tasks were configured).
    code += f"    @listen('{previous_step}')\n"
    code += "    def aggregate_results(self, state):\n"
    code += "        \"\"\"Combine all results from tasks.\"\"\"\n"
    code += "        print(\"Workflow completed, aggregating results...\")\n"
    code += "\n"
    code += "        # Combine all results\n"
    code += "        combined_result = \"\"\n"
    code += "        for task_name, result in state.results.items():\n"
    code += "            combined_result += f\"\\n\\n=== {task_name} ===\\n{result}\"\n"
    code += "\n"
    code += "        return combined_result\n\n"

    # Add execution code
    code += "# Run the flow\n"
    code += "def run_workflow(query: str):\n"
    code += "    flow = WorkflowFlow()\n"
    code += "    flow.state.query = query\n"
    code += "    result = flow.kickoff()\n"
    code += "    return result\n\n"

    # Visualization function
    code += "# Generate a visualization of the flow\n"
    code += "def visualize_flow():\n"
    code += "    flow = WorkflowFlow()\n"
    code += "    flow.plot(\"workflow_flow\")\n"
    code += "    print(\"Flow visualization saved to workflow_flow.html\")\n\n"

    code += "# Example usage\n"
    code += "if __name__ == \"__main__\":\n"
    code += "    result = run_workflow(\"Your query here\")\n"
    code += "    print(result)\n"

    return code
--------------------------------------------------------------------------------
/multi_agent_generator/generator.py:
--------------------------------------------------------------------------------
1 | # multi_agent_generator/generator.py
2 | """
3 | Agent configuration generator that analyzes user requirements.
4 | Unified across multiple LLM providers via LiteLLM.
5 | """
6 | import os
7 | import json
8 | import streamlit as st
9 | from typing import Dict, Any, Optional, List
10 | from .model_inference import ModelInference, Message
11 |
12 |
13 | class AgentGenerator:
14 | """
15 | Generates agent configurations based on natural language descriptions.
16 | Uses LiteLLM for provider-agnostic inference.
17 | """
18 |
19 | def __init__(self, provider: str = "openai"):
20 | """
21 | Initialize the generator with the specified provider.
22 |
23 | Args:
24 | provider: The LLM provider to use (openai, watsonx, ollama, etc.)
25 | """
26 | self.provider = provider.lower()
27 | self.model: Optional[ModelInference] = None
28 |
29 | def set_provider(self, provider: str):
30 | """
31 | Change the LLM provider.
32 |
33 | Args:
34 | provider: The LLM provider (openai, watsonx, ollama, etc.)
35 | """
36 | self.provider = provider.lower()
37 | self.model = None # reset for re-init
38 |
39 | def _initialize_model(self):
40 | """Initialize the LiteLLM ModelInference if not already done."""
41 | if self.model is not None:
42 | return
43 |
44 | default_models = {
45 | "openai": "gpt-4o-mini",
46 | "watsonx": "watsonx/meta-llama/llama-3-3-70b-instruct",
47 | "ollama": "ollama/llama3.2:3b",
48 | }
49 | model_name = default_models.get(self.provider, self.provider)
50 | model_name = os.getenv("DEFAULT_MODEL", model_name)
51 |
52 | common_kwargs = dict(
53 | model=model_name,
54 | max_tokens=1000,
55 | temperature=0.7,
56 | top_p=0.95,
57 | frequency_penalty=0,
58 | presence_penalty=0,
59 | )
60 |
61 | if self.provider == "watsonx":
62 | self.model = ModelInference(**common_kwargs, project_id=os.getenv("WATSONX_PROJECT_ID"))
63 | else:
64 | self.model = ModelInference(**common_kwargs)
65 |
66 | def analyze_prompt(self, user_prompt: str, framework: str) -> Dict[str, Any]:
67 | """
68 | Analyze a natural language prompt to generate agent configuration.
69 |
70 | Args:
71 | user_prompt: The natural language description
72 | framework: The agent framework to use
73 |
74 | Returns:
75 | A dictionary containing the agent configuration
76 | """
77 | self._initialize_model()
78 | system_prompt = self._get_system_prompt_for_framework(framework)
79 |
80 | try:
81 | messages: List[Message] = [
82 | Message(role="system", content=system_prompt),
83 | Message(role="user", content=user_prompt)
84 | ]
85 |
86 | response = self.model.generate_text(messages)
87 |
88 | # Extract JSON from response
89 | json_start = response.find('{')
90 | json_end = response.rfind('}') + 1
91 |
92 | if json_start >= 0 and json_end > json_start:
93 | json_str = response[json_start:json_end]
94 | return json.loads(json_str)
95 | else:
96 | if st is not None:
97 | st.warning("Could not extract valid JSON from model response. Using default configuration.")
98 | return self._get_default_config(framework)
99 |
100 | except Exception as e:
101 | if st is not None:
102 | st.error(f"Error in analyzing prompt: {e}")
103 | return self._get_default_config(framework)
104 |
105 |
    def _get_system_prompt_for_framework(self, framework: str) -> str:
        """
        Get the system prompt for the specified framework.

        Each prompt instructs the LLM to reply with JSON matching the schema
        that the corresponding code generator consumes, so the prompt text is
        behavior-critical and must be kept verbatim.

        Args:
            framework: The agent framework to use ("crewai", "crewai-flow",
                "langgraph", "react", "react-lcel", or "agno")

        Returns:
            The system prompt for the framework (a generic fallback prompt
            for any unrecognized framework name)
        """
        # CrewAI: role-playing crew with explicit agent-to-task assignment
        # rules; process may be sequential or hierarchical.
        if framework == "crewai":
            return """
You are an expert at creating AI research assistants using CrewAI. Based on the user's request,
suggest appropriate agents, their roles, tools, and tasks.

CRITICAL REQUIREMENTS:
1. Create specialized agents with distinct roles and expertise
2. ALWAYS assign the most appropriate agent to each task based on their role/expertise
3. Each task must have an "agent" field with the exact agent name
4. Match agent specialization to task requirements

Process Types:
- Sequential: Tasks executed one after another in order
- Hierarchical: A manager agent coordinates and delegates tasks to specialized agents

Format your response as JSON with this structure:
{
"process": "sequential" or "hierarchical",
"agents": [
{
"name": "agent_name",
"role": "specific specialized role",
"goal": "clear specific goal",
"backstory": "relevant professional backstory",
"tools": ["relevant_tool1", "relevant_tool2"],
"verbose": true,
"allow_delegation": true/false
}
],
"tasks": [
{
"name": "task_name",
"description": "detailed task description",
"tools": ["required tools for this task"],
"agent": "exact_agent_name_from_above",
"expected_output": "specific expected output"
}
]
}

AGENT-TASK ASSIGNMENT RULES:
- Research tasks → Research Specialist/Analyst
- Data collection → Data Specialist/Collector
- Analysis tasks → Data Analyst/Statistician
- Writing tasks → Content Writer/Technical Writer
- Review tasks → Quality Reviewer/Editor
- Coordination tasks → Project Manager/Coordinator

ALWAYS ensure each task has the most suitable agent assigned based on the agent's role and expertise.
Use exact agent names (matching the "name" field in agents array) in the "agent" field of tasks.
"""
        # CrewAI Flow: same agent/task JSON schema as CrewAI, framed as an
        # event-driven workflow with ordered steps.
        elif framework == "crewai-flow":
            return """
You are an expert at creating AI research assistants using CrewAI Flow. Based on the user's request,
suggest appropriate agents, their roles, tools, and tasks organized in a workflow.

CRITICAL REQUIREMENTS:
1. Create specialized agents with distinct roles and expertise
2. ALWAYS assign the most appropriate agent to each task based on their role/expertise
3. Each task must have an "agent" field with the exact agent name
4. Match agent specialization to task requirements

Process Types:
- Sequential: Tasks flow through a predefined sequence with specific agent assignments
- Hierarchical: A manager coordinates the flow and delegates to specialized agents

Format your response as JSON with this structure:
{
"process": "sequential" or "hierarchical",
"agents": [
{
"name": "agent_name",
"role": "specific specialized role",
"goal": "clear specific goal",
"backstory": "relevant professional backstory",
"tools": ["relevant_tool1", "relevant_tool2"],
"verbose": true,
"allow_delegation": true/false
}
],
"tasks": [
{
"name": "task_name",
"description": "detailed task description",
"tools": ["required tools for this task"],
"agent": "exact_agent_name_from_above",
"expected_output": "specific expected output"
}
]
}

ALWAYS ensure proper agent-to-task matching based on expertise and specialization.
"""
        # LangGraph: graph-shaped schema — agents plus nodes and edges
        # (edges may carry an optional routing condition).
        elif framework == "langgraph":
            return """
You are an expert at creating AI agents using LangChain's LangGraph framework. Based on the user's request,
suggest appropriate agents, their roles, tools, and nodes for the graph. Format your response as JSON with this structure:
{
"agents": [
{
"name": "agent name",
"role": "specific role description",
"goal": "clear goal",
"tools": ["tool1", "tool2"],
"llm": "model name (e.g., gpt-4.1-mini)"
}
],
"nodes": [
{
"name": "node name",
"description": "detailed description",
"agent": "agent name"
}
],
"edges": [
{
"source": "source node name",
"target": "target node name",
"condition": "condition description (optional)"
}
]
}
"""
        # ReAct: single-step thought/action/observation examples alongside
        # agent and tool definitions.
        elif framework == "react":
            return """
You are an expert at creating AI agents using the ReAct (Reasoning + Acting) framework.
Based on the user's request, design an agent with reasoning steps and tool usage.

Format your response strictly as JSON with this structure:
{
"agents": [
{
"name": "agent name",
"role": "specific role description",
"goal": "clear goal",
"tools": ["tool1", "tool2"],
"llm": "model name (e.g., gpt-4.1-mini)"
}
],
"tools": [
{
"name": "tool name",
"description": "detailed description of what the tool does",
"parameters": {
"param1": "parameter description",
"param2": "parameter description"
}
}
],
"examples": [
{
"query": "example user query",
"thought": "single-step thought",
"action": "example action",
"observation": "example observation",
"final_answer": "example final answer"
}
]
}
"""
        # ReAct + LCEL: like "react" but examples carry plural multi-step
        # thoughts/actions/observations, and tools include I/O examples.
        elif framework == "react-lcel":
            return """
You are an expert at creating AI agents using the ReAct (Reasoning + Acting) framework,
implemented with LangChain Expression Language (LCEL).
The agent should demonstrate **multi-step reasoning** with clear intermediate steps.

Format your response strictly as JSON with this structure:
{
"agents": [
{
"name": "agent name",
"role": "specific role description",
"goal": "clear goal",
"tools": ["tool1", "tool2"],
"llm": "model name (e.g., gpt-4.1-mini)"
}
],
"tools": [
{
"name": "tool name",
"description": "detailed description of what the tool does",
"parameters": {
"param1": "parameter description",
"param2": "parameter description"
},
"examples": [
{"input": "example input", "output": "expected output"}
]
}
],
"examples": [
{
"query": "example user query",
"thoughts": [
"step 1 thought",
"step 2 thought"
],
"actions": [
{"tool": "tool name", "input": "tool input"}
],
"observations": [
"result from tool call"
],
"final_answer": "example final answer"
}
]
}
"""
        # Agno: CrewAI-like agent/task schema plus a top-level "model_id";
        # only sequential execution is offered.
        elif framework == "agno":
            return """
You are an expert at creating AI agents using the Agno framework. Based on the user's request,
suggest appropriate agents, their roles, tools, and tasks.

CRITICAL REQUIREMENTS:
1. Create specialized agents with distinct roles and expertise
2. ALWAYS assign the most appropriate agent to each task based on their role/expertise
3. Each task must have an "agent" field with the exact agent name
4. Match agent specialization to task requirements

Process Types:
- Sequential: Tasks executed one after another in order

Format your response as JSON with this structure:
{
"model_id": "model name (e.g., gpt-4o)",
"process": "sequential",
"agents": [
{
"name": "agent_name",
"role": "specific specialized role",
"goal": "clear specific goal",
"backstory": "relevant professional backstory",
"tools": ["relevant_tool1", "relevant_tool2"],
"verbose": true,
"allow_delegation": true/false
}
],
"tasks": [
{
"name": "task_name",
"description": "detailed task description",
"tools": ["required tools for this task"],
"agent": "exact_agent_name_from_above",
"expected_output": "specific expected output"
}
]
}
"""
        # Fallback: a generic prompt with no JSON schema for unknown
        # framework names.
        else:
            return """
You are an expert at creating AI research assistants. Based on the user's request,
suggest appropriate agents, their roles, tools, and tasks.
"""
369 |
370 | def _get_default_config(self, framework: str) -> Dict[str, Any]:
371 | """
372 | Get a default configuration for the specified framework.
373 |
374 | Args:
375 | framework: The agent framework to use
376 |
377 | Returns:
378 | A default configuration dictionary
379 | """
380 | if framework == "crewai":
381 | return {
382 | "process": "sequential", # Default to sequential
383 | "agents": [
384 | {
385 | "name": "research_specialist",
386 | "role": "Research Specialist",
387 | "goal": "Conduct thorough research and gather information",
388 | "backstory": "Expert researcher with years of experience in data gathering and analysis",
389 | "tools": ["search_tool", "web_scraper"],
390 | "verbose": True,
391 | "allow_delegation": False
392 | },
393 | {
394 | "name": "content_writer",
395 | "role": "Content Writer",
396 | "goal": "Create clear and comprehensive written content",
397 | "backstory": "Professional writer skilled in creating engaging and informative content",
398 | "tools": ["writing_tool", "grammar_checker"],
399 | "verbose": True,
400 | "allow_delegation": False
401 | }
402 | ],
403 | "tasks": [
404 | {
405 | "name": "research_task",
406 | "description": "Gather information and conduct research on the given topic",
407 | "tools": ["search_tool"],
408 | "agent": "research_specialist",
409 | "expected_output": "Comprehensive research findings and data"
410 | },
411 | {
412 | "name": "writing_task",
413 | "description": "Create written content based on research findings",
414 | "tools": ["writing_tool"],
415 | "agent": "content_writer",
416 | "expected_output": "Well-written content document"
417 | }
418 | ]
419 | }
420 | elif framework == "crewai-flow":
421 | return {
422 | "process": "sequential", # Default to sequential
423 | "agents": [
424 | {
425 | "name": "research_specialist",
426 | "role": "Research Specialist",
427 | "goal": "Conduct thorough research and gather information",
428 | "backstory": "Expert researcher with years of experience in data gathering and analysis",
429 | "tools": ["search_tool", "web_scraper"],
430 | "verbose": True,
431 | "allow_delegation": False
432 | },
433 | {
434 | "name": "content_writer",
435 | "role": "Content Writer",
436 | "goal": "Create clear and comprehensive written content",
437 | "backstory": "Professional writer skilled in creating engaging and informative content",
438 | "tools": ["writing_tool", "grammar_checker"],
439 | "verbose": True,
440 | "allow_delegation": False
441 | }
442 | ],
443 | "tasks": [
444 | {
445 | "name": "research_task",
446 | "description": "Gather information and conduct research on the given topic",
447 | "tools": ["search_tool"],
448 | "agent": "research_specialist",
449 | "expected_output": "Comprehensive research findings and data"
450 | },
451 | {
452 | "name": "writing_task",
453 | "description": "Create written content based on research findings",
454 | "tools": ["writing_tool"],
455 | "agent": "content_writer",
456 | "expected_output": "Well-written content document"
457 | }
458 | ]
459 | }
460 |
461 | elif framework == "langgraph":
462 | return {
463 | "agents": [{
464 | "name": "default_assistant",
465 | "role": "General Assistant",
466 | "goal": "Help with basic tasks",
467 | "tools": ["basic_tool"],
468 | "llm": "gpt-4.1-mini"
469 | }],
470 | "nodes": [{
471 | "name": "process_input",
472 | "description": "Process user input",
473 | "agent": "default_assistant"
474 | }],
475 | "edges": [{
476 | "source": "process_input",
477 | "target": "END",
478 | "condition": "task completed"
479 | }]
480 | }
481 | elif framework == "react":
482 | return {
483 | "agents": [{
484 | "name": "default_assistant",
485 | "role": "General Assistant",
486 | "goal": "Help with basic tasks",
487 | "tools": ["basic_tool"],
488 | "llm": "gpt-4.1-mini"
489 | }],
490 | "tools": [{
491 | "name": "basic_tool",
492 | "description": "A basic utility tool",
493 | "parameters": {"input": "User input to process"}
494 | }],
495 | "examples": [...]
496 | }
497 | elif framework == "react-lcel":
498 | return {
499 | "agents": [{
500 | "name": "default_assistant",
501 | "role": "General A"
502 | "ssistant",
503 | "goal": "Help with multi-step tasks",
504 | "tools": ["basic_tool"],
505 | "llm": "llm"
506 | }],
507 | "tools": [{
508 | "name": "basic_tool",
509 | "description": "A basic utility tool",
510 | "parameters": {"input": "User input to process"},
511 | "examples": [{"input": "search cats", "output": "cat info"}]
512 | }],
513 | "examples": [{
514 | "query": "Find trending AI research papers",
515 | "thoughts": [
516 | "I should search for trending AI papers",
517 | "I should summarize the findings"
518 | ],
519 | "actions": [
520 | {"tool": "basic_tool", "input": "trending AI papers"}
521 | ],
522 | "observations": [
523 | "Found 3 relevant papers"
524 | ],
525 | "final_answer": "Here are the latest AI papers..."
526 | }]
527 | }
528 | elif framework == "agno":
529 | return {
530 | "model_id": "gpt-4o",
531 | "process": "sequential", # Default to sequential
532 | "agents": [
533 | {
534 | "name": "research_specialist",
535 | "role": "Research Specialist",
536 | "goal": "Conduct thorough research and gather information",
537 | "backstory": "Expert researcher with years of experience in data gathering and analysis",
538 | "tools": ["search_tool", "web_scraper"],
539 | "verbose": True,
540 | "allow_delegation": False
541 | },
542 | {
543 | "name": "content_writer",
544 | "role": "Content Writer",
545 | "goal": "Create clear and comprehensive written content",
546 | "backstory": "Professional writer skilled in creating engaging and informative content",
547 | "tools": ["writing_tool", "grammar_checker"],
548 | "verbose": True,
549 | "allow_delegation": False
550 | }
551 | ],
552 | "tasks": [
553 | {
554 | "name": "research_task",
555 | "description": "Gather information and conduct research on the given topic",
556 | "tools": ["search_tool"],
557 | "agent": "research_specialist",
558 | "expected_output": "Comprehensive research findings and data"
559 | },
560 | {
561 | "name": "writing_task",
562 | "description": "Create written content based on research findings",
563 | "tools": ["writing_tool"],
564 | "agent": "content_writer",
565 | "expected_output": "Well-written content document"
566 | }
567 | ]
568 | }
569 |
570 | else:
571 | return {}
572 |
--------------------------------------------------------------------------------
/streamlit_app.py:
--------------------------------------------------------------------------------
1 | """
2 | Streamlit UI for Multi-Agent Generator.
3 | """
4 | import os
5 | import time
6 | import streamlit as st
7 | import json
8 | from dotenv import load_dotenv
9 |
10 | from multi_agent_generator.generator import AgentGenerator
11 | from multi_agent_generator.frameworks.crewai_generator import create_crewai_code
12 | from multi_agent_generator.frameworks.langgraph_generator import create_langgraph_code
13 | from multi_agent_generator.frameworks.react_generator import create_react_code
14 | from multi_agent_generator.frameworks.crewai_flow_generator import create_crewai_flow_code
15 | from multi_agent_generator.frameworks.agno_generator import create_agno_code
16 |
17 | # Load environment variables
18 | load_dotenv()
19 |
def create_code_block(config, framework):
    """Render agent code for *framework* from the generated *config*.

    Dispatches to the matching framework-specific generator; unknown
    framework names yield a short placeholder comment string.
    """
    if framework == "crewai":
        return create_crewai_code(config)
    if framework == "crewai-flow":
        return create_crewai_flow_code(config)
    if framework == "langgraph":
        return create_langgraph_code(config)
    if framework == "react":
        return create_react_code(config)
    if framework == "agno":
        return create_agno_code(config)
    return "# Invalid framework"
34 |
35 | def main():
36 | """Main entry point for the Streamlit app."""
37 | st.set_page_config(page_title="Multi-Framework Agent Generator", page_icon="🚀", layout="wide")
38 |
39 | st.title("Multi-Framework Agent Generator")
40 | st.write("Generate agent code for different frameworks based on your requirements!")
41 |
42 | # Initialize session state for model provider
43 | if 'model_provider' not in st.session_state:
44 | st.session_state.model_provider = 'openai'
45 |
46 | # Initialize keys in session state if not present
47 | for key in ['openai_api_key', 'watsonx_api_key', 'watsonx_project_id']:
48 | if key not in st.session_state:
49 | st.session_state[key] = ''
50 |
51 | # Sidebar for LLM provider selection and API keys
52 | st.sidebar.title("🤖 LLM Provider Settings")
53 | model_provider = st.sidebar.radio(
54 | "Choose LLM Provider:",
55 | ["OpenAI", "WatsonX"],
56 | index=0 if st.session_state.model_provider == 'openai' else 1,
57 | key="provider_radio"
58 | )
59 |
60 | st.session_state.model_provider = model_provider.lower()
61 |
62 | # Display provider badge
63 | if model_provider == "OpenAI":
64 | st.sidebar.markdown("")
65 | else:
66 | st.sidebar.markdown("")
67 |
68 | # API Key management in sidebar
69 | with st.sidebar.expander("🔑 API Credentials", expanded=False):
70 | if model_provider == "OpenAI":
71 | # Check for environment variable first
72 | openai_key_env = os.getenv("OPENAI_API_KEY", "")
73 | if openai_key_env:
74 | st.success("OpenAI API Key found in environment variables.")
75 | st.session_state.openai_api_key = openai_key_env
76 | else:
77 | # Then check session state
78 | if st.session_state.openai_api_key:
79 | st.success("OpenAI API Key set for this session.")
80 | else:
81 | # Otherwise prompt for key
82 | api_key = st.text_input(
83 | "Enter OpenAI API Key:",
84 | value=st.session_state.openai_api_key,
85 | type="password",
86 | key="openai_key_input"
87 | )
88 | if api_key:
89 | st.session_state.openai_api_key = api_key
90 | st.success("API Key saved for this session.")
91 |
92 | else: # WatsonX
93 | # Check for environment variables first
94 | watsonx_key_env = os.getenv("WATSONX_API_KEY", "")
95 | watsonx_project_env = os.getenv("WATSONX_PROJECT_ID", "")
96 |
97 | if watsonx_key_env and watsonx_project_env:
98 | st.success("WatsonX credentials found in environment variables.")
99 | st.session_state.watsonx_api_key = watsonx_key_env
100 | st.session_state.watsonx_project_id = watsonx_project_env
101 | else:
102 | # Otherwise check session state or prompt
103 | col1, col2 = st.columns(2)
104 | with col1:
105 | api_key = st.text_input(
106 | "WatsonX API Key:",
107 | value=st.session_state.watsonx_api_key,
108 | type="password",
109 | key="watsonx_key_input"
110 | )
111 | if api_key:
112 | st.session_state.watsonx_api_key = api_key
113 |
114 | with col2:
115 | project_id = st.text_input(
116 | "WatsonX Project ID:",
117 | value=st.session_state.watsonx_project_id,
118 | key="watsonx_project_input"
119 | )
120 | if project_id:
121 | st.session_state.watsonx_project_id = project_id
122 |
123 | if st.session_state.watsonx_api_key and st.session_state.watsonx_project_id:
124 | st.success("WatsonX credentials saved for this session.")
125 |
126 | # Show model information
127 | with st.sidebar.expander("ℹ️ Model Information", expanded=False):
128 | if model_provider == "OpenAI":
129 | st.write("**Model**: GPT-4.1-mini")
130 | st.write("OpenAI's models provide advanced capabilities for natural language understanding and code generation.")
131 | else:
132 | st.write("**Model**: Llama-3-70B-Instruct (via WatsonX)")
133 | st.write("IBM WatsonX provides enterprise-grade access to Llama and other foundation models with IBM's security and governance features.")
134 |
135 | # Framework selection
136 | st.sidebar.title("🔄 Framework Selection")
137 | framework = st.sidebar.radio(
138 | "Choose a framework:",
139 | ["crewai", "crewai-flow", "langgraph", "react", "agno"],
140 | format_func=lambda x: {
141 | "crewai": "CrewAI",
142 | "crewai-flow": "CrewAI Flow",
143 | "langgraph": "LangGraph",
144 | "react": "ReAct Framework",
145 | "agno": "Agno Framework"
146 | }[x],
147 | key="framework_radio"
148 | )
149 |
150 | framework_descriptions = {
151 | "crewai": """
152 | **CrewAI** is a framework for orchestrating role-playing autonomous AI agents.
153 | It allows you to create a crew of agents that work together to accomplish tasks,
154 | with each agent having a specific role, goal, and backstory.
155 | """,
156 | "crewai-flow": """
157 | **CrewAI Flow** extends CrewAI with event-driven workflows.
158 | It enables you to define multi-step processes with clear transitions between steps,
159 | maintaining state throughout the execution, and allowing for complex orchestration
160 | patterns like sequential, parallel, and conditional execution.
161 | """,
162 | "langgraph": """
163 | **LangGraph** is LangChain's framework for building stateful, multi-actor applications with LLMs.
164 | It provides a way to create directed graphs where nodes are LLM calls, tools, or other operations,
165 | and edges represent the flow of information between them.
166 | """,
167 | "react": """
168 | **ReAct** (Reasoning + Acting) is a framework that combines reasoning and action in LLM agents.
169 | It prompts the model to generate both reasoning traces and task-specific actions in an interleaved manner,
170 | creating a synergy between the two that leads to improved performance.
171 | """,
172 | "agno": """
173 | **Agno** is a framework for building and managing agent-based applications.
174 | It provides a way to define agents, their goals, and the tasks they need to accomplish,
175 | along with tools for coordinating their actions and sharing information.
176 | """
177 | }
178 |
179 | st.sidebar.markdown(framework_descriptions[framework])
180 |
181 | # Sidebar for examples
182 | st.sidebar.title("📚 Example Prompts")
183 | example_prompts = {
184 | "Research Assistant": "I need a research assistant that summarizes papers and answers questions",
185 | "Content Creation": "I need a team to create viral social media content and manage our brand presence",
186 | "Data Analysis": "I need a team to analyze customer data and create visualizations",
187 | "Technical Writing": "I need a team to create technical documentation and API guides"
188 | }
189 |
190 | selected_example = st.sidebar.selectbox("Choose an example:", list(example_prompts.keys()), key="example_select")
191 |
192 | # Main input area
193 | col1, col2 = st.columns([2, 1])
194 |
195 | with col1:
196 | st.subheader("🎯 Define Your Requirements")
197 | user_prompt = st.text_area(
198 | "Describe what you need:",
199 | value=example_prompts[selected_example],
200 | height=100,
201 | key="user_prompt"
202 | )
203 |
204 | # Add workflow steps input for CrewAI Flow
205 | if framework == "crewai-flow":
206 | st.subheader("🔄 Define Workflow Steps")
207 | workflow_steps = st.text_area(
208 | "List the steps in your workflow (one per line):",
209 | value="1. Data collection\n2. Analysis\n3. Report generation",
210 | height=100,
211 | key="workflow_steps"
212 | )
213 |
214 | # Generate button with LLM provider name
215 | if st.button(f"🚀 Generate using {model_provider} & {framework.upper()}", key="generate_button"):
216 | # Validation checks
217 | api_key_missing = False
218 | if model_provider == "OpenAI" and not st.session_state.openai_api_key:
219 | st.error("Please set your OpenAI API Key in the sidebar")
220 | api_key_missing = True
221 | elif model_provider == "WatsonX" and (not st.session_state.watsonx_api_key or not st.session_state.watsonx_project_id):
222 | st.error("Please set your WatsonX API Key and Project ID in the sidebar")
223 | api_key_missing = True
224 |
225 | if not api_key_missing:
226 | with st.spinner(f"Generating your {framework} code using {model_provider}..."):
227 |
228 | if model_provider == "OpenAI" and st.session_state.openai_api_key:
229 | os.environ["OPENAI_API_KEY"] = st.session_state.openai_api_key
230 | elif model_provider == "WatsonX":
231 | if st.session_state.watsonx_api_key:
232 | os.environ["WATSONX_API_KEY"] = st.session_state.watsonx_api_key
233 | if st.session_state.watsonx_project_id:
234 | os.environ["WATSONX_PROJECT_ID"] = st.session_state.watsonx_project_id
235 |
236 | # Initialize generator with selected provider
237 | generator = AgentGenerator(provider=model_provider.lower())
238 |
239 | # Handle CrewAI Flow differently
240 | if framework == "crewai-flow":
241 | # Extract workflow steps
242 | steps = [step.strip() for step in workflow_steps.split("\n") if step.strip()]
243 | steps = [step[2:].strip() if step[0].isdigit() and step[1] == "." else step for step in steps]
244 |
245 | # Append workflow information to the prompt
246 | flow_prompt = f"{user_prompt}\n\nWorkflow steps:\n"
247 | for i, step in enumerate(steps):
248 | flow_prompt += f"{i+1}. {step}\n"
249 |
250 | # Use the CrewAI analyzer but modify for flow
251 | config = generator.analyze_prompt(flow_prompt, "crewai")
252 |
253 | # Modify config to ensure tasks align with workflow steps
254 | if len(config["tasks"]) < len(steps):
255 | # Add missing tasks
256 | for i in range(len(config["tasks"]), len(steps)):
257 | config["tasks"].append({
258 | "name": f"step_{i+1}",
259 | "description": f"Execute step: {steps[i]}",
260 | "tools": config["tasks"][0]["tools"] if config["tasks"] else ["basic_tool"],
261 | "agent": config["agents"][0]["name"] if config["agents"] else "default_assistant",
262 | "expected_output": f"Results from {steps[i]}"
263 | })
264 | elif len(config["tasks"]) > len(steps):
265 | # Trim extra tasks
266 | config["tasks"] = config["tasks"][:len(steps)]
267 |
268 | # Update task names and descriptions to match steps
269 | for i, step in enumerate(steps):
270 | config["tasks"][i]["name"] = f"{step.lower().replace(' ', '_')}"
271 | config["tasks"][i]["description"] = f"Execute the '{step}' step"
272 |
273 | st.session_state.config = config
274 | st.session_state.code = create_crewai_flow_code(config) # Function for Flow
275 | else:
276 | config = generator.analyze_prompt(user_prompt, framework)
277 | st.session_state.config = config
278 | st.session_state.code = create_code_block(config, framework)
279 |
280 | st.session_state.framework = framework
281 |
282 | time.sleep(0.5) # Small delay for better UX
283 | st.success(f"✨ {framework.upper()} code generated successfully with {model_provider}!")
284 |
285 | # Add info about the model used
286 | if model_provider == "OpenAI":
287 | st.info("Generated using GPT-4.1-mini")
288 | else:
289 | st.info("Generated using Llama-3-70B-Instruct via WatsonX")
290 |
291 | with col2:
292 | st.subheader("💡 Framework Tips")
293 | if framework == "crewai":
294 | st.info("""
295 | **CrewAI Tips:**
296 | - Define clear roles for each agent
297 | - Set specific goals for better performance
298 | - Consider how agents should collaborate
299 | - Specify task delegation permissions
300 | """)
301 | elif framework == "crewai-flow":
302 | st.info("""
303 | **CrewAI Flow Tips:**
304 | - Define a clear sequence of workflow steps
305 | - Use the @start decorator for the entry point
306 | - Use @listen decorators to define step transitions
307 | - Maintain state between workflow steps
308 | - Consider how to aggregate results at the end
309 | """)
310 | elif framework == "langgraph":
311 | st.info("""
312 | **LangGraph Tips:**
313 | - Design your graph flow carefully
314 | - Define clear node responsibilities
315 | - Consider conditional routing between nodes
316 | - Think about how state is passed between nodes
317 | """)
318 | elif framework == "agno":
319 | st.info("""
320 | **Agno Tips:**
321 | - Define clear roles and goals for each agent
322 | - Assign tasks to appropriate agents
323 | - Utilize tools effectively for task completion
324 | - Coordinate agent interactions through the Team
325 | """)
326 | else: # react
327 | st.info("""
328 | **ReAct Tips:**
329 | - Focus on the reasoning steps
330 | - Define tools with clear descriptions
331 | - Provide examples of thought processes
332 | - Consider the observation/action cycle
333 | """)
334 |
335 | # Add provider comparison
336 | st.subheader("🔄 LLM Provider Comparison")
337 | comparison_md = """
338 | | Feature | OpenAI | WatsonX |
339 | | ------- | ------ | ------- |
340 | | Models | GPT-4o, GPT-3.5, etc. | Llama-3, Granite, etc. |
341 | | Strengths | State-of-the-art performance | Enterprise security & governance |
342 | | Best for | Consumer apps, research | Enterprise deployments |
343 | | Pricing | Token-based | Enterprise contracts |
344 | """
345 | st.markdown(comparison_md)
346 |
347 | # Display results
348 | if 'config' in st.session_state:
349 | st.subheader("🔍 Generated Configuration")
350 |
351 | # Tabs for different views
352 | tab1, tab2, tab3 = st.tabs(["📊 Visual Overview", "💻 Code", "🔄 JSON Config"])
353 |
354 | with tab1:
355 | current_framework = st.session_state.framework
356 |
357 | if current_framework in ["crewai", "crewai-flow"]:
358 | # Display Agents
359 | st.subheader("Agents")
360 | for agent in st.session_state.config["agents"]:
361 | with st.expander(f"🤖 {agent['role']}", expanded=True):
362 | st.write(f"**Goal:** {agent['goal']}")
363 | st.write(f"**Backstory:** {agent['backstory']}")
364 | st.write(f"**Tools:** {', '.join(agent['tools'])}")
365 |
366 | # Display Tasks
367 | st.subheader("Tasks")
368 | for task in st.session_state.config["tasks"]:
369 | with st.expander(f"📋 {task['name']}", expanded=True):
370 | st.write(f"**Description:** {task['description']}")
371 | st.write(f"**Expected Output:** {task['expected_output']}")
372 | st.write(f"**Assigned to:** {task['agent']}")
373 |
374 | # Show Flow Diagram for CrewAI Flow
375 | if current_framework == "crewai-flow":
376 | st.subheader("Flow Diagram")
377 | task_names = [task["name"] for task in st.session_state.config["tasks"]]
378 |
379 | # Create a simple graph visualization
380 | st.write("Event Flow:")
381 | flow_html = f"""
382 |