├── .env_sample ├── .github └── workflows │ ├── docker-image.yml │ └── retype-action.yml ├── .gitignore ├── Ai_Model_Training └── OpenAI │ ├── BHealthy │ ├── BHealth_tool_calling_data.jsonl │ └── BHealthy_data.jsonl │ ├── Sample_Prompts │ └── train_openai_model.py ├── ConversationCache ├── CacheManager.py ├── __init__.py ├── decorators │ ├── __init__.py │ └── singleton.py └── environment │ ├── Configuration.py │ └── __init__.py ├── Dockerfile ├── LICENSE ├── README.md ├── ai_helpers.py ├── app.py ├── appUtils.py ├── audio_helpers.py ├── cache └── .gitkeep ├── config.py ├── docker-compose-deploy.yaml ├── docker-compose-redis_only.yaml ├── docker-compose.yml ├── docs ├── CODE_OF_CONDUCT.md ├── Cloud-Hosting │ ├── aws.md │ ├── cloudserver.md │ ├── index.md │ └── kamatera.md ├── How-To-Guide │ ├── addcustomtoolsviaui.md │ ├── customtools.md │ ├── index.md │ ├── opensource.md │ ├── troubleshooting.md │ └── twilio.md ├── Self-Hosting │ ├── configuration.md │ ├── deployment.md │ ├── index.md │ ├── installation.md │ ├── prerequisites.md │ ├── runasservice.md │ └── setup.md ├── contribution.md ├── images │ ├── Add_Tool.png │ ├── Knotie-ai.png │ ├── admin_save.png │ ├── admin_settings.png │ ├── custom_tools.png │ ├── image.png │ └── submit_tool.png ├── improvements.md └── testing.md ├── generated_tools └── __init__.py ├── openapi_specs └── __init__.py ├── prompts.py ├── requirements.txt ├── retype.json ├── retype.yml ├── stages.py ├── templates ├── admin.html └── index.html ├── testAISalesAgent.py ├── testConversationCache.py ├── tools.py └── tools_helper.py /.env_sample: -------------------------------------------------------------------------------- 1 | # Flask Configuration 2 | SECRET_KEY=Secret12345787 3 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push Docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout repository 14 | uses: actions/checkout@v2 15 | 16 | - name: Set up Docker Buildx 17 | uses: docker/setup-buildx-action@v1 18 | 19 | - name: Log in to Docker Hub 20 | uses: docker/login-action@v1 21 | with: 22 | username: ${{ secrets.DOCKER_USERNAME }} 23 | password: ${{ secrets.DOCKER_PASSWORD }} 24 | 25 | - name: Build and push Docker image 26 | uses: docker/build-push-action@v2 27 | with: 28 | context: . 
29 | push: true 30 | tags: avijeett007/knotie-ai-web:release-1.0 31 | -------------------------------------------------------------------------------- /.github/workflows/retype-action.yml: -------------------------------------------------------------------------------- 1 | name: Publish Retype powered website to CloudFare Websites 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - develop 7 | 8 | jobs: 9 | publish: 10 | name: Publish to retype branch 11 | 12 | runs-on: ubuntu-latest 13 | 14 | permissions: 15 | contents: write 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - uses: retypeapp/action-build@latest 21 | 22 | - uses: retypeapp/action-github-pages@latest 23 | with: 24 | update-branch: true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | __pycache__/* 4 | __pycache__/*.pyc 5 | *.pyc 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # celery beat schedule file 88 | celerybeat-schedule 89 | celerybeat.pid 90 | 91 | # SageMath parsed files 92 | *.sage.py 93 | 94 | # Environments 95 | .env 96 | .venv 97 | env/ 98 | venv/ 99 | ENV/ 100 | env.bak/ 101 | venv.bak/ 102 | 103 | # Spyder project settings 104 | .spyderproject 105 | .spyproject 106 | 107 | # Rope project settings 108 | .ropeproject 109 | 110 | # mkdocs documentation 111 | /site 112 | 113 | # mypy 114 | .mypy_cache/ 115 | 116 | # Pyre type checker 117 | .pyre/ 118 | 119 | # pyright 120 | .pyright/ 121 | 122 | # end of file 123 | *.db -------------------------------------------------------------------------------- /Ai_Model_Training/OpenAI/BHealthy/BHealth_tool_calling_data.jsonl: -------------------------------------------------------------------------------- 1 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you like to know the price of our Silver Gym Membership?"}, {"role": "user", "content": "Yes, Sure."}, 
{"role": "assistant", "content": "{\"conversation_stage_id\": 3,\"tool_required\": \"yes\",\"tool_name\": \"PriceInquiry\", \"operation_id\": \"fetchMembershipPrice\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {}, \"tool_body_parameters\": {\"membership\": \"Silver-Gym-Membership\"}}"}]} 2 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you be happy to book a Free On-Site Appointment in our gym?"}, {"role": "user", "content": "Yes, Sure."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 7,\"tool_required\": \"yes\",\"tool_name\": \"OnsiteAppointment\", \"operation_id\": \"bookAppointment\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"url\": \"book-appointment\"}, \"tool_body_parameters\": {\"date\": \"next Monday\", \"time\": \"10:00 AM\"}}"}]} 3 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Can you please help me understand what is your gym requirement?"}, {"role": "user", "content": "Yes, I'm having some back pain that's why considering the gym."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 3,\"tool_required\": \"no\"}"}]} 4 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Do you have any yoga classes available?"}, {"role": "user", "content": "Yes, I'd like to know the schedule."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 2,\"tool_required\": \"yes\",\"tool_name\": \"GetClassSchedule\", \"operation_id\": \"fetchClassSchedule\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"class_name\": \"yoga\"}, \"tool_body_parameters\": {}}"}]} 5 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you like to schedule a trial session?"}, {"role": "user", "content": "Yes, can I book it for tomorrow?"}, {"role": "assistant", "content": "{\"conversation_stage_id\": 6,\"tool_required\": \"yes\",\"tool_name\": \"ScheduleTrialSession\", \"operation_id\": \"bookTrialSession\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"url\": \"schedule-trial\"}, \"tool_body_parameters\": {\"date\": \"tomorrow\", \"time\": \"10:00 AM\"}}"}]} 6 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Can you tell me more about your fitness goals?"}, {"role": 
"user", "content": "I want to lose weight and build muscle."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 4,\"tool_required\": \"no\"}"}]} 7 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you like to upgrade to our Gold membership?"}, {"role": "user", "content": "What are the benefits of the Gold membership?"}, {"role": "assistant", "content": "{\"conversation_stage_id\": 2,\"tool_required\": \"no\"}"}]} 8 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Can I book a session for personal training?"}, {"role": "user", "content": "Yes, I am available next Monday at 10 AM."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 7,\"tool_required\": \"yes\",\"tool_name\": \"PersonalTraining\", \"operation_id\": \"bookTrainerSession\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"trainer_id\": \"johndoe\"}, \"tool_body_parameters\": {\"date\": \"next Monday\", \"time\": \"10:00 AM\"}}"}]} 9 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "What time are you available for an appointment?"}, {"role": "user", "content": "I am free tomorrow afternoon."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 6,\"tool_required\": \"yes\",\"tool_name\": \"GymAppointmentAvailability\", \"operation_id\": \"checkAvailability\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"date\": \"tomorrow\", \"time\": \"afternoon\"}, \"tool_body_parameters\": {}}"}]} 10 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Are you interested in our fitness classes?"}, {"role": "user", "content": "Yes, I'd like to join the spin class."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 4,\"tool_required\": \"yes\",\"tool_name\": \"GetClassSchedule\", \"operation_id\": \"fetchClassSchedule\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"class_name\": \"spin\"}, \"tool_body_parameters\": {}}"}]} 11 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you be interested in our new Zumba classes?"}, {"role": "user", "content": "Yes, when do they start?"}, {"role": "assistant", "content": "{\"conversation_stage_id\": 2,\"tool_required\": \"no\"}"}]} 12 | {"messages": [{"role": "system", "content": "You are a 
sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you like to schedule a tour of our facilities?"}, {"role": "user", "content": "Yes, I can visit tomorrow."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 7,\"tool_required\": \"yes\",\"tool_name\": \"FacilityTourBooking\", \"operation_id\": \"bookTour\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"url\": \"book-gym-tour\"}, \"tool_body_parameters\": {\"date\": \"tomorrow\", \"time\": \"anytime\"}}"}]} 13 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you like to know more about our personalized diet plans?"}, {"role": "user", "content": "Yes, please tell me more."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 2,\"tool_required\": \"no\"}"}]} 14 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Are you interested in booking a session with our personal trainer?"}, {"role": "user", "content": "Yes, I am."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 4,\"tool_required\": \"yes\",\"tool_name\": \"PersonalTraining\", \"operation_id\": \"scheduleSession\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {}, \"tool_body_parameters\": {\"trainer\": \"Jane Doe\", \"session_time\": \"2024-08-20T10:00:00\"}}"}]} 15 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Do you want to check the availability of our next appointment?"}, {"role": "user", "content": "Yes, please."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 6,\"tool_required\": \"yes\",\"tool_name\": \"GymAppointmentAvailability\", \"operation_id\": \"checkAvailability\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {}, \"tool_body_parameters\": {}}"}]} 16 | {"messages": [{"role": "system", "content": "You are a sales assistant helping your sales agent determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool to move to the next stage of conversation."}, {"role": "assistant", "content": "Would you like to receive a Calendly invite for a meeting?"}, {"role": "user", "content": "Yes, send it to me."}, {"role": "assistant", "content": "{\"conversation_stage_id\": 7,\"tool_required\": \"yes\",\"tool_name\": \"CalendlyMeetingTool\", \"operation_id\": \"sendInvite\", \"tool_headers\": {\"Authorization\": \"sensitive_value\"}, \"tool_parameters\": {\"email\": \"user@example.com\"}, \"tool_body_parameters\": {}}"}]} -------------------------------------------------------------------------------- 
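Before uploading BHealth_tool_calling_data.jsonl, it is worth confirming that every example ends with an assistant message whose content is itself parseable JSON carrying the expected decision fields, since one malformed line will fail the fine-tuning upload or teach the wrong output shape. The following is a minimal validation sketch, not part of the repository; the file path and required key names are taken from the examples above.

```python
import json

# Path assumed from the repository layout shown above.
DATASET_PATH = "Ai_Model_Training/OpenAI/BHealthy/BHealth_tool_calling_data.jsonl"

# Keys the examples above include whenever tool_required is "yes".
REQUIRED_TOOL_KEYS = {"tool_name", "operation_id", "tool_headers",
                      "tool_parameters", "tool_body_parameters"}

def validate_line(line: str) -> list:
    """Return a list of problems found in one JSONL training example."""
    problems = []
    messages = json.loads(line).get("messages", [])
    if not messages or messages[-1].get("role") != "assistant":
        return ["last message is not from the assistant"]
    # The final assistant message carries the stage/tool decision as a JSON string.
    decision = json.loads(messages[-1]["content"])
    if not isinstance(decision.get("conversation_stage_id"), int):
        problems.append("conversation_stage_id missing or not a number")
    if decision.get("tool_required") not in ("yes", "no"):
        problems.append("tool_required must be 'yes' or 'no'")
    if decision.get("tool_required") == "yes":
        missing = REQUIRED_TOOL_KEYS - decision.keys()
        if missing:
            problems.append(f"missing tool fields: {sorted(missing)}")
    return problems

if __name__ == "__main__":
    with open(DATASET_PATH, encoding="utf-8") as f:
        for line_no, line in enumerate(f, start=1):
            for problem in validate_line(line.strip()):
                print(f"line {line_no}: {problem}")
```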
/Ai_Model_Training/OpenAI/Sample_Prompts: -------------------------------------------------------------------------------- 1 | Help me create training data in jsonl format to finetune gpt-3.5 turbo model. 2 | 3 | I m looking to replace the system prompt for general AI model for my application to use a purpose specific model. 4 | 5 | System prompt I'm looking to replace 6 | 7 | You are a sales assistant helping your sales agent to determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool in order to move to the next stage of conversation. 8 | Company has the following Products: {company_products_services}. 9 | Start of conversation history: 10 | === 11 | {conversation_history} 12 | Customer Says: {user_input} 13 | === 14 | End of conversation history. 15 | 16 | Current Conversation stage is: {conversation_stage_id} 17 | Make sure to change and progress the conversation stage based on the latest user input and conversation history. Your main focus should be the last Customer Input. 18 | Determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting only from the following options: 19 | {conversation_stages} 20 | 21 | TOOLS: 22 | ------ 23 | {salesperson_name} has access to the following tools: 24 | 25 | {tools} 26 | 27 | Each tool has specific operations and requires certain parameters, including headers, query parameters, path parameters, and body parameters. 28 | **Only decide to call a tool if all required parameters are available through the conversation history**. 29 | **Important Instructions**: 30 | - **Never** use placeholder values such as "TBD" or "To-be-filled". 31 | - If all required parameters are not available, set tool_required to 'no' to help move the conversation forward for the user to provide the missing information. 32 | - Use "sensitive_value" for sensitive fields when the real value is not available to you. 33 | - Your Response must be in JSON format with the following fields: conversation_stage_id (number), tool_required ('yes' or 'no'). If tool_required is 'yes', include tool_name, operation_id, tool_headers, tool_parameters, and tool_body_parameters as applicable. 34 | - **Only** call a tool if **all** required parameters are available. If anything is missing, focus on gathering that information first. 35 | - Your response must be in JSON format as example below. 36 | 37 | 38 | Do not answer anything else nor add anything to your answer. 39 | 40 | Example 1: 41 | Conversation history: 42 | assistant: Would you like know the price of our Silver Gym Membership? 43 | User: Yes, Sure. 44 | assistant: "conversation_stage_id": 3, "tool_required": "yes", "tool_name": "PriceInquiry", 45 | "operation_id": "fetchMembershipPrice", 46 | "tool_headers": {{"Authorization": "sensitive_value"}}, 47 | "tool_parameters": {{}}, 48 | "tool_body_parameters": {{"membership": "Silver-Gym-Membership"}} 49 | End of example 1. 50 | 51 | Example 2: 52 | Conversation history: 53 | assistant: Would you be happy to book a Free On-Site Appointment in our gym? 54 | User: Yes, Sure. 55 | assistant: "conversation_stage_id": 7, "tool_required": "yes", "tool_name": "AppointmentBooking", 56 | "operation_id": "bookAppointment", 57 | "tool_headers": {{"Authorization": "sensitive_value"}}, 58 | "tool_parameters": {{"url": "book-appointment"}}, 59 | "tool_body_parameters": {{"location": "Main Gym", "trainer": "John Doe"}} 60 | End of example 2. 
61 | 62 | Example 3: 63 | Conversation history: 64 | assistant: Can you please help me understand what is your gym requirement? 65 | User: Yes, I'm having some back pain that's why considering the gym. 66 | assistant: "conversation_stage_id": 3, "tool_required": "no" 67 | End of example 3. 68 | 69 | Example 4: 70 | Conversation history: 71 | assistant: Would you like to receive a summary of the available gym memberships? 72 | User: Yes, please. 73 | assistant: "conversation_stage_id": 5, "tool_required": "yes", "tool_name": "MembershipSummary", 74 | "operation_id": "fetchSummary", 75 | "tool_headers": {{"Authorization": "sensitive_value"}}, 76 | "tool_parameters": {{"url": "membership-summary"}}, 77 | "tool_body_parameters": {{"userid": "johndoe"}} 78 | End of example 4. 79 | 80 | 81 | 82 | I want the Finetuned AI Model based assistant to generate response as per above example 83 | "conversation_stage_id": 3, "tool_required": "yes", "tool_name": "PriceInquiry", 84 | "operation_id": "fetchMembershipPrice", 85 | "tool_headers": {{"Authorization": "sensitive_value"}}, 86 | "tool_parameters": {{}}, 87 | "tool_body_parameters": {{"membership": "Silver-Gym-Membership"}} 88 | 89 | 90 | Please use the below format to create the jsonl training data please 91 | 92 | {"messages": [{"role": "system", "content": "You are a happy assistant that puts a positive spin on everything."}, {"role": "user", "content": "I fell off my bike today."}, {"role": "assistant", "content": "It's great that you're getting exercise outdoors!"}]} 93 | {"messages": [{"role": "system", "content": "You are a happy assistant that puts a positive spin on everything."}, {"role": "user", "content": "I lost my tennis match today."}, {"role": "assistant", "content": "It's ok, it happens to everyone."}, {"role": "user", "content": "But I trained so hard!"}, {"role": "assistant", "content": "It will pay off next time."}, {"role": "user", "content": "I'm going to switch to golf."}, {"role": "assistant", "content": "Golf is fun too!"}, {"role": "user", "content": "I don't even know how to play golf."}, {"role": "assistant", "content": "It's easy to learn!"}]} 94 | {"messages": [{"role": "user", "content": "I lost my book today."}, {"role": "assistant", "content": "You can read everything on ebooks these days!"}]} 95 | 96 | Make sure the last message in each entry is from the assistant. 
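When this data is generated programmatically rather than by prompting, each training example reduces to one chat transcript per line with the decision object serialized into the final assistant message. A short sketch of that construction, using the system prompt and example values from the samples above (none of this is repository code):

```python
import json

SYSTEM_PROMPT = (
    "You are a sales assistant helping your sales agent determine the next stage "
    "of conversation to move to when talking to a user and decide if the sales "
    "agent needs to call a tool to move to the next stage of conversation."
)

def build_example(agent_msg, user_msg, decision):
    """Build one JSONL line; the last message must come from the assistant."""
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "assistant", "content": agent_msg},
        {"role": "user", "content": user_msg},
        # The decision dict is embedded as a JSON string, matching the dataset format above.
        {"role": "assistant", "content": json.dumps(decision)},
    ]
    return json.dumps({"messages": messages})

print(build_example(
    "Would you like to know the price of our Silver Gym Membership?",
    "Yes, sure.",
    {
        "conversation_stage_id": 3,
        "tool_required": "yes",
        "tool_name": "PriceInquiry",
        "operation_id": "fetchMembershipPrice",
        "tool_headers": {"Authorization": "sensitive_value"},
        "tool_parameters": {},
        "tool_body_parameters": {"membership": "Silver-Gym-Membership"},
    },
))
```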
-------------------------------------------------------------------------------- /Ai_Model_Training/OpenAI/train_openai_model.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tiktoken 3 | import numpy as np 4 | from collections import defaultdict 5 | from openai import OpenAI 6 | import os 7 | import time 8 | 9 | # Initialize the OpenAI client 10 | client = OpenAI(api_key='Enter Your OpenAI API Key Here...') 11 | 12 | # Path to your local JSONL dataset file 13 | # dataset_path = './BHealthy/BHealthy_data.jsonl' 14 | dataset_path = './BHealthy/BHealth_tool_calling_data.jsonl' 15 | # Load the dataset 16 | with open(dataset_path, 'r', encoding='utf-8') as f: 17 | dataset = [json.loads(line) for line in f] 18 | 19 | # Initial dataset stats 20 | print("Num examples:", len(dataset)) 21 | print("First example:") 22 | for message in dataset[0]["messages"]: 23 | print(message) 24 | 25 | # Format error checks 26 | format_errors = defaultdict(int) 27 | 28 | for ex in dataset: 29 | if not isinstance(ex, dict): 30 | format_errors["data_type"] += 1 31 | continue 32 | 33 | messages = ex.get("messages", None) 34 | if not messages: 35 | format_errors["missing_messages_list"] += 1 36 | continue 37 | 38 | for message in messages: 39 | if "role" not in message or "content" not in message: 40 | format_errors["message_missing_key"] += 1 41 | 42 | if any(k not in ("role", "content", "name", "function_call", "weight") for k in message): 43 | format_errors["message_unrecognized_key"] += 1 44 | 45 | if message.get("role", None) not in ("system", "user", "assistant", "function"): 46 | format_errors["unrecognized_role"] += 1 47 | 48 | content = message.get("content", None) 49 | function_call = message.get("function_call", None) 50 | 51 | if (not content and not function_call) or not isinstance(content, str): 52 | format_errors["missing_content"] += 1 53 | 54 | if not any(message.get("role", None) == "assistant" for message in messages): 55 | format_errors["example_missing_assistant_message"] += 1 56 | 57 | if format_errors: 58 | print("Found errors:") 59 | for k, v in format_errors.items(): 60 | print(f"{k}: {v}") 61 | else: 62 | print("No errors found") 63 | 64 | # Token Counting Utilities 65 | encoding = tiktoken.get_encoding("cl100k_base") 66 | 67 | # not exact! 
68 | # simplified from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb 69 | def num_tokens_from_messages(messages, tokens_per_message=3, tokens_per_name=1): 70 | num_tokens = 0 71 | for message in messages: 72 | num_tokens += tokens_per_message 73 | for key, value in message.items(): 74 | num_tokens += len(encoding.encode(value)) 75 | if key == "name": 76 | num_tokens += tokens_per_name 77 | num_tokens += 3 78 | return num_tokens 79 | 80 | def num_assistant_tokens_from_messages(messages): 81 | num_tokens = 0 82 | for message in messages: 83 | if message["role"] == "assistant": 84 | num_tokens += len(encoding.encode(message["content"])) 85 | return num_tokens 86 | 87 | def print_distribution(values, name): 88 | print(f"\n#### Distribution of {name}:") 89 | print(f"min / max: {min(values)}, {max(values)}") 90 | print(f"mean / median: {np.mean(values)}, {np.median(values)}") 91 | print(f"p5 / p95: {np.quantile(values, 0.1)}, {np.quantile(values, 0.9)}") 92 | 93 | # Warnings and tokens counts 94 | n_missing_system = 0 95 | n_missing_user = 0 96 | n_messages = [] 97 | convo_lens = [] 98 | assistant_message_lens = [] 99 | 100 | for ex in dataset: 101 | messages = ex["messages"] 102 | if not any(message["role"] == "system" for message in messages): 103 | n_missing_system += 1 104 | if not any(message["role"] == "user" for message in messages): 105 | n_missing_user += 1 106 | n_messages.append(len(messages)) 107 | convo_lens.append(num_tokens_from_messages(messages)) 108 | assistant_message_lens.append(num_assistant_tokens_from_messages(messages)) 109 | 110 | print("Num examples missing system message:", n_missing_system) 111 | print("Num examples missing user message:", n_missing_user) 112 | print_distribution(n_messages, "num_messages_per_example") 113 | print_distribution(convo_lens, "num_total_tokens_per_example") 114 | print_distribution(assistant_message_lens, "num_assistant_tokens_per_example") 115 | n_too_long = sum(l > 16385 for l in convo_lens) 116 | print(f"\n{n_too_long} examples may be over the 16,385 token limit, they will be truncated during fine-tuning") 117 | 118 | # Pricing and default n_epochs estimate 119 | MAX_TOKENS_PER_EXAMPLE = 16385 120 | 121 | TARGET_EPOCHS = 10 122 | MIN_TARGET_EXAMPLES = 100 123 | MAX_TARGET_EXAMPLES = 25000 124 | MIN_DEFAULT_EPOCHS = 1 125 | MAX_DEFAULT_EPOCHS = 25 126 | 127 | n_epochs = TARGET_EPOCHS 128 | n_train_examples = len(dataset) 129 | if n_train_examples * TARGET_EPOCHS < MIN_TARGET_EXAMPLES: 130 | n_epochs = min(MAX_DEFAULT_EPOCHS, MIN_TARGET_EXAMPLES // n_train_examples) 131 | elif n_train_examples * TARGET_EPOCHS > MAX_TARGET_EXAMPLES: 132 | n_epochs = max(MIN_DEFAULT_EPOCHS, MAX_TARGET_EXAMPLES // n_train_examples) 133 | 134 | n_billing_tokens_in_dataset = sum(min(MAX_TOKENS_PER_EXAMPLE, length) for length in convo_lens) 135 | print(f"Dataset has ~{n_billing_tokens_in_dataset} tokens that will be charged for during training") 136 | print(f"By default, you'll train for {n_epochs} epochs on this dataset") 137 | print(f"By default, you'll be charged for ~{n_epochs * n_billing_tokens_in_dataset} tokens") 138 | 139 | # Function to estimate cost 140 | def estimate_cost(total_tokens, epochs, cost_per_token=0.000003): # Example cost per token 141 | return total_tokens * epochs * cost_per_token 142 | 143 | # Function to upload the dataset 144 | def upload_dataset(file_path): 145 | response = client.files.create( 146 | file=open(file_path, 'rb'), 147 | purpose='fine-tune' 148 | ) 149 | file_id = 
response.id 150 | print(f"Uploaded file ID: {file_id}") 151 | return file_id 152 | 153 | # Function to create a fine-tuning job 154 | def create_fine_tuning_job(file_id, model_name='gpt-4o-mini-2024-07-18', epochs=10): 155 | response = client.fine_tuning.jobs.create( 156 | training_file=file_id, 157 | model=model_name, 158 | hyperparameters={ 159 | "n_epochs": epochs 160 | } 161 | ) 162 | fine_tune_id = response.id 163 | print(f"Fine-tune job ID: {fine_tune_id}") 164 | return fine_tune_id 165 | 166 | # Function to monitor the fine-tuning job 167 | def monitor_fine_tuning_job(fine_tune_id): 168 | response = client.fine_tuning.jobs.retrieve(fine_tune_id) 169 | status = response.status 170 | print(f"Fine-tuning status: {status}") 171 | return response 172 | 173 | # Function to use the fine-tuned model 174 | 175 | def use_fine_tuned_model(model_name, prompt, max_tokens=150): 176 | try: 177 | response = client.chat.completions.create( 178 | model=model_name, 179 | messages=[ 180 | {"role": "system", "content": "You are a helpful assistant."}, 181 | {"role": "user", "content": prompt} 182 | ], 183 | max_tokens=max_tokens 184 | ) 185 | print(response.choices[0].message.content) 186 | return response.choices[0].message.content 187 | except Exception as e: 188 | print(f"Error using fine-tuned model: {e}") 189 | return None 190 | 191 | # Function to calculate dataset tokens 192 | def calculate_dataset_tokens(file_path): 193 | total_tokens = 0 194 | with open(file_path, 'r') as file: 195 | for line in file: 196 | data = json.loads(line) 197 | for message in data['messages']: 198 | total_tokens += num_tokens_from_messages([message]) 199 | if 'response' in data: 200 | response_text = json.dumps(data['response']) 201 | total_tokens += num_tokens_from_messages([{'content': response_text, 'role': 'assistant'}]) 202 | return total_tokens 203 | 204 | # Main function to orchestrate the fine-tuning process 205 | def main(): 206 | # Calculate the number of tokens in the dataset 207 | total_tokens = calculate_dataset_tokens(dataset_path) 208 | print(f"Total tokens in the dataset: {total_tokens}") 209 | 210 | # Estimate the cost 211 | epochs = 10 # Define the number of epochs 212 | estimated_cost = estimate_cost(total_tokens, epochs) 213 | print(f"Estimated cost for fine-tuning: ${estimated_cost:.2f}") 214 | 215 | # Confirm if the user wants to proceed 216 | proceed = input("Do you want to proceed with fine-tuning?
(yes/no): ") 217 | if proceed.lower() != 'yes': 218 | print("Fine-tuning aborted.") 219 | return 220 | 221 | # Upload the dataset 222 | file_id = upload_dataset(dataset_path) 223 | 224 | # Create the fine-tuning job 225 | fine_tune_id = create_fine_tuning_job(file_id, epochs=epochs) 226 | 227 | # Monitor the fine-tuning job until completion 228 | while True: 229 | response = monitor_fine_tuning_job(fine_tune_id) 230 | if response.status in ['succeeded', 'failed']: 231 | break 232 | time.sleep(60) # Wait for 1 minute before checking the status again 233 | 234 | if response.status == 'succeeded': 235 | # Use the fine-tuned model 236 | fine_tuned_model = response.fine_tuned_model 237 | prompt = "I want to build some muscle and loose belly fat" 238 | use_fine_tuned_model(fine_tuned_model, prompt) 239 | else: 240 | print("Fine-tuning job did not succeed.") 241 | 242 | # Run the main function 243 | if __name__ == '__main__': 244 | main() 245 | -------------------------------------------------------------------------------- /ConversationCache/CacheManager.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import sqlite3 3 | 4 | from ConversationCache.decorators import singleton 5 | from ConversationCache.environment import Configuration 6 | 7 | 8 | @singleton 9 | class CacheManager: 10 | configuration = Configuration() 11 | connection = None 12 | 13 | def __init__(self) -> None: 14 | super().__init__() 15 | if self.configuration.CACHE_MANAGER_CACHING_MEDIA: 16 | self._create_media_folder() 17 | if self.configuration.CACHE_MANAGER_CACHING_AI_RESPONSES: 18 | self.connection = sqlite3.connect(self.configuration.CACHE_MANAGER_DB_FILE_PATH) 19 | self._create_database() 20 | 21 | def _create_database(self): 22 | cursor = self.connection.cursor() 23 | cursor.execute(''' 24 | CREATE TABLE IF NOT EXISTS queries_and_responses ( 25 | id INTEGER PRIMARY KEY, 26 | user_input TEXT NOT NULL UNIQUE, 27 | agent_output TEXT NOT NULL, 28 | media_temp_file_path VARCHAR(255) NOT NULL 29 | ) 30 | ''') 31 | cursor.execute(''' 32 | CREATE VIRTUAL TABLE IF NOT EXISTS queries_and_responses_fts USING fts5(user_input, agent_output, media_temp_file_path) 33 | ''') 34 | self.connection.commit() 35 | cursor.close() 36 | 37 | self._sync_fts() 38 | 39 | def put(self, user_input: str, agent_output: str, media_temp_file_path: str = ""): 40 | cursor = self.connection.cursor() 41 | 42 | cursor.execute(''' 43 | INSERT INTO queries_and_responses (user_input, agent_output, media_temp_file_path) 44 | VALUES (?, ?, ?) 45 | ''', (user_input, agent_output, media_temp_file_path)) 46 | self.connection.commit() 47 | cursor.close() 48 | self._sync_fts() 49 | 50 | def _sync_fts(self): 51 | cursor = self.connection.cursor() 52 | 53 | cursor.execute(''' 54 | INSERT OR REPLACE INTO queries_and_responses_fts (rowid, user_input, agent_output, media_temp_file_path) 55 | SELECT id, user_input, agent_output, media_temp_file_path FROM queries_and_responses 56 | ''') 57 | 58 | self.connection.commit() 59 | cursor.close() 60 | 61 | def get(self, user_input: str): 62 | cursor = self.connection.cursor() 63 | 64 | cursor.execute(''' 65 | SELECT * FROM queries_and_responses WHERE user_input=? 
66 | ''', (user_input,)) 67 | result = cursor.fetchone() 68 | cursor.close() 69 | return result 70 | 71 | def search(self, query: str): 72 | cursor = self.connection.cursor() 73 | 74 | cursor.execute(''' 75 | SELECT user_input, agent_output, media_temp_file_path 76 | FROM queries_and_responses_fts 77 | WHERE queries_and_responses_fts MATCH ? 78 | LIMIT 1 79 | ''', (query,)) 80 | 81 | result = cursor.fetchone() 82 | cursor.close() 83 | return result 84 | 85 | def __del__(self): 86 | if self.connection is not None: 87 | self.connection.close() 88 | 89 | def _create_media_folder(self): 90 | if not os.path.isdir(self.configuration.CACHE_MANAGER_MEDIA_DIR_PATH): 91 | os.mkdir(self.configuration.CACHE_MANAGER_MEDIA_DIR_PATH) 92 | 93 | def _create_media_file_temp_name(self): 94 | pass 95 | -------------------------------------------------------------------------------- /ConversationCache/__init__.py: -------------------------------------------------------------------------------- 1 | from .CacheManager import CacheManager 2 | -------------------------------------------------------------------------------- /ConversationCache/decorators/__init__.py: -------------------------------------------------------------------------------- 1 | from .singleton import singleton 2 | -------------------------------------------------------------------------------- /ConversationCache/decorators/singleton.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | 4 | def singleton(cls): 5 | instances = {} 6 | 7 | @functools.wraps(cls) 8 | def get_instance(*args, **kwargs) -> cls: 9 | if cls not in instances: 10 | instances[cls] = cls(*args, **kwargs) 11 | return instances[cls] 12 | 13 | return get_instance 14 | -------------------------------------------------------------------------------- /ConversationCache/environment/Configuration.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from dotenv import load_dotenv 4 | 5 | from ConversationCache.decorators import singleton 6 | 7 | 8 | @singleton 9 | class Configuration: 10 | CACHE_MANAGER_DB_FILE_PATH = False 11 | CACHE_MANAGER_MEDIA_DIR_PATH = False 12 | CACHE_MANAGER_CACHING_MEDIA = False 13 | CACHE_MANAGER_CACHING_AI_RESPONSES = False 14 | CACHE_MANAGER_CONNECTION_POOL_SIZE = False 15 | OPENAI_FINE_TUNED_MODEL_ID = False 16 | 17 | def __init__(self) -> None: 18 | super().__init__() 19 | load_dotenv() 20 | self.OPENAI_FINE_TUNED_MODEL_ID = os.getenv('OPENAI_FINE_TUNED_MODEL_ID', False) 21 | self.CACHE_MANAGER_CONNECTION_POOL_SIZE = os.getenv('CACHE_MANAGER_CONNECTION_POOL_SIZE', 10) 22 | self.CACHE_MANAGER_DB_FILE_PATH = os.getenv('CACHE_MANAGER_DB_FILE_PATH', False) 23 | self.CACHE_MANAGER_MEDIA_DIR_PATH = os.getenv('CACHE_MANAGER_MEDIA_DIR_PATH', False) 24 | self.CACHE_MANAGER_CACHING_MEDIA = os.getenv('CACHE_MANAGER_CACHING_MEDIA', 'FALSE').upper() == 'TRUE' 25 | self.CACHE_MANAGER_CACHING_AI_RESPONSES = os.getenv('CACHE_MANAGER_CACHING_AI_RESPONSES', 26 | 'FALSE').upper() == 'TRUE' 27 | 28 | # Validations 29 | if self.CACHE_MANAGER_CACHING_AI_RESPONSES and self.CACHE_MANAGER_DB_FILE_PATH is False: 30 | raise ValueError("CACHE_MANAGER_DB_FILE_PATH must be defined in ENV") 31 | 32 | if self.CACHE_MANAGER_CACHING_MEDIA and self.CACHE_MANAGER_MEDIA_DIR_PATH is False: 33 | raise ValueError("CACHE_MANAGER_MEDIA_DIR_PATH must be defined in ENV") 34 | -------------------------------------------------------------------------------- 
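Configuration only enables the SQLite-backed cache when the corresponding flags are set, so CacheManager is normally exercised with a couple of environment variables in place before the package is imported. A usage sketch follows; the variable names come from Configuration.py above, while the values, example strings, and temporary-file choice are illustrative assumptions.

```python
import os
import tempfile

# Illustrative values only: enable AI-response caching and point it at a fresh SQLite file.
os.environ["CACHE_MANAGER_CACHING_AI_RESPONSES"] = "TRUE"
os.environ["CACHE_MANAGER_DB_FILE_PATH"] = os.path.join(tempfile.mkdtemp(), "knotie_cache.db")

# Import after setting the environment: Configuration reads these variables
# when CacheManager is first imported.
from ConversationCache import CacheManager

cache = CacheManager()  # @singleton: later calls return this same instance
cache.put("How much is the Silver membership?", "The Silver membership is billed monthly.")

exact = cache.get("How much is the Silver membership?")  # exact-match lookup
fuzzy = cache.search("Silver membership")                # FTS5 full-text lookup
print(exact)
print(fuzzy)
```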
/ConversationCache/environment/__init__.py: -------------------------------------------------------------------------------- 1 | from .Configuration import Configuration 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.9-slim 3 | 4 | # Set the working directory in the container 5 | WORKDIR /usr/src/app 6 | 7 | # Install any needed packages specified in requirements.txt 8 | COPY requirements.txt ./ 9 | RUN pip install --upgrade pip 10 | RUN pip install --no-cache-dir -r requirements.txt 11 | 12 | # Create a directory to store audio files 13 | RUN mkdir -p /usr/src/app/audio_files 14 | RUN mkdir -p /usr/src/app/cache 15 | 16 | # Copy the current directory contents into the container at /usr/src/app 17 | COPY . . 18 | 19 | # Make port 5000 available to the world outside this container 20 | EXPOSE 5000 21 | 22 | # # Define environment variable 23 | ENV FLASK_APP=app.py 24 | ENV FLASK_RUN_HOST=0.0.0.0 25 | ENV FLASK_RUN_PORT=5000 26 | 27 | # Run app.py when the container launches 28 | CMD ["flask", "run", "--host=0.0.0.0", "--port=5000"] 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 
38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 
97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. 
For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 
209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 
331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 | [Knotie-AI Logo image] 4 |
5 | 6 | Knotie-AI 7 | 8 |
9 | 10 | [Buy me a coffee badge] 11 | 12 |

13 | 14 | --- 15 | 16 | Knotie-AI is a completely open-source inbound/outbound AI Sales Agent that can communicate with your potential leads and customers. 17 | 18 | For detailed documentation and setup instructions, please visit the [Knotie-AI Documentation](https://knotie-ai.pages.dev/). 19 | 20 | --- 21 | 22 | ## 🚀 Upcoming Features 23 | 24 | | Feature | Description | Potential Release Date | Status | Contribute | 25 | |--------------------------------|-----------------------------------------------------------------------------|------------------------|------------------------------------------------------------------------|------------------| 26 | | **Evaluation Script** | A script to evaluate and test AI Agents before deployment | Q3 2024 | ![success](https://img.shields.io/badge/Status-Completed-brightgreen) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 27 | | **Cloud Deployment Guide** | A comprehensive guide for deploying Knotie-AI on the cloud VPS | Q3 2024 | ![success](https://img.shields.io/badge/Status-Completed-brightgreen) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 28 | | **Wordpress Plugin Update** | Wordpress Integration through Plugin to call the AI Agent from Wordpress | Q3 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 29 | | **File-based State Management**| Support for file-based and cloud database state management | Q3 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 30 | | **Reduce Latency to Response Time under 1 Sec** | Reduce the AI Sales Agent response latency under 1 Sec | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 31 | | **Introduce & Support Assistant API** | Support for OpenAI Threads & Assistant API to offload memory management/tool calling to Assistant | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 32 | | **OpenAPI Specification Support** | Introduce custom API/tools by using OpenAPI/Swagger API Specs. 
No need to modify any code | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 33 | | **Enhanced Admin UI** | Introduce appropriate UI library and enhance Admin UI to manage/control AI Agent behaviour from the UI | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 34 | | **Custom Workflow support** | Create and manage Workflows, integrate with popular business software to be managed by the AI Sales Agent | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 35 | | **Open Source AI Model Support**| Support Self Hosted Open Source AI Models | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 36 | | **One-Click Twilio Integration**| Integrate Twilio with one-click, instead of manually updating configs | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 37 | | **One-Click Third Party Tool Integration** | Integrate Popular third party SaaS such as Calendly, GHL to manage business by this AI Sales Agent | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 38 | | **Twilio Streaming API Support**| Enhance AI Sales Agent With Twilio Streaming to make it faster and more efficient | Q4 2024 | ![info](https://img.shields.io/badge/Status-Upcoming-blue) | [Contribute](https://kno2gether.com/knotie-ai-sales-agent/) | 39 | 40 | ## 🎥 Demo Video 41 | 42 | Watch the Knotie-AI demo video to see it in action: 43 | 44 |

45 | 46 | [Knotie-AI Demo Video thumbnail] 47 | 48 |
49 | 50 | 51 | 52 | [Subscribe badge] 53 | 54 |

55 | 56 | 57 | --- 58 | 59 | ## 🤝 Contribute 60 | 61 | We welcome all contributions to the Knotie-AI project! If you have ideas for new features, want to fix a bug, or just improve the documentation, your help is appreciated. 62 | 63 | - **Fork the Repository**: Click the "Fork" button at the top of this page to create your own copy of the repository. 64 | - **Create a Branch**: Create a branch for your feature or bugfix. 65 | - **Make Your Changes**: Make any changes in your forked repository. 66 | - **Submit a Pull Request**: Submit a pull request to have your changes reviewed and merged into the main repository. 67 | 68 | Please see our [Code of Conduct](https://knotie-ai.pages.dev/CODE_OF_CONDUCT) for guidelines on contributing. 69 | 70 | Join our [Discord Community](https://discord.com/invite/7UKpgUbEXf). 71 | Join our [Kno2gether Club](https://community.kno2gether.com/communities/groups/kno2gether-community/home?invite=66b617e90fd0ff23e04efce2) to discuss features, ask questions, and share feedback. 72 | 73 | --- 74 | 75 | ## 📚 Documentation 76 | 77 | - **Documentation Site**: [Knotie-AI Docs](https://knotie-ai.pages.dev/) 78 | - **GitHub Repository**: [Knotie-AI on GitHub](https://github.com/avijeett007/knotie-ai) 79 | - **Code of Conduct**: [Read Here](https://knotie-ai.pages.dev/CODE_OF_CONDUCT) 80 | -------------------------------------------------------------------------------- /ai_helpers.py: -------------------------------------------------------------------------------- 1 | # This code is licensed under the Custom License for Paid Community Members. 2 | # For more information, see the LICENSE file in the root directory of this repository. 3 | from ConversationCache import CacheManager 4 | from ConversationCache.environment import Configuration 5 | from config import Config 6 | import openai 7 | import anthropic 8 | from groq import Groq 9 | from anthropic import Anthropic 10 | import json 11 | from langchain_core.prompts import PromptTemplate 12 | from prompts import get_prompt_template 13 | from stages import OUTBOUND_CONVERSATION_STAGES, INBOUND_CONVERSATION_STAGES 14 | import logging 15 | import sqlite3 16 | from openapi_spec_validator import validate_spec 17 | import yaml 18 | import importlib 19 | import requests 20 | import sys 21 | import os 22 | # Import tool-related functions 23 | from tools_helper import ( 24 | initialize_tools, 25 | get_tool_and_spec, 26 | call_api, 27 | replace_sensitive_values, 28 | fetch_tools_from_db, 29 | initialized_tools 30 | ) 31 | 32 | sys.path.append(os.path.join(os.path.dirname(__file__), 'generated_tools')) 33 | logging.basicConfig(level=logging.INFO) 34 | logger = logging.getLogger(__name__) 35 | 36 | # session ={} # Added for testing. 
remove after testing 37 | ai_api_key = Config.AI_API_KEY 38 | salesperson_name = Config.AISALESAGENT_NAME 39 | company_name = Config.COMPANY_NAME 40 | company_business = Config.COMPANY_BUSINESS 41 | conversation_purpose = Config.CONVERSATION_PURPOSE 42 | agent_custom_instructions = Config.AGENT_CUSTOM_INSTRUCTIONS 43 | company_products_services = Config.COMPANY_PRODUCTS_SERVICES 44 | conversation_stages = OUTBOUND_CONVERSATION_STAGES 45 | which_model = Config.WHICH_MODEL 46 | llm_model = Config.LLM_MODEL 47 | # In-memory storage for testing 48 | conversation_states = {} 49 | ai = None 50 | GENERATED_TOOLS_DIR = 'generated_tools/' 51 | 52 | if which_model == "GROQ": 53 | params = { 54 | "model": llm_model, 55 | "temperature": 0.5, 56 | "max_tokens": 100, 57 | "stream": False, 58 | "top_p": 1 59 | } # params["messages"] = prompt 60 | elif which_model == "OpenAI" or which_model == "OpenRouter": 61 | params = { 62 | "model": llm_model, 63 | "temperature": 0.5, 64 | "max_tokens": 100, 65 | } # params["messages"] = prompt 66 | elif which_model == "Anthropic": 67 | params = { 68 | "model": llm_model, 69 | "temperature": 0.5, 70 | "max_tokens": 100, 71 | } 72 | 73 | 74 | # Example usage: 75 | initialize_tools() 76 | 77 | 78 | # Method added to reinitialize on updating from UI 79 | def reinitialize_ai_clients(): 80 | global ai 81 | logger.info(f"Initiating AI client") 82 | if Config.WHICH_MODEL == "GROQ": 83 | ai = Groq(api_key=Config.AI_API_KEY) 84 | params = { 85 | "model": llm_model, 86 | "temperature": 0.5, 87 | "max_tokens": 100, 88 | "stream": False, 89 | "top_p": 1 90 | } 91 | elif Config.WHICH_MODEL == "OpenAI" or Config.WHICH_MODEL == "OpenRouter": 92 | openai.api_key = Config.AI_API_KEY 93 | openai.base_url = Config.OPENAI_BASE_URL 94 | params = { 95 | "model": llm_model, 96 | "temperature": 0.5, 97 | "max_tokens": 100, 98 | } 99 | elif Config.WHICH_MODEL == "Anthropic": 100 | ai = Anthropic(api_key=Config.AI_API_KEY) 101 | params = { 102 | "model": llm_model, 103 | "temperature": 0.5, 104 | "max_tokens": 100, 105 | } 106 | 107 | reinitialize_ai_clients() 108 | 109 | def gen_ai_output(prompt, isToolResponse): 110 | """Generate AI output based on the prompt.""" 111 | 112 | print("model selected is: ", which_model) 113 | 114 | if which_model == "OpenAI" or which_model == "OpenRouter": 115 | params["messages"] = prompt 116 | if isToolResponse == "no" and Config.OPENAI_FINE_TUNED_MODEL_ID: 117 | print(f"Fine tuned model selected: {Config.OPENAI_FINE_TUNED_MODEL_ID}") 118 | params['model'] = Config.OPENAI_FINE_TUNED_MODEL_ID 119 | if isToolResponse == "yes" and Config.OPENAI_FINE_TUNED_TOOLS_MODEL_ID: 120 | print(f"Fine tuned model selected for tool calling: {Config.OPENAI_FINE_TUNED_TOOLS_MODEL_ID}") 121 | params['model'] = Config.OPENAI_FINE_TUNED_TOOLS_MODEL_ID 122 | response = openai.chat.completions.create(**params) 123 | return response.choices[0].message.content 124 | if which_model == "GROQ": 125 | params["messages"] = prompt 126 | response = ai.chat.completions.create(**params) 127 | return response.choices[0].message.content 128 | elif which_model == "Anthropic": 129 | system_prompt = None 130 | # Iterate over the list to find and remove the "system" role message 131 | filtered_messages = [] 132 | for msg in prompt: 133 | if msg["role"] == "system": 134 | params["system"] = msg["content"] 135 | else: 136 | filtered_messages.append(msg) 137 | params["messages"] = filtered_messages 138 | response = ai.messages.create(**params) 139 | 140 | return response.content[0].text 141 | 142 | 143 
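# --- Editor's illustrative sketch (not part of the original module) ---
# The helpers below assume the stage/tool analyzer model replies with a JSON object.
# A hypothetical payload, using only the keys these helpers actually read, might look like:
#
#   {
#       "conversation_stage_id": "3",
#       "tool_required": "yes",
#       "tool_name": "BookingTool",
#       "operation_id": "createAppointment",
#       "tool_parameters": {"date": "2024-10-01"},
#       "tool_headers": {"Content-Type": "application/json"},
#       "tool_body_parameters": {"customer_name": "Jane Doe"}
#   }
#
# "BookingTool", "createAppointment" and the parameter values are invented for
# illustration; the real names come from the tools registered via the admin UI / database.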
| def is_tool_required(ai_output): 144 | """Check if the use of a tool is required according to AI's output.""" 145 | try: 146 | data = json.loads(ai_output) 147 | return data.get("tool_required") == "yes" 148 | except json.JSONDecodeError: 149 | raise ValueError("Invalid JSON format in AI output.") 150 | 151 | 152 | def get_conversation_stage(ai_output): 153 | """Extract the conversation stage from AI's output.""" 154 | try: 155 | data = json.loads(ai_output) 156 | return int(data.get("conversation_stage_id")) 157 | except json.JSONDecodeError: 158 | raise ValueError("Invalid JSON format in AI output.") 159 | 160 | 161 | def get_tool_details(ai_output): 162 | """Retrieve the tool name, operationId, and parameters if a tool is required.""" 163 | if not is_tool_required(ai_output): 164 | return None, None, None 165 | try: 166 | data = json.loads(ai_output) 167 | tool_name = data.get("tool_name") 168 | operation_id = data.get("operation_id") 169 | tool_parameters = data.get("tool_parameters") 170 | tool_body_parameters = data.get("tool_body_parameters") 171 | tool_headers = data.get("tool_headers") 172 | return tool_name, operation_id, tool_parameters, tool_headers, tool_body_parameters 173 | except json.JSONDecodeError: 174 | raise ValueError("Invalid JSON format in AI output.") 175 | 176 | 177 | def process_initial_message(call_sid, customer_name, customer_problem): 178 | initial_prompt_template = get_prompt_template('AGENT_STARTING_PROMPT_TEMPLATE') 179 | initial_prompt = initial_prompt_template.format( 180 | salesperson_name=salesperson_name, 181 | company_name=company_name, 182 | company_business=company_business, 183 | conversation_purpose=conversation_purpose, 184 | conversation_stages=conversation_stages, 185 | agent_custom_instructions=agent_custom_instructions 186 | ) 187 | message_to_send_to_ai = [ 188 | { 189 | "role": "system", 190 | "content": initial_prompt 191 | } 192 | ] 193 | initial_transcript = "Customer Name:" + customer_name + ". 
Customer filled up details in the website:" + customer_problem 194 | message_to_send_to_ai.append({"role": "user", "content": initial_transcript}) 195 | # Initialize conversation state for the customer 196 | conversation_states[call_sid] = { 197 | 'conversation_stage_id': 1 198 | } 199 | isToolResponse = "no" 200 | response = gen_ai_output(message_to_send_to_ai, isToolResponse) 201 | return response 202 | 203 | def invoke_stage_tool_analysis(message_history, user_input, conversation_stage_id): 204 | tools_from_db = fetch_tools_from_db() 205 | tools_description = "" 206 | 207 | for tool in tools_from_db: 208 | tool_name = tool['name'] 209 | tool_description = f"{tool_name}: {tool['description']}\nAvailable Operations:\n" 210 | 211 | operation_ids = initialized_tools[tool_name]['operations'] 212 | 213 | for operation_id, operation_details in operation_ids.items(): 214 | tool_description += f" - {operation_id}: {operation_details['method']} {operation_details['url']}\n" 215 | 216 | tool_info = get_tool_and_spec(tool_name) 217 | sensitive_headers = json.loads(tool_info.get('sensitive_headers', '{}') or '{}') 218 | sensitive_body = json.loads(tool_info.get('sensitive_body', '{}') or '{}') 219 | 220 | # Headers 221 | headers_description = ', '.join([ 222 | f"{header}: {details['type']}" + 223 | (f" (enum: {details['enum']})" if details['enum'] else "") + 224 | (" (sensitive)" if header in sensitive_headers else "") + 225 | (" (mandatory)" if details.get('required') else "") 226 | for header, details in operation_details['parameters'].get('header', {}).items() 227 | ]) 228 | 229 | # Query Parameters 230 | query_description = ', '.join([ 231 | f"{param}: {details['type']}" + 232 | (f" (enum: {details['enum']})" if details['enum'] else "") + 233 | (" (mandatory)" if details.get('required') else "") 234 | for param, details in operation_details['parameters'].get('query', {}).items() 235 | ]) 236 | 237 | # Path Parameters 238 | path_description = ', '.join([ 239 | f"{param}: {details['type']}" + 240 | (f" (enum: {details['enum']})" if details['enum'] else "") + 241 | (" (mandatory)" if details.get('required') else "") 242 | for param, details in operation_details['parameters'].get('path', {}).items() 243 | ]) 244 | 245 | # Body Parameters 246 | body_description = ', '.join([ 247 | f"{param}: {details['type']}" + 248 | (f" (enum: {details['enum']})" if details['enum'] else "") + 249 | (" (sensitive)" if param in sensitive_body else "") + 250 | (" (mandatory)" if details.get('required') else "") 251 | for param, details in operation_details['parameters'].get('body', {}).items() 252 | ]) 253 | 254 | if headers_description: 255 | tool_description += f" Headers: {headers_description}\n" 256 | if query_description: 257 | tool_description += f" Query Parameters: {query_description}\n" 258 | if path_description: 259 | tool_description += f" Path Parameters: {path_description}\n" 260 | if body_description: 261 | tool_description += f" Body Parameters: {body_description}\n" 262 | 263 | tools_description += tool_description + "\n" 264 | intent_tool_prompt_template = get_prompt_template('STAGE_TOOL_ANALYZER_PROMPT') 265 | intent_tool_prompt = intent_tool_prompt_template.format( 266 | salesperson_name=salesperson_name, 267 | company_name=company_name, 268 | company_business=company_business, 269 | conversation_purpose=conversation_purpose, 270 | conversation_stage_id=conversation_stage_id, 271 | conversation_stages=conversation_stages, 272 | conversation_history=message_history, 273 | 
company_products_services=company_products_services, 274 | user_input=user_input, 275 | tools=tools_description 276 | ) 277 | 278 | print("Prompt being sent to AI:" + intent_tool_prompt) 279 | 280 | message_to_send_to_ai = [ 281 | { 282 | "role": "system", 283 | "content": intent_tool_prompt 284 | } 285 | ] 286 | message_to_send_to_ai.append( 287 | {"role": "user", "content": "You Must Respond in the JSON format specified in the system prompt"}) 288 | isToolResponse = "yes" 289 | ai_output = gen_ai_output(message_to_send_to_ai, isToolResponse) 290 | print("AI Tool Model Output:" + ai_output) 291 | return ai_output 292 | 293 | 294 | def initiate_inbound_message(call_sid): 295 | initial_prompt_template = get_prompt_template('AGENT_PROMPT_INBOUND_TEMPLATE') 296 | initial_response = initial_prompt_template.format( 297 | salesperson_name=salesperson_name, 298 | company_name=company_name 299 | ) 300 | conversation_states[call_sid] = { 301 | 'conversation_stage_id': 1 302 | } 303 | return initial_response 304 | 305 | 306 | def process_message(call_sid, message_history, user_input): 307 | conversation_state = conversation_states.get(call_sid) 308 | conversation_stage_id = conversation_state['conversation_stage_id'] 309 | if not conversation_state: 310 | raise ValueError("Conversation state not found for this conversation") 311 | 312 | print('user said:' + user_input) 313 | stage_tool_output = invoke_stage_tool_analysis(message_history, user_input, conversation_stage_id) 314 | stage = get_conversation_stage(stage_tool_output) 315 | conversation_stage_id = get_conversation_stage(stage_tool_output) 316 | conversation_state['conversation_stage_id'] = conversation_stage_id 317 | tool_output = '' 318 | try: 319 | if is_tool_required(stage_tool_output): 320 | print('Tool Required is true') 321 | tool_name, operation_id, tool_parameters, tool_headers, tool_body_parameters = get_tool_details(stage_tool_output) 322 | print(f'Tool called: {tool_name}, Operation: {operation_id}, Params: {tool_parameters}, tool_headers: {tool_headers}, tool_body_parameters: {tool_body_parameters}') 323 | 324 | # Use the dynamically loaded tool if available 325 | if tool_name in initialized_tools: 326 | api_response = call_api(tool_name, tool_parameters, operation_id, tool_headers, tool_body_parameters) 327 | tool_output = f"Tool {tool_name} executed successfully. Response: {api_response}" 328 | message_history.append({"role": "api_response", "content": tool_output}) 329 | 330 | except ValueError as e: 331 | tool_output = "Some Error Occured In calling the tools. Ask User if its okay that you callback the user later with answer of the query." 
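# Editor's note (illustrative, not in the original source): by this point message_history
# is a list of role/content dicts that gets serialized into the prompt below, for example:
#   [{"role": "user", "content": "..."},
#    {"role": "assistant", "content": "..."},
#    {"role": "api_response", "content": "Tool BookingTool executed successfully. Response: ..."}]
# "BookingTool" here is a hypothetical tool name used purely as an example.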
332 | 333 | print("Creating inbound prompt template") 334 | outbound_prompt_template = get_prompt_template('AGENT_PROMPT_OUTBOUND_TEMPLATE') 335 | inbound_prompt = outbound_prompt_template.format( 336 | salesperson_name=salesperson_name, 337 | company_name=company_name, 338 | company_business=company_business, 339 | conversation_purpose=conversation_purpose, 340 | conversation_stage_id=conversation_stage_id, 341 | company_products_services=company_products_services, 342 | conversation_stages=json.dumps(conversation_stages, indent=2), 343 | conversation_history=json.dumps(message_history, indent=2), 344 | tools_response=tool_output 345 | ) 346 | message_to_send_to_ai_final = [ 347 | { 348 | "role": "system", 349 | "content": inbound_prompt 350 | }, 351 | { 352 | "role": "user", 353 | "content": user_input 354 | } 355 | ] 356 | isToolResponse = "no" 357 | # Check if caching is enabled 358 | if Config.CACHE_ENABLED == 'true': 359 | conversation_cache = CacheManager() 360 | cache_result = conversation_cache.search(user_input) 361 | if cache_result is None: 362 | talkback_response = gen_ai_output(message_to_send_to_ai_final, isToolResponse) 363 | conversation_cache.put(user_input, talkback_response) 364 | return talkback_response 365 | else: 366 | talkback_response = gen_ai_output(message_to_send_to_ai_final, isToolResponse) 367 | return talkback_response 368 | 369 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Request, Response, HTTPException, Depends, File, UploadFile, Form 2 | from fastapi.responses import JSONResponse, RedirectResponse, StreamingResponse 3 | from fastapi.templating import Jinja2Templates 4 | from fastapi.staticfiles import StaticFiles 5 | from fastapi.security import HTTPBasic, HTTPBasicCredentials 6 | from starlette.middleware.sessions import SessionMiddleware 7 | from starlette.responses import FileResponse 8 | from twilio.twiml.voice_response import VoiceResponse, Gather 9 | from twilio.rest import Client 10 | from langchain_core.prompts import PromptTemplate 11 | from werkzeug.utils import secure_filename 12 | from urllib.parse import quote 13 | import uvicorn 14 | import os 15 | import shutil 16 | import subprocess 17 | import requests 18 | import logging 19 | import threading 20 | import time 21 | import redis 22 | import json 23 | import uuid 24 | import sqlite3 25 | import importlib 26 | import re 27 | from typing import Optional 28 | from pydantic import BaseModel 29 | import asyncio 30 | import asyncio 31 | from concurrent.futures import ThreadPoolExecutor 32 | 33 | from audio_helpers import text_to_speech, text_to_speech_stream, save_audio_file, initialize_elevenlabs_client 34 | from ai_helpers import process_initial_message, process_message, initiate_inbound_message, reinitialize_ai_clients 35 | from tools_helper import initialize_tools, EncryptionHelper 36 | from appUtils import clean_response, delayed_delete, save_message_history, get_message_history, process_elevenlabs_audio, generate_diverse_confirmation, get_redis_client 37 | from prompts import AGENT_STARTING_PROMPT_TEMPLATE, STAGE_TOOL_ANALYZER_PROMPT, AGENT_PROMPT_OUTBOUND_TEMPLATE, \ 38 | AGENT_PROMPT_INBOUND_TEMPLATE 39 | from config import Config 40 | 41 | 42 | # Create a thread pool executor 43 | executor = ThreadPoolExecutor() 44 | # Configure logging 45 | logging.basicConfig(level=logging.INFO) 46 | logger = logging.getLogger(__name__) 47 | 
48 | OPENAPI_DIR = 'openapi_specs/' 49 | GENERATED_TOOLS_DIR = 'generated_tools/' 50 | 51 | app = FastAPI() 52 | app.add_middleware(SessionMiddleware, secret_key=Config.SECRET_KEY) 53 | 54 | # Mount static files 55 | app.mount("/static", StaticFiles(directory="static"), name="static") 56 | 57 | # Jinja2 templates 58 | templates = Jinja2Templates(directory="templates") 59 | 60 | def initialize_twilio_client(): 61 | logger.info(f"Initiating Twilio client") 62 | global client 63 | client = Client(Config.TWILIO_ACCOUNT_SID, Config.TWILIO_AUTH_TOKEN) 64 | 65 | # SQLite setup for user management 66 | def init_sqlite_db(): 67 | # Initialize user database 68 | conn = sqlite3.connect('knotie.db') 69 | cursor = conn.cursor() 70 | 71 | # Create users table 72 | cursor.execute('''CREATE TABLE IF NOT EXISTS users ( 73 | id INTEGER PRIMARY KEY AUTOINCREMENT, 74 | username TEXT UNIQUE NOT NULL, 75 | password TEXT NOT NULL, 76 | first_login INTEGER DEFAULT 1)''') 77 | conn.commit() 78 | 79 | # Check if admin user exists, if not, create one 80 | cursor.execute('SELECT * FROM users WHERE username = "admin"') 81 | if cursor.fetchone() is None: 82 | cursor.execute('INSERT INTO users (username, password) VALUES (?, ?)', ('admin', 'admin')) 83 | conn.commit() 84 | 85 | # Create chat history table 86 | cursor.execute('''CREATE TABLE IF NOT EXISTS chat_history ( 87 | id INTEGER PRIMARY KEY AUTOINCREMENT, 88 | transaction_id TEXT NOT NULL, 89 | role TEXT NOT NULL, 90 | content TEXT NOT NULL)''') 91 | conn.commit() 92 | 93 | # Create tools table 94 | cursor.execute('''CREATE TABLE IF NOT EXISTS tools ( 95 | id INTEGER PRIMARY KEY AUTOINCREMENT, 96 | name TEXT UNIQUE NOT NULL, 97 | description TEXT NOT NULL, 98 | openapi_spec TEXT NOT NULL, 99 | class_name TEXT NOT NULL, 100 | sensitive_headers TEXT, 101 | sensitive_body TEXT)''') 102 | conn.commit() 103 | 104 | # Create prompts table 105 | cursor.execute('''CREATE TABLE IF NOT EXISTS prompts ( 106 | id INTEGER PRIMARY KEY AUTOINCREMENT, 107 | name TEXT UNIQUE NOT NULL, 108 | template TEXT NOT NULL);''') 109 | conn.commit() 110 | 111 | # Insert the default prompts into the DB if they don't already exist 112 | prompts = [ 113 | ('AGENT_PROMPT_OUTBOUND_TEMPLATE', AGENT_PROMPT_OUTBOUND_TEMPLATE.template), 114 | ('AGENT_PROMPT_INBOUND_TEMPLATE', AGENT_PROMPT_INBOUND_TEMPLATE.template), 115 | ('STAGE_TOOL_ANALYZER_PROMPT', STAGE_TOOL_ANALYZER_PROMPT.template), 116 | ('AGENT_STARTING_PROMPT_TEMPLATE', AGENT_STARTING_PROMPT_TEMPLATE.template) 117 | ] 118 | 119 | for name, template in prompts: 120 | cursor.execute('SELECT * FROM prompts WHERE name = ?', (name,)) 121 | if cursor.fetchone() is None: 122 | cursor.execute('''INSERT INTO prompts (name, template) VALUES (?, ?)''', (name, template)) 123 | conn.commit() 124 | 125 | # Create config table 126 | cursor.execute('''CREATE TABLE IF NOT EXISTS config ( 127 | key TEXT UNIQUE NOT NULL, 128 | value TEXT NOT NULL)''') 129 | conn.commit() 130 | 131 | # Insert default config values if they don't exist 132 | default_configs = { 133 | 'COMPANY_NAME': 'Knolabs AI Agency', 134 | 'COMPANY_BUSINESS': 'Knolabs is an AI Agency which helps HomeService Businesses & Local Shops automate their business processes based on their need and necesity.', 135 | 'COMPANY_PRODUCTS_SERVICES': 'Premium Consultancy Service - Most Budget offer, where we only provide consultancy to businesses who have their own team of developers (charged hourly), Proven Solution Services - Implements Automated Solutions that are already implemented & Proven for existing 
clients (Charged Based on Solutions), Custom Solutions - AI Automated Solution which are unique to the business (Price Varies, Charged Based on Agreed Contract)', 136 | 'CONVERSATION_PURPOSE': 'Primarily to get an appointment booked to discuss problems of the customer in details', 137 | 'AISALESAGENT_NAME': 'Andrea', 138 | 'AI_API_KEY': 'Default AI API Key', 139 | 'ENCRYPTION_KEY': 'Default Encryption Key', 140 | 'WHICH_MODEL': 'OpenAI', 141 | 'OPENAI_BASE_URL': 'https://api.openai.com/v1/', 142 | 'VOICE_MODE': 'ELEVENLABS_STREAM', 143 | 'LLM_MODEL': 'gpt-3.5-turbo', 144 | 'OPENAI_FINE_TUNED_MODEL_ID': 'gpt-3.5-turbo', 145 | 'OPENAI_FINE_TUNED_TOOLS_MODEL_ID': 'gpt-4o-mini', 146 | 'USE_LANGCHAIN_TOOL_CLASS': 'false', 147 | 'AGENT_CUSTOM_INSTRUCTIONS': 'Ask and Answer in short sentences to be focused on providing right information and provide fastest way for make deal. Be specific and do not try to answer questions which are not specific to the goal or company info or details available in context', 148 | 'TWILIO_ACCOUNT_SID': 'Default Twilio SID', 149 | 'TWILIO_AUTH_TOKEN': 'Default Twilio Auth Token', 150 | 'TWILIO_FROM_NUMBER': 'Default Twilio Number', 151 | 'ELEVENLABS_API_KEY': 'Default Eleven Labs API Key', 152 | 'NGROK_AUTH_TOKEN': 'NGROK Default Auth Token', 153 | 'USE_NGROK': 'true', 154 | 'VOICE_ID': '21m00Tcm4TlvDq8ikWAM', 155 | 'APP_PUBLIC_URL': 'https://default-url.com', 156 | 'CACHE_ENABLED': 'false', 157 | 'REDIS_URL': 'redis://redis:6379' 158 | } 159 | 160 | # Insert default configs if not present 161 | for key, value in default_configs.items(): 162 | cursor.execute('SELECT value FROM config WHERE key = ?', (key,)) 163 | if cursor.fetchone() is None: 164 | cursor.execute('INSERT INTO config (key, value) VALUES (?, ?)', (key, value)) 165 | conn.commit() 166 | 167 | conn.close() 168 | 169 | init_sqlite_db() 170 | 171 | def is_valid_tool_name(tool_name): 172 | """Validate the tool name to prevent directory traversal or other path manipulation.""" 173 | # Allow only alphanumeric characters and underscores in the tool name 174 | # This prevents directory traversal and other malicious input 175 | return re.match(r'^[\w-]+$', tool_name) is not None 176 | 177 | 178 | def generate_tool_client(tool_name, tool_file_path): 179 | # Validate the tool name 180 | if not is_valid_tool_name(tool_name): 181 | raise ValueError(f"Invalid tool name: {tool_name}") 182 | 183 | # Create a unique directory for each tool 184 | tool_output_dir = os.path.join(GENERATED_TOOLS_DIR, tool_name) 185 | 186 | # Check if the directory already exists and delete it if needed 187 | # if os.path.exists(tool_output_dir): 188 | # logger.info(f"Directory {tool_output_dir} exists. 
Overwriting.") 189 | # shutil.rmtree(tool_output_dir) # Use shutil.rmtree to remove the directory 190 | 191 | # Generate the client using the OpenAPI spec 192 | try: 193 | subprocess.run( 194 | ['openapi-python-client', 'generate', '--path', tool_file_path, '--output-path', tool_output_dir], 195 | check=True 196 | ) 197 | except subprocess.CalledProcessError as e: 198 | logger.error(f"Failed to generate client for tool {tool_name}: {e}") 199 | raise 200 | 201 | def add_tool_to_db(tool_name, tool_description, tool_file, tool_sensitive_headers, tool_sensitive_body): 202 | # Generate client for the tool 203 | generate_tool_client(tool_name, tool_file) 204 | 205 | # Encrypt sensitive headers and body parameters 206 | encrypted_headers = EncryptionHelper.encrypt_data(tool_sensitive_headers) if tool_sensitive_headers else None 207 | encrypted_body = EncryptionHelper.encrypt_data(tool_sensitive_body) if tool_sensitive_body else None 208 | 209 | # Insert tool data into the database 210 | conn = sqlite3.connect('knotie.db') 211 | cursor = conn.cursor() 212 | cursor.execute('INSERT INTO tools (name, description, openapi_spec, class_name, sensitive_headers, sensitive_body) VALUES (?, ?, ?, ?, ?, ?)', 213 | (tool_name, tool_description, tool_file, f'{tool_name}.Client', encrypted_headers, encrypted_body)) 214 | conn.commit() 215 | conn.close() 216 | 217 | # Initialize the tool after adding it 218 | initialize_tools() 219 | 220 | # Twilio client initialization 221 | client = None 222 | 223 | 224 | # Initialize the clients at the start 225 | initialize_twilio_client() 226 | initialize_elevenlabs_client() 227 | 228 | # Remove automatic AI client and tool initialization 229 | # reinitialize_ai_clients() 230 | # initialize_tools() 231 | 232 | # Dependency for authentication 233 | security = HTTPBasic() 234 | def get_current_username(credentials: HTTPBasicCredentials = Depends(security)): 235 | correct_username = "admin" 236 | correct_password = "admin" 237 | if credentials.username != correct_username or credentials.password != correct_password: 238 | raise HTTPException(status_code=401, detail="Incorrect username or password") 239 | return credentials.username 240 | 241 | 242 | # Routes 243 | @app.get("/") 244 | async def read_root(request: Request): 245 | return templates.TemplateResponse("index.html", {"request": request}) 246 | 247 | 248 | @app.get("/admin") 249 | async def admin(request: Request): 250 | if not request.session.get('logged_in'): 251 | return RedirectResponse(url="/") 252 | return templates.TemplateResponse("admin.html", {"request": request}) 253 | 254 | # Dependency for checking session authentication 255 | async def get_current_user(request: Request): 256 | if not request.session.get('logged_in'): 257 | raise HTTPException(status_code=401, detail="Not authenticated") 258 | return request.session.get('username') 259 | 260 | @app.get("/api/config") 261 | async def get_config(username: str = Depends(get_current_user)): 262 | configs = Config.load_dynamic_config() 263 | return JSONResponse(content=configs) 264 | 265 | @app.post("/api/config") 266 | async def update_config(request: Request, username: str = Depends(get_current_user)): 267 | new_config = await request.json() 268 | Config.save_dynamic_config(new_config) 269 | Config.initialize() 270 | initialize_twilio_client() 271 | initialize_elevenlabs_client() 272 | reinitialize_ai_clients() 273 | initialize_tools() 274 | return JSONResponse(content={"message": "Config updated successfully"}) 275 | 276 | 277 | @app.get("/api/chats") 278 | 
async def get_chats(username: str = Depends(get_current_user)): 279 | redis_client = get_redis_client() 280 | keys = redis_client.keys('*') 281 | logger.info(f"list of keys are: {keys}") 282 | return JSONResponse(content=keys) 283 | 284 | @app.get("/api/chat/{transaction_id}") 285 | async def get_chat(transaction_id: str, username: str = Depends(get_current_user)): 286 | chat_history = get_message_history(transaction_id) 287 | if not chat_history: 288 | raise HTTPException(status_code=404, detail="No chat history found") 289 | return JSONResponse(content=chat_history) 290 | 291 | @app.get("/api/prompts") 292 | async def get_prompts(username: str = Depends(get_current_user)): 293 | conn = sqlite3.connect('knotie.db') 294 | cursor = conn.cursor() 295 | cursor.execute('SELECT name, template FROM prompts') 296 | prompts = cursor.fetchall() 297 | conn.close() 298 | prompts_list = [{"name": prompt[0], "template": prompt[1]} for prompt in prompts] 299 | return JSONResponse(content=prompts_list) 300 | 301 | @app.put("/api/prompts") 302 | async def update_prompt(prompt: dict, username: str = Depends(get_current_user)): 303 | prompt_name = prompt.get('name') 304 | prompt_template = prompt.get('template') 305 | if not prompt_name or not prompt_template: 306 | raise HTTPException(status_code=400, detail="Invalid request data") 307 | conn = sqlite3.connect('knotie.db') 308 | cursor = conn.cursor() 309 | cursor.execute('UPDATE prompts SET template = ? WHERE name = ?', (prompt_template, prompt_name)) 310 | conn.commit() 311 | conn.close() 312 | return JSONResponse(content={"message": "Prompt updated successfully"}) 313 | 314 | @app.get("/api/tools") 315 | async def get_tools(username: str = Depends(get_current_user)): 316 | conn = sqlite3.connect('knotie.db') 317 | cursor = conn.cursor() 318 | cursor.execute('SELECT id, name, description, openapi_spec FROM tools') 319 | tools = cursor.fetchall() 320 | conn.close() 321 | tools_list = [{"id": tool[0], "name": tool[1], "description": tool[2], "openapi_spec": tool[3]} for tool in tools] 322 | return JSONResponse(content=tools_list) 323 | 324 | @app.post("/api/tools") 325 | async def add_tool( 326 | request: Request, 327 | toolName: str = Form(...), 328 | toolDescription: str = Form(...), 329 | toolFile: UploadFile = File(...), 330 | toolSensitiveHeaders: Optional[str] = Form(None), 331 | toolSensitiveBody: Optional[str] = Form(None), 332 | username: str = Depends(get_current_user) 333 | ): 334 | os.makedirs(GENERATED_TOOLS_DIR, exist_ok=True) 335 | filename = secure_filename(toolFile.filename) 336 | tool_file_path = os.path.join(OPENAPI_DIR, filename) 337 | with open(tool_file_path, "wb") as buffer: 338 | shutil.copyfileobj(toolFile.file, buffer) 339 | add_tool_to_db(toolName, toolDescription, tool_file_path, toolSensitiveHeaders, toolSensitiveBody) 340 | return JSONResponse(content={"message": "Tool added successfully"}) 341 | 342 | @app.post("/login") 343 | async def login(request: Request): 344 | data = await request.json() 345 | username = data.get('username') 346 | password = data.get('password') 347 | conn = sqlite3.connect('knotie.db') 348 | cursor = conn.cursor() 349 | cursor.execute('SELECT * FROM users WHERE username = ? 
AND password = ?', (username, password)) 350 | user = cursor.fetchone() 351 | conn.close() 352 | if user: 353 | request.session['logged_in'] = True 354 | request.session['username'] = username 355 | first_login = user[3] 356 | return JSONResponse(content={'success': True, 'message': 'Login successful', 'first_login': first_login == 1}) 357 | else: 358 | return JSONResponse(content={'success': False, 'message': 'Login failed'}) 359 | 360 | @app.post("/change_password") 361 | async def change_password(request: Request): 362 | if 'logged_in' not in request.session: 363 | raise HTTPException(status_code=403, detail="Not logged in") 364 | data = await request.json() 365 | new_password = data.get('new_password') 366 | conn = sqlite3.connect('knotie.db') 367 | cursor = conn.cursor() 368 | cursor.execute('UPDATE users SET password = ?, first_login = 0 WHERE username = ?', (new_password, request.session['username'])) 369 | conn.commit() 370 | conn.close() 371 | return JSONResponse(content={'success': True, 'message': 'Password changed successfully'}) 372 | 373 | @app.get("/logout") 374 | async def logout(request: Request): 375 | request.session.clear() 376 | return RedirectResponse(url="/") 377 | 378 | 379 | @app.get("/audio/{filename}") 380 | async def serve_audio(filename: str): 381 | directory = 'audio_files' 382 | file_path = os.path.join(directory, filename) 383 | if not os.path.exists(file_path): 384 | raise HTTPException(status_code=404, detail="Audio file not found") 385 | 386 | def cleanup(): 387 | delayed_delete(file_path) 388 | 389 | return FileResponse(file_path, media_type="audio/mpeg", background=cleanup) 390 | 391 | @app.get("/audio-stream") 392 | async def audio_stream(text: str): 393 | audio_stream = text_to_speech_stream(text) 394 | 395 | def generate(): 396 | while True: 397 | chunk = audio_stream.read(1024) 398 | if not chunk: 399 | break 400 | yield chunk 401 | 402 | return StreamingResponse(generate(), media_type="audio/mpeg") 403 | 404 | @app.post("/start-call") 405 | async def start_call(request: Request): 406 | data = await request.json() 407 | unique_id = str(uuid.uuid4()) 408 | message_history = [] 409 | customer_name = data.get('customer_name', 'Valued Customer') 410 | customer_phonenumber = data.get('customer_phonenumber', '') 411 | customer_businessdetails = data.get('customer_businessdetails', 'No details provided.') 412 | 413 | ai_message = process_initial_message(unique_id, customer_name, customer_businessdetails) 414 | logger.info(ai_message) 415 | response = VoiceResponse() 416 | 417 | if Config.VOICE_MODE == "ELEVENLABS_DIRECT": 418 | initial_message = clean_response(ai_message) 419 | audio_filename = process_elevenlabs_audio(initial_message) 420 | response.play(f"{request.base_url}audio/{quote(secure_filename(audio_filename))}") 421 | elif Config.VOICE_MODE == "ELEVENLABS_STREAM": 422 | initial_message = clean_response(ai_message) 423 | audio_url = f"{request.base_url}audio-stream?text={quote(initial_message)}" 424 | logger.info(audio_url) 425 | response.play(audio_url) 426 | elif Config.VOICE_MODE == "TWILIO_DIRECT": 427 | initial_message = clean_response(ai_message) 428 | response.say(initial_message, voice='Google.en-GB-Standard-C', language='en-US') 429 | 430 | initial_transcript = f"Customer Name: {customer_name}. 
Customer's business Details as filled up in the website: {customer_businessdetails}" 431 | message_history.append({"role": "user", "content": initial_transcript}) 432 | message_history.append({"role": "assistant", "content": initial_message}) 433 | save_message_history(unique_id, message_history) 434 | 435 | redirect_url = f"{Config.APP_PUBLIC_GATHER_URL}?CallSid={unique_id}" 436 | public_url = f"{Config.APP_PUBLIC_EVENT_URL}" 437 | response.redirect(redirect_url) 438 | 439 | client = Client(Config.TWILIO_ACCOUNT_SID, Config.TWILIO_AUTH_TOKEN) 440 | call = client.calls.create( 441 | twiml=str(response), 442 | to=customer_phonenumber, 443 | from_=Config.TWILIO_FROM_NUMBER, 444 | method="GET", 445 | status_callback=public_url, 446 | status_callback_method="POST" 447 | ) 448 | return JSONResponse(content={'message': 'Call initiated', 'call_sid': call.sid}) 449 | 450 | @app.post("/gather") 451 | @app.get("/gather") 452 | async def gather_input(request: Request): 453 | call_sid = request.query_params.get('CallSid', 'default_sid') 454 | resp = VoiceResponse() 455 | gather = Gather(input='speech', action=f"/process-speech?CallSid={call_sid}", speechTimeout='auto', method="POST") 456 | resp.append(gather) 457 | resp.redirect(f"/gather?CallSid={call_sid}") 458 | return Response(content=str(resp), media_type="application/xml") 459 | 460 | @app.post("/gather-inbound") 461 | @app.get("/gather-inbound") 462 | async def gather_input_inbound(request: Request): 463 | resp = VoiceResponse() 464 | unique_id = str(uuid.uuid4()) 465 | message_history = [] 466 | agent_response = initiate_inbound_message(unique_id) 467 | 468 | if Config.VOICE_MODE == "ELEVENLABS_DIRECT": 469 | audio_filename = process_elevenlabs_audio(agent_response) 470 | resp.play(f"{request.base_url}audio/{quote(secure_filename(audio_filename))}") 471 | elif Config.VOICE_MODE == "ELEVENLABS_STREAM": 472 | initial_message = clean_response(agent_response) 473 | audio_url = f"{request.base_url}audio-stream?text={quote(agent_response)}" 474 | resp.play(audio_url) 475 | elif Config.VOICE_MODE == "TWILIO_DIRECT": 476 | initial_message = clean_response(agent_response) 477 | resp.say(agent_response, voice='Google.en-GB-Standard-C', language='en-US') 478 | 479 | message_history.append({"role": "assistant", "content": agent_response}) 480 | save_message_history(unique_id, message_history) 481 | resp.redirect(f"/gather?CallSid={unique_id}") 482 | return Response(content=str(resp), media_type="application/xml") 483 | 484 | 485 | @app.post("/process-speech") 486 | async def process_speech(request: Request): 487 | form_data = await request.form() 488 | speech_result = form_data.get('SpeechResult', '').strip() 489 | call_sid = request.query_params.get('CallSid', 'default_sid') 490 | message_history = get_message_history(call_sid) 491 | message_history.append({"role": "user", "content": speech_result}) 492 | # Run the background task in a separate thread 493 | loop = asyncio.get_running_loop() 494 | await loop.run_in_executor(executor, generate_response_in_background, call_sid, message_history, speech_result) 495 | 496 | 497 | confirmation_prompt = generate_diverse_confirmation(speech_result) 498 | resp = VoiceResponse() 499 | response_text = clean_response(confirmation_prompt) 500 | 501 | audio_url = f"{request.base_url}audio-stream?text={quote(response_text)}" 502 | resp.play(audio_url) 503 | 504 | gather = Gather( 505 | input='speech', 506 | action=f"/process-confirmation?CallSid={call_sid}", 507 | speechTimeout='2', 508 | method='POST' 509 | ) 510 | 
resp.append(gather) 511 | 512 | resp.redirect(f"/handle-confirmation?CallSid={call_sid}") 513 | 514 | return Response(content=str(resp), media_type="application/xml") 515 | 516 | def generate_response_in_background(call_sid: str, message_history: list, user_input: str): 517 | redis_client = get_redis_client() 518 | ai_response_text = process_message(call_sid, message_history, user_input) 519 | redis_client.set(f"response:{call_sid}", ai_response_text) 520 | redis_client.expire(f"response:{call_sid}", 300) # Expires in 5 minutes 521 | 522 | @app.post("/handle-confirmation") 523 | @app.get("/handle-confirmation") 524 | async def handle_confirmation(request: Request): 525 | call_sid = request.query_params.get('CallSid', 'default_sid') 526 | resp = VoiceResponse() 527 | gather = Gather( 528 | input='speech', 529 | action=f"/process-confirmation?CallSid={call_sid}", 530 | speechTimeout='auto', 531 | method='POST' 532 | ) 533 | resp.append(gather) 534 | return Response(content=str(resp), media_type="application/xml") 535 | 536 | @app.post("/process-confirmation") 537 | async def process_confirmation(request: Request): 538 | form_data = await request.form() 539 | confirmation_result = form_data.get('SpeechResult', '').strip().lower() 540 | call_sid = request.query_params.get('CallSid', 'default_sid') 541 | resp = VoiceResponse() 542 | message_history = get_message_history(call_sid) 543 | 544 | positive_keywords = ["yes", "correct", "you're right", "ahha", "that's right", "right", "yeah", "yep", "affirmative", "sure", "absolutely", "indeed", "aha", "uh-huh", "mmm", "okay", "ok", "alright", "got it"] 545 | negative_keywords = ["no", "incorrect", "that's wrong", "wrong", "nope", "not quite", "negative", "not really", "no way", "uh-oh", "nah", "hmm", "huh-uh"] 546 | 547 | if any(keyword in confirmation_result for keyword in positive_keywords): 548 | logger.info(f'User confirmed yes') 549 | redis_client = get_redis_client() 550 | ai_response_bytes = redis_client.get(f"response:{call_sid}") 551 | 552 | wait_time = 0 553 | max_wait_time = 100 554 | 555 | while ai_response_bytes is None and wait_time < max_wait_time: 556 | logger.info(f'waiting for response from AI') 557 | await asyncio.sleep(5) 558 | wait_time += 1 559 | ai_response_bytes = redis_client.get(f"response:{call_sid}") 560 | 561 | if ai_response_bytes: 562 | ai_response_text = ai_response_bytes.decode('utf-8') 563 | response_text = clean_response(ai_response_text) 564 | audio_url = f"{request.base_url}audio-stream?text={quote(response_text)}" 565 | resp.play(audio_url) 566 | 567 | redis_client.delete(f"response:{call_sid}") 568 | message_history.append({"role": "assistant", "content": response_text}) 569 | save_message_history(call_sid, message_history) 570 | resp.redirect(f"/gather?CallSid={call_sid}") 571 | else: 572 | resp.say("I'm sorry, I'm still processing your request. Please hold for a moment.") 573 | resp.redirect(f"/handle-confirmation?CallSid={call_sid}") 574 | 575 | elif any(keyword in confirmation_result for keyword in negative_keywords): 576 | gather = Gather( 577 | input='speech', 578 | action=f"/process-speech?CallSid={call_sid}", 579 | speechTimeout='auto', 580 | method="POST" 581 | ) 582 | gather.say("I'm sorry, could you please repeat your input?") 583 | resp.append(gather) 584 | else: 585 | gather = Gather( 586 | input='speech', 587 | action=f"/process-confirmation?CallSid={call_sid}", 588 | speechTimeout='auto', 589 | method="POST" 590 | ) 591 | gather.say("I didn't catch that. 
Could you please say 'yes' or 'no'?") 592 | resp.append(gather) 593 | 594 | return Response(content=str(resp), media_type="application/xml") 595 | 596 | @app.post("/event") 597 | async def event(request: Request): 598 | form_data = await request.form() 599 | call_status = form_data.get('CallStatus', '') 600 | if call_status in ['completed', 'busy', 'failed']: 601 | request.session.pop('message_history', None) 602 | logger.info(f"Call completed with status: {call_status}") 603 | return Response(status_code=204) 604 | 605 | if __name__ == "__main__": 606 | uvicorn.run(app, host="0.0.0.0", port=5000) -------------------------------------------------------------------------------- /appUtils.py: -------------------------------------------------------------------------------- 1 | import time 2 | import logging 3 | import redis 4 | import json 5 | import threading 6 | import os 7 | import random 8 | from config import Config # Import Config to access dynamic configuration 9 | from audio_helpers import text_to_speech, save_audio_file 10 | 11 | logging.basicConfig(level=logging.INFO) 12 | logger = logging.getLogger(__name__) 13 | 14 | # Initialize the Redis client (placeholder) 15 | redis_client = None 16 | 17 | def get_redis_client(): 18 | """Get or reinitialize the Redis client with the latest configuration.""" 19 | global redis_client 20 | 21 | # Reload the configuration if there are any changes 22 | Config.reload_if_changed() 23 | 24 | # If Redis client is not initialized or URL has changed, reinitialize 25 | if not redis_client or redis_client.connection_pool.connection_kwargs['host'] != Config.REDIS_URL: 26 | redis_client = redis.Redis.from_url(Config.REDIS_URL, decode_responses=False) 27 | logger.info(f"Reinitialized Redis client with new URL: {Config.REDIS_URL}") 28 | 29 | return redis_client 30 | 31 | def clean_response(unfiltered_response_text): 32 | # Check for specific substrings in the response text 33 | if "" in unfiltered_response_text or "" in unfiltered_response_text: 34 | # Remove specific substrings from the response text 35 | filtered_response_text = unfiltered_response_text.replace("", "").replace("", "") 36 | return filtered_response_text 37 | return unfiltered_response_text 38 | 39 | def delayed_delete(filename, delay=5): 40 | """ Delete the file after a specified delay in seconds. 
""" 41 | def attempt_delete(): 42 | time.sleep(delay) 43 | try: 44 | os.remove(filename) 45 | logger.info(f"Successfully deleted temporary audio file: {filename}") 46 | except Exception as error: 47 | logger.error(f"Error deleting temporary audio file: {filename} - {error}") 48 | 49 | thread = threading.Thread(target=attempt_delete) 50 | thread.start() 51 | 52 | def save_message_history(call_sid, message_history): 53 | logger.info(f"Inside save message history") 54 | redis_client = get_redis_client() # Get the latest Redis client 55 | redis_client.set(call_sid, json.dumps(message_history)) 56 | 57 | def get_message_history(call_sid): 58 | redis_client = get_redis_client() # Get the latest Redis client 59 | message_history_json = redis_client.get(call_sid) 60 | return json.loads(message_history_json) if message_history_json else [] 61 | 62 | def process_elevenlabs_audio(initial_message): 63 | audio_data = text_to_speech(initial_message) 64 | audio_file_path = save_audio_file(audio_data) 65 | audio_filename = os.path.basename(audio_file_path) 66 | return audio_filename 67 | 68 | def generate_diverse_confirmation(speech_result): 69 | """Generates a diverse confirmation message based on the user's input.""" 70 | confirmation_prompts = [ 71 | f"Just to make sure I heard you correctly, you said '{speech_result}'. Is that right?", 72 | f"I think I got that. You mentioned '{speech_result}'. Did I understand correctly?", 73 | f"Let me double-check. You said '{speech_result}', correct?", 74 | f"I want to ensure I understood you properly. You said '{speech_result}', right?", 75 | f"To avoid any misunderstanding, can you confirm you said '{speech_result}'?", 76 | f"I'd like to confirm something. Did you mean to say '{speech_result}'?", 77 | f"Before we continue, I just want to verify. You said '{speech_result}', didn't you?", 78 | f"I believe I heard '{speech_result}'. Am I on the right track?", 79 | f"Quick check - did you say '{speech_result}'?", 80 | f"I want to make sure we're on the same page. You mentioned '{speech_result}', correct?" 81 | ] 82 | 83 | return random.choice(confirmation_prompts) 84 | -------------------------------------------------------------------------------- /audio_helpers.py: -------------------------------------------------------------------------------- 1 | # This code is licensed under the Custom License for Paid Community Members. 2 | # For more information, see the LICENSE file in the root directory of this repository. 
3 | 4 | import requests 5 | import tempfile 6 | import os 7 | from werkzeug.utils import secure_filename 8 | from config import Config 9 | import os 10 | from io import BytesIO 11 | from typing import IO 12 | from elevenlabs import VoiceSettings 13 | from elevenlabs.client import ElevenLabs 14 | import logging 15 | 16 | logging.basicConfig(level=logging.INFO) 17 | logger = logging.getLogger(__name__) 18 | 19 | 20 | # ElevenLabs client initialization 21 | elevenlabs_client = None 22 | 23 | def initialize_elevenlabs_client(): 24 | global elevenlabs_client 25 | logger.info(f"Initiating ElevenLabs client") 26 | elevenlabs_client = ElevenLabs(api_key=Config.ELEVENLABS_API_KEY) 27 | 28 | 29 | def text_to_speech(text): 30 | url = f"https://api.elevenlabs.io/v1/text-to-speech/{Config.VOICE_ID}" 31 | headers = { 32 | 'Content-Type': 'application/json', 33 | 'xi-api-key': Config.ELEVENLABS_API_KEY 34 | } 35 | data = { 36 | "model_id": "eleven_turbo_v2", 37 | "text": text, 38 | "optimize_streaming_latency": 2, 39 | "voice_settings": { 40 | "similarity_boost": 0.75, 41 | "stability": 0.5, 42 | "use_speaker_boost": True 43 | } 44 | } 45 | response = requests.post(url, headers=headers, json=data) 46 | if response.status_code == 200: 47 | return response.content 48 | else: 49 | raise Exception(f"Failed to generate speech: {response.text}") 50 | 51 | def text_to_speech_stream(text: str) -> IO[bytes]: 52 | """ 53 | Converts text to speech and returns the audio data as a byte stream. 54 | 55 | This function invokes a text-to-speech conversion API with specified parameters, including 56 | voice ID and various voice settings, to generate speech from the provided text. Instead of 57 | saving the output to a file, it streams the audio data into a BytesIO object. 58 | 59 | Args: 60 | text (str): The text content to be converted into speech. 61 | 62 | Returns: 63 | IO[bytes]: A BytesIO stream containing the audio data. 
64 | """ 65 | # Perform the text-to-speech conversion 66 | response = elevenlabs_client.text_to_speech.convert( 67 | voice_id=Config.VOICE_ID, # Adam pre-made voice 68 | optimize_streaming_latency="2", 69 | output_format="mp3_22050_32", 70 | text=text, 71 | model_id="eleven_multilingual_v2", 72 | voice_settings=VoiceSettings( 73 | stability=0.5, 74 | similarity_boost=0.8, 75 | use_speaker_boost=True, 76 | ), 77 | ) 78 | 79 | logger.info(f"Streaming audio data") 80 | 81 | # Create a BytesIO object to hold audio data 82 | audio_stream = BytesIO() 83 | 84 | # Write each chunk of audio data to the stream 85 | for chunk in response: 86 | if chunk: 87 | audio_stream.write(chunk) 88 | 89 | # Reset stream position to the beginning 90 | audio_stream.seek(0) 91 | 92 | # Return the stream for further use 93 | return audio_stream 94 | 95 | def save_audio_file(audio_data): 96 | with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3', dir='audio_files') as tmpfile: 97 | tmpfile.write(audio_data) 98 | return tmpfile.name 99 | -------------------------------------------------------------------------------- /cache/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/cache/.gitkeep -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import base64 4 | import sqlite3 5 | from dotenv import load_dotenv 6 | import logging 7 | 8 | load_dotenv() 9 | logger = logging.getLogger(__name__) 10 | 11 | class Config: 12 | # Static config from environment variables 13 | SECRET_KEY = os.getenv('SECRET_KEY') 14 | 15 | # Store the last loaded config timestamp 16 | last_loaded_timestamp = None 17 | 18 | @staticmethod 19 | def load_dynamic_config(): 20 | # Load configuration from the SQLite database 21 | config_data = {} 22 | try: 23 | conn = sqlite3.connect('knotie.db') 24 | cursor = conn.cursor() 25 | cursor.execute('SELECT key, value FROM config') 26 | rows = cursor.fetchall() 27 | config_data = {row[0]: row[1] for row in rows} 28 | except sqlite3.Error as e: 29 | logger.error(f"Error loading config from database: {e}") 30 | finally: 31 | conn.close() 32 | return config_data 33 | 34 | @staticmethod 35 | def save_dynamic_config(config_data): 36 | # Save configuration to the SQLite database 37 | try: 38 | conn = sqlite3.connect('knotie.db') 39 | cursor = conn.cursor() 40 | for key, value in config_data.items(): 41 | cursor.execute('INSERT OR REPLACE INTO config (key, value) VALUES (?, ?)', (key, value)) 42 | conn.commit() 43 | except sqlite3.Error as e: 44 | logger.error(f"Error saving config to database: {e}") 45 | finally: 46 | conn.close() 47 | 48 | @classmethod 49 | def initialize(cls): 50 | cls.dynamic_config = cls.load_dynamic_config() 51 | cls.update_dynamic_config() 52 | 53 | @classmethod 54 | def reload_if_changed(cls): 55 | """Reload the configuration if the config data in the database has changed.""" 56 | # You may implement a logic to check if the database entry has changed, if required. 
57 | # For now, we'll just reload dynamically without a timestamp comparison 58 | cls.dynamic_config = cls.load_dynamic_config() 59 | cls.update_dynamic_config() 60 | logger.info("Configuration reloaded from database.") 61 | 62 | @classmethod 63 | def update_dynamic_config(cls): 64 | dynamic_config = cls.load_dynamic_config() 65 | cls.COMPANY_NAME = dynamic_config.get('COMPANY_NAME', 'Default Company Name') 66 | cls.COMPANY_BUSINESS = dynamic_config.get('COMPANY_BUSINESS', 'Default Company Business') 67 | cls.COMPANY_PRODUCTS_SERVICES = dynamic_config.get('COMPANY_PRODUCTS_SERVICES', 'Default Products Services') 68 | cls.CONVERSATION_PURPOSE = dynamic_config.get('CONVERSATION_PURPOSE', 'Default Conversation Purpose') 69 | cls.AISALESAGENT_NAME = dynamic_config.get('AISALESAGENT_NAME', 'Default Agent Name') 70 | cls.AI_API_KEY = dynamic_config.get('AI_API_KEY', 'Default AI API Key') 71 | cls.ENCRYPTION_KEY = dynamic_config.get('ENCRYPTION_KEY', 'Default Encryption Key') 72 | cls.WHICH_MODEL = dynamic_config.get('WHICH_MODEL', 'Default Model') 73 | cls.OPENAI_BASE_URL = dynamic_config.get('OPENAI_BASE_URL', 'https://api.default.com') 74 | cls.VOICE_MODE = dynamic_config.get('VOICE_MODE', 'Default Voice Mode') 75 | cls.LLM_MODEL = dynamic_config.get('LLM_MODEL', 'Default Model') 76 | cls.OPENAI_FINE_TUNED_MODEL_ID = dynamic_config.get('OPENAI_FINE_TUNED_MODEL_ID', 'gpt-3.5-turbo') 77 | cls.OPENAI_FINE_TUNED_TOOLS_MODEL_ID = dynamic_config.get('OPENAI_FINE_TUNED_TOOLS_MODEL_ID', 'gpt-3.5-turbo') 78 | cls.USE_LANGCHAIN_TOOL_CLASS = dynamic_config.get('USE_LANGCHAIN_TOOL_CLASS', False) 79 | cls.AGENT_CUSTOM_INSTRUCTIONS = dynamic_config.get('AGENT_CUSTOM_INSTRUCTIONS', 'Default Instructions') 80 | cls.TWILIO_ACCOUNT_SID = dynamic_config.get('TWILIO_ACCOUNT_SID', 'Default Twilio SID') 81 | cls.TWILIO_AUTH_TOKEN = dynamic_config.get('TWILIO_AUTH_TOKEN', 'Default Twilio Auth Token') 82 | cls.TWILIO_FROM_NUMBER = dynamic_config.get('TWILIO_FROM_NUMBER', 'Default Twilio Number') 83 | cls.ELEVENLABS_API_KEY = dynamic_config.get('ELEVENLABS_API_KEY', 'Default Eleven Labs API Key') 84 | cls.NGROK_AUTH_TOKEN = dynamic_config.get('NGROK_AUTH_TOKEN', 'NGROK Default Auth Token') 85 | cls.USE_NGROK = dynamic_config.get('USE_NGROK', 'true') 86 | cls.VOICE_ID = dynamic_config.get('VOICE_ID', 'Default Voice ID') 87 | cls.APP_PUBLIC_URL = dynamic_config.get('APP_PUBLIC_URL', 'https://default-url.com') 88 | cls.APP_PUBLIC_GATHER_URL = f"{cls.APP_PUBLIC_URL}/gather" 89 | cls.APP_PUBLIC_EVENT_URL = f"{cls.APP_PUBLIC_URL}/event" 90 | cls.CACHE_ENABLED = dynamic_config.get('CACHE_ENABLED', False) 91 | cls.REDIS_URL = dynamic_config.get('REDIS_URL', 'redis://redis:6379') # Set default to local Redis 92 | 93 | @classmethod 94 | def validate_encryption_key(cls): 95 | if cls.ENCRYPTION_KEY and cls.ENCRYPTION_KEY != "Enter a strong ENCRYPTION_KEY": 96 | try: 97 | # Ensure the key is base64 url-safe encoded and 32 bytes long 98 | base64.urlsafe_b64decode(cls.ENCRYPTION_KEY) 99 | logger.info("ENCRYPTION_KEY is valid and set.") 100 | except Exception as e: 101 | logger.error("Invalid ENCRYPTION_KEY. It must be a 32-byte base64-encoded string.") 102 | cls.ENCRYPTION_KEY = None 103 | else: 104 | logger.warning("ENCRYPTION_KEY is not set or is invalid. 
Please ensure it is updated by the admin.") 105 | cls.ENCRYPTION_KEY = None 106 | 107 | Config.initialize() 108 | -------------------------------------------------------------------------------- /docker-compose-deploy.yaml: -------------------------------------------------------------------------------- 1 | # This code is licensed under the Custom License. 2 | # For more information, see the LICENSE file in the root directory of this repository. 3 | 4 | version: '3.8' 5 | services: 6 | web: 7 | image: avijeett007/knotie-ai-web:latest 8 | ports: 9 | - "5000:5000" 10 | volumes: 11 | - .:/usr/src/app 12 | - ./audio_files:/usr/src/app/audio_files 13 | environment: 14 | FLASK_APP: app.py 15 | FLASK_ENV: development 16 | FLASK_RUN_HOST: 0.0.0.0 17 | env_file: 18 | - .env 19 | depends_on: 20 | - redis 21 | redis: 22 | image: "redis:alpine" 23 | ports: 24 | - "6379:6379" 25 | volumes: 26 | redis_data: 27 | -------------------------------------------------------------------------------- /docker-compose-redis_only.yaml: -------------------------------------------------------------------------------- 1 | # This code is licensed under the Custom License. 2 | # For more information, see the LICENSE file in the root directory of this repository. 3 | 4 | version: '3.8' 5 | services: 6 | redis: 7 | image: "redis:alpine" 8 | ports: 9 | - "6379:6379" 10 | volumes: 11 | redis_data: 12 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # This code is licensed under the Custom License. 2 | # For more information, see the LICENSE file in the root directory of this repository. 3 | 4 | version: '3.8' 5 | services: 6 | web: 7 | build: . 8 | ports: 9 | - "5000:5000" 10 | image: avijeett007/knotie-ai-web:latest 11 | volumes: 12 | - .:/usr/src/app 13 | - ./audio_files:/usr/src/app/audio_files 14 | environment: 15 | - PYTHONUNBUFFERED=1 16 | env_file: 17 | - .env 18 | command: uvicorn app:app --host 0.0.0.0 --port 5000 --reload 19 | depends_on: 20 | - redis 21 | redis: 22 | image: "redis:alpine" 23 | ports: 24 | - "6379:6379" 25 | volumes: 26 | - redis_data:/data 27 | volumes: 28 | redis_data: -------------------------------------------------------------------------------- /docs/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Code of Conduct 3 | description: Code of Conduct for contributing to the Knotie-AI project. 4 | tags: Code of Conduct, Contribution, Knotie-AI 5 | --- 6 | 7 | # 📜 Code of Conduct 8 | 9 | ## Our Pledge 10 | 11 | We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to creating a positive environment include: 18 | 19 | - Using welcoming and inclusive language 20 | - Being respectful of differing viewpoints and experiences 21 | - Gracefully accepting constructive criticism 22 | - Focusing on what is best for the community 23 | - Showing empathy towards other community members 24 | 25 | Examples of unacceptable behavior include: 26 | 27 | - The use of sexualized language or imagery and unwelcome sexual attention or advances 28 | - Trolling, insulting/derogatory comments, and personal or political attacks 29 | - Public or private harassment 30 | - Publishing others’ private information, such as a physical or email address, without their explicit permission 31 | - Other conduct which could reasonably be considered inappropriate in a professional setting 32 | 33 | ## Enforcement Responsibilities 34 | 35 | Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 36 | 37 | ## Scope 38 | 39 | This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 40 | 41 | ## Enforcement 42 | 43 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at support@kno2gether.com. All complaints will be reviewed and investigated promptly and fairly. 44 | 45 | All community leaders are obligated to respect the privacy and security of the reporter of any incident. 46 | 47 | ## Enforcement Guidelines 48 | 49 | Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: 50 | 51 | ### 1. Correction 52 | 53 | **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. 54 | 55 | **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. 56 | 57 | ### 2. Warning 58 | 59 | **Community Impact**: A violation through a single incident or series of actions. 60 | 61 | **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 62 | 63 | ### 3. Temporary Ban 64 | 65 | **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 66 | 67 | **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. 
Violating these terms may lead to a permanent ban. 68 | 69 | ### 4. Permanent Ban 70 | 71 | **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. 72 | 73 | **Consequence**: A permanent ban from any sort of public interaction within the community. 74 | 75 | ## Attribution 76 | 77 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 78 | 79 | Community Impact Guidelines were inspired by [Mozilla’s code of conduct enforcement ladder](https://github.com/mozilla/diversity). 80 | 81 | For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. -------------------------------------------------------------------------------- /docs/Cloud-Hosting/aws.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: AWS 3 | description: This documentation is coming soon. 4 | --- 5 | 6 | # 🚧 Coming Soon! 7 | 8 | We are still working on this section of the documentation. It will be available soon! Stay tuned for updates. 9 | 10 | In the meantime, feel free to explore other sections of our [documentation](https://knotie-ai.pages.dev/) or [contribute](https://kno2gether.com/knotie-ai-sales-agent/) to the project. 11 | -------------------------------------------------------------------------------- /docs/Cloud-Hosting/cloudserver.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: CloudServer 3 | description: This documentation is coming soon. 4 | --- 5 | 6 | # 🚧 Coming Soon! 7 | 8 | We are still working on this section of the documentation. It will be available soon! Stay tuned for updates. 9 | 10 | In the meantime, feel free to explore other sections of our [documentation](https://knotie-ai.pages.dev/) or [contribute](https://kno2gether.com/knotie-ai-sales-agent/) to the project. 11 | -------------------------------------------------------------------------------- /docs/Cloud-Hosting/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Cloud Guide 3 | description: Detailed Cloud Hosting Guide for various Cloud Hosting Providers 4 | --- 5 | 6 | # 📚 Cloud-Hosting Guide 7 | 8 | This section provides step-by-step guides to help you get the most out of Knotie-AI. Just so you know, we also offer hosting services to manage this AI-Sales Agent for you. 9 | Please contact us via below link to know further. 10 | 11 | https://kno2gether.com/knotie-ai-sales-agent/ 12 | 13 | ## Cloud Provider Docs 14 | - [Kamatera](kamatera.md) 15 | - [AWS](aws.md) 16 | - [CloudServer](cloudServer.md) 17 | 18 | -------------------------------------------------------------------------------- /docs/Cloud-Hosting/kamatera.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Kamatera 3 | description: This documentation is coming soon. 4 | --- 5 | 6 | # 🚧 Coming Soon! 7 | 8 | We are still working on this section of the documentation. It will be available soon! Stay tuned for updates. 
9 | 10 | In the meantime, feel free to explore other sections of our [documentation](https://knotie-ai.pages.dev/) or [contribute](https://kno2gether.com/knotie-ai-sales-agent/) to the project. 11 | -------------------------------------------------------------------------------- /docs/How-To-Guide/addcustomtoolsviaui.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 🛠️ Add Custom Tools and APIs 3 | description: A detailed guide on how to integrate custom tools and APIs with Knotie-AI. 4 | tags: Custom Tools, API Integration, Knotie-AI, Python 5 | --- 6 | 7 | # 🛠️ Add Custom Tools and APIs via Admin UI 8 | 9 | Integrating custom tools and APIs with Knotie-AI has become even more straightforward and easy. Now you don't need to fiddle with code anymore. That means Knotie-AI is becoming more Business Friendly. This guide will walk you through the steps required to add and configure your custom tools within the system. 10 | 11 | --- 12 | 13 | ## 🧩 Start Knotie-AI 14 | 15 | First ensure Knotie-AI is started locally. Follow this [Guide](../Self-Hosting/runasservice.md). If you're a developer, follow this [Guide](../Self-Hosting/setup.md) instead. 16 | 17 | --- 18 | 19 | ## 📄 Open Admin UI 20 | 21 | Once Knotie-AI is started, launch the recently updated Admin UI (With much more options added) 22 | Follow this [Configuration and Secret Management Guide](../Self-Hosting/installation.md) to setup all important configuration first 23 | 24 | --- 25 | 26 | ## 🔧 Prepare Your Tools 27 | 28 | To add tools through Admin UI, you need your API/Custom Tools to be available in OpenAPI 3.0 Specification (Yaml file) 29 | If you already have OpenAPI 3.0 Compliant YAML, follow `Add Your Tools` Section in this guide. 30 | 31 | If you don't have OpenAPI 3.0 Compliant YAML, then follow the below steps. 32 | 33 | 34 | ### 1. Prepare a CURL command for your tool/api 35 | 36 | Open Postman or your favourite API Execution/Testing tool where you can test the custom API. 37 | Execute the API once, to test, if everything is allright. 38 | 39 | Now, copy the CURL command output for the api. It should look something like this -> 40 | 41 | ```python 42 | curl --location 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' \ 43 | --header 'Authorization: b78f794ffed9' \ 44 | --header 'Content-Type: application/json' \ 45 | --data '{ 46 | "membership": "Gold-Gym-Membership" 47 | }' 48 | ``` 49 | 50 | - **Remove any sensitive information:** From the Curl, remove any sensitive information, replace with "*****" or "SENSITIVE INFO" 51 | 52 | ```python 53 | curl --location 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' \ 54 | --header 'Authorization: ************' \ 55 | --header 'Content-Type: application/json' \ 56 | --data '{ 57 | "membership": "Gold-Gym-Membership" 58 | }' 59 | ``` 60 | 61 | ### 2. Create OpenAPI Spec Document 62 | 63 | Use any AI Chatbot like ChatGPT, Bard or Bing AI and use the below prompt to convert the curl into an OpenAPI Specification Document. 64 | 65 | 66 | ```python 67 | create openapi specification for the below curl API, dont keep the sensitive info like authorization header in the specification. Use the below Specification as an example guide. 
68 | 69 | CURL To Convert: 70 | curl --location 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' \ 71 | --header 'Authorization: ************' \ 72 | --header 'Content-Type: application/json' \ 73 | --data '{ 74 | "membership": "Gold-Gym-Membership" 75 | }' 76 | 77 | Example Spec Document: 78 | 79 | openapi: 3.0.0 80 | info: 81 | title: Appointment Booking API 82 | description: API for booking appointments via webhook. 83 | version: 1.0.0 84 | 85 | servers: 86 | - url: https://kno2getherworkflow.ddns.net/webhook 87 | 88 | paths: 89 | /bookAppointment: 90 | post: 91 | summary: Book an appointment 92 | operationId: bookAppointment 93 | description: Endpoint to book an appointment by providing the date, time, and customer name. 94 | parameters: 95 | - in: header 96 | name: Authorization 97 | schema: 98 | type: string 99 | required: true 100 | example: "sensitive value" 101 | description: Authorization header required to access this endpoint. 102 | requestBody: 103 | required: true 104 | content: 105 | application/json: 106 | schema: 107 | type: object 108 | properties: 109 | date: 110 | type: string 111 | example: "tomorrow" 112 | description: "The date for the appointment." 113 | time: 114 | type: string 115 | example: "10:00" 116 | description: "The time for the appointment." 117 | customer_name: 118 | type: string 119 | example: "John" 120 | description: "Name of the customer booking the appointment." 121 | required: 122 | - date 123 | - time 124 | - customer_name 125 | responses: 126 | '200': 127 | description: Appointment booked successfully. 128 | '400': 129 | description: Invalid request. 130 | 131 | ``` 132 | 133 | Copy the above prompt, change your CURL output and use any AI Model to generate the OpenAPI Spec Document. 134 | 135 | --- 136 | 137 | ## 3. Add Your Tools 138 | 139 | ### 1. Now Headover to ADMIN UI and click on Custom Tools. 140 | ![Screenshot below](../images/custom_tools.png) 141 | 142 | ### 2. Click on Add Tool (Green Button) and you should see a form like below image. 143 | 144 | ![Screenshot below](../images/Add_Tool.png) 145 | 146 | ### 3. Enter Your Tool Name : Please enter a valid and relevant tool name as this will be used by AI Model. 147 | 148 | Enter Your Tool Description : This is very important that you add a brief appropriate description about what the Tool/API does. This is instrumental for AI Models to choose right tool during conversation. 149 | 150 | ### 4. Upload OpenAPI Spec File. 151 | 152 | ### 5. Now add Your Sensitive Headers as JSON. (Don't worry, the agent won't send header as JSON, this is for secure storing of your sensitive information) 153 | For example (as per given API above, I'll add this, in the sensitive header text area) 154 | ``` 155 | {"Authorization": "b78f794ffed9"} 156 | ``` 157 | 158 | ### 6. Similarly, add any sensitive information which should be sent in request body. 159 | 160 | ### 7. Submit Tools. 161 | ![Screenshot below](../images/submit_tool.png) 162 | 163 | You can add as many custom API/Tools using the above process. 164 | 165 | Note: This sensitive information won't be shared to any AI Model. This will be kept safely within Database, encrypted. 166 | 167 | ## ⚠️ Important Notes 168 | 169 | - **Technical Complexity:** The Technical complexity behind this work was a milestone to achieve. We may not have been able to support all form of APIs(Nested/complex APIs). Please watchout the logs and if you face issues. 
Please raise a support request (If premium member), or please submit an issue through github Issue tab. 170 | - **Future Improvements:** A more user-friendly framework for adding tools is in the works. Stay tuned for updates! 171 | 172 | --- 173 | 174 | ## 🎥 Video Tutorial 175 | 176 | For additional help, please watch the tutorial video on YouTube and join our community. If you encounter any issues, feel free to ask for help in the "Ask For Help" channel. 177 | 178 | 179 | [Watch the Tutorial](https://youtu.be/FlsN7rokbr0) | [Join the Community](https://community.kno2gether.com/communities/groups/kno2gether-community/home?invite=66b617e90fd0ff23e04efce2) 180 | -------------------------------------------------------------------------------- /docs/How-To-Guide/customtools.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 🛠️ Custom Tools and APIs 3 | description: A detailed guide on how to integrate custom tools and APIs with Knotie-AI. 4 | tags: Custom Tools, API Integration, Knotie-AI, Python 5 | --- 6 | 7 | # 🛠️ Custom Tools and APIs - (This Guide is Outdated now !!). Use This [Guide](addcustomtoolsviaui.md) 8 | 9 | Integrating custom tools and APIs with Knotie-AI is straightforward. This guide will walk you through the steps required to add and configure your custom tools within the system. 10 | 11 | --- 12 | 13 | ## 🧩 Modifying `tools.py` 14 | 15 | To add your custom tool: 16 | 17 | 1. **Open `tools.py`:** Open the `tools.py` file in your preferred IDE. 18 | 19 | 2. **Remove Existing Tools (Optional):** If you want to clean up the existing tools, you can remove them and keep only your custom tool(s). 20 | 21 | 3. **Create a New Tool Class:** 22 | - Define a new class for your tool. Below is an example of a custom tool that fetches pricing information: 23 | 24 | ```python 25 | class CustomFetchPriceTool(BaseTool): 26 | name = "PriceInquiry" 27 | description = tools_info["PriceInquiry"]["description"] 28 | 29 | def _run(self, membership_type: str) -> str: 30 | print('Fetch product price is called') 31 | 32 | # Set up the endpoint and headers 33 | url = 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' 34 | headers = {'Content-Type': 'application/json'} 35 | 36 | # Prepare the data payload with the membership type 37 | data = { 38 | "membership": membership_type 39 | } 40 | 41 | # Send a POST request to the server 42 | response = requests.post(url, headers=headers, json=data) 43 | 44 | # Check if the request was successful 45 | if response.status_code == 200: 46 | # Parse the JSON response to get the price 47 | price_info = response.json() 48 | return f"The price is ${price_info['price']} per month." 49 | else: 50 | return "Failed to fetch the price, please try again later." 51 | ``` 52 | 53 | 4. **Customize the Tool:** 54 | - **Class Name:** Change the class name as desired. 55 | - **Tool Name:** Update the `name` attribute. 56 | - **Description:** Provide a clear description of what the tool does. 57 | - **Target URL:** Modify the target URL in the `_run` method to point to your API. 58 | - **Data Payload:** Update the payload to fit your API requirements. 59 | - **Return Message:** Customize the return message to suit your needs. 
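Before wiring the class into the rest of the application, it can be worth sanity-checking it in isolation. The snippet below is a minimal sketch of such a check; it assumes the `CustomFetchPriceTool` class above has been added to `tools.py` and that the target endpoint is reachable from your machine:

```python
# quick_tool_check.py - hypothetical helper script, not part of the repository.
# Assumes CustomFetchPriceTool is defined in tools.py as shown above.
from tools import CustomFetchPriceTool

if __name__ == "__main__":
    tool = CustomFetchPriceTool()
    # Call the tool directly with one of the membership types your API understands
    result = tool._run("Silver-Gym-Membership")
    print(result)  # Expect either the price message or the failure message returned by _run
```

If the direct call behaves as expected, the remaining work is just the integration described in the following sections.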
60 | 61 | --- 62 | 63 | ## 📄 Updating `tools_info` 64 | 65 | After defining your tool, update the `tools_info` dictionary at the top of the `tools.py` file to include your custom tool: 66 | 67 | ```python 68 | tools_info = { 69 | "PriceInquiry": { 70 | "name": "PriceInquiry", 71 | "description": "Fetches product prices of Gym memberships.", 72 | "parameters": { 73 | "product_name": ["Silver-Gym-Membership", "Gold-Gym-Membership", "Platinum-Gym-Membership"] 74 | } 75 | }, 76 | // Add your custom tool information here 77 | } 78 | ``` 79 | 80 | This dictionary is used by the AI model to decide which parameters to use when calling the tool. 81 | 82 | --- 83 | 84 | ## 🔧 Modifying `ai_helpers.py` 85 | 86 | You are almost done! Just two more steps: 87 | 88 | ### 1. Initialize the Tool 89 | 90 | In the `ai_helpers.py` file, find the following section: 91 | 92 | ```python 93 | if Config.USE_LANGCHAIN_TOOL_CLASS: 94 | OnsiteAppointmentTool = OnsiteAppointmentTool() 95 | FetchProductPriceTool = FetchProductPriceTool() 96 | CalendlyMeetingTool = CalendlyMeetingTool() 97 | AppointmentAvailabilityTool = AppointmentAvailabilityTool() 98 | ``` 99 | 100 | - **Remove Unnecessary Tools:** Remove any tools you don’t need and keep only your custom tool. For example: 101 | 102 | ```python 103 | if Config.USE_LANGCHAIN_TOOL_CLASS: 104 | CustomFetchPriceTool = CustomFetchPriceTool() 105 | ``` 106 | 107 | ### 2. Update the `process_message` Function 108 | 109 | In the same `ai_helpers.py` file, locate the `process_message` function: 110 | 111 | - **Modify the `if` Statement:** Ensure there’s an `if` statement that matches the `tool_name` with the name you provided in `tools.py`. Here’s an example: 112 | 113 | ```python 114 | if tool_name == "PriceInquiry": 115 | tool_output = CustomFetchPriceTool._run(params) # Ensure params are passed correctly 116 | message_history.append({"role": "api_response", "content": tool_output}) 117 | ``` 118 | 119 | This code calls the `_run` method of your tool when the AI decides to use it. 120 | 121 | --- 122 | 123 | ## ⚠️ Important Notes 124 | 125 | - **Technical Complexity:** This process involves some technical steps and may require a good understanding of Python and API integration. 126 | - **Future Improvements:** A more user-friendly framework for adding tools (potentially using LangChain) is in the works. Stay tuned for updates! 127 | 128 | --- 129 | 130 | ## 🎥 Video Tutorial 131 | 132 | For additional help, please watch the tutorial video on YouTube and join our community. If you encounter any issues, feel free to ask for help in the "Ask For Help" channel. 133 | 134 | 135 | [Watch the Tutorial](https://youtube.com/link-to-video) | [Join the Community](https://discord.com/invite/link) 136 | -------------------------------------------------------------------------------- /docs/How-To-Guide/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: How-To Guide 3 | description: Detailed how-to guides for various features of Knotie-AI. 4 | --- 5 | 6 | # 📚 How-To Guide 7 | 8 | This section provides step-by-step guides to help you get the most out of Knotie-AI. 
9 | 10 | ## Sections 11 | - [How To Add Custom Tools](customtools.md) 12 | - [How To Use Open Source Model](opensource.md) 13 | - [How To Setup Twilio](twilio.md) 14 | - [Troubleshooting](troubleshooting.md) 15 | -------------------------------------------------------------------------------- /docs/How-To-Guide/opensource.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: OpenSource 3 | description: This documentation is coming soon. 4 | --- 5 | 6 | # 🚧 Coming Soon! 7 | 8 | We are still working on this section of the documentation. It will be available soon! Stay tuned for updates. 9 | 10 | In the meantime, feel free to explore other sections of our [documentation](https://knotie-ai.pages.dev/) or [contribute](https://kno2gether.com/knotie-ai-sales-agent/) to the project. 11 | -------------------------------------------------------------------------------- /docs/How-To-Guide/troubleshooting.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 🛠️ Troubleshooting 3 | description: This documentation will help you troubleshoot issues with Knotie-AI. 4 | tags: Troubleshooting, Docker, Knotie-AI, Debugging 5 | --- 6 | 7 | # 🛠️ Troubleshooting 8 | 9 | If you encounter any issues while running Knotie-AI, this guide will help you diagnose and resolve them. 10 | 11 | --- 12 | 13 | ## 🔍 Basic Troubleshooting Steps 14 | 15 | ### 1. **Check Running Containers** 16 | 17 | The first step in troubleshooting is to check the status of the running Docker containers. You can do this by using the following command: 18 | 19 | ```bash 20 | docker ps 21 | ``` 22 | 23 | This command will list all the running containers, showing their container IDs, names, and other important information. 24 | 25 | ### 2. **View Logs** 26 | 27 | Once you have the container ID of the Knotie-AI service, you can view the logs to diagnose any issues. Use the following command to view the logs: 28 | 29 | ```bash 30 | docker logs -f <container_id> 31 | ``` 32 | 33 | Replace `<container_id>` with the actual container ID from the previous step. 34 | 35 | The `-f` flag allows you to follow the log output in real-time, which can be helpful for monitoring the application as it runs. 36 | 37 | --- 38 | 39 | ## ⚠️ Common Issues and Fixes 40 | 41 | While the above steps cover the basic troubleshooting approach, more detailed information and common fixes will be provided in future updates. 42 | 43 | Stay tuned! 44 | 45 | --- 46 | 47 | ## ❓ Need More Help? 48 | 49 | If you're unable to resolve the issue using the above steps, consider reaching out to the community: 50 | 51 | - [Join our Community](https://community.kno2gether.com/communities/groups/kno2gether-community/home?invite=66b617e90fd0ff23e04efce2) 52 | - [Watch the Video Tutorials](https://www.youtube.com/channel/UCxgkN3luQgLQOd_L7tbOdhQ) 53 | 54 | We’re here to help! -------------------------------------------------------------------------------- /docs/How-To-Guide/twilio.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Twilio 3 | description: This documentation is coming soon. 4 | --- 5 | 6 | # 🚧 Coming Soon! 7 | 8 | We are still working on this section of the documentation. It will be available soon! Stay tuned for updates.
11 | -------------------------------------------------------------------------------- /docs/Self-Hosting/configuration.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Configuration 3 | description: Detailed description of project files and their purposes in Knotie-AI. 4 | tags: Configuration, Project Files, Knotie-AI 5 | --- 6 | 7 | # 🛠️ Configuration 8 | 9 | ## Important Project File Details 10 | 11 | - 📄 **.env_sample**: A sample environment variable file that should be copied to `.env` for configuring the application's necessary settings. 12 | - 🧠 **ai_helpers.py**: Contains helper functions for AI operations, such as interacting with machine learning models or APIs. 13 | - 🌐 **app.py**: The main Python Flask application file that defines HTTP routes and the web server logic. 14 | - 🎙️ **audio_helpers.py**: Includes functions to handle audio processing tasks within the application, such as audio file manipulation or audio stream processing using the ElevenLabs API. 15 | - ⚙️ **config.py**: Configuration file for setting up global variables and application settings. 16 | - ⚙️ **config.json**: JSON configuration file for setting up global variables, secrets and application settings. It can be managed via the admin UI. 17 | - 🐳 **docker-compose.yml**: Docker Compose file to define and run multi-container Docker applications. Specifies services, networks, and volumes. 18 | - 🐳 **Dockerfile**: A Dockerfile to build the Docker image for the application, specifying the environment, dependencies, and commands. 19 | - 📝 **prompts.py**: Includes predefined prompts for different logic within this application. Note: These are very generalized prompts to showcase the capability and give you guidance. You may want to change them as per your business requirements. 20 | - 📈 **stages.py**: Defines various stages or states of a process in a standard Sales Process, useful for workflow management or state tracking. Note: These are very generalized stages. You may want to change them as per your business requirements. 21 | - 🔧 **tools.py**: Provides additional utility functions or tools that can be used throughout the application for various tasks. -------------------------------------------------------------------------------- /docs/Self-Hosting/deployment.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Run The Code Locally 3 | description: Instructions to run/deploy Knotie-AI with/without Docker. 4 | tags: Local Setup, Python, Redis, Knotie-AI 5 | --- 6 | 7 | # 🚀 Running Code using python/conda env 8 | 9 | If you want to run the code without Docker locally, follow these steps: 10 | 11 | 1. 🛠️ **Run Redis locally** (You can ask ChatGPT how to run Redis. Make sure to run it on port 6379). The current code uses Redis for state management (such as conversation history). 12 | 13 | Alternatively, use the below command from the knotie-ai root folder (make sure you have Docker and docker-compose installed and running): 14 | ```bash 15 | docker compose -f docker-compose-redis_only.yaml up -d 16 | ``` 17 | 18 | 2. 🐍 **Install Python 3.11 or above**. 19 | 20 | 3. 🐍 **Install VirtualEnv or Anaconda**. 21 | 22 | 4. 🐍 **Create a Python environment**: 23 | ```bash 24 | conda create -n aisalesagent python=3.11 25 | ``` 26 | 27 | 5. 🐍 **Activate the environment**: 28 | ```bash 29 | conda activate aisalesagent 30 | ``` 31 | 32 | 6.
📥 **Download the zip or clone the repo**: 33 | ```bash 34 | git clone https://github.com/avijeett007/Knotie-AI.git 35 | cd knotie-ai 36 | ``` 37 | 38 | 7. 📂 **Go inside the folder and install all the requirements**: 39 | ```bash 40 | pip install -r requirements.txt 41 | ``` 42 | 43 | 8. 📝 **Copy `.env_sample` to `.env`**: 44 | ```bash 45 | cp .env_sample .env 46 | ``` 47 | 48 | 9. 📝 **Open `.env` in a text editor and update all the config properties as required**. 49 | 50 | 10. 🚀 **Run the application**: 51 | ```bash 52 | python app.py 53 | 54 | OR 55 | 56 | FLASK_APP=app.py FLASK_ENV=development flask run --host=0.0.0.0 --port=5000 57 | ``` 58 | -------------------------------------------------------------------------------- /docs/Self-Hosting/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Self-Hosting Guide 3 | description: A comprehensive guide for developers to understand the code & self-host Knotie-AI for improvements 4 | --- 5 | 6 | # 🛠️ Self-Hosting Guide 7 | 8 | Welcome to the Self-Hosting Guide for Knotie-AI. This guide will help you set up, configure, and deploy Knotie-AI on your own server as a developer. 9 | 10 | ## Sections 11 | - [Prerequisites](prerequisites.md) 12 | - [Installation](installation.md) 13 | - [Configuration](configuration.md) 14 | - [Deployment](deployment.md) 15 | -------------------------------------------------------------------------------- /docs/Self-Hosting/installation.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: ⚙️ Configuration & Secret Management 3 | description: Manage your configurations and secrets easily using the Admin UI in Knotie-AI. 4 | tags: Local Setup, Python, Redis, Knotie-AI, Secret Management, Config Management 5 | --- 6 | 7 | 8 | # Configuration and Secret Management 9 | 10 | All the configurations and secrets for Knotie-AI are now managed through `config.json`. The recommended way to manage `config.json` is by using the Admin UI, which allows for hot reloading of configurations. 11 | Run the code either [directly/locally](deployment.md), as a [docker service](setup.md), or in the [cloud](../Cloud-Hosting/index.md). 12 | 13 | --- 14 | 15 | ## Accessing the Admin UI 16 | 17 | 1. **Start the Application:** Once the app is started, head over to your browser and navigate to: http://localhost:5000 (This is the default URL if you're running the application locally.) 18 | Or navigate to http://your_server_ip:5000 19 | or navigate to https://your_application_public_url (If you're running ngrok or running this app behind a Proxy/LB) 20 | 21 | 2. **Login:** Use the following default credentials to log in: 22 | 23 | - **Username:** `admin` 24 | - **Password:** `admin` 25 | 26 | > **Important:** On your first login, you will be prompted to change the default credentials. Please remember the new password, as it is required for future logins. 27 | 28 | --- 29 | 30 | ## Updating Configurations and Secrets 31 | 32 | 1. **Access the Settings:** 33 | - Once logged in, locate the **Settings** link in the Admin UI. Click on it to access the configuration and secret management section. 34 | 35 | 2. **Edit Configurations:** 36 | - You can now update the necessary configuration and secret sections using the Admin UI. 37 | 38 | 3. **Save Changes:** 39 | - Ensure you hit the **Update** button to save your changes. This action will reinitialize all necessary clients to apply the new settings (see the short sketch below for what this does under the hood).
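For developers who are curious what the **Update** button actually triggers: the dynamic settings are stored in the `knotie.db` SQLite database, and the `Config` class in `config.py` simply re-reads them. A minimal sketch of a manual reload, assuming `config.py` from this repository is importable from your Python environment, could look like this:

```python
# Minimal sketch: manually re-reading the dynamic settings that the Admin UI stores in knotie.db.
# Assumes config.py from this repository is importable from the current working directory.
from config import Config

def refresh_settings():
    # Re-loads the key/value pairs from the database and reapplies them as class attributes
    Config.reload_if_changed()
    # A couple of reloaded values, purely as an illustration
    return Config.COMPANY_NAME, Config.LLM_MODEL
```

In normal operation you should not need to call this yourself; it is shown only to make the hot-reload behaviour less of a black box.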
40 | 41 | --- 42 | 43 | ## Placeholder for Screenshots 44 | 45 | ![Admin Configuration Screen](../../admin_settings.png) 46 | ![Admin Configuration Save Changes](../../admin_save.png) 47 | 48 | --- 49 | 50 | ## Important Notes 51 | 52 | - **Remember Your Password:** After changing your password on the first login, ensure you remember it. If you forget it, you will need to follow the Reset User Documentation(#) to regain access. 53 | - **Reinitialization:** Updating the configurations using the Admin UI triggers reinitialization of clients, so it's crucial to ensure that all settings are correct before applying them. 54 | 55 | --- 56 | 57 | *Stay tuned for more updates and screenshots to make your configuration management experience even smoother!* -------------------------------------------------------------------------------- /docs/Self-Hosting/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Prerequisites 3 | description: List of prerequisites needed to set up and run Knotie-AI. 4 | tags: Prerequisites, Setup, Knotie-AI, Docker, Python, Twilio 5 | --- 6 | 7 | # 🛠️ Prerequisites 8 | 9 | Before setting up Knotie-AI, ensure you have the following prerequisites: 10 | 11 | - 🐳 **Docker**: Install Docker from [here](https://www.docker.com/get-started) 12 | - 🐙 **Docker Compose**: Included with Docker Desktop 13 | - ☎️ **Twilio Account**: [Signup here](https://www.twilio.com/login) 14 | - 🧠 **ElevenLabs Account**: [Signup here](https://try.elevenlabs.io/434k6rx9zk3h) 15 | - 💾 **Groq Account**: [Create keys here](https://console.groq.com/keys) 16 | - 🌐 **OpenAI Account**: [Create keys here](https://platform.openai.com/account/usage) 17 | - 🌍 **NGROK Account**: [Register here](https://ngrok.com/) 18 | 19 | --- 20 | 21 | ## 🛠️ Twilio Configuration 22 | 23 | 1. Once your Twilio account is registered, create a phone number [here](https://console.twilio.com/us1/develop/phone-numbers/manage/incoming). Twilio provides free credit and a free phone number to test. For production usage, make sure to buy a local number. 24 | 25 | 2. Run the following command to start NGROK: 26 | ```bash 27 | ngrok http 5000 28 | ``` 29 | This will create a public link such as `https://a98d-82-26-133-9.ngrok-free.app`. Make a note of this URL. 30 | 31 | 3. Update this URL and API Keys/Account SID/Auth Tokens, etc., from Twilio, OpenAI, Eleven Labs into the `.env` file as described in the run steps below. 32 | 33 | 4. Go to the Twilio Manage Phone number page and update the following sections with the NGROK Public URL: 34 | ```bash 35 | This is a Must Step 36 | ------------------- 37 | 38 | A Call Comes In --> https://ngrok_public_link/gather-inbound 39 | Primary Handler Fails --> https://ngrok_public_link/gather 40 | 41 | Click Save Changes 42 | ``` 43 | 44 | ![Twilio Configuration](../images/image.png) -------------------------------------------------------------------------------- /docs/Self-Hosting/runasservice.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Run The Code as Docker Container 3 | description: Steps to set up Knotie-AI using Docker and configuring environment variables. 4 | tags: Setup, Docker, Configuration, Knotie-AI 5 | --- 6 | 7 | # ⚙️ Run Steps As Docker Service (For Business Owners/Entrepreneurs/Non-Dev Purpose/Checking Out Purpose) 8 | 9 | ## Steps to Run 10 | 11 | 1. 📥 **Clone the repository** or download the files to your local machine. 
12 | ```bash 13 | git clone https://github.com/avijeett007/knotie-ai.git 14 | cd knotie-ai 15 | ``` 16 | 17 | 2. 📂 **Navigate to the project directory** where `docker-compose-deploy.yaml` is located. 18 | ```bash 19 | cd knotie-ai 20 | ``` 21 | 22 | 3. 📝 **Copy `.env_sample` to `.env`** and modify the environment variables as per your requirements. 23 | ```bash 24 | cp .env_sample .env 25 | ``` 26 | 27 | 4. 🛠️ **Run the following command to build and start the containers**: 28 | ```bash 29 | docker-compose -f docker-compose-deploy.yaml up -d 30 | ``` 31 | 32 | 5. 🌐 **Access the application** via `http://localhost:5000` or another configured port, or if hosting in the cloud, use `http://<server-ip>:5000`. 33 | 34 | --- 35 | 36 | ## 🛑 Stopping the Application 37 | 38 | To stop the application, use the following Docker Compose command: 39 | 40 | ```bash 41 | docker-compose -f docker-compose-deploy.yaml down 42 | ``` 43 | 44 | Next, follow the [Configuration Guide](installation.md) -------------------------------------------------------------------------------- /docs/Self-Hosting/setup.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Run The Code as Docker Container 3 | description: Steps to set up Knotie-AI using Docker and configuring environment variables. 4 | tags: Setup, Docker, Configuration, Knotie-AI 5 | --- 6 | 7 | # ⚙️ Run Steps As Docker Service (For developers) 8 | 9 | ## Steps to Run 10 | 11 | 1. 📥 **Clone the repository** or download the files to your local machine. 12 | ```bash 13 | git clone https://github.com/avijeett007/knotie-ai.git 14 | cd knotie-ai 15 | ``` 16 | 17 | 2. 📂 **Navigate to the project directory** where `docker-compose.yml` is located. 18 | ```bash 19 | cd knotie-ai 20 | ``` 21 | 22 | 3. 📝 **Copy `.env_sample` to `.env`** and modify the environment variables as per your requirements. 23 | ```bash 24 | cp .env_sample .env 25 | ``` 26 | 27 | 4. 🛠️ **Run the following command to build and start the containers**: 28 | ```bash 29 | docker-compose up --build 30 | ``` 31 | 32 | 5. 🌐 **Access the application** via `http://localhost:5000` or another configured port. 33 | 34 | --- 35 | 36 | ## 🛑 Stopping the Application 37 | 38 | To stop the application, use the following Docker Compose command: 39 | 40 | ```bash 41 | docker-compose down 42 | ``` 43 | 44 | ## 🔄 Changing Underlying Code 45 | 46 | If you've changed any code in the application, please make sure to use the following Docker Compose command to rebuild the image and start it: 47 | 48 | ```bash 49 | docker-compose build --no-cache 50 | docker-compose up 51 | ``` -------------------------------------------------------------------------------- /docs/contribution.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Contribution 3 | description: Guidelines for contributing to the Knotie-AI project. 4 | tags: Contribution, Open Source, Knotie-AI 5 | --- 6 | 7 | # 🤝 Contribution Guidelines 8 | 9 | All contributions are welcome! Please follow the guidelines below to contribute to the Knotie-AI project. 10 | 11 | ## 📝 Guidelines 12 | 13 | 1. **Fork the repository**: Click on the "Fork" button at the top right corner of the repository page to create a copy of the repository in your GitHub account. 14 | 15 | 2. **Clone your fork**: Clone the forked repository to your local machine. 16 | ```bash 17 | git clone https://github.com/avijeett007/knotie-ai.git 18 | cd knotie-ai 19 | ``` 20 | 21 | 3.
**Create a new branch**: Create a new branch for your feature or bugfix. 22 | ```bash 23 | git checkout -b feature-name 24 | ``` 25 | 26 | 4. **Make your changes**: Make the necessary changes in your local repository. 27 | 28 | 5. **Commit your changes**: Commit your changes with a descriptive commit message. 29 | ```bash 30 | git add . 31 | git commit -m "Description of the feature or bugfix" 32 | ``` 33 | 34 | 6. **Push to your fork**: Push the changes to your forked repository. 35 | ```bash 36 | git push origin feature-name 37 | ``` 38 | 39 | 7. **Create a Pull Request**: Go to the original repository and create a Pull Request to merge your changes. 40 | 41 | 8. **Review and Discuss**: Participate in the review process and address any feedback or changes requested by the maintainers. 42 | 43 | ## 📜 Code of Conduct 44 | 45 | Please adhere to the [Code of Conduct](CODE_OF_CONDUCT.md) in all your interactions with the project. 46 | 47 | ## 💬 Join the Discussion 48 | 49 | Join our [Discord Community](https://discord.com/invite/7UKpgUbEXf) to discuss features, ask questions, and share feedback. -------------------------------------------------------------------------------- /docs/images/Add_Tool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/Add_Tool.png -------------------------------------------------------------------------------- /docs/images/Knotie-ai.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/Knotie-ai.png -------------------------------------------------------------------------------- /docs/images/admin_save.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/admin_save.png -------------------------------------------------------------------------------- /docs/images/admin_settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/admin_settings.png -------------------------------------------------------------------------------- /docs/images/custom_tools.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/custom_tools.png -------------------------------------------------------------------------------- /docs/images/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/image.png -------------------------------------------------------------------------------- /docs/images/submit_tool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/docs/images/submit_tool.png -------------------------------------------------------------------------------- /docs/improvements.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Further Improvements 3 | description: 
Suggestions for further improvements and future work on Knotie-AI. 4 | tags: Improvements, Future Work, Knotie-AI 5 | --- 6 | 7 | # 🚀 Further Improvement Suggestions 8 | 9 | Here are some suggestions for further improvements and future work on Knotie-AI: 10 | 11 | 1. **Update Prompts**: Please update prompts as per your business use cases. The provided prompts in `prompts.py` are very generalized and inspired by the 'SalesGPT' Open Source Project. 12 | 13 | 2. **Adjust Sales Stages**: Adjust Sales Stages based on your requirement. The current Sales Stages provided in `stages.py` are very generalized and inspired by the 'SalesGPT' Open Source Project. 14 | 15 | 3. **Test AI Outputs**: If you want to test AI outputs before integrating with Twilio and ElevenLabs API first, use the `testAISalesAgent.py` to do the communication with AI Sales agent in text format. Adjust your prompts accordingly & configuration. 16 | 17 | 4. **Evaluation Script**: An `testAISalesAgent.py` will be created to allow you to evaluate and test AI Agents before you run it for your use case (This is Completed now). 18 | 19 | 5. **State Management**: The current code relies on Redis for in-memory state management. File-based and cloud database support will be included for users who can't run Docker in their local (Redis in their local) due to lower system specification. (Work In Progress). 20 | 21 | 6. **Cloud Deployment Guide**: A guide on how to deploy this on the cloud will be included (Work In Progress). 22 | 23 | 7. **Wordpress Plugin**: The Wordpress plugin will be updated to support tool calling, email address collection from customers, etc. (Work In Progress). 24 | 25 | 8. **Suggestions and Feedback**: If you have any other suggestions, please join the [Community](https://community.kno2gether.com/communities/groups/kno2gether-community/home?invite=66b617e90fd0ff23e04efce2) and raise your suggestion or feedback using the **Feedbacks/Suggestions** channel. 26 | 27 | 9. **Join Private Knotie-AI Channel** : If you wish to contribute as developer or hosting with us, you can get access to our Private Knotie-AI Channel in the Community. Please raise a direct request by sending email to support@kno2gether.com. -------------------------------------------------------------------------------- /docs/testing.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Testing 3 | description: Instructions to test the AI Sales Agent using Postman or similar API execution platform. 4 | tags: Testing, API, Postman, Knotie-AI 5 | --- 6 | 7 | # 🧪 Testing Your AI Sales Agent 8 | 9 | To test the agent, install Postman or a similar API execution platform. Open Postman and click on import and paste the below CURL command: 10 | 11 | ```bash 12 | curl --location 'https://a98d-82-26-133-9.ngrok-free.app/start-call' --header 'Content-Type: application/json' --data '{ 13 | "customer_name" : "Avijit", 14 | "customer_phonenumber": "+441234567890", 15 | "customer_businessdetails": "Looking for a gym membership to address back pain" 16 | }' 17 | ``` 18 | 19 | Make sure to change the public URL with your NGROK provided URL and phone number/business details as per your testing requirement. 
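If you prefer to script the test rather than use Postman, the same request can be sent from a short Python script. This is a minimal sketch that mirrors the curl command above; replace the base URL with your own NGROK (or server) URL before running it:

```python
# Minimal sketch of the /start-call test using the requests library (already listed in requirements.txt).
import requests

BASE_URL = "https://a98d-82-26-133-9.ngrok-free.app"  # replace with your public URL

payload = {
    "customer_name": "Avijit",
    "customer_phonenumber": "+441234567890",
    "customer_businessdetails": "Looking for a gym membership to address back pain",
}

# Post the same JSON body that the curl example sends
response = requests.post(f"{BASE_URL}/start-call", json=payload, timeout=30)
print(response.status_code, response.text)
```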
-------------------------------------------------------------------------------- /generated_tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/generated_tools/__init__.py -------------------------------------------------------------------------------- /openapi_specs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/avijeett007/Knotie-AI/b04ae1bbbba6fa44bd23d99d1c8da70bcdc36fba/openapi_specs/__init__.py -------------------------------------------------------------------------------- /prompts.py: -------------------------------------------------------------------------------- 1 | from langchain_core.prompts import PromptTemplate 2 | import sqlite3 3 | 4 | AGENT_STARTING_PROMPT_TEMPLATE = PromptTemplate.from_template( 5 | """Never forget your name is {salesperson_name}. 6 | You're an expert Sales Representative at {company_name}. {company_name}'s business is the following: {company_business}. 7 | You are contacting a potential prospect in order to {conversation_purpose} after the prospect has fill up his details in company website. 8 | Craft your response accordingly. 9 | 10 | Follow in each question and answer these instructions: 11 | {agent_custom_instructions} 12 | 13 | If you're asked about where you got the user's contact information, say that you got it from form filled up in the Company's Website. 14 | Keep your responses in short length to retain the user's attention. Never produce lists, just answers. 15 | Start the conversation by just a greeting and how is the prospect doing without pitching in your first turn. 16 | When the conversation is over, output 17 | Always think about at which conversation stage you are at before answering: 18 | {conversation_stages} 19 | 20 | Example 1: 21 | Conversation history: 22 | {salesperson_name}: Hello ! Good morning John ! 23 | User: Hello, who is this? 24 | {salesperson_name}: This is {salesperson_name} calling from {company_name}. How are you John? 25 | User: I am well, why are you calling? 26 | 27 | Example 2: 28 | Conversation history: 29 | {salesperson_name}: Hello John! This is {salesperson_name} calling from {company_name} 30 | User: Hello, why are you calling? 31 | {salesperson_name}: I am calling to talk about options for your gym memberships. 32 | 33 | 34 | You must respond according to the previous conversation history if any and the stage of the conversation you are at. 35 | Only generate one response at a time and act as {salesperson_name} only! When you are done generating, end with '' to give the user a chance to respond. 36 | 37 | {salesperson_name}:""" 38 | ) 39 | 40 | AGENT_PROMPT_INBOUND_TEMPLATE = PromptTemplate.from_template( 41 | """ 42 | Hi, This is {salesperson_name} from {company_name}. Thank you for calling us. Please let me know how can I help you today? 43 | """) 44 | 45 | AGENT_PROMPT_OUTBOUND_TEMPLATE = PromptTemplate.from_template( 46 | """ 47 | As {salesperson_name}, continue engaging the client you've contacted about {company_business}.Company has following Products:{company_products_services}. Your conversation aims to {conversation_purpose}. 
48 | 49 | Maintain concise responses and direct the conversation based on the client’s interest and feedback, aligning with the conversation stages: 50 | Current Stage: {conversation_stage_id} 51 | {conversation_stages} 52 | 53 | Keep your responses in short length to retain the user's attention. Never produce lists, just answers. Be apologetic first whenever you feel you've missed to provide enough details and offer more details. 54 | Include any necessary details or responses generated by tools when relevant: 55 | TOOLS_RESPONSE 56 | -------------- 57 | {tools_response} 58 | 59 | Example ongoing conversation: 60 | --- 61 | {salesperson_name}: Good morning! 62 | John: Hello, who is this? 63 | {salesperson_name}: I'm {salesperson_name} from {company_name}, how are you today? 64 | John: I'm okay, why are you calling? 65 | {salesperson_name}: I’d like to talk about how our gym membership options can help you. 66 | --- 67 | 68 | Respond based on the conversation history and current stage, concluding your input with ''. 69 | Start of conversation history: 70 | === 71 | {conversation_history} 72 | === 73 | End of conversation history. 74 | 75 | When the conversation is complete, indicate with ''. 76 | {salesperson_name}: """ 77 | ) 78 | 79 | 80 | 81 | STAGE_TOOL_ANALYZER_PROMPT = PromptTemplate.from_template(""" 82 | You are a sales assistant helping your sales agent to determine the next stage of conversation to move to when talking to a user and decide if the sales agent needs to call a tool in order to move to the next stage of conversation. 83 | Company has the following Products: {company_products_services}. 84 | Start of conversation history: 85 | === 86 | {conversation_history} 87 | Customer Says: {user_input} 88 | === 89 | End of conversation history. 90 | 91 | Current Conversation stage is: {conversation_stage_id} 92 | Make sure to change and progress the conversation stage based on the latest user input and conversation history. Your main focus should be the last Customer Input. 93 | Determine what should be the next immediate conversation stage for the agent in the sales conversation by selecting only from the following options: 94 | {conversation_stages} 95 | 96 | TOOLS: 97 | ------ 98 | {salesperson_name} has access to the following tools: 99 | 100 | {tools} 101 | 102 | Each tool has specific operations and requires certain parameters, including headers, query parameters, path parameters, and body parameters. 103 | **Only decide to call a tool if all required parameters are available through the conversation history**. 104 | **Important Instructions**: 105 | - **Never** use placeholder values such as "TBD" or "To-be-filled". 106 | - If all required parameters are not available, set `tool_required` to 'no' to help move the conversation forward for the user to provide the missing information. 107 | - Use `"sensitive_value"` for sensitive fields when the real value is not available to you. 108 | - Your Response must be in JSON format with the following fields: `conversation_stage_id` (number), `tool_required` ('yes' or 'no'). If `tool_required` is 'yes', include `tool_name`, `operation_id`, `tool_headers`, `tool_parameters`, and `tool_body_parameters` as applicable. 109 | - **Only** call a tool if **all** required parameters are available. If anything is missing, focus on gathering that information first. 110 | - Your response must be in JSON format as example below. 111 | 112 | 113 | Do not answer anything else nor add anything to your answer. 
114 | 115 | Example 1: 116 | Conversation history: 117 | assistant: Would you like know the price of our Silver Gym Membership? 118 | User: Yes, Sure. 119 | assistant: "conversation_stage_id": 3, "tool_required": "yes", "tool_name": "PriceInquiry", 120 | "operation_id": "fetchMembershipPrice", 121 | "tool_headers": {{"Authorization": "sensitive_value"}}, 122 | "tool_parameters": {{}}, 123 | "tool_body_parameters": {{"membership": "Silver-Gym-Membership"}} 124 | End of example 1. 125 | 126 | Example 2: 127 | Conversation history: 128 | assistant: Would you be happy to book a Free On-Site Appointment in our gym? 129 | User: Yes, Sure. 130 | assistant: "conversation_stage_id": 7, "tool_required": "yes", "tool_name": "AppointmentBooking", 131 | "operation_id": "bookAppointment", 132 | "tool_headers": {{"Authorization": "sensitive_value"}}, 133 | "tool_parameters": {{"url": "book-appointment"}}, 134 | "tool_body_parameters": {{"location": "Main Gym", "trainer": "John Doe"}} 135 | End of example 2. 136 | 137 | Example 3: 138 | Conversation history: 139 | assistant: Can you please help me understand what is your gym requirement? 140 | User: Yes, I'm having some back pain that's why considering the gym. 141 | assistant: "conversation_stage_id": 3, "tool_required": "no" 142 | End of example 3. 143 | 144 | Example 4: 145 | Conversation history: 146 | assistant: Would you like to receive a summary of the available gym memberships? 147 | User: Yes, please. 148 | assistant: "conversation_stage_id": 5, "tool_required": "yes", "tool_name": "MembershipSummary", 149 | "operation_id": "fetchSummary", 150 | "tool_headers": {{"Authorization": "sensitive_value"}}, 151 | "tool_parameters": {{"url": "membership-summary"}}, 152 | "tool_body_parameters": {{"userid": "johndoe"}} 153 | End of example 4. 
154 | """) 155 | 156 | def get_prompt_template(prompt_name): 157 | """Fetch prompt template from the database by name and return it as a PromptTemplate object.""" 158 | conn = sqlite3.connect('knotie.db') 159 | cursor = conn.cursor() 160 | cursor.execute('SELECT template FROM prompts WHERE name = ?', (prompt_name,)) 161 | result = cursor.fetchone() 162 | conn.close() 163 | if result: 164 | template_string = result[0] 165 | # Convert the string to a PromptTemplate object 166 | return PromptTemplate.from_template(template_string) 167 | else: 168 | raise ValueError(f"Prompt with name {prompt_name} not found in the database.") 169 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn 3 | starlette 4 | jinja2 5 | aiofiles 6 | python-multipart 7 | itsdangerous 8 | pydantic 9 | twilio 10 | redis 11 | python-dotenv 12 | groq 13 | langchain 14 | openai 15 | requests 16 | werkzeug 17 | anthropic 18 | elevenlabs 19 | openapi-spec-validator 20 | openapi-python-client 21 | PyYAML 22 | cryptography -------------------------------------------------------------------------------- /retype.json: -------------------------------------------------------------------------------- 1 | { 2 | "input": "./docs", 3 | "output": "./public", 4 | "base": "/", 5 | "name": "Knotie-AI Documentation", 6 | "icon": "info", 7 | "search": true, 8 | "sections": [ 9 | { 10 | "label": "Introduction", 11 | "file": "index.md" 12 | }, 13 | { 14 | "label": "Self-Hosting Guide", 15 | "icon": "server", 16 | "sections": [ 17 | { 18 | "label": "Prerequisites", 19 | "file": "Self-Hosting/prerequisites.md" 20 | }, 21 | { 22 | "label": "Configuration & Secret Management", 23 | "file": "Self-Hosting/installation.md" 24 | }, 25 | { 26 | "label": "Configuration", 27 | "file": "Self-Hosting/configuration.md" 28 | }, 29 | { 30 | "label": "Deployment", 31 | "file": "Self-Hosting/deployment.md" 32 | }, 33 | { 34 | "label": "Setup", 35 | "file": "setup.md" 36 | } 37 | ] 38 | }, 39 | { 40 | "label": "Cloud Guide", 41 | "icon": "cloud", 42 | "sections": [ 43 | { 44 | "label": "Overview", 45 | "file": "Cloud-Hosting/index.md" 46 | }, 47 | { 48 | "label": "Setup", 49 | "file": "Cloud-Hosting/setup.md" 50 | }, 51 | { 52 | "label": "Scaling", 53 | "file": "Cloud-Hosting/scaling.md" 54 | } 55 | ] 56 | }, 57 | { 58 | "label": "How-To Guide", 59 | "icon": "lightbulb", 60 | "sections": [ 61 | { 62 | "label": "Overview", 63 | "file": "How-To-Guide/index.md" 64 | }, 65 | { 66 | "label": "Usage", 67 | "file": "How-To-Guide/usage.md" 68 | }, 69 | { 70 | "label": "Troubleshooting", 71 | "file": "How-To-Guide/troubleshooting.md" 72 | } 73 | ] 74 | }, 75 | { 76 | "label": "Other Guides", 77 | "sections": [ 78 | { 79 | "label": "Testing", 80 | "file": "testing.md" 81 | }, 82 | { 83 | "label": "Configuration", 84 | "file": "configuration.md" 85 | }, 86 | { 87 | "label": "Contribution", 88 | "file": "contribution.md" 89 | }, 90 | { 91 | "label": "Further Improvements", 92 | "file": "improvements.md" 93 | }, 94 | { 95 | "label": "Code of Conduct", 96 | "file": "CODE_OF_CONDUCT.md" 97 | } 98 | ] 99 | } 100 | ] 101 | } 102 | -------------------------------------------------------------------------------- /retype.yml: -------------------------------------------------------------------------------- 1 | input: . 
2 | output: .retype 3 | url: https://knotie-ai.pages.dev/ 4 | branding: 5 | title: Knotie-AI 6 | label: Docs 7 | links: 8 | - text: Getting Started 9 | link: https://kno2gether.com/knotie-ai-sales-agent/ 10 | footer: 11 | copyright: "© Copyright {{ year }}. All rights reserved." -------------------------------------------------------------------------------- /stages.py: -------------------------------------------------------------------------------- 1 | OUTBOUND_CONVERSATION_STAGES = { 2 | "1": "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are calling.", 3 | "2": "Qualification: Verify if the customer uses Salesforce. Ensure they are using Salesforce or plan on moving to Salesforce. Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.", 4 | "3": "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.", 5 | "4": "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.", 6 | "5": "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.", 7 | "6": "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.", 8 | "7": "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.", 9 | "8": "End conversation: It's time to end the call as there is nothing else to be said.", 10 | } 11 | 12 | INBOUND_CONVERSATION_STAGES = { 13 | "1": "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are calling.", 14 | "2": "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.", 15 | "3": "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.", 16 | "4": "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points. If you are offering a meeting or appointment, make sure to check the availability first and confirm with the customer.", 17 | "5": "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.", 18 | "6": "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Use tools to find out the next available meeting time, confirm with the user. 
Ensure to summarize what has been discussed and reiterate the benefits.", 19 | "7": "End conversation: It's time to end the call as there is nothing else to be said.", 20 | } 21 | -------------------------------------------------------------------------------- /templates/admin.html: -------------------------------------------------------------------------------- [HTML markup not recoverable: this ~284-line template was stripped to bare line numbers during extraction. Surviving text: page title "Knotie AI Admin" and a "Configuration" settings heading.] -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- [HTML markup not recoverable: this ~110-line template was stripped to bare line numbers during extraction. Surviving text: page title and heading "Knotie AI".]
-------------------------------------------------------------------------------- /testAISalesAgent.py: -------------------------------------------------------------------------------- 1 | from ai_helpers import process_initial_message, process_message, initiate_inbound_message 2 | from appUtils import clean_response, save_message_history 3 | import uuid 4 | import redis 5 | import time 6 | import json 7 | import logging 8 | 9 | # Initial customer details 10 | customer_name = "John Doe" 11 | customer_problem = "Looking for a gym membership to address back pain" 12 | unique_id = str(uuid.uuid4()) 13 | 14 | logging.basicConfig(level=logging.INFO) 15 | logger = logging.getLogger(__name__) 16 | 17 | # # Initialize Message History array to save the conversation 18 | message_history = [] 19 | 20 | redis_client = redis.Redis(host='redis', port=6379, db=0, decode_responses=True) 21 | initial_transcript = "Customer Name:" + customer_name + ". Customer filled up details in the website:" + customer_problem 22 | message_history.append({"role": "user", "content": initial_transcript}) 23 | 24 | 25 | # # Generate initial message 26 | initial_response = process_initial_message(unique_id, customer_name, customer_problem) 27 | print("Initial Response:", initial_response) 28 | initial_message = clean_response(initial_response) 29 | message_history.append({"role": "assistant", "content": initial_message}) 30 | save_message_history(unique_id, message_history) 31 | 32 | # # Display initial AI response 33 | print("Assistant Response:", initial_response) 34 | 35 | while True: 36 | user_input = input("Your response: ") 37 | message_history.append({"role": "user", "content": user_input}) 38 | start_time = time.time() 39 | # Generate the assistant's response based on user input and message history 40 | response_text = process_message(unique_id, message_history, user_input) 41 | assistant_response = clean_response(response_text) 42 | 43 | # Append the assistant's response to the message history 44 | message_history.append({"role": "assistant", "content": assistant_response}) 45 | 46 | # Check if the current assistant response contains 'END_OF_CALL' 47 | if "END_OF_CALL" in assistant_response: 48 | print("Assistant Response:", assistant_response) 49 | print("The conversation has ended.") 50 | break 51 | # Print the assistant's response 52 | print("Assistant Response:", assistant_response) 53 | end_time = time.time() 54 | elapsed_time_ms = (end_time - start_time) * 1000 55 | logger.info(f"Elapsed latency by AI Model for producing response: {elapsed_time_ms:.2f} ms") 56 | -------------------------------------------------------------------------------- /testConversationCache.py: -------------------------------------------------------------------------------- 1 | from ConversationCache import CacheManager 2 | 3 | conversation_cache = CacheManager() 4 | 5 | if conversation_cache.get("Test") is None: 6 | conversation_cache.put("Test", "Yes is test") 7 | if conversation_cache.get("Testing") is None: 8 | conversation_cache.put("Testing", "Is that testing?") 9 | if conversation_cache.get("Testing of telecom") is None: 10 | conversation_cache.put("Testing of telecom", "Is that testing?") 11 | 12 | 13 | print(conversation_cache.search("Test")) 14 | print(conversation_cache.search("Testing")) 15 | print(conversation_cache.search("Testing of telecom")) 16 | print(conversation_cache.search("Testing telecom")) 17 | print(conversation_cache.search("Testing an telecom")) 18 | print(conversation_cache.search("Testing the 
telecom")) 19 | print(conversation_cache.search("Test the telecom")) 20 | print(conversation_cache.search("about telecom")) 21 | -------------------------------------------------------------------------------- /tools.py: -------------------------------------------------------------------------------- 1 | # This file is not maintained anymore. New way to adding custom tool is added now !! 2 | import requests 3 | from langchain.tools import tool, BaseTool, StructuredTool 4 | from pydantic import BaseModel 5 | 6 | tools_info = { 7 | "MeetingScheduler": { 8 | "name": "MeetingScheduler", 9 | "description": "Books meetings with clients." 10 | }, 11 | "GymAppointmentAvailability": { 12 | "name": "GymAppointmentAvailability", 13 | "description": "Assistants must Check next appointment available in Gym and confirm with customer, before offering appointment to customer" 14 | }, 15 | "OnsiteAppointment": { 16 | "name": "OnsiteAppointment", 17 | "description": "Book Onsite Gym Appointment as per user's availability." 18 | }, 19 | "PriceInquiry": { 20 | "name": "PriceInquiry", 21 | "description": "Fetches product prices of Gym memberships.", 22 | "parameters": { 23 | "product_name": ["Silver-Gym-Membership", "Gold-Gym-Membership", "Platinum-Gym-Membership"] 24 | } 25 | } 26 | } 27 | 28 | #### Manual Tools/Method Implementation using Python functions #### 29 | 30 | # def OnsiteAppointmentTool(): 31 | # # Assume arguments is a dict that contains date and time 32 | # print('Onsite Appointment function is called') 33 | # return f"Onsite Appointment is booked." 34 | 35 | # def FetchProductPriceTool(membership_type): 36 | # print('Fetch product price is called') 37 | 38 | # # Set up the endpoint and headers 39 | # url = 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' 40 | # headers = {'Content-Type': 'application/json'} 41 | 42 | # # Prepare the data payload with the membership type 43 | # data = { 44 | # "membership": membership_type 45 | # } 46 | 47 | # # Send a POST request to the server 48 | # response = requests.post(url, headers=headers, json=data) 49 | 50 | # # Check if the request was successful 51 | # if response.status_code == 200: 52 | # # Parse the JSON response to get the price 53 | # price_info = response.json() 54 | # return f"The price is ${price_info['price']} per month." 55 | # else: 56 | # return "Failed to fetch the price, please try again later." 57 | 58 | # def CalendlyMeetingTool(): 59 | # print('Calendly Meeting invite is sent.') 60 | # # Assume arguments is a dict that contains date and time 61 | # return f"Calendly meeting invite is sent now." 62 | 63 | # def AppointmentAvailabilityTool(): 64 | # print('Checking appointment availability.') 65 | # # Assume arguments is a dict that contains date and time 66 | # return f"Our next available appointment is tomorrow, 24th April at 4 PM." 67 | 68 | #### Define Langchain Tools/Method Implementation using tool decorator #### 69 | 70 | # @tool 71 | # def OnsiteAppointmentTool(): 72 | # """ 73 | # Book an onsite gym appointment. 74 | # """ 75 | # print('Onsite Appointment function is called') 76 | # return f"Onsite Appointment is booked." 77 | 78 | # @tool 79 | # def FetchProductPriceTool(membership_type: str) -> str: 80 | # """ 81 | # Fetch the price of a gym membership. 82 | 83 | # Args: 84 | # membership_type (str): The type of membership (e.g., Silver-Gym-Membership, Gold-Gym-Membership, Platinum-Gym-Membership). 85 | 86 | # Returns: 87 | # str: The price of the membership. 
88 | # """ 89 | # print('Fetch product price is called') 90 | 91 | # # Set up the endpoint and headers 92 | # url = 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' 93 | # headers = {'Content-Type': 'application/json'} 94 | 95 | # # Prepare the data payload with the membership type 96 | # data = { 97 | # "membership": membership_type 98 | # } 99 | 100 | # # Send a POST request to the server 101 | # response = requests.post(url, headers=headers, json=data) 102 | 103 | # # Check if the request was successful 104 | # if response.status_code == 200: 105 | # # Parse the JSON response to get the price 106 | # price_info = response.json() 107 | # return f"The price is ${price_info['price']} per month." 108 | # else: 109 | # return "Failed to fetch the price, please try again later." 110 | 111 | # @tool 112 | # def CalendlyMeetingTool(): 113 | # """ 114 | # Send a Calendly meeting invite. 115 | # """ 116 | # print('Calendly Meeting invite is sent.') 117 | # return f"Calendly meeting invite is sent now." 118 | 119 | # @tool 120 | # def AppointmentAvailabilityTool(): 121 | # """ 122 | # Check the next available appointment at the gym. 123 | # """ 124 | # print('Checking appointment availability.') 125 | # return f"Our next available appointment is tomorrow, 24th April at 4 PM." 126 | 127 | 128 | #### Define Langchain Tools/Method Implementation using Basetool Implementation for more flexibility and control. #### 129 | 130 | class OnsiteAppointmentTool(BaseTool): 131 | name = "OnsiteAppointment" 132 | description = tools_info["OnsiteAppointment"]["description"] 133 | 134 | def _run(self) -> str: 135 | print('Onsite Appointment function is called') 136 | return f"Onsite Appointment is booked." 137 | 138 | 139 | 140 | 141 | class FetchProductPriceTool(BaseTool): 142 | name = "PriceInquiry" 143 | description = tools_info["PriceInquiry"]["description"] 144 | 145 | def _run(self, membership_type: str) -> str: 146 | print('Fetch product price is called') 147 | 148 | # Set up the endpoint and headers 149 | url = 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' 150 | headers = {'Content-Type': 'application/json'} 151 | 152 | # Prepare the data payload with the membership type 153 | data = { 154 | "membership": membership_type 155 | } 156 | 157 | # Send a POST request to the server 158 | response = requests.post(url, headers=headers, json=data) 159 | 160 | # Check if the request was successful 161 | if response.status_code == 200: 162 | # Parse the JSON response to get the price 163 | price_info = response.json() 164 | return f"The price is ${price_info['price']} per month." 165 | else: 166 | return "Failed to fetch the price, please try again later." 167 | 168 | class CalendlyMeetingTool(BaseTool): 169 | name = "MeetingScheduler" 170 | description = tools_info["MeetingScheduler"]["description"] 171 | 172 | def _run(self) -> str: 173 | print('Calendly Meeting invite is sent.') 174 | return f"Calendly meeting invite is sent now." 175 | 176 | class AppointmentAvailabilityTool(BaseTool): 177 | name = "GymAppointmentAvailability" 178 | description = tools_info["GymAppointmentAvailability"]["description"] 179 | 180 | def _run(self) -> str: 181 | print('Checking appointment availability.') 182 | return f"Our next available appointment is tomorrow, 24th April at 4 PM." 183 | 184 | 185 | #### Define Langchain Tools/Method Implementation using StructuredTool Implementation for more structured input and output with validation and type safety. 
THIS IS NOT INTEGRATED WITH THE CODE YET #### 186 | 187 | # class FetchProductPriceInput(BaseModel): 188 | # membership_type: str 189 | 190 | # class OnsiteAppointmentTool(StructuredTool): 191 | # name = "OnsiteAppointment" 192 | # description = tools_info["OnsiteAppointment"]["description"] 193 | 194 | # def _run(self) -> str: 195 | # print('Onsite Appointment function is called') 196 | # return f"Onsite Appointment is booked." 197 | 198 | # class FetchProductPriceTool(StructuredTool): 199 | # name = "PriceInquiry" 200 | # description = tools_info["PriceInquiry"]["description"] 201 | # args_schema = FetchProductPriceInput 202 | 203 | # def _run(self, membership_type: str) -> str: 204 | # print('Fetch product price is called') 205 | 206 | # # Set up the endpoint and headers 207 | # url = 'https://kno2getherworkflow.ddns.net/webhook/fetchMemberShip' 208 | # headers = {'Content-Type': 'application/json'} 209 | 210 | # # Prepare the data payload with the membership type 211 | # data = { 212 | # "membership": membership_type 213 | # } 214 | 215 | # # Send a POST request to the server 216 | # response = requests.post(url, headers=headers, json=data) 217 | 218 | # # Check if the request was successful 219 | # if response.status_code == 200: 220 | # # Parse the JSON response to get the price 221 | # price_info = response.json() 222 | # return f"The price is ${price_info['price']} per month." 223 | # else: 224 | # return "Failed to fetch the price, please try again later." 225 | 226 | # class CalendlyMeetingTool(StructuredTool): 227 | # name = "MeetingScheduler" 228 | # description = tools_info["MeetingScheduler"]["description"] 229 | 230 | # def _run(self) -> str: 231 | # print('Calendly Meeting invite is sent.') 232 | # return f"Calendly meeting invite is sent now." 233 | 234 | # class AppointmentAvailabilityTool(StructuredTool): 235 | # name = "GymAppointmentAvailability" 236 | # description = tools_info["GymAppointmentAvailability"]["description"] 237 | 238 | # def _run(self) -> str: 239 | # print('Checking appointment availability.') 240 | # return f"Our next available appointment is tomorrow, 24th April at 4 PM." -------------------------------------------------------------------------------- /tools_helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | import json 4 | import sqlite3 5 | import logging 6 | import importlib 7 | import requests 8 | from config import Config 9 | from cryptography.fernet import Fernet 10 | import sys 11 | 12 | GENERATED_TOOLS_DIR = 'generated_tools/' 13 | logger = logging.getLogger(__name__) 14 | # Initialize tools dynamically 15 | initialized_tools = {} 16 | 17 | class EncryptionHelper: 18 | _cipher = None 19 | _cipher_key = None 20 | @classmethod 21 | def get_cipher(cls): 22 | # Reinitialize the cipher when the encryption key changes; Fernet exposes no public key attribute, so track the key separately 23 | if cls._cipher is None or cls._cipher_key != Config.ENCRYPTION_KEY: 24 | if Config.ENCRYPTION_KEY: 25 | cls._cipher, cls._cipher_key = Fernet(Config.ENCRYPTION_KEY), Config.ENCRYPTION_KEY 26 | else: 27 | raise ValueError("ENCRYPTION_KEY is not set or is invalid. 
Cannot initialize encryption components.") 28 | return cls._cipher 29 | 30 | @classmethod 31 | def encrypt_data(cls, data): 32 | cipher = cls.get_cipher() 33 | return cipher.encrypt(data.encode()).decode() 34 | 35 | @classmethod 36 | def decrypt_data(cls, data): 37 | cipher = cls.get_cipher() 38 | return cipher.decrypt(data.encode()).decode() 39 | 40 | # Tool Initialization Functions 41 | 42 | def fetch_tools_from_db(): 43 | if not os.path.exists('knotie.db'): 44 | logger.warning("Database file 'knotie.db' does not exist.") 45 | return [] 46 | 47 | try: 48 | conn = sqlite3.connect('knotie.db') 49 | cursor = conn.cursor() 50 | cursor.execute('SELECT name, description, class_name, openapi_spec FROM tools') 51 | tools = cursor.fetchall() 52 | except sqlite3.OperationalError as e: 53 | logger.warning(f"Database error: {e}") 54 | return [] 55 | finally: 56 | conn.close() 57 | 58 | return [{"name": row[0], "description": row[1], "class_name": row[2], "openapi_spec": row[3]} for row in tools] 59 | 60 | def find_client_module(tool_name): 61 | """ Dynamically find the client module based on the tool's directory structure. """ 62 | tool_dir = os.path.join(GENERATED_TOOLS_DIR, tool_name) 63 | 64 | for root, dirs, files in os.walk(tool_dir): 65 | if 'client.py' in files: 66 | relative_path = os.path.relpath(root, GENERATED_TOOLS_DIR) 67 | module_path = relative_path.replace(os.path.sep, '.') 68 | return module_path + '.client' 69 | 70 | return None 71 | 72 | def extract_base_url_from_openapi_file(openapi_spec_path): 73 | """ Extract the base URL from the OpenAPI spec file. """ 74 | try: 75 | with open(openapi_spec_path, 'r') as file: 76 | if openapi_spec_path.endswith('.json'): 77 | openapi_spec = json.load(file) 78 | elif openapi_spec_path.endswith('.yaml') or openapi_spec_path.endswith('.yml'): 79 | openapi_spec = yaml.safe_load(file) 80 | else: 81 | logger.error(f"Unsupported file format for OpenAPI spec: {openapi_spec_path}") 82 | return None 83 | except Exception as e: 84 | logger.error(f"Error reading OpenAPI spec file at {openapi_spec_path}: {e}") 85 | return None 86 | 87 | servers = openapi_spec.get('servers', []) 88 | if servers: 89 | return servers[0].get('url') 90 | return None 91 | 92 | def load_openapi_spec(openapi_spec_path): 93 | """Load and validate the OpenAPI spec from a file.""" 94 | try: 95 | with open(openapi_spec_path, 'r') as file: 96 | spec_content = file.read() 97 | spec = yaml.safe_load(spec_content) 98 | 99 | if not isinstance(spec, dict): 100 | logging.error("The loaded OpenAPI spec is not a dictionary. 
Check the file content.") 101 | return None 102 | 103 | return spec 104 | 105 | except FileNotFoundError: 106 | logging.error(f"OpenAPI spec file not found at path: {openapi_spec_path}") 107 | return None 108 | except yaml.YAMLError as e: 109 | logging.error(f"Error parsing YAML file: {e}") 110 | return None 111 | except Exception as e: 112 | logging.error(f"Unexpected error while loading OpenAPI spec: {e}") 113 | return None 114 | 115 | def extract_operation_ids_from_openapi(openapi_spec): 116 | """Extract operation IDs from the OpenAPI spec.""" 117 | try: 118 | # Load the spec if it's a file path, otherwise assume it's a dictionary 119 | if isinstance(openapi_spec, str): 120 | logging.info(f"Loading OpenAPI spec from file: {openapi_spec}") 121 | spec = load_openapi_spec(openapi_spec) 122 | if spec is None: 123 | return {} 124 | elif isinstance(openapi_spec, dict): 125 | spec = openapi_spec 126 | else: 127 | logging.error("The OpenAPI spec is not a valid string or dictionary.") 128 | return {} 129 | 130 | operation_ids = {} 131 | 132 | paths = spec.get('paths') 133 | if not isinstance(paths, dict): 134 | logging.error("'paths' is not a dictionary.") 135 | return {} 136 | 137 | for path, path_item in paths.items(): 138 | if not isinstance(path_item, dict): 139 | logging.error(f"'path_item' for path {path} is not a dictionary.") 140 | continue 141 | 142 | for operation, operation_item in path_item.items(): 143 | if not isinstance(operation_item, dict): 144 | logging.error(f"'operation_item' for operation {operation} in path {path} is not a dictionary.") 145 | continue 146 | 147 | operation_id = operation_item.get('operationId') 148 | if operation_id: 149 | method = operation.upper() if isinstance(operation, str) else None 150 | url = extract_url(spec, path) 151 | parameters = extract_parameters_from_operation(operation_item) 152 | 153 | if method and url: 154 | operation_ids[operation_id] = { 155 | 'method': method, 156 | 'url': url, 157 | 'parameters': parameters 158 | } 159 | 160 | return operation_ids 161 | 162 | except Exception as e: 163 | logging.error(f"An error occurred while extracting operation IDs: {e}") 164 | return {} 165 | 166 | def extract_url(spec, path): 167 | """Extract URL from the spec and path.""" 168 | try: 169 | servers = spec.get('servers') 170 | if isinstance(servers, list) and servers: 171 | server_url = servers[0].get('url') 172 | if isinstance(server_url, str): 173 | return server_url + path 174 | logging.error("Failed to extract the base URL from the spec.") 175 | return None 176 | except Exception as e: 177 | logging.error(f"Error extracting URL: {e}") 178 | return None 179 | 180 | def extract_parameters_from_operation(operation_item): 181 | """Extract parameters from the operation item in the OpenAPI spec.""" 182 | parameters = { 183 | 'query': {}, 184 | 'header': {}, 185 | 'path': {}, 186 | 'body': {} 187 | } 188 | 189 | try: 190 | for param in operation_item.get('parameters', []): 191 | param_name = param.get('name') 192 | param_in = param.get('in') 193 | param_schema = param.get('schema', {}) 194 | is_required = param.get('required', False) 195 | 196 | if param_in in parameters and param_name: 197 | parameters[param_in][param_name] = { 198 | 'type': param_schema.get('type', 'string'), 199 | 'enum': param_schema.get('enum', []), 200 | 'required': is_required 201 | } 202 | 203 | request_body = operation_item.get('requestBody') 204 | if isinstance(request_body, dict): 205 | content = request_body.get('content', {}) 206 | for media_type, media_item in 
content.items(): 207 | schema = media_item.get('schema', {}) 208 | required_fields = schema.get('required', []) 209 | if schema.get('type') == 'object': 210 | for prop_name, prop_schema in schema.get('properties', {}).items(): 211 | is_required = prop_name in required_fields 212 | parameters['body'][prop_name] = { 213 | 'type': prop_schema.get('type', 'string'), 214 | 'enum': prop_schema.get('enum', []), 215 | 'required': is_required 216 | } 217 | 218 | except Exception as e: 219 | logging.error(f"An error occurred while extracting parameters: {e}") 220 | 221 | return parameters 222 | 223 | 224 | def initialize_tools(): 225 | tools_from_db = fetch_tools_from_db() 226 | logger.info(f"step 1") 227 | for tool in tools_from_db: 228 | tool_name = tool["name"] 229 | openapi_spec_path = tool.get("openapi_spec") 230 | 231 | if not openapi_spec_path or not os.path.exists(openapi_spec_path): 232 | logger.error(f"OpenAPI spec file not found for tool {tool_name} at path: {openapi_spec_path}") 233 | continue 234 | 235 | client_module = find_client_module(tool_name) 236 | logger.info(f"step 2") 237 | if client_module is None: 238 | logger.error(f"Client module not found for tool {tool_name}") 239 | continue 240 | 241 | try: 242 | base_url = extract_base_url_from_openapi_file(openapi_spec_path) 243 | logger.info(f"step 3") 244 | operation_ids = extract_operation_ids_from_openapi(openapi_spec_path) 245 | logger.info(f"step 4") 246 | if not base_url: 247 | logger.error(f"Base URL not found in the OpenAPI spec for tool {tool_name}") 248 | continue 249 | 250 | sys.path.append(os.path.join(GENERATED_TOOLS_DIR, tool_name)) 251 | module = importlib.import_module(client_module) 252 | logger.info(f"step 5") 253 | ToolClass = getattr(module, 'Client') 254 | 255 | initialized_tools[tool["name"]] = { 256 | 'client': ToolClass(base_url=base_url), 257 | 'operations': operation_ids 258 | } 259 | logger.info(f"step 6") 260 | logger.info(f"tool initialized successfully with operations: {operation_ids.keys()}") 261 | print(f"tool initialized successfully with operations: {operation_ids.keys()}") 262 | except ModuleNotFoundError as e: 263 | logger.error(f"Error initializing tool {tool_name}: {e}") 264 | except AttributeError as e: 265 | logger.error(f"Error finding class or method in module {tool_name}: {e}") 266 | except Exception as e: 267 | logger.error(f"Unexpected error initializing tool {tool_name}: {e}") 268 | 269 | # AI Processing and Tool Usage 270 | 271 | def get_tool_and_spec(tool_name): 272 | conn = sqlite3.connect('knotie.db') 273 | cursor = conn.cursor() 274 | cursor.execute('SELECT name, description, class_name, openapi_spec, sensitive_headers, sensitive_body FROM tools WHERE name = ?', (tool_name,)) 275 | tool = cursor.fetchone() 276 | conn.close() 277 | 278 | if tool: 279 | return { 280 | "name": tool[0], 281 | "description": tool[1], 282 | "class_name": tool[2], 283 | "openapi_spec": tool[3], 284 | "sensitive_headers": EncryptionHelper.decrypt_data(tool[4]) if tool[4] else None, 285 | "sensitive_body": EncryptionHelper.decrypt_data(tool[5]) if tool[5] else None 286 | } 287 | else: 288 | raise ValueError(f"Tool with name {tool_name} not found") 289 | 290 | 291 | def get_api_info_from_openapi(spec, operation_id): 292 | for path, path_item in spec['paths'].items(): 293 | for operation, operation_item in path_item.items(): 294 | if 'operationId' in operation_item and operation_item['operationId'] == operation_id: 295 | method = operation.upper() 296 | url = spec['servers'][0]['url'] + path 297 | parameters = 
extract_parameters_from_operation(operation_item) 298 | return method, url, parameters 299 | 300 | raise ValueError(f"Operation ID {operation_id} not found in OpenAPI spec") 301 | 302 | 303 | def call_api(tool_name, tool_parameters, operation_id, tool_headers, tool_body_parameters): 304 | logger.info(f"Calling tool: {tool_name}") 305 | print(f"Calling tool: {tool_name}") 306 | 307 | # Step 1: Get the tool and its OpenAPI spec 308 | tool_info = get_tool_and_spec(tool_name) 309 | openapi_spec = tool_info['openapi_spec'] 310 | 311 | # Ensure the OpenAPI spec is loaded correctly as a dictionary 312 | if isinstance(openapi_spec, str): 313 | spec = load_openapi_spec(openapi_spec) 314 | else: 315 | spec = openapi_spec 316 | 317 | if not isinstance(spec, dict): 318 | raise ValueError("Invalid OpenAPI spec format. Expected a dictionary.") 319 | 320 | # Step 2: Extract the relevant API information 321 | method, url, parameters = get_api_info_from_openapi(spec, operation_id) 322 | 323 | query_params = {} 324 | headers = tool_headers if tool_headers else {} 325 | body = tool_body_parameters if tool_body_parameters else {} 326 | 327 | # Assign parameters from tool_body_parameters to the body 328 | for param_name, param_value in tool_body_parameters.items(): 329 | if param_name in parameters['body']: 330 | body[param_name] = param_value 331 | 332 | # Assign header parameters from tool_headers to the headers 333 | for param_name, param_value in tool_headers.items(): 334 | if param_name in parameters['header']: 335 | headers[param_name] = param_value 336 | 337 | # Assign query parameters from tool_parameters to the query_params 338 | for param_name, param_value in tool_parameters.items(): 339 | if param_name in parameters['query']: 340 | query_params[param_name] = param_value 341 | 342 | # Step 3: Replace sensitive values in headers and body 343 | headers, body = replace_sensitive_values(headers, body, tool_name) 344 | 345 | # Debugging: Log the request details 346 | logger.info(f"Request Method: {method}") 347 | logger.info(f"Request URL: {url}") 348 | logger.info(f"Request Headers: {headers}") 349 | logger.info(f"Request Query Params: {query_params}") 350 | logger.info(f"Request Body: {json.dumps(body)}") # Convert body to JSON string for logging 351 | 352 | # Step 4: Send the API request 353 | try: 354 | if method == 'GET': 355 | logger.debug('Sending GET request') 356 | response = requests.get(url, params=query_params, headers=headers) 357 | elif method == 'POST': 358 | logger.debug(f'Sending POST request to URL: {url}, with Body: {json.dumps(body)}, Params: {query_params}, Headers: {headers}') 359 | response = requests.post(url, json=body, params=query_params, headers=headers) 360 | # Add other HTTP methods as needed 361 | else: 362 | raise ValueError(f"Unsupported HTTP method: {method}") 363 | 364 | # Step 5: Handle the response 365 | if response.status_code == 200: 366 | return response.json() # Assuming the response is in JSON format 367 | else: 368 | logger.error(f"API call failed with status code {response.status_code}: {response.text}") 369 | return {"error": f"API call failed with status code {response.status_code}"} 370 | except Exception as e: 371 | logger.error(f"Error during API call: {e}") 372 | return {"error": str(e)} 373 | 374 | def replace_sensitive_values(headers, body_parameters, tool_name): 375 | tool_info = get_tool_and_spec(tool_name) 376 | 377 | # Handle None values by providing an empty string or JSON object 378 | sensitive_headers = 
json.loads(tool_info.get('sensitive_headers', '{}') or '{}') 379 | sensitive_body = json.loads(tool_info.get('sensitive_body', '{}') or '{}') 380 | 381 | # Replace sensitive headers 382 | for key, value in headers.items(): 383 | if value == "sensitive_value" and key in sensitive_headers: 384 | headers[key] = sensitive_headers[key] 385 | 386 | # Replace sensitive body parameters 387 | for key, value in body_parameters.items(): 388 | if value == "sensitive_value" and key in sensitive_body: 389 | body_parameters[key] = sensitive_body[key] 390 | 391 | return headers, body_parameters 392 | --------------------------------------------------------------------------------
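For illustration only, here is a minimal sketch (not present in the repository) of how the structured tool-call JSON produced by the prompt examples in prompts.py might be handed to call_api from tools_helper.py. It assumes a tool named "PriceInquiry" exposing a "fetchMembershipPrice" operation has already been registered in the knotie.db tools table with a valid OpenAPI spec; those names are taken from Example 1 above and may not exist in a given deployment.

import json
from tools_helper import call_api

# Structured decision in the shape the model is prompted to produce (mirrors Example 1 in prompts.py).
# In the running app this would come from the model's response rather than a hard-coded string.
ai_decision = json.loads("""
{
  "conversation_stage_id": 3,
  "tool_required": "yes",
  "tool_name": "PriceInquiry",
  "operation_id": "fetchMembershipPrice",
  "tool_headers": {"Authorization": "sensitive_value"},
  "tool_parameters": {},
  "tool_body_parameters": {"membership": "Silver-Gym-Membership"}
}
""")

if ai_decision.get("tool_required") == "yes":
    # call_api resolves the HTTP method and URL from the tool's OpenAPI spec and
    # swaps "sensitive_value" placeholders for the secrets stored in knotie.db.
    result = call_api(
        tool_name=ai_decision["tool_name"],
        tool_parameters=ai_decision["tool_parameters"],
        operation_id=ai_decision["operation_id"],
        tool_headers=ai_decision["tool_headers"],
        tool_body_parameters=ai_decision["tool_body_parameters"],
    )
    print(result)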