├── .gitignore ├── Code-Interpreter Functionality ├── Codeexcel.py └── salessheet.xlsx ├── Function-Calling Functionality ├── AirTableFC.py └── Findleads.py ├── README.md ├── Retrieval Functionality ├── Sales.pdf └── salescall.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /Code-Interpreter Functionality/Codeexcel.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import json 3 | 4 | excel_file = 'salessheet.xlsx' 5 | data = pd.read_excel(excel_file, sheet_name='Sales Data', header=3) 6 | 7 | # Convert DataFrame to JSON string 8 | json_str = data.to_json(orient='records') 9 | 10 | # Convert JSON string to Python object (list of dictionaries) 11 | data_list = json.loads(json_str) 12 | 13 | # Create a mapping based on the first row (header row) 14 | header_mapping = data_list[0] 15 | key_mapping = {f'Unnamed: {i}': header_mapping[f'Unnamed: {i}'] for i in range(len(header_mapping)) if header_mapping[f'Unnamed: {i}'] is not None} 16 | 17 | # Cleaning the data and applying dynamic mapping 18 | cleaned_data = [] 19 | for item in data_list[1:]: # Skip the first row as it's used for mapping 20 | cleaned_item = {} 21 | for key, value in item.items(): 22 | # Apply mapping and clean key 23 | new_key = key_mapping.get(key, key) 24 | if new_key is not None and value is not None: # Exclude keys and values that are None 25 | cleaned_item[new_key.replace('Unnamed: ', '')] = value 26 | cleaned_data.append(cleaned_item) 27 | 28 | 29 | json_data = json.dumps(cleaned_data, indent=4) 30 | print(json_data) 31 | # Optionally, write this list of dictionaries to a text file 32 | with open('output.txt', 'w') as file: 33 | file.write(json_data) 34 | 35 | 36 | 37 | import os 38 | import openai 39 | import json 40 | from dotenv import load_dotenv 41 | load_dotenv() 42 | 43 | 44 | # OpenAI API Key 45 | api_key = os.environ.get("OPENAI_API_KEY") 46 | 47 | 48 | 49 | # Initialize the client 50 | client = openai.Client() 51 | 52 | 53 | 54 | #if you dont specfiy the assitant seems openai will keep creating assitants 55 | def create_assistants(): 56 | assistant_file_id="assitant_id3.txt" 57 | if os.path.exists(assistant_file_id): 58 | with open(assistant_file_id, "r") as file: 59 | assistant_id=file.read().strip() 60 | else: 61 | print("Creating an Assitant....") 62 | # Upload a file with an "assistants" purpose 63 | file = client.files.create( 64 | file=open("output.txt", "rb"), 65 | purpose='assistants' 66 | ) 67 | assistant = client.beta.assistants.create( 68 | name= "Data Analyst Sales Assistant", 69 | instructions="then follow these steps -Reading the JSON File: Utilize Python, specifically the json module, to open and parse the data from the text file. Employ the open() function to access the file, and json.load() to convert the contents into a Python dictionary or list, depending on the JSON structure.Understanding Data Structure: The JSON data encompasses fields like 'Deal Orders', 'Company', 'Status', 'Contact', along with financial metrics and other relevant details. Acquaint yourself with these key data points for accurate responses.Data Analysis and Insights: Provide comprehensive summaries, highlight key trends, and deduce insights from the data, including total sales figures, average deal sizes, status distributions, and more.Visualization of Data: Craft and present visual charts like bar graphs and pie charts to depict sales trends and patterns. Ensure these visualizations are both accurate and user-friendly, offering clear insights at a glance.Query Handling: Efficiently respond to a range of queries related to the sales data. This includes providing specific deal details, comparing time periods, and summarizing overall sales performance.Maintaining Data Accuracy: Always ensure the information you deliver is precise and reflects the most current data available in the text file.Your role is crucial in aiding users to comprehend our sales data thoroughly, enabling them to make well-informed decisions based on this insightful analysis. If it just requires you to answer basic questions based in the data given and doesnt need code to answer just use retrival fucntion", 70 | tools=[{"type": "code_interpreter"},{"type": "retrieval"},], 71 | model="gpt-4-1106-preview", 72 | file_ids=[file.id] 73 | ) 74 | #write new assistant_id to a file 75 | assistant_id= assistant.id 76 | with open(assistant_file_id, "w") as file: 77 | file.write(assistant_id) 78 | print(f"Assitant created with ID: {assistant_id}") 79 | 80 | #retrieve the assitant_id 81 | assistant = client.beta.assistants.retrieve(assistant_id) 82 | print(f"Assistant created with ID: {assistant_id}") 83 | 84 | #step 2: Create a thread 85 | print("Creating a Thread for a new user conversation.....") 86 | thread = client.beta.threads.create() 87 | print(f"Thread created with ID: {thread.id}") 88 | 89 | #step add a message to the thread 90 | user_message="create me a bar graph measuring the expensives of each deal in our data file" 91 | print(f"Adding user's message to the Thread: {user_message}") 92 | message = client.beta.threads.messages.create( 93 | thread_id=thread.id, 94 | role="user", 95 | content=user_message 96 | ) 97 | print("Message added to the Thread.") 98 | 99 | # Step 4: Run the Assistant 100 | print("Running the Assistant to generate a response...") 101 | run = client.beta.threads.runs.create( 102 | thread_id=thread.id, 103 | assistant_id=assistant.id, 104 | instructions="Please address the user respectfully. The user requires help with math." 105 | ) 106 | print(f"Run created with ID: {run.id} and status: {run.status}") 107 | 108 | # Step 5: Display the Assistant's Response 109 | # Poll the Run status until it's completed 110 | while True: 111 | run = client.beta.threads.runs.retrieve( 112 | thread_id=thread.id, 113 | run_id=run.id 114 | ) 115 | if run.status == 'completed': 116 | print("Run completed. Retrieving the Assistant's responses...") 117 | break 118 | print("Waiting for the Assistant to complete the run...") 119 | 120 | messages = client.beta.threads.messages.list( 121 | thread_id=thread.id 122 | ) 123 | with open("messages1.json", "w") as f: 124 | messages_json = messages.model_dump() 125 | json.dump(messages_json, f, indent=4) 126 | print("Displaying the conversation:") 127 | 128 | 129 | for msg in messages.data: 130 | role = msg.role 131 | for content_item in msg.content: 132 | if content_item.type == "text": 133 | text_content = content_item.text.value 134 | print(f"{role.capitalize()}: {text_content}") 135 | elif content_item.type == "image_file": 136 | image_file_id = content_item.image_file.file_id 137 | print(f"Image File ID: {image_file_id}") 138 | 139 | # Retrieve the image content using the client's method 140 | image_data = client.files.content(image_file_id) 141 | image_data_bytes = image_data.read() 142 | 143 | # Save the image data to a file 144 | with open(f"./image_{image_file_id}.png", "wb") as file: 145 | file.write(image_data_bytes) 146 | print(f"Image {image_file_id} saved as image_{image_file_id}.png") 147 | # msg in messages.data: 148 | # print(f"{msg.role.capitalize()}: {msg.content[0].text.value}") 149 | 150 | # save run steps to json file 151 | # run_steps = client.beta.threads.runs.steps.list( 152 | # thread_id=thread.id, 153 | # run_id=run.id 154 | # ) 155 | 156 | # with open("run_steps.json" , "w") as f: 157 | # run_steps_json = run_steps.model_dump() 158 | # json.dump(run_steps_json,f, indent=4) 159 | 160 | 161 | 162 | if __name__ == "__main__": 163 | create_assistants() 164 | -------------------------------------------------------------------------------- /Code-Interpreter Functionality/salessheet.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlozieAI/OpenAI---Assistant-API-Demo/f9e990639999b786c4ca4b56731888c0ea6ea32c/Code-Interpreter Functionality/salessheet.xlsx -------------------------------------------------------------------------------- /Function-Calling Functionality/AirTableFC.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | import requests 4 | import urllib.parse 5 | import json 6 | import time 7 | import os 8 | from dotenv import load_dotenv 9 | load_dotenv() 10 | 11 | 12 | def get_records_by_lead_name(lead_name: str): 13 | # Airtable settings 14 | base_id = os.environ.get("base_id") 15 | table_id = os.environ.get("table_id") 16 | api_key_airtable = os.environ.get("api_key_airtable") 17 | 18 | # Filter setup 19 | column_name = 'Leads Name' 20 | encoded_filter_formula = urllib.parse.quote(f"{{{column_name}}}='{lead_name}'") 21 | 22 | # Airtable API URL 23 | api_url = f"https://api.airtable.com/v0/{base_id}/{table_id}?filterByFormula={encoded_filter_formula}" 24 | 25 | # Headers for authentication 26 | headers = { 27 | 'Authorization': f'Bearer {api_key_airtable}' 28 | } 29 | 30 | # Make the API request 31 | response = requests.get(api_url, headers=headers) 32 | 33 | if response.status_code == 200: 34 | # Successful request 35 | records = response.json().get('records', []) 36 | return records 37 | else: 38 | # Failed request, log details 39 | error_info = response.text 40 | try: 41 | # Attempt to parse JSON error message 42 | error_info = json.loads(response.text) 43 | except json.JSONDecodeError: 44 | # Response is not JSON formatted 45 | pass 46 | 47 | 48 | print(f"Failed to retrieve data. Status Code: {response.status_code}. Response: {error_info}") 49 | return None 50 | 51 | 52 | # OpenAI API Key 53 | api_key = os.environ.get("OPENAI_API_KEY") 54 | 55 | 56 | 57 | tools_list = [{ 58 | "type": "function", 59 | "function": { 60 | 61 | "name": "get_records_by_lead_name", 62 | "description": "Retrieve infomation of the specfic lead like when's the next meeting with them, their email and etc", 63 | "parameters": { 64 | "type": "object", 65 | "properties": { 66 | "lead_name": { 67 | "type": "string", 68 | "description": "The lead's name" 69 | } 70 | }, 71 | "required": ["lead_name"] 72 | } 73 | } 74 | }] 75 | 76 | 77 | 78 | # Initialize the client 79 | client = openai.Client() 80 | 81 | 82 | 83 | #if you dont specfiy the assitant seems openai will keep creating assitants 84 | def create_assistants(): 85 | assistant_file_id="assitant_id1.txt" 86 | if os.path.exists(assistant_file_id): 87 | with open(assistant_file_id, "r") as file: 88 | assistant_id=file.read().strip() 89 | else: 90 | print("Creating an Assitant....") 91 | # Upload a file with an "assistants" purpose 92 | assistant = client.beta.assistants.create( 93 | name= "Airtable Func Call", 94 | instructions="You are a sale chatbot. Use the data you pull from our airtable database to answer question on our potential clients.", 95 | model="gpt-4-1106-preview", 96 | tools=tools_list 97 | ) 98 | #write new assistant_id to a file 99 | assistant_id= assistant.id 100 | with open(assistant_file_id, "w") as file: 101 | file.write(assistant_id) 102 | print(f"Assitant created with ID: {assistant_id}") 103 | 104 | #retrieve the assitant_id 105 | assistant = client.beta.assistants.retrieve(assistant_id) 106 | print(f"Assistant created with ID: {assistant_id}") 107 | 108 | #step 2: Create a thread 109 | print("Creating a Thread for a new user conversation.....") 110 | thread = client.beta.threads.create() 111 | print(f"Thread created with ID: {thread.id}") 112 | 113 | #step add a message to the thread 114 | user_message="What do we need to prepare for in our next meeting with David" 115 | print(f"Adding user's message to the Thread: {user_message}") 116 | message = client.beta.threads.messages.create( 117 | thread_id=thread.id, 118 | role="user", 119 | content=user_message 120 | ) 121 | print("Message added to the Thread.") 122 | 123 | # Step 4: Run the Assistant 124 | print("Running the Assistant to generate a response...") 125 | run = client.beta.threads.runs.create( 126 | thread_id=thread.id, 127 | assistant_id=assistant.id, 128 | instructions="You are a sale chatbot. Use the data you pull from our airtable database to answer question on our potential clients." 129 | ) 130 | print(f"Run created with ID: {run.id} and status: {run.status}") 131 | print(run.model_dump_json(indent=4)) 132 | 133 | # Step 5: Display the Assistant's Response 134 | # Poll the Run status until it's completed 135 | while True: 136 | # Wait for 5 seconds 137 | time.sleep(5) 138 | 139 | # Retrieve the run status 140 | run_status = client.beta.threads.runs.retrieve( 141 | thread_id=thread.id, 142 | run_id=run.id 143 | ) 144 | print(run_status.model_dump_json(indent=4)) 145 | 146 | # If run is completed, get messages 147 | if run_status.status == 'completed': 148 | messages = client.beta.threads.messages.list( 149 | thread_id=thread.id 150 | ) 151 | 152 | # Loop through messages and print content based on role 153 | for msg in messages.data: 154 | role = msg.role 155 | content = msg.content[0].text.value 156 | print(f"{role.capitalize()}: {content}") 157 | 158 | # save run steps to json file 159 | run_steps = client.beta.threads.runs.steps.list( 160 | thread_id=thread.id, 161 | run_id=run.id 162 | ) 163 | print(run_steps) 164 | 165 | break 166 | elif run_status.status == 'requires_action': 167 | print("Function Calling") 168 | required_actions = run_status.required_action.submit_tool_outputs.model_dump() 169 | print( "Run Required Action State") 170 | print(required_actions) 171 | tool_outputs = [] 172 | for action in required_actions["tool_calls"]: 173 | func_name = action['function']['name'] 174 | arguments = json.loads(action['function']['arguments']) 175 | 176 | if func_name == "get_records_by_lead_name": 177 | output = get_records_by_lead_name(lead_name=arguments['lead_name']) 178 | output_string = json.dumps(output) 179 | tool_outputs.append({ 180 | "tool_call_id": action['id'], 181 | "output": output_string 182 | }) 183 | else: 184 | raise ValueError(f"Unknown function: {func_name}") 185 | 186 | print("Tool Outputs") 187 | print(tool_outputs) 188 | print("Submitting outputs back to the Assistant...") 189 | client.beta.threads.runs.submit_tool_outputs( 190 | thread_id=thread.id, 191 | run_id=run.id, 192 | tool_outputs=tool_outputs 193 | ) 194 | else: 195 | print("Waiting for the Assistant to process...") 196 | time.sleep(5) 197 | 198 | 199 | 200 | 201 | if __name__ == "__main__": 202 | create_assistants() 203 | -------------------------------------------------------------------------------- /Function-Calling Functionality/Findleads.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | import requests 4 | import urllib.parse 5 | import json 6 | import time 7 | import re 8 | 9 | 10 | def json_to_readable_summary(json_data, indent_level=0, max_depth=2): 11 | """ 12 | Convert JSON data into a readable string format. 13 | :param json_data: The JSON data to convert. 14 | :param indent_level: Current indentation level for nested structures. 15 | :param max_depth: Maximum depth to traverse in nested structures. 16 | :return: A string representing the readable summary of the JSON data. 17 | """ 18 | summary = [] 19 | indent = " " * indent_level 20 | 21 | if isinstance(json_data, dict): 22 | for key, value in json_data.items(): 23 | if isinstance(value, (dict, list)) and indent_level < max_depth: 24 | summary.append(f"{indent}{key}:") 25 | summary.extend(json_to_readable_summary(value, indent_level + 1, max_depth)) 26 | else: 27 | summary.append(f"{indent}{key}: {value}") 28 | elif isinstance(json_data, list): 29 | for item in json_data: 30 | summary.extend(json_to_readable_summary(item, indent_level + 1, max_depth)) 31 | else: 32 | summary.append(f"{indent}{json_data}") 33 | 34 | return summary 35 | 36 | # Example usage 37 | company_data = { 38 | # ... (your JSON data) 39 | } 40 | 41 | readable_summary = json_to_readable_summary(company_data) 42 | print('\n'.join(readable_summary)) 43 | 44 | 45 | def search_linkedin_profile(name): 46 | api_key = "" 47 | search_engine_id = "" 48 | search_query = f"{name} linkedin" 49 | url = "https://www.googleapis.com/customsearch/v1" 50 | params = { 51 | "key": api_key, 52 | "cx": search_engine_id, 53 | "q": search_query , 54 | } 55 | 56 | url = f"https://www.googleapis.com/customsearch/v1?q={search_query}&cx={search_engine_id}&key={api_key}" 57 | url_pattern = re.compile(r'https://www\.linkedin\.com/in/[\w-]+/?$') 58 | url_pattern2 = re.compile(r'(https://www\.linkedin\.com/in/[\w-]+/?$|linkedin\.com/in/([\w-]+)/?)') 59 | 60 | response = requests.get(url) 61 | 62 | if response.status_code == 200: 63 | search_results = response.json() 64 | print("Search Results:") 65 | search_links = [item.get("link") for item in search_results.get("items", [])] 66 | valid_links = [link for link in search_links if url_pattern.match(link)] 67 | for link in search_links: 68 | print(link) 69 | 70 | for link in valid_links: 71 | print(link) 72 | # Check if the list of links is not empty 73 | if valid_links: 74 | # Assuming the first result is the desired one (you might need more logic here) 75 | linkedin_url = valid_links[0] 76 | elif search_links: 77 | for link in search_links: 78 | match = url_pattern2.search(link) 79 | if match: 80 | # Extract username and create standard LinkedIn URL 81 | username = match.group(2) if match.group(2) else match.group(1).split('/')[-1] 82 | linkedin_url = f"https://www.linkedin.com/in/{username}" 83 | break 84 | else: 85 | print("No LinkedIn profiles found in the search results.") 86 | linkedin_url = None 87 | else: 88 | print(f"Failed to retrieve search results. Status Code: {response.status_code}") 89 | linkedin_url = None 90 | print(linkedin_url) 91 | return linkedin_url 92 | 93 | 94 | def get_linkedin_profile_and_company_data(name): 95 | linkedin_url = search_linkedin_profile(name) 96 | print(linkedin_url) 97 | if not linkedin_url: 98 | print("LinkedIn profile not found.") 99 | return 100 | 101 | api_key = '' 102 | headers = {'Authorization': 'Bearer ' + api_key} 103 | 104 | # First API Call - LinkedIn Profile Data 105 | api_endpoint_1 = 'https://nubela.co/proxycurl/api/v2/linkedin' 106 | params_1 = { 107 | 'linkedin_profile_url': linkedin_url, 108 | 'extra': 'exclude', 109 | 'github_profile_id': 'exclude', 110 | 'facebook_profile_id': 'exclude', 111 | 'twitter_profile_id': 'exclude', 112 | 'personal_contact_number': 'exclude', 113 | 'personal_email': 'exclude', 114 | 'inferred_salary': 'exclude', 115 | 'skills': 'exclude', 116 | 'use_cache': 'if-present', 117 | 'fallback_to_cache': 'on-error', 118 | } 119 | response_1 = requests.get(api_endpoint_1, params=params_1, headers=headers) 120 | 121 | if response_1.status_code == 200: 122 | json_response_1 = response_1.json() 123 | readable_summary = json_to_readable_summary(json_response_1) 124 | occupation = json_response_1.get("occupation", "") 125 | # Initialize variables for current job details 126 | 127 | # Process the LinkedIn profile data... 128 | # Extract current job company URL for the second API call 129 | # Loop through experiences to find the current job 130 | for job in json_response_1.get("experiences", []): 131 | company_name = job.get("company", "") 132 | if company_name in occupation and job.get("ends_at") is None: 133 | current_job_company_url = job.get("company_linkedin_profile_url", "") 134 | 135 | if current_job_company_url: 136 | # Second API Call - LinkedIn Company Data 137 | api_endpoint_2 = 'https://nubela.co/proxycurl/api/linkedin/company' 138 | params_2 = { 139 | 'url': current_job_company_url, 140 | 'resolve_numeric_id': 'true', 141 | 'categories': 'include', 142 | 'funding_data': 'include', 143 | 'extra': 'include', 144 | 'exit_data': 'include', 145 | 'acquisitions': 'include', 146 | 'use_cache': 'if-present', 147 | } 148 | response_2 = requests.get(api_endpoint_2, params=params_2, headers=headers) 149 | 150 | 151 | if response_2.status_code == 200: 152 | json_response_2 = response_2.json() 153 | readable_summary_2 = json_to_readable_summary(json_response_2) 154 | # Process the LinkedIn company data... 155 | print("Company Data:", json_response_2) 156 | else: 157 | print("Failed to retrieve company data.") 158 | else: 159 | print("Failed to retrieve LinkedIn profile data.") 160 | 161 | 162 | print(readable_summary) 163 | print(readable_summary_2) 164 | return readable_summary, readable_summary_2 165 | OPENAI_API_TOKEN = "" 166 | # OpenAI API Key 167 | api_key = os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN 168 | 169 | tools_list = [{ 170 | "type": "function", 171 | "function": { 172 | 173 | "name": "get_linkedin_profile_and_company_data", 174 | "description": "Retrieve infomation of the specfic lead like company they worked for, what thier company does , company size , website and etc from scraping linkedin", 175 | "parameters": { 176 | "type": "object", 177 | "properties": { 178 | "name": { 179 | "type": "string", 180 | "description": "The lead's name" 181 | } 182 | }, 183 | "required": ["name"] 184 | } 185 | } 186 | }] 187 | 188 | 189 | 190 | # Initialize the client 191 | client = openai.Client() 192 | 193 | 194 | 195 | #if you dont specfiy the assitant seems openai will keep creating assitants 196 | def create_assistants(): 197 | assistant_file_id="assitant_id1.txt" 198 | if os.path.exists(assistant_file_id): 199 | with open(assistant_file_id, "r") as file: 200 | assistant_id=file.read().strip() 201 | else: 202 | print("Creating an Assitant....") 203 | # Upload a file with an "assistants" purpose 204 | assistant = client.beta.assistants.create( 205 | name= "Find Lead Info", 206 | instructions="You are a sale chatbot. Use your ability to pull info from linkedin to anwer question about potential leads.", 207 | model="gpt-4-1106-preview", 208 | tools=tools_list 209 | ) 210 | #write new assistant_id to a file 211 | assistant_id= assistant.id 212 | with open(assistant_file_id, "w") as file: 213 | file.write(assistant_id) 214 | print(f"Assitant created with ID: {assistant_id}") 215 | 216 | #retrieve the assitant_id 217 | assistant = client.beta.assistants.retrieve(assistant_id) 218 | print(f"Assistant created with ID: {assistant_id}") 219 | 220 | #step 2: Create a thread 221 | print("Creating a Thread for a new user conversation.....") 222 | thread = client.beta.threads.create() 223 | print(f"Thread created with ID: {thread.id}") 224 | 225 | #step add a message to the thread 226 | user_message=""" During an AI conference I attended last week, I had the opportunity to meet a gentleman named Liam Ottely, who mentioned he runs an AI automation agency and expressed interest in a potential partnership. Could you please conduct some research for me to find out more about his company? Specifically, I'd like to know the name of his company, its official website, and a brief overview of what services or products his company offers. 227 | """ 228 | print(f"Adding user's message to the Thread: {user_message}") 229 | message = client.beta.threads.messages.create( 230 | thread_id=thread.id, 231 | role="user", 232 | content=user_message 233 | ) 234 | print("Message added to the Thread.") 235 | 236 | # Step 4: Run the Assistant 237 | print("Running the Assistant to generate a response...") 238 | run = client.beta.threads.runs.create( 239 | thread_id=thread.id, 240 | assistant_id=assistant.id, 241 | instructions="Please address the user respectfully. The user requires help with math." 242 | ) 243 | print(f"Run created with ID: {run.id} and status: {run.status}") 244 | print(run.model_dump_json(indent=4)) 245 | 246 | # Step 5: Display the Assistant's Response 247 | # Poll the Run status until it's completed 248 | while True: 249 | # Wait for 5 seconds 250 | time.sleep(5) 251 | 252 | # Retrieve the run status 253 | run_status = client.beta.threads.runs.retrieve( 254 | thread_id=thread.id, 255 | run_id=run.id 256 | ) 257 | print(run_status.model_dump_json(indent=4)) 258 | 259 | # If run is completed, get messages 260 | if run_status.status == 'completed': 261 | messages = client.beta.threads.messages.list( 262 | thread_id=thread.id 263 | ) 264 | 265 | # Loop through messages and print content based on role 266 | for msg in messages.data: 267 | role = msg.role 268 | content = msg.content[0].text.value 269 | print(f"{role.capitalize()}: {content}") 270 | 271 | break 272 | elif run_status.status == 'requires_action': 273 | print("required run") 274 | print(run_status.model_dump_json(indent=4)) 275 | print("Function Calling") 276 | required_actions = run_status.required_action.submit_tool_outputs.model_dump() 277 | print(required_actions) 278 | tool_outputs = [] 279 | import json 280 | for action in required_actions["tool_calls"]: 281 | func_name = action['function']['name'] 282 | arguments = json.loads(action['function']['arguments']) 283 | 284 | if func_name == "get_linkedin_profile_and_company_data": 285 | output = get_linkedin_profile_and_company_data(name=arguments['name']) 286 | output_string = json.dumps(output) 287 | tool_outputs.append({ 288 | "tool_call_id": action['id'], 289 | "output": output_string 290 | }) 291 | else: 292 | raise ValueError(f"Unknown function: {func_name}") 293 | 294 | print("Submitting outputs back to the Assistant...") 295 | client.beta.threads.runs.submit_tool_outputs( 296 | thread_id=thread.id, 297 | run_id=run.id, 298 | tool_outputs=tool_outputs 299 | ) 300 | else: 301 | print("Waiting for the Assistant to process...") 302 | time.sleep(5) 303 | 304 | 305 | 306 | 307 | if __name__ == "__main__": 308 | create_assistants() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenAI---Assistant-API-Demo 2 | Repo for showcasing the code for building an AI Sales Assistant using OpenAI Assistant's Retrieval, Function Calling, and Code Interpreter functionalities. 3 | 4 | **Introduction** 5 | 6 | 7 | This README provides detailed instructions for setting up and running the Python project that interfaces with the OpenAI API. The setup process includes installing Python, managing project dependencies, configuring environment variables, and obtaining an OpenAI API key. 8 | 9 | 10 | **Prerequisites** 11 | 12 | Note - At least for windows might have to run as adminster when installing all of this to be able to run the commands/software packages [How to run as admin](https://learn.microsoft.com/en-us/windows/terminal/faq) 13 | 14 | VScode setup 15 | 16 | 17 | [Guide](https://medium.com/nerd-for-tech/install-visual-studio-code-fe3908c5cf15) 18 | 19 | 20 | 21 | Installing Python 22 | 23 | 24 | Before proceeding, ensure that Python is installed on your system. We recommend using Homebrew on macOS and Chocolatey on Windows for a smooth installation experience. 25 | 26 | 27 | macOS (using Homebrew): 28 | 29 | 30 | Install Homebrew by running the following command in your terminal: 31 | 32 | 33 | ```sh 34 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" 35 | ``` 36 | 37 | Once Homebrew is installed, install Python by running: 38 | 39 | 40 | ```sh 41 | brew install python 42 | ``` 43 | 44 | Windows (using Chocolatey): 45 | 46 | 47 | Install Chocolatey by following the instructions on the [Chocolatey Installation page](https://chocolatey.org/install). 48 | Once Chocolatey is installed, install Python by running the following command in your terminal (run as Administrator): 49 | 50 | 51 | ```powershell 52 | choco install python 53 | ``` 54 | 55 | Note - For Windows it might have default aliases for Python that redirect the python command to the Microsoft Store which could cause the python command not to work. Disable these aliases as it might have default aliases for Python that redirect the python command to the Microsoft Store. Disable these. 56 | 57 | Steps to disable app aliases 58 | Go to Settings > Apps > Advanced App settings. Click on App execution aliases. [Here's is video also](https://www.google.com/search?q=windows+disable+app+aliases&oq=windows+disable+app+alis&gs_lcrp=EgZjaHJvbWUqCQgBECEYChigATIGCAAQRRg5MgkIARAhGAoYoAEyCQgCECEYChigATIJCAMQIRgKGKABMgkIBBAhGAoYoAEyCQgFECEYChigATIHCAYQIRifBTIHCAcQIRifBdIBCjEwMDI5ajBqMTWoAgiwAgE&sourceid=chrome&ie=UTF-8#kpvalbx=_0HkkZvS3KOvJp84P-fmw-AE_45) 59 | 60 | **Dependency Management** 61 | 62 | 63 | This project uses a requirements.txt file to manage dependencies. Ensure you have Python and pip installed before proceeding. 64 | 65 | 66 | **Setup Instructions** 67 | 68 | 69 | 1. Clone the Repository 70 | 71 | 72 | First, clone the project repository to your local machine using git: 73 | 74 | 75 | ```sh 76 | git clone [repository URL] 77 | cd [project directory] 78 | ``` 79 | Guide in how to install git if needed - [Guide](https://github.com/git-guides/install-git) 80 | 81 | 2. Install Dependencies 82 | 83 | 84 | Navigate to the project directory and install the required Python packages using the following command: 85 | 86 | 87 | ```sh 88 | pip install -r requirements.txt 89 | ``` 90 | 91 | 3. Configure Environment Variables 92 | 93 | 94 | Create a .env file in the project root directory. This file will store your OpenAI API key and any other sensitive information. 95 | 96 | 97 | ```sh 98 | touch .env # On macOS and Linux 99 | type nul > .env # On Windows in cmd or use New-Item .env -ItemType file in PowerShell 100 | ``` 101 | 102 | Add the following line to your .env file, replacing YOUR_API_KEY with your actual OpenAI API key: 103 | 104 | 105 | ``` 106 | OPENAI_API_KEY=YOUR_API_KEY 107 | ``` 108 | 109 | 4. Obtaining an OpenAI API Key 110 | 111 | 112 | To use the OpenAI API, you need an API key. If you do not have one, visit the [OpenAI API key page](https://platform.openai.com/api-keys) to sign up for an account and obtain your API key. 113 | 114 | 115 | 5. Running the Project 116 | 117 | 118 | With the setup complete, you can now run the project using the following command: 119 | 120 | 121 | ```sh 122 | python main.py 123 | ``` 124 | 125 | Replace main.py with the name of your main Python script if different. 126 | 127 | **Voiceflow and Microsoft Teams Intergation** 128 | 129 | 130 | Can checkout the repo or the intergations below 131 | 132 | [Microsoft Teams](https://github.com/AlozieAI/Microsoft-and-Voiceflow-Integration.git) 133 | 134 | [Voiceflow-AssisstantAPI-Template](https://github.com/AlozieAI/Voiceflow-AssisstantAPI-Template) 135 | 136 | [Voiceflow---Multi-Assistant-Functionality-Template](https://github.com/AlozieAI/Voiceflow---Multi-Assistant-Functionality-Template) 137 | 138 | 139 | **Additional Information** 140 | 141 | For more detailed information on the OpenAI API and its capabilities or other documention on how to setup OpenAI API in a python env, refer to the [OpenAI API documentation](https://platform.openai.com/docs/overview) and [Open API setup doc](https://platform.openai.com/docs/quickstart?context=python) 142 | 143 | **IMPORTANT!** 144 | 145 | Open AI Assistant API V2 146 | 147 | OpenAI just added a V2 version of the Opena Assitant API V2. This includes some breaking changes so if you want the current code to still work confirm that you are using the older version of openai lib(1.20.0)(you check your version using openai --version)(run pip install openai==1.20.0) or make the proper code changes need for the code to work with Open AI Assitant API V2. [Here is the documention for differences between V1 and V2](https://platform.openai.com/docs/assistants/migration/changing-beta-versions) 148 | 149 | -------------------------------------------------------------------------------- /Retrieval Functionality/Sales.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlozieAI/OpenAI---Assistant-API-Demo/f9e990639999b786c4ca4b56731888c0ea6ea32c/Retrieval Functionality/Sales.pdf -------------------------------------------------------------------------------- /Retrieval Functionality/salescall.py: -------------------------------------------------------------------------------- 1 | import os 2 | import openai 3 | import json 4 | from dotenv import load_dotenv 5 | 6 | load_dotenv() 7 | api_key = os.environ.get("OPENAI_API_KEY") 8 | 9 | 10 | #Step 1 Initialize the client 11 | client = openai.Client() 12 | 13 | 14 | 15 | assistant_instructions = """ You are an internal sales assistant employed by our chatbot company, tasked with analyzing and extracting valuable insights from our sales calls. Your primary role is to assist in enhancing our sales call analysis based on past interactions. You are also responsible for various call-related tasks, including sentiment analysis, providing detailed responses to questions and queries, and ensuring the utmost accuracy in your responses. 16 | 17 | Your source of information is a document file containing transcripts of our previous sales calls. Your goal is to leverage this data to offer comprehensive and insightful answers to a wide range of questions and inquiries. Your responses should go beyond basic information, aiming for depth and precision when addressing the queries. 18 | 19 | Your assistance will play a pivotal role in improving our sales strategies and customer interactions. Your expertise in extracting insights from our sales calls will help us better understand customer needs, objections, and preferences, ultimately leading to more successful sales outcomes. """ 20 | 21 | 22 | 23 | 24 | #if you dont specfiy the assitant seems openai will keep creating assitants 25 | def create_assistants(): 26 | assistant_file_id="assistant_id_.txt" 27 | if os.path.exists(assistant_file_id): 28 | with open(assistant_file_id, "r") as file: 29 | assistant_id=file.read().strip() 30 | 31 | 32 | else: 33 | 34 | print("Creating an Assitant....") 35 | # Upload a file with an "assistants" purpose 36 | file = client.files.create( 37 | file=open("Sales.pdf", "rb"), 38 | purpose='assistants' 39 | ) 40 | 41 | 42 | assistant = client.beta.assistants.create( 43 | name= "Sales Call Knowledgebot", 44 | instructions=assistant_instructions, 45 | tools=[{"type": "retrieval"}], 46 | model="gpt-4-1106-preview", 47 | file_ids=[file.id] 48 | ) 49 | #write new assistant_id to a file 50 | assistant_id= assistant.id 51 | with open(assistant_file_id, "w") as file: 52 | file.write(assistant_id) 53 | print(f"Assitant created with ID: {assistant_id}") 54 | 55 | #retrieve the assitant_id 56 | assistant = client.beta.assistants.retrieve(assistant_id) 57 | print(f"Assistant created with ID: {assistant_id}") 58 | 59 | #step 2: Create a thread 60 | print("Creating a Thread for a new user conversation.....") 61 | thread = client.beta.threads.create() 62 | print(f"Thread created with ID: {thread.id}") 63 | print("using existing thread") 64 | 65 | #Step 3 add a message to the thread 66 | user_message="What are the most common objections we face in our sales call" 67 | print(f"Adding user's message to the Thread: {user_message}") 68 | message = client.beta.threads.messages.create( 69 | thread_id=thread.id, 70 | role="user", 71 | content=user_message 72 | ) 73 | print("Message added to the Thread.") 74 | 75 | # Step 4: Run the Assistant 76 | print("Running the Assistant to generate a response...") 77 | run = client.beta.threads.runs.create( 78 | thread_id=thread.id, 79 | assistant_id=assistant.id, 80 | instructions=assistant_instructions 81 | ) 82 | print(f"Run created with ID: {run.id} and status: {run.status}") 83 | 84 | # Step 5: Display the Assistant's Response 85 | # Poll the Run status until it's completed 86 | while True: 87 | run = client.beta.threads.runs.retrieve( 88 | thread_id=thread.id, 89 | run_id=run.id 90 | ) 91 | if run.status == 'completed': 92 | print("Run completed. Retrieving the Assistant's responses...") 93 | break 94 | print("Waiting for the Assistant to complete the run...") 95 | 96 | messages = client.beta.threads.messages.list( 97 | thread_id=thread.id 98 | ) 99 | with open("messages.json", "w") as f: 100 | messages_json = messages.model_dump() 101 | json.dump(messages_json, f, indent=4) 102 | print("Displaying the conversation:") 103 | for msg in messages.data: 104 | print(f"{msg.role.capitalize()}: {msg.content[0].text.value}") 105 | 106 | # save run steps to json file 107 | run_steps = client.beta.threads.runs.steps.list( 108 | thread_id=thread.id, 109 | run_id=run.id 110 | ) 111 | 112 | with open("run_steps.json" , "w") as f: 113 | run_steps_json = run_steps.model_dump() 114 | json.dump(run_steps_json,f, indent=4) 115 | 116 | 117 | 118 | if __name__ == "__main__": 119 | create_assistants() 120 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | openai==1.20.0 2 | requests 3 | pandas 4 | python-dotenv 5 | --------------------------------------------------------------------------------