├── .gitignore
├── Dockerfile
├── README.md
├── clients
│   ├── __init__.py
│   ├── langchain
│   │   ├── get_nla_agent.py
│   │   ├── get_openapi_agent.py
│   │   └── get_openapi_chain.py
│   └── tree_of_thoughts
│       ├── AsyncGuidance.py
│       ├── AsyncMonteCarlo.py
│       ├── AsyncOpenAI.py
│       └── __init__.py
├── docker-compose.yaml
├── docker-tests.yaml
├── docs
│   └── api_documentation.md
├── logger.py
├── main.py
├── middlewares
│   └── authentication.py
├── noteable_openapi.json
├── noteable_openapi.yaml
├── requirements.txt
├── routes
│   ├── __init__.py
│   ├── ai_utilities.py
│   └── authentication.py
├── services
│   ├── ai_services.py
│   ├── api_agent.py
│   ├── api_chain.py
│   ├── ask_question.py
│   ├── code_interpreter.py
│   ├── nla_agent.py
│   ├── tree_of_thoughts.py
│   └── utils
│       ├── __init__.py
│       ├── handle_exception.py
│       └── logger_utils.py
├── shared_dependencies.md
└── tests
    ├── __init__.py
    └── test_ai_utilities.py

/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | env
3 | __pycache__
4 | .vscode
5 | .idea
6 | /.idea/
7 | .pytest_cache
8 | /logs/
9 | openai.logs
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10
2 | 
3 | WORKDIR /app
4 | 
5 | COPY requirements.txt .
6 | RUN pip install --no-cache-dir -r requirements.txt
7 | 
8 | COPY . .
9 | 
10 | CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
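# Build-and-run sketch (the image tag and the --env-file flag are illustrative
# assumptions, not part of the original Dockerfile):
#   docker build -t ai-services .
#   docker run --rm -p 8080:8080 --env-file .env ai-services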
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AI Services API
2 | 
3 | ### ⚠️ Documentation in /docs is not up-to-date ⚠️
4 | 
5 | Example payload to **nla_agent**
6 | 
7 | ```json
8 | {
9 |   "input": "My noteable project id is 245eafa1-8f73-4c27-a72a-c46a2f713ccc . Use this csv file and turn it into a jupyter notebook https://fred.stlouisfed.org/...\nCreate a line plot using seaborn",
10 |   "service": "nla_agent",
11 |   "envs": {
12 |     "OPENAI_API_KEY": "sk-suchandsuch",
13 |     "PLUGIN_API_KEY": "plugin_api_key",
14 |     "plugin_name": "noteable"
15 |   }
16 | }
17 | ```
18 | 
19 | Make sure you pass the API key (`x-api-key`) in headers:
20 | 
21 | ![image](https://github.com/danny-avila/ai-services/assets/110412045/79ffc7f7-1a16-495a-9fb5-0f9b01f6ac71)
--------------------------------------------------------------------------------
/clients/__init__.py:
--------------------------------------------------------------------------------
1 | # package init: re-exports the langchain client factories
2 | from .langchain.get_openapi_chain import get_openapi_chain
3 | from .langchain.get_openapi_agent import get_openapi_agent
4 | from .langchain.get_nla_agent import get_nla_agent
--------------------------------------------------------------------------------
/clients/langchain/get_nla_agent.py:
--------------------------------------------------------------------------------
1 | from langchain.chat_models import ChatOpenAI
2 | # from langchain.llms import OpenAI
3 | from langchain.agents import AgentType, initialize_agent
4 | from langchain.agents.agent_toolkits import NLAToolkit
5 | from langchain.requests import Requests
6 | from langchain.tools.plugin import AIPlugin
7 | 
8 | AI_PLUGINS = {
9 |     "ai_agents": "https://ai-agents-plugin.vercel.app/.well-known/ai-plugin.json",
10 |     "penrose": "https://www.aperiodic.io/.well-known/ai-plugin.json"
11 | }
12 | 
13 | def get_nla_agent(openai_api_key, model_name, plugin_name, plugin_api_key):
14 |     llm = ChatOpenAI(openai_api_key=openai_api_key, model_name=model_name, temperature=0)
15 |     # llm = OpenAI(openai_api_key=openai_api_key, model_name=model_name, temperature=0)
16 |     plugin = AIPlugin.from_url(AI_PLUGINS[plugin_name])
17 |     # requests = Requests(headers={"Authorization": f"Bearer {plugin_api_key}"})
18 |     # toolkit = NLAToolkit.from_llm_and_ai_plugin(llm, plugin, requests=requests)
19 |     toolkit = NLAToolkit.from_llm_and_ai_plugin(llm, plugin)
20 | 
21 |     # tools = toolkit.get_tools()
22 |     # agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS,
23 |     #                          verbose=True)
24 | 
25 |     openapi_format_instructions = """Use the following format:
26 | 
27 | Question: the input question you must answer
28 | Thought: you should always think about what to do
29 | Action: the action to take, should be one of [{tool_names}]
30 | Action Input: what to instruct the AI Action representative.
31 | Observation: The Agent's response
32 | ... (this Thought/Action/Action Input/Observation can repeat N times)
33 | Thought: I now know the final answer. User can't see any of my observations, API responses, links, or tools.
34 | Final Answer: the final answer to the original input question with the right amount of detail
35 | 
36 | When responding with your Final Answer, remember that the person you are responding to CANNOT see any of your Thought/Action/Action Input/Observations, so if there is any relevant information there you need to include it explicitly in your response.
37 | """ 38 | 39 | tools = toolkit.get_tools() 40 | agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, 41 | verbose=True, agent_kwargs={"format_instructions":openapi_format_instructions}) 42 | return agent, tools -------------------------------------------------------------------------------- /clients/langchain/get_openapi_agent.py: -------------------------------------------------------------------------------- 1 | import json 2 | import yaml 3 | from langchain.agents import create_openapi_agent 4 | from langchain.agents.agent_toolkits import OpenAPIToolkit 5 | from langchain.llms.openai import OpenAI 6 | from langchain.requests import RequestsWrapper 7 | from langchain.tools.json.tool import JsonSpec 8 | from langchain.agents.agent_toolkits.openapi import planner 9 | from langchain.agents.agent_toolkits.openapi.spec import reduce_openapi_spec 10 | 11 | # def convert_json_to_yaml(json_file, yaml_file): 12 | # with open(json_file, 'r') as f: 13 | # data = json.load(f) 14 | 15 | # with open(yaml_file, 'w') as f: 16 | # yaml.dump(data, f, default_flow_style=False) 17 | 18 | def get_openapi_agent(api_key, model_name, plugin_name): 19 | # with open(f"{plugin_name}_openapi.json") as f: 20 | # data = json.load(f) 21 | # json_spec=JsonSpec(dict_=data) 22 | # convert_json_to_yaml(f"{plugin_name}_openapi.json", f"{plugin_name}_openapi.yaml") 23 | 24 | # with open(f"{plugin_name}_openapi.yaml") as f: 25 | # raw_api_spec = yaml.load(f, Loader=yaml.FullLoader) 26 | # json_spec=JsonSpec(dict_=data, max_value_length=4000) 27 | 28 | with open(f"{plugin_name}_openapi.yaml") as f: 29 | raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader) 30 | openai_api_spec = reduce_openapi_spec(raw_openai_api_spec) 31 | 32 | headers = { 33 | "Accept": "application/json" 34 | } 35 | 36 | # openai_requests_wrapper=RequestsWrapper(headers=headers) 37 | requests_wrapper=RequestsWrapper(headers=headers) 38 | llm=OpenAI(openai_api_key=api_key, model_name=model_name, temperature=0) 39 | 40 | # openapi_toolkit = OpenAPIToolkit.from_llm(OpenAI(openai_api_key=api_key, model_name=model_name, temperature=0), json_spec, openai_requests_wrapper, verbose=True) 41 | # openapi_agent_executor = planner.create_openapi_agent(api_spec, 42 | # llm=OpenAI(openai_api_key=api_key, model_name=model_name, temperature=0), 43 | # toolkit=openapi_toolkit, 44 | # verbose=True 45 | # ) 46 | 47 | 48 | # return openapi_agent_executor 49 | 50 | agent = planner.create_openapi_agent(openai_api_spec, requests_wrapper, llm) 51 | return agent -------------------------------------------------------------------------------- /clients/langchain/get_openapi_chain.py: -------------------------------------------------------------------------------- 1 | from langchain.tools import OpenAPISpec, APIOperation 2 | from langchain.chains import OpenAPIEndpointChain 3 | from langchain.requests import Requests 4 | from langchain.llms import OpenAI 5 | 6 | def get_openapi_chain(api_key, model_name): 7 | spec = OpenAPISpec.from_url( 8 | "https://www.klarna.com/us/shopping/public/openai/v0/api-docs/") 9 | operation = APIOperation.from_openapi_spec( 10 | spec, '/public/openai/v0/products', "get") 11 | llm = OpenAI(openai_api_key=api_key, model_name=model_name) # Load a Language Model 12 | 13 | chain = OpenAPIEndpointChain.from_api_operation( 14 | operation, 15 | llm, 16 | requests=Requests(), 17 | verbose=True, 18 | return_intermediate_steps=True, # Return request and response text 19 | # raw_response=True # Return raw response 20 | ) 21 | 22 | 
    return chain
23 | 
--------------------------------------------------------------------------------
/clients/tree_of_thoughts/AsyncGuidance.py:
--------------------------------------------------------------------------------
1 | # clients\tree_of_thoughts\AsyncGuidance.py
2 | import os
3 | import openai
4 | from tree_of_thoughts import AbstractLanguageModel
5 | import guidance
6 | import time
7 | import os
8 | 
9 | 
10 | class GuidanceLanguageModel(AbstractLanguageModel):
11 |     def __init__(self, model, strategy="cot", evaluation_strategy="value", enable_ReAct_prompting=False):
12 |         # gpt4 = guidance.llms.OpenAI("gpt-4")
13 |         # vicuna = guidance.llms.transformers.Vicuna("your_path/vicuna_13B", device_map="auto")
14 |         self.model = model
15 | 
16 |         # reference : https://www.promptingguide.ai/techniques/react
17 |         self.ReAct_prompt = ''
18 |         if enable_ReAct_prompting:
19 |             self.ReAct_prompt = '''{{#assistant~}}
20 | {{gen 'Observation' temperature=0.5 max_tokens=50}}
21 | {{~/assistant}}'''
22 | 
23 |         self.strategy = strategy
24 |         self.evaluation_strategy = evaluation_strategy
25 | 
26 |         self.thoughts_program = guidance('''
27 | {{#system~}}
28 | You are a logical and rational assistant.
29 | {{~/system}}
30 | 
31 | {{#user~}}
32 | Given the current state of reasoning:
33 | {{state_text}}
34 | Generate {{k}} coherent thoughts as short as possible to continue the reasoning process.
35 | Don't answer the question yet.
36 | {{~/user}}
37 | 
38 | %s
39 | 
40 | {{#assistant~}}
41 | {{gen 'Thoughts' temperature=0.5 max_tokens=50}}
42 | {{~/assistant}}
43 | ''' % self.ReAct_prompt, llm=self.model)
44 | 
45 |         self.value_program = guidance('''
46 | {{#system~}}
47 | You are a logical and rational assistant.
48 | {{~/system}}
49 | 
50 | {{#user~}}
51 | Given the current state of reasoning:
52 | {{state_text}}
53 | Evaluate its value as a float between 0 and 1, and NOTHING ELSE
54 | Don't answer the question yet.
55 | {{~/user}}
56 | 
57 | {{#assistant~}}
58 | {{gen 'Value' temperature=1 max_tokens=10}}
59 | {{~/assistant}}
60 | ''', llm=self.model)
61 | 
62 |         self.vote_program = guidance('''
63 | {{#system~}}
64 | You are a logical and rational assistant.
65 | {{~/system}}
66 | 
67 | {{#user~}}
68 | Given the following states of reasoning, vote for the best state:
69 | {{states_text}}
70 | Give the index of your voted best state (the 1st state has index 0), and NOTHING ELSE
71 | Don't answer the question yet.
72 | {{~/user}}
73 | 
74 | {{#assistant~}}
75 | {{gen 'Vote' temperature=1 max_tokens=10}}
76 | {{~/assistant}}
77 | ''', llm=self.model)
78 | 
79 |     def stream_message(self, message):
80 |         if getattr(self, 'stream_handler', None):  # the base class does not set stream_handler itself
81 |             self.stream_handler(message)
82 | 
83 |     async def model_response_handler(self, program, **kargs):
84 |         print("Calling guidance model (modify me to handle specific LLM response exceptions!)")
85 |         response = program(**kargs)
86 |         return response
87 | 
88 |     async def generate_thoughts(self, state, k):
89 |         # implement the thought generation logic using self.model
90 |         state_text = ' '.join(state)
91 | 
92 |         thoughts = []
93 |         for _ in range(k):
94 |             response = await self.model_response_handler(self.thoughts_program, state_text=state_text, k=1)
95 |             text = response['Thoughts']
96 |             thoughts += [text]
97 |         # print(thoughts)
98 |         print(f"Generated thoughts: {thoughts}")
99 |         return thoughts
100 | 
101 |     async def evaluate_states(self, states):
102 |         # implement state evaluation logic using self.model
103 |         if self.evaluation_strategy == 'value':
104 |             state_values = {}
105 |             for state in states:
106 |                 state_text = ' '.join(state)
107 |                 response = await self.model_response_handler(self.value_program, state_text=state_text)
108 |                 try:
109 |                     value_text = response['Value']
110 |                     # print(f"Value text {value_text}")
111 |                     self.stream_message(f"Value text {value_text}")
112 |                     value = float(value_text)
113 |                     # print(f"value: {value}")
114 |                     self.stream_message(f"value: {value}")
115 |                 except ValueError:
116 |                     # print(f"Value text {value_text} cannot be converted to float")
117 |                     self.stream_message(f"Value text {value_text} cannot be converted to float")
118 |                     value = 0  # Assign a default value if the conversion fails
119 |                 state_values[state] = value
120 |             return state_values
121 | 
122 |         elif self.evaluation_strategy == 'vote':
123 |             states_text = '\n'.join([' '.join(state) for state in states])
124 |             response = await self.model_response_handler(self.vote_program, states_text=states_text)
125 |             best_state_text = response['Vote']
126 |             # print(f"Best state text: {best_state_text}")
127 |             self.stream_message(f"Best state text: {best_state_text}")
128 |             best_state = int(best_state_text)
129 |             return {state: 1 if i == best_state else 0 for i, state in enumerate(states)}
130 | 
131 |         else:
132 |             raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
133 | 
134 | 
135 | 
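# How the programs above are consumed (an illustrative sketch, not part of the
# original file): a compiled guidance program is called like a function and
# exposes each generated variable under the name given in its {{gen ...}} tag:
#   out = model.value_program(state_text="2 + 2 = 4")
#   score = float(out['Value'])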
136 | class GuidanceOpenAILanguageModel(GuidanceLanguageModel):
137 |     def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=False, stream_handler=None):
138 |         self.stream_handler = stream_handler
139 |         if api_key == "" or api_key == None:
140 |             api_key = os.environ.get("OPENAI_API_KEY", "")
141 |         if api_key != "":
142 |             openai.api_key = api_key
143 |         else:
144 |             raise Exception("Please provide OpenAI API key")
145 | 
146 |         if api_base == "" or api_base == None:
147 |             api_base = os.environ.get("OPENAI_API_BASE", "")  # if not set, use the default base path of "https://api.openai.com/v1"
148 |         if api_base != "":
149 |             # e.g. https://api.openai.com/v1/ or your custom url
150 |             openai.api_base = api_base
151 |             print(f'Using custom api_base {api_base}')
152 | 
153 |         if api_model == "" or api_model == None:
154 |             api_model = os.environ.get("OPENAI_API_MODEL", "")
155 |         if api_model != "":
156 |             self.api_model = api_model
157 |         else:
158 |             self.api_model = "text-davinci-003"
159 |         print(f'Using api_model {self.api_model}')
160 | 
161 |         super().__init__(guidance.llms.OpenAI(self.api_model), strategy, evaluation_strategy, enable_ReAct_prompting)
162 | 
163 | 
164 |     async def model_response_handler(self, program, **kargs):
165 |         error_msg = ''
166 |         while True:
167 |             try:
168 |                 program.llm.max_retries = 60
169 |                 guidance.llms.OpenAI.cache.clear()
170 |                 response = program(**kargs)
171 |                 return response
172 |             except openai.error.RateLimitError as e:
173 |                 sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
174 |                 print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
175 |                 time.sleep(sleep_duration)
176 |             except Exception as e:
177 |                 if str(e) == f'''Too many (more than {program.llm.max_retries}) OpenAI API RateLimitError's in a row!''':
178 |                     sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
179 |                     print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
180 |                     time.sleep(sleep_duration)
181 |                 else:
182 |                     error_msg = str(e)
183 |                     break
184 |         raise Exception(error_msg)
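# Usage sketch (illustrative: the key and model below are placeholders, not
# part of the original file):
#   model = GuidanceOpenAILanguageModel(api_key="sk-...", api_model="gpt-3.5-turbo")
#   thoughts = await model.generate_thoughts(["2 + 2 = ?"], k=3)
#   values = await model.evaluate_states([tuple(thoughts)])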
--------------------------------------------------------------------------------
/clients/tree_of_thoughts/AsyncMonteCarlo.py:
--------------------------------------------------------------------------------
1 | # clients\tree_of_thoughts\AsyncMonteCarlo.py
2 | from typing import Any, Dict, Union
3 | import os
4 | import asyncio
5 | import json
6 | import logging
7 | import numpy as np
8 | logging.basicConfig(level=logging.INFO,
9 |                     format='%(asctime)s - %(levelname)s - %(message)s')
10 | logger = logging.getLogger(__name__)
11 | 
12 | class TreeofThoughts:
13 |     def __init__(self, model):
14 |         self.model = model
15 |         self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
16 |             "nodes": {},
17 |         }
18 |         self.best_state = None
19 |         self.best_value = float("-inf")
20 |         self.history = []  # initialize history
21 | 
22 |     def save_tree_to_json(self, file_name):
23 |         self.model.stream_message(json.dumps(self.tree, indent=4))
24 |         os.makedirs(os.path.dirname(file_name), exist_ok=True)
25 |         with open(file_name, 'w') as json_file:
26 |             json.dump(self.tree, json_file, indent=4)
27 | 
28 |     def logNewState(self, state, evaluation):
29 |         if not (type(state) == str):
30 |             state = " | ".join(state)
31 |         if state in self.tree['nodes']:
32 |             self.tree['nodes'][state]['thoughts'].append(evaluation)
33 |         else:
34 |             self.tree['nodes'][state] = {'thoughts': [evaluation]}
35 | 
36 |     def adjust_pruning_threshold_precentile(self, evaluated_thoughts, percentile):
37 |         values = np.array(list(evaluated_thoughts.values()))
38 |         if values.size == 0:
39 |             return 0
40 |         return max(np.percentile(values, percentile), 0.1)
41 | 
42 |     def adjust_pruning_threshold_moving_average(self, evaluated_thoughts, window_size):
43 |         values = list(evaluated_thoughts.values())
44 |         if len(values) < window_size:
45 |             return np.mean(values) if values else 0
46 |         else:
47 |             return max(np.mean(values[-window_size:]), 0.1)
48 | 
49 | class AsyncMonteCarloTreeofThoughts(TreeofThoughts):
50 |     def __init__(self, model, objective="balance", stream_handler=None):
51 |         super().__init__(model)
52 |         self.stream_handler = stream_handler
53 |         self.objective = objective
54 |         self.solution_found = False
55 |         self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
56 |             "nodes": {},
57 |             "metrics": {"thoughts": {}, "evaluations": {}},
58 |         }
59 | 
60 |     def optimize_params(self, num_thoughts, max_steps, max_states):
61 |         if self.objective == 'speed':
62 |             num_thoughts = max(1, num_thoughts - 1)
63 |             max_steps = max(1, max_steps - 1)
64 |             max_states = max(1, max_states - 1)
65 |         elif self.objective == 'reliability':
66 |             num_thoughts += 1
67 |             max_steps += 1
68 |             max_states += 1
69 |         elif self.objective == 'balance':
70 |             if self.solution_found:
71 |                 num_thoughts = max(1, num_thoughts - 1)
72 |                 max_steps = max(1, max_steps - 1)
73 |                 max_states = max(1, max_states - 1)
74 |             else:
75 |                 num_thoughts += 1
76 |                 max_steps += 1
77 |                 max_states += 1
78 | 
79 |         return num_thoughts, max_steps, max_states
80 | 
81 |     async def solve(self,
82 |                     initial_prompt: str,
83 |                     num_thoughts: int,
84 |                     max_steps: int,
85 |                     max_states: int,
86 |                     pruning_threshold: float,
87 |                     # sleep_time: float,
88 |                     ):
89 |         self.file_name = "logs/tree_of_thoughts_output_montecarlo.json"
90 |         return await self.monte_carlo_search(
91 |             initial_prompt,
92 |             num_thoughts,
93 |             max_steps,
94 |             max_states,
95 |             pruning_threshold,
96 |             # sleep_time,
97 |         )
98 |     # v3
99 | 
100 |     async def monte_carlo_search(self,
101 |                                  initial_prompt: str,
102 |                                  num_thoughts: int,
103 |                                  max_steps: int,
104 |                                  max_states: int,
105 |                                  pruning_threshold: float,
106 |                                  ):
107 |         current_states = [initial_prompt]
108 |         state_values = {}
109 |         visit_counts = {initial_prompt: 0}
110 |         transposition_table = {}
111 |         evaluated_thoughts = {}  # ensure this is defined even when every state hits the cache below
112 |         best_state = None
113 |         best_value = float('-inf')
114 | 
115 |         for step in range(1, max_steps + 1):
116 |             selected_states = []
117 | 
118 |             for state in current_states:
119 |                 if state in transposition_table:
120 |                     state_value = transposition_table[state]
121 |                 else:
122 |                     await asyncio.sleep(1)
123 |                     thoughts = await self.model.generate_thoughts(state, num_thoughts, initial_prompt)
124 |                     await asyncio.sleep(1)
125 |                     evaluated_thoughts = await self.model.evaluate_states(thoughts, initial_prompt)
126 | 
127 |                     for thought, value in evaluated_thoughts.items():
128 |                         flattened_state = (state, thought) if isinstance(
129 |                             state, str) else (*state, thought)
130 |                         transposition_table[flattened_state] = value
131 | 
132 |                 for thought, value in evaluated_thoughts.items():
133 |                     flattened_state = (state, thought) if isinstance(
134 |                         state, str) else (*state, thought)
135 | 
136 |                     self.logNewState(flattened_state, value)
137 | 
138 |                     if flattened_state not in visit_counts:
139 |                         visit_counts[flattened_state] = 0
140 |                     # UCB1: the raw value plus an exploration bonus that shrinks as a child is visited more often
141 |                     if visit_counts[state] > visit_counts[flattened_state] and visit_counts[flattened_state] > 0:
142 |                         ucb1_value = value + \
143 |                             np.sqrt(
144 |                                 2 * np.log(visit_counts[state]) / visit_counts[flattened_state])
145 | 
146 |                         if ucb1_value >= pruning_threshold:
147 |                             selected_states.append(flattened_state)
148 |                             state_values[flattened_state] = value
149 | 
150 |                             # Update the best state if the current state value is greater than the best value
151 |                             if value > best_value:
152 |                                 best_state = flattened_state
153 |                                 best_value = value
154 | 
155 |                 visit_counts[state] += 1
156 | 
157 |             if selected_states:
158 |                 current_states = selected_states[:max_states]  # keep at most max_states states for the next step
159 |             self.save_tree_to_json(self.file_name)
160 | 
161 |         solution = await self.model.generate_solution(initial_prompt, best_state)
162 |         return solution if solution else best_state
163 | 
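# Usage sketch (illustrative values, not from the original file; assumes the
# AsyncOpenAILanguageModel defined in AsyncOpenAI.py below):
#   model = AsyncOpenAILanguageModel(api_key="sk-...", api_model="gpt-3.5-turbo")
#   tot = AsyncMonteCarloTreeofThoughts(model, objective="balance")
#   solution = await tot.solve(initial_prompt="Plan a data pipeline for ...",
#                              num_thoughts=3, max_steps=3, max_states=4,
#                              pruning_threshold=0.5)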
-------------------------------------------------------------------------------- /clients/tree_of_thoughts/AsyncOpenAI.py: -------------------------------------------------------------------------------- 1 | # clients\tree_of_thoughts\AsyncOpenAI.py 2 | import os 3 | import re 4 | import openai 5 | import logging 6 | import asyncio 7 | from tree_of_thoughts import AbstractLanguageModel 8 | 9 | logging.basicConfig(level=logging.INFO, 10 | format='%(asctime)s - %(levelname)s - %(message)s') 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class AsyncOpenAILanguageModel(AbstractLanguageModel): 15 | def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_base="", api_model="", enable_ReAct_prompting=True, stream_handler=None): 16 | self.session = openai.aiosession.get() 17 | self.stream_handler = stream_handler 18 | if api_key == "" or api_key == None: 19 | api_key = os.environ.get("OPENAI_API_KEY", "") 20 | if api_key != "": 21 | openai.api_key = api_key 22 | else: 23 | raise Exception("Please provide OpenAI API key") 24 | 25 | if api_base == "" or api_base == None: 26 | # if not set, use the default base path of "https://api.openai.com/v1" 27 | api_base = os.environ.get("OPENAI_API_BASE", "") 28 | if api_base != "": 29 | # e.g. https://api.openai.com/v1/ or your custom url 30 | openai.api_base = api_base 31 | print(f'Using custom api_base {api_base}') 32 | 33 | if api_model == "" or api_model == None: 34 | api_model = os.environ.get("OPENAI_API_MODEL", "") 35 | if api_model != "": 36 | self.api_model = api_model 37 | else: 38 | self.api_model = "text-davinci-003" 39 | print(f'Using api_model {self.api_model}') 40 | 41 | self.use_chat_api = 'gpt' in self.api_model 42 | 43 | # reference : https://www.promptingguide.ai/techniques/react 44 | self.ReAct_prompt = '' 45 | if enable_ReAct_prompting: 46 | self.ReAct_prompt = "Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx'." 
47 | 
48 |         self.strategy = strategy
49 |         self.evaluation_strategy = evaluation_strategy
50 | 
51 |     def stream_message(self, message):
52 |         if self.stream_handler:
53 |             self.stream_handler(message)
54 | 
55 |     async def openai_api_call_handler(self, prompt, max_tokens, temperature, k=1, stop=None):
56 |         while True:
57 |             try:
58 |                 if self.use_chat_api:
59 |                     messages = [
60 |                         {
61 |                             "role": "user",
62 |                             "content": prompt
63 |                         }
64 |                     ]
65 |                     response = await openai.ChatCompletion.acreate(
66 |                         model=self.api_model,
67 |                         messages=messages,
68 |                         max_tokens=max_tokens,
69 |                         temperature=temperature,
70 |                     )
71 |                 else:
72 |                     response = await openai.Completion.acreate(
73 |                         engine=self.api_model,
74 |                         prompt=prompt,
75 |                         n=k,
76 |                         max_tokens=max_tokens,
77 |                         stop=stop,
78 |                         temperature=temperature,
79 |                     )
80 |                 # with open("openai.logs", 'a') as log_file:
81 |                 #     log_file.write("\n" + "-----------" +
82 |                 #                    '\n' + "Prompt : " + prompt + "\n")
83 | 
84 |                 self.stream_message(f"\n-----------\nPrompt: {prompt}")
85 |                 return response
86 |             except openai.error.RateLimitError as e:
87 |                 sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
88 |                 print(
89 |                     f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
90 |                 await asyncio.sleep(sleep_duration)
91 | 
92 |     def openai_choice2text_handler(self, choice):
93 |         if self.use_chat_api:
94 |             text = choice['message']['content']
95 |         else:
96 |             text = choice.text.strip()
97 |         return text
98 | 
99 |     async def generate_text(self, prompt, k):
100 |         if self.use_chat_api:
101 |             thoughts = []
102 |             for _ in range(k):
103 |                 response = await self.openai_api_call_handler(prompt, 400, 0.5, k)
104 |                 text = self.openai_choice2text_handler(response.choices[0])
105 |                 thoughts += [text]
106 |                 self.stream_message(f"\n-----------\nThoughts: {thoughts}")
107 |                 # print(f'thoughts: {thoughts}')
108 |             return thoughts
109 | 
110 |         else:
111 |             response = await self.openai_api_call_handler(prompt, 300, 0.5, k)
112 |             thoughts = [self.openai_choice2text_handler(
113 |                 choice) for choice in response.choices]
114 |             return thoughts
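    # With enable_ReAct_prompting=True, completions are nudged toward the shape
    # "Observation: ...\nThoughts: ..." by self.ReAct_prompt; the methods below
    # still treat each completion as a single opaque thought string.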
115 | 
116 |     async def generate_thoughts(self, state, k, initial_prompt, rejected_solutions=None):
117 |         if (type(state) == str):
118 |             state_text = state
119 |         else:
120 |             state_text = '\n'.join(state)
121 |         print("New state generating thought:", state, "\n\n")
122 |         # prompt = f"Considering the thoughts you've had until now:\n\n{state_text}\n\nDevise the next coherent thought that will aid in advancing the reasoning process and achieving a solution to {initial_prompt}. Assess various scenarios, think unconventionally, anticipate potential challenges, and resolve any outstanding queries. Tap into your mind's full potential and make certain no open questions remain."
123 |         prompt = f"""You are TreeofThoughts, a super-intelligent AI model devoted to helping Humans by any means necessary. Your purpose is to generate a series of solutions that comply with the user's instructions: generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
124 | Considering the reasoning provided:\n\n
125 | ###'{state_text}'\n\n###
126 | Devise the best possible solution for the task: {initial_prompt}. Here are evaluated solutions that were rejected:
127 | ###{rejected_solutions}###,
128 | complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
129 | 
130 |         prompt += self.ReAct_prompt
131 |         # print(prompt)
132 |         thoughts = await self.generate_text(prompt, k)
133 |         # print(thoughts)
134 |         # print(f"Generated thoughts: {thoughts}")
135 |         return thoughts
136 | 
137 |     async def generate_solution(self, initial_prompt, state, rejected_solutions=None):
138 |         try:
139 | 
140 |             if isinstance(state, list):
141 |                 state_text = '\n'.join(state)
142 |             else:
143 |                 state_text = state
144 | 
145 |             prompt = f"""You are TreeofThoughts, a super-intelligent AI model devoted to helping Humans by any means necessary. Your purpose is to generate a series of solutions that comply with the user's instructions: generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
146 | Considering the reasoning provided:\n\n
147 | ###'{state_text}'\n\n###
148 | Devise the best possible solution for the task: {initial_prompt}. Here are evaluated solutions that were rejected:
149 | ###{rejected_solutions}###,
150 | complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
151 |             answer = await self.generate_text(prompt, 1)
152 |             print(f'Answer: {answer}')
153 |             # print(thoughts)
154 |             # print(f"General Solution : {answer}")
155 |             return answer
156 |         except Exception as e:
157 |             logger.error(f"Error in generate_solutions: {e}")
158 |             return None
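    # evaluate_states below implements the two strategies the constructor
    # accepts: 'value' scores each state independently as a float in [0, 1],
    # while 'vote' asks the model to pick the single best state. The UCB1
    # search in AsyncMonteCarlo.py consumes these scores.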
159 | 
160 |     async def evaluate_states(self, states, initial_prompt):
161 |         if not states:
162 |             return {}
163 | 
164 |         if self.evaluation_strategy == 'value':
165 |             state_values = {}
166 |             for state in states:
167 |                 if (type(state) == str):
168 |                     state_text = state
169 |                 else:
170 |                     state_text = '\n'.join(state)
171 |                 print("We receive a state of type", type(
172 |                     state), "For state: ", state, "\n\n")
173 |                 prompt = f"""To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and, more importantly, the latest generated solution, AS A FLOAT BETWEEN 0 AND 1\n
174 | Past solutions:\n\n
175 | {state_text}\n
176 | If the solution is not directly and concretely making fast progress towards achieving the goal, give it a lower score.
177 | Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\nDO NOT RETURN ANYTHING ELSE
178 | """
179 | 
180 |                 response = await self.openai_api_call_handler(prompt, 50, 1)
181 |                 try:
182 |                     value_text = self.openai_choice2text_handler(
183 |                         response.choices[0])
184 |                     # print(f'state: {value_text}')
185 |                     self.stream_message(f"\n-----------\nValue Text: {value_text}")
186 |                     # value = float(value_text)
187 |                     matches = re.findall(r'[-+]?[0-9]*\.[0-9]+', value_text)
188 |                     if matches:
189 |                         value = float(matches[-1])  # Get the last match
190 |                         print(f"Evaluated Thought Value: {value}")
191 |                         self.stream_message(f"\n-----------\nEvaluated Thought Value: {value}")
192 |                     else:
193 |                         raise ValueError("No float found in value text")
194 |                 except ValueError:
195 |                     self.stream_message(f"\n-----------\nValueError, defaulting to 0")
196 |                     value = 0  # Assign a default value if the conversion fails
197 |                 state_values[state] = value
198 |             return state_values
199 | 
200 |         elif self.evaluation_strategy == 'vote':
201 |             states_text = '\n'.join([' '.join(state) for state in states])
202 | 
203 |             prompt = f"Given the following states of reasoning, vote for the best state using a scalar value 1-10:\n{states_text}\n\nVote on the probability of this state of reasoning achieving {initial_prompt}. Be very pessimistic. Return the vote and NOTHING ELSE"
204 | 
205 |             response = await self.openai_api_call_handler(prompt, 100, 1)
206 | 
207 |             print(f'state response: {response}')
208 | 
209 |             best_state_text = self.openai_choice2text_handler(
210 |                 response.choices[0])
211 | 
212 |             print(f"Best state text: {best_state_text}")
213 | 
214 |             best_state = tuple(best_state_text.split())
215 | 
216 |             print(f'best_state: {best_state}')
217 | 
218 |             return {state: 1 if state == best_state else 0 for state in states}
219 | 
220 |         else:
221 |             raise ValueError(
222 |                 "Invalid evaluation strategy. Choose 'value' or 'vote'.")
--------------------------------------------------------------------------------
/clients/tree_of_thoughts/__init__.py:
--------------------------------------------------------------------------------
1 | from .AsyncMonteCarlo import AsyncMonteCarloTreeofThoughts
2 | from .AsyncOpenAI import AsyncOpenAILanguageModel
3 | from .AsyncGuidance import GuidanceOpenAILanguageModel
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 |   fastapi:
4 |     build: .
5 |     ports:
6 |       - "8080:8080"
7 |     volumes:
8 |       - .:/app
9 |     command: ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080", "--reload", "--log-level", "debug", "--loop", "asyncio"]
--------------------------------------------------------------------------------
/docker-tests.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 |   fastapi:
4 |     build: .
5 |     ports:
6 |       - "8080:8080"
7 |     volumes:
8 |       - .:/app
9 |     command: [ "pytest" ]
--------------------------------------------------------------------------------
/docs/api_documentation.md:
--------------------------------------------------------------------------------
1 | # AI Utility API Documentation
2 | 
3 | This API provides a RESTful interface to various AI utility libraries and securely manages user credentials for accessing these services.
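For example, the `/ask` endpoint can be exercised like this (a sketch: the host and port follow docker-compose.yaml, and since main.py mounts the routes under the `/ai` prefix, the effective path may be `/ai/ask`):

```python
import requests

resp = requests.post(
    "http://localhost:8080/ask",
    headers={"x-api-key": "your_secret_token"},
    json={
        "service": "q&a",
        "input": "How many people live in Canada as of 2023?",
        "envs": {"OPENAI_API_KEY": "your_openai_api_key"},
    },
)
print(resp.json())
```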
4 | 
5 | ## Endpoints
6 | 
7 | ### POST /ask
8 | 
9 | This endpoint accepts a question and returns an answer using the specified AI service.
10 | 
11 | **URL**: `/ask`
12 | 
13 | **Method**: `POST`
14 | 
15 | **Content-Type**: `application/json`
16 | 
17 | **Request Payload**:
18 | 
19 | ```json
20 | {
21 |   "service": "q&a",
22 |   "input": "How many people live in Canada as of 2023?",
23 |   "envs": {
24 |     "OPENAI_API_KEY": "your_openai_api_key"
25 |   }
26 | }
27 | ```
28 | 
29 | **Success Response**:
30 | 
31 | - **Code**: `200 OK`
32 | - **Content**:
33 | 
34 | ```json
35 | {
36 |   "result": "Arrr, there be 38,645,670 people livin' in Canada as of 2023!",
37 |   "error": "",
38 |   "stdout": "Answer the following questions as best you can, but speaking as a pirate might speak...Final Answer: Arrr, there be 38,645,670 people livin' in Canada as of 2023!"
39 | }
40 | ```
41 | 
42 | **Error Response**:
43 | 
44 | - **Code**: `400 Bad Request`
45 | - **Content**:
46 | 
47 | ```json
48 | {
49 |   "detail": "Invalid request payload."
50 | }
51 | ```
52 | 
53 | ### POST /sentiment_analysis
54 | 
55 | This endpoint accepts text data and returns the sentiment score using the specified AI service.
56 | 
57 | **URL**: `/sentiment_analysis`
58 | 
59 | **Method**: `POST`
60 | 
61 | **Content-Type**: `application/json`
62 | 
63 | **Request Payload**:
64 | 
65 | ```json
66 | {
67 |   "service": "sentiment_analysis",
68 |   "input": "I love this product!",
69 |   "envs": {
70 |     "OPENAI_API_KEY": "your_openai_api_key"
71 |   }
72 | }
73 | ```
74 | 
75 | **Success Response**:
76 | 
77 | - **Code**: `200 OK`
78 | - **Content**:
79 | 
80 | ```json
81 | {
82 |   "result": "positive",
83 |   "error": "",
84 |   "stdout": "The sentiment analysis result is: positive"
85 | }
86 | ```
87 | 
88 | **Error Response**:
89 | 
90 | - **Code**: `400 Bad Request`
91 | - **Content**:
92 | 
93 | ```json
94 | {
95 |   "detail": "Invalid request payload."
96 | }
97 | ```
98 | 
99 | ## Authentication
100 | 
101 | This API uses a secret API key for authentication. Include the key in the `x-api-key` header of your requests; the middleware in middlewares/authentication.py checks this header.
102 | 
103 | **Example**:
104 | 
105 | ```
106 | x-api-key: your_secret_token
107 | ```
108 | 
109 | ## Errors
110 | 
111 | In case of errors, the API will return an appropriate HTTP status code along with a JSON object containing the error message.
112 | 
113 | **Example**:
114 | 
115 | ```json
116 | {
117 |   "detail": "Invalid request payload."
118 | }
119 | ```
--------------------------------------------------------------------------------
/logger.py:
--------------------------------------------------------------------------------
1 | # logger.py
2 | import logging
3 | 
4 | # Create a custom logger
5 | logger = logging.getLogger(__name__)
6 | 
7 | # Configure the logger
8 | c_handler = logging.StreamHandler()
9 | c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
10 | c_handler.setFormatter(c_format)
11 | logger.addHandler(c_handler)
12 | 
13 | # Set level of logger
14 | logger.setLevel(logging.DEBUG)
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | # main.py
2 | import openai
3 | from logger import logger
4 | from fastapi import FastAPI
5 | from contextlib import asynccontextmanager
6 | from fastapi.middleware.cors import CORSMiddleware
7 | from routes.ai_utilities import ai_utilities_router
8 | from middlewares.authentication import AuthenticationMiddleware
9 | 
10 | @asynccontextmanager
11 | async def lifespan(app: FastAPI):
12 |     yield
13 |     logger.info("Shutting down...")
14 |     logger.info("Closing OpenAI AIO session...")
15 |     session = openai.aiosession.get()  # may be None if no session was ever set
16 |     if session:
17 |         await session.close()
18 | 
19 | app = FastAPI(lifespan=lifespan)
20 | 
21 | app.include_router(ai_utilities_router, prefix="/ai")
22 | 
23 | app.add_middleware(AuthenticationMiddleware)
24 | app.add_middleware(
25 |     CORSMiddleware,
26 |     allow_origins=["*"],
27 |     allow_credentials=True,
28 |     allow_methods=["*"],
29 |     allow_headers=["*"],
30 | )
31 | 
32 | if __name__ == "__main__":
33 |     import uvicorn
34 |     uvicorn.run("main:app", host="0.0.0.0", port=8080, log_level="info")
--------------------------------------------------------------------------------
/middlewares/authentication.py:
--------------------------------------------------------------------------------
1 | # middlewares\authentication.py
2 | from fastapi import Request, HTTPException
3 | from fastapi.responses import JSONResponse
4 | from starlette.middleware.base import BaseHTTPMiddleware
5 | 
6 | SECRET_KEY = "your-secret-key"
7 | 
8 | async def authenticate(request: Request):
9 |     if "x-api-key" in request.headers and request.headers["x-api-key"] == SECRET_KEY:
10 |         return True
11 |     else:
12 |         raise HTTPException(status_code=401, detail="Invalid API key")
13 | 
14 | class AuthenticationMiddleware(BaseHTTPMiddleware):
15 |     async def dispatch(self, request: Request, call_next):
16 |         try:
17 |             await authenticate(request)
18 |         except HTTPException as exc:
19 |             # An HTTPException raised inside BaseHTTPMiddleware can surface as
20 |             # a 500, so convert it into a proper 401 JSON response here.
21 |             return JSONResponse(status_code=exc.status_code, content={"detail": exc.detail})
22 | 
23 |         response = await call_next(request)
24 |         return response
--------------------------------------------------------------------------------
/noteable_openapi.json:
--------------------------------------------------------------------------------
1 | {
2 |   "openapi": "3.0.2",
3 |   "info": {
4 |     "title": "Origamist",
5 |     "description": "ChatGPT Plugin server for creating computational notebooks (in Python!), allowing you to execute code, explore data, and visualize results.",
6 |     "version": "0.25.1"
7 |   },
8 |   "servers": [
9 |     {
10 |       "url": "https://chat.noteable.io",
11 |       "description": "Origamist server on https://chat.noteable.io"
12 |     }
13 |   ],
14 |   "paths": {
15 |     "/api/origami/s/default": {
16 |       "get": {
17 |         "summary": "Get Default Space",
18 |         "description": "Get the user's configured default Space for creating new Projects in.",
19 |         "operationId": "get_default_space",
20 |         "responses": {
21 |           "200": {
22 |             "description": "Successful Response",
23 | "content": { 24 | "application/json": { 25 | "schema": {} 26 | } 27 | } 28 | } 29 | }, 30 | "security": [ 31 | { 32 | "HTTPBearer": [] 33 | } 34 | ] 35 | } 36 | }, 37 | "/api/origami/p/default": { 38 | "get": { 39 | "summary": "Get Default Project", 40 | "description": "Get the user's configured default project.", 41 | "operationId": "get_default_project", 42 | "responses": { 43 | "200": { 44 | "description": "Successful Response", 45 | "content": { 46 | "application/json": { 47 | "schema": {} 48 | } 49 | } 50 | } 51 | }, 52 | "security": [ 53 | { 54 | "HTTPBearer": [] 55 | } 56 | ] 57 | }, 58 | "put": { 59 | "summary": "Set Default Project", 60 | "description": "Configure the user's default project (by UUID) for new notebooks.", 61 | "operationId": "set_default_project", 62 | "parameters": [ 63 | { 64 | "required": true, 65 | "schema": { 66 | "title": "New Default Project Id", 67 | "type": "string", 68 | "format": "uuid" 69 | }, 70 | "name": "new_default_project_id", 71 | "in": "query" 72 | } 73 | ], 74 | "responses": { 75 | "200": { 76 | "description": "Successful Response", 77 | "content": { 78 | "application/json": { 79 | "schema": { 80 | "title": "Response Set Default Project", 81 | "type": "object" 82 | } 83 | } 84 | } 85 | }, 86 | "422": { 87 | "description": "Validation Error", 88 | "content": { 89 | "application/json": { 90 | "schema": { 91 | "$ref": "#/components/schemas/HTTPValidationError" 92 | } 93 | } 94 | } 95 | } 96 | }, 97 | "security": [ 98 | { 99 | "HTTPBearer": [] 100 | } 101 | ] 102 | }, 103 | "delete": { 104 | "summary": "Delete Default Project", 105 | "description": "Clear the user's default project.", 106 | "operationId": "clear_default_project", 107 | "responses": { 108 | "200": { 109 | "description": "Successful Response", 110 | "content": { 111 | "application/json": { 112 | "schema": { 113 | "title": "Response Clear Default Project", 114 | "type": "object" 115 | } 116 | } 117 | } 118 | } 119 | }, 120 | "security": [ 121 | { 122 | "HTTPBearer": [] 123 | } 124 | ] 125 | } 126 | }, 127 | "/api/origami/p/": { 128 | "post": { 129 | "summary": "Create New Project", 130 | "description": "Create a new Project for the user in their default Space. Projects can be optionally created\nfrom a git repository, in which case users can sync changes from the Noteable UI. 
Private repos\ncan be created from the Noteable UI but", 131 | "operationId": "create_project", 132 | "requestBody": { 133 | "content": { 134 | "application/json": { 135 | "schema": { 136 | "$ref": "#/components/schemas/CreateProjectOptions" 137 | } 138 | } 139 | }, 140 | "required": true 141 | }, 142 | "responses": { 143 | "200": { 144 | "description": "Successful Response", 145 | "content": { 146 | "application/json": { 147 | "schema": {} 148 | } 149 | } 150 | }, 151 | "422": { 152 | "description": "Validation Error", 153 | "content": { 154 | "application/json": { 155 | "schema": { 156 | "$ref": "#/components/schemas/HTTPValidationError" 157 | } 158 | } 159 | } 160 | } 161 | }, 162 | "security": [ 163 | { 164 | "HTTPBearer": [] 165 | } 166 | ] 167 | } 168 | }, 169 | "/api/origami/p/{project_id}/files": { 170 | "get": { 171 | "summary": "Get Project Files", 172 | "description": "Get a list of files in the project.", 173 | "operationId": "get_project_files", 174 | "parameters": [ 175 | { 176 | "required": true, 177 | "schema": { 178 | "title": "Project Id", 179 | "type": "string", 180 | "format": "uuid" 181 | }, 182 | "name": "project_id", 183 | "in": "path" 184 | }, 185 | { 186 | "required": false, 187 | "schema": { 188 | "title": "Filename Contains", 189 | "type": "string", 190 | "default": "" 191 | }, 192 | "name": "filename_contains", 193 | "in": "query" 194 | }, 195 | { 196 | "required": false, 197 | "schema": { 198 | "title": "File Limit", 199 | "type": "integer", 200 | "default": 20 201 | }, 202 | "name": "file_limit", 203 | "in": "query" 204 | }, 205 | { 206 | "required": false, 207 | "schema": { 208 | "title": "Sort By", 209 | "type": "string", 210 | "default": "updated_at" 211 | }, 212 | "name": "sort_by", 213 | "in": "query" 214 | }, 215 | { 216 | "required": false, 217 | "schema": { 218 | "title": "Sort Order", 219 | "enum": [ 220 | "ascending", 221 | "descending" 222 | ], 223 | "type": "string", 224 | "default": "descending" 225 | }, 226 | "name": "sort_order", 227 | "in": "query" 228 | } 229 | ], 230 | "responses": { 231 | "200": { 232 | "description": "Successful Response", 233 | "content": { 234 | "application/json": { 235 | "schema": {} 236 | } 237 | } 238 | }, 239 | "422": { 240 | "description": "Validation Error", 241 | "content": { 242 | "application/json": { 243 | "schema": { 244 | "$ref": "#/components/schemas/HTTPValidationError" 245 | } 246 | } 247 | } 248 | } 249 | }, 250 | "security": [ 251 | { 252 | "HTTPBearer": [] 253 | } 254 | ] 255 | } 256 | }, 257 | "/api/origami/f/": { 258 | "post": { 259 | "summary": "Create Notebook", 260 | "description": "Creates a new notebook. 
If no project ID is provided, the user's default project will be used.", 261 | "operationId": "create_notebook", 262 | "requestBody": { 263 | "content": { 264 | "application/json": { 265 | "schema": { 266 | "$ref": "#/components/schemas/CreateNotebookRequest" 267 | } 268 | } 269 | }, 270 | "required": true 271 | }, 272 | "responses": { 273 | "200": { 274 | "description": "Successful Response", 275 | "content": { 276 | "application/json": { 277 | "schema": {} 278 | } 279 | } 280 | }, 281 | "422": { 282 | "description": "Validation Error", 283 | "content": { 284 | "application/json": { 285 | "schema": { 286 | "$ref": "#/components/schemas/HTTPValidationError" 287 | } 288 | } 289 | } 290 | } 291 | }, 292 | "security": [ 293 | { 294 | "HTTPBearer": [] 295 | } 296 | ] 297 | } 298 | }, 299 | "/api/origami/f/{file_id}": { 300 | "get": { 301 | "summary": "Get Notebook", 302 | "description": "Get a summary of a notebook. This includes the list of cell IDs that make up\nthe notebook document, the current kernel state, and the notebook's name.", 303 | "operationId": "get_notebook", 304 | "parameters": [ 305 | { 306 | "required": true, 307 | "schema": { 308 | "title": "File Id", 309 | "type": "string", 310 | "format": "uuid" 311 | }, 312 | "name": "file_id", 313 | "in": "path" 314 | } 315 | ], 316 | "responses": { 317 | "200": { 318 | "description": "Successful Response" 319 | }, 320 | "422": { 321 | "description": "Validation Error", 322 | "content": { 323 | "application/json": { 324 | "schema": { 325 | "$ref": "#/components/schemas/HTTPValidationError" 326 | } 327 | } 328 | } 329 | } 330 | }, 331 | "security": [ 332 | { 333 | "HTTPBearer": [] 334 | } 335 | ] 336 | } 337 | }, 338 | "/api/origami/f/{file_id}/datasources": { 339 | "get": { 340 | "summary": "Get Datasources", 341 | "description": "Get the databases for a notebook.", 342 | "operationId": "get_datasources", 343 | "parameters": [ 344 | { 345 | "required": true, 346 | "schema": { 347 | "title": "File Id", 348 | "type": "string", 349 | "format": "uuid" 350 | }, 351 | "name": "file_id", 352 | "in": "path" 353 | } 354 | ], 355 | "responses": { 356 | "200": { 357 | "description": "Successful Response", 358 | "content": { 359 | "application/json": { 360 | "schema": { 361 | "title": "Response Get Datasources", 362 | "type": "array", 363 | "items": { 364 | "$ref": "#/components/schemas/DataSource" 365 | } 366 | } 367 | } 368 | } 369 | }, 370 | "422": { 371 | "description": "Validation Error", 372 | "content": { 373 | "application/json": { 374 | "schema": { 375 | "$ref": "#/components/schemas/HTTPValidationError" 376 | } 377 | } 378 | } 379 | } 380 | }, 381 | "security": [ 382 | { 383 | "HTTPBearer": [] 384 | } 385 | ] 386 | } 387 | }, 388 | "/api/origami/f/{file_id}/run_multiple_cells": { 389 | "post": { 390 | "summary": "Run Multiple Cells", 391 | "description": "Execute multiple cells in a Notebook.", 392 | "operationId": "run_multiple_cells", 393 | "parameters": [ 394 | { 395 | "required": true, 396 | "schema": { 397 | "title": "File Id", 398 | "type": "string", 399 | "format": "uuid" 400 | }, 401 | "name": "file_id", 402 | "in": "path" 403 | } 404 | ], 405 | "requestBody": { 406 | "content": { 407 | "application/json": { 408 | "schema": { 409 | "$ref": "#/components/schemas/ExecuteCellsRequest" 410 | } 411 | } 412 | }, 413 | "required": true 414 | }, 415 | "responses": { 416 | "204": { 417 | "description": "Successful Response" 418 | }, 419 | "422": { 420 | "description": "Validation Error", 421 | "content": { 422 | "application/json": { 423 | 
"schema": { 424 | "$ref": "#/components/schemas/HTTPValidationError" 425 | } 426 | } 427 | } 428 | } 429 | }, 430 | "security": [ 431 | { 432 | "HTTPBearer": [] 433 | } 434 | ] 435 | } 436 | }, 437 | "/api/origami/f/{file_id}/kernel": { 438 | "delete": { 439 | "summary": "Shutdown Kernel From File", 440 | "description": "Shutdown the kernel for a notebook.", 441 | "operationId": "shutdown_kernel_from_file", 442 | "parameters": [ 443 | { 444 | "required": true, 445 | "schema": { 446 | "title": "File Id", 447 | "type": "string", 448 | "format": "uuid" 449 | }, 450 | "name": "file_id", 451 | "in": "path" 452 | } 453 | ], 454 | "responses": { 455 | "200": { 456 | "description": "Successful Response", 457 | "content": { 458 | "application/json": { 459 | "schema": {} 460 | } 461 | } 462 | }, 463 | "422": { 464 | "description": "Validation Error", 465 | "content": { 466 | "application/json": { 467 | "schema": { 468 | "$ref": "#/components/schemas/HTTPValidationError" 469 | } 470 | } 471 | } 472 | } 473 | }, 474 | "security": [ 475 | { 476 | "HTTPBearer": [] 477 | } 478 | ] 479 | } 480 | }, 481 | "/api/origami/f/{file_id}/c/{cell_id}": { 482 | "get": { 483 | "summary": "Get Cell", 484 | "description": "Return Cell model details", 485 | "operationId": "get_cell", 486 | "parameters": [ 487 | { 488 | "required": true, 489 | "schema": { 490 | "title": "File Id", 491 | "type": "string", 492 | "format": "uuid" 493 | }, 494 | "name": "file_id", 495 | "in": "path" 496 | }, 497 | { 498 | "required": true, 499 | "schema": { 500 | "title": "Cell Id", 501 | "type": "string" 502 | }, 503 | "name": "cell_id", 504 | "in": "path" 505 | } 506 | ], 507 | "responses": { 508 | "200": { 509 | "description": "Successful Response", 510 | "content": { 511 | "application/json": { 512 | "schema": { 513 | "title": "Response Get Cell", 514 | "type": "object" 515 | } 516 | } 517 | } 518 | }, 519 | "422": { 520 | "description": "Validation Error", 521 | "content": { 522 | "application/json": { 523 | "schema": { 524 | "$ref": "#/components/schemas/HTTPValidationError" 525 | } 526 | } 527 | } 528 | } 529 | }, 530 | "security": [ 531 | { 532 | "HTTPBearer": [] 533 | } 534 | ] 535 | }, 536 | "put": { 537 | "summary": "Update Cell", 538 | "description": "Replace the source code of a cell.", 539 | "operationId": "update_cell", 540 | "parameters": [ 541 | { 542 | "required": true, 543 | "schema": { 544 | "title": "File Id", 545 | "type": "string", 546 | "format": "uuid" 547 | }, 548 | "name": "file_id", 549 | "in": "path" 550 | }, 551 | { 552 | "required": true, 553 | "schema": { 554 | "title": "Cell Id", 555 | "type": "string" 556 | }, 557 | "name": "cell_id", 558 | "in": "path" 559 | } 560 | ], 561 | "requestBody": { 562 | "content": { 563 | "application/json": { 564 | "schema": { 565 | "$ref": "#/components/schemas/UpdateCellRequest" 566 | } 567 | } 568 | }, 569 | "required": true 570 | }, 571 | "responses": { 572 | "204": { 573 | "description": "Successful Response" 574 | }, 575 | "422": { 576 | "description": "Validation Error", 577 | "content": { 578 | "application/json": { 579 | "schema": { 580 | "$ref": "#/components/schemas/HTTPValidationError" 581 | } 582 | } 583 | } 584 | } 585 | }, 586 | "security": [ 587 | { 588 | "HTTPBearer": [] 589 | } 590 | ] 591 | }, 592 | "post": { 593 | "summary": "Change Cell Type", 594 | "description": "Endpoint to allow updating the type of a cell. 
Currently only supports changing\nbetween Code, Markdown, and SQL cells.", 595 | "operationId": "change_cell_type", 596 | "parameters": [ 597 | { 598 | "required": true, 599 | "schema": { 600 | "title": "File Id", 601 | "type": "string", 602 | "format": "uuid" 603 | }, 604 | "name": "file_id", 605 | "in": "path" 606 | }, 607 | { 608 | "required": true, 609 | "schema": { 610 | "title": "Cell Id", 611 | "type": "string" 612 | }, 613 | "name": "cell_id", 614 | "in": "path" 615 | } 616 | ], 617 | "requestBody": { 618 | "content": { 619 | "application/json": { 620 | "schema": { 621 | "$ref": "#/components/schemas/ChangeCellTypeRequest" 622 | } 623 | } 624 | }, 625 | "required": true 626 | }, 627 | "responses": { 628 | "204": { 629 | "description": "Successful Response" 630 | }, 631 | "422": { 632 | "description": "Validation Error", 633 | "content": { 634 | "application/json": { 635 | "schema": { 636 | "$ref": "#/components/schemas/HTTPValidationError" 637 | } 638 | } 639 | } 640 | } 641 | }, 642 | "security": [ 643 | { 644 | "HTTPBearer": [] 645 | } 646 | ] 647 | } 648 | }, 649 | "/api/origami/f/{file_id}/c": { 650 | "post": { 651 | "summary": "Create Cell", 652 | "description": "Create a code or markdown cell.", 653 | "operationId": "create_cell", 654 | "parameters": [ 655 | { 656 | "required": true, 657 | "schema": { 658 | "title": "File Id", 659 | "type": "string", 660 | "format": "uuid" 661 | }, 662 | "name": "file_id", 663 | "in": "path" 664 | } 665 | ], 666 | "requestBody": { 667 | "content": { 668 | "application/json": { 669 | "schema": { 670 | "$ref": "#/components/schemas/CreateCellRequest" 671 | } 672 | } 673 | }, 674 | "required": true 675 | }, 676 | "responses": { 677 | "200": { 678 | "description": "Successful Response", 679 | "content": { 680 | "application/json": { 681 | "schema": { 682 | "title": "Response Create Cell", 683 | "type": "object" 684 | } 685 | } 686 | } 687 | }, 688 | "422": { 689 | "description": "Validation Error", 690 | "content": { 691 | "application/json": { 692 | "schema": { 693 | "$ref": "#/components/schemas/HTTPValidationError" 694 | } 695 | } 696 | } 697 | } 698 | }, 699 | "security": [ 700 | { 701 | "HTTPBearer": [] 702 | } 703 | ] 704 | } 705 | }, 706 | "/api/origami/f/{file_id}/c/{cell_id}/run": { 707 | "post": { 708 | "summary": "Run Cell", 709 | "description": "Run a Cell within a Notebook by ID.", 710 | "operationId": "run_cell", 711 | "parameters": [ 712 | { 713 | "required": true, 714 | "schema": { 715 | "title": "File Id", 716 | "type": "string", 717 | "format": "uuid" 718 | }, 719 | "name": "file_id", 720 | "in": "path" 721 | }, 722 | { 723 | "required": true, 724 | "schema": { 725 | "title": "Cell Id", 726 | "type": "string" 727 | }, 728 | "name": "cell_id", 729 | "in": "path" 730 | } 731 | ], 732 | "responses": { 733 | "200": { 734 | "description": "Successful Response", 735 | "content": { 736 | "application/json": { 737 | "schema": {} 738 | } 739 | } 740 | }, 741 | "422": { 742 | "description": "Validation Error", 743 | "content": { 744 | "application/json": { 745 | "schema": { 746 | "$ref": "#/components/schemas/HTTPValidationError" 747 | } 748 | } 749 | } 750 | } 751 | }, 752 | "security": [ 753 | { 754 | "HTTPBearer": [] 755 | } 756 | ] 757 | } 758 | }, 759 | "/api/origami/u/me": { 760 | "get": { 761 | "summary": "Get User Info", 762 | "description": "Get details of the Plugin user's Noteable account information.\n - useful when debugging permissions issues", 763 | "operationId": "get_user_info", 764 | "responses": { 765 | "200": { 766 | 
"description": "Successful Response", 767 | "content": { 768 | "application/json": { 769 | "schema": {} 770 | } 771 | } 772 | } 773 | }, 774 | "security": [ 775 | { 776 | "HTTPBearer": [] 777 | } 778 | ] 779 | } 780 | }, 781 | "/api/origami/k/": { 782 | "get": { 783 | "summary": "Get Active Kernel Sessions", 784 | "description": "Returns a list of the user's active kernel sessions.", 785 | "operationId": "get_active_kernel_sessions", 786 | "responses": { 787 | "200": { 788 | "description": "Successful Response", 789 | "content": { 790 | "application/json": { 791 | "schema": {} 792 | } 793 | } 794 | } 795 | }, 796 | "security": [ 797 | { 798 | "HTTPBearer": [] 799 | } 800 | ] 801 | } 802 | }, 803 | "/api/origami/k/{kernel_session_id}": { 804 | "delete": { 805 | "summary": "Shutdown Kernel", 806 | "description": "Shutdown the kernel for a notebook.", 807 | "operationId": "shutdown_kernel", 808 | "parameters": [ 809 | { 810 | "required": true, 811 | "schema": { 812 | "title": "Kernel Session Id", 813 | "type": "string", 814 | "format": "uuid" 815 | }, 816 | "name": "kernel_session_id", 817 | "in": "path" 818 | } 819 | ], 820 | "responses": { 821 | "200": { 822 | "description": "Successful Response", 823 | "content": { 824 | "application/json": { 825 | "schema": {} 826 | } 827 | } 828 | }, 829 | "422": { 830 | "description": "Validation Error", 831 | "content": { 832 | "application/json": { 833 | "schema": { 834 | "$ref": "#/components/schemas/HTTPValidationError" 835 | } 836 | } 837 | } 838 | } 839 | }, 840 | "security": [ 841 | { 842 | "HTTPBearer": [] 843 | } 844 | ] 845 | } 846 | } 847 | }, 848 | "components": { 849 | "schemas": { 850 | "ChangeCellTypeRequest": { 851 | "title": "ChangeCellTypeRequest", 852 | "required": [ 853 | "cell_type" 854 | ], 855 | "type": "object", 856 | "properties": { 857 | "cell_type": { 858 | "title": "Cell Type", 859 | "enum": [ 860 | "code", 861 | "markdown", 862 | "sql" 863 | ], 864 | "type": "string" 865 | }, 866 | "db_connection": { 867 | "title": "Db Connection", 868 | "type": "string" 869 | }, 870 | "assign_results_to": { 871 | "title": "Assign Results To", 872 | "type": "string" 873 | } 874 | } 875 | }, 876 | "CreateCellRequest": { 877 | "title": "CreateCellRequest", 878 | "type": "object", 879 | "properties": { 880 | "cell_id": { 881 | "title": "Cell Id", 882 | "type": "string" 883 | }, 884 | "cell_type": { 885 | "title": "Cell Type", 886 | "enum": [ 887 | "code", 888 | "markdown", 889 | "sql" 890 | ], 891 | "type": "string", 892 | "description": "The type of cell to create.", 893 | "default": "code" 894 | }, 895 | "and_run": { 896 | "title": "Run Cell", 897 | "type": "boolean", 898 | "description": "Whether to run the cell after creating it. Only applies to code and sql cells.", 899 | "default": false 900 | }, 901 | "source": { 902 | "title": "Source", 903 | "type": "array", 904 | "items": { 905 | "type": "string" 906 | }, 907 | "description": "Lines of source code to place in the cell." 908 | }, 909 | "after_cell_id": { 910 | "title": "After Cell ID", 911 | "type": "string", 912 | "description": "The ID of the cell to insert this one after. If null, it'll be added to the end of the notebook." 
913 | }, 914 | "datasource_id": { 915 | "title": "SQL cell datasource ID", 916 | "type": "string", 917 | "description": "datasource_id to run SQL against if this is a SQL cell" 918 | }, 919 | "assign_results_to": { 920 | "title": "SQL cell results variable name", 921 | "type": "string", 922 | "description": "The variable name to assign SQL query results (as a Dataframe)" 923 | } 924 | } 925 | }, 926 | "CreateNotebookRequest": { 927 | "title": "CreateNotebookRequest", 928 | "type": "object", 929 | "properties": { 930 | "project_id": { 931 | "title": "Project ID", 932 | "type": "string", 933 | "description": "The ID of the project to create the notebook in. Will default to the user's default project if not provided.", 934 | "format": "uuid" 935 | }, 936 | "notebook_name": { 937 | "title": "Notebook Name", 938 | "type": "string", 939 | "description": "The name of the notebook to create. Must end with .ipynb file extension." 940 | }, 941 | "start_kernel": { 942 | "title": "Start Kernel", 943 | "type": "boolean", 944 | "description": "Whether to start the kernel after creating the notebook.", 945 | "default": true 946 | } 947 | } 948 | }, 949 | "CreateProjectOptions": { 950 | "title": "CreateProjectOptions", 951 | "required": [ 952 | "name" 953 | ], 954 | "type": "object", 955 | "properties": { 956 | "name": { 957 | "title": "Name", 958 | "type": "string" 959 | }, 960 | "description": { 961 | "title": "Description", 962 | "type": "string" 963 | }, 964 | "git_url": { 965 | "title": "Git Url", 966 | "type": "string" 967 | } 968 | } 969 | }, 970 | "DataSource": { 971 | "title": "DataSource", 972 | "required": [ 973 | "name", 974 | "description", 975 | "type_id", 976 | "sql_cell_handle" 977 | ], 978 | "type": "object", 979 | "properties": { 980 | "name": { 981 | "title": "Data Source Name", 982 | "type": "string", 983 | "description": "Name of the data source." 984 | }, 985 | "description": { 986 | "title": "Data Source Description", 987 | "type": "string", 988 | "description": "The description of the data source." 
989 | }, 990 | "type_id": { 991 | "title": "Type of database", 992 | "type": "string", 993 | "description": "Type of Data Source" 994 | }, 995 | "sql_cell_handle": { 996 | "title": "db_connection string", 997 | "type": "string", 998 | "description": "db_connection in the Noteable cell metadata for the database" 999 | } 1000 | } 1001 | }, 1002 | "ExecuteCellsRequest": { 1003 | "title": "ExecuteCellsRequest", 1004 | "type": "object", 1005 | "properties": { 1006 | "ids": { 1007 | "title": "Ids", 1008 | "type": "array", 1009 | "items": { 1010 | "type": "string" 1011 | } 1012 | }, 1013 | "before_id": { 1014 | "title": "Before Id", 1015 | "type": "string" 1016 | }, 1017 | "after_id": { 1018 | "title": "After Id", 1019 | "type": "string" 1020 | }, 1021 | "all": { 1022 | "title": "All", 1023 | "type": "boolean" 1024 | } 1025 | } 1026 | }, 1027 | "HTTPValidationError": { 1028 | "title": "HTTPValidationError", 1029 | "type": "object", 1030 | "properties": { 1031 | "detail": { 1032 | "title": "Detail", 1033 | "type": "array", 1034 | "items": { 1035 | "$ref": "#/components/schemas/ValidationError" 1036 | } 1037 | } 1038 | } 1039 | }, 1040 | "UpdateCellRequest": { 1041 | "title": "UpdateCellRequest", 1042 | "type": "object", 1043 | "properties": { 1044 | "source": { 1045 | "title": "Source", 1046 | "type": "array", 1047 | "items": { 1048 | "type": "string" 1049 | }, 1050 | "description": "Lines of source code to replace the cell with.", 1051 | "default": [] 1052 | }, 1053 | "and_run": { 1054 | "title": "Run Cell", 1055 | "type": "boolean", 1056 | "description": "Whether to run the cell after updating it. Only applies to code and sql cells.", 1057 | "default": false 1058 | } 1059 | } 1060 | }, 1061 | "ValidationError": { 1062 | "title": "ValidationError", 1063 | "required": [ 1064 | "loc", 1065 | "msg", 1066 | "type" 1067 | ], 1068 | "type": "object", 1069 | "properties": { 1070 | "loc": { 1071 | "title": "Location", 1072 | "type": "array", 1073 | "items": { 1074 | "anyOf": [ 1075 | { 1076 | "type": "string" 1077 | }, 1078 | { 1079 | "type": "integer" 1080 | } 1081 | ] 1082 | } 1083 | }, 1084 | "msg": { 1085 | "title": "Message", 1086 | "type": "string" 1087 | }, 1088 | "type": { 1089 | "title": "Error Type", 1090 | "type": "string" 1091 | } 1092 | } 1093 | } 1094 | }, 1095 | "securitySchemes": { 1096 | "HTTPBearer": { 1097 | "type": "http", 1098 | "scheme": "bearer" 1099 | } 1100 | } 1101 | } 1102 | } -------------------------------------------------------------------------------- /noteable_openapi.yaml: -------------------------------------------------------------------------------- 1 | components: 2 | schemas: 3 | ChangeCellTypeRequest: 4 | properties: 5 | assign_results_to: 6 | title: Assign Results To 7 | type: string 8 | cell_type: 9 | enum: 10 | - code 11 | - markdown 12 | - sql 13 | title: Cell Type 14 | type: string 15 | db_connection: 16 | title: Db Connection 17 | type: string 18 | required: 19 | - cell_type 20 | title: ChangeCellTypeRequest 21 | type: object 22 | CreateCellRequest: 23 | properties: 24 | after_cell_id: 25 | description: The ID of the cell to insert this one after. If null, it'll 26 | be added to the end of the notebook. 27 | title: After Cell ID 28 | type: string 29 | and_run: 30 | default: false 31 | description: Whether to run the cell after creating it. Only applies to 32 | code and sql cells. 
33 | title: Run Cell 34 | type: boolean 35 | assign_results_to: 36 | description: The variable name to assign SQL query results (as a Dataframe) 37 | title: SQL cell results variable name 38 | type: string 39 | cell_id: 40 | title: Cell Id 41 | type: string 42 | cell_type: 43 | default: code 44 | description: The type of cell to create. 45 | enum: 46 | - code 47 | - markdown 48 | - sql 49 | title: Cell Type 50 | type: string 51 | datasource_id: 52 | description: datasource_id to run SQL against if this is a SQL cell 53 | title: SQL cell datasource ID 54 | type: string 55 | source: 56 | description: Lines of source code to place in the cell. 57 | items: 58 | type: string 59 | title: Source 60 | type: array 61 | title: CreateCellRequest 62 | type: object 63 | CreateNotebookRequest: 64 | properties: 65 | notebook_name: 66 | description: The name of the notebook to create. Must end with .ipynb file 67 | extension. 68 | title: Notebook Name 69 | type: string 70 | project_id: 71 | description: The ID of the project to create the notebook in. Will default 72 | to the user's default project if not provided. 73 | format: uuid 74 | title: Project ID 75 | type: string 76 | start_kernel: 77 | default: true 78 | description: Whether to start the kernel after creating the notebook. 79 | title: Start Kernel 80 | type: boolean 81 | title: CreateNotebookRequest 82 | type: object 83 | CreateProjectOptions: 84 | properties: 85 | description: 86 | title: Description 87 | type: string 88 | git_url: 89 | title: Git Url 90 | type: string 91 | name: 92 | title: Name 93 | type: string 94 | required: 95 | - name 96 | title: CreateProjectOptions 97 | type: object 98 | DataSource: 99 | properties: 100 | description: 101 | description: The description of the data source. 102 | title: Data Source Description 103 | type: string 104 | name: 105 | description: Name of the data source. 106 | title: Data Source Name 107 | type: string 108 | sql_cell_handle: 109 | description: db_connection in the Noteable cell metadata for the database 110 | title: db_connection string 111 | type: string 112 | type_id: 113 | description: Type of Data Source 114 | title: Type of database 115 | type: string 116 | required: 117 | - name 118 | - description 119 | - type_id 120 | - sql_cell_handle 121 | title: DataSource 122 | type: object 123 | ExecuteCellsRequest: 124 | properties: 125 | after_id: 126 | title: After Id 127 | type: string 128 | all: 129 | title: All 130 | type: boolean 131 | before_id: 132 | title: Before Id 133 | type: string 134 | ids: 135 | items: 136 | type: string 137 | title: Ids 138 | type: array 139 | title: ExecuteCellsRequest 140 | type: object 141 | HTTPValidationError: 142 | properties: 143 | detail: 144 | items: 145 | $ref: '#/components/schemas/ValidationError' 146 | title: Detail 147 | type: array 148 | title: HTTPValidationError 149 | type: object 150 | UpdateCellRequest: 151 | properties: 152 | and_run: 153 | default: false 154 | description: Whether to run the cell after updating it. Only applies to 155 | code and sql cells. 156 | title: Run Cell 157 | type: boolean 158 | source: 159 | default: [] 160 | description: Lines of source code to replace the cell with. 
161 | items: 162 | type: string 163 | title: Source 164 | type: array 165 | title: UpdateCellRequest 166 | type: object 167 | ValidationError: 168 | properties: 169 | loc: 170 | items: 171 | anyOf: 172 | - type: string 173 | - type: integer 174 | title: Location 175 | type: array 176 | msg: 177 | title: Message 178 | type: string 179 | type: 180 | title: Error Type 181 | type: string 182 | required: 183 | - loc 184 | - msg 185 | - type 186 | title: ValidationError 187 | type: object 188 | securitySchemes: 189 | HTTPBearer: 190 | scheme: bearer 191 | type: http 192 | info: 193 | description: ChatGPT Plugin server for creating computational notebooks (in Python!), 194 | allowing you to execute code, explore data, and visualize results. 195 | title: Origamist 196 | version: 0.25.1 197 | openapi: 3.0.2 198 | paths: 199 | /api/origami/f/: 200 | post: 201 | description: Creates a new notebook. If no project ID is provided, the user's 202 | default project will be used. 203 | operationId: create_notebook 204 | requestBody: 205 | content: 206 | application/json: 207 | schema: 208 | $ref: '#/components/schemas/CreateNotebookRequest' 209 | required: true 210 | responses: 211 | '200': 212 | content: 213 | application/json: 214 | schema: {} 215 | description: Successful Response 216 | '422': 217 | content: 218 | application/json: 219 | schema: 220 | $ref: '#/components/schemas/HTTPValidationError' 221 | description: Validation Error 222 | security: 223 | - HTTPBearer: [] 224 | summary: Create Notebook 225 | /api/origami/f/{file_id}: 226 | get: 227 | description: 'Get a summary of a notebook. This includes the list of cell IDs 228 | that make up 229 | 230 | the notebook document, the current kernel state, and the notebook''s name.' 231 | operationId: get_notebook 232 | parameters: 233 | - in: path 234 | name: file_id 235 | required: true 236 | schema: 237 | format: uuid 238 | title: File Id 239 | type: string 240 | responses: 241 | '200': 242 | description: Successful Response 243 | '422': 244 | content: 245 | application/json: 246 | schema: 247 | $ref: '#/components/schemas/HTTPValidationError' 248 | description: Validation Error 249 | security: 250 | - HTTPBearer: [] 251 | summary: Get Notebook 252 | /api/origami/f/{file_id}/c: 253 | post: 254 | description: Create a code or markdown cell. 
255 | operationId: create_cell 256 | parameters: 257 | - in: path 258 | name: file_id 259 | required: true 260 | schema: 261 | format: uuid 262 | title: File Id 263 | type: string 264 | requestBody: 265 | content: 266 | application/json: 267 | schema: 268 | $ref: '#/components/schemas/CreateCellRequest' 269 | required: true 270 | responses: 271 | '200': 272 | content: 273 | application/json: 274 | schema: 275 | title: Response Create Cell 276 | type: object 277 | description: Successful Response 278 | '422': 279 | content: 280 | application/json: 281 | schema: 282 | $ref: '#/components/schemas/HTTPValidationError' 283 | description: Validation Error 284 | security: 285 | - HTTPBearer: [] 286 | summary: Create Cell 287 | /api/origami/f/{file_id}/c/{cell_id}: 288 | get: 289 | description: Return Cell model details 290 | operationId: get_cell 291 | parameters: 292 | - in: path 293 | name: file_id 294 | required: true 295 | schema: 296 | format: uuid 297 | title: File Id 298 | type: string 299 | - in: path 300 | name: cell_id 301 | required: true 302 | schema: 303 | title: Cell Id 304 | type: string 305 | responses: 306 | '200': 307 | content: 308 | application/json: 309 | schema: 310 | title: Response Get Cell 311 | type: object 312 | description: Successful Response 313 | '422': 314 | content: 315 | application/json: 316 | schema: 317 | $ref: '#/components/schemas/HTTPValidationError' 318 | description: Validation Error 319 | security: 320 | - HTTPBearer: [] 321 | summary: Get Cell 322 | post: 323 | description: 'Endpoint to allow updating the type of a cell. Currently only 324 | supports changing 325 | 326 | between Code, Markdown, and SQL cells.' 327 | operationId: change_cell_type 328 | parameters: 329 | - in: path 330 | name: file_id 331 | required: true 332 | schema: 333 | format: uuid 334 | title: File Id 335 | type: string 336 | - in: path 337 | name: cell_id 338 | required: true 339 | schema: 340 | title: Cell Id 341 | type: string 342 | requestBody: 343 | content: 344 | application/json: 345 | schema: 346 | $ref: '#/components/schemas/ChangeCellTypeRequest' 347 | required: true 348 | responses: 349 | '204': 350 | description: Successful Response 351 | '422': 352 | content: 353 | application/json: 354 | schema: 355 | $ref: '#/components/schemas/HTTPValidationError' 356 | description: Validation Error 357 | security: 358 | - HTTPBearer: [] 359 | summary: Change Cell Type 360 | put: 361 | description: Replace the source code of a cell. 362 | operationId: update_cell 363 | parameters: 364 | - in: path 365 | name: file_id 366 | required: true 367 | schema: 368 | format: uuid 369 | title: File Id 370 | type: string 371 | - in: path 372 | name: cell_id 373 | required: true 374 | schema: 375 | title: Cell Id 376 | type: string 377 | requestBody: 378 | content: 379 | application/json: 380 | schema: 381 | $ref: '#/components/schemas/UpdateCellRequest' 382 | required: true 383 | responses: 384 | '204': 385 | description: Successful Response 386 | '422': 387 | content: 388 | application/json: 389 | schema: 390 | $ref: '#/components/schemas/HTTPValidationError' 391 | description: Validation Error 392 | security: 393 | - HTTPBearer: [] 394 | summary: Update Cell 395 | /api/origami/f/{file_id}/c/{cell_id}/run: 396 | post: 397 | description: Run a Cell within a Notebook by ID. 
398 | operationId: run_cell 399 | parameters: 400 | - in: path 401 | name: file_id 402 | required: true 403 | schema: 404 | format: uuid 405 | title: File Id 406 | type: string 407 | - in: path 408 | name: cell_id 409 | required: true 410 | schema: 411 | title: Cell Id 412 | type: string 413 | responses: 414 | '200': 415 | content: 416 | application/json: 417 | schema: {} 418 | description: Successful Response 419 | '422': 420 | content: 421 | application/json: 422 | schema: 423 | $ref: '#/components/schemas/HTTPValidationError' 424 | description: Validation Error 425 | security: 426 | - HTTPBearer: [] 427 | summary: Run Cell 428 | /api/origami/f/{file_id}/datasources: 429 | get: 430 | description: Get the databases for a notebook. 431 | operationId: get_datasources 432 | parameters: 433 | - in: path 434 | name: file_id 435 | required: true 436 | schema: 437 | format: uuid 438 | title: File Id 439 | type: string 440 | responses: 441 | '200': 442 | content: 443 | application/json: 444 | schema: 445 | items: 446 | $ref: '#/components/schemas/DataSource' 447 | title: Response Get Datasources 448 | type: array 449 | description: Successful Response 450 | '422': 451 | content: 452 | application/json: 453 | schema: 454 | $ref: '#/components/schemas/HTTPValidationError' 455 | description: Validation Error 456 | security: 457 | - HTTPBearer: [] 458 | summary: Get Datasources 459 | /api/origami/f/{file_id}/kernel: 460 | delete: 461 | description: Shutdown the kernel for a notebook. 462 | operationId: shutdown_kernel_from_file 463 | parameters: 464 | - in: path 465 | name: file_id 466 | required: true 467 | schema: 468 | format: uuid 469 | title: File Id 470 | type: string 471 | responses: 472 | '200': 473 | content: 474 | application/json: 475 | schema: {} 476 | description: Successful Response 477 | '422': 478 | content: 479 | application/json: 480 | schema: 481 | $ref: '#/components/schemas/HTTPValidationError' 482 | description: Validation Error 483 | security: 484 | - HTTPBearer: [] 485 | summary: Shutdown Kernel From File 486 | /api/origami/f/{file_id}/run_multiple_cells: 487 | post: 488 | description: Execute multiple cells in a Notebook. 489 | operationId: run_multiple_cells 490 | parameters: 491 | - in: path 492 | name: file_id 493 | required: true 494 | schema: 495 | format: uuid 496 | title: File Id 497 | type: string 498 | requestBody: 499 | content: 500 | application/json: 501 | schema: 502 | $ref: '#/components/schemas/ExecuteCellsRequest' 503 | required: true 504 | responses: 505 | '204': 506 | description: Successful Response 507 | '422': 508 | content: 509 | application/json: 510 | schema: 511 | $ref: '#/components/schemas/HTTPValidationError' 512 | description: Validation Error 513 | security: 514 | - HTTPBearer: [] 515 | summary: Run Multiple Cells 516 | /api/origami/k/: 517 | get: 518 | description: Returns a list of the user's active kernel sessions. 519 | operationId: get_active_kernel_sessions 520 | responses: 521 | '200': 522 | content: 523 | application/json: 524 | schema: {} 525 | description: Successful Response 526 | security: 527 | - HTTPBearer: [] 528 | summary: Get Active Kernel Sessions 529 | /api/origami/k/{kernel_session_id}: 530 | delete: 531 | description: Shutdown the kernel for a notebook. 
532 | operationId: shutdown_kernel 533 | parameters: 534 | - in: path 535 | name: kernel_session_id 536 | required: true 537 | schema: 538 | format: uuid 539 | title: Kernel Session Id 540 | type: string 541 | responses: 542 | '200': 543 | content: 544 | application/json: 545 | schema: {} 546 | description: Successful Response 547 | '422': 548 | content: 549 | application/json: 550 | schema: 551 | $ref: '#/components/schemas/HTTPValidationError' 552 | description: Validation Error 553 | security: 554 | - HTTPBearer: [] 555 | summary: Shutdown Kernel 556 | /api/origami/p/: 557 | post: 558 | description: 'Create a new Project for the user in their default Space. Projects 559 | can be optionally created 560 | 561 | from a git repository, in which case users can sync changes from the Noteable 562 | UI. Private repos 563 | 564 | can be created from the Noteable UI but' 565 | operationId: create_project 566 | requestBody: 567 | content: 568 | application/json: 569 | schema: 570 | $ref: '#/components/schemas/CreateProjectOptions' 571 | required: true 572 | responses: 573 | '200': 574 | content: 575 | application/json: 576 | schema: {} 577 | description: Successful Response 578 | '422': 579 | content: 580 | application/json: 581 | schema: 582 | $ref: '#/components/schemas/HTTPValidationError' 583 | description: Validation Error 584 | security: 585 | - HTTPBearer: [] 586 | summary: Create New Project 587 | /api/origami/p/default: 588 | delete: 589 | description: Clear the user's default project. 590 | operationId: clear_default_project 591 | responses: 592 | '200': 593 | content: 594 | application/json: 595 | schema: 596 | title: Response Clear Default Project 597 | type: object 598 | description: Successful Response 599 | security: 600 | - HTTPBearer: [] 601 | summary: Delete Default Project 602 | get: 603 | description: Get the user's configured default project. 604 | operationId: get_default_project 605 | responses: 606 | '200': 607 | content: 608 | application/json: 609 | schema: {} 610 | description: Successful Response 611 | security: 612 | - HTTPBearer: [] 613 | summary: Get Default Project 614 | put: 615 | description: Configure the user's default project (by UUID) for new notebooks. 616 | operationId: set_default_project 617 | parameters: 618 | - in: query 619 | name: new_default_project_id 620 | required: true 621 | schema: 622 | format: uuid 623 | title: New Default Project Id 624 | type: string 625 | responses: 626 | '200': 627 | content: 628 | application/json: 629 | schema: 630 | title: Response Set Default Project 631 | type: object 632 | description: Successful Response 633 | '422': 634 | content: 635 | application/json: 636 | schema: 637 | $ref: '#/components/schemas/HTTPValidationError' 638 | description: Validation Error 639 | security: 640 | - HTTPBearer: [] 641 | summary: Set Default Project 642 | /api/origami/p/{project_id}/files: 643 | get: 644 | description: Get a list of files in the project. 
645 | operationId: get_project_files 646 | parameters: 647 | - in: path 648 | name: project_id 649 | required: true 650 | schema: 651 | format: uuid 652 | title: Project Id 653 | type: string 654 | - in: query 655 | name: filename_contains 656 | required: false 657 | schema: 658 | default: '' 659 | title: Filename Contains 660 | type: string 661 | - in: query 662 | name: file_limit 663 | required: false 664 | schema: 665 | default: 20 666 | title: File Limit 667 | type: integer 668 | - in: query 669 | name: sort_by 670 | required: false 671 | schema: 672 | default: updated_at 673 | title: Sort By 674 | type: string 675 | - in: query 676 | name: sort_order 677 | required: false 678 | schema: 679 | default: descending 680 | enum: 681 | - ascending 682 | - descending 683 | title: Sort Order 684 | type: string 685 | responses: 686 | '200': 687 | content: 688 | application/json: 689 | schema: {} 690 | description: Successful Response 691 | '422': 692 | content: 693 | application/json: 694 | schema: 695 | $ref: '#/components/schemas/HTTPValidationError' 696 | description: Validation Error 697 | security: 698 | - HTTPBearer: [] 699 | summary: Get Project Files 700 | /api/origami/s/default: 701 | get: 702 | description: Get the user's configured default Space for creating new Projects 703 | in. 704 | operationId: get_default_space 705 | responses: 706 | '200': 707 | content: 708 | application/json: 709 | schema: {} 710 | description: Successful Response 711 | security: 712 | - HTTPBearer: [] 713 | summary: Get Default Space 714 | /api/origami/u/me: 715 | get: 716 | description: "Get details of the Plugin user's Noteable account information.\n\ 717 | \ - useful when debugging permissions issues" 718 | operationId: get_user_info 719 | responses: 720 | '200': 721 | content: 722 | application/json: 723 | schema: {} 724 | description: Successful Response 725 | security: 726 | - HTTPBearer: [] 727 | summary: Get User Info 728 | servers: 729 | - description: Origamist server on https://chat.noteable.io 730 | url: https://chat.noteable.io 731 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi[all] 2 | openai 3 | pydantic==1.10.8 4 | pytest==6.2.5 5 | tree-of-thoughts 6 | langchain 7 | codeinterpreterapi -------------------------------------------------------------------------------- /routes/__init__.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter 2 | 3 | from .ai_utilities import ai_utilities_router 4 | from .authentication import authentication_router 5 | 6 | router = APIRouter() 7 | 8 | router.include_router(ai_utilities_router, prefix="/ai", tags=["AI Utilities"]) 9 | router.include_router(authentication_router, prefix="/auth", tags=["Authentication"]) -------------------------------------------------------------------------------- /routes/ai_utilities.py: -------------------------------------------------------------------------------- 1 | # routes\ai_utilities.py 2 | import json 3 | from fastapi import APIRouter, Depends, HTTPException 4 | from pydantic import BaseModel 5 | from typing import Dict 6 | from services.ai_services import AI_SERVICES 7 | from middlewares.authentication import authenticate 8 | 9 | ai_utilities_router = APIRouter() 10 | 11 | class RequestPayload(BaseModel): 12 | service: str 13 | input: str 14 | envs: Dict[str, str] 15 | 16 | class ApiResponse(BaseModel): 17 | 
result: str
18 |     error: str
19 |     stdout: str
20 | 
21 | def process_result(result):
22 |     """Split a service result into the response body and a stdout string.
23 | 
24 |     Dict results may carry an 'error' key (mapped to HTTP 400) and an
25 |     'output' key; any remaining keys are serialized into stdout.
26 |     """
27 |     if isinstance(result, dict):
28 |         if 'error' in result:
29 |             raise HTTPException(status_code=400, detail=result['error'])
30 |         stdout_dict = result.copy()
31 |         stdout_dict.pop('output', None)
32 |         stdout = json.dumps(stdout_dict)
33 |         result = result.get("output", "")
34 |     else:
35 |         stdout = result
36 |     return result, stdout
37 | 
38 | @ai_utilities_router.post("/ask", response_model=ApiResponse)
39 | async def ask(payload: RequestPayload, token: str = Depends(authenticate)):
40 |     service = payload.service.lower()
41 |     input_text = payload.input
42 |     envs = payload.envs
43 | 
44 |     if service not in AI_SERVICES:
45 |         raise HTTPException(status_code=400, detail="Invalid service requested")
46 | 
47 |     try:
48 |         result = await AI_SERVICES[service](input_text, envs)
49 |         result, stdout = process_result(result)
50 |     except HTTPException:
51 |         # Let deliberate HTTP errors (e.g. the 400 raised in process_result)
52 |         # propagate instead of being rewrapped as a 500.
53 |         raise
54 |     except Exception as e:
55 |         raise HTTPException(status_code=500, detail=str(e))
56 | 
57 |     return ApiResponse(result=result, error="", stdout=stdout)
58 | 
59 | # @ai_utilities_router.get("/plugins", response_model=ApiResponse)
60 | # async def plugins(token: str = Depends(authenticate)):
61 | #     try:
62 | #         result = await load_plugins()
63 | #         result, stdout = process_result(result)
64 | #     except Exception as e:
65 | #         raise HTTPException(status_code=500, detail=str(e))
66 | 
67 | #     return ApiResponse(result=result, error="", stdout=stdout)
68 | 
69 | 
-------------------------------------------------------------------------------- /routes/authentication.py: --------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pydantic import BaseModel
3 | from middlewares.authentication import authenticate
4 | 
5 | authentication_router = APIRouter()
6 | 
7 | class AuthRequest(BaseModel):
8 |     secret_key: str
9 | 
10 | @authentication_router.post("/authenticate")
11 | async def auth_endpoint(auth_request: AuthRequest) -> dict:
12 |     secret_key = auth_request.secret_key
13 |     is_authenticated = authenticate(secret_key)
14 | 
15 |     if is_authenticated:
16 |         return {"status": "success", "message": "Authenticated successfully"}
17 |     else:
18 |         raise HTTPException(status_code=401, detail="Invalid secret key")
-------------------------------------------------------------------------------- /services/ai_services.py: --------------------------------------------------------------------------------
1 | # services\ai_services.py
2 | from .ask_question import ask_question
3 | from .tree_of_thoughts import tree_of_thoughts
4 | from .api_chain import api_chain
5 | from .api_agent import api_agent
6 | from .nla_agent import nla_agent
7 | from .code_interpreter import code_interpreter
8 | 
9 | AI_SERVICES = {
10 |     "q&a": ask_question,
11 |     "nla_agent": nla_agent,
12 |     "code_interpreter": code_interpreter,
13 |     "api_agent": api_agent,
14 |     "api_chain": api_chain,
15 |     "tree_of_thoughts": tree_of_thoughts
16 | }
-------------------------------------------------------------------------------- /services/api_agent.py: --------------------------------------------------------------------------------
1 | # services\api_agent.py
2 | from typing import Dict
3 | from clients import get_openapi_agent
4 | from .utils import handle_exception, logger_stream_handler
5 | 
6 | async def api_agent(input_text: str, envs: Dict[str, str]) -> str:
7 |     try:
8 |         api_agent = get_openapi_agent(api_key=envs["OPENAI_API_KEY"], model_name="gpt-3.5-turbo", plugin_name=envs["plugin_name"])
envs["plugin_name"]) 9 | response = api_agent.run(input_text) 10 | logger_stream_handler(f"logger_stream_handler test: {response}") 11 | # logger.debug("api_agent: %s", response) 12 | return response 13 | except Exception as e: 14 | handle_exception(e, "api_agent") 15 | return "" 16 | -------------------------------------------------------------------------------- /services/api_chain.py: -------------------------------------------------------------------------------- 1 | # services\api_chain.py 2 | from typing import Dict 3 | from clients import get_openapi_chain 4 | from .utils import handle_exception, logger_stream_handler 5 | 6 | async def api_chain(input_text: str, envs: Dict[str, str]) -> str: 7 | try: 8 | api_chain = get_openapi_chain(api_key = envs["OPENAI_API_KEY"], model_name = "gpt-3.5-turbo") 9 | response = api_chain(input_text) 10 | logger_stream_handler(f"logger_stream_handler test: {response}") 11 | # logger.debug("api_chain: %s", response) 12 | return response 13 | except Exception as e: 14 | handle_exception(e, "api_chain") 15 | return "" # return empty string if an exception is caught 16 | -------------------------------------------------------------------------------- /services/ask_question.py: -------------------------------------------------------------------------------- 1 | # services\ask_question_service.py 2 | import openai 3 | from typing import Dict 4 | # from logger import logger 5 | from aiohttp import ClientSession 6 | from .utils import handle_exception, logger_stream_handler 7 | 8 | openai.aiosession.set(ClientSession()) 9 | 10 | async def ask_question(input_text: str, envs: Dict[str, str]) -> str: 11 | try: 12 | openai.api_key = envs["OPENAI_API_KEY"] 13 | messages = [{"role": "user", "content": f"Answer the following question as best you can: {input_text}" }] 14 | response = await openai.ChatCompletion.acreate( 15 | model="gpt-3.5-turbo", 16 | messages=messages, 17 | max_tokens=100, 18 | n=1, 19 | stop=None, 20 | temperature=0.5, 21 | ) 22 | 23 | # logger.debug("ask_question: %s", response) 24 | logger_stream_handler(f"logger_stream_handler test: {response}") 25 | return response.choices[0].message.content.strip() 26 | except Exception as e: 27 | handle_exception(e, "ask_question") 28 | -------------------------------------------------------------------------------- /services/code_interpreter.py: -------------------------------------------------------------------------------- 1 | # services\code_interpreter.py 2 | import pprint 3 | from typing import Dict 4 | from codeinterpreterapi import CodeInterpreterSession 5 | from .utils import handle_exception, logger_stream_handler 6 | 7 | async def code_interpreter(input_text: str, envs: Dict[str, str]) -> str: 8 | try: 9 | # Check if the API key exists in the environment variables 10 | openai_api_key = envs["OPENAI_API_KEY"] 11 | 12 | session = CodeInterpreterSession(openai_api_key=openai_api_key) 13 | await session.astart() 14 | 15 | # generate a response based on user input 16 | response = await session.generate_response(input_text) 17 | 18 | # ouput the response (text + image) 19 | # print("AI: ", response.content) 20 | pprint.pprint(response) 21 | # for file in response.files: 22 | # file.show_image() 23 | 24 | # terminate the session 25 | await session.astop() 26 | logger_stream_handler(f"logger_stream_handler test: {response}") 27 | # logger.debug("code_interpreter: %s", response) 28 | return response.content 29 | except Exception as e: 30 | handle_exception(e, "code_interpreter") 31 | return "" 32 | 33 | 
-------------------------------------------------------------------------------- /services/nla_agent.py: --------------------------------------------------------------------------------
1 | # services\nla_agent.py
2 | from typing import Dict
3 | from clients import get_nla_agent
4 | from .utils import handle_exception, logger_stream_handler
5 | 
6 | async def nla_agent(input_text: str, envs: Dict[str, str]) -> str:
7 |     try:
8 |         # PLUGIN_API_KEY is optional; fall back to None when it is absent
9 |         plugin_api_key = envs.get("PLUGIN_API_KEY")
10 | 
11 |         nla_agent, _tools = get_nla_agent(
12 |             openai_api_key=envs["OPENAI_API_KEY"],
13 |             model_name="gpt-3.5-turbo",
14 |             plugin_name=envs["plugin_name"],
15 |             plugin_api_key=plugin_api_key
16 |         )
17 |         response = nla_agent.run(input_text)
18 |         logger_stream_handler(f"logger_stream_handler test: {response}")
19 |         # logger.debug("nla_agent: %s", response)
20 |         return response
21 |     except Exception as e:
22 |         handle_exception(e, "nla_agent")
23 |         return ""
24 | 
25 | 
-------------------------------------------------------------------------------- /services/tree_of_thoughts.py: --------------------------------------------------------------------------------
1 | # services\tree_of_thoughts.py
2 | from typing import Dict
3 | from .utils import handle_exception, logger_stream_handler
4 | from clients.tree_of_thoughts import AsyncOpenAILanguageModel, AsyncMonteCarloTreeofThoughts
5 | 
6 | async def tree_of_thoughts(input_text: str, envs: Dict[str, str]) -> str:
7 |     try:
8 |         logger_stream_handler("Starting tree_of_thoughts service")
9 |         api_model = envs["MODEL"]
10 |         api_key = envs["OPENAI_API_KEY"]
11 |         model = AsyncOpenAILanguageModel(api_key=api_key, api_model=api_model, stream_handler=logger_stream_handler)
12 |         tree_of_thoughts = AsyncMonteCarloTreeofThoughts(model, stream_handler=logger_stream_handler)
13 |         # Use the caller's input as the root prompt for the search
14 |         initial_prompt = input_text
15 |         num_thoughts = 1
16 |         max_steps = 3
17 |         max_states = 5
18 |         pruning_threshold = 0.5
19 | 
20 |         solution = await tree_of_thoughts.solve(
21 |             initial_prompt=initial_prompt,
22 |             num_thoughts=num_thoughts,
23 |             max_steps=max_steps,
24 |             max_states=max_states,
25 |             pruning_threshold=pruning_threshold,
26 |             # sleep_time=sleep_time
27 |         )
28 | 
29 |         # logger.debug("tree_of_thoughts: %s", solution)
30 |         return f"Solution: {solution}"
31 |     except Exception as e:
32 |         handle_exception(e, "tree_of_thoughts")
33 | 
-------------------------------------------------------------------------------- /services/utils/__init__.py: --------------------------------------------------------------------------------
1 | # services\utils\__init__.py
2 | from .handle_exception import handle_exception
3 | from .logger_utils import logger_stream_handler
-------------------------------------------------------------------------------- /services/utils/handle_exception.py: --------------------------------------------------------------------------------
1 | # services\utils\handle_exception.py
2 | import traceback
3 | from fastapi import HTTPException
4 | from logger import logger
5 | 
6 | def handle_exception(e: Exception, service_name: str) -> None:
7 |     logger.error("%s Exception: %s", service_name, e)
8 |     logger.error("Exception type: %s", type(e).__name__)
9 |     logger.error("Traceback: %s", traceback.format_exc())
10 |     error_message = f"An error of type {type(e).__name__} occurred. Arguments:\n{e.args}"
11 |     raise HTTPException(status_code=500, detail=error_message)
-------------------------------------------------------------------------------- /services/utils/logger_utils.py: --------------------------------------------------------------------------------
1 | # services\utils\logger_utils.py
2 | from logger import logger
3 | 
4 | def logger_stream_handler(message: str) -> None:
5 |     logger.debug(message)
-------------------------------------------------------------------------------- /shared_dependencies.md: --------------------------------------------------------------------------------
1 | The app is: a RESTful API in Python using FastAPI. This API interfaces with various AI utility libraries and securely manages user credentials for accessing these services.
2 | 
3 | The files we have decided to generate are: requirements.txt, main.py, Dockerfile, docker-compose.yaml, and the files inside the routes directory.
4 | 
5 | Shared dependencies:
6 | 
7 | 1. Exported variables:
8 |    - app (FastAPI instance)
9 | 
10 | 2. Data schemas:
11 |    - RequestPayload (service, input, envs)
12 |    - ApiResponse (result, error, stdout)
13 | 
14 | 3. Id names of DOM elements: None (not applicable for this API)
15 | 
16 | 4. Message names:
17 |    - success_message
18 |    - error_message
19 | 
20 | 5. Function names:
21 |    - ask
22 |    - sentiment_analysis
23 |    - authenticate
24 |    - other AI utility functions based on the libraries used
-------------------------------------------------------------------------------- /tests/__init__.py: --------------------------------------------------------------------------------
1 | # Empty __init__.py file for the tests package
2 | 
-------------------------------------------------------------------------------- /tests/test_ai_utilities.py: --------------------------------------------------------------------------------
1 | # tests\test_ai_utilities.py
2 | import pytest
3 | from fastapi.testclient import TestClient
4 | from main import app
5 | from services.ai_services import AI_SERVICES
6 | 
7 | SECRET_KEY = "your-secret-key"
8 | headers = {"x-api-key": SECRET_KEY}
9 | 
10 | client = TestClient(app)
11 | 
12 | def test_ask():
13 |     test_data = {
14 |         "service": "q&a",
15 |         "input": "How many people live in Canada as of 2023?",
16 |         "envs": {
17 |             "OPENAI_API_KEY": "test_key",
18 |         }
19 |     }
20 |     response = client.post("/ai/ask", headers=headers, json=test_data)
21 |     assert response.status_code == 200
22 |     assert "result" in response.json()
23 |     assert "error" in response.json()
24 |     assert "stdout" in response.json()
25 | 
26 | def test_ask_invalid_service():
27 |     test_data = {
28 |         "service": "invalid_service",
29 |         "input": "How many people live in Canada as of 2023?",
30 |         "envs": {
31 |             "OPENAI_API_KEY": "test_key",
32 |         }
33 |     }
34 |     response = client.post("/ai/ask", headers=headers, json=test_data)
35 |     assert response.status_code == 400
36 |     assert "detail" in response.json()
37 | 
38 | def test_sentiment_analysis():
39 |     test_data = {
40 |         "service": "sentiment_analysis",
41 |         "input": "I love this product!",
42 |         "envs": {
43 |             "OPENAI_API_KEY": "test_key",
44 |         }
45 |     }
46 |     # "sentiment_analysis" is not registered in AI_SERVICES, so the API
47 |     # rejects it as an invalid service.
48 |     response = client.post("/ai/ask", headers=headers, json=test_data)
49 |     assert response.status_code == 400
50 |     assert "detail" in response.json()
51 | 
52 | def test_sentiment_analysis_invalid_input():
53 |     test_data = {
54 |         "text": "",
55 |         "envs": {
56 |             "OPENAI_API_KEY": "test_key",
57 |         }
58 |     }
59 |     # Posting to /ai/ask with the required "service" and "input" fields
60 |     # missing triggers request validation.
61 |     response = client.post("/ai/ask", headers=headers, json=test_data)
62 |     assert response.status_code == 422
63 |     assert "detail" in response.json()
64 | 
65 | # NOTE: exercising every registered service end-to-end requires valid API
66 | # keys; with the placeholder key below, upstream calls are expected to fail.
67 | @pytest.mark.parametrize("service_name", AI_SERVICES.keys())
68 | def test_ai_services(service_name):
69 |     test_data = {
70 |         "service": service_name,
71 |         "input": "Test input",
72 |         "envs": {
73 |             "OPENAI_API_KEY": "test_key",
74 |         }
75 |     }
76 |     response = client.post("/ai/ask", headers=headers, json=test_data)
77 |     assert response.status_code == 200
78 |     assert "result" in response.json()
79 |     assert "error" in response.json()
80 |     assert "stdout" in response.json()
81 | 
--------------------------------------------------------------------------------