├── client_gpt ├── __init__.py └── client_gpt.py ├── .gitignore ├── requirements.txt ├── README.md └── setup.py /client_gpt/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Distribution / packaging 2 | .Python 3 | 4 | # Environments 5 | .env 6 | Venv 7 | Venv/ 8 | .vscode/ 9 | build/ 10 | 11 | client_GPT.egg-info/ 12 | dist/ -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | black==23.3.0 2 | certifi==2022.12.7 3 | charset-normalizer==3.1.0 4 | click==8.1.3 5 | colorama==0.4.6 6 | idna==3.4 7 | mypy-extensions==1.0.0 8 | packaging==23.1 9 | pathspec==0.11.1 10 | platformdirs==3.2.0 11 | requests==2.28.2 12 | urllib3==1.26.15 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## client_gpt 2 | 3 | The end user can use ClientGPT to communicate with the GPT-3 model from OpenAI. 
4 | 5 | ## install 6 | 7 | pip install client_gpt 8 | 9 | ## use 10 | 11 | from client_gpt.client_gpt import ClientGPT 12 | 13 | api_key = "your_api_key" 14 | 15 | model = "your_model_id" 16 | 17 | client = ClientGPT(api_key, model) 18 | 19 | response_text, message_id, conversation_id = client.ask(prompt='Hello, how are you?', conversation_id=None, previous_convo_id=None) 20 | 21 | print(response_text) 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | setuptools.setup( 4 | name="client_GPT", 5 | version="2.1.0", 6 | author="littleknitsstory", 7 | description="A package for working with GPT language models.", 8 | url="https://github.com/littleknitsstory/client-gpt", 9 | packages=setuptools.find_packages(), 10 | install_requires=[ 11 | "requests", 12 | ], 13 | classifiers=[ 14 | "Development Status :: 3 - Alpha", 15 | "Intended Audience :: Developers", 16 | "Topic :: Software Development :: Libraries", 17 | "Programming Language :: Python :: 3", 18 | ], 19 | python_requires=">=3.6", 20 | ) 21 | -------------------------------------------------------------------------------- /client_gpt/client_gpt.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import re 4 | import uuid 5 | import os 6 | from typing import Tuple, Optional 7 | 8 | 9 | class ClientGPT: 10 | """ 11 | A class for interacting with the OpenAI API. 12 | Attributes: 13 | api_key (str): Your OpenAI API key. 14 | model (str): The ID of the GPT-3 model to use. 15 | conversation_id (str or None): the unique identifier of the current conversation in the OpenAI. If the value is None, a new conversation ID will be created. If a string is passed, then an existing conversation ID is used. 16 | session (requests.Session): A session object for making HTTP requests. 
17 | Method: 18 | ask(prompt, conversation_id=None, previous_convo_id=None): Sends a prompt to the OpenAI API and returns the response. 19 | """ 20 | 21 | chat_api_url = "https://api.openai.com/v1/completions" 22 | 23 | def __init__(self, api_key: str, model: str): 24 | self.api_key = api_key 25 | self.model = model 26 | self.conversation_id = None 27 | self.session = requests.Session() 28 | 29 | def ask( 30 | self, prompt: str, conversation_id: str or None, previous_convo_id: str or None 31 | ) -> Optional[str]: 32 | """ 33 | Args: 34 | prompt (str): a string containing text to send to the OpenAI server. 35 | conversation_id (str or None): The ID of the conversation. If None, a new conversation will be started. 36 | previous_convo_id (str or None): The ID of the previous conversation. If None, a new conversation will be started. 37 | 38 | Return: 39 | Tuple[str, str or None, str or None]: A tuple containing the response text, the message ID, and the conversation ID. 40 | """ 41 | headers = { 42 | "Content-Type": "application/json", 43 | "Authorization": f"Bearer {self.api_key}", 44 | "Accept": "application/json", 45 | "OpenAI-Integration-Name": "python", 46 | "OpenAI-Integration-Version": "0.1", 47 | } 48 | 49 | data = { 50 | "model": self.model, 51 | "prompt": prompt, 52 | "max_tokens": 150, 53 | "temperature": 0.3, 54 | "n": 1, 55 | "echo": True, 56 | "stream": False, 57 | } 58 | 59 | if previous_convo_id is None: 60 | previous_convo_id = str(uuid.uuid4()) 61 | 62 | if conversation_id is not None and len(conversation_id) == 0: 63 | conversation_id = None 64 | 65 | try: 66 | response = self.session.post( 67 | self.chat_api_url, 68 | headers=headers, 69 | data=json.dumps(data), 70 | ) 71 | if response.status_code == 200: 72 | response_data = json.loads(response.text) 73 | choices = response_data["choices"] 74 | print(choices) 75 | if len(choices) > 0: 76 | text = choices[0]["text"] 77 | return text 78 | else: 79 | return None 80 | elif response.status_code == 401: 
81 | if os.path.exists("auth.json"): 82 | os.remove("auth.json") 83 | return ( 84 | f"[Status Code] 401 | [Response Text] {response.text}", 85 | None, 86 | None, 87 | ) 88 | elif response.status_code >= 500: 89 | print( 90 | ">> Looks like the server is either overloaded or down. Try again later." 91 | ) 92 | return ( 93 | f"[Status Code] {response.status_code} | [Response Text] {response.text}", 94 | None, 95 | None, 96 | ) 97 | else: 98 | return ( 99 | f"[Status Code] {response.status_code} | [Response Text] {response.text}", 100 | None, 101 | None, 102 | ) 103 | except Exception as e: 104 | print(">> Error when calling OpenAI API: " + str(e)) 105 | return "400", None, None 106 | --------------------------------------------------------------------------------