├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── pyproject.toml ├── setup.py ├── src └── gpt_to_chatgpt │ └── __init__.py └── tests └── test.py /.gitignore: -------------------------------------------------------------------------------- 1 | /__pycache__ 2 | dist 3 | gpt_to_chatgpt.egg-info 4 | hold 5 | build -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Bram Adams 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | recursive-include src *.py 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gpt-to-chatgpt-py 2 | Convert a regular GPT call into a ChatGPT call 3 | 4 | TYPESCRIPT VERSION HERE -> https://github.com/bramses/gpt-to-chatgpt-ts 5 | 6 | ## Installation 7 | 8 | ```py 9 | pip install gpt-to-chatgpt==0.1.4 10 | ``` 11 | 12 | ## Functions 13 | 14 | ### toChatML() 15 | 16 | Converts a string message into a Chat Markup Language (ChatML) object. 17 | 18 | #### Usage 19 | ```python 20 | toChatML(message:str, options:Optional[dict]=None) -> List[dict] 21 | ``` 22 | 23 | #### Arguments 24 | 25 | - message: The string message to be converted to ChatML. 26 | - options (optional): A dictionary that can contain the following keys: 27 | - system_messages: A list of strings that represent system messages to be added to the ChatML object. 28 | - role: The role of the message (either Role.USER or Role.ASSISTANT). 29 | 30 | #### Examples 31 | 32 | ```python 33 | toChatML('hello') 34 | # Output: [{'role': Role.USER, 'content': 'hello'}] 35 | ``` 36 | 37 | ```python 38 | toChatML('hello', {'system_messages': ['hi'], 'role': Role.ASSISTANT}) 39 | # Output: [{'role': Role.SYSTEM, 'content': 'hi'}, {'role': Role.ASSISTANT, 'content': 'hello'}] 40 | ``` 41 | 42 | ### get_message() 43 | 44 | Extracts the message content from a response object. 
45 | 46 | #### Usage 47 | 48 | ```python 49 | get_message(response: dict, options: Optional[dict] = None) -> Union[str, dict] 50 | ``` 51 | 52 | ### Full Usage 53 | 54 | ```py 55 | import openai 56 | import os 57 | from dotenv import load_dotenv 58 | from gpt_to_chatgpt import toChatML, get_message 59 | 60 | load_dotenv() 61 | 62 | openai.api_key = os.getenv("OPENAI_API_KEY") 63 | 64 | res = openai.ChatCompletion.create( 65 | model="gpt-3.5-turbo", 66 | messages=toChatML("this is a test"), 67 | ) 68 | 69 | print(get_message(res)) 70 | 71 | # As an AI language model, I don't really take tests, but I'm always ready to respond to your prompts and queries. How can I assist you today? 72 | ``` 73 | 74 | 75 | #### Arguments 76 | 77 | - response: The response object from which to extract the message content. 78 | - options (optional): A dictionary that can contain the following keys: 79 | - usage: A boolean value that indicates whether to return the usage information of the response. 80 | - role: A boolean value that indicates whether to return the role of the message. 81 | - isMessages: A boolean value that indicates whether to return the message as a list of messages. 82 | 83 | #### Examples 84 | 85 | ```python 86 | get_message(test_response) 87 | # Output: 'The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.' 
88 | 89 | get_message(test_response, {'usage': True, 'role': True, 'isMessages': True}) 90 | # Output: {'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87}, 91 | # 'roles': ['assistant'], 92 | # 'messages': ['The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.']} 93 | 94 | get_message(test_response, {'usage': True, 'role': True}) 95 | # Output: {'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87}, 96 | # 'role': 'assistant', 97 | # 'message': 'The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.'} 98 | 99 | get_message(test_response, {'usage': True, 'isMessages': True}) 100 | # Output: {'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87}, 101 | # 'messages': ['The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.']} 102 | ``` 103 | 104 | ## About the Developer 105 | 106 | This repository was written by Bram Adams, a writer and programmer based out of NYC. 107 | 108 | Bram publishes a Zettelkasten, with a twice/weekly newsletter (which you can subscribe to [here](https://www.bramadams.dev/#/portal/)), is a community developer ambassador for OpenAI and does freelance contracts (for hire!) related to AI/web dev/AR+VR. 109 | 110 | Bram is also the creator of [Stenography](https://stenography.dev), an API and [VSC Extension](https://marketplace.visualstudio.com/items?itemName=Stenography.stenography) that automatically documents code on save. 111 | 112 | You can learn more about him and his work on his [website](https://www.bramadams.dev/about/). 
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "gpt-to-chatgpt" 7 | version = "0.1.4" 8 | authors = [ 9 | { name="Bram Adams", email="bram+support@bramadams.dev" }, 10 | ] 11 | description = "A small example package" 12 | readme = "README.md" 13 | requires-python = ">=3.7" 14 | classifiers = [ 15 | "Programming Language :: Python :: 3", 16 | "License :: OSI Approved :: MIT License", 17 | "Operating System :: OS Independent", 18 | ] 19 | 20 | [project.urls] 21 | "Homepage" = "https://github.com/bramses/gpt-to-chatgpt-py" 22 | "Bug Tracker" = "https://github.com/bramses/gpt-to-chatgpt-py/issues" -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | from distutils.core import setup 3 | 4 | with open("README.md", "r", encoding="utf-8") as fh: 5 | long_description = fh.read() 6 | 7 | setup( 8 | name='gpt-to-chatgpt', 9 | version='0.1.1', 10 | long_description=long_description, 11 | long_description_content_type="text/markdown", 12 | description='Convert GPT Completion style message to a ChatGPT call', 13 | packages=find_packages(), 14 | zip_safe=False, 15 | python_requires='>=3.7', 16 | install_requires=[ 17 | # List any dependencies your package needs to run here 18 | ], 19 | entry_points={ 20 | # If your package provides a command-line interface, define it here 21 | }, 22 | classifiers=[ 23 | "Programming Language :: Python :: 3", 24 | "License :: OSI Approved :: MIT License", 25 | "Operating System :: OS Independent", 26 | ], 27 | author='Bram Adams', 28 | author_email='bram+support@bramadams.dev', 29 | url='https://github.com/bramses/gpt-to-chatgpt-py', 30 
from enum import Enum


class Role(Enum):
    """Chat participant roles accepted by the ChatGPT messages API."""
    SYSTEM = 0
    USER = 1
    ASSISTANT = 2


def role_to_string(role):
    """Return the lowercase API string for a Role member.

    Args:
        role: A Role enum member.

    Returns:
        'system', 'user', or 'assistant'; 'unknown' for anything else.
    """
    if role == Role.SYSTEM:
        return 'system'
    if role == Role.USER:
        return 'user'
    if role == Role.ASSISTANT:
        return 'assistant'
    return 'unknown'


def toChatML(original, options=None):
    """Convert a plain GPT prompt string into a ChatML message list.

    Args:
        original: The prompt text to wrap as a chat message.
        options: Optional dict with keys:
            - 'system_messages': list of strings emitted first, one system
              message per entry.
            - 'role': Role for the final message (defaults to Role.USER
              when absent).

    Returns:
        A list of {'role': str, 'content': str} dicts suitable for the
        ChatCompletion `messages` argument.
    """
    # Work on a private copy of the options: the previous implementation
    # used a mutable default argument and wrote 'role' back into the
    # caller's dict, both classic Python pitfalls.
    opts = {'system_messages': None, 'role': Role.USER}
    if options is not None:
        opts.update(options)

    messages = [
        {'role': role_to_string(Role.SYSTEM), 'content': msg}
        for msg in (opts['system_messages'] or [])
    ]
    messages.append({'role': role_to_string(opts['role']), 'content': original})
    return messages


def get_message(response, options=None):
    """Extract message content (and optionally usage/role info) from a
    ChatCompletion-style response dict.

    Args:
        response: Response dict with 'choices' (each holding a 'message'
            with 'role' and 'content') and, when requested, 'usage'.
        options: Optional dict of boolean flags:
            - 'usage': include response['usage'] in the result.
            - 'isMessages': return every choice's content as a 'messages'
              list ('roles' is added alongside it when 'role' is also set).
            - 'role': include role information ('role' singular when
              'isMessages' is off, 'roles' list when it is on).

    Returns:
        The first choice's content string when no flag is enabled,
        otherwise a dict containing the requested fields.
    """
    # No mutable default argument; a missing dict means "no flags set".
    opts = options or {}
    result = {}

    if opts.get('usage'):
        result['usage'] = response['usage']

    if opts.get('isMessages'):
        result['messages'] = [choice['message']['content'] for choice in response['choices']]
        # 'roles' (plural) is only produced together with 'messages'.
        if opts.get('role'):
            result['roles'] = [choice['message']['role'] for choice in response['choices']]

    if result:
        # Singular 'message'/'role' only when the list form wasn't requested.
        if not opts.get('isMessages'):
            first = response['choices'][0]['message']
            result['message'] = first['content']
            if opts.get('role'):
                result['role'] = first['role']
        return result

    # Base case: no flags requested, return just the first message's content.
    return response['choices'][0]['message']['content']
from src.gpt_to_chatgpt import toChatML, get_message, Role, role_to_string


def test_toChatML():
    """A bare string becomes a single user message."""
    print('Testing toChatML()')
    assert toChatML('hello') == [{'role': role_to_string(Role.USER), 'content': 'hello'}]
    print('toChatML() passed')


def test_toChatML_with_options():
    """system_messages are prepended and an explicit role is honoured."""
    print('Testing toChatML() with options')
    assert toChatML('hello', {'system_messages': ['hi'], 'role': Role.ASSISTANT}) == [
        {'role': role_to_string(Role.SYSTEM), 'content': 'hi'},
        {'role': role_to_string(Role.ASSISTANT), 'content': 'hello'}]

    # When no role is given, the prompt defaults to the user role.
    assert toChatML('hello', {'system_messages': ['hi']}) == [
        {'role': role_to_string(Role.SYSTEM), 'content': 'hi'},
        {'role': role_to_string(Role.USER), 'content': 'hello'}]
    print('toChatML() with options passed')


# Canned ChatCompletion-style response used by the get_message tests.
test_response = {
    'id': 'chatcmpl-6p9XYPYSTTRi0xEviKjjilqrWU2Ve',
    'object': 'chat.completion',
    'created': 1677649420,
    'model': 'gpt-3.5-turbo',
    'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87},
    'choices': [
        {
            'message': {
                'role': 'assistant',
                'content': 'The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.'},
            'finish_reason': 'stop',
            'index': 0
        }
    ]
}


def test_get_message():
    """With no options, only the first choice's content string is returned."""
    print('Testing get_message()')
    assert get_message(
        test_response) == 'The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.'
    print('get_message() passed')


def test_get_message_with_options():
    """Each option flag adds the corresponding field(s) to the result dict."""
    print('Testing get_message() with options')
    assert get_message(test_response, {'usage': True, 'role': True, 'isMessages': True}) == {
        'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87},
        'roles': ['assistant'],
        'messages': ['The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.']}

    # NOTE(review): the original file asserted this exact case twice in a
    # row (copy-paste duplication); the redundant second copy was removed.
    assert get_message(test_response, {'usage': True, 'role': True}) == {
        'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87},
        'role': 'assistant',
        'message': 'The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.'}

    assert get_message(test_response, {'usage': True, 'isMessages': True}) == {
        'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87},
        'messages': ['The 2020 World Series was played in Arlington, Texas at the Globe Life Field, which was the new home stadium for the Texas Rangers.']}
    print('get_message() with options passed')


if __name__ == '__main__':
    print('Running tests...')
    print('----------------')
    test_toChatML()
    test_toChatML_with_options()
    test_get_message()
    test_get_message_with_options()