├── .gitignore
├── LICENCE.txt
├── README.md
├── docs
│   └── .gitkeep
├── images
│   └── .gitkeep
├── requirements.txt
└── src
    └── services
        ├── anthropic_service.py
        └── azure_openai_service.py
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# DotEnv configuration
.env

# Database
*.db
*.rdb

# PyCharm
.idea

# VS Code
.vscode/
*.code-workspace

# Spyder
.spyproject/

# Jupyter NB Checkpoints
.ipynb_checkpoints/

# exclude data from source control by default
/data/

# Mac OS-specific storage files
.DS_Store

# vim
*.swp
*.swo

# Mypy cache
.mypy_cache/
--------------------------------------------------------------------------------
/LICENCE.txt:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Datalumina B.V.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daveebbelaar/ai-experiments/19c1a6fbf7ae19e87cf03b7acc036fe246ee86d4/README.md
--------------------------------------------------------------------------------
/docs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daveebbelaar/ai-experiments/19c1a6fbf7ae19e87cf03b7acc036fe246ee86d4/docs/.gitkeep
--------------------------------------------------------------------------------
/images/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daveebbelaar/ai-experiments/19c1a6fbf7ae19e87cf03b7acc036fe246ee86d4/images/.gitkeep
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
anthropic
python-dotenv
openai
--------------------------------------------------------------------------------
/src/services/anthropic_service.py:
--------------------------------------------------------------------------------
import anthropic
import os
import json
from dotenv import load_dotenv


class AnthropicService:
    def __init__(self):
        load_dotenv()
        self.llm = anthropic.Anthropic(
            api_key=os.getenv("ANTHROPIC_API_KEY"),
        )

    def generate_response(self, prompt: str):

        SYSTEM_PROMPT = (
            "You are a marketing expert, specialized in writing LinkedIn posts to build a personal brand on the platform. "
            "Always start with a strong hook on the first line and limit posts to 800 characters or less. "
            "Use emojis where appropriate, but never more than 3 in one post. "
            "The user will provide the topics and extra details to include in the post. "
            "Output in JSON format, with the following keys: content, keywords, title. "
            "content: the full LinkedIn post, with the content properly escaped and formatted with every paragraph separated by a newline character. "
            "keywords: a list of relevant keywords for the post. "
            "title: a title for the post for internal reference in the CMS."
        )

        message = self.llm.messages.create(
            # model="claude-3-opus-20240229",  # Most powerful model
            model="claude-3-sonnet-20240229",  # Most balanced model
            max_tokens=1000,
            temperature=0.0,
            system=SYSTEM_PROMPT,
            messages=[
                {"role": "user", "content": prompt},
                {
                    "role": "assistant",
                    "content": "{",
                },  # Prefill Claude's response to force JSON output
            ],
        )

        # Attempt to parse the completion as JSON
        try:
            # Prepend the prefilled "{" to reconstruct a complete JSON string
            json_string = "{" + message.content[0].text
            result = json.loads(json_string)
            return result
        except json.JSONDecodeError as e:
            print(f"Error parsing JSON: {e}")
            return None


anthropic_service = AnthropicService()


result = anthropic_service.generate_response(
    prompt="Write a LinkedIn post about the often overlooked role data plays in AI strategy."
)

# generate_response returns None if the model output could not be parsed as JSON
if result:
    print(result["content"])
--------------------------------------------------------------------------------
/src/services/azure_openai_service.py:
--------------------------------------------------------------------------------
import os
from openai import AzureOpenAI
from dotenv import load_dotenv
from openai._types import NotGiven
import json

NOT_GIVEN = NotGiven()


class AzureOpenAIService:
    def __init__(self):
        load_dotenv()
        self.azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
        self.api_key = os.getenv("AZURE_OPENAI_KEY")
        self.api_version = "2023-12-01-preview"
        self.deployment_name = "gpt-4-turbo"
        self.message_history = []

    @property
    def client(self):
        # Lazily create the AzureOpenAI client on first access
        if not hasattr(self, "_client"):
            self._client = AzureOpenAI(
                azure_endpoint=self.azure_endpoint,
                api_key=self.api_key,
                api_version=self.api_version,
            )
        return self._client

    def chat_completion_one_shot(
        self,
        messages: list,
        max_tokens: int | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        response_format: dict | NotGiven = NOT_GIVEN,
    ):
        # Stateless call: does not store the message history
        response = self.client.chat.completions.create(
            model=self.deployment_name,  # deployment name used in Azure
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            response_format=response_format,
        )
        return response.choices[0].message.content

    def generate_response(self, prompt: str):

        SYSTEM_PROMPT = (
            "You are a marketing expert, specialized in writing LinkedIn posts to build a personal brand on the platform. "
            "Always start with a strong hook on the first line and limit posts to 800 characters or less. "
            "Use emojis where appropriate, but never more than 3 in one post. "
            "The user will provide the topics and extra details to include in the post. "
            "Output in JSON format, with the following keys: content, keywords, title. "
            "content: the full LinkedIn post, with the content properly escaped and formatted with every paragraph separated by a newline character. "
            "keywords: a list of relevant keywords for the post. "
            "title: a title for the post for internal reference in the CMS."
        )

        messages = [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt},
        ]

        try:
            response = self.chat_completion_one_shot(
                messages=messages,
                response_format={"type": "json_object"},
                temperature=0,
            )
            response_json = json.loads(response)
            return response_json
        except json.JSONDecodeError:
            print("Could not parse the response due to invalid JSON")
            return None


azure_openai_service = AzureOpenAIService()

result = azure_openai_service.generate_response(
    prompt="Write a LinkedIn post about the often overlooked role data plays in AI strategy."
)

# generate_response returns None if the model output could not be parsed as JSON
if result:
    print(result["content"])
--------------------------------------------------------------------------------
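
Note: both service modules call load_dotenv() and read their credentials from environment variables, and the .gitignore above keeps .env out of source control. A local .env file at the project root would therefore need to supply the variables the code reads via os.getenv(). A minimal sketch is shown below — the variable names come from the two service modules, while the values are placeholders, not real credentials, and the endpoint is only the typical Azure OpenAI URL shape:

    # .env (example only — create locally, never commit; all values are placeholders)
    ANTHROPIC_API_KEY=<your-anthropic-api-key>
    AZURE_OPENAI_ENDPOINT=https://<your-resource-name>.openai.azure.com/
    AZURE_OPENAI_KEY=<your-azure-openai-key>

The Azure API version ("2023-12-01-preview") and deployment name ("gpt-4-turbo") are hard-coded in AzureOpenAIService, so the deployment name would need to match an actual deployment in your Azure resource.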