├── .gitignore
├── README.md
├── anthropic_gradio
│   └── __init__.py
├── app.py
├── chatinterface.png
├── composition.py
├── custom_app.py
├── custom_chat_anthropic.png
├── pyproject.toml
└── requirements.txt

/.gitignore:
--------------------------------------------------------------------------------
# Python cache files
__pycache__/
*.pyc

# Virtual environment
env/
.venv/

# Package artifacts
dist/
build/
*.egg-info/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# `anthropic-gradio`

`anthropic-gradio` is a Python package that makes it very easy for developers to create machine learning apps powered by Anthropic's API.

# Installation

1. Clone this repo: `git clone https://github.com/AK391/anthropic-gradio.git`
2. Navigate into the folder you cloned this repo into: `cd anthropic-gradio`
3. Install this package: `pip install -e .`

That's it!

# Basic Usage

Just as you would when using the `anthropic` client directly, first save your Anthropic API key to this environment variable:

```
export ANTHROPIC_API_KEY=<your token>
```

Then in a Python file, write:

```python
import gradio as gr
import anthropic_gradio

gr.load(
    name='claude-3-opus-20240229',
    src=anthropic_gradio.registry,
).launch()
```

Run the Python file, and you should see a Gradio Interface connected to the model on Anthropic!

![ChatInterface](chatinterface.png)

# Customization

Once you can create a Gradio UI from an Anthropic endpoint, you can customize it by setting your own input and output components, or any other arguments supported by `gr.ChatInterface`. For example:

```py
import gradio as gr
import anthropic_gradio

gr.load(
    name='claude-3-opus-20240229',
    src=anthropic_gradio.registry,
    title='Anthropic-Gradio Integration',
    description="Chat with Claude 3 Opus model.",
    examples=["Explain quantum gravity to a 5-year-old.", "How many Rs are there in the word Strawberry?"]
).launch()
```

![CustomizedInterface](custom_chat_anthropic.png)

# Composition

Or use your loaded Interface within larger Gradio Web UIs, e.g.

```python
import gradio as gr
import anthropic_gradio

with gr.Blocks() as demo:
    with gr.Tab("Claude 3 Opus"):
        gr.load('claude-3-opus-20240229', src=anthropic_gradio.registry)
    with gr.Tab("Claude 3 Sonnet"):
        gr.load('claude-3-sonnet-20240229', src=anthropic_gradio.registry)

demo.launch()
```

# Under the Hood

The `anthropic-gradio` Python library has two dependencies: `anthropic` and `gradio`. It defines a "registry" function, `anthropic_gradio.registry`, which takes in a model name and returns a Gradio app.
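
Because the registry is just a callable that returns a Gradio app, you can also call it directly instead of going through `gr.load`. A minimal sketch (the model name is only an example):

```python
import anthropic_gradio

# registry() builds and returns a gr.ChatInterface for the given model name.
# It reads the API key from the ANTHROPIC_API_KEY environment variable
# unless a token argument is passed explicitly.
demo = anthropic_gradio.registry(name='claude-3-opus-20240229')
demo.launch()
```

This is essentially what `gr.load(..., src=anthropic_gradio.registry)` does for you: it hands the model name to the registry and returns the resulting app.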
# Supported Models in Anthropic

All chat API models supported by Anthropic are compatible with this integration. For a comprehensive list of available models and their specifications, please refer to the [Anthropic Models documentation](https://docs.anthropic.com/en/docs/about-claude/models).

-------

Note: if you are getting an authentication error, the Anthropic API client is not able to read the API key from the environment variable. If that happens, save the key in your Python session instead, like this:

```py
import os

os.environ["ANTHROPIC_API_KEY"] = ...
```
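
Alternatively, the `registry` function (see `anthropic_gradio/__init__.py`) accepts an optional `token` argument that takes precedence over the environment variable, so you can pass the key explicitly. A small sketch with a placeholder key (do not hard-code real keys in committed files):

```python
import anthropic_gradio

# The token argument takes precedence over ANTHROPIC_API_KEY.
# 'sk-ant-...' below is a placeholder, not a real key; load the key from a
# secrets manager or the environment in real code.
anthropic_gradio.registry(
    name='claude-3-opus-20240229',
    token='sk-ant-...',
).launch()
```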
--------------------------------------------------------------------------------
/anthropic_gradio/__init__.py:
--------------------------------------------------------------------------------
import os
import anthropic
import gradio as gr
from typing import Callable

__version__ = "0.0.1"


def get_fn(model_name: str, preprocess: Callable, postprocess: Callable, api_key: str):
    def fn(message, history):
        inputs = preprocess(message, history)
        client = anthropic.Anthropic(api_key=api_key)
        with client.messages.stream(
            model=model_name,
            max_tokens=1000,
            messages=inputs["messages"]
        ) as stream:
            response_text = ""
            for chunk in stream:
                if chunk.type == "content_block_delta":
                    delta = chunk.delta.text
                    response_text += delta
                    yield postprocess(response_text)

    return fn


def get_interface_args(pipeline):
    if pipeline == "chat":
        inputs = None
        outputs = None

        def preprocess(message, history):
            messages = []
            for user_msg, assistant_msg in history:
                messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
                messages.append({"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]})
            messages.append({"role": "user", "content": [{"type": "text", "text": message}]})
            return {"messages": messages}

        postprocess = lambda x: x  # No post-processing needed
    else:
        # Add other pipeline types when needed
        raise ValueError(f"Unsupported pipeline type: {pipeline}")
    return inputs, outputs, preprocess, postprocess


def get_pipeline(model_name):
    # Determine the pipeline type based on the model name
    # For simplicity, assume all models are chat models at the moment
    return "chat"


def registry(name: str, token: str | None = None, **kwargs):
    """
    Create a Gradio Interface for a model on Anthropic.

    Parameters:
        - name (str): The name of the Anthropic model.
        - token (str, optional): The API key for Anthropic.
    """
    api_key = token or os.environ.get("ANTHROPIC_API_KEY")
    if not api_key:
        raise ValueError("API key is not set. Pass `token` or set the ANTHROPIC_API_KEY environment variable.")

    pipeline = get_pipeline(name)
    inputs, outputs, preprocess, postprocess = get_interface_args(pipeline)
    fn = get_fn(name, preprocess, postprocess, api_key)

    if pipeline == "chat":
        interface = gr.ChatInterface(fn=fn, **kwargs)
    else:
        # For other pipelines, create a standard Interface (not implemented yet)
        interface = gr.Interface(fn=fn, inputs=inputs, outputs=outputs, **kwargs)

    return interface
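

# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the package's public API): running this
# module directly shows that get_fn() returns a generator which yields the
# progressively accumulated response text, which is what gr.ChatInterface
# consumes for streaming. Assumes ANTHROPIC_API_KEY is set; the model name
# below is just an example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _, _, _preprocess, _postprocess = get_interface_args("chat")
    chat_fn = get_fn(
        "claude-3-opus-20240229",
        _preprocess,
        _postprocess,
        os.environ["ANTHROPIC_API_KEY"],
    )
    final_text = ""
    for partial in chat_fn("Say hello in one sentence.", history=[]):
        final_text = partial  # each yield is the full response text so far
    print(final_text)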
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import gradio as gr
import anthropic_gradio

gr.load(
    name='claude-3-opus-20240229',
    src=anthropic_gradio.registry,
).launch()
--------------------------------------------------------------------------------
/chatinterface.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gradio-app/anthropic-gradio/f10a4de7a885a9e359c77db693f8a355dda0a61f/chatinterface.png
--------------------------------------------------------------------------------
/composition.py:
--------------------------------------------------------------------------------
import gradio as gr
import anthropic_gradio

with gr.Blocks() as demo:
    with gr.Tab("Claude-3-Opus"):
        gr.load('claude-3-opus-20240229', src=anthropic_gradio.registry)
    with gr.Tab("Claude-3-Sonnet"):
        gr.load('claude-3-sonnet-20240229', src=anthropic_gradio.registry)

demo.launch()
--------------------------------------------------------------------------------
/custom_app.py:
--------------------------------------------------------------------------------
import gradio as gr
import anthropic_gradio

gr.load(
    name='claude-3-opus-20240229',
    src=anthropic_gradio.registry,
    title='Anthropic-Gradio Integration',
    description="Chat with Claude 3 Opus model.",
    examples=["Explain quantum gravity to a 5-year-old.", "How many Rs are there in the word Strawberry?"]
).launch()
--------------------------------------------------------------------------------
/custom_chat_anthropic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gradio-app/anthropic-gradio/f10a4de7a885a9e359c77db693f8a355dda0a61f/custom_chat_anthropic.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "anthropic-gradio"
version = "0.1.0"
description = "A Python package for creating Gradio applications with Anthropic models"
authors = [
    { name = "Your Name", email = "your.email@example.com" }
]
readme = "README.md"
requires-python = ">=3.10"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
dependencies = [
    "gradio>=5.0.2",
    "anthropic",
]

[project.urls]
homepage = "https://github.com/your-username/anthropic-gradio"
repository = "https://github.com/your-username/anthropic-gradio"

[project.optional-dependencies]
dev = ["pytest"]

[tool.hatch.build.targets.wheel]
packages = ["anthropic_gradio"]
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
anthropic
gradio>=5.0.2
--------------------------------------------------------------------------------