├── .github
│   └── workflows
│       └── generate.yml
├── README-js.md
├── README-python.md
├── README.md
├── generate.py
└── requirements.txt

/.github/workflows/generate.yml:
--------------------------------------------------------------------------------
name: Generate README-python.md

on:
  push:
    branches:
      - main # Replace with your main branch name
    paths:
      - README.md

permissions:
  contents: write

jobs:
  generate-readme:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.x # Use the Python version you need

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Run script to transpile README.md
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: python generate.py

      - name: Commit and push changes
        run: |
          git config --global user.name 'GitHub Actions'
          git config --global user.email 'actions@github.com'
          git add README-*.md
          git commit -m "✨ Generate README.md in various languages."
          git push

--------------------------------------------------------------------------------
/README-js.md:
--------------------------------------------------------------------------------
# LLM API call examples

This repository contains a list of working code examples for calling various LLM APIs.

[README.md](README.md) is the source of truth and contains all examples in `curl` format.

[README-python.md](README-python.md) contains the same examples in Python, and is generated automatically using GPT-3.5 whenever README.md is updated.

[README-js.md](README-js.md) contains the same examples in JavaScript, and is generated automatically using GPT-3.5 whenever README.md is updated.

See also: [List of cloud hosts for inference and fine-tuning](https://github.com/jamesmurdza/awesome-inference-hosts)

## Table of Contents

- [OpenAI](#openai)
- [Anthropic](#anthropic)
- [Cohere](#cohere)
- [Mistral](#mistral)
- [Google](#google)
- [Groq](#groq)

## OpenAI

🔑 Get API key [here](https://platform.openai.com/account/api-keys).

📃 API [docs](https://platform.openai.com/docs/).

### Chat
```javascript
const response = await fetch("https://api.openai.com/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`
  },
  body: JSON.stringify({
    "model": "gpt-3.5-turbo",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "Hello!"}
    ]
  })
}).then((response) => response.json());
```

### Embeddings
```javascript
const fetch = require("node-fetch");
const response = await fetch("https://api.openai.com/v1/embeddings", {
  method: "POST",
  headers: {
    "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
    "input": "The food was delicious and the wine...",
    "model": "text-embedding-ada-002",
    "encoding_format": "float"
  })
}).then((response) => response.json());
```

## Anthropic

🔑 Get API key [here](https://console.anthropic.com/account/keys).

📃 API [docs](https://docs.anthropic.com/).
### Chat
```javascript
const fetch = require("node-fetch");
const response = await fetch("https://api.anthropic.com/v1/complete", {
  method: "POST",
  headers: {
    "accept": "application/json",
    "anthropic-version": "2023-06-01",
    "content-type": "application/json",
    "x-api-key": process.env.ANTHROPIC_API_KEY
  },
  body: JSON.stringify({
    "model": "claude-2.1",
    "prompt": "\n\nHuman: Hello, world!\n\nAssistant:",
    "max_tokens_to_sample": 256
  })
}).then((response) => response.json());
```

## Cohere

🔑 Get API key [here](https://dashboard.cohere.com/api-keys).

📃 API [docs](https://docs.cohere.com/).

### Chat
```javascript
const fetch = require("node-fetch");
const response = await fetch("https://api.cohere.ai/v1/chat", {
  headers: {
    "accept": "application/json",
    "content-type": "application/json",
    "Authorization": `Bearer ${process.env.COHERE_API_KEY}`
  },
  method: "POST",
  body: JSON.stringify({
    "chat_history": [
      {"role": "USER", "message": "Who discovered gravity?"},
      {"role": "CHATBOT", "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton"}
    ],
    "message": "What year was he born?",
    "connectors": [{"id": "web-search"}]
  })
}).then((response) => response.json());
```

### Embeddings
```javascript
const response = await fetch("https://api.cohere.ai/v1/embed", {
  method: "POST",
  headers: {
    "accept": "application/json",
    "content-type": "application/json",
    "Authorization": `Bearer ${process.env.COHERE_API_KEY}`
  },
  body: JSON.stringify({
    "texts": [
      "hello",
      "goodbye"
    ],
    "truncate": "END"
  })
}).then((response) => response.json());
```

## Mistral

🔑 Get API key [here](https://console.mistral.ai/users/api-keys/).

📃 API [docs](https://docs.mistral.ai/api/).

### Chat
```javascript
const fetch = require("node-fetch");
const response = await fetch("https://api.mistral.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "Authorization": `Bearer ${process.env.MISTRAL_API_KEY}`
  },
  body: JSON.stringify({
    "model": "mistral-tiny",
    "messages": [{"role": "user", "content": "Who is the most renowned French writer?"}]
  })
}).then((response) => response.json());
```

### Embeddings
```javascript
const response = await fetch("https://api.mistral.ai/v1/embeddings", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "Authorization": `Bearer ${process.env.MISTRAL_API_KEY}`
  },
  body: JSON.stringify({
    "model": "mistral-embed",
    "input": ["Embed this sentence.", "As well as this one."]
  })
}).then((response) => response.json());
```

## Google

🔑 Get API key [here](https://makersuite.google.com/app/apikey).

📃 API [docs](https://ai.google.dev/api/rest).
### Chat
```javascript
const fetch = require("node-fetch");
const response = await fetch(`https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=${process.env.GOOGLE_API_KEY}`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    "contents": [
      {
        "parts": [
          {
            "text": "Write a story about a magic backpack."
          }
        ]
      }
    ]
  })
}).then((response) => response.json());
```

### Embeddings
```javascript
const fetch = require("node-fetch");
const response = await fetch("https://generativelanguage.googleapis.com/v1beta/models/embedding-001:embedContent?key=" + process.env.GOOGLE_API_KEY, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    "model": "models/embedding-001",
    "content": {
      "parts": [
        {
          "text": "This is a sentence."
        }
      ]
    }
  })
}).then((response) => response.json());
```

## Groq

🔑 Get API key [here](https://console.groq.com/keys).

📃 API [docs](https://console.groq.com/docs/).

### Chat
```javascript
const response = await fetch("https://api.groq.com/openai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Authorization": `Bearer ${process.env.GROQ_API_KEY}`
  },
  body: JSON.stringify({
    "model": "mixtral-8x7b-32768",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "Hello!"}
    ]
  })
}).then((response) => response.json());
```

--------------------------------------------------------------------------------
/README-python.md:
--------------------------------------------------------------------------------
# LLM API call examples

This repository contains a list of working code examples for calling various LLM APIs.

[README.md](README.md) is the source of truth and contains all examples in `curl` format.

[README-python.md](README-python.md) contains the same examples in Python, and is generated automatically using GPT-3.5 whenever README.md is updated.

[README-js.md](README-js.md) contains the same examples in JavaScript, and is generated automatically using GPT-3.5 whenever README.md is updated.

See also: [List of cloud hosts for inference and fine-tuning](https://github.com/jamesmurdza/awesome-inference-hosts)

## Table of Contents

- [OpenAI](#openai)
- [Anthropic](#anthropic)
- [Cohere](#cohere)
- [Mistral](#mistral)
- [Google](#google)
- [Groq](#groq)

## OpenAI

🔑 Get API key [here](https://platform.openai.com/account/api-keys).

📃 API [docs](https://platform.openai.com/docs/).
### Chat
```python
import requests
import os

response = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"
    },
    json={
        "model": "gpt-3.5-turbo",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"}
        ]
    }
)
```

### Embeddings
```python
import requests
import os

response = requests.post(
    "https://api.openai.com/v1/embeddings",
    headers={
        "Authorization": "Bearer " + os.environ.get("OPENAI_API_KEY"),
        "Content-Type": "application/json"
    },
    json={
        "input": "The food was delicious and the wine...",
        "model": "text-embedding-ada-002",
        "encoding_format": "float"
    }
)
```

## Anthropic

🔑 Get API key [here](https://console.anthropic.com/account/keys).

📃 API [docs](https://docs.anthropic.com/).

### Chat
```python
import requests
import os

response = requests.post(
    "https://api.anthropic.com/v1/complete",
    headers={
        "accept": "application/json",
        "anthropic-version": "2023-06-01",
        "content-type": "application/json",
        "x-api-key": os.environ.get("ANTHROPIC_API_KEY")
    },
    json={
        "model": "claude-2.1",
        "prompt": "\n\nHuman: Hello, world!\n\nAssistant:",
        "max_tokens_to_sample": 256
    }
)
```

## Cohere

🔑 Get API key [here](https://dashboard.cohere.com/api-keys).

📃 API [docs](https://docs.cohere.com/).

### Chat
```python
import requests
import os

response = requests.post(
    "https://api.cohere.ai/v1/chat",
    headers={
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.environ['COHERE_API_KEY']}"
    },
    json={
        "chat_history": [
            {"role": "USER", "message": "Who discovered gravity?"},
            {"role": "CHATBOT", "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton"}
        ],
        "message": "What year was he born?",
        "connectors": [{"id": "web-search"}]
    }
)
```

### Embeddings
```python
import requests
import os

response = requests.post(
    "https://api.cohere.ai/v1/embed",
    headers={
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.environ['COHERE_API_KEY']}"
    },
    json={
        "texts": [
            "hello",
            "goodbye"
        ],
        "truncate": "END"
    }
)
```

## Mistral

🔑 Get API key [here](https://console.mistral.ai/users/api-keys/).

📃 API [docs](https://docs.mistral.ai/api/).
### Chat
```python
import requests
import os

response = requests.post(
    "https://api.mistral.ai/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {os.environ.get('MISTRAL_API_KEY')}"
    },
    json={
        "model": "mistral-tiny",
        "messages": [{"role": "user", "content": "Who is the most renowned French writer?"}]
    }
)
```

### Embeddings
```python
import requests
import os

response = requests.post(
    "https://api.mistral.ai/v1/embeddings",
    headers={
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {os.environ['MISTRAL_API_KEY']}"
    },
    json={
        "model": "mistral-embed",
        "input": ["Embed this sentence.", "As well as this one."]
    }
)
```

## Google

🔑 Get API key [here](https://makersuite.google.com/app/apikey).

📃 API [docs](https://ai.google.dev/api/rest).

### Chat
```python
import requests
import os

response = requests.post(
    "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=" + os.environ["GOOGLE_API_KEY"],
    headers={
        "Content-Type": "application/json"
    },
    json={
        "contents": [
            {
                "parts": [
                    {
                        "text": "Write a story about a magic backpack."
                    }
                ]
            }
        ]
    }
)
```

### Embeddings
```python
import requests
import os

response = requests.post(
    "https://generativelanguage.googleapis.com/v1beta/models/embedding-001:embedContent?key=" + os.environ.get("GOOGLE_API_KEY"),
    headers={
        "Content-Type": "application/json"
    },
    json={
        "model": "models/embedding-001",
        "content": {
            "parts": [
                {
                    "text": "This is a sentence."
                }
            ]
        }
    }
)
```

## Groq

🔑 Get API key [here](https://console.groq.com/keys).

📃 API [docs](https://console.groq.com/docs/).

### Chat
```python
import requests
import os

response = requests.post(
    "https://api.groq.com/openai/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['GROQ_API_KEY']}"
    },
    json={
        "model": "mixtral-8x7b-32768",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"}
        ]
    }
)
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LLM API call examples

This repository contains a list of working code examples for calling various LLM APIs.

[README.md](README.md) is the source of truth and contains all examples in `curl` format.

[README-python.md](README-python.md) contains the same examples in Python, and is generated automatically using GPT-3.5 whenever README.md is updated.

[README-js.md](README-js.md) contains the same examples in JavaScript, and is generated automatically using GPT-3.5 whenever README.md is updated.

See also: [List of cloud hosts for inference and fine-tuning](https://github.com/jamesmurdza/awesome-inference-hosts)
## Table of Contents

- [OpenAI](#openai)
- [Anthropic](#anthropic)
- [Cohere](#cohere)
- [Mistral](#mistral)
- [Google](#google)
- [Groq](#groq)

## OpenAI

🔑 Get API key [here](https://platform.openai.com/account/api-keys).

📃 API [docs](https://platform.openai.com/docs/).

### Chat
```bash
curl "https://api.openai.com/v1/chat/completions" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -d '{
    "model": "gpt-3.5-turbo",
    "messages": [
      {
        "role": "system",
        "content": "You are a helpful assistant."
      },
      {
        "role": "user",
        "content": "Hello!"
      }
    ]
  }'
```

### Embeddings
```bash
curl "https://api.openai.com/v1/embeddings" \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "input": "The food was delicious and the wine...",
    "model": "text-embedding-ada-002",
    "encoding_format": "float"
  }'
```

## Anthropic

🔑 Get API key [here](https://console.anthropic.com/account/keys).

📃 API [docs](https://docs.anthropic.com/).

### Chat
```bash
curl "https://api.anthropic.com/v1/complete" \
  -H 'accept: application/json' \
  -H 'anthropic-version: 2023-06-01' \
  -H 'content-type: application/json' \
  -H "x-api-key: $ANTHROPIC_API_KEY" \
  -d '{
    "model": "claude-2.1",
    "prompt": "\n\nHuman: Hello, world!\n\nAssistant:",
    "max_tokens_to_sample": 256
  }'
```

## Cohere

🔑 Get API key [here](https://dashboard.cohere.com/api-keys).

📃 API [docs](https://docs.cohere.com/).

### Chat
```bash
curl "https://api.cohere.ai/v1/chat" \
  -H 'accept: application/json' \
  -H 'content-type: application/json' \
  -H "Authorization: Bearer $COHERE_API_KEY" \
  -d '{
    "chat_history": [
      {"role": "USER", "message": "Who discovered gravity?"},
      {"role": "CHATBOT", "message": "The man who is widely credited with discovering gravity is Sir Isaac Newton"}
    ],
    "message": "What year was he born?",
    "connectors": [{"id": "web-search"}]
  }'
```

### Embeddings
```bash
curl "https://api.cohere.ai/v1/embed" \
  -H 'accept: application/json' \
  -H 'content-type: application/json' \
  -H "Authorization: Bearer $COHERE_API_KEY" \
  -d '{
    "texts": [
      "hello",
      "goodbye"
    ],
    "truncate": "END"
  }'
```

## Mistral

🔑 Get API key [here](https://console.mistral.ai/users/api-keys/).

📃 API [docs](https://docs.mistral.ai/api/).
### Chat
```bash
curl "https://api.mistral.ai/v1/chat/completions" \
  -H 'Content-Type: application/json' \
  -H 'Accept: application/json' \
  -H "Authorization: Bearer $MISTRAL_API_KEY" \
  -d '{
    "model": "mistral-tiny",
    "messages": [{"role": "user", "content": "Who is the most renowned French writer?"}]
  }'
```

### Embeddings
```bash
curl "https://api.mistral.ai/v1/embeddings" \
  -H 'Content-Type: application/json' \
  -H 'Accept: application/json' \
  -H "Authorization: Bearer $MISTRAL_API_KEY" \
  -d '{
    "model": "mistral-embed",
    "input": ["Embed this sentence.", "As well as this one."]
  }'
```

## Google

🔑 Get API key [here](https://makersuite.google.com/app/apikey).

📃 API [docs](https://ai.google.dev/api/rest).

### Chat
```bash
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d '{
    "contents": [
      {
        "parts": [
          {
            "text": "Write a story about a magic backpack."
          }
        ]
      }
    ]
  }'
```

### Embeddings
```bash
curl "https://generativelanguage.googleapis.com/v1beta/models/embedding-001:embedContent?key=$GOOGLE_API_KEY" \
  -H 'Content-Type: application/json' \
  -X POST \
  -d '{
    "model": "models/embedding-001",
    "content": {
      "parts": [
        {
          "text": "This is a sentence."
        }
      ]
    }
  }'
```

## Groq

🔑 Get API key [here](https://console.groq.com/keys).

📃 API [docs](https://console.groq.com/docs/).

### Chat
```bash
curl "https://api.groq.com/openai/v1/chat/completions" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $GROQ_API_KEY" \
  -d '{
    "model": "mixtral-8x7b-32768",
    "messages": [
      {
        "role": "system",
        "content": "You are a helpful assistant."
      },
      {
        "role": "user",
        "content": "Hello!"
      }
    ]
  }'
```

--------------------------------------------------------------------------------
/generate.py:
--------------------------------------------------------------------------------
import re
from codechain.generation import ModifyCodeChain
from langchain.chat_models import ChatOpenAI

bash_to_python_prompt = """\
convert this block of code as concisely as possible to
```python
import requests
import os

response = requests.post(
    "https://...",
    headers={{
        ...
    }},
    ...
)
```
use os.environ to get API keys
don't use intermediate variables
always use double quotes
"""

bash_to_javascript_prompt = """\
convert this block of code as concisely as possible to
```javascript
const fetch = require("node-fetch");
const response = await fetch("https://...",{{
  ...
  body: ...
}}).then((response) => response.json());
```
use process.env to get API keys
don't use intermediate variables
always use double quotes
"""

prompts = {
    "python": bash_to_python_prompt,
    "javascript": bash_to_javascript_prompt,
}

def transpile_readme(readme_content, to_language):
    # Replace a single bash code block with a code block in the target language
    def transpile_block(match):
        bash_code = match.group(1)
        generator = ModifyCodeChain.from_instruction(
            prompts[to_language], ChatOpenAI(model="gpt-3.5-turbo", temperature=0.2)
        )
        return f"```{to_language}\n{generator.run(bash_code)}\n```"

    # Use re.sub() to find and replace all bash code blocks with blocks in the target language
    return re.sub(
        r"```bash\n(.*?)```", transpile_block, readme_content, flags=re.DOTALL
    )

# Read the content of README.md
with open("README.md", "r") as readme_file:
    readme_content = readme_file.read()

# Write the transpiled README-python.md and README-js.md
with open("README-python.md", "w") as readme_file:
    readme_file.write(transpile_readme(readme_content, "python"))
print("Wrote README-python.md")
with open("README-js.md", "w") as readme_file:
    readme_file.write(transpile_readme(readme_content, "javascript"))
print("Wrote README-js.md")
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
regex
openai
langchain
codechain
--------------------------------------------------------------------------------
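For reference, the generation pipeline above can also be run locally — a minimal sketch, assuming Python 3 is installed and an OpenAI API key is available in the environment:

```bash
# Install the dependencies listed in requirements.txt
pip install -r requirements.txt

# generate.py reads README.md and rewrites README-python.md and README-js.md
export OPENAI_API_KEY="sk-..."  # placeholder; use your own key
python generate.py
```

This mirrors what the generate.yml workflow does on each push to main that modifies README.md.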