├── .gitignore ├── go.mod ├── go.sum ├── prompts.txt ├── chatgpt.go └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/derwiki/go-chatgpt 2 | 3 | go 1.20 4 | 5 | require github.com/sashabaranov/go-openai v1.7.0 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/sashabaranov/go-openai v1.7.0 h1:D1dBXoZhtf/aKNu6WFf0c7Ah2NM30PZ/3Mqly6cZ7fk= 2 | github.com/sashabaranov/go-openai v1.7.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= 3 | -------------------------------------------------------------------------------- /prompts.txt: -------------------------------------------------------------------------------- 1 | cat chatgpt.go | PROMPT_PREFIX="Generate a README.md for this Golang CLI tool. Make sure to include a) description b) example c) how to build d) how to use, including different environment variable options and e) how to contribute" ./chatgpt 2 | cat chatgpt.go | PROMPT_PREFIX="Create a usage string to be printed for this program if run with invalid arguments. 
Make sure you include how to use PROMPT_PREFIX: " ./chatgpt -------------------------------------------------------------------------------- /chatgpt.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "encoding/json" 8 | "fmt" 9 | "io/ioutil" 10 | "log" 11 | "net/http" 12 | "os" 13 | "strconv" 14 | "strings" 15 | 16 | openai "github.com/sashabaranov/go-openai" 17 | ) 18 | 19 | const apiBaseURL = "https://api.openai.com/v1/completions" 20 | 21 | type TextCompletionResponse struct { 22 | Choices []ChatGPTCompletionsResponseChoice `json:"choices"` 23 | } 24 | type ChatGPTCompletionsResponseChoice struct { 25 | FinishReason string `json:"finish_reason"` 26 | Index int `json:"index"` 27 | LogProbs string `json:"logprobs"` 28 | Text string `json:"text"` 29 | } 30 | type ChatGPTCompletionsRequest struct { 31 | Model string `json:"model"` 32 | Prompt string `json:"prompt"` 33 | MaxTokens int `json:"max_tokens"` 34 | } 35 | 36 | type Config struct { 37 | OpenAIApiKey string 38 | MaxTokens int 39 | PromptPrefix string 40 | Model string 41 | } 42 | 43 | func getTextCompletion(prompt string, config Config) string { 44 | textCompletionRequest := ChatGPTCompletionsRequest{ 45 | Model: "text-davinci-003", 46 | Prompt: config.PromptPrefix + prompt, 47 | MaxTokens: config.MaxTokens, 48 | } 49 | requestBodyBytes, err := json.Marshal(textCompletionRequest) 50 | if err != nil { 51 | log.Fatal(err) 52 | } 53 | 54 | client := &http.Client{} 55 | 56 | request, err := http.NewRequest("POST", apiBaseURL, bytes.NewBuffer(requestBodyBytes)) 57 | if err != nil { 58 | log.Fatal(err) 59 | } 60 | 61 | request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", config.OpenAIApiKey)) 62 | request.Header.Set("Content-Type", "application/json") 63 | 64 | response, err := client.Do(request) 65 | if err != nil { 66 | log.Fatal(err) 67 | } 68 | // close the response body at the end of the 
function 69 | defer response.Body.Close() 70 | 71 | var responseBody TextCompletionResponse 72 | err = json.NewDecoder(response.Body).Decode(&responseBody) 73 | if err != nil { 74 | log.Fatal(err) 75 | } 76 | 77 | if len(responseBody.Choices) == 0 { 78 | log.Fatal("No choices found in the response body.") 79 | } 80 | 81 | return strings.TrimSpace(responseBody.Choices[0].Text) 82 | } 83 | 84 | func getChatCompletions(content string, config Config, model string) string { 85 | if model == "" { 86 | model = openai.GPT3Dot5Turbo 87 | } 88 | // TODO(derwiki) assert model exists in openai package 89 | client := openai.NewClient(config.OpenAIApiKey) 90 | resp, err := client.CreateChatCompletion( 91 | context.Background(), 92 | openai.ChatCompletionRequest{ 93 | Model: openai.GPT3Dot5Turbo, 94 | Messages: []openai.ChatCompletionMessage{ 95 | { 96 | Role: openai.ChatMessageRoleUser, 97 | Content: config.PromptPrefix + content, 98 | }, 99 | }, 100 | }, 101 | ) 102 | 103 | if err != nil { 104 | fmt.Printf("ChatCompletion error: %v\n", err) 105 | return "" 106 | } 107 | 108 | return resp.Choices[0].Message.Content 109 | } 110 | 111 | func hasStdinInput() bool { 112 | info, err := os.Stdin.Stat() 113 | if err != nil { 114 | log.Fatal(err) 115 | } 116 | 117 | return info.Mode()&os.ModeCharDevice == 0 118 | } 119 | 120 | func main() { 121 | config, err := loadConfig() 122 | 123 | var prompt string 124 | if err != nil { 125 | fmt.Println("error: Fatal occurred in loadConfig") 126 | } else if len(os.Args) > 1 { 127 | prompt = os.Args[1] 128 | } else if hasStdinInput() { 129 | scanner := bufio.NewScanner(os.Stdin) 130 | 131 | scanner.Split(bufio.ScanBytes) 132 | var buffer bytes.Buffer 133 | for scanner.Scan() { 134 | buffer.Write(scanner.Bytes()) 135 | } 136 | 137 | prompt = strings.TrimSpace(buffer.String()) 138 | } else { 139 | fmt.Println("error: No prompt found in args or STDIN") 140 | printUsage() 141 | return 142 | } 143 | 144 | // Create channels for the API responses 145 | 
gpt3TurboCh := make(chan string) 146 | gpt3Davinci003Ch := make(chan string) 147 | gpt3Davinci002Ch := make(chan string) 148 | textDavinci002Ch := make(chan string) 149 | gpt4Ch := make(chan string) 150 | 151 | // if a model is specified, only call that model and exit 152 | if config.Model != "" { 153 | if config.Model == openai.GPT3Dot5Turbo { 154 | fmt.Println(getChatCompletions(prompt, config, openai.GPT3Dot5Turbo)) 155 | } else if config.Model == openai.GPT3TextDavinci003 { 156 | fmt.Println(getChatCompletions(prompt, config, openai.GPT3TextDavinci003)) 157 | } else if config.Model == openai.GPT3TextDavinci002 { 158 | fmt.Println(getChatCompletions(prompt, config, openai.GPT3TextDavinci002)) 159 | } else if config.Model == "text-davinci-002" { 160 | fmt.Println(getTextCompletion(prompt, config)) 161 | } else if config.Model == openai.GPT4 { 162 | fmt.Println(getChatCompletions(prompt, config, openai.GPT4)) 163 | } 164 | return 165 | } 166 | 167 | // Launch goroutines to call the API functions in parallel 168 | go func() { 169 | gpt3TurboCh <- getChatCompletions(prompt, config, openai.GPT3Dot5Turbo) 170 | }() 171 | go func() { 172 | gpt3Davinci003Ch <- getChatCompletions(prompt, config, openai.GPT3TextDavinci003) 173 | }() 174 | go func() { 175 | gpt3Davinci002Ch <- getChatCompletions(prompt, config, openai.GPT3TextDavinci002) 176 | }() 177 | go func() { 178 | textDavinci002Ch <- getTextCompletion(prompt, config) 179 | }() 180 | go func() { 181 | gpt4Ch <- getChatCompletions(prompt, config, openai.GPT4) 182 | }() 183 | 184 | // Wait for the API responses from the channels 185 | gpt3TurboRes := <-gpt3TurboCh 186 | gpt3Davinci003Res := <-gpt3Davinci003Ch 187 | gpt3Davinci002Res := <-gpt3Davinci002Ch 188 | textDavinci002Res := <-textDavinci002Ch 189 | gpt4Res := <-gpt4Ch 190 | 191 | // TODO(derwiki) put this in config 192 | verbose := false 193 | if verbose { 194 | fmt.Println(prompt) 195 | } 196 | // Print the API responses 197 | fmt.Println("\n> Chat Completion 
(gpt-3.5-turbo):") 198 | fmt.Println(gpt3TurboRes) 199 | fmt.Println("\n> Chat Completion (text-davinci-003):") 200 | fmt.Println(gpt3Davinci003Res) 201 | fmt.Println("\n> Chat Completion (text-davinci-002):") 202 | fmt.Println(gpt3Davinci002Res) 203 | fmt.Println("\n> Text Completion (da-vinci-002):") 204 | fmt.Println(textDavinci002Res) 205 | fmt.Println("\n> Chat Completion (gpt-4):") 206 | fmt.Println(gpt4Res) 207 | 208 | refine := fmt.Sprintf("Which of the following answers is best? \n\n%s\n\n%s\n\n%s\n\n%s", gpt3TurboRes, gpt3Davinci003Res, gpt3Davinci002Res, textDavinci002Res) 209 | refined := getChatCompletions(refine, config, openai.GPT4) 210 | fmt.Println("\n> Which of those answers is best?") 211 | fmt.Println(refined) 212 | } 213 | 214 | func loadConfig() (Config, error) { 215 | config := Config{} 216 | 217 | config.PromptPrefix = os.Getenv("PROMPT_PREFIX") 218 | 219 | apiKey := os.Getenv("OPENAI_API_KEY") 220 | if apiKey == "" { 221 | apiKeyBytes, err := ioutil.ReadFile("./.openai_key") 222 | if err != nil { 223 | return config, err 224 | } 225 | apiKey = strings.TrimSpace(string(apiKeyBytes)) 226 | } 227 | config.OpenAIApiKey = apiKey 228 | 229 | maxTokensStr := os.Getenv("MAX_TOKENS") 230 | if maxTokensStr == "" { 231 | config.MaxTokens = 100 232 | } else { 233 | maxTokens, err := strconv.Atoi(maxTokensStr) 234 | if err != nil { 235 | return config, err 236 | } 237 | config.MaxTokens = maxTokens 238 | } 239 | 240 | config.Model = os.Getenv("GPT_MODEL") 241 | 242 | return config, nil 243 | } 244 | 245 | func printUsage() { 246 | fmt.Println(` 247 | Usage: 248 | ./chatgpt [PROMPT] 249 | echo "PROMPT" | ./chatgpt 250 | cat chatgpt.go | PROMPT_PREFIX="Improve this program" ./chatgpt 251 | 252 | Description: 253 | A Go command-line interface to communicate with OpenAI's ChatGPT API. 
254 | This program sends a prompt or question to the ChatGPT API for several models, 255 | prints the generated response for each, and then sends all the responses to 256 | gpt-4 to ask which is best. 257 | 258 | Required Options: 259 | PROMPT The question or prompt to send to the ChatGPT API. 260 | 261 | Environment Variables: 262 | OPENAI_API_KEY Your OpenAI API key. 263 | MAX_TOKENS The maximum number of tokens to generate in the response. (default: 100) 264 | PROMPT_PREFIX A prefix to add to each prompt. 265 | GPT_MODEL The model to use. If not specified, all models will be used. 266 | 267 | Example: 268 | ./chatgpt "What is the capital of Ohio?" 269 | 270 | > Chat Completion (gpt-3.5-turbo): 271 | The capital of Ohio is Columbus. 272 | 273 | > Chat Completion (text-davinci-003): 274 | The capital of Ohio is Columbus. 275 | 276 | > Chat Completion (text-davinci-002): 277 | The capital of Ohio is Columbus. 278 | 279 | > Text Completion (da-vinci-002): 280 | The capital of Ohio is Columbus. 281 | 282 | > Chat Completion (gpt-4): 283 | The capital of Ohio is Columbus. 284 | 285 | > Which of those answers is best? 286 | All of the answers are the same and correct. 287 | `) 288 | } 289 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenAI ChatGPT Command Line Interface 2 | 3 | A Go command-line interface to communicate with OpenAI's ChatGPT API. 4 | This program sends a prompt or question to the ChatGPT API for several models, prints the generated response for each, 5 | and then sends all the responses to chatgpt-3.5-turbo to ask which is best. 6 | 7 | ## Example 8 | 9 | ``` 10 | ➜ go-chatgpt git:(main) ✗ ./chatgpt "What is the meaning of life?" 11 | 12 | > Chat Completion (gpt-3.5-turbo): 13 | As an AI language model, I don't have a personal belief on the meaning of life. 
14 | However, the meaning of life is a philosophical question that has baffled humans 15 | for centuries. It is a subjective concept that varies from person to person. Some 16 | may believe that the meaning of life is to pursue happiness and pleasure, while 17 | others may believe it to be a spiritual or religious pursuit. Ultimately, the 18 | meaning of life is personal and can only be determined by the individual. 19 | 20 | > Chat Completion (text-davinci-003): 21 | As an AI language model, I don't have personal beliefs and values, so I cannot 22 | hold my own meaning of life. The meaning of life can vary from person to person, 23 | and it's a philosophical and existential question that has been asked for 24 | centuries by philosophers, scientists, and spiritual leaders. Some believe it is 25 | to pursue happiness and personal fulfillment, others believe it is to fulfill a 26 | God-given purpose, while others see it as simply to exist and experience the 27 | universe around us. 28 | 29 | > Chat Completion (text-davinci-002): 30 | As an AI language model, I am incapable of having personal beliefs or opinions. 31 | However, the meaning of life is a highly debated philosophical and existential 32 | question that has no single, definitive answer. Different people may have 33 | different beliefs, interpretations, and perspectives regarding the purpose, 34 | significance, and value of life. Some may find meaning in spirituality, religion, 35 | or personal goals, while others may focus on human connection, experiences, or 36 | the pursuit of happiness. Ultimately, the meaning of life is a subjective and 37 | complex concept that may vary for each individual. 38 | 39 | > Text Completion (da-vinci-002): 40 | This is an individual question that each person has to answer for themselves. 41 | There is no one-size-fits-all answer to this question. 
Everyone has their own 42 | perspectives and interpretations, and it is these individual perspectives that 43 | help give our lives meaning. Ultimately, it is up to you to decide what the 44 | meaning of life is for you. 45 | 46 | > Which of those answers is best? 47 | All three answers provide good perspectives on the meaning of life, but the last 48 | one is the best because it emphasizes the importance of individual perspective 49 | and interpretation. It also highlights that there is no one definitive answer to 50 | this question and that it's up to each person to decide for themselves what the 51 | meaning of life is. 52 | ``` 53 | 54 | ## Build 55 | ChatGPT is built in Go, and requires Go 1.20+ to be installed. To build ChatGPT, run the following command: 56 | ``` 57 | go build -o chatgpt 58 | ``` 59 | 60 | ## Usage 61 | The ChatGPT CLI tool can accept a prompt either as a command-line argument or as standard input. 62 | 63 | ### Command-Line Argument 64 | The following command sends a prompt as a command-line argument: 65 | ```shell 66 | ./chatgpt "What is the meaning of life?" 67 | ``` 68 | 69 | ### Standard Input 70 | The following command sends a prompt as standard input: 71 | ```shell 72 | echo "What is the meaning of life?" | ./chatgpt 73 | ``` 74 | 75 | ### Environment Variables 76 | The following environment variables can be used to configure ChatGPT: 77 | 78 | #### `OPENAI_API_KEY` 79 | This variable is used to authenticate your OpenAI API key. If this variable is not set, ChatGPT will look for your key in the `.openai_key` file. 80 | 81 | #### `MAX_TOKENS` 82 | Defines the maximum number of tokens to generate in the response. The default value is `100`. 83 | 84 | #### `GPT_MODEL` 85 | Specify a single model to use for completions. If not specified, all models will be used concurrently. 86 | 87 | #### `PROMPT_PREFIX` 88 | 89 | Defines a prefix that is prepended to any prompts sent to the API. 
Mostly useful when data is coming in on STDIN and you 90 | want to add instructions preceding, e.g.: 91 | ``` 92 | ➜ go-chatgpt git:(main) ✗ cat chatgpt.go | PROMPT_PREFIX="Suggest improvements for this Go program: " ./chatgpt 93 | 94 | > Chat Completion (gpt-3.5-turbo): 95 | Here are some potential improvements for this Go program: 96 | 97 | 1. Add error handling: There are several places in this program where errors can occur, such as when making API requests or parsing configuration values. It would be helpful to include more robust error handling to provide more helpful error messages and prevent the program from crashing. 98 | 99 | 2. Use a package manager: Instead of managing dependencies manually, it would be better to use a package manager like Go Modules to manage dependencies automatically. 100 | 101 | 3. Consolidate API response handling: The program currently has two separate functions for handling API requests and responses, which can make it harder to read and maintain. It might make sense to consolidate these functions into a single function that can handle requests and responses for any OpenAI API. 102 | 103 | 4. Create an API client package: Instead of having API functions scattered throughout the codebase, it would be better to create a separate package for OpenAI API clients that can be imported and reused across different programs. 104 | 105 | 5. Use environment variables for model selection: Rather than hardcoding the model names in the code, it would be better to use environment variables to specify which models to use. This would make the code more flexible and easier to configure. 106 | 107 | 6. Improve channel handling: The program currently launches multiple goroutines to call the API functions in parallel and waits for their results with channels. However, this approach can be difficult to manage and scale as the number of API requests grows. 
It might make more sense to use a worker pool or other concurrency pattern to handle API requests more efficiently. 108 | 109 | > Chat Completion (text-davinci-003): 110 | 1. Remove unused imports, such as "strconv". 111 | 112 | 2. Use constants or variables for the model name in ChatGPTCompletionsRequest instead of hardcoding it. 113 | 114 | 3. Use context.Background() in getTextCompletion to be consistent with getChatCompletions. 115 | 116 | 4. Print the error message instead of calling log.Fatal in the getTextCompletion function. 117 | 118 | 5. Add error handling when loading the environment variables. Instead of returning an empty Config and error, it is better to return a Config with default values and the error message. 119 | 120 | 6. Remove unnecessary spaces in print statements. 121 | 122 | 7. Add comments to explain what each function does. 123 | 124 | 8. Use a switch statement instead of if else if for the model input to be more readable. 125 | 126 | 9. Improve the error message for when no prompt is found. 127 | 128 | 10. Use defer to close the response body in getChatCompletions instead of at the end of the function. 129 | 130 | 11. Use "strconv.Itoa" instead of fmt.Sprintf for the MaxTokens value in ChatGPTCompletionsRequest. 131 | 132 | 12. Rename variables with shorter and more descriptive names. 133 | 134 | 13. Remove the printUsage function since it is not used. 135 | 136 | 14. Add error handling when decoding the json response in getTextCompletion. 137 | 138 | > Chat Completion (text-davinci-002): 139 | 1. Improve error handling: Right now, the program uses `log.Fatal` to handle errors, which terminates the program. It would be more user-friendly to return error messages and let the user decide how to handle them. 140 | 141 | 2. Make constants configurable: The program currently specifies the GPT models and API URL as constants. It would be better to make these configurable by the user, either via command line arguments or environment variables. 
142 | 143 | 3. Simplify main function: The main function currently launches goroutines to call the API functions in parallel, which can be complex to understand. It would be simpler to use a loop to call the API functions sequentially and store the results in an array. 144 | 145 | 4. Refactor API request functions: The current implementation of the `getTextCompletion` function requires the caller to pass in a large `Config` struct. It would be simpler to pass in only the necessary parameters. Similarly, the `getChatCompletions` function could be simplified by allowing the caller to pass in the GPT model as a parameter, rather than requiring it to be hard-coded. 146 | 147 | 5. Improve user interface: The program currently prints the raw API responses to the console, which may be difficult for users to read. It would be better to format the responses and provide options for the user to refine or choose the best response. 148 | 149 | 6. Unit tests: The current program lacks unit tests, which can help catch bugs and improve code quality. It would be good to write unit tests for each of the API request functions and any helper functions. 150 | 151 | > Text Completion (da-vinci-002): 152 | Suggestions: 153 | 1. Separate the code into multiple functions, such as, getTextCompletion(), parseResponse(), printSummary() and such, to make the code more structurally organized and easier to read. 154 | 2. Add documentation to the code to explain how each function is used. 155 | 3. Refactor the code to make it more efficient by removing redundant loops and variable declarations. 156 | 4. Handle potential errors in a better way, for example, by printing 157 | 158 | > Which of those answers is best? 159 | error messages to the console instead of terminating the program. 160 | 5. Use a logger instead of printing to the console directly. 161 | 6. Add support for different response formats, such as JSON and XML. 162 | 7. Implement caching to reduce the number of API requests made. 
163 | 8. Add more control over the output, such as the number of results returned or sorting options. 164 | 9. Use a configuration file to set program options, such as API credentials and model selection. 165 | 10. Validate user input to prevent errors and improve security. 166 | ``` 167 | 168 | ## Contributing 169 | If you want to contribute to ChatGPT, you can send a pull request with your changes. Before doing so, please make sure all tests pass by running the following command: 170 | ``` 171 | go test 172 | ``` 173 | 174 | ## License 175 | This project is released under the MIT License. --------------------------------------------------------------------------------