├── .gitignore
├── banner.png
├── ai_readme
├── ai_commit
├── ai_chat
├── LICENSE
├── ai_pr
├── ai_describe
├── ai_review
├── src
│   └── ai_chat.py
├── README.md
└── openai
/.gitignore:
--------------------------------------------------------------------------------
1 | ai_changelog
2 |
3 | .DS_Store
4 | .env
--------------------------------------------------------------------------------
/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ikramhasan/AI-Dev-Scripts/HEAD/banner.png
--------------------------------------------------------------------------------
/ai_readme:
--------------------------------------------------------------------------------
#!/bin/bash

# AI Readme Generation
# Runs readmeai against the current working directory using a local
# Ollama Mistral model.
ai_readme () {
  # Split declaration from command substitution so a pwd failure isn't masked.
  local dir
  dir=$(pwd)
  echo "Generating readme for directory: $dir"
  # Quote the path so directories containing spaces don't word-split.
  readmeai --repository "$dir" --api ollama --model mistral:7b-instruct --emojis --badge-style flat-square
}

ai_readme
--------------------------------------------------------------------------------
/ai_commit:
--------------------------------------------------------------------------------
#!/bin/bash

# AI Commit Message
# Suggests a conventional-commits style message for the currently staged
# changes using a local Ollama model.
ai_commit() {
  local diff
  diff=$(git diff --cached)

  # An empty diff means nothing is staged; bail out instead of asking the
  # model to describe nothing.
  if [ -z "$diff" ]; then
    echo "No staged changes found. Stage changes with 'git add' first." >&2
    return 1
  fi

  ollama run deepseek-coder:6.7b-instruct "Diff: $diff. Suggest a git commit message for the diff provided above. The commit message should be written in active voice and should follow conventional commit style, and the format should be [scope]: . Example: fix(authentication): add password regex pattern."
}

ai_commit
--------------------------------------------------------------------------------
/ai_chat:
--------------------------------------------------------------------------------
#!/bin/bash

# Locate the directory containing this script so src/ai_chat.py resolves
# regardless of the caller's working directory.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# Parse command-line arguments
while [[ $# -gt 0 ]]; do
  case "$1" in
    -t|--type)
      type="$2"
      shift # past argument
      shift # past value
      ;;
    -f|--filename)
      filename="$2"
      shift # past argument
      shift # past value
      ;;
    -q|--question)
      question="$2"
      shift # past argument
      shift # past value
      ;;
    *) # unknown option
      echo "Unknown option: $1" >&2
      exit 1
      ;;
  esac
done

# Forward only the options that were actually supplied. Previously all three
# flags were always passed, so omitted options arrived as empty strings and
# the Python script's interactive fallbacks (which test for None) never ran —
# leading to a crash on an unrecognized empty content type.
args=()
[[ -n "${type:-}" ]] && args+=(-t "$type")
[[ -n "${filename:-}" ]] && args+=(-f "$filename")
[[ -n "${question:-}" ]] && args+=(-q "$question")

# Call the Python script with the provided arguments
python3 "$SCRIPT_DIR/src/ai_chat.py" "${args[@]}"
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Ikramul Hasan
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/ai_pr:
--------------------------------------------------------------------------------
#!/bin/bash

# AI PR Review
# Fetches a PR diff via `gh` and asks a local (or OpenAI) model to summarize it.
ai_pr () {
  # Store the model command as an array so multi-word commands survive
  # quoting instead of relying on unquoted word-splitting.
  local cmd=(ollama run deepseek-coder:6.7b-instruct)
  local opt
  while getopts ":m:" opt; do
    case ${opt} in
      m)
        if [ "$OPTARG" = "openai" ]; then
          # Uses whatever default model the `openai` helper script is configured with.
          echo "Using OpenAI's model for code review."
          cmd=(openai)
        elif [ "$OPTARG" = "deepseek" ]; then
          echo "Using DeepSeek Coder's 6.7b model for code review."
          cmd=(ollama run deepseek-coder:6.7b-instruct)
        elif [ "$OPTARG" = "mistral" ]; then
          echo "Using Mistral's 7b model for code review."
          cmd=(ollama run mistral:7b-instruct)
        fi
        ;;
      \?)
        echo "Invalid option: $OPTARG" 1>&2
        ;;
      :)
        echo "Invalid option: $OPTARG requires an argument" 1>&2
        ;;
    esac
  done
  shift $((OPTIND -1))

  # Without a PR argument `gh pr diff ""` would just fail confusingly.
  if [ -z "$1" ]; then
    echo "Usage: ai_pr [-m openai|deepseek|mistral] <pr-number-or-url>" 1>&2
    return 1
  fi

  echo "Reviewing PR $1"
  local diff
  diff=$(gh pr diff "$1")
  "${cmd[@]}" "PR diff: $diff. Suppose you wrote the code in the git PR diff above. Provide a brief summary of this PR, ignore any files that are not critical to the code, i.e: package-json.lock, and state the changes you made to existing code in markdown format, and in first person."
}

ai_pr "$@"
--------------------------------------------------------------------------------
/ai_describe:
--------------------------------------------------------------------------------
#!/bin/bash

# Collect every image file (jpg/jpeg/png/webp/gif) in the current directory,
# NUL-delimited so arbitrary filenames are handled safely.
echo "Images in the current directory:"
images=()
while IFS= read -r -d '' file; do
  images+=("$file")
done < <(find . -maxdepth 1 -type f \( -iname "*.jpg" -o -iname "*.jpeg" -o -iname "*.png" -o -iname "*.webp" -o -iname "*.gif" \) -print0)

num_images=${#images[@]}

# Nothing to describe without at least one image.
if (( num_images == 0 )); then
  echo "No images found."
  exit 1
fi
16 |
# NOTE(review): the dump is corrupted from this point — the line below is
# truncated mid-statement ("for ((i=0; i> \"$review_file\"") and the lines
# that follow belong to a different script (ai_review's review_files loop
# appending model suggestions to $review_file; the "/ai_review:" file header
# is missing entirely). Recover the original ai_describe and ai_review files
# from the repository before editing this section.
17 | for ((i=0; i> "$review_file"
26 | echo "$ollama_suggestions" >> "$review_file"
27 | echo "" >> "$review_file"
28 | done
29 | }
30 |
31 | # Review files for each file format
32 | for format in "${file_formats[@]}"; do
33 | review_files "$format"
34 | done
35 |
36 | echo "Review completed. Suggestions saved in $review_file"
--------------------------------------------------------------------------------
/src/ai_chat.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import io
3 | import sys
4 | import time
5 | from langchain_community.llms import Ollama
6 | from langchain_community.embeddings import GPT4AllEmbeddings
7 | from langchain_community.vectorstores import Chroma
8 | from langchain_core.output_parsers import StrOutputParser
9 | from langchain_core.runnables import RunnablePassthrough
10 | from langchain_text_splitters import RecursiveCharacterTextSplitter
11 | from langchain.prompts import PromptTemplate
12 | from langchain_core.prompts import ChatPromptTemplate
13 | from langchain.prompts import HumanMessagePromptTemplate
14 | from langchain_community.document_loaders import PyPDFLoader
15 | from langchain_community.document_loaders import WebBaseLoader
16 | from langchain_community.document_loaders import UnstructuredMarkdownLoader
17 | import threading
18 |
19 |
def show_loading():
    """Render a small spinner on stdout until the module-level `done` flag
    is flipped (set to True by the main thread)."""
    frames = "/—\\|"
    tick = 0
    while not done:
        sys.stdout.write("\r" + "Loading... " + frames[tick % len(frames)])
        sys.stdout.flush()
        tick += 1
        time.sleep(0.1)
28 |
29 |
# Start the loading animation in a separate thread
done = False
loading_thread = threading.Thread(target=show_loading)
loading_thread.start()

# Create ArgumentParser object
parser = argparse.ArgumentParser(description="You own command line RAG.")

# Add arguments
parser.add_argument(
    "-t", "--type", type=str, help="File/Content type. One of `md`, `pdf`, or `web`."
)
parser.add_argument("-f", "--filename", type=str, help="File name / Web url")
parser.add_argument("-q", "--question", type=str, help="The question to ask the model.")

# Parse the arguments
args = parser.parse_args()

# Access the arguments
filename = args.filename
question = args.question
content_type = args.type

# Fall back to interactive prompts for anything not supplied on the CLI.
# NOTE(review): the spinner thread is still running here and may redraw over
# these prompts — consider pausing it before calling input().
if filename is None:
    filename = input("Enter filename: ")

if question is None:
    question = input("Enter question: ")

if content_type is None:
    content_type = input("Enter content type: ")

llm = Ollama(model="mistral:7b-instruct", temperature=0.8)

# Load the source document(s) according to the requested content type.
if content_type == "md":
    loader = UnstructuredMarkdownLoader(filename)
    docs = loader.load()
elif content_type == "pdf":
    loader = PyPDFLoader(filename)
    docs = loader.load_and_split()
elif content_type == "web":
    loader = WebBaseLoader(filename)
    docs = loader.load()
else:
    # Previously an unknown type fell through with `docs` undefined and the
    # script crashed later with a NameError (while the spinner thread kept
    # running forever). Stop the spinner and fail fast with a clear message.
    done = True
    loading_thread.join()
    sys.exit(f"Unsupported content type: {content_type!r}. Expected 'md', 'pdf', or 'web'.")

# Split into overlapping chunks and index them for retrieval.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)

embedding = GPT4AllEmbeddings()
vectorstore = Chroma.from_documents(documents=splits, embedding=embedding)

# Retrieve and generate using the relevant snippets of the document.
retriever = vectorstore.as_retriever()
prompt = ChatPromptTemplate(
    input_variables=["context", "question"],
    messages=[
        HumanMessagePromptTemplate(
            prompt=PromptTemplate(
                input_variables=["context", "question"],
                template="You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Keep the answer concise.\nQuestion: {question} \nContext: {context} \nAnswer:",
            )
        )
    ],
)
92 |
93 |
def format_docs(docs):
    """Join the page contents of the retrieved documents, separated by blank lines."""
    return "\n\n".join(doc.page_content for doc in docs)
97 |
98 |
# Compose the retrieval-augmented chain: the formatted retrieved context and
# the pass-through question feed the prompt, whose output goes to the LLM and
# is parsed back into a plain string.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

# Stop the spinner thread before blocking on the model call.
done = True
loading_thread.join()

answer = rag_chain.invoke(question)

# Move off the spinner line, then print the final answer.
print("\r")
print("=================== Answer ===================")
print(answer.strip())
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
AI DEV SCRIPTS
6 |
7 |
8 | Streamline Code Review, Commit & Speed Up Dev Process. Your Own Personal Senior Engineer For Free!
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 | Table of Contents
17 |
18 | - [📍 Overview](#-overview)
19 | - [🧩 Features](#-features)
20 | - [📦 Scripts](#-scripts)
21 | - [🚀 Getting Started](#-getting-started)
22 | - [⚙️ Installation](#️-installation)
23 | - [🤖 Usage](#-usage)
24 | - [🤝 Contributing](#-contributing)
25 | - [🎗 License](#-license)
26 | - [🔗 Acknowledgments](#-acknowledgments)
27 | - [⭐ Star History](#-star-history)
28 |
29 |
30 |
31 | ## 📍 Overview
32 |
33 | AI DEV SCRIPTS leverages local LLMs to streamline code improvement workflows and enhance your coding. It includes scripts like `ai_review` for suggestion generation, `ai_pr` for pull request analysis, `ai_commit` for suggested commit messages, and `ai_chat` for a full RAG-compatible chat experience. Additionally, it features an `ai_readme` script that generates customized readmes based on directory locations. Overall, it utilizes Ollama's DeepSeek Coder and Mistral models to automate code improvements, security checks, and documentation within the repository ecosystem.
34 |
35 | ---
36 |
37 | ## 🧩 Features
38 |
39 | - **AI Review**: Scours through specified file formats and requests AI-generated suggestions for improvement.
40 | - **AI PR**: Analyzes GitHub Pull Requests by calling an external Ollama DeepSeek Coder service.
41 | - **AI Commit**: Generates commit messages using an AI model, adhering to conventional commit style and active voice guidelines.
42 | - **AI Readme**: Generates customized readmes based on the repository's location, utilizing the Ollama API and the Mistral model.
43 | - **AI Chat**: Chat with websites, pdf, and markdown files, a RAG in your own terminal.
44 | - **AI Describe**: Describe an image.
45 |
46 | ---
47 |
48 | ## 📦 Scripts
49 |
50 | | File | Summary |
51 | | -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
52 | | [ai_review](ai_review) | This `ai_review` file initiates the script that scours through specified file formats and requests AI-generated suggestions for improvement. It generates a markdown file containing improvements, best practices, readability enhancements, maintainability tips, and potential code examples, creating an impactful code improvement workflow within this repository's ecosystem. |
53 | | [ai_pr](ai_pr) | The `ai_pr` script analyzes GitHub Pull Requests by calling an external Ollama DeepSeek Coder service. It generates brief summaries and flags potential security or coding best practices issues from the presented git changes. This tool supports automated PR review processes in the given repository infrastructure. |
54 | | [ai_commit](ai_commit) | The `ai_commit` script in this repository's scripts folder is designed to generate commit messages using an AI model. This tool runs the deepseek-coder model from Ollama to suggest a commit message based on the git diff provided as input, adhering to conventional commit style and active voice guidelines. |
55 | | [ai_readme](ai_readme)     | Generate readme files for directories using the `ai_readme` script. The script triggers an AI to produce customized readmes based on the repository's location, utilizing the Ollama API and the Mistral model. Emojis and flat-square badge styles are incorporated in the readme generation process.                                                                              |
56 | | [ai_chat](ai_chat) | Chat with web pages, PDFs, or markdown files of any size. Complete rag functionality. |
57 | | [ai_describe](ai_describe) | Describe an image (uses the `llava` model). |
58 | | [openai](openai) | General purpose openai script. It's a dependency for several other scripts here. |
59 |
60 | ---
61 |
62 | ## 🚀 Getting Started
63 |
64 | **Requirements:**
65 |
66 | - Bash
67 | - readmeai
68 | - ollama
69 | - deepseek-coder
70 | - mistral
71 |
72 | ### ⚙️ Installation
73 |
74 | **1. Clone the repository:**
75 |
76 | ```sh
77 | git clone https://github.com/ikramhasan/AI-Dev-Scripts.git
78 | ```
79 |
80 | **2. Install the required dependencies:**
81 |
82 | ```sh
83 | pip install readmeai
84 | ```
85 |
86 | **3. Install ollama:** Download the latest release from [here](https://ollama.com/download)
87 |
88 | **4. Install deepseek-coder:**
89 |
90 | ```sh
91 | ollama run deepseek-coder:6.7b-instruct
92 | ```
93 |
94 | **5. Install mistral:**
95 |
96 | ```sh
97 | ollama run mistral:7b-instruct
98 | ```
99 |
100 | **6. Make the scripts executable:**
101 |
102 | ```sh
103 | chmod +x ./ai_review
104 | ```
105 |
106 | **7. (Optional) Add the scripts to your PATH:**
107 |
108 | ```sh
109 | export PATH=$PATH:/path/to/AI-Dev-Scripts
110 | ```
111 |
112 | ### 🤖 Usage
113 |
114 | ai_review
115 |
116 | > Navigate to the directory where you want to run the script and execute the command below:
117 | >
118 | > ```console
119 | > $ ./ai_review file.py file.js # for specific files
120 | > ```
121 | >
122 | > Or,
123 | >
124 | > ```console
125 | > $ ./ai_review *.py *.js # for all files with .py and .js extensions
126 | > ```
127 |
128 | ai_readme
129 |
130 | > Navigate to the directory where you want to generate the readme and run the command below:
131 | >
132 | > ```console
133 | > $ ./ai_readme
134 | > ```
135 |
136 | ai_commit
137 |
138 | > Navigate to your repo and run ai_commit using the command below:
139 | >
140 | > ```console
141 | > $ ./ai_commit
142 | > ```
143 |
144 | ai_pr
145 |
146 | > Copy the pr link and run the command below:
147 | >
148 | > ```console
149 | > $ ./ai_pr
150 | > ```
151 |
152 | ai_chat
153 |
154 | > Navigate to the directory where you want to run the script and run the command below:
155 | >
156 | > ```console
157 | > $ ./ai_chat -t md -f blog.md -q "What is this blog about?"
158 | > ```
159 | >
160 | > Or,
161 | >
162 | > ```console
163 | > $ ./ai_chat -t pdf -f blog.pdf -q "What is this blog about?"
164 | > ```
165 | >
166 | > Or,
167 | >
168 | > ```console
169 | > $ ./ai_chat -t web -f https://www.example.com -q "What is this blog about?"
170 | > ```
171 |
172 | ai_describe
173 |
174 | > Navigate to the directory of your image and run the command below:
175 | >
176 | > ```console
177 | > $ ./ai_describe
178 | > ```
179 | >
180 | > Then follow the on-screen instructions.
181 |
182 | openai
183 |
184 | > The script requires `jq` to be installed. Run the following command to install it.
185 | >
186 | > ```console
187 | > brew install jq
188 | > ```
189 | >
190 | > After jq is installed, add the OPENAI_API_KEY variable to your path by running this command:
191 | >
192 | > ```console
193 | > export OPENAI_API_KEY=your_openai_api_key
194 | > ```
195 | >
196 | > Then run the script using the command below:
197 | >
198 | > ```console
199 | > $ ./openai
200 | > ```
201 | >
202 | > Then follow the on-screen instructions.
203 |
204 | ---
205 |
206 | ## 🤝 Contributing
207 |
208 | Contributions are welcome! Here are several ways you can contribute:
209 |
210 | - **[Report Issues](https://github.com/ikramhasan/AI-Dev-Scripts/issues)**: Submit bugs found or log feature requests for the `scripts` project.
211 | - **[Submit Pull Requests](https://github.com/ikramhasan/AI-Dev-Scripts/pulls)**: Review open PRs, and submit your own PRs.
212 |
213 |
214 |
223 |
224 | ---
225 |
226 | ## 🎗 License
227 |
228 | This project is protected under the [MIT](https://choosealicense.com/licenses/mit) License.
229 |
230 | ---
231 |
232 | ## 🔗 Acknowledgments
233 |
234 | - ollama
235 | - readmeai
236 | - [Janlay Wu](https://github.com/janlay) for the [openai](https://github.com/janlay/openai-cli) script
237 |
238 | ## ⭐ Star History
239 |
240 | [](https://star-history.com/#ikramhasan/AI-Dev-Scripts&Date)
241 |
242 | [**Return**](#-overview)
243 |
244 | ---
245 |
--------------------------------------------------------------------------------
/openai:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # OpenAI CLI v2.2.2
4 | # Created by @janlay
5 | #
6 |
set -eo pipefail

# openai-cli accepts various exported environment variables:
#   OPENAI_API_KEY      : OpenAI's API key
#   OPENAI_API_ENDPOINT : Custom API endpoint
#   OPENAI_MAX_TOKENS   : Maximum number of tokens to use
#   OPENAI_CHAT_MODEL   : ChatGPT model
#   OPENAI_DATA_DIR     : Directory to store data
OPENAI_API_ENDPOINT="${OPENAI_API_ENDPOINT:-https://api.openai.com}"
OPENAI_API_KEY="${OPENAI_API_KEY:-}"
OPENAI_MAX_TOKENS="${OPENAI_MAX_TOKENS:-2000}"
OPENAI_CHAT_MODEL="${OPENAI_CHAT_MODEL:-gpt-3.5-turbo-1106}"
# Data dir precedence: $OPENAI_DATA_DIR, else $XDG_CONFIG_HOME, else ~/.openai.
declare _config_dir="${OPENAI_DATA_DIR:-$XDG_CONFIG_HOME}"
OPENAI_DATA_DIR="${_config_dir:-$HOME/.openai}"

# defaults
readonly _app_name=openai _app_version=2.2.2
readonly default_api_version=1 default_api_name=chat/completions default_model="$OPENAI_CHAT_MODEL" default_topic=General

# Mutable run state, populated by parse() and the API helpers.
declare -i chat_mode=0 dry_run=0
declare tokens_file="$OPENAI_DATA_DIR/total_tokens" api_version=$default_api_version api_name=$default_api_name topic=$default_topic
declare dump_file dumped_file data_file temp_dir rest_args prompt_file prompt
29 |
trap cleanup EXIT
# Remove the per-invocation temp directory (if one was created) on any exit path.
cleanup() {
  if [ -d "$temp_dir" ]; then
    rm -rf -- "$temp_dir"
  fi
}
36 |
# Print a message to stderr and exit with the given status (default 1).
# A status of 0 suppresses the "appname: " prefix (used for info/usage text).
raise_error() {
  local message="$1" status="${2:-1}"
  [ "$2" = 0 ] || echo -n "$_app_name: " >&2
  echo -e "$message" >&2
  exit "$status"
}
42 |
# Print the current topic's stored JSON, or an empty object when no
# conversation file exists yet.
# Replaces the `[ -f ] && cat || echo` pattern: if cat ever failed on an
# existing file, the fallback '{}' was appended after the error instead of
# surfacing the failure.
load_conversation() {
  if [ -f "$data_file" ]; then
    cat "$data_file"
  else
    echo '{}'
  fi
}
46 |
# Append one message to the topic's data file.
# $1 = role (system/user/assistant), $2 = content: a plain string, or a JSON
# object (anything starting with '{') which is passed through as-is.
update_conversation() {
  local message="$2" conversation
  # Wrap plain strings as {"content": ...}; JSON objects pass through untouched.
  [[ $message == \{* ]] || message=$(jq -n --arg content "$message" '{$content}')
  message=$(jq --arg role "$1" '. += {$role}' <<<"$message")
  conversation=$(load_conversation)
  jq --argjson item "$message" '.messages += [$item]' <<<"$conversation" >"$data_file"
}
54 |
# Record token usage: add $1 to the current topic's total (when a topic file
# exists) and to the global running total in $tokens_file.
save_tokens() {
  local conversation spent="$1"
  if [ -f "$data_file" ]; then
    conversation=$(load_conversation)
    jq --argjson tokens "$spent" '.total_tokens += $tokens' <<<"$conversation" >"$data_file"
  fi

  local running=0
  [ -f "$tokens_file" ] && running=$(cat "$tokens_file")
  echo "$((running + spent))" >"$tokens_file"
}
66 |
# Build the request prompt from CLI words (rest_args) or from $prompt_file.
# Leading '+key=value' words become JSON properties written to
# $temp_dir/props; the remaining words form the prompt text written to
# $temp_dir/prompt. With no words, $prompt_file is validated for later use.
read_prompt() {
  local token taking_props=1 prop_json='{}' prompt_text
  if [ ${#rest_args[@]} -gt 0 ]; then
    for token in "${rest_args[@]}"; do
      if [ $taking_props -eq 1 ] && [ "${token:0:1}" = '+' ]; then
        token="${token:1}"
        # Choose --argjson for numbers/booleans/arrays/objects so jq keeps
        # their JSON type; everything else stays a string via --arg.
        local jq_args=(--arg key "${token%%=*}") value="${token#*=}" flavor=--arg
        [[ $value =~ ^[+-]?\ ?[0-9.]+$ || $value = true || $value = false || $value == [\[\{]* ]] && flavor=--argjson
        jq_args+=("$flavor" value "$value")
        prop_json=$(jq "${jq_args[@]}" '.[$key] = $value' <<<"$prop_json")
      else
        # The first non-'+' word ends property collection for good.
        prompt_text="$prompt_text $token"
        taking_props=0
      fi
    done
    [ -n "$prop_json" ] && echo "$prop_json" >"$temp_dir/props"
  fi

  if [ -n "$prompt_text" ]; then
    [ -n "$prompt_file" ] && echo "* Prompt file \`$prompt_file' will be ignored as the prompt parameters are provided." >&2
    # Strip the leading space accumulated by the concatenation above.
    echo -n "${prompt_text:1}" >"$temp_dir/prompt"
  elif [ -n "$prompt_file" ]; then
    [ -f "$prompt_file" ] || raise_error "File not found: $prompt_file." 3
    [[ -s $prompt_file ]] || raise_error "Empty file: $prompt_file." 4
  fi
}
96 |
# GET /models — dump the available model list, pretty-printed by jq.
openai_models() {
  call_api | jq
}
100 |
# POST /moderations — classify the prompt text with the latest moderation model.
openai_moderations() {
  local prop_file="$temp_dir/props" payload="{\"model\": \"text-moderation-latest\"}"

  # Merge any user-supplied '+key=value' properties over the defaults.
  read_prompt
  [ -f "$prop_file" ] && payload=$(jq -n --argjson payload "$payload" '$payload | . += input' <"$prop_file")

  # Attach the prompt (from args, file, or stdin) as the .input field.
  local payload_file="$temp_dir/payload" input_file="$temp_dir/prompt"
  [ -f "$input_file" ] || input_file="${prompt_file:-/dev/stdin}"
  jq -Rs -cn --argjson payload "$payload" '$payload | .input = input' "$input_file" >"$payload_file"

  call_api | jq -c '.results[]'
}
115 |
# POST /images/generations — create image(s) from the prompt and print URL(s).
openai_images_generations() {
  local prop_file="$temp_dir/props" payload="{\"n\": 1, \"size\": \"1024x1024\"}"

  # Merge user properties over the defaults; force URL response format.
  read_prompt
  [ -f "$prop_file" ] && payload=$(jq -n --argjson payload "$payload" '$payload | . += input | . += {response_format: "url"}' <"$prop_file")

  # Attach the prompt (from args, file, or stdin) as the .prompt field.
  local payload_file="$temp_dir/payload" input_file="$temp_dir/prompt"
  [ -f "$input_file" ] || input_file="${prompt_file:-/dev/stdin}"
  jq -Rs -cn --argjson payload "$payload" '$payload | .prompt = input' "$input_file" >"$payload_file"

  call_api | jq -r '.data[].url'
}
130 |
# POST /embeddings — embed the prompt text and print the raw JSON response.
openai_embeddings() {
  local prop_file="$temp_dir/props" payload="{\"model\": \"text-embedding-ada-002\"}"

  # Merge any user-supplied '+key=value' properties over the defaults.
  read_prompt
  [ -f "$prop_file" ] && payload=$(jq -n --argjson payload "$payload" '$payload | . += input' <"$prop_file")

  # Attach the prompt (from args, file, or stdin) as the .input field.
  local payload_file="$temp_dir/payload" input_file="$temp_dir/prompt"
  [ -f "$input_file" ] || input_file="${prompt_file:-/dev/stdin}"
  jq -Rs -cn --argjson payload "$payload" '$payload | .input = input' "$input_file" >"$payload_file"

  call_api | jq -c
}
145 |
# POST /chat/completions — stream a chat response, echoing text as it arrives.
# In chat mode (-c) the exchange is appended to the topic's conversation file.
openai_chat_completions() {
  [ -n "$dumped_file" ] || {
    local prop_file="$temp_dir/props" payload="{\"model\": \"$default_model\", \"stream\": true, \"temperature\": 0.5, \"max_tokens\": $OPENAI_MAX_TOKENS}"

    # Merge user '+key=value' properties over the defaults.
    read_prompt
    [ -f "$prop_file" ] && {
      payload=$(jq -n --argjson payload "$payload" '$payload | . += input | . += {messages: []}' <"$prop_file")
    }

    local data
    data=$(load_conversation | jq .messages)
    [ "$topic" != "$default_topic" ] && {
      if [ $chat_mode -eq 1 ]; then
        # load all messages for chat mode
        payload=$(jq --argjson messages "$data" 'setpath(["messages"]; $messages)' <<<"$payload")
      else
        # load only first message for non-chat mode
        payload=$(jq --argjson messages "$data" 'setpath(["messages"]; [$messages[0]])' <<<"$payload")
      fi
    }
    # append user's prompt to messages
    local payload_file="$temp_dir/payload" input_file="$temp_dir/prompt"
    [ -f "$input_file" ] || input_file="${prompt_file:-/dev/stdin}"
    jq -Rs -cn --argjson payload "$payload" '$payload | .messages += [{role: "user", content: input}]' "$input_file" >"$payload_file"
  }

  # Parse the SSE stream chunk by chunk, printing content deltas as they arrive.
  local chunk reason text role fn_name
  call_api | while read -r chunk; do
    [ -z "$chunk" ] && continue
    chunk=$(cut -d: -f2- <<<"$chunk" | jq '.choices[0]')
    reason=$(jq -r '.finish_reason // empty' <<<"$chunk")
    [[ $reason = stop || $reason = function_call ]] && break
    [ -n "$reason" ] && raise_error "API error: $reason" 10

    # get role and function info from the first chunk
    [ -z "$role" ] && {
      role=$(jq -r '.delta.role // empty' <<<"$chunk")
      fn_name=$(jq -r '.delta.function_call.name // empty' <<<"$chunk")
    }

    # 'printf x' then stripping two chars preserves trailing newlines that
    # $() would otherwise eat — workaround: https://stackoverflow.com/a/15184414
    chunk=$(
      jq -r '.delta | .function_call.arguments // .content // empty' <<<"$chunk"
      printf x
    )
    # ensure chunk is not empty
    [ ${#chunk} -ge 2 ] || continue

    chunk="${chunk:0:${#chunk}-2}"
    text="$text$chunk"
    echo -n "$chunk"
  done

  # append response to topic file for chat mode
  # NOTE(review): the while loop above runs in a pipeline subshell, so $text,
  # $role and $fn_name assigned inside it are not visible here — verify against
  # upstream whether this bookkeeping actually records the assistant reply.
  [ "$chat_mode" -eq 1 ] && {
    [ -n "$fn_name" ] && text=$(jq -n --arg name "$fn_name" --argjson arguments "${text:-\{\}}" '{function_call: {$name, $arguments}}')

    update_conversation user "$prompt"
    update_conversation "$role" "$text"
  }
  echo
}
209 |
# shellcheck disable=SC2120
# Perform the HTTP request for the current API ($api_name / v$api_version).
# Honors: $dumped_file (replay a saved response instead of calling out),
# $dry_run (print the request and exit), $payload_file (JSON body, a dynamic
# variable set by the caller), $dump_file (save response to file and exit).
call_api() {
  # return dumped file if specified
  [ -n "$dumped_file" ] && {
    cat "$dumped_file"
    return
  }

  local url="$OPENAI_API_ENDPOINT/v$api_version/$api_name" auth="Bearer $OPENAI_API_KEY"

  # dry-run mode: show what would be sent, masking most of the API key
  [ "$dry_run" -eq 1 ] && {
    echo "Dry-run mode, no API calls made."
    echo -e "\nRequest URL:\n--------------\n$url"
    echo -en "\nAuthorization:\n--------------\n"
    sed -E 's/(sk-.{3}).{41}/\1****/' <<<"$auth"
    [ -n "$payload_file" ] && {
      echo -e "\nPayload:\n--------------"
      jq <"$payload_file"
    }
    exit 0
  } >&2

  local args=("$url" --no-buffer -fsSL -H 'Content-Type: application/json' -H "Authorization: $auth")
  [ -n "$payload_file" ] && args+=(-d @"$payload_file")
  [ $# -gt 0 ] && args+=("$@")

  [ -n "$dump_file" ] && args+=(-o "$dump_file")
  curl "${args[@]}"
  [ -z "$dump_file" ] || exit 0
}
241 |
# Seed a new topic's conversation with a system message built from the
# remaining CLI words, then report success and exit (status 0).
create_topic() {
  local initial="${rest_args[*]}"
  update_conversation system "$initial"
  raise_error "Topic '$topic' created with initial prompt '$initial'" 0
}
246 |
# Print the full help text via raise_error with status 0 (no app-name prefix).
# NOTE(review): the help text below is a single runtime string; this dump
# appears to have stripped leading indentation, so the exact layout may
# differ from the original source — verify against upstream before reformatting.
247 | usage() {
248 | raise_error "OpenAI Client v$_app_version
249 |
250 | SYNOPSIS
251 | ABSTRACT
252 | $_app_name [-n] [-a api_name] [-v api_version] [-o dump_file] [INPUT...]
253 | $_app_name -i dumped_file
254 |
255 | DEFAULT_API (v$default_api_version/$default_api_name)
256 | $_app_name [-c] [+property=value...] [@TOPIC] [-f file | prompt ...]
257 | prompt
258 | Prompt string for the request to OpenAI API. This can consist of multiple
259 | arguments, which are considered to be separated by spaces.
260 | -f file
261 | A file to be read as prompt. If file is - or neither this parameter nor a prompt
262 | is specified, read from standard input.
263 | -c
264 | Continues the topic, the default topic is '$default_topic'.
265 | property=value
266 | Overwrites default properties in payload. Prepend a plus sign '+' before property=value.
267 | eg: +model=gpt-3.5-turbo-0301, +stream=false
268 |
269 | TOPICS
270 | Topic starts with an at sign '@'.
271 | To create new topic, use \`$_app_name @new_topic initial prompt'
272 |
273 | OTHER APIS
274 | $_app_name -a models
275 |
276 | GLOBAL OPTIONS
277 | Global options apply to all APIs.
278 | -v version
279 | API version, default is '$default_api_version'.
280 | -a name
281 | API name, default is '$default_api_name'.
282 | -n
283 | Dry-run mode, don't call API.
284 | -o filename
285 | Dumps API response to a file and exits.
286 | -i filename
287 | Uses specified dumped file instead of requesting API.
288 | Any request-related arguments and user input are ignored.
289 |
290 | --
291 | Ignores rest of arguments, useful when unquoted prompt consists of '-'.
292 |
293 | -h
294 | Shows this help" 0
295 | }
296 |
# Parse global options, an optional leading @topic argument, and stash the
# remaining words in the global rest_args array.
parse() {
  local opt
  while getopts 'v:a:f:i:o:cnh' opt; do
    case "$opt" in
      c) chat_mode=1 ;;
      v) api_version="$OPTARG" ;;
      a) api_name="$OPTARG" ;;
      f)
        prompt_file="$OPTARG"
        # '-f -' means read from stdin, same as giving no file at all.
        [ "$prompt_file" = - ] && prompt_file=
        ;;
      n) dry_run=1 ;;
      i) dumped_file="$OPTARG" ;;
      o) dump_file="$OPTARG" ;;
      h | ?) usage ;;
    esac
  done
  shift "$((OPTIND - 1))"

  # extract the leading topic (first positional argument starting with '@')
  [[ "$1" =~ ^@ ]] && {
    topic="${1#@}"
    shift
  }

  # Chat mode needs a dedicated topic file to store the conversation in.
  [ $chat_mode -eq 0 ] || {
    [[ -n $topic && $topic != "$default_topic" ]] || raise_error 'Topic is required for chatting.' 2
  }

  rest_args=("$@")
}
342 |
# Abort (with optional exit status $2, default 1) when a required external
# executable is not on PATH.
check_bin() {
  if ! command -v "$1" >/dev/null; then
    raise_error "$1 not found. Use package manager (Homebrew, apt-get etc.) to install it." "${2:-1}"
  fi
}
346 |
# Entry point: parse arguments, ensure jq is available, then either dispatch
# to the requested API handler or create a brand-new topic.
main() {
  parse "$@"
  check_bin jq 10

  mkdir -p "$OPENAI_DATA_DIR"
  data_file="$OPENAI_DATA_DIR/$topic.json"
  temp_dir=$(mktemp -d)

  if [[ $topic == "$default_topic" || -f "$data_file" ]]; then
    [ -z "$OPENAI_API_KEY" ] && raise_error 'OpenAI API key is required.' 11

    # Map the API name to its handler, e.g. chat/completions -> openai_chat_completions.
    local fn="openai_${api_name//\//_}"
    [ "$(type -t "$fn")" = function ] || raise_error "API '$api_name' is not available." 12
    "$fn"
  else
    # Unknown topic: remaining args become its initial (system) prompt.
    [ ${#rest_args[@]} -gt 0 ] || raise_error "Prompt for new topic is required" 13
    create_topic
  fi
}

main "$@"
368 |
--------------------------------------------------------------------------------