├── .DS_Store ├── .gitignore ├── LICENSE ├── README.md ├── bash-utils ├── openai.bash └── release.sh ├── choice_test.py ├── config.toml ├── poetry.lock ├── pyproject.toml ├── requirements.txt ├── results.csv ├── src └── kel │ ├── __init__.py │ ├── __main__.py │ ├── __version__.py │ ├── assistant │ ├── __init__.py │ ├── assist_test.py │ └── summon_assistant.py │ ├── config │ ├── __init__.py │ └── get_configs.py │ ├── constants │ ├── __init__.py │ ├── base_price.py │ └── constants.py │ ├── gpt │ ├── __init__.py │ ├── askanthropic.py │ ├── askgoogle.py │ ├── askgpt.py │ ├── askollama.py │ └── askopenai.py │ ├── inputs │ ├── __init__.py │ ├── gatekeeper.py │ └── inputs.py │ └── utils │ ├── __init__.py │ ├── cost.py │ ├── models.py │ └── utils.py └── tests └── test____main__.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | venv/ 163 | .idea/ 164 | .DS_Store 165 | 166 | choice_test.py 167 | results.csv 168 | 169 | 170 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 NaveenKumar Namachivayam ⚡ 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 💬 Kel 2 | 3 | [![Install with pip](https://img.shields.io/badge/Install_with-pip-blue)](https://pypi.org/project/kel-cli) 4 | ![PyPI - Version](https://img.shields.io/pypi/v/kel-cli) 5 | 6 | Kel is your AI assistant in your CLI. 7 | 8 | > Kel `கேள்` means `ask` in Tamil. 
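Ask a question right from your terminal (a quick taste; answers come from the LLM you configure, so output will vary):

```bash
kel "git command to rebase"
```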
9 |
10 | ## 🎥 Demo
11 |
12 | ![Kel-Demo](https://raw.githubusercontent.com/QAInsights/kel-docs/main/static/img/kel-demo.gif)
13 |
14 | ## ✅ Features
15 |
16 | - Free and Open Source
17 | - Bring your own API keys
18 | - Supports multiple Large Language Models (LLMs) such as GPT-4, Claude, Llama 2 (via Ollama), and Google Gemini
19 | - Supports OpenAI assistants to chat with your documents
20 | - Customizable
21 |
22 | ## 🧩 Installation
23 |
24 | ### Pre-requisites
25 | - Python 3.10 or higher
26 | - pip3
27 | - API keys for OpenAI and other LLMs
28 |
29 | ### ⛳ Steps
30 |
31 | ```bash
32 | python3 -m pip install --user pipx
33 | python3 -m pipx ensurepath
34 | pipx install kel-cli
35 |
36 | # copy the default config file to the current user's home directory
37 | curl -O https://raw.githubusercontent.com/QAInsights/kel/main/config.toml
38 | mkdir -p ~/.kel
39 | mv config.toml ~/.kel/config.toml
40 | ```
41 | Open the config file to customize the settings.
42 |
43 | Set the LLM API keys (e.g., `OPENAI_API_KEY`) as OS environment variables.
44 |
45 | ## Usage
46 |
47 | ```bash
48 | kel -v
49 | ```
50 |
51 | ```bash
52 | kel -h
53 | ```
54 |
55 | ```bash
56 | kel "git command to rebase"
57 | ```
58 |
59 | ```bash
60 | kel "command to get active connections in linux"
61 | ```
62 |
63 | ```bash
64 | kel "What was the population of India in 1990?"
65 |
66 | > I'm sorry, I can only assist with questions related to software engineering and command line tools.
67 | I am unable to provide information on the population of India in 1990.
68 | ```
69 |
70 | Now change the prompt and ask the same question.
71 | ```bash
72 | kel "What was the population of India in 1990?" -p "You are a demography expert"
73 |
74 | > The population of India in 1990 was around 874 million people.
75 | ```
76 |
77 | Now change the LLM and ask the same question.
78 | ```bash
79 | kel "What was the population of India in 1990?" -p "You are a demography expert" -c ollama -m llama2
80 | ```
81 |
82 | To view the configuration details of a provider, run the following command.
83 | ```bash
84 | kel -s openai
85 | ```
86 |
87 | > [!IMPORTANT]
88 | > LLM pricing varies based on usage. Please check the pricing before use.
89 | > LLMs can make mistakes. Review the answers before using them.
90 |
91 |
92 | ## 🧰 Configuration
93 |
94 | Kel can be configured using a [config file](./config.toml). It is a TOML file and supports a vast number of options.
95 |
96 | The config file is resolved from `~/.kel/config.toml`, `~/.config/kel/config.toml`, or the path set in the `KEL_CONFIG_FILE` environment variable.
97 |
98 | ## ⚙️ Defaults
99 |
100 | - OpenAI's `gpt-3.5-turbo-1106`
101 | - Display stats
102 | - Default prompt focuses on developers
103 | - Copies the answer to clipboard
104 | - and more...
105 |
106 | ## 💰 Support
107 |
108 | If you like this project, please consider supporting it using the link below.
109 |
110 | - Buy me a coffee: https://www.buymeacoffee.com/qainsights
111 |
112 |
113 |
114 |
--------------------------------------------------------------------------------
/bash-utils/openai.bash:
--------------------------------------------------------------------------------
1 |
2 | # OpenAI API Key
3 | # Get OpenAI API key from environment variable
4 |
5 | openai_api_key() {
6 |   if [ -z "$OPENAI_API_KEY" ]; then
7 |     echo "OPENAI_API_KEY is not set"
8 |     return 1
9 |   fi
10 |   echo "$OPENAI_API_KEY"
11 | }
12 |
13 |
14 | curl "https://api.openai.com/v1/assistants?order=desc&limit=20" \
15 |   -H "Content-Type: application/json" \
16 |   -H "Authorization: Bearer $OPENAI_API_KEY" \
17 |   -H "OpenAI-Beta: assistants=v1" | jq -r '.data[].id' | tee assistant_ids.txt
18 |
19 | # Read assistant ids from file one by one
20 | while read -r id; do
21 |   echo "Deleting assistant $id"
22 |   echo "curl https://api.openai.com/v1/assistants/$id"
23 |   curl https://api.openai.com/v1/assistants/"$id" \
24 |     -H "Content-Type: application/json" \
25 |     -H "Authorization: Bearer $OPENAI_API_KEY" \
26 |     -H "OpenAI-Beta: assistants=v1" \
27 |     -X DELETE
28 |   sleep 1
29 | done < assistant_ids.txt
30 |
31 | # Delete assistant_ids.txt file
32 | rm assistant_ids.txt
33 |
34 | # Delete an assistant file from OpenAI (example ids)
35 |
36 | curl https://api.openai.com/v1/assistants/asst_abc123/files/file-abc123 \
37 |   -H "Authorization: Bearer $OPENAI_API_KEY" \
38 |   -H 'Content-Type: application/json' \
39 |   -H 'OpenAI-Beta: assistants=v1' \
40 |   -X DELETE
41 |
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/bash-utils/release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT:
4 | # Change the version in pyproject.toml and __version__.py
5 | # before running this script
6 |
7 | # get input from user
8 | # shellcheck disable=SC2162
9 |
10 | read -p "Did you change the version in pyproject.toml and __version__.py? (y/n) " message
11 |
12 | if [ "$message" != "y" ]; then
13 |   echo "Please change the version in pyproject.toml and __version__.py"
14 |   exit 1
15 | else
16 |   echo "Continuing with the release"
17 |   rm -rf ../dist
18 |   echo "Release script"
19 |   source ../*env/bin/activate
20 |   poetry build
21 |   twine upload ../dist/*
22 |
23 | fi
--------------------------------------------------------------------------------
/choice_test.py:
--------------------------------------------------------------------------------
1 | from langchain.callbacks.manager import CallbackManager
2 | from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
3 | # from langchain.llms import Ollama
4 | #
5 | # llm = Ollama(
6 | #     model="llama2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
7 | # )
8 | # llm("Tell me about the history of AI")
9 |
10 |
11 | # from kel.config import get_configs as config
12 | #
13 | # print((lambda x: config.get_default_anthropic_streaming_response())(None))
14 |
15 | # import random
16 | #
17 | # user_choice_1 = "Give me min, max, 95 percentile, 99 percentile elapsed or response time in a table format."
18 | # user_choice_2 = "Analyze the data and give me the bottleneck in a simple sentence."
19 | # user_choice_3 = "Give me the passed and failed transactions in a table format."
20 | # user_choice_4 = "Give me the HTTP response code split by transaction in a table format."
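# A hedged sketch of the same menu wiring tried further below: instead of
# collecting the variable *names* as strings ("user_choice_1", ...), map the
# menu numbers straight to the user_choice_* strings defined above.
# choices = {n: text for n, text in enumerate(
#     [user_choice_1, user_choice_2, user_choice_3, user_choice_4], start=1)}
# print(choices.get(2, "Invalid choice"))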
21 | # 22 | # 23 | # openai_response_prefix = ["Thinking...", "Crunching...", "Analyzing...", 24 | # "Processing...", "Calculating...", "Working...", 25 | # "Cooking", "Slicing...", "Dicing...", "Chopping..."] 26 | # 27 | # print(f""" 28 | # random.choice(openai_response_prefix): {random.choice(openai_response_prefix)} 29 | # """) 30 | # openai_assistant_choices = [] 31 | # for choice in range(1, 5): 32 | # openai_assistant_choices.append(f"user_choice_{choice}") 33 | # 34 | # choices = { 35 | # 1: openai_assistant_choices[0], 36 | # 2: openai_assistant_choices[1], 37 | # 3: openai_assistant_choices[2], 38 | # 4: openai_assistant_choices[3], 39 | # } 40 | # 41 | # user_input = input("Enter your choice: ") 42 | # while True: 43 | # if user_input == "exit": 44 | # break 45 | # 46 | # if user_input.strip().isdigit() and int(user_input.strip()) in choices: 47 | # print(openai_assistant_choices[int(user_input.strip()) - 1]) 48 | # user_input = input("Enter your choice: ") 49 | # continue 50 | # if user_input.strip().isdigit() and int(user_input.strip()) not in choices: 51 | # print("Invalid choice. Please try again.") 52 | # user_input = input("Enter your choice: ") 53 | # continue 54 | # else: 55 | # print(user_input) 56 | # user_input = input("Enter your choice: ") 57 | # continue 58 | -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | [companies] 2 | supported_companies = ["OpenAI", "Anthropic", "Ollama", "Google"] 3 | 4 | [general] 5 | protocol = "https" 6 | default_language = "english" 7 | default_company_name = "OpenAI" 8 | copy_to_clipboard = true 9 | display_llm_company_model_name = true 10 | 11 | [style] 12 | # Supported colors: https://rich.readthedocs.io/en/stable/appendix/colors.html#appendix-colors 13 | response_color = "green" 14 | warning_color = "yellow" 15 | error_color = "red" 16 | info_color = "cyan" 17 | 18 | [stats] 19 | display_cost = true 20 | display_tokens = true 21 | display_response_time = true 22 | 23 | [openai] 24 | default_openai_model_name = "gpt-3.5-turbo-1106" 25 | default_openai_org_name = "" 26 | default_openai_endpoint = "api.openai.com" 27 | default_openai_uri = "/v1/chat/completions" 28 | default_openai_max_tokens = 100 29 | default_openai_temperature = 0.9 30 | default_openai_prompt = """ 31 | You are an expert in the field of software engineering and command line tools. 32 | If I ask for the commands, please give me only the commands which is enclosed in 33 | quotes. 34 | Do not return anything other than the command. Do not wrap responses in quotes. 35 | """ 36 | 37 | [openai_assistant] 38 | enable_openai_assistant = false 39 | openai_assistant_model_name = "gpt-4-1106-preview" 40 | openai_assistant_instructions = """ 41 | Analyse the file and answer questions about performance stats. Give me the stats in a table format unless I ask it in a different way. 42 | If you have trouble in understanding the file format, consider it as a CSV file unless I specify the file format. 43 | Keep the answers short and simple unless I ask for a detailed explanation. 44 | """ 45 | openai_assistant_prompt = """ 46 | Do not give detailed explanation. If you find any bottleneck, please share that as well. 47 | """ 48 | openai_assistant_choice_1 = "Give me min, max, 95 percentile, 99 percentile elapsed or response time in a table format." 49 | openai_assistant_choice_2 = "Analyze the data and give me the bottleneck in a simple sentence." 
50 | openai_assistant_choice_3 = "Give me the passed and failed transactions in a table format." 51 | openai_assistant_choice_4 = "Give me the HTTP response code split by transaction in a table format." 52 | 53 | openai_delete_assistant_at_exit = true 54 | 55 | 56 | [anthropic] 57 | # chat mode is not available for Anthropic yet 58 | anthropic_enable_chat = false 59 | default_anthropic_model_name = "claude-2.1" 60 | default_anthropic_max_tokens = 100 61 | default_anthropic_streaming_response = true 62 | default_anthropic_prompt = """ 63 | You are an expert in the field of software engineering and command line tools. 64 | If I ask for the commands, please give me only the commands which is enclosed in 65 | quotes. 66 | Give me the command in one sentence, unless I ask you to give it in a different way. 67 | Do not return anything other than the command. Do not wrap responses in quotes. 68 | """ 69 | 70 | [ollama] 71 | ollama_endpoint = "localhost:11434" 72 | # chat mode is not available for Ollama yet 73 | ollama_enable_chat = false 74 | default_ollama_model_name = "llama2" 75 | default_ollama_max_tokens = 100 76 | default_ollama_streaming_response = true 77 | default_ollama_prompt = """ 78 | You are an expert in the field of software engineering and command line tools. 79 | If I ask for the commands, please give me only the commands which is enclosed in 80 | quotes. 81 | Give me the command in one sentence, unless I ask you to give it in a different way. 82 | Do not return anything other than the command. Do not wrap responses in quotes. 83 | """ 84 | 85 | [google] 86 | default_google_model_name = "models/gemini-pro" 87 | default_google_streaming_response = true 88 | default_google_prompt = """ 89 | You are an expert in the field of software engineering and command line tools. 90 | If I ask for the commands, please give me only the commands which is enclosed in 91 | quotes. 92 | Politely decline to answer for other questions. 93 | Give me the command in one sentence, unless I ask you to give it in a different way. 94 | Do not return anything other than the command. Do not wrap responses in quotes. 95 | """ 96 | enable_prompt_feedback = false 97 | view_all_response_candidates = true 98 | [google.safety_settings] 99 | # enter the setting in the format of `category=threshold` e.g. hate_speech=3 100 | # acceptable threshold values are [1, 2, 3, 4] 101 | hate_speech=1 102 | harassment=1 103 | 104 | 105 | ############################################## 106 | # Thresholds for safety settings 107 | # ---------------------------------------------- 108 | # BLOCK_LOW_AND_ABOVE = 1 109 | # BLOCK_MEDIUM_AND_ABOVE = 2 110 | # BLOCK_ONLY_HIGH = 3 111 | # BLOCK_NONE = 4 112 | 113 | # For categories, refer to 114 | # ---------------------------------------------- 115 | # https://ai.google.dev/api/python/google/ai/generativelanguage/HarmCategory 116 | 117 | ############################################## 118 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "kel-cli" 3 | version = "0.0.12" 4 | description = "AI assistant in your CLI." 
5 | authors = ["NaveenKumar Namachivayam "] 6 | license = "MIT" 7 | readme = "README.md" 8 | packages = [ 9 | { include="kel", from="./src"}, 10 | ] 11 | 12 | keywords = [ 13 | "gpt", "cli", "llm", "openai", "anthropic", "kel", "google", 14 | "ollama", "ai", "artificial intelligence", "assistant", "chatbot", "chat", 15 | ] 16 | 17 | [tool.poetry.dependencies] 18 | python = "^3.10" 19 | annotated-types = "^0.6.0" 20 | anyio = "^4.1.0" 21 | certifi = "^2023.11.17" 22 | click = "^8.1.7" 23 | colorama = "^0.4.6" 24 | distro = "^1.8.0" 25 | exceptiongroup = "^1.2.0" 26 | h11 = "^0.14.0" 27 | httpcore = "^1.0.2" 28 | httpx = "^0.25.2" 29 | idna = "^3.6" 30 | markdown-it-py = "^3.0.0" 31 | mdurl = "^0.1.2" 32 | openai = "^1.3.9" 33 | pydantic = "^2.5.2" 34 | pydantic-core = "^2.14.5" 35 | pygments = "^2.17.2" 36 | pyperclip = "^1.8.2" 37 | rich = "^13.7.0" 38 | sniffio = "^1.3.0" 39 | toml = "^0.10.2" 40 | tqdm = "^4.66.1" 41 | typing-extensions = "^4.9.0" 42 | anthropic = "^0.7.8" 43 | langchain = "^0.0.350" 44 | google-generativeai = "^0.3.1" 45 | 46 | 47 | [build-system] 48 | requires = ["poetry-core"] 49 | build-backend = "poetry.core.masonry.api" 50 | 51 | [project.urls] 52 | Homepage = "https://github.com/qainsights/kel" 53 | Issues = "https://github.com/qainsights/kel/issues" 54 | 55 | [tool.poetry.scripts] 56 | kel = 'kel.__main__:main' 57 | 58 | 59 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp==3.9.1 2 | aiosignal==1.3.1 3 | annotated-types==0.6.0 4 | anthropic==0.7.8 5 | anyio==3.7.1 6 | async-timeout==4.0.3 7 | attrs==23.1.0 8 | build==1.0.3 9 | CacheControl==0.13.1 10 | cachetools==5.3.2 11 | certifi==2023.11.17 12 | cffi==1.16.0 13 | charset-normalizer==3.3.2 14 | cleo==2.1.0 15 | click==8.1.7 16 | colorama==0.4.6 17 | crashtest==0.4.1 18 | dataclasses-json==0.6.3 19 | distlib==0.3.8 20 | distro==1.8.0 21 | dulwich==0.21.7 22 | exceptiongroup==1.2.0 23 | fastjsonschema==2.19.0 24 | filelock==3.13.1 25 | frozenlist==1.4.0 26 | fsspec==2023.12.2 27 | google-ai-generativelanguage==0.4.0 28 | google-api-core==2.15.0 29 | google-auth==2.25.2 30 | google-generativeai==0.3.1 31 | googleapis-common-protos==1.62.0 32 | grpcio==1.60.0 33 | grpcio-status==1.60.0 34 | h11==0.14.0 35 | httpcore==1.0.2 36 | httpx==0.25.2 37 | huggingface-hub==0.19.4 38 | idna==3.6 39 | importlib-metadata==7.0.0 40 | installer==0.7.0 41 | jaraco.classes==3.3.0 42 | jsonpatch==1.33 43 | jsonpointer==2.4 44 | -e git+https://github.com/QAInsights/kel.git@083289ed928ee9cabc7e7a6899f0878f089fca3f#egg=kel_cli 45 | keyring==24.3.0 46 | langchain==0.0.350 47 | langchain-community==0.0.3 48 | langchain-core==0.1.0 49 | langsmith==0.0.70 50 | markdown-it-py==3.0.0 51 | marshmallow==3.20.1 52 | mdurl==0.1.2 53 | more-itertools==10.1.0 54 | msgpack==1.0.7 55 | multidict==6.0.4 56 | mypy-extensions==1.0.0 57 | numpy==1.26.2 58 | openai==1.3.6 59 | packaging==23.2 60 | pexpect==4.9.0 61 | pkginfo==1.9.6 62 | platformdirs==3.11.0 63 | poetry==1.7.1 64 | poetry-core==1.8.1 65 | poetry-plugin-export==1.6.0 66 | proto-plus==1.23.0 67 | protobuf==4.25.1 68 | ptyprocess==0.7.0 69 | pyasn1==0.5.1 70 | pyasn1-modules==0.3.0 71 | pycparser==2.21 72 | pydantic==2.5.2 73 | pydantic_core==2.14.5 74 | Pygments==2.17.2 75 | pyperclip==1.8.2 76 | pyproject_hooks==1.0.0 77 | PyYAML==6.0.1 78 | rapidfuzz==3.5.2 79 | requests==2.31.0 80 | requests-toolbelt==1.0.0 81 | rich==13.7.0 82 | rsa==4.9 83 | 
shellingham==1.5.4 84 | sniffio==1.3.0 85 | SQLAlchemy==2.0.23 86 | tenacity==8.2.3 87 | tokenizers==0.15.0 88 | toml==0.10.2 89 | tomli==2.0.1 90 | tomlkit==0.12.3 91 | tqdm==4.66.1 92 | trove-classifiers==2023.11.29 93 | typing-inspect==0.9.0 94 | typing_extensions==4.9.0 95 | urllib3==2.1.0 96 | virtualenv==20.25.0 97 | xattr==0.10.1 98 | yarl==1.9.4 99 | zipp==3.17.0 100 | -------------------------------------------------------------------------------- /results.csv: -------------------------------------------------------------------------------- 1 | timeStamp,elapsed,label,responseCode,responseMessage,threadName,dataType,success,failureMessage,bytes,sentBytes,grpThreads,allThreads,URL,Latency,IdleTime,Connect 2 | 1673958946553,95,HTTP Request,200,OK,Thread Group 1-1,text,true,,1591,109,1,1,http://example.com/,92,0,56 3 | 1673958946969,45,HTTP Request,200,OK,Thread Group 1-2,text,true,,1591,109,2,2,http://example.com/,45,0,17 4 | 1673958947719,28,HTTP Request,200,OK,Thread Group 1-1,text,true,,1591,109,2,2,http://example.com/,28,0,0 5 | 1673958948053,27,HTTP Request,200,OK,Thread Group 1-2,text,true,,1591,109,2,2,http://example.com/,27,0,0 6 | 1673958948827,27,HTTP Request,200,OK,Thread Group 1-1,text,true,,1591,109,2,2,http://example.com/,27,0,0 -------------------------------------------------------------------------------- /src/kel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/__init__.py -------------------------------------------------------------------------------- /src/kel/__main__.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from kel.inputs.gatekeeper import gatekeeper_tasks 4 | from kel.gpt.askgpt import gpt 5 | from kel.config.get_configs import get_enable_openai_assistant 6 | from kel.assistant.summon_assistant import summon_assistant 7 | 8 | 9 | def main(): 10 | if get_enable_openai_assistant(): 11 | summon_assistant() 12 | else: 13 | asyncio.run(gpt()) 14 | 15 | 16 | if __name__ == "__main__": 17 | main() 18 | -------------------------------------------------------------------------------- /src/kel/__version__.py: -------------------------------------------------------------------------------- 1 | __version_info__ = (0, 0, 12) 2 | __version__ = '.'.join(map(str, __version_info__)) -------------------------------------------------------------------------------- /src/kel/assistant/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/assistant/__init__.py -------------------------------------------------------------------------------- /src/kel/assistant/assist_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from openai import OpenAI 5 | 6 | client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) 7 | 8 | file = client.files.create( 9 | file=open("../../results.csv", "rb"), 10 | purpose='assistants' 11 | ) 12 | 13 | assistant = client.beta.assistants.create( 14 | name="PerfGPT", 15 | instructions="Analyse the file and answer questions about performance stats", 16 | tools=[{"type": "code_interpreter"}], 17 | model="gpt-4-1106-preview", 18 | file_ids=[file.id], 19 | ) 20 | 21 | thread = client.beta.threads.create() 22 | 23 | message = 
client.beta.threads.messages.create( 24 | thread_id=thread.id, 25 | role="user", 26 | content="Give me the 95th elapsed time percentile, maximum elapsed time, and minimum elapsed time.", 27 | # content=""" 28 | # Give me the 95th elapsed time percentile, maximum elapsed time, and minimum elapsed time. 29 | # Give me unique transactions names, and the number of transactions. 30 | # Present the number of transactions, and the number of transactions with errors. 31 | # Also give HTTP error codes, and the number of transactions with HTTP error codes. 32 | # Everything should be in a table format so that I can understand it. 33 | # Do not give detailed explanation. If you find any bottleneck, please share that as well. 34 | # """, 35 | ) 36 | 37 | run = client.beta.threads.runs.create( 38 | thread_id=thread.id, 39 | assistant_id=assistant.id, 40 | ) 41 | 42 | while run.status != "completed": 43 | run = client.beta.threads.runs.retrieve( 44 | thread_id=thread.id, 45 | run_id=run.id 46 | ) 47 | time.sleep(1) 48 | 49 | messages = client.beta.threads.messages.list( 50 | thread_id=thread.id, 51 | ) 52 | 53 | print(f"M: {messages.data[-1]}") 54 | print("M2: " + messages.data[-1].content[-1].text.value) 55 | 56 | for message in reversed(messages.data): 57 | if message.role != "user": 58 | print("System message " + message.content[-1].text.value) 59 | # print("Each message " + message.content[-1].text.value) 60 | -------------------------------------------------------------------------------- /src/kel/assistant/summon_assistant.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import random 4 | 5 | from kel.inputs.inputs import get_user_inputs_from_cli 6 | 7 | from openai import OpenAI 8 | from rich.progress import Progress 9 | 10 | from kel.config import get_configs as config 11 | from kel.utils.utils import print_in_color 12 | from kel.constants.constants import openai_response_prefix, openai_assistant_prefix, openai_user_prefix, exit_message 13 | 14 | 15 | class Assistant: 16 | 17 | def __init__(self, assistant_name, file): 18 | self.assistant_name = assistant_name 19 | self.file = file 20 | self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) 21 | 22 | self.file = self.client.files.create( 23 | file=open(self.file, "rb"), 24 | purpose='assistants' 25 | ) 26 | self.assistant = self.client.beta.assistants.create( 27 | name=self.assistant_name, 28 | instructions=config.get_openai_assistant_instructions(), 29 | tools=[{"type": "code_interpreter"}, {"type": "retrieval"}], 30 | model=config.get_openai_assistant_model_name(), 31 | file_ids=[self.file.id], 32 | ) 33 | 34 | def create_a_thread(self): 35 | return self.client.beta.threads.create() 36 | 37 | def add_message_to_thread(self, thread_id, content): 38 | return self.client.beta.threads.messages.create( 39 | thread_id=thread_id, 40 | role="user", 41 | content=content, 42 | ) 43 | 44 | def start_run(self, thread_id, assistant_id): 45 | return self.client.beta.threads.runs.create( 46 | thread_id=thread_id, 47 | assistant_id=assistant_id, 48 | ) 49 | 50 | def get_messages_and_print(self, thread_id, run_id): 51 | messages = self.client.beta.threads.messages.list( 52 | thread_id=thread_id, 53 | ) 54 | 55 | for message in reversed(messages.data[:-1]): 56 | if message.role != "user" and message.run_id == run_id: 57 | print_in_color(f"{openai_assistant_prefix}" + message.content[-1].text.value, config.get_info_color()) 58 | 59 | def delete_assistant(self): 60 | if 
config.get_openai_delete_assistant_at_exit(): 61 | self.client.beta.assistants.delete(self.assistant.id) 62 | self.client.files.delete(self.file.id) 63 | 64 | message = "Assistant and files have been deleted successfully." if config.get_openai_delete_assistant_at_exit() else "Assistant and files have not been deleted." 65 | print_in_color(message, config.get_warning_color()) 66 | 67 | 68 | def summon_assistant(): 69 | assistant_name, file = vars(get_user_inputs_from_cli()).values() 70 | 71 | print_in_color("Summoning an assistant...", config.get_info_color()) 72 | print_in_color(f""" 73 | Not sure what to `Kel`? Try one of these choices: 74 | 75 | 1: {config.get_openai_assistant_choices()[0]} 76 | 2: {config.get_openai_assistant_choices()[1]} 77 | 3: {config.get_openai_assistant_choices()[2]} 78 | 4: {config.get_openai_assistant_choices()[3]} 79 | 80 | To exit, type `:q` or `:quit`. 81 | """, config.get_info_color()) 82 | 83 | assistant = Assistant(assistant_name, file) 84 | get_user_question = input(f"{openai_user_prefix}") 85 | 86 | thread = assistant.create_a_thread() 87 | # print(f"Thread id: {thread.id}") 88 | 89 | choices = { 90 | 1: config.get_openai_assistant_choices()[0], 91 | 2: config.get_openai_assistant_choices()[1], 92 | 3: config.get_openai_assistant_choices()[2], 93 | 4: config.get_openai_assistant_choices()[3], 94 | } 95 | 96 | while get_user_question != ":q" or get_user_question != ":quit": 97 | if get_user_question == ":q" or get_user_question == ":quit": 98 | print_in_color(exit_message, config.get_info_color()) 99 | break 100 | 101 | if get_user_question.strip().isdigit() and int(get_user_question.strip()) not in choices: 102 | print_in_color("Invalid choice. Please try again.", config.get_warning_color()) 103 | get_user_question = input(f"{openai_user_prefix}") 104 | continue 105 | 106 | if get_user_question.strip().isdigit() and int(get_user_question.strip()) in choices: 107 | assistant.add_message_to_thread(thread.id, choices[int(get_user_question.strip())]) 108 | run = assistant.start_run(thread.id, assistant.assistant.id) 109 | 110 | else: 111 | message = assistant.add_message_to_thread(thread.id, get_user_question) 112 | # print(f"Message id: {message.id}") 113 | run = assistant.start_run(thread.id, assistant.assistant.id) 114 | # print(f"Run id: {run.id} | Run status: {run.status}") 115 | 116 | with Progress(transient=True) as progress: 117 | task = progress.add_task(f"[cyan]{random.choice(openai_response_prefix)}...", total=100) 118 | 119 | while not progress.finished: 120 | while run.status != "completed": 121 | run = assistant.client.beta.threads.runs.retrieve( 122 | thread_id=thread.id, 123 | run_id=run.id 124 | ) 125 | time.sleep(1) 126 | if run.status == "completed": 127 | progress.update(task, advance=100) 128 | break 129 | progress.update(task, advance=1) 130 | 131 | assistant.get_messages_and_print(thread.id, run.id) 132 | 133 | get_user_question = input(f"{openai_user_prefix}") 134 | 135 | assistant.delete_assistant() 136 | -------------------------------------------------------------------------------- /src/kel/config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/config/__init__.py -------------------------------------------------------------------------------- /src/kel/config/get_configs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 
from kel.constants.constants import app_name 4 | import toml 5 | 6 | from kel.utils.utils import print_in_color 7 | 8 | 9 | def get_config_file_location(): 10 | """ 11 | Get the config file location 12 | Returns: 13 | 14 | """ 15 | config_file = os.getenv("KEL_CONFIG_FILE") 16 | try: 17 | if config_file and os.path.exists(os.path.expanduser(config_file)): 18 | return os.path.expanduser(config_file) 19 | elif os.path.exists(os.path.expanduser(f"~/.{app_name.lower()}/config.toml")): 20 | return os.path.expanduser(f"~/.{app_name.lower()}/config.toml") 21 | else: 22 | return os.path.expanduser("./config.toml") 23 | except Exception as e: 24 | print_in_color(f"Error: {e}", get_error_color()) 25 | sys.exit(1) 26 | 27 | 28 | try: 29 | config = toml.load(get_config_file_location()) 30 | except toml.TomlDecodeError as e: 31 | print(f"Invalid TOML file: {e}") 32 | sys.exit(1) 33 | except Exception as e: 34 | print(f"Error: {e}") 35 | sys.exit(1) 36 | 37 | 38 | def get_all_config_keys_values(): 39 | """ 40 | Get all config keys values 41 | Returns: 42 | 43 | """ 44 | all_config_keys_values = {} 45 | for k, v in config.items(): 46 | all_config_keys_values.update({k: v}) 47 | return all_config_keys_values 48 | 49 | 50 | def get_config_by_key(key): 51 | """ 52 | Get the config by key 53 | Args: 54 | key: 55 | 56 | Returns: 57 | 58 | """ 59 | config = toml.load(get_config_file_location()) 60 | return config[key] 61 | 62 | 63 | def get_default_company_name(): 64 | """ 65 | Get the default company name 66 | Returns: 67 | 68 | """ 69 | return config.get("general", {}).get("default_company_name", "openai") 70 | 71 | 72 | def get_display_llm_company_model_name(): 73 | """ 74 | Get the display llm company model name 75 | Returns: 76 | 77 | """ 78 | return config.get("general", {}).get("display_llm_company_model_name", False) 79 | 80 | 81 | def get_response_language(): 82 | """ 83 | Get the response language 84 | Returns: 85 | 86 | """ 87 | return config.get("general", {}).get("default_language", "en") 88 | 89 | 90 | def get_copy_to_clipboard(): 91 | """ 92 | Get the copy to clipboard 93 | Returns: 94 | 95 | """ 96 | return config.get("general", {}).get("copy_to_clipboard", True) 97 | 98 | 99 | def get_default_protocol(): 100 | """ 101 | Get the protocol 102 | Returns: 103 | 104 | """ 105 | return config.get("general", {}).get("protocol", "https") 106 | 107 | 108 | # OpenAI Configs 109 | def get_openai_default_model(): 110 | """ 111 | Get the default model 112 | Returns: 113 | 114 | """ 115 | return config.get("openai", {}).get("default_openai_model_name", "gpt-4") 116 | 117 | 118 | def get_openai_default_prompt(): 119 | """ 120 | Get the default prompt 121 | Returns: 122 | 123 | """ 124 | return config.get("openai", {}).get("default_openai_prompt", 125 | "You are an expert in software engineering. 
You are helping a developer.") 126 | 127 | 128 | def get_default_openai_endpoint(): 129 | """ 130 | Get the default openai endpoint 131 | Returns: 132 | 133 | """ 134 | return config.get("openai", {}).get("default_openai_endpoint", f"{get_default_protocol()}://api.openai.com") 135 | 136 | 137 | def get_default_openai_uri(): 138 | """ 139 | Get the default openai uri 140 | Returns: 141 | 142 | """ 143 | return config.get("openai", {}).get("default_openai_uri", "/v1/chat/completions") 144 | 145 | 146 | def get_openai_model_name(): 147 | """ 148 | Get the openai model name 149 | Returns: 150 | 151 | """ 152 | return config.get("openai", {}).get("default_model_name", "gpt-4") 153 | 154 | 155 | def get_openai_key(): 156 | """ 157 | Get the openai key 158 | Returns: 159 | 160 | """ 161 | openai_key = os.getenv("OPENAI_API_KEY") 162 | if not openai_key: 163 | sys.exit("Error: OPENAI_API_KEY is not set in the environment variable.") 164 | return openai_key 165 | 166 | 167 | def get_openai_max_tokens(): 168 | """ 169 | Get the openai max tokens 170 | Returns: 171 | 172 | """ 173 | return config.get("openai", {}).get("default_openai_max_tokens", 150) 174 | 175 | 176 | def get_openai_temperature(): 177 | """ 178 | Get the openai temperature 179 | Returns: 180 | 181 | """ 182 | return config.get("openai", {}).get("default_openai_temperature", 0.9) 183 | 184 | 185 | def get_enable_openai_assistant(): 186 | """ 187 | Get the enable openai assistant 188 | Returns: 189 | 190 | """ 191 | return config.get("openai_assistant", {}).get("enable_openai_assistant", False) 192 | 193 | 194 | def get_openai_assistant_model_name(): 195 | """ 196 | Get the openai assistant model name 197 | Returns: 198 | 199 | """ 200 | return config.get("openai_assistant", {}).get("openai_assistant_model_name", "gpt-4-1106-preview") 201 | 202 | 203 | def get_openai_assistant_instructions(): 204 | """ 205 | Get the openai assistant instructions 206 | Returns: 207 | 208 | """ 209 | return config.get("openai_assistant", {}).get("openai_assistant_instructions", 210 | "Analyse the file and answer questions about performance stats") 211 | 212 | 213 | def get_openai_assistant_prompt(): 214 | """ 215 | Get the openai assistant prompt 216 | Returns: 217 | 218 | """ 219 | return config.get("openai_assistant", {}).get("openai_assistant_prompt", 220 | """ 221 | Everything should be in a table format so that I can understand it. 222 | Do not give detailed explanation. If you find any bottleneck, please share that as well. 223 | """ 224 | ) 225 | 226 | 227 | def get_openai_assistant_choices(): 228 | """ 229 | Get the openai assistant choices 230 | :return: 231 | """ 232 | try: 233 | openai_assistant_choices = [ 234 | config.get("openai_assistant", {}).get(f"openai_assistant_choice_{choice}") 235 | for choice in range(1, 5) 236 | if config.get("openai_assistant", {}).get(f"openai_assistant_choice_{choice}") is not None 237 | ] 238 | 239 | if len(openai_assistant_choices) != 4: 240 | sys.exit(f""" 241 | OpenAI Assistant choices are not properly set in the config file. It must have four choices. 242 | Please check the config file at {get_config_file_location()}. 243 | """) 244 | 245 | return openai_assistant_choices 246 | 247 | except Exception as e: 248 | print(f""" 249 | OpenAI Assistant choices are not properly set in the config file. It must have four choices. 250 | Please check the config file at {get_config_file_location()}. 
251 | Error: {e}
252 | """)
253 |         sys.exit(f"Error: {e}")
254 |
255 |
256 | def get_openai_delete_assistant_at_exit():
257 |     """
258 |     Get the openai delete assistant at exit
259 |     Returns:
260 |
261 |     """
262 |     return config.get("openai_assistant", {}).get("openai_delete_assistant_at_exit", True)
263 |
264 |
265 | # Anthropic Configs
266 | def get_anthropic_key():
267 |     """
268 |     Get the anthropic key
269 |     Returns:
270 |
271 |     """
272 |     anthropic_key = os.getenv("ANTHROPIC_API_KEY")
273 |     if not anthropic_key:
274 |         sys.exit("Error: ANTHROPIC_API_KEY is not set in the environment variable.")
275 |     return anthropic_key
276 |
277 |
278 | def get_anthropic_default_model_name():
279 |     """
280 |     Get the anthropic model name
281 |     Returns:
282 |
283 |     """
284 |     return config.get("anthropic", {}).get("default_anthropic_model_name", "claude-2.1")
285 |
286 |
287 | def get_anthropic_default_prompt():
288 |     """
289 |     Get the default prompt
290 |     Returns:
291 |
292 |     """
293 |     return config.get("anthropic", {}).get("default_anthropic_prompt",
294 |                                             "You are an expert in software engineering. You are helping a developer.")
295 |
296 |
297 | def get_anthropic_default_max_tokens():
298 |     """
299 |     Get the default max tokens
300 |     Returns:
301 |
302 |     """
303 |     return config.get("anthropic", {}).get("default_anthropic_max_tokens", 100)
304 |
305 |
306 | def get_default_anthropic_streaming_response():
307 |     """
308 |     Get the default streaming response settings
309 |     Returns:
310 |
311 |     """
312 |     return config.get("anthropic", {}).get("default_anthropic_streaming_response", False)
313 |
314 |
315 | def get_anthropic_enable_chat():
316 |     """
317 |     Get the enable chat
318 |     Returns:
319 |
320 |     """
321 |     return config.get("anthropic", {}).get("anthropic_enable_chat", False)
322 |
323 |
324 | def get_ollama_enable_chat():
325 |     """
326 |     Get the enable chat
327 |     Returns:
328 |
329 |     """
330 |     return config.get("ollama", {}).get("ollama_enable_chat", False)
331 |
332 |
333 | def get_ollama_default_model_name():
334 |     """
335 |     Get the default model name
336 |     Returns:
337 |
338 |     """
339 |     return config.get("ollama", {}).get("default_ollama_model_name", "llama2")
340 |
341 |
342 | def get_ollama_default_prompt():
343 |     """
344 |     Get the default prompt
345 |     Returns:
346 |
347 |     """
348 |     return config.get("ollama", {}).get("default_ollama_prompt",
349 |                                          "You are an expert in software engineering. 
You are helping a developer.") 350 | 351 | 352 | def get_ollama_default_max_tokens(): 353 | """ 354 | Get the default max tokens 355 | Returns: 356 | 357 | """ 358 | return config.get("ollama", {}).get("default_ollama_max_tokens", 100) 359 | 360 | 361 | def get_ollama_streaming_response(): 362 | """ 363 | Get the streaming response 364 | Returns: 365 | 366 | """ 367 | return config.get("ollama", {}).get("default_ollama_streaming_response", False) 368 | 369 | 370 | def get_ollama_default_endpoint(): 371 | """ 372 | Get the endpoint 373 | Returns: 374 | 375 | """ 376 | return config.get("ollama", {}).get("default_ollama_endpoint", f"{get_default_protocol()}://api.ollama.ai") 377 | 378 | 379 | # Stats Configs 380 | def get_display_cost(): 381 | """ 382 | Get the display cost 383 | Returns: 384 | 385 | """ 386 | return config.get("stats", {}).get("display_cost", False) 387 | 388 | 389 | def get_display_tokens(): 390 | """ 391 | Get the display tokens 392 | Returns: 393 | 394 | """ 395 | return config.get("stats", {}).get("display_tokens", False) 396 | 397 | 398 | def get_display_response_time(): 399 | """ 400 | Get the display response time 401 | Returns: 402 | 403 | """ 404 | return config.get("stats", {}).get("display_response_time", False) 405 | 406 | 407 | def get_response_color(): 408 | """ 409 | Get the response color 410 | Returns: 411 | 412 | """ 413 | return config.get("style", {}).get("response_color", "green") 414 | 415 | 416 | def get_warning_color(): 417 | """ 418 | Get the warning color 419 | Returns: 420 | 421 | """ 422 | return config.get("style", {}).get("warning_color", "yellow") 423 | 424 | 425 | def get_error_color(): 426 | """ 427 | Get the error color 428 | Returns: 429 | 430 | """ 431 | return config.get("style", {}).get("error_color", "red") 432 | 433 | 434 | def get_info_color(): 435 | """ 436 | Get the info color 437 | Returns: 438 | 439 | """ 440 | return config.get("style", {}).get("info_color", "cyan") 441 | 442 | 443 | def get_default_google_model_name(): 444 | """ 445 | Get the default google model name 446 | Returns: 447 | 448 | """ 449 | return config.get("google", {}).get("default_google_model_name", "gemini-pro") 450 | 451 | 452 | def get_default_google_streaming_response(): 453 | """ 454 | Get the default google streaming response 455 | Returns: 456 | 457 | """ 458 | return config.get("google", {}).get("default_google_streaming_response", False) 459 | 460 | 461 | def get_default_google_prompt(): 462 | """ 463 | Get the default google prompt 464 | Returns: 465 | 466 | """ 467 | return config.get("google", {}).get("default_google_prompt", 468 | "You are an expert in software engineering. 
You are helping a developer.") 469 | 470 | 471 | def get_google_key(): 472 | """ 473 | Get the openai key 474 | Returns: 475 | 476 | """ 477 | google_key = os.getenv("GOOGLE_API_KEY") 478 | if not google_key: 479 | sys.exit("Error: GOOGLE_API_KEY is not set in the environment variable.") 480 | return google_key 481 | 482 | 483 | def get_google_prompt_feedback(): 484 | """ 485 | Get the prompt feedback 486 | Returns: 487 | 488 | """ 489 | return config.get("google", {}).get("enable_prompt_feedback", False) 490 | 491 | 492 | def get_view_all_response_candidates(): 493 | """ 494 | Get the view all response candidates 495 | Returns: 496 | 497 | """ 498 | return config.get("google", {}).get("view_all_response_candidates", False) 499 | 500 | 501 | def get_google_safety_settings(): 502 | """ 503 | Get the Google safety settings 504 | Returns: 505 | 506 | """ 507 | return config.get("google", {}).get("safety_settings", None) 508 | -------------------------------------------------------------------------------- /src/kel/constants/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/constants/__init__.py -------------------------------------------------------------------------------- /src/kel/constants/base_price.py: -------------------------------------------------------------------------------- 1 | 2 | # OpenAI API price 3 | # For one token, gpt-4-1106-preview costs $0.01/1K tokens = $0.00001/token 4 | openai_api_input_price_per_token = { 5 | "gpt-4-1106-preview": 0.00001, 6 | "gpt-4": 0.00003, 7 | "gpt-4-32k": 0.00006, 8 | "gpt-3.5-turbo-1106": 0.000001, 9 | "gpt-3.5-turbo-instruct": 0.0000015 10 | } 11 | openai_api_output_price_per_token = { 12 | "gpt-4-1106-preview": 0.00003, 13 | "gpt-4": 0.00006, 14 | "gpt-4-32k": 0.00012, 15 | "gpt-3.5-turbo-1106": 0.000002, 16 | "gpt-3.5-turbo-instruct": 0.000002 17 | } -------------------------------------------------------------------------------- /src/kel/constants/constants.py: -------------------------------------------------------------------------------- 1 | from kel.__version__ import __version__ 2 | 3 | valid_ai_company_names = [ 4 | "openai", 5 | "anthropic", 6 | "ollama", 7 | "google" 8 | ] 9 | 10 | valid_ai_company_official_names = [ 11 | "OpenAI", 12 | "Anthropic", 13 | "Ollama", 14 | "Google" 15 | ] 16 | 17 | ## MODELS START ## 18 | 19 | valid_openai_chat_models = [ 20 | "gpt-4", 21 | "gpt-4-32k", 22 | "gpt-4-1106-preview", 23 | "gpt-4-0613", 24 | "gpt-4-0314", 25 | "gpt-3.5-turbo", 26 | "gpt-3.5-turbo-1106", 27 | "gpt-3.5-turbo-16k-0613", 28 | "gpt-3.5-turbo-16k", 29 | "gpt-3.5-turbo-0613" 30 | ] 31 | 32 | valid_anthropic_chat_models = [ 33 | "claude-2.1", 34 | "claude-instant-1.2", 35 | "claude-instant-1", 36 | "claude-2" 37 | ] 38 | 39 | valid_google_models = [ 40 | "models/gemini-pro" 41 | ] 42 | 43 | ## MODELS END ## 44 | 45 | valid_api_keys_env = { 46 | "openai": "OPENAI_API_KEY", 47 | "anthropic": "ANTHROPIC_API_KEY" 48 | } 49 | 50 | valid_show_options = [ 51 | "companies", 52 | "general", 53 | "style", 54 | "stats", 55 | "all" 56 | ] 57 | 58 | 59 | def get_official_names(): 60 | if len(valid_ai_company_official_names) > 2: 61 | return ", ".join(valid_ai_company_official_names[:-1]) + ", and " + valid_ai_company_official_names[-1] 62 | if len(valid_ai_company_official_names) == 2: 63 | return valid_ai_company_official_names[0] + " and " + valid_ai_company_official_names[1] 64 | if 
len(valid_ai_company_official_names) == 1: 65 | return valid_ai_company_official_names[0] 66 | 67 | 68 | # Emoji constants 69 | emoji_info = ":speech_balloon:" 70 | emoji_time = ":clock3:" 71 | emoji_pencil = ":pencil:" 72 | emoji_error = ":x:" 73 | emoji_money = ":moneybag:" 74 | emoji_thinking = ":thinking:" 75 | 76 | # OpenAI constants 77 | openai_response_prefix = ["Thinking...", "Crunching...", "Analyzing...", 78 | "Processing...", "Calculating...", "Working...", 79 | "Cooking", "Slicing...", "Dicing...", "Chopping..."] 80 | openai_user_prefix = "Ask `Kel`: " 81 | openai_assistant_prefix = "Assistant " 82 | exit_message = "Exiting chat mode... Bye!" 83 | pricing_error_message = "Error: Pricing information is not available for this model." 84 | 85 | # App constants 86 | app_name = "Kel" 87 | app_version = __version__ 88 | 89 | app_description = f""" 90 | Ask Kel. Your CLI based AI assistant. 91 | Supported AI companies: {get_official_names()}. 92 | """ 93 | epilog = 'Thank you for using Kel!' 94 | -------------------------------------------------------------------------------- /src/kel/gpt/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/gpt/__init__.py -------------------------------------------------------------------------------- /src/kel/gpt/askanthropic.py: -------------------------------------------------------------------------------- 1 | from io import StringIO 2 | import time 3 | 4 | from anthropic import HUMAN_PROMPT, AI_PROMPT 5 | 6 | from kel.config import get_configs as config 7 | from kel.utils.utils import copy_to_clipboard, print_in_color, before_ask_gpt_display, after_ask_gpt_display 8 | 9 | 10 | async def ask_anthropic(client, question, company, prompt, model, max_tokens): 11 | """ 12 | Ask Anthropic GPT 13 | :param company: 14 | :param client: 15 | :param question: 16 | :param prompt: 17 | :param model: 18 | :param max_tokens: 19 | :return: 20 | """ 21 | calc_token = StringIO() 22 | stream = config.get_default_anthropic_streaming_response() 23 | before_ask_gpt_display(company=company, model=model) 24 | 25 | print_in_color(f"Thinking... 
🤔", config.get_info_color(), end="\n") 26 | 27 | if stream: 28 | start_time = time.time() 29 | try: 30 | response = await client.completions.create( 31 | model=model, 32 | max_tokens_to_sample=int(max_tokens), 33 | prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}", 34 | stream=stream, 35 | ) 36 | except Exception as e: 37 | print(f"Error: {e}") 38 | return f"Error: {e}" 39 | response_time = time.time() - start_time 40 | async for completion in response: 41 | print(f"{completion.completion}", end="", flush=True) 42 | calc_token.write(str(completion.completion)) 43 | calc_token.write(question) 44 | 45 | else: 46 | start_time = time.time() 47 | try: 48 | response = await client.completions.create( 49 | model=model, 50 | max_tokens_to_sample=int(max_tokens), 51 | prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}", 52 | stream=stream, 53 | ) 54 | except Exception as e: 55 | print(f"Error: {e}") 56 | return f"Error: {e}" 57 | response_time = time.time() - start_time 58 | # Streaming response cannot be copied to clipboard 59 | if config.get_copy_to_clipboard(): 60 | copy_to_clipboard(response.completion) 61 | 62 | calc_token.write(question) 63 | calc_token.write(response.completion) 64 | print_in_color(f"{response.completion}", config.get_response_color(), end="\n") 65 | 66 | calc_token = await client.count_tokens(calc_token.getvalue()) 67 | after_ask_gpt_display(response_time=response_time, end=" ") 68 | after_ask_gpt_display(consumed_tokens=calc_token, end=" ") 69 | -------------------------------------------------------------------------------- /src/kel/gpt/askgoogle.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import google.generativeai as genai 4 | from rich import box 5 | from rich.console import Console 6 | from rich.table import Table 7 | 8 | from kel.config import get_configs as config 9 | from kel.utils.utils import print_in_color, before_ask_gpt_display, after_ask_gpt_display, copy_to_clipboard 10 | 11 | 12 | def ask_google(client=None, company=None, question=None, prompt=None, model=None, temperature=None): 13 | """ 14 | Ask Google GPT 15 | 16 | :param client: 17 | :param question: 18 | :param prompt: 19 | :param model: 20 | :param temperature: 21 | :return: 22 | """ 23 | model = genai.GenerativeModel(model) 24 | # Get safety settings 25 | safety_settings = [] 26 | question = "Answer in " + config.get_response_language() + prompt + question 27 | before_ask_gpt_display(company=company, model=model.model_name) 28 | 29 | if config.get_google_safety_settings() is not None: 30 | for k, v in config.get_google_safety_settings().items(): 31 | safety_settings.append({"category": k, "threshold": v}) 32 | 33 | if config.get_default_google_streaming_response(): 34 | start_time = time.time() 35 | response = model.generate_content(question, 36 | stream=True, 37 | safety_settings=safety_settings) 38 | response_time = time.time() - start_time 39 | for chunk in response: 40 | try: 41 | print_in_color(chunk.text, config.get_response_color()) 42 | except Exception as e: 43 | print(f'{type(e).__name__}: {e}') 44 | else: 45 | start_time = time.time() 46 | response = model.generate_content(question, 47 | safety_settings=safety_settings) 48 | response_time = time.time() - start_time 49 | print_in_color(response.text, config.get_response_color()) 50 | response = response.text 51 | if config.get_copy_to_clipboard(): 52 | copy_to_clipboard(str(response.text)) 53 | 54 | if config.get_google_prompt_feedback(): 55 | table = Table(title="Prompt Feedback", 
show_header=True, header_style="bold blue", box=box.HEAVY) 56 | table.add_column("Category", style="cyan") 57 | table.add_column("Probability", style="green") 58 | for items in response.prompt_feedback.safety_ratings: 59 | table.add_row(str(items).split(":")[1].split("probability")[0], str(items).split(":")[-1]) 60 | console = Console() 61 | console.print(table) 62 | 63 | if config.get_view_all_response_candidates(): 64 | for count, response in enumerate(response.candidates): 65 | if count > 0: 66 | print_in_color(response.content.parts[0].text, config.get_response_color()) 67 | 68 | after_ask_gpt_display(response_time=response_time) 69 | # Streaming response cannot be copied to clipboard 70 | -------------------------------------------------------------------------------- /src/kel/gpt/askgpt.py: -------------------------------------------------------------------------------- 1 | from anthropic import AsyncAnthropic 2 | from openai import AsyncOpenAI 3 | import google.generativeai as genai 4 | 5 | from kel.config import get_configs as config 6 | from kel.gpt.askanthropic import ask_anthropic 7 | from kel.gpt.askgoogle import ask_google 8 | from kel.gpt.askollama import ask_ollama 9 | from kel.gpt.askopenai import ask_openai 10 | from kel.inputs.gatekeeper import gatekeeper_tasks 11 | from kel.inputs.inputs import get_user_inputs_from_cli 12 | 13 | 14 | class AICompany: 15 | def __init__(self, company_name): 16 | self.company_name = company_name 17 | 18 | 19 | class GPTModel(AICompany): 20 | def __init__(self, model_name, model_api_key, model_endpoint, model_prompt, model_max_token, model_temperature, 21 | company_name): 22 | super().__init__(company_name) 23 | self.model_api_key = model_api_key 24 | self.model_name = model_name 25 | self.model_endpoint = model_endpoint 26 | self.model_max_token = model_max_token 27 | self.model_prompt = model_prompt 28 | self.model_temperature = model_temperature 29 | 30 | if company_name == "openai": 31 | self.client = AsyncOpenAI(api_key=self.model_api_key) 32 | elif company_name == "google": 33 | self.client = genai.configure(api_key=self.model_api_key) 34 | elif company_name == "anthropic": 35 | self.client = AsyncAnthropic() 36 | 37 | @staticmethod 38 | def call_ollama(company=None, question=None, prompt=None, model=None, max_tokens=None): 39 | if model is None: 40 | model = config.get_ollama_default_model_name() 41 | if max_tokens is None: 42 | max_tokens = config.get_ollama_default_max_tokens() 43 | if prompt is None: 44 | prompt = config.get_ollama_default_prompt() 45 | if company is None: 46 | company = config.get_default_company_name() 47 | 48 | ask_ollama(company=company, question=question, prompt=prompt, model=model, max_tokens=max_tokens) 49 | 50 | def call_google(self, question=None, prompt=None, model=None, temperature=None, max_tokens=None, company=None): 51 | if model is None: 52 | model = config.get_default_google_model_name() 53 | if prompt is None: 54 | prompt = config.get_default_google_prompt() 55 | 56 | ask_google(client=self.client, company=company, question=question, prompt=prompt, model=model, temperature=temperature) 57 | 58 | async def call_anthropic(self, question=None, prompt=None, company=None, model=None, max_tokens=None): 59 | if model is None: 60 | model = config.get_anthropic_default_model_name() 61 | if max_tokens is None: 62 | max_tokens = config.get_anthropic_default_max_tokens() 63 | if prompt is None: 64 | prompt = config.get_anthropic_default_prompt() 65 | 66 | await ask_anthropic(client=self.client, 
question=question, prompt=prompt, company=company, model=model, 67 | max_tokens=max_tokens) 68 | 69 | async def ask_gpt(self, question=None, prompt=None, model=None, temperature=None, max_tokens=None, 70 | company=None, assistant=None, file=None): 71 | """ 72 | Ask OpenAI's chat endpoint, filling any missing arguments 73 | from the configured defaults. 74 | 75 | :param question: user question 76 | :param prompt: system prompt 77 | :param model: model name 78 | :param temperature: sampling temperature 79 | :param max_tokens: response token budget 80 | :param company: provider name, used for display and pricing 81 | :param assistant: unused in this flow 82 | :param file: unused in this flow 83 | :return: None 84 | 85 | 86 | """ 87 | if model is None: 88 | model = config.get_openai_default_model() 89 | if temperature is None: 90 | temperature = config.get_openai_temperature() 91 | if max_tokens is None: 92 | max_tokens = config.get_openai_max_tokens() 93 | if prompt is None: 94 | prompt = config.get_openai_default_prompt() 95 | await ask_openai(client=self.client, company=company, question=question, prompt=prompt, model=model, 96 | temperature=temperature, 97 | max_tokens=max_tokens) 98 | 99 | 100 | async def gpt() -> None: 101 | """ 102 | CLI entry point: parse the user inputs and route them to the configured provider 103 | :return: None 104 | 105 | """ 106 | # NOTE: this unpacking relies on argparse preserving attribute insertion order 107 | show, question, prompt, model, temperature, max_tokens, company = vars(get_user_inputs_from_cli()).values() 108 | company_name = gatekeeper_tasks(question, prompt, model, temperature, max_tokens, company) 109 | 110 | if company_name == "openai": 111 | openai = GPTModel( 112 | company_name=company_name, 113 | model_name=model, 114 | model_api_key=config.get_openai_key(), 115 | model_prompt=prompt, 116 | model_endpoint=f"{config.get_default_protocol()}://{config.get_default_openai_endpoint()}{config.get_default_openai_uri()}", 117 | model_max_token=max_tokens, 118 | model_temperature=temperature 119 | ) 120 | 121 | await openai.ask_gpt(question, prompt, model, temperature, max_tokens, company=company_name) 122 | 123 | elif company_name == "anthropic": 124 | anthropic = GPTModel( 125 | company_name=company_name, 126 | model_name=model, 127 | model_prompt=prompt, 128 | model_max_token=max_tokens, 129 | model_api_key=None, 130 | model_endpoint=None, 131 | model_temperature=None 132 | ) 133 | 134 | await anthropic.call_anthropic( 135 | question=question, 136 | company=company_name, 137 | prompt=anthropic.model_prompt, 138 | model=anthropic.model_name, 139 | max_tokens=anthropic.model_max_token 140 | ) 141 | 142 | elif company_name == "ollama": 143 | ollama = GPTModel( 144 | company_name=company_name, 145 | model_name=model, 146 | model_prompt=prompt, 147 | model_endpoint=config.get_ollama_default_endpoint(), 148 | model_temperature=None, 149 | model_max_token=None, 150 | model_api_key=None 151 | ) 152 | 153 | ollama.call_ollama( 154 | company=company_name, 155 | question=question, 156 | prompt=prompt, 157 | model=model, 158 | max_tokens=max_tokens 159 | ) 160 | 161 | elif company_name == "google": 162 | google = GPTModel( 163 | company_name=company_name, 164 | model_name=model, 165 | model_api_key=config.get_google_key(), 166 | model_endpoint=None, 167 | model_prompt=prompt, 168 | model_max_token=max_tokens, 169 | model_temperature=None 170 | ) 171 | 172 | google.call_google( 173 | question=question, 174 | prompt=prompt, 175 | model=model, 176 | temperature=None, 177 | max_tokens=None, 178 | company=company_name) 179 | 
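180 | # Usage sketch (illustrative, not executed): with OPENAI_API_KEY set and 181 | # "openai" configured as the default company, gpt() boils down to: 182 | # 183 | #   model = GPTModel(company_name="openai", model_name="gpt-4", ...) 184 | #   await model.ask_gpt(question="Explain p99 latency", company="openai") 185 | 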
-------------------------------------------------------------------------------- /src/kel/gpt/askollama.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | 4 | from langchain.callbacks.manager import CallbackManager 5 | from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler 6 | from langchain.llms import Ollama 7 | 8 | from kel.config import get_configs as config 9 | from kel.constants.constants import emoji_time, emoji_money, emoji_info 10 | from kel.utils.utils import copy_to_clipboard, print_in_color, before_ask_gpt_display, after_ask_gpt_display 11 | 12 | 13 | def ask_ollama(company=None, question=None, model=None, prompt=None, max_tokens=None): 14 | """ 15 | Call a local Ollama model and stream its response to stdout 16 | :return: None 17 | 18 | """ 19 | 20 | before_ask_gpt_display(company=company, model=model) 21 | 22 | try: 23 | llm = Ollama( 24 | model=model, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]) 25 | ) 26 | if question is not None: 27 | question = str(question).strip() + " " + str(prompt).strip() 28 | 29 | # Stream the response; time the call itself, not the client setup 30 | start_time = time.time() 31 | llm(question) 32 | response_time = time.time() - start_time 33 | after_ask_gpt_display(response_time=response_time) 34 | 35 | # TODO: Implement this 36 | # after_ask_gpt_display(consumed_tokens=model) 37 | except Exception as e: 38 | print(f"Error: {e}") 39 | sys.exit(1) 40 | 41 | 42 | 
-------------------------------------------------------------------------------- /src/kel/gpt/askopenai.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from kel.config import get_configs as config 4 | from kel.constants.constants import emoji_error, emoji_info, \ 5 | valid_openai_chat_models 6 | from kel.utils.cost import calculate_cost 7 | from kel.utils.utils import copy_to_clipboard, print_in_color, before_ask_gpt_display, after_ask_gpt_display 8 | 9 | 10 | async def ask_openai(client=None, question=None, prompt=None, model=None, temperature=None, max_tokens=None, 11 | company=None, assistant=None, file=None): 12 | try: 13 | before_ask_gpt_display(company=company, model=model) 14 | 15 | print_in_color(f"Thinking...🤔", config.get_info_color(), end="") 16 | 17 | if model in valid_openai_chat_models: 18 | start_time = time.time() 19 | print_in_color(".", config.get_info_color(), end="") 20 | 21 | response = await client.chat.completions.create( 22 | model=model, 23 | messages=[ 24 | {"role": "system", "content": f"{prompt}"}, 25 | {"role": "user", 26 | "content": f"{question}. You will respond in {config.get_response_language()}"} 27 | ], 28 | max_tokens=int(max_tokens), 29 | temperature=float(temperature), 30 | ) 31 | print_in_color(".", config.get_info_color()) 32 | 33 | response_time = time.time() - start_time 34 | 35 | print_in_color(f"{emoji_info} {response.choices[0].message.content}", config.get_response_color()) 36 | 37 | after_ask_gpt_display(response_time=response_time, 38 | end=" ") 39 | after_ask_gpt_display(total_tokens=response.usage.total_tokens, 40 | end=" ") 41 | 42 | cost = calculate_cost(company, 43 | model, 44 | prompt_tokens=response.usage.prompt_tokens, 45 | completion_tokens=response.usage.completion_tokens 46 | ) 47 | after_ask_gpt_display(cost=cost, end=" ") 48 | 49 | if config.get_copy_to_clipboard(): 50 | copy_to_clipboard(response.choices[0].message.content) 51 | 52 | return response.choices[0].message.content, response_time 53 | else: 54 | print_in_color( 55 | f"{emoji_error} Error: {model} is not a valid model name for {config.get_default_company_name()}.", 56 | config.get_warning_color()) 57 | return f"Error: {model} is not a valid model name."
58 | 59 | except Exception as e: 60 | print(f"Error: {e}") 61 | return f"Error: {e}" 62 | 
-------------------------------------------------------------------------------- /src/kel/inputs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/inputs/__init__.py 
-------------------------------------------------------------------------------- /src/kel/inputs/gatekeeper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from kel.config import get_configs as config 5 | from kel.utils.utils import print_in_color 6 | from kel.constants.constants import valid_ai_company_names, valid_openai_chat_models, valid_anthropic_chat_models, \ 7 | valid_api_keys_env, valid_google_models 8 | 9 | 10 | def check_api_key(company_name=None): 11 | try: 12 | env_var = valid_api_keys_env.get(company_name) 13 | # a direct dict lookup replaces the original linear scan over the mapping 14 | if env_var is not None and os.getenv(env_var) is None: 15 | print_in_color( 16 | f"{env_var} is not set. Please set it in your environment variables.", 17 | config.get_error_color()) 18 | sys.exit(1) 19 | except Exception as e: 20 | print_in_color(f"Error: {e}", config.get_error_color()) 21 | sys.exit(1) 22 | 23 | 24 | def validate_chat_model(company_name=None, model_name=None): 25 | if company_name is not None: 26 | company_name = company_name.lower() 27 | if company_name == "openai": 28 | if model_name not in valid_openai_chat_models: 29 | print_in_color(f"Error: {model_name} is not a valid {company_name} chat model.", 30 | config.get_error_color()) 31 | print_in_color(f"Valid {company_name} chat models are: {valid_openai_chat_models}", 32 | config.get_info_color()) 33 | sys.exit(1) 34 | elif company_name == "google": 35 | if model_name not in valid_google_models: 36 | print_in_color(f"Error: {model_name} is not a valid {company_name} chat model.", 37 | config.get_error_color()) 38 | print_in_color(f"Valid {company_name} chat models are: {valid_google_models}", 39 | config.get_info_color()) 40 | sys.exit(1) 41 | elif company_name == "anthropic": 42 | if model_name not in valid_anthropic_chat_models: 43 | print_in_color(f"Error: {model_name} is not a valid {company_name} chat model.", 44 | config.get_error_color()) 45 | print_in_color(f"Valid {company_name} chat models are: {valid_anthropic_chat_models}", 46 | config.get_info_color()) 47 | sys.exit(1) 48 | else: 49 | pass  # ollama models are not validated against a fixed list 50 | 51 | 52 | def get_model_name(company_name): 53 | """ 54 | Resolve the default model name for the given company 55 | :param company_name: provider name, e.g. "openai" 56 | :return: the configured default model name 57 | """ 58 | if company_name is not None: 59 | company_name = company_name.lower() 60 | if company_name == "openai": 61 | return config.get_openai_default_model() 62 | elif company_name == "anthropic": 63 | return config.get_anthropic_default_model_name() 64 | elif company_name == "ollama": 65 | return config.get_ollama_default_model_name() 66 | elif company_name == "google": 67 | return config.get_default_google_model_name() 68 | else: 69 | print_in_color(f"Error in setting up the model. Please check the config file.", config.get_error_color())
70 | sys.exit(1) 71 | 72 | 73 | def gatekeeper_tasks(question=None, prompt=None, model=None, temperature=None, max_tokens=None, company=None, **kwargs): 74 | """ 75 | Validate the company, API key, and model before any request is made 76 | :return: the normalized (lowercase) company name 77 | """ 78 | if company is None: 79 | company_name = config.get_default_company_name().lower() 80 | else: 81 | company_name = str(company).strip().lower() 82 | 83 | if company_name == "": 84 | print_in_color("Error: Company name is not set in the config file or not passed.", config.get_warning_color()) 85 | sys.exit(1) 86 | 87 | if company_name not in valid_ai_company_names: 88 | print_in_color(f"Error: {company_name} is not a supported AI company. Please check the config file.", 89 | config.get_warning_color()) 90 | sys.exit(1) 91 | 92 | if company_name in ("openai", "anthropic", "google"): 93 | check_api_key(company_name=company_name) 94 | 95 | if model is None: 96 | model = get_model_name(company_name=company_name) 97 | else: 98 | model = str(model).strip().lower() 99 | 100 | validate_chat_model(company_name=company_name, model_name=model) 101 | 102 | return company_name 103 | 
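104 | # Usage sketch (illustrative): gatekeeper_tasks() normalizes and validates the 105 | # CLI inputs before any API call, e.g.: 106 | # 107 | #   company = gatekeeper_tasks(question="hi", model="gpt-4", company="OpenAI") 108 | #   # checks the provider's API key env variable, validates the model, returns "openai" 109 | 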
-------------------------------------------------------------------------------- /src/kel/inputs/inputs.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | 4 | from kel.config.get_configs import get_enable_openai_assistant 5 | 6 | from kel.constants.constants import app_name, app_description, epilog, valid_show_options, valid_ai_company_names 7 | from kel.utils.utils import display_config, cli_art 8 | 9 | 10 | def get_user_inputs_from_cli(): 11 | """ 12 | Parse the CLI arguments and return the argparse namespace 13 | :return: parsed arguments 14 | 15 | """ 16 | # Parse the arguments 17 | parser = argparse.ArgumentParser( 18 | prog=app_name, 19 | description=app_description, 20 | epilog=epilog, 21 | ) 22 | 23 | if get_enable_openai_assistant():  # assistant mode accepts only -a/-f and returns early 24 | parser.add_argument( 25 | '-a', 26 | '--assistant', 27 | type=str, 28 | help='assistant name e.g PerfGPT', 29 | required=False 30 | ) 31 | 32 | parser.add_argument( 33 | '-f', 34 | '--file', 35 | type=str, 36 | help='file path', 37 | required=False 38 | ) 39 | 40 | return parser.parse_args() 41 | 42 | parser.add_argument( 43 | '-s', 44 | '--show', 45 | type=str, 46 | help='show default config e.g general, stats, style, companies', 47 | required=False 48 | ) 49 | 50 | if '-v' in sys.argv or '--version' in sys.argv: 51 | parser.add_argument( 52 | '-v', 53 | '--version', 54 | action='version', 55 | version=''.format(version=cli_art()),  # cli_art() prints the version banner as a side effect 56 | help='show version' 57 | ) 58 | 59 | if '-s' not in sys.argv and '--show' not in sys.argv: 60 | parser.add_argument( 61 | 'question', 62 | help='your question to Kel: ', 63 | type=str, 64 | ) 65 | 66 | parser.add_argument( 67 | '-p', 68 | '--prompt', 69 | help='prompt e.g "You are a web performance expert. You are helping a developer."', 70 | type=str, 71 | required=False 72 | ) 73 | 74 | parser.add_argument( 75 | '-m', 76 | '--model', 77 | help='model name e.g gpt-4', 78 | type=str, 79 | required=False 80 | ) 81 | parser.add_argument( 82 | '-t', 83 | '--temperature', 84 | help='temperature e.g 0.9', 85 | type=float, 86 | required=False 87 | ) 88 | parser.add_argument( 89 | '-mt', 90 | '--max_tokens', 91 | type=int, 92 | help='max tokens e.g 150', 93 | required=False 94 | ) 95 | parser.add_argument( 96 | '-c', 97 | '--company', 98 | type=str, 99 | help='company name e.g OpenAI', 100 | required=False 101 | ) 102 | 103 | 104 | 105 | args = parser.parse_args() 106 | 107 | if args.show in valid_show_options or args.show in valid_ai_company_names: 108 | display_config(args.show.lower()) 109 | else: 110 | if args.show: 111 | print(f"Invalid show option: {args.show}. Valid options are: {valid_show_options}") 112 | sys.exit() 113 | 114 | return args 115 | 
-------------------------------------------------------------------------------- /src/kel/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QAInsights/kel/f73979359d68204ec9f495e0cc43b66825c3f187/src/kel/utils/__init__.py 
-------------------------------------------------------------------------------- /src/kel/utils/cost.py: -------------------------------------------------------------------------------- 1 | import kel.constants.base_price as base_price 2 | from kel.constants.constants import pricing_error_message 3 | 4 | 5 | def calculate_cost(company, model, prompt_tokens=0, 6 | completion_tokens=0): 7 | """ 8 | Calculate the API cost in USD from the per-token base prices 9 | :param company: provider name; only "openai" is priced currently 10 | :param model: model name used to look up the base price 11 | :param prompt_tokens: input tokens consumed 12 | :param completion_tokens: output tokens generated 13 | :return: cost in USD, or 0 if the price lookup fails 14 | """ 15 | 16 | if company == "openai": 17 | try: 18 | if model in base_price.openai_api_input_price_per_token: 19 | input_price = base_price.openai_api_input_price_per_token[model] 20 | output_price = base_price.openai_api_output_price_per_token[model] 21 | return (input_price * prompt_tokens) + (output_price * completion_tokens) 22 | else: 23 | raise KeyError(f"{pricing_error_message}") 24 | except Exception as e: 25 | print(f"Error: {e}") 26 | return 0 27 | 
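28 | # Worked example (illustrative prices, not the real base_price table): with an 29 | # input price of $0.00003/token and an output price of $0.00006/token, a call 30 | # using 1000 prompt tokens and 500 completion tokens costs: 31 | #   (0.00003 * 1000) + (0.00006 * 500) = 0.03 + 0.03 = 0.06 USD 32 | 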
-------------------------------------------------------------------------------- /src/kel/utils/models.py: -------------------------------------------------------------------------------- 1 | def get_google_models(): 2 | """ 3 | List the Google models that support generateContent (requires GOOGLE_API_KEY) 4 | :return: list of model names 5 | 6 | """ 7 | import os 8 | import google.generativeai as genai 9 | 10 | genai.configure(api_key=os.getenv('GOOGLE_API_KEY')) 11 | google_models = [m.name for m in genai.list_models() if 'generateContent' in m.supported_generation_methods] 12 | print(google_models) 13 | return google_models 14 | 
-------------------------------------------------------------------------------- /src/kel/utils/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import pyperclip 4 | from rich.console import Console 5 | from rich.table import Table 6 | 7 | from kel.constants.constants import emoji_info, emoji_time, emoji_money, valid_ai_company_official_names 8 | from kel.config import get_configs as config 9 | from kel.__version__ import __version__ 10 | 11 | def copy_to_clipboard(text): 12 | """ 13 | Copy the response text to the clipboard, 14 | stripping whitespace and double quotes 15 | 16 | :param text: text to copy 17 | :return: None 18 | 19 | """ 20 | try: 21 | # Copy once, stripping surrounding whitespace and double quotes 22 | pyperclip.copy(text.strip().replace("\"", "")) 23 | print("📝 Response copied to clipboard.", end="\n") 24 | 25 | except Exception as e: 26 | print("Error copying to clipboard") 27 | print(e) 28 | 29 | 30 | def print_in_color(text, color, end="\n"): 31 | """ 32 | Print text in the given rich style 33 | 34 | :param text: text to print 35 | :param color: rich style string, e.g. "bold red" 36 | :param end: line ending, defaults to "\n" 37 | :return: None 38 | 39 | Note: a Console is created per call, which is acceptable 40 | for a short-lived CLI process. 41 | 42 | """ 43 | console = Console(log_time=False) 44 | console.print(text, style=color, end=end) 45 | 46 | 47 | def before_ask_gpt_display(*args, **kwargs): 48 | """ 49 | Show which company and model is about to be used (if enabled in config) 50 | :return: None 51 | 52 | """ 53 | end = "\n" 54 | if "end" in kwargs: 55 | end = kwargs.get("end") 56 | if "company" in kwargs: 57 | company = kwargs.get("company") 58 | else: 59 | company = config.get_default_company_name() 60 | 61 | if "model" in kwargs: 62 | if config.get_display_llm_company_model_name(): 63 | model = kwargs.get("model") 64 | print_in_color(f"{emoji_info} You are using {company}'s model: {model}", 65 | config.get_info_color(), end=end) 66 | 67 | 68 | def after_ask_gpt_display(*args, **kwargs): 69 | """ 70 | Show response time, token usage, and cost after a call (per config flags) 71 | :return: None 72 | 73 | """ 74 | end = "\n" 75 | if "end" in kwargs: 76 | end = kwargs.get("end") 77 | if config.get_display_response_time(): 78 | if "response_time" in kwargs: 79 | data = kwargs.get("response_time") 80 | if data: 81 | print_in_color(f"{emoji_time} Response Time: {data:.5f} seconds", 82 | config.get_info_color(), 83 | end=end) 84 | 85 | if config.get_display_tokens(): 86 | if "total_tokens" in kwargs: 87 | data = kwargs.get("total_tokens") 88 | if data: 89 | print_in_color(f"{emoji_money} Total Tokens: {data}", 90 | config.get_info_color(), 91 | end=end) 92 | 93 | if config.get_display_cost(): 94 | if "cost" in kwargs: 95 | data = kwargs.get("cost") 96 | if data: 97 | print_in_color(f"{emoji_money} Total Cost: {data:.6f} USD", 98 | config.get_info_color(), 99 | end=end) 100 | 101 | 102 | def display_config(args=None): 103 | """ 104 | Render the requested config section as a rich table, then exit 105 | :param args: section name, or "all" for every section 106 | :return: never returns; calls sys.exit() 107 | """ 108 | table = Table(title="Kel Config", show_header=True, header_style="bold blue") 109 | 110 | table.add_column("Name", style="cyan") 111 | table.add_column("Value", style="green") 112 | 113 | if args == "all": 114 | for key, value in config.get_all_config_keys_values().items(): 115 | if isinstance(key, str): 116 | table.add_row(str(key), "") 117 | table.add_row("--" * 15, "--" * 15) 118 | 119 | if isinstance(value, dict): 120 | for k, v in value.items(): 121 | table.add_row(str(k), str(v)) 122 | table.add_row("--" * 15, "--" * 15) 123 | else: 124 | for key, value in config.get_config_by_key(args).items(): 125 | table.add_row(str(key), str(value)) 126 | console = Console() 127 | console.print(table) 128 | 129 | sys.exit() 130 | 131 | 132 | def cli_art(): 133 | # Print the ascii art for the word `kel` 134 | print_in_color(rf""" 135 | |‾‾| /‾‾/ |‾‾‾‾‾‾| |‾‾| 136 | | |/ / | (‾‾‾ | | 137 | | ( | ‾‾‾| | | 138 | | |\ \ | (___ | |___ 139 | |__| \__\ |______| |______| v{__version__} 140 | """, config.get_info_color()) 141 | 
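142 | # Usage sketch (illustrative): the ask* modules combine these helpers like so: 143 | # 144 | #   before_ask_gpt_display(company="openai", model="gpt-4") 145 | #   print_in_color("Hello!", config.get_response_color()) 146 | #   after_ask_gpt_display(response_time=1.234, total_tokens=42, cost=0.000123) 147 | 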
-------------------------------------------------------------------------------- /tests/test____main__.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import patch 3 | from kel.inputs.gatekeeper import gatekeeper_tasks 4 | from kel.gpt.askgpt import gpt 5 | from kel.config.get_configs import get_enable_openai_assistant 6 | from kel.assistant.summon_assistant import summon_assistant 7 | from kel.__main__ import main  # import via the installed package, not the src/ path, so the patches below target the same module 8 | 9 | @patch('kel.config.get_configs.get_enable_openai_assistant') 10 | @patch('kel.assistant.summon_assistant.summon_assistant') 11 | @patch('asyncio.run') 12 | def test_main_when_openai_assistant_enabled(mock_asyncio_run, mock_summon_assistant, mock_get_enable_openai_assistant): 13 | mock_get_enable_openai_assistant.return_value = True 14 | main() 15 | mock_summon_assistant.assert_called_once() 16 | mock_asyncio_run.assert_not_called() 17 | 18 | @patch('kel.config.get_configs.get_enable_openai_assistant') 19 | @patch('kel.assistant.summon_assistant.summon_assistant') 20 | @patch('asyncio.run') 21 | def test_main_when_openai_assistant_disabled(mock_asyncio_run, mock_summon_assistant, mock_get_enable_openai_assistant): 22 | mock_get_enable_openai_assistant.return_value = False 23 | main() 24 | mock_summon_assistant.assert_not_called() 25 | mock_asyncio_run.assert_called_once()  # two separately created gpt() coroutine objects never compare equal, so assert only that the call happened 26 | 
--------------------------------------------------------------------------------