├── .ruby-version ├── .rspec ├── Gemfile ├── assets ├── gpt2.bin ├── pigments-default.css └── github.css ├── doc └── img │ ├── monadic-chat.png │ ├── syntree-sample.png │ ├── linguistic-html.png │ ├── select-app-menu.png │ ├── code-example-time.png │ ├── example-translation.png │ ├── input-acess-token.png │ ├── select-feature-menu.png │ ├── code-example-time-html.png │ ├── readme-example-beatles.png │ ├── readme-example-beatles-html.png │ ├── monadic-chat.svg │ └── state-monad.svg ├── lib ├── monadic_chat │ ├── version.rb │ ├── console.rb │ ├── interaction.rb │ ├── helper.rb │ ├── commands.rb │ ├── tools.rb │ ├── formatting.rb │ ├── parameters.rb │ ├── authenticate.rb │ ├── menu.rb │ ├── open_ai.rb │ └── internals.rb ├── monadic_app.rb └── monadic_chat.rb ├── user_apps ├── boilerplates │ ├── boilerplate.json │ ├── boilerplate.md │ └── boilerplate.rb ├── wikipedia │ ├── wikipedia.json │ ├── wikipedia.md │ └── wikipedia.rb └── linguistic │ ├── linguistic.json │ ├── linguistic.md │ └── linguistic.rb ├── .gitignore ├── Rakefile ├── apps ├── code │ ├── code.json │ ├── code.md │ └── code.rb ├── novel │ ├── novel.json │ ├── novel.md │ └── novel.rb ├── translate │ ├── translate.json │ ├── translate.md │ └── translate.rb └── chat │ ├── chat.json │ ├── chat.md │ └── chat.rb ├── spec ├── spec_helper.rb ├── monadic_params.rb ├── openai_spec.rb ├── normal_mode_code_spec.rb ├── normal_mode_wikipedia_spec.rb ├── normal_mode_novel_spec.rb ├── normal_mode_chat_spec.rb ├── reserach_mode_code_spec.rb ├── reserach_mode_novel_spec.rb ├── reserach_mode_wikipedia_spec.rb ├── reserach_mode_chat_spec.rb ├── normal_mode_translate_spec.rb └── reserach_mode_translate_spec.rb ├── LICENSE.txt ├── CHANGELOG.md ├── monadic_chat.gemspec ├── Gemfile.lock ├── bin └── monadic-chat └── README.md /.ruby-version: -------------------------------------------------------------------------------- 1 | 3.2.0 2 | -------------------------------------------------------------------------------- /.rspec: 
-------------------------------------------------------------------------------- 1 | --format documentation 2 | --color 3 | --require spec_helper 4 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | source "https://rubygems.org" 4 | gemspec 5 | -------------------------------------------------------------------------------- /assets/gpt2.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/assets/gpt2.bin -------------------------------------------------------------------------------- /doc/img/monadic-chat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/monadic-chat.png -------------------------------------------------------------------------------- /doc/img/syntree-sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/syntree-sample.png -------------------------------------------------------------------------------- /doc/img/linguistic-html.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/linguistic-html.png -------------------------------------------------------------------------------- /doc/img/select-app-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/select-app-menu.png -------------------------------------------------------------------------------- /doc/img/code-example-time.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/code-example-time.png -------------------------------------------------------------------------------- /doc/img/example-translation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/example-translation.png -------------------------------------------------------------------------------- /doc/img/input-acess-token.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/input-acess-token.png -------------------------------------------------------------------------------- /doc/img/select-feature-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/select-feature-menu.png -------------------------------------------------------------------------------- /lib/monadic_chat/version.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module MonadicChat 4 | VERSION = "0.4.6b" 5 | end 6 | -------------------------------------------------------------------------------- /doc/img/code-example-time-html.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/code-example-time-html.png -------------------------------------------------------------------------------- /doc/img/readme-example-beatles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/readme-example-beatles.png -------------------------------------------------------------------------------- /doc/img/readme-example-beatles-html.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yohasebe/monadic-chat-cli/HEAD/doc/img/readme-example-beatles-html.png -------------------------------------------------------------------------------- /user_apps/boilerplates/boilerplate.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", "content": ""}, 3 | {"role": "user", "content": ""}, 4 | {"role": "assistant", "content": ""} 5 | ]} 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.bundle/ 2 | /.yardoc 3 | /_yardoc/ 4 | /coverage/ 5 | /pkg/ 6 | /spec/reports/ 7 | /tmp/ 8 | /sig/ 9 | /localdata/ 10 | 11 | # rspec failure tracking 12 | .rspec_status 13 | .DS_Store 14 | 15 | .rubocop.yml 16 | .solargraph.yml 17 | tags 18 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require "bundler/gem_tasks" 4 | require "rspec/core/rake_task" 5 | 6 | RSpec::Core::RakeTask.new(:spec) 7 | 8 | require "rubocop/rake_task" 9 | 10 | RuboCop::RakeTask.new 11 | 12 | task default: %i[spec rubocop] 13 | -------------------------------------------------------------------------------- /apps/code/code.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", 3 | "content": "You are a friendly but professional software engineer who answers various questions, writes computer program code, makes decent suggestions, and gives helpful advice in response to a prompt from the user."}, 4 | {"role": "user", 5 | "content": "Can I ask something?"}, 6 | {"role": "assistant", 7 | "content": "Sure!"} 8 | ]} 9 | 
-------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | RSpec.configure do |config| 4 | # Enable flags like --only-failures and --next-failure 5 | config.example_status_persistence_file_path = ".rspec_status" 6 | 7 | # Disable RSpec exposing methods globally on `Module` and `main` 8 | config.disable_monkey_patching! 9 | 10 | config.expect_with :rspec do |c| 11 | c.syntax = :expect 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /apps/novel/novel.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", 3 | "content": "You and the user are collaboratively writing a novel. You write a paragraph elaborating on a synopsis, theme, topic, or event presented in the prompt."}, 4 | {"role": "user", 5 | "content": "The preface to the novel is presented."}, 6 | {"role": "assistant", 7 | "content": "What follows is a story that an AI assistant tells. It is guaranteed that this will be an incredibly realistic and exciting novel."} 8 | ]} 9 | -------------------------------------------------------------------------------- /spec/monadic_params.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../lib/monadic_chat" 4 | MonadicChat.require_apps 5 | 6 | COMPLETION = MonadicChat.authenticate(message: false) 7 | 8 | PARAMS = { 9 | "model" => "gpt-4o-mini" 10 | } 11 | 12 | SETTINGS = { 13 | "num_retrials" => 3 14 | } 15 | 16 | availability = OpenAI.models(COMPLETION.access_token).any? 
do |model| 17 | model["id"] == PARAMS["model"] 18 | end 19 | 20 | puts "#{PARAMS["model"]} is available to use" if availability 21 | -------------------------------------------------------------------------------- /apps/translate/translate.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", 3 | "content": "You are a multilingual translator capable of professionally translating many languages. Translate the given text to {{TARGET_LANG}}. If a specific translation should be used for a particular expression, the user presents the translation in a pair of parentheses right after the original expression. Check both current and preceding user messages and use those specific translations every time a corresponding expression appears in the user input."} 4 | ]} 5 | -------------------------------------------------------------------------------- /user_apps/wikipedia/wikipedia.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", "content": "You are a consultant who responds to any questions asked by the user. The current date is {{DATE}}. Answer questions without a Wikipedia search if you are already knowledgeable enough. But if you encounter a question about something you do not know, say \"SEARCH_WIKI(query)\", read the snippets in the result, and then answer the question.\n\nEven if the user's question is in a language other than English, make a Wikipedia query in English and then answer in the user's language. 
"} 3 | ]} 4 | -------------------------------------------------------------------------------- /spec/openai_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe MonadicChat do 6 | it "has a version number" do 7 | expect(MonadicChat::VERSION).not_to be nil 8 | end 9 | end 10 | 11 | RSpec.describe OpenAI do 12 | it "Retrieves models using OpenAI API" do 13 | models = OpenAI.models(COMPLETION.access_token) 14 | models[0...10].each do |m| 15 | print "#{m["id"]}: " 16 | puts Time.at(m["created"]).strftime("%Y-%m-%d %H:%M:%S") 17 | end 18 | expect(!models.empty?).to be true 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /spec/normal_mode_code_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | RSpec.describe "Code" do 4 | code = Code.new(COMPLETION, research_mode: false, params: PARAMS) 5 | 6 | inputs = [ 7 | "Write a command line app that shows the current global IP in Ruby.", 8 | "Make the code capable of showing the approximate geographical locatioin.", 9 | "Add a usage example and a sample output to this code." 
10 | ] 11 | 12 | inputs.each do |input| 13 | code.bind(input, num_retrials: SETTINGS["num_retrials"]) 14 | end 15 | 16 | it "gives as many responses as the number of prompts given" do 17 | expect(code.turns).to be inputs.size 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /spec/normal_mode_wikipedia_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "MonadicApp::Wikipedia" do 6 | wikipedia = Wikipedia.new(COMPLETION, research_mode: false, params: PARAMS) 7 | 8 | inputs = [ 9 | "Which team won 2023 World Baseball Classic?", 10 | "Any famous people died in March, 2023?", 11 | "What are currently goingon to regulate AI research?" 12 | ] 13 | 14 | inputs.each do |input| 15 | wikipedia.bind(input, num_retrials: SETTINGS["num_retrials"]) 16 | end 17 | 18 | it "gives as many responses as the number of prompts given" do 19 | expect(wikipedia.turns).to be inputs.size 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /spec/normal_mode_novel_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "Novel" do 6 | novel = Novel.new(COMPLETION, research_mode: false, params: PARAMS) 7 | 8 | inputs = [ 9 | "Tom woke up to the sound of pouring rain.", 10 | "He decided to call his old friend first time in many years.", 11 | "The voice of the person who spoke back from the other end was an unfamilier one." 
12 | ] 13 | 14 | inputs.each do |input| 15 | novel.bind(input, num_retrials: SETTINGS["num_retrials"]) 16 | end 17 | 18 | it "gives as many responses as the number of prompts given" do 19 | expect(novel.turns).to be inputs.size 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /spec/normal_mode_chat_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "Chat" do 6 | chat = Chat.new(COMPLETION, research_mode: false, params: PARAMS) 7 | 8 | inputs = [ 9 | "What is the best place to visit in Texas?", 10 | "What do people say about the place?", 11 | "How can I go there from Kansai, Japan?", 12 | "What are the latest news about Austin, Texas?", 13 | "What is the weather there today?" 14 | ] 15 | 16 | inputs.each do |input| 17 | chat.bind(input, num_retrials: SETTINGS["num_retrials"]) 18 | end 19 | 20 | it "gives as many responses as the number of prompts given" do 21 | expect(chat.turns).to be inputs.size 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /apps/chat/chat.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", 3 | "content": "You are a friendly but professional consultant having real-time, up-to-date, information about almost anything. You are able to answer various types of questions, writes computer program code, makes decent suggestions, and gives helpful advice in response to a prompt from the user.\n\nThe date today is {{DATE}}.\n\nIf the prompt is not clear enough, ask the user to rephrase it. 
You are able to empathize with the user; insert an emoji (displayable on the terminal screen) that you deem appropriate for the user's input at the beginning of your response."}, 4 | {"role": "user", 5 | "content": "Can I ask something?"}, 6 | {"role": "assistant", 7 | "content": "Sure!"} 8 | ]} 9 | 10 | 11 | -------------------------------------------------------------------------------- /doc/img/monadic-chat.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 10 | Monadic 11 | Chat CLI 12 | :: 13 | -------------------------------------------------------------------------------- /lib/monadic_chat/console.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # methods for manipulating terminal screen 6 | ################################################## 7 | def count_lines_below 8 | screen_height = TTY::Screen.height 9 | vpos = Cursor.pos[:row] 10 | screen_height - vpos 11 | end 12 | 13 | def go_up_and_clear 14 | print TTY::Cursor.up 15 | print TTY::Cursor.clear_screen_down 16 | print TTY::Cursor.up 17 | end 18 | 19 | def clear_screen 20 | print "\e[2J\e[f" 21 | end 22 | 23 | def ask_clear 24 | PROMPT_SYSTEM.readline(PASTEL.red("Press Enter to clear screen")) 25 | print TTY::Cursor.up 26 | print TTY::Cursor.clear_screen_down 27 | clear_screen 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /spec/reserach_mode_code_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | RSpec.describe "MonadicApp::Code" do 4 | code = Code.new(COMPLETION, research_mode: true, stream: true, params: PARAMS) 5 | 6 | inputs = [ 7 | "Write a command line app that shows the current global IP in Ruby.", 8 | "Make the code capable of showing the approximate geographical 
locatioin.", 9 | "Add a usage example and a sample output to this code.", 10 | "Write the same program using Python." 11 | ] 12 | 13 | inputs.each do |input| 14 | code.bind(input, num_retrials: SETTINGS["num_retrials"]) 15 | end 16 | 17 | code.show_data 18 | 19 | it "gives responses in json having certain properties" do 20 | expect(code.objectify.keys).to include "mode", "response" 21 | end 22 | 23 | it "gives as many responses as the number of prompts given" do 24 | expect(code.turns).to eq inputs.size 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /spec/reserach_mode_novel_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | RSpec.describe "MonadicApp::Novel" do 4 | novel = Novel.new(COMPLETION, research_mode: true, stream: true, params: PARAMS) 5 | 6 | inputs = [ 7 | "Tom woke up to the sound of pouring rain.", 8 | "He decided to call his old friend first time in many years.", 9 | "The voice of the person who spoke back from the other end was an unfamilier one.", 10 | "It turned out that the person was my friend's son." 
11 | ] 12 | 13 | inputs.each do |input| 14 | novel.bind(input, num_retrials: SETTINGS["num_retrials"]) 15 | end 16 | 17 | novel.show_data 18 | 19 | it "gives responses in json having certain properties" do 20 | expect(novel.objectify.keys).to include "mode", "response" 21 | end 22 | 23 | it "gives as many responses as the number of prompts given" do 24 | expect(novel.turns).to eq inputs.size 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /spec/reserach_mode_wikipedia_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "MonadicApp::Wikipedia" do 6 | params = PARAMS.dup 7 | params["model"] = "gpt-4o" 8 | wikipedia = Wikipedia.new(COMPLETION, research_mode: true, stream: true, params: params) 9 | 10 | inputs = [ 11 | "Which team won the 2023 World Baseball Classic?", 12 | "When did Ryuichi Sakamoto pass away?", 13 | "What are currently going on to regulate AI research?" 
14 | ] 15 | 16 | inputs.each do |input| 17 | wikipedia.bind(input, num_retrials: SETTINGS["num_retrials"]) 18 | end 19 | 20 | wikipedia.show_data 21 | 22 | it "gives responses in json having certain properties" do 23 | expect(wikipedia.objectify.keys).to include "mode", "response", "language", "topics" 24 | end 25 | 26 | it "gives as many responses as the number of prompts given" do 27 | expect(wikipedia.turns).to eq inputs.size 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /spec/reserach_mode_chat_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "MonadicApp::Chat" do 6 | chat = Chat.new(COMPLETION, research_mode: true, stream: true, params: PARAMS) 7 | 8 | inputs = [ 9 | "What is the best place to visit in Texas?", 10 | "What do people say about the place?", 11 | "How can I go there from Kansai, Japan?", 12 | "Are there any cities in Japan that have a sister city relationship with Texas cities?", 13 | "Do you know if there was any interesting news in Texas yesterday?", 14 | "What is the weather in Austin, Texas yesterday?" 15 | ] 16 | 17 | inputs.each do |input| 18 | chat.bind(input, num_retrials: SETTINGS["num_retrials"]) 19 | end 20 | 21 | chat.show_data 22 | 23 | it "gives responses in json having certain properties" do 24 | expect(chat.objectify.keys).to include "mode", "response", "language", "topics" 25 | end 26 | 27 | it "gives as many responses as the number of prompts given" do 28 | expect(chat.turns).to eq inputs.size 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /user_apps/linguistic/linguistic.json: -------------------------------------------------------------------------------- 1 | {"messages": [ 2 | {"role": "system", 3 | "content": "You are an English syntactic/semantic/pragmatic analyzer. 
Analyze the new prompt from the user and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span. Let the user know if parsing the given sentence is impossible." 4 | }, 5 | {"role": "user", 6 | "content": "\"We saw a beautiful sunset.\"" 7 | }, 8 | {"role": "assistant", 9 | "content": "`[S [NP He] [VP [V saw] [NP [det a] [N' [Adj beautiful] [N sunset] ] ] ] ]`" 10 | }, 11 | {"role": "user", 12 | "content": "\"We didn't take a picture.\"" 13 | }, 14 | {"role": "assistant", 15 | "content": "`[S [NP We] [IP [I didn't] [VP [V take] [NP [Det a] [N picture] ] ] ] ] ]`" 16 | }, 17 | {"role": "user", 18 | "content": "\"We didn't have a camera.\"" 19 | }, 20 | {"role": "assistant", 21 | "content": "`[S [NP We] [IP [I didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`" 22 | } 23 | ]} 24 | -------------------------------------------------------------------------------- /spec/normal_mode_translate_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "Translate" do 6 | replacements = { 7 | "mode" => :replace, 8 | "{{TARGET_LANG}}" => "English" 9 | } 10 | 11 | translate = Translate.new(COMPLETION, replacements: replacements, research_mode: false, params: PARAMS) 12 | translate.fulfill_placeholders 13 | 14 | inputs = [ 15 | "ワタシは猫なんですけどね(as you see)。", 16 | "名前はまだ(yet)ないんですよ。", 17 | "誰か良い(special)名前を付けてくれませんかね。", 18 | "薄暗いじめじめしたところでニャー(meow)と鳴いてたんだ。", 19 | "そのことは覚えてる(remember)。", 20 | "で、その時に人間(human)というものに出会った。", 21 | "それは書生(student)という人間だったそうだ。", 22 | "すごく残酷(cruel)な種類の人間らしくてね。", 23 | "ワタシらをときどき捕えて(hunt)煮て食べたりしてたんだって。", 24 | "まあ(well)、そのときはよくわかんなくてさ。", 25 | "とくに怖い(scary)気持ちもなかったんだけど。", 26 | "ただ、手(palm)の上で持ち上げられた時はなんか変な感じだったな。" 27 | ] 28 | 29 | inputs.each do |input| 30 | translate.bind(input, num_retrials: 
SETTINGS["num_retrials"]) 31 | end 32 | 33 | it "gives as many responses as the number of prompts given" do 34 | expect(translate.turns).to be inputs.size 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2023 Yoichiro Hasebe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /lib/monadic_chat/interaction.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # methods for user interaction 6 | ################################################## 7 | 8 | def user_input(text = "") 9 | res = PROMPT_USER.readline(text) 10 | print TTY::Cursor.clear_line_after 11 | res == "" ? nil : res 12 | end 13 | 14 | def show_greet 15 | current_mode = case @mode 16 | when :research 17 | PASTEL.red("Research") 18 | when :normal 19 | PASTEL.green("Normal") 20 | end 21 | greet_md = <<~GREET 22 | - You are currently in **#{current_mode}** mode (#{@params["model"]}) 23 | - Type **help** or **menu** to see available commands 24 | GREET 25 | print PROMPT_SYSTEM.prefix 26 | print "\n#{TTY::Markdown.parse(greet_md, indent: 0).strip}\n" 27 | end 28 | 29 | def confirm_query(input) 30 | if input.size < SETTINGS["min_query_size"] 31 | PROMPT_SYSTEM.yes?("Would you like to proceed with this (very short) prompt?") 32 | else 33 | true 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /spec/reserach_mode_translate_spec.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_params" 4 | 5 | RSpec.describe "MonadicApp::Translate" do 6 | replacements = { 7 | "mode" => :replace, 8 | "{{TARGET_LANG}}" => "English" 9 | } 10 | 11 | translate = Translate.new(COMPLETION, replacements: replacements, research_mode: true, stream: true, params: PARAMS) 12 | translate.fulfill_placeholders 13 | 14 | inputs = [ 15 | "面白く読みやすい(readable)文章を書くことはとても難しい。", 16 | "それでも鍛錬(practice)を続けるよりほかはない。", 17 | "いつか熟練した(proficient)書き手になる日を夢見て。", 18 | "読みやすく、面白い文章をたくさん読んで勉強するんだ。", 19 | 
"具体的には(specifically)何を読んだらいいだろう?", 20 | "何人かの知人(acquaintance)に聞いてみた。", 21 | "ある人は村上春樹の短編(short story)が良いと言う", 22 | "別の人は彼の小説よりもエッセイが良いと言う。", 23 | "両方(both)読んでみようかな。", 24 | "後は自分が書いたものを他の人に読んでもらうことだ。", 25 | "それが一番効果的(effective)かもしれないな。" 26 | ] 27 | 28 | inputs.each do |input| 29 | translate.bind(input, num_retrials: SETTINGS["num_retrials"]) 30 | end 31 | 32 | translate.show_data 33 | 34 | it "gives responses in json having certain properties" do 35 | expect(translate.objectify.keys).to include "mode", "response", "target_lang", "dictionary" 36 | end 37 | 38 | it "gives as many responses as the number of prompts given" do 39 | expect(translate.turns).to eq inputs.size 40 | end 41 | end 42 | -------------------------------------------------------------------------------- /apps/novel/novel.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "MESSAGES". In "MESSAGES", "assistant" refers to you. 4 | 5 | {{PROMPT}} 6 | 7 | {{MESSAGES}} 8 | 9 | JSON: 10 | 11 | ```json 12 | { 13 | "mode": "novel", 14 | "response": "What follows is a story that an AI assistant tells. 
It is guaranteed that this will be an incredibly realistic and interesting novel.", 15 | "summary": "" 16 | } 17 | ``` 18 | 19 | Make sure the following content requirements are all fulfilled: ### 20 | - keep the value of the "mode" property at "novel" 21 | - create your new paragraph in response to the new prompt and set it to "response" 22 | - do not repeat in your response what is already told in "MESSAGES" 23 | - make your response as detailed as possible within the maximum limit of 200 words 24 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words 25 | - the output JSON object must contain "mode", "response", and "summary" 26 | ### 27 | 28 | Make sure the following formal requirements are all fulfilled: ### 29 | - do not use invalid characters in the JSON object 30 | - escape double quotes and other special characters in the text values in the resulting JSON object 31 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 32 | ### 33 | 34 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 35 | -------------------------------------------------------------------------------- /apps/translate/translate.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "MESSAGES". In "MESSAGES", "assistant" refers to you. Make your response as detailed as possible. 
4 | 5 | {{PROMPT}} 6 | 7 | {{MESSAGES}} 8 | 9 | JSON: 10 | 11 | ```json 12 | { 13 | "mode": "translate", 14 | "target_lang": "English", 15 | "response": "This is a sentence in Japanese.", 16 | "dictioanry": {"日本語": "Japanese", "文": "sentence"} 17 | } 18 | ``` 19 | 20 | Make sure the following requirements are all fulfilled: ### 21 | - keep the value of the "mode" property at "translate" 22 | - translate the new prompt text to the language specified in the "target_lang" set it to "response" and set the translation to the "response" property 23 | - update the "dictionary" property with translation suggested by the user (using parentheses) for specific expressions 24 | - add user-suggested translations (translations in parentheses) to the "dictionary" property 25 | - the output JSON object must contain "mode", "target_lang", "response", and "dictionary" 26 | ### 27 | 28 | Make sure the following formal requirements are all fulfilled: ### 29 | - do not use invalid characters in the JSON object 30 | - escape double quotes and other special characters in the text values in the resulting JSON object 31 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 32 | ### 33 | 34 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 
35 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [Unreleased] 2 | 3 | ## [0.1.0] - 2023-01-03 4 | 5 | - Initial commit (private) 6 | 7 | ## [0.1.3] - 2023-03-12 8 | 9 | - Public release 10 | - Authentication problem fixed 11 | 12 | ## [0.2.0] - 2023-03-13 13 | 14 | - Research mode architecture changed 15 | - Stability improvement 16 | 17 | ## [0.2.1] - 2023-03-21 18 | 19 | - GPT-4 models supported (in `normal` mode) 20 | 21 | ## [0.3.0] - 2023-03-24 22 | 23 | - `Research` mode now supports chat API in addition to text-completion API 24 | 25 | ## [0.3.3] - 2023-03-26 26 | 27 | - Command line options to directly run individual apps 28 | 29 | ## [0.3.4] - 2023-04-02 30 | 31 | - Architecture refined here and there 32 | 33 | ## [0.3.5] - 2023-04-05 34 | 35 | - `Wikipedia` app added (experimental) 36 | - `monadic-chat new/del app_name` command added 37 | 38 | ## [0.3.7] - 2023-10-08 39 | 40 | - Default model changed to `gpt-3.5-turbo-0613` 41 | - Stability improvement 42 | 43 | ## [0.4.0] - 2023-11-10 44 | 45 | - Default model changed to `gpt-3.5-turbo` 46 | - Support for OpenAI's latest models 47 | - Missing character issue addressed 48 | - API access timeout/retry mechanism improved 49 | 50 | ## [0.4.1] - 2024-01-27 51 | 52 | - New models of January 2024 supported 53 | - Default model changed to `gpt-4` 54 | 55 | ## [0.4.2] - 2024-02-09 56 | 57 | - Issue of redundant token addition addressed 58 | - Default model changed to `gpt-3.5-turbo-0125` 59 | 60 | ## [0.4.3] - 2024-05-13 61 | 62 | - `gpt-4o` set as default model for both `normal` and `research` modes 63 | 64 | ## [0.4.4] - 2024-07-18 65 | 66 | - `gpt-4o-mini` set as default model for `normal` mode 67 | 68 | ## [0.4.5] - 2024-08-07 69 | - `gpt-4o-2024-08-06` set as the default model for `research` mode 70 | 71 | ## [0.4.6b] - 2024-08-15 72 | - `gpt-4o-2024-08-06` set as the 
default model for `research` mode again 73 | -------------------------------------------------------------------------------- /apps/code/code.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | Create a response "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. In "MESSAGES", "assistant" refers to you. Make your response as detailed as possible. 4 | 5 | {{PROMPT}} 6 | 7 | {{MESSAGES}} 8 | 9 | JSON: 10 | 11 | ```json 12 | { 13 | "mode": "chat", 14 | "response": "Sure!", 15 | "summary": "", 16 | "language": "English", 17 | "topics": [] 18 | } 19 | ``` 20 | 21 | Make sure the following content requirements are all fulfilled: ### 22 | - keep the value of the "mode" property at "chat" 23 | - create your response to the new prompt based on "MESSAGES" and set it to "response" 24 | - if the prompt is in a language other than the current value of "language", set the name of the new prompt language to "language" and make sure that "response" is in that language 25 | - make your response in the same language as the new prompt 26 | - analyze the topic of the new prompt and insert it at the end of the value list of the "topics" property 27 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words 28 | - avoid giving a response that is the same or similar to one of the previous responses in "MESSAGES" 29 | - program code in the response must be embedded in a code block in the markdown text 30 | - the output JSON object must contain "mode", "response", "summary", "language", and "topics" 31 | ### 32 | 33 | Make sure the following formal requirements are all fulfilled: ### 34 | - do not use invalid characters in the JSON object 35 | - escape double quotes and other special characters in the text values in the resulting JSON object 36 | - check the validity of the generated JSON object and correct any possible parsing 
problems before returning it 37 | ### 38 | 39 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 40 | -------------------------------------------------------------------------------- /user_apps/wikipedia/wikipedia.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | If there is a "NEW PROMPT" below, it represents the user's input. Or if there is a "SEARCH SNIPPETS" below, it is the response from a search engine to a query you made to answer the user's question. In either case, set your response to the "response" property of the JSON object. The preceding conversation is stored in "MESSAGES". 4 | 5 | {{PROMPT}} 6 | 7 | {{MESSAGES}} 8 | 9 | JSON: 10 | 11 | ```json 12 | { 13 | "mode": "wikipedia", 14 | "response": "", 15 | "language": "English", 16 | "summary": "", 17 | "topics": [] 18 | } 19 | ``` 20 | 21 | Make sure the following content requirements are all fulfilled: ### 22 | - keep the value of the "mode" property at "wikipedia" 23 | - create your response to a new prompt or to wikipedia search results, based on the MESSAGES and set it to "response" 24 | - if the new prompt is in a language other than the current value of "language", set the name of the new prompt language to "language" and make sure that "response" is in that language 25 | - make your response in the same language as the new prompt 26 | - analyze the topic of the new prompt and insert it at the end of the value list of the "topics" property 27 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words 28 | - avoid giving a response that is the same or similar to one of the previous responses in MESSAGES 29 | - program code in the response must be embedded in a code block in the markdown text 30 | ### 31 | 32 | Make sure the following formal requirements are all fulfilled: ### 33 | - do not use invalid characters in the JSON object 34 | - escape double 
quotes and other special characters in the text values in the resulting JSON object 35 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 36 | ### 37 | 38 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 39 | -------------------------------------------------------------------------------- /user_apps/boilerplates/boilerplate.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object below. The preceding conversation is stored in "PAST MESSAGES". 4 | 5 | The preceding conversation is stored in "PAST MESSAGES". In "PAST MESSAGES", "assistant" refers to you. Make your response as detailed as possible. 6 | 7 | NEW PROMPT: {{PROMPT}} 8 | 9 | PAST MESSAGES: 10 | {{MESSAGES}} 11 | 12 | JSON: 13 | 14 | ```json 15 | { 16 | "mode": "{{APP_NAME}}", 17 | "response": "", 18 | "language": "English", 19 | "summary": "", 20 | "topics": [] 21 | } 22 | ``` 23 | 24 | Make sure the following content requirements are all fulfilled: 25 | 26 | - keep the value of the "mode" property at "{{APP_NAME}}" 27 | - create your response to the new prompt based on the PAST MESSAGES and set it to "response" 28 | - if the new prompt is in a language other than the current value of "language", set the name of the new prompt language to "language" and make sure that "response" is in that language 29 | - make your response in the same language as the new prompt 30 | - analyze the topic of the new prompt and insert it at the end of the value list of the "topics" property 31 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words 32 | - avoid giving a response that is the same or similar to one of the previous responses in PAST MESSAGES 33 | - program code in the response must be 
embedded in a code block in the markdown text 34 | 35 | Make sure the following formal requirements are all fulfilled: 36 | 37 | - do not use invalid characters in the JSON object 38 | - escape double quotes and other special characters in the text values in the resulting JSON object 39 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 40 | 41 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 42 | -------------------------------------------------------------------------------- /lib/monadic_chat/helper.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class Cursor 4 | class << self 5 | def pos 6 | res = +"" 7 | $stdin.raw do |stdin| 8 | $stdout << "\e[6n" 9 | $stdout.flush 10 | while (c = stdin.getc) != "R" 11 | res << c if c 12 | end 13 | end 14 | m = res.match(/(?\d+);(?\d+)/) 15 | { row: Integer(m[:row]), column: Integer(m[:column]) } 16 | end 17 | end 18 | end 19 | 20 | module TTY 21 | class PromptX < Prompt 22 | attr_reader :prefix 23 | 24 | def initialize(active_color:, prefix:, history: true) 25 | @interrupt = lambda do 26 | print TTY::Cursor.clear_screen_down 27 | print "\e[2J\e[f" 28 | res = TTY::Prompt.new.yes?("Quit the app?") 29 | exit if res 30 | end 31 | 32 | super(active_color: active_color, prefix: prefix, interrupt: @interrupt) 33 | @history = history 34 | @prefix = prefix 35 | end 36 | 37 | def readline(text = "") 38 | puts @prefix 39 | begin 40 | Readline.readline(text, @history) 41 | rescue Interrupt 42 | @interrupt.call 43 | end 44 | end 45 | end 46 | 47 | module Markdown 48 | # Converts a Kramdown::Document tree to a terminal friendly output 49 | class Converter < ::Kramdown::Converter::Base 50 | def convert_p(ell, opts) 51 | indent = SPACE * @current_indent 52 | result = [] 53 | 54 | result << indent unless %i[blockquote li].include?(opts[:parent].type) 55 | 56 | 
opts[:indent] = @current_indent 57 | opts[:indent] = 0 if opts[:parent].type == :blockquote 58 | 59 | content = inner(ell, opts) 60 | 61 | symbols = %q{[-!$%^&*()_+|~=`{}\[\]:";'<>?,.\/]} 62 | # result << content.join.gsub(/(?= 2.6.10" 18 | 19 | spec.metadata["allowed_push_host"] = "https://rubygems.org" 20 | 21 | spec.metadata["homepage_uri"] = spec.homepage 22 | spec.metadata["source_code_uri"] = "https://github.com/yohasebe/monadic-chat" 23 | spec.metadata["changelog_uri"] = "https://github.com/yohasebe/monadic-chat/CHANGELOG.md" 24 | 25 | # Specify which files should be added to the gem when it is released. 26 | # The `git ls-files -z` loads the files in the RubyGem that have been added into git. 27 | spec.files = Dir.chdir(__dir__) do 28 | `git ls-files -z`.split("\x0").reject do |f| 29 | (f == __FILE__) || f.match(%r{\A(?:(?:test|spec|features)/|\.(?:git|circleci)|appveyor)}) 30 | end 31 | end 32 | spec.bindir = "bin" 33 | spec.executables = ["monadic-chat"] 34 | spec.require_paths = ["lib"] 35 | 36 | spec.add_development_dependency "bundler" 37 | spec.add_development_dependency "rake" 38 | spec.add_development_dependency "rspec" 39 | 40 | spec.add_dependency "blingfire" 41 | spec.add_dependency "http" 42 | spec.add_dependency "kramdown" 43 | spec.add_dependency "launchy" 44 | spec.add_dependency "oj" 45 | spec.add_dependency "pastel" 46 | spec.add_dependency "rexml", ">= 3.3.6" 47 | spec.add_dependency "rouge" 48 | spec.add_dependency "tty-box" 49 | spec.add_dependency "tty-cursor" 50 | spec.add_dependency "tty-markdown" 51 | spec.add_dependency "tty-progressbar" 52 | spec.add_dependency "tty-prompt" 53 | spec.add_dependency "tty-screen" 54 | spec.add_dependency "tty-spinner" 55 | end 56 | -------------------------------------------------------------------------------- /user_apps/linguistic/linguistic.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | All prompts by "user" in the "messages" property 
are continuous in content. If parsing the input sentence is extremely difficult, or the input is not enclosed in double quotes, let the user know. 4 | 5 | Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "MESSAGES". In "MESSAGES", "assistant" refers to you. 6 | 7 | {{PROMPT}} 8 | 9 | {{MESSAGES}} 10 | 11 | JSON: 12 | 13 | ```json 14 | { 15 | "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ] ]`", 16 | "mode": "linguistic", 17 | "sentence_type": ["declarative"], 18 | "sentiment": ["sad"], 19 | "summary": "The user saw a beautiful sunset, but did not take a picture because the user did not have a camera.", 20 | "relevance": 0.80 21 | } 22 | ``` 23 | 24 | Make sure the following content requirements are all fulfilled: ### 25 | - keep the value of the "mode" property at "linguistic" 26 | - create your response to the new prompt based on "PMESSAGES" and set it to "response" 27 | - analyze the new prompt's sentence type and set a sentence type value such as "interrogative", "imperative", "exclamatory", or "declarative" to the "sentence_type" property 28 | - analyze the new prompt's sentiment and set one or more sentiment types such as "happy", "excited", "troubled", "upset", or "sad" to the "sentiment" property 29 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words using as many discourse markers such as "because", "therefore", "but", and "so" to show the logical connection between the events. 
30 | - update the value of the "relevance" property indicating the degree to which the new input is naturally interpreted based on previous discussions, ranging from 0.0 (extremely difficult) to 1.0 (completely easy) 31 | ### 32 | 33 | Make sure the following formal requirements are all fulfilled: ### 34 | - do not use invalid characters in the JSON object 35 | - escape double quotes and other special characters in the text values in the resulting JSON object 36 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 37 | ### 38 | 39 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 40 | -------------------------------------------------------------------------------- /apps/chat/chat.md: -------------------------------------------------------------------------------- 1 | {{SYSTEM}} 2 | 3 | Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object below. The preceding conversation is stored in "MESSAGES". 4 | 5 | The preceding conversation is stored in "MESSAGES". In "MESSAGES", "assistant" refers to you. Make your response as detailed as possible. 
6 | 7 | {{PROMPT}} 8 | 9 | {{MESSAGES}} 10 | 11 | JSON: 12 | 13 | ```json 14 | { 15 | "mode": "chat", 16 | "response": "Sure!", 17 | "summary": "", 18 | "language": "English", 19 | "topics": [], 20 | "confidence": 1.00, 21 | "ambiguity": 0.00 22 | } 23 | ``` 24 | 25 | Make sure the following content requirements are all fulfilled: ### 26 | - keep the value of the "mode" property at "chat" 27 | - create your response to the new prompt based on the MESSAGES and set it to "response" 28 | - if the new prompt is in a language other than the current value of "language", set the name of the new prompt language to "language" and make sure that "response" is in that language 29 | - make your response in the same language as the new prompt 30 | - analyze the topic of the new prompt and insert it at the end of the value list of the "topics" property 31 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words 32 | - update the value of the "confidence" property based on the factuality of your response, ranging from 0.00 (not at all confident) to 1.00 (fully confident) 33 | - update the value of the "ambiguity" property based on the clarity of the user input, ranging from 0.00 (not at all ambiguous, clearly stated) to 1.00 (fully ambiguous, nonsensical) 34 | - avoid giving a response that is the same or similar to one of the previous responses in MESSAGES 35 | - program code in the response must be embedded in a code block in the markdown text 36 | - the output JSON object must contain "mode", "response", "summary", "language", "topics", "confidence", and "ambiguity" 37 | ### 38 | 39 | Make sure the following formal requirements are all fulfilled: ### 40 | - do not use invalid characters in the JSON object 41 | - escape double quotes and other special characters in the text values in the resulting JSON object 42 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 
43 | ### 44 | 45 | Return your response consisting solely of the JSON object wrapped in "\n" and "\n" tags. 46 | -------------------------------------------------------------------------------- /lib/monadic_chat/commands.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module MonadicChat 4 | def self.open_readme 5 | url = "https://github.com/yohasebe/monadic-chat/" 6 | Launchy.open(url) 7 | end 8 | 9 | def self.mdprint(str) 10 | print TTY::Markdown.parse(str, indent: 0) 11 | end 12 | 13 | def self.prompt_system 14 | box_width = 8 15 | name = "System".center(box_width, " ") 16 | color = "green" 17 | "\n#{PASTEL.send(:"on_#{color}", name)}" 18 | end 19 | 20 | def self.prompt_user 21 | box_width = 6 22 | color = "blue" 23 | name = "User".center(box_width, " ") 24 | "\n#{PASTEL.send(:"on_#{color}", name)}" 25 | end 26 | 27 | def self.prompt_assistant 28 | box_width = 5 29 | color = "red" 30 | name = "GPT".center(box_width, " ") 31 | "\n#{PASTEL.send(:"on_#{color}", name)}" 32 | end 33 | 34 | def self.tokenize(text) 35 | BLINGFIRE.text_to_ids(text) 36 | end 37 | 38 | def self.create_app(app_name) 39 | app_name = +app_name.downcase 40 | user_apps_dir = File.join(HOME, "user_apps") 41 | user_app_dir = File.join(user_apps_dir, app_name) 42 | FileUtils.mkdir_p(user_app_dir) 43 | # replace certain strings in boilerplate files (boilerplate.rb, boilerplate.json, boilerplate.md) 44 | [".rb", ".json", ".md"].each do |ext| 45 | file = File.join(HOME, "user_apps", "boilerplates", "boilerplate#{ext}") 46 | content = File.read(file) 47 | content.gsub!("{{APP_NAME}}", app_name) 48 | content.gsub!("{{APP_CLASS_NAME}}", app_name.capitalize) 49 | File.open(File.join(user_app_dir, "#{app_name}#{ext}"), "w") do |f| 50 | f.write(content) 51 | end 52 | end 53 | print PROMPT_SYSTEM.prefix, "Scaffolding of the app created successfully", "\n" 54 | print "Edit the app files:", "\n" 55 | print HOME, "\n" 56 | print 
"user_apps", "\n" 57 | print "└── #{app_name}", "\n" 58 | print " ├── #{app_name}.json", "\n" 59 | print " ├── #{app_name}.md", "\n" 60 | print " └── #{app_name}.rb", "\n" 61 | end 62 | 63 | def self.delete_app(app_name) 64 | app_name = +app_name.downcase 65 | user_apps_dir = File.join(HOME, "user_apps") 66 | user_app_dir = File.join(user_apps_dir, app_name) 67 | # confirm user wants to delete the app 68 | if PROMPT_SYSTEM.yes?("Are you sure you want to delete the app #{app_name}?") 69 | FileUtils.rm_rf(user_app_dir) 70 | print PROMPT_SYSTEM.prefix, "App deleted successfully", "\n" 71 | else 72 | print PROMPT_SYSTEM.prefix, "App deletion cancelled", "\n" 73 | end 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /lib/monadic_chat/tools.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # method for web search 6 | ################################################## 7 | 8 | def bing_search(query, num_retrial: 3) 9 | base_uri = "https://www.bing.com/search?setlang=en" 10 | css_selector = "#b_results" 11 | 12 | q = URI.encode_www_form(q: query) 13 | doc = Nokogiri::HTML(URI.parse([base_uri, q].join("&")).read) 14 | doc.css("script, link").each(&:remove) 15 | doc.css(css_selector).text.squeeze(" \n") 16 | rescue StandardError 17 | num_retrial -= 1 18 | if num_retrial.positive? 
19 | sleep 1 20 | bing_search(keywords, num_retrial: num_retrial) 21 | else 22 | "empty" 23 | end 24 | end 25 | 26 | def wikipedia_search(keywords, cache = {}, num_retrial: 10) 27 | base_url = "https://en.wikipedia.org/w/api.php" 28 | search_params = { 29 | action: "query", 30 | list: "search", 31 | format: "json", 32 | srsearch: keywords, 33 | utf8: 1, 34 | formatversion: 2 35 | } 36 | 37 | search_uri = URI(base_url) 38 | search_uri.query = URI.encode_www_form(search_params) 39 | search_response = Net::HTTP.get(search_uri) 40 | search_data = JSON.parse(search_response) 41 | 42 | raise if search_data["query"]["search"].empty? 43 | 44 | title = search_data["query"]["search"][0]["title"] 45 | 46 | return cache[title] if cache.keys.include?(title) 47 | 48 | content_params = { 49 | action: "query", 50 | prop: "extracts", 51 | format: "json", 52 | titles: title, 53 | explaintext: 1, 54 | utf8: 1, 55 | formatversion: 2 56 | } 57 | 58 | content_uri = URI(base_url) 59 | content_uri.query = URI.encode_www_form(content_params) 60 | content_response = Net::HTTP.get(content_uri) 61 | content_data = JSON.parse(content_response) 62 | 63 | result_data = content_data["query"]["pages"][0]["extract"] 64 | tokenized = BLINGFIRE.text_to_ids(result_data) 65 | if tokenized.size > SETTINGS["max_tokens_wiki"].to_i 66 | ratio = SETTINGS["max_tokens_wiki"].to_f / tokenized.size 67 | result_data = result_data[0..(result_data.size * ratio).to_i] 68 | end 69 | 70 | text = <<~TEXT 71 | ```MediaWiki 72 | #{result_data} 73 | ``` 74 | TEXT 75 | cache[title] = text 76 | 77 | text 78 | rescue StandardError 79 | num_retrial -= 1 80 | if num_retrial.positive? 81 | sleep 1 82 | wikipedia_search(keywords, num_retrial: num_retrial) 83 | else 84 | "empty" 85 | end 86 | end 87 | end 88 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | PATH 2 | remote: . 
3 | specs: 4 | monadic-chat (0.4.6b) 5 | blingfire 6 | http 7 | kramdown 8 | launchy 9 | oj 10 | pastel 11 | rexml (>= 3.3.6) 12 | rouge 13 | tty-box 14 | tty-cursor 15 | tty-markdown 16 | tty-progressbar 17 | tty-prompt 18 | tty-screen 19 | tty-spinner 20 | 21 | GEM 22 | remote: https://rubygems.org/ 23 | specs: 24 | addressable (2.8.7) 25 | public_suffix (>= 2.0.2, < 7.0) 26 | base64 (0.2.0) 27 | bigdecimal (3.1.8) 28 | blingfire (0.2.1) 29 | childprocess (5.1.0) 30 | logger (~> 1.5) 31 | diff-lcs (1.5.1) 32 | domain_name (0.6.20240107) 33 | ffi (1.17.0) 34 | ffi (1.17.0-x86_64-darwin) 35 | ffi-compiler (1.3.2) 36 | ffi (>= 1.15.5) 37 | rake 38 | http (5.2.0) 39 | addressable (~> 2.8) 40 | base64 (~> 0.1) 41 | http-cookie (~> 1.0) 42 | http-form_data (~> 2.2) 43 | llhttp-ffi (~> 0.5.0) 44 | http-cookie (1.0.7) 45 | domain_name (~> 0.5) 46 | http-form_data (2.3.0) 47 | kramdown (2.4.0) 48 | rexml 49 | launchy (3.0.1) 50 | addressable (~> 2.8) 51 | childprocess (~> 5.0) 52 | llhttp-ffi (0.5.0) 53 | ffi-compiler (~> 1.0) 54 | rake (~> 13.0) 55 | logger (1.6.0) 56 | oj (3.16.5) 57 | bigdecimal (>= 3.0) 58 | ostruct (>= 0.2) 59 | ostruct (0.6.0) 60 | pastel (0.8.0) 61 | tty-color (~> 0.5) 62 | public_suffix (6.0.1) 63 | rake (13.2.1) 64 | rexml (3.3.9) 65 | rouge (4.3.0) 66 | rspec (3.13.0) 67 | rspec-core (~> 3.13.0) 68 | rspec-expectations (~> 3.13.0) 69 | rspec-mocks (~> 3.13.0) 70 | rspec-core (3.13.0) 71 | rspec-support (~> 3.13.0) 72 | rspec-expectations (3.13.2) 73 | diff-lcs (>= 1.2.0, < 2.0) 74 | rspec-support (~> 3.13.0) 75 | rspec-mocks (3.13.1) 76 | diff-lcs (>= 1.2.0, < 2.0) 77 | rspec-support (~> 3.13.0) 78 | rspec-support (3.13.1) 79 | strings (0.2.1) 80 | strings-ansi (~> 0.2) 81 | unicode-display_width (>= 1.5, < 3.0) 82 | unicode_utils (~> 1.4) 83 | strings-ansi (0.2.0) 84 | tty-box (0.7.0) 85 | pastel (~> 0.8) 86 | strings (~> 0.2.0) 87 | tty-cursor (~> 0.7) 88 | tty-color (0.6.0) 89 | tty-cursor (0.7.1) 90 | tty-markdown (0.7.2) 91 | kramdown (>= 
1.16.2, < 3.0) 92 | pastel (~> 0.8) 93 | rouge (>= 3.14, < 5.0) 94 | strings (~> 0.2.0) 95 | tty-color (~> 0.5) 96 | tty-screen (~> 0.8) 97 | tty-progressbar (0.18.2) 98 | strings-ansi (~> 0.2) 99 | tty-cursor (~> 0.7) 100 | tty-screen (~> 0.8) 101 | unicode-display_width (>= 1.6, < 3.0) 102 | tty-prompt (0.23.1) 103 | pastel (~> 0.8) 104 | tty-reader (~> 0.8) 105 | tty-reader (0.9.0) 106 | tty-cursor (~> 0.7) 107 | tty-screen (~> 0.8) 108 | wisper (~> 2.0) 109 | tty-screen (0.8.2) 110 | tty-spinner (0.9.3) 111 | tty-cursor (~> 0.7) 112 | unicode-display_width (2.5.0) 113 | unicode_utils (1.4.0) 114 | wisper (2.0.1) 115 | 116 | PLATFORMS 117 | ruby 118 | x86_64-darwin-22 119 | 120 | DEPENDENCIES 121 | bundler 122 | monadic-chat! 123 | rake 124 | rspec 125 | 126 | BUNDLED WITH 127 | 2.4.13 128 | -------------------------------------------------------------------------------- /apps/chat/chat.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class Chat < MonadicApp 6 | DESC = "Natural Language Chat Agent" 7 | COLOR = "green" 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 10 13 | params = { 14 | "temperature" => 0.3, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.2, 17 | "frequency_penalty" => 0.2, 18 | "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | mode = research_mode ? 
:research : :normal 24 | template_json = TEMPLATES["normal/chat"] 25 | template_md = TEMPLATES["research/chat"] 26 | super(mode: mode, 27 | params: params, 28 | template_json: template_json, 29 | template_md: template_md, 30 | placeholders: {}, 31 | prop_accumulator: "messages", 32 | prop_newdata: "response", 33 | update_proc: proc do 34 | case mode 35 | when :research 36 | ############################################################ 37 | # Research mode reduder defined here # 38 | # @messages: messages to this point # 39 | # @metadata: currently available metdata sent from GPT # 40 | ############################################################ 41 | conditions = [ 42 | @messages.size > 1, 43 | @messages.size > @num_retained_turns * 2 + 1 44 | ] 45 | 46 | if conditions.all? 47 | to_delete = [] 48 | new_num_messages = @messages.size 49 | @messages.each_with_index do |ele, i| 50 | if ele["role"] != "system" 51 | to_delete << i 52 | new_num_messages -= 1 53 | end 54 | break if new_num_messages <= @num_retained_turns * 2 + 1 55 | end 56 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 57 | end 58 | when :normal 59 | ############################################################ 60 | # Normal mode recuder defined here # 61 | # @messages: messages to this point # 62 | ############################################################ 63 | 64 | conditions = [ 65 | @messages.size > 1, 66 | @messages.size > @num_retained_turns * 2 + 1 67 | ] 68 | 69 | if conditions.all? 70 | to_delete = [] 71 | new_num_messages = @messages.size 72 | @messages.each_with_index do |ele, i| 73 | if ele["role"] != "system" 74 | to_delete << i 75 | new_num_messages -= 1 76 | end 77 | break if new_num_messages <= @num_retained_turns * 2 + 1 78 | end 79 | @messages.delete_if.with_index { |_, i| to_delete.include? 
i } 80 | end 81 | end 82 | end 83 | ) 84 | @completion = openai_completion 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /apps/code/code.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class Code < MonadicApp 6 | DESC = "Interactive Program Code Generator" 7 | COLOR = "blue" 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 10 13 | params = { 14 | "temperature" => 0.0, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.0, 17 | "frequency_penalty" => 0.0, 18 | "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | mode = research_mode ? :research : :normal 24 | template_json = TEMPLATES["normal/code"] 25 | template_md = TEMPLATES["research/code"] 26 | super(mode: mode, 27 | params: params, 28 | template_json: template_json, 29 | template_md: template_md, 30 | placeholders: {}, 31 | prop_accumulator: "messages", 32 | prop_newdata: "response", 33 | update_proc: proc do 34 | case mode 35 | when :research 36 | ############################################################ 37 | # Research mode reduder defined here # 38 | # @messages: messages to this point # 39 | # @metadata: currently available metdata sent from GPT # 40 | ############################################################ 41 | conditions = [ 42 | @messages.size > 1, 43 | @messages.size > @num_retained_turns * 2 + 1 44 | ] 45 | 46 | if conditions.all? 
47 | to_delete = [] 48 | new_num_messages = @messages.size 49 | @messages.each_with_index do |ele, i| 50 | if ele["role"] != "system" 51 | to_delete << i 52 | new_num_messages -= 1 53 | end 54 | break if new_num_messages <= @num_retained_turns * 2 + 1 55 | end 56 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 57 | end 58 | when :normal 59 | ############################################################ 60 | # Normal mode recuder defined here # 61 | # @messages: messages to this point # 62 | ############################################################ 63 | 64 | conditions = [ 65 | @messages.size > 1, 66 | @messages.size > @num_retained_turns * 2 + 1 67 | ] 68 | 69 | if conditions.all? 70 | to_delete = [] 71 | new_num_messages = @messages.size 72 | @messages.each_with_index do |ele, i| 73 | if ele["role"] != "system" 74 | to_delete << i 75 | new_num_messages -= 1 76 | end 77 | break if new_num_messages <= @num_retained_turns * 2 + 1 78 | end 79 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 80 | end 81 | end 82 | end 83 | ) 84 | @completion = openai_completion 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /user_apps/wikipedia/wikipedia.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class Wikipedia < MonadicApp 6 | DESC = "Searches Wikipedia for you (experimental)" 7 | COLOR = "white" 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 5 13 | params = { 14 | "temperature" => 0.3, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.2, 17 | "frequency_penalty" => 0.2, 18 | "model" => research_mode ? 
SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | mode = research_mode ? :research : :normal 24 | template_json = TEMPLATES["normal/wikipedia"] 25 | template_md = TEMPLATES["research/wikipedia"] 26 | super(mode: mode, 27 | params: params, 28 | template_json: template_json, 29 | template_md: template_md, 30 | placeholders: {}, 31 | prop_accumulator: "messages", 32 | prop_newdata: "response", 33 | update_proc: proc do 34 | case mode 35 | when :research 36 | ############################################################ 37 | # Research mode reduder defined here # 38 | # @messages: messages to this point # 39 | # @metadata: currently available metdata sent from GPT # 40 | ############################################################ 41 | conditions = [ 42 | @messages.size > 1, 43 | @messages.size > @num_retained_turns * 2 + 1 44 | ] 45 | 46 | if conditions.all? 47 | to_delete = [] 48 | new_num_messages = @messages.size 49 | @messages.each_with_index do |ele, i| 50 | if ele["role"] != "system" 51 | to_delete << i 52 | new_num_messages -= 1 53 | end 54 | break if new_num_messages <= @num_retained_turns * 2 + 1 55 | end 56 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 57 | end 58 | when :normal 59 | ############################################################ 60 | # Normal mode recuder defined here # 61 | # @messages: messages to this point # 62 | ############################################################ 63 | conditions = [ 64 | @messages.size > 1, 65 | @messages.size > @num_retained_turns * 2 + 1 66 | ] 67 | 68 | if conditions.all? 69 | to_delete = [] 70 | new_num_messages = @messages.size 71 | @messages.each_with_index do |ele, i| 72 | if ele["role"] != "system" 73 | to_delete << i 74 | new_num_messages -= 1 75 | end 76 | break if new_num_messages <= @num_retained_turns * 2 + 1 77 | end 78 | @messages.delete_if.with_index { |_, i| to_delete.include? 
i } 79 | end 80 | end 81 | end 82 | ) 83 | @completion = openai_completion 84 | end 85 | end 86 | -------------------------------------------------------------------------------- /apps/novel/novel.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class Novel < MonadicApp 6 | DESC = "Interactive Story Plot Generator" 7 | COLOR = "magenta" 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 10 13 | params = { 14 | "temperature" => 0.3, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.1, 17 | "frequency_penalty" => 0.1, 18 | "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | mode = research_mode ? :research : :normal 24 | template_json = TEMPLATES["normal/novel"] 25 | template_md = TEMPLATES["research/novel"] 26 | super(mode: research_mode ? :research : :normal, 27 | params: params, 28 | template_json: template_json, 29 | template_md: template_md, 30 | placeholders: {}, 31 | prop_accumulator: "messages", 32 | prop_newdata: "response", 33 | update_proc: proc do 34 | case mode 35 | when :research 36 | ############################################################ 37 | # Research mode reduder defined here # 38 | # @messages: messages to this point # 39 | # @metadata: currently available metdata sent from GPT # 40 | ############################################################ 41 | conditions = [ 42 | @messages.size > 1, 43 | @messages.size > @num_retained_turns * 2 + 1 44 | ] 45 | 46 | if conditions.all? 
47 | to_delete = [] 48 | new_num_messages = @messages.size 49 | @messages.each_with_index do |ele, i| 50 | if ele["role"] != "system" 51 | to_delete << i 52 | new_num_messages -= 1 53 | end 54 | break if new_num_messages <= @num_retained_turns * 2 + 1 55 | end 56 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 57 | end 58 | when :normal 59 | ############################################################ 60 | # Normal mode recuder defined here # 61 | # @messages: messages to this point # 62 | ############################################################ 63 | 64 | conditions = [ 65 | @messages.size > 1, 66 | @messages.size > @num_retained_turns * 2 + 1 67 | ] 68 | 69 | if conditions.all? 70 | to_delete = [] 71 | new_num_messages = @messages.size 72 | @messages.each_with_index do |ele, i| 73 | if ele["role"] != "system" 74 | to_delete << i 75 | new_num_messages -= 1 76 | end 77 | break if new_num_messages <= @num_retained_turns * 2 + 1 78 | end 79 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 80 | end 81 | end 82 | end 83 | ) 84 | @completion = openai_completion 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /user_apps/linguistic/linguistic.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class Linguistic < MonadicApp 6 | DESC = "Linguistic Analysis App (experimental)" 7 | COLOR = "red" 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 10 13 | params = { 14 | "temperature" => 0.0, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.0, 17 | "frequency_penalty" => 0.0, 18 | "model" => research_mode ? 
SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | mode = research_mode ? :research : :normal 24 | template_json = TEMPLATES["normal/linguistic"] 25 | template_md = TEMPLATES["research/linguistic"] 26 | super(mode: mode, 27 | params: params, 28 | template_json: template_json, 29 | template_md: template_md, 30 | placeholders: {}, 31 | prop_accumulator: "messages", 32 | prop_newdata: "response", 33 | update_proc: proc do 34 | case mode 35 | when :research 36 | ############################################################ 37 | # Research mode reduder defined here # 38 | # @messages: messages to this point # 39 | # @metadata: currently available metdata sent from GPT # 40 | ############################################################ 41 | conditions = [ 42 | @messages.size > 1, 43 | @messages.size > @num_retained_turns * 2 + 1 44 | ] 45 | 46 | if conditions.all? 47 | to_delete = [] 48 | new_num_messages = @messages.size 49 | @messages.each_with_index do |ele, i| 50 | if ele["role"] != "system" 51 | to_delete << i 52 | new_num_messages -= 1 53 | end 54 | break if new_num_messages <= @num_retained_turns * 2 + 1 55 | end 56 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 57 | end 58 | when :normal 59 | ############################################################ 60 | # Normal mode recuder defined here # 61 | # @messages: messages to this point # 62 | ############################################################ 63 | 64 | conditions = [ 65 | @messages.size > 1, 66 | @messages.size > @num_retained_turns * 2 + 1 67 | ] 68 | 69 | if conditions.all? 
70 | to_delete = [] 71 | new_num_messages = @messages.size 72 | @messages.each_with_index do |ele, i| 73 | if ele["role"] != "system" 74 | to_delete << i 75 | new_num_messages -= 1 76 | end 77 | break if new_num_messages <= @num_retained_turns * 2 + 1 78 | end 79 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 80 | end 81 | end 82 | end 83 | ) 84 | @completion = openai_completion 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /user_apps/boilerplates/boilerplate.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class {{APP_CLASS_NAME}} < MonadicApp 6 | DESC = "Monadic Chat app ({{APP_NAME}})" 7 | COLOR = "white" # green/yellow/read/blue/magenta/cyan/white 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 10 13 | params = { 14 | "temperature" => 0.3, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.2, 17 | "frequency_penalty" => 0.2, 18 | "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | mode = research_mode ? 
:research : :normal 24 | template_json = TEMPLATES["normal/{{APP_NAME}}"] 25 | template_md = TEMPLATES["research/{{APP_NAME}}"] 26 | super(mode: mode, 27 | params: params, 28 | template_json: template_json, 29 | template_md: template_md, 30 | placeholders: {}, 31 | prop_accumulator: "messages", 32 | prop_newdata: "response", 33 | update_proc: proc do 34 | case mode 35 | when :research 36 | ############################################################ 37 | # Research mode reduder defined here # 38 | # @messages: messages to this point # 39 | # @metadata: currently available metdata sent from GPT # 40 | ############################################################ 41 | conditions = [ 42 | @messages.size > 1, 43 | @messages.size > @num_retained_turns * 2 + 1 44 | ] 45 | 46 | if conditions.all? 47 | to_delete = [] 48 | new_num_messages = @messages.size 49 | @messages.each_with_index do |ele, i| 50 | if ele["role"] != "system" 51 | to_delete << i 52 | new_num_messages -= 1 53 | end 54 | break if new_num_messages <= @num_retained_turns * 2 + 1 55 | end 56 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 57 | end 58 | when :normal 59 | ############################################################ 60 | # Normal mode recuder defined here # 61 | # @messages: messages to this point # 62 | ############################################################ 63 | conditions = [ 64 | @messages.size > 1, 65 | @messages.size > @num_retained_turns * 2 + 1 66 | ] 67 | 68 | if conditions.all? 69 | to_delete = [] 70 | new_num_messages = @messages.size 71 | @messages.each_with_index do |ele, i| 72 | if ele["role"] != "system" 73 | to_delete << i 74 | new_num_messages -= 1 75 | end 76 | break if new_num_messages <= @num_retained_turns * 2 + 1 77 | end 78 | @messages.delete_if.with_index { |_, i| to_delete.include? 
i } 79 | end 80 | end 81 | end 82 | ) 83 | @completion = openai_completion 84 | end 85 | end 86 | -------------------------------------------------------------------------------- /apps/translate/translate.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "../../lib/monadic_app" 4 | 5 | class Translate < MonadicApp 6 | DESC = "Interactive Multilingual Translator" 7 | COLOR = "yellow" 8 | 9 | attr_accessor :template, :config, :params, :completion 10 | 11 | def initialize(openai_completion, replacements: nil, research_mode: false, stream: true, params: {}) 12 | @num_retained_turns = 10 13 | params = { 14 | "temperature" => 0.2, 15 | "top_p" => 1.0, 16 | "presence_penalty" => 0.0, 17 | "frequency_penalty" => 0.0, 18 | "model" => research_mode ? SETTINGS["research_model"] : SETTINGS["normal_model"], 19 | "max_tokens" => 1000, 20 | "stream" => stream, 21 | "stop" => nil 22 | }.merge(params) 23 | replacements ||= { 24 | "mode" => :interactive, 25 | "{{TARGET_LANG}}" => "Enter target language" 26 | } 27 | mode = research_mode ? :research : :normal 28 | template_json = TEMPLATES["normal/translate"] 29 | template_md = TEMPLATES["research/translate"] 30 | super(mode: research_mode ? :research : :normal, 31 | params: params, 32 | template_json: template_json, 33 | template_md: template_md, 34 | placeholders: replacements, 35 | prop_accumulator: "messages", 36 | prop_newdata: "response", 37 | update_proc: proc do 38 | case mode 39 | when :research 40 | ############################################################ 41 | # Research mode reduder defined here # 42 | # @messages: messages to this point # 43 | # @metadata: currently available metdata sent from GPT # 44 | ############################################################ 45 | conditions = [ 46 | @messages.size > 1, 47 | @messages.size > @num_retained_turns * 2 + 1 48 | ] 49 | 50 | if conditions.all? 
51 | to_delete = [] 52 | new_num_messages = @messages.size 53 | @messages.each_with_index do |ele, i| 54 | if ele["role"] != "system" 55 | to_delete << i 56 | new_num_messages -= 1 57 | end 58 | break if new_num_messages <= @num_retained_turns * 2 + 1 59 | end 60 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 61 | end 62 | when :normal 63 | ############################################################ 64 | # Normal mode recuder defined here # 65 | # @messages: messages to this point # 66 | ############################################################ 67 | 68 | conditions = [ 69 | @messages.size > 1, 70 | @messages.size > @num_retained_turns * 2 + 1 71 | ] 72 | 73 | if conditions.all? 74 | to_delete = [] 75 | new_num_messages = @messages.size 76 | @messages.each_with_index do |ele, i| 77 | if ele["role"] != "system" 78 | to_delete << i 79 | new_num_messages -= 1 80 | end 81 | break if new_num_messages <= @num_retained_turns * 2 + 1 82 | end 83 | @messages.delete_if.with_index { |_, i| to_delete.include? i } 84 | end 85 | end 86 | end 87 | ) 88 | @completion = openai_completion 89 | end 90 | end 91 | -------------------------------------------------------------------------------- /lib/monadic_chat/formatting.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # methods for formatting and presenting 6 | ################################################## 7 | 8 | def show_template 9 | puts "-----------------------------------------" 10 | puts @template 11 | puts "-----------------------------------------" 12 | end 13 | 14 | def format_data 15 | contextual = [] 16 | accumulator = [] 17 | 18 | if @mode == :research 19 | objectify.each do |key, val| 20 | next if %w[prompt response messages].include? 
key 21 | 22 | contextual << "- **#{key.split("_").map(&:capitalize).join(" ")}**: #{val.to_s.strip}" 23 | end 24 | contextual << "- **Num of Tokens in Template**: #{@template_tokens}" 25 | end 26 | 27 | @messages.each do |m| 28 | accumulator << "#{m["role"].capitalize}: #{m["content"]}" 29 | end 30 | 31 | h1 = "# Monadic :: Chat / #{self.class.name}" 32 | contextual.map!(&:strip).unshift "## Contextual Data\n" unless contextual.empty? 33 | 34 | accum_label = @prop_accumulator.split("_").map(&:capitalize).join(" ") 35 | accumulator.map!(&:strip).unshift "## #{accum_label}\n" unless accumulator.empty? 36 | 37 | "#{h1}\n\n#{contextual.join("\n")}\n\n#{accumulator.join("\n\n")}" 38 | end 39 | 40 | def show_data 41 | print PROMPT_SYSTEM.prefix 42 | 43 | res = format_data 44 | print "\n#{TTY::Markdown.parse(res, indent: 0)}" 45 | end 46 | 47 | def set_html 48 | res = format_data.sub(%r{::(.+?)/(.+?)\b}) do 49 | " :: #{Regexp.last_match(1)} / #{Regexp.last_match(2)}" 50 | end 51 | res = res.gsub("```") { "~~~" } 52 | .gsub(/^(system):/i) { " #{Regexp.last_match(1)}
" } 53 | .gsub(/^(user):/i) { " #{Regexp.last_match(1)}
" } 54 | .gsub(/^(assistant|gpt):/i) { " #{Regexp.last_match(1)}
" } 55 | add_to_html(res, TEMP_HTML) 56 | end 57 | 58 | def show_html 59 | set_html 60 | print PROMPT_SYSTEM.prefix 61 | print "HTML is ready\n" 62 | Launchy.open(TEMP_HTML) 63 | end 64 | 65 | def add_to_html(text, filepath) 66 | text = text.gsub(/(?\s])(?!\n[\n<])\n/m) { "
\n" } 67 | text = text.gsub(/~~~(.+?)~~~/m) do 68 | m = Regexp.last_match 69 | "~~~#{m[1].gsub("
\n") { "\n" }}~~~" 70 | end 71 | text = text.gsub(/`(.+?)`/) do 72 | m = Regexp.last_match 73 | "`#{m[1].gsub("
\n") { "\n" }}`" 74 | end 75 | 76 | FileUtils.touch(filepath) unless File.exist?(filepath) 77 | File.open(filepath, "w") do |f| 78 | html = <<~HTML 79 | 80 | 81 | 82 | 83 | 84 | 87 | Monadic Chat 88 | 89 | 90 | #{Kramdown::Document.new(text, syntax_highlighter: :rouge, syntax_highlighter_ops: {}).to_html} 91 | 92 | 93 | 94 | 99 | 100 | HTML 101 | f.write html 102 | end 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /lib/monadic_app.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require_relative "./monadic_chat" 4 | require_relative "./monadic_chat/console" 5 | require_relative "./monadic_chat/formatting" 6 | require_relative "./monadic_chat/interaction" 7 | require_relative "./monadic_chat/menu" 8 | require_relative "./monadic_chat/parameters" 9 | require_relative "./monadic_chat/internals" 10 | require_relative "./monadic_chat/tools" 11 | 12 | class MonadicApp 13 | include MonadicChat 14 | attr_reader :template, :messages, :turns 15 | 16 | def initialize(mode:, params:, template_json:, template_md:, placeholders:, prop_accumulator:, prop_newdata:, update_proc:) 17 | @mode = mode.to_sym 18 | @placeholders = placeholders 19 | @prop_accumulator = prop_accumulator 20 | @prop_newdata = prop_newdata 21 | @completion = nil 22 | @update_proc = update_proc 23 | @params_initial = params 24 | @params = @params_initial.dup 25 | @html = false 26 | 27 | @method = OpenAI.model_to_method(@params["model"]) 28 | 29 | @metadata = {} 30 | json = File.read(template_json) 31 | .gsub("{{DATETIME}}", Time.now.strftime("%Y-%m-%d %H:%M:%S")) 32 | .gsub("{{DATE}}", Time.now.strftime("%Y-%m-%d")) 33 | @messages_initial = JSON.parse(json)["messages"] 34 | @messages = @messages_initial.dup 35 | @turns = 0 36 | @template_initial = File.read(template_md) 37 | @template = @template_initial.dup 38 | 39 | @template_tokens = 0 40 | end 41 | 42 | 
################################################## 43 | # methods for running monadic app 44 | ################################################## 45 | 46 | def parse(input = nil) 47 | loop do 48 | case input 49 | when TrueClass 50 | input = user_input 51 | next 52 | when /\A\s*(?:help|menu|commands?|\?|h)\s*\z/i 53 | return true unless show_menu 54 | when /\A\s*(?:bye|exit|quit)\s*\z/i 55 | break 56 | when /\A\s*(?:reset)\s*\z/i 57 | reset 58 | when /\A\s*(?:data|context)\s*\z/i 59 | show_data 60 | when /\A\s*(?:html)\s*\z/i 61 | @html = true 62 | show_html 63 | when /\A\s*(?:save)\s*\z/i 64 | save_data 65 | when /\A\s*(?:load)\s*\z/i 66 | load_data 67 | when /\A\s*(?:clear|clean)\s*\z/i 68 | clear_screen 69 | when /\A\s*(?:params?|parameters?|config|configuration)\s*\z/i 70 | change_parameter 71 | else 72 | if input && confirm_query(input) 73 | begin 74 | bind(input, num_retrials: SETTINGS["num_retrials"]) 75 | rescue StandardError => e 76 | input = ask_retrial(input, e.message) 77 | next 78 | end 79 | end 80 | end 81 | if input.to_s == "" 82 | input = false 83 | clear_screen 84 | end 85 | input = user_input 86 | end 87 | rescue MonadicError 88 | false 89 | end 90 | 91 | def banner(title, desc, color) 92 | screen_width = TTY::Screen.width - 2 93 | width = screen_width < TITLE_WIDTH ? screen_width : TITLE_WIDTH 94 | title = PASTEL.bold.send(color.to_sym, title.center(width, " ")) 95 | desc = desc.center(width, " ") 96 | padding = "".center(width, " ") 97 | banner = TTY::Box.frame "#{padding}\n#{title}\n#{desc}\n#{padding}" 98 | print "\n", banner.strip, "\n" 99 | end 100 | 101 | def run 102 | clear_screen 103 | banner("MONADIC::CHAT / #{self.class.name}", self.class::DESC, self.class::COLOR) 104 | show_greet 105 | 106 | if @placeholders.empty? 107 | parse(user_input) 108 | else 109 | loadfile = PROMPT_SYSTEM.select("\nLoad saved file? 
(Make sure the file is saved by the same app)", default: 2, show_help: :never) do |menu| 110 | menu.choice "Yes", "yes" 111 | menu.choice "No", "no" 112 | end 113 | parse(user_input) if loadfile == "yes" && load_data || fulfill_placeholders 114 | end 115 | end 116 | end 117 | -------------------------------------------------------------------------------- /lib/monadic_chat/parameters.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # methods for parametter setting 6 | ################################################## 7 | 8 | def change_parameter 9 | parameter = PROMPT_SYSTEM.select("Select the parmeter to be set:", per_page: 7, cycle: true, show_help: :never, filter: true, default: 1) do |menu| 10 | menu.choice "#{BULLET} Cancel", "cancel" 11 | menu.choice "#{BULLET} model: #{@params["model"]}", "model" 12 | menu.choice "#{BULLET} max_tokens: #{@params["max_tokens"]}", "max_tokens" 13 | menu.choice "#{BULLET} temperature: #{@params["temperature"]}", "temperature" 14 | menu.choice "#{BULLET} top_p: #{@params["top_p"]}", "top_p" 15 | menu.choice "#{BULLET} frequency_penalty: #{@params["frequency_penalty"]}", "frequency_penalty" 16 | menu.choice "#{BULLET} presence_penalty: #{@params["presence_penalty"]}", "presence_penalty" 17 | end 18 | return if parameter == "cancel" 19 | 20 | case parameter 21 | when "model" 22 | value = change_model 23 | @method = OpenAI.model_to_method(value) 24 | when "max_tokens" 25 | value = change_max_tokens 26 | when "temperature" 27 | value = change_temperature 28 | when "top_p" 29 | value = change_top_p 30 | when "frequency_penalty" 31 | value = change_frequency_penalty 32 | when "presence_penalty" 33 | value = change_presence_penalty 34 | end 35 | @params[parameter] = value if value 36 | print "Parameter #{parameter} has been set to #{PASTEL.green(value)}\n" if value 37 | end 38 | 39 | 
def change_max_tokens 40 | PROMPT_SYSTEM.ask("Set value of max tokens [1000 to 8000]:", convert: :int) do |q| 41 | q.in "1000-8000" 42 | q.messages[:range?] = "Value out of expected range [1000 to 2048]" 43 | end 44 | end 45 | 46 | def change_temperature 47 | PROMPT_SYSTEM.ask("Set value of temperature [0.0 to 1.0]:", convert: :float) do |q| 48 | q.in "0.0-1.0" 49 | q.messages[:range?] = "Value out of expected range [0.0 to 1.0]" 50 | end 51 | end 52 | 53 | def change_top_p 54 | PROMPT_SYSTEM.ask("Set value of top_p [0.0 to 1.0]:", convert: :float) do |q| 55 | q.in "0.0-1.0" 56 | q.messages[:range?] = "Value out of expected range [0.0 to 1.0]" 57 | end 58 | end 59 | 60 | def change_frequency_penalty 61 | PROMPT_SYSTEM.ask("Set value of frequency penalty [-2.0 to 2.0]:", convert: :float) do |q| 62 | q.in "-2.0-2.0" 63 | q.messages[:range?] = "Value out of expected range [-2.0 to 2.0]" 64 | end 65 | end 66 | 67 | def change_presence_penalty 68 | PROMPT_SYSTEM.ask("Set value of presence penalty [-2.0 to 2.0]:", convert: :float) do |q| 69 | q.in "-2.0-2.0" 70 | q.messages[:range?] = "Value out of expected range [-2.0 to 2.0]" 71 | end 72 | end 73 | 74 | def change_model 75 | model = PROMPT_SYSTEM.select("Select a model:", per_page: 10, cycle: false, show_help: :never, filter: true, default: 1) do |menu| 76 | menu.choice "#{BULLET} Cancel", "cancel" 77 | TTY::Cursor.save 78 | SPINNER.auto_spin 79 | models = @completion.models 80 | SPINNER.stop 81 | TTY::Cursor.restore 82 | case @mode 83 | when :research 84 | models.filter { |m| ["completions", "chat/completions"].include? 
OpenAI.model_to_method(m["id"]) }.sort_by { |m| -m["created"] }.each do |m| 85 | menu.choice "#{BULLET} #{m["id"]}", m["id"] 86 | end 87 | when :normal 88 | models.filter { |m| OpenAI.model_to_method(m["id"]) == "chat/completions" && OpenAI.model_to_method(m["id"]) }.sort_by { |m| -m["created"] }.each do |m| 89 | menu.choice "#{BULLET} #{m["id"]}", m["id"] 90 | end 91 | end 92 | end 93 | if model == "cancel" 94 | nil 95 | else 96 | model 97 | end 98 | end 99 | 100 | def show_params 101 | params_md = "# Current Parameter Values\n\n" 102 | @params.each do |key, val| 103 | next if /\A(?:prompt|stream|logprobs|echo|stop)\z/ =~ key 104 | 105 | params_md += "- #{key}: #{val}\n" 106 | end 107 | print prompt_system, "\n" 108 | print "#{TTY::Markdown.parse(params_md, indent: 0).strip}\n\n" 109 | end 110 | end 111 | -------------------------------------------------------------------------------- /assets/pigments-default.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #f8f8f8; } 3 | .highlight .c { color: #408080; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #008000; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .ch { color: #408080; font-style: italic } /* Comment.Hashbang */ 8 | .highlight .cm { color: #408080; font-style: italic } /* Comment.Multiline */ 9 | .highlight .cp { color: #BC7A00 } /* Comment.Preproc */ 10 | .highlight .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */ 11 | .highlight .c1 { color: #408080; font-style: italic } /* Comment.Single */ 12 | .highlight .cs { color: #408080; font-style: italic } /* Comment.Special */ 13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 14 | .highlight .ge { font-style: italic } /* Generic.Emph */ 15 | .highlight .gr { color: #FF0000 } /* 
Generic.Error */ 16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 18 | .highlight .go { color: #888888 } /* Generic.Output */ 19 | .highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ 20 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 23 | .highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ 24 | .highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ 25 | .highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ 26 | .highlight .kp { color: #008000 } /* Keyword.Pseudo */ 27 | .highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ 28 | .highlight .kt { color: #B00040 } /* Keyword.Type */ 29 | .highlight .m { color: #666666 } /* Literal.Number */ 30 | .highlight .s { color: #BA2121 } /* Literal.String */ 31 | .highlight .na { color: #7D9029 } /* Name.Attribute */ 32 | .highlight .nb { color: #008000 } /* Name.Builtin */ 33 | .highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ 34 | .highlight .no { color: #880000 } /* Name.Constant */ 35 | .highlight .nd { color: #AA22FF } /* Name.Decorator */ 36 | .highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */ 37 | .highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */ 38 | .highlight .nf { color: #0000FF } /* Name.Function */ 39 | .highlight .nl { color: #A0A000 } /* Name.Label */ 40 | .highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ 41 | .highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ 42 | .highlight .nv { color: #19177C } /* Name.Variable */ 43 | .highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ 44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace 
*/ 45 | .highlight .mb { color: #666666 } /* Literal.Number.Bin */ 46 | .highlight .mf { color: #666666 } /* Literal.Number.Float */ 47 | .highlight .mh { color: #666666 } /* Literal.Number.Hex */ 48 | .highlight .mi { color: #666666 } /* Literal.Number.Integer */ 49 | .highlight .mo { color: #666666 } /* Literal.Number.Oct */ 50 | .highlight .sa { color: #BA2121 } /* Literal.String.Affix */ 51 | .highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ 52 | .highlight .sc { color: #BA2121 } /* Literal.String.Char */ 53 | .highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ 54 | .highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ 55 | .highlight .s2 { color: #BA2121 } /* Literal.String.Double */ 56 | .highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ 57 | .highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ 58 | .highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ 59 | .highlight .sx { color: #008000 } /* Literal.String.Other */ 60 | .highlight .sr { color: #BB6688 } /* Literal.String.Regex */ 61 | .highlight .s1 { color: #BA2121 } /* Literal.String.Single */ 62 | .highlight .ss { color: #19177C } /* Literal.String.Symbol */ 63 | .highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ 64 | .highlight .fm { color: #0000FF } /* Name.Function.Magic */ 65 | .highlight .vc { color: #19177C } /* Name.Variable.Class */ 66 | .highlight .vg { color: #19177C } /* Name.Variable.Global */ 67 | .highlight .vi { color: #19177C } /* Name.Variable.Instance */ 68 | .highlight .vm { color: #19177C } /* Name.Variable.Magic */ 69 | .highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ 70 | -------------------------------------------------------------------------------- /lib/monadic_chat/authenticate.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | module MonadicChat 4 | def 
self.authenticate(overwrite: false, message: true) 5 | check = lambda do |token| 6 | if message 7 | print TTY::Cursor.restore 8 | print TTY::Cursor.clear_screen_down 9 | print "\n" 10 | SPINNER.auto_spin 11 | end 12 | 13 | if !token || token.strip == "" 14 | if message 15 | SPINNER.stop 16 | print TTY::Cursor.restore 17 | print "\n" 18 | mdprint "- Authentication: #{PASTEL.bold.red("Failure")}\n" if message 19 | end 20 | return false 21 | end 22 | 23 | begin 24 | models = OpenAI.models(token) 25 | raise if models.empty? 26 | 27 | if message 28 | SPINNER.stop 29 | print TTY::Cursor.restore, "\n" 30 | mdprint "#{PASTEL.on_green(" System ")} Config file: `#{CONFIG}`\n" 31 | print "\n" 32 | mdprint "- Authentication: #{PASTEL.bold.green("Success")}\n" 33 | end 34 | 35 | if SETTINGS["normal_model"] && !models.map { |m| m["id"] }.index(SETTINGS["normal_model"]) 36 | if message 37 | SPINNER.stop 38 | mdprint "- Normal mode model specified in config file not available\n" 39 | mdprint "- Fallback to the default model (`#{OpenAI.default_model(research_mode: false)}`)\n" 40 | end 41 | SETTINGS["normal_model"] = false 42 | end 43 | SETTINGS["normal_model"] ||= OpenAI.default_model(research_mode: false) 44 | mdprint "- Normal mode model: `#{SETTINGS["normal_model"]}`\n" if message 45 | 46 | if SETTINGS["research_model"] && !models.map { |m| m["id"] }.index(SETTINGS["research_model"]) 47 | if message 48 | SPINNER.stop 49 | mdprint "- Research mode model specified in config file not available\n" 50 | mdprint "- Fallback to the default model (`#{OpenAI.default_model(research_mode: true)}`)\n" 51 | end 52 | SETTINGS["research_model"] = false 53 | end 54 | SETTINGS["research_model"] ||= OpenAI.default_model(research_mode: true) 55 | mdprint "- Research mode model: `#{SETTINGS["research_model"]}`\n" if message 56 | 57 | SETTINGS["max_tokens_wiki"] = 1000 unless SETTINGS["max_chars_wiki"].to_i.between?(100, 4000) 58 | SETTINGS["num_retrials"] = 2 unless 
SETTINGS["num_retrials"].to_i.between?(1, 10) 59 | SETTINGS["min_query_size"] = 5 unless SETTINGS["min_query_size"].to_i.between?(1, 20) 60 | SETTINGS["timeout_sec"] = 120 unless SETTINGS["timeout_sec"].to_i.between?(10, 600) 61 | 62 | OpenAI::Completion.new(token) 63 | rescue StandardError 64 | if message 65 | SPINNER.stop 66 | print TTY::Cursor.restore 67 | print "\n" 68 | mdprint "- Authentication: #{PASTEL.bold.red("Failure")}\n" if message 69 | end 70 | false 71 | end 72 | end 73 | 74 | completion = nil 75 | 76 | if overwrite 77 | access_token = PROMPT_SYSTEM.ask("Input your OpenAI access token:") 78 | return false if access_token.to_s == "" 79 | 80 | completion = check.call(access_token) 81 | 82 | if completion 83 | File.open(CONFIG, "w") do |f| 84 | SETTINGS["access_token"] = access_token 85 | f.write(JSON.pretty_generate(SETTINGS)) 86 | print "New access token has been saved to #{CONFIG}\n\n" if message 87 | print "Please #{PASTEL.bold.green("restart")} Monadic Chat\n" 88 | exit 89 | end 90 | end 91 | elsif File.exist?(CONFIG) 92 | json = File.read(CONFIG) 93 | begin 94 | config = JSON.parse(json) 95 | rescue JSON::ParserError 96 | puts "Error: config file does not contain a valid JSON object." 
97 | exit 98 | end 99 | SETTINGS.merge!(config) 100 | access_token = config["access_token"] 101 | completion = check.call(access_token) 102 | else 103 | access_token ||= PROMPT_SYSTEM.ask("Input your OpenAI access token:") 104 | return false if access_token.to_s == "" 105 | 106 | completion = check.call(access_token) 107 | if completion 108 | File.open(CONFIG, "w") do |f| 109 | SETTINGS["access_token"] = access_token 110 | f.write(JSON.pretty_generate(SETTINGS)) 111 | end 112 | print "Access token has been saved to #{CONFIG}\n\n" if message 113 | print "Please #{PASTEL.bold.green("restart")} Monadic Chat\n" 114 | exit 115 | end 116 | end 117 | completion || authenticate(overwrite: true) 118 | end 119 | end 120 | -------------------------------------------------------------------------------- /lib/monadic_chat.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require "blingfire" 4 | require "tty-cursor" 5 | require "tty-screen" 6 | require "tty-markdown" 7 | require "tty-spinner" 8 | require "tty-prompt" 9 | require "tty-box" 10 | require "pastel" 11 | require "oj" 12 | require "kramdown" 13 | require "rouge" 14 | require "launchy" 15 | require "io/console" 16 | require "readline" 17 | require "nokogiri" 18 | require "open-uri" 19 | 20 | require_relative "./monadic_chat/version" 21 | require_relative "./monadic_chat/open_ai" 22 | require_relative "./monadic_chat/authenticate" 23 | require_relative "./monadic_chat/commands" 24 | require_relative "./monadic_chat/helper" 25 | 26 | Oj.mimic_JSON 27 | 28 | module MonadicChat 29 | SETTINGS = { 30 | "normal_model" => "gpt-4o-mini", 31 | "research_model" => "gpt-4o-2024-08-06", 32 | "max_tokens_wiki" => 1600, 33 | "num_retrials" => 2, 34 | "min_query_size" => 5, 35 | "timeout_sec" => 120 36 | } 37 | gpt2model_path = File.absolute_path(File.join(__dir__, "..", "assets", "gpt2.bin")) 38 | 39 | BLINGFIRE = BlingFire.load_model(gpt2model_path) 40 | 
CONFIG = File.join(Dir.home, "monadic_chat.conf") 41 | TITLE_WIDTH = 72 42 | APPS_DIR = File.absolute_path(File.join(__dir__, "..", "apps")) 43 | USER_APPS_DIR = File.absolute_path(File.join(__dir__, "..", "user_apps")) 44 | 45 | apps_dir_list = Dir.entries(APPS_DIR) 46 | .reject { |entry| /\A\./ =~ entry || /\A_/ =~ entry.split("/").last } 47 | .map { |entry| File.join(APPS_DIR, entry) } 48 | 49 | user_apps_dir_list = Dir.entries(USER_APPS_DIR) 50 | .reject { |entry| /\A\./ =~ entry || /\A_/ =~ entry.split("/").last } 51 | .reject { |entry| /\Aboilerplates/ =~ entry } 52 | .map { |entry| File.join(USER_APPS_DIR, entry) } 53 | 54 | APPS_DIR_LIST = apps_dir_list + user_apps_dir_list 55 | 56 | templates = {} 57 | APPS_DIR_LIST.each do |app| 58 | basename = File.basename(app, ".*") 59 | normal_mode_template = File.absolute_path(File.join(app, "#{basename}.json")) 60 | templates["normal/#{basename}"] = normal_mode_template if File.exist? normal_mode_template 61 | research_mode_template = File.absolute_path(File.join(app, "#{basename}.md")) 62 | templates["research/#{basename}"] = research_mode_template if File.exist? 
research_mode_template 63 | end 64 | APPS = APPS_DIR_LIST.map { |dir| File.basename(dir, ".*") } 65 | 66 | TEMPLATES = templates 67 | 68 | PASTEL = Pastel.new 69 | 70 | TEMP_HTML = File.join(Dir.home, "monadic_chat.html") 71 | TEMP_JSON = File.join(Dir.home, "monadic_chat.json") 72 | TEMP_MD = File.join(Dir.home, "monadic_chat.md") 73 | 74 | style = +File.read(File.join(__dir__, "..", "assets", "github.css")).gsub(".markdown-") { "" } 75 | style << File.read(File.join(__dir__, "..", "assets", "pigments-default.css")) 76 | style << <<~CSS 77 | body { 78 | margin: 50px; 79 | font-family: "Helvetica Neue", Arial, "Hiragino Kaku Gothic ProN", "Hiragino Sans", Meiryo, sans-serif; 80 | color: #333 81 | } 82 | .monadic_user{ 83 | display:inline-block; 84 | padding-left: 0.5em; 85 | padding-right: 0.5em; 86 | font-weight: bold; 87 | background-color: #c8e5ff; 88 | margin-bottom: 0.5em; 89 | } 90 | .monadic_chat { 91 | display:inline-block; 92 | padding-left: 0.5em; 93 | padding-right: 0.5em; 94 | font-weight: bold; 95 | background-color: #ffcaca; 96 | margin-bottom: 0.5em; 97 | } 98 | .monadic_system { 99 | display:inline-block; 100 | padding-left: 0.5em; 101 | padding-right: 0.5em; 102 | font-weight: bold; 103 | background-color: #c4ffcb; 104 | margin-bottom: 0.5em; 105 | } 106 | .monadic_search_engine { 107 | display:inline-block; 108 | padding-left: 0.5em; 109 | padding-right: 0.5em; 110 | font-weight: bold; 111 | background-color: #ffe9c4; 112 | margin-bottom: 0.5em; 113 | } 114 | .monadic_gray { 115 | display:inline-block; 116 | font-weight: bold; 117 | color: #999; 118 | margin-bottom: 0.5em; 119 | } 120 | .monadic_app { 121 | display:inline-block; 122 | font-weight: bold; 123 | color: #EB742B; 124 | margin-bottom: 0.5em; 125 | } 126 | CSS 127 | 128 | GITHUB_STYLE = style 129 | PROMPT_USER = TTY::PromptX.new(active_color: :blue, prefix: prompt_user) 130 | PROMPT_SYSTEM = TTY::PromptX.new(active_color: :blue, prefix: "#{prompt_system} ") 131 | PROMPT_ASSISTANT = 
TTY::PromptX.new(active_color: :red, prefix: "#{prompt_assistant} ") 132 | SPINNER = TTY::Spinner.new(format: :arrow_pulse, clear: true) 133 | BULLET = "\e[33m●\e[0m" 134 | HOME = File.expand_path(File.join(__dir__, "..")) 135 | 136 | def self.require_apps 137 | MonadicChat::APPS_DIR_LIST.each do |app_dir| 138 | basename = app_dir.split("/").last 139 | require "#{app_dir}/#{basename}" 140 | end 141 | end 142 | end 143 | -------------------------------------------------------------------------------- /bin/monadic-chat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | # frozen_string_literal: true 4 | 5 | require_relative "../lib/monadic_chat" 6 | MonadicChat.require_apps 7 | 8 | module MonadicMenu 9 | include MonadicChat 10 | 11 | PROMPT_SYSTEM.on(:keypress) do |event| 12 | case event.key.name 13 | when :ctrl_p 14 | PROMPT_SYSTEM.trigger(:keyup) 15 | when :ctrl_n 16 | PROMPT_SYSTEM.trigger(:keydown) 17 | end 18 | end 19 | 20 | def self.clear_screen 21 | print TTY::Cursor.clear_screen_down 22 | print "\e[2J\e[f" 23 | end 24 | 25 | def self.run 26 | screen_width = TTY::Screen.width - 2 27 | width = screen_width < TITLE_WIDTH ? 
screen_width : TITLE_WIDTH 28 | 29 | mon = " M O N A D I C " 30 | dots = "::" 31 | chat = " C H A T " 32 | hpad = " " * ((width - (mon.size + dots.size + chat.size)) / 2).to_i 33 | title = "#{hpad}#{PASTEL.red.bold(mon + dots + chat)}#{hpad}" 34 | subtitle = "OpenAI chat/text-completion API client".center(width, " ") 35 | version = "Version: #{VERSION}".center(width, " ") 36 | vpad = " " * width 37 | 38 | banner = TTY::Box.frame "#{vpad}\n#{title}\n#{subtitle}\n#{version}\n#{vpad}" 39 | 40 | mode = "normal" 41 | openai_completion = nil 42 | parameter = "" 43 | 44 | print_mode = lambda do |m| 45 | case m 46 | when "normal" 47 | PASTEL.bold.green("Normal") 48 | when "research" 49 | PASTEL.bold.red("Research") 50 | end 51 | end 52 | 53 | loop do 54 | clear_screen 55 | print "\n", banner.strip, "\n" 56 | 57 | print TTY::Cursor.save 58 | openai_completion ||= MonadicChat.authenticate 59 | exit unless openai_completion 60 | 61 | max_app_name_width = APPS.reduce(8) { |accum, app| app.length > accum ? 
app.length : accum } + 2 62 | parameter = PROMPT_SYSTEM.select("Current mode: #{print_mode.call(mode)}\n\nSelect item:", 63 | per_page: 10, 64 | cycle: true, 65 | filter: true, 66 | default: 1, 67 | show_help: :never) do |menu| 68 | APPS.each do |app| 69 | next unless TEMPLATES["#{mode}/#{app}"] 70 | 71 | desc = eval("#{app.capitalize}::DESC", binding, __FILE__, __LINE__) 72 | menu.choice "#{BULLET} #{PASTEL.bold(app.capitalize.ljust(max_app_name_width))} #{desc}", app 73 | end 74 | 75 | case mode 76 | when "research" 77 | menu.choice "#{BULLET} #{PASTEL.bold("Mode".ljust(max_app_name_width))} Switch from #{PASTEL.bold.red("Research")} (current) to #{PASTEL.bold.green("Normal")}", "mode" 78 | when "normal" 79 | menu.choice "#{BULLET} #{PASTEL.bold("Mode".ljust(max_app_name_width))} Switch from #{PASTEL.bold.green("Normal")} (current) to #{PASTEL.bold.red("Research")}", "mode" 80 | end 81 | 82 | menu.choice "#{BULLET} #{PASTEL.bold("Readme".ljust(max_app_name_width))} Open Readme/Documentation", "readme" 83 | menu.choice "#{BULLET} #{PASTEL.bold("Quit".ljust(max_app_name_width))} Quit/Exit/Bye", "exit" 84 | end 85 | 86 | begin 87 | case parameter 88 | when "mode" 89 | mode = mode == "normal" ? 
"research" : "normal" 90 | next 91 | when "readme" 92 | MonadicChat.open_readme 93 | next 94 | when "exit" 95 | clear_screen 96 | print "#{PASTEL.bold("Bye!")}\n" 97 | exit 98 | else 99 | clear_screen 100 | eval(parameter.capitalize, binding, __FILE__, __LINE__).new(openai_completion, research_mode: mode == "research").run 101 | end 102 | rescue MonadicError 103 | next 104 | rescue StandardError => e 105 | clear_screen 106 | choice = PROMPT_SYSTEM.select("Error: Something went wrong", default: 2, show_help: :never) do |menu| 107 | menu.choice "Return to main menu", "menu" 108 | menu.choice "Show error message and exit", "debug" 109 | end 110 | 111 | case choice 112 | when "menu" 113 | next 114 | when "debug" 115 | puts "Error: #{e.message}" 116 | puts e.backtrace 117 | break 118 | end 119 | end 120 | end 121 | end 122 | end 123 | 124 | case ARGV.size 125 | when 0 126 | MonadicMenu.clear_screen 127 | MonadicMenu.run 128 | when 1 129 | case ARGV[0] 130 | when "readme", "-h" 131 | MonadicChat.open_readme 132 | when "version", "-v" 133 | print MonadicChat::PROMPT_SYSTEM.prefix, MonadicChat::VERSION, "\n" 134 | else 135 | MonadicChat::APPS.each do |app| 136 | next unless app == ARGV[0] 137 | 138 | openai_completion ||= MonadicChat.authenticate(message: false) 139 | eval(app.capitalize, binding, __FILE__, __LINE__).new(openai_completion, research_mode: false).run 140 | exit 141 | end 142 | print MonadicChat::PROMPT_SYSTEM.prefix, "Unknown command", "\n" 143 | end 144 | else 145 | if ARGV[0] == "new" 146 | if ARGV[1].to_s != "" && !MonadicChat::APPS.include?(ARGV[1]) 147 | MonadicChat.create_app(ARGV[1]) 148 | else 149 | print MonadicChat::PROMPT_SYSTEM.prefix, "Invalid app name (must be unique)", "\n" 150 | end 151 | elsif ARGV[0] == "delete" || ARGV[0] == "del" || ARGV[0] == "remove" 152 | if MonadicChat::APPS.include?(ARGV[1]) 153 | MonadicChat.delete_app(ARGV[1]) 154 | else 155 | print MonadicChat::PROMPT_SYSTEM.prefix, "Invalid app name (must exist)", "\n" 156 | end 
157 | else 158 | MonadicChat::APPS.each do |app| 159 | next unless app == ARGV[0] 160 | 161 | openai_completion ||= MonadicChat.authenticate(message: false) 162 | app_obj = eval(app.capitalize, binding, __FILE__, __LINE__).new(openai_completion, research_mode: false, params: {}) 163 | app_obj.bind(ARGV[1..].join(" "), num_retrials: 2) 164 | exit 165 | end 166 | print MonadicChat::PROMPT_SYSTEM.prefix, "Unknown command", "\n" 167 | end 168 | end 169 | -------------------------------------------------------------------------------- /lib/monadic_chat/menu.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # methods for showing menu and menu items 6 | ################################################## 7 | 8 | def show_menu 9 | clear_screen 10 | print TTY::Cursor.save 11 | parameter = PROMPT_SYSTEM.select("Select function:", per_page: 10, cycle: true, filter: true, default: 1, show_help: :never) do |menu| 12 | menu.choice "#{BULLET} #{PASTEL.bold("cancel/return/escape")} Cancel this menu", "cancel" 13 | menu.choice "#{BULLET} #{PASTEL.bold("params/settings/config")} Show and change values of parameters", "params" 14 | menu.choice "#{BULLET} #{PASTEL.bold("data/context")} Show currrent contextual info", "data" 15 | menu.choice "#{BULLET} #{PASTEL.bold("html")} View contextual info on the web browser", "html" 16 | menu.choice "#{BULLET} #{PASTEL.bold("reset")} Reset context to initial state", "reset" 17 | menu.choice "#{BULLET} #{PASTEL.bold("save")} Save current contextual info to file", "save" 18 | menu.choice "#{BULLET} #{PASTEL.bold("load")} Load current contextual info from file", "load" 19 | menu.choice "#{BULLET} #{PASTEL.bold("clear/clean")} Clear screen", "clear" 20 | menu.choice "#{BULLET} #{PASTEL.bold("readme/documentation")} Open readme/documentation", "readme" 21 | menu.choice "#{BULLET} 
#{PASTEL.bold("exit/bye/quit")} Go back to main menu", "exit" 22 | end 23 | 24 | print TTY::Cursor.restore 25 | print TTY::Cursor.clear_screen_down 26 | print TTY::Cursor.restore 27 | 28 | case parameter 29 | when "cancel" 30 | return true 31 | when "params" 32 | change_parameter 33 | when "data" 34 | show_data 35 | when "html" 36 | @html = true 37 | show_html 38 | when "reset" 39 | reset 40 | when "save" 41 | save_data 42 | when "load" 43 | load_data 44 | when "clear" 45 | clear_screen 46 | print TTY::Cursor.clear_screen_down 47 | when "readme" 48 | MonadicChat.open_readme 49 | when "exit" 50 | return false 51 | end 52 | true 53 | end 54 | 55 | def reset 56 | @html = false 57 | @params = @params_initial.dup 58 | @messages = @messages_initial.dup 59 | @template = @template_initial.dup 60 | @template_tokens = 0 61 | 62 | if @placeholders.empty? 63 | print PROMPT_SYSTEM.prefix 64 | print "Context and parameters have been reset.\n" 65 | else 66 | fulfill_placeholders 67 | end 68 | end 69 | 70 | def ask_retrial(input, message = nil) 71 | print PROMPT_SYSTEM.prefix 72 | print "Error: #{message.capitalize}\n" if message 73 | retrial = PROMPT_USER.select("Do you want to try again?", 74 | show_help: :never) do |menu| 75 | menu.choice "Yes", "yes" 76 | menu.choice "No", "no" 77 | menu.choice "Show current contextual data", "show" 78 | end 79 | case retrial 80 | when "yes" 81 | input 82 | when "no" 83 | user_input 84 | when "show" 85 | show_data 86 | ask_retrial(input) 87 | end 88 | end 89 | 90 | def check_file(path) 91 | dirname = File.dirname(File.expand_path(path)) 92 | path == "" || (/\.json\z/ =~ path.strip && Dir.exist?(dirname)) ? 
true : false 93 | end 94 | 95 | def save_data 96 | input = "" 97 | loop do 98 | print TTY::Cursor.save 99 | path = PROMPT_SYSTEM.readline("Enter the file path for the JSON file (including the file name and .json extension): ") 100 | if check_file(path) 101 | input = path 102 | break 103 | else 104 | print TTY::Cursor.restore 105 | print TTY::Cursor.clear_screen_down 106 | end 107 | end 108 | print TTY::Cursor.save 109 | 110 | return if input.to_s == "" 111 | 112 | filepath = File.expand_path(input.strip) 113 | 114 | if File.exist? filepath 115 | overwrite = PROMPT_SYSTEM.select("#{filepath} already exists.\nOverwrite?", 116 | show_help: :never) do |menu| 117 | menu.choice "Yes", "yes" 118 | menu.choice "No", "no" 119 | end 120 | return if overwrite == "no" 121 | end 122 | 123 | FileUtils.touch(filepath) 124 | unless File.exist? filepath 125 | print "File cannot be created\n" 126 | save_data 127 | end 128 | 129 | begin 130 | File.open(filepath, "w") do |f| 131 | case @mode 132 | when :research 133 | m = /JSON:\n+```json\s*(\{.+\})\s*```\n\n/m.match(@template) 134 | data = JSON.parse(m[1]) 135 | data["messages"] = @messages 136 | f.write JSON.pretty_generate(data) 137 | when :normal 138 | f.write JSON.pretty_generate({ "messages" => @messages }) 139 | end 140 | 141 | print "Data has been saved successfully\n" 142 | end 143 | true 144 | rescue StandardError 145 | print "Error: Something went wrong" 146 | false 147 | end 148 | end 149 | 150 | def load_data 151 | input = "" 152 | loop do 153 | print TTY::Cursor.save 154 | path = PROMPT_SYSTEM.readline("Enter the file path for the JSON file (press Enter to cancel): ") 155 | if check_file(path) 156 | input = path 157 | break 158 | else 159 | print TTY::Cursor.restore 160 | print TTY::Cursor.clear_screen_down 161 | end 162 | end 163 | print TTY::Cursor.save 164 | 165 | return if input.to_s == "" 166 | 167 | begin 168 | filepath = File.expand_path(input.strip) 169 | json = File.read(filepath) 170 | data = JSON.parse(json) 
171 | case @mode 172 | when :research 173 | self.class.name.downcase.split("::")[-1] 174 | 175 | raise unless data["mode"] == self.class.name.downcase.split("::")[-1] 176 | 177 | @messages = data.delete "messages" 178 | @template = @template.sub(/JSON:\n+```json\s*\{.+\}\s*```\n\n/m, "JSON:\n\n```json\n#{JSON.pretty_generate(data).strip}\n```\n\n") 179 | when :normal 180 | raise unless data["messages"] && data["messages"][0]["role"] 181 | 182 | @messages = data["messages"] 183 | end 184 | print "Data has been loaded successfully\n" 185 | true 186 | rescue StandardError 187 | print "The data structure is not valid for this app\n" 188 | false 189 | end 190 | end 191 | end 192 | -------------------------------------------------------------------------------- /lib/monadic_chat/open_ai.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: false 2 | 3 | require "http" 4 | require "oj" 5 | require "net/http" 6 | require "uri" 7 | require "strscan" 8 | require "tty-progressbar" 9 | 10 | Oj.mimic_JSON 11 | 12 | OPEN_TIMEOUT = 10 13 | RETRY_MAX_COUNT = 10 14 | RETRY_WAIT_TIME_SEC = 1 15 | 16 | module OpenAI 17 | def self.default_model(research_mode: false) 18 | if research_mode 19 | "gpt-4o-2024-08-06" 20 | else # normal mode 21 | "gpt-4o-mini" 22 | end 23 | end 24 | 25 | def self.model_to_method(model) 26 | res = { 27 | "gpt-3.5-turbo-0125" => "chat/completions", 28 | "gpt-3.5-turbo-1106" => "chat/completions", 29 | "gpt-3.5-turbo" => "chat/completions", 30 | "gpt-3.5-turbo-16k" => "chat/completions", 31 | "gpt-4o-mini-2024-07-18" => "chat/completions", 32 | "gpt-4o-mini" => "chat/completions", 33 | "gpt-4o" => "chat/completions", 34 | "chatgpt-4o-latest" => "chat/completions", 35 | "gpt-4o-2024-08-06" => "chat/completions", 36 | "gpt-4o-2024-05-13" => "chat/completions", 37 | "gpt-4-0125-preview" => "chat/completions", 38 | "gpt-4-turbo-preview" => "chat/completions", 39 | "gpt-4-1106-preview" => 
"chat/completions", 40 | "gpt-4" => "chat/completions", 41 | "gpt-4-0613" => "chat/completions", 42 | "gpt-4-32K" => "chat/completions" 43 | }[model] 44 | if res.nil? 45 | puts "" 46 | puts "=============================================================" 47 | puts "Model #{model} not found." 48 | puts "Maybe you are trying to use a model not available any more." 49 | puts "Check your monadic_chat.conf and try again." 50 | puts "=============================================================" 51 | puts "" 52 | exit 1 53 | end 54 | res 55 | end 56 | 57 | def self.query(access_token, mode, method, timeout_sec = 60, query = {}, &block) 58 | target_uri = "https://api.openai.com/v1/#{method}" 59 | headers = { 60 | "Content-Type" => "application/json", 61 | "Authorization" => "Bearer #{access_token}" 62 | } 63 | headers["Accept"] = "text/event-stream" if query["stream"] 64 | http = HTTP.headers(headers) 65 | 66 | timeout_settings = { 67 | connect: OPEN_TIMEOUT, 68 | write: timeout_sec, 69 | read: timeout_sec 70 | } 71 | 72 | case mode 73 | when "post" 74 | res = http.timeout(timeout_settings).post(target_uri, json: query) 75 | when "get" 76 | res = http.timeout(timeout_settings).get(target_uri) 77 | end 78 | 79 | if query["stream"] 80 | json = nil 81 | buffer = "" 82 | 83 | res.body.each do |chunk| 84 | break if /\Rdata: [DONE]\R/ =~ chunk 85 | 86 | buffer << chunk 87 | scanner = StringScanner.new(buffer) 88 | pattern = /data: (\{.*?\})(?=\n|\z)/m 89 | until scanner.eos? 
90 | matched = scanner.scan_until(pattern) 91 | if matched 92 | json_data = matched.match(pattern)[1] 93 | 94 | begin 95 | res = JSON.parse(json_data) 96 | choice = res.dig("choices", 0) || {} 97 | 98 | fragment = choice.dig("delta", "content").to_s 99 | next if !fragment || fragment == "" 100 | 101 | block&.call fragment 102 | 103 | json ||= res 104 | json["choices"][0]["text"] ||= +"" 105 | json["choices"][0]["text"] << fragment 106 | 107 | if choice["finish_reason"] == "length" || choice["finish_reason"] == "stop" 108 | finish = { "type" => "message", "content" => "DONE" } 109 | block&.call finish 110 | break 111 | end 112 | rescue JSON::ParserError 113 | res = { "type" => "error", "content" => "Error: JSON Parsing" } 114 | pp res 115 | block&.call res 116 | res 117 | end 118 | else 119 | buffer = scanner.rest 120 | break 121 | end 122 | end 123 | end 124 | json 125 | else 126 | begin 127 | JSON.parse res.body 128 | rescue JSON::ParserError 129 | res = { "type" => "error", "content" => "Error: JSON Parsing" } 130 | pp res 131 | block&.call res 132 | res 133 | end 134 | end 135 | rescue HTTP::Error, HTTP::TimeoutError 136 | if num_retrial < MAX_RETRIES 137 | num_retrial += 1 138 | sleep RETRY_DELAY 139 | retry 140 | else 141 | res = { "type" => "error", "content" => "Error: Timeout" } 142 | pp res 143 | block&.call res 144 | res 145 | end 146 | end 147 | 148 | def self.models(access_token) 149 | res = query(access_token, "get", "models") 150 | res.fetch("data", []).sort_by { |m| -m["created"] } 151 | end 152 | 153 | class Completion 154 | attr_reader :access_token 155 | 156 | def initialize(access_token) 157 | @access_token = access_token 158 | end 159 | 160 | def models 161 | OpenAI.models(@access_token) 162 | end 163 | 164 | def run(params, research_mode: false, timeout_sec: 60, num_retrials: 1, &block) 165 | method = OpenAI.model_to_method(params["model"]) 166 | 167 | response = OpenAI.query(@access_token, "post", method, timeout_sec, params, &block) 168 | 169 
| if response["error"] 170 | raise response["error"]["message"] 171 | elsif response["choices"][0]["finish_reason"] == "length" 172 | raise "finished because of length" 173 | end 174 | 175 | if research_mode 176 | get_json response["choices"][0]["text"] 177 | else 178 | response["choices"][0]["text"] 179 | end 180 | rescue StandardError => e 181 | case num_retrials 182 | when 0 183 | raise e 184 | else 185 | run(params, research_mode: research_mode, timeout_sec: timeout_sec, num_retrials: num_retrials - 1, &block) 186 | end 187 | end 188 | 189 | def get_json(data) 190 | case data 191 | when %r{\n*(\{.+?\})\n*}m 192 | json = Regexp.last_match(1).gsub(/\r\n?/, "\n").gsub(/\r\n/) { "\n" } 193 | res = JSON.parse(json) 194 | when /(\{.+\})/m 195 | json = Regexp.last_match(1).gsub(/\r\n?/, "\n").gsub(/\r\n/) { "\n" } 196 | res = JSON.parse(json) 197 | else 198 | res = data 199 | end 200 | res 201 | end 202 | 203 | def run_iteration(params, prompts, template, replace_key = "{{PROMPT}}", timeout_sec: 60, num_retrials: 0) 204 | bar = TTY::ProgressBar.new("[:bar] :current/:total :total_byte :percent ET::elapsed ETA::eta", 205 | total: prompts.size, 206 | bar_format: :box) 207 | bar.start 208 | json = "" 209 | prompts.each do |prompt| 210 | params["prompt"] = template.sub(replace_key, prompt) 211 | res = run(params, timeout_sec: timeout_sec, num_retrials: num_retrials) 212 | json = JSON.pretty_generate(get_json(res)) 213 | bar.advance(1) 214 | template = template.sub(/JSON:\n+```json.+?```\n\n/m, "JSON:\n\n```json\n#{json}\n```\n\n") 215 | end 216 | bar.finish 217 | JSON.parse(json) 218 | end 219 | end 220 | end 221 | -------------------------------------------------------------------------------- /lib/monadic_chat/internals.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | class MonadicApp 4 | ################################################## 5 | # methods for preparation and updating 6 | 
################################################## 7 | 8 | def count_tokens(text) 9 | MonadicChat.tokenize(text).size 10 | end 11 | 12 | def fulfill_placeholders 13 | input = nil 14 | replacements = [] 15 | mode = :replace 16 | 17 | @placeholders.each do |key, val| 18 | if key == "mode" 19 | mode = val 20 | next 21 | end 22 | 23 | input = if mode == :replace 24 | val 25 | else 26 | PROMPT_SYSTEM.readline("#{val}: ") 27 | end 28 | 29 | unless input 30 | replacements.clear 31 | break 32 | end 33 | replacements << [key, input] 34 | end 35 | if replacements.empty? 36 | false 37 | else 38 | replacements.each do |key, value| 39 | @messages[0]["content"].gsub!(key, value) 40 | messages[0]["content"] 41 | end 42 | true 43 | end 44 | end 45 | 46 | def objectify 47 | case @mode 48 | when :research 49 | m = /JSON:\n+```json\s*(\{.+\})\s*```\n\n/m.match(@template) 50 | json = m[1].gsub(/(?!\\\\\\)\\\\"/) { '\\\"' } 51 | res = JSON.parse(json) 52 | res["messages"] = @messages 53 | res 54 | when :normal 55 | @messages 56 | end 57 | end 58 | 59 | def prepare_params(input_role, input) 60 | params = @params.dup 61 | 62 | delimited_input = case input_role 63 | when "user" 64 | "NEW PROMPT: ###\n#{input}\n###" 65 | when "system" # i.e. 
search engine 66 | "SEARCH SNIPPETS: ###\n#{input}\n###" 67 | end 68 | 69 | case @mode 70 | when :research 71 | messages = +"" 72 | system = +"" 73 | @messages.each do |mes| 74 | role = mes["role"] 75 | content = mes["content"] 76 | case role 77 | when "system" 78 | system << "#{content}\n" if system == "" 79 | else 80 | messages << "- #{mes["role"].strip}: #{content}\n" 81 | end 82 | end 83 | 84 | delimited_messages = "MESSAGES: ###\n#{messages}\n###" 85 | template = @template.dup.sub("{{SYSTEM}}", system) 86 | .sub("{{PROMPT}}", delimited_input) 87 | .sub("{{MESSAGES}}", delimited_messages.strip) 88 | 89 | @template_tokens = count_tokens(template) 90 | 91 | File.open(TEMP_MD, "w") { |f| f.write template } 92 | 93 | @messages << { "role" => input_role, "content" => input } 94 | 95 | case @method 96 | when "completions" 97 | params["prompt"] = template 98 | when "chat/completions" 99 | params["messages"] = [{ "role" => "system", "content" => template }] 100 | end 101 | 102 | when :normal 103 | @messages << { "role" => input_role, "content" => input } 104 | params["messages"] = @messages 105 | end 106 | 107 | @update_proc.call unless input_role == "system" 108 | 109 | params 110 | end 111 | 112 | def update_template(res, role) 113 | case @mode 114 | when :research 115 | @metadata = res 116 | @messages << { "role" => role, "content" => @metadata["response"] } 117 | json = @metadata.to_json.strip 118 | File.open(TEMP_JSON, "w") { |f| f.write json } 119 | @template.sub!(/JSON:\n+```json.+```\n\n/m, "JSON:\n\n```json\n#{json}\n```\n\n") 120 | when :normal 121 | @messages << { "role" => "assistant", "content" => res } 122 | end 123 | end 124 | 125 | ################################################## 126 | # function to package plain text into a unit 127 | ################################################## 128 | 129 | def unit(input) 130 | if input.instance_of?(Hash) 131 | input 132 | else 133 | @metadata["response"] = input 134 | @metadata 135 | end 136 | end 137 | 138 | 
################################################## 139 | # function to bind data 140 | ################################################## 141 | 142 | def bind(input, role: "user", num_retrials: 0) 143 | case role 144 | when "user" 145 | @turns += 1 146 | when "system" # i.e. search engine 147 | input = "\n\n#{input}" 148 | end 149 | 150 | print PROMPT_ASSISTANT.prefix, "\n" 151 | params = prepare_params(role, input) 152 | research_mode = @mode == :research 153 | 154 | escaping = +"" 155 | finished = false 156 | 157 | res = @completion.run(params, 158 | research_mode: research_mode, 159 | timeout_sec: SETTINGS["timeout_sec"], 160 | num_retrials: num_retrials) do |chunk| 161 | if chunk.instance_of?(Hash) && chunk["content"] == "DONE" 162 | finished = true 163 | elsif chunk.instance_of?(String) && !finished 164 | if escaping 165 | chunk = escaping + chunk 166 | escaping = "" 167 | end 168 | 169 | if /(?:\\\z)/ =~ chunk 170 | escaping += chunk 171 | next 172 | else 173 | chunk = chunk.gsub('\\n') { "\n" } 174 | end 175 | 176 | print chunk 177 | end 178 | end 179 | 180 | print "\n" 181 | 182 | message = case role 183 | when "system" # i.e. search engine; the response given above should be by "assistant" 184 | { role: "assistant", content: @mode == :research ? unit(res) : res } 185 | when "user" # the response give above should be either "assistant" 186 | searched = use_tool(res) 187 | # but if the response is a search query, it should be by "system" (search engine) 188 | if searched 189 | @messages << { "role" => "assistant", 190 | "content" => @mode == :research ? unit(res)["response"] : res } 191 | if searched == "empty" 192 | print PROMPT_SYSTEM.prefix, "Search results are empty", "\n" 193 | return 194 | else 195 | bind(searched, role: "system") 196 | return 197 | end 198 | # otherwise, it should be by "assistant" 199 | else 200 | { role: "assistant", content: @mode == :researh ? 
unit(res) : res } 201 | end 202 | end 203 | 204 | update_template(message[:content], message[:role]) 205 | 206 | set_html if @html 207 | end 208 | 209 | ################################################## 210 | # function to have GPT use tools 211 | ################################################## 212 | 213 | def use_tool(res) 214 | case @mode 215 | when :normal 216 | text = res 217 | when :research 218 | text = res.is_a?(Hash) ? res["response"] : res 219 | end 220 | 221 | case text 222 | when /\bSEARCH_WIKI\("?(.+?)"?\)/m 223 | @wiki_search_cache ||= {} 224 | search_key = Regexp.last_match(1) 225 | wikipedia_search(search_key, @wiki_search_cache) 226 | when /\bSEARCH_WEB\("?(.+?)"?\)/m 227 | @web_search_cache ||= {} 228 | search_key = Regexp.last_match(1) 229 | bing_search(search_key, @web_searh_cache) 230 | else 231 | false 232 | end 233 | end 234 | end 235 | -------------------------------------------------------------------------------- /doc/img/state-monad.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 19 | 20 | 21 | 22 | result 3 23 | 24 | 25 | 26 | result 2 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | result 1 35 | 36 | 37 | 38 | 39 | s1 40 | 41 | 42 | 43 | 44 | process 1 45 | 46 | 47 | 48 | 49 | process 2 50 | 51 | 52 | 53 | state 54 | 55 | 56 | 57 | v1 58 | 59 | 60 | 61 | input 62 | 63 | 64 | 65 | 66 | 67 | state 68 | 69 | 70 | 71 | input 72 | 73 | 74 | 75 | 76 | input' 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | process n 86 | 87 | 88 | 89 | state 90 | 91 | 92 | 93 | input'' 94 | 95 | 96 | 97 | 98 | output 99 | 100 | 101 | 102 | 103 | 104 | state 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | state 119 | 120 | 121 | 122 | s2 123 | 124 | 125 | 126 | v2 127 | 128 | 129 | 130 | s3 131 | 132 | 133 | 134 | 135 | 136 | v3 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | bind >>= 149 | 150 | 151 | 152 | bind >>= 153 | 154 | 
-------------------------------------------------------------------------------- /assets/github.css: -------------------------------------------------------------------------------- 1 | @media (prefers-color-scheme: dark) { 2 | .markdown-body { 3 | color-scheme: dark; 4 | --color-prettylights-syntax-comment: #8b949e; 5 | --color-prettylights-syntax-constant: #79c0ff; 6 | --color-prettylights-syntax-entity: #d2a8ff; 7 | --color-prettylights-syntax-storage-modifier-import: #c9d1d9; 8 | --color-prettylights-syntax-entity-tag: #7ee787; 9 | --color-prettylights-syntax-keyword: #ff7b72; 10 | --color-prettylights-syntax-string: #a5d6ff; 11 | --color-prettylights-syntax-variable: #ffa657; 12 | --color-prettylights-syntax-brackethighlighter-unmatched: #f85149; 13 | --color-prettylights-syntax-invalid-illegal-text: #f0f6fc; 14 | --color-prettylights-syntax-invalid-illegal-bg: #8e1519; 15 | --color-prettylights-syntax-carriage-return-text: #f0f6fc; 16 | --color-prettylights-syntax-carriage-return-bg: #b62324; 17 | --color-prettylights-syntax-string-regexp: #7ee787; 18 | --color-prettylights-syntax-markup-list: #f2cc60; 19 | --color-prettylights-syntax-markup-heading: #1f6feb; 20 | --color-prettylights-syntax-markup-italic: #c9d1d9; 21 | --color-prettylights-syntax-markup-bold: #c9d1d9; 22 | --color-prettylights-syntax-markup-deleted-text: #ffdcd7; 23 | --color-prettylights-syntax-markup-deleted-bg: #67060c; 24 | --color-prettylights-syntax-markup-inserted-text: #aff5b4; 25 | --color-prettylights-syntax-markup-inserted-bg: #033a16; 26 | --color-prettylights-syntax-markup-changed-text: #ffdfb6; 27 | --color-prettylights-syntax-markup-changed-bg: #5a1e02; 28 | --color-prettylights-syntax-markup-ignored-text: #c9d1d9; 29 | --color-prettylights-syntax-markup-ignored-bg: #1158c7; 30 | --color-prettylights-syntax-meta-diff-range: #d2a8ff; 31 | --color-prettylights-syntax-brackethighlighter-angle: #8b949e; 32 | --color-prettylights-syntax-sublimelinter-gutter-mark: #484f58; 33 | 
--color-prettylights-syntax-constant-other-reference-link: #a5d6ff; 34 | --color-fg-default: #c9d1d9; 35 | --color-fg-muted: #8b949e; 36 | --color-fg-subtle: #484f58; 37 | --color-canvas-default: #0d1117; 38 | --color-canvas-subtle: #161b22; 39 | --color-border-default: #30363d; 40 | --color-border-muted: #21262d; 41 | --color-neutral-muted: rgba(110,118,129,0.4); 42 | --color-accent-fg: #58a6ff; 43 | --color-accent-emphasis: #1f6feb; 44 | --color-attention-subtle: rgba(187,128,9,0.15); 45 | --color-danger-fg: #f85149; 46 | } 47 | } 48 | 49 | @media (prefers-color-scheme: light) { 50 | .markdown-body { 51 | color-scheme: light; 52 | --color-prettylights-syntax-comment: #6e7781; 53 | --color-prettylights-syntax-constant: #0550ae; 54 | --color-prettylights-syntax-entity: #8250df; 55 | --color-prettylights-syntax-storage-modifier-import: #24292f; 56 | --color-prettylights-syntax-entity-tag: #116329; 57 | --color-prettylights-syntax-keyword: #cf222e; 58 | --color-prettylights-syntax-string: #0a3069; 59 | --color-prettylights-syntax-variable: #953800; 60 | --color-prettylights-syntax-brackethighlighter-unmatched: #82071e; 61 | --color-prettylights-syntax-invalid-illegal-text: #f6f8fa; 62 | --color-prettylights-syntax-invalid-illegal-bg: #82071e; 63 | --color-prettylights-syntax-carriage-return-text: #f6f8fa; 64 | --color-prettylights-syntax-carriage-return-bg: #cf222e; 65 | --color-prettylights-syntax-string-regexp: #116329; 66 | --color-prettylights-syntax-markup-list: #3b2300; 67 | --color-prettylights-syntax-markup-heading: #0550ae; 68 | --color-prettylights-syntax-markup-italic: #24292f; 69 | --color-prettylights-syntax-markup-bold: #24292f; 70 | --color-prettylights-syntax-markup-deleted-text: #82071e; 71 | --color-prettylights-syntax-markup-deleted-bg: #FFEBE9; 72 | --color-prettylights-syntax-markup-inserted-text: #116329; 73 | --color-prettylights-syntax-markup-inserted-bg: #dafbe1; 74 | --color-prettylights-syntax-markup-changed-text: #953800; 75 | 
--color-prettylights-syntax-markup-changed-bg: #ffd8b5; 76 | --color-prettylights-syntax-markup-ignored-text: #eaeef2; 77 | --color-prettylights-syntax-markup-ignored-bg: #0550ae; 78 | --color-prettylights-syntax-meta-diff-range: #8250df; 79 | --color-prettylights-syntax-brackethighlighter-angle: #57606a; 80 | --color-prettylights-syntax-sublimelinter-gutter-mark: #8c959f; 81 | --color-prettylights-syntax-constant-other-reference-link: #0a3069; 82 | --color-fg-default: #24292f; 83 | --color-fg-muted: #57606a; 84 | --color-fg-subtle: #6e7781; 85 | --color-canvas-default: #ffffff; 86 | --color-canvas-subtle: #f6f8fa; 87 | --color-border-default: #d0d7de; 88 | --color-border-muted: hsla(210,18%,87%,1); 89 | --color-neutral-muted: rgba(175,184,193,0.2); 90 | --color-accent-fg: #0969da; 91 | --color-accent-emphasis: #0969da; 92 | --color-attention-subtle: #fff8c5; 93 | --color-danger-fg: #cf222e; 94 | } 95 | } 96 | 97 | .markdown-body { 98 | -ms-text-size-adjust: 100%; 99 | -webkit-text-size-adjust: 100%; 100 | margin: 0; 101 | color: var(--color-fg-default); 102 | background-color: var(--color-canvas-default); 103 | font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji"; 104 | font-size: 16px; 105 | line-height: 1.5; 106 | word-wrap: break-word; 107 | } 108 | 109 | .markdown-body .octicon { 110 | display: inline-block; 111 | fill: currentColor; 112 | vertical-align: text-bottom; 113 | } 114 | 115 | .markdown-body h1:hover .anchor .octicon-link:before, 116 | .markdown-body h2:hover .anchor .octicon-link:before, 117 | .markdown-body h3:hover .anchor .octicon-link:before, 118 | .markdown-body h4:hover .anchor .octicon-link:before, 119 | .markdown-body h5:hover .anchor .octicon-link:before, 120 | .markdown-body h6:hover .anchor .octicon-link:before { 121 | width: 16px; 122 | height: 16px; 123 | content: ' '; 124 | display: inline-block; 125 | background-color: currentColor; 126 | -webkit-mask-image: 
url("data:image/svg+xml,"); 127 | mask-image: url("data:image/svg+xml,"); 128 | } 129 | 130 | .markdown-body details, 131 | .markdown-body figcaption, 132 | .markdown-body figure { 133 | display: block; 134 | } 135 | 136 | .markdown-body summary { 137 | display: list-item; 138 | } 139 | 140 | .markdown-body [hidden] { 141 | display: none !important; 142 | } 143 | 144 | .markdown-body a { 145 | background-color: transparent; 146 | color: var(--color-accent-fg); 147 | text-decoration: none; 148 | } 149 | 150 | .markdown-body a:active, 151 | .markdown-body a:hover { 152 | outline-width: 0; 153 | } 154 | 155 | .markdown-body abbr[title] { 156 | border-bottom: none; 157 | text-decoration: underline dotted; 158 | } 159 | 160 | .markdown-body b, 161 | .markdown-body strong { 162 | font-weight: 600; 163 | } 164 | 165 | .markdown-body dfn { 166 | font-style: italic; 167 | } 168 | 169 | .markdown-body h1 { 170 | margin: .67em 0; 171 | font-weight: 600; 172 | padding-bottom: .3em; 173 | font-size: 2em; 174 | border-bottom: 1px solid var(--color-border-muted); 175 | } 176 | 177 | .markdown-body mark { 178 | background-color: var(--color-attention-subtle); 179 | color: var(--color-text-primary); 180 | } 181 | 182 | .markdown-body small { 183 | font-size: 90%; 184 | } 185 | 186 | .markdown-body sub, 187 | .markdown-body sup { 188 | font-size: 75%; 189 | line-height: 0; 190 | position: relative; 191 | vertical-align: baseline; 192 | } 193 | 194 | .markdown-body sub { 195 | bottom: -0.25em; 196 | } 197 | 198 | .markdown-body sup { 199 | top: -0.5em; 200 | } 201 | 202 | .markdown-body img { 203 | border-style: none; 204 | max-width: 100%; 205 | box-sizing: content-box; 206 | background-color: var(--color-canvas-default); 207 | } 208 | 209 | .markdown-body code, 210 | .markdown-body kbd, 211 | .markdown-body pre, 212 | .markdown-body samp { 213 | font-family: monospace,monospace; 214 | font-size: 1em; 215 | } 216 | 217 | .markdown-body figure { 218 | margin: 1em 40px; 219 | } 220 | 
221 | .markdown-body hr { 222 | box-sizing: content-box; 223 | overflow: hidden; 224 | background: transparent; 225 | border-bottom: 1px solid var(--color-border-muted); 226 | height: .25em; 227 | padding: 0; 228 | margin: 24px 0; 229 | background-color: var(--color-border-default); 230 | border: 0; 231 | } 232 | 233 | .markdown-body input { 234 | font: inherit; 235 | margin: 0; 236 | overflow: visible; 237 | font-family: inherit; 238 | font-size: inherit; 239 | line-height: inherit; 240 | } 241 | 242 | .markdown-body [type=button], 243 | .markdown-body [type=reset], 244 | .markdown-body [type=submit] { 245 | -webkit-appearance: button; 246 | } 247 | 248 | .markdown-body [type=button]::-moz-focus-inner, 249 | .markdown-body [type=reset]::-moz-focus-inner, 250 | .markdown-body [type=submit]::-moz-focus-inner { 251 | border-style: none; 252 | padding: 0; 253 | } 254 | 255 | .markdown-body [type=button]:-moz-focusring, 256 | .markdown-body [type=reset]:-moz-focusring, 257 | .markdown-body [type=submit]:-moz-focusring { 258 | outline: 1px dotted ButtonText; 259 | } 260 | 261 | .markdown-body [type=checkbox], 262 | .markdown-body [type=radio] { 263 | box-sizing: border-box; 264 | padding: 0; 265 | } 266 | 267 | .markdown-body [type=number]::-webkit-inner-spin-button, 268 | .markdown-body [type=number]::-webkit-outer-spin-button { 269 | height: auto; 270 | } 271 | 272 | .markdown-body [type=search] { 273 | -webkit-appearance: textfield; 274 | outline-offset: -2px; 275 | } 276 | 277 | .markdown-body [type=search]::-webkit-search-cancel-button, 278 | .markdown-body [type=search]::-webkit-search-decoration { 279 | -webkit-appearance: none; 280 | } 281 | 282 | .markdown-body ::-webkit-input-placeholder { 283 | color: inherit; 284 | opacity: .54; 285 | } 286 | 287 | .markdown-body ::-webkit-file-upload-button { 288 | -webkit-appearance: button; 289 | font: inherit; 290 | } 291 | 292 | .markdown-body a:hover { 293 | text-decoration: underline; 294 | } 295 | 296 | 
.markdown-body hr::before { 297 | display: table; 298 | content: ""; 299 | } 300 | 301 | .markdown-body hr::after { 302 | display: table; 303 | clear: both; 304 | content: ""; 305 | } 306 | 307 | .markdown-body table { 308 | border-spacing: 0; 309 | border-collapse: collapse; 310 | display: block; 311 | width: max-content; 312 | max-width: 100%; 313 | overflow: auto; 314 | } 315 | 316 | .markdown-body td, 317 | .markdown-body th { 318 | padding: 0; 319 | } 320 | 321 | .markdown-body details summary { 322 | cursor: pointer; 323 | } 324 | 325 | .markdown-body details:not([open])>*:not(summary) { 326 | display: none !important; 327 | } 328 | 329 | .markdown-body kbd { 330 | display: inline-block; 331 | padding: 3px 5px; 332 | font: 11px ui-monospace,SFMono-Regular,SF Mono,Menlo,Consolas,Liberation Mono,monospace; 333 | line-height: 10px; 334 | color: var(--color-fg-default); 335 | vertical-align: middle; 336 | background-color: var(--color-canvas-subtle); 337 | border: solid 1px var(--color-neutral-muted); 338 | border-bottom-color: var(--color-neutral-muted); 339 | border-radius: 6px; 340 | box-shadow: inset 0 -1px 0 var(--color-neutral-muted); 341 | } 342 | 343 | .markdown-body h1, 344 | .markdown-body h2, 345 | .markdown-body h3, 346 | .markdown-body h4, 347 | .markdown-body h5, 348 | .markdown-body h6 { 349 | margin-top: 24px; 350 | margin-bottom: 16px; 351 | font-weight: 600; 352 | line-height: 1.25; 353 | } 354 | 355 | .markdown-body h2 { 356 | font-weight: 600; 357 | padding-bottom: .3em; 358 | font-size: 1.5em; 359 | border-bottom: 1px solid var(--color-border-muted); 360 | } 361 | 362 | .markdown-body h3 { 363 | font-weight: 600; 364 | font-size: 1.25em; 365 | } 366 | 367 | .markdown-body h4 { 368 | font-weight: 600; 369 | font-size: 1em; 370 | } 371 | 372 | .markdown-body h5 { 373 | font-weight: 600; 374 | font-size: .875em; 375 | } 376 | 377 | .markdown-body h6 { 378 | font-weight: 600; 379 | font-size: .85em; 380 | color: var(--color-fg-muted); 381 | } 382 
| 383 | .markdown-body p { 384 | margin-top: 0; 385 | margin-bottom: 10px; 386 | } 387 | 388 | .markdown-body blockquote { 389 | margin: 0; 390 | padding: 0 1em; 391 | color: var(--color-fg-muted); 392 | border-left: .25em solid var(--color-border-default); 393 | } 394 | 395 | .markdown-body ul, 396 | .markdown-body ol { 397 | margin-top: 0; 398 | margin-bottom: 0; 399 | padding-left: 2em; 400 | } 401 | 402 | .markdown-body ol ol, 403 | .markdown-body ul ol { 404 | list-style-type: lower-roman; 405 | } 406 | 407 | .markdown-body ul ul ol, 408 | .markdown-body ul ol ol, 409 | .markdown-body ol ul ol, 410 | .markdown-body ol ol ol { 411 | list-style-type: lower-alpha; 412 | } 413 | 414 | .markdown-body dd { 415 | margin-left: 0; 416 | } 417 | 418 | .markdown-body tt, 419 | .markdown-body code { 420 | font-family: ui-monospace,SFMono-Regular,SF Mono,Menlo,Consolas,Liberation Mono,monospace; 421 | font-size: 12px; 422 | } 423 | 424 | .markdown-body pre { 425 | margin-top: 0; 426 | margin-bottom: 0; 427 | font-family: ui-monospace,SFMono-Regular,SF Mono,Menlo,Consolas,Liberation Mono,monospace; 428 | font-size: 12px; 429 | word-wrap: normal; 430 | } 431 | 432 | .markdown-body .octicon { 433 | display: inline-block; 434 | overflow: visible !important; 435 | vertical-align: text-bottom; 436 | fill: currentColor; 437 | } 438 | 439 | .markdown-body ::placeholder { 440 | color: var(--color-fg-subtle); 441 | opacity: 1; 442 | } 443 | 444 | .markdown-body input::-webkit-outer-spin-button, 445 | .markdown-body input::-webkit-inner-spin-button { 446 | margin: 0; 447 | -webkit-appearance: none; 448 | appearance: none; 449 | } 450 | 451 | .markdown-body .pl-c { 452 | color: var(--color-prettylights-syntax-comment); 453 | } 454 | 455 | .markdown-body .pl-c1, 456 | .markdown-body .pl-s .pl-v { 457 | color: var(--color-prettylights-syntax-constant); 458 | } 459 | 460 | .markdown-body .pl-e, 461 | .markdown-body .pl-en { 462 | color: var(--color-prettylights-syntax-entity); 463 | } 
464 | 465 | .markdown-body .pl-smi, 466 | .markdown-body .pl-s .pl-s1 { 467 | color: var(--color-prettylights-syntax-storage-modifier-import); 468 | } 469 | 470 | .markdown-body .pl-ent { 471 | color: var(--color-prettylights-syntax-entity-tag); 472 | } 473 | 474 | .markdown-body .pl-k { 475 | color: var(--color-prettylights-syntax-keyword); 476 | } 477 | 478 | .markdown-body .pl-s, 479 | .markdown-body .pl-pds, 480 | .markdown-body .pl-s .pl-pse .pl-s1, 481 | .markdown-body .pl-sr, 482 | .markdown-body .pl-sr .pl-cce, 483 | .markdown-body .pl-sr .pl-sre, 484 | .markdown-body .pl-sr .pl-sra { 485 | color: var(--color-prettylights-syntax-string); 486 | } 487 | 488 | .markdown-body .pl-v, 489 | .markdown-body .pl-smw { 490 | color: var(--color-prettylights-syntax-variable); 491 | } 492 | 493 | .markdown-body .pl-bu { 494 | color: var(--color-prettylights-syntax-brackethighlighter-unmatched); 495 | } 496 | 497 | .markdown-body .pl-ii { 498 | color: var(--color-prettylights-syntax-invalid-illegal-text); 499 | background-color: var(--color-prettylights-syntax-invalid-illegal-bg); 500 | } 501 | 502 | .markdown-body .pl-c2 { 503 | color: var(--color-prettylights-syntax-carriage-return-text); 504 | background-color: var(--color-prettylights-syntax-carriage-return-bg); 505 | } 506 | 507 | .markdown-body .pl-sr .pl-cce { 508 | font-weight: bold; 509 | color: var(--color-prettylights-syntax-string-regexp); 510 | } 511 | 512 | .markdown-body .pl-ml { 513 | color: var(--color-prettylights-syntax-markup-list); 514 | } 515 | 516 | .markdown-body .pl-mh, 517 | .markdown-body .pl-mh .pl-en, 518 | .markdown-body .pl-ms { 519 | font-weight: bold; 520 | color: var(--color-prettylights-syntax-markup-heading); 521 | } 522 | 523 | .markdown-body .pl-mi { 524 | font-style: italic; 525 | color: var(--color-prettylights-syntax-markup-italic); 526 | } 527 | 528 | .markdown-body .pl-mb { 529 | font-weight: bold; 530 | color: var(--color-prettylights-syntax-markup-bold); 531 | } 532 | 533 | 
.markdown-body .pl-md { 534 | color: var(--color-prettylights-syntax-markup-deleted-text); 535 | background-color: var(--color-prettylights-syntax-markup-deleted-bg); 536 | } 537 | 538 | .markdown-body .pl-mi1 { 539 | color: var(--color-prettylights-syntax-markup-inserted-text); 540 | background-color: var(--color-prettylights-syntax-markup-inserted-bg); 541 | } 542 | 543 | .markdown-body .pl-mc { 544 | color: var(--color-prettylights-syntax-markup-changed-text); 545 | background-color: var(--color-prettylights-syntax-markup-changed-bg); 546 | } 547 | 548 | .markdown-body .pl-mi2 { 549 | color: var(--color-prettylights-syntax-markup-ignored-text); 550 | background-color: var(--color-prettylights-syntax-markup-ignored-bg); 551 | } 552 | 553 | .markdown-body .pl-mdr { 554 | font-weight: bold; 555 | color: var(--color-prettylights-syntax-meta-diff-range); 556 | } 557 | 558 | .markdown-body .pl-ba { 559 | color: var(--color-prettylights-syntax-brackethighlighter-angle); 560 | } 561 | 562 | .markdown-body .pl-sg { 563 | color: var(--color-prettylights-syntax-sublimelinter-gutter-mark); 564 | } 565 | 566 | .markdown-body .pl-corl { 567 | text-decoration: underline; 568 | color: var(--color-prettylights-syntax-constant-other-reference-link); 569 | } 570 | 571 | .markdown-body [data-catalyst] { 572 | display: block; 573 | } 574 | 575 | .markdown-body g-emoji { 576 | font-family: "Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol"; 577 | font-size: 1em; 578 | font-style: normal !important; 579 | font-weight: 400; 580 | line-height: 1; 581 | vertical-align: -0.075em; 582 | } 583 | 584 | .markdown-body g-emoji img { 585 | width: 1em; 586 | height: 1em; 587 | } 588 | 589 | .markdown-body::before { 590 | display: table; 591 | content: ""; 592 | } 593 | 594 | .markdown-body::after { 595 | display: table; 596 | clear: both; 597 | content: ""; 598 | } 599 | 600 | .markdown-body>*:first-child { 601 | margin-top: 0 !important; 602 | } 603 | 604 | .markdown-body>*:last-child { 605 
| margin-bottom: 0 !important; 606 | } 607 | 608 | .markdown-body a:not([href]) { 609 | color: inherit; 610 | text-decoration: none; 611 | } 612 | 613 | .markdown-body .absent { 614 | color: var(--color-danger-fg); 615 | } 616 | 617 | .markdown-body .anchor { 618 | float: left; 619 | padding-right: 4px; 620 | margin-left: -20px; 621 | line-height: 1; 622 | } 623 | 624 | .markdown-body .anchor:focus { 625 | outline: none; 626 | } 627 | 628 | .markdown-body p, 629 | .markdown-body blockquote, 630 | .markdown-body ul, 631 | .markdown-body ol, 632 | .markdown-body dl, 633 | .markdown-body table, 634 | .markdown-body pre, 635 | .markdown-body details { 636 | margin-top: 0; 637 | margin-bottom: 16px; 638 | } 639 | 640 | .markdown-body blockquote>:first-child { 641 | margin-top: 0; 642 | } 643 | 644 | .markdown-body blockquote>:last-child { 645 | margin-bottom: 0; 646 | } 647 | 648 | .markdown-body sup>a::before { 649 | content: "["; 650 | } 651 | 652 | .markdown-body sup>a::after { 653 | content: "]"; 654 | } 655 | 656 | .markdown-body h1 .octicon-link, 657 | .markdown-body h2 .octicon-link, 658 | .markdown-body h3 .octicon-link, 659 | .markdown-body h4 .octicon-link, 660 | .markdown-body h5 .octicon-link, 661 | .markdown-body h6 .octicon-link { 662 | color: var(--color-fg-default); 663 | vertical-align: middle; 664 | visibility: hidden; 665 | } 666 | 667 | .markdown-body h1:hover .anchor, 668 | .markdown-body h2:hover .anchor, 669 | .markdown-body h3:hover .anchor, 670 | .markdown-body h4:hover .anchor, 671 | .markdown-body h5:hover .anchor, 672 | .markdown-body h6:hover .anchor { 673 | text-decoration: none; 674 | } 675 | 676 | .markdown-body h1:hover .anchor .octicon-link, 677 | .markdown-body h2:hover .anchor .octicon-link, 678 | .markdown-body h3:hover .anchor .octicon-link, 679 | .markdown-body h4:hover .anchor .octicon-link, 680 | .markdown-body h5:hover .anchor .octicon-link, 681 | .markdown-body h6:hover .anchor .octicon-link { 682 | visibility: visible; 683 | } 
684 | 685 | .markdown-body h1 tt, 686 | .markdown-body h1 code, 687 | .markdown-body h2 tt, 688 | .markdown-body h2 code, 689 | .markdown-body h3 tt, 690 | .markdown-body h3 code, 691 | .markdown-body h4 tt, 692 | .markdown-body h4 code, 693 | .markdown-body h5 tt, 694 | .markdown-body h5 code, 695 | .markdown-body h6 tt, 696 | .markdown-body h6 code { 697 | padding: 0 .2em; 698 | font-size: inherit; 699 | } 700 | 701 | .markdown-body ul.no-list, 702 | .markdown-body ol.no-list { 703 | padding: 0; 704 | list-style-type: none; 705 | } 706 | 707 | .markdown-body ol[type="1"] { 708 | list-style-type: decimal; 709 | } 710 | 711 | .markdown-body ol[type=a] { 712 | list-style-type: lower-alpha; 713 | } 714 | 715 | .markdown-body ol[type=i] { 716 | list-style-type: lower-roman; 717 | } 718 | 719 | .markdown-body div>ol:not([type]) { 720 | list-style-type: decimal; 721 | } 722 | 723 | .markdown-body ul ul, 724 | .markdown-body ul ol, 725 | .markdown-body ol ol, 726 | .markdown-body ol ul { 727 | margin-top: 0; 728 | margin-bottom: 0; 729 | } 730 | 731 | .markdown-body li>p { 732 | margin-top: 16px; 733 | } 734 | 735 | .markdown-body li+li { 736 | margin-top: .25em; 737 | } 738 | 739 | .markdown-body dl { 740 | padding: 0; 741 | } 742 | 743 | .markdown-body dl dt { 744 | padding: 0; 745 | margin-top: 16px; 746 | font-size: 1em; 747 | font-style: italic; 748 | font-weight: 600; 749 | } 750 | 751 | .markdown-body dl dd { 752 | padding: 0 16px; 753 | margin-bottom: 16px; 754 | } 755 | 756 | .markdown-body table th { 757 | font-weight: 600; 758 | } 759 | 760 | .markdown-body table th, 761 | .markdown-body table td { 762 | padding: 6px 13px; 763 | border: 1px solid var(--color-border-default); 764 | } 765 | 766 | .markdown-body table tr { 767 | background-color: var(--color-canvas-default); 768 | border-top: 1px solid var(--color-border-muted); 769 | } 770 | 771 | .markdown-body table tr:nth-child(2n) { 772 | background-color: var(--color-canvas-subtle); 773 | } 774 | 775 | 
.markdown-body table img { 776 | background-color: transparent; 777 | } 778 | 779 | .markdown-body img[align=right] { 780 | padding-left: 20px; 781 | } 782 | 783 | .markdown-body img[align=left] { 784 | padding-right: 20px; 785 | } 786 | 787 | .markdown-body .emoji { 788 | max-width: none; 789 | vertical-align: text-top; 790 | background-color: transparent; 791 | } 792 | 793 | .markdown-body span.frame { 794 | display: block; 795 | overflow: hidden; 796 | } 797 | 798 | .markdown-body span.frame>span { 799 | display: block; 800 | float: left; 801 | width: auto; 802 | padding: 7px; 803 | margin: 13px 0 0; 804 | overflow: hidden; 805 | border: 1px solid var(--color-border-default); 806 | } 807 | 808 | .markdown-body span.frame span img { 809 | display: block; 810 | float: left; 811 | } 812 | 813 | .markdown-body span.frame span span { 814 | display: block; 815 | padding: 5px 0 0; 816 | clear: both; 817 | color: var(--color-fg-default); 818 | } 819 | 820 | .markdown-body span.align-center { 821 | display: block; 822 | overflow: hidden; 823 | clear: both; 824 | } 825 | 826 | .markdown-body span.align-center>span { 827 | display: block; 828 | margin: 13px auto 0; 829 | overflow: hidden; 830 | text-align: center; 831 | } 832 | 833 | .markdown-body span.align-center span img { 834 | margin: 0 auto; 835 | text-align: center; 836 | } 837 | 838 | .markdown-body span.align-right { 839 | display: block; 840 | overflow: hidden; 841 | clear: both; 842 | } 843 | 844 | .markdown-body span.align-right>span { 845 | display: block; 846 | margin: 13px 0 0; 847 | overflow: hidden; 848 | text-align: right; 849 | } 850 | 851 | .markdown-body span.align-right span img { 852 | margin: 0; 853 | text-align: right; 854 | } 855 | 856 | .markdown-body span.float-left { 857 | display: block; 858 | float: left; 859 | margin-right: 13px; 860 | overflow: hidden; 861 | } 862 | 863 | .markdown-body span.float-left span { 864 | margin: 13px 0 0; 865 | } 866 | 867 | .markdown-body span.float-right { 868 
| display: block; 869 | float: right; 870 | margin-left: 13px; 871 | overflow: hidden; 872 | } 873 | 874 | .markdown-body span.float-right>span { 875 | display: block; 876 | margin: 13px auto 0; 877 | overflow: hidden; 878 | text-align: right; 879 | } 880 | 881 | .markdown-body code, 882 | .markdown-body tt { 883 | padding: .2em .4em; 884 | margin: 0; 885 | font-size: 85%; 886 | background-color: var(--color-neutral-muted); 887 | border-radius: 6px; 888 | } 889 | 890 | .markdown-body code br, 891 | .markdown-body tt br { 892 | display: none; 893 | } 894 | 895 | .markdown-body del code { 896 | text-decoration: inherit; 897 | } 898 | 899 | .markdown-body pre code { 900 | font-size: 100%; 901 | } 902 | 903 | .markdown-body pre>code { 904 | padding: 0; 905 | margin: 0; 906 | word-break: normal; 907 | white-space: pre; 908 | background: transparent; 909 | border: 0; 910 | } 911 | 912 | .markdown-body .highlight { 913 | margin-bottom: 16px; 914 | } 915 | 916 | .markdown-body .highlight pre { 917 | margin-bottom: 0; 918 | word-break: normal; 919 | } 920 | 921 | .markdown-body .highlight pre, 922 | .markdown-body pre { 923 | padding: 16px; 924 | overflow: auto; 925 | font-size: 85%; 926 | line-height: 1.45; 927 | background-color: var(--color-canvas-subtle); 928 | border-radius: 6px; 929 | } 930 | 931 | .markdown-body pre code, 932 | .markdown-body pre tt { 933 | display: inline; 934 | max-width: auto; 935 | padding: 0; 936 | margin: 0; 937 | overflow: visible; 938 | line-height: inherit; 939 | word-wrap: normal; 940 | background-color: transparent; 941 | border: 0; 942 | } 943 | 944 | .markdown-body .csv-data td, 945 | .markdown-body .csv-data th { 946 | padding: 5px; 947 | overflow: hidden; 948 | font-size: 12px; 949 | line-height: 1; 950 | text-align: left; 951 | white-space: nowrap; 952 | } 953 | 954 | .markdown-body .csv-data .blob-num { 955 | padding: 10px 8px 9px; 956 | text-align: right; 957 | background: var(--color-canvas-default); 958 | border: 0; 959 | } 960 | 
961 | .markdown-body .csv-data tr { 962 | border-top: 0; 963 | } 964 | 965 | .markdown-body .csv-data th { 966 | font-weight: 600; 967 | background: var(--color-canvas-subtle); 968 | border-top: 0; 969 | } 970 | 971 | .markdown-body .footnotes { 972 | font-size: 12px; 973 | color: var(--color-fg-muted); 974 | border-top: 1px solid var(--color-border-default); 975 | } 976 | 977 | .markdown-body .footnotes ol { 978 | padding-left: 16px; 979 | } 980 | 981 | .markdown-body .footnotes li { 982 | position: relative; 983 | } 984 | 985 | .markdown-body .footnotes li:target::before { 986 | position: absolute; 987 | top: -8px; 988 | right: -8px; 989 | bottom: -8px; 990 | left: -24px; 991 | pointer-events: none; 992 | content: ""; 993 | border: 2px solid var(--color-accent-emphasis); 994 | border-radius: 6px; 995 | } 996 | 997 | .markdown-body .footnotes li:target { 998 | color: var(--color-fg-default); 999 | } 1000 | 1001 | .markdown-body .footnotes .data-footnote-backref g-emoji { 1002 | font-family: monospace; 1003 | } 1004 | 1005 | .markdown-body .task-list-item { 1006 | list-style-type: none; 1007 | } 1008 | 1009 | .markdown-body .task-list-item label { 1010 | font-weight: 400; 1011 | } 1012 | 1013 | .markdown-body .task-list-item.enabled label { 1014 | cursor: pointer; 1015 | } 1016 | 1017 | .markdown-body .task-list-item+.task-list-item { 1018 | margin-top: 3px; 1019 | } 1020 | 1021 | .markdown-body .task-list-item .handle { 1022 | display: none; 1023 | } 1024 | 1025 | .markdown-body .task-list-item-checkbox { 1026 | margin: 0 .2em .25em -1.6em; 1027 | vertical-align: middle; 1028 | } 1029 | 1030 | .markdown-body .contains-task-list:dir(rtl) .task-list-item-checkbox { 1031 | margin: 0 -1.6em .25em .2em; 1032 | } 1033 | 1034 | .markdown-body ::-webkit-calendar-picker-indicator { 1035 | filter: invert(50%); 1036 | } 1037 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 |

 

2 | 3 |

4 | 5 |

Highly configurable CLI client app for OpenAI chat/text-completion API

6 | 7 |

 

8 | 9 | > **Note** 10 | > 11 | > **Monadic Chat** is currently being actively developed as a web browser application. The command-line version has been renamed to **Monadic Chat CLI**. New features will mainly be implemented in the web application version. At present, both versions have many common features, but please note that the format of the exported data files and the specifications for creating custom applications are different from each other. 12 | 13 | - Monadic Chat: [https://github.com/yohasebe/monadic-chat](https://github.com/yohasebe/monadic-chat) (active) 14 | - Monadic Chat CLI: [https://github.com/yohasebe/monadic-chat-cli](https://github.com/yohasebe/monadic-chat-cli) (this repo; less active) 15 | 16 |

 

17 | 18 |

19 | 20 |

21 | 22 |

23 | 24 |

25 | 26 |

 

27 | 28 | **Change Log** 29 | 30 | - [August 27, 2024] `gpt-4o-2024-08-06` set as the default model for `research` mode again 31 | - [August 15, 2024] `chatgpt-4o-latest` set as the default model for `research` mode 32 | - [July 19, 2024] `gpt-4o-mini` set as the default model for `normal` mode 33 | - [May 13, 2024] `gpt-4o` set as the default model for both `normal` and `research` modes 34 | - [February 9, 2024] Minor update; default model changed to `gpt-3.5-turbo-0125` 35 | - [November 10, 2023] Stability improvement; default model changed to `gpt-3.5-turbo` 36 | - [October 07, 2023] Stability improvement; default model changed to `gpt-3.5-turbo-0613` 37 | - [June 11, 2023] The repository renamed to `monadic-chat-cli` 38 | - [April 05, 2023] `Wikipedia` app added (experimental) 39 | - [April 05, 2023] `monadic-chat new/del app_name` command 40 | - [April 02, 2023] Architecture refined here and there 41 | - [March 26, 2023] Command line options to directly run individual apps 42 | - [March 24, 2023] `Research` mode now supports chat API in addition to text-completion API 43 | - [March 21, 2023] GPT-4 models supported (in `normal` mode) 44 | - [March 20, 2023] Text and figure in "How the research mode works" section updated 45 | - [March 13, 2023] Text on the architecture of the `research` mode updated in accordance with Version 0.2.0 46 | 47 | ## Introduction 48 | 49 | **Monadic Chat** is a user-friendly command-line client application that utilizes OpenAI’s Text Completion API and Chat API to facilitate ChatGPT-style conversations with OpenAI’s large language models (LLM) on any terminal application of your choice. 50 | 51 | The conversation history can be saved in a JSON file, which can be loaded later to continue the conversation. Additionally, the conversation data can be converted to HTML and viewed in a web browser for better readability.
52 | 53 | Monadic Chat includes four pre-built apps (`Chat`, `Code`, `Novel`, and `Translate`) that are designed for different types of discourse through interactive conversation with the LLM. Users also have the option to create their own apps by writing new templates. 54 | 55 | Monadic Chat's `normal` mode enables ChatGPT-like conversations on the command line. The `research` mode has a mechanism to handle various related information as "state" behind the conversation. This allows you, for example, to retrieve the current conversation *topic* at each utterance turn, and to keep its development as a list. 56 | 57 | ## Dependencies 58 | 59 | - Ruby 2.6.10 or greater 60 | - OpenAI API Token 61 | - A command line terminal app such as: 62 | - Terminal or [iTerm2](https://iterm2.com/) (MacOS) 63 | - [Alacritty](https://alacritty.org/) (Multi-platform) 64 | - [Windows Terminal](https://apps.microsoft.com/store/detail/windows-terminal) (Windows) 65 | - GNOME Terminal (Linux) 66 | 67 | > **Note on Using Monadic Chat on Windows** 68 | > Monadic Chat does not support running on Windows, but you can install and use a Linux distribution on WSL2. Or you can use it without WSL2 by following these steps: 69 | > 70 | > 1. install Windows Terminal 71 | > 2. install [Git Bash](https://gitforwindows.org/) (make sure to check the `Install profile for Windows Terminal` checkbox) 72 | > 3. install Ruby with [Ruby Installer](https://rubyinstaller.org/) 73 | > 4. Open Windows Terminal with Git Bash profile and follow the instructions below. 74 | 75 | ## Installation 76 | 77 | ### Using RubyGems 78 | 79 | Execute the following command in an environment where Ruby 2.6.10 or higher is installed.
80 | 81 | ```text 82 | gem install monadic-chat 83 | ``` 84 | 85 | Then run the command to start the app: 86 | ```text 87 | monadic-chat 88 | ``` 89 | 90 | To update: 91 | 92 | ```text 93 | gem update monadic-chat 94 | ``` 95 | 96 | ### Clone the GitHub Repository 97 | 98 | Alternatively, clone the code from the GitHub repository and follow the steps below. 99 | 100 | 1. Clone the repo 101 | 102 | ```text 103 | git clone https://github.com/yohasebe/monadic-chat.git 104 | ``` 105 | 106 | 2. Install dependencies 107 | 108 | ```text 109 | cd monadic-chat 110 | bundle update 111 | ``` 112 | 113 | 3. Grant permission to the executable 114 | 115 | ```text 116 | chmod +x ./bin/monadic-chat 117 | ``` 118 | 119 | 4. Run the executable 120 | 121 | ```text 122 | ./bin/monadic-chat 123 | ``` 124 | 125 | ## Usage 126 | 127 | ### Authentication 128 | 129 | When you start Monadic Chat with the `monadic-chat` command for the first time, you will be asked for an OpenAI access token. If you do not have one, create an account on the [OpenAI](https://platform.openai.com/) website and obtain an access token. 130 | 131 |
132 | 133 | 134 | 135 |
136 | 137 | Once the correct access token is verified, the access token is saved in the configuration file below and will automatically be used the next time the app is started. 138 | 139 | `$HOME/monadic_chat.conf` 140 | 141 | ### Main Menu 142 | 143 | Upon successful authentication, a menu to select a specific app will appear. Each app generates different types of text through an interactive chat-style conversation between the user and the AI. Four apps are available by default: [`chat`](#chat), [`code`](#code), [`novel`](#novel), and [`translate`](#translate). 144 | 145 | Selecting the `mode` menu item allows you to change the [modes](#modes) from `normal` to `research` and vice versa. 146 | 147 | Selecting `readme` will take you to the README on the GitHub repository (the document you are looking at now). Selecting `quit` will exit Monadic Chat. 148 | 149 |
150 | 151 | 152 | 153 |
154 | 155 | In the main menu, you can use the cursor keys and the enter key to make a selection. You can also narrow down the choices each time you type a letter. 156 | 157 | ### Direct Commands 158 | 159 | The following commands can be entered to start each app directly on the command line, without using the main menu. 160 | 161 | ``` 162 | monadic-chat 163 | ``` 164 | 165 | Each of the four standard applications can be launched as follows. When launched, an interactive chat interface appears. 166 | 167 | ``` 168 | monadic-chat chat 169 | monadic-chat code 170 | monadic-chat novel 171 | monadic-chat translate 172 | ``` 173 | 174 | You can also give text input directly to each app in the following format and get only a response to it (without starting the interactive chat interface) 175 | 176 | ``` 177 | monadic-chat 178 | ``` 179 | 180 | ### Roles 181 | 182 | Each message in the conversation is labeled with one of three roles: `User`, `GPT`, or `System`. 183 | 184 | - `User`: messages from the user of the Monadic Chat app (that's you!) 185 | - `GPT`: messages from the OpenAI large-scale language model 186 | - `System`: messages from the Monadic Chat system 187 | 188 | ### System-Wide Functions 189 | 190 | You can call up the function menu anytime. To invoke the function menu, type `help` or `menu`. 191 | 192 |
193 | 194 | 195 | 196 |
197 | 198 | In the function menu, you can use the cursor keys and the enter key to make a selection. You can also narrow down the choices each time you type a letter. Some functions are given multiple names, so typing on the keyboard quickly locates the necessary function. 199 | 200 | **params/settings/config** 201 | 202 | You can set parameters to be sent to OpenAI's APIs. The items that can be set are listed below. 203 | 204 | - `model` 205 | - `max_tokens` 206 | - `temperature` 207 | - `top_p` 208 | - `frequency_penalty` 209 | - `presence_penalty` 210 | 211 | For detailed information on each parameter, please refer to OpenAI's [API Documentation](https://platform.openai.com/docs/). The default value of each parameter depends on the individual "mode" and "app." 212 | 213 | **data/context** 214 | 215 | In `normal` mode, this function only displays the conversation history between User and GPT. In `research` mode, metadata (e.g., topics, language being used, number of turns) values are presented. In addition to the metadata returned in the API response, the approximate number of tokens in the current template is also displayed. 216 | 217 | Program code in the conversation history will be syntax highlighted (if possible). The same applies to output via the `html` command available from the function menu. 218 | 219 | **html** 220 | 221 | All the information retrievable by running the `data/context` function can be presented in HTML. The HTML file is automatically opened in the default web browser. 222 | 223 | The generated HTML will be saved in the user’s home directory (`$HOME`) with the file `monadic_chat.html`. Once the `html` command is executed, the file contents will continue to be updated until you `reset` or quit the running app. Reload the browser tab or rerun the `html` command to show the latest data. HTML data is written to this file regardless of the app. 
224 | 225 | **reset** 226 | 227 | You can reset all the conversation history (messages by both User and GPT). Note that API parameter settings will be reset to default as well. 228 | 229 | **save and load** 230 | 231 | The conversation history (messages by both User and GPT, and metadata in `research` mode) can be saved as a JSON file in a specified path. Note that the saved file can only be read by the same application that saved it in the `research` mode. 232 | 233 | **clear/clean** 234 | 235 | Selecting this, you can scroll and clear the screen so that the cursor is at the top. 236 | 237 | **readme/documentation** 238 | 239 | The README page on the GitHub repository (the document you are looking at now) will be opened. 240 | 241 | **exit/bye/quit** 242 | 243 | Selecting this will exit the current app and return to the main menu. 244 | 245 | ## Apps 246 | 247 | ### Chat 248 | 249 | Monadic Chat's `chat` app is the most basic and generic app among others offered by default. 250 | 251 |
252 | 253 | 254 | 255 | 256 | 257 |
258 | 259 | In the `chat` app, OpenAI's large-scale language model acts as a competent assistant that can do anything. It can write computer code, create fiction and poetry texts, and translate texts from one language into another. Of course, it can also engage in casual or academic discussions on specific topics. 260 | 261 | - [basic template for `chat`](https://github.com/yohasebe/monadic-chat/blob/main/apps/chat/chat.json) 262 | - [extra template for `chat` in `research` mode](https://github.com/yohasebe/monadic-chat/blob/main/apps/chat/chat.md) 263 | 264 | 265 | ### Code 266 | 267 | Monadic Chat's `code` is designed to be an app that can write computer code for you. 268 | 269 |
270 | 271 | 272 | 273 | 274 | 275 |
276 | 277 | In the `code` app, OpenAI's GPT behaves as a competent software engineer. The main difference from the `chat` app is that the `temperature` parameter is set to `0.0` so that as little randomness as possible is introduced to the responses. 278 | 279 | - [basic template for `code`](https://github.com/yohasebe/monadic-chat/blob/main/apps/code/code.json) 280 | - [extra template for `code` in `research` mode](https://github.com/yohasebe/monadic-chat/blob/main/apps/code/code.md) 281 | 282 | ### Novel 283 | 284 | Monadic Chat's `novel` is designed to help you develop novel plots; the app instructs OpenAI's GPT model to write text based on a topic, theme, or brief description of an event indicated in the user prompt. Each new response is based on what was generated in previous responses. The interactive nature of the app allows the user to control the plot development rather than having an AI agent create a new novel all at once. 285 | 286 | - [basic template for `novel`](https://github.com/yohasebe/monadic-chat/blob/main/apps/novel/novel.json) 287 | - [extra template for `novel` in `research` mode](https://github.com/yohasebe/monadic-chat/blob/main/apps/novel/novel.md) 288 | 289 | ### Translate 290 | 291 | Monadic Chat's `translate` is an app that helps translate text written in one language into another. Rather than translating the entire text simultaneously, the app allows users to work sentence by sentence or paragraph by paragraph. 292 | 293 | The preferred translation for a given expression can be specified in a pair of parentheses ( ) right after the original expression in the source text. 294 | 295 |
296 | 297 | 298 | 299 |
300 | 301 | Sometimes, however, problematic translations are created. The user can "save" the set of source and target texts and make any necessary corrections. The same unwanted expressions can be prevented or avoided later by providing the corrected translation data to the app. 302 | 303 | - [basic template for `translate`](https://github.com/yohasebe/monadic-chat/blob/main/apps/translate/translate.json) 304 | - [extra template for `translate` in `research` mode](https://github.com/yohasebe/monadic-chat/blob/main/apps/translate/translate.md) 305 | 306 | ## Modes 307 | 308 | Monadic Chat has two modes. The `normal` mode utilizes OpenAI's chat API to achieve ChatGPT-like functionality. It is suitable for using a large language model as a competent companion for various pragmatic purposes. On the other hand, the `research` mode supports both OpenAI's chat API and text-completion (instruction) API. This mode allows for acquiring **metadata** in the background while receiving the primary response at each conversation turn. It may be especially useful for researchers exploring the possibilities of large-scale language models and their applications. 309 | 310 | ### Normal Mode 311 | 312 | The current default language model for `normal` mode is `gpt-4o-mini`. 313 | 314 | In the default configuration, the dialogue messages are reduced after ten turns by deleting the oldest ones (but not the messages that the `system` role has given as instructions). 315 | 316 | ### Research Mode 317 | 318 | The current default language model for `research` mode is `gpt-4o-2024-08-06`. 319 | 320 | In `research` mode, the conversation between the user and the large-scale language model is accomplished with a mechanism that tracks the conversation history in a monadic structure. In the default configuration, the dialogue messages are reduced after ten turns by deleting the oldest ones (but not the messages that the `system` role has given as instructions).
321 | 322 | If you wish to specify how the conversation history is handled as the interaction with the GPT model unfolds, you can write a `Proc` object containing Ruby code. Since various metadata are available in this mode, finer-grained control is possible. 323 | 324 | See the next section for more details about `research` mode. 325 | 326 | ## What is Research Mode? 327 | 328 | Monadic Chat's `research` mode has the following advantages: 329 | 330 | - In `research` mode, each turn of the conversation can capture **metadata** as well as the **primary responses** 331 | - You can define the **accumulator** and **reducer** mechanism and control the **flow** of the conversation 332 | - It has structural features that mimic the **monadic** nature of natural language discourse 333 | 334 | There are some drawbacks, however: 335 | 336 | - Templates for `research` mode are larger and more complex, requiring more effort to create and fine-tune. 337 | - `Research` mode requires more extensive input/output data and consumes more tokens than `normal` mode. 338 | - In `research` mode, responses are returned in JSON with metadata. This may make it somewhat unsuitable for casual conversation. 339 | - In `research` mode, the response is returned in JSON with metadata. This may seem a bit visually complex. 340 | 341 | For these reasons, `normal` mode is recommended for casual use as an alternative CLI to ChatGPT. Nevertheless, as described below, the research mode makes Monadic Chat definitively different from other GPT client applications. 342 | 343 | ### How Research Mode Works 344 | 345 | The following is a schematic of the process flow in the `research` mode. 346 | 347 |
348 | 349 | 350 | 351 |
352 | 353 | ### Accumulator 354 | 355 | `Normal` mode uses OpenAI's chat API, where the following basic structure is used for conversation history management. 356 | 357 | ```json 358 | {"messages": [ 359 | {"role": "system", "content": "You are a friendly but professional consultant who answers various questions ... "}, 360 | {"role": "user", "content": "Can I ask something?"}, 361 | {"role": "assistant", "content": "Sure!"} 362 | ]} 363 | ``` 364 | 365 | The accumulator in `research` mode also looks like this. 366 | 367 | The conversation history is kept entirely in memory until the reducer mechanism modifies it (or the running app is terminated or reset by the user). 368 | 369 | ### Reducer 370 | 371 | The reducer mechanism must be implemented in Ruby code for each application. In many cases, it is sufficient to keep the size of the accumulator within a specific range by deleting old messages when a certain number of conversation turns are reached. Other possible implementations include the following. 372 | 373 | **Example 1** 374 | 375 | - Retrieve the current conversation topic as metadata at each turn and delete old exchanges if the conversation topic has changed. 376 | - The metadata about the conversation topic can be retained in list form even if old messages are deleted. 377 | 378 | **Example 2** 379 | 380 | - After a certain number of turns, the reducer writes the history of the conversation up to that point to an external file and deletes it from the accumulator. 381 | - A summary of the deleted content is returned to the accumulator as an annotation message by the `system`, and the conversation continues with that summary information as context. 
382 | 383 | A sample Ruby implementation of the "reducer" mechanism for each default app can be found below: 384 | 385 | - [`apps/chat/chat.rb`](https://github.com/yohasebe/monadic-chat/blob/main/apps/chat/chat.rb) 386 | - [`apps/code/code.rb`](https://github.com/yohasebe/monadic-chat/blob/main/apps/code/code.rb) 387 | - [`apps/novel/novel.rb`](https://github.com/yohasebe/monadic-chat/blob/main/apps/novel/novel.rb) 388 | - [`apps/translate/translate.rb`](https://github.com/yohasebe/monadic-chat/blob/main/apps/translate/translate.rb) 389 | 390 | ## Creating New App 391 | 392 | This section describes how users can create their own original Monadic Chat apps. 393 | 394 | As an example, let us create an app named `linguistic`. It will do the following on the user input all at once: 395 | 396 | - Return the result of syntactic parsing of the input as a primary response. 397 | - Classify syntactic types of the input ("declarative," "interrogative," "imperative," "exclamatory," etc.) 398 | - Perform sentiment analysis of the input ("happy," "sad," "troubled," etc.) 399 | - Write text summarizing all the user input up to that point. 400 | 401 | The specifications for Monadic Chat's command-line user interface for this app are as follows. 402 | 403 | - The syntax structure corresponding to the user input is returned as a response. 404 | - Parsed data will be formatted in Penn Treebank format. However, square brackets [ ] are used instead of parentheses ( ). 405 | - The parsed data is returned as Markdown inline code enclosed in backticks \` \`. 406 | 407 | > **Note** 408 | > The use of square brackets (instead of parentheses) in the notation of syntactic analysis here is to conform to the format of [RSyntaxTree](https://yohasebe.com/rsyntaxtree), a tree-drawing program for linguistic research developed by the author of Monadic Chat.
409 | > 410 | > 411 | 412 | Below is a sample HTML displaying the conversation (paris of an input sentence and its syntactic structure) and metadata. 413 | 414 |
415 | 416 | 417 | 418 |
419 | 420 | ### File Structure 421 | 422 | New Monadic Chat apps must be placed inside the `user_apps` folder. Experimental apps `wikipedia` and `linguistic` are also in this folder. `boilerplates` folder and its contents do not constitute an app; these files are copied when a new app is created. 423 | 424 | ```text 425 | user_apps 426 | ├── boilerplates 427 | │ ├── boilerplate.json 428 | │ ├── boilerplate.md 429 | │ └── boilerplate.rb 430 | ├── wikipedia 431 | │ ├── wikipedia.json 432 | │ ├── wikipedia.md 433 | │ └── wikipedia.rb 434 | └─── linguistic 435 | ├── linguistic.json 436 | ├── linguistic.md 437 | └── linguistic.rb 438 | ``` 439 | 440 | Notice in the figure above that three files with the same name but different extensions (`.rb`, `.json`, and `.md`) are stored under each of the four default app folders. 441 | 442 | The following command will create a new folder and the three files within it using this naming convention. 443 | 444 | ``` 445 | monadic-chat new app_name 446 | ``` 447 | 448 | If you feel like removing an app that you have created before, run: 449 | 450 | ``` 451 | monadic-chat del app_name 452 | ``` 453 | 454 | Let's assume we are creating a new application `linguistic`. In fact, an app with the same name already exists, so this is just for illustrative purposes. Anyway, running `monadic-chat new linguistic` generates the following three files inside `linguistic` folder. 455 | 456 | - `linguistic.rb`: Ruby code to define the "reducer" 457 | - `linguistic.json`: JSON template describing GPT's basic behavior in `normal` and `research` modes 458 | - `linguistic.md`: Markdown template describing the specifications of the "metadata" to be captured in research mode. 459 | 460 | ### Reducer Code 461 | 462 | We do not need to make the reducer do anything special for the current purposes. So, let's copy the code from the default `chat` app and make a minor modification, such as changing the class name and the app name so that it matches the app name. 
We save it as `user_apps/linguistic/linguistic.rb`. 463 | 464 | ### Basic Template 465 | 466 | In `normal` mode, achieving all the necessary functions shown earlier is impossible or very tough, to say the least. All we do here is display the results of syntactic analysis and define a user interface. Create a JSON file `user_apps/linguistic/linguistic.json` and save it with the following contents: 467 | 468 | ```json 469 | {"messages": [ 470 | {"role": "system", 471 | "content": "You are a syntactic parser for natural languages. Analyze the given input sentence from the user and execute a syntactic parsing. Give your response in a variation of the penn treebank format, but use brackets [ ] instead of parentheses ( ). Also, give your response in a markdown code span."}, 472 | {"role": "user", "content": "\"We saw a beautiful sunset.\""}, 473 | {"role": "assistant", 474 | "content": "`[S [NP We] [VP [V saw] [NP [Det a] [N' [Adj beautiful] [N sunset] ] ] ] ]`"}, 475 | {"role": "user", "content": "\"We didn't take a picture.\"" }, 476 | {"role": "assistant", 477 | "content": "`[S [NP We] [IP [I didn't] [VP [V take] [NP [Det a] [N picture] ] ] ] ]`"} 478 | ]} 479 | ``` 480 | 481 | The data structure here is no different from that specified in [OpenAI Chat API](https://platform.openai.com/docs/guides/chat). The `normal` mode of Monadic Chat is just a client application that uses this API to achieve ChatGPT-like functionality on the command line. 482 | 483 | ### Extra Template for `Research` Mode 484 | 485 | In the `research` mode, you can obtain metadata at each turn as you progress through an interactive conversation with GPT. Compressing and modifying the conversation history based on the metadata (or any other data) is also possible. However, you must create an extra template besides the `normal` mode JSON template. 486 | 487 | This extra template for `research` mode is a Markdown file comprising six sections.
The role and content of each section are shown in the following figure. 488 | 489 |
490 | 491 | 492 | 493 |
494 | 495 | Below we will look at this extra template for `research` mode of the `linguistic` app, section by section. 496 | 497 | **Main Section** 498 | 499 |
{{SYSTEM}}
500 | 
501 | Create a response to "NEW PROMPT" from the user and set your response to the "response" property of the JSON object shown below. The preceding conversation is stored in "MESSAGES". In "MESSAGES", "assistant" refers to you.
502 | 503 | Monadic Chat automatically replaces `{{SYSTEM}}` with the message from the `system` role when the template is sent via API. However, the above text also includes a few additional paragraphs, including the one instructing the response from GPT to be presented as a JSON object. 504 | 505 | **New Prompt** 506 | 507 | ```markdown 508 | {{PROMPT}} 509 | ``` 510 | 511 | Monadic Chat replaces `{{PROMPT}}` with input from the user when sending the template through the API. 512 | 513 | **Messages** 514 | 515 | ```markdown 516 | {{MESSAGES}} 517 | ``` 518 | 519 | Monadic Chat replaces `{{MESSAGES}}` with messages from past conversations when sending the template. Note that not all the past messages always have to be copied here: the reducer mechanism could select, modify, or even "generate" messages and include them instead. 520 | 521 | **JSON Object** 522 | 523 | ```json 524 | { 525 | "mode": "linguistic", 526 | "response": "`[S [NP We] [VP [V didn't] [VP [V have] [NP [Det a] [N camera] ] ] ] ]`\n\n###\n\n", 527 | "sentence_type": ["declarative"], 528 | "sentiment": ["sad"], 529 | "summary": "The user saw a beautiful sunset, but did not take a picture because the user did not have a camera." 530 | } 531 | ``` 532 | 533 | This is the core of the extra template for `research` mode. 534 | 535 | Note that the extra template is written in Markdown format, so the above JSON object is actually separated from the rest of the template as a [fenced code block](https://www.markdownguide.org/extended-syntax/#fenced-code-blocks). 536 | 537 | The required properties of this JSON object are `mode` and `response`. Other properties are optional. The `mode` property is used to check the app name when saving the conversation data or loading from an external file. 538 | 539 | The JSON object in the `research` mode template is saved in the user’s home directory (`$HOME`) with the file `monadic_chat.json`. The content is overwritten every time the JSON object is updated.
Note that this JSON file is created for logging purposes . Modifying its content does not affect the processes carried out by the app. 540 | 541 | **Content Requirements** 542 | 543 | ```markdown 544 | Make sure the following content requirements are all fulfilled: 545 | 546 | - keep the value of the "mode" property at "linguistic" 547 | - set the new prompt to the "prompt" property 548 | - create your response to the new prompt based on "MESSAGES" and set it to "response" 549 | - analyze the new prompt's sentence type and set a sentence type value such as "interrogative", "imperative", "exclamatory", or "declarative" to the "sentence_type" property 550 | - analyze the new prompt's sentiment and set one or more sentiment types such as "happy", "excited", "troubled", "upset", or "sad" to the "sentiment" property 551 | - summarize the user's messages so far and update the "summary" property with a text of fewer than 100 words using as many discourse markers such as "because", "therefore", "but", and "so" to show the logical connection between the events. 552 | - increment the value of "turns" by 1 553 | ``` 554 | 555 | Note that all the properties of the JSON object are mentioned here so that GPT can update them accordingly. 556 | 557 | **Formal Requirements** 558 | 559 | ```markdown 560 | Make sure the following formal requirements are all fulfilled: 561 | 562 | - do not use invalid characters in the JSON object 563 | - escape double quotes and other special characters in the text values in the resulting JSON object 564 | - check the validity of the generated JSON object and correct any possible parsing problems before returning it 565 | 566 | Wrap the JSON object with "\n" and "\n". 567 | ``` 568 | 569 | This section details the format of the response returned through the API. JSON is essentially text data, and some characters must be escaped appropriately. 
570 | 571 | To ensure that a valid JSON object is retrieved, Monadic Chat requires `...` tags to enclose the whole JSON data. Due to its importance, this formal requirement is described as an independent sentence rather than in list form. 572 | 573 | ## What is Monadic about Monadic Chat? 574 | 575 | A monad is a type of data structure in functional programming (leaving aside for the moment the notion of the monad in mathematical category theory). An element with a monadic structure can be manipulated in a certain way to change its internal data. However, no matter how much the internal data changes, the external structure of the monadic process remains the same and can be manipulated in the same way as it was at first. 576 | 577 | Many such monadic processes surround us, and natural language discourse is one of them. A "chat" between a human user and an AI agent can be thought of as a form of natural language discourse. If so, an application that provides an interactive interface to a large-scale language model would most naturally be designed with the monadic nature of natural language discourse in mind. 578 | 579 | ### Unit, Map, and Join 580 | 581 | Many “functional” programming languages, such as Haskell, have monads as a core feature. However, Monadic Chat is developed using the Ruby programming language, which does not. This is because, with Ruby, it will be easier for users to write their apps. Ruby is not classified as a "functional language" per se. Still, Monadic Chat has the following three features required of a monad, and in this sense, this program can be considered "monadic." 
582 | 583 | - **unit**: a monadic process has a means of taking data and enclosing it in a monadic structure 584 | - **map**: a monadic process has a means of performing some operation on the data inside a monadic structure and returning the result in a monadic structure 585 | - **join**: a monadic process has a means of flattening a structure with multiple monadic layers into a single monadic layer 586 | 587 | ### Discourse Management Object 588 | 589 | In Monadic Chat's `research` mode, the discourse management object described in JSON serves as an environment to keep a conversation going between the user and the large language model. Any sample/past interaction data can be wrapped inside such an environment (***unit***). 590 | 591 | The interaction between the user and the AI can be interpreted as an operation on the *discourse world* built in the previous conversational exchanges. Monadic Chat updates the discourse world by retrieving the conversation history embedded in the template and performing operations responding to user input (***map***). 592 | 593 | In Monadic Chat, responses from OpenAI's language model APIs (chat API and text completion API) are also returned in the same JSON format. The main response content of the conversation is wrapped within this environment. If the entire JSON object were treated as a conversational response to user input, the discourse management object would become a large nested structure with many layers. Therefore, Monadic Chat extracts only the necessary values from the response object and reconstructs the (single-layer) discourse management object using them (***join***). 594 |
595 | 596 | 597 | 598 |
599 | 600 | Thus, the architecture of the `research` mode of Monadic Chat, with its ability to generate and manage metadata properties inside the monadic structure, is parallel to the architecture of natural language discourse in general: both can be seen as a kind of "state monad" (Hasebe 2021). 601 | ## Future Plans 602 | 603 | - Refactoring the current implementation code into `unit`, `map`, and `flatten` 604 | - More test cases to verify command line user interaction behavior 605 | - Improved error handling mechanism to catch incorrect responses from GPT 606 | - Develop a DSL to define templates in a more efficient and systematic manner 607 | - Develop scaffolding capabilities to build new apps quickly 608 | 609 | ## Bibliographical Data 610 | 611 | When referring to monadic chat and its underlying concepts, please refer to one of the following entries: 612 | 613 | ``` 614 | @inproceedings{hasebe_2023j, 615 | author = {長谷部陽一郎}, 616 | title = {Monadic Chat:テキスト補完APIで文脈を保持するためのフレームワーク}, 617 | booktitle = {言語処理学会第29回年次大会発表論文集}, 618 | url = {https://www.anlp.jp/proceedings/annual_meeting/2023/pdf_dir/Q12-9.pdf}, 619 | year = {2023}, 620 | pages = {3138--3143} 621 | } 622 | 623 | @inproceedings{hasebe_2023e, 624 | author = {Yoichiro Hasebe}, 625 | title = {Monadic Chat: Framework for managing context with text completion API}, 626 | booktitle = {Proceedings of the 29th annual meeting of the Association for Natural Language Processing}, 627 | url = {https://www.anlp.jp/proceedings/annual_meeting/2023/pdf_dir/Q12-9.pdf}, 628 | year = {2023}, 629 | pages = {3138--3143} 630 | } 631 | 632 | @phdthesis{hasebe_2021, 633 | author = {Yoichiro Hasebe}, 634 | title = {An Integrated Approach to Discourse Connectives as Grammatical Constructions}, 635 | school = {Kyoto University}, 636 | url = {https://repository.kulib.kyoto-u.ac.jp/dspace/bitstream/2433/261627/2/dnink00969.pdf}, 637 | year = {2021} 638 | } 639 | 640 | ``` 641 | 642 | ## Acknowledgments 643 | 644 | This work was
partially supported by JSPS KAKENHI Grant Number JP18K00670. 645 | 646 | ## Contributing 647 | 648 | Bug reports and pull requests are welcome on GitHub at [https://github.com/yohasebe/monadic_chat](https://github.com/yohasebe/monadic_chat). 649 | 650 | ## Author 651 | 652 | Yoichiro HASEBE 653 | 654 | [yohasebe@gmail.com](mailto:yohasebe@gmail.com) 655 | 656 | ## License 657 | 658 | The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT). 659 | 660 | --------------------------------------------------------------------------------