├── Python └── src │ ├── GeAI.egg-info │ ├── top_level.txt │ ├── dependency_links.txt │ ├── SOURCES.txt │ └── PKG-INFO │ ├── GeAI │ ├── _utils │ │ ├── __init__.py │ │ ├── _image_to_data_uri.py │ │ └── _moderation_openai.py │ ├── __init__.py │ ├── txt │ │ ├── __init__.py │ │ ├── txt_default.py │ │ ├── explain_code.py │ │ ├── fix_grammar.py │ │ ├── optimize_code.py │ │ └── image.py │ ├── available_models.py │ ├── connect.py │ └── txt.py │ ├── setup.cfg │ ├── PKG-INFO │ └── setup.py ├── R └── src │ ├── R │ ├── genai.google.chat.history.export.R │ ├── genai.google.chat.history.reset.R │ ├── genai.openai.chat.history.export.R │ ├── genai.moonshot.chat.history.export.R │ ├── imports.R │ ├── genai.openai.chat.history.reset.R │ ├── genai.google.chat.history.save.R │ ├── genai.openai.chat.history.save.R │ ├── genai.moonshot.chat.history.save.R │ ├── genai.utils.R │ ├── genai.moonshot.chat.history.reset.R │ ├── genai.openai.chat.history.import.R │ ├── genai.moonshot.chat.history.import.R │ ├── genai.google.chat.history.import.R │ ├── genai.moonshot.chat.history.convert.R │ ├── genai.openai.chat.history.convert.R │ ├── genai.openai.chat.history.print.R │ ├── genai.google.chat.history.print.R │ ├── genai.moonshot.chat.history.print.R │ ├── available.models.R │ ├── genai.google.chat.history.convert.R │ ├── chat.history.reset.R │ ├── chat.history.export.R │ ├── chat.history.import.R │ ├── chat.history.save.R │ ├── chat.history.print.R │ ├── chat.history.convert.R │ ├── genai.moonshot.txt.R │ ├── genai.openai.img.R │ ├── genai.google.class.R │ ├── genai.google.txt.R │ ├── genai.moonshot.class.R │ ├── genai.openai.txt.R │ ├── genai.moonshot.R │ ├── genai.google.R │ ├── genai.moonshot.chat.R │ ├── genai.openai.class.R │ ├── genai.openai.R │ ├── img.R │ ├── genai.google.txt.image.R │ ├── genai.moonshot.utils.R │ ├── genai.google.chat.R │ ├── genai.openai.chat.R │ ├── genai.moonshot.chat.edit.R │ ├── genai.openai.txt.image.R │ ├── genai.google.chat.edit.R │ ├── genai.openai.chat.edit.R │ ├── genai.google.utils.R │ ├── genai.openai.utils.R │ └── txt.image.R │ ├── NAMESPACE │ ├── DESCRIPTION │ └── man │ ├── available.models.Rd │ ├── chat.history.reset.Rd │ ├── chat.history.export.Rd │ ├── chat.history.import.Rd │ ├── chat.history.save.Rd │ ├── chat.history.print.Rd │ ├── chat.history.convert.Rd │ ├── genai.moonshot.Rd │ ├── genai.google.Rd │ ├── genai.openai.Rd │ ├── img.Rd │ ├── txt.image.Rd │ ├── txt.Rd │ └── chat.Rd └── README.md /Python/src/GeAI.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | GeAI 2 | -------------------------------------------------------------------------------- /Python/src/GeAI.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Python/src/GeAI/_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # GeAI/utils/__init__.py -------------------------------------------------------------------------------- /Python/src/setup.cfg: -------------------------------------------------------------------------------- 1 | [egg_info] 2 | tag_build = 3 | tag_date = 0 4 | 5 | -------------------------------------------------------------------------------- /Python/src/GeAI/__init__.py: -------------------------------------------------------------------------------- 1 | from .available_models import available_models 2 | from .connect import connect 
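# Hypothetical usage sketch (the key, provider, and model names below are
# placeholders, not guaranteed values; signatures taken from connect.py and
# available_models.py in this package):
#   import GeAI
#   models = GeAI.available_models()   # fetches the supported-model list
#   client = GeAI.connect(provider="openai", model="gpt-4",
#                         version="v1", api="sk-...", proxy=False)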
3 | from .txt import txt -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.history.export.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.history.export = function(genai.google.object) { 3 | return (genai.google.object$chat.history$contents) 4 | } 5 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.history.reset.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.history.reset = function(genai.google.object) { 3 | genai.google.object$chat.history$contents = list() 4 | } 5 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.history.export.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.history.export = function(genai.openai.object) { 3 | return (genai.openai.object$chat.history$messages) 4 | } 5 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.history.export.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.history.export = function(genai.moonshot.object) { 3 | return (genai.moonshot.object$chat.history$messages) 4 | } 5 | -------------------------------------------------------------------------------- /R/src/R/imports.R: -------------------------------------------------------------------------------- 1 | #' @import R6 2 | #' @importFrom jsonlite toJSON 3 | #' @importFrom httr GET POST add_headers content 4 | #' @importFrom listenv listenv 5 | #' @importFrom magrittr %>% 6 | #' @importFrom magick image_read 7 | #' @importFrom ggplotify as.ggplot 8 | NULL 9 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.history.reset.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.history.reset = function(genai.openai.object) { 3 | genai.openai.object$chat.history$messages = list( 4 | list( 5 | role = "system", 6 | content = "You are a helpful assistant." 
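      # A reset keeps only OpenAI's default system message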
7 | ) 8 | ) 9 | } 10 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.history.save.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.history.save = function(genai.google.object, 3 | file.name) { 4 | write(jsonlite::toJSON(genai.google.object$chat.history$contents, pretty = TRUE), 5 | paste0(file.name, ".json")) 6 | } 7 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.history.save.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.history.save = function(genai.openai.object, 3 | file.name) { 4 | write(jsonlite::toJSON(genai.openai.object$chat.history$messages, pretty = TRUE), 5 | paste0(file.name, ".json")) 6 | } 7 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.history.save.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.history.save = function(genai.moonshot.object, 3 | file.name) { 4 | write(jsonlite::toJSON(genai.moonshot.object$chat.history$messages, pretty = TRUE), 5 | paste0(file.name, ".json")) 6 | } 7 | -------------------------------------------------------------------------------- /Python/src/GeAI/txt/__init__.py: -------------------------------------------------------------------------------- 1 | from .explain_code import explain_code 2 | from .fix_grammar import fix_grammar 3 | from .image import image 4 | from .optimize_code import optimize_code 5 | from .txt_default import txt_default 6 | 7 | txt = txt_default 8 | txt.explain_code = explain_code 9 | txt.fix_grammar = fix_grammar 10 | txt.image = image 11 | txt.optimize_code = optimize_code -------------------------------------------------------------------------------- /Python/src/GeAI/available_models.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | def available_models(): 5 | response = requests.get("https://genai.gd.edu.kg/model.json") 6 | if response.status_code == 200: 7 | json_data = response.json() 8 | return json_data 9 | else: 10 | print(f"Failed to fetch data. 
Status code: {response.status_code}") 11 | return None -------------------------------------------------------------------------------- /R/src/R/genai.utils.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | image.to.data.uri = function(image.path) { 3 | image.data = "" 4 | if (grepl("^https?://", tolower(image.path))) { 5 | response = httr::GET(image.path) 6 | image.data = base64enc::base64encode(httr::content(response, type = "raw")) 7 | } else { 8 | image.data = base64enc::base64encode(readBin(image.path, "raw", file.info(image.path)$size)) 9 | } 10 | return(c(tools::file_ext(image.path), image.data)) 11 | } 12 | -------------------------------------------------------------------------------- /Python/src/GeAI.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | setup.py 2 | GeAI/__init__.py 3 | GeAI/available_models.py 4 | GeAI/connect.py 5 | GeAI/txt.py 6 | GeAI.egg-info/PKG-INFO 7 | GeAI.egg-info/SOURCES.txt 8 | GeAI.egg-info/dependency_links.txt 9 | GeAI.egg-info/top_level.txt 10 | GeAI/_utils/__init__.py 11 | GeAI/_utils/_image_to_data_uri.py 12 | GeAI/_utils/_moderation_openai.py 13 | GeAI/txt/__init__.py 14 | GeAI/txt/explain_code.py 15 | GeAI/txt/fix_grammar.py 16 | GeAI/txt/image.py 17 | GeAI/txt/optimize_code.py 18 | GeAI/txt/txt_default.py -------------------------------------------------------------------------------- /Python/src/GeAI/_utils/_image_to_data_uri.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import base64 3 | 4 | def image_to_data_uri(image_path): 5 | image_data = "" 6 | 7 | if image_path.lower().startswith(("http://", "https://")): 8 | response = requests.get(image_path) 9 | response.raise_for_status() 10 | image_data = base64.b64encode(response.content).decode("utf-8") 11 | else: 12 | with open(image_path, "rb") as image_file: 13 | image_data = base64.b64encode(image_file.read()).decode("utf-8") 14 | 15 | extension = image_path.split(".")[-1].lower() 16 | return extension, image_data -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.history.reset.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.history.reset = function(genai.moonshot.object) { 3 | genai.moonshot.object$chat.history$messages = list( 4 | list( 5 | role = "system", 6 | content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages." 
7 | ) 8 | ) 9 | } 10 | -------------------------------------------------------------------------------- /R/src/NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | export(available.models) 4 | export(chat) 5 | export(chat.edit) 6 | export(chat.history.convert) 7 | export(chat.history.export) 8 | export(chat.history.import) 9 | export(chat.history.print) 10 | export(chat.history.reset) 11 | export(chat.history.save) 12 | export(genai.google) 13 | export(genai.moonshot) 14 | export(genai.openai) 15 | export(img) 16 | export(txt) 17 | export(txt.image) 18 | import(R6) 19 | importFrom(ggplotify,as.ggplot) 20 | importFrom(httr,GET) 21 | importFrom(httr,POST) 22 | importFrom(httr,add_headers) 23 | importFrom(httr,content) 24 | importFrom(jsonlite,toJSON) 25 | importFrom(listenv,listenv) 26 | importFrom(magick,image_read) 27 | importFrom(magrittr,"%>%") 28 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.history.import.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.history.import = function(genai.openai.object, 3 | new.chat.history) { 4 | # Imported chat history is a list 5 | if (is.list(new.chat.history)) { 6 | expected.format = list( 7 | role = NA, 8 | content = NA 9 | ) 10 | for (message in new.chat.history) { 11 | if (!identical(names(message), names(expected.format)) || 12 | !is.character(message$role) || 13 | !is.character(message$content)) { 14 | stop("Invalid value for new.chat.history. Please make sure the format of the imported chat history is correct.") 15 | } 16 | } 17 | genai.openai.object$chat.history$messages = new.chat.history 18 | } 19 | else { 20 | stop("Invalid new.chat.history. Please make sure it is a list.") 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.history.import.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.history.import = function(genai.moonshot.object, 3 | new.chat.history) { 4 | # Imported chat history is a list 5 | if (is.list(new.chat.history)) { 6 | expected.format = list( 7 | role = NA, 8 | content = NA 9 | ) 10 | for (message in new.chat.history) { 11 | if (!identical(names(message), names(expected.format)) || 12 | !is.character(message$role) || 13 | !is.character(message$content)) { 14 | stop("Invalid value for new.chat.history. Please make sure the format of the imported chat history is correct.") 15 | } 16 | } 17 | genai.moonshot.object$chat.history$messages = new.chat.history 18 | } 19 | else { 20 | stop("Invalid new.chat.history. 
Please make sure it is a list.") 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /Python/src/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: GeAI 3 | Version: 0.1.8 4 | Summary: Generative Artificial Intelligence 5 | Home-page: https://genai.gd.edu.kg/ 6 | Author: Li Yuan 7 | Author-email: 8 | Keywords: Generative AI,LLM,API 9 | Classifier: Development Status :: 3 - Alpha 10 | Classifier: Intended Audience :: Education 11 | Classifier: Programming Language :: Python :: 3 12 | Classifier: Operating System :: MacOS :: MacOS X 13 | Classifier: Operating System :: Microsoft :: Windows 14 | 15 | Utilizing Generative Artificial Intelligence models like "GPT-4" and "Gemini Pro" as coding and writing assistants for "Python" users. Through these models, "GenAIPy" offers a variety of functions, encompassing text generation, code optimization, natural language processing, chat, and image interpretation. The goal is to aid "Python" users in streamlining laborious coding and language processing tasks. 16 | -------------------------------------------------------------------------------- /Python/src/GeAI/_utils/_moderation_openai.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | def moderation_openai(model_parameter, prompt): 5 | safety_url = ( 6 | f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/moderations" 7 | if model_parameter["proxy"] 8 | else f"https://api.openai.com/{model_parameter['version']}/moderations" 9 | ) 10 | safety_check = {"input": prompt} 11 | safety_request = json.dumps(safety_check, separators=(",", ":"), ensure_ascii=False) 12 | headers = { 13 | "Content-Type": "application/json", 14 | "Authorization": f"Bearer {model_parameter['api']}" 15 | } 16 | safety_response = requests.post(safety_url, data=safety_request, headers=headers) 17 | safety_response_json = safety_response.json() 18 | if safety_response_json["results"][0]["flagged"]: 19 | raise ValueError("The prompt may contain harmful content.") -------------------------------------------------------------------------------- /Python/src/GeAI.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: GeAI 3 | Version: 0.1.8 4 | Summary: Generative Artificial Intelligence 5 | Home-page: https://genai.gd.edu.kg/ 6 | Author: Li Yuan 7 | Author-email: 8 | Keywords: Generative AI,LLM,API 9 | Classifier: Development Status :: 3 - Alpha 10 | Classifier: Intended Audience :: Education 11 | Classifier: Programming Language :: Python :: 3 12 | Classifier: Operating System :: MacOS :: MacOS X 13 | Classifier: Operating System :: Microsoft :: Windows 14 | 15 | Utilizing Generative Artificial Intelligence models like "GPT-4" and "Gemini Pro" as coding and writing assistants for "Python" users. Through these models, "GenAIPy" offers a variety of functions, encompassing text generation, code optimization, natural language processing, chat, and image interpretation. The goal is to aid "Python" users in streamlining laborious coding and language processing tasks. 
16 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.history.import.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.history.import = function(genai.google.object, 3 |                                             new.chat.history) { 4 |   # Imported chat history is a list 5 |   if (is.list(new.chat.history)) { 6 |     expected.format = list( 7 |       role = NA, 8 |       parts = list( 9 |         text = NA 10 |       ) 11 |     ) 12 |     for (message in new.chat.history) { 13 |       if (!identical(names(message), names(expected.format)) || 14 |           !is.character(message$role) || 15 |           !is.list(message$parts) || 16 |           length(message$parts) != 1 || 17 |           !is.character(message$parts$text)) { 18 |         stop("Invalid value for new.chat.history. Please make sure the format of the imported chat history is correct.") 19 |       } 20 |     } 21 |     genai.google.object$chat.history$contents = new.chat.history 22 |   } 23 |   else { 24 |     stop("Invalid new.chat.history. Please make sure it is a list.") 25 |   } 26 | } 27 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.history.convert.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.history.convert = function(from.genai.moonshot.object, 3 |                                                to.genai.object) { 4 |   if (class(to.genai.object)[1] == "genai.google") { 5 |     moonshot.history = from.genai.moonshot.object$chat.history$messages[2:length(from.genai.moonshot.object$chat.history$messages)] 6 |     contents = lapply(moonshot.history, function(entry) { 7 |       list( 8 |         role = ifelse(entry$role == "assistant", "model", "user"), 9 |         parts = list(text = entry$content) 10 |       ) 11 |     }) 12 |     google.history = contents 13 |     return(google.history) 14 |   } 15 |   else if (class(to.genai.object)[1] == "genai.openai") { 16 |     openai.history = from.genai.moonshot.object$chat.history$messages 17 |     openai.history[[1]]$content = "You are a helpful assistant." 18 |     return(openai.history) 19 |   } 20 |   else { 21 |     stop("Invalid value for to.genai.object.") 22 |   } 23 | } 24 | -------------------------------------------------------------------------------- /R/src/DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: GenAI 2 | Type: Package 3 | Title: Generative Artificial Intelligence 4 | Version: 0.2.0 5 | Authors@R: c( 6 |     person( 7 |       given = "Li", 8 |       family = "Yuan", 9 |       role = c("aut", "cre"), 10 |       email = "lyuan@gd.edu.kg", 11 |       comment = c(ORCID = "0009-0008-1075-9922") 12 |     ) 13 |   ) 14 | Maintainer: Li Yuan <lyuan@gd.edu.kg> 15 | Description: Utilizing Generative Artificial Intelligence models like 'GPT-4' and 'Gemini Pro' as coding and writing assistants for 'R' users. Through these models, 'GenAI' offers a variety of functions, encompassing text generation, code optimization, natural language processing, chat, and image interpretation. The goal is to aid 'R' users in streamlining laborious coding and language processing tasks.
16 | License: CC BY 4.0 17 | URL: https://genai.gd.edu.kg/ 18 | BugReports: https://github.com/GitData-GA/GenAI/issues 19 | Encoding: UTF-8 20 | RoxygenNote: 7.3.0 21 | Depends: magrittr 22 | Imports: base64enc, httr, jsonlite, tools, R6, listenv, magick, 23 |         ggplotify 24 | NeedsCompilation: no 25 | Packaged: 2024-02-15 03:28:07 UTC; lp130 26 | Author: Li Yuan [aut, cre] (<https://orcid.org/0009-0008-1075-9922>) 27 | -------------------------------------------------------------------------------- /Python/src/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | VERSION = '0.1.8' 4 | DESCRIPTION = 'Generative Artificial Intelligence' 5 | LONG_DESCRIPTION = 'Utilizing Generative Artificial Intelligence models like "GPT-4" and "Gemini Pro" as coding and writing assistants for "Python" users. Through these models, "GenAIPy" offers a variety of functions, encompassing text generation, code optimization, natural language processing, chat, and image interpretation. The goal is to aid "Python" users in streamlining laborious coding and language processing tasks.' 6 | 7 | setup( 8 |     name="GeAI", 9 |     version=VERSION, 10 |     author="Li Yuan", 11 |     author_email="", 12 |     url='https://genai.gd.edu.kg/', 13 |     description=DESCRIPTION, 14 |     long_description=LONG_DESCRIPTION, 15 |     packages=find_packages(include=['GeAI', 'GeAI.*']), 16 |     install_requires=[], 17 |     keywords=['Generative AI', 'LLM', 'API'], 18 |     classifiers= [ 19 |         "Development Status :: 3 - Alpha", 20 |         "Intended Audience :: Education", 21 |         "Programming Language :: Python :: 3", 22 |         "Operating System :: MacOS :: MacOS X", 23 |         "Operating System :: Microsoft :: Windows", 24 |     ] 25 | ) -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.history.convert.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.history.convert = function(from.genai.openai.object, 3 |                                              to.genai.object) { 4 |   if (class(to.genai.object)[1] == "genai.google") { 5 |     openai.history = from.genai.openai.object$chat.history$messages[2:length(from.genai.openai.object$chat.history$messages)] 6 |     contents = lapply(openai.history, function(entry) { 7 |       list( 8 |         role = ifelse(entry$role == "assistant", "model", "user"), 9 |         parts = list(text = entry$content) 10 |       ) 11 |     }) 12 |     google.history = contents 13 |     return(google.history) 14 |   } 15 |   else if (class(to.genai.object)[1] == "genai.moonshot") { 16 |     moonshot.history = from.genai.openai.object$chat.history$messages 17 |     moonshot.history[[1]]$content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages." 18 |     return(moonshot.history) 19 |   } 20 |   else { 21 |     stop("Invalid value for to.genai.object.") 22 |   } 23 | } 24 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.history.print.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.history.print = function(genai.openai.object, 3 |                                            from, 4 |                                            to) { 5 |   if (!is.numeric(from) || from < 1) { 6 |     stop("Invalid value for from.
It should be an integer greater than or equal to 1.") 7 | } 8 | 9 | if (is.numeric(to) && to < from) { 10 | stop("Invalid value for to. It should be an integer greater than or equal to from") 11 | } 12 | 13 | chat.length = length(genai.openai.object$chat.history$messages) 14 | 15 | if (is.numeric(to) && to > chat.length) { 16 | stop("Invalid value for to. It should be an integer less than or equal to ", chat.length, ".") 17 | } 18 | 19 | if (is.numeric(to)) { 20 | chat.length = to 21 | } 22 | 23 | if (chat.length > 0) { 24 | for (i in from:chat.length) { 25 | cat( 26 | sprintf( 27 | "-------------------------------- Message %2d ---------------------------------\n", 28 | i 29 | ) 30 | ) 31 | cat("Role:", 32 | genai.openai.object$chat.history$messages[[i]]$role, 33 | "\n") 34 | cat("Text: ") 35 | cat(paste(strwrap(genai.openai.object$chat.history$messages[[i]]$content, 36 | width = 76, exdent = 0), collapse = "\n")) 37 | cat("\n\n") 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.history.print.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.history.print = function(genai.google.object, 3 | from, 4 | to) { 5 | if (!is.numeric(from) || from < 1) { 6 | stop("Invalid value for from. It should be an integer greater than or equal to 1.") 7 | } 8 | 9 | if (is.numeric(to) && to < from) { 10 | stop("Invalid value for to. It should be an integer greater than or equal to from") 11 | } 12 | 13 | chat.length = length(genai.google.object$chat.history$contents) 14 | 15 | if (is.numeric(to) && to > chat.length) { 16 | stop("Invalid value for to. It should be an integer less than or equal to ", chat.length, ".") 17 | } 18 | 19 | if (is.numeric(to)) { 20 | chat.length = to 21 | } 22 | 23 | if (chat.length > 0) { 24 | for (i in from:chat.length) { 25 | cat( 26 | sprintf( 27 | "-------------------------------- Message %2d ---------------------------------\n", 28 | i 29 | ) 30 | ) 31 | cat("Role:", 32 | genai.google.object$chat.history$contents[[i]]$role, 33 | "\n") 34 | cat("Text: ") 35 | cat(paste(strwrap(genai.google.object$chat.history$contents[[i]]$parts$text, 36 | width = 76, exdent = 0), collapse = "\n")) 37 | cat("\n\n") 38 | } 39 | } 40 | } 41 | 42 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.history.print.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.history.print = function(genai.moonshot.object, 3 | from, 4 | to) { 5 | if (!is.numeric(from) || from < 1) { 6 | stop("Invalid value for from. It should be an integer greater than or equal to 1.") 7 | } 8 | 9 | if (is.numeric(to) && to < from) { 10 | stop("Invalid value for to. It should be an integer greater than or equal to from") 11 | } 12 | 13 | chat.length = length(genai.moonshot.object$chat.history$messages) 14 | 15 | if (is.numeric(to) && to > chat.length) { 16 | stop("Invalid value for to. 
It should be an integer less than or equal to ", chat.length, ".") 17 | } 18 | 19 | if (is.numeric(to)) { 20 | chat.length = to 21 | } 22 | 23 | if (chat.length > 0) { 24 | for (i in from:chat.length) { 25 | cat( 26 | sprintf( 27 | "-------------------------------- Message %2d ---------------------------------\n", 28 | i 29 | ) 30 | ) 31 | cat("Role:", 32 | genai.moonshot.object$chat.history$messages[[i]]$role, 33 | "\n") 34 | cat("Text: ") 35 | cat(paste(strwrap(genai.moonshot.object$chat.history$messages[[i]]$content, 36 | width = 76, exdent = 0), collapse = "\n")) 37 | cat("\n\n") 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /R/src/man/available.models.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/available.models.R 3 | \name{available.models} 4 | \alias{available.models} 5 | \title{Get Supported Generative AI Models} 6 | \usage{ 7 | available.models() 8 | } 9 | \value{ 10 | If successful, the function returns a list containing generative AI 11 | service providers and their corresponding models. If the function encounters an error, 12 | it will halt execution and provide an error message. 13 | } 14 | \description{ 15 | This function sends a request to GenAI database API to retrieve information 16 | about available generative AI models. 17 | } 18 | \details{ 19 | The function utilizes the GenAI database API to fetch the latest information about 20 | available Generative AI models. The retrieved data includes details about different models 21 | offered by various service providers. 22 | } 23 | \examples{ 24 | \dontrun{ 25 | # Assuming there is a GenAI object named 'genai.model' supporting this 26 | # function, please refer to the "Live Demo in Colab" above for real 27 | # examples. The following examples are just some basic guidelines. 28 | 29 | all.models = available.models() \%>\% print() 30 | } 31 | 32 | } 33 | \seealso{ 34 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 35 | 36 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/available_models.ipynb}{Live Demo in Colab} 37 | } 38 | -------------------------------------------------------------------------------- /R/src/R/available.models.R: -------------------------------------------------------------------------------- 1 | #' Get Supported Generative AI Models 2 | #' 3 | #' This function sends a request to GenAI database API to retrieve information 4 | #' about available generative AI models. 5 | #' 6 | #' @return If successful, the function returns a list containing generative AI 7 | #' service providers and their corresponding models. If the function encounters an error, 8 | #' it will halt execution and provide an error message. 9 | #' 10 | #' @details 11 | #' The function utilizes the GenAI database API to fetch the latest information about 12 | #' available Generative AI models. The retrieved data includes details about different models 13 | #' offered by various service providers. 
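#'
#' As a minimal sketch, the returned list can be inspected as follows (the
#' provider and field names shown are assumptions based on how this package
#' reads its model database, not a guaranteed schema):
#' \preformatted{
#' models = available.models()
#' names(models)            # service providers, e.g. "google", "openai"
#' models$openai$model      # model names offered by one provider
#' models$openai$version    # API versions accepted for that provider
#' }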
14 | #' 15 | #' @seealso 16 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 17 | #' 18 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/available_models.ipynb}{Live Demo in Colab} 19 | #' 20 | #' @examples 21 | #' \dontrun{ 22 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 23 | #' # function, please refer to the "Live Demo in Colab" above for real 24 | #' # examples. The following examples are just some basic guidelines. 25 | #' 26 | #' all.models = available.models() %>% print() 27 | #' } 28 | #' 29 | #' @export 30 | available.models = function() { 31 | json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") 32 | return (json.data) 33 | } 34 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.history.convert.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.history.convert = function(from.genai.google.object, 3 | to.genai.object) { 4 | if (class(to.genai.object)[1] == "genai.openai") { 5 | system.message = list(role = "system", content = "You are a helpful assistant.") 6 | messages = lapply(from.genai.google.object$chat.history$contents, function(entry) { 7 | list( 8 | role = ifelse(entry$role == "model", "assistant", "user"), 9 | content = entry$parts$text 10 | ) 11 | }) 12 | openai.history = c(list(system.message), messages) 13 | return(openai.history) 14 | } 15 | else if (class(to.genai.object)[1] == "genai.moonshot") { 16 | system.message = list(role = "system", content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages.") 17 | messages = lapply(from.genai.google.object$chat.history$contents, function(entry) { 18 | list( 19 | role = ifelse(entry$role == "model", "assistant", "user"), 20 | content = entry$parts$text 21 | ) 22 | }) 23 | moonshot.history = c(list(system.message), messages) 24 | return(moonshot.history) 25 | } 26 | else { 27 | stop("Invalid value for to.genai.object.") 28 | } 29 | } 30 | 31 | -------------------------------------------------------------------------------- /R/src/R/chat.history.reset.R: -------------------------------------------------------------------------------- 1 | #' Chat History Reset 2 | #' 3 | #' This function resets the chat history along with a generative AI object. 4 | #' 5 | #' @param genai.object A generative AI object containing necessary and correct information. 6 | #' 7 | #' @details Providing accurate and valid information for each argument is crucial for successful chat 8 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 9 | #' error message based on the API feedback. To view all supported generative AI models, use the 10 | #' function \code{\link{available.models}}. 
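#'
#' What a reset restores depends on the provider: based on the internal reset
#' helpers in this package, a Google chat history becomes an empty list, while
#' OpenAI and Moonshot histories are reduced to the provider's default system
#' message.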
11 | #' 12 | #' @seealso 13 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 14 | #' 15 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_reset.ipynb}{Live Demo in Colab} 16 | #' 17 | #' @examples 18 | #' \dontrun{ 19 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 20 | #' # function, please refer to the "Live Demo in Colab" above for real 21 | #' # examples. The following examples are just some basic guidelines. 22 | #' 23 | #' # Method 1 (recommended): use the pipe operator "%>%" 24 | #' genai.model %>% 25 | #' chat.history.reset() 26 | #' 27 | #' # Method 2: use the reference operator "$" 28 | #' genai.model$chat.history.reset() 29 | #' 30 | #' # Method 3: use the function chat.history.reset() directly 31 | #' chat.history.reset(genai.object = genai.model) 32 | #' } 33 | #' 34 | #' @export 35 | chat.history.reset = function(genai.object) { 36 | genai.object$chat.history.reset() 37 | } 38 | -------------------------------------------------------------------------------- /R/src/man/chat.history.reset.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.history.reset.R 3 | \name{chat.history.reset} 4 | \alias{chat.history.reset} 5 | \title{Chat History Reset} 6 | \usage{ 7 | chat.history.reset(genai.object) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | } 12 | \description{ 13 | This function resets the chat history along with a generative AI object. 14 | } 15 | \details{ 16 | Providing accurate and valid information for each argument is crucial for successful chat 17 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 18 | error message based on the API feedback. To view all supported generative AI models, use the 19 | function \code{\link{available.models}}. 20 | } 21 | \examples{ 22 | \dontrun{ 23 | # Assuming there is a GenAI object named 'genai.model' supporting this 24 | # function, please refer to the "Live Demo in Colab" above for real 25 | # examples. The following examples are just some basic guidelines. 
26 | 27 | # Method 1 (recommended): use the pipe operator "\%>\%" 28 | genai.model \%>\% 29 | chat.history.reset() 30 | 31 | # Method 2: use the reference operator "$" 32 | genai.model$chat.history.reset() 33 | 34 | # Method 3: use the function chat.history.reset() directly 35 | chat.history.reset(genai.object = genai.model) 36 | } 37 | 38 | } 39 | \seealso{ 40 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 41 | 42 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_reset.ipynb}{Live Demo in Colab} 43 | } 44 | -------------------------------------------------------------------------------- /Python/src/GeAI/connect.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | def connect(provider, model, version, api, proxy = False): 5 | response = requests.get("https://genai.gd.edu.kg/model.json") 6 | json_data = response.json() 7 | if provider == "google": 8 | version = next(arg for arg in [version] if arg in json_data["google"]["version"]) 9 | model = next(arg for arg in [model] if arg in json_data["google"]["model"]) 10 | api_url = ( 11 | f"https://api.genai.gd.edu.kg/google/{version}/models/{model}?key={api}" 12 | if proxy 13 | else f"https://generativelanguage.googleapis.com/{version}/models/{model}?key={api}" 14 | ) 15 | response = requests.get(api_url, headers={"Content-Type": "application/json"}) 16 | elif provider == "openai": 17 | version = next(arg for arg in [version] if arg in json_data["openai"]["version"]) 18 | model = next(arg for arg in [model] if arg in json_data["openai"]["model"]) 19 | api_url = ( 20 | f"https://api.genai.gd.edu.kg/openai/{version}/models" 21 | if proxy 22 | else f"https://api.openai.com/{version}/models" 23 | ) 24 | response = requests.get(api_url, headers={"Content-Type": "application/json", "Authorization": f"Bearer {api}"}) 25 | else: 26 | raise ValueError("Invalid provider") 27 | response_json = response.json() 28 | if "error" in response_json and response_json["error"] is not None: 29 | raise ValueError(response_json["error"]["message"]) 30 | return { 31 | "provider": provider, 32 | "model": model, 33 | "version": version, 34 | "api": api, 35 | "proxy": proxy 36 | } -------------------------------------------------------------------------------- /R/src/R/chat.history.export.R: -------------------------------------------------------------------------------- 1 | #' Chat History Export 2 | #' 3 | #' This function exports the chat history along with a generative AI object as a list. 4 | #' 5 | #' @param genai.object A generative AI object containing necessary and correct information. 6 | #' 7 | #' @return If successful, the chat history list will be returned. 8 | #' 9 | #' @details Providing accurate and valid information for each argument is crucial for successful chat 10 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 11 | #' error message based on the API feedback. To view all supported generative AI models, use the 12 | #' function \code{\link{available.models}}. 
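#'
#' The exported list mirrors the provider's native format: based on the
#' internal export helpers, Google objects return their "contents" list (one
#' role plus parts$text per message), while OpenAI and Moonshot objects return
#' their "messages" list (one role plus content per message, including the
#' leading system message).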
13 | #' 14 | #' @seealso 15 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 16 | #' 17 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_export.ipynb}{Live Demo in Colab} 18 | #' 19 | #' @examples 20 | #' \dontrun{ 21 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 22 | #' # function, please refer to the "Live Demo in Colab" above for real 23 | #' # examples. The following examples are just some basic guidelines. 24 | #' 25 | #' # Method 1 (recommended): use the pipe operator "%>%" 26 | #' exported.history = genai.model %>% 27 | #' chat.history.export() 28 | #' 29 | #' # Method 2: use the reference operator "$" 30 | #' exported.history = genai.model$chat.history.export() 31 | #' 32 | #' # Method 3: use the function chat.history.export() directly 33 | #' exported.history = chat.history.export(genai.object = genai.model) 34 | #' } 35 | #' 36 | #' @export 37 | chat.history.export = function(genai.object) { 38 | genai.object$chat.history.export() 39 | } 40 | -------------------------------------------------------------------------------- /R/src/man/chat.history.export.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.history.export.R 3 | \name{chat.history.export} 4 | \alias{chat.history.export} 5 | \title{Chat History Export} 6 | \usage{ 7 | chat.history.export(genai.object) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | } 12 | \value{ 13 | If successful, the chat history list will be returned. 14 | } 15 | \description{ 16 | This function exports the chat history along with a generative AI object as a list. 17 | } 18 | \details{ 19 | Providing accurate and valid information for each argument is crucial for successful chat 20 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 21 | error message based on the API feedback. To view all supported generative AI models, use the 22 | function \code{\link{available.models}}. 23 | } 24 | \examples{ 25 | \dontrun{ 26 | # Assuming there is a GenAI object named 'genai.model' supporting this 27 | # function, please refer to the "Live Demo in Colab" above for real 28 | # examples. The following examples are just some basic guidelines. 29 | 30 | # Method 1 (recommended): use the pipe operator "\%>\%" 31 | exported.history = genai.model \%>\% 32 | chat.history.export() 33 | 34 | # Method 2: use the reference operator "$" 35 | exported.history = genai.model$chat.history.export() 36 | 37 | # Method 3: use the function chat.history.export() directly 38 | exported.history = chat.history.export(genai.object = genai.model) 39 | } 40 | 41 | } 42 | \seealso{ 43 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 44 | 45 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_export.ipynb}{Live Demo in Colab} 46 | } 47 | -------------------------------------------------------------------------------- /R/src/R/chat.history.import.R: -------------------------------------------------------------------------------- 1 | #' Chat History Import 2 | #' 3 | #' This function imports a chat history in list format to a generative AI object. 
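#'
#' As a minimal sketch, an OpenAI- or Moonshot-style history matching the
#' format checked by the internal import validators looks like:
#' \preformatted{
#' new.history = list(
#'   list(role = "system", content = "You are a helpful assistant."),
#'   list(role = "user", content = "Hello there")
#' )
#' }
#' For Google objects, each message instead pairs a role with
#' parts = list(text = ...).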
4 | #' 5 | #' @param genai.object A generative AI object containing necessary and correct information. 6 | #' @param new.chat.history A list containing a chat history in correct format. 7 | #' 8 | #' @details Providing accurate and valid information for each argument is crucial for successful chat 9 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 10 | #' error message based on the API feedback. To view all supported generative AI models, use the 11 | #' function \code{\link{available.models}}. 12 | #' 13 | #' @seealso 14 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 15 | #' 16 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_import.ipynb}{Live Demo in Colab} 17 | #' 18 | #' @examples 19 | #' \dontrun{ 20 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 21 | #' # function and a valid chat history list named 'new.history', please 22 | #' # refer to the "Live Demo in Colab" above for real examples. The 23 | #' # following examples are just some basic guidelines. 24 | #' 25 | #' # Method 1 (recommended): use the pipe operator "%>%" 26 | #' genai.model %>% 27 | #' chat.history.import(new.chat.history = new.history) 28 | #' 29 | #' # Method 2: use the reference operator "$" 30 | #' genai.model$chat.history.import(new.chat.history = new.history) 31 | #' 32 | #' # Method 3: use the function chat.history.import() directly 33 | #' chat.history.import(genai.object = genai.model, 34 | #' new.chat.history = new.history) 35 | #' } 36 | #' 37 | #' @export 38 | chat.history.import = function(genai.object, 39 | new.chat.history) { 40 | genai.object$chat.history.import(new.chat.history) 41 | } 42 | -------------------------------------------------------------------------------- /R/src/man/chat.history.import.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.history.import.R 3 | \name{chat.history.import} 4 | \alias{chat.history.import} 5 | \title{Chat History Import} 6 | \usage{ 7 | chat.history.import(genai.object, new.chat.history) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | 12 | \item{new.chat.history}{A list containing a chat history in correct format.} 13 | } 14 | \description{ 15 | This function imports a chat history in list format to a generative AI object. 16 | } 17 | \details{ 18 | Providing accurate and valid information for each argument is crucial for successful chat 19 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 20 | error message based on the API feedback. To view all supported generative AI models, use the 21 | function \code{\link{available.models}}. 22 | } 23 | \examples{ 24 | \dontrun{ 25 | # Assuming there is a GenAI object named 'genai.model' supporting this 26 | # function and a valid chat history list named 'new.history', please 27 | # refer to the "Live Demo in Colab" above for real examples. The 28 | # following examples are just some basic guidelines. 
29 | 30 | # Method 1 (recommended): use the pipe operator "\%>\%" 31 | genai.model \%>\% 32 | chat.history.import(new.chat.history = new.history) 33 | 34 | # Method 2: use the reference operator "$" 35 | genai.model$chat.history.import(new.chat.history = new.history) 36 | 37 | # Method 3: use the function chat.history.import() directly 38 | chat.history.import(genai.object = genai.model, 39 | new.chat.history = new.history) 40 | } 41 | 42 | } 43 | \seealso{ 44 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 45 | 46 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_import.ipynb}{Live Demo in Colab} 47 | } 48 | -------------------------------------------------------------------------------- /R/src/R/chat.history.save.R: -------------------------------------------------------------------------------- 1 | #' Chat History Save 2 | #' 3 | #' This function saves a chat history along with a generative AI object as a JSON file. 4 | #' 5 | #' @param genai.object A generative AI object containing necessary and correct information. 6 | #' @param file.name A character string representing the name of the JSON file for the chat history. 7 | #' 8 | #' @return If successful, the chat history will be saved as a JSON file in your current or specified 9 | #' directory. 10 | #' 11 | #' @details Providing accurate and valid information for each argument is crucial for successful chat 12 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 13 | #' error message based on the API feedback. To view all supported generative AI models, use the 14 | #' function \code{\link{available.models}}. 15 | #' 16 | #' @seealso 17 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 18 | #' 19 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_save.ipynb}{Live Demo in Colab} 20 | #' 21 | #' @examples 22 | #' \dontrun{ 23 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 24 | #' # function, please refer to the "Live Demo in Colab" above for real 25 | #' # examples. The following examples are just some basic guidelines. 
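#' # Note: based on the internal save helpers, the history is written as
#' # pretty-printed JSON, so each call below produces "saved_history.json".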
26 | #' 27 | #' # Method 1 (recommended): use the pipe operator "%>%" 28 | #' genai.model %>% 29 | #' chat.history.save(file.name = "saved_history") 30 | #' 31 | #' # Method 2: use the reference operator "$" 32 | #' genai.model$chat.history.save(file.name = "saved_history") 33 | #' 34 | #' # Method 3: use the function chat.history.save() directly 35 | #' chat.history.save(genai.object = genai.model, 36 | #' file.name = "saved_history") 37 | #' } 38 | #' 39 | #' @export 40 | chat.history.save = function(genai.object, file.name) { 41 | genai.object$chat.history.save(file.name) 42 | } 43 | -------------------------------------------------------------------------------- /R/src/man/chat.history.save.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.history.save.R 3 | \name{chat.history.save} 4 | \alias{chat.history.save} 5 | \title{Chat History Save} 6 | \usage{ 7 | chat.history.save(genai.object, file.name) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | 12 | \item{file.name}{A character string representing the name of the JSON file for the chat history.} 13 | } 14 | \value{ 15 | If successful, the chat history will be saved as a JSON file in your current or specified 16 | directory. 17 | } 18 | \description{ 19 | This function saves a chat history along with a generative AI object as a JSON file. 20 | } 21 | \details{ 22 | Providing accurate and valid information for each argument is crucial for successful chat 23 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 24 | error message based on the API feedback. To view all supported generative AI models, use the 25 | function \code{\link{available.models}}. 26 | } 27 | \examples{ 28 | \dontrun{ 29 | # Assuming there is a GenAI object named 'genai.model' supporting this 30 | # function, please refer to the "Live Demo in Colab" above for real 31 | # examples. The following examples are just some basic guidelines. 32 | 33 | # Method 1 (recommended): use the pipe operator "\%>\%" 34 | genai.model \%>\% 35 | chat.history.save(file.name = "saved_history") 36 | 37 | # Method 2: use the reference operator "$" 38 | genai.model$chat.history.save(file.name = "saved_history") 39 | 40 | # Method 3: use the function chat.history.save() directly 41 | chat.history.save(genai.object = genai.model, 42 | file.name = "saved_history") 43 | } 44 | 45 | } 46 | \seealso{ 47 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 48 | 49 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_save.ipynb}{Live Demo in Colab} 50 | } 51 | -------------------------------------------------------------------------------- /R/src/man/chat.history.print.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.history.print.R 3 | \name{chat.history.print} 4 | \alias{chat.history.print} 5 | \title{Chat History Print} 6 | \usage{ 7 | chat.history.print(genai.object, from = 1, to = NULL) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | 12 | \item{from}{Optional. Default to 1. 
An integer representing the first message in the chat history that needs 13 | to be printed.} 14 | 15 | \item{to}{Optional. Default to \code{NULL}, prints until the last message in the chat history. An integer 16 | representing the last message in the chat history that needs to be printed.} 17 | } 18 | \description{ 19 | This function prints out the chat history along with a generative AI object. 20 | } 21 | \details{ 22 | Providing accurate and valid information for each argument is crucial for successful chat 23 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 24 | error message based on the API feedback. To view all supported generative AI models, use the 25 | function \code{\link{available.models}}. 26 | } 27 | \examples{ 28 | \dontrun{ 29 | # Assuming there is a GenAI object named 'genai.model' supporting this 30 | # function, please refer to the "Live Demo in Colab" above for real 31 | # examples. The following examples are just some basic guidelines. 32 | 33 | # Method 1 (recommended): use the pipe operator "\%>\%" 34 | genai.model \%>\% 35 | chat.history.print() 36 | 37 | # Method 2: use the reference operator "$" 38 | genai.model$chat.history.print(from = 3) 39 | 40 | # Method 3: use the function chat.history.print() directly 41 | chat.history.print(genai.object = genai.model, 42 | from = 3, 43 | to = 5) 44 | } 45 | 46 | } 47 | \seealso{ 48 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 49 | 50 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_print.ipynb}{Live Demo in Colab} 51 | } 52 | -------------------------------------------------------------------------------- /R/src/R/chat.history.print.R: -------------------------------------------------------------------------------- 1 | #' Chat History Print 2 | #' 3 | #' This function prints out the chat history along with a generative AI object. 4 | #' 5 | #' @param genai.object A generative AI object containing necessary and correct information. 6 | #' @param from Optional. Default to 1. An integer representing the first message in the chat history that needs 7 | #' to be printed. 8 | #' @param to Optional. Default to \code{NULL}, prints until the last message in the chat history. An integer 9 | #' representing the last message in the chat history that needs to be printed. 10 | #' 11 | #' @details Providing accurate and valid information for each argument is crucial for successful chat 12 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 13 | #' error message based on the API feedback. To view all supported generative AI models, use the 14 | #' function \code{\link{available.models}}. 15 | #' 16 | #' @seealso 17 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 18 | #' 19 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_print.ipynb}{Live Demo in Colab} 20 | #' 21 | #' @examples 22 | #' \dontrun{ 23 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 24 | #' # function, please refer to the "Live Demo in Colab" above for real 25 | #' # examples. The following examples are just some basic guidelines. 
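#' # Note: each message prints as a numbered block showing its role and its
#' # text wrapped to roughly 76 characters per line.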
26 | #' 27 | #' # Method 1 (recommended): use the pipe operator "%>%" 28 | #' genai.model %>% 29 | #' chat.history.print() 30 | #' 31 | #' # Method 2: use the reference operator "$" 32 | #' genai.model$chat.history.print(from = 3) 33 | #' 34 | #' # Method 3: use the function chat.history.print() directly 35 | #' chat.history.print(genai.object = genai.model, 36 | #' from = 3, 37 | #' to = 5) 38 | #' } 39 | #' 40 | #' @export 41 | chat.history.print = function(genai.object, 42 | from = 1, 43 | to = NULL) { 44 | genai.object$chat.history.print(from, to) 45 | } 46 | -------------------------------------------------------------------------------- /R/src/man/chat.history.convert.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.history.convert.R 3 | \name{chat.history.convert} 4 | \alias{chat.history.convert} 5 | \title{Chat History Convert} 6 | \usage{ 7 | chat.history.convert(from.genai.object, to.genai.object) 8 | } 9 | \arguments{ 10 | \item{from.genai.object}{A source generative AI object containing necessary and correct information.} 11 | 12 | \item{to.genai.object}{A target generative AI object containing necessary and correct information.} 13 | } 14 | \value{ 15 | If successful, the converted chat history list will be returned. 16 | } 17 | \description{ 18 | This function converts the chat history along with a generative AI object to a valid format 19 | for another generative AI object. 20 | } 21 | \details{ 22 | Providing accurate and valid information for each argument is crucial for successful chat 23 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 24 | error message based on the API feedback. To view all supported generative AI models, use the 25 | function \code{\link{available.models}}. Moreover, you can print out the chat history using the 26 | function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} during the chat. 27 | } 28 | \examples{ 29 | \dontrun{ 30 | # Assuming there are two GenAI objects named 'genai.model' and 'another.genai.model' 31 | # supporting this function, please refer to the "Live Demo in Colab" above for 32 | # real examples. The following examples are just some basic guidelines. 33 | 34 | # Method 1 (recommended): use the pipe operator "\%>\%" 35 | converted.history = genai.model \%>\% 36 | chat.history.convert(to.genai.object = another.genai.model) 37 | 38 | # Method 2: use the reference operator "$" 39 | converted.history = genai.model$chat.history.convert(to.genai.object = another.genai.model) 40 | 41 | # Method 3: use the function chat.history.convert() directly 42 | converted.history = chat.history.convert(from.genai.object = genai.model, 43 | to.genai.object = another.genai.model) 44 | } 45 | 46 | } 47 | \seealso{ 48 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 49 | 50 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_convert.ipynb}{Live Demo in Colab} 51 | } 52 | -------------------------------------------------------------------------------- /R/src/R/chat.history.convert.R: -------------------------------------------------------------------------------- 1 | #' Chat History Convert 2 | #' 3 | #' This function converts the chat history along with a generative AI object to a valid format 4 | #' for another generative AI object. 
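#'
#' Concretely, the internal converters remap provider-specific fields. For
#' example, a Google message \code{list(role = "model", parts = list(text = "Hi"))}
#' becomes the OpenAI message \code{list(role = "assistant", content = "Hi")},
#' and the target provider's default system message is prepended or swapped in.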
5 | #' 6 | #' @param from.genai.object A source generative AI object containing necessary and correct information. 7 | #' @param to.genai.object A target generative AI object containing necessary and correct information. 8 | #' 9 | #' @return If successful, the converted chat history list will be returned. 10 | #' 11 | #' @details Providing accurate and valid information for each argument is crucial for successful chat 12 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 13 | #' error message based on the API feedback. To view all supported generative AI models, use the 14 | #' function \code{\link{available.models}}. Moreover, you can print out the chat history using the 15 | #' function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} during the chat. 16 | #' 17 | #' @seealso 18 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 19 | #' 20 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat_history_convert.ipynb}{Live Demo in Colab} 21 | #' 22 | #' @examples 23 | #' \dontrun{ 24 | #' # Assuming there are two GenAI objects named 'genai.model' and 'another.genai.model' 25 | #' # supporting this function, please refer to the "Live Demo in Colab" above for 26 | #' # real examples. The following examples are just some basic guidelines. 27 | #' 28 | #' # Method 1 (recommended): use the pipe operator "%>%" 29 | #' converted.history = genai.model %>% 30 | #' chat.history.convert(to.genai.object = another.genai.model) 31 | #' 32 | #' # Method 2: use the reference operator "$" 33 | #' converted.history = genai.model$chat.history.convert(to.genai.object = another.genai.model) 34 | #' 35 | #' # Method 3: use the function chat.history.convert() directly 36 | #' converted.history = chat.history.convert(from.genai.object = genai.model, 37 | #' to.genai.object = another.genai.model) 38 | #' } 39 | #' 40 | #' @export 41 | chat.history.convert = function(from.genai.object, 42 | to.genai.object) { 43 | from.genai.object$chat.history.convert(from.genai.object, 44 | to.genai.object) 45 | } 46 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.txt.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.txt = function(genai.moonshot.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | max.tokens = NULL, 7 | temperature = NULL, 8 | top.p = NULL 9 | )) { 10 | # Check configurations 11 | genai.moonshot.config.check(config) 12 | 13 | # Get api url 14 | api.url = paste0( 15 | "https://api.moonshot.cn/", 16 | genai.moonshot.object$version, 17 | "/chat/completions" 18 | ) 19 | if (genai.moonshot.object$proxy) { 20 | api.url = paste0( 21 | "https://api.genai.gd.edu.kg/moonshot/", 22 | genai.moonshot.object$version, 23 | "/chat/completions" 24 | ) 25 | } 26 | 27 | # Initialize the request body 28 | requestBody = list( 29 | model = genai.moonshot.object$model, 30 | messages = list( 31 | list(role = "system", 32 | content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. 
Moonshot AI is a proper noun and cannot be translated into other languages."), 33 | list(role = "user", 34 | content = prompt) 35 | ) 36 | ) 37 | 38 | # Get the generation configuration 39 | if (length(config) > 0) { 40 | requestBody = genai.moonshot.generation.config(requestBody, config) 41 | } 42 | 43 | # Convert the request as JSON format 44 | requestBodyJSON = jsonlite::toJSON(requestBody, 45 | auto_unbox = TRUE, 46 | pretty = TRUE) 47 | 48 | # Send request and get response 49 | response = httr::POST( 50 | url = api.url, 51 | body = requestBodyJSON, 52 | httr::add_headers( 53 | "Content-Type" = "application/json", 54 | "Authorization" = paste("Bearer", genai.moonshot.object$api) 55 | ) 56 | ) 57 | responseJSON = httr::content(response, "parsed") 58 | 59 | # Check for response error 60 | if (!is.null(responseJSON$error)) { 61 | stop(responseJSON$error$message) 62 | } 63 | 64 | # Print detail if verbose is TRUE 65 | if (verbose) { 66 | genai.moonshot.formated.confguration(requestBody, prompt) 67 | cat("\n") 68 | } 69 | 70 | # Get the response text 71 | return (responseJSON$choices[[1]]$message$content) 72 | } 73 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.img.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.img = function(genai.openai.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | quality = NULL, 7 | size = NULL, 8 | style = NULL, 9 | user = NULL 10 | )) { 11 | # Check configurations 12 | genai.openai.img.config.check(config) 13 | 14 | # Get api url 15 | api.url = paste0( 16 | "https://api.openai.com/", 17 | genai.openai.object$version, 18 | "/images/generations" 19 | ) 20 | if (genai.openai.object$proxy) { 21 | api.url = paste0( 22 | "https://api.genai.gd.edu.kg/openai/", 23 | genai.openai.object$version, 24 | "/images/generations" 25 | ) 26 | } 27 | 28 | # Initialize the request body 29 | requestBody = list( 30 | model = genai.openai.object$model, 31 | prompt = prompt, 32 | response_format = "b64_json" 33 | ) 34 | 35 | # Get the generation configuration 36 | if (length(config) > 0) { 37 | requestBody = genai.openai.img.generation.config(requestBody, config) 38 | } 39 | 40 | # Convert the request as JSON format 41 | requestBodyJSON = jsonlite::toJSON(requestBody, 42 | auto_unbox = TRUE, 43 | pretty = TRUE) 44 | 45 | # Send request and get response 46 | response = httr::POST( 47 | url = api.url, 48 | body = requestBodyJSON, 49 | httr::add_headers( 50 | "Content-Type" = "application/json", 51 | "Authorization" = paste("Bearer", genai.openai.object$api) 52 | ) 53 | ) 54 | if (!is.null(genai.openai.object$organization.id) && 55 | is.character(genai.openai.object$organization.id)) { 56 | response = httr::POST( 57 | url = api.url, 58 | body = requestBodyJSON, 59 | httr::add_headers( 60 | "Content-Type" = "application/json", 61 | "Authorization" = paste("Bearer", genai.openai.object$api), 62 | "OpenAI-Organization" = genai.openai.object$organization.id 63 | ) 64 | ) 65 | } 66 | responseJSON = httr::content(response, "parsed") 67 | 68 | # Check for response error 69 | if (!is.null(responseJSON$error)) { 70 | stop(responseJSON$error$message) 71 | } 72 | 73 | # Print detail if verbose is TRUE 74 | if (verbose) { 75 | genai.openai.img.formated.confguration(requestBody, prompt) 76 | cat("\n") 77 | } 78 | 79 | # Store the image 80 | image.data = base64enc::base64decode(responseJSON$data[[1]]$b64_json[1]) 81 | tmp.img = tempfile(fileext = ".png") 82 | 
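# Write the decoded PNG bytes to a temporary file, then wrap the file as a
# ggplot object so callers can display or save it with standard ggplot tooling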
writeBin(image.data, tmp.img) 83 | export.img = ggplotify::as.ggplot(magick::image_read(tmp.img)) 84 | 85 | # Return the image 86 | return (export.img) 87 | } 88 | -------------------------------------------------------------------------------- /Python/src/GeAI/txt.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from GeAI._utils._moderation_openai import moderation_openai 4 | 5 | def txt_default(model_parameter, temperature, prompt): 6 | if prompt == "" or prompt is None or not isinstance(prompt, str): 7 | raise ValueError("Prompt is not in correct format.") 8 | provider = model_parameter["provider"] 9 | if provider == "google": 10 | api_url = ( 11 | f"https://api.genai.gd.edu.kg/google/{model_parameter['version']}/models/" 12 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 13 | if model_parameter["proxy"] 14 | else f"https://generativelanguage.googleapis.com/{model_parameter['version']}/models/" 15 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 16 | ) 17 | request_body = { 18 | "contents": {"parts": [{"text": prompt}]}, 19 | "generationConfig": {"temperature": temperature} 20 | } 21 | headers = {"Content-Type": "application/json"} 22 | 23 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 24 | response = requests.post(api_url, data=request_body_json, headers=headers) 25 | response_json = response.json() 26 | 27 | if "error" in response_json: 28 | raise ValueError(response_json["error"]["message"]) 29 | 30 | if "blockReason" in response_json.get("promptFeedback", {}): 31 | raise ValueError("The prompt may contain harmful content.") 32 | 33 | return str(response_json["candidates"][0]["content"]["parts"][0]["text"]) 34 | elif provider == "openai": 35 | moderation_openai(model_parameter, prompt) 36 | api_url = ( 37 | f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/chat/completions" 38 | if model_parameter["proxy"] 39 | else f"https://api.openai.com/{model_parameter['version']}/chat/completions" 40 | ) 41 | request_body = { 42 | "model": model_parameter["model"], 43 | "messages": [ 44 | {"role": "system", "content": "You are a helpful assistant."}, 45 | {"role": "user", "content": prompt} 46 | ], 47 | "temperature": temperature 48 | } 49 | headers = { 50 | "Content-Type": "application/json", 51 | "Authorization": f"Bearer {model_parameter['api']}" 52 | } 53 | 54 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 55 | response = requests.post(api_url, data=request_body_json, headers=headers) 56 | response_json = response.json() 57 | 58 | if "error" in response_json: 59 | raise ValueError(response_json["error"]["message"]) 60 | 61 | return str(response_json["choices"][0]["message"]["content"]) 62 | else: 63 | raise ValueError("Invalid provider") -------------------------------------------------------------------------------- /Python/src/GeAI/txt/txt_default.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from GeAI._utils._moderation_openai import moderation_openai 4 | 5 | def txt_default(model_parameter, temperature, prompt): 6 | if prompt == "" or prompt is None or not isinstance(prompt, str): 7 | raise ValueError("Prompt is not in correct format.") 8 | provider = model_parameter["provider"] 9 | if provider == "google": 10 | api_url = ( 11 | 
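# Route through the GenAI proxy when model_parameter["proxy"] is truthy;
# otherwise call the Google Generative Language API endpoint directly.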
f"https://api.genai.gd.edu.kg/google/{model_parameter['version']}/models/" 12 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 13 | if model_parameter["proxy"] 14 | else f"https://generativelanguage.googleapis.com/{model_parameter['version']}/models/" 15 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 16 | ) 17 | request_body = { 18 | "contents": {"parts": [{"text": prompt}]}, 19 | "generationConfig": {"temperature": temperature} 20 | } 21 | headers = {"Content-Type": "application/json"} 22 | 23 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 24 | response = requests.post(api_url, data=request_body_json, headers=headers) 25 | response_json = response.json() 26 | 27 | if "error" in response_json: 28 | raise ValueError(response_json["error"]["message"]) 29 | 30 | if "blockReason" in response_json.get("promptFeedback", {}): 31 | raise ValueError("The prompt may contain harmful content.") 32 | 33 | return str(response_json["candidates"][0]["content"]["parts"][0]["text"]) 34 | elif provider == "openai": 35 | moderation_openai(model_parameter, prompt) 36 | api_url = ( 37 | f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/chat/completions" 38 | if model_parameter["proxy"] 39 | else f"https://api.openai.com/{model_parameter['version']}/chat/completions" 40 | ) 41 | request_body = { 42 | "model": model_parameter["model"], 43 | "messages": [ 44 | {"role": "system", "content": "You are a helpful assistant."}, 45 | {"role": "user", "content": prompt} 46 | ], 47 | "temperature": temperature 48 | } 49 | headers = { 50 | "Content-Type": "application/json", 51 | "Authorization": f"Bearer {model_parameter['api']}" 52 | } 53 | 54 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 55 | response = requests.post(api_url, data=request_body_json, headers=headers) 56 | response_json = response.json() 57 | 58 | if "error" in response_json: 59 | raise ValueError(response_json["error"]["message"]) 60 | 61 | return str(response_json["choices"][0]["message"]["content"]) 62 | else: 63 | raise ValueError("Invalid provider") -------------------------------------------------------------------------------- /R/src/R/genai.google.class.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.class = R6Class( 3 | classname = "genai.google", 4 | public = list( 5 | # Initialize method 6 | initialize = function(api, model, version, proxy = FALSE) { 7 | genai.google.check(api, model, version, proxy) 8 | private$api = api 9 | private$model = model 10 | private$version = version 11 | private$proxy = proxy 12 | }, 13 | # Chat generation 14 | chat = function(prompt, 15 | verbose = FALSE, 16 | config = list()) { 17 | genai.google.chat(private, 18 | prompt, 19 | verbose, 20 | config) 21 | }, 22 | # Chat edit 23 | chat.edit = function(prompt, 24 | message.to.edit, 25 | verbose = FALSE, 26 | config = list()) { 27 | genai.google.chat.edit(private, 28 | prompt, 29 | message.to.edit, 30 | verbose, 31 | config) 32 | }, 33 | # Convert chat history 34 | chat.history.convert = function(from.genai.object, to.genai.object) { 35 | genai.google.chat.history.convert(private, to.genai.object) 36 | }, 37 | # Export chat history 38 | chat.history.export = function() { 39 | genai.google.chat.history.export(private) 40 | }, 41 | # Import chat history 42 | chat.history.import = function(new.chat.history) { 43 | 
genai.google.chat.history.import(private, new.chat.history) 44 | }, 45 | # Print chat history 46 | chat.history.print = function(from = 1, to = NULL) { 47 | genai.google.chat.history.print(private, from, to) 48 | }, 49 | # Reset chat history 50 | chat.history.reset = function() { 51 | genai.google.chat.history.reset(private) 52 | }, 53 | # Save chat history 54 | chat.history.save = function(file.name) { 55 | genai.google.chat.history.save(private, file.name) 56 | }, 57 | # Text generation 58 | txt = function(prompt, 59 | verbose = FALSE, 60 | config = list()) { 61 | genai.google.txt(private, 62 | prompt, 63 | verbose, 64 | config) 65 | }, 66 | # Text generation with image as input 67 | txt.image = function(prompt, 68 | image.path, 69 | verbose = FALSE, 70 | config = list()) { 71 | genai.google.txt.image(private, 72 | prompt, 73 | image.path, 74 | verbose, 75 | config) 76 | } 77 | ), 78 | private = list( 79 | api = NULL, 80 | model = NULL, 81 | version = NULL, 82 | proxy = FALSE, 83 | chat.history = listenv::listenv(contents = list()) 84 | ) 85 | ) 86 | -------------------------------------------------------------------------------- /R/src/R/genai.google.txt.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.txt = function(genai.google.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | harm.category.dangerous.content = NULL, 7 | harm.category.harassment = NULL, 8 | harm.category.hate.speech = NULL, 9 | harm.category.sexually.explicit = NULL, 10 | stop.sequences = NULL, 11 | max.output.tokens = NULL, 12 | temperature = NULL, 13 | top.p = NULL, 14 | top.k = NULL 15 | )) { 16 | # Check configurations 17 | genai.google.config.check(config) 18 | 19 | # Get api url 20 | api.url = paste0( 21 | "https://generativelanguage.googleapis.com/", 22 | genai.google.object$version, 23 | "/models/", 24 | genai.google.object$model, 25 | ":generateContent?key=", 26 | genai.google.object$api 27 | ) 28 | if (genai.google.object$proxy) { 29 | api.url = paste0( 30 | "https://api.genai.gd.edu.kg/google/", 31 | genai.google.object$version, 32 | "/models/", 33 | genai.google.object$model, 34 | ":generateContent?key=", 35 | genai.google.object$api 36 | ) 37 | } 38 | 39 | # Initialize the request body 40 | requestBody = list(contents = list(parts = list(text = prompt))) 41 | 42 | # Get the safety settings 43 | safety.setting = genai.google.safety.setting(config) 44 | if (length(safety.setting) > 0) { 45 | requestBody$safetySettings = safety.setting 46 | } 47 | 48 | # Get the generation configuration 49 | generation.config = genai.google.generation.config(config) 50 | if (length(generation.config) > 0) { 51 | requestBody$generationConfig = generation.config 52 | } 53 | 54 | # Convert the request as JSON format 55 | requestBodyJSON = jsonlite::toJSON(requestBody, 56 | auto_unbox = TRUE, 57 | pretty = TRUE) 58 | 59 | # Send request and get response 60 | response = httr::POST( 61 | url = api.url, 62 | body = requestBodyJSON, 63 | httr::add_headers("Content-Type" = "application/json") 64 | ) 65 | responseJSON = httr::content(response, "parsed") 66 | 67 | # Check for harmful prompt 68 | if (!is.null(responseJSON$promptFeedback$blockReason)) { 69 | stop("Invalid prompt. 
The prompt may contain harmful content.") 70 | } 71 | 72 | # Check for response error 73 | if (!is.null(responseJSON$error)) { 74 | stop(responseJSON$error$message) 75 | } 76 | 77 | # Print detail if verbose is TRUE 78 | if (verbose) { 79 | genai.google.formated.confguration(requestBody, prompt) 80 | cat("\n") 81 | } 82 | 83 | # Get the response text 84 | return (responseJSON$candidates[[1]]$content$parts[[1]]$text) 85 | } 86 | -------------------------------------------------------------------------------- /R/src/man/genai.moonshot.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/genai.moonshot.R 3 | \name{genai.moonshot} 4 | \alias{genai.moonshot} 5 | \title{Moonshot AI Object Creation} 6 | \usage{ 7 | genai.moonshot(api, model, version, proxy = FALSE) 8 | } 9 | \arguments{ 10 | \item{api}{A character string representing the API key required for accessing the model.} 11 | 12 | \item{model}{A character string representing the specific model.} 13 | 14 | \item{version}{A character string representing the version of the chosen model.} 15 | 16 | \item{proxy}{Optional. Default to \code{FALSE}. A boolean value indicating whether to use a 17 | proxy for accessing the API URL. If your local internet cannot access the API, set this 18 | parameter to \code{TRUE}.} 19 | } 20 | \value{ 21 | If successful, the function returns a moonshot object. If the API response 22 | indicates an error, the function halts execution and provides an error message. 23 | } 24 | \description{ 25 | This function establishes a connection to a Moonshot AI model by providing essential parameters. 26 | } 27 | \details{ 28 | Providing accurate and valid information for each argument is crucial for successful text 29 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 30 | error message based on the API feedback. To view all supported generative AI models, use the 31 | function \code{\link{available.models}}. 32 | 33 | Please refer to \code{https://platform.moonshot.cn/console/api-keys} for the API key. 34 | 35 | The API proxy service is designed to address the needs of users who hold a valid API key but find 36 | themselves outside their home countries or regions due to reasons such as travel, work, or study 37 | in locations that may not be covered by certain Generative AI service providers. 38 | 39 | Please be aware that although GenAI and its affiliated organization - GitData - do not gather user 40 | information through this service, the server providers for GenAI API proxy service and the Generative 41 | AI service providers may engage in such data collection. Furthermore, the proxy service cannot 42 | guarantee a consistent connection speed. Users are strongly encouraged to utilize this service 43 | with caution and at their own discretion.
44 | } 45 | \examples{ 46 | \dontrun{ 47 | # Please change YOUR_MOONSHOT_API to your own API key of Moonshot AI 48 | Sys.setenv(MOONSHOT_API = "YOUR_MOONSHOT_API") 49 | 50 | all.models = available.models() \%>\% print() 51 | 52 | # Create a moonshot object 53 | moonshot = genai.moonshot(api = Sys.getenv("MOONSHOT_API"), 54 | model = all.models$moonshot$model[1], 55 | version = all.models$moonshot$version[1], 56 | proxy = FALSE) 57 | } 58 | 59 | } 60 | \seealso{ 61 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 62 | 63 | \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} 64 | 65 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_moonshot.ipynb}{Live Demo in Colab} 66 | } 67 | -------------------------------------------------------------------------------- /R/src/man/genai.google.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/genai.google.R 3 | \name{genai.google} 4 | \alias{genai.google} 5 | \title{Google Generative AI Object Creation} 6 | \usage{ 7 | genai.google(api, model, version, proxy = FALSE) 8 | } 9 | \arguments{ 10 | \item{api}{A character string representing the API key required for accessing the model.} 11 | 12 | \item{model}{A character string representing the specific model.} 13 | 14 | \item{version}{A character string representing the version of the chosen model.} 15 | 16 | \item{proxy}{Optional. Default to \code{FALSE}. A boolean value indicating whether to use a 17 | proxy for accessing the API URL. If your local internet cannot access the API, set this 18 | parameter to \code{TRUE}.} 19 | } 20 | \value{ 21 | If successful, the function returns a Google generative AI object. If the API response 22 | indicates an error, the function halts execution and provides an error message. 23 | } 24 | \description{ 25 | This function establishes a connection to a Google generative AI model by providing essential 26 | parameters. 27 | } 28 | \details{ 29 | Providing accurate and valid information for each argument is crucial for successful text 30 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 31 | error message based on the API feedback. To view all supported generative AI models, use the 32 | function \code{\link{available.models}}. 33 | 34 | Please refer to \code{https://ai.google.dev/tutorials/setup} for the API key. 35 | 36 | The API proxy service is designed to address the needs of users who hold a valid API key but find 37 | themselves outside their home countries or regions due to reasons such as travel, work, or study 38 | in locations that may not be covered by certain Generative AI service providers. 39 | 40 | Please be aware that although GenAI and its affiliated organization - GitData - do not gather user 41 | information through this service, the server providers for GenAI API proxy service and the Generative 42 | AI service providers may engage in such data collection. Furthermore, the proxy service cannot 43 | guarantee a consistent connection speed. Users are strongly encouraged to utilize this service 44 | with caution and at their own discretion.
45 | } 46 | \examples{ 47 | \dontrun{ 48 | # Please change YOUR_GOOGLE_API to your own API key of Google Generative AI 49 | Sys.setenv(GOOGLE_API = "YOUR_GOOGLE_API") 50 | 51 | all.models = available.models() \%>\% print() 52 | 53 | # Create a Google Generative AI object 54 | google = genai.google(api = Sys.getenv("GOOGLE_API"), 55 | model = all.models$google$model[1], 56 | version = all.models$google$version[1], 57 | proxy = FALSE) 58 | } 59 | 60 | } 61 | \seealso{ 62 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 63 | 64 | \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} 65 | 66 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_google.ipynb}{Live Demo in Colab} 67 | } 68 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.class.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.class = R6Class( 3 | classname = "genai.moonshot", 4 | public = list( 5 | # Initialize method 6 | initialize = function(api, model, version, proxy = FALSE) { 7 | genai.moonshot.check(api, model, version, proxy) 8 | private$api = api 9 | private$model = model 10 | private$version = version 11 | private$proxy = proxy 12 | }, 13 | # Chat generation 14 | chat = function(prompt, 15 | verbose = FALSE, 16 | config = list()) { 17 | genai.moonshot.chat(private, 18 | prompt, 19 | verbose, 20 | config) 21 | }, 22 | # Chat edit 23 | chat.edit = function(prompt, 24 | message.to.edit, 25 | verbose = FALSE, 26 | config = list()) { 27 | genai.moonshot.chat.edit(private, 28 | prompt, 29 | message.to.edit, 30 | verbose, 31 | config) 32 | }, 33 | # Convert chat history 34 | chat.history.convert = function(from.genai.object, to.genai.object) { 35 | genai.moonshot.chat.history.convert(private, to.genai.object) 36 | }, 37 | # Export chat history 38 | chat.history.export = function() { 39 | genai.moonshot.chat.history.export(private) 40 | }, 41 | # Import chat history 42 | chat.history.import = function(new.chat.history) { 43 | genai.moonshot.chat.history.import(private, new.chat.history) 44 | }, 45 | # Print chat history 46 | chat.history.print = function(from = 1, to = NULL) { 47 | genai.moonshot.chat.history.print(private, from, to) 48 | }, 49 | # Reset chat history 50 | chat.history.reset = function() { 51 | genai.moonshot.chat.history.reset(private) 52 | }, 53 | # Save chat history 54 | chat.history.save = function(file.name) { 55 | genai.moonshot.chat.history.save(private, file.name) 56 | }, 57 | # Text generation 58 | txt = function(prompt, 59 | verbose = FALSE, 60 | config = list()) { 61 | genai.moonshot.txt(private, 62 | prompt, 63 | verbose, 64 | config) 65 | } 66 | ), 67 | private = list( 68 | api = NULL, 69 | model = NULL, 70 | version = NULL, 71 | proxy = FALSE, 72 | chat.history = listenv::listenv( 73 | messages = list( 74 | list( 75 | role = "system", 76 | content = "You are Kimi, an Artificial Intelligence Assistant powered by Moonshot AI, and you are better at conversations in Chinese and English. You will provide users with safe, helpful and accurate answers. At the same time, you will reject answers to questions about terrorism, racism, pornography, etc. Moonshot AI is a proper noun and cannot be translated into other languages." 
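# This default system message seeds the chat history of every new moonshot object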
77 | ) 78 | ) 79 | ) 80 | ) 81 | ) 82 | -------------------------------------------------------------------------------- /Python/src/GeAI/txt/explain_code.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from GeAI._utils._moderation_openai import moderation_openai 4 | 5 | def explain_code(model_parameter, temperature, prompt, language = "Python"): 6 | if prompt == "" or prompt is None or not isinstance(prompt, str): 7 | raise ValueError("Prompt is not in correct format.") 8 | provider = model_parameter["provider"] 9 | if provider == "google": 10 | api_url = ( 11 | f"https://api.genai.gd.edu.kg/google/{model_parameter['version']}/models/" 12 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 13 | if model_parameter["proxy"] 14 | else f"https://generativelanguage.googleapis.com/{model_parameter['version']}/models/" 15 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 16 | ) 17 | request_body = { 18 | "contents": {"parts": [{"text": f"Explain the following {language} code\n# Code starts #\n{prompt}\n# Code ends #\n"}]}, 19 | "generationConfig": {"temperature": temperature} 20 | } 21 | headers = {"Content-Type": "application/json"} 22 | 23 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 24 | response = requests.post(api_url, data=request_body_json, headers=headers) 25 | response_json = response.json() 26 | 27 | if "error" in response_json: 28 | raise ValueError(response_json["error"]["message"]) 29 | 30 | if "blockReason" in response_json.get("promptFeedback", {}): 31 | raise ValueError("The prompt may contain harmful content.") 32 | 33 | return str(response_json["candidates"][0]["content"]["parts"][0]["text"]) 34 | elif provider == "openai": 35 | moderation_openai(model_parameter, prompt) 36 | api_url = ( 37 | f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/chat/completions" 38 | if model_parameter["proxy"] 39 | else f"https://api.openai.com/{model_parameter['version']}/chat/completions" 40 | ) 41 | request_body = { 42 | "model": model_parameter["model"], 43 | "messages": [ 44 | {"role": "system", "content": "You are a helpful assistant."}, 45 | {"role": "user", "content": f"Explain the following {language} code\n# Code starts #\n{prompt}\n# Code ends #\n"} 46 | ], 47 | "temperature": temperature 48 | } 49 | headers = { 50 | "Content-Type": "application/json", 51 | "Authorization": f"Bearer {model_parameter['api']}" 52 | } 53 | 54 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 55 | response = requests.post(api_url, data=request_body_json, headers=headers) 56 | response_json = response.json() 57 | 58 | if "error" in response_json: 59 | raise ValueError(response_json["error"]["message"]) 60 | 61 | return str(response_json["choices"][0]["message"]["content"]) 62 | else: 63 | raise ValueError("Invalid provider") -------------------------------------------------------------------------------- /Python/src/GeAI/txt/fix_grammar.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from GeAI._utils._moderation_openai import moderation_openai 4 | 5 | def fix_grammar(model_parameter, temperature, prompt): 6 | if prompt == "" or prompt is None or not isinstance(prompt, str): 7 | raise ValueError("Prompt is not in correct format.") 8 | provider = model_parameter["provider"] 9 | if provider == "google": 10 | 
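# For Google models, the text to fix is wrapped between "# Text starts #" and
# "# Text ends #" markers so the model rewrites only the quoted passage.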
api_url = ( 11 | f"https://api.genai.gd.edu.kg/google/{model_parameter['version']}/models/" 12 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 13 | if model_parameter["proxy"] 14 | else f"https://generativelanguage.googleapis.com/{model_parameter['version']}/models/" 15 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 16 | ) 17 | request_body = { 18 | "contents": {"parts": [{"text": f"Rewrite the following text and fix any grammar issues:\n # Text starts #\n{prompt}\n# Text ends #\n"}]}, 19 | "generationConfig": {"temperature": temperature} 20 | } 21 | headers = {"Content-Type": "application/json"} 22 | 23 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 24 | response = requests.post(api_url, data=request_body_json, headers=headers) 25 | response_json = response.json() 26 | 27 | if "error" in response_json: 28 | raise ValueError(response_json["error"]["message"]) 29 | 30 | if "blockReason" in response_json.get("promptFeedback", {}): 31 | raise ValueError("The prompt may contain harmful content.") 32 | 33 | return str(response_json["candidates"][0]["content"]["parts"][0]["text"]) 34 | elif provider == "openai": 35 | moderation_openai(model_parameter, prompt) 36 | api_url = ( 37 | f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/chat/completions" 38 | if model_parameter["proxy"] 39 | else f"https://api.openai.com/{model_parameter['version']}/chat/completions" 40 | ) 41 | request_body = { 42 | "model": model_parameter["model"], 43 | "messages": [ 44 | {"role": "system", "content": "You are a helpful assistant."}, 45 | {"role": "user", "content": f"Rewrite the following text and fix any grammar issues:\n # Text starts #\n{prompt}\n# Text ends #\n"} 46 | ], 47 | "temperature": temperature 48 | } 49 | headers = { 50 | "Content-Type": "application/json", 51 | "Authorization": f"Bearer {model_parameter['api']}" 52 | } 53 | 54 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 55 | response = requests.post(api_url, data=request_body_json, headers=headers) 56 | response_json = response.json() 57 | 58 | if "error" in response_json: 59 | raise ValueError(response_json["error"]["message"]) 60 | 61 | return str(response_json["choices"][0]["message"]["content"]) 62 | else: 63 | raise ValueError("Invalid provider") -------------------------------------------------------------------------------- /Python/src/GeAI/txt/optimize_code.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from GeAI._utils._moderation_openai import moderation_openai 4 | 5 | def optimize_code(model_parameter, temperature, prompt, goal, language = "Python"): 6 | if prompt == "" or prompt is None or not isinstance(prompt, str): 7 | raise ValueError("Prompt is not in correct format.") 8 | provider = model_parameter["provider"] 9 | if provider == "google": 10 | api_url = ( 11 | f"https://api.genai.gd.edu.kg/google/{model_parameter['version']}/models/" 12 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 13 | if model_parameter["proxy"] 14 | else f"https://generativelanguage.googleapis.com/{model_parameter['version']}/models/" 15 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 16 | ) 17 | request_body = { 18 | "contents": {"parts": [{"text": f"Optimize the following {language} code.\nThe goal is: {goal}\n# Code starts #\n{prompt}\n# Code ends #\n"}]}, 19 | 
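# The sampling temperature is forwarded to Google under generationConfig.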
"generationConfig": {"temperature": temperature} 20 | } 21 | headers = {"Content-Type": "application/json"} 22 | 23 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 24 | response = requests.post(api_url, data=request_body_json, headers=headers) 25 | response_json = response.json() 26 | 27 | if "error" in response_json: 28 | raise ValueError(response_json["error"]["message"]) 29 | 30 | if "blockReason" in response_json.get("promptFeedback", {}): 31 | raise ValueError("The prompt may contain harmful content.") 32 | 33 | return str(response_json["candidates"][0]["content"]["parts"][0]["text"]) 34 | elif provider == "openai": 35 | moderation_openai(model_parameter, prompt) 36 | api_url = ( 37 | f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/chat/completions" 38 | if model_parameter["proxy"] 39 | else f"https://api.openai.com/{model_parameter['version']}/chat/completions" 40 | ) 41 | request_body = { 42 | "model": model_parameter["model"], 43 | "messages": [ 44 | {"role": "system", "content": "You are a helpful assistant."}, 45 | {"role": "user", "content": f"Optimize the following {language} code.\nThe goal is: {goal}\n# Code starts #\n{prompt}\n# Code ends #\n"} 46 | ], 47 | "temperature": temperature 48 | } 49 | headers = { 50 | "Content-Type": "application/json", 51 | "Authorization": f"Bearer {model_parameter['api']}" 52 | } 53 | 54 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 55 | response = requests.post(api_url, data=request_body_json, headers=headers) 56 | response_json = response.json() 57 | 58 | if "error" in response_json: 59 | raise ValueError(response_json["error"]["message"]) 60 | 61 | return str(response_json["choices"][0]["message"]["content"]) 62 | else: 63 | raise ValueError("Invalid provider") -------------------------------------------------------------------------------- /R/src/R/genai.openai.txt.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.txt = function(genai.openai.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | frequency.penalty = NULL, 7 | logit.bias = NULL, 8 | logprobs = NULL, 9 | top.logprobs = NULL, 10 | max.tokens = NULL, 11 | presence.penalty = NULL, 12 | response.format = NULL, 13 | seed = NULL, 14 | stop = NULL, 15 | temperature = NULL, 16 | top.p = NULL, 17 | tools = NULL, 18 | tool.choice = NULL, 19 | user = NULL 20 | )) { 21 | # Check configurations 22 | genai.openai.config.check(config) 23 | 24 | # Get api url 25 | api.url = paste0( 26 | "https://api.openai.com/", 27 | genai.openai.object$version, 28 | "/chat/completions" 29 | ) 30 | if (genai.openai.object$proxy) { 31 | api.url = paste0( 32 | "https://api.genai.gd.edu.kg/openai/", 33 | genai.openai.object$version, 34 | "/chat/completions" 35 | ) 36 | } 37 | 38 | # Initialize the request body 39 | requestBody = list( 40 | model = genai.openai.object$model, 41 | messages = list( 42 | list(role = "system", 43 | content = "You are a helpful assistant."), 44 | list(role = "user", 45 | content = prompt) 46 | ) 47 | ) 48 | 49 | # Get the generation configuration 50 | if (length(config) > 0) { 51 | requestBody = genai.openai.generation.config(requestBody, config) 52 | } 53 | 54 | # Convert the request as JSON format 55 | requestBodyJSON = jsonlite::toJSON(requestBody, 56 | auto_unbox = TRUE, 57 | pretty = TRUE) 58 | 59 | # Send request and get response 60 | response = httr::POST( 61 | url = api.url, 62 | body = requestBodyJSON, 63 | 
httr::add_headers( 64 | "Content-Type" = "application/json", 65 | "Authorization" = paste("Bearer", genai.openai.object$api) 66 | ) 67 | ) 68 | if (!is.null(genai.openai.object$organization.id) && 69 | is.character(genai.openai.object$organization.id)) { 70 | response = httr::POST( 71 | url = api.url, 72 | body = requestBodyJSON, 73 | httr::add_headers( 74 | "Content-Type" = "application/json", 75 | "Authorization" = paste("Bearer", genai.openai.object$api), 76 | "OpenAI-Organization" = genai.openai.object$organization.id 77 | ) 78 | ) 79 | } 80 | responseJSON = httr::content(response, "parsed") 81 | 82 | # Check for response error 83 | if (!is.null(responseJSON$error)) { 84 | stop(responseJSON$error$message) 85 | } 86 | 87 | # Print detail if verbose is TRUE 88 | if (verbose) { 89 | genai.openai.formated.confguration(requestBody, prompt) 90 | cat("\n") 91 | } 92 | 93 | # Get the response text 94 | return (responseJSON$choices[[1]]$message$content) 95 | } 96 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.R: -------------------------------------------------------------------------------- 1 | #' Moonshot AI Object Creation 2 | #' 3 | #' This function establishes a connection to a Moonshot AI model by providing essential parameters. 4 | #' 5 | #' @param api A character string representing the API key required for accessing the model. 6 | #' @param model A character string representing the specific model. 7 | #' @param version A character string representing the version of the chosen model. 8 | #' @param proxy Optional. Default to \code{FALSE}. A boolean value indicating whether to use a 9 | #' proxy for accessing the API URL. If your local internet cannot access the API, set this 10 | #' parameter to \code{TRUE}. 11 | #' 12 | #' @return If successful, the function returns a moonshot object. If the API response 13 | #' indicates an error, the function halts execution and provides an error message. 14 | #' 15 | #' @details Providing accurate and valid information for each argument is crucial for successful text 16 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 17 | #' error message based on the API feedback. To view all supported generative AI models, use the 18 | #' function \code{\link{available.models}}. 19 | #' 20 | #' Please refer to \code{https://platform.moonshot.cn/console/api-keys} for the API key. 21 | #' 22 | #' The API proxy service is designed to address the needs of users who hold a valid API key but find 23 | #' themselves outside their home countries or regions due to reasons such as travel, work, or study 24 | #' in locations that may not be covered by certain Generative AI service providers. 25 | #' 26 | #' Please be aware that although GenAI and its affiliated organization - GitData - do not gather user 27 | #' information through this service, the server providers for GenAI API proxy service and the Generative 28 | #' AI service providers may engage in such data collection. Furthermore, the proxy service cannot 29 | #' guarantee a consistent connection speed. Users are strongly encouraged to utilize this service 30 | #' with caution and at their own discretion.
31 | #' 32 | #' @seealso 33 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 34 | #' 35 | #' \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} 36 | #' 37 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_moonshot.ipynb}{Live Demo in Colab} 38 | #' 39 | #' @examples 40 | #' \dontrun{ 41 | #' # Please change YOUR_MOONSHOT_API to your own API key of Moonshot AI 42 | #' Sys.setenv(MOONSHOT_API = "YOUR_MOONSHOT_API") 43 | #' 44 | #' all.models = available.models() %>% print() 45 | #' 46 | #' # Create a moonshot object 47 | #' moonshot = genai.moonshot(api = Sys.getenv("MOONSHOT_API"), 48 | #' model = all.models$moonshot$model[1], 49 | #' version = all.models$moonshot$version[1], 50 | #' proxy = FALSE) 51 | #' } 52 | #' 53 | #' @export 54 | genai.moonshot = function(api, 55 | model, 56 | version, 57 | proxy = FALSE) { 58 | return (genai.moonshot.class$new(api, 59 | model, 60 | version, 61 | proxy)) 62 | } 63 | -------------------------------------------------------------------------------- /R/src/R/genai.google.R: -------------------------------------------------------------------------------- 1 | #' Google Generative AI Object Creation 2 | #' 3 | #' This function establishes a connection to a Google generative AI model by providing essential 4 | #' parameters. 5 | #' 6 | #' @param api A character string representing the API key required for accessing the model. 7 | #' @param model A character string representing the specific model. 8 | #' @param version A character string representing the version of the chosen model. 9 | #' @param proxy Optional. Default to \code{FALSE}. A boolean value indicating whether to use a 10 | #' proxy for accessing the API URL. If your local internet cannot access the API, set this 11 | #' parameter to \code{TRUE}. 12 | #' 13 | #' @return If successful, the function returns a Google generative AI object. If the API response 14 | #' indicates an error, the function halts execution and provides an error message. 15 | #' 16 | #' @details Providing accurate and valid information for each argument is crucial for successful text 17 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 18 | #' error message based on the API feedback. To view all supported generative AI models, use the 19 | #' function \code{\link{available.models}}. 20 | #' 21 | #' Please refer to \code{https://ai.google.dev/tutorials/setup} for the API key. 22 | #' 23 | #' The API proxy service is designed to address the needs of users who hold a valid API key but find 24 | #' themselves outside their home countries or regions due to reasons such as travel, work, or study 25 | #' in locations that may not be covered by certain Generative AI service providers. 26 | #' 27 | #' Please be aware that although GenAI and its affiliated organization - GitData - do not gather user 28 | #' information through this service, the server providers for GenAI API proxy service and the Generative 29 | #' AI service providers may engage in such data collection. Furthermore, the proxy service cannot 30 | #' guarantee a consistent connection speed. Users are strongly encouraged to utilize this service 31 | #' with caution and at their own discretion.
32 | #' 33 | #' @seealso 34 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 35 | #' 36 | #' \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} 37 | #' 38 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_google.ipynb}{Live Demo in Colab} 39 | #' 40 | #' @examples 41 | #' \dontrun{ 42 | #' # Please change YOUR_GOOGLE_API to your own API key of Google Generative AI 43 | #' Sys.setenv(GOOGLE_API = "YOUR_GOOGLE_API") 44 | #' 45 | #' all.models = available.models() %>% print() 46 | #' 47 | #' # Create a Google Generative AI object 48 | #' google = genai.google(api = Sys.getenv("GOOGLE_API"), 49 | #' model = all.models$google$model[1], 50 | #' version = all.models$google$version[1], 51 | #' proxy = FALSE) 52 | #' } 53 | #' 54 | #' @export 55 | genai.google = function(api, 56 | model, 57 | version, 58 | proxy = FALSE) { 59 | return (genai.google.class$new(api, 60 | model, 61 | version, 62 | proxy)) 63 | } 64 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat = function(genai.moonshot.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | max.tokens = NULL, 7 | temperature = NULL, 8 | top.p = NULL 9 | )) { 10 | # Check configurations 11 | genai.moonshot.config.check(config) 12 | 13 | # Get api url 14 | api.url = paste0( 15 | "https://api.moonshot.cn/", 16 | genai.moonshot.object$version, 17 | "/chat/completions" 18 | ) 19 | if (genai.moonshot.object$proxy) { 20 | api.url = paste0( 21 | "https://api.genai.gd.edu.kg/moonshot/", 22 | genai.moonshot.object$version, 23 | "/chat/completions" 24 | ) 25 | } 26 | 27 | # Initialize the request body 28 | requestNewContent = list(list(role = "user", 29 | content = prompt)) 30 | requestBody = as.list(genai.moonshot.object$chat.history) 31 | requestBody$messages = append(requestBody$messages, requestNewContent) 32 | 33 | # Get the generation configuration 34 | if (length(config) > 0) { 35 | requestBody = genai.moonshot.generation.config(requestBody, config) 36 | } 37 | 38 | # Convert the request as JSON format 39 | requestBodyJSON = jsonlite::toJSON(c(model = genai.moonshot.object$model, 40 | requestBody), 41 | auto_unbox = TRUE, 42 | pretty = TRUE) 43 | 44 | # Send request and get response 45 | response = httr::POST( 46 | url = api.url, 47 | body = requestBodyJSON, 48 | httr::add_headers( 49 | "Content-Type" = "application/json", 50 | "Authorization" = paste("Bearer", genai.moonshot.object$api) 51 | ) 52 | ) 53 | responseJSON = httr::content(response, "parsed") 54 | 55 | # Check for response error 56 | if (!is.null(responseJSON$error)) { 57 | stop(responseJSON$error$message) 58 | } 59 | 60 | # Save the most recent prompt to the chat history 61 | genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages, 62 | requestNewContent) 63 | 64 | # Save the most recent model response to the chat history 65 | respondContent = list(list( 66 | role = "assistant", 67 | content = responseJSON$choices[[1]]$message$content 68 | )) 69 | genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages, 70 | respondContent) 71 | 72 | # Print detail if verbose is TRUE 73 | if (verbose) { 74 | genai.moonshot.formated.confguration(requestBody, prompt) 75 | 
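# Print a banner followed by the full chat history recorded so far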
cat("=============================================================================\n") 76 | cat(" Chat history \n") 77 | cat("-----------------------------------------------------------------------------\n\n") 78 | genai.moonshot.chat.history.print(genai.moonshot.object, from = 1, to = NULL) 79 | cat("=============================================================================\n\n\n\n") 80 | } 81 | 82 | # Get the response text 83 | return (responseJSON$choices[[1]]$message$content) 84 | } 85 | -------------------------------------------------------------------------------- /R/src/man/genai.openai.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/genai.openai.R 3 | \name{genai.openai} 4 | \alias{genai.openai} 5 | \title{OpenAI Object Creation} 6 | \usage{ 7 | genai.openai(api, model, version, proxy = FALSE, organization.id = NULL) 8 | } 9 | \arguments{ 10 | \item{api}{A character string representing the API key required for accessing the model.} 11 | 12 | \item{model}{A character string representing the specific model.} 13 | 14 | \item{version}{A character string representing the version of the chosen model.} 15 | 16 | \item{proxy}{Optional. Default to \code{FALSE}. A boolean value indicating whether to use a 17 | proxy for accessing the API URL. If your local internet cannot access the API, set this 18 | parameter to \code{TRUE}.} 19 | 20 | \item{organization.id}{Optional. Default to \code{NULL}. A character string representing the 21 | organization ID.} 22 | } 23 | \value{ 24 | If successful, the function returns an OpenAI object. If the API response 25 | indicates an error, the function halts execution and provides an error message. 26 | } 27 | \description{ 28 | This function establishes a connection to an OpenAI model by providing essential parameters. 29 | } 30 | \details{ 31 | Providing accurate and valid information for each argument is crucial for successful text 32 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 33 | error message based on the API feedback. To view all supported generative AI models, use the 34 | function \code{\link{available.models}}. 35 | 36 | Please refer to \code{https://platform.openai.com/api-keys} for the API key. Moreover, please refer 37 | to \code{https://platform.openai.com/account/organization} for the optional organization ID. 38 | 39 | The API proxy service is designed to address the needs of users who hold a valid API key but find 40 | themselves outside their home countries or regions due to reasons such as travel, work, or study 41 | in locations that may not be covered by certain Generative AI service providers. 42 | 43 | Please be aware that although GenAI and its affiliated organization - GitData - do not gather user 44 | information through this service, the server providers for GenAI API proxy service and the Generative 45 | AI service providers may engage in such data collection. Furthermore, the proxy service cannot 46 | guarantee a consistent connection speed. Users are strongly encouraged to utilize this service 47 | with caution and at their own discretion. 48 | } 49 | \examples{ 50 | \dontrun{ 51 | # Please change YOUR_OPENAI_API to your own API key of OpenAI 52 | Sys.setenv(OPENAI_API = "YOUR_OPENAI_API") 53 | 54 | # Oprional. 
Please change YOUR_OPENAI_ORG to your own organization ID for OpenAI 55 | Sys.setenv(OPENAI_ORG = "YOUR_OPENAI_ORG") 56 | 57 | all.models = available.models() \%>\% print() 58 | 59 | # Create an OpenAI object 60 | openai = genai.openai(api = Sys.getenv("OPENAI_API"), 61 | model = all.models$openai$model[1], 62 | version = all.models$openai$version[1], 63 | proxy = FALSE, 64 | organization.id = Sys.getenv("OPENAI_ORG")) 65 | } 66 | 67 | } 68 | \seealso{ 69 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 70 | 71 | \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} 72 | 73 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_openai.ipynb}{Live Demo in Colab} 74 | } 75 | -------------------------------------------------------------------------------- /R/src/man/img.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/img.R 3 | \name{img} 4 | \alias{img} 5 | \title{Image Generation with Text as the Input} 6 | \usage{ 7 | img(genai.object, prompt, verbose = FALSE, config = list()) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | 12 | \item{prompt}{A character string representing the query for image generation.} 13 | 14 | \item{verbose}{Optional. Default to \code{FALSE}. A boolean value determining whether or not to print 15 | out the details of the image request.} 16 | 17 | \item{config}{Optional. Default to \code{list()}. A list of configuration parameters for image generation.} 18 | } 19 | \value{ 20 | If successful, an image in \code{ggplot} format will be returned. If the API response indicates 21 | an error, the function halts execution and provides an error message. 22 | } 23 | \description{ 24 | This function establishes a connection to a generative AI model through a generative AI object. 25 | It generates an image response based on the provided prompt. 26 | } 27 | \details{ 28 | Providing accurate and valid information for each argument is crucial for successful image 29 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 30 | error message based on the API feedback. To view all supported generative AI models, use the 31 | function \code{\link{available.models}}. 32 | 33 | This function is only available when using OpenAI's models. 34 | 35 | For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to 36 | \code{https://platform.openai.com/docs/api-reference/images/create}. 37 | 38 | \itemize{ 39 | \item \code{quality} 40 | 41 | Optional. A character string. The quality of the image that will be generated. \code{hd} creates 42 | images with finer details and greater consistency across the image. 43 | 44 | \item \code{size} 45 | 46 | Optional. A character string. The size of the generated images. Must be one of \code{256x256}, 47 | \code{512x512}, or \code{1024x1024} for \code{dall-e-2}. Must be one of \code{1024x1024}, \code{1792x1024}, or 48 | \code{1024x1792} for \code{dall-e-3} models. 49 | 50 | \item \code{style} 51 | 52 | Optional. The style of the generated images. Must be one of \code{vivid} or \code{natural}. Vivid causes 53 | the model to lean towards generating hyper-real and dramatic images.
Natural causes the model to produce 54 | more natural, less hyper-real looking images. 55 | 56 | \item \code{user} 57 | 58 | Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor 59 | and detect abuse. 60 | } 61 | } 62 | \examples{ 63 | \dontrun{ 64 | # Assuming there is a GenAI object named 'genai.model' supporting this 65 | # function, please refer to the "Live Demo in Colab" above for real 66 | # examples. The following examples are just some basic guidelines. 67 | 68 | # Method 1 (recommended): use the pipe operator "\%>\%" 69 | generated.image = genai.model \%>\% 70 | img(prompt = "A very cute panda eating bamboo.") 71 | generated.image 72 | 73 | # Method 2: use the reference operator "$" 74 | generated.image = genai.model$img(prompt = "A very cute sea otter on a rock.") 75 | generated.image 76 | 77 | # Method 3: use the function img() directly 78 | generated.image = img(genai.object = genai.model, 79 | prompt = "A very cute bear.") 80 | generated.image 81 | } 82 | 83 | } 84 | \seealso{ 85 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 86 | 87 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/img.ipynb}{Live Demo in Colab} 88 | } 89 | -------------------------------------------------------------------------------- /Python/src/GeAI/txt/image.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from GeAI._utils._moderation_openai import moderation_openai 4 | from GeAI._utils._image_to_data_uri import image_to_data_uri 5 | 6 | def image(model_parameter, temperature, prompt, image_path): 7 | if prompt == "" or prompt is None or not isinstance(prompt, str): 8 | raise ValueError("Prompt is not in the correct format.") 9 | 10 | if image_path == "" or image_path is None or not isinstance(image_path, str): 11 | raise ValueError("image_path is not in the correct format.") 12 | 13 | if model_parameter["provider"] == "google": 14 | api_url = ( 15 | f"https://api.genai.gd.edu.kg/google/{model_parameter['version']}/models/" 16 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 17 | if model_parameter["proxy"] 18 | else f"https://generativelanguage.googleapis.com/{model_parameter['version']}/models/" 19 | f"{model_parameter['model']}:generateContent?key={model_parameter['api']}" 20 | ) 21 | 22 | extension, img_info = image_to_data_uri(image_path) 23 | if extension == "jpg": 24 | extension = "jpeg" 25 | 26 | request_body = { 27 | "contents": [ 28 | {"parts": [{"text": prompt}, {"inline_data": {"mime_type": f"image/{extension}", "data": img_info}}]} 29 | ], 30 | "generationConfig": {"temperature": temperature} 31 | } 32 | 33 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 34 | headers = {"Content-Type": "application/json"} 35 | response = requests.post(api_url, data=request_body_json, headers=headers) 36 | 37 | response_json = response.json() 38 | if "error" in response_json: 39 | raise ValueError(response_json["error"]["message"]) 40 | 41 | if "blockReason" in response_json.get("promptFeedback", {}): 42 | raise ValueError("The prompt may contain harmful content.") 43 | 44 | return str(response_json["candidates"][0]["content"]["parts"][0]["text"]) 45 | 46 | elif model_parameter["provider"] == "openai": 47 | moderation_openai(model_parameter, prompt) 48 | api_url = ( 49 | 
f"https://api.genai.gd.edu.kg/openai/{model_parameter['version']}/chat/completions" 50 | if model_parameter["proxy"] 51 | else f"https://api.openai.com/{model_parameter['version']}/chat/completions" 52 | ) 53 | 54 | extension, img_info = image_to_data_uri(image_path) 55 | request_body = { 56 | "model": model_parameter["model"], 57 | "messages": [ 58 | {"role": "user", "content": [ 59 | {"type": "text", "text": prompt}, 60 | {"type": "image_url", "image_url": {"url": f"data:image/{extension};base64,{img_info}"}}] 61 | } 62 | ], 63 | "temperature": temperature, 64 | "max_tokens": 4096 65 | } 66 | 67 | request_body_json = json.dumps(request_body, separators=(",", ":"), ensure_ascii=False) 68 | headers = { 69 | "Content-Type": "application/json", 70 | "Authorization": f"Bearer {model_parameter['api']}" 71 | } 72 | 73 | response = requests.post(api_url, data=request_body_json, headers=headers) 74 | 75 | response_json = response.json() 76 | if "error" in response_json: 77 | raise ValueError(response_json["error"]["message"]) 78 | 79 | return str(response_json["choices"][0]["message"]["content"]) 80 | else: 81 | raise ValueError("Invalid provider") -------------------------------------------------------------------------------- /R/src/R/genai.openai.class.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.class = R6Class( 3 | classname = "genai.openai", 4 | public = list( 5 | # Initialize method 6 | initialize = function(api, model, version, proxy = FALSE, organization.id = NULL) { 7 | genai.openai.check(api, model, version, proxy, organization.id) 8 | private$api = api 9 | private$model = model 10 | private$version = version 11 | private$proxy = proxy 12 | if (!is.null(organization.id) && is.character(organization.id)) { 13 | private$organization.id = organization.id 14 | } 15 | }, 16 | # Chat generation 17 | chat = function(prompt, 18 | verbose = FALSE, 19 | config = list()) { 20 | genai.openai.chat(private, 21 | prompt, 22 | verbose, 23 | config) 24 | }, 25 | # Chat edit 26 | chat.edit = function(prompt, 27 | message.to.edit, 28 | verbose = FALSE, 29 | config = list()) { 30 | genai.openai.chat.edit(private, 31 | prompt, 32 | message.to.edit, 33 | verbose, 34 | config) 35 | }, 36 | # Convert chat history 37 | chat.history.convert = function(from.genai.object, to.genai.object) { 38 | genai.openai.chat.history.convert(private, to.genai.object) 39 | }, 40 | # Export chat history 41 | chat.history.export = function() { 42 | genai.openai.chat.history.export(private) 43 | }, 44 | # Import chat history 45 | chat.history.import = function(new.chat.history) { 46 | genai.openai.chat.history.import(private, new.chat.history) 47 | }, 48 | # Print chat history 49 | chat.history.print = function(from = 1, to = NULL) { 50 | genai.openai.chat.history.print(private, from, to) 51 | }, 52 | # Reset chat history 53 | chat.history.reset = function() { 54 | genai.openai.chat.history.reset(private) 55 | }, 56 | # Save chat history 57 | chat.history.save = function(file.name) { 58 | genai.openai.chat.history.save(private, file.name) 59 | }, 60 | # Image generation 61 | img = function(prompt, 62 | verbose = FALSE, 63 | config = list()) { 64 | genai.openai.img(private, 65 | prompt, 66 | verbose, 67 | config) 68 | }, 69 | # Text generation 70 | txt = function(prompt, 71 | verbose = FALSE, 72 | config = list()) { 73 | genai.openai.txt(private, 74 | prompt, 75 | verbose, 76 | config) 77 | }, 78 | # Text generation with image as input 79 | txt.image = 
function(prompt, 80 | image.path, 81 | verbose = FALSE, 82 | config = list()) { 83 | genai.openai.txt.image(private, 84 | prompt, 85 | image.path, 86 | verbose, 87 | config) 88 | } 89 | ), 90 | private = list( 91 | api = NULL, 92 | organization.id = NULL, 93 | model = NULL, 94 | version = NULL, 95 | proxy = FALSE, 96 | chat.history = listenv::listenv( 97 | messages = list( 98 | list( 99 | role = "system", 100 | content = "You are a helpful assistant." 101 | ) 102 | ) 103 | ) 104 | ) 105 | ) 106 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.R: -------------------------------------------------------------------------------- 1 | #' OpenAI Object Creation 2 | #' 3 | #' This function establishes a connection to an OpenAI model by providing essential parameters. 4 | #' 5 | #' @param api A character string representing the API key required for accessing the model. 6 | #' @param model A character string representing the specific model. 7 | #' @param version A character string representing the version of the chosen model. 8 | #' @param proxy Optional. Default to \code{FALSE}. A boolean value indicating whether to use a 9 | #' proxy for accessing the API URL. If your local internet cannot access the API, set this 10 | #' parameter to \code{TRUE}. 11 | #' @param organization.id Optional. Default to \code{NULL}. A character string representing the 12 | #' organization ID. 13 | #' 14 | #' @return If successful, the function returns an OpenAI object. If the API response 15 | #' indicates an error, the function halts execution and provides an error message. 16 | #' 17 | #' @details Providing accurate and valid information for each argument is crucial for successful text 18 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 19 | #' error message based on the API feedback. To view all supported generative AI models, use the 20 | #' function \code{\link{available.models}}. 21 | #' 22 | #' Please refer to \code{https://platform.openai.com/api-keys} for the API key. Moreover, please refer 23 | #' to \code{https://platform.openai.com/account/organization} for the optional organization ID. 24 | #' 25 | #' The API proxy service is designed to address the needs of users who hold a valid API key but find 26 | #' themselves outside their home countries or regions due to reasons such as travel, work, or study 27 | #' in locations that may not be covered by certain Generative AI service providers. 28 | #' 29 | #' Please be aware that although GenAI and its affiliated organization - GitData - do not gather user 30 | #' information through this service, the server providers for GenAI API proxy service and the Generative 31 | #' AI service providers may engage in such data collection. Furthermore, the proxy service cannot 32 | #' guarantee a consistent connection speed. Users are strongly encouraged to utilize this service 33 | #' with caution and at their own discretion. 
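# A minimal sketch of what the proxy option above actually changes: only the
# host of the request URL is swapped, mirroring the URL construction used
# throughout this package. This snippet is illustrative only; the "v1"
# version string and the OPENAI_API environment variable are assumptions
# for the example, not part of the exported API.
version = "v1"
direct.url = paste0("https://api.openai.com/", version, "/models")
proxied.url = paste0("https://api.genai.gd.edu.kg/openai/", version, "/models")
response = httr::GET(
  url = proxied.url,
  httr::add_headers(
    "Content-Type" = "application/json",
    "Authorization" = paste("Bearer", Sys.getenv("OPENAI_API"))
  )
)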
34 | #' 35 | #' @seealso 36 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 37 | #' 38 | #' \href{https://genai.gd.edu.kg/api/}{GenAI - Generative Artificial Intelligence API Proxy Service} 39 | #' 40 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/genai_openai.ipynb}{Live Demo in Colab} 41 | #' 42 | #' @examples 43 | #' \dontrun{ 44 | #' # Please change YOUR_OPENAI_API to your own API key of OpenAI 45 | #' Sys.setenv(OPENAI_API = "YOUR_OPENAI_API") 46 | #' 47 | #' # Optional. Please change YOUR_OPENAI_ORG to your own organization ID for OpenAI 48 | #' Sys.setenv(OPENAI_ORG = "YOUR_OPENAI_ORG") 49 | #' 50 | #' all.models = available.models() %>% print() 51 | #' 52 | #' # Create an OpenAI object 53 | #' openai = genai.openai(api = Sys.getenv("OPENAI_API"), 54 | #' model = all.models$openai$model[1], 55 | #' version = all.models$openai$version[1], 56 | #' proxy = FALSE, 57 | #' organization.id = Sys.getenv("OPENAI_ORG")) 58 | #' } 59 | #' 60 | #' @export 61 | genai.openai = function(api, 62 | model, 63 | version, 64 | proxy = FALSE, 65 | organization.id = NULL) { 66 | return (genai.openai.class$new(api, 67 | model, 68 | version, 69 | proxy, 70 | organization.id)) 71 | } 72 | -------------------------------------------------------------------------------- /R/src/R/img.R: -------------------------------------------------------------------------------- 1 | #' Image Generation with Text as the Input 2 | #' 3 | #' This function establishes a connection to a generative AI model through a generative AI object. 4 | #' It generates an image response based on the provided prompt. 5 | #' 6 | #' @param genai.object A generative AI object containing necessary and correct information. 7 | #' @param prompt A character string representing the query for image generation. 8 | #' @param verbose Optional. Default to \code{FALSE}. A boolean value determining whether or not to print 9 | #' out the details of the image request. 10 | #' @param config Optional. Default to \code{list()}. A list of configuration parameters for image generation. 11 | #' 12 | #' @return If successful, an image in \code{ggplot} format will be returned. If the API response indicates 13 | #' an error, the function halts execution and provides an error message. 14 | #' 15 | #' @details Providing accurate and valid information for each argument is crucial for successful image 16 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an 17 | #' error message based on the API feedback. To view all supported generative AI models, use the 18 | #' function \code{\link{available.models}}. 19 | #' 20 | #' This function is only available when using OpenAI's models. 21 | #' 22 | #' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to 23 | #' \code{https://platform.openai.com/docs/api-reference/images/create}. 24 | #' 25 | #' \itemize{ 26 | #' \item \code{quality} 27 | #' 28 | #' Optional. A character string. The quality of the image that will be generated. \code{hd} creates 29 | #' images with finer details and greater consistency across the image. 30 | #' 31 | #' \item \code{size} 32 | #' 33 | #' Optional. A character string. The size of the generated images. Must be one of \code{256x256}, 34 | #' \code{512x512}, or \code{1024x1024} for \code{dall-e-2}. Must be one of \code{1024x1024}, \code{1792x1024}, or 35 | #' \code{1024x1792} for \code{dall-e-3} models.
36 | #' 37 | #' \item \code{style} 38 | #' 39 | #' Optional. The style of the generated images. Must be one of \code{vivid} or \code{natural}. Vivid causes 40 | #' the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce 41 | #' more natural, less hyper-real looking images. 42 | #' 43 | #' \item \code{user} 44 | #' 45 | #' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor 46 | #' and detect abuse. 47 | #' } 48 | #' 49 | #' @seealso 50 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation} 51 | #' 52 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/img.ipynb}{Live Demo in Colab} 53 | #' 54 | #' @examples 55 | #' \dontrun{ 56 | #' # Assuming there is a GenAI object named 'genai.model' supporting this 57 | #' # function, please refer to the "Live Demo in Colab" above for real 58 | #' # examples. The following examples are just some basic guidelines. 59 | #' 60 | #' # Method 1 (recommended): use the pipe operator "%>%" 61 | #' generated.image = genai.model %>% 62 | #' img(prompt = "A very cute panda eating bamboo.") 63 | #' generated.image 64 | #' 65 | #' # Method 2: use the reference operator "$" 66 | #' generated.image = genai.model$img(prompt = "A very cute sea otter on a rock.") 67 | #' generated.image 68 | #' 69 | #' # Method 3: use the function img() directly 70 | #' generated.image = img(genai.object = genai.model, 71 | #' prompt = "A very cute bear.") 72 | #' generated.image 73 | #' } 74 | #' 75 | #' @export 76 | img = function(genai.object, 77 | prompt, 78 | verbose = FALSE, 79 | config = list()) { 80 | genai.object$img(prompt, 81 | verbose, 82 | config) 83 | } 84 | -------------------------------------------------------------------------------- /R/src/R/genai.google.txt.image.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.txt.image = function(genai.google.object, 3 | prompt, 4 | image.path, 5 | verbose, 6 | config = list( 7 | harm.category.dangerous.content = NULL, 8 | harm.category.harassment = NULL, 9 | harm.category.hate.speech = NULL, 10 | harm.category.sexually.explicit = NULL, 11 | stop.sequences = NULL, 12 | max.output.tokens = NULL, 13 | temperature = NULL, 14 | top.p = NULL, 15 | top.k = NULL 16 | )) { 17 | # Check configurations 18 | genai.google.config.check(config) 19 | 20 | # Get api url 21 | api.url = paste0( 22 | "https://generativelanguage.googleapis.com/", 23 | genai.google.object$version, 24 | "/models/", 25 | genai.google.object$model, 26 | ":generateContent?key=", 27 | genai.google.object$api 28 | ) 29 | if (genai.google.object$proxy) { 30 | api.url = paste0( 31 | "https://api.genai.gd.edu.kg/google/", 32 | genai.google.object$version, 33 | "/models/", 34 | genai.google.object$model, 35 | ":generateContent?key=", 36 | genai.google.object$api 37 | ) 38 | } 39 | 40 | # Convert image to data uri 41 | img.info = image.to.data.uri(image.path) 42 | if (img.info[1] == "jpg") { 43 | img.info[1] = "jpeg" 44 | } 45 | 46 | # Initialize the request body 47 | requestBody = list(contents = list(parts = list( 48 | list(text = prompt), 49 | list(inline_data = list( 50 | mime_type = paste0("image/", img.info[1]), 51 | data = img.info[2] 52 | )) 53 | ))) 54 | 55 | # Get the safety settings 56 | safety.setting = genai.google.safety.setting(config) 57 | if (length(safety.setting) > 0) { 58 | requestBody$safetySettings =
safety.setting 59 | } 60 | 61 | # Get the generation configuration 62 | generation.config = genai.google.generation.config(config) 63 | if (length(generation.config) > 0) { 64 | requestBody$generationConfig = generation.config 65 | } 66 | 67 | # Convert the request as JSON format 68 | requestBodyJSON = jsonlite::toJSON(requestBody, 69 | auto_unbox = TRUE, 70 | pretty = TRUE) 71 | 72 | # Send request and get response 73 | response = httr::POST( 74 | url = api.url, 75 | body = requestBodyJSON, 76 | httr::add_headers("Content-Type" = "application/json") 77 | ) 78 | responseJSON = httr::content(response, "parsed") 79 | 80 | # Check for harmful prompt 81 | if (!is.null(responseJSON$promptFeedback$blockReason)) { 82 | stop("Invalid prompt. The prompt may contain harmful content.") 83 | } 84 | 85 | # Check for response error 86 | if (!is.null(responseJSON$error)) { 87 | stop(responseJSON$error$message) 88 | } 89 | 90 | # Print detail if verbose is TRUE 91 | if (verbose) { 92 | genai.google.formated.confguration(requestBody, prompt) 93 | cat("=============================================================================\n") 94 | cat(" Image Path\n") 95 | cat("-----------------------------------------------------------------------------\n") 96 | cat(paste(strwrap(image.path, width = 76, exdent = 0), collapse = "\n")) 97 | cat("\n") 98 | cat("=============================================================================\n\n\n\n") 99 | cat("\n") 100 | } 101 | 102 | # Get the response text 103 | return (responseJSON$candidates[[1]]$content$parts[[1]]$text) 104 | } 105 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.utils.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.check = function(api, model, version, proxy) { 3 | json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") 4 | if (is.na(match(model, json.data$moonshot$model))) { 5 | stop( 6 | "Invalid value for model. Refer to 'available.models()' to view the supported models." 7 | ) 8 | } 9 | if (is.na(match(version, json.data$moonshot$version))) { 10 | stop( 11 | "Invalid value for version. Refer to 'available.models()' to view the supported versions." 12 | ) 13 | } 14 | if (!proxy %in% c(TRUE, FALSE)) { 15 | stop("Invalid value for proxy. It must be either TRUE or FALSE.") 16 | } 17 | 18 | # Check connection 19 | api.url = paste0( 20 | "https://api.moonshot.cn/", 21 | version, 22 | "/models" 23 | ) 24 | if (proxy) { 25 | api.url = paste0( 26 | "https://api.genai.gd.edu.kg/moonshot/", 27 | version, 28 | "/models") 29 | } 30 | response = httr::GET(url = api.url, 31 | httr::add_headers( 32 | "Content-Type" = "application/json", 33 | "Authorization" = paste("Bearer", api) 34 | ) 35 | ) 36 | responseJSON = httr::content(response, "parsed") 37 | if (!is.null(responseJSON$error)) { 38 | stop(responseJSON$error$message) 39 | } 40 | if (response$status_code != 200) { 41 | stop( 42 | "Invalid parameter(s) detected. Please check the values for api, model, version, and proxy." 43 | ) 44 | } 45 | } 46 | 47 | #' @noRd 48 | genai.moonshot.config.check = function(config) { 49 | if (!is.list(config)) { 50 | stop("Invalid configuration. 
It must be a list.") 51 | } 52 | config.names = c( 53 | "max.tokens", 54 | "temperature", 55 | "top.p" 56 | ) 57 | wrong.config = setdiff(names(config), config.names) 58 | if (length(wrong.config) > 0) { 59 | stop("Invalid configuration(s) detected: ", 60 | paste0(wrong.config, collapse = ", ")) 61 | } 62 | if (length(unique(names(config))) != length(names(config))) { 63 | stop("Invalid configurations. Duplicate parameters detected.") 64 | } 65 | } 66 | 67 | #' @noRd 68 | genai.moonshot.generation.config = function(requestBody, config) { 69 | config.names = c( 70 | max.tokens = "max_tokens", 71 | temperature = "temperature", 72 | top.p = "top_p" 73 | ) 74 | for (param_name in names(config)) { 75 | if (!is.null(config[[param_name]])) { 76 | requestBody[[config.names[param_name]]] = config[[param_name]] 77 | } 78 | } 79 | return(requestBody) 80 | } 81 | 82 | #' @noRd 83 | genai.moonshot.formated.confguration = function(request.body, prompt) { 84 | config.names = c( 85 | max.tokens = "max_tokens", 86 | temperature = "temperature", 87 | top.p = "top_p" 88 | ) 89 | intersect.param = intersect(names(request.body), config.names) 90 | if (length(intersect.param) > 0) { 91 | cat("=============================================================================\n") 92 | cat(" Generation Configuration\n") 93 | cat("-----------------------------------------------------------------------------\n") 94 | for (param in intersect.param) { 95 | if (is.list(request.body[[param]])) { 96 | cat("stop:", 97 | paste0(request.body[[param]], 98 | collapse = ", "), 99 | "\n") 100 | } 101 | else { 102 | cat(paste0(param, ":"), 103 | request.body[[param]], 104 | "\n") 105 | } 106 | } 107 | cat("=============================================================================\n\n\n\n") 108 | } 109 | cat("=============================================================================\n") 110 | cat(" Prompt\n") 111 | cat("-----------------------------------------------------------------------------\n") 112 | cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n")) 113 | cat("\n") 114 | cat("=============================================================================\n\n\n\n") 115 | } 116 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat = function(genai.google.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | harm.category.dangerous.content = NULL, 7 | harm.category.harassment = NULL, 8 | harm.category.hate.speech = NULL, 9 | harm.category.sexually.explicit = NULL, 10 | stop.sequences = NULL, 11 | max.output.tokens = NULL, 12 | temperature = NULL, 13 | top.p = NULL, 14 | top.k = NULL 15 | )) { 16 | # Check configurations 17 | genai.google.config.check(config) 18 | 19 | # Get api url 20 | api.url = paste0( 21 | "https://generativelanguage.googleapis.com/", 22 | genai.google.object$version, 23 | "/models/", 24 | genai.google.object$model, 25 | ":generateContent?key=", 26 | genai.google.object$api 27 | ) 28 | if (genai.google.object$proxy) { 29 | api.url = paste0( 30 | "https://api.genai.gd.edu.kg/google/", 31 | genai.google.object$version, 32 | "/models/", 33 | genai.google.object$model, 34 | ":generateContent?key=", 35 | genai.google.object$api 36 | ) 37 | } 38 | 39 | # Initialize the request body 40 | requestNewContent = list(list(role = "user", 41 | parts = list(text = prompt))) 42 | requestBody = 
as.list(genai.google.object$chat.history) 43 | requestBody$contents = append(requestBody$contents, requestNewContent) 44 | 45 | # Get the safety settings 46 | safety.setting = genai.google.safety.setting(config) 47 | if (length(safety.setting) > 0) { 48 | requestBody$safetySettings = safety.setting 49 | } 50 | 51 | # Get the generation configuration 52 | generation.config = genai.google.generation.config(config) 53 | if (length(generation.config) > 0) { 54 | requestBody$generationConfig = generation.config 55 | } 56 | 57 | # Convert the request as JSON format 58 | requestBodyJSON = jsonlite::toJSON(requestBody, 59 | auto_unbox = TRUE, 60 | pretty = TRUE) 61 | 62 | # Send request and get response 63 | response = httr::POST( 64 | url = api.url, 65 | body = requestBodyJSON, 66 | httr::add_headers("Content-Type" = "application/json") 67 | ) 68 | responseJSON = httr::content(response, "parsed") 69 | 70 | # Check for harmful prompt 71 | if (!is.null(responseJSON$promptFeedback$blockReason)) { 72 | stop("Invalid prompt. The prompt may contain harmful content.") 73 | } 74 | 75 | # Check for response error 76 | if (!is.null(responseJSON$error)) { 77 | stop(responseJSON$error$message) 78 | } 79 | 80 | # Save the most recent prompt to the chat history 81 | genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents, 82 | requestNewContent) 83 | 84 | # Save the most recent model response to the chat history 85 | respondContent = list(list( 86 | role = "model", 87 | parts = list(text = responseJSON$candidates[[1]]$content$parts[[1]]$text) 88 | )) 89 | genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents, 90 | respondContent) 91 | 92 | # Print detail if verbose is TRUE 93 | if (verbose) { 94 | genai.google.formated.confguration(requestBody, prompt) 95 | cat("=============================================================================\n") 96 | cat(" Chat history \n") 97 | cat("-----------------------------------------------------------------------------\n\n") 98 | genai.google.chat.history.print(genai.google.object, from = 1, to = NULL) 99 | cat("=============================================================================\n\n\n\n") 100 | } 101 | 102 | # Get the response text 103 | return (responseJSON$candidates[[1]]$content$parts[[1]]$text) 104 | } 105 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat = function(genai.openai.object, 3 | prompt, 4 | verbose, 5 | config = list( 6 | frequency.penalty = NULL, 7 | logit.bias = NULL, 8 | logprobs = NULL, 9 | top.logprobs = NULL, 10 | max.tokens = NULL, 11 | presence.penalty = NULL, 12 | response.format = NULL, 13 | seed = NULL, 14 | stop = NULL, 15 | temperature = NULL, 16 | top.p = NULL, 17 | tools = NULL, 18 | tool.choice = NULL, 19 | user = NULL 20 | )) { 21 | # Check configurations 22 | genai.openai.config.check(config) 23 | 24 | # Get api url 25 | api.url = paste0( 26 | "https://api.openai.com/", 27 | genai.openai.object$version, 28 | "/chat/completions" 29 | ) 30 | if (genai.openai.object$proxy) { 31 | api.url = paste0( 32 | "https://api.genai.gd.edu.kg/openai/", 33 | genai.openai.object$version, 34 | "/chat/completions" 35 | ) 36 | } 37 | 38 | # Initialize the request body 39 | requestNewContent = list(list(role = "user", 40 | content = prompt)) 41 | requestBody = 
as.list(genai.openai.object$chat.history) 42 | requestBody$messages = append(requestBody$messages, requestNewContent) 43 | 44 | # Get the generation configuration 45 | if (length(config) > 0) { 46 | requestBody = genai.openai.generation.config(requestBody, config) 47 | } 48 | 49 | # Convert the request as JSON format 50 | requestBodyJSON = jsonlite::toJSON(c(model = genai.openai.object$model, 51 | requestBody), 52 | auto_unbox = TRUE, 53 | pretty = TRUE) 54 | 55 | # Send request and get response 56 | response = httr::POST( 57 | url = api.url, 58 | body = requestBodyJSON, 59 | httr::add_headers( 60 | "Content-Type" = "application/json", 61 | "Authorization" = paste("Bearer", genai.openai.object$api) 62 | ) 63 | ) 64 | if (!is.null(genai.openai.object$organization.id) && 65 | is.character(genai.openai.object$organization.id)) { 66 | response = httr::POST( 67 | url = api.url, 68 | body = requestBodyJSON, 69 | httr::add_headers( 70 | "Content-Type" = "application/json", 71 | "Authorization" = paste("Bearer", genai.openai.object$api), 72 | "OpenAI-Organization" = genai.openai.object$organization.id 73 | ) 74 | ) 75 | } 76 | responseJSON = httr::content(response, "parsed") 77 | 78 | # Check for response error 79 | if (!is.null(responseJSON$error)) { 80 | stop(responseJSON$error$message) 81 | } 82 | 83 | # Save the most recent prompt to the chat history 84 | genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages, 85 | requestNewContent) 86 | 87 | # Save the most recent model response to the chat history 88 | respondContent = list(list( 89 | role = "assistant", 90 | content = responseJSON$choices[[1]]$message$content 91 | )) 92 | genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages, 93 | respondContent) 94 | 95 | # Print detail if verbose is TRUE 96 | if (verbose) { 97 | genai.openai.formated.confguration(requestBody, prompt) 98 | cat("=============================================================================\n") 99 | cat(" Chat history \n") 100 | cat("-----------------------------------------------------------------------------\n\n") 101 | genai.openai.chat.history.print(genai.openai.object, from = 1, to = NULL) 102 | cat("=============================================================================\n\n\n\n") 103 | } 104 | 105 | # Get the response text 106 | return (responseJSON$choices[[1]]$message$content) 107 | } 108 | -------------------------------------------------------------------------------- /R/src/R/genai.moonshot.chat.edit.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.moonshot.chat.edit = function(genai.moonshot.object, 3 | prompt, 4 | message.to.edit, 5 | verbose, 6 | config = list( 7 | max.tokens = NULL, 8 | temperature = NULL, 9 | top.p = NULL 10 | )) { 11 | # Check if there are messages in the chat history 12 | if (length(genai.moonshot.object$chat.history$messages) == 0) { 13 | stop("Invalid chat.history. The chat history is empty.") 14 | } 15 | 16 | # Check message.to.edit with chat.history length 17 | if (message.to.edit > length(genai.moonshot.object$chat.history$messages) || 18 | message.to.edit < 1) { 19 | stop( 20 | "Invalid value for message.to.edit. You can only edit existing messages. Please use 'chat.history.print()' to review the formatted chat history." 21 | ) 22 | } 23 | 24 | # Check message.to.edit (must be an even number) 25 | if (message.to.edit %% 2 == 1) { 26 | stop( 27 | "Invalid value for message.to.edit.
You can only edit messages sent by a user role. Please use 'chat.history.print()' to review the formatted chat history." 28 | ) 29 | } 30 | 31 | # Check configurations 32 | genai.moonshot.config.check(config) 33 | 34 | # Get api url 35 | api.url = paste0( 36 | "https://api.moonshot.cn/", 37 | genai.moonshot.object$version, 38 | "/chat/completions" 39 | ) 40 | if (genai.moonshot.object$proxy) { 41 | api.url = paste0( 42 | "https://api.genai.gd.edu.kg/moonshot/", 43 | genai.moonshot.object$version, 44 | "/chat/completions" 45 | ) 46 | } 47 | 48 | # Initialize the request body 49 | requestNewContent = list(list(role = "user", 50 | content = prompt)) 51 | requestBody = as.list(genai.moonshot.object$chat.history) 52 | requestBody$messages = append(requestBody$messages[1:message.to.edit - 1], 53 | requestNewContent) 54 | 55 | # Get the generation configuration 56 | if (length(config) > 0) { 57 | requestBody = genai.moonshot.generation.config(requestBody, config) 58 | } 59 | 60 | # Convert the request as JSON format 61 | requestBodyJSON = jsonlite::toJSON(c(model = genai.moonshot.object$model, 62 | requestBody), 63 | auto_unbox = TRUE, 64 | pretty = TRUE) 65 | 66 | # Send request and get response 67 | response = httr::POST( 68 | url = api.url, 69 | body = requestBodyJSON, 70 | httr::add_headers( 71 | "Content-Type" = "application/json", 72 | "Authorization" = paste("Bearer", genai.moonshot.object$api) 73 | ) 74 | ) 75 | responseJSON = httr::content(response, "parsed") 76 | 77 | # Check for response error 78 | if (!is.null(responseJSON$error)) { 79 | stop(responseJSON$error$message) 80 | } 81 | 82 | # Save the most recent prompt to the chat history 83 | genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages[1:message.to.edit - 1], 84 | requestNewContent) 85 | 86 | # Save the most recent model response to the chat history 87 | respondContent = list(list( 88 | role = "assistant", 89 | content = responseJSON$choices[[1]]$message$content 90 | )) 91 | genai.moonshot.object$chat.history$messages = append(genai.moonshot.object$chat.history$messages, 92 | respondContent) 93 | 94 | # Print detail if verbose is TRUE 95 | if (verbose) { 96 | genai.moonshot.formated.confguration(requestBody, prompt) 97 | cat("=============================================================================\n") 98 | cat(" Chat history \n") 99 | cat("-----------------------------------------------------------------------------\n\n") 100 | genai.moonshot.chat.history.print(genai.moonshot.object, from = 1, to = NULL) 101 | cat("=============================================================================\n\n\n\n") 102 | } 103 | 104 | # Get the response text 105 | return (responseJSON$choices[[1]]$message$content) 106 | } 107 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.txt.image.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.txt.image = function(genai.openai.object, 3 | prompt, 4 | image.path, 5 | verbose, 6 | config = list( 7 | frequency.penalty = NULL, 8 | logit.bias = NULL, 9 | logprobs = NULL, 10 | top.logprobs = NULL, 11 | max.tokens = NULL, 12 | presence.penalty = NULL, 13 | response.format = NULL, 14 | seed = NULL, 15 | stop = NULL, 16 | temperature = NULL, 17 | top.p = NULL, 18 | tools = NULL, 19 | tool.choice = NULL, 20 | user = NULL 21 | )) { 22 | # Check configurations 23 | genai.openai.config.check(config) 24 | 25 | # Get api url 26 | api.url = paste0( 27 
| "https://api.openai.com/", 28 | genai.openai.object$version, 29 | "/chat/completions" 30 | ) 31 | if (genai.openai.object$proxy) { 32 | api.url = paste0( 33 | "https://api.genai.gd.edu.kg/openai/", 34 | genai.openai.object$version, 35 | "/chat/completions" 36 | ) 37 | } 38 | 39 | # Convert image to base64 code 40 | img.info = image.to.data.uri(image.path) 41 | 42 | # Initialize the request body 43 | requestBody = list(model = genai.openai.object$model, 44 | messages = list( 45 | list(role = "system", 46 | content = "You are a helpful assistant."), 47 | list(role = "user", 48 | content = list( 49 | list(type = "text", 50 | text = prompt), 51 | list(type = "image_url", 52 | image_url = list( 53 | url = paste0("data:image/", 54 | img.info[1], 55 | ";base64,", 56 | img.info[2]) 57 | )) 58 | )) 59 | )) 60 | 61 | # Get the generation configuration 62 | if (length(config) > 0) { 63 | requestBody = genai.openai.generation.config(requestBody, config) 64 | } 65 | 66 | # Convert the request as JSON format 67 | requestBodyJSON = jsonlite::toJSON(requestBody, 68 | auto_unbox = TRUE, 69 | pretty = TRUE) 70 | 71 | # Send request and get response 72 | response = httr::POST( 73 | url = api.url, 74 | body = requestBodyJSON, 75 | httr::add_headers( 76 | "Content-Type" = "application/json", 77 | "Authorization" = paste("Bearer", genai.openai.object$api) 78 | ) 79 | ) 80 | if (!is.null(genai.openai.object$organization.id) && 81 | is.character(genai.openai.object$organization.id)) { 82 | response = httr::POST( 83 | url = api.url, 84 | body = requestBodyJSON, 85 | httr::add_headers( 86 | "Content-Type" = "application/json", 87 | "Authorization" = paste("Bearer", genai.openai.object$api), 88 | "OpenAI-Organization" = genai.openai.object$organization.id 89 | ) 90 | ) 91 | } 92 | responseJSON = httr::content(response, "parsed") 93 | 94 | # Check for response error 95 | if (!is.null(responseJSON$error)) { 96 | stop(responseJSON$error$message) 97 | } 98 | 99 | # Print detail if verbose is TRUE 100 | if (verbose) { 101 | genai.openai.formated.confguration(requestBody, prompt) 102 | cat("=============================================================================\n") 103 | cat(" Image Path\n") 104 | cat("-----------------------------------------------------------------------------\n") 105 | cat(paste(strwrap(image.path, width = 76, exdent = 0), collapse = "\n")) 106 | cat("\n") 107 | cat("=============================================================================\n\n\n\n") 108 | cat("\n") 109 | } 110 | 111 | # Get the response text 112 | return (responseJSON$choices[[1]]$message$content) 113 | } 114 | -------------------------------------------------------------------------------- /R/src/R/genai.google.chat.edit.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.chat.edit = function(genai.google.object, 3 | prompt, 4 | message.to.edit, 5 | verbose, 6 | config = list( 7 | harm.category.dangerous.content = NULL, 8 | harm.category.harassment = NULL, 9 | harm.category.hate.speech = NULL, 10 | harm.category.sexually.explicit = NULL, 11 | stop.sequences = NULL, 12 | max.output.tokens = NULL, 13 | temperature = NULL, 14 | top.p = NULL, 15 | top.k = NULL 16 | )) { 17 | # Check if there are messages in the chat history 18 | if (length(genai.google.object$chat.history$contents) == 0) { 19 | stop("Invalid chat.history. 
The chat history is empty.") 20 | } 21 | 22 | # Check message.to.edit with chat.history length 23 | if (message.to.edit > length(genai.google.object$chat.history$contents) || 24 | message.to.edit < 1) { 25 | stop( 26 | "Invalid value for message.to.edit. You can only edit existing messages. Please use 'chat.history.print()' to review the formatted chat history." 27 | ) 28 | } 29 | 30 | # Check message.to.edit (must be an odd number) 31 | if (message.to.edit %% 2 == 0) { 32 | stop( 33 | "Invalid value for message.to.edit. You can only edit messages sent by a user role. Please use 'chat.history.print()' to review the formatted chat history." 34 | ) 35 | } 36 | 37 | # Check configurations 38 | genai.google.config.check(config) 39 | 40 | # Get api url 41 | api.url = paste0( 42 | "https://generativelanguage.googleapis.com/", 43 | genai.google.object$version, 44 | "/models/", 45 | genai.google.object$model, 46 | ":generateContent?key=", 47 | genai.google.object$api 48 | ) 49 | if (genai.google.object$proxy) { 50 | api.url = paste0( 51 | "https://api.genai.gd.edu.kg/google/", 52 | genai.google.object$version, 53 | "/models/", 54 | genai.google.object$model, 55 | ":generateContent?key=", 56 | genai.google.object$api 57 | ) 58 | } 59 | 60 | # Initialize the request body 61 | requestNewContent = list(list(role = "user", 62 | parts = list(text = prompt))) 63 | requestBody = as.list(genai.google.object$chat.history) 64 | requestBody$contents = append(requestBody$contents[1:message.to.edit - 1], 65 | requestNewContent) 66 | 67 | # Get the safety settings 68 | safety.setting = genai.google.safety.setting(config) 69 | if (length(safety.setting) > 0) { 70 | requestBody$safetySettings = safety.setting 71 | } 72 | 73 | # Get the generation configuration 74 | generation.config = genai.google.generation.config(config) 75 | if (length(generation.config) > 0) { 76 | requestBody$generationConfig = generation.config 77 | } 78 | 79 | # Convert the request as JSON format 80 | requestBodyJSON = jsonlite::toJSON(requestBody, 81 | auto_unbox = TRUE, 82 | pretty = TRUE) 83 | 84 | # Send request and get response 85 | response = httr::POST( 86 | url = api.url, 87 | body = requestBodyJSON, 88 | httr::add_headers("Content-Type" = "application/json") 89 | ) 90 | responseJSON = httr::content(response, "parsed") 91 | 92 | # Check for harmful prompt 93 | if (!is.null(responseJSON$promptFeedback$blockReason)) { 94 | stop("Invalid prompt. 
The prompt may contain harmful content.") 95 | } 96 | 97 | # Check for response error 98 | if (!is.null(responseJSON$error)) { 99 | stop(responseJSON$error$message) 100 | } 101 | 102 | # Save the most recent prompt to the chat history 103 | genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents[1:message.to.edit - 1], 104 | requestNewContent) 105 | 106 | # Save the most recent model response to the chat history 107 | respondContent = list(list( 108 | role = "model", 109 | parts = list(text = responseJSON$candidates[[1]]$content$parts[[1]]$text) 110 | )) 111 | genai.google.object$chat.history$contents = append(genai.google.object$chat.history$contents, 112 | respondContent) 113 | 114 | # Print detail if verbose is TRUE 115 | if (verbose) { 116 | genai.google.formated.confguration(requestBody, prompt) 117 | cat("=============================================================================\n") 118 | cat(" Chat history \n") 119 | cat("-----------------------------------------------------------------------------\n\n") 120 | genai.google.chat.history.print(genai.google.object, from = 1, to = NULL) 121 | cat("=============================================================================\n\n\n\n") 122 | } 123 | 124 | # Get the response text 125 | return (responseJSON$candidates[[1]]$content$parts[[1]]$text) 126 | } 127 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.chat.edit.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.chat.edit = function(genai.openai.object, 3 | prompt, 4 | message.to.edit, 5 | verbose, 6 | config = list( 7 | frequency.penalty = NULL, 8 | logit.bias = NULL, 9 | logprobs = NULL, 10 | top.logprobs = NULL, 11 | max.tokens = NULL, 12 | presence.penalty = NULL, 13 | response.format = NULL, 14 | seed = NULL, 15 | stop = NULL, 16 | temperature = NULL, 17 | top.p = NULL, 18 | tools = NULL, 19 | tool.choice = NULL, 20 | user = NULL 21 | )) { 22 | # Check if there are messages in the chat history 23 | if (length(genai.openai.object$chat.history$messages) == 0) { 24 | stop("Invalid chat.history. The chat history is empty.") 25 | } 26 | 27 | # Check message.to.edit with chat.history length 28 | if (message.to.edit > length(genai.openai.object$chat.history$messages) || 29 | message.to.edit < 1) { 30 | stop( 31 | "Invalid value for message.to.edit. You can only edit existing messages. Please use 'chat.history.print()' to review the formatted chat history." 32 | ) 33 | } 34 | 35 | # Check message.to.edit (must be an even number) 36 | if (message.to.edit %% 2 == 1) { 37 | stop( 38 | "Invalid value for message.to.edit. You can only edit messages sent by a user role. Please use 'chat.history.print()' to review the formatted chat history."
39 | ) 40 | } 41 | 42 | # Check configurations 43 | genai.openai.config.check(config) 44 | 45 | # Get api url 46 | api.url = paste0( 47 | "https://api.openai.com/", 48 | genai.openai.object$version, 49 | "/chat/completions" 50 | ) 51 | if (genai.openai.object$proxy) { 52 | api.url = paste0( 53 | "https://api.genai.gd.edu.kg/openai/", 54 | genai.openai.object$version, 55 | "/chat/completions" 56 | ) 57 | } 58 | 59 | # Initialize the request body 60 | requestNewContent = list(list(role = "user", 61 | content = prompt)) 62 | requestBody = as.list(genai.openai.object$chat.history) 63 | requestBody$messages = append(requestBody$messages[1:message.to.edit - 1], 64 | requestNewContent) 65 | 66 | # Get the generation configuration 67 | if (length(config) > 0) { 68 | requestBody = genai.openai.generation.config(requestBody, config) 69 | } 70 | 71 | # Convert the request as JSON format 72 | requestBodyJSON = jsonlite::toJSON(c(model = genai.openai.object$model, 73 | requestBody), 74 | auto_unbox = TRUE, 75 | pretty = TRUE) 76 | 77 | # Send request and get response 78 | response = httr::POST( 79 | url = api.url, 80 | body = requestBodyJSON, 81 | httr::add_headers( 82 | "Content-Type" = "application/json", 83 | "Authorization" = paste("Bearer", genai.openai.object$api) 84 | ) 85 | ) 86 | if (!is.null(genai.openai.object$organization.id) && 87 | is.character(genai.openai.object$organization.id)) { 88 | response = httr::POST( 89 | url = api.url, 90 | body = requestBodyJSON, 91 | httr::add_headers( 92 | "Content-Type" = "application/json", 93 | "Authorization" = paste("Bearer", genai.openai.object$api), 94 | "OpenAI-Organization" = genai.openai.object$organization.id 95 | ) 96 | ) 97 | } 98 | responseJSON = httr::content(response, "parsed") 99 | 100 | # Check for response error 101 | if (!is.null(responseJSON$error)) { 102 | stop(responseJSON$error$message) 103 | } 104 | 105 | # Save the most recent prompt to the chat history 106 | genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages[1:message.to.edit - 1], 107 | requestNewContent) 108 | 109 | # Save the most recent model response to the chat history 110 | respondContent = list(list( 111 | role = "assistant", 112 | content = responseJSON$choices[[1]]$message$content 113 | )) 114 | genai.openai.object$chat.history$messages = append(genai.openai.object$chat.history$messages, 115 | respondContent) 116 | 117 | # Print detail if verbose is TRUE 118 | if (verbose) { 119 | genai.openai.formated.confguration(requestBody, prompt) 120 | cat("=============================================================================\n") 121 | cat(" Chat history \n") 122 | cat("-----------------------------------------------------------------------------\n\n") 123 | genai.openai.chat.history.print(genai.openai.object, from = 1, to = NULL) 124 | cat("=============================================================================\n\n\n\n") 125 | } 126 | 127 | # Get the response text 128 | return (responseJSON$choices[[1]]$message$content) 129 | } 130 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GenAI - Generative Artificial Intelligence Toolbox 2 | 3 | Logo 4 | 5 | *Last update: 2025/01/28* 6 | 7 | Now, you can seamlessly utilize both ChatGPT from OpenAI and Gemini Pro from Google! Enjoy enhanced chat conversion and the ability to engage in multiple chat sessions with ease. 
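For example, chat generation and cross-provider chat-history conversion can be combined. Below is a rough sketch based on the functions documented in this repository (`available.models`, `genai.google`, `genai.openai`, `chat`, and `chat.history.convert`); the `GOOGLE_API` and `OPENAI_API` environment variable names are placeholders, and exact argument values should be checked against the documentation linked below.

```r
library(GenAI)

# Look up the currently supported models and versions
models = available.models()

# Create one model object per provider (API keys come from placeholder env vars)
google = genai.google(api = Sys.getenv("GOOGLE_API"),
                      model = models$google$model[1],
                      version = models$google$version[1],
                      proxy = FALSE)
openai = genai.openai(api = Sys.getenv("OPENAI_API"),
                      model = models$openai$model[1],
                      version = models$openai$version[1],
                      proxy = FALSE)

# Chat with one provider, then convert that chat history for the other
google %>% chat(prompt = "Explain what a generative AI model is.")
converted.history = chat.history.convert(from.genai.object = google,
                                         to.genai.object = openai)
```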
8 | 9 | | R Package "GenAI" | Python Package "GeAI" | 10 | |----------------------------------------------------|---------------------------------------------------------------| 11 | | [![](https://img.shields.io/cran/v/GenAI?color=green)](https://cran.r-project.org/package=GenAI) ![](https://cranlogs.r-pkg.org/badges/grand-total/GenAI) | [![](https://img.shields.io/pypi/v/geai.svg?color=green)](https://pypi.org/project/GeAI/) ![](https://img.shields.io/pepy/dt/geai.svg?color=blue) | 12 | | [Overview](https://genai.gd.edu.kg/r/) | [Overview](https://genai.gd.edu.kg/python/) | 13 | | [Documentation](https://genai.gd.edu.kg/r/documentation/) | [Documentation](https://genai.gd.edu.kg/python/documentation/) | 14 | 15 | 16 | > 💡 2025/01/28 - API Proxy: DeepSeek API Proxy Added! 💡 17 | > 18 | > Now that DeepSeek's impressive model has been published, our API proxy service can redirect its API requests and responses for general usage. Please see our [API proxy page](https://genai.gd.edu.kg/api/#general) for more detail. 19 | 20 | 21 | > ⚠️ NOTICE for Python package "GeAI" ⚠️ 22 | > 23 | > The Python package is currently in the development phase. Please exercise caution when using it, as it may not be entirely stable or free of potential issues. 24 | 25 | # Overview 26 | 27 | GenAI harnesses the power of advanced models like GPT-4 and Gemini Pro to serve as versatile coding and writing assistants for users in both R and, soon, Python. This toolbox empowers users with a range of capabilities, including text generation, code optimization, natural language processing, chat assistance, and image interpretation. The ultimate objective is to simplify and enhance the coding and language processing experience for users of both R and Python. 28 | 29 | The following image is a flowchart describing how to use the GenAI package to generate text and chat. The user first imports the GenAI package into their R or Python environment, then connects to the Generative AI service providers' APIs, with or without GenAI's API proxy service. Once the connection succeeds, the user can call the GenAI functions to generate text and chat. 30 | 31 | How GenAI works? 32 | 33 | The GenAI package provides a variety of functions for generating text, including functions for explaining code, fixing grammar, optimizing code, and generating images from text. The package also provides a variety of functions for chat, including functions for editing chat messages, converting chat histories, and setting up chat sessions. 34 | 35 | The output of the GenAI package is a text or chat response, generated from the user's input by the GenAI functions. 36 | 37 | # Prerequisites 38 | 39 | Prior to utilizing the GenAI package, several prerequisites must be met. 40 | 41 | 1. Ensure that you possess an eligible device equipped with either R or Python. 42 | 43 | 2. Access to the internet is essential to generate text or engage in chat through GenAI. 44 | 45 | 3. Obtain an API key from the selected Generative AI service provider. GenAI currently supports Generative AI models from both Google and OpenAI.
46 | 47 | - To acquire an API key for Google's models, refer to: [Get an API key](https://ai.google.dev/tutorials/setup) 48 | 49 | - To acquire an API key for OpenAI's models, refer to: [Account setup](https://platform.openai.com/docs/quickstart/account-setup?context=python) 50 | 51 | # API Proxy Service 52 | 53 | This service is tailored for individuals who possess a valid API key but find themselves outside their home countries/regions, due to travel, work, or study, in locations where certain Generative AI service providers restrict or do not offer API usage. 54 | 55 | **How does it work?** GenAI's API proxy service works by first directing your request to our server, located in a country/region eligible for the specific Generative AI services. GenAI's server then forwards your request to the official server of the Generative AI service provider. Once the provider's server finishes processing the request, the response is sent back to GenAI's server, which relays it back to the user's device. This process is illustrated in the accompanying image. 56 | 57 | How does the proxy work? 58 | 59 | **How to use this service?** 60 | 61 | - GenAI package in R: set the parameter proxy to TRUE when creating a model object, then use that object for text generation and chat generation as usual. 62 | 63 | - GenAI package in Python: under development. 64 | 65 | - Use the proxy URL directly: 66 | 67 | - To proxy an API request for Google's models, replace the original URL `https://generativelanguage.googleapis.com/` with the proxied URL `https://api.genai.gd.edu.kg/google/`. 68 | 69 | - To proxy an API request for OpenAI's models, replace the original URL `https://api.openai.com/` with the proxied URL `https://api.genai.gd.edu.kg/openai/`. 70 | 71 | It's important to note that while GenAI and its associated organization do not collect user information, the server providers for GenAI and the Generative AI service providers may do so. Additionally, the connection speed of the proxy service is not guaranteed. Users are advised to use this service at their own risk. 72 | 73 | # License 74 | 75 | This work is licensed under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/?ref=chooser-v1). 76 | -------------------------------------------------------------------------------- /R/src/R/genai.google.utils.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.google.check = function(api, model, version, proxy) { 3 | json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") 4 | if (is.na(match(model, json.data$google$model))) { 5 | stop( 6 | "Invalid value for model. Refer to 'available.models()' to view the supported models." 7 | ) 8 | } 9 | if (is.na(match(version, json.data$google$version))) { 10 | stop( 11 | "Invalid value for version. Refer to 'available.models()' to view the supported versions." 12 | ) 13 | } 14 | if (!proxy %in% c(TRUE, FALSE)) { 15 | stop("Invalid value for proxy.
It must be either TRUE or FALSE.") 16 | } 17 | 18 | # Check connection 19 | api.url = paste0( 20 | "https://generativelanguage.googleapis.com/", 21 | version, 22 | "/models/", 23 | model, 24 | "?key=", 25 | api 26 | ) 27 | if (proxy) { 28 | api.url = paste0("https://api.genai.gd.edu.kg/google/", 29 | version, 30 | "/models/", 31 | model, 32 | "?key=", 33 | api) 34 | } 35 | response = httr::GET(url = api.url, 36 | httr::add_headers("Content-Type" = "application/json")) 37 | responseJSON = httr::content(response, "parsed") 38 | if (!is.null(responseJSON$error)) { 39 | stop(responseJSON$error$message) 40 | } 41 | if (response$status_code != 200) { 42 | stop( 43 | "Invalid parameter(s) detected. Please check the values for api, model, version, and proxy." 44 | ) 45 | } 46 | } 47 | 48 | #' @noRd 49 | genai.google.config.check = function(config) { 50 | if (!is.list(config)) { 51 | stop("Invalid configuration. It must be a list.") 52 | } 53 | config.names = c( 54 | "harm.category.dangerous.content", 55 | "harm.category.harassment", 56 | "harm.category.hate.speech", 57 | "harm.category.sexually.explicit", 58 | "stop.sequences", 59 | "max.output.tokens", 60 | "temperature", 61 | "top.p", 62 | "top.k" 63 | ) 64 | wrong.config = setdiff(names(config), config.names) 65 | if (length(wrong.config) > 0) { 66 | stop("Invalid configuration(s) detected: ", 67 | paste0(wrong.config, collapse = ", ")) 68 | } 69 | if (length(unique(names(config))) != length(names(config))) { 70 | stop("Invalid configurations. Duplicate parameters detected.") 71 | } 72 | 73 | # Check harm categories 74 | invalid.harm = lapply(config.names[1:4], function(harm) { 75 | if (!is.null(config[[harm]]) && 76 | is.na(match(config[[harm]], c(1, 2, 3, 4, 5)))) { 77 | return(paste0("Invalid value for ", harm, ". It must be 1, 2, 3, 4, or 5.\n")) 78 | } 79 | }) 80 | invalid.harm = Filter(Negate(is.null), invalid.harm) 81 | if (length(invalid.harm) > 0) { 82 | stop(invalid.harm) 83 | } 84 | 85 | # Check stop sequence 86 | if (!is.null(config[["stop.sequences"]]) && 87 | !is.list(config[["stop.sequences"]])) { 88 | stop("Invalid stop.sequences. It must be a list.") 89 | } 90 | if (length(config[["stop.sequences"]]) > 5) { 91 | stop("Invalid value for stop.sequences. 
It can only have at most 5 strings.") 92 | } 93 | } 94 | 95 | #' @noRd 96 | genai.google.formated.confguration = function(request.body, prompt) { 97 | if (!is.null(request.body$safetySettings)) { 98 | cat("=============================================================================\n") 99 | cat(" Safety Settings\n") 100 | cat("-----------------------------------------------------------------------------\n") 101 | for (i in 1:length(request.body$safetySettings)) { 102 | cat( 103 | paste0(request.body$safetySettings[[i]]$category, ":"), 104 | request.body$safetySettings[[i]]$threshold, 105 | "\n" 106 | ) 107 | } 108 | cat("=============================================================================\n\n\n\n") 109 | } 110 | if (!is.null(request.body$generationConfig)) { 111 | cat("=============================================================================\n") 112 | cat(" Generation Configuration\n") 113 | cat("-----------------------------------------------------------------------------\n") 114 | has.stop.sequences = FALSE 115 | if (!is.null(request.body$generationConfig$stopSequences)) { 116 | has.stop.sequences = TRUE 117 | cat( 118 | "stopSequences:", 119 | paste0( 120 | request.body$generationConfig$stopSequences, 121 | collapse = ", " 122 | ), 123 | "\n" 124 | ) 125 | } 126 | config.length = length(request.body$generationConfig) 127 | config.names = names(request.body$generationConfig) 128 | if (has.stop.sequences) { 129 | if (config.length > 1) { 130 | for (i in 2:config.length) { 131 | cat(paste0(config.names[i], ":"), 132 | request.body$generationConfig[[config.names[i]]], 133 | "\n") 134 | } 135 | } 136 | } 137 | else { 138 | for (i in 1:config.length) { 139 | cat(paste0(config.names[i], ":"), 140 | request.body$generationConfig[[config.names[i]]], 141 | "\n") 142 | } 143 | } 144 | cat("=============================================================================\n\n\n\n") 145 | } 146 | cat("=============================================================================\n") 147 | cat(" Prompt\n") 148 | cat("-----------------------------------------------------------------------------\n") 149 | cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n")) 150 | cat("\n") 151 | cat("=============================================================================\n\n\n\n") 152 | } 153 | 154 | #' @noRd 155 | genai.google.generation.config = function(config) { 156 | configuration = list() 157 | if (!is.null(config[["stop.sequences"]])) { 158 | configuration$stopSequences = config[["stop.sequences"]] 159 | } 160 | if (!is.null(config[["max.output.tokens"]])) { 161 | configuration$maxOutputTokens = config[["max.output.tokens"]] 162 | } 163 | if (!is.null(config[["temperature"]])) { 164 | configuration$temperature = config[["temperature"]] 165 | } 166 | if (!is.null(config[["top.p"]])) { 167 | configuration$topP = config[["top.p"]] 168 | } 169 | if (!is.null(config[["top.k"]])) { 170 | configuration$topK = config[["top.k"]] 171 | } 172 | return(configuration) 173 | } 174 | 175 | #' @noRd 176 | genai.google.safety.setting = function(config) { 177 | raw.harm.category = c( 178 | harm.category.dangerous.content = "HARM_CATEGORY_DANGEROUS_CONTENT", 179 | harm.category.harassment = "HARM_CATEGORY_HARASSMENT", 180 | harm.category.hate.speech = "HARM_CATEGORY_HATE_SPEECH", 181 | harm.category.sexually.explicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT" 182 | ) 183 | raw.harm.block.threshold = c( 184 | "HARM_BLOCK_THRESHOLD_UNSPECIFIED", 185 | "BLOCK_LOW_AND_ABOVE", 186 | "BLOCK_MEDIUM_AND_ABOVE", 187 
| "BLOCK_ONLY_HIGH", 188 | "BLOCK_NONE" 189 | ) 190 | filled.harm = 191 | lapply(names(raw.harm.category), function(harm) { 192 | if (!is.null(config[[harm]])) { 193 | safety.setting.object = list("category" = raw.harm.category[harm], 194 | "threshold" = raw.harm.block.threshold[config[[harm]]]) 195 | return(safety.setting.object) 196 | } else { 197 | return(NULL) 198 | } 199 | }) 200 | filled.harm = Filter(Negate(is.null), filled.harm) 201 | return(filled.harm) 202 | } 203 | -------------------------------------------------------------------------------- /R/src/R/genai.openai.utils.R: -------------------------------------------------------------------------------- 1 | #' @noRd 2 | genai.openai.check = function(api, model, version, proxy, organization.id) { 3 | json.data = jsonlite::fromJSON("https://genai.gd.edu.kg/model.json") 4 | if (is.na(match(model, json.data$openai$model))) { 5 | stop( 6 | "Invalid value for model. Refer to 'available.models()' to view the supported models." 7 | ) 8 | } 9 | if (is.na(match(version, json.data$openai$version))) { 10 | stop( 11 | "Invalid value for version. Refer to 'available.models()' to view the supported versions." 12 | ) 13 | } 14 | if (!proxy %in% c(TRUE, FALSE)) { 15 | stop("Invalid value for proxy. It must be either TRUE or FALSE.") 16 | } 17 | if (!is.null(organization.id) && !is.character(organization.id)) { 18 | stop("Invalid value for organization.id. It must be either NULL (by default) or a character string.") 19 | } 20 | 21 | # Check connection 22 | api.url = paste0( 23 | "https://api.openai.com/", 24 | version, 25 | "/models" 26 | ) 27 | if (proxy) { 28 | api.url = paste0( 29 | "https://api.genai.gd.edu.kg/openai/", 30 | version, 31 | "/models") 32 | } 33 | response = httr::GET(url = api.url, 34 | httr::add_headers( 35 | "Content-Type" = "application/json", 36 | "Authorization" = paste("Bearer", api) 37 | ) 38 | ) 39 | if (!is.null(organization.id) && is.character(organization.id)) { 40 | response = httr::GET(url = api.url, 41 | httr::add_headers( 42 | "Content-Type" = "application/json", 43 | "Authorization" = paste("Bearer", api), 44 | "OpenAI-Organization" = organization.id 45 | ) 46 | ) 47 | } 48 | responseJSON = httr::content(response, "parsed") 49 | if (!is.null(responseJSON$error)) { 50 | stop(responseJSON$error$message) 51 | } 52 | if (response$status_code != 200) { 53 | stop( 54 | "Invalid parameter(s) detected. Please check the values for api, model, version, and proxy." 55 | ) 56 | } 57 | } 58 | 59 | #' @noRd 60 | genai.openai.config.check = function(config) { 61 | if (!is.list(config)) { 62 | stop("Invalid configuration. It must be a list.") 63 | } 64 | config.names = c( 65 | "frequency.penalty", 66 | "logit.bias", 67 | "logprobs", 68 | "top.logprobs", 69 | "max.tokens", 70 | "presence.penalty", 71 | "response.format", 72 | "seed", 73 | "stop", 74 | "temperature", 75 | "top.p", 76 | "tools", 77 | "tool.choice", 78 | "user" 79 | ) 80 | wrong.config = setdiff(names(config), config.names) 81 | if (length(wrong.config) > 0) { 82 | stop("Invalid configuration(s) detected: ", 83 | paste0(wrong.config, collapse = ", ")) 84 | } 85 | if (length(unique(names(config))) != length(names(config))) { 86 | stop("Invalid configurations. Duplicate parameters detected.") 87 | } 88 | } 89 | 90 | #' @noRd 91 | genai.openai.img.config.check = function(config) { 92 | if (!is.list(config)) { 93 | stop("Invalid configuration. 
It must be a list.") 94 | } 95 | config.names = c( 96 | "quality", 97 | "size", 98 | "style", 99 | "user" 100 | ) 101 | wrong.config = setdiff(names(config), config.names) 102 | if (length(wrong.config) > 0) { 103 | stop("Invalid configuration(s) detected: ", 104 | paste0(wrong.config, collapse = ", ")) 105 | } 106 | if (length(unique(names(config))) != length(names(config))) { 107 | stop("Invalid configurations. Duplicate parameters detected.") 108 | } 109 | } 110 | 111 | #' @noRd 112 | genai.openai.generation.config = function(requestBody, config) { 113 | config.names = c( 114 | frequency.penalty = "frequency_penalty", 115 | logit.bias = "logit_bias", 116 | logprobs = "logprobs", 117 | top.logprobs = "top_logprobs", 118 | max.tokens = "max_tokens", 119 | presence.penalty = "presence_penalty", 120 | response.format = "response_format", 121 | seed = "seed", 122 | stop = "stop", 123 | temperature = "temperature", 124 | top.p = "top_p", 125 | tools = "tools", 126 | tool.choice = "tool_choice", 127 | user = "user" 128 | ) 129 | for (param_name in names(config)) { 130 | if (!is.null(config[[param_name]])) { 131 | requestBody[[config.names[param_name]]] = config[[param_name]] 132 | } 133 | } 134 | return(requestBody) 135 | } 136 | 137 | #' @noRd 138 | genai.openai.img.generation.config = function(requestBody, config) { 139 | config.names = c( 140 | quality = "quality", 141 | size = "size", 142 | style = "style", 143 | user = "user" 144 | ) 145 | for (param_name in names(config)) { 146 | if (!is.null(config[[param_name]])) { 147 | requestBody[[config.names[param_name]]] = config[[param_name]] 148 | } 149 | } 150 | return(requestBody) 151 | } 152 | 153 | #' @noRd 154 | genai.openai.formated.confguration = function(request.body, prompt) { 155 | config.names = c( 156 | frequency.penalty = "frequency_penalty", 157 | logit.bias = "logit_bias", 158 | logprobs = "logprobs", 159 | top.logprobs = "top_logprobs", 160 | max.tokens = "max_tokens", 161 | presence.penalty = "presence_penalty", 162 | response.format = "response_format", 163 | seed = "seed", 164 | stop = "stop", 165 | temperature = "temperature", 166 | top.p = "top_p", 167 | tools = "tools", 168 | tool.choice = "tool_choice", 169 | user = "user" 170 | ) 171 | intersect.param = intersect(names(request.body), config.names) 172 | if (length(intersect.param) > 0) { 173 | cat("=============================================================================\n") 174 | cat(" Generation Configuration\n") 175 | cat("-----------------------------------------------------------------------------\n") 176 | for (param in intersect.param) { 177 | if (is.list(request.body[[param]])) { 178 | cat("stop:", 179 | paste0(request.body[[param]], 180 | collapse = ", "), 181 | "\n") 182 | } 183 | else { 184 | cat(paste0(param, ":"), 185 | request.body[[param]], 186 | "\n") 187 | } 188 | } 189 | cat("=============================================================================\n\n\n\n") 190 | } 191 | cat("=============================================================================\n") 192 | cat(" Prompt\n") 193 | cat("-----------------------------------------------------------------------------\n") 194 | cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n")) 195 | cat("\n") 196 | cat("=============================================================================\n\n\n\n") 197 | } 198 | 199 | #' @noRd 200 | genai.openai.img.formated.confguration = function(request.body, prompt) { 201 | config.names = c( 202 | quality = "quality", 203 | size = "size", 204 | 
style = "style", 205 | user = "user" 206 | ) 207 | intersect.param = intersect(names(request.body), config.names) 208 | if (length(intersect.param) > 0) { 209 | cat("=============================================================================\n") 210 | cat(" Generation Configuration\n") 211 | cat("-----------------------------------------------------------------------------\n") 212 | for (param in intersect.param) { 213 | if (is.list(request.body[[param]])) { 214 | cat("stop:", 215 | paste0(request.body[[param]], 216 | collapse = ", "), 217 | "\n") 218 | } 219 | else { 220 | cat(paste0(param, ":"), 221 | request.body[[param]], 222 | "\n") 223 | } 224 | } 225 | cat("=============================================================================\n\n\n\n") 226 | } 227 | cat("=============================================================================\n") 228 | cat(" Prompt\n") 229 | cat("-----------------------------------------------------------------------------\n") 230 | cat(paste(strwrap(prompt, width = 76, exdent = 0), collapse = "\n")) 231 | cat("\n") 232 | cat("=============================================================================\n\n\n\n") 233 | } 234 | -------------------------------------------------------------------------------- /R/src/man/txt.image.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/txt.image.R 3 | \name{txt.image} 4 | \alias{txt.image} 5 | \title{Text Generation with Text and Image as the Input} 6 | \usage{ 7 | txt.image(genai.object, prompt, image.path, verbose = FALSE, config = list()) 8 | } 9 | \arguments{ 10 | \item{genai.object}{A generative AI object containing necessary and correct information.} 11 | 12 | \item{prompt}{A character string representing the query for text generation.} 13 | 14 | \item{image.path}{A character string representing the path to the image. It should be a link 15 | starting with \code{https}/\code{http} or a local directory path to an image.} 16 | 17 | \item{verbose}{Optional. Default to \code{FALSE}. A boolean value determining whether or not to print 18 | out the details of the text request.} 19 | 20 | \item{config}{Optional. Default to \code{list()}. A list of configuration parameters for text generation.} 21 | } 22 | \value{ 23 | If successful, a text response will be returned. If the API response indicates 24 | an error, the function halts execution and provides an error message. 25 | } 26 | \description{ 27 | This function establishes a connection to a generative AI model through a generative AI object. 28 | It generates a text response based on the provided prompt. 29 | } 30 | \details{ 31 | Providing accurate and valid information for each argument is crucial for successful text 32 | generation by the generative AI model. If any parameter is incorrect, the function responds with an 33 | error message based on the API feedback. To view all supported generative AI models, use the 34 | function \code{\link{available.models}}. 35 | 36 | For \strong{Google Generative AI} models, available configurations are as follows. For more detail, 37 | please refer 38 | to \code{https://ai.google.dev/api/rest/v1/HarmCategory}, 39 | \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and 40 | \code{https://ai.google.dev/api/rest/v1/GenerationConfig}. 41 | 42 | \itemize{ 43 | \item \code{harm.category.dangerous.content} 44 | 45 | Optional. 
46 | with a higher value representing a lower probability of being blocked.
47 | 
48 | \item \code{harm.category.harassment}
49 | 
50 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
51 | with a higher value representing a lower probability of being blocked.
52 | 
53 | \item \code{harm.category.hate.speech}
54 | 
55 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
56 | content, with a higher value representing a lower probability of being blocked.
57 | 
58 | \item \code{harm.category.sexually.explicit}
59 | 
60 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
61 | content, with a higher value representing a lower probability of being blocked.
62 | 
63 | \item \code{stop.sequences}
64 | 
65 | Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
66 | the API will stop at the first appearance of a stop sequence. The stop sequence will not be
67 | included as part of the response.
68 | 
69 | \item \code{max.output.tokens}
70 | 
71 | Optional. An integer, value varies by model, representing the maximum number of tokens to include
72 | in a candidate.
73 | 
74 | \item \code{temperature}
75 | 
76 | Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
77 | 
78 | \item \code{top.p}
79 | 
80 | Optional. A number, value varies by model, representing the maximum cumulative probability of tokens
81 | to consider when sampling.
82 | 
83 | \item \code{top.k}
84 | 
85 | Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling.
86 | }
87 | 
88 | For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
89 | \code{https://platform.openai.com/docs/api-reference/chat/create}.
90 | 
91 | \itemize{
92 | \item \code{frequency.penalty}
93 | 
94 | Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
95 | existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
96 | 
97 | \item \code{logit.bias}
98 | 
99 | Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
100 | that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
101 | 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
102 | effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
103 | values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
104 | 
105 | \item \code{logprobs}
106 | 
107 | Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
108 | probabilities of each output token returned in the content of the message.
109 | 
110 | \item \code{top.logprobs}
111 | 
112 | Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
113 | position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
114 | parameter is used.
115 | 
116 | \item \code{max.tokens}
117 | 
118 | Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
119 | input tokens and generated tokens is limited by the model's context length.
120 | 
121 | \item \code{presence.penalty}
122 | 
123 | Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
124 | in the text so far, increasing the model's likelihood to talk about new topics.
125 | 
126 | \item \code{response.format}
127 | 
128 | Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
129 | all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
130 | 
131 | \item \code{seed}
132 | 
133 | Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated
134 | requests with the same seed and parameters should return the same result.
135 | 
136 | \item \code{stop}
137 | 
138 | Optional. A character string or a list containing up to 4 sequences where the API will stop generating further tokens.
139 | 
140 | \item \code{temperature}
141 | 
142 | Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
143 | more random, while lower values like 0.2 will make it more focused and deterministic.
144 | 
145 | \item \code{top.p}
146 | 
147 | Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
148 | the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
149 | 10% probability mass are considered.
150 | 
151 | \item \code{tools}
152 | 
153 | Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
154 | to provide a list of functions the model may generate JSON inputs for.
155 | 
156 | \item \code{tool.choice}
157 | 
158 | Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
159 | the model will not call a function and instead generates a message. \code{auto} means the model can pick
160 | between generating a message or calling a function.
161 | 
162 | \item \code{user}
163 | 
164 | Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
165 | and detect abuse.
166 | }
167 | }
168 | \examples{
169 | \dontrun{
170 | # Assuming there is a GenAI object named 'genai.model' supporting this
171 | # function, an image in your current directory named 'example.png', and
172 | # an online image 'https://example.com/example.png/', please refer to
173 | # the "Live Demo in Colab" above for real examples. The following examples
174 | # are just some basic guidelines.
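175 | 
176 | # In addition, each method below accepts an optional config list. The
177 | # call below is only an illustrative sketch; supported parameter names
178 | # depend on the model behind 'genai.model' (see the Details section above).
179 | genai.model \%>\%
180 |   txt.image(prompt = "Please describe the following image.",
181 |             image.path = "example.png",
182 |             config = list(temperature = 0.5)) \%>\%
183 |   cat()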
184 | 
185 | # Method 1 (recommended): use the pipe operator "\%>\%"
186 | genai.model \%>\%
187 |   txt.image(prompt = "Please describe the following image.",
188 |             image.path = "https://example.com/example.png/") \%>\%
189 |   cat()
190 | 
191 | # Method 2: use the reference operator "$"
192 | cat(genai.model$txt.image(prompt = "Please describe the following image.",
193 |                           image.path = "https://example.com/example.png/"))
194 | 
195 | # Method 3: use the function txt.image() directly
196 | cat(txt.image(genai.object = genai.model,
197 |               prompt = "Please describe the following image.",
198 |               image.path = "example.png"))
199 | }
200 | 
201 | }
202 | \seealso{
203 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
204 | 
205 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/txt_image.ipynb}{Live Demo in Colab}
206 | }
207 | 
--------------------------------------------------------------------------------
/R/src/man/txt.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/txt.R
3 | \name{txt}
4 | \alias{txt}
5 | \title{Text Generation with Text as the Input}
6 | \usage{
7 | txt(genai.object, prompt, verbose = FALSE, config = list())
8 | }
9 | \arguments{
10 | \item{genai.object}{A generative AI object containing necessary and correct information.}
11 | 
12 | \item{prompt}{A character string representing the query for text generation.}
13 | 
14 | \item{verbose}{Optional. Defaults to \code{FALSE}. A boolean value determining whether or not to print
15 | out the details of the text request.}
16 | 
17 | \item{config}{Optional. Defaults to \code{list()}. A list of configuration parameters for text generation.}
18 | }
19 | \value{
20 | If successful, a text response will be returned. If the API response indicates
21 | an error, the function halts execution and provides an error message.
22 | }
23 | \description{
24 | This function establishes a connection to a generative AI model through a generative AI object.
25 | It generates a text response based on the provided prompt.
26 | }
27 | \details{
28 | Providing accurate and valid information for each argument is crucial for successful text
29 | generation by the generative AI model. If any parameter is incorrect, the function responds with an
30 | error message based on the API feedback. To view all supported generative AI models, use the
31 | function \code{\link{available.models}}.
32 | 
33 | For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
34 | please refer
35 | to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
36 | \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
37 | \code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
38 | 
39 | \itemize{
40 | \item \code{harm.category.dangerous.content}
41 | 
42 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
43 | with a higher value representing a lower probability of being blocked.
44 | 
45 | \item \code{harm.category.harassment}
46 | 
47 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
48 | with a higher value representing a lower probability of being blocked.
49 | 
50 | \item \code{harm.category.hate.speech}
51 | 
52 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
53 | content, with a higher value representing a lower probability of being blocked.
54 | 
55 | \item \code{harm.category.sexually.explicit}
56 | 
57 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
58 | content, with a higher value representing a lower probability of being blocked.
59 | 
60 | \item \code{stop.sequences}
61 | 
62 | Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
63 | the API will stop at the first appearance of a stop sequence. The stop sequence will not be
64 | included as part of the response.
65 | 
66 | \item \code{max.output.tokens}
67 | 
68 | Optional. An integer, value varies by model, representing the maximum number of tokens to include
69 | in a candidate.
70 | 
71 | \item \code{temperature}
72 | 
73 | Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
74 | 
75 | \item \code{top.p}
76 | 
77 | Optional. A number, value varies by model, representing the maximum cumulative probability of tokens
78 | to consider when sampling.
79 | 
80 | \item \code{top.k}
81 | 
82 | Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling.
83 | }
84 | 
85 | For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to
86 | \code{https://platform.moonshot.cn/api.html#chat-completion}.
87 | 
88 | \itemize{
89 | \item \code{max.tokens}
90 | 
91 | Optional. An integer. The maximum number of tokens that will be generated when the chat completes.
92 | If the chat has not finished when the maximum number of tokens is reached, the finish reason will be
93 | "length"; otherwise, it will be "stop".
94 | 
95 | \item \code{temperature}
96 | 
97 | Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will
98 | make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic.
99 | 
100 | \item \code{top.p}
101 | 
102 | Optional. A number. The nucleus sampling parameter, an alternative to sampling with temperature.
103 | }
104 | 
105 | For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
106 | \code{https://platform.openai.com/docs/api-reference/chat/create}.
107 | 
108 | \itemize{
109 | \item \code{frequency.penalty}
110 | 
111 | Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
112 | existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
113 | 
114 | \item \code{logit.bias}
115 | 
116 | Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
117 | that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
118 | 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
119 | effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
120 | values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
121 | 
122 | \item \code{logprobs}
123 | 
124 | Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
125 | probabilities of each output token returned in the content of the message.
126 | 
127 | \item \code{top.logprobs}
128 | 
129 | Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
130 | position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
131 | parameter is used.
132 | 
133 | \item \code{max.tokens}
134 | 
135 | Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
136 | input tokens and generated tokens is limited by the model's context length.
137 | 
138 | \item \code{presence.penalty}
139 | 
140 | Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
141 | in the text so far, increasing the model's likelihood to talk about new topics.
142 | 
143 | \item \code{response.format}
144 | 
145 | Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
146 | all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
147 | 
148 | \item \code{seed}
149 | 
150 | Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated
151 | requests with the same seed and parameters should return the same result.
152 | 
153 | \item \code{stop}
154 | 
155 | Optional. A character string or a list containing up to 4 sequences where the API will stop generating further tokens.
156 | 
157 | \item \code{temperature}
158 | 
159 | Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
160 | more random, while lower values like 0.2 will make it more focused and deterministic.
161 | 
162 | \item \code{top.p}
163 | 
164 | Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
165 | the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
166 | 10% probability mass are considered.
167 | 
168 | \item \code{tools}
169 | 
170 | Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
171 | to provide a list of functions the model may generate JSON inputs for.
172 | 
173 | \item \code{tool.choice}
174 | 
175 | Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
176 | the model will not call a function and instead generates a message. \code{auto} means the model can pick
177 | between generating a message or calling a function.
178 | 
179 | \item \code{user}
180 | 
181 | Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
182 | and detect abuse.
183 | }
184 | }
185 | \examples{
186 | \dontrun{
187 | # Assuming there is a GenAI object named 'genai.model' supporting this
188 | # function, please refer to the "Live Demo in Colab" above for real
189 | # examples. The following examples are just some basic guidelines.
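190 | 
191 | # In addition, each method below accepts an optional config list. The
192 | # call below is only an illustrative sketch; supported parameter names
193 | # depend on the model behind 'genai.model' (see the Details section above).
194 | genai.model \%>\%
195 |   txt(prompt = "Write a story about Venus in 50 words.",
196 |       config = list(temperature = 0.5)) \%>\%
197 |   cat()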
198 | 
199 | # Method 1 (recommended): use the pipe operator "\%>\%"
200 | genai.model \%>\%
201 |   txt(prompt = "Write a story about Mars in 50 words.") \%>\%
202 |   cat()
203 | 
204 | # Method 2: use the reference operator "$"
205 | cat(genai.model$txt(prompt = "Write a story about Jupiter in 50 words."))
206 | 
207 | # Method 3: use the function txt() directly
208 | # Set verbose = TRUE to print the request details
209 | cat(txt(genai.object = genai.model,
210 |         prompt = "Write a story about Earth in 50 words."))
211 | }
212 | 
213 | }
214 | \seealso{
215 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
216 | 
217 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/txt.ipynb}{Live Demo in Colab}
218 | }
219 | 
--------------------------------------------------------------------------------
/R/src/R/txt.image.R:
--------------------------------------------------------------------------------
1 | #' Text Generation with Text and Image as the Input
2 | #'
3 | #' This function establishes a connection to a generative AI model through a generative AI object.
4 | #' It generates a text response based on the provided prompt.
5 | #'
6 | #' @param genai.object A generative AI object containing necessary and correct information.
7 | #' @param prompt A character string representing the query for text generation.
8 | #' @param image.path A character string representing the path to the image. It should be a link
9 | #' starting with \code{https}/\code{http} or a local directory path to an image.
10 | #' @param verbose Optional. Defaults to \code{FALSE}. A boolean value determining whether or not to print
11 | #' out the details of the text request.
12 | #' @param config Optional. Defaults to \code{list()}. A list of configuration parameters for text generation.
13 | #'
14 | #' @return If successful, a text response will be returned. If the API response indicates
15 | #' an error, the function halts execution and provides an error message.
16 | #'
17 | #' @details Providing accurate and valid information for each argument is crucial for successful text
18 | #' generation by the generative AI model. If any parameter is incorrect, the function responds with an
19 | #' error message based on the API feedback. To view all supported generative AI models, use the
20 | #' function \code{\link{available.models}}.
21 | #'
22 | #' For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
23 | #' please refer
24 | #' to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
25 | #' \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
26 | #' \code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
27 | #'
28 | #' \itemize{
29 | #' \item \code{harm.category.dangerous.content}
30 | #'
31 | #' Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
32 | #' with a higher value representing a lower probability of being blocked.
33 | #'
34 | #' \item \code{harm.category.harassment}
35 | #'
36 | #' Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
37 | #' with a higher value representing a lower probability of being blocked.
38 | #'
39 | #' \item \code{harm.category.hate.speech}
40 | #'
41 | #' Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
42 | #' content, with a higher value representing a lower probability of being blocked.
43 | #'
44 | #' \item \code{harm.category.sexually.explicit}
45 | #'
46 | #' Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
47 | #' content, with a higher value representing a lower probability of being blocked.
48 | #'
49 | #' \item \code{stop.sequences}
50 | #'
51 | #' Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
52 | #' the API will stop at the first appearance of a stop sequence. The stop sequence will not be
53 | #' included as part of the response.
54 | #'
55 | #' \item \code{max.output.tokens}
56 | #'
57 | #' Optional. An integer, value varies by model, representing the maximum number of tokens to include
58 | #' in a candidate.
59 | #'
60 | #' \item \code{temperature}
61 | #'
62 | #' Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
63 | #'
64 | #' \item \code{top.p}
65 | #'
66 | #' Optional. A number, value varies by model, representing the maximum cumulative probability of tokens
67 | #' to consider when sampling.
68 | #'
69 | #' \item \code{top.k}
70 | #'
71 | #' Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling.
72 | #' }
73 | #'
74 | #' For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
75 | #' \code{https://platform.openai.com/docs/api-reference/chat/create}.
76 | #'
77 | #' \itemize{
78 | #' \item \code{frequency.penalty}
79 | #'
80 | #' Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
81 | #' existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
82 | #'
83 | #' \item \code{logit.bias}
84 | #'
85 | #' Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
86 | #' that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
87 | #' 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
88 | #' effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
89 | #' values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
90 | #'
91 | #' \item \code{logprobs}
92 | #'
93 | #' Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
94 | #' probabilities of each output token returned in the content of the message.
95 | #'
96 | #' \item \code{top.logprobs}
97 | #'
98 | #' Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
99 | #' position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
100 | #' parameter is used.
101 | #'
102 | #' \item \code{max.tokens}
103 | #'
104 | #' Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
105 | #' input tokens and generated tokens is limited by the model's context length.
106 | #'
107 | #' \item \code{presence.penalty}
108 | #'
109 | #' Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
110 | #' in the text so far, increasing the model's likelihood to talk about new topics.
111 | #'
112 | #' \item \code{response.format}
113 | #'
114 | #' Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
115 | #' all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
116 | #'
117 | #' \item \code{seed}
118 | #'
119 | #' Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated
120 | #' requests with the same seed and parameters should return the same result.
121 | #'
122 | #' \item \code{stop}
123 | #'
124 | #' Optional. A character string or a list containing up to 4 sequences where the API will stop generating further tokens.
125 | #'
126 | #' \item \code{temperature}
127 | #'
128 | #' Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
129 | #' more random, while lower values like 0.2 will make it more focused and deterministic.
130 | #'
131 | #' \item \code{top.p}
132 | #'
133 | #' Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
134 | #' the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
135 | #' 10% probability mass are considered.
136 | #'
137 | #' \item \code{tools}
138 | #'
139 | #' Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
140 | #' to provide a list of functions the model may generate JSON inputs for.
141 | #'
142 | #' \item \code{tool.choice}
143 | #'
144 | #' Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
145 | #' the model will not call a function and instead generates a message. \code{auto} means the model can pick
146 | #' between generating a message or calling a function.
147 | #'
148 | #' \item \code{user}
149 | #'
150 | #' Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
151 | #' and detect abuse.
152 | #' }
153 | #'
154 | #' @seealso
155 | #' \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
156 | #'
157 | #' \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/txt_image.ipynb}{Live Demo in Colab}
158 | #'
159 | #' @examples
160 | #' \dontrun{
161 | #' # Assuming there is a GenAI object named 'genai.model' supporting this
162 | #' # function, an image in your current directory named 'example.png', and
163 | #' # an online image 'https://example.com/example.png/', please refer to
164 | #' # the "Live Demo in Colab" above for real examples. The following examples
165 | #' # are just some basic guidelines.
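166 | #'
167 | #' # In addition, each method below accepts an optional config list. The
168 | #' # call below is only an illustrative sketch; supported parameter names
169 | #' # depend on the model behind 'genai.model' (see the details above).
170 | #' genai.model %>%
171 | #'   txt.image(prompt = "Please describe the following image.",
172 | #'             image.path = "example.png",
173 | #'             config = list(temperature = 0.5)) %>%
174 | #'   cat()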
175 | #'
176 | #' # Method 1 (recommended): use the pipe operator "%>%"
177 | #' genai.model %>%
178 | #'   txt.image(prompt = "Please describe the following image.",
179 | #'             image.path = "https://example.com/example.png/") %>%
180 | #'   cat()
181 | #'
182 | #' # Method 2: use the reference operator "$"
183 | #' cat(genai.model$txt.image(prompt = "Please describe the following image.",
184 | #'                           image.path = "https://example.com/example.png/"))
185 | #'
186 | #' # Method 3: use the function txt.image() directly
187 | #' cat(txt.image(genai.object = genai.model,
188 | #'               prompt = "Please describe the following image.",
189 | #'               image.path = "example.png"))
190 | #' }
191 | #'
192 | #' @export
193 | txt.image = function(genai.object,
194 |                      prompt,
195 |                      image.path,
196 |                      verbose = FALSE,
197 |                      config = list()) {
198 |   genai.object$txt.image(prompt,
199 |                          image.path,
200 |                          verbose,
201 |                          config)
202 | }
203 | 
--------------------------------------------------------------------------------
/R/src/man/chat.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/chat.R
3 | \name{chat}
4 | \alias{chat}
5 | \title{Chat Generation with Text as the Input}
6 | \usage{
7 | chat(genai.object, prompt, verbose = FALSE, config = list())
8 | }
9 | \arguments{
10 | \item{genai.object}{A generative AI object containing necessary and correct information.}
11 | 
12 | \item{prompt}{A character string representing the query for chat generation.}
13 | 
14 | \item{verbose}{Optional. Defaults to \code{FALSE}. A boolean value determining whether or not to print
15 | out the details of the chat request.}
16 | 
17 | \item{config}{Optional. Defaults to \code{list()}. A list of configuration parameters for chat generation.}
18 | }
19 | \value{
20 | If successful, the most recent chat response will be returned. If the API response indicates
21 | an error, the function halts execution and provides an error message.
22 | }
23 | \description{
24 | This function establishes a connection to a generative AI model through a generative AI object.
25 | It generates a chat response based on the provided prompt and stores it in the chat history along
26 | with the generative AI object.
27 | }
28 | \details{
29 | Providing accurate and valid information for each argument is crucial for successful chat
30 | generation by the generative AI model. If any parameter is incorrect, the function responds with an
31 | error message based on the API feedback. To view all supported generative AI models, use the
32 | function \code{\link{available.models}}.
33 | 
34 | In addition, this function modifies the chat history along with the generative AI object directly,
35 | meaning the chat history is mutable. You can print out the chat history using the
36 | function \code{\link{chat.history.print}} or simply use \code{verbose = TRUE} in this function. If you
37 | want to edit a message, use the function \code{\link{chat.edit}}. To reset the chat history along with
38 | the generative AI object, use the function \code{\link{chat.history.reset}}.
39 | 
40 | For \strong{Google Generative AI} models, available configurations are as follows. For more detail,
41 | please refer
42 | to \code{https://ai.google.dev/api/rest/v1/HarmCategory},
43 | \code{https://ai.google.dev/api/rest/v1/SafetySetting}, and
44 | \code{https://ai.google.dev/api/rest/v1/GenerationConfig}.
45 | 
46 | \itemize{
47 | \item \code{harm.category.dangerous.content}
48 | 
49 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for dangerous content,
50 | with a higher value representing a lower probability of being blocked.
51 | 
52 | \item \code{harm.category.harassment}
53 | 
54 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for harassment content,
55 | with a higher value representing a lower probability of being blocked.
56 | 
57 | \item \code{harm.category.hate.speech}
58 | 
59 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for hate speech
60 | content, with a higher value representing a lower probability of being blocked.
61 | 
62 | \item \code{harm.category.sexually.explicit}
63 | 
64 | Optional. An integer, from 1 to 5 inclusive, representing the threshold for sexually explicit
65 | content, with a higher value representing a lower probability of being blocked.
66 | 
67 | \item \code{stop.sequences}
68 | 
69 | Optional. A list of character sequences (up to 5) that will stop output generation. If specified,
70 | the API will stop at the first appearance of a stop sequence. The stop sequence will not be
71 | included as part of the response.
72 | 
73 | \item \code{max.output.tokens}
74 | 
75 | Optional. An integer, value varies by model, representing the maximum number of tokens to include
76 | in a candidate.
77 | 
78 | \item \code{temperature}
79 | 
80 | Optional. A number, from 0.0 to 1.0 inclusive, controlling the randomness of the output.
81 | 
82 | \item \code{top.p}
83 | 
84 | Optional. A number, value varies by model, representing the maximum cumulative probability of tokens
85 | to consider when sampling.
86 | 
87 | \item \code{top.k}
88 | 
89 | Optional. A number, value varies by model, representing the maximum number of tokens to consider when sampling.
90 | }
91 | 
92 | For \strong{Moonshot AI} models, available configurations are as follows. For more detail, please refer to
93 | \code{https://platform.moonshot.cn/api.html#chat-completion}.
94 | 
95 | \itemize{
96 | \item \code{max.tokens}
97 | 
98 | Optional. An integer. The maximum number of tokens that will be generated when the chat completes.
99 | If the chat has not finished when the maximum number of tokens is reached, the finish reason will be
100 | "length"; otherwise, it will be "stop".
101 | 
102 | \item \code{temperature}
103 | 
104 | Optional. A number. What sampling temperature to use, between 0 and 1. Higher values (e.g. 0.7) will
105 | make the output more random, while lower values (e.g. 0.2) will make it more focused and deterministic.
106 | 
107 | \item \code{top.p}
108 | 
109 | Optional. A number. The nucleus sampling parameter, an alternative to sampling with temperature.
110 | }
111 | 
112 | For \strong{OpenAI} models, available configurations are as follows. For more detail, please refer to
113 | \code{https://platform.openai.com/docs/api-reference/chat/create}.
114 | 
115 | \itemize{
116 | \item \code{frequency.penalty}
117 | 
118 | Optional. A number from -2.0 to 2.0 inclusive. Positive values penalize new tokens based on their
119 | existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
120 | 
121 | \item \code{logit.bias}
122 | 
123 | Optional. A map. Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object
124 | that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to
125 | 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact
126 | effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
127 | values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
128 | 
129 | \item \code{logprobs}
130 | 
131 | Optional. A boolean value. Whether to return log probabilities of the output tokens or not. If true, returns the log
132 | probabilities of each output token returned in the content of the message.
133 | 
134 | \item \code{top.logprobs}
135 | 
136 | Optional. An integer between 0 and 5 specifying the number of most likely tokens to return at each token
137 | position, each with an associated log probability. \code{logprobs} must be set to \code{TRUE} if this
138 | parameter is used.
139 | 
140 | \item \code{max.tokens}
141 | 
142 | Optional. An integer. The maximum number of tokens that can be generated in the chat completion. The total length of
143 | input tokens and generated tokens is limited by the model's context length.
144 | 
145 | \item \code{presence.penalty}
146 | 
147 | Optional. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear
148 | in the text so far, increasing the model's likelihood to talk about new topics.
149 | 
150 | \item \code{response.format}
151 | 
152 | Optional. An object specifying the format that the model must output. Compatible with GPT-4 Turbo and
153 | all GPT-3.5 Turbo models newer than \code{gpt-3.5-turbo-1106}.
154 | 
155 | \item \code{seed}
156 | 
157 | Optional. An integer. If specified, our system will make a best effort to sample deterministically, such that repeated
158 | requests with the same seed and parameters should return the same result.
159 | 
160 | \item \code{stop}
161 | 
162 | Optional. A character string or a list containing up to 4 sequences where the API will stop generating further tokens.
163 | 
164 | \item \code{temperature}
165 | 
166 | Optional. A number. What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
167 | more random, while lower values like 0.2 will make it more focused and deterministic.
168 | 
169 | \item \code{top.p}
170 | 
171 | Optional. A number. An alternative to sampling with temperature, called nucleus sampling, where the model considers
172 | the results of the tokens with \code{top.p} probability mass. So 0.1 means only the tokens comprising the top
173 | 10% probability mass are considered.
174 | 
175 | \item \code{tools}
176 | 
177 | Optional. A list of tools the model may call. Currently, only functions are supported as a tool. Use this
178 | to provide a list of functions the model may generate JSON inputs for.
179 | 
180 | \item \code{tool.choice}
181 | 
182 | Optional. A character string or object. Controls which (if any) function is called by the model. \code{none} means
183 | the model will not call a function and instead generates a message. \code{auto} means the model can pick
184 | between generating a message or calling a function.
185 | 
186 | \item \code{user}
187 | 
188 | Optional. A character string. A unique identifier representing your end-user, which can help OpenAI to monitor
189 | and detect abuse.
190 | }
191 | }
192 | \examples{
193 | \dontrun{
194 | # Assuming there is a GenAI object named 'genai.model' supporting this
195 | # function, please refer to the "Live Demo in Colab" above for real
196 | # examples. The following examples are just some basic guidelines.
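197 | 
198 | # In addition, each method below accepts an optional config list. The
199 | # call below is only an illustrative sketch; supported parameter names
200 | # depend on the model behind 'genai.model' (see the Details section above).
201 | genai.model \%>\%
202 |   chat(prompt = "Write a story about Venus in 50 words.",
203 |        config = list(temperature = 0.5)) \%>\%
204 |   cat()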
205 | 
206 | # Method 1 (recommended): use the pipe operator "\%>\%"
207 | genai.model \%>\%
208 |   chat(prompt = "Write a story about Mars in 50 words.") \%>\%
209 |   cat()
210 | 
211 | # Method 2: use the reference operator "$"
212 | cat(genai.model$chat(prompt = "Write a story about Jupiter in 50 words."))
213 | 
214 | # Method 3: use the function chat() directly
215 | cat(chat(genai.object = genai.model,
216 |          prompt = "Summarize the chat."))
217 | }
218 | 
219 | }
220 | \seealso{
221 | \href{https://genai.gd.edu.kg/r/documentation/}{GenAI - R Package "GenAI" Documentation}
222 | 
223 | \href{https://colab.research.google.com/github/GitData-GA/GenAI/blob/gh-pages/r/example/chat.ipynb}{Live Demo in Colab}
224 | }
225 | 
--------------------------------------------------------------------------------