diff --git a/DESCRIPTION b/DESCRIPTION index e80b2f6..4e1b531 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: chatAI4R Type: Package Title: Chat-Based Interactive Artificial Intelligence for R -Version: 0.3.2 +Version: 0.3.3 Date: 2024-05-10 Authors@R: c( person (given = "Satoshi", family = "Kume", @@ -29,10 +29,11 @@ Imports: httr, deepRstudio, pdftools, xml2, - rvest + rvest, + curl Suggests: testthat, knitr License: Artistic-2.0 URL: https://kumes.github.io/chatAI4R/, https://github.com/kumeS/chatAI4R BugReports: https://github.com/kumeS/chatAI4R/issues -RoxygenNote: 7.3.1 +RoxygenNote: 7.3.2 Encoding: UTF-8 diff --git a/NAMESPACE b/NAMESPACE index 9bb10eb..50717b3 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -9,6 +9,8 @@ export(addRoxygenDescription) export(autocreateFunction4R) export(chat4R) export(chat4R_history) +export(chat4R_streaming) +export(chat4Rv2) export(chatAI4pdf) export(checkErrorDet) export(checkErrorDet_JP) @@ -32,6 +34,7 @@ export(ngsub) export(proofreadEnglishText) export(proofreadText) export(removeQuotations) +export(replicatellmAPI4R) export(revisedText) export(searchFunction) export(slow_print_v2) @@ -49,12 +52,17 @@ importFrom(assertthat,noNA) importFrom(clipr,read_clip) importFrom(clipr,write_clip) importFrom(crayon,red) +importFrom(curl,curl_fetch_stream) +importFrom(curl,handle_setheaders) +importFrom(curl,handle_setopt) +importFrom(curl,new_handle) importFrom(deepRstudio,deepel) importFrom(deepRstudio,is_mac) importFrom(future,future) importFrom(future,multisession) importFrom(future,plan) importFrom(future,resolved) +importFrom(httr,GET) importFrom(httr,POST) importFrom(httr,add_headers) importFrom(httr,content) @@ -63,6 +71,7 @@ importFrom(igraph,add_vertices) importFrom(igraph,graph) importFrom(igraph,layout_nicely) importFrom(igraph,layout_with_fr) +importFrom(jsonlite,fromJSON) importFrom(jsonlite,toJSON) importFrom(pdftools,pdf_text) importFrom(rstudioapi,getActiveDocumentContext) diff --git a/R/OptimizeRcode.R b/R/OptimizeRcode.R index 804bf50..f9831ef 100644 --- a/R/OptimizeRcode.R +++ b/R/OptimizeRcode.R @@ -98,9 +98,9 @@ OptimizeRcode <- function(Model = "gpt-4-0613", template2 = "Please describe in detail the changes you previously made to the R code without apology." 
history[[4]] <- list('role' = 'user', 'content' = template2) - res1 <- chat4R_history(history = history, + res1 <- as.character(chat4R_history(history = history, Model = Model, - temperature = temperature) + temperature = temperature)) if(SlowTone) { d <- ifelse(20/nchar(res1) < 0.3, 20/nchar(res1), 0.3) * stats::runif(1, min = 0.95, max = 1.05) @@ -111,6 +111,6 @@ OptimizeRcode <- function(Model = "gpt-4-0613", } } } else { - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/RcodeImprovements.R b/R/RcodeImprovements.R index c375efd..ef624a8 100644 --- a/R/RcodeImprovements.R +++ b/R/RcodeImprovements.R @@ -64,9 +64,9 @@ RcodeImprovements <- function(Summary_nch = 100, list('role' = 'user', 'content' = template1s)) # Generate the suggestions using the GPT model - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) # Print the suggestions based on the verbosity and speed settings if(verbose){ diff --git a/R/RevisedText.R b/R/RevisedText.R index 1d93c2c..a6a52e5 100644 --- a/R/RevisedText.R +++ b/R/RevisedText.R @@ -119,9 +119,9 @@ history <- list(list('role' = 'system', 'content' = template0s), list('role' = 'user', 'content' = template1s)) # Execute the chat model -res <- chat4R_history(history=history, +res <- as.character(chat4R_history(history=history, Model = Model, - temperature = 1) + temperature = 1)) # Output final result or relevant messages if(verbose) { diff --git a/R/TextSummaryAsBullet.R b/R/TextSummaryAsBullet.R index 6a6117b..f4121ac 100644 --- a/R/TextSummaryAsBullet.R +++ b/R/TextSummaryAsBullet.R @@ -104,9 +104,9 @@ TextSummaryAsBullet <- function(Model = "gpt-4-0613", if(verbose){utils::setTxtProgressBar(pb, 3)} # Execute text generation - res <- chat4R_history(history = history, + res <- as.character(chat4R_history(history = history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){utils::setTxtProgressBar(pb, 4)} diff --git a/R/addCommentCode.R b/R/addCommentCode.R index 5a989c8..451a7bc 100644 --- a/R/addCommentCode.R +++ b/R/addCommentCode.R @@ -68,9 +68,9 @@ addCommentCode <- function(Model = "gpt-4-0613", list('role' = 'user', 'content' = template1s)) # Execute text generation - res <- chat4R_history(history = history, + res <- as.character(chat4R_history(history = history, Model = Model, - temperature = temperature) + temperature = temperature)) # Output the enriched text if(SelectedCode){ diff --git a/R/addRoxygenDescription.R b/R/addRoxygenDescription.R index 1e02223..c377d6f 100644 --- a/R/addRoxygenDescription.R +++ b/R/addRoxygenDescription.R @@ -81,9 +81,9 @@ addRoxygenDescription <- function(Model = "gpt-4-0613", if(verbose){utils::setTxtProgressBar(pb, 2)} # Execute text generation - res <- chat4R_history(history = history, + res <- as.character(chat4R_history(history = history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){ utils::setTxtProgressBar(pb, 3) diff --git a/R/chat4R.R b/R/chat4R.R index b0008c6..8fd744a 100644 --- a/R/chat4R.R +++ b/R/chat4R.R @@ -1,11 +1,14 @@ #' Chat4R Function #' -#' @title Chat4R: Interact with GPT-3.5 (default) using OpenAI API +#' @title Chat4R: Interact with gpt-4o-mini (default) using OpenAI API #' @description This function uses the OpenAI API to interact with the -#' GPT-3.5 model (default) and generates responses based on user input. 
+#' gpt-4o-mini model (default) and generates responses based on user input. +#' Currently, "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-turbo", and "gpt-3.5-turbo" +#' can be selected as the OpenAI model. #' @param content A string containing the user's input message. -#' @param api_key A string containing the user's OpenAI API key. Defaults to the value of the environment variable "OPENAI_API_KEY". -#' @param Model A string specifying the GPT model to use (default: "gpt-3.5-turbo-16k"). +#' @param api_key A string containing the user's OpenAI API key. +#' Defaults to the value of the environment variable "OPENAI_API_KEY". +#' @param Model A string specifying the GPT model to use (default: "gpt-4o-mini"). #' @param temperature A numeric value controlling the randomness of the model's output (default: 1). #' @param simple Logical, if TRUE, only the content of the model's message will be returned. #' @param fromJSON_parsed Logical, if TRUE, content will be parsed from JSON. @@ -22,7 +25,7 @@ #' } chat4R <- function(content, - Model = "gpt-3.5-turbo-16k", + Model = "gpt-4o-mini", temperature = 1, simple=TRUE, fromJSON_parsed=FALSE, diff --git a/R/chat4R_streaming.R b/R/chat4R_streaming.R new file mode 100644 index 0000000..b4e44e3 --- /dev/null +++ b/R/chat4R_streaming.R @@ -0,0 +1,81 @@ +#' Chat4R Function with Streaming +#' +#' @title chat4R_streaming: Interact with gpt-4o-mini (default) with streaming using OpenAI API +#' @description This function uses the OpenAI API to interact with the +#' gpt-4o-mini model (default) and generates responses based on user input with +#' streaming data back to R. +#' Currently, "gpt-4o-mini", "gpt-4o", and "gpt-4-turbo" +#' can be selected as the OpenAI model. +#' @param content A string containing the user's input message. +#' @param api_key A string containing the user's OpenAI API key. +#' Defaults to the value of the environment variable "OPENAI_API_KEY". +#' @param Model A string specifying the GPT model to use (default: "gpt-4o-mini"). +#' @param temperature A numeric value controlling the randomness of the model's output (default: 1). +#' @importFrom httr POST add_headers content +#' @importFrom jsonlite toJSON fromJSON +#' @return Streamed text is printed to the console as it arrives; the HTTP response object is returned invisibly. 
+#' @export chat4R_streaming +#' @author Satoshi Kume +#' @examples +#' \dontrun{ +#' Sys.setenv(OPENAI_API_KEY = "Your API key") +#' chat4R_streaming(content = "What is the capital of France?") +#' } + +chat4R_streaming <- function(content, + Model = "gpt-4o-mini", + temperature = 1, + api_key = Sys.getenv("OPENAI_API_KEY")) { + + # Define parameters + api_url <- "https://api.openai.com/v1/chat/completions" + n <- 1 + top_p <- 1 + + # Configure headers for the API request + headers <- c(`Content-Type` = "application/json", + `Authorization` = paste("Bearer", api_key)) + + # Define the body of the API request + body <- list(model = Model, + messages = list(list(role = "user", content = content)), + temperature = temperature, top_p = top_p, n = n, + stream = TRUE) + +# Function to process each chunk of streamed data +streaming_callback <- function(data) { + + # Convert the raw chunk to character and split the SSE payload into lines + lines <- unlist(strsplit(rawToChar(data), "\n")) + + for (line in lines) { + # Skip blank keep-alive lines and the terminating "[DONE]" marker + if (line == "" || line == "data: [DONE]") next + # Strip the "data: " prefix and extract the incremental text + delta <- fromJSON(sub("data: ", "", line))$choices$delta$content + if (!is.null(delta)) cat(delta) + } +} + +# Perform the request with streaming using httr::write_stream +response <- httr::POST( + url = api_url, + body = body, + encode = "json", + httr::add_headers(.headers = headers), + httr::write_stream(streaming_callback) +) + +# Return the response object invisibly; the streamed text has already been printed +invisible(response) + +} diff --git a/R/chat4Rv2.R b/R/chat4Rv2.R new file mode 100644 index 0000000..f4430e6 --- /dev/null +++ b/R/chat4Rv2.R @@ -0,0 +1,96 @@ +#' chat4Rv2 Function +#' +#' @title chat4Rv2: Interact with gpt-4o-mini (default) using OpenAI API +#' @description This function uses the OpenAI API to interact with the +#' gpt-4o-mini model (default) and generates responses based on user input. +#' Currently, "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-turbo", and "gpt-3.5-turbo" +#' can be selected as the OpenAI model. +#' @param content A string containing the user's input message. +#' @param api_key A string containing the user's OpenAI API key. +#' Defaults to the value of the environment variable "OPENAI_API_KEY". +#' @param Model A string specifying the GPT model to use (default: "gpt-4o-mini"). +#' @param temperature A numeric value controlling the randomness of the model's output (default: 1). +#' @param simple Logical, if TRUE, only the content of the model's message will be returned. +#' @param fromJSON_parsed Logical, if TRUE, content will be parsed from JSON. +#' @param system_set A string containing the system message to set the context. +#' If provided, it will be added as the first message in the conversation. +#' Default is an empty string. +#' @importFrom httr POST add_headers content +#' @importFrom jsonlite toJSON +#' @return A data frame containing the response from the GPT model. 
+#' @export chat4Rv2 +#' @author Satoshi Kume +#' @examples +#' \dontrun{ +#' Sys.setenv(OPENAI_API_KEY = "Your API key") +#' # Using chat4Rv2 without system_set (default behavior) +#' response <- chat4Rv2(content = "What is the capital of France?") +#' response +#' +#' # Using chat4Rv2 with a system_set provided +#' response <- chat4Rv2(content = "What is the capital of France?", +#' system_set = "You are a helpful assistant.") +#' response +#' } + + +chat4Rv2 <- function(content, + Model = "gpt-4o-mini", + temperature = 1, + simple = TRUE, + fromJSON_parsed = FALSE, + system_set = "", + api_key = Sys.getenv("OPENAI_API_KEY")) { + + # Define parameters + api_url <- "https://api.openai.com/v1/chat/completions" + n <- 1 + top_p <- 1 + + # Configure headers for the API request + headers <- httr::add_headers(`Content-Type` = "application/json", + `Authorization` = paste("Bearer", api_key)) + + # Construct messages list depending on system_set parameter + if (nzchar(system_set)) { + # Include system message if provided + messages_list <- list( + list(role = "system", content = system_set), + list(role = "user", content = content) + ) + } else { + # Only include user message if system_set is empty + messages_list <- list( + list(role = "user", content = content) + ) + } + + # Define the body of the API request + body <- list(model = Model, + messages = messages_list, + temperature = temperature, + top_p = top_p, + n = n) + + # Send a POST request to the OpenAI server + response <- httr::POST(url = api_url, + body = jsonlite::toJSON(body, auto_unbox = TRUE), + encode = "json", + config = headers) + + # Extract and return the response content + if (simple) { + return(data.frame(content = httr::content(response, "parsed")$choices[[1]]$message$content)) + } else { + if (fromJSON_parsed) { + raw_content <- httr::content(response, "raw") + char_content <- rawToChar(raw_content) + parsed_data <- jsonlite::fromJSON(char_content) + return(parsed_data) + } else { + return(data.frame(httr::content(response, "parsed"))) + } + } +} + + diff --git a/R/checkErrorDet.R b/R/checkErrorDet.R index 0cf1618..01005e9 100644 --- a/R/checkErrorDet.R +++ b/R/checkErrorDet.R @@ -6,8 +6,8 @@ #' @title Check Error Details #' @description A function to analyze and provide guidance on how to fix an error message copied from the R console. #' @param Summary_nch An integer specifying the maximum number of characters for the summary. -#' @param Model A string specifying the model to be used, default is "gpt-4-0314". -#' Currently, "gpt-4", "gpt-4-0314" and "gpt-4-0613" can be selected as gpt-4 models. +#' @param Model A string specifying the model to be used, default is "gpt-4o-mini". +#' Currently, "gpt-4", "gpt-4-0314", and "gpt-4o-mini" can be selected. #' Execution with GPT-4 is recommended. #' @param language A string specifying the output language, default is "English". #' @param verbose A logical value to control the verbosity of the output, default is TRUE. 
@@ -25,7 +25,7 @@ #' } checkErrorDet <- function(Summary_nch = 100, - Model = "gpt-4-0613", + Model = "gpt-4o-mini", language = "English", verbose = TRUE, SlowTone = FALSE) { @@ -64,9 +64,9 @@ checkErrorDet <- function(Summary_nch = 100, list('role' = 'user', 'content' = template1s)) # Execution - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose) { if(SlowTone) { diff --git a/R/checkErrorDet_JP.R b/R/checkErrorDet_JP.R index 20a3a59..1b27440 100644 --- a/R/checkErrorDet_JP.R +++ b/R/checkErrorDet_JP.R @@ -69,9 +69,9 @@ checkErrorDet_JP <- function(Summary_nch = 100, if(verbose){utils::setTxtProgressBar(pb, 2)} # Execution - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){ utils::setTxtProgressBar(pb, 3) diff --git a/R/conversation4R.R b/R/conversation4R.R index 682003f..55bc6a4 100644 --- a/R/conversation4R.R +++ b/R/conversation4R.R @@ -6,9 +6,9 @@ #' @description Interface to communicate with OpenAI's models using R, maintaining a conversation history and allowing for initialization of a new conversation. #' @param message A string containing the message to be sent to the model. #' @param api_key A string containing the OpenAI API key. Default is retrieved from the system environment variable "OPENAI_API_KEY". -#' @param template A string containing the template for the conversation. Default is an empty string. +#' @param system_set A string containing the system message for the conversation. Default is an empty string. #' @param ConversationBufferWindowMemory_k An integer representing the conversation buffer window memory. Default is 2. -#' @param Model A string representing the model to be used. Default is "gpt-3.5-turbo-16k". +#' @param Model A string representing the model to be used. Default is "gpt-4o-mini". #' @param language A string representing the language to be used in the conversation. Default is "English". #' @param initialization A logical flag to initialize a new conversation. Default is FALSE. #' @param verbose A logical flag to print the conversation. Default is TRUE. @@ -26,9 +26,9 @@ conversation4R <- function(message, api_key = Sys.getenv("OPENAI_API_KEY"), - template = "", + system_set = "", ConversationBufferWindowMemory_k = 2, - Model = "gpt-3.5-turbo-16k", + Model = "gpt-4o-mini", language = "English", initialization = FALSE, verbose = TRUE){ @@ -36,7 +36,7 @@ conversation4R <- function(message, # Assertions to verify the types of the input parameters assertthat::assert_that(assertthat::is.string(message)) assertthat::assert_that(assertthat::is.string(api_key)) -assertthat::assert_that(assertthat::is.string(template)) +assertthat::assert_that(assertthat::is.string(system_set)) assertthat::assert_that(assertthat::is.count(ConversationBufferWindowMemory_k)) assertthat::assert_that(assertthat::is.flag(initialization)) @@ -53,24 +53,24 @@ chat_history$history <- c() # Define temperature = 1 -# Prompt Template -if(template == ""){ - template = paste0("You are an excellent assistant. Please reply in ", language, ".") +# System prompt +if(system_set == ""){ + system_set = paste0("You are an excellent assistant. 
Please reply in ", language, ".") } -template2 = " +system_set2 = " History:%s" -template3 = " +system_set3 = " Human: %s" -template4 = " +system_set4 = " Assistant: %s" if(identical(as.character(chat_history$history), character(0))){ chat_historyR <- list( - list(role = "system", content = template), + list(role = "system", content = system_set), list(role = "user", content = message)) # Run @@ -80,18 +80,18 @@ res <- chatAI4R::chat4R_history(history = chat_historyR, temperature = temperature) -template3s <- sprintf(template3, message) -template4s <- sprintf(template4, res) +system_set3s <- sprintf(system_set3, message) +system_set4s <- sprintf(system_set4, res) chat_history$history <- list( - list(role = "system", content = template), + list(role = "system", content = system_set), list(role = "user", content = message), list(role = "assistant", content = res) ) -out <- c(paste0("System: ", template), - crayon::red(template3s), - crayon::blue(template4s)) +out <- c(paste0("System: ", system_set), + crayon::red(system_set3s), + crayon::blue(system_set4s)) if(verbose){ cat(out) @@ -129,12 +129,12 @@ r <- switch(chat_history$history[[n]]$role, rr <- c(rr, r) } -template2s <- sprintf(template2, paste0(rr, collapse = "")) +system_set2s <- sprintf(system_set2, paste0(rr, collapse = "")) -out <- c(paste0("System: ", template), - template2s, - crayon::red(sprintf(template3, new_conversation[[1]]$content)), - crayon::blue(sprintf(template4, assistant_conversation[[1]]$content))) +out <- c(paste0("System: ", system_set), + system_set2s, + crayon::red(sprintf(system_set3, new_conversation[[1]]$content)), + crayon::blue(sprintf(system_set4, assistant_conversation[[1]]$content))) chat_history$history <- chat_historyR diff --git a/R/convertBullet2Sentence.R b/R/convertBullet2Sentence.R index f37bf6f..9f4daac 100644 --- a/R/convertBullet2Sentence.R +++ b/R/convertBullet2Sentence.R @@ -81,9 +81,9 @@ convertBullet2Sentence <- function(Model = "gpt-4-0613", if(verbose){utils::setTxtProgressBar(pb, 3)} # Execute text generation - res <- chat4R_history(history = history, + res <- as.character(chat4R_history(history = history, Model = Model, - temperature = temperature) + temperature = temperature)) # Output the summarized text if(all(verbose, SpeakJA)){ @@ -97,6 +97,6 @@ convertBullet2Sentence <- function(Model = "gpt-4-0613", if(SelectedCode){ rstudioapi::insertText(text = as.character(res)) } else { - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/convertRscript2Function.R b/R/convertRscript2Function.R index 580ff6f..ddc2675 100644 --- a/R/convertRscript2Function.R +++ b/R/convertRscript2Function.R @@ -92,6 +92,6 @@ convertRscript2Function <- function(Model = "gpt-4-0613", if(SelectedCode){ rstudioapi::insertText(text = as.character(res)) } else { - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/convertScientificLiterature.R b/R/convertScientificLiterature.R index 2c71a25..d2ac5db 100644 --- a/R/convertScientificLiterature.R +++ b/R/convertScientificLiterature.R @@ -74,6 +74,6 @@ convertScientificLiterature <- function(Model = "gpt-4-0613", rstudioapi::insertText(text = as.character(res)) } else { # Write to the clipboard - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/createRcode.R b/R/createRcode.R index 5f672c7..3547fe2 100644 --- a/R/createRcode.R +++ b/R/createRcode.R @@ -100,7 +100,7 @@ createRcode <- function(Summary_nch = 100, } } - 
return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/createRfunction.R b/R/createRfunction.R index ffa9e8a..f3ad64b 100644 --- a/R/createRfunction.R +++ b/R/createRfunction.R @@ -105,7 +105,7 @@ createRfunction <- function(Model = "gpt-4-0613", } } - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/createSpecifications4R.R b/R/createSpecifications4R.R index 8d014a8..f94395c 100644 --- a/R/createSpecifications4R.R +++ b/R/createSpecifications4R.R @@ -99,7 +99,7 @@ createSpecifications4R <- function(Model = "gpt-4-0613", } } - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/create_eBay_Description.R b/R/create_eBay_Description.R index f667941..cbaa106 100644 --- a/R/create_eBay_Description.R +++ b/R/create_eBay_Description.R @@ -118,7 +118,7 @@ if(verbose) { } } -return(clipr::write_clip(res)) +return(clipr::write_clip(as.character(res))) } } diff --git a/R/designPackage.R b/R/designPackage.R index 5fa1d59..b31b428 100644 --- a/R/designPackage.R +++ b/R/designPackage.R @@ -59,9 +59,9 @@ designPackage <- function(Model = "gpt-4-0613", list('role' = 'user', 'content' = template1s)) # Execute the chat model - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) # Print the result based on verbosity and tone speed if(verbose) { diff --git a/R/enrichTextContent.R b/R/enrichTextContent.R index 624fa78..27ce791 100644 --- a/R/enrichTextContent.R +++ b/R/enrichTextContent.R @@ -85,6 +85,6 @@ You are an excellent assistant. Your expertise as an assistant is truly unparall rstudioapi::insertText(text = as.character(res)) return(message("Finished!!")) } else { - return(clipr::write_clip(res)) + return(clipr::write_clip(as.character(res))) } } diff --git a/R/extractKeywords.R b/R/extractKeywords.R index c2d8372..fa3dca5 100644 --- a/R/extractKeywords.R +++ b/R/extractKeywords.R @@ -69,9 +69,9 @@ extractKeywords <- function(Model = "gpt-4-0613", if(verbose){utils::setTxtProgressBar(pb, 2)} # Execute the chat model - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){utils::setTxtProgressBar(pb, 3)} # Print the result based on verbosity and tone speed diff --git a/R/proofreadEnglishText.R b/R/proofreadEnglishText.R index 879522d..9ebd595 100644 --- a/R/proofreadEnglishText.R +++ b/R/proofreadEnglishText.R @@ -76,9 +76,9 @@ proofreadEnglishText <- function(Model = "gpt-4", if(verbose){utils::setTxtProgressBar(pb, 2)} # Execution - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) #str(res) if(verbose){utils::setTxtProgressBar(pb, 3)} diff --git a/R/proofreadText.R b/R/proofreadText.R index 52ee00d..221ce62 100644 --- a/R/proofreadText.R +++ b/R/proofreadText.R @@ -71,9 +71,9 @@ proofreadText <- function(Model = "gpt-4-0613", if(verbose){utils::setTxtProgressBar(pb, 2)} # Execute the chat model - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){utils::setTxtProgressBar(pb, 3)} diff --git a/R/replicateAPI4R.R b/R/replicateAPI4R.R new file mode 100644 index 0000000..60e5f6a --- /dev/null +++ 
b/R/replicateAPI4R.R @@ -0,0 +1,142 @@ +#' replicatellmAPI4R: Interact with the Replicate API for LLMs in R +#' +#' @description This function interacts with the Replicate API (v1) to utilize large language models (LLMs) such as Llama. It sends a POST request with the provided input and handles both streaming and non-streaming responses. +#' +#' @param input A list containing the API request body with parameters including prompt, max_tokens, top_k, top_p, min_tokens, temperature, system_prompt, presence_penalty, and frequency_penalty. +#' @param model_url A character string specifying the model endpoint URL (e.g., "/models/meta/meta-llama-3.1-405b-instruct/predictions"). +#' @param simple A logical value indicating whether to return a simplified output (only the model output) if TRUE, or the full API response if FALSE. Default is TRUE. +#' @param fetch_stream A logical value indicating whether to fetch a streaming response. Default is FALSE. +#' @param api_key A character string representing the Replicate API key. Defaults to the environment variable "Replicate_API_KEY". +#' +#' @importFrom httr add_headers POST GET content +#' @importFrom jsonlite toJSON fromJSON +#' @importFrom curl new_handle handle_setopt handle_setheaders curl_fetch_stream +#' @importFrom assertthat assert_that is.string is.flag noNA +#' +#' @return If fetch_stream is FALSE, returns either a simplified output (if simple is TRUE) or the full API response. In streaming mode, outputs the response stream directly to the console. +#' +#' @examples +#' \dontrun{ +#' Sys.setenv(Replicate_API_KEY = "Your API key") +#' input <- list( +#' input = list( +#' prompt = "What is the capital of France?", +#' max_tokens = 1024, +#' top_k = 50, +#' top_p = 0.9, +#' min_tokens = 0, +#' temperature = 0.6, +#' system_prompt = "You are a helpful assistant.", +#' presence_penalty = 0, +#' frequency_penalty = 0 +#' ) +#' ) +#' model_url <- "/models/meta/meta-llama-3.1-405b-instruct/predictions" +#' response <- replicatellmAPI4R(input, model_url) +#' print(response) +#' } +#' +#' @export replicatellmAPI4R +#' @author Satoshi Kume +replicatellmAPI4R <- function(input, + model_url, + simple = TRUE, + fetch_stream = FALSE, + api_key = Sys.getenv("Replicate_API_KEY")) { + + # Validate input arguments using assertthat functions + assertthat::assert_that( + is.list(input), + assertthat::is.string(model_url), + assertthat::is.flag(simple), + assertthat::is.flag(fetch_stream), + assertthat::is.string(api_key), + assertthat::noNA(api_key) + ) + + # Define the base API URL and construct the full API endpoint URL by concatenating the base URL and model URL + api_url <- "https://api.replicate.com/v1/" + api_url0 <- paste0(api_url, model_url) + # Collapse accidental double slashes in the path (but keep the "//" in "https://") + api_url0 <- gsub("([^:])//", "\\1/", api_url0) + + # Configure HTTP headers for the API request, including content type and authorization + headers <- httr::add_headers( + `Content-Type` = "application/json", + `Authorization` = paste("Bearer", api_key) + ) + + # Define the body of the API request using the input provided + body <- input + + # Send a POST request to the Replicate API endpoint with the JSON-encoded body and headers + response <- httr::POST( + url = api_url0, + body = jsonlite::toJSON(body, auto_unbox = TRUE), + encode = "json", + config = headers + ) + + # If fetch_stream is FALSE, poll the prediction endpoint until the result is ready + if (!fetch_stream) { + # Get the URL to poll for the prediction result + get_url <- httr::content(response, 
"parsed")$urls$get + + # Initialize result as NULL to start polling + result <- NULL + # Poll the get_url until the result is ready + while (is.null(result)) { + response_output <- httr::GET(get_url, headers) + # Parse the response text into JSON + content <- jsonlite::fromJSON(httr::content(response_output, "text", encoding = "UTF-8")) + + if (content$status == "succeeded") { + # If prediction succeeded, assign the result to response_result and update result to exit the loop + response_result <- content + result <- response_result + } else if (content$status == "failed") { + # If prediction failed, throw an error + stop("Prediction failed") + } else { + # Wait for 1 second before polling again + Sys.sleep(1) + } + } + + # Return a simplified output if simple is TRUE, otherwise return the full response + if (simple) { + return(response_result$output) + } else { + return(response_result) + } + } else { + # If fetch_stream is FALSE, handle non-streaming mode + + # Get the streaming URL from the API response + stream_url <- httr::content(response, "parsed")$urls$stream + + # Define a callback function to process streaming data chunks as they arrive + streaming_callback <- function(data) { + # Convert raw data to character + message <- rawToChar(data) + # Output the message with a newline for clarity + cat(message, "\n") + # Return TRUE to indicate processing was successful + TRUE + } + + # Create a new curl handle for the streaming request + stream_handle <- curl::new_handle() + # Set the streaming URL on the handle + curl::handle_setopt(stream_handle, url = stream_url) + # Set the required headers for the streaming request, including authorization and content type + curl::handle_setheaders(stream_handle, + Authorization = paste("Bearer", api_key), + `Content-Type` = "application/json" + ) + + # Send the streaming request, processing data using the defined callback function + curl::curl_fetch_stream(url = stream_url, fun = streaming_callback, handle = stream_handle) + } +} diff --git a/R/searchFunction.R b/R/searchFunction.R index ff54f00..81930e1 100644 --- a/R/searchFunction.R +++ b/R/searchFunction.R @@ -81,9 +81,9 @@ searchFunction <- function(Summary_nch = 100, if(verbose){utils::setTxtProgressBar(pb, 2)} # Execute the function that interacts with the API - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){ utils::setTxtProgressBar(pb, 3) diff --git a/R/supportIdeaGeneration.R b/R/supportIdeaGeneration.R index 6ba4ddf..65415f9 100644 --- a/R/supportIdeaGeneration.R +++ b/R/supportIdeaGeneration.R @@ -80,9 +80,9 @@ supportIdeaGeneration <- function(Model = "gpt-4-0613", if(verbose){utils::setTxtProgressBar(pb, 2)} # Execute the chat model - res <- chat4R_history(history=history, + res <- as.character(chat4R_history(history=history, Model = Model, - temperature = temperature) + temperature = temperature)) if(verbose){ utils::setTxtProgressBar(pb, 3) diff --git a/man/chat4R.Rd b/man/chat4R.Rd index ab985aa..0956e29 100644 --- a/man/chat4R.Rd +++ b/man/chat4R.Rd @@ -2,11 +2,11 @@ % Please edit documentation in R/chat4R.R \name{chat4R} \alias{chat4R} -\title{Chat4R: Interact with GPT-3.5 (default) using OpenAI API} +\title{Chat4R: Interact with gpt-4o-mini (default) using OpenAI API} \usage{ chat4R( content, - Model = "gpt-3.5-turbo-16k", + Model = "gpt-4o-mini", temperature = 1, simple = TRUE, fromJSON_parsed = FALSE, @@ -16,7 +16,7 @@ chat4R( \arguments{ 
\item{content}{A string containing the user's input message.} -\item{Model}{A string specifying the GPT model to use (default: "gpt-3.5-turbo-16k").} +\item{Model}{A string specifying the GPT model to use (default: "gpt-4o-mini").} \item{temperature}{A numeric value controlling the randomness of the model's output (default: 1).} @@ -24,14 +24,17 @@ chat4R( \item{fromJSON_parsed}{Logical, if TRUE, content will be parsed from JSON.} -\item{api_key}{A string containing the user's OpenAI API key. Defaults to the value of the environment variable "OPENAI_API_KEY".} +\item{api_key}{A string containing the user's OpenAI API key. +Defaults to the value of the environment variable "OPENAI_API_KEY".} } \value{ A data frame containing the response from the GPT model. } \description{ This function uses the OpenAI API to interact with the - GPT-3.5 model (default) and generates responses based on user input. + gpt-4o-mini model (default) and generates responses based on user input. + Currently, "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-turbo", and "gpt-3.5-turbo" + can be selected as the OpenAI model. } \details{ Chat4R Function } diff --git a/man/chat4R_streaming.Rd b/man/chat4R_streaming.Rd new file mode 100644 index 0000000..4fc237f --- /dev/null +++ b/man/chat4R_streaming.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/chat4R_streaming.R +\name{chat4R_streaming} +\alias{chat4R_streaming} +\title{chat4R_streaming: Interact with gpt-4o-mini (default) with streaming using OpenAI API} +\usage{ +chat4R_streaming( + content, + Model = "gpt-4o-mini", + temperature = 1, + api_key = Sys.getenv("OPENAI_API_KEY") +) +} +\arguments{ +\item{content}{A string containing the user's input message.} + +\item{Model}{A string specifying the GPT model to use (default: "gpt-4o-mini").} + +\item{temperature}{A numeric value controlling the randomness of the model's output (default: 1).} + +\item{api_key}{A string containing the user's OpenAI API key. +Defaults to the value of the environment variable "OPENAI_API_KEY".} +} +\value{ +Streamed text is printed to the console as it arrives; the HTTP response object is returned invisibly. +} +\description{ +This function uses the OpenAI API to interact with the + gpt-4o-mini model (default) and generates responses based on user input with + streaming data back to R. + Currently, "gpt-4o-mini", "gpt-4o", and "gpt-4-turbo" + can be selected as the OpenAI model. 
+} +\details{ +Chat4R Function with Streaming +} +\examples{ +\dontrun{ +Sys.setenv(OPENAI_API_KEY = "Your API key") +chat4R_streaming(content = "What is the capital of France?") +} +} +\author{ +Satoshi Kume +} diff --git a/man/chat4Rv2.Rd b/man/chat4Rv2.Rd new file mode 100644 index 0000000..c4b1668 --- /dev/null +++ b/man/chat4Rv2.Rd @@ -0,0 +1,62 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/chat4Rv2.R +\name{chat4Rv2} +\alias{chat4Rv2} +\title{chat4Rv2: Interact with gpt-4o-mini (default) using OpenAI API} +\usage{ +chat4Rv2( + content, + Model = "gpt-4o-mini", + temperature = 1, + simple = TRUE, + fromJSON_parsed = FALSE, + system_set = "", + api_key = Sys.getenv("OPENAI_API_KEY") +) +} +\arguments{ +\item{content}{A string containing the user's input message.} + +\item{Model}{A string specifying the GPT model to use (default: "gpt-4o-mini").} + +\item{temperature}{A numeric value controlling the randomness of the model's output (default: 1).} + +\item{simple}{Logical, if TRUE, only the content of the model's message will be returned.} + +\item{fromJSON_parsed}{Logical, if TRUE, content will be parsed from JSON.} + +\item{system_set}{A string containing the system message to set the context. +If provided, it will be added as the first message in the conversation. +Default is an empty string.} + +\item{api_key}{A string containing the user's OpenAI API key. +Defaults to the value of the environment variable "OPENAI_API_KEY".} +} +\value{ +A data frame containing the response from the GPT model. +} +\description{ +This function uses the OpenAI API to interact with the + gpt-4o-mini model (default) and generates responses based on user input. + Currently, "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-turbo", and "gpt-3.5-turbo" + can be selected as the OpenAI model. +} +\details{ +chat4Rv2 Function +} +\examples{ +\dontrun{ +Sys.setenv(OPENAI_API_KEY = "Your API key") +# Using chat4Rv2 without system_set (default behavior) +response <- chat4Rv2(content = "What is the capital of France?") +response + +# Using chat4Rv2 with a system_set provided +response <- chat4Rv2(content = "What is the capital of France?", + system_set = "You are a helpful assistant.") +response +} +} +\author{ +Satoshi Kume +} diff --git a/man/checkErrorDet.Rd b/man/checkErrorDet.Rd index b077546..3348a4e 100644 --- a/man/checkErrorDet.Rd +++ b/man/checkErrorDet.Rd @@ -6,7 +6,7 @@ \usage{ checkErrorDet( Summary_nch = 100, - Model = "gpt-4-0613", + Model = "gpt-4o-mini", language = "English", verbose = TRUE, SlowTone = FALSE @@ -15,8 +15,8 @@ checkErrorDet( \arguments{ \item{Summary_nch}{An integer specifying the maximum number of characters for the summary.} -\item{Model}{A string specifying the model to be used, default is "gpt-4-0314". -Currently, "gpt-4", "gpt-4-0314" and "gpt-4-0613" can be selected as gpt-4 models. +\item{Model}{A string specifying the model to be used, default is "gpt-4o-mini". +Currently, "gpt-4", "gpt-4-0314", and "gpt-4o-mini" can be selected. 
Execution with GPT-4 is recommended.} \item{language}{A string specifying the output language, default is "English".} diff --git a/man/conversation4R.Rd b/man/conversation4R.Rd index d60253d..5862ad8 100644 --- a/man/conversation4R.Rd +++ b/man/conversation4R.Rd @@ -7,9 +7,9 @@ conversation4R( message, api_key = Sys.getenv("OPENAI_API_KEY"), - template = "", + system_set = "", ConversationBufferWindowMemory_k = 2, - Model = "gpt-3.5-turbo-16k", + Model = "gpt-4o-mini", language = "English", initialization = FALSE, verbose = TRUE @@ -20,11 +20,11 @@ conversation4R( \item{api_key}{A string containing the OpenAI API key. Default is retrieved from the system environment variable "OPENAI_API_KEY".} -\item{template}{A string containing the template for the conversation. Default is an empty string.} +\item{system_set}{A string containing the system message for the conversation. Default is an empty string.} \item{ConversationBufferWindowMemory_k}{An integer representing the conversation buffer window memory. Default is 2.} -\item{Model}{A string representing the model to be used. Default is "gpt-3.5-turbo-16k".} +\item{Model}{A string representing the model to be used. Default is "gpt-4o-mini".} \item{language}{A string representing the language to be used in the conversation. Default is "English".} diff --git a/man/replicatellmAPI4R.Rd b/man/replicatellmAPI4R.Rd new file mode 100644 index 0000000..3714a27 --- /dev/null +++ b/man/replicatellmAPI4R.Rd @@ -0,0 +1,56 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/replicateAPI4R.R +\name{replicatellmAPI4R} +\alias{replicatellmAPI4R} +\title{replicatellmAPI4R: Interact with the Replicate API for LLMs in R} +\usage{ +replicatellmAPI4R( + input, + model_url, + simple = TRUE, + fetch_stream = FALSE, + api_key = Sys.getenv("Replicate_API_KEY") +) +} +\arguments{ +\item{input}{A list containing the API request body with parameters including prompt, max_tokens, top_k, top_p, min_tokens, temperature, system_prompt, presence_penalty, and frequency_penalty.} + +\item{model_url}{A character string specifying the model endpoint URL (e.g., "/models/meta/meta-llama-3.1-405b-instruct/predictions").} + +\item{simple}{A logical value indicating whether to return a simplified output (only the model output) if TRUE, or the full API response if FALSE. Default is TRUE.} + +\item{fetch_stream}{A logical value indicating whether to fetch a streaming response. Default is FALSE.} + +\item{api_key}{A character string representing the Replicate API key. Defaults to the environment variable "Replicate_API_KEY".} +} +\value{ +If fetch_stream is FALSE, returns either a simplified output (if simple is TRUE) or the full API response. In streaming mode, outputs the response stream directly to the console. +} +\description{ +This function interacts with the Replicate API (v1) to utilize large language models (LLMs) such as Llama. It sends a POST request with the provided input and handles both streaming and non-streaming responses. +} +\examples{ +\dontrun{ + Sys.setenv(Replicate_API_KEY = "Your API key") + input <- list( + input = list( + prompt = "What is the capital of France?", + max_tokens = 1024, + top_k = 50, + top_p = 0.9, + min_tokens = 0, + temperature = 0.6, + system_prompt = "You are a helpful assistant.", + presence_penalty = 0, + frequency_penalty = 0 + ) + ) + model_url <- "/models/meta/meta-llama-3.1-405b-instruct/predictions" + response <- replicatellmAPI4R(input, model_url) + print(response) +} + +} +\author{ +Satoshi Kume +}
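
Note on the recurring as.character() wrapping throughout this changeset: chat4R() and chat4R_history() return their result as a one-column data frame, while clipr::write_clip() and rstudioapi::insertText() expect a character vector. A minimal sketch of the coercion, using a stand-in value rather than a live API call:

# chat4R_history() returns a result shaped like data.frame(content = "...").
res <- data.frame(content = "Bonjour!")   # stand-in for a chat4R_history() result
as.character(res)                         # "Bonjour!" -- a plain character vector
clipr::write_clip(as.character(res))      # write_clip() requires a character vector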
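
A minimal usage sketch of the two new OpenAI wrappers, assuming a valid OPENAI_API_KEY: chat4Rv2() prepends system_set as a system-role message when it is non-empty, and chat4R_streaming() prints tokens to the console as they arrive.

library(chatAI4R)
Sys.setenv(OPENAI_API_KEY = "Your API key")

# chat4Rv2() with simple = TRUE returns data.frame(content = ...)
res <- chat4Rv2(content = "What is the capital of France?",
                system_set = "You are a helpful assistant.")
as.character(res)

# chat4R_streaming() writes the streamed tokens to the console
chat4R_streaming(content = "Name three base R plotting functions.")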
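
A sketch of the two replicatellmAPI4R() modes, assuming a valid Replicate_API_KEY; the endpoint is the example URL from the documentation above. By default the function polls the prediction URL and returns the output; with fetch_stream = TRUE the chunks are written to the console as they arrive.

Sys.setenv(Replicate_API_KEY = "Your API key")
input <- list(input = list(prompt = "What is the capital of France?",
                           max_tokens = 256,
                           temperature = 0.6,
                           system_prompt = "You are a helpful assistant."))
model_url <- "/models/meta/meta-llama-3.1-405b-instruct/predictions"

# Default (fetch_stream = FALSE): poll until the prediction succeeds
out <- replicatellmAPI4R(input, model_url)
print(out)

# Streaming mode (fetch_stream = TRUE)
replicatellmAPI4R(input, model_url, fetch_stream = TRUE)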