Skip to content

Commit

Permalink
v0.3.3: gpt-4o-mini version
Browse files Browse the repository at this point in the history
  • Loading branch information
kumeS committed Feb 2, 2025
1 parent da56b6c commit ff06530
Show file tree
Hide file tree
Showing 35 changed files with 584 additions and 85 deletions.
7 changes: 4 additions & 3 deletions DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
Package: chatAI4R
Type: Package
Title: Chat-Based Interactive Artificial Intelligence for R
Version: 0.3.2
Version: 0.3.3
Date: 2025-02-02
Authors@R: c(
person (given = "Satoshi", family = "Kume",
Expand Down Expand Up @@ -29,10 +29,11 @@ Imports: httr,
deepRstudio,
pdftools,
xml2,
rvest
rvest,
curl
Suggests: testthat, knitr
License: Artistic-2.0
URL: https://kumes.github.io/chatAI4R/, https://github.com/kumeS/chatAI4R
BugReports: https://github.com/kumeS/chatAI4R/issues
RoxygenNote: 7.3.1
RoxygenNote: 7.3.2
Encoding: UTF-8
9 changes: 9 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ export(addRoxygenDescription)
export(autocreateFunction4R)
export(chat4R)
export(chat4R_history)
export(chat4R_streaming)
export(chat4Rv2)
export(chatAI4pdf)
export(checkErrorDet)
export(checkErrorDet_JP)
Expand All @@ -32,6 +34,7 @@ export(ngsub)
export(proofreadEnglishText)
export(proofreadText)
export(removeQuotations)
export(replicatellmAPI4R)
export(revisedText)
export(searchFunction)
export(slow_print_v2)
Expand All @@ -49,12 +52,17 @@ importFrom(assertthat,noNA)
importFrom(clipr,read_clip)
importFrom(clipr,write_clip)
importFrom(crayon,red)
importFrom(curl,curl_fetch_stream)
importFrom(curl,handle_setheaders)
importFrom(curl,handle_setopt)
importFrom(curl,new_handle)
importFrom(deepRstudio,deepel)
importFrom(deepRstudio,is_mac)
importFrom(future,future)
importFrom(future,multisession)
importFrom(future,plan)
importFrom(future,resolved)
importFrom(httr,GET)
importFrom(httr,POST)
importFrom(httr,add_headers)
importFrom(httr,content)
Expand All @@ -63,6 +71,7 @@ importFrom(igraph,add_vertices)
importFrom(igraph,graph)
importFrom(igraph,layout_nicely)
importFrom(igraph,layout_with_fr)
importFrom(jsonlite,fromJSON)
importFrom(jsonlite,toJSON)
importFrom(pdftools,pdf_text)
importFrom(rstudioapi,getActiveDocumentContext)
Expand Down
6 changes: 3 additions & 3 deletions R/OptimizeRcode.R
Original file line number Diff line number Diff line change
Expand Up @@ -98,9 +98,9 @@ OptimizeRcode <- function(Model = "gpt-4-0613",
template2 = "Please describe in detail the changes you previously made to the R code without apology."
history[[4]] <- list('role' = 'user', 'content' = template2)

res1 <- chat4R_history(history = history,
res1 <- as.character(chat4R_history(history = history,
Model = Model,
temperature = temperature)
temperature = temperature))

if(SlowTone) {
d <- ifelse(20/nchar(res1) < 0.3, 20/nchar(res1), 0.3) * stats::runif(1, min = 0.95, max = 1.05)
Expand All @@ -111,6 +111,6 @@ OptimizeRcode <- function(Model = "gpt-4-0613",
}
}
} else {
return(clipr::write_clip(res))
return(clipr::write_clip(as.character(res)))
}
}
4 changes: 2 additions & 2 deletions R/RcodeImprovements.R
Original file line number Diff line number Diff line change
Expand Up @@ -64,9 +64,9 @@ RcodeImprovements <- function(Summary_nch = 100,
list('role' = 'user', 'content' = template1s))

# Generate the suggestions using the GPT model
res <- chat4R_history(history=history,
res <- as.character(chat4R_history(history=history,
Model = Model,
temperature = temperature)
temperature = temperature))

# Print the suggestions based on the verbosity and speed settings
if(verbose){
Expand Down
4 changes: 2 additions & 2 deletions R/RevisedText.R
Original file line number Diff line number Diff line change
Expand Up @@ -119,9 +119,9 @@ history <- list(list('role' = 'system', 'content' = template0s),
list('role' = 'user', 'content' = template1s))

# Execute the chat model
res <- chat4R_history(history=history,
res <- as.character(chat4R_history(history=history,
Model = Model,
temperature = 1)
temperature = 1))

# Output final result or relevant messages
if(verbose) {
Expand Down
4 changes: 2 additions & 2 deletions R/TextSummaryAsBullet.R
Original file line number Diff line number Diff line change
Expand Up @@ -104,9 +104,9 @@ TextSummaryAsBullet <- function(Model = "gpt-4-0613",
if(verbose){utils::setTxtProgressBar(pb, 3)}

# Execute text generation
res <- chat4R_history(history = history,
res <- as.character(chat4R_history(history = history,
Model = Model,
temperature = temperature)
temperature = temperature))

if(verbose){utils::setTxtProgressBar(pb, 4)}

Expand Down
4 changes: 2 additions & 2 deletions R/addCommentCode.R
Original file line number Diff line number Diff line change
Expand Up @@ -68,9 +68,9 @@ addCommentCode <- function(Model = "gpt-4-0613",
list('role' = 'user', 'content' = template1s))

# Execute text generation
res <- chat4R_history(history = history,
res <- as.character(chat4R_history(history = history,
Model = Model,
temperature = temperature)
temperature = temperature))

# Output the enriched text
if(SelectedCode){
Expand Down
4 changes: 2 additions & 2 deletions R/addRoxygenDescription.R
Original file line number Diff line number Diff line change
Expand Up @@ -81,9 +81,9 @@ addRoxygenDescription <- function(Model = "gpt-4-0613",
if(verbose){utils::setTxtProgressBar(pb, 2)}

# Execute text generation
res <- chat4R_history(history = history,
res <- as.character(chat4R_history(history = history,
Model = Model,
temperature = temperature)
temperature = temperature))

if(verbose){
utils::setTxtProgressBar(pb, 3)
Expand Down
13 changes: 8 additions & 5 deletions R/chat4R.R
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
#' Chat4R Function
#'
#' @title Chat4R: Interact with GPT-3.5 (default) using OpenAI API
#' @title Chat4R: Interact with gpt-4o-mini (default) using OpenAI API
#' @description This function uses the OpenAI API to interact with the
#' GPT-3.5 model (default) and generates responses based on user input.
#' gpt-4o-mini model (default) and generates responses based on user input.
#' In this function, currently, "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-turbo" and "gpt-3.5-turbo"
#' can be selected as OpenAI's LLM model.
#' @param content A string containing the user's input message.
#' @param api_key A string containing the user's OpenAI API key. Defaults to the value of the environment variable "OPENAI_API_KEY".
#' @param Model A string specifying the GPT model to use (default: "gpt-3.5-turbo-16k").
#' @param api_key A string containing the user's OpenAI API key.
#' Defaults to the value of the environment variable "OPENAI_API_KEY".
#' @param Model A string specifying the GPT model to use (default: "gpt-4o-mini").
#' @param temperature A numeric value controlling the randomness of the model's output (default: 1).
#' @param simple Logical, if TRUE, only the content of the model's message will be returned.
#' @param fromJSON_parsed Logical, if TRUE, content will be parsed from JSON.
Expand All @@ -22,7 +25,7 @@
#' }

chat4R <- function(content,
Model = "gpt-3.5-turbo-16k",
Model = "gpt-4o-mini",
temperature = 1,
simple=TRUE,
fromJSON_parsed=FALSE,
Expand Down
81 changes: 81 additions & 0 deletions R/chat4R_streaming.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
#' Chat4R Function with Streaming
#'
#' @title chat4R_streaming: Interact with gpt-4o-mini (default) with streaming using OpenAI API
#' @description This function uses the OpenAI API to interact with the
#'    gpt-4o-mini model (default) and generates responses based on user input with
#'    streaming data back to R.
#'    In this function, currently, "gpt-4o-mini", "gpt-4o", and "gpt-4-turbo"
#'    can be selected as OpenAI's LLM model.
#' @param content A string containing the user's input message.
#' @param api_key A string containing the user's OpenAI API key.
#'    Defaults to the value of the environment variable "OPENAI_API_KEY".
#' @param Model A string specifying the GPT model to use (default: "gpt-4o-mini").
#' @param temperature A numeric value controlling the randomness of the model's output (default: 1).
#' @importFrom httr POST add_headers content
#' @importFrom jsonlite toJSON fromJSON
#' @return Invisibly returns the httr response object; the streamed text
#'    is printed to the console as it arrives.
#' @export chat4R_streaming
#' @author Satoshi Kume
#' @examples
#' \dontrun{
#' Sys.setenv(OPENAI_API_KEY = "Your API key")
#' chat4R_streaming(content = "What is the capital of France?")
#' }

chat4R_streaming <- function(content,
                             Model = "gpt-4o-mini",
                             temperature = 1,
                             api_key = Sys.getenv("OPENAI_API_KEY")) {

  # Define parameters
  api_url <- "https://api.openai.com/v1/chat/completions"
  n <- 1
  top_p <- 1

  # Configure headers for the API request
  headers <- c(`Content-Type` = "application/json",
               `Authorization` = paste("Bearer", api_key))

  # Define the body of the API request; stream = TRUE requests
  # server-sent events instead of a single JSON response
  body <- list(model = Model,
               messages = list(list(role = "user", content = content)),
               temperature = temperature, top_p = top_p, n = n,
               stream = TRUE)

  # Callback invoked for each raw chunk of the SSE stream.
  # A chunk may carry several "data: {...}" lines, and a JSON line may be
  # split across two chunks, so unparseable fragments are skipped rather
  # than allowed to abort the whole stream.
  streaming_callback <- function(data) {

    # Convert raw bytes to character
    message <- rawToChar(data)

    # Split the chunk into individual SSE lines
    lines <- unlist(strsplit(message, "\n"))

    for (line in lines) {
      # Skip blank keep-alive lines and the end-of-stream sentinel
      if (line == "" || line == "data: [DONE]") next

      # Remove the "data: " prefix before parsing
      json_line <- sub("^data: ", "", line)

      # Parse defensively: partial fragments yield NULL instead of an error
      delta <- tryCatch(
        jsonlite::fromJSON(json_line)$choices$delta$content,
        error = function(e) NULL
      )

      # Role-only / finish-reason chunks have no content delta
      if (!is.null(delta)) cat(delta)
    }
  }

  # Perform the request with streaming using httr::write_stream
  response <- httr::POST(
    url = api_url,
    body = body,
    encode = "json",
    httr::add_headers(.headers = headers),
    httr::write_stream(streaming_callback)
  )

  # Streamed text has already been printed; return the response quietly
  invisible(response)
}
96 changes: 96 additions & 0 deletions R/chat4Rv2.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#' chat4Rv2 Function
#'
#' @title chat4Rv2: Interact with gpt-4o-mini (default) using OpenAI API
#' @description This function uses the OpenAI API to interact with the
#'    gpt-4o-mini model (default) and generates responses based on user input.
#'    In this function, currently, "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-turbo" and "gpt-3.5-turbo"
#'    can be selected as OpenAI's LLM model.
#' @param content A string containing the user's input message.
#' @param api_key A string containing the user's OpenAI API key.
#'    Defaults to the value of the environment variable "OPENAI_API_KEY".
#' @param Model A string specifying the GPT model to use (default: "gpt-4o-mini").
#' @param temperature A numeric value controlling the randomness of the model's output (default: 1).
#' @param simple Logical, if TRUE, only the content of the model's message will be returned.
#' @param fromJSON_parsed Logical, if TRUE, content will be parsed from JSON.
#' @param system_set A string containing the system message to set the context.
#'    If provided, it will be added as the first message in the conversation.
#'    Default is an empty string.
#' @importFrom httr POST add_headers content
#' @importFrom jsonlite toJSON
#' @return A data frame containing the response from the GPT model.
#' @export chat4Rv2
#' @author Satoshi Kume
#' @examples
#' \dontrun{
#' Sys.setenv(OPENAI_API_KEY = "Your API key")
#' # Using chat4Rv2 without system_set (default behavior)
#' response <- chat4Rv2(content = "What is the capital of France?")
#' response
#'
#' # Using chat4Rv2 with a system_set provided
#' response <- chat4Rv2(content = "What is the capital of France?",
#'                      system_set = "You are a helpful assistant.")
#' response
#' }


chat4Rv2 <- function(content,
                     Model = "gpt-4o-mini",
                     temperature = 1,
                     simple = TRUE,
                     fromJSON_parsed = FALSE,
                     system_set = "",
                     api_key = Sys.getenv("OPENAI_API_KEY")) {

  # Define parameters
  api_url <- "https://api.openai.com/v1/chat/completions"
  n <- 1
  top_p <- 1

  # Configure headers for the API request
  headers <- httr::add_headers(`Content-Type` = "application/json",
                               `Authorization` = paste("Bearer", api_key))

  # Construct messages list depending on system_set parameter
  if (nzchar(system_set)) {
    # Include system message if provided
    messages_list <- list(
      list(role = "system", content = system_set),
      list(role = "user", content = content)
    )
  } else {
    # Only include user message if system_set is empty
    messages_list <- list(
      list(role = "user", content = content)
    )
  }

  # Define the body of the API request
  body <- list(model = Model,
               messages = messages_list,
               temperature = temperature,
               top_p = top_p,
               n = n)

  # Send a POST request to the OpenAI server
  response <- httr::POST(url = api_url,
                         body = jsonlite::toJSON(body, auto_unbox = TRUE),
                         encode = "json",
                         config = headers)

  # Fail early with an informative message instead of a cryptic
  # subscript error when the API rejects the request (bad key,
  # rate limit, unknown model, ...)
  if (httr::http_error(response)) {
    parsed_err <- tryCatch(httr::content(response, "parsed"),
                           error = function(e) NULL)
    err_msg <- if (!is.null(parsed_err$error$message)) {
      parsed_err$error$message
    } else {
      httr::http_status(response)$message
    }
    stop("OpenAI API request failed: ", err_msg, call. = FALSE)
  }

  # Extract and return the response content
  if (simple) {
    return(data.frame(content = httr::content(response, "parsed")$choices[[1]]$message$content))
  } else {
    if (fromJSON_parsed) {
      # Return the fully parsed JSON payload (usage, finish_reason, ...)
      raw_content <- httr::content(response, "raw")
      char_content <- rawToChar(raw_content)
      parsed_data <- jsonlite::fromJSON(char_content)
      return(parsed_data)
    } else {
      return(data.frame(httr::content(response, "parsed")))
    }
  }
}


10 changes: 5 additions & 5 deletions R/checkErrorDet.R
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
#' @title Check Error Details
#' @description A function to analyze and provide guidance on how to fix an error message copied from the R console.
#' @param Summary_nch An integer specifying the maximum number of characters for the summary.
#' @param Model A string specifying the model to be used, default is "gpt-4-0314".
#' Currently, "gpt-4", "gpt-4-0314" and "gpt-4-0613" can be selected as gpt-4 models.
#' @param Model A string specifying the model to be used, default is "gpt-4o-mini".
#' Currently, "gpt-4", "gpt-4-0314" and "gpt-4o-mini" can be selected as models.
#' Execution with a GPT-4-class model is recommended.
#' @param language A string specifying the output language, default is "English".
#' @param verbose A logical value to control the verbosity of the output, default is TRUE.
Expand All @@ -25,7 +25,7 @@
#' }

checkErrorDet <- function(Summary_nch = 100,
Model = "gpt-4-0613",
Model = "gpt-4o-mini",
language = "English",
verbose = TRUE,
SlowTone = FALSE) {
Expand Down Expand Up @@ -64,9 +64,9 @@ checkErrorDet <- function(Summary_nch = 100,
list('role' = 'user', 'content' = template1s))

# Execution
res <- chat4R_history(history=history,
res <- as.character(chat4R_history(history=history,
Model = Model,
temperature = temperature)
temperature = temperature))

if(verbose) {
if(SlowTone) {
Expand Down
4 changes: 2 additions & 2 deletions R/checkErrorDet_JP.R
Original file line number Diff line number Diff line change
Expand Up @@ -69,9 +69,9 @@ checkErrorDet_JP <- function(Summary_nch = 100,
if(verbose){utils::setTxtProgressBar(pb, 2)}

# Execution
res <- chat4R_history(history=history,
res <- as.character(chat4R_history(history=history,
Model = Model,
temperature = temperature)
temperature = temperature))

if(verbose){
utils::setTxtProgressBar(pb, 3)
Expand Down
Loading

0 comments on commit ff06530

Please sign in to comment.