From 3e457b40649108c435ea095704b5dbf23e7e9b75 Mon Sep 17 00:00:00 2001
From: jcrodriguez1989
Date: Fri, 2 Aug 2024 12:41:36 -0300
Subject: [PATCH] Feature: API metadata as attr

---
 DESCRIPTION             |  2 +-
 R/gpt_get_completions.R |  3 ++-
 R/parse_response.R      | 13 +++++++++----
 README.Rmd              |  1 +
 README.md               |  1 +
 5 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index e47f0e4..0cb61f6 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -16,7 +16,7 @@ URL: https://github.com/jcrodriguez1989/chatgpt
 BugReports: https://github.com/jcrodriguez1989/chatgpt/issues
 Encoding: UTF-8
 LazyData: true
-RoxygenNote: 7.2.3
+RoxygenNote: 7.3.1
 Imports:
     clipr,
     httr,
diff --git a/R/gpt_get_completions.R b/R/gpt_get_completions.R
index 9ef1a47..04158e7 100644
--- a/R/gpt_get_completions.R
+++ b/R/gpt_get_completions.R
@@ -19,7 +19,8 @@ gpt_get_completions <- function(prompt, openai_api_key = Sys.getenv("OPENAI_API_
     temperature = as.numeric(Sys.getenv("OPENAI_TEMPERATURE", 1)),
     top_p = as.numeric(Sys.getenv("OPENAI_TOP_P", 1)),
     frequency_penalty = as.numeric(Sys.getenv("OPENAI_FREQUENCY_PENALTY", 0)),
-    presence_penalty = as.numeric(Sys.getenv("OPENAI_PRESENCE_PENALTY", 0))
+    presence_penalty = as.numeric(Sys.getenv("OPENAI_PRESENCE_PENALTY", 0)),
+    logprobs = as.logical(Sys.getenv("OPENAI_LOGPROBS", FALSE))
   )
   if (get_verbosity()) {
     message(paste0("\n*** ChatGPT input:\n\n", prompt, "\n"))
diff --git a/R/parse_response.R b/R/parse_response.R
index 0d74722..782f717 100644
--- a/R/parse_response.R
+++ b/R/parse_response.R
@@ -10,12 +10,17 @@
 #' @return Returns a character vector containing the text content of the response.
 #'
 parse_response <- function(raw_responses, verbosity = get_verbosity()) {
+  # Parse the message content of the list of raw_responses. Trim those messages, and paste them.
+  parsed_response <- paste(trimws(sapply(raw_responses, function(response) {
+    sapply(response$choices, function(x) x$message$content)
+  })), collapse = "")
   # If we provide a numeric value to `OPENAI_VERBOSE`, and it is `> 1` print return verbosity.
   if (verbosity > 1) {
     lapply(raw_responses, function(response) message(toJSON(response, pretty = TRUE)))
+    if (verbosity > 2) {
+      # If we are in 3-verbose mode, add the raw_responses as an attribute to the return object.
+      attr(parsed_response, "raw_responses") <- raw_responses
+    }
   }
-  # Parse the message content of the list of raw_responses. Trim those messages, and paste them.
-  paste(trimws(sapply(raw_responses, function(response) {
-    sapply(response$choices, function(x) x$message$content)
-  })), collapse = "")
+  parsed_response
 }
diff --git a/README.Rmd b/README.Rmd
index 7a65f03..f892f3c 100644
--- a/README.Rmd
+++ b/README.Rmd
@@ -156,3 +156,4 @@ The following environment variables can be set to tweak the behavior, as documen
 * `OPENAI_TOP_P`; defaults to `1`
 * `OPENAI_FREQUENCY_PENALTY`; defaults to `0`
 * `OPENAI_PRESENCE_PENALTY`; defaults to `0`
+* `OPENAI_LOGPROBS`; defaults to `FALSE`
diff --git a/README.md b/README.md
index c18c5c2..d2a66c5 100644
--- a/README.md
+++ b/README.md
@@ -361,3 +361,4 @@ documented in
 - `OPENAI_TOP_P`; defaults to `1`
 - `OPENAI_FREQUENCY_PENALTY`; defaults to `0`
 - `OPENAI_PRESENCE_PENALTY`; defaults to `0`
+- `OPENAI_LOGPROBS`; defaults to `FALSE`
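
Usage note (illustrative, not part of the patch): the sketch below shows how the new `OPENAI_LOGPROBS` option and the level-3 verbosity attribute could be exercised from an R session. It assumes the chatgpt package is installed with this patch applied, that `ask_chatgpt()` returns the value produced by `parse_response()`, and that `get_verbosity()` reads the `OPENAI_VERBOSE` environment variable, as the comment in parse_response.R suggests.

    library(chatgpt)

    # Request token log probabilities and keep the raw API metadata.
    Sys.setenv(OPENAI_LOGPROBS = TRUE)  # forwarded to the API via the new `logprobs` parameter
    Sys.setenv(OPENAI_VERBOSE = 3)      # verbosity > 2 attaches the raw responses as an attribute

    answer <- ask_chatgpt("What do you think about R language?")
    cat(answer)  # the parsed text itself is unchanged

    # The raw responses (choices, logprobs, usage, and other API metadata)
    # travel with the parsed text instead of only being printed to the console.
    raw_responses <- attr(answer, "raw_responses")
    str(raw_responses, max.level = 2)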