feat(ai-help): upgrade to GPT-4o model
caugner committed May 17, 2024
1 parent 953edfc commit d24a195
Showing 2 changed files with 7 additions and 4 deletions.
src/ai/constants.rs (5 changes: 4 additions & 1 deletion)
@@ -7,6 +7,7 @@ use crate::ai::embeddings::RelatedDoc
 pub struct AIHelpConfig {
     pub name: &'static str,
     pub model: &'static str,
+    pub token_model: &'static str,
     pub full_doc: bool,
     pub system_prompt: &'static str,
     pub user_prompt: Option<&'static str>,
@@ -27,6 +28,7 @@ fn join_with_tags(related_docs: Vec<RelatedDoc>) -> String {
 pub const AI_HELP_GPT3_5_FULL_DOC_NEW_PROMPT: AIHelpConfig = AIHelpConfig {
     name: "20230901-full_doc-new_prompt",
     model: "gpt-3.5-turbo-0125",
+    token_model: "gpt-3.5-turbo-0125",
     full_doc: true,
     system_prompt: include_str!("prompts/new_prompt/system.md"),
     user_prompt: None,
@@ -39,7 +41,8 @@ pub const AI_HELP_GPT3_5_FULL_DOC_NEW_PROMPT: AIHelpConfig = AIHelpConfig {
 
 pub const AI_HELP_GPT4_FULL_DOC_NEW_PROMPT: AIHelpConfig = AIHelpConfig {
     name: "20240125-gpt4-full_doc-new_prompt",
-    model: "gpt-4-0125-preview",
+    model: "gpt-4o-2024-05-13",
+    token_model: "gpt-4-0125-preview",
     full_doc: true,
     system_prompt: include_str!("prompts/new_prompt/system.md"),
     user_prompt: None,
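The new token_model field separates the model used to generate completions from the model name used to count tokens. The likely reason (inferred, not stated in this commit): gpt-4o uses the newer o200k_base tokenizer, and the tokenizer lookup in use may not have recognized "gpt-4o-2024-05-13" yet, so token counting stays on the gpt-4 tokenizer (cl100k_base) as a close approximation. A minimal sketch of that distinction, assuming the tiktoken-rs and anyhow crates; the count_tokens helper is hypothetical and not part of this commit:

// Hypothetical sketch assuming tiktoken-rs; not code from this commit.
use tiktoken_rs::get_bpe_from_model;

fn count_tokens(token_model: &str, text: &str) -> anyhow::Result<usize> {
    // Resolve a tokenizer from a model name. If "gpt-4o-2024-05-13"
    // (o200k_base) is not recognized by the lookup, counting against
    // "gpt-4-0125-preview" (cl100k_base) is a reasonable stand-in.
    let bpe = get_bpe_from_model(token_model)?;
    Ok(bpe.encode_with_special_tokens(text).len())
}

fn main() -> anyhow::Result<()> {
    // Count with the gpt-4 tokenizer, as the token_model field configures.
    let n = count_tokens("gpt-4-0125-preview", "How do I center a div?")?;
    println!("{n} tokens");
    Ok(())
}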
src/ai/helpers.rs (6 changes: 3 additions & 3 deletions)
@@ -26,19 +26,19 @@ pub fn cap_messages(
     mut init_messages: Vec<ChatCompletionRequestMessage>,
     context_messages: Vec<ChatCompletionRequestMessage>,
 ) -> Result<Vec<ChatCompletionRequestMessage>, AIError> {
-    let init_tokens = num_tokens_from_messages(config.model, &init_messages)?;
+    let init_tokens = num_tokens_from_messages(config.token_model, &init_messages)?;
     if init_tokens + config.max_completion_tokens > config.token_limit {
         return Err(AIError::TokenLimit);
     }
-    let mut context_tokens = num_tokens_from_messages(config.model, &context_messages)?;
+    let mut context_tokens = num_tokens_from_messages(config.token_model, &context_messages)?;
 
     let mut skip = 0;
     while context_tokens + init_tokens + config.max_completion_tokens > config.token_limit {
         skip += 1;
         if skip >= context_messages.len() {
             return Err(AIError::TokenLimit);
         }
-        context_tokens = num_tokens_from_messages(config.model, &context_messages[skip..])?;
+        context_tokens = num_tokens_from_messages(config.token_model, &context_messages[skip..])?;
     }
     init_messages.extend(context_messages.into_iter().skip(skip));
     Ok(init_messages)
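For context, cap_messages keeps the initial (system) messages intact and drops the oldest context messages until prompt tokens plus the reserved completion budget fit under the model's token limit, erroring if even the initial messages alone exceed it. Below is a self-contained sketch of the same trimming loop, runnable in isolation; the Message and Config stubs are hypothetical stand-ins for the real ChatCompletionRequestMessage and AIHelpConfig, and token counts are precomputed instead of calling num_tokens_from_messages:

// Standalone sketch of the capping logic above, with stubbed types.
// The real code counts tokens via config.token_model; here each message
// carries a precomputed token count instead.

#[derive(Debug)]
struct Message {
    tokens: usize,
    text: &'static str,
}

struct Config {
    max_completion_tokens: usize,
    token_limit: usize,
}

#[derive(Debug)]
enum AIError {
    TokenLimit,
}

fn cap_messages(
    config: &Config,
    mut init_messages: Vec<Message>,
    context_messages: Vec<Message>,
) -> Result<Vec<Message>, AIError> {
    let count = |msgs: &[Message]| msgs.iter().map(|m| m.tokens).sum::<usize>();

    // The initial messages plus the completion budget must fit on their own.
    let init_tokens = count(&init_messages);
    if init_tokens + config.max_completion_tokens > config.token_limit {
        return Err(AIError::TokenLimit);
    }

    // Drop the oldest context messages first until everything fits.
    let mut skip = 0;
    let mut context_tokens = count(&context_messages);
    while context_tokens + init_tokens + config.max_completion_tokens > config.token_limit {
        skip += 1;
        if skip >= context_messages.len() {
            return Err(AIError::TokenLimit);
        }
        context_tokens = count(&context_messages[skip..]);
    }

    init_messages.extend(context_messages.into_iter().skip(skip));
    Ok(init_messages)
}

fn main() {
    let config = Config { max_completion_tokens: 50, token_limit: 100 };
    let init = vec![Message { tokens: 20, text: "system prompt" }];
    let context = vec![
        Message { tokens: 40, text: "oldest turn" },
        Message { tokens: 25, text: "newest turn" },
    ];
    // 20 + 40 + 25 + 50 > 100, so the oldest turn is dropped.
    let capped = cap_messages(&config, init, context).unwrap();
    assert_eq!(capped.len(), 2);
    println!("{capped:?}");
}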
