From 5b2fc74e8752a126829f2a3d98167e5f47bda45c Mon Sep 17 00:00:00 2001
From: Claas Augner
Date: Fri, 17 May 2024 08:48:00 -0400
Subject: [PATCH] chore(ai-help): remove token_model

---
 src/ai/constants.rs | 3 ---
 src/ai/helpers.rs   | 6 +++---
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/src/ai/constants.rs b/src/ai/constants.rs
index 7a6fc4da..9f30e71d 100644
--- a/src/ai/constants.rs
+++ b/src/ai/constants.rs
@@ -7,7 +7,6 @@ use crate::ai::embeddings::RelatedDoc;
 pub struct AIHelpConfig {
     pub name: &'static str,
     pub model: &'static str,
-    pub token_model: &'static str,
     pub full_doc: bool,
     pub system_prompt: &'static str,
     pub user_prompt: Option<&'static str>,
@@ -28,7 +27,6 @@ fn join_with_tags(related_docs: Vec<RelatedDoc>) -> String {
 pub const AI_HELP_GPT3_5_FULL_DOC_NEW_PROMPT: AIHelpConfig = AIHelpConfig {
     name: "20230901-full_doc-new_prompt",
     model: "gpt-3.5-turbo-0125",
-    token_model: "gpt-3.5-turbo-0125",
     full_doc: true,
     system_prompt: include_str!("prompts/new_prompt/system.md"),
     user_prompt: None,
@@ -42,7 +40,6 @@ pub const AI_HELP_GPT3_5_FULL_DOC_NEW_PROMPT: AIHelpConfig = AIHelpConfig {
 pub const AI_HELP_GPT4_FULL_DOC_NEW_PROMPT: AIHelpConfig = AIHelpConfig {
     name: "20240125-gpt4-full_doc-new_prompt",
     model: "gpt-4o-2024-05-13",
-    token_model: "gpt-4-0125-preview",
     full_doc: true,
     system_prompt: include_str!("prompts/new_prompt/system.md"),
     user_prompt: None,
diff --git a/src/ai/helpers.rs b/src/ai/helpers.rs
index 15ec600e..6188c29e 100644
--- a/src/ai/helpers.rs
+++ b/src/ai/helpers.rs
@@ -26,11 +26,11 @@ pub fn cap_messages(
     mut init_messages: Vec<ChatCompletionRequestMessage>,
     context_messages: Vec<ChatCompletionRequestMessage>,
 ) -> Result<Vec<ChatCompletionRequestMessage>, AIError> {
-    let init_tokens = num_tokens_from_messages(config.token_model, &init_messages)?;
+    let init_tokens = num_tokens_from_messages(config.model, &init_messages)?;
     if init_tokens + config.max_completion_tokens > config.token_limit {
         return Err(AIError::TokenLimit);
     }
-    let mut context_tokens = num_tokens_from_messages(config.token_model, &context_messages)?;
+    let mut context_tokens = num_tokens_from_messages(config.model, &context_messages)?;
     let mut skip = 0;
 
     while context_tokens + init_tokens + config.max_completion_tokens > config.token_limit {
@@ -38,7 +38,7 @@ pub fn cap_messages(
         if skip >= context_messages.len() {
             return Err(AIError::TokenLimit);
         }
-        context_tokens = num_tokens_from_messages(config.token_model, &context_messages[skip..])?;
+        context_tokens = num_tokens_from_messages(config.model, &context_messages[skip..])?;
     }
     init_messages.extend(context_messages.into_iter().skip(skip));
     Ok(init_messages)
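
Note (not part of the patch): the separate token_model appears to have let token
counting use a model name the tokenizer lookup already recognised
("gpt-4-0125-preview") while completions used the newer "gpt-4o-2024-05-13".
Dropping it assumes the tiktoken-rs version in use resolves the completion model
string to its tokenizer directly. A minimal standalone sketch of that assumption
(not code from this repository; uses the anyhow crate for error handling and a
tiktoken-rs release recent enough to know the gpt-4o tokenizer):

use tiktoken_rs::get_bpe_from_model;

fn main() -> anyhow::Result<()> {
    // If this lookup succeeds, the completion model string alone is enough
    // for token counting, and a separate token_model becomes redundant.
    let bpe = get_bpe_from_model("gpt-4o-2024-05-13")?;
    let tokens = bpe.encode_with_special_tokens("How do I center a div?");
    println!("prompt tokens: {}", tokens.len());
    Ok(())
}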