diff --git a/app/client/platforms/NewStuffLLMs.ts b/app/client/platforms/NewStuffLLMs.ts
index a08eae9f416..8b548b6674a 100644
--- a/app/client/platforms/NewStuffLLMs.ts
+++ b/app/client/platforms/NewStuffLLMs.ts
@@ -14,7 +14,6 @@ import {
  *
  * @param model - The model name.
  * @param max_tokens - The maximum number of tokens.
- * @param system_fingerprint - The system fingerprint.
  * @param useMaxTokens - Indicates whether to use the maximum number of tokens.
  * @returns An object containing the configuration for new features:
  * - max_tokens: The maximum number of tokens.
@@ -27,12 +26,10 @@
 export function getNewStuff(
   model: string,
   max_tokens?: number,
-  system_fingerprint?: string,
   useMaxTokens: boolean = true,
 ): {
   max_tokens?: number;
   maxOutputTokens?: number; // This is the same as maxTokens but for Google AI
-  system_fingerprint?: string;
   isNewModel: boolean;
   payloadType: 'chat' | 'image';
   isDalle: boolean;
@@ -52,7 +49,6 @@ export function getNewStuff(
     return {
       max_tokens: tokens,
       maxOutputTokens: tokens, // Assign the same value to maxOutputTokens
-      system_fingerprint: system_fingerprint ?? modelConfig.system_fingerprint,
       isNewModel: true,
       payloadType,
       isDalle,
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index 9d9e9762bd7..695e237a8fc 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -161,7 +161,6 @@ export class GeminiProApi implements LLMApi {
     const { max_tokens } = getNewStuff(
       options.config.model,
       chatConfig.max_tokens,
-      chatConfig.system_fingerprint,
       chatConfig.useMaxTokens,
     );
 
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 36d594fb32b..9c5c10d2f8c 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -187,10 +187,9 @@ export class ChatGPTApi implements LLMApi {
      * @example : A Best Picture of Andromeda Galaxy
      */
     const actualModel = getModelForInstructVersion(modelConfig.model);
-    const { max_tokens, system_fingerprint } = getNewStuff(
+    const { max_tokens } = getNewStuff(
       modelConfig.model,
       modelConfig.max_tokens,
-      modelConfig.system_fingerprint,
       modelConfig.useMaxTokens,
     );
 
diff --git a/app/masks/en.ts b/app/masks/en.ts
index 287189769e4..e7779c4c531 100644
--- a/app/masks/en.ts
+++ b/app/masks/en.ts
@@ -25,7 +25,6 @@ export const EN_MASKS: BuiltinMask[] = [
       quality: "hd",
       size: "1024x1024",
       style: "vivid",
-      system_fingerprint: "",
       sendMemory: true,
       useMaxTokens: false,
       historyMessageCount: 10,
@@ -60,7 +59,6 @@ export const EN_MASKS: BuiltinMask[] = [
       quality: "hd",
       size: "1024x1024",
       style: "vivid",
-      system_fingerprint: "",
       sendMemory: true,
       historyMessageCount: 10,
       compressMessageLengthThreshold: 5000,
diff --git a/app/store/config.ts b/app/store/config.ts
index c77edc667c5..bcce841a1ba 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -80,7 +80,6 @@ export const DEFAULT_CONFIG = {
    * `Natural` causes the model to produce more natural, less hyper-real looking images.
    */
   style: "vivid", // Only DALL·E-3 for DALL·E-2 not not really needed
-  system_fingerprint: "",
   sendMemory: true,
   useMaxTokens: false,
   historyMessageCount: 4,
@@ -260,15 +259,6 @@ export const useAppConfig = createPersistStore(
         };
       }
 
-      // In the wilds 🚀 (still wip because it confusing for LLM + Generative AI Method)
-
-      if (version < 4.2) {
-        state.modelConfig = {
-          ...state.modelConfig,
-          system_fingerprint: "",
-        };
-      }
-
       // Speed Animation default is 30, Lower values will result in faster animation
       if (version < 4.3) {
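
For reviewers, here is the post-change call shape of `getNewStuff` in one place. This is a minimal sketch, not code from the repo: the relative import path and the `modelConfig` literal are assumptions for illustration; only the signature and return shape are taken from the `NewStuffLLMs.ts` hunk above.

```ts
// Sketch of a getNewStuff caller after this PR (assumed import path;
// the modelConfig literal below is illustrative, not the app's real config).
import { getNewStuff } from "./NewStuffLLMs";

const modelConfig = {
  model: "gpt-4-turbo", // hypothetical model name
  max_tokens: 4096,
  useMaxTokens: true,
};

// system_fingerprint is no longer passed in or returned; useMaxTokens
// moves up to become the third positional argument.
const { max_tokens, maxOutputTokens, isNewModel, payloadType } = getNewStuff(
  modelConfig.model,
  modelConfig.max_tokens,
  modelConfig.useMaxTokens,
);

if (isNewModel && payloadType === "chat") {
  console.log(`chat payload, max_tokens=${max_tokens}, maxOutputTokens=${maxOutputTokens}`);
}
```

The main hazard when dropping a middle positional parameter is a stale call site that still passes three of the old arguments, silently binding the former `system_fingerprint` string to `useMaxTokens`. The `google.ts` and `openai.ts` hunks above update both known callers, and TypeScript should flag any stragglers, since a `string` is not assignable to `boolean`.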