diff --git a/langchain-core/src/output_parsers/tests/xml.test.ts b/langchain-core/src/output_parsers/tests/xml.test.ts
index bec47cfbc6f8..0a2bd5f4fd2b 100644
--- a/langchain-core/src/output_parsers/tests/xml.test.ts
+++ b/langchain-core/src/output_parsers/tests/xml.test.ts
@@ -87,7 +87,7 @@ test("Can parse streams", async () => {
   const result = await streamingLlm.stream(XML_EXAMPLE);
   let finalResult = {};
   for await (const chunk of result) {
-    console.log(chunk);
+    // console.log(chunk);
     finalResult = chunk;
   }
   expect(finalResult).toStrictEqual(expectedResult);
diff --git a/langchain-core/src/prompts/chat.ts b/langchain-core/src/prompts/chat.ts
index 0fcb0330489b..15192e7df984 100644
--- a/langchain-core/src/prompts/chat.ts
+++ b/langchain-core/src/prompts/chat.ts
@@ -10,8 +10,8 @@ import {
   ChatMessage,
   type BaseMessageLike,
   coerceMessageLikeToMessage,
-  isBaseMessage,
   MessageContent,
+  isBaseMessage,
 } from "../messages/index.js";
 import {
   type ChatPromptValueInterface,
@@ -729,12 +729,18 @@ function _coerceMessagePromptTemplateLike<
   messagePromptTemplateLike: BaseMessagePromptTemplateLike,
   extra?: Extra
 ): BaseMessagePromptTemplate | BaseMessage {
+  if (_isBaseMessagePromptTemplate(messagePromptTemplateLike)) {
+    return messagePromptTemplateLike;
+  }
+  const allowedCoercionMessageTypes = ["system", "ai", "human", "generic"];
+  // Do not coerce if it's an instance of `BaseMessage` AND it's not one of the allowed message types.
   if (
-    _isBaseMessagePromptTemplate(messagePromptTemplateLike) ||
-    isBaseMessage(messagePromptTemplateLike)
+    isBaseMessage(messagePromptTemplateLike) &&
+    !allowedCoercionMessageTypes.includes(messagePromptTemplateLike._getType())
   ) {
     return messagePromptTemplateLike;
   }
+
   if (
     Array.isArray(messagePromptTemplateLike) &&
     messagePromptTemplateLike[0] === "placeholder"
@@ -1118,9 +1124,7 @@ export class ChatPromptTemplate<
       // eslint-disable-next-line no-instanceof/no-instanceof
       if (promptMessage instanceof BaseMessage) continue;
       for (const inputVariable of promptMessage.inputVariables) {
-        if (inputVariable in flattenedPartialVariables) {
-          continue;
-        }
+        if (inputVariable in flattenedPartialVariables) continue;
         inputVariables.add(inputVariable);
       }
     }
diff --git a/langchain-core/src/prompts/tests/chat.test.ts b/langchain-core/src/prompts/tests/chat.test.ts
index 3f5125861a73..95d24744a2a9 100644
--- a/langchain-core/src/prompts/tests/chat.test.ts
+++ b/langchain-core/src/prompts/tests/chat.test.ts
@@ -622,3 +622,96 @@ test("Multi-modal, multi part chat prompt works with instances of BaseMessage",
   });
   expect(messages).toMatchSnapshot();
 });
+
+test("extract input variables from complex message contents", async () => {
+  const promptComplexContent = ChatPromptTemplate.fromMessages([
+    [
+      "human",
+      [
+        {
+          type: "text",
+          text: "{input}",
+        },
+      ],
+    ],
+    [
+      "human",
+      [
+        {
+          type: "image_url",
+          image_url: {
+            url: "{image_url}",
+            detail: "high",
+          },
+        },
+      ],
+    ],
+    [
+      "human",
+      [
+        {
+          type: "image_url",
+          image_url: "{image_url_2}",
+        },
+        {
+          type: "text",
+          text: "{input_2}",
+        },
+        {
+          type: "text",
+          text: "{input}", // Intentionally duplicated
+        },
+      ],
+    ],
+  ]);
+
+  expect(promptComplexContent.inputVariables).toHaveLength(4);
+  expect(promptComplexContent.inputVariables.sort()).toEqual(
+    ["input", "image_url", "image_url_2", "input_2"].sort()
+  );
+});
+
+test("extract input variables from complex message contents inside BaseMessages", async () => {
+  const promptComplexContent = ChatPromptTemplate.fromMessages([
+    new HumanMessage({
+      content: [
+        {
+          type: "text",
+          text: "{input}",
+        },
+      ],
+    }),
+    new HumanMessage({
+      content: [
+        {
+          type: "image_url",
+          image_url: {
+            url: "{image_url}",
+            detail: "high",
+          },
+        },
+      ],
+    }),
+    new HumanMessage({
+      content: [
+        {
+          type: "image_url",
+          image_url: "{image_url_2}",
+        },
+        {
+          type: "text",
+          text: "{input_2}",
+        },
+        {
+          type: "text",
+          text: "{input}", // Intentionally duplicated
+        },
+      ],
+    }),
+  ]);
+
+  expect(promptComplexContent.inputVariables).toHaveLength(4);
+  expect(promptComplexContent.inputVariables.sort()).toEqual(
+    ["input", "image_url", "image_url_2", "input_2"].sort()
+  );
+});
diff --git a/langchain-core/src/prompts/tests/few_shot.test.ts b/langchain-core/src/prompts/tests/few_shot.test.ts
index a183ad68668c..c92ea8d93699 100644
--- a/langchain-core/src/prompts/tests/few_shot.test.ts
+++ b/langchain-core/src/prompts/tests/few_shot.test.ts
@@ -252,4 +252,75 @@ An example about bar
 `
     );
   });
+
+  test("Can format messages with complex contents", async () => {
+    const examplePrompt = ChatPromptTemplate.fromMessages([
+      new AIMessage({
+        content: [
+          {
+            type: "text",
+            text: "{ai_input_var}",
+          },
+        ],
+      }),
+      new HumanMessage({
+        content: [
+          {
+            type: "text",
+            text: "{human_input_var}",
+          },
+        ],
+      }),
+    ]);
+    const examples = [
+      {
+        ai_input_var: "ai-foo",
+        human_input_var: "human-bar",
+      },
+      {
+        ai_input_var: "ai-foo2",
+        human_input_var: "human-bar2",
+      },
+    ];
+    const prompt = new FewShotChatMessagePromptTemplate({
+      examplePrompt,
+      inputVariables: ["ai_input_var", "human_input_var"],
+      examples,
+    });
+    const messages = await prompt.formatMessages({});
+    expect(messages).toEqual([
+      new AIMessage({
+        content: [
+          {
+            type: "text",
+            text: "ai-foo",
+          },
+        ],
+      }),
+      new HumanMessage({
+        content: [
+          {
+            type: "text",
+            text: "human-bar",
+          },
+        ],
+      }),
+      new AIMessage({
+        content: [
+          {
+            type: "text",
+            text: "ai-foo2",
+          },
+        ],
+      }),
+      new HumanMessage({
+        content: [
+          {
+            type: "text",
+            text: "human-bar2",
+          },
+        ],
+      }),
+    ]);
+  });
 });
diff --git a/langchain-core/src/runnables/tests/runnable.test.ts b/langchain-core/src/runnables/tests/runnable.test.ts
index ca597ab35872..ee23d3202e3a 100644
--- a/langchain-core/src/runnables/tests/runnable.test.ts
+++ b/langchain-core/src/runnables/tests/runnable.test.ts
@@ -70,7 +70,7 @@ test("Test chat model stream", async () => {
   let done = false;
   while (!done) {
     const chunk = await reader.read();
-    console.log(chunk);
+    // console.log(chunk);
    done = chunk.done;
   }
 });
@@ -80,7 +80,7 @@ test("Pipe from one runnable to the next", async () => {
   const llm = new FakeLLM({});
   const runnable = promptTemplate.pipe(llm);
   const result = await runnable.invoke({ input: "Hello world!" });
-  console.log(result);
+  // console.log(result);
   expect(result).toBe("Hello world!");
 });
@@ -90,7 +90,7 @@ test("Stream the entire way through", async () => {
   const chunks = [];
   for await (const chunk of stream) {
     chunks.push(chunk);
-    console.log(chunk);
+    // console.log(chunk);
   }
   expect(chunks.length).toEqual("Hi there!".length);
   expect(chunks.join("")).toEqual("Hi there!");
@@ -118,7 +118,7 @@ test("Callback order with transform streaming", async () => {
   const chunks = [];
   for await (const chunk of stream) {
     chunks.push(chunk);
-    console.log(chunk);
+    // console.log(chunk);
   }
   expect(order).toEqual([
     "RunnableSequence",
@@ -139,7 +139,7 @@ test("Don't use intermediate streaming", async () => {
   const chunks = [];
   for await (const chunk of stream) {
     chunks.push(chunk);
-    console.log(chunk);
+    // console.log(chunk);
   }
   expect(chunks.length).toEqual(1);
   expect(chunks[0]).toEqual("Hi there!");
@@ -400,7 +400,7 @@ test("Create a runnable sequence and run it", async () => {
   const text = `Jello world`;
   const runnable = promptTemplate.pipe(llm).pipe(parser);
   const result = await runnable.invoke({ input: text });
-  console.log(result);
+  // console.log(result);
   expect(result).toEqual("Jello world");
 });
@@ -408,7 +408,7 @@ test("Create a runnable sequence with a static method with invalid output and ca
   const promptTemplate = PromptTemplate.fromTemplate("{input}");
   const llm = new FakeChatModel({});
   const parser = (input: BaseMessage) => {
-    console.log(input);
+    // console.log(input);
     try {
       const parsedInput =
         typeof input.content === "string"
@@ -428,8 +428,8 @@ test("Create a runnable sequence with a static method with invalid output and ca
   };
   const runnable = RunnableSequence.from([promptTemplate, llm, parser]);
   await expect(async () => {
-    const result = await runnable.invoke({ input: "Hello sequence!" });
-    console.log(result);
+    await runnable.invoke({ input: "Hello sequence!" });
+    // console.log(result);
   }).rejects.toThrow(OutputParserException);
 });
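
For reviewers: a minimal sketch of the behavior change in `_coerceMessagePromptTemplateLike`, as pinned down by the new tests. Previously, any `BaseMessage` instance was returned untouched; now, instances whose type is in `allowedCoercionMessageTypes` are coerced into message prompt templates, so template variables inside their (possibly complex) content are extracted and formatted. Import paths assume the published `@langchain/core` entrypoints, and the `{name}` variable is hypothetical.

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { HumanMessage } from "@langchain/core/messages";

async function demo() {
  // "human" is one of the allowed coercion types, so this BaseMessage
  // instance is coerced into a prompt template rather than passed through,
  // and "{name}" is registered as an input variable.
  const prompt = ChatPromptTemplate.fromMessages([
    new HumanMessage({ content: [{ type: "text", text: "Hello {name}!" }] }),
  ]);

  console.log(prompt.inputVariables); // ["name"]

  const messages = await prompt.formatMessages({ name: "world" });
  console.log(messages[0].content); // [{ type: "text", text: "Hello world!" }]
}

demo().catch(console.error);
```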