From 94d7c39558f548b277945b735e517a02ce96a270 Mon Sep 17 00:00:00 2001
From: Brace Sproul <braceasproul@gmail.com>
Date: Fri, 16 Aug 2024 16:58:50 -0700
Subject: [PATCH] google-genai[minor]: Update JSDoc with examples (#6561)

---
 .../langchain-google-genai/src/chat_models.ts | 344 +++++++++++++++++-
 1 file changed, 325 insertions(+), 19 deletions(-)

diff --git a/libs/langchain-google-genai/src/chat_models.ts b/libs/langchain-google-genai/src/chat_models.ts
index 197febd63bbb..2120b5c88b91 100644
--- a/libs/langchain-google-genai/src/chat_models.ts
+++ b/libs/langchain-google-genai/src/chat_models.ts
@@ -179,33 +179,339 @@ export interface GoogleGenerativeAIChatInput
 }
 
 /**
- * A class that wraps the Google Palm chat model.
- * @example
+ * Google Generative AI chat model integration.
+ *
+ * Setup:
+ * Install `@langchain/google-genai` and set an environment variable named `GOOGLE_API_KEY`.
+ *
+ * ```bash
+ * npm install @langchain/google-genai
+ * export GOOGLE_API_KEY="your-api-key"
+ * ```
+ *
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html#constructor)
+ *
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_google_genai.GoogleGenerativeAIChatCallOptions.html)
+ *
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
+ * They can also be passed via `.bind`, or the second arg in `.bindTools`, as shown in the examples below:
+ *
+ * ```typescript
+ * // When calling `.bind`, call options should be passed via the first argument
+ * const llmWithArgsBound = llm.bind({
+ *   stop: ["\n"],
+ *   tools: [...],
+ * });
+ *
+ * // When calling `.bindTools`, call options should be passed via the second argument
+ * const llmWithTools = llm.bindTools(
+ *   [...],
+ *   {
+ *     stop: ["\n"],
+ *   }
+ * );
+ * ```
+ *
+ * ## Examples
+ *
+ * <details open>
+ * Instantiate + * * ```typescript - * const model = new ChatGoogleGenerativeAI({ - * apiKey: "", - * temperature: 0.7, - * modelName: "gemini-pro", - * topK: 40, - * topP: 1, + * import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; + * + * const llm = new ChatGoogleGenerativeAI({ + * model: "gemini-1.5-flash", + * temperature: 0, + * maxRetries: 2, + * // apiKey: "...", + * // other params... * }); - * const questions = [ - * new HumanMessage({ - * content: [ + * ``` + *
+ * + *
+ * + *
+ * Invoking + * + * ```typescript + * const input = `Translate "I love programming" into French.`; + * + * // Models also accept a list of chat messages or a formatted prompt + * const result = await llm.invoke(input); + * console.log(result); + * ``` + * + * ```txt + * AIMessage { + * "content": "There are a few ways to translate \"I love programming\" into French, depending on the level of formality and nuance you want to convey:\n\n**Formal:**\n\n* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and your intended audience. \n", + * "response_metadata": { + * "finishReason": "STOP", + * "index": 0, + * "safetyRatings": [ + * { + * "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", + * "probability": "NEGLIGIBLE" + * }, * { - * type: "text", - * text: "You are a funny assistant that answers in pirate language.", + * "category": "HARM_CATEGORY_HATE_SPEECH", + * "probability": "NEGLIGIBLE" * }, * { - * type: "text", - * text: "What is your favorite food?", + * "category": "HARM_CATEGORY_HARASSMENT", + * "probability": "NEGLIGIBLE" * }, + * { + * "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + * "probability": "NEGLIGIBLE" + * } * ] - * }) - * ]; - * const res = await model.invoke(questions); - * console.log({ res }); + * }, + * "usage_metadata": { + * "input_tokens": 10, + * "output_tokens": 149, + * "total_tokens": 159 + * } + * } + * ``` + *
+ * + *
+ * + *
+ * Streaming Chunks + * + * ```typescript + * for await (const chunk of await llm.stream(input)) { + * console.log(chunk); + * } + * ``` + * + * ```txt + * AIMessageChunk { + * "content": "There", + * "response_metadata": { + * "index": 0 + * } + * "usage_metadata": { + * "input_tokens": 10, + * "output_tokens": 1, + * "total_tokens": 11 + * } + * } + * AIMessageChunk { + * "content": " are a few ways to translate \"I love programming\" into French, depending on", + * } + * AIMessageChunk { + * "content": " the level of formality and nuance you want to convey:\n\n**Formal:**\n\n", + * } + * AIMessageChunk { + * "content": "* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This", + * } + * AIMessageChunk { + * "content": " is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More", + * } + * AIMessageChunk { + * "content": " specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and", + * } + * AIMessageChunk { + * "content": " your intended audience. \n", + * } + * ``` + *
+ * + *
+ * + *
+ * Aggregate Streamed Chunks + * + * ```typescript + * import { AIMessageChunk } from '@langchain/core/messages'; + * import { concat } from '@langchain/core/utils/stream'; + * + * const stream = await llm.stream(input); + * let full: AIMessageChunk | undefined; + * for await (const chunk of stream) { + * full = !full ? chunk : concat(full, chunk); + * } + * console.log(full); + * ``` + * + * ```txt + * AIMessageChunk { + * "content": "There are a few ways to translate \"I love programming\" into French, depending on the level of formality and nuance you want to convey:\n\n**Formal:**\n\n* **J'aime la programmation.** (This is the most literal and formal translation.)\n\n**Informal:**\n\n* **J'adore programmer.** (This is a more enthusiastic and informal translation.)\n* **J'aime beaucoup programmer.** (This is a slightly less enthusiastic but still informal translation.)\n\n**More specific:**\n\n* **J'aime beaucoup coder.** (This specifically refers to writing code.)\n* **J'aime beaucoup développer des logiciels.** (This specifically refers to developing software.)\n\nThe best translation will depend on the context and your intended audience. \n", + * "usage_metadata": { + * "input_tokens": 10, + * "output_tokens": 277, + * "total_tokens": 287 + * } + * } + * ``` + *
+ * + *
+ * + *
+ * Bind tools + * + * ```typescript + * import { z } from 'zod'; + * + * const GetWeather = { + * name: "GetWeather", + * description: "Get the current weather in a given location", + * schema: z.object({ + * location: z.string().describe("The city and state, e.g. San Francisco, CA") + * }), + * } + * + * const GetPopulation = { + * name: "GetPopulation", + * description: "Get the current population in a given location", + * schema: z.object({ + * location: z.string().describe("The city and state, e.g. San Francisco, CA") + * }), + * } + * + * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]); + * const aiMsg = await llmWithTools.invoke( + * "Which city is hotter today and which is bigger: LA or NY?" + * ); + * console.log(aiMsg.tool_calls); + * ``` + * + * ```txt + * [ + * { + * name: 'GetWeather', + * args: { location: 'Los Angeles, CA' }, + * type: 'tool_call' + * }, + * { + * name: 'GetWeather', + * args: { location: 'New York, NY' }, + * type: 'tool_call' + * }, + * { + * name: 'GetPopulation', + * args: { location: 'Los Angeles, CA' }, + * type: 'tool_call' + * }, + * { + * name: 'GetPopulation', + * args: { location: 'New York, NY' }, + * type: 'tool_call' + * } + * ] + * ``` + *
+ * + *
+ * + *
+ * Structured Output + * + * ```typescript + * const Joke = z.object({ + * setup: z.string().describe("The setup of the joke"), + * punchline: z.string().describe("The punchline to the joke"), + * rating: z.number().optional().describe("How funny the joke is, from 1 to 10") + * }).describe('Joke to tell user.'); + * + * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" }); + * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats"); + * console.log(jokeResult); + * ``` + * + * ```txt + * { + * setup: "Why don\\'t cats play poker?", + * punchline: "Why don\\'t cats play poker? Because they always have an ace up their sleeve!" + * } + * ``` + *
+ * + *
+ * + *
+ * Multimodal + * + * ```typescript + * import { HumanMessage } from '@langchain/core/messages'; + * + * const imageUrl = "https://example.com/image.jpg"; + * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer()); + * const base64Image = Buffer.from(imageData).toString('base64'); + * + * const message = new HumanMessage({ + * content: [ + * { type: "text", text: "describe the weather in this image" }, + * { + * type: "image_url", + * image_url: { url: `data:image/jpeg;base64,${base64Image}` }, + * }, + * ] + * }); + * + * const imageDescriptionAiMsg = await llm.invoke([message]); + * console.log(imageDescriptionAiMsg.content); + * ``` + * + * ```txt + * The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions. + * ``` + *
+ * + *
+ * + *
+ * Usage Metadata + * + * ```typescript + * const aiMsgForMetadata = await llm.invoke(input); + * console.log(aiMsgForMetadata.usage_metadata); + * ``` + * + * ```txt + * { input_tokens: 10, output_tokens: 149, total_tokens: 159 } + * ``` + *
+ * + *
+ * + *
+ * Response Metadata + * + * ```typescript + * const aiMsgForResponseMetadata = await llm.invoke(input); + * console.log(aiMsgForResponseMetadata.response_metadata); + * ``` + * + * ```txt + * { + * finishReason: 'STOP', + * index: 0, + * safetyRatings: [ + * { + * category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', + * probability: 'NEGLIGIBLE' + * }, + * { + * category: 'HARM_CATEGORY_HATE_SPEECH', + * probability: 'NEGLIGIBLE' + * }, + * { category: 'HARM_CATEGORY_HARASSMENT', probability: 'NEGLIGIBLE' }, + * { + * category: 'HARM_CATEGORY_DANGEROUS_CONTENT', + * probability: 'NEGLIGIBLE' + * } + * ] + * } * ``` + *
+ * + *
*/ export class ChatGoogleGenerativeAI extends BaseChatModel