diff --git a/.stats.yml b/.stats.yml
index fcbfe481..a2e9ecf6 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 2
+configured_endpoints: 3
diff --git a/api.md b/api.md
index 5df7c25c..ae434947 100644
--- a/api.md
+++ b/api.md
@@ -25,3 +25,23 @@ Methods:
- client.messages.create({ ...params }) -> Message
- client.messages.stream(body, options?) -> MessageStream
+
+# Beta
+
+## Tools
+
+### Messages
+
+Types:
+
+- Tool
+- ToolResultBlockParam
+- ToolUseBlock
+- ToolUseBlockParam
+- ToolsBetaContentBlock
+- ToolsBetaMessage
+- ToolsBetaMessageParam
+
+Methods:
+
+- client.beta.tools.messages.create({ ...params }) -> ToolsBetaMessage
diff --git a/examples/tools.ts b/examples/tools.ts
new file mode 100644
index 00000000..b4887a24
--- /dev/null
+++ b/examples/tools.ts
@@ -0,0 +1,63 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import Anthropic from '@anthropic-ai/sdk';
+import assert from 'node:assert';
+
+const client = new Anthropic(); // gets API Key from environment variable ANTHROPIC_API_KEY
+
+async function main() {
+ const userMessage: Anthropic.Beta.Tools.ToolsBetaMessageParam = {
+ role: 'user',
+ content: 'What is the weather in SF?',
+ };
+ const tools: Anthropic.Beta.Tools.Tool[] = [
+ {
+ name: 'get_weather',
+ description: 'Get the weather for a specific location',
+ input_schema: {
+ type: 'object',
+ properties: { location: { type: 'string' } },
+ },
+ },
+ ];
+
+ const message = await client.beta.tools.messages.create({
+ model: 'claude-3-opus-20240229',
+ max_tokens: 1024,
+ messages: [userMessage],
+ tools,
+ });
+ console.log('Initial response:');
+ console.dir(message, { depth: 4 });
+
+ assert(message.stop_reason === 'tool_use');
+
+ const tool = message.content.find(
+ (content): content is Anthropic.Beta.Tools.ToolUseBlock => content.type === 'tool_use',
+ );
+ assert(tool);
+
+ const result = await client.beta.tools.messages.create({
+ model: 'claude-3-opus-20240229',
+ max_tokens: 1024,
+ messages: [
+ userMessage,
+ { role: message.role, content: message.content },
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'tool_result',
+ tool_use_id: tool.id,
+ content: [{ type: 'text', text: 'The weather is 73f' }],
+ },
+ ],
+ },
+ ],
+ tools,
+ });
+ console.log('\nFinal response');
+ console.dir(result, { depth: 4 });
+}
+
+main();
diff --git a/src/index.ts b/src/index.ts
index a89e0bb0..4798cbaa 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -122,6 +122,7 @@ export class Anthropic extends Core.APIClient {
completions: API.Completions = new API.Completions(this);
messages: API.Messages = new API.Messages(this);
+ beta: API.Beta = new API.Beta(this);
protected override defaultQuery(): Core.DefaultQuery | undefined {
return this._options.defaultQuery;
@@ -257,6 +258,8 @@ export namespace Anthropic {
export import MessageCreateParamsNonStreaming = API.MessageCreateParamsNonStreaming;
export import MessageCreateParamsStreaming = API.MessageCreateParamsStreaming;
export import MessageStreamParams = API.MessageStreamParams;
+
+ export import Beta = API.Beta;
}
export default Anthropic;
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
new file mode 100644
index 00000000..6e73e5ae
--- /dev/null
+++ b/src/resources/beta/beta.ts
@@ -0,0 +1,12 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '@anthropic-ai/sdk/resource';
+import * as ToolsAPI from '@anthropic-ai/sdk/resources/beta/tools/tools';
+
+export class Beta extends APIResource {
+ tools: ToolsAPI.Tools = new ToolsAPI.Tools(this._client);
+}
+
+export namespace Beta {
+ export import Tools = ToolsAPI.Tools;
+}
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
new file mode 100644
index 00000000..670f3861
--- /dev/null
+++ b/src/resources/beta/index.ts
@@ -0,0 +1,4 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export { Beta } from './beta';
+export { Tools } from './tools/index';
diff --git a/src/resources/beta/tools/index.ts b/src/resources/beta/tools/index.ts
new file mode 100644
index 00000000..9e77fc23
--- /dev/null
+++ b/src/resources/beta/tools/index.ts
@@ -0,0 +1,16 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ Tool,
+ ToolResultBlockParam,
+ ToolUseBlock,
+ ToolUseBlockParam,
+ ToolsBetaContentBlock,
+ ToolsBetaMessage,
+ ToolsBetaMessageParam,
+ MessageCreateParams,
+ MessageCreateParamsNonStreaming,
+ MessageCreateParamsStreaming,
+ Messages,
+} from './messages';
+export { Tools } from './tools';
diff --git a/src/resources/beta/tools/messages.ts b/src/resources/beta/tools/messages.ts
new file mode 100644
index 00000000..80856085
--- /dev/null
+++ b/src/resources/beta/tools/messages.ts
@@ -0,0 +1,531 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from '@anthropic-ai/sdk/core';
+import { APIPromise } from '@anthropic-ai/sdk/core';
+import { APIResource } from '@anthropic-ai/sdk/resource';
+import * as ToolsMessagesAPI from '@anthropic-ai/sdk/resources/beta/tools/messages';
+import * as MessagesAPI from '@anthropic-ai/sdk/resources/messages';
+import { Stream } from '@anthropic-ai/sdk/streaming';
+
+export class Messages extends APIResource {
+ /**
+ * Create a Message.
+ *
+ * Send a structured list of input messages with text and/or image content, and the
+ * model will generate the next message in the conversation.
+ *
+ * The Messages API can be used for either single queries or stateless
+ * multi-turn conversations.
+ */
+ create(body: MessageCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<ToolsBetaMessage>;
+ create(
+ body: MessageCreateParamsStreaming,
+ options?: Core.RequestOptions,
+ ): APIPromise<Stream<MessagesAPI.MessageStreamEvent>>;
+ create(
+ body: MessageCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): APIPromise<Stream<MessagesAPI.MessageStreamEvent> | ToolsBetaMessage>;
+ create(
+ body: MessageCreateParams,
+ options?: Core.RequestOptions,
+ ): APIPromise<ToolsBetaMessage> | APIPromise<Stream<MessagesAPI.MessageStreamEvent>> {
+ return this._client.post('/v1/messages?beta=tools', {
+ body,
+ timeout: 600000,
+ ...options,
+ headers: { 'anthropic-beta': 'tools-2024-04-04', ...options?.headers },
+ stream: body.stream ?? false,
+ }) as APIPromise<ToolsBetaMessage> | APIPromise<Stream<MessagesAPI.MessageStreamEvent>>;
+ }
+}
+
+export interface Tool {
+ /**
+ * [JSON schema](https://json-schema.org/) for this tool's input.
+ *
+ * This defines the shape of the `input` that your tool accepts and that the model
+ * will produce.
+ */
+ input_schema: Tool.InputSchema;
+
+ name: string;
+
+ /**
+ * Description of what this tool does.
+ *
+ * Tool descriptions should be as detailed as possible. The more information that
+ * the model has about what the tool is and how to use it, the better it will
+ * perform. You can use natural language descriptions to reinforce important
+ * aspects of the tool input JSON schema.
+ */
+ description?: string;
+}
+
+export namespace Tool {
+ /**
+ * [JSON schema](https://json-schema.org/) for this tool's input.
+ *
+ * This defines the shape of the `input` that your tool accepts and that the model
+ * will produce.
+ */
+ export interface InputSchema {
+ type: 'object';
+
+ properties?: unknown | null;
+ [k: string]: unknown;
+ }
+}
+
+export interface ToolResultBlockParam {
+ tool_use_id: string;
+
+ type: 'tool_result';
+
+ content?: Array<MessagesAPI.TextBlockParam>;
+
+ is_error?: boolean;
+}
+
+export interface ToolUseBlock {
+ id: string;
+
+ input: unknown;
+
+ name: string;
+
+ type: 'tool_use';
+}
+
+export interface ToolUseBlockParam {
+ id: string;
+
+ input: unknown;
+
+ name: string;
+
+ type: 'tool_use';
+}
+
+export type ToolsBetaContentBlock = MessagesAPI.TextBlock | ToolUseBlock;
+
+export interface ToolsBetaMessage {
+ /**
+ * Unique object identifier.
+ *
+ * The format and length of IDs may change over time.
+ */
+ id: string;
+
+ /**
+ * Content generated by the model.
+ *
+ * This is an array of content blocks, each of which has a `type` that determines
+ * its shape. Currently, the only `type` in responses is `"text"`.
+ *
+ * Example:
+ *
+ * ```json
+ * [{ "type": "text", "text": "Hi, I'm Claude." }]
+ * ```
+ *
+ * If the request input `messages` ended with an `assistant` turn, then the
+ * response `content` will continue directly from that last turn. You can use this
+ * to constrain the model's output.
+ *
+ * For example, if the input `messages` were:
+ *
+ * ```json
+ * [
+ * {
+ * "role": "user",
+ * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ * },
+ * { "role": "assistant", "content": "The best answer is (" }
+ * ]
+ * ```
+ *
+ * Then the response `content` might be:
+ *
+ * ```json
+ * [{ "type": "text", "text": "B)" }]
+ * ```
+ */
+ content: Array<ToolsBetaContentBlock>;
+
+ /**
+ * The model that handled the request.
+ */
+ model: string;
+
+ /**
+ * Conversational role of the generated message.
+ *
+ * This will always be `"assistant"`.
+ */
+ role: 'assistant';
+
+ /**
+ * The reason that we stopped.
+ *
+ * This may be one of the following values:
+ *
+ * - `"end_turn"`: the model reached a natural stopping point
+ * - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
+ * - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+ * - `"tool_use"`: (tools beta only) the model invoked one or more tools
+ *
+ * In non-streaming mode this value is always non-null. In streaming mode, it is
+ * null in the `message_start` event and non-null otherwise.
+ */
+ stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use' | null;
+
+ /**
+ * Which custom stop sequence was generated, if any.
+ *
+ * This value will be a non-null string if one of your custom stop sequences was
+ * generated.
+ */
+ stop_sequence: string | null;
+
+ /**
+ * Object type.
+ *
+ * For Messages, this is always `"message"`.
+ */
+ type: 'message';
+
+ /**
+ * Billing and rate-limit usage.
+ *
+ * Anthropic's API bills and rate-limits by token counts, as tokens represent the
+ * underlying cost to our systems.
+ *
+ * Under the hood, the API transforms requests into a format suitable for the
+ * model. The model's output then goes through a parsing stage before becoming an
+ * API response. As a result, the token counts in `usage` will not match one-to-one
+ * with the exact visible content of an API request or response.
+ *
+ * For example, `output_tokens` will be non-zero, even for an empty string response
+ * from Claude.
+ */
+ usage: MessagesAPI.Usage;
+}
+
+export interface ToolsBetaMessageParam {
+ content:
+ | string
+ | Array<
+ MessagesAPI.TextBlockParam | MessagesAPI.ImageBlockParam | ToolUseBlockParam | ToolResultBlockParam
+ >;
+
+ role: 'user' | 'assistant';
+}
+
+export type MessageCreateParams = MessageCreateParamsNonStreaming | MessageCreateParamsStreaming;
+
+export interface MessageCreateParamsBase {
+ /**
+ * The maximum number of tokens to generate before stopping.
+ *
+ * Note that our models may stop _before_ reaching this maximum. This parameter
+ * only specifies the absolute maximum number of tokens to generate.
+ *
+ * Different models have different maximum values for this parameter. See
+ * [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ */
+ max_tokens: number;
+
+ /**
+ * Input messages.
+ *
+ * Our models are trained to operate on alternating `user` and `assistant`
+ * conversational turns. When creating a new `Message`, you specify the prior
+ * conversational turns with the `messages` parameter, and the model then generates
+ * the next `Message` in the conversation.
+ *
+ * Each input message must be an object with a `role` and `content`. You can
+ * specify a single `user`-role message, or you can include multiple `user` and
+ * `assistant` messages. The first message must always use the `user` role.
+ *
+ * If the final message uses the `assistant` role, the response content will
+ * continue immediately from the content in that message. This can be used to
+ * constrain part of the model's response.
+ *
+ * Example with a single `user` message:
+ *
+ * ```json
+ * [{ "role": "user", "content": "Hello, Claude" }]
+ * ```
+ *
+ * Example with multiple conversational turns:
+ *
+ * ```json
+ * [
+ * { "role": "user", "content": "Hello there." },
+ * { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
+ * { "role": "user", "content": "Can you explain LLMs in plain English?" }
+ * ]
+ * ```
+ *
+ * Example with a partially-filled response from Claude:
+ *
+ * ```json
+ * [
+ * {
+ * "role": "user",
+ * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ * },
+ * { "role": "assistant", "content": "The best answer is (" }
+ * ]
+ * ```
+ *
+ * Each input message `content` may be either a single `string` or an array of
+ * content blocks, where each block has a specific `type`. Using a `string` for
+ * `content` is shorthand for an array of one content block of type `"text"`. The
+ * following input messages are equivalent:
+ *
+ * ```json
+ * { "role": "user", "content": "Hello, Claude" }
+ * ```
+ *
+ * ```json
+ * { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
+ * ```
+ *
+ * Starting with Claude 3 models, you can also send image content blocks:
+ *
+ * ```json
+ * {
+ * "role": "user",
+ * "content": [
+ * {
+ * "type": "image",
+ * "source": {
+ * "type": "base64",
+ * "media_type": "image/jpeg",
+ * "data": "/9j/4AAQSkZJRg..."
+ * }
+ * },
+ * { "type": "text", "text": "What is in this image?" }
+ * ]
+ * }
+ * ```
+ *
+ * We currently support the `base64` source type for images, and the `image/jpeg`,
+ * `image/png`, `image/gif`, and `image/webp` media types.
+ *
+ * See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
+ * for more input examples.
+ *
+ * Note that if you want to include a
+ * [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
+ * use the top-level `system` parameter — there is no `"system"` role for input
+ * messages in the Messages API.
+ */
+ messages: Array<ToolsBetaMessageParam>;
+
+ /**
+ * The model that will complete your prompt.
+ *
+ * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
+ * additional details and options.
+ */
+ model: string;
+
+ /**
+ * An object describing metadata about the request.
+ */
+ metadata?: MessageCreateParams.Metadata;
+
+ /**
+ * Custom text sequences that will cause the model to stop generating.
+ *
+ * Our models will normally stop when they have naturally completed their turn,
+ * which will result in a response `stop_reason` of `"end_turn"`.
+ *
+ * If you want the model to stop generating when it encounters custom strings of
+ * text, you can use the `stop_sequences` parameter. If the model encounters one of
+ * the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
+ * and the response `stop_sequence` value will contain the matched stop sequence.
+ */
+ stop_sequences?: Array<string>;
+
+ /**
+ * Whether to incrementally stream the response using server-sent events.
+ *
+ * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
+ * for details.
+ */
+ stream?: boolean;
+
+ /**
+ * System prompt.
+ *
+ * A system prompt is a way of providing context and instructions to Claude, such
+ * as specifying a particular goal or role. See our
+ * [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ */
+ system?: string;
+
+ /**
+ * Amount of randomness injected into the response.
+ *
+ * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
+ * for analytical / multiple choice, and closer to `1.0` for creative and
+ * generative tasks.
+ *
+ * Note that even with `temperature` of `0.0`, the results will not be fully
+ * deterministic.
+ */
+ temperature?: number;
+
+ /**
+ * [beta] Definitions of tools that the model may use.
+ *
+ * If you include `tools` in your API request, the model may return `tool_use`
+ * content blocks that represent the model's use of those tools. You can then run
+ * those tools using the tool input generated by the model and then optionally
+ * return results back to the model using `tool_result` content blocks.
+ *
+ * Each tool definition includes:
+ *
+ * - `name`: Name of the tool.
+ * - `description`: Optional, but strongly-recommended description of the tool.
+ * - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ * shape that the model will produce in `tool_use` output content blocks.
+ *
+ * For example, if you defined `tools` as:
+ *
+ * ```json
+ * [
+ * {
+ * "name": "get_stock_price",
+ * "description": "Get the current stock price for a given ticker symbol.",
+ * "input_schema": {
+ * "type": "object",
+ * "properties": {
+ * "ticker": {
+ * "type": "string",
+ * "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ * }
+ * },
+ * "required": ["ticker"]
+ * }
+ * }
+ * ]
+ * ```
+ *
+ * And then asked the model "What's the S&P 500 at today?", the model might produce
+ * `tool_use` content blocks in the response like this:
+ *
+ * ```json
+ * [
+ * {
+ * "type": "tool_use",
+ * "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ * "name": "get_stock_price",
+ * "input": { "ticker": "^GSPC" }
+ * }
+ * ]
+ * ```
+ *
+ * You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ * input, and return the following back to the model in a subsequent `user`
+ * message:
+ *
+ * ```json
+ * [
+ * {
+ * "type": "tool_result",
+ * "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ * "content": "259.75 USD"
+ * }
+ * ]
+ * ```
+ *
+ * Tools can be used for workflows that include running client-side tools and
+ * functions, or more generally whenever you want the model to produce a particular
+ * JSON structure of output.
+ *
+ * See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ * details.
+ */
+ tools?: Array<Tool>;
+
+ /**
+ * Only sample from the top K options for each subsequent token.
+ *
+ * Used to remove "long tail" low probability responses.
+ * [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
+ */
+ top_k?: number;
+
+ /**
+ * Use nucleus sampling.
+ *
+ * In nucleus sampling, we compute the cumulative distribution over all the options
+ * for each subsequent token in decreasing probability order and cut it off once it
+ * reaches a particular probability specified by `top_p`. You should either alter
+ * `temperature` or `top_p`, but not both.
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
+ */
+ top_p?: number;
+}
+
+export namespace MessageCreateParams {
+ /**
+ * An object describing metadata about the request.
+ */
+ export interface Metadata {
+ /**
+ * An external identifier for the user who is associated with the request.
+ *
+ * This should be a uuid, hash value, or other opaque identifier. Anthropic may use
+ * this id to help detect abuse. Do not include any identifying information such as
+ * name, email address, or phone number.
+ */
+ user_id?: string | null;
+ }
+
+ export type MessageCreateParamsNonStreaming = ToolsMessagesAPI.MessageCreateParamsNonStreaming;
+ export type MessageCreateParamsStreaming = ToolsMessagesAPI.MessageCreateParamsStreaming;
+}
+
+export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase {
+ /**
+ * Whether to incrementally stream the response using server-sent events.
+ *
+ * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
+ * for details.
+ */
+ stream?: false;
+}
+
+export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
+ /**
+ * Whether to incrementally stream the response using server-sent events.
+ *
+ * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
+ * for details.
+ */
+ stream: true;
+}
+
+export namespace Messages {
+ export import Tool = ToolsMessagesAPI.Tool;
+ export import ToolResultBlockParam = ToolsMessagesAPI.ToolResultBlockParam;
+ export import ToolUseBlock = ToolsMessagesAPI.ToolUseBlock;
+ export import ToolUseBlockParam = ToolsMessagesAPI.ToolUseBlockParam;
+ export import ToolsBetaContentBlock = ToolsMessagesAPI.ToolsBetaContentBlock;
+ export import ToolsBetaMessage = ToolsMessagesAPI.ToolsBetaMessage;
+ export import ToolsBetaMessageParam = ToolsMessagesAPI.ToolsBetaMessageParam;
+ export import MessageCreateParams = ToolsMessagesAPI.MessageCreateParams;
+ export import MessageCreateParamsNonStreaming = ToolsMessagesAPI.MessageCreateParamsNonStreaming;
+ export import MessageCreateParamsStreaming = ToolsMessagesAPI.MessageCreateParamsStreaming;
+}
diff --git a/src/resources/beta/tools/tools.ts b/src/resources/beta/tools/tools.ts
new file mode 100644
index 00000000..d6be65db
--- /dev/null
+++ b/src/resources/beta/tools/tools.ts
@@ -0,0 +1,22 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '@anthropic-ai/sdk/resource';
+import * as MessagesAPI from '@anthropic-ai/sdk/resources/beta/tools/messages';
+
+export class Tools extends APIResource {
+ messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client);
+}
+
+export namespace Tools {
+ export import Messages = MessagesAPI.Messages;
+ export import Tool = MessagesAPI.Tool;
+ export import ToolResultBlockParam = MessagesAPI.ToolResultBlockParam;
+ export import ToolUseBlock = MessagesAPI.ToolUseBlock;
+ export import ToolUseBlockParam = MessagesAPI.ToolUseBlockParam;
+ export import ToolsBetaContentBlock = MessagesAPI.ToolsBetaContentBlock;
+ export import ToolsBetaMessage = MessagesAPI.ToolsBetaMessage;
+ export import ToolsBetaMessageParam = MessagesAPI.ToolsBetaMessageParam;
+ export import MessageCreateParams = MessagesAPI.MessageCreateParams;
+ export import MessageCreateParamsNonStreaming = MessagesAPI.MessageCreateParamsNonStreaming;
+ export import MessageCreateParamsStreaming = MessagesAPI.MessageCreateParamsStreaming;
+}
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 97f5682a..360cb01e 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,5 +1,6 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+export { Beta } from './beta/beta';
export {
Completion,
CompletionCreateParams,
diff --git a/src/resources/messages.ts b/src/resources/messages.ts
index 73d9291a..1156ae83 100644
--- a/src/resources/messages.ts
+++ b/src/resources/messages.ts
@@ -152,9 +152,6 @@ export interface Message {
* - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
* - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
*
- * Note that these values are different than those in `/v1/complete`, where
- * `end_turn` and `stop_sequence` were not differentiated.
- *
* In non-streaming mode this value is always non-null. In streaming mode, it is
* null in the `message_start` event and non-null otherwise.
*/
diff --git a/tests/api-resources/beta/tools/messages.test.ts b/tests/api-resources/beta/tools/messages.test.ts
new file mode 100644
index 00000000..93f12376
--- /dev/null
+++ b/tests/api-resources/beta/tools/messages.test.ts
@@ -0,0 +1,76 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Anthropic from '@anthropic-ai/sdk';
+import { Response } from 'node-fetch';
+
+const anthropic = new Anthropic({
+ apiKey: 'my-anthropic-api-key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource messages', () => {
+ test('create: only required params', async () => {
+ const responsePromise = anthropic.beta.tools.messages.create({
+ max_tokens: 1024,
+ messages: [{ role: 'user', content: 'Hello, world' }],
+ model: 'claude-3-opus-20240229',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await anthropic.beta.tools.messages.create({
+ max_tokens: 1024,
+ messages: [{ role: 'user', content: 'Hello, world' }],
+ model: 'claude-3-opus-20240229',
+ metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
+ stop_sequences: ['string', 'string', 'string'],
+ stream: false,
+ system: "Today's date is 2024-01-01.",
+ temperature: 1,
+ tools: [
+ {
+ description: 'Get the current weather in a given location',
+ name: 'x',
+ input_schema: {
+ type: 'object',
+ properties: {
+ location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+ unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+ },
+ },
+ },
+ {
+ description: 'Get the current weather in a given location',
+ name: 'x',
+ input_schema: {
+ type: 'object',
+ properties: {
+ location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+ unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+ },
+ },
+ },
+ {
+ description: 'Get the current weather in a given location',
+ name: 'x',
+ input_schema: {
+ type: 'object',
+ properties: {
+ location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+ unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+ },
+ },
+ },
+ ],
+ top_k: 5,
+ top_p: 0.7,
+ });
+ });
+});