From 2c7cc70ca841bcb20f133d528fc8f1336e0a3a8c Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Thu, 16 May 2024 11:01:37 -0400
Subject: [PATCH] feat(api): add `tool_choice` param, image block params inside
`tool_result.content`, and streaming for `tool_use` blocks (#418)
---
.stats.yml | 2 +-
api.md | 5 +
examples/tools-streaming.ts | 50 ++
src/_vendor/partial-json-parser/README.md | 3 +
src/_vendor/partial-json-parser/parser.ts | 262 ++++++++
src/lib/ToolsBetaMessageStream.ts | 561 ++++++++++++++++++
src/resources/beta/tools/index.ts | 5 +
src/resources/beta/tools/messages.ts | 418 ++++++++++++-
src/resources/beta/tools/tools.ts | 5 +
src/resources/completions.ts | 30 +-
src/resources/messages.ts | 44 +-
.../api-resources/beta/tools/messages.test.ts | 1 +
12 files changed, 1323 insertions(+), 63 deletions(-)
create mode 100644 examples/tools-streaming.ts
create mode 100644 src/_vendor/partial-json-parser/README.md
create mode 100644 src/_vendor/partial-json-parser/parser.ts
create mode 100644 src/lib/ToolsBetaMessageStream.ts
diff --git a/.stats.yml b/.stats.yml
index d383b41d..0f28d1e5 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 3
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-4742de59ec06077403336bc26e26390e57888e5eef313bf27eab241dbb905f06.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-0017013a270564e5cdfb7b8ffe474c962f4b806c862cbcc33c905504897fabbe.yml
diff --git a/api.md b/api.md
index ae434947..12150d07 100644
--- a/api.md
+++ b/api.md
@@ -34,14 +34,19 @@ Methods:
Types:
+- InputJsonDelta
- Tool
- ToolResultBlockParam
- ToolUseBlock
- ToolUseBlockParam
- ToolsBetaContentBlock
+- ToolsBetaContentBlockDeltaEvent
+- ToolsBetaContentBlockStartEvent
- ToolsBetaMessage
- ToolsBetaMessageParam
+- ToolsBetaMessageStreamEvent
Methods:
- client.beta.tools.messages.create({ ...params }) -> ToolsBetaMessage
+- client.beta.tools.messages.stream(body, options?) -> ToolsBetaMessageStream
diff --git a/examples/tools-streaming.ts b/examples/tools-streaming.ts
new file mode 100644
index 00000000..11d9589f
--- /dev/null
+++ b/examples/tools-streaming.ts
@@ -0,0 +1,50 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import Anthropic from '@anthropic-ai/sdk';
+import { inspect } from 'util';
+
+// gets API Key from environment variable ANTHROPIC_API_KEY
+const client = new Anthropic();
+
+async function main() {
+ const stream = client.beta.tools.messages
+ .stream({
+ messages: [
+ {
+ role: 'user',
+ content: `What is the weather in SF?`,
+ },
+ ],
+ tools: [
+ {
+ name: 'get_weather',
+ description: 'Get the weather at a specific location',
+ input_schema: {
+ type: 'object',
+ properties: {
+ location: { type: 'string', description: 'The city and state, e.g. San Francisco, CA' },
+ unit: {
+ type: 'string',
+ enum: ['celsius', 'fahrenheit'],
+ description: 'Unit for the output',
+ },
+ },
+ required: ['location'],
+ },
+ },
+ ],
+ model: 'claude-3-haiku-20240307',
+ max_tokens: 1024,
+ })
+ // When a JSON content block delta is encountered this
+ // event will be fired with the delta and the currently accumulated object
+ .on('inputJson', (delta, snapshot) => {
+ console.log(`delta: ${delta}`);
+ console.log(`snapshot: ${inspect(snapshot)}`);
+ console.log();
+ });
+
+ await stream.done();
+}
+
+main();
diff --git a/src/_vendor/partial-json-parser/README.md b/src/_vendor/partial-json-parser/README.md
new file mode 100644
index 00000000..bc6ea4e3
--- /dev/null
+++ b/src/_vendor/partial-json-parser/README.md
@@ -0,0 +1,3 @@
+# Partial JSON Parser
+
+Vendored from https://www.npmjs.com/package/partial-json-parser and updated to use TypeScript.
diff --git a/src/_vendor/partial-json-parser/parser.ts b/src/_vendor/partial-json-parser/parser.ts
new file mode 100644
index 00000000..2b84f7ca
--- /dev/null
+++ b/src/_vendor/partial-json-parser/parser.ts
@@ -0,0 +1,262 @@
+type Token = {
+ type: string;
+ value: string;
+};
+
+const tokenize = (input: string) => {
+ let current = 0;
+ let tokens = [];
+
+ while (current < input.length) {
+ let char = input[current];
+
+ if (char === '\\') {
+ current++;
+ continue;
+ }
+
+ if (char === '{') {
+ tokens.push({
+ type: 'brace',
+ value: '{',
+ });
+
+ current++;
+ continue;
+ }
+
+ if (char === '}') {
+ tokens.push({
+ type: 'brace',
+ value: '}',
+ });
+
+ current++;
+ continue;
+ }
+
+ if (char === '[') {
+ tokens.push({
+ type: 'paren',
+ value: '[',
+ });
+
+ current++;
+ continue;
+ }
+
+ if (char === ']') {
+ tokens.push({
+ type: 'paren',
+ value: ']',
+ });
+
+ current++;
+ continue;
+ }
+
+ if (char === ':') {
+ tokens.push({
+ type: 'separator',
+ value: ':',
+ });
+
+ current++;
+ continue;
+ }
+
+ if (char === ',') {
+ tokens.push({
+ type: 'delimiter',
+ value: ',',
+ });
+
+ current++;
+ continue;
+ }
+
+ if (char === '"') {
+ let value = '';
+ let danglingQuote = false;
+
+ char = input[++current];
+
+ while (char !== '"') {
+ if (current === input.length) {
+ danglingQuote = true;
+ break;
+ }
+
+ if (char === '\\') {
+ current++;
+ if (current === input.length) {
+ danglingQuote = true;
+ break;
+ }
+ value += char + input[current];
+ char = input[++current];
+ } else {
+ value += char;
+ char = input[++current];
+ }
+ }
+
+ char = input[++current];
+
+ if (!danglingQuote) {
+ tokens.push({
+ type: 'string',
+ value,
+ });
+ }
+ continue;
+ }
+
+ let WHITESPACE = /\s/;
+ if (char && WHITESPACE.test(char)) {
+ current++;
+ continue;
+ }
+
+ let NUMBERS = /[0-9]/;
+ if ((char && NUMBERS.test(char)) || char === '-' || char === '.') {
+ let value = '';
+
+ if (char === '-') {
+ value += char;
+ char = input[++current];
+ }
+
+ while ((char && NUMBERS.test(char)) || char === '.') {
+ value += char;
+ char = input[++current];
+ }
+
+ tokens.push({
+ type: 'number',
+ value,
+ });
+ continue;
+ }
+
+ let LETTERS = /[a-z]/i;
+ if (char && LETTERS.test(char)) {
+ let value = '';
+
+ while (char && LETTERS.test(char)) {
+ if (current === input.length) {
+ break;
+ }
+ value += char;
+ char = input[++current];
+ }
+
+ if (value == 'true' || value == 'false') {
+ tokens.push({
+ type: 'name',
+ value,
+ });
+ } else {
+ throw new Error(`Invalid token: ${value} is not a valid token!`);
+ }
+ continue;
+ }
+
+ current++;
+ }
+
+ return tokens;
+ },
+ strip = (tokens: Token[]): Token[] => {
+ if (tokens.length === 0) {
+ return tokens;
+ }
+
+ let lastToken = tokens[tokens.length - 1]!;
+
+ switch (lastToken.type) {
+ case 'separator':
+ tokens = tokens.slice(0, tokens.length - 1);
+ return strip(tokens);
+ break;
+ case 'number':
+ let lastCharacterOfLastToken = lastToken.value[lastToken.value.length - 1];
+ if (lastCharacterOfLastToken === '.' || lastCharacterOfLastToken === '-') {
+ tokens = tokens.slice(0, tokens.length - 1);
+ return strip(tokens);
+ }
+ case 'string':
+ let tokenBeforeTheLastToken = tokens[tokens.length - 2];
+ if (tokenBeforeTheLastToken?.type === 'delimiter') {
+ tokens = tokens.slice(0, tokens.length - 1);
+ return strip(tokens);
+ } else if (tokenBeforeTheLastToken?.type === 'brace' && tokenBeforeTheLastToken.value === '{') {
+ tokens = tokens.slice(0, tokens.length - 1);
+ return strip(tokens);
+ }
+ break;
+ case 'delimiter':
+ tokens = tokens.slice(0, tokens.length - 1);
+ return strip(tokens);
+ break;
+ }
+
+ return tokens;
+ },
+ unstrip = (tokens: Token[]): Token[] => {
+ let tail: string[] = [];
+
+ tokens.map((token) => {
+ if (token.type === 'brace') {
+ if (token.value === '{') {
+ tail.push('}');
+ } else {
+ tail.splice(tail.lastIndexOf('}'), 1);
+ }
+ }
+ if (token.type === 'paren') {
+ if (token.value === '[') {
+ tail.push(']');
+ } else {
+ tail.splice(tail.lastIndexOf(']'), 1);
+ }
+ }
+ });
+
+ if (tail.length > 0) {
+ tail.reverse().map((item) => {
+ if (item === '}') {
+ tokens.push({
+ type: 'brace',
+ value: '}',
+ });
+ } else if (item === ']') {
+ tokens.push({
+ type: 'paren',
+ value: ']',
+ });
+ }
+ });
+ }
+
+ return tokens;
+ },
+ generate = (tokens: Token[]): string => {
+ let output = '';
+
+ tokens.map((token) => {
+ switch (token.type) {
+ case 'string':
+ output += '"' + token.value + '"';
+ break;
+ default:
+ output += token.value;
+ break;
+ }
+ });
+
+ return output;
+ },
+ partialParse = (input: string): unknown => JSON.parse(generate(unstrip(strip(tokenize(input)))));
+
+export { partialParse };
diff --git a/src/lib/ToolsBetaMessageStream.ts b/src/lib/ToolsBetaMessageStream.ts
new file mode 100644
index 00000000..7c1612e9
--- /dev/null
+++ b/src/lib/ToolsBetaMessageStream.ts
@@ -0,0 +1,561 @@
+import * as Core from '@anthropic-ai/sdk/core';
+import { AnthropicError, APIUserAbortError } from '@anthropic-ai/sdk/error';
+import {
+ ToolsBetaContentBlock,
+ Messages,
+ ToolsBetaMessage,
+ ToolsBetaMessageStreamEvent,
+ ToolsBetaMessageParam,
+ MessageCreateParams,
+ MessageCreateParamsBase,
+} from '@anthropic-ai/sdk/resources/beta/tools/messages';
+import { type ReadableStream } from '@anthropic-ai/sdk/_shims/index';
+import { Stream } from '@anthropic-ai/sdk/streaming';
+import { TextBlock } from '@anthropic-ai/sdk/resources';
+import { partialParse } from '../_vendor/partial-json-parser/parser';
+
+export interface MessageStreamEvents {
+ connect: () => void;
+ streamEvent: (event: ToolsBetaMessageStreamEvent, snapshot: ToolsBetaMessage) => void;
+ text: (textDelta: string, textSnapshot: string) => void;
+ inputJson: (jsonDelta: string, jsonSnapshot: unknown) => void;
+ message: (message: ToolsBetaMessage) => void;
+ contentBlock: (content: ToolsBetaContentBlock) => void;
+ finalMessage: (message: ToolsBetaMessage) => void;
+ error: (error: AnthropicError) => void;
+ abort: (error: APIUserAbortError) => void;
+ end: () => void;
+}
+
+type MessageStreamEventListeners<Event extends keyof MessageStreamEvents> = {
+ listener: MessageStreamEvents[Event];
+ once?: boolean;
+}[];
+
+const JSON_BUF_PROPERTY = '__json_buf';
+
+export class ToolsBetaMessageStream implements AsyncIterable<ToolsBetaMessageStreamEvent> {
+ messages: ToolsBetaMessageParam[] = [];
+ receivedMessages: ToolsBetaMessage[] = [];
+ #currentMessageSnapshot: ToolsBetaMessage | undefined;
+
+ controller: AbortController = new AbortController();
+
+ #connectedPromise: Promise<void>;
+ #resolveConnectedPromise: () => void = () => {};
+ #rejectConnectedPromise: (error: AnthropicError) => void = () => {};
+
+ #endPromise: Promise<void>;
+ #resolveEndPromise: () => void = () => {};
+ #rejectEndPromise: (error: AnthropicError) => void = () => {};
+
+ #listeners: { [Event in keyof MessageStreamEvents]?: MessageStreamEventListeners<Event> } = {};
+
+ #ended = false;
+ #errored = false;
+ #aborted = false;
+ #catchingPromiseCreated = false;
+
+ constructor() {
+ this.#connectedPromise = new Promise<void>((resolve, reject) => {
+ this.#resolveConnectedPromise = resolve;
+ this.#rejectConnectedPromise = reject;
+ });
+
+ this.#endPromise = new Promise<void>((resolve, reject) => {
+ this.#resolveEndPromise = resolve;
+ this.#rejectEndPromise = reject;
+ });
+
+ // Don't let these promises cause unhandled rejection errors.
+ // we will manually cause an unhandled rejection error later
+ // if the user hasn't registered any error listener or called
+ // any promise-returning method.
+ this.#connectedPromise.catch(() => {});
+ this.#endPromise.catch(() => {});
+ }
+
+ /**
+ * Intended for use on the frontend, consuming a stream produced with
+ * `.toReadableStream()` on the backend.
+ *
+ * Note that messages sent to the model do not appear in `.on('message')`
+ * in this context.
+ */
+ static fromReadableStream(stream: ReadableStream): ToolsBetaMessageStream {
+ const runner = new ToolsBetaMessageStream();
+ runner._run(() => runner._fromReadableStream(stream));
+ return runner;
+ }
+
+ static createMessage(
+ messages: Messages,
+ params: MessageCreateParamsBase,
+ options?: Core.RequestOptions,
+ ): ToolsBetaMessageStream {
+ const runner = new ToolsBetaMessageStream();
+ for (const message of params.messages) {
+ runner._addMessageParam(message);
+ }
+ runner._run(() =>
+ runner._createMessage(
+ messages,
+ { ...params, stream: true },
+ { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } },
+ ),
+ );
+ return runner;
+ }
+
+ protected _run(executor: () => Promise<any>) {
+ executor().then(() => {
+ this._emitFinal();
+ this._emit('end');
+ }, this.#handleError);
+ }
+
+ protected _addMessageParam(message: ToolsBetaMessageParam) {
+ this.messages.push(message);
+ }
+
+ protected _addMessage(message: ToolsBetaMessage, emit = true) {
+ this.receivedMessages.push(message);
+ if (emit) {
+ this._emit('message', message);
+ }
+ }
+
+ protected async _createMessage(
+ messages: Messages,
+ params: MessageCreateParams,
+ options?: Core.RequestOptions,
+ ): Promise<void> {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ this.#beginRequest();
+ const stream = await messages.create(
+ { ...params, stream: true },
+ { ...options, signal: this.controller.signal },
+ );
+ this._connected();
+ for await (const event of stream) {
+ this.#addStreamEvent(event);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+ this.#endRequest();
+ }
+
+ protected _connected() {
+ if (this.ended) return;
+ this.#resolveConnectedPromise();
+ this._emit('connect');
+ }
+
+ get ended(): boolean {
+ return this.#ended;
+ }
+
+ get errored(): boolean {
+ return this.#errored;
+ }
+
+ get aborted(): boolean {
+ return this.#aborted;
+ }
+
+ abort() {
+ this.controller.abort();
+ }
+
+ /**
+ * Adds the listener function to the end of the listeners array for the event.
+ * No checks are made to see if the listener has already been added. Multiple calls passing
+ * the same combination of event and listener will result in the listener being added, and
+ * called, multiple times.
+ * @returns this MessageStream, so that calls can be chained
+ */
+ on<Event extends keyof MessageStreamEvents>(event: Event, listener: MessageStreamEvents[Event]): this {
+ const listeners: MessageStreamEventListeners<Event> =
+ this.#listeners[event] || (this.#listeners[event] = []);
+ listeners.push({ listener });
+ return this;
+ }
+
+ /**
+ * Removes the specified listener from the listener array for the event.
+ * off() will remove, at most, one instance of a listener from the listener array. If any single
+ * listener has been added multiple times to the listener array for the specified event, then
+ * off() must be called multiple times to remove each instance.
+ * @returns this MessageStream, so that calls can be chained
+ */
+ off<Event extends keyof MessageStreamEvents>(event: Event, listener: MessageStreamEvents[Event]): this {
+ const listeners = this.#listeners[event];
+ if (!listeners) return this;
+ const index = listeners.findIndex((l) => l.listener === listener);
+ if (index >= 0) listeners.splice(index, 1);
+ return this;
+ }
+
+ /**
+ * Adds a one-time listener function for the event. The next time the event is triggered,
+ * this listener is removed and then invoked.
+ * @returns this MessageStream, so that calls can be chained
+ */
+ once<Event extends keyof MessageStreamEvents>(event: Event, listener: MessageStreamEvents[Event]): this {
+ const listeners: MessageStreamEventListeners<Event> =
+ this.#listeners[event] || (this.#listeners[event] = []);
+ listeners.push({ listener, once: true });
+ return this;
+ }
+
+ /**
+ * This is similar to `.once()`, but returns a Promise that resolves the next time
+ * the event is triggered, instead of calling a listener callback.
+ * @returns a Promise that resolves the next time given event is triggered,
+ * or rejects if an error is emitted. (If you request the 'error' event,
+ * returns a promise that resolves with the error).
+ *
+ * Example:
+ *
+ * const message = await stream.emitted('message') // rejects if the stream errors
+ */
+ emitted<Event extends keyof MessageStreamEvents>(
+ event: Event,
+ ): Promise<
+ Parameters<MessageStreamEvents[Event]> extends [infer Param] ? Param
+ : Parameters<MessageStreamEvents[Event]> extends [] ? void
+ : Parameters<MessageStreamEvents[Event]>
+ > {
+ return new Promise((resolve, reject) => {
+ this.#catchingPromiseCreated = true;
+ if (event !== 'error') this.once('error', reject);
+ this.once(event, resolve as any);
+ });
+ }
+
+ async done(): Promise<void> {
+ this.#catchingPromiseCreated = true;
+ await this.#endPromise;
+ }
+
+ get currentMessage(): ToolsBetaMessage | undefined {
+ return this.#currentMessageSnapshot;
+ }
+
+ #getFinalMessage(): ToolsBetaMessage {
+ if (this.receivedMessages.length === 0) {
+ throw new AnthropicError('stream ended without producing a Message with role=assistant');
+ }
+ return this.receivedMessages.at(-1)!;
+ }
+
+ /**
+ * @returns a promise that resolves with the the final assistant Message response,
+ * or rejects if an error occurred or the stream ended prematurely without producing a Message.
+ */
+ async finalMessage(): Promise<ToolsBetaMessage> {
+ await this.done();
+ return this.#getFinalMessage();
+ }
+
+ #getFinalText(): string {
+ if (this.receivedMessages.length === 0) {
+ throw new AnthropicError('stream ended without producing a Message with role=assistant');
+ }
+ const textBlocks = this.receivedMessages
+ .at(-1)!
+ .content.filter((block): block is TextBlock => block.type === 'text')
+ .map((block) => block.text);
+ if (textBlocks.length === 0) {
+ throw new AnthropicError('stream ended without producing a content block with type=text');
+ }
+ return textBlocks.join(' ');
+ }
+
+ /**
+ * @returns a promise that resolves with the the final assistant Message's text response, concatenated
+ * together if there are more than one text blocks.
+ * Rejects if an error occurred or the stream ended prematurely without producing a Message.
+ */
+ async finalText(): Promise<string> {
+ await this.done();
+ return this.#getFinalText();
+ }
+
+ #handleError = (error: unknown) => {
+ this.#errored = true;
+ if (error instanceof Error && error.name === 'AbortError') {
+ error = new APIUserAbortError();
+ }
+ if (error instanceof APIUserAbortError) {
+ this.#aborted = true;
+ return this._emit('abort', error);
+ }
+ if (error instanceof AnthropicError) {
+ return this._emit('error', error);
+ }
+ if (error instanceof Error) {
+ const anthropicError: AnthropicError = new AnthropicError(error.message);
+ // @ts-ignore
+ anthropicError.cause = error;
+ return this._emit('error', anthropicError);
+ }
+ return this._emit('error', new AnthropicError(String(error)));
+ };
+
+ protected _emit<Event extends keyof MessageStreamEvents>(
+ event: Event,
+ ...args: Parameters<MessageStreamEvents[Event]>
+ ) {
+ // make sure we don't emit any MessageStreamEvents after end
+ if (this.#ended) return;
+
+ if (event === 'end') {
+ this.#ended = true;
+ this.#resolveEndPromise();
+ }
+
+ const listeners: MessageStreamEventListeners<Event> | undefined = this.#listeners[event];
+ if (listeners) {
+ this.#listeners[event] = listeners.filter((l) => !l.once) as any;
+ listeners.forEach(({ listener }: any) => listener(...args));
+ }
+
+ if (event === 'abort') {
+ const error = args[0] as APIUserAbortError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ return;
+ }
+
+ if (event === 'error') {
+ // NOTE: _emit('error', error) should only be called from #handleError().
+
+ const error = args[0] as AnthropicError;
+ if (!this.#catchingPromiseCreated && !listeners?.length) {
+ // Trigger an unhandled rejection if the user hasn't registered any error handlers.
+ // If you are seeing stack traces here, make sure to handle errors via either:
+ // - runner.on('error', () => ...)
+ // - await runner.done()
+ // - await runner.final...()
+ // - etc.
+ Promise.reject(error);
+ }
+ this.#rejectConnectedPromise(error);
+ this.#rejectEndPromise(error);
+ this._emit('end');
+ }
+ }
+
+ protected _emitFinal() {
+ const finalMessage = this.receivedMessages.at(-1);
+ if (finalMessage) {
+ this._emit('finalMessage', this.#getFinalMessage());
+ }
+ }
+
+ #beginRequest() {
+ if (this.ended) return;
+ this.#currentMessageSnapshot = undefined;
+ }
+ #addStreamEvent(event: ToolsBetaMessageStreamEvent) {
+ if (this.ended) return;
+ const messageSnapshot = this.#accumulateMessage(event);
+ this._emit('streamEvent', event, messageSnapshot);
+
+ switch (event.type) {
+ case 'content_block_delta': {
+ const content = messageSnapshot.content.at(-1)!;
+ if (event.delta.type === 'text_delta' && content.type === 'text') {
+ this._emit('text', event.delta.text, content.text || '');
+ } else if (event.delta.type === 'input_json_delta' && content.type === 'tool_use') {
+ if (content.input) {
+ this._emit('inputJson', event.delta.partial_json, content.input);
+ }
+ }
+ break;
+ }
+ case 'message_stop': {
+ this._addMessageParam(messageSnapshot);
+ this._addMessage(messageSnapshot, true);
+ break;
+ }
+ case 'content_block_stop': {
+ this._emit('contentBlock', messageSnapshot.content.at(-1)!);
+ break;
+ }
+ case 'message_start': {
+ this.#currentMessageSnapshot = messageSnapshot;
+ break;
+ }
+ case 'content_block_start':
+ case 'message_delta':
+ break;
+ }
+ }
+ #endRequest(): ToolsBetaMessage {
+ if (this.ended) {
+ throw new AnthropicError(`stream has ended, this shouldn't happen`);
+ }
+ const snapshot = this.#currentMessageSnapshot;
+ if (!snapshot) {
+ throw new AnthropicError(`request ended without sending any chunks`);
+ }
+ this.#currentMessageSnapshot = undefined;
+ return snapshot;
+ }
+
+ protected async _fromReadableStream(
+ readableStream: ReadableStream,
+ options?: Core.RequestOptions,
+ ): Promise<void> {
+ const signal = options?.signal;
+ if (signal) {
+ if (signal.aborted) this.controller.abort();
+ signal.addEventListener('abort', () => this.controller.abort());
+ }
+ this.#beginRequest();
+ this._connected();
+ const stream = Stream.fromReadableStream<ToolsBetaMessageStreamEvent>(readableStream, this.controller);
+ for await (const event of stream) {
+ this.#addStreamEvent(event);
+ }
+ if (stream.controller.signal?.aborted) {
+ throw new APIUserAbortError();
+ }
+ this.#endRequest();
+ }
+
+ /**
+ * Mutates this.#currentMessage with the current event. Handling the accumulation of multiple messages
+ * will be needed to be handled by the caller, this method will throw if you try to accumulate for multiple
+ * messages.
+ */
+ #accumulateMessage(event: ToolsBetaMessageStreamEvent): ToolsBetaMessage {
+ let snapshot = this.#currentMessageSnapshot;
+
+ if (event.type === 'message_start') {
+ if (snapshot) {
+ throw new AnthropicError(`Unexpected event order, got ${event.type} before receiving "message_stop"`);
+ }
+ return event.message;
+ }
+
+ if (!snapshot) {
+ throw new AnthropicError(`Unexpected event order, got ${event.type} before "message_start"`);
+ }
+
+ switch (event.type) {
+ case 'message_stop':
+ return snapshot;
+ case 'message_delta':
+ snapshot.stop_reason = event.delta.stop_reason;
+ snapshot.stop_sequence = event.delta.stop_sequence;
+ snapshot.usage.output_tokens = event.usage.output_tokens;
+ return snapshot;
+ case 'content_block_start':
+ snapshot.content.push(event.content_block);
+ return snapshot;
+ case 'content_block_delta': {
+ const snapshotContent = snapshot.content.at(event.index);
+ if (snapshotContent?.type === 'text' && event.delta.type === 'text_delta') {
+ snapshotContent.text += event.delta.text;
+ } else if (snapshotContent?.type === 'tool_use' && event.delta.type === 'input_json_delta') {
+ // we need to keep track of the raw JSON string as well so that we can
+ // re-parse it for each delta, for now we just store it as an untyped
+ // non-enumerable property on the snapshot
+ let jsonBuf = (snapshotContent as any)[JSON_BUF_PROPERTY] || '';
+ jsonBuf += event.delta.partial_json;
+
+ Object.defineProperty(snapshotContent, JSON_BUF_PROPERTY, {
+ value: jsonBuf,
+ enumerable: false,
+ writable: true,
+ });
+
+ if (jsonBuf) {
+ snapshotContent.input = partialParse(jsonBuf);
+ }
+ }
+ return snapshot;
+ }
+ case 'content_block_stop':
+ return snapshot;
+ }
+ }
+
+ [Symbol.asyncIterator](): AsyncIterator<ToolsBetaMessageStreamEvent> {
+ const pushQueue: ToolsBetaMessageStreamEvent[] = [];
+ const readQueue: {
+ resolve: (chunk: ToolsBetaMessageStreamEvent | undefined) => void;
+ reject: (error: unknown) => void;
+ }[] = [];
+ let done = false;
+
+ this.on('streamEvent', (event) => {
+ const reader = readQueue.shift();
+ if (reader) {
+ reader.resolve(event);
+ } else {
+ pushQueue.push(event);
+ }
+ });
+
+ this.on('end', () => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.resolve(undefined);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('abort', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ this.on('error', (err) => {
+ done = true;
+ for (const reader of readQueue) {
+ reader.reject(err);
+ }
+ readQueue.length = 0;
+ });
+
+ return {
+ next: async (): Promise<IteratorResult<ToolsBetaMessageStreamEvent>> => {
+ if (!pushQueue.length) {
+ if (done) {
+ return { value: undefined, done: true };
+ }
+ return new Promise<ToolsBetaMessageStreamEvent | undefined>((resolve, reject) =>
+ readQueue.push({ resolve, reject }),
+ ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));
+ }
+ const chunk = pushQueue.shift()!;
+ return { value: chunk, done: false };
+ },
+ return: async () => {
+ this.abort();
+ return { value: undefined, done: true };
+ },
+ };
+ }
+
+ toReadableStream(): ReadableStream {
+ const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);
+ return stream.toReadableStream();
+ }
+}
diff --git a/src/resources/beta/tools/index.ts b/src/resources/beta/tools/index.ts
index 9e77fc23..fb831369 100644
--- a/src/resources/beta/tools/index.ts
+++ b/src/resources/beta/tools/index.ts
@@ -1,16 +1,21 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export {
+ InputJsonDelta,
Tool,
ToolResultBlockParam,
ToolUseBlock,
ToolUseBlockParam,
ToolsBetaContentBlock,
+ ToolsBetaContentBlockDeltaEvent,
+ ToolsBetaContentBlockStartEvent,
ToolsBetaMessage,
ToolsBetaMessageParam,
+ ToolsBetaMessageStreamEvent,
MessageCreateParams,
MessageCreateParamsNonStreaming,
MessageCreateParamsStreaming,
+ MessageStreamParams,
Messages,
} from './messages';
export { Tools } from './tools';
diff --git a/src/resources/beta/tools/messages.ts b/src/resources/beta/tools/messages.ts
index 80856085..9c16043a 100644
--- a/src/resources/beta/tools/messages.ts
+++ b/src/resources/beta/tools/messages.ts
@@ -3,6 +3,8 @@
import * as Core from '@anthropic-ai/sdk/core';
import { APIPromise } from '@anthropic-ai/sdk/core';
import { APIResource } from '@anthropic-ai/sdk/resource';
+import { ToolsBetaMessageStream } from '@anthropic-ai/sdk/lib/ToolsBetaMessageStream';
+export { ToolsBetaMessageStream } from '@anthropic-ai/sdk/lib/ToolsBetaMessageStream';
import * as ToolsMessagesAPI from '@anthropic-ai/sdk/resources/beta/tools/messages';
import * as MessagesAPI from '@anthropic-ai/sdk/resources/messages';
import { Stream } from '@anthropic-ai/sdk/streaming';
@@ -21,23 +23,36 @@ export class Messages extends APIResource {
create(
body: MessageCreateParamsStreaming,
options?: Core.RequestOptions,
- ): APIPromise<Stream<MessagesAPI.MessageStreamEvent>>;
+ ): APIPromise<Stream<ToolsBetaMessageStreamEvent>>;
create(
body: MessageCreateParamsBase,
options?: Core.RequestOptions,
- ): APIPromise<Stream<MessagesAPI.MessageStreamEvent> | ToolsBetaMessage>;
+ ): APIPromise<Stream<ToolsBetaMessageStreamEvent> | ToolsBetaMessage>;
create(
body: MessageCreateParams,
options?: Core.RequestOptions,
- ): APIPromise<ToolsBetaMessage> | APIPromise<Stream<MessagesAPI.MessageStreamEvent>> {
+ ): APIPromise<ToolsBetaMessage> | APIPromise<Stream<ToolsBetaMessageStreamEvent>> {
return this._client.post('/v1/messages?beta=tools', {
body,
timeout: 600000,
...options,
- headers: { 'anthropic-beta': 'tools-2024-04-04', ...options?.headers },
+ headers: { 'anthropic-beta': 'tools-2024-05-16', ...options?.headers },
stream: body.stream ?? false,
- }) as APIPromise<ToolsBetaMessage> | APIPromise<Stream<MessagesAPI.MessageStreamEvent>>;
+ }) as APIPromise<ToolsBetaMessage> | APIPromise<Stream<ToolsBetaMessageStreamEvent>>;
}
+
+ /**
+ * Create a Message stream
+ */
+ stream(body: MessageStreamParams, options?: Core.RequestOptions): ToolsBetaMessageStream {
+ return ToolsBetaMessageStream.createMessage(this, body, options);
+ }
+}
+
+export interface InputJsonDelta {
+ partial_json: string;
+
+ type: 'input_json_delta';
}
export interface Tool {
@@ -82,7 +97,7 @@ export interface ToolResultBlockParam {
type: 'tool_result';
- content?: Array<MessagesAPI.TextBlockParam>;
+ content?: Array<MessagesAPI.TextBlockParam | MessagesAPI.ImageBlockParam>;
is_error?: boolean;
}
@@ -109,6 +124,22 @@ export interface ToolUseBlockParam {
export type ToolsBetaContentBlock = MessagesAPI.TextBlock | ToolUseBlock;
+export interface ToolsBetaContentBlockDeltaEvent {
+ delta: MessagesAPI.TextDelta | InputJsonDelta;
+
+ index: number;
+
+ type: 'content_block_delta';
+}
+
+export interface ToolsBetaContentBlockStartEvent {
+ content_block: MessagesAPI.TextBlock | ToolUseBlock;
+
+ index: number;
+
+ type: 'content_block_start';
+}
+
export interface ToolsBetaMessage {
/**
* Unique object identifier.
@@ -222,6 +253,14 @@ export interface ToolsBetaMessageParam {
role: 'user' | 'assistant';
}
+export type ToolsBetaMessageStreamEvent =
+ | MessagesAPI.MessageStartEvent
+ | MessagesAPI.MessageDeltaEvent
+ | MessagesAPI.MessageStopEvent
+ | ToolsBetaContentBlockStartEvent
+ | ToolsBetaContentBlockDeltaEvent
+ | MessagesAPI.ContentBlockStopEvent;
+
export type MessageCreateParams = MessageCreateParamsNonStreaming | MessageCreateParamsStreaming;
export interface MessageCreateParamsBase {
@@ -232,7 +271,7 @@ export interface MessageCreateParamsBase {
* only specifies the absolute maximum number of tokens to generate.
*
* Different models have different maximum values for this parameter. See
- * [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ * [models](https://docs.anthropic.com/en/docs/models-overview) for details.
*/
max_tokens: number;
@@ -315,12 +354,12 @@ export interface MessageCreateParamsBase {
* We currently support the `base64` source type for images, and the `image/jpeg`,
* `image/png`, `image/gif`, and `image/webp` media types.
*
- * See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- * for more input examples.
+ * See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ * input examples.
*
* Note that if you want to include a
- * [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- * use the top-level `system` parameter — there is no `"system"` role for input
+ * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ * the top-level `system` parameter — there is no `"system"` role for input
* messages in the Messages API.
*/
messages: Array<ToolsBetaMessageParam>;
@@ -328,8 +367,8 @@ export interface MessageCreateParamsBase {
/**
* The model that will complete your prompt.
*
- * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- * additional details and options.
+ * See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ * details and options.
*/
model: string;
@@ -354,8 +393,8 @@ export interface MessageCreateParamsBase {
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ * details.
*/
stream?: boolean;
@@ -364,7 +403,7 @@ export interface MessageCreateParamsBase {
*
* A system prompt is a way of providing context and instructions to Claude, such
* as specifying a particular goal or role. See our
- * [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
*/
system?: string;
@@ -380,6 +419,15 @@ export interface MessageCreateParamsBase {
*/
temperature?: number;
+ /**
+ * How the model should use the provided tools. The model can use a specific tool,
+ * any available tool, or decide by itself.
+ */
+ tool_choice?:
+ | MessageCreateParams.ToolChoiceAuto
+ | MessageCreateParams.ToolChoiceAny
+ | MessageCreateParams.ToolChoiceTool;
+
/**
* [beta] Definitions of tools that the model may use.
*
@@ -448,7 +496,7 @@ export interface MessageCreateParamsBase {
* functions, or more generally whenever you want the model to produce a particular
* JSON structure of output.
*
- * See our [beta guide](https://docs.anthropic.com/claude/docs/tool-use) for more
+ * See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
* details.
*/
tools?: Array<Tool>;
@@ -493,6 +541,32 @@ export namespace MessageCreateParams {
user_id?: string | null;
}
+ /**
+ * The model will automatically decide whether to use tools.
+ */
+ export interface ToolChoiceAuto {
+ type: 'auto';
+ }
+
+ /**
+ * The model will use any available tools.
+ */
+ export interface ToolChoiceAny {
+ type: 'any';
+ }
+
+ /**
+ * The model will use the specified tool with `tool_choice.name`.
+ */
+ export interface ToolChoiceTool {
+ /**
+ * The name of the tool to use.
+ */
+ name: string;
+
+ type: 'tool';
+ }
+
export type MessageCreateParamsNonStreaming = ToolsMessagesAPI.MessageCreateParamsNonStreaming;
export type MessageCreateParamsStreaming = ToolsMessagesAPI.MessageCreateParamsStreaming;
}
@@ -501,8 +575,8 @@ export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ * details.
*/
stream?: false;
}
@@ -511,21 +585,323 @@ export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ * details.
*/
stream: true;
}
+export interface MessageStreamParams {
+ /**
+ * The maximum number of tokens to generate before stopping.
+ *
+ * Note that our models may stop _before_ reaching this maximum. This parameter
+ * only specifies the absolute maximum number of tokens to generate.
+ *
+ * Different models have different maximum values for this parameter. See
+ * [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ */
+ max_tokens: number;
+
+ /**
+ * Input messages.
+ *
+ * Our models are trained to operate on alternating `user` and `assistant`
+ * conversational turns. When creating a new `Message`, you specify the prior
+ * conversational turns with the `messages` parameter, and the model then generates
+ * the next `Message` in the conversation.
+ *
+ * Each input message must be an object with a `role` and `content`. You can
+ * specify a single `user`-role message, or you can include multiple `user` and
+ * `assistant` messages. The first message must always use the `user` role.
+ *
+ * If the final message uses the `assistant` role, the response content will
+ * continue immediately from the content in that message. This can be used to
+ * constrain part of the model's response.
+ *
+ * Example with a single `user` message:
+ *
+ * ```json
+ * [{ "role": "user", "content": "Hello, Claude" }]
+ * ```
+ *
+ * Example with multiple conversational turns:
+ *
+ * ```json
+ * [
+ * { "role": "user", "content": "Hello there." },
+ * { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
+ * { "role": "user", "content": "Can you explain LLMs in plain English?" }
+ * ]
+ * ```
+ *
+ * Example with a partially-filled response from Claude:
+ *
+ * ```json
+ * [
+ * {
+ * "role": "user",
+ * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ * },
+ * { "role": "assistant", "content": "The best answer is (" }
+ * ]
+ * ```
+ *
+ * Each input message `content` may be either a single `string` or an array of
+ * content blocks, where each block has a specific `type`. Using a `string` for
+ * `content` is shorthand for an array of one content block of type `"text"`. The
+ * following input messages are equivalent:
+ *
+ * ```json
+ * { "role": "user", "content": "Hello, Claude" }
+ * ```
+ *
+ * ```json
+ * { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
+ * ```
+ *
+ * Starting with Claude 3 models, you can also send image content blocks:
+ *
+ * ```json
+ * {
+ * "role": "user",
+ * "content": [
+ * {
+ * "type": "image",
+ * "source": {
+ * "type": "base64",
+ * "media_type": "image/jpeg",
+ * "data": "/9j/4AAQSkZJRg..."
+ * }
+ * },
+ * { "type": "text", "text": "What is in this image?" }
+ * ]
+ * }
+ * ```
+ *
+ * We currently support the `base64` source type for images, and the `image/jpeg`,
+ * `image/png`, `image/gif`, and `image/webp` media types.
+ *
+ * See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ * input examples.
+ *
+ * Note that if you want to include a
+ * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ * the top-level `system` parameter — there is no `"system"` role for input
+ * messages in the Messages API.
+ */
+ messages: Array<ToolsBetaMessageParam>;
+
+ /**
+ * The model that will complete your prompt.
+ *
+ * See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ * details and options.
+ */
+ model: string;
+
+ /**
+ * An object describing metadata about the request.
+ */
+ metadata?: MessageStreamParams.Metadata;
+
+ /**
+ * Custom text sequences that will cause the model to stop generating.
+ *
+ * Our models will normally stop when they have naturally completed their turn,
+ * which will result in a response `stop_reason` of `"end_turn"`.
+ *
+ * If you want the model to stop generating when it encounters custom strings of
+ * text, you can use the `stop_sequences` parameter. If the model encounters one of
+ * the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
+ * and the response `stop_sequence` value will contain the matched stop sequence.
+ */
+ stop_sequences?: Array<string>;
+
+ /**
+ * System prompt.
+ *
+ * A system prompt is a way of providing context and instructions to Claude, such
+ * as specifying a particular goal or role. See our
+ * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ */
+ system?: string;
+
+ /**
+ * Amount of randomness injected into the response.
+ *
+ * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
+ * for analytical / multiple choice, and closer to `1.0` for creative and
+ * generative tasks.
+ *
+ * Note that even with `temperature` of `0.0`, the results will not be fully
+ * deterministic.
+ */
+ temperature?: number;
+
+ /**
+ * How the model should use the provided tools. The model can use a specific tool,
+ * any available tool, or decide by itself.
+ */
+ tool_choice?:
+ | MessageStreamParams.ToolChoiceAuto
+ | MessageStreamParams.ToolChoiceAny
+ | MessageStreamParams.ToolChoiceTool;
+
+ /**
+ * [beta] Definitions of tools that the model may use.
+ *
+ * If you include `tools` in your API request, the model may return `tool_use`
+ * content blocks that represent the model's use of those tools. You can then run
+ * those tools using the tool input generated by the model and then optionally
+ * return results back to the model using `tool_result` content blocks.
+ *
+ * Each tool definition includes:
+ *
+ * - `name`: Name of the tool.
+ * - `description`: Optional, but strongly-recommended description of the tool.
+ * - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ * shape that the model will produce in `tool_use` output content blocks.
+ *
+ * For example, if you defined `tools` as:
+ *
+ * ```json
+ * [
+ * {
+ * "name": "get_stock_price",
+ * "description": "Get the current stock price for a given ticker symbol.",
+ * "input_schema": {
+ * "type": "object",
+ * "properties": {
+ * "ticker": {
+ * "type": "string",
+ * "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ * }
+ * },
+ * "required": ["ticker"]
+ * }
+ * }
+ * ]
+ * ```
+ *
+ * And then asked the model "What's the S&P 500 at today?", the model might produce
+ * `tool_use` content blocks in the response like this:
+ *
+ * ```json
+ * [
+ * {
+ * "type": "tool_use",
+ * "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ * "name": "get_stock_price",
+ * "input": { "ticker": "^GSPC" }
+ * }
+ * ]
+ * ```
+ *
+ * You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ * input, and return the following back to the model in a subsequent `user`
+ * message:
+ *
+ * ```json
+ * [
+ * {
+ * "type": "tool_result",
+ * "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ * "content": "259.75 USD"
+ * }
+ * ]
+ * ```
+ *
+ * Tools can be used for workflows that include running client-side tools and
+ * functions, or more generally whenever you want the model to produce a particular
+ * JSON structure of output.
+ *
+ * See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
+ * details.
+ */
+ tools?: Array<Tool>;
+
+ /**
+ * Only sample from the top K options for each subsequent token.
+ *
+ * Used to remove "long tail" low probability responses.
+ * [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
+ */
+ top_k?: number;
+
+ /**
+ * Use nucleus sampling.
+ *
+ * In nucleus sampling, we compute the cumulative distribution over all the options
+ * for each subsequent token in decreasing probability order and cut it off once it
+ * reaches a particular probability specified by `top_p`. You should either alter
+ * `temperature` or `top_p`, but not both.
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
+ */
+ top_p?: number;
+}
+
+export namespace MessageStreamParams {
+ /**
+ * An object describing metadata about the request.
+ */
+ export interface Metadata {
+ /**
+ * An external identifier for the user who is associated with the request.
+ *
+ * This should be a uuid, hash value, or other opaque identifier. Anthropic may use
+ * this id to help detect abuse. Do not include any identifying information such as
+ * name, email address, or phone number.
+ */
+ user_id?: string | null;
+ }
+
+ /**
+ * The model will automatically decide whether to use tools.
+ */
+ export interface ToolChoiceAuto {
+ type: 'auto';
+ }
+
+ /**
+ * The model will use any available tools.
+ */
+ export interface ToolChoiceAny {
+ type: 'any';
+ }
+
+ /**
+ * The model will use the specified tool with `tool_choice.name`.
+ */
+ export interface ToolChoiceTool {
+ /**
+ * The name of the tool to use.
+ */
+ name: string;
+
+ type: 'tool';
+ }
+}
+
export namespace Messages {
+ export import InputJsonDelta = ToolsMessagesAPI.InputJsonDelta;
export import Tool = ToolsMessagesAPI.Tool;
export import ToolResultBlockParam = ToolsMessagesAPI.ToolResultBlockParam;
export import ToolUseBlock = ToolsMessagesAPI.ToolUseBlock;
export import ToolUseBlockParam = ToolsMessagesAPI.ToolUseBlockParam;
export import ToolsBetaContentBlock = ToolsMessagesAPI.ToolsBetaContentBlock;
+ export import ToolsBetaContentBlockDeltaEvent = ToolsMessagesAPI.ToolsBetaContentBlockDeltaEvent;
+ export import ToolsBetaContentBlockStartEvent = ToolsMessagesAPI.ToolsBetaContentBlockStartEvent;
export import ToolsBetaMessage = ToolsMessagesAPI.ToolsBetaMessage;
export import ToolsBetaMessageParam = ToolsMessagesAPI.ToolsBetaMessageParam;
+ export import ToolsBetaMessageStreamEvent = ToolsMessagesAPI.ToolsBetaMessageStreamEvent;
export import MessageCreateParams = ToolsMessagesAPI.MessageCreateParams;
export import MessageCreateParamsNonStreaming = ToolsMessagesAPI.MessageCreateParamsNonStreaming;
export import MessageCreateParamsStreaming = ToolsMessagesAPI.MessageCreateParamsStreaming;
+ export import MessageStreamParams = ToolsMessagesAPI.MessageStreamParams;
}
diff --git a/src/resources/beta/tools/tools.ts b/src/resources/beta/tools/tools.ts
index d6be65db..f3bd8009 100644
--- a/src/resources/beta/tools/tools.ts
+++ b/src/resources/beta/tools/tools.ts
@@ -9,14 +9,19 @@ export class Tools extends APIResource {
export namespace Tools {
export import Messages = MessagesAPI.Messages;
+ export import InputJsonDelta = MessagesAPI.InputJsonDelta;
export import Tool = MessagesAPI.Tool;
export import ToolResultBlockParam = MessagesAPI.ToolResultBlockParam;
export import ToolUseBlock = MessagesAPI.ToolUseBlock;
export import ToolUseBlockParam = MessagesAPI.ToolUseBlockParam;
export import ToolsBetaContentBlock = MessagesAPI.ToolsBetaContentBlock;
+ export import ToolsBetaContentBlockDeltaEvent = MessagesAPI.ToolsBetaContentBlockDeltaEvent;
+ export import ToolsBetaContentBlockStartEvent = MessagesAPI.ToolsBetaContentBlockStartEvent;
export import ToolsBetaMessage = MessagesAPI.ToolsBetaMessage;
export import ToolsBetaMessageParam = MessagesAPI.ToolsBetaMessageParam;
+ export import ToolsBetaMessageStreamEvent = MessagesAPI.ToolsBetaMessageStreamEvent;
export import MessageCreateParams = MessagesAPI.MessageCreateParams;
export import MessageCreateParamsNonStreaming = MessagesAPI.MessageCreateParamsNonStreaming;
export import MessageCreateParamsStreaming = MessagesAPI.MessageCreateParamsStreaming;
+ export import MessageStreamParams = MessagesAPI.MessageStreamParams;
}
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 818a3cad..d8e810e2 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -11,11 +11,10 @@ export class Completions extends APIResource {
* [Legacy] Create a Text Completion.
*
* The Text Completions API is a legacy API. We recommend using the
- * [Messages API](https://docs.anthropic.com/claude/reference/messages_post) going
- * forward.
+ * [Messages API](https://docs.anthropic.com/en/api/messages) going forward.
*
* Future models and features will not be compatible with Text Completions. See our
- * [migration guide](https://docs.anthropic.com/claude/reference/migrating-from-text-completions-to-messages)
+ * [migration guide](https://docs.anthropic.com/en/api/migrating-from-text-completions-to-messages)
* for guidance in migrating from Text Completions to Messages.
*/
create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Completion>;
@@ -91,8 +90,8 @@ export interface CompletionCreateParamsBase {
/**
* The model that will complete your prompt.
*
- * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- * additional details and options.
+ * See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ * details and options.
*/
model: (string & {}) | 'claude-2.0' | 'claude-2.1' | 'claude-instant-1.2';
@@ -106,11 +105,10 @@ export interface CompletionCreateParamsBase {
* "\n\nHuman: {userQuestion}\n\nAssistant:"
* ```
*
- * See
- * [prompt validation](https://anthropic.readme.io/claude/reference/prompt-validation)
- * and our guide to
- * [prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- * for more details.
+ * See [prompt validation](https://docs.anthropic.com/en/api/prompt-validation) and
+ * our guide to
+ * [prompt design](https://docs.anthropic.com/en/docs/intro-to-prompting) for more
+ * details.
*/
prompt: string;
@@ -131,9 +129,7 @@ export interface CompletionCreateParamsBase {
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See
- * [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
*/
stream?: boolean;
@@ -197,9 +193,7 @@ export interface CompletionCreateParamsNonStreaming extends CompletionCreatePara
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See
- * [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
*/
stream?: false;
}
@@ -208,9 +202,7 @@ export interface CompletionCreateParamsStreaming extends CompletionCreateParamsB
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See
- * [streaming](https://docs.anthropic.com/claude/reference/text-completions-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/streaming) for details.
*/
stream: true;
}
diff --git a/src/resources/messages.ts b/src/resources/messages.ts
index 1156ae83..bdfad260 100644
--- a/src/resources/messages.ts
+++ b/src/resources/messages.ts
@@ -290,7 +290,7 @@ export interface MessageCreateParamsBase {
* only specifies the absolute maximum number of tokens to generate.
*
* Different models have different maximum values for this parameter. See
- * [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ * [models](https://docs.anthropic.com/en/docs/models-overview) for details.
*/
max_tokens: number;
@@ -373,12 +373,12 @@ export interface MessageCreateParamsBase {
* We currently support the `base64` source type for images, and the `image/jpeg`,
* `image/png`, `image/gif`, and `image/webp` media types.
*
- * See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- * for more input examples.
+ * See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ * input examples.
*
* Note that if you want to include a
- * [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- * use the top-level `system` parameter — there is no `"system"` role for input
+ * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ * the top-level `system` parameter — there is no `"system"` role for input
* messages in the Messages API.
*/
messages: Array<MessageParam>;
@@ -386,8 +386,8 @@ export interface MessageCreateParamsBase {
/**
* The model that will complete your prompt.
*
- * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- * additional details and options.
+ * See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ * details and options.
*/
model:
| (string & {})
@@ -419,8 +419,8 @@ export interface MessageCreateParamsBase {
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ * details.
*/
stream?: boolean;
@@ -429,7 +429,7 @@ export interface MessageCreateParamsBase {
*
* A system prompt is a way of providing context and instructions to Claude, such
* as specifying a particular goal or role. See our
- * [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
*/
system?: string;
@@ -493,8 +493,8 @@ export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ * details.
*/
stream?: false;
}
@@ -503,8 +503,8 @@ export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
/**
* Whether to incrementally stream the response using server-sent events.
*
- * See [streaming](https://docs.anthropic.com/claude/reference/messages-streaming)
- * for details.
+ * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+ * details.
*/
stream: true;
}
@@ -517,7 +517,7 @@ export interface MessageStreamParams {
* only specifies the absolute maximum number of tokens to generate.
*
* Different models have different maximum values for this parameter. See
- * [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
+ * [models](https://docs.anthropic.com/en/docs/models-overview) for details.
*/
max_tokens: number;
@@ -600,12 +600,12 @@ export interface MessageStreamParams {
* We currently support the `base64` source type for images, and the `image/jpeg`,
* `image/png`, `image/gif`, and `image/webp` media types.
*
- * See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
- * for more input examples.
+ * See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
+ * input examples.
*
* Note that if you want to include a
- * [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
- * use the top-level `system` parameter — there is no `"system"` role for input
+ * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ * the top-level `system` parameter — there is no `"system"` role for input
* messages in the Messages API.
*/
messages: Array<MessageParam>;
@@ -613,8 +613,8 @@ export interface MessageStreamParams {
/**
* The model that will complete your prompt.
*
- * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
- * additional details and options.
+ * See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ * details and options.
*/
model:
| (string & {})
@@ -648,7 +648,7 @@ export interface MessageStreamParams {
*
* A system prompt is a way of providing context and instructions to Claude, such
* as specifying a particular goal or role. See our
- * [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
+ * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
*/
system?: string;
diff --git a/tests/api-resources/beta/tools/messages.test.ts b/tests/api-resources/beta/tools/messages.test.ts
index 93f12376..71eccab6 100644
--- a/tests/api-resources/beta/tools/messages.test.ts
+++ b/tests/api-resources/beta/tools/messages.test.ts
@@ -34,6 +34,7 @@ describe('resource messages', () => {
stream: false,
system: "Today's date is 2024-01-01.",
temperature: 1,
+ tool_choice: { type: 'auto' },
tools: [
{
description: 'Get the current weather in a given location',