From 1b87d9ea141defa81fa31ee47673b47e19cc8cb1 Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 4 Mar 2024 14:59:05 +0100
Subject: [PATCH] feat(messages): add support for image inputs (#303)
---
README.md | 44 +++--
api.md | 11 +-
examples/cancellation.ts | 2 +-
examples/demo.ts | 2 +-
examples/raw-streaming.ts | 2 +-
examples/streaming.ts | 2 +-
packages/bedrock-sdk/README.md | 2 +-
packages/bedrock-sdk/examples/demo.ts | 2 +-
src/index.ts | 1 +
src/resources/completions.ts | 29 ++-
src/resources/index.ts | 1 +
src/resources/messages.ts | 216 ++++++++++++++++------
tests/api-resources/MessageStream.test.ts | 32 ++--
tests/api-resources/messages.test.ts | 8 +-
14 files changed, 228 insertions(+), 126 deletions(-)
diff --git a/README.md b/README.md
index bbdd3f64..6c1f413e 100644
--- a/README.md
+++ b/README.md
@@ -30,8 +30,8 @@ const anthropic = new Anthropic({
async function main() {
const message = await anthropic.messages.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'How does a court case get to the supreme court?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
});
console.log(message.content);
@@ -51,8 +51,8 @@ const anthropic = new Anthropic();
const stream = await anthropic.messages.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'your prompt here' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
stream: true,
});
for await (const messageStreamEvent of stream) {
@@ -78,8 +78,8 @@ const anthropic = new Anthropic({
async function main() {
const params: Anthropic.MessageCreateParams = {
max_tokens: 1024,
- messages: [{ role: 'user', content: 'Where can I get a good coffee in my neighbourhood?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
};
const message: Anthropic.Message = await anthropic.messages.create(params);
}
@@ -91,9 +91,13 @@ Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors.
## Counting Tokens
-We provide a [separate package](https://github.com/anthropics/anthropic-tokenizer-typescript) for counting how many tokens a given piece of text contains.
+You can see the exact usage for a given request through the `usage` response property, e.g.
-See the [repository documentation](https://github.com/anthropics/anthropic-tokenizer-typescript) for more details.
+```ts
+const message = await client.messages.create(...)
+console.log(message.usage)
+// { input_tokens: 25, output_tokens: 13 }
+```
## Streaming Helpers
@@ -107,7 +111,7 @@ const anthropic = new Anthropic();
async function main() {
const stream = anthropic.messages
.stream({
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
max_tokens: 1024,
messages: [
{
@@ -143,8 +147,8 @@ async function main() {
const message = await anthropic.messages
.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'your prompt here' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
})
.catch((err) => {
if (err instanceof Anthropic.APIError) {
@@ -189,7 +193,7 @@ const anthropic = new Anthropic({
});
// Or, configure per-request:
-await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Can you help me effectively ask for a raise at work?' }], model: 'claude-2.1' }, {
+await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
maxRetries: 5,
});
```
@@ -206,7 +210,7 @@ const anthropic = new Anthropic({
});
// Override per-request:
-await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Where can I get a good coffee in my neighbourhood?' }], model: 'claude-2.1' }, {
+await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
timeout: 5 * 1000,
});
```
@@ -231,8 +235,8 @@ const anthropic = new Anthropic();
const message = await anthropic.messages.create(
{
max_tokens: 1024,
- messages: [{ role: 'user', content: 'Where can I get a good coffee in my neighbourhood?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
},
{ headers: { 'anthropic-version': 'My-Custom-Value' } },
);
@@ -253,8 +257,8 @@ const anthropic = new Anthropic();
const response = await anthropic.messages
.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'Where can I get a good coffee in my neighbourhood?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
})
.asResponse();
console.log(response.headers.get('X-My-Header'));
@@ -263,8 +267,8 @@ console.log(response.statusText); // access the underlying Response object
const { data: message, response: raw } = await anthropic.messages
.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'Where can I get a good coffee in my neighbourhood?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, Claude' }],
+ model: 'claude-3-opus-20240229',
})
.withResponse();
console.log(raw.headers.get('X-My-Header'));
@@ -326,7 +330,7 @@ const anthropic = new Anthropic({
});
// Override per-request:
-await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Where can I get a good coffee in my neighbourhood?' }], model: 'claude-2.1' }, {
+await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
baseURL: 'http://localhost:8080/test-api',
httpAgent: new http.Agent({ keepAlive: false }),
})
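For reviewers, a minimal sketch of the image-input call that the updated README and the new `ImageBlockParam` type enable; the base64 `imageData` value is a truncated placeholder, not real image data:

```ts
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic();

async function main() {
  // Placeholder: in practice, read a file and base64-encode it.
  const imageData = '/9j/4AAQSkZJRg...';

  const message = await anthropic.messages.create({
    model: 'claude-3-opus-20240229',
    max_tokens: 1024,
    messages: [
      {
        role: 'user',
        content: [
          { type: 'image', source: { type: 'base64', media_type: 'image/jpeg', data: imageData } },
          { type: 'text', text: 'What is in this image?' },
        ],
      },
    ],
  });
  console.log(message.content);
}

main();
```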
diff --git a/api.md b/api.md
index 7006dbce..450ef5be 100644
--- a/api.md
+++ b/api.md
@@ -1,15 +1,5 @@
# Anthropic
-# Completions
-
-Types:
-
-- Completion
-
-Methods:
-
-- client.completions.create({ ...params }) -> Completion
-
# Messages
Types:
@@ -18,6 +8,7 @@ Types:
- ContentBlockDeltaEvent
- ContentBlockStartEvent
- ContentBlockStopEvent
+- ImageBlockParam
- Message
- MessageDeltaEvent
- MessageDeltaUsage
diff --git a/examples/cancellation.ts b/examples/cancellation.ts
index 71ead273..54607cc0 100755
--- a/examples/cancellation.ts
+++ b/examples/cancellation.ts
@@ -17,7 +17,7 @@ async function main() {
const stream = await client.completions.create({
prompt: `${Anthropic.HUMAN_PROMPT}${question}${Anthropic.AI_PROMPT}:`,
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
stream: true,
max_tokens_to_sample: 500,
});
diff --git a/examples/demo.ts b/examples/demo.ts
index 50ccbcea..5907fa2d 100755
--- a/examples/demo.ts
+++ b/examples/demo.ts
@@ -7,7 +7,7 @@ const client = new Anthropic(); // gets API Key from environment variable ANTHROPIC_API_KEY
async function main() {
const result = await client.completions.create({
prompt: `${Anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? ${Anthropic.AI_PROMPT}`,
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
max_tokens_to_sample: 300,
});
console.log(result.completion);
diff --git a/examples/raw-streaming.ts b/examples/raw-streaming.ts
index b7157393..12053685 100755
--- a/examples/raw-streaming.ts
+++ b/examples/raw-streaming.ts
@@ -9,7 +9,7 @@ async function main() {
const stream = await client.completions.create({
prompt: `${Anthropic.HUMAN_PROMPT}${question}${Anthropic.AI_PROMPT}:`,
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
stream: true,
max_tokens_to_sample: 500,
});
diff --git a/examples/streaming.ts b/examples/streaming.ts
index a359b5b8..9ac2da60 100755
--- a/examples/streaming.ts
+++ b/examples/streaming.ts
@@ -13,7 +13,7 @@ async function main() {
content: `Hey Claude! How can I recursively list all files in a directory in Rust?`,
},
],
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
max_tokens: 1024,
})
// Once a content block is fully streamed, this event will fire
diff --git a/packages/bedrock-sdk/README.md b/packages/bedrock-sdk/README.md
index 6bd4efd4..596fda14 100644
--- a/packages/bedrock-sdk/README.md
+++ b/packages/bedrock-sdk/README.md
@@ -30,7 +30,7 @@ const anthropic = new AnthropicBedrock();
async function main() {
const completion = await anthropic.completions.create({
- model: 'anthropic.claude-instant-v1',
+ model: 'anthropic.claude-3-opus-20240229-v1:0',
prompt: `${Anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? ${Anthropic.AI_PROMPT}`,
stop_sequences: [Anthropic.HUMAN_PROMPT],
max_tokens_to_sample: 800,
diff --git a/packages/bedrock-sdk/examples/demo.ts b/packages/bedrock-sdk/examples/demo.ts
index 6efcacc5..5f03645b 100644
--- a/packages/bedrock-sdk/examples/demo.ts
+++ b/packages/bedrock-sdk/examples/demo.ts
@@ -12,7 +12,7 @@ const anthropic = new AnthropicBedrock();
async function main() {
const completion = await anthropic.completions.create({
- model: 'anthropic.claude-instant-v1',
+ model: 'anthropic.claude-3-opus-20240229-v1:0',
prompt: `${Anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? ${Anthropic.AI_PROMPT}`,
stop_sequences: [Anthropic.HUMAN_PROMPT],
max_tokens_to_sample: 800,
diff --git a/src/index.ts b/src/index.ts
index 30263e32..e3ac7dba 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -241,6 +241,7 @@ export namespace Anthropic {
export import ContentBlockDeltaEvent = API.ContentBlockDeltaEvent;
export import ContentBlockStartEvent = API.ContentBlockStartEvent;
export import ContentBlockStopEvent = API.ContentBlockStopEvent;
+ export import ImageBlockParam = API.ImageBlockParam;
export import Message = API.Message;
export import MessageDeltaEvent = API.MessageDeltaEvent;
export import MessageDeltaUsage = API.MessageDeltaUsage;
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 6e06a8a4..e916f3b3 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -69,6 +69,11 @@ export interface Completion {
*/
stop_reason: string | null;
+ /**
+ * Object type.
+ *
+ * For Text Completions, this is always `"completion"`.
+ */
type: 'completion';
}
@@ -86,16 +91,10 @@ export interface CompletionCreateParamsBase {
/**
* The model that will complete your prompt.
*
- * As we improve Claude, we develop new versions of it that you can query. The
- * `model` parameter controls which version of Claude responds to your request.
- * Right now we offer two model families: Claude, and Claude Instant. You can use
- * them by setting `model` to `"claude-2.1"` or `"claude-instant-1.2"`,
- * respectively.
- *
- * See [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
+ * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
* additional details and options.
*/
- model: (string & {}) | 'claude-2.1' | 'claude-instant-1';
+ model: (string & {}) | 'claude-3-opus-20240229' | 'claude-2.1' | 'claude-instant-1';
/**
* The prompt that you want Claude to complete.
@@ -141,8 +140,12 @@ export interface CompletionCreateParamsBase {
/**
* Amount of randomness injected into the response.
*
- * Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
- * multiple choice, and closer to 1 for creative and generative tasks.
+ * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
+ * for analytical / multiple choice, and closer to `1.0` for creative and
+ * generative tasks.
+ *
+ * Note that even with `temperature` of `0.0`, the results will not be fully
+ * deterministic.
*/
temperature?: number;
@@ -151,6 +154,9 @@ export interface CompletionCreateParamsBase {
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
*/
top_k?: number;
@@ -161,6 +167,9 @@ export interface CompletionCreateParamsBase {
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
*/
top_p?: number;
}
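A short sketch illustrating the sampling-parameter guidance documented above (the prompt is illustrative; per these docs, prefer `temperature` and leave `top_k`/`top_p` unset unless you have an advanced use case):

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

async function main() {
  const result = await client.completions.create({
    model: 'claude-2.1',
    prompt: `${Anthropic.HUMAN_PROMPT} Answer with (A), (B), or (C) only.${Anthropic.AI_PROMPT}`,
    max_tokens_to_sample: 300,
    // Analytical / multiple-choice task, so keep temperature near 0.0.
    // Note: even at 0.0 the output is not fully deterministic.
    temperature: 0.0,
  });
  console.log(result.completion);
}

main();
```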
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 4a53acb8..14cacc30 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -12,6 +12,7 @@ export {
ContentBlockDeltaEvent,
ContentBlockStartEvent,
ContentBlockStopEvent,
+ ImageBlockParam,
Message,
MessageDeltaEvent,
MessageDeltaUsage,
diff --git a/src/resources/messages.ts b/src/resources/messages.ts
index b8531904..e7063586 100644
--- a/src/resources/messages.ts
+++ b/src/resources/messages.ts
@@ -12,11 +12,11 @@ export class Messages extends APIResource {
/**
* Create a Message.
*
- * Send a structured list of input messages, and the model will generate the next
- * message in the conversation.
+ * Send a structured list of input messages with text and/or image content, and the
+ * model will generate the next message in the conversation.
*
- * Messages can be used for either single queries to the model or for multi-turn
- * conversations.
+ * The Messages API can be used for either single queries or stateless
+ * multi-turn conversations.
*/
create(body: MessageCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Message>;
create(
@@ -75,6 +75,22 @@ export interface ContentBlockStopEvent {
type: 'content_block_stop';
}
+export interface ImageBlockParam {
+ source: ImageBlockParam.Source;
+
+ type?: 'image';
+}
+
+export namespace ImageBlockParam {
+ export interface Source {
+ data: string;
+
+ media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp';
+
+ type?: 'base64';
+ }
+}
+
export interface Message {
/**
* Unique object identifier.
@@ -87,7 +103,7 @@ export interface Message {
* Content generated by the model.
*
* This is an array of content blocks, each of which has a `type` that determines
- * its shape. Currently, the only `type` available is `"text"`.
+ * its shape. Currently, the only `type` in responses is `"text"`.
*
* Example:
*
@@ -107,10 +123,7 @@ export interface Message {
* "role": "user",
* "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
* },
- * {
- * "role": "assistant",
- * "content": "The best answer is ("
- * }
+ * { "role": "assistant", "content": "The best answer is (" }
* ]
* ```
*
@@ -152,16 +165,33 @@ export interface Message {
stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
/**
- * Which custom stop sequence was generated.
+ * Which custom stop sequence was generated, if any.
*
- * This value will be non-null if one of your custom stop sequences was generated.
+ * This value will be a non-null string if one of your custom stop sequences was
+ * generated.
*/
stop_sequence: string | null;
+ /**
+ * Object type.
+ *
+ * For Messages, this is always `"message"`.
+ */
type: 'message';
/**
- * Container for the number of tokens used.
+ * Billing and rate-limit usage.
+ *
+ * Anthropic's API bills and rate-limits by token counts, as tokens represent the
+ * underlying cost to our systems.
+ *
+ * Under the hood, the API transforms requests into a format suitable for the
+ * model. The model's output then goes through a parsing stage before becoming an
+ * API response. As a result, the token counts in `usage` will not match one-to-one
+ * with the exact visible content of an API request or response.
+ *
+ * For example, `output_tokens` will be non-zero, even for an empty string response
+ * from Claude.
*/
usage: Usage;
}
@@ -172,7 +202,18 @@ export interface MessageDeltaEvent {
type: 'message_delta';
/**
- * Container for the number of tokens used.
+ * Billing and rate-limit usage.
+ *
+ * Anthropic's API bills and rate-limits by token counts, as tokens represent the
+ * underlying cost to our systems.
+ *
+ * Under the hood, the API transforms requests into a format suitable for the
+ * model. The model's output then goes through a parsing stage before becoming an
+ * API response. As a result, the token counts in `usage` will not match one-to-one
+ * with the exact visible content of an API request or response.
+ *
+ * For example, `output_tokens` will be non-zero, even for an empty string response
+ * from Claude.
*/
usage: MessageDeltaUsage;
}
@@ -193,7 +234,7 @@ export interface MessageDeltaUsage {
}
export interface MessageParam {
- content: string | Array<TextBlock>;
+ content: string | Array<TextBlock | ImageBlockParam>;
role: 'user' | 'assistant';
}
@@ -250,8 +291,7 @@ export interface MessageCreateParamsBase {
* only specifies the absolute maximum number of tokens to generate.
*
* Different models have different maximum values for this parameter. See
- * [input and output sizes](https://docs.anthropic.com/claude/reference/input-and-output-sizes)
- * for details.
+ * [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
*/
max_tokens: number;
@@ -291,15 +331,18 @@ export interface MessageCreateParamsBase {
*
* ```json
* [
- * { "role": "user", "content": "Please describe yourself using only JSON" },
- * { "role": "assistant", "content": "Here is my JSON description:\n{" }
+ * {
+ * "role": "user",
+ * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ * },
+ * { "role": "assistant", "content": "The best answer is (" }
* ]
* ```
*
* Each input message `content` may be either a single `string` or an array of
- * content blocks, where each block has a specific `type`. Using a `string` is
- * shorthand for an array of one content block of type `"text"`. The following
- * input messages are equivalent:
+ * content blocks, where each block has a specific `type`. Using a `string` for
+ * `content` is shorthand for an array of one content block of type `"text"`. The
+ * following input messages are equivalent:
*
* ```json
* { "role": "user", "content": "Hello, Claude" }
@@ -309,27 +352,42 @@ export interface MessageCreateParamsBase {
* { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
* ```
*
- * See our
- * [guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- * for more details on how to best construct prompts.
+ * Starting with Claude 3 models, you can also send image content blocks:
+ *
+ * ```json
+ * {
+ * "role": "user",
+ * "content": [
+ * {
+ * "type": "image",
+ * "source": {
+ * "type": "base64",
+ * "media_type": "image/jpeg",
+ * "data": "/9j/4AAQSkZJRg..."
+ * }
+ * },
+ * { "type": "text", "text": "What is in this image?" }
+ * ]
+ * }
+ * ```
+ *
+ * We currently support the `base64` source type for images, and the `image/jpeg`,
+ * `image/png`, `image/gif`, and `image/webp` media types.
+ *
+ * See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
+ * for more input examples.
*
* Note that if you want to include a
- * [system prompt](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts),
- * you can use the top-level `system` parameter — there is no `"system"` role for
- * input messages in the Messages API.
+ * [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
+ * use the top-level `system` parameter — there is no `"system"` role for input
+ * messages in the Messages API.
*/
messages: Array<MessageParam>;
/**
* The model that will complete your prompt.
*
- * As we improve Claude, we develop new versions of it that you can query. The
- * `model` parameter controls which version of Claude responds to your request.
- * Right now we offer two model families: Claude, and Claude Instant. You can use
- * them by setting `model` to `"claude-2.1"` or `"claude-instant-1.2"`,
- * respectively.
- *
- * See [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
+ * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
* additional details and options.
*/
model: string;
@@ -365,15 +423,19 @@ export interface MessageCreateParamsBase {
*
* A system prompt is a way of providing context and instructions to Claude, such
* as specifying a particular goal or role. See our
- * [guide to system prompts](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts).
+ * [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
*/
system?: string;
/**
* Amount of randomness injected into the response.
*
- * Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
- * multiple choice, and closer to 1 for creative and generative tasks.
+ * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
+ * for analytical / multiple choice, and closer to `1.0` for creative and
+ * generative tasks.
+ *
+ * Note that even with `temperature` of `0.0`, the results will not be fully
+ * deterministic.
*/
temperature?: number;
@@ -382,6 +444,9 @@ export interface MessageCreateParamsBase {
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
*/
top_k?: number;
@@ -392,6 +457,9 @@ export interface MessageCreateParamsBase {
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
*/
top_p?: number;
}
@@ -443,8 +511,7 @@ export interface MessageStreamParams {
* only specifies the absolute maximum number of tokens to generate.
*
* Different models have different maximum values for this parameter. See
- * [input and output sizes](https://docs.anthropic.com/claude/reference/input-and-output-sizes)
- * for details.
+ * [models](https://docs.anthropic.com/claude/docs/models-overview) for details.
*/
max_tokens: number;
@@ -484,15 +551,18 @@ export interface MessageStreamParams {
*
* ```json
* [
- * { "role": "user", "content": "Please describe yourself using only JSON" },
- * { "role": "assistant", "content": "Here is my JSON description:\n{" }
+ * {
+ * "role": "user",
+ * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ * },
+ * { "role": "assistant", "content": "The best answer is (" }
* ]
* ```
*
* Each input message `content` may be either a single `string` or an array of
- * content blocks, where each block has a specific `type`. Using a `string` is
- * shorthand for an array of one content block of type `"text"`. The following
- * input messages are equivalent:
+ * content blocks, where each block has a specific `type`. Using a `string` for
+ * `content` is shorthand for an array of one content block of type `"text"`. The
+ * following input messages are equivalent:
*
* ```json
* { "role": "user", "content": "Hello, Claude" }
@@ -502,27 +572,42 @@ export interface MessageStreamParams {
* { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
* ```
*
- * See our
- * [guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
- * for more details on how to best construct prompts.
+ * Starting with Claude 3 models, you can also send image content blocks:
+ *
+ * ```json
+ * {
+ * "role": "user",
+ * "content": [
+ * {
+ * "type": "image",
+ * "source": {
+ * "type": "base64",
+ * "media_type": "image/jpeg",
+ * "data": "/9j/4AAQSkZJRg..."
+ * }
+ * },
+ * { "type": "text", "text": "What is in this image?" }
+ * ]
+ * }
+ * ```
+ *
+ * We currently support the `base64` source type for images, and the `image/jpeg`,
+ * `image/png`, `image/gif`, and `image/webp` media types.
+ *
+ * See [examples](https://docs.anthropic.com/claude/reference/messages-examples)
+ * for more input examples.
*
* Note that if you want to include a
- * [system prompt](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts),
- * you can use the top-level `system` parameter — there is no `"system"` role for
- * input messages in the Messages API.
+ * [system prompt](https://docs.anthropic.com/claude/docs/system-prompts), you can
+ * use the top-level `system` parameter — there is no `"system"` role for input
+ * messages in the Messages API.
*/
messages: Array<MessageParam>;
/**
* The model that will complete your prompt.
*
- * As we improve Claude, we develop new versions of it that you can query. The
- * `model` parameter controls which version of Claude responds to your request.
- * Right now we offer two model families: Claude, and Claude Instant. You can use
- * them by setting `model` to `"claude-2.1"` or `"claude-instant-1.2"`,
- * respectively.
- *
- * See [models](https://docs.anthropic.com/claude/reference/selecting-a-model) for
+ * See [models](https://docs.anthropic.com/claude/docs/models-overview) for
* additional details and options.
*/
model: string;
@@ -550,15 +635,19 @@ export interface MessageStreamParams {
*
* A system prompt is a way of providing context and instructions to Claude, such
* as specifying a particular goal or role. See our
- * [guide to system prompts](https://docs.anthropic.com/claude/docs/how-to-use-system-prompts).
+ * [guide to system prompts](https://docs.anthropic.com/claude/docs/system-prompts).
*/
system?: string;
/**
* Amount of randomness injected into the response.
*
- * Defaults to 1. Ranges from 0 to 1. Use temp closer to 0 for analytical /
- * multiple choice, and closer to 1 for creative and generative tasks.
+ * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
+ * for analytical / multiple choice, and closer to `1.0` for creative and
+ * generative tasks.
+ *
+ * Note that even with `temperature` of `0.0`, the results will not be fully
+ * deterministic.
*/
temperature?: number;
@@ -567,6 +656,9 @@ export interface MessageStreamParams {
*
* Used to remove "long tail" low probability responses.
* [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
*/
top_k?: number;
@@ -577,6 +669,9 @@ export interface MessageStreamParams {
* for each subsequent token in decreasing probability order and cut it off once it
* reaches a particular probability specified by `top_p`. You should either alter
* `temperature` or `top_p`, but not both.
+ *
+ * Recommended for advanced use cases only. You usually only need to use
+ * `temperature`.
*/
top_p?: number;
}
@@ -602,6 +697,7 @@ export namespace Messages {
export import ContentBlockDeltaEvent = MessagesAPI.ContentBlockDeltaEvent;
export import ContentBlockStartEvent = MessagesAPI.ContentBlockStartEvent;
export import ContentBlockStopEvent = MessagesAPI.ContentBlockStopEvent;
+ export import ImageBlockParam = MessagesAPI.ImageBlockParam;
export import Message = MessagesAPI.Message;
export import MessageDeltaEvent = MessagesAPI.MessageDeltaEvent;
export import MessageDeltaUsage = MessagesAPI.MessageDeltaUsage;
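The new `ImageBlockParam` export lets callers type image blocks ahead of a request; a minimal sketch (the base64 data is a truncated placeholder):

```ts
import Anthropic from '@anthropic-ai/sdk';

// A typed image block, matching the shape added in src/resources/messages.ts.
const imageBlock: Anthropic.ImageBlockParam = {
  type: 'image',
  source: {
    type: 'base64',
    media_type: 'image/png',
    data: 'iVBORw0KGgo...', // placeholder
  },
};

// MessageParam content now accepts image blocks alongside text blocks.
const param: Anthropic.MessageParam = {
  role: 'user',
  content: [imageBlock, { type: 'text', text: 'Describe this image.' }],
};
```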
diff --git a/tests/api-resources/MessageStream.test.ts b/tests/api-resources/MessageStream.test.ts
index bb8861aa..35cf5c1d 100644
--- a/tests/api-resources/MessageStream.test.ts
+++ b/tests/api-resources/MessageStream.test.ts
@@ -119,7 +119,7 @@ describe('MessageStream class', () => {
id: 'msg_01hhptzfxdaeehfxfv070yb6b8',
role: 'assistant',
content: [{ type: 'text', text: 'Hello there!' }],
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
usage: { output_tokens: 6, input_tokens: 10 },
@@ -128,7 +128,7 @@ describe('MessageStream class', () => {
const stream = anthropic.messages.stream({
max_tokens: 1024,
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
messages: [{ role: 'user', content: 'Say hello there!' }],
});
@@ -182,22 +182,22 @@ describe('MessageStream class', () => {
},
{
"args": [
- "{"type":"message_start","message":{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message_start","message":{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
{
"args": [
"{"type":"content_block_start","content_block":{"type":"text","text":""},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":""}],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":""}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
{
"args": [
"{"type":"content_block_delta","delta":{"type":"text_delta","text":"Hello"},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello"}],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
@@ -211,7 +211,7 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"content_block_delta","delta":{"type":"text_delta","text":" ther"},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello ther"}],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello ther"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
@@ -225,7 +225,7 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"content_block_delta","delta":{"type":"text_delta","text":"e!"},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
@@ -239,7 +239,7 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"content_block_stop","index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-2.1","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
@@ -252,26 +252,26 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"message_delta","usage":{"output_tokens":6},"delta":{"stop_reason":"end_turn","stop_sequence":null}}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-2.1","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
{
"args": [
"{"type":"message_stop"}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-2.1","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "streamEvent",
},
{
"args": [
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-2.1","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "message",
},
{
"args": [
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-2.1","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
],
"type": "finalMessage",
},
@@ -293,7 +293,7 @@ describe('MessageStream class', () => {
},
],
"id": "msg_01hhptzfxdaeehfxfv070yb6b8",
- "model": "claude-2.1",
+ "model": "claude-3-opus-20240229",
"role": "assistant",
"stop_reason": "end_turn",
"stop_sequence": null,
@@ -313,7 +313,7 @@ describe('MessageStream class', () => {
const stream = anthropic.messages.stream({
max_tokens: 1024,
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
messages: [{ role: 'user', content: 'Say hello there!' }],
});
@@ -323,7 +323,7 @@ describe('MessageStream class', () => {
id: 'msg_01hhptzfxdaeehfxfv070yb6b8',
role: 'assistant',
content: [{ type: 'text', text: 'Hello there!' }],
- model: 'claude-2.1',
+ model: 'claude-3-opus-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
usage: { output_tokens: 6, input_tokens: 10 },
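The events asserted in this snapshot map onto the public streaming helper; a minimal usage sketch:

```ts
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic();

async function main() {
  const stream = anthropic.messages.stream({
    model: 'claude-3-opus-20240229',
    max_tokens: 1024,
    messages: [{ role: 'user', content: 'Say hello there!' }],
  });

  // Fired as content_block_delta events arrive.
  stream.on('text', (text) => process.stdout.write(text));

  // Resolves with the accumulated Message once message_stop is received.
  const message = await stream.finalMessage();
  console.log('\n', message.usage); // e.g. { output_tokens: 6, input_tokens: 10 }
}

main();
```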
diff --git a/tests/api-resources/messages.test.ts b/tests/api-resources/messages.test.ts
index ac2b1c96..29d53854 100644
--- a/tests/api-resources/messages.test.ts
+++ b/tests/api-resources/messages.test.ts
@@ -12,8 +12,8 @@ describe('resource messages', () => {
test('create: only required params', async () => {
const responsePromise = anthropic.messages.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'In one sentence, what is good about the color blue?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, world' }],
+ model: 'claude-3-opus-20240229',
});
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
@@ -27,8 +27,8 @@ describe('resource messages', () => {
test('create: required and optional params', async () => {
const response = await anthropic.messages.create({
max_tokens: 1024,
- messages: [{ role: 'user', content: 'In one sentence, what is good about the color blue?' }],
- model: 'claude-2.1',
+ messages: [{ role: 'user', content: 'Hello, world' }],
+ model: 'claude-3-opus-20240229',
metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
stop_sequences: ['string', 'string', 'string'],
stream: false,