diff --git a/README.md b/README.md
index e5d25135..5c07abfc 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ const client = new Anthropic({
 });
 
 async function main() {
-  const message = await anthropic.messages.create({
+  const message = await client.messages.create({
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude' }],
     model: 'claude-3-opus-20240229',
@@ -46,7 +46,7 @@ import Anthropic from '@anthropic-ai/sdk';
 
 const client = new Anthropic();
 
-const stream = await anthropic.messages.create({
+const stream = await client.messages.create({
   max_tokens: 1024,
   messages: [{ role: 'user', content: 'Hello, Claude' }],
   model: 'claude-3-opus-20240229',
@@ -78,7 +78,7 @@ async function main() {
     messages: [{ role: 'user', content: 'Hello, Claude' }],
     model: 'claude-3-opus-20240229',
   };
-  const message: Anthropic.Message = await anthropic.messages.create(params);
+  const message: Anthropic.Message = await client.messages.create(params);
 }
 
 main();
@@ -149,7 +149,7 @@ a subclass of `APIError` will be thrown:
 <!-- prettier-ignore -->
 ```ts
 async function main() {
-  const message = await anthropic.messages
+  const message = await client.messages
     .create({
       max_tokens: 1024,
       messages: [{ role: 'user', content: 'Hello, Claude' }],
@@ -198,7 +198,7 @@ const client = new Anthropic({
 });
 
 // Or, configure per-request:
-await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
+await client.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
   maxRetries: 5,
 });
 ```
@@ -215,7 +215,7 @@ const client = new Anthropic({
 });
 
 // Override per-request:
-await anthropic.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
+await client.messages.create({ max_tokens: 1024, messages: [{ role: 'user', content: 'Hello, Claude' }], model: 'claude-3-opus-20240229' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -237,7 +237,7 @@ import Anthropic from '@anthropic-ai/sdk';
 
 const client = new Anthropic();
 
-const message = await anthropic.messages.create(
+const message = await client.messages.create(
   {
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude' }],
@@ -259,7 +259,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 ```ts
 const client = new Anthropic();
 
-const response = await anthropic.messages
+const response = await client.messages
   .create({
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude' }],
@@ -269,7 +269,7 @@ const response = await anthropic.messages
 console.log(response.headers.get('X-My-Header'));
 console.log(response.statusText); // access the underlying Response object
 
-const { data: message, response: raw } = await anthropic.messages
+const { data: message, response: raw } = await client.messages
   .create({
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude' }],
@@ -381,7 +381,7 @@ const client = new Anthropic({
 });
 
 // Override per-request:
-await anthropic.messages.create(
+await client.messages.create(
   {
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude' }],
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
index 4508a959..aa326cf2 100644
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -3,14 +3,14 @@
 import Anthropic from '@anthropic-ai/sdk';
 import { Response } from 'node-fetch';
 
-const anthropic = new Anthropic({
+const client = new Anthropic({
   apiKey: 'my-anthropic-api-key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });
 
 describe('resource completions', () => {
   test('create: only required params', async () => {
-    const responsePromise = anthropic.completions.create({
+    const responsePromise = client.completions.create({
       max_tokens_to_sample: 256,
       model: 'string',
       prompt: '\n\nHuman: Hello, world!\n\nAssistant:',
@@ -25,7 +25,7 @@ describe('resource completions', () => {
   });
 
   test('create: required and optional params', async () => {
-    const response = await anthropic.completions.create({
+    const response = await client.completions.create({
       max_tokens_to_sample: 256,
       model: 'string',
       prompt: '\n\nHuman: Hello, world!\n\nAssistant:',
diff --git a/tests/api-resources/messages.test.ts b/tests/api-resources/messages.test.ts
index 7f71c7b7..c6e76bea 100644
--- a/tests/api-resources/messages.test.ts
+++ b/tests/api-resources/messages.test.ts
@@ -3,14 +3,14 @@
 import Anthropic from '@anthropic-ai/sdk';
 import { Response } from 'node-fetch';
 
-const anthropic = new Anthropic({
+const client = new Anthropic({
   apiKey: 'my-anthropic-api-key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });
 
 describe('resource messages', () => {
   test('create: only required params', async () => {
-    const responsePromise = anthropic.messages.create({
+    const responsePromise = client.messages.create({
       max_tokens: 1024,
       messages: [{ role: 'user', content: 'Hello, world' }],
       model: 'claude-3-5-sonnet-20240620',
@@ -25,7 +25,7 @@ describe('resource messages', () => {
   });
 
   test('create: required and optional params', async () => {
-    const response = await anthropic.messages.create({
+    const response = await client.messages.create({
       max_tokens: 1024,
       messages: [{ role: 'user', content: 'Hello, world' }],
       model: 'claude-3-5-sonnet-20240620',