diff --git a/README.md b/README.md
index 6707707b2..1ff9c757d 100644
--- a/README.md
+++ b/README.md
@@ -100,13 +100,30 @@ Documentation for each method, request param, and response field are available i
> [!IMPORTANT]
> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217).
+### Polling Helpers
+
+When interacting with the API, some actions such as starting a Run may take time to complete. The SDK includes
+helper functions which will poll the status until it reaches a terminal state and then return the resulting object.
+If an API method results in an action which could benefit from polling there will be a corresponding version of the
+method ending in 'AndPoll'.
+
+For instance to create a Run and poll until it reaches a terminal state you can run:
+
+```ts
+const run = await openai.beta.threads.runs.createAndPoll(thread.id, {
+ assistant_id: assistantId,
+});
+```
+
+More information on the lifecycle of a Run can be found in the [Run Lifecycle Documentation](https://platform.openai.com/docs/assistants/how-it-works/run-lifecycle)
+
### Streaming Helpers
The SDK also includes helpers to process streams and handle the incoming events.
```ts
const run = openai.beta.threads.runs
- .createAndStream(thread.id, {
+ .stream(thread.id, {
assistant_id: assistant.id,
})
.on('textCreated', (text) => process.stdout.write('\nassistant > '))
diff --git a/api.md b/api.md
index 504a103c7..2f82dd17b 100644
--- a/api.md
+++ b/api.md
@@ -224,6 +224,7 @@ Methods:
- client.beta.threads.update(threadId, { ...params }) -> Thread
- client.beta.threads.del(threadId) -> ThreadDeleted
- client.beta.threads.createAndRun({ ...params }) -> Run
+- client.beta.threads.createAndRunPoll(body, options?) -> Promise<Threads.Run>
- client.beta.threads.createAndRunStream(body, options?) -> AssistantStream
### Runs
@@ -242,7 +243,11 @@ Methods:
- client.beta.threads.runs.list(threadId, { ...params }) -> RunsPage
- client.beta.threads.runs.cancel(threadId, runId) -> Run
- client.beta.threads.runs.submitToolOutputs(threadId, runId, { ...params }) -> Run
+- client.beta.threads.runs.createAndPoll(threadId, body, options?) -> Promise<Run>
- client.beta.threads.runs.createAndStream(threadId, body, options?) -> AssistantStream
+- client.beta.threads.runs.poll(threadId, runId, options?) -> Promise<Run>
+- client.beta.threads.runs.stream(threadId, body, options?) -> AssistantStream
+- client.beta.threads.runs.submitToolOutputsAndPoll(threadId, runId, body, options?) -> Promise<Run>
- client.beta.threads.runs.submitToolOutputsStream(threadId, runId, body, options?) -> AssistantStream
#### Steps
diff --git a/examples/assistant-stream-raw.ts b/examples/assistant-stream-raw.ts
old mode 100644
new mode 100755
diff --git a/examples/assistant-stream.ts b/examples/assistant-stream.ts
old mode 100644
new mode 100755
index 36c4ed152..6c71bf23b
--- a/examples/assistant-stream.ts
+++ b/examples/assistant-stream.ts
@@ -31,7 +31,7 @@ async function main() {
console.log('Created thread with Id: ' + threadId);
const run = openai.beta.threads.runs
- .createAndStream(threadId, {
+ .stream(threadId, {
assistant_id: assistantId,
})
//Subscribe to streaming events and log them
diff --git a/examples/assistants.ts b/examples/assistants.ts
old mode 100644
new mode 100755
index bbc2f80ce..40238ac86
--- a/examples/assistants.ts
+++ b/examples/assistants.ts
@@ -1,7 +1,6 @@
#!/usr/bin/env -S npm run tsn -T
import OpenAI from 'openai';
-import { sleep } from 'openai/core';
/**
* Example of polling for a complete response from an assistant
@@ -32,24 +31,17 @@ async function main() {
let threadId = thread.id;
console.log('Created thread with Id: ' + threadId);
- const run = await openai.beta.threads.runs.create(thread.id, {
+ const run = await openai.beta.threads.runs.createAndPoll(thread.id, {
assistant_id: assistantId,
additional_instructions: 'Please address the user as Jane Doe. The user has a premium account.',
});
- console.log('Created run with Id: ' + run.id);
-
- while (true) {
- const result = await openai.beta.threads.runs.retrieve(thread.id, run.id);
- if (result.status == 'completed') {
- const messages = await openai.beta.threads.messages.list(thread.id);
- for (const message of messages.getPaginatedItems()) {
- console.log(message);
- }
- break;
- } else {
- console.log('Waiting for completion. Current status: ' + result.status);
- await sleep(5000);
+ console.log('Run finished with status: ' + run.status);
+
+ if (run.status == 'completed') {
+ const messages = await openai.beta.threads.messages.list(thread.id);
+ for (const message of messages.getPaginatedItems()) {
+ console.log(message);
}
}
}
diff --git a/helpers.md b/helpers.md
index 9a94a618e..7a34c3023 100644
--- a/helpers.md
+++ b/helpers.md
@@ -13,7 +13,7 @@ More information can be found in the documentation: [Assistant Streaming](https:
```ts
const run = openai.beta.threads.runs
- .createAndStream(thread.id, {
+ .stream(thread.id, {
assistant_id: assistant.id,
})
.on('textCreated', (text) => process.stdout.write('\nassistant > '))
@@ -41,7 +41,7 @@ const run = openai.beta.threads.runs
There are three helper methods for creating streams:
```ts
-openai.beta.threads.runs.createAndStream();
+openai.beta.threads.runs.stream();
```
This method can be used to start and stream the response to an existing run with an associated thread
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index 43ee8c7e7..7d4457319 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -37,5 +37,6 @@ export namespace Beta {
export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+ export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams;
export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams;
}
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index 7f35730fb..e43ff7315 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -28,6 +28,7 @@ export {
ThreadCreateAndRunParams,
ThreadCreateAndRunParamsNonStreaming,
ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunPollParams,
ThreadCreateAndRunStreamParams,
Threads,
} from './threads/index';
diff --git a/src/resources/beta/threads/index.ts b/src/resources/beta/threads/index.ts
index 097a52819..ac2f9a4fa 100644
--- a/src/resources/beta/threads/index.ts
+++ b/src/resources/beta/threads/index.ts
@@ -36,10 +36,13 @@ export {
RunCreateParamsStreaming,
RunUpdateParams,
RunListParams,
+ RunCreateAndPollParams,
RunCreateAndStreamParams,
+ RunStreamParams,
RunSubmitToolOutputsParams,
RunSubmitToolOutputsParamsNonStreaming,
RunSubmitToolOutputsParamsStreaming,
+ RunSubmitToolOutputsAndPollParams,
RunSubmitToolOutputsStreamParams,
RunsPage,
Runs,
@@ -52,6 +55,7 @@ export {
ThreadCreateAndRunParams,
ThreadCreateAndRunParamsNonStreaming,
ThreadCreateAndRunParamsStreaming,
+ ThreadCreateAndRunPollParams,
ThreadCreateAndRunStreamParams,
Threads,
} from './threads';
diff --git a/src/resources/beta/threads/runs/index.ts b/src/resources/beta/threads/runs/index.ts
index 636b5d850..c9b2d1ef5 100644
--- a/src/resources/beta/threads/runs/index.ts
+++ b/src/resources/beta/threads/runs/index.ts
@@ -31,10 +31,13 @@ export {
RunCreateParamsStreaming,
RunUpdateParams,
RunListParams,
+ RunCreateAndPollParams,
RunCreateAndStreamParams,
+ RunStreamParams,
RunSubmitToolOutputsParams,
RunSubmitToolOutputsParamsNonStreaming,
RunSubmitToolOutputsParamsStreaming,
+ RunSubmitToolOutputsAndPollParams,
RunSubmitToolOutputsStreamParams,
RunsPage,
Runs,
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 54c671131..5dfc7d595 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -5,6 +5,7 @@ import { APIPromise } from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
import { AssistantStream, RunCreateParamsBaseStream } from 'openai/lib/AssistantStream';
+import { sleep } from 'openai/core';
import { RunSubmitToolOutputsParamsStream } from 'openai/lib/AssistantStream';
import * as RunsAPI from 'openai/resources/beta/threads/runs/runs';
import * as AssistantsAPI from 'openai/resources/beta/assistants/assistants';
@@ -102,8 +103,24 @@ export class Runs extends APIResource {
});
}
+ /**
+ * A helper to create a run and poll for a terminal state. More information on Run
+ * lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async createAndPoll(
+ threadId: string,
+ body: RunCreateParamsNonStreaming,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise<Run> {
+ const run = await this.create(threadId, body, options);
+ return await this.poll(threadId, run.id, options);
+ }
+
/**
* Create a Run stream
+ *
+ * @deprecated use `stream` instead
*/
createAndStream(
threadId: string,
@@ -113,6 +130,66 @@ export class Runs extends APIResource {
return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
}
+ /**
+ * A helper to poll a run status until it reaches a terminal state. More
+ * information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async poll(
+ threadId: string,
+ runId: string,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise<Run> {
+ const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };
+
+ if (options?.pollIntervalMs) {
+ headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();
+ }
+
+ while (true) {
+ const { data: run, response } = await this.retrieve(threadId, runId, {
+ ...options,
+ headers: { ...options?.headers, ...headers },
+ }).withResponse();
+
+ switch (run.status) {
+ //If we are in any sort of intermediate state we poll
+ case 'queued':
+ case 'in_progress':
+ case 'cancelling':
+ let sleepInterval = 5000;
+
+ if (options?.pollIntervalMs) {
+ sleepInterval = options.pollIntervalMs;
+ } else {
+ const headerInterval = response.headers.get('openai-poll-after-ms');
+ if (headerInterval) {
+ const headerIntervalMs = parseInt(headerInterval);
+ if (!isNaN(headerIntervalMs)) {
+ sleepInterval = headerIntervalMs;
+ }
+ }
+ }
+ await sleep(sleepInterval);
+ break;
+ //We return the run in any terminal state.
+ case 'requires_action':
+ case 'cancelled':
+ case 'completed':
+ case 'failed':
+ case 'expired':
+ return run;
+ }
+ }
+ }
+
+ /**
+ * Create a Run stream
+ */
+ stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream {
+ return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
+ }
+
/**
* When a run has the `status: "requires_action"` and `required_action.type` is
* `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
@@ -151,9 +228,25 @@ export class Runs extends APIResource {
}) as APIPromise<Run> | APIPromise<Stream<AssistantStreamEvent>>;
}
+ /**
+ * A helper to submit a tool output to a run and poll for a terminal run state.
+ * More information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async submitToolOutputsAndPoll(
+ threadId: string,
+ runId: string,
+ body: RunSubmitToolOutputsParamsNonStreaming,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise<Run> {
+ const run = await this.submitToolOutputs(threadId, runId, body, options);
+ return await this.poll(threadId, run.id, options);
+ }
+
/**
* Submit the tool outputs from a previous run and stream the run to a terminal
- * state.
+ * state. More information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/
submitToolOutputsStream(
threadId: string,
@@ -529,6 +622,58 @@ export interface RunListParams extends CursorPageParams {
order?: 'asc' | 'desc';
}
+export interface RunCreateAndPollParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Appends additional instructions at the end of the instructions for the run. This
+ * is useful for modifying the behavior on a per-run basis without overriding other
+ * instructions.
+ */
+ additional_instructions?: string | null;
+
+ /**
+ * Overrides the
+ * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ * of the assistant. This is useful for modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool> | null;
+}
+
export interface RunCreateAndStreamParams {
/**
* The ID of the
@@ -581,6 +726,58 @@ export interface RunCreateAndStreamParams {
tools?: Array | null;
}
+export interface RunStreamParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Appends additional instructions at the end of the instructions for the run. This
+ * is useful for modifying the behavior on a per-run basis without overriding other
+ * instructions.
+ */
+ additional_instructions?: string | null;
+
+ /**
+ * Overrides the
+ * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
+ * of the assistant. This is useful for modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool> | null;
+}
+
export type RunSubmitToolOutputsParams =
| RunSubmitToolOutputsParamsNonStreaming
| RunSubmitToolOutputsParamsStreaming;
@@ -635,6 +832,28 @@ export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutput
stream: true;
}
+export interface RunSubmitToolOutputsAndPollParams {
+ /**
+ * A list of tools for which the outputs are being submitted.
+ */
+ tool_outputs: Array<RunSubmitToolOutputsAndPollParams.ToolOutput>;
+}
+
+export namespace RunSubmitToolOutputsAndPollParams {
+ export interface ToolOutput {
+ /**
+ * The output of the tool call to be submitted to continue the run.
+ */
+ output?: string;
+
+ /**
+ * The ID of the tool call in the `required_action` object within the run object
+ * the output is being submitted for.
+ */
+ tool_call_id?: string;
+ }
+}
+
export interface RunSubmitToolOutputsStreamParams {
/**
* A list of tools for which the outputs are being submitted.
@@ -667,10 +886,13 @@ export namespace Runs {
export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
export import RunListParams = RunsAPI.RunListParams;
+ export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams;
export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
+ export import RunStreamParams = RunsAPI.RunStreamParams;
export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+ export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams;
export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
export import Steps = StepsAPI.Steps;
export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs;
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 9b4785850..1b4b3f7d5 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -92,6 +92,19 @@ export class Threads extends APIResource {
}) as APIPromise<Run> | APIPromise<Stream<AssistantStreamEvent>>;
}
+ /**
+ * A helper to create a thread, start a run and then poll for a terminal state.
+ * More information on Run lifecycles can be found here:
+ * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
+ */
+ async createAndRunPoll(
+ body: ThreadCreateAndRunParamsNonStreaming,
+ options?: Core.RequestOptions & { pollIntervalMs?: number },
+ ): Promise<Threads.Run> {
+ const run = await this.createAndRun(body, options);
+ return await this.runs.poll(run.thread_id, run.id, options);
+ }
+
/**
* Create a thread and stream the run back
*/
@@ -340,6 +353,113 @@ export interface ThreadCreateAndRunParamsStreaming extends ThreadCreateAndRunPar
stream: true;
}
+export interface ThreadCreateAndRunPollParams {
+ /**
+ * The ID of the
+ * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
+ * execute this run.
+ */
+ assistant_id: string;
+
+ /**
+ * Override the default system message of the assistant. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ instructions?: string | null;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+
+ /**
+ * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
+ * be used to execute this run. If a value is provided here, it will override the
+ * model associated with the assistant. If not, the model associated with the
+ * assistant will be used.
+ */
+ model?: string | null;
+
+ /**
+ * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+ * make the output more random, while lower values like 0.2 will make it more
+ * focused and deterministic.
+ */
+ temperature?: number | null;
+
+ /**
+ * If no thread is provided, an empty thread will be created.
+ */
+ thread?: ThreadCreateAndRunPollParams.Thread;
+
+ /**
+ * Override the tools the assistant can use for this run. This is useful for
+ * modifying the behavior on a per-run basis.
+ */
+ tools?: Array<
+ AssistantsAPI.CodeInterpreterTool | AssistantsAPI.RetrievalTool | AssistantsAPI.FunctionTool
+ > | null;
+}
+
+export namespace ThreadCreateAndRunPollParams {
+ /**
+ * If no thread is provided, an empty thread will be created.
+ */
+ export interface Thread {
+ /**
+ * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
+ * start the thread with.
+ */
+ messages?: Array<Thread.Message>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+
+ export namespace Thread {
+ export interface Message {
+ /**
+ * The content of the message.
+ */
+ content: string;
+
+ /**
+ * The role of the entity that is creating the message. Allowed values include:
+ *
+ * - `user`: Indicates the message is sent by an actual user and should be used in
+ * most cases to represent user-generated messages.
+ * - `assistant`: Indicates the message is generated by the assistant. Use this
+ * value to insert messages from the assistant into the conversation.
+ */
+ role: 'user' | 'assistant';
+
+ /**
+ * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+ * the message should use. There can be a maximum of 10 files attached to a
+ * message. Useful for tools like `retrieval` and `code_interpreter` that can
+ * access and use files.
+ */
+ file_ids?: Array<string>;
+
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format. Keys
+ * can be a maximum of 64 characters long and values can be a maximum of 512
+ * characters long.
+ */
+ metadata?: unknown | null;
+ }
+ }
+}
+
export interface ThreadCreateAndRunStreamParams {
/**
* The ID of the
@@ -455,6 +575,7 @@ export namespace Threads {
export import ThreadCreateAndRunParams = ThreadsAPI.ThreadCreateAndRunParams;
export import ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
export import ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
+ export import ThreadCreateAndRunPollParams = ThreadsAPI.ThreadCreateAndRunPollParams;
export import ThreadCreateAndRunStreamParams = ThreadsAPI.ThreadCreateAndRunStreamParams;
export import Runs = RunsAPI.Runs;
export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
@@ -466,10 +587,13 @@ export namespace Threads {
export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
export import RunUpdateParams = RunsAPI.RunUpdateParams;
export import RunListParams = RunsAPI.RunListParams;
+ export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams;
export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
+ export import RunStreamParams = RunsAPI.RunStreamParams;
export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
+ export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams;
export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
export import Messages = MessagesAPI.Messages;
export import Annotation = MessagesAPI.Annotation;