-
Notifications
You must be signed in to change notification settings - Fork 16
/
Copy pathopenai.ts
57 lines (49 loc) · 1.6 KB
/
openai.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import OpenAI, { ClientOptions as OpenAIOptions } from 'openai'
import { Provider, responseAsStructuredOutput, toLLMTools } from '../models.js'
export type OpenAIProviderOptions = {
  /**
   * Model to use.
   */
  model?: string
  /**
   * Client options passed straight to the OpenAI SDK constructor
   * (API key, base URL, etc.).
   */
  options?: OpenAIOptions
  /**
   * Additional body parameters.
   */
  body?: Record<string, any>
}
/**
 * OpenAI provider.
 *
 * This provider uses response_format / structured output, together with tools.
 *
 * When using this provider with other LLMs, make sure they support both tools and structured_output,
 * otherwise you will get an error. Otherwise, use the one from `openai_response_functions.js` instead.
 *
 * @param options - Model name, SDK client options, and extra request-body fields.
 * @returns A `Provider` whose `chat` method resolves to either a
 *   `{ type: 'tool_call', value }` object or the parsed structured response.
 * @throws Error when the API returns no choices or no parsed response.
 */
export const openai = (options: OpenAIProviderOptions = {}): Provider => {
  const { model = 'gpt-4o', options: clientOptions, body = {} } = options
  const client = new OpenAI(clientOptions)
  return {
    chat: async ({ messages, response_format, temperature, ...options }) => {
      // Map framework tools to the SDK's tool schema only when the caller supplied any.
      const mappedTools = 'tools' in options ? toLLMTools(options.tools) : []
      const response = await client.beta.chat.completions.parse({
        model,
        messages,
        tools: mappedTools.length > 0 ? mappedTools : undefined,
        temperature,
        response_format: responseAsStructuredOutput(response_format),
        // Spread last so callers can override any request field.
        ...body,
      })
      const message = response.choices[0]?.message
      if (!message) {
        // Guard against an empty choices array instead of an opaque TypeError.
        throw new Error('No response in message')
      }
      // `tool_calls` is optional on the SDK message type; without `?.` a plain
      // text response would crash with a TypeError here.
      if (message.tool_calls && message.tool_calls.length > 0) {
        return {
          type: 'tool_call',
          value: message.tool_calls,
        }
      }
      if (!message.parsed?.response) {
        throw new Error('No response in message')
      }
      return message.parsed.response
    },
  }
}