openai_response_functions.ts
import { randomUUID } from 'node:crypto'
import OpenAI, { ClientOptions } from 'openai'
import { Provider, responseAsToolCall, toLLMTools } from '../models.js'
import { OpenAIProviderOptions as BaseOpenAIProviderOptions } from './openai.js'
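
/**
 * Note: `responseAsToolCall` (from ../models.js) is assumed here to map each
 * entry of `response_format` (a record of Zod schemas keyed by response name)
 * to an OpenAI tool definition, so the model "responds" by calling one of
 * those tools. A hypothetical sketch of that shape:
 *
 *   responseAsToolCall({ answer: z.object({ text: z.string() }) }, false)
 *   // → [{ type: 'function', function: { name: 'answer', parameters: { ... } } }]
 */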
/**
 * Required options for the OpenAI-compatible provider.
 */
export type OpenAIProviderOptions = BaseOpenAIProviderOptions & {
  /**
   * Since this provider targets OpenAI-compatible endpoints,
   * we do not provide a default model.
   */
  model: string
  /**
   * Client options.
   */
  options: ClientOptions
  /**
   * Whether to use strict mode.
   * @default false
   */
  strictMode?: boolean
}
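
/**
 * A minimal sketch of constructing these options for an OpenAI-compatible
 * endpoint. The model name, baseURL, and env variable below are hypothetical,
 * and any extra fields required by `BaseOpenAIProviderOptions` are omitted:
 *
 *   const providerOptions: OpenAIProviderOptions = {
 *     model: 'llama-3.1-70b',
 *     options: {
 *       baseURL: 'https://api.example.com/v1',
 *       apiKey: process.env.EXAMPLE_API_KEY,
 *     },
 *     strictMode: false,
 *   }
 */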
/**
 * OpenAI provider.
 *
 * Unlike `openai.js`, this provider does not use response_format / structured
 * output; instead, it exposes each response format as a tool and forces a
 * tool call to enforce the right JSON schema.
 */
export const openai = (options: OpenAIProviderOptions): Provider => {
  const { model, options: clientOptions, strictMode = false, body = {} } = options
  const client = new OpenAI(clientOptions)
  return {
    chat: async ({ messages, response_format, temperature, ...options }) => {
      const tools = 'tools' in options ? toLLMTools(options.tools, strictMode) : []
      const response = await client.chat.completions.create({
        model,
        tools: [...tools, ...responseAsToolCall(response_format, strictMode)],
        messages,
        temperature,
        tool_choice: 'required',
        ...body,
      })
      /**
       * Tool choice is set to 'required'. If the response contains no tool
       * calls, we throw an error.
       */
      const message = response.choices[0].message
      if (!message.tool_calls) {
        throw new Error('No response in message')
      }
      /**
       * If the LLM calls a tool that represents a response format, we parse
       * and return that response.
       */
      const firstToolCall = message.tool_calls[0]
      if (Object.keys(response_format).includes(firstToolCall.function.name)) {
        /**
         * In case the LLM called multiple tools alongside the response tool,
         * we throw an error.
         */
        if (message.tool_calls.length > 1) {
          throw new Error(
            `When calling one of ${Object.keys(response_format).join(', ')}, you cannot call other tools.`
          )
        }
        const schema = response_format[firstToolCall.function.name]
        return {
          type: firstToolCall.function.name,
          value: schema.parse(JSON.parse(firstToolCall.function.arguments)),
        }
      }
      return {
        type: 'tool_call',
        value: message.tool_calls.map((toolCall) => ({
          ...toolCall,
          /**
           * Some providers, such as Groq, return IDs that are unique only
           * within the current request. We generate a random UUID to ensure
           * that the tool call ID is unique throughout the entire session.
           */
          id: randomUUID(),
          function: {
            ...toolCall.function,
            /**
             * Internally, we rely on the OpenAI SDK's built-in support for
             * Zod schemas via the `parsed_arguments` field.
             */
            parsed_arguments: JSON.parse(toolCall.function.arguments),
          },
        })),
      }
    },
  }
}
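
/**
 * Usage sketch, assuming the returned `Provider` is consumed by the
 * surrounding framework and that `response_format` maps response names to
 * Zod schemas (model, endpoint, and schema below are hypothetical):
 *
 *   const provider = openai({
 *     model: 'llama-3.1-8b-instant',
 *     options: {
 *       baseURL: 'https://api.groq.com/openai/v1',
 *       apiKey: process.env.GROQ_API_KEY,
 *     },
 *   })
 *   const result = await provider.chat({
 *     messages: [{ role: 'user', content: 'Summarize this file.' }],
 *     response_format: { summary: z.object({ text: z.string() }) },
 *     temperature: 0,
 *   })
 *   // result.type === 'summary'   → result.value is the parsed summary
 *   // result.type === 'tool_call' → the model asked to call other tools
 */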