Class: OpenAI
Extends
Extended by
Constructors
new OpenAI()
new OpenAI(init?): OpenAI
Parameters
• init?: Partial<OpenAI> & object
Returns
OpenAI
Overrides
Source
packages/core/src/llm/openai.ts:181
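All init fields are optional; unset values fall back to the class defaults listed under Properties. A minimal construction sketch (assuming the class is re-exported from the llamaindex package entry point):

```ts
import { OpenAI } from "llamaindex";

// Unset fields keep their defaults; leaving apiKey undefined is assumed
// to let the underlying openai client resolve OPENAI_API_KEY from the
// environment.
const llm = new OpenAI({
  model: "gpt-4-turbo",
  temperature: 0.1,
  maxRetries: 3,
});
```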
Properties
additionalChatOptions?
optional additionalChatOptions: OpenAIAdditionalChatOptions
Source
packages/core/src/llm/openai.ts:169
additionalSessionOptions?
optional additionalSessionOptions: Omit<Partial<ClientOptions>, "apiKey" | "timeout" | "maxRetries">
Source
packages/core/src/llm/openai.ts:176
apiKey?
optional apiKey: string = undefined
Source
packages/core/src/llm/openai.ts:172
maxRetries
maxRetries: number
Source
packages/core/src/llm/openai.ts:173
maxTokens?
optional maxTokens: number
Source
packages/core/src/llm/openai.ts:168
model
model: string
Source
packages/core/src/llm/openai.ts:165
session
session: OpenAISession
Source
packages/core/src/llm/openai.ts:175
temperature
temperature: number
Source
packages/core/src/llm/openai.ts:166
timeout?
optional timeout: number
Source
packages/core/src/llm/openai.ts:174
topP
topP: number
Source
packages/core/src/llm/openai.ts:167
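These properties mirror the constructor's init object. A hedged configuration sketch (MY_OPENAI_KEY is a hypothetical environment variable; baseURL belongs to the openai client's ClientOptions and is passed through additionalSessionOptions):

```ts
const proxiedLlm = new OpenAI({
  apiKey: process.env.MY_OPENAI_KEY, // overrides the default of undefined
  timeout: 60_000, // milliseconds, matching the openai client convention
  additionalSessionOptions: { baseURL: "https://example.com/v1" },
  additionalChatOptions: { user: "demo-user" }, // extra chat-completion params
});
```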
Accessors
metadata
get metadata(): LLMMetadata
Returns
LLMMetadata
Source
packages/core/src/llm/openai.ts:238
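The getter echoes the instance configuration plus model-derived values. A short sketch (field names assumed from the LLMMetadata interface):

```ts
const meta = llm.metadata;
// model, temperature, and topP mirror the properties above;
// contextWindow is assumed to be derived from the model name.
console.log(meta.model, meta.temperature, meta.contextWindow);
```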
supportToolCall
get supportToolCall(): boolean
Returns
boolean
Source
packages/core/src/llm/openai.ts:234
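Useful as a guard before attaching tool definitions to a chat request; a minimal sketch:

```ts
if (llm.supportToolCall) {
  // safe to include tools in the chat params for this model
}
```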
Methods
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>
Parameters
• params: LLMChatParamsStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>
Returns
Promise<AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>>
Overrides
Source
packages/core/src/llm/openai.ts:315
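Passing stream: true in the params selects this overload; each yielded chunk carries an incremental delta. A minimal streaming sketch:

```ts
const stream = await llm.chat({
  messages: [{ role: "user", content: "Write a haiku about TypeScript." }],
  stream: true, // selects the streaming overload
});
for await (const chunk of stream) {
  process.stdout.write(chunk.delta); // incremental text from each ChatResponseChunk
}
```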
chat(params)
chat(params): Promise<ChatResponse<ToolCallLLMMessageOptions>>
Parameters
• params: LLMChatParamsNonStreaming<OpenAIAdditionalChatOptions, ToolCallLLMMessageOptions>
Returns
Promise<ChatResponse<ToolCallLLMMessageOptions>>
Overrides
Source
packages/core/src/llm/openai.ts:321
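Without stream: true the call resolves to a single ChatResponse whose message holds the complete assistant reply:

```ts
const response = await llm.chat({
  messages: [
    { role: "system", content: "You are a terse assistant." },
    { role: "user", content: "What is a context window?" },
  ],
});
console.log(response.message.content);
```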
complete()
complete(params)
complete(params): Promise<AsyncIterable<CompletionResponse>>
Parameters
• params: LLMCompletionParamsStreaming
Returns
Promise<AsyncIterable<CompletionResponse>>
Inherited from
Source
packages/core/src/llm/base.ts:22
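complete() takes a prompt string instead of a message list. With stream: true it yields CompletionResponse chunks; the sketch below assumes each chunk's text field carries the delta:

```ts
const stream = await llm.complete({
  prompt: "The three laws of robotics are",
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.text);
}
```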
complete(params)
complete(params): Promise<CompletionResponse>
Parameters
• params: LLMCompletionParamsNonStreaming
Returns
Promise<CompletionResponse>
Inherited from
Source
packages/core/src/llm/base.ts:25
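The non-streaming overload resolves to one CompletionResponse:

```ts
const { text } = await llm.complete({
  prompt: "Summarize retrieval-augmented generation in one sentence.",
});
console.log(text);
```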
streamChat()
protected streamChat(baseRequestParams): AsyncIterable<ChatResponseChunk<ToolCallLLMMessageOptions>>
Parameters
• baseRequestParams: ChatCompletionCreateParams