import { ChatCompletionStream } from "https://deno.land/x/openai@v4.69.0/resources/beta/chat/completions.ts";
Methods
protected _createChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>
protected _fromReadableStream(readableStream: ReadableStream, options?: Core.RequestOptions): Promise<ChatCompletion>

[Symbol.asyncIterator](this: ChatCompletionStream<ParsedT>): AsyncIterator<ChatCompletionChunk>
Static Methods
createChatCompletion<ParsedT>(client: OpenAI, params: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream<ParsedT>
fromReadableStream(stream: ReadableStream): ChatCompletionStream<null>
Intended for use on the frontend, consuming a stream produced with .toReadableStream() on the backend.

Note that messages sent to the model do not appear in .on('message') in this context.