interface OpenAIChatInput {
    apiKey?: string;
    audio?: ChatCompletionAudioParam;
    frequencyPenalty: number;
    logitBias?: Record<string, number>;
    logprobs?: boolean;
    maxCompletionTokens?: number;
    maxTokens?: number;
    modalities?: ChatCompletionModality[];
    model: OpenAIChatModelId;
    modelKwargs?: Record<string, any>;
    modelName: string;
    n: number;
    openAIApiKey?: string;
    prefixMessages?: ChatCompletionMessageParam[];
    presencePenalty: number;
    promptCacheKey?: string;
    reasoning?: Reasoning;
    reasoningEffort?: ReasoningEffort;
    service_tier?:
        | null
        | "auto"
        | "default"
        | "flex"
        | "scale"
        | "priority";
    stop?: string[];
    stopSequences?: string[];
    streamUsage?: boolean;
    streaming: boolean;
    supportsStrictToolCalling?: boolean;
    temperature: number;
    timeout?: number;
    topLogprobs?: number;
    topP: number;
    user?: string;
    verbosity?: OpenAIVerbosityParam;
    zdrEnabled?: boolean;
}
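A minimal usage sketch, assuming these fields are accepted by the ChatOpenAI constructor exported from @langchain/openai (the consuming class is not named on this page); unset optional fields fall back to their documented defaults:

import { ChatOpenAI } from "@langchain/openai";

const chat = new ChatOpenAI({
    model: "gpt-4o-mini",               // OpenAIChatModelId
    temperature: 0.7,
    topP: 1,
    n: 1,
    apiKey: process.env.OPENAI_API_KEY, // optional; read from the environment by default
});

const response = await chat.invoke("Say hello in one sentence.");
console.log(response.content);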


Properties

apiKey?: string

API key to use when making requests to OpenAI. Defaults to the value of the OPENAI_API_KEY environment variable.

audio?: ChatCompletionAudioParam

Parameters for audio output. Required when audio output is requested with modalities: ["audio"].

frequencyPenalty: number

Penalizes repeated tokens according to frequency

logitBias?: Record<string, number>

Dictionary used to adjust the probability of specific tokens being generated
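As a hedged sketch (again assuming the ChatOpenAI constructor), the keys are model-specific token IDs and the values typically range from -100 to 100; the IDs below are placeholders:

import { ChatOpenAI } from "@langchain/openai";

const chat = new ChatOpenAI({
    model: "gpt-4o-mini",
    logitBias: {
        "50256": -100, // effectively ban this token ID (placeholder ID)
        "15339": 5,    // nudge this token ID to appear more often (placeholder ID)
    },
});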

logprobs?: boolean

Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of the message.
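A sketch of requesting token log probabilities together with topLogprobs; support varies by model, so treat the field combination as an assumption:

import { ChatOpenAI } from "@langchain/openai";

const chat = new ChatOpenAI({
    model: "gpt-4o-mini",
    logprobs: true,
    topLogprobs: 3, // up to 3 most likely alternatives per output token
});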

maxCompletionTokens?: number

Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size. Alias for maxTokens for reasoning models.

maxTokens?: number

Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size.
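A sketch contrasting the two limits; the model IDs are assumptions:

import { ChatOpenAI } from "@langchain/openai";

// Non-reasoning models: cap output length with maxTokens.
const standard = new ChatOpenAI({ model: "gpt-4o-mini", maxTokens: 256 });

// Reasoning models expect maxCompletionTokens instead.
const reasoning = new ChatOpenAI({ model: "o3-mini", maxCompletionTokens: 1024 });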

modalities?: ChatCompletionModality[]

Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default:

["text"]

The gpt-4o-audio-preview model can also be used to generate audio. To request that this model generate both text and audio responses, you can use:

["text", "audio"]

model: OpenAIChatModelId

Model name to use.

modelKwargs?: Record<string, any>

Holds any additional parameters that are valid to pass to openai.createCompletion that are not explicitly specified on this class.
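As an illustration, modelKwargs can forward a request field that this interface does not surface; the seed parameter below is an assumption about the underlying API:

import { ChatOpenAI } from "@langchain/openai";

const chat = new ChatOpenAI({
    model: "gpt-4o-mini",
    modelKwargs: {
        seed: 42, // passed through to the underlying request unchanged
    },
});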

modelName: string

Model name to use. Alias for model.

Deprecated: use "model" instead.