LocalLm API documentation

    Class OpenaiCompatibleProvider

    Implements

    • LmProvider

    Properties

    abortController: AbortController = ...
    api: {
        addHeader: (key: string, val: string) => void;
        csrfToken: () => null | string;
        del: <T_4>(uri: string, verbose?: boolean) => Promise<ApiResponse<T_4>>;
        get: <T>(uri: string, verbose?: boolean) => Promise<ApiResponse<T>>;
        hasCsrfCookie: () => boolean;
        onResponse: (hook: OnResponseHook) => void;
        patch: <T_3>(
            uri: string,
            payload: any[] | Record<string, any>,
            verbose?: boolean,
        ) => Promise<ApiResponse<T_3>>;
        post: <T_1>(
            uri: string,
            payload: any[] | Record<string, any> | FormData,
            multipart?: boolean,
            verbose?: boolean,
        ) => Promise<ApiResponse<T_1>>;
        postSse: <T_5>(
            uri: string,
            payload: any[] | Record<string, any> | FormData,
            onChunk: (payload: T_5) => void,
            abortController: AbortController,
            parseJson?: boolean,
            multipart?: boolean,
            verbose?: boolean,
            debug?: boolean,
        ) => Promise<void>;
        put: <T_2>(
            uri: string,
            payload: any[] | Record<string, any>,
            verbose?: boolean,
        ) => Promise<ApiResponse<T_2>>;
        removeHeader: (key: string) => void;
        setCsrfToken: (token: string) => void;
        setCsrfTokenFromCookie: (verbose?: boolean) => boolean;
    }
    apiKey: string
    model: ModelConf = ...
    models: ModelConf[] = ...
    name: string
    onEndEmit?: (result: InferenceResult) => void
    onError?: (err: string) => void
    onStartEmit?: (data: IngestionStats) => void
    onToken?: (t: string) => void
    openai: OpenAI
    serverUrl: string
    tools: Record<string, ToolSpec> = {}
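
    The streaming callbacks (onToken, onStartEmit, onEndEmit, onError) are plain properties, so a caller can wire them up when building the provider. A minimal setup sketch; the import path and the options-object constructor are assumptions, so check the package exports and the real constructor signature:

    import { OpenaiCompatibleProvider } from "locallm"; // assumed import path

    const lm = new OpenaiCompatibleProvider({
        name: "local",
        serverUrl: "http://localhost:8080/v1",
        apiKey: "sk-no-key-needed",
        // Stream each generated token as it arrives.
        onToken: (t: string) => process.stdout.write(t),
        // Fires once prompt ingestion finishes, before generation starts.
        onStartEmit: (stats) => console.log("ingested:", stats),
        onError: (err: string) => console.error("inference failed:", err),
    });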

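    The api property is an embedded REST client bound to serverUrl: verb helpers (get, post, put, patch, del), header and CSRF token management, and postSse for streaming responses over server-sent events tied to an AbortController. A usage sketch, reusing the lm instance from the setup above; the endpoint path is hypothetical and the ok/data fields on ApiResponse are assumptions:

    // Hypothetical endpoint; substitute a route your server exposes.
    const res = await lm.api.get<Record<string, any>>("/v1/models");
    if (res.ok) {
        console.log("available models:", res.data);
    }
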
    Methods

    • Aborts a currently running inference task.

      Returns Promise<void>
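
      Cancellation runs through the provider's abortController property. A sketch, reusing the lm instance from the setup above; the method name abort and the InferenceParams fields are assumptions, since this page elides the method names:

      const run = lm.infer("Write a haiku about rain.", { temperature: 0.8 });
      // Cancel after two seconds if the task is still running.
      setTimeout(() => { void lm.abort(); }, 2000);
      const result = await run; // settles once the task has stopped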

    • Makes an inference based on the provided prompt and parameters.

      Parameters

      • prompt: string

        The input text to base the inference on.

      • params: InferenceParams

        Parameters for customizing the inference behavior.

      • Optional options: InferenceOptions

      Returns Promise<InferenceResult>

      • The result of the inference.
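
      A minimal call, reusing the lm instance from the setup above; the method name infer and the parameter fields shown are assumptions based on the types listed on this page:

      const result = await lm.infer(
          "List three uses for a Raspberry Pi.",
          { temperature: 0.2, max_tokens: 256 }, // assumed InferenceParams fields
      );
      console.log(result); // InferenceResult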

    • Returns Promise<Record<string, any>>

    • Use a specified model for inferences.

      Parameters

      • name: string

        The name of the model to load.

      • Optional ctx: number

        The optional context window length; defaults to the model's ctx.

      • Optional urls: string | string[]
      • Optional onLoadProgress: OnLoadProgress

      Returns Promise<void>
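
      A sketch of selecting a model with an explicit context window and a load-progress callback, reusing the lm instance from the setup above; the method name loadModel and the OnLoadProgress payload are assumptions:

      await lm.loadModel(
          "llama-3.1-8b-instruct", // model name as known to the server
          8192, // optional ctx; omit to keep the model's default
          undefined, // urls: not documented on this page, left unset
          (progress) => console.log("load progress:", progress),
      );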