Implements

  • LmProvider

Constructors

Properties

abortController: AbortController = ...
api: {
    addHeader: ((key: string, val: string) => void);
    csrfToken: (() => null | string);
    del: (<T>(uri: string, verbose?: boolean) => Promise<ApiResponse<T>>);
    get: (<T>(uri: string, verbose?: boolean) => Promise<ApiResponse<T>>);
    hasCsrfCookie: (() => boolean);
    onResponse: ((hook: OnResponseHook) => void);
    patch: (<T>(uri: string, payload: any[] | Record<string, any>, verbose?: boolean) => Promise<ApiResponse<T>>);
    post: (<T>(uri: string, payload: any[] | Record<string, any> | FormData, multipart?: boolean, verbose?: boolean) => Promise<ApiResponse<T>>);
    postSse: (<T>(uri: string, payload: any[] | Record<string, any> | FormData, onChunk: ((payload: T) => void), abortController: AbortController, parseJson?: boolean, multipart?: boolean, verbose?: boolean, debug?: boolean) => Promise<void>);
    put: (<T>(uri: string, payload: any[] | Record<string, any>, verbose?: boolean) => Promise<ApiResponse<T>>);
    removeHeader: ((key: string) => void);
    setCsrfToken: ((token: string) => void);
    setCsrfTokenFromCookie: ((verbose?: boolean) => boolean);
}
apiKey: string
model: ModelConf = ...
models: ModelConf[] = ...
name: string
onEndEmit?: ((result: InferenceResult) => void)
onError?: ((err: string) => void)
onStartEmit?: ((data: IngestionStats) => void)
onToken?: ((t: string) => void)
serverUrl: string
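
The optional callback properties are meant to be assigned before running an inference so that output can be streamed. A minimal sketch (the structural interface below is a hypothetical stand-in declaring only the fields used here, not the real LmProvider type):

    // Hypothetical structural stand-in: only the callback fields are declared.
    interface LmProviderLike {
      onToken?: (t: string) => void;
      onStartEmit?: (data: unknown) => void;
      onEndEmit?: (result: unknown) => void;
      onError?: (err: string) => void;
    }

    // Wire the streaming callbacks before starting an inference.
    function wireCallbacks(provider: LmProviderLike): void {
      provider.onToken = (t) => process.stdout.write(t); // stream tokens as they arrive
      provider.onStartEmit = (data) => console.log("ingestion stats:", data);
      provider.onEndEmit = (result) => console.log("\nfinal result:", result);
      provider.onError = (err) => console.error("inference error:", err);
    }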

Methods

  • Aborts a currently running inference task.

    Returns Promise<void>
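
    For example, a caller could cancel a long-running inference after a timeout. A sketch under stated assumptions: the abort function passed in stands for this provider method, whose name is not given on this page:

    // Hypothetical timeout wrapper: `run` starts the inference and
    // `abort` stands for this provider's abort method (name assumed).
    async function runWithTimeout(
      run: () => Promise<void>,
      abort: () => Promise<void>,
      ms: number,
    ): Promise<void> {
      const timer = setTimeout(() => { void abort(); }, ms);
      try {
        await run();
      } finally {
        clearTimeout(timer);
      }
    }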

  • Makes an inference based on the provided prompt and parameters.

    Parameters

    • prompt: string

      The input text to base the inference on.

    • params: InferenceParams

      Parameters for customizing the inference behavior.

    • parseJson: boolean = false

    • Optional parseJsonFunc: ((data: string) => Record<string, any>)

      A custom parsing function: it takes the raw output string and returns a Record<string, any>.

    Returns Promise<InferenceResult>

    The result of the inference.
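
    A hedged usage sketch (the method name infer, the result field text, and the temperature/max_tokens fields of InferenceParams are assumptions for illustration; only the parameter list above is confirmed by this page):

    // Structural stand-ins; the names here are assumptions, not the documented API.
    declare const provider: {
      infer(
        prompt: string,
        params: Record<string, unknown>,
        parseJson?: boolean,
        parseJsonFunc?: (data: string) => Record<string, any>,
      ): Promise<{ text: string }>;
    };

    // Plain-text inference.
    const result = await provider.infer("Summarize this paragraph.", {
      temperature: 0.2, // assumed InferenceParams field
      max_tokens: 256,  // assumed InferenceParams field
    });
    console.log(result.text);

    // JSON mode with a custom parser to repair near-JSON output.
    const parsed = await provider.infer(
      "List three colors as a JSON array.",
      { temperature: 0 },
      true,
      (data) => JSON.parse(data.trim()) as Record<string, any>,
    );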

  • Returns Promise<Record<string, any>>

  • Loads a specified model for inference. Note: this queries the server and retrieves the current model info (name and ctx).

    Parameters

    • name: string

      The name of the model to load.

    • Optional ctx: number

      The optional context window length; defaults to the model's ctx.

    • Optional threads: number

      The number of threads to use for inference.

    • Optional gpu_layers: number

      The number of layers to offload to the GPU.

    Returns Promise<void>
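
    A usage sketch (the method name loadModel and the model name below are assumptions for illustration; the parameters match the list above):

    // Structural stand-in for the provider; the method name is assumed.
    declare const provider: {
      loadModel(name: string, ctx?: number, threads?: number, gpu_layers?: number): Promise<void>;
    };

    // Load a model with an 8192-token context window and 32 GPU layers,
    // leaving the thread count to the server default.
    await provider.loadModel("mistral-7b-instruct", 8192, undefined, 32);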

  • Not implemented for this provider.

    Returns Promise<void>