LocalLm types documentation
    Preparing search index...

    Interface LmProvider

    Defines the structure and behavior of an LM Provider.

    LmProvider

    const lmProvider: LmProvider = {
    name: 'koboldcpp',
    api: useApi(),
    serverUrl: 'http://example.com/api',
    apiKey: 'your-api-key',
    model: { name: 'gpt-3', ctx: 2048 },
    models: [{ name: 'gpt-3', ctx: 2048 }],
    info: async () => ({ config: 'some-config' }),
    modelsInfo: async () => {},
    loadModel: async (name, ctx, urls, onLoadProgress) => {},
    infer: async (prompt, params, options) => ({ text: 'result', data: {}, stats: {}, serverStats: {} }),
    abort: async () => {},
    onToken: (t) => console.log(t),
    onStartEmit: (data) => console.log(data),
    onEndEmit: (result) => console.log(result),
    onError: (err) => console.error(err)
    };
    interface LmProvider {
        abort: () => Promise<void>;
        api: {
            addHeader: (key: string, val: string) => void;
            csrfToken: () => null | string;
            del: <T_4>(uri: string, verbose?: boolean) => Promise<ApiResponse<T_4>>;
            get: <T>(uri: string, verbose?: boolean) => Promise<ApiResponse<T>>;
            hasCsrfCookie: () => boolean;
            onResponse: (hook: OnResponseHook) => void;
            patch: <T_3>(
                uri: string,
                payload: any[] | Record<string, any>,
                verbose?: boolean,
            ) => Promise<ApiResponse<T_3>>;
            post: <T_1>(
                uri: string,
                payload: any[] | FormData | Record<string, any>,
                multipart?: boolean,
                verbose?: boolean,
            ) => Promise<ApiResponse<T_1>>;
            postSse: <T_5>(
                uri: string,
                payload: any[] | FormData | Record<string, any>,
                onChunk: (payload: T_5) => void,
                abortController: AbortController,
                parseJson?: boolean,
                multipart?: boolean,
                verbose?: boolean,
                debug?: boolean,
            ) => Promise<void>;
            put: <T_2>(
                uri: string,
                payload: any[] | Record<string, any>,
                verbose?: boolean,
            ) => Promise<ApiResponse<T_2>>;
            removeHeader: (key: string) => void;
            setCsrfToken: (token: string) => void;
            setCsrfTokenFromCookie: (verbose?: boolean) => boolean;
        };
        apiKey: string;
        defaults?: LmDefaults;
        infer: (
            prompt: string,
            params: InferenceParams,
            options?: InferenceOptions,
        ) => Promise<InferenceResult>;
        info: () => Promise<Record<string, any>>;
        loadModel: (
            name: string,
            ctx?: number,
            urls?: string | string[],
            onLoadProgress?: OnLoadProgress,
        ) => Promise<void>;
        model: ModelConf;
        models: ModelConf[];
        modelsInfo: () => Promise<void>;
        name: string;
        onEndEmit?: (result: InferenceResult) => void;
        onError?: (err: string) => void;
        onStartEmit?: (data: IngestionStats) => void;
        onToken?: (t: string) => void;
        serverUrl: string;
    }
    Index

    Properties

    abort: () => Promise<void>

    Aborts a currently running inference task.

    api: {
        addHeader: (key: string, val: string) => void;
        csrfToken: () => null | string;
        del: <T_4>(uri: string, verbose?: boolean) => Promise<ApiResponse<T_4>>;
        get: <T>(uri: string, verbose?: boolean) => Promise<ApiResponse<T>>;
        hasCsrfCookie: () => boolean;
        onResponse: (hook: OnResponseHook) => void;
        patch: <T_3>(
            uri: string,
            payload: any[] | Record<string, any>,
            verbose?: boolean,
        ) => Promise<ApiResponse<T_3>>;
        post: <T_1>(
            uri: string,
            payload: any[] | FormData | Record<string, any>,
            multipart?: boolean,
            verbose?: boolean,
        ) => Promise<ApiResponse<T_1>>;
        postSse: <T_5>(
            uri: string,
            payload: any[] | FormData | Record<string, any>,
            onChunk: (payload: T_5) => void,
            abortController: AbortController,
            parseJson?: boolean,
            multipart?: boolean,
            verbose?: boolean,
            debug?: boolean,
        ) => Promise<void>;
        put: <T_2>(
            uri: string,
            payload: any[] | Record<string, any>,
            verbose?: boolean,
        ) => Promise<ApiResponse<T_2>>;
        removeHeader: (key: string) => void;
        setCsrfToken: (token: string) => void;
        setCsrfTokenFromCookie: (verbose?: boolean) => boolean;
    }

    API utility being used.

    apiKey: string

    The key used for authentication with the provider's API.

    defaults?: LmDefaults
    infer: (
        prompt: string,
        params: InferenceParams,
        options?: InferenceOptions,
    ) => Promise<InferenceResult>

    Makes an inference based on provided prompt and parameters.

    info: () => Promise<Record<string, any>>

    Retrieves information about available server config.

    loadModel: (
        name: string,
        ctx?: number,
        urls?: string | string[],
        onLoadProgress?: OnLoadProgress,
    ) => Promise<void>

    Loads a model by name, with optional context.

    model: ModelConf

    Active model configuration.

    models: ModelConf[]

    List of available model configurations.

    modelsInfo: () => Promise<void>

    Retrieves information about available models.

    name: string

    Identifier for the LM provider.

    onEndEmit?: (result: InferenceResult) => void

    Callback triggered when inference ends.

    onError?: (err: string) => void

    Callback triggered on errors during inference.

    onStartEmit?: (data: IngestionStats) => void

    Callback triggered when inference starts.

    onToken?: (t: string) => void

    Callback triggered when a new token is received.

    serverUrl: string

    The URL endpoint for the provider's server.