
ollama-ai-provider

npm Package Compare versions

Comparing version 0.5.1 to 0.6.0

@@ -1,2 +0,2 @@

-import { LanguageModelV1 } from '@ai-sdk/provider';
+import { LanguageModelV1, EmbeddingModelV1 } from '@ai-sdk/provider';

@@ -51,3 +51,2 @@ type OllamaChatModelId = 'codellama' | 'codellama:7b' | 'codellama:13b' | 'codellama:34b' | 'codellama:70b' | 'codellama:code' | 'codellama:python' | 'falcon2' | 'falcon2:11b' | 'gemma' | 'gemma:2b' | 'gemma:7b' | 'llama2' | 'llama2:7b' | 'llama2:13b' | 'llama2:70b' | 'llama3' | 'llama3:8b' | 'llama3:70b' | 'llama3-chatqa' | 'llama3-chatqa:8b' | 'llama3-chatqa:70b' | 'llama3-gradient' | 'llama3-gradient:8b' | 'llama3-gradient:70b' | 'llava' | 'llava:7b' | 'llava:13b' | 'llava:34b' | 'llava-llama3' | 'llava-llama3:8b' | 'llava-phi3' | 'llava-phi3:3.8b' | 'mistral' | 'mistral:7b' | 'mixtral' | 'mixtral:8x7b' | 'mixtral:8x22b' | 'moondream' | 'moondream:1.8b' | 'openhermes' | 'openhermes:v2.5' | 'phi3' | 'phi3:3.8b' | (string & NonNullable<unknown>);

     baseURL: string;
-    generateId: () => string;
     headers: () => Record<string, string | undefined>;
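Both model-id unions in this file end in (string & NonNullable<unknown>), which looks odd at first glance. A small TypeScript illustration of the idiom; ModelId below is a stand-in for the package's OllamaChatModelId:

    // Intersecting with NonNullable<unknown> (i.e. {}) stops the literal union
    // from collapsing to plain `string`, so editors keep autocompleting the
    // known tags while any other Ollama tag remains assignable.
    type ModelId = 'llama3' | 'mistral' | (string & NonNullable<unknown>);

    const known: ModelId = 'llama3';          // autocompleted literal
    const custom: ModelId = 'my-finetune:q4'; // arbitrary tag still type-checks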

@@ -69,5 +68,28 @@ provider: string;

+type OllamaEmbeddingModelId = 'all-minilm' | 'all-minilm:22m' | 'all-minilm:33m' | 'mxbai-embed-large' | 'nomic-embed-text' | 'snowflake-arctic-embed' | 'snowflake-arctic-embed:22m' | 'snowflake-arctic-embed:33m' | 'snowflake-arctic-embed:110m' | 'snowflake-arctic-embed:137m' | 'snowflake-arctic-embed:335m' | OllamaChatModelId | (string & NonNullable<unknown>);
+interface OllamaEmbeddingSettings {
+    maxEmbeddingsPerCall?: number;
+}
+type OllamaEmbeddingConfig = {
+    baseURL: string;
+    headers: () => Record<string, string | undefined>;
+    provider: string;
+};
+declare class OllamaEmbeddingModel implements EmbeddingModelV1<string> {
+    readonly specificationVersion = "v1";
+    readonly modelId: OllamaEmbeddingModelId;
+    private readonly config;
+    private readonly settings;
+    get provider(): string;
+    get maxEmbeddingsPerCall(): number;
+    get supportsParallelCalls(): boolean;
+    constructor(modelId: OllamaEmbeddingModelId, settings: OllamaEmbeddingSettings, config: OllamaEmbeddingConfig);
+    doEmbed({ abortSignal, values, }: Parameters<EmbeddingModelV1<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV1<string>['doEmbed']>>>;
+}
+interface OllamaProvider {
+    (modelId: OllamaChatModelId, settings?: OllamaChatSettings): OllamaChatLanguageModel;
+    chat(modelId: OllamaChatModelId, settings?: OllamaChatSettings): OllamaChatLanguageModel;
+    embedding(modelId: OllamaEmbeddingModelId, settings?: OllamaEmbeddingSettings): OllamaEmbeddingModel;
+}
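These typings are the heart of the 0.6.0 release: the provider gains an embedding factory alongside chat. A minimal usage sketch, assuming the package's createOllama export and the embed helper from the ai package (neither is shown in this diff):

    import { embed } from 'ai';
    import { createOllama } from 'ollama-ai-provider';

    const ollama = createOllama(); // defaults to http://127.0.0.1:11434/api

    // 'nomic-embed-text' is one of the OllamaEmbeddingModelId literals above.
    const { embedding } = await embed({
      model: ollama.embedding('nomic-embed-text'),
      value: 'sunny day at the beach',
    });

    console.log(embedding.length); // dimensionality of the returned vector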

@@ -84,3 +106,2 @@ interface OllamaProviderSettings {

     readonly baseURL: string;
-    private readonly generateId;
     readonly headers?: Record<string, string>;

@@ -87,0 +108,0 @@ constructor(options?: OllamaProviderSettings);

@@ -487,5 +487,4 @@ "use strict";

   constructor(options = {}) {
-    var _a, _b;
+    var _a;
     this.baseURL = (_a = (0, import_provider_utils6.withoutTrailingSlash)(options.baseURL)) != null ? _a : "http://127.0.0.1:11434/api";
-    this.generateId = (_b = options.generateId) != null ? _b : import_provider_utils6.generateId;
     this.headers = options.headers;

@@ -504,4 +503,3 @@ }

provider: "ollama.chat",
...this.baseConfig,
generateId: this.generateId
...this.baseConfig
});
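Read together with the constructor hunk above and the d.ts hunks earlier, this removes the generateId plumbing end to end: the facade no longer reads options.generateId, the chat model's config type no longer declares a generateId field, and nothing is forwarded here.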

@@ -512,4 +510,82 @@ }

+// src/ollama-provider.ts
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
+
+// src/ollama-embedding-model.ts
+var import_provider4 = require("@ai-sdk/provider");
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var import_zod4 = require("zod");
+var OllamaEmbeddingModel = class {
+  constructor(modelId, settings, config) {
+    this.specificationVersion = "v1";
+    this.modelId = modelId;
+    this.settings = settings;
+    this.config = config;
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  get maxEmbeddingsPerCall() {
+    var _a;
+    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 1;
+  }
+  get supportsParallelCalls() {
+    return false;
+  }
+  async doEmbed({
+    abortSignal,
+    values
+  }) {
+    if (values.length > this.maxEmbeddingsPerCall) {
+      throw new import_provider4.TooManyEmbeddingValuesForCallError({
+        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
+        modelId: this.modelId,
+        provider: this.provider,
+        values
+      });
+    }
+    const embeddings = {
+      embeddings: [],
+      rawResponse: { headers: {} }
+    };
+    for (const value of values) {
+      const { responseHeaders, value: response } = await (0, import_provider_utils7.postJsonToApi)({
+        abortSignal,
+        body: {
+          model: this.modelId,
+          prompt: value
+        },
+        failedResponseHandler: ollamaFailedResponseHandler,
+        headers: this.config.headers(),
+        successfulResponseHandler: (0, import_provider_utils7.createJsonResponseHandler)(
+          ollamaTextEmbeddingResponseSchema
+        ),
+        url: `${this.config.baseURL}/embeddings`
+      });
+      embeddings.embeddings.push(response.embedding);
+      embeddings.rawResponse = { headers: responseHeaders };
+    }
+    return embeddings;
+  }
+};
+var ollamaTextEmbeddingResponseSchema = import_zod4.z.object({
+  embedding: import_zod4.z.array(import_zod4.z.number())
+});
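Two behavioral details in the class above: maxEmbeddingsPerCall defaults to 1 and supportsParallelCalls is false, so oversized batches are rejected with TooManyEmbeddingValuesForCallError, and doEmbed otherwise issues one sequential POST to ${baseURL}/embeddings per value. A sketch of batching from the caller's side, assuming the embedMany helper from the ai package (not part of this diff), which chunks its input by the model's maxEmbeddingsPerCall:

    import { embedMany } from 'ai';
    import { createOllama } from 'ollama-ai-provider';

    const ollama = createOllama();

    // With maxEmbeddingsPerCall = 1, embedMany splits this into three
    // single-value doEmbed calls, i.e. three sequential /embeddings POSTs.
    const { embeddings } = await embedMany({
      model: ollama.embedding('all-minilm'),
      values: ['first chunk', 'second chunk', 'third chunk'],
    });

    console.log(embeddings.length); // 3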
+// src/ollama-provider.ts
 function createOllama(options = {}) {
-  const ollama2 = new Ollama(options);
+  var _a;
+  const baseURL = (_a = (0, import_provider_utils8.withoutTrailingSlash)(options.baseURL)) != null ? _a : "http://127.0.0.1:11434/api";
+  const getHeaders = () => ({
+    ...options.headers
+  });
+  const createChatModel = (modelId, settings = {}) => new OllamaChatLanguageModel(modelId, settings, {
+    baseURL,
+    headers: getHeaders,
+    provider: "ollama.chat"
+  });
+  const createEmbeddingModel = (modelId, settings = {}) => new OllamaEmbeddingModel(modelId, settings, {
+    baseURL,
+    headers: getHeaders,
+    provider: "ollama.embedding"
+  });
+  const provider = function(modelId, settings) {
const provider = function(modelId, settings) {

@@ -521,5 +597,6 @@ if (new.target) {

     }
-    return ollama2.chat(modelId, settings);
+    return createChatModel(modelId, settings);
   };
-  provider.chat = ollama2.chat.bind(ollama2);
+  provider.chat = createChatModel;
+  provider.embedding = createEmbeddingModel;
   return provider;

@@ -526,0 +603,0 @@ }
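The rewritten factory drops 0.5.1's Ollama class wrapper in favor of plain closures: the direct call signature, provider.chat, and the new provider.embedding all share one baseURL and one lazily evaluated headers function. A sketch of the three entry points, assuming the createOllama export; the custom header is hypothetical:

    import { createOllama } from 'ollama-ai-provider';

    const ollama = createOllama({
      baseURL: 'http://localhost:11434/api', // trailing slash is stripped
      headers: { 'x-example': 'demo' },      // hypothetical, sent on every request
    });

    const viaCall = ollama('llama3');      // call signature returns a chat model
    const viaChat = ollama.chat('llama3'); // same factory, explicit form
    const embedder = ollama.embedding('nomic-embed-text'); // new in 0.6.0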

 {
   "name": "ollama-ai-provider",
-  "version": "0.5.1",
+  "version": "0.6.0",
   "description": "Vercel AI Provider for running LLMs locally using Ollama",

@@ -5,0 +5,0 @@ "main": "./dist/index.js",
