Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

llamaindex

Package Overview
Dependencies
Maintainers
1
Versions
210
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

llamaindex - npm Package Compare versions

Comparing version 0.0.19 to 0.0.20

8

CHANGELOG.md
# llamaindex
## 0.0.20
### Patch Changes
- b526a2d: added additionalSessionOptions and additionalChatOptions
- b526a2d: OpenAI v4.0.1
- b526a2d: OpenAI moved timeout back to 60 seconds
## 0.0.19

@@ -4,0 +12,0 @@

5

dist/index.d.ts

@@ -0,3 +1,3 @@

import OpenAI$1, { ClientOptions as ClientOptions$1 } from 'openai';
import Anthropic$1, { ClientOptions } from '@anthropic-ai/sdk';
import OpenAI$1, { ClientOptions as ClientOptions$1 } from 'openai';
import Replicate from 'replicate';

@@ -250,2 +250,3 @@ import { ParseConfig } from 'papaparse';

maxTokens?: number;
additionalChatOptions?: Omit<Partial<OpenAI$1.Chat.CompletionCreateParams>, "max_tokens" | "messages" | "model" | "temperature" | "top_p" | "streaming">;
apiKey?: string;

@@ -255,2 +256,3 @@ maxRetries: number;

session: OpenAISession;
additionalSessionOptions?: Omit<Partial<ClientOptions$1>, "apiKey" | "maxRetries" | "timeout">;
callbackManager?: CallbackManager;

@@ -541,2 +543,3 @@ constructor(init?: Partial<OpenAI> & {

timeout?: number;
additionalSessionOptions?: Omit<Partial<ClientOptions$1>, "apiKey" | "maxRetries" | "timeout">;
session: OpenAISession;

@@ -543,0 +546,0 @@ constructor(init?: Partial<OpenAIEmbedding> & {

6

package.json
{
"name": "llamaindex",
"version": "0.0.19",
"version": "0.0.20",
"dependencies": {
"@anthropic-ai/sdk": "^0.6.0",
"lodash": "^4.17.21",
"openai": "^4.0.0",
"openai": "^4.0.1",
"papaparse": "^5.4.1",

@@ -17,3 +17,3 @@ "pdf-parse": "^1.1.1",

"@types/lodash": "^4.14.197",
"@types/node": "^18.17.5",
"@types/node": "^18.17.6",
"@types/papaparse": "^5.3.7",

@@ -20,0 +20,0 @@ "@types/pdf-parse": "^1.1.1",

@@ -0,10 +1,12 @@

import { ClientOptions as OpenAIClientOptions } from "openai";
import { DEFAULT_SIMILARITY_TOP_K } from "./constants";
import {
AzureOpenAIConfig,
getAzureBaseUrl,
getAzureConfigFromEnv,
getAzureModel,
getAzureBaseUrl,
shouldUseAzure,
} from "./llm/azure";
import { OpenAISession, getOpenAISession } from "./llm/openai";
import { getOpenAISession, OpenAISession } from "./llm/openai";
import { VectorStoreQueryMode } from "./storage/vectorStore/types";

@@ -32,3 +34,3 @@

embedding2: number[],
mode: SimilarityType = SimilarityType.DEFAULT
mode: SimilarityType = SimilarityType.DEFAULT,
): number {

@@ -90,3 +92,3 @@ if (embedding1.length !== embedding2.length) {

embeddingIds: any[] | null = null,
similarityCutoff: number | null = null
similarityCutoff: number | null = null,
): [number[], any[]] {

@@ -99,3 +101,3 @@ if (embeddingIds == null) {

throw new Error(
"getTopKEmbeddings: embeddings and embeddingIds length mismatch"
"getTopKEmbeddings: embeddings and embeddingIds length mismatch",
);

@@ -134,3 +136,3 @@ }

embeddingsIds?: any[],
queryMode: VectorStoreQueryMode = VectorStoreQueryMode.SVM
queryMode: VectorStoreQueryMode = VectorStoreQueryMode.SVM,
): [number[], any[]] {

@@ -149,3 +151,3 @@ throw new Error("Not implemented yet");

_similarityCutoff: number | null = null,
mmrThreshold: number | null = null
mmrThreshold: number | null = null,
): [number[], any[]] {

@@ -187,3 +189,3 @@ let threshold = mmrThreshold || 0.5;

embeddings[embedMap.get(embedId)!],
embeddings[fullEmbedMap.get(recentEmbeddingId!)!]
embeddings[fullEmbedMap.get(recentEmbeddingId!)!],
);

@@ -213,3 +215,3 @@ if (

embedding2: number[],
mode: SimilarityType = SimilarityType.DEFAULT
mode: SimilarityType = SimilarityType.DEFAULT,
): number {

@@ -234,2 +236,7 @@ return similarity(embedding1, embedding2, mode);

timeout?: number;
additionalSessionOptions?: Omit<
Partial<OpenAIClientOptions>,
"apiKey" | "maxRetries" | "timeout"
>;
session: OpenAISession;

@@ -243,3 +250,4 @@

this.maxRetries = init?.maxRetries ?? 10;
this.timeout = init?.timeout ?? undefined;
this.timeout = init?.timeout ?? 60 * 1000; // Default is 60 seconds
this.additionalSessionOptions = init?.additionalSessionOptions;

@@ -254,3 +262,3 @@ if (init?.azure || shouldUseAzure()) {

throw new Error(
"Azure API key is required for OpenAI Azure models. Please set the AZURE_OPENAI_KEY environment variable."
"Azure API key is required for OpenAI Azure models. Please set the AZURE_OPENAI_KEY environment variable.",
);

@@ -269,2 +277,3 @@ }

defaultQuery: { "api-version": azureConfig.apiVersion },
...this.additionalSessionOptions,
});

@@ -279,2 +288,3 @@ } else {

timeout: this.timeout,
...this.additionalSessionOptions,
});

@@ -281,0 +291,0 @@ }

@@ -1,2 +0,2 @@

import OpenAILLM from "openai";
import OpenAILLM, { ClientOptions as OpenAIClientOptions } from "openai";
import { CallbackManager, Event } from "../callbacks/CallbackManager";

@@ -85,2 +85,6 @@ import { handleOpenAIStream } from "../callbacks/utility/handleOpenAIStream";

maxTokens?: number;
additionalChatOptions?: Omit<
Partial<OpenAILLM.Chat.CompletionCreateParams>,
"max_tokens" | "messages" | "model" | "temperature" | "top_p" | "streaming"
>;

@@ -92,6 +96,14 @@ // OpenAI session params

session: OpenAISession;
additionalSessionOptions?: Omit<
Partial<OpenAIClientOptions>,
"apiKey" | "maxRetries" | "timeout"
>;
callbackManager?: CallbackManager;
constructor(init?: Partial<OpenAI> & { azure?: AzureOpenAIConfig }) {
constructor(
init?: Partial<OpenAI> & {
azure?: AzureOpenAIConfig;
},
) {
this.model = init?.model ?? "gpt-3.5-turbo";

@@ -103,3 +115,5 @@ this.temperature = init?.temperature ?? 0.1;

this.maxRetries = init?.maxRetries ?? 10;
this.timeout = init?.timeout ?? undefined; // Default is 60 seconds
this.timeout = init?.timeout ?? 60 * 1000; // Default is 60 seconds
this.additionalChatOptions = init?.additionalChatOptions;
this.additionalSessionOptions = init?.additionalSessionOptions;

@@ -128,2 +142,3 @@ if (init?.azure || shouldUseAzure()) {

defaultQuery: { "api-version": azureConfig.apiVersion },
...this.additionalSessionOptions,
});

@@ -138,2 +153,3 @@ } else {

timeout: this.timeout,
...this.additionalSessionOptions,
});

@@ -175,2 +191,3 @@ }

top_p: this.topP,
...this.additionalChatOptions,
};

@@ -177,0 +194,0 @@

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is not supported yet

Socket · SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc