
llamaindex - npm Package Compare versions

Comparing version 0.0.15 to 0.0.16

src/llm/azure.ts


CHANGELOG.md
# llamaindex
## 0.0.16
### Patch Changes
- ec12633: Breaking: make vector store abstraction async (thank you @tyre for the PR)
- 9214b06: Fix persistence bug (thanks @HenryHengZJ)
- 3316c6b: Add Azure OpenAI support
- 3316c6b: OpenAI Node v4-beta.8
## 0.0.15

@@ -4,0 +13,0 @@
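The first entry above is a breaking change: every `VectorStore` method that previously returned a plain value now returns a `Promise` (see the interface diff further down). A minimal migration sketch for direct callers, assuming a `vectorStore` instance and precomputed `embeddingResults` (both hypothetical):

```typescript
import { NodeWithEmbedding, VectorStore } from "llamaindex";

// 0.0.15: const ids = vectorStore.add(embeddingResults);
// 0.0.16: every vector store call must be awaited.
async function syncToStore(
  vectorStore: VectorStore,
  embeddingResults: NodeWithEmbedding[]
): Promise<string[]> {
  const ids = await vectorStore.add(embeddingResults); // now Promise<string[]>
  await vectorStore.persist("./storage/vector_store.json"); // now Promise<void>
  return ids;
}
```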


CONTRIBUTING.md

@@ -15,3 +15,3 @@ # Contributing

-You can checkout how Turborepo works using the built in [README-turborepo.md](README-turborepo.md)
+You can checkout how Turborepo works using the default [README-turborepo.md](/README-turborepo.md)

@@ -45,3 +45,3 @@ ## Getting Started

-To write new test cases write them in packages/core/src/tests
+To write new test cases write them in [packages/core/src/tests](/packages/core/src/tests)

@@ -52,2 +52,4 @@ We use Jest https://jestjs.io/ to write our test cases. Jest comes with a bunch of built in assertions using the expect function: https://jestjs.io/docs/expect

+There is an existing ["simple"](/apps/simple/README.md) demos folder with mainly NodeJS scripts. Feel free to add additional demos to that folder. If you would like to try out your changes in the core package with a new demo, you need to run the build command in the README.
You can create new demo applications in the apps folder. Just run pnpm init in the folder after you create it to create its own package.json

@@ -54,0 +56,0 @@

@@ -55,2 +55,7 @@ import OpenAI$1, { ClientOptions } from 'openai';

asRelatedNodeInfo(): RelatedNodeInfo;
/**
* Used with built in JSON.stringify
* @returns
*/
toJSON(): Record<string, any>;
}

@@ -79,2 +84,3 @@ /**

indexId: string;
constructor(init?: Partial<IndexNode>);
getType(): ObjectType;

@@ -90,2 +96,3 @@ }

}
declare function jsonToNode(json: any): TextNode;
/**

@@ -152,3 +159,5 @@ * A node with a similarity score

openai: OpenAI$1;
-constructor(options?: ClientOptions);
+constructor(options?: ClientOptions & {
+azure?: boolean;
+});
}

@@ -167,2 +176,9 @@

interface AzureOpenAIConfig {
apiKey?: string;
endpoint?: string;
apiVersion?: string;
deploymentName?: string;
}
type MessageType = "user" | "assistant" | "system" | "generic" | "function";

@@ -240,3 +256,5 @@ interface ChatMessage {

callbackManager?: CallbackManager;
-constructor(init?: Partial<OpenAI>);
+constructor(init?: Partial<OpenAI> & {
+azure?: AzureOpenAIConfig;
+});
mapMessageType(messageType: MessageType): "user" | "assistant" | "system" | "function";
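Both new constructor overloads take an `azure` object shaped like the `AzureOpenAIConfig` interface above. A usage sketch; the endpoint, API version, and deployment name are placeholders, not package defaults:

```typescript
import { OpenAI } from "llamaindex";

const llm = new OpenAI({
  model: "gpt-3.5-turbo",
  azure: {
    apiKey: process.env.AZURE_OPENAI_KEY, // the variable named in the package's error message
    endpoint: "https://my-resource.openai.azure.com", // placeholder
    apiVersion: "2023-05-15", // placeholder
    deploymentName: "my-gpt-35-deployment", // placeholder
  },
});
```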

@@ -415,2 +433,3 @@ chat(messages: ChatMessage[], parentEvent?: Event): Promise<ChatResponse>;

interface ExactMatchFilter {
filterType: "ExactMatch";
key: string;

@@ -422,2 +441,16 @@ value: string | number;

}
interface VectorStoreQuerySpec {
query: string;
filters: ExactMatchFilter[];
topK?: number;
}
interface MetadataInfo {
name: string;
type: string;
description: string;
}
interface VectorStoreInfo {
metadataInfo: MetadataInfo[];
contentInfo: string;
}
interface VectorStoreQuery {

@@ -437,6 +470,6 @@ queryEmbedding?: number[];

client(): any;
-add(embeddingResults: NodeWithEmbedding[]): string[];
-delete(refDocId: string, deleteKwargs?: any): void;
-query(query: VectorStoreQuery, kwargs?: any): VectorStoreQueryResult;
-persist(persistPath: string, fs?: GenericFileSystem): void;
+add(embeddingResults: NodeWithEmbedding[]): Promise<string[]>;
+delete(refDocId: string, deleteKwargs?: any): Promise<void>;
+query(query: VectorStoreQuery, kwargs?: any): Promise<VectorStoreQueryResult>;
+persist(persistPath: string, fs?: GenericFileSystem): Promise<void>;
}
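For implementers the change is mechanical: each method gains a `Promise` return type. A minimal in-memory sketch against the methods visible in this excerpt (the class name is invented, and the full interface may contain members outside the hunk):

```typescript
import {
  GenericFileSystem,
  NodeWithEmbedding,
  VectorStoreQuery,
  VectorStoreQueryResult,
} from "llamaindex";

class InMemoryVectorStore {
  private embeddings = new Map<string, number[]>();

  client(): any {
    return null; // no backing client for a purely in-memory store
  }

  async add(embeddingResults: NodeWithEmbedding[]): Promise<string[]> {
    for (const result of embeddingResults) {
      this.embeddings.set(result.node.id_, result.embedding);
    }
    return embeddingResults.map((result) => result.node.id_);
  }

  async delete(refDocId: string): Promise<void> {
    this.embeddings.delete(refDocId); // simplification: treats node ids as ref doc ids
  }

  async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
    // A real store ranks stored embeddings by similarity; this stub returns nothing.
    return { similarities: [], ids: [] };
  }

  async persist(persistPath: string, fs?: GenericFileSystem): Promise<void> {
    // Nothing durable to write in this sketch.
  }
}
```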

@@ -487,3 +520,5 @@

session: OpenAISession;
-constructor(init?: Partial<OpenAIEmbedding>);
+constructor(init?: Partial<OpenAIEmbedding> & {
+azure?: AzureOpenAIConfig;
+});
private getOpenAIEmbedding;

@@ -1191,3 +1226,3 @@ getTextEmbedding(text: string): Promise<number[]>;

declare const DEFAULT_INDEX_STORE_PERSIST_FILENAME = "index_store.json";
-declare const DEFAULT_DOC_STORE_PERSIST_FILENAME = "docstore.json";
+declare const DEFAULT_DOC_STORE_PERSIST_FILENAME = "doc_store.json";
declare const DEFAULT_VECTOR_STORE_PERSIST_FILENAME = "vector_store.json";

@@ -1197,2 +1232,2 @@ declare const DEFAULT_GRAPH_STORE_PERSIST_FILENAME = "graph_store.json";

-export { ALL_AVAILABLE_LLAMADEUCE_MODELS, ALL_AVAILABLE_OPENAI_MODELS, Anthropic, BaseEmbedding, BaseIndex, BaseIndexInit, BaseNode, BaseOutputParser, BaseQueryEngine, BaseQuestionGenerator, BaseReader, BaseRetriever, BaseTool, CallbackManager, ChatEngine, ChatMessage, ChatResponse, CompactAndRefine, CompleteFileSystem, CompletionResponse, CondenseQuestionChatEngine, ContextChatEngine, DEFAULT_CHUNK_OVERLAP, DEFAULT_CHUNK_OVERLAP_RATIO, DEFAULT_CHUNK_SIZE, DEFAULT_COLLECTION, DEFAULT_CONTEXT_WINDOW, DEFAULT_DOC_STORE_PERSIST_FILENAME, DEFAULT_EMBEDDING_DIM, DEFAULT_FS, DEFAULT_GRAPH_STORE_PERSIST_FILENAME, DEFAULT_INDEX_STORE_PERSIST_FILENAME, DEFAULT_NAMESPACE, DEFAULT_NUM_OUTPUTS, DEFAULT_PADDING, DEFAULT_PERSIST_DIR, DEFAULT_SIMILARITY_TOP_K, DEFAULT_VECTOR_STORE_PERSIST_FILENAME, DeuceChatStrategy, Document, Event, EventTag, EventType, GPT4_MODELS, GenericFileSystem, InMemoryFileSystem, IndexDict, IndexList, IndexNode, IndexStruct, IndexStructType, LLM, LLMQuestionGenerator, ListIndex, ListIndexLLMRetriever, ListIndexRetriever, ListRetrieverMode, LlamaDeuce, MessageType, MetadataMode, NodeParser, NodeRelationship, NodeWithEmbedding, NodeWithScore, ObjectType, OpenAI, OpenAIEmbedding, PDFReader, QueryEngineTool, Refine, RelatedNodeInfo, RelatedNodeType, Response, ResponseSynthesizer, RetrievalCallbackResponse, RetrieverQueryEngine, SentenceSplitter, ServiceContext, ServiceContextOptions, SimilarityType, SimpleChatEngine, SimpleDirectoryReader, SimpleDirectoryReaderLoadDataProps, SimpleNodeParser, SimplePrompt, SimpleResponseBuilder, StorageContext, StreamCallbackResponse, StreamToken, StructuredOutput, SubQuestion, SubQuestionOutputParser, SubQuestionQueryEngine, TURBO_MODELS, TextFileReader, TextNode, ToolMetadata, TreeSummarize, VectorIndexConstructorProps, VectorIndexOptions, VectorIndexRetriever, VectorStoreIndex, WalkableFileSystem, buildToolsText, contextSystemPrompt, defaultChoiceSelectPrompt, defaultCondenseQuestionPrompt, defaultRefinePrompt, defaultSubQuestionPrompt, defaultSummaryPrompt, defaultTextQaPrompt, exists, getNodeFS, getNodesFromDocument, getResponseBuilder, getTextSplitsFromDocument, getTopKEmbeddings, getTopKEmbeddingsLearner, getTopKMMREmbeddings, globalsHelper, jsonToIndexStruct, messagesToHistoryStr, serviceContextFromDefaults, serviceContextFromServiceContext, similarity, storageContextFromDefaults, walk };
+export { ALL_AVAILABLE_LLAMADEUCE_MODELS, ALL_AVAILABLE_OPENAI_MODELS, Anthropic, BaseEmbedding, BaseIndex, BaseIndexInit, BaseNode, BaseOutputParser, BaseQueryEngine, BaseQuestionGenerator, BaseReader, BaseRetriever, BaseTool, CallbackManager, ChatEngine, ChatMessage, ChatResponse, CompactAndRefine, CompleteFileSystem, CompletionResponse, CondenseQuestionChatEngine, ContextChatEngine, DEFAULT_CHUNK_OVERLAP, DEFAULT_CHUNK_OVERLAP_RATIO, DEFAULT_CHUNK_SIZE, DEFAULT_COLLECTION, DEFAULT_CONTEXT_WINDOW, DEFAULT_DOC_STORE_PERSIST_FILENAME, DEFAULT_EMBEDDING_DIM, DEFAULT_FS, DEFAULT_GRAPH_STORE_PERSIST_FILENAME, DEFAULT_INDEX_STORE_PERSIST_FILENAME, DEFAULT_NAMESPACE, DEFAULT_NUM_OUTPUTS, DEFAULT_PADDING, DEFAULT_PERSIST_DIR, DEFAULT_SIMILARITY_TOP_K, DEFAULT_VECTOR_STORE_PERSIST_FILENAME, DeuceChatStrategy, Document, Event, EventTag, EventType, ExactMatchFilter, GPT4_MODELS, GenericFileSystem, InMemoryFileSystem, IndexDict, IndexList, IndexNode, IndexStruct, IndexStructType, LLM, LLMQuestionGenerator, ListIndex, ListIndexLLMRetriever, ListIndexRetriever, ListRetrieverMode, LlamaDeuce, MessageType, MetadataFilters, MetadataInfo, MetadataMode, NodeParser, NodeRelationship, NodeWithEmbedding, NodeWithScore, ObjectType, OpenAI, OpenAIEmbedding, PDFReader, QueryEngineTool, Refine, RelatedNodeInfo, RelatedNodeType, Response, ResponseSynthesizer, RetrievalCallbackResponse, RetrieverQueryEngine, SentenceSplitter, ServiceContext, ServiceContextOptions, SimilarityType, SimpleChatEngine, SimpleDirectoryReader, SimpleDirectoryReaderLoadDataProps, SimpleNodeParser, SimplePrompt, SimpleResponseBuilder, StorageContext, StreamCallbackResponse, StreamToken, StructuredOutput, SubQuestion, SubQuestionOutputParser, SubQuestionQueryEngine, TURBO_MODELS, TextFileReader, TextNode, ToolMetadata, TreeSummarize, VectorIndexConstructorProps, VectorIndexOptions, VectorIndexRetriever, VectorStore, VectorStoreIndex, VectorStoreInfo, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult, VectorStoreQuerySpec, WalkableFileSystem, buildToolsText, contextSystemPrompt, defaultChoiceSelectPrompt, defaultCondenseQuestionPrompt, defaultRefinePrompt, defaultSubQuestionPrompt, defaultSummaryPrompt, defaultTextQaPrompt, exists, getNodeFS, getNodesFromDocument, getResponseBuilder, getTextSplitsFromDocument, getTopKEmbeddings, getTopKEmbeddingsLearner, getTopKMMREmbeddings, globalsHelper, jsonToIndexStruct, jsonToNode, messagesToHistoryStr, serviceContextFromDefaults, serviceContextFromServiceContext, similarity, storageContextFromDefaults, walk };
{
"name": "llamaindex",
"version": "0.0.15",
"version": "0.0.16",
"dependencies": {
"@anthropic-ai/sdk": "^0.5.8",
"js-tiktoken": "^1.0.7",
"@anthropic-ai/sdk": "^0.5.9",
"lodash": "^4.17.21",
"openai": "4.0.0-beta.6",
"openai": "4.0.0-beta.8",
"pdf-parse": "^1.1.1",
"replicate": "^0.12.3",
"replicate": "^0.14.1",
"tiktoken-node": "^0.0.6",

@@ -12,0 +11,0 @@ "uuid": "^9.0.0",

# LlamaIndex.TS
LlamaIndex is a data framework for your LLM application.
Use your own data with large language models (LLMs, OpenAI ChatGPT and others) in TypeScript and JavaScript.
Documentation: https://ts.llamaindex.ai/
## What is LlamaIndex.TS?

@@ -17,4 +21,7 @@

```bash
export OPENAI_API_KEY="sk-......" # Replace with your key from https://platform.openai.com/account/api-keys
-npx tsc --init # if needed
+pnpm init
+pnpm install typescript
+pnpm exec tsc --init # if needed
pnpm install llamaindex
pnpm install @types/node
```

@@ -58,24 +65,34 @@

```bash
-npx ts-node example.ts
+pnpm dlx ts-node example.ts
```
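For reference, a minimal `example.ts` of the kind these commands assume, sketched against the exports in this release (the file path and question are illustrative, and exact signatures may differ in such an early version):

```typescript
import fs from "node:fs/promises";
import { Document, VectorStoreIndex } from "llamaindex";

async function main() {
  // Wrap raw text in a Document, the top-level data container.
  const essay = await fs.readFile("./data/essay.txt", "utf-8"); // illustrative path
  const document = new Document({ text: essay });

  // Split, embed, and index the document, then query it.
  const index = await VectorStoreIndex.fromDocuments([document]);
  const queryEngine = index.asQueryEngine();
  const response = await queryEngine.query("What is this essay about?");

  console.log(response.toString());
}

main();
```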
## Playground
Check out our NextJS playground at https://llama-playground.vercel.app/. The source is available at https://github.com/run-llama/ts-playground
## Core concepts for getting started:
-- [Document](packages/core/src/Node.ts): A document represents a text file, PDF file or other contiguous piece of data.
+- [Document](/packages/core/src/Node.ts): A document represents a text file, PDF file or other contiguous piece of data.
-- [Node](packages/core/src/Node.ts): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
+- [Node](/packages/core/src/Node.ts): The basic data building block. Most commonly, these are parts of the document split into manageable pieces that are small enough to be fed into an embedding model and LLM.
-- [Embedding](packages/core/src/Embedding.ts): Embeddings are sets of floating point numbers which represent the data in a Node. By comparing the similarity of embeddings, we can derive an understanding of the similarity of two pieces of data. One use case is to compare the embedding of a question with the embeddings of our Nodes to see which Nodes may contain the data needed to answer that question.
+- [Embedding](/packages/core/src/Embedding.ts): Embeddings are sets of floating point numbers which represent the data in a Node. By comparing the similarity of embeddings, we can derive an understanding of the similarity of two pieces of data. One use case is to compare the embedding of a question with the embeddings of our Nodes to see which Nodes may contain the data needed to answer that question.
-- [Indices](packages/core/src/indices/): Indices store the Nodes and the embeddings of those nodes. QueryEngines retrieve Nodes from these Indices using embedding similarity.
+- [Indices](/packages/core/src/indices/): Indices store the Nodes and the embeddings of those nodes. QueryEngines retrieve Nodes from these Indices using embedding similarity.
-- [QueryEngine](packages/core/src/QueryEngine.ts): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected Nodes from your Index to give the LLM the context it needs to answer your query.
+- [QueryEngine](/packages/core/src/QueryEngine.ts): Query engines are what generate the query you put in and give you back the result. Query engines generally combine a pre-built prompt with selected Nodes from your Index to give the LLM the context it needs to answer your query.
-- [ChatEngine](packages/core/src/ChatEngine.ts): A ChatEngine helps you build a chatbot that will interact with your Indices.
+- [ChatEngine](/packages/core/src/ChatEngine.ts): A ChatEngine helps you build a chatbot that will interact with your Indices.
-- [SimplePrompt](packages/core/src/Prompt.ts): A simple standardized function call definition that takes in inputs and formats them in a template literal. SimplePrompts can be specialized using currying and combined using other SimplePrompt functions.
+- [SimplePrompt](/packages/core/src/Prompt.ts): A simple standardized function call definition that takes in inputs and formats them in a template literal. SimplePrompts can be specialized using currying and combined using other SimplePrompt functions.
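To make the SimplePrompt concept concrete, a small sketch (this prompt is invented for illustration; it is not one of the package defaults like `defaultTextQaPrompt`, and it assumes the `(input) => string` shape described above):

```typescript
import { SimplePrompt } from "llamaindex";

// A SimplePrompt is a plain function from named inputs to a prompt string.
const summarizePrompt: SimplePrompt = (input) =>
  `Summarize the following text in one sentence:\n${input?.context ?? ""}`;

// Specialization by currying: fix one input now, supply the rest later.
const summarizeInTone =
  (tone: string): SimplePrompt =>
  (input) =>
    `Use a ${tone} tone. ${summarizePrompt(input)}`;

const prompt = summarizeInTone("friendly")({ context: "LlamaIndex is a data framework." });
```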
## Supported LLMs:
- OpenAI GPT-3.5-turbo and GPT-4
- Anthropic Claude Instant and Claude 2
- Llama2 Chat LLMs (70B, 13B, and 7B parameters)
## Contributing:
-We are in the very early days of LlamaIndex.TS. If you’re interested in hacking on it with us check out our [contributing guide](CONTRIBUTING.md)
+We are in the very early days of LlamaIndex.TS. If you’re interested in hacking on it with us check out our [contributing guide](/CONTRIBUTING.md)

@@ -82,0 +99,0 @@ ## Bugs? Questions?

import { DEFAULT_SIMILARITY_TOP_K } from "./constants";
import {
AzureOpenAIConfig,
getAzureConfigFromEnv,
getAzureModel,
getAzureBaseUrl,
shouldUseAzure,
} from "./llm/azure";
import { OpenAISession, getOpenAISession } from "./llm/openai";

@@ -222,3 +229,3 @@ import { VectorStoreQueryMode } from "./storage/vectorStore/types";

-constructor(init?: Partial<OpenAIEmbedding>) {
+constructor(init?: Partial<OpenAIEmbedding> & { azure?: AzureOpenAIConfig }) {
super();

@@ -228,10 +235,36 @@

-this.apiKey = init?.apiKey ?? undefined;
this.maxRetries = init?.maxRetries ?? 10;
this.timeout = init?.timeout ?? undefined;
-this.session = getOpenAISession({
-apiKey: this.apiKey,
-maxRetries: this.maxRetries,
-timeout: this.timeout,
-});
+if (init?.azure || shouldUseAzure()) {
+const azureConfig = getAzureConfigFromEnv({
+...init?.azure,
+model: getAzureModel(this.model),
+});
+if (!azureConfig.apiKey) {
+throw new Error(
+"Azure API key is required for OpenAI Azure models. Please set the AZURE_OPENAI_KEY environment variable."
+);
+}
+this.apiKey = azureConfig.apiKey;
+this.session =
+init?.session ??
+getOpenAISession({
+azure: true,
+apiKey: this.apiKey,
+baseURL: getAzureBaseUrl(azureConfig),
+maxRetries: this.maxRetries,
+timeout: this.timeout,
+defaultQuery: { "api-version": azureConfig.apiVersion },
+});
+} else {
+this.apiKey = init?.apiKey ?? undefined;
+this.session = getOpenAISession({
+apiKey: this.apiKey,
+maxRetries: this.maxRetries,
+timeout: this.timeout,
+});
+}
}

@@ -238,0 +271,0 @@
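The same branch appears in `OpenAIEmbedding` above: with no explicit `azure` object, `shouldUseAzure()` and `getAzureConfigFromEnv` presumably fall back to the environment. `AZURE_OPENAI_KEY` is the only variable this diff names, so the sketch below sets the remaining fields explicitly rather than guessing other variable names (values are placeholders):

```typescript
import { OpenAIEmbedding } from "llamaindex";

// AZURE_OPENAI_KEY would normally be exported in the shell, not set in code.
const embedModel = new OpenAIEmbedding({
  azure: {
    endpoint: "https://my-resource.openai.azure.com", // placeholder
    apiVersion: "2023-05-15", // placeholder
    deploymentName: "my-embedding-deployment", // placeholder
  },
});

// getTextEmbedding(text: string): Promise<number[]> appears in the typings above.
// const vector = await embedModel.getTextEmbedding("hello world");
```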

@@ -30,1 +30,2 @@ export * from "./ChatEngine";

export * from "./storage/StorageContext";
export * from "./storage/vectorStore/types";

@@ -1,2 +0,2 @@

-import { Document, BaseNode } from "../Node";
+import { Document, BaseNode, jsonToNode } from "../Node";
import { v4 as uuidv4 } from "uuid";

@@ -77,3 +77,8 @@ import { BaseRetriever } from "../Retriever";

const indexDict = new IndexDict(json.indexId, json.summary);
-indexDict.nodesDict = json.nodesDict;
+indexDict.nodesDict = Object.entries(json.nodesDict).reduce<
+Record<string, BaseNode>
+>((acc, [key, value]) => {
+acc[key] = jsonToNode(value);
+return acc;
+}, {});
return indexDict;

@@ -80,0 +85,0 @@ } else {

@@ -44,3 +44,3 @@ import { VectorStoreIndex } from "./VectorStoreIndex";

};
-const result = this.index.vectorStore.query(q);
+const result = await this.index.vectorStore.query(q);

@@ -47,0 +47,0 @@ let nodesWithScores: NodeWithScore[] = [];

@@ -12,2 +12,9 @@ import { CallbackManager, Event } from "../callbacks/CallbackManager";

} from "./anthropic";
import {
AzureOpenAIConfig,
getAzureConfigFromEnv,
getAzureModel,
getAzureBaseUrl,
shouldUseAzure,
} from "./azure";

@@ -88,3 +95,3 @@ export type MessageType =

-constructor(init?: Partial<OpenAI>) {
+constructor(init?: Partial<OpenAI> & { azure?: AzureOpenAIConfig }) {
this.model = init?.model ?? "gpt-3.5-turbo";

@@ -95,13 +102,39 @@ this.temperature = init?.temperature ?? 0;

-this.apiKey = init?.apiKey ?? undefined;
this.maxRetries = init?.maxRetries ?? 10;
this.timeout = init?.timeout ?? undefined; // Default is 60 seconds
-this.session =
-init?.session ??
-getOpenAISession({
-apiKey: this.apiKey,
-maxRetries: this.maxRetries,
-timeout: this.timeout,
-});
+if (init?.azure || shouldUseAzure()) {
+const azureConfig = getAzureConfigFromEnv({
+...init?.azure,
+model: getAzureModel(this.model),
+});
+if (!azureConfig.apiKey) {
+throw new Error(
+"Azure API key is required for OpenAI Azure models. Please set the AZURE_OPENAI_KEY environment variable."
+);
+}
+this.apiKey = azureConfig.apiKey;
+this.session =
+init?.session ??
+getOpenAISession({
+azure: true,
+apiKey: this.apiKey,
+baseURL: getAzureBaseUrl(azureConfig),
+maxRetries: this.maxRetries,
+timeout: this.timeout,
+defaultQuery: { "api-version": azureConfig.apiVersion },
+});
+} else {
+this.apiKey = init?.apiKey ?? undefined;
+this.session =
+init?.session ??
+getOpenAISession({
+apiKey: this.apiKey,
+maxRetries: this.maxRetries,
+timeout: this.timeout,
+});
+}
this.callbackManager = init?.callbackManager;

@@ -157,5 +190,6 @@ }

// Non-streaming
-const response = await this.session.openai.chat.completions.create(
-baseRequestParams
-);
+const response = await this.session.openai.chat.completions.create({
+...baseRequestParams,
+stream: false,
+});
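In the v4 SDK the `stream` flag discriminates the return type of `chat.completions.create` (an async stream when `true`, a plain response otherwise), so the explicit `stream: false` presumably lets TypeScript select the non-streaming overload under beta.8.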

@@ -162,0 +196,0 @@ const content = response.choices[0].message?.content ?? "";

import OpenAI, { ClientOptions } from "openai";
import _ from "lodash";
export class AzureOpenAI extends OpenAI {
protected override authHeaders() {
return { "api-key": this.apiKey };
}
}
export class OpenAISession {
openai: OpenAI;
-constructor(options: ClientOptions = {}) {
+constructor(options: ClientOptions & { azure?: boolean } = {}) {
if (!options.apiKey) {

@@ -18,3 +24,7 @@ if (typeof process !== undefined) {

-this.openai = new OpenAI(options);
+if (options.azure) {
+this.openai = new AzureOpenAI(options);
+} else {
+this.openai = new OpenAI(options);
+}
}

@@ -35,3 +45,5 @@ }

*/
-export function getOpenAISession(options: ClientOptions = {}) {
+export function getOpenAISession(
+options: ClientOptions & { azure?: boolean } = {}
+) {
let session = defaultOpenAISession.find((session) => {

@@ -38,0 +50,0 @@ return _.isEqual(session.options, options);
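`getOpenAISession` is internal (it does not appear in the package's export list), but the caching is visible here: sessions are matched by deep equality of their options via `_.isEqual`, so repeated calls with identical options should reuse one client. A sketch against the internal module path:

```typescript
import { getOpenAISession } from "./llm/openai"; // internal module, not a public export

const a = getOpenAISession({ apiKey: "sk-...", maxRetries: 10 });
const b = getOpenAISession({ apiKey: "sk-...", maxRetries: 10 });
console.log(a === b); // presumably true: deep-equal options hit the cache

const c = getOpenAISession({ azure: true, apiKey: "sk-..." });
console.log(a === c); // false: different options build a new (Azure) session
```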

@@ -131,2 +131,10 @@ import { v4 as uuidv4 } from "uuid";

}
/**
* Used with built in JSON.stringify
* @returns
*/
toJSON(): Record<string, any> {
return { ...this, type: this.getType() };
}
}

@@ -208,2 +216,7 @@

constructor(init?: Partial<IndexNode>) {
super(init);
Object.assign(this, init);
}
getType(): ObjectType {

@@ -232,2 +245,19 @@ return ObjectType.INDEX;

export function jsonToNode(json: any) {
if (!json.type) {
throw new Error("Node type not found");
}
switch (json.type) {
case ObjectType.TEXT:
return new TextNode(json);
case ObjectType.INDEX:
return new IndexNode(json);
case ObjectType.DOCUMENT:
return new Document(json);
default:
throw new Error(`Invalid node type: ${json.type}`);
}
}
// export class ImageDocument extends Document {

@@ -234,0 +264,0 @@ // image?: string;
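Paired with the `toJSON` method added above, `jsonToNode` completes a serialization round trip; a short sketch:

```typescript
import { ObjectType, TextNode, jsonToNode } from "llamaindex";

const node = new TextNode({ text: "hello world" });

// toJSON tags the plain object with the node's type...
const serialized = JSON.stringify(node.toJSON());

// ...which is the tag jsonToNode switches on to rebuild the right class.
const restored = jsonToNode(JSON.parse(serialized));

console.log(restored instanceof TextNode); // true
console.log(restored.getType() === ObjectType.TEXT); // true
```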

export const DEFAULT_COLLECTION = "data";
export const DEFAULT_PERSIST_DIR = "./storage";
export const DEFAULT_INDEX_STORE_PERSIST_FILENAME = "index_store.json";
-export const DEFAULT_DOC_STORE_PERSIST_FILENAME = "docstore.json";
+export const DEFAULT_DOC_STORE_PERSIST_FILENAME = "doc_store.json";
export const DEFAULT_VECTOR_STORE_PERSIST_FILENAME = "vector_store.json";

@@ -6,0 +6,0 @@ export const DEFAULT_GRAPH_STORE_PERSIST_FILENAME = "graph_store.json";
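Note the renamed default: docstores persisted by 0.0.15 were written as `docstore.json`, while 0.0.16 looks for `doc_store.json`, so existing persisted stores presumably need the file renamed (or the filename overridden) to be picked up.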

@@ -54,7 +54,7 @@ import * as path from "path";

-get(textId: string): number[] {
+async get(textId: string): Promise<number[]> {
return this.data.embeddingDict[textId];
}
-add(embeddingResults: NodeWithEmbedding[]): string[] {
+async add(embeddingResults: NodeWithEmbedding[]): Promise<string[]> {
for (let result of embeddingResults) {

@@ -73,3 +73,3 @@ this.data.embeddingDict[result.node.id_] = result.embedding;

if (this.persistPath) {
-this.persist(this.persistPath, this.fs);
+await this.persist(this.persistPath, this.fs);
}

@@ -80,3 +80,3 @@

-delete(refDocId: string): void {
+async delete(refDocId: string): Promise<void> {
let textIdsToDelete = Object.keys(this.data.textIdToRefDocId).filter(

@@ -89,5 +89,6 @@ (textId) => this.data.textIdToRefDocId[textId] === refDocId

}
return Promise.resolve();
}
-query(query: VectorStoreQuery): VectorStoreQueryResult {
+async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
if (!_.isNil(query.filters)) {

@@ -144,6 +145,6 @@ throw new Error(

-return {
+return Promise.resolve({
similarities: topSimilarities,
ids: topIds,
-};
+});
}

@@ -150,0 +151,0 @@

@@ -24,2 +24,3 @@ import { BaseNode } from "../../Node";

export interface ExactMatchFilter {
filterType: "ExactMatch";
key: string;

@@ -65,6 +66,6 @@ value: string | number;

client(): any;
-add(embeddingResults: NodeWithEmbedding[]): string[];
-delete(refDocId: string, deleteKwargs?: any): void;
-query(query: VectorStoreQuery, kwargs?: any): VectorStoreQueryResult;
-persist(persistPath: string, fs?: GenericFileSystem): void;
+add(embeddingResults: NodeWithEmbedding[]): Promise<string[]>;
+delete(refDocId: string, deleteKwargs?: any): Promise<void>;
+query(query: VectorStoreQuery, kwargs?: any): Promise<VectorStoreQueryResult>;
+persist(persistPath: string, fs?: GenericFileSystem): Promise<void>;
}
}

