@types/lunr
Comparing version 0.5.29 to 2.1.0
lunr/index.d.ts
@@ -1,8 +0,9 @@ | ||
// Type definitions for lunr.js 0.5.4 | ||
// Type definitions for lunr.js 2.1 | ||
// Project: https://github.com/olivernn/lunr.js | ||
// Definitions by: Sebastian Lenz <https://github.com/sebastian-lenz> | ||
// Definitions by: Sean Tan <https://github.com/seantanly> | ||
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped | ||
// TypeScript Version: 2.3 | ||
/** | ||
* lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 0.5.4 | ||
* lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright | ||
* Copyright (C) 2014 Oliver Nightingale | ||
@@ -12,881 +13,963 @@ * MIT Licensed | ||
*/ | ||
declare namespace lunr | ||
{ | ||
var version:string; | ||
declare namespace lunr { | ||
namespace Builder { | ||
/** | ||
* A plugin is a function that is called with the index builder as its context. | ||
* Plugins can be used to customise or extend the behaviour of the index | ||
* in some way. A plugin is just a function that encapsulates the custom | ||
* behaviour that should be applied when building the index. | ||
* | ||
* The plugin function will be called with the index builder as its argument, additional | ||
* arguments can also be passed when calling use. The function will be called | ||
* with the index builder as its context. | ||
*/ | ||
type Plugin = (this: Builder, ...args: any[]) => void; | ||
} | ||
/** | ||
* A function for splitting a string into tokens ready to be inserted into | ||
* the search index. Uses `lunr.tokenizer.seperator` to split strings, change | ||
* the value of this property to change how strings are split into tokens. | ||
* lunr.Builder performs indexing on a set of documents and | ||
* returns instances of lunr.Index ready for querying. | ||
* | ||
* @module | ||
* @param {String} obj The string to convert into tokens | ||
* @see lunr.tokenizer.seperator | ||
* @returns {Array} | ||
* All configuration of the index is done via the builder, the | ||
* fields to index, the document reference, the text processing | ||
* pipeline and document scoring parameters are all set on the | ||
* builder before indexing. | ||
*/ | ||
function tokenizer(obj: any): string[]; | ||
class Builder { | ||
/** | ||
* Internal reference to the document reference field. | ||
*/ | ||
_ref: string; | ||
interface TokenizerFunction { | ||
// obj is usually a string, but the default lunr tokenizer handles null, | ||
// undefined and arrays of objects with a .toString() method. | ||
(obj: any): string[]; | ||
} | ||
/** | ||
* Internal reference to the document fields to index. | ||
*/ | ||
_fields: string[]; | ||
module tokenizer { | ||
/** | ||
* The separator used to split a string into tokens. Override this property to change the behaviour of | ||
* `lunr.tokenizer` when tokenizing strings. By default this splits on whitespace and hyphens. | ||
* | ||
* @static | ||
* @see lunr.tokenizer | ||
* | ||
* (Note: this is misspelled in the original API, kept for compatibility sake) | ||
* The inverted index maps terms to document fields. | ||
*/ | ||
var seperator: RegExp | string; | ||
invertedIndex: object; | ||
var label: string; | ||
/** | ||
* Keeps track of document term frequencies. | ||
*/ | ||
documentTermFrequencies: object; | ||
var registeredFunctions: {[label: string]: TokenizerFunction}; | ||
/** | ||
* Keeps track of the length of documents added to the index. | ||
*/ | ||
documentLengths: object; | ||
/** | ||
* Register a tokenizer function. | ||
* | ||
* Functions that are used as tokenizers should be registered if they are to be used with a serialised index. | ||
* | ||
* Registering a function does not add it to an index, functions must still be associated with a specific index for them to be used when indexing and searching documents. | ||
* | ||
* @param {Function} fn The function to register. | ||
* @param {String} label The label to register this function with | ||
* @memberOf tokenizer | ||
* Function for splitting strings into tokens for indexing. | ||
*/ | ||
function registerFunction(fn: TokenizerFunction, label: string): void; | ||
tokenizer: typeof tokenizer; | ||
/** | ||
* Loads a previously serialised tokenizer. | ||
* | ||
* A tokenizer function to be loaded must already be registered with lunr.tokenizer. | ||
* If the serialised tokenizer has not been registered then an error will be thrown. | ||
* | ||
* @param {String} label The label of the serialised tokenizer. | ||
* @returns {Function} | ||
* @memberOf tokenizer | ||
* The pipeline performs text processing on tokens before indexing. | ||
*/ | ||
function load(label: string): TokenizerFunction; | ||
} | ||
pipeline: Pipeline; | ||
/** | ||
* A pipeline for processing search terms before querying the index. | ||
*/ | ||
searchPipeline: Pipeline; | ||
/** | ||
* lunr.stemmer is an english language stemmer, this is a JavaScript implementation of | ||
* the PorterStemmer taken from http://tartarus.org/~martin | ||
* | ||
* @param token The string to stem | ||
*/ | ||
function stemmer(token:string):string; | ||
/** | ||
* Keeps track of the total number of documents indexed. | ||
*/ | ||
documentCount: number; | ||
/** | ||
* lunr.stopWordFilter is an English language stop word list filter, any words contained | ||
* in the list will not be passed through the filter. | ||
* | ||
* This is intended to be used in the Pipeline. If the token does not pass the filter then | ||
* undefined will be returned. | ||
* | ||
* @param token The token to pass through the filter | ||
*/ | ||
function stopWordFilter(token:string):string; | ||
namespace stopWordFilter { | ||
var stopWords:SortedSet<string>; | ||
} | ||
/** | ||
* lunr.trimmer is a pipeline function for trimming non word characters from the beginning | ||
* and end of tokens before they enter the index. | ||
* | ||
* This implementation may not work correctly for non latin characters and should either | ||
* be removed or adapted for use with languages with non-latin characters. | ||
* @param token The token to pass through the filter | ||
*/ | ||
function trimmer(token:string):string; | ||
/** | ||
* lunr.EventEmitter is an event emitter for lunr. It manages adding and removing event handlers | ||
* and triggering events and their handlers. | ||
*/ | ||
class EventEmitter | ||
{ | ||
/** | ||
* Can bind a single function to many different events in one call. | ||
* | ||
* @param eventName The name(s) of events to bind this function to. | ||
* @param handler The function to call when an event is fired. Binds a handler | ||
* function to a specific event(s). | ||
* A parameter to control field length normalization, setting this to 0 disables normalization, 1 fully normalizes field lengths, the default value is 0.75. | ||
*/ | ||
addListener(eventName:string, handler:Function):void; | ||
addListener(eventName:string, eventName2:string, handler:Function):void; | ||
addListener(eventName:string, eventName2:string, eventName3:string, handler:Function):void; | ||
addListener(eventName:string, eventName2:string, eventName3:string, eventName4:string, handler:Function):void; | ||
addListener(eventName:string, eventName2:string, eventName3:string, eventName4:string, eventName5:string, handler:Function):void; | ||
_b: number; | ||
/** | ||
* Removes a handler function from a specific event. | ||
* | ||
* @param eventName The name of the event to remove this function from. | ||
* @param handler The function to remove from an event. | ||
* A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2. | ||
*/ | ||
removeListener(eventName:string, handler:Function):void; | ||
_k1: number; | ||
/** | ||
* Calls all functions bound to the given event. | ||
* | ||
* Additional data can be passed to the event handler as arguments to emit after the event name. | ||
* | ||
* @param eventName The name of the event to emit. | ||
* @param args | ||
* A counter incremented for each unique term, used to identify a term's position in the vector space. | ||
*/ | ||
emit(eventName:string, ...args:any[]):void; | ||
termIndex: number; | ||
/** | ||
* Checks whether a handler has ever been stored against an event. | ||
* | ||
* @param eventName The name of the event to check. | ||
* A list of metadata keys that have been whitelisted for entry in the index. | ||
*/ | ||
hasHandler(eventName:string):boolean; | ||
} | ||
metadataWhitelist: string[]; | ||
constructor() | ||
interface IPipelineFunction { | ||
(token:string):string; | ||
(token:string, tokenIndex:number):string; | ||
(token:string, tokenIndex:number, tokens:string[]):string; | ||
} | ||
/** | ||
* lunr.Pipelines maintain an ordered list of functions to be applied to all tokens in documents | ||
* entering the search index and queries being run against the index. | ||
* | ||
* An instance of lunr.Index created with the lunr shortcut will contain a pipeline with a stop | ||
* word filter and an English language stemmer. Extra functions can be added before or after either | ||
* of these functions or these default functions can be removed. | ||
* | ||
* When run the pipeline will call each function in turn, passing a token, the index of that token | ||
* in the original list of all tokens and finally a list of all the original tokens. | ||
* | ||
* The output of functions in the pipeline will be passed to the next function in the pipeline. | ||
* To exclude a token from entering the index the function should return undefined, the rest of | ||
* the pipeline will not be called with this token. | ||
* | ||
* For serialisation of pipelines to work, all functions used in an instance of a pipeline should | ||
* be registered with lunr.Pipeline. Registered functions can then be loaded. If trying to load a | ||
* serialised pipeline that uses functions that are not registered an error will be thrown. | ||
* | ||
* If not planning on serialising the pipeline then registering pipeline functions is not necessary. | ||
*/ | ||
class Pipeline | ||
{ | ||
registeredFunctions:{[label:string]:Function}; | ||
/** | ||
* Register a function with the pipeline. | ||
* Sets the document field used as the document reference. Every document must have this field. | ||
* The type of this field in the document should be a string, if it is not a string it will be | ||
* coerced into a string by calling toString. | ||
* | ||
* Functions that are used in the pipeline should be registered if the pipeline needs to be | ||
* serialised, or a serialised pipeline needs to be loaded. | ||
* The default ref is 'id'. | ||
* | ||
* Registering a function does not add it to a pipeline, functions must still be added to instances | ||
* of the pipeline for them to be used when running a pipeline. | ||
* The ref should _not_ be changed during indexing, it should be set before any documents are | ||
* added to the index. Changing it during indexing can lead to inconsistent results. | ||
* | ||
* @param fn The function to check for. | ||
* @param label The label to register this function with | ||
* @param {string} ref - The name of the reference field in the document. | ||
*/ | ||
registerFunction(fn:IPipelineFunction, label:string):void; | ||
ref(ref: string): void; | ||
/** | ||
* Warns if the function is not registered as a Pipeline function. | ||
* Adds a field to the list of document fields that will be indexed. Every document being | ||
* indexed should have this field. Null values for this field in indexed documents will | ||
* not cause errors but will limit the chance of that document being retrieved by searches. | ||
* | ||
* @param fn The function to check for. | ||
* All fields should be added before adding documents to the index. Adding fields after | ||
* a document has been indexed will have no effect on already indexed documents. | ||
* | ||
* @param {string} field - The name of a field to index in all documents. | ||
*/ | ||
warnIfFunctionNotRegistered(fn:IPipelineFunction):void; | ||
field(field: string): void; | ||
/** | ||
* Adds new functions to the end of the pipeline. | ||
* A parameter to tune the amount of field length normalisation that is applied when | ||
* calculating relevance scores. A value of 0 will completely disable any normalisation | ||
* and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b | ||
* will be clamped to the range 0 - 1. | ||
* | ||
* Logs a warning if the function has not been registered. | ||
* | ||
* @param functions Any number of functions to add to the pipeline. | ||
* @param {number} number - The value to set for this tuning parameter. | ||
*/ | ||
add(...functions:IPipelineFunction[]):void; | ||
b(number: number): void; | ||
/** | ||
* Adds a single function after a function that already exists in the pipeline. | ||
* A parameter that controls the speed at which a rise in term frequency results in term | ||
* frequency saturation. The default value is 1.2. Setting this to a higher value will give | ||
* slower saturation levels, a lower value will result in quicker saturation. | ||
* | ||
* Logs a warning if the function has not been registered. | ||
* | ||
* @param existingFn A function that already exists in the pipeline. | ||
* @param newFn The new function to add to the pipeline. | ||
* @param {number} number - The value to set for this tuning parameter. | ||
*/ | ||
after(existingFn:IPipelineFunction, newFn:IPipelineFunction):void; | ||
k1(number: number): void; | ||
/** | ||
* Adds a single function before a function that already exists in the pipeline. | ||
* Adds a document to the index. | ||
* | ||
* Logs a warning if the function has not been registered. | ||
* Before adding fields to the index the index should have been fully setup, with the document | ||
* ref and all fields to index already having been specified. | ||
* | ||
* @param existingFn A function that already exists in the pipeline. | ||
* @param newFn The new function to add to the pipeline. | ||
* The document must have a field name as specified by the ref (by default this is 'id') and | ||
* it should have all fields defined for indexing, though null or undefined values will not | ||
* cause errors. | ||
* | ||
* @param {object} doc - The document to add to the index. | ||
*/ | ||
before(existingFn:IPipelineFunction, newFn:IPipelineFunction):void; | ||
add(doc: object): void; | ||
/** | ||
* Removes a function from the pipeline. | ||
* Builds the index, creating an instance of lunr.Index. | ||
* | ||
* @param fn The function to remove from the pipeline. | ||
* This completes the indexing process and should only be called | ||
* once all documents have been added to the index. | ||
* | ||
* @returns {lunr.Index} | ||
*/ | ||
remove(fn:IPipelineFunction):void; | ||
build(): Index; | ||
/** | ||
* Runs the current list of functions that make up the pipeline against | ||
* the passed tokens. | ||
* Applies a plugin to the index builder. | ||
* | ||
* @param tokens The tokens to run through the pipeline. | ||
* A plugin is a function that is called with the index builder as its context. | ||
* Plugins can be used to customise or extend the behaviour of the index | ||
* in some way. A plugin is just a function that encapsulates the custom | ||
* behaviour that should be applied when building the index. | ||
* | ||
* The plugin function will be called with the index builder as its argument, additional | ||
* arguments can also be passed when calling use. The function will be called | ||
* with the index builder as its context. | ||
* | ||
* @param {Function} plugin The plugin to apply. | ||
*/ | ||
run(tokens:string[]):string[]; | ||
use(plugin: Builder.Plugin, ...args: any[]): void; | ||
} | ||
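The Builder declarations above describe the whole indexing workflow: set the ref, register fields, add documents, then build. A minimal TypeScript sketch of driving the builder directly, assuming documents with `id`/`title`/`body` fields; the `"position"` metadata key is an assumption taken from the lunr documentation rather than from these typings:

```typescript
import lunr = require("lunr");

// a plugin is just a function called with the builder as `this`
const positionPlugin: lunr.Builder.Plugin = function () {
  // hypothetical example: whitelist the "position" metadata key (assumption)
  this.metadataWhitelist.push("position");
};

const builder = new lunr.Builder();
builder.use(positionPlugin);
builder.ref("id");
builder.field("title");
builder.field("body");

builder.add({ id: "1", title: "hello world", body: "a bit like Solr, but much smaller" });

const idx = builder.build(); // a lunr.Index ready for querying
```

Note that a bare Builder starts with empty pipelines; the `lunr()` convenience function shown at the end of these typings wires up the default stemmer and stop word filter.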
namespace Index { | ||
interface Attributes { | ||
/** | ||
* An index of term/field to document reference. | ||
*/ | ||
invertedIndex: object; | ||
/** | ||
* Document vectors keyed by document reference. | ||
*/ | ||
documentVectors: { [docRef: string]: Vector }; | ||
/** | ||
* A set of all corpus tokens. | ||
*/ | ||
tokenSet: TokenSet; | ||
/** | ||
* The names of indexed document fields. | ||
*/ | ||
fields: string[]; | ||
/** | ||
* The pipeline to use for search terms. | ||
*/ | ||
pipeline: Pipeline; | ||
} | ||
/** | ||
* Resets the pipeline by removing any existing processors. | ||
* A result contains details of a document matching a search query. | ||
*/ | ||
reset():void; | ||
interface Result { | ||
/** | ||
* The reference of the document this result represents. | ||
*/ | ||
ref: string; | ||
/** | ||
* A number between 0 and 1 representing how similar this document is to the query. | ||
*/ | ||
score: number; | ||
/** | ||
* Contains metadata about this match including which term(s) caused the match. | ||
*/ | ||
matchData: MatchData; | ||
} | ||
/** | ||
* Returns a representation of the pipeline ready for serialisation. | ||
* A query builder callback provides a query object to be used to express | ||
* the query to perform on the index. | ||
* | ||
* @callback lunr.Index~queryBuilder | ||
* @param {lunr.Query} query - The query object to build up. | ||
* @this lunr.Query | ||
*/ | ||
toJSON():any; | ||
type QueryBuilder = (this: Query, query: Query) => void; | ||
/** | ||
* Loads a previously serialised pipeline. | ||
* Although lunr provides the ability to create queries using lunr.Query, it also provides a simple | ||
* query language which itself is parsed into an instance of lunr.Query. | ||
* | ||
* All functions to be loaded must already be registered with lunr.Pipeline. If any function from | ||
* the serialised data has not been registered then an error will be thrown. | ||
* For programmatically building queries it is advised to directly use lunr.Query, the query language | ||
* is best used for human entered text rather than program generated text. | ||
* | ||
* @param serialised The serialised pipeline to load. | ||
* At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported | ||
* and will be combined with OR, e.g `hello world` will match documents that contain either 'hello' | ||
* or 'world', though those that contain both will rank higher in the results. | ||
* | ||
* Wildcards can be included in terms to match one or more unspecified characters, these wildcards can | ||
* be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding | ||
* wildcards will increase the number of documents that will be found but can also have a negative | ||
* impact on query performance, especially with wildcards at the beginning of a term. | ||
* | ||
* Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term | ||
* hello in the title field will match this query. Using a field not present in the index will lead | ||
* to an error being thrown. | ||
* | ||
* Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term | ||
* boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported | ||
* to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. | ||
* Avoid large values for edit distance to improve query performance. | ||
* | ||
* To escape special characters the backslash character '\' can be used, this allows searches to include | ||
* characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead | ||
* of attempting to apply a boost of 2 to the search term "foo". | ||
* | ||
* @example <caption>Simple single term query</caption> | ||
* hello | ||
* @example <caption>Multiple term query</caption> | ||
* hello world | ||
* @example <caption>term scoped to a field</caption> | ||
* title:hello | ||
* @example <caption>term with a boost of 10</caption> | ||
* hello^10 | ||
* @example <caption>term with an edit distance of 2</caption> | ||
* hello~2 | ||
*/ | ||
static load(serialised:any):Pipeline; | ||
type QueryString = string; | ||
} | ||
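To make the query-language description above concrete, here is a small sketch; the document and field names are invented for illustration:

```typescript
import lunr = require("lunr");

const idx = lunr(function () {
  this.ref("id");
  this.field("title");
  this.add({ id: "1", title: "hello world" });
});

idx.search("hello");          // single term
idx.search("hello world");    // terms are OR'd; documents matching both rank higher
idx.search("title:hello");    // term scoped to the title field
idx.search("hello^10");       // term boost of 10
idx.search("hello~2");        // fuzzy match with an edit distance of 2
idx.search("hel*");           // trailing wildcard
```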
/** | ||
* lunr.Vectors implement vector related operations for a series of elements. | ||
* An index contains the built index of all documents and provides a query interface | ||
* to the index. | ||
* | ||
* Usually instances of lunr.Index will not be created using this constructor, instead | ||
* lunr.Builder should be used to construct new indexes, or lunr.Index.load should be | ||
* used to load previously built and serialized indexes. | ||
*/ | ||
class Vector | ||
{ | ||
list:Node; | ||
class Index { | ||
/** | ||
* @param attrs The attributes of the built search index. | ||
*/ | ||
constructor(attrs: Index.Attributes) | ||
/** | ||
* Performs a search against the index using lunr query syntax. | ||
* | ||
* Results will be returned sorted by their score, the most relevant results | ||
* will be returned first. | ||
* | ||
* For more programmatic querying use lunr.Index#query. | ||
* | ||
* @param {lunr.Index~QueryString} queryString - A string containing a lunr query. | ||
* @throws {lunr.QueryParseError} If the passed query string cannot be parsed. | ||
* @returns {lunr.Index~Result[]} | ||
*/ | ||
search(queryString: Index.QueryString): Index.Result[]; | ||
/** | ||
* Calculates the magnitude of this vector. | ||
* Performs a query against the index using the yielded lunr.Query object. | ||
* | ||
* If performing programmatic queries against the index, this method is preferred | ||
* over lunr.Index#search so as to avoid the additional query parsing overhead. | ||
* | ||
* A query object is yielded to the supplied function which should be used to | ||
* express the query to be run against the index. | ||
* | ||
* Note that although this function takes a callback parameter it is _not_ an | ||
* asynchronous operation, the callback is just yielded a query object to be | ||
* customized. | ||
* | ||
* @param {lunr.Index~queryBuilder} fn - A function that is used to build the query. | ||
* @returns {lunr.Index~Result[]} | ||
*/ | ||
magnitude():number; | ||
query(fn: Index.QueryBuilder): Index.Result[]; | ||
/** | ||
* Calculates the dot product of this vector and another vector. | ||
* @param otherVector The vector to compute the dot product with. | ||
* Prepares the index for JSON serialization. | ||
* | ||
* The schema for this JSON blob will be described in a | ||
* separate JSON schema file. | ||
* | ||
* @returns {Object} | ||
*/ | ||
dot(otherVector:Vector):number; | ||
toJSON(): object; | ||
/** | ||
* Calculates the cosine similarity between this vector and another vector. | ||
* Loads a previously serialized lunr.Index | ||
* | ||
* @param otherVector The other vector to calculate the | ||
* @param {Object} serializedIndex - A previously serialized lunr.Index | ||
* @returns {lunr.Index} | ||
*/ | ||
similarity(otherVector:Vector):number; | ||
static load(serializedIndex: object): Index; | ||
} | ||
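Since `toJSON` and the static `load` are declared above, a JSON round trip looks roughly like the following sketch; the index contents are invented:

```typescript
import lunr = require("lunr");

const idx = lunr(function () {
  this.ref("id");
  this.field("title");
  this.add({ id: "1", title: "hello world" });
});

// JSON.stringify picks up Index#toJSON automatically
const serialised = JSON.stringify(idx);

// later, or in another process, rebuild the index from the serialised form
const loaded = lunr.Index.load(JSON.parse(serialised));
loaded.search("hello");
```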
/** | ||
* lunr.Vector.Node is a simple struct for each node in a lunr.Vector. | ||
* Contains and collects metadata about a matching document. | ||
* A single instance of lunr.MatchData is returned as part of every | ||
* lunr.IndexResult. | ||
*/ | ||
class Node | ||
{ | ||
class MatchData { | ||
/** | ||
* The index of the node in the vector. | ||
* A cloned collection of metadata associated with this document. | ||
*/ | ||
idx:number; | ||
metadata: object; | ||
/** | ||
* The data at this node in the vector. | ||
* @param {string} term - The term this match data is associated with | ||
* @param {string} field - The field in which the term was found | ||
* @param {object} metadata - The metadata recorded about this term in this field | ||
*/ | ||
val:number; | ||
constructor(term: string, field: string, metadata: object) | ||
/** | ||
* The node directly after this node in the vector. | ||
* An instance of lunr.MatchData will be created for every term that matches a | ||
* document. However only one instance is required in a lunr.Index~Result. This | ||
* method combines metadata from another instance of lunr.MatchData with this | ||
* objects metadata. | ||
* | ||
* @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one. | ||
* @see {@link lunr.Index~Result} | ||
*/ | ||
next:Node; | ||
/** | ||
* @param idx The index of the node in the vector. | ||
* @param val The data at this node in the vector. | ||
* @param next The node directly after this node in the vector. | ||
*/ | ||
constructor(idx:number, val:number, next:Node); | ||
combine(otherMatchData: MatchData): void; | ||
} | ||
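A short sketch of consuming MatchData from search results. The exact shape of `metadata` (keyed by matching term, then by field) comes from the lunr documentation and is an assumption as far as these typings go, which only declare it as `object`:

```typescript
import lunr = require("lunr");
declare const idx: lunr.Index; // an index built as in the earlier sketches

for (const result of idx.search("hello")) {
  // metadata is keyed by matching term, then by field (assumption, see above)
  console.log(result.ref, result.score, Object.keys(result.matchData.metadata));
}
```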
/** | ||
* A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token | ||
* string as well as all known metadata. A pipeline function can mutate the token string | ||
* or mutate (or add) metadata for a given token. | ||
* | ||
* A pipeline function can indicate that the passed token should be discarded by returning | ||
* null. This token will not be passed to any downstream pipeline functions and will not be | ||
* added to the index. | ||
* | ||
* Multiple tokens can be returned by returning an array of tokens. Each token will be passed | ||
* to any downstream pipeline functions and all returned tokens will be added to the index. | ||
* | ||
* Any number of pipeline functions may be chained together using a lunr.Pipeline. | ||
* | ||
* @interface lunr.PipelineFunction | ||
* @param {lunr.Token} token - A token from the document being processed. | ||
* @param {number} i - The index of this token in the complete list of tokens for this document/field. | ||
* @param {lunr.Token[]} tokens - All tokens for this document/field. | ||
* @returns {(?lunr.Token|lunr.Token[])} | ||
*/ | ||
type PipelineFunction = ( | ||
token: Token, | ||
i: number, | ||
tokens: Token[] | ||
) => null | Token | Token[]; | ||
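A minimal sketch of a custom pipeline function matching the `PipelineFunction` signature above; the filter itself is invented for illustration:

```typescript
import lunr = require("lunr");

// discard very short tokens; returning null drops the token from the pipeline
const shortTokenFilter: lunr.PipelineFunction = (token) =>
  token.toString().length < 3 ? null : token;

// registering is only needed if the pipeline will be serialised
lunr.Pipeline.registerFunction(shortTokenFilter, "shortTokenFilter");
```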
/** | ||
* lunr.SortedSets are used to maintain an array of unique values in a sorted order. | ||
* lunr.Pipelines maintain an ordered list of functions to be applied to all | ||
* tokens in documents entering the search index and queries being run against | ||
* the index. | ||
* | ||
* An instance of lunr.Index created with the lunr shortcut will contain a | ||
* pipeline with a stop word filter and an English language stemmer. Extra | ||
* functions can be added before or after either of these functions or these | ||
* default functions can be removed. | ||
* | ||
* When run the pipeline will call each function in turn, passing a token, the | ||
* index of that token in the original list of all tokens and finally a list of | ||
* all the original tokens. | ||
* | ||
* The output of functions in the pipeline will be passed to the next function | ||
* in the pipeline. To exclude a token from entering the index the function | ||
* should return undefined, the rest of the pipeline will not be called with | ||
* this token. | ||
* | ||
* For serialisation of pipelines to work, all functions used in an instance of | ||
* a pipeline should be registered with lunr.Pipeline. Registered functions can | ||
* then be loaded. If trying to load a serialised pipeline that uses functions | ||
* that are not registered an error will be thrown. | ||
* | ||
* If not planning on serialising the pipeline then registering pipeline functions | ||
* is not necessary. | ||
*/ | ||
class SortedSet<T> | ||
{ | ||
elements:T[]; | ||
class Pipeline { | ||
constructor() | ||
length:number; | ||
/** | ||
* Inserts new items into the set in the correct position to maintain the order. | ||
* Register a function with the pipeline. | ||
* | ||
* @param values The objects to add to this set. | ||
* Functions that are used in the pipeline should be registered if the pipeline | ||
* needs to be serialised, or a serialised pipeline needs to be loaded. | ||
* | ||
* Registering a function does not add it to a pipeline, functions must still be | ||
* added to instances of the pipeline for them to be used when running a pipeline. | ||
* | ||
* @param {lunr.PipelineFunction} fn - The function to check for. | ||
* @param {String} label - The label to register this function with | ||
*/ | ||
add(...values:T[]):void; | ||
static registerFunction(fn: PipelineFunction, label: string): void; | ||
/** | ||
* Converts this sorted set into an array. | ||
*/ | ||
toArray():T[]; | ||
/** | ||
* Creates a new array with the results of calling a provided function on | ||
* every element in this sorted set. | ||
* Loads a previously serialised pipeline. | ||
* | ||
* Delegates to Array.prototype.map and has the same signature. | ||
* All functions to be loaded must already be registered with lunr.Pipeline. | ||
* If any function from the serialised data has not been registered then an | ||
* error will be thrown. | ||
* | ||
* @param fn The function that is called on each element of the | ||
* @param ctx An optional object that can be used as the context | ||
* @param {Object} serialised - The serialised pipeline to load. | ||
* @returns {lunr.Pipeline} | ||
*/ | ||
map(fn:Function, ctx:any):T[]; | ||
static load(serialised: object): Pipeline; | ||
/** | ||
* Executes a provided function once per sorted set element. | ||
* Adds new functions to the end of the pipeline. | ||
* | ||
* Delegates to Array.prototype.forEach and has the same signature. | ||
* Logs a warning if the function has not been registered. | ||
* | ||
* @param fn The function that is called on each element of the | ||
* @param ctx An optional object that can be used as the context | ||
* @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline. | ||
*/ | ||
forEach(fn:Function, ctx:any):any; | ||
add(...functions: PipelineFunction[]): void; | ||
/** | ||
* Returns the index at which a given element can be found in the sorted | ||
* set, or -1 if it is not present. | ||
* Adds a single function after a function that already exists in the | ||
* pipeline. | ||
* | ||
* @param elem The object to locate in the sorted set. | ||
* @param start An optional index at which to start searching from | ||
* @param end An optional index at which to stop search from within | ||
* Logs a warning if the function has not been registered. | ||
* | ||
* @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. | ||
* @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. | ||
*/ | ||
indexOf(elem:T, start?:number, end?:number):number; | ||
after(existingFn: PipelineFunction, newFn: PipelineFunction): void; | ||
/** | ||
* Returns the position within the sorted set that an element should be | ||
* inserted at to maintain the current order of the set. | ||
* Adds a single function before a function that already exists in the | ||
* pipeline. | ||
* | ||
* This function assumes that the element to search for does not already exist | ||
* in the sorted set. | ||
* Logs a warning if the function has not been registered. | ||
* | ||
* @param elem - The elem to find the position for in the set | ||
* @param start - An optional index at which to start searching from | ||
* @param end - An optional index at which to stop search from within | ||
* @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. | ||
* @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. | ||
*/ | ||
locationFor(elem:T, start?:number, end?:number):number; | ||
before(existingFn: PipelineFunction, newFn: PipelineFunction): void; | ||
/** | ||
* Creates a new lunr.SortedSet that contains the elements in the | ||
* intersection of this set and the passed set. | ||
* Removes a function from the pipeline. | ||
* | ||
* @param otherSet The set to intersect with this set. | ||
* @param {lunr.PipelineFunction} fn The function to remove from the pipeline. | ||
*/ | ||
intersect(otherSet:SortedSet<T>):SortedSet<T>; | ||
remove(fn: PipelineFunction): void; | ||
/** | ||
* Creates a new lunr.SortedSet that contains the elements in the union of this | ||
* set and the passed set. | ||
* Runs the current list of functions that make up the pipeline against the | ||
* passed tokens. | ||
* | ||
* @param otherSet The set to union with this set. | ||
* @param {Array} tokens The tokens to run through the pipeline. | ||
* @returns {Array} | ||
*/ | ||
union(otherSet:SortedSet<T>):SortedSet<T>; | ||
run(tokens: Token[]): Token[]; | ||
/** | ||
* Makes a copy of this set | ||
* Convenience method for passing a string through a pipeline and getting | ||
* strings out. This method takes care of wrapping the passed string in a | ||
* token and mapping the resulting tokens back to strings. | ||
* | ||
* @param {string} str - The string to pass through the pipeline. | ||
* @returns {string[]} | ||
*/ | ||
clone():SortedSet<T>; | ||
runString(str: string): string[]; | ||
/** | ||
* Returns a representation of the sorted set ready for serialisation. | ||
* Resets the pipeline by removing any existing processors. | ||
* | ||
*/ | ||
toJSON():any; | ||
reset(): void; | ||
/** | ||
* Loads a previously serialised sorted set. | ||
* Returns a representation of the pipeline ready for serialisation. | ||
* | ||
* @param serialisedData The serialised set to load. | ||
* Logs a warning if the function has not been registered. | ||
* | ||
* @returns {Array} | ||
*/ | ||
static load<T>(serialisedData:T[]):SortedSet<T>; | ||
toJSON(): PipelineFunction[]; | ||
} | ||
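Putting the Pipeline API above together, a hedged sketch that reuses the invented `shortTokenFilter` from the previous sketch:

```typescript
import lunr = require("lunr");
declare const shortTokenFilter: lunr.PipelineFunction; // registered as in the previous sketch

const idx = lunr(function () {
  this.ref("id");
  this.field("title");
  this.pipeline.add(shortTokenFilter);       // applied while indexing documents
  this.searchPipeline.add(shortTokenFilter); // applied to query terms
  this.add({ id: "1", title: "an example title" });
});

// a standalone pipeline can also be run directly: strings in, strings out
const pipeline = new lunr.Pipeline();
pipeline.add(shortTokenFilter);
pipeline.runString("An Example Title");
```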
namespace Query { | ||
enum wildcard { | ||
NONE = 0, | ||
LEADING = 1 << 0, | ||
TRAILING = 1 << 1 | ||
} | ||
interface IIndexField | ||
{ | ||
/** | ||
* The name of the field within the document that | ||
* A single clause in a {@link lunr.Query} contains a term and details on how to | ||
* match that term against a {@link lunr.Index}. | ||
* | ||
* @typedef {Object} lunr.Query~Clause | ||
* @property {string[]} fields - The fields in an index this clause should be matched against. | ||
* @property {number} [boost=1] - Any boost that should be applied when matching this clause. | ||
* @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be. | ||
* @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline. | ||
* @property {number} [wildcard=0] - Whether the term should have wildcards appended or prepended. | ||
*/ | ||
name:string; | ||
/** | ||
* An optional boost that can be applied to terms in this field. | ||
*/ | ||
boost:number; | ||
interface Clause { | ||
fields: string[]; | ||
boost: number; | ||
editDistance: number; | ||
usePipeline: boolean; | ||
wildcard: number; | ||
} | ||
} | ||
interface IIndexSearchResult | ||
{ | ||
ref:any; | ||
score:number; | ||
} | ||
/** | ||
* lunr.Index is object that manages a search index. It contains the indexes and stores | ||
* all the tokens and document lookups. It also provides the main user facing API for | ||
* the library. | ||
* A lunr.Query provides a programmatic way of defining queries to be performed | ||
* against a {@link lunr.Index}. | ||
* | ||
* Prefer constructing a lunr.Query using the {@link lunr.Index#query} method | ||
* so the query object is pre-initialized with the right index fields. | ||
* | ||
* @property {lunr.Query~Clause[]} clauses - An array of query clauses. | ||
* @property {string[]} allFields - An array of all available fields in a lunr.Index. | ||
*/ | ||
class Index | ||
{ | ||
eventEmitter:EventEmitter; | ||
documentStore:Store<string>; | ||
tokenStore:TokenStore; | ||
corpusTokens:SortedSet<string>; | ||
pipeline:Pipeline; | ||
_fields:IIndexField[]; | ||
_ref:string; | ||
_idfCache:{[key:string]:string}; | ||
class Query { | ||
/** | ||
* Bind a handler to events being emitted by the index. | ||
* | ||
* The handler can be bound to many events at the same time. | ||
* | ||
* @param eventName The name(s) of events to bind the function to. | ||
* @param handler The function to call when an event is fired. Binds a handler | ||
* function to a specific event(s). | ||
* An array of query clauses. | ||
*/ | ||
on(eventName:string, handler:Function):void; | ||
on(eventName:string, eventName2:string, handler:Function):void; | ||
on(eventName:string, eventName2:string, eventName3:string, handler:Function):void; | ||
on(eventName:string, eventName2:string, eventName3:string, eventName4:string, handler:Function):void; | ||
on(eventName:string, eventName2:string, eventName3:string, eventName4:string, eventName5:string, handler:Function):void; | ||
clauses: Query.Clause[]; | ||
/** | ||
* Removes a handler from an event being emitted by the index. | ||
* | ||
* @param eventName The name of events to remove the function from. | ||
* @param handler The serialised set to load. | ||
* An array of all available fields in a lunr.Index. | ||
*/ | ||
off(eventName:string, handler:Function):void; | ||
allFields: string[]; | ||
/** | ||
* Adds a field to the list of fields that will be searchable within documents in the index. | ||
* | ||
* An optional boost param can be passed to affect how much tokens in this field rank in | ||
* search results, by default the boost value is 1. | ||
* | ||
* Fields should be added before any documents are added to the index, fields that are added | ||
* after documents are added to the index will only apply to new documents added to the index. | ||
* | ||
* @param fieldName The name of the field within the document that | ||
* @param options An optional boost that can be applied to terms in this field. | ||
* @param allFields An array of all available fields in a lunr.Index. | ||
*/ | ||
field(fieldName:string, options?:{boost?:number}):Index; | ||
constructor(allFields: string[]) | ||
/** | ||
* Sets the property used to uniquely identify documents added to the index, by default this | ||
* property is 'id'. | ||
* Adds a {@link lunr.Query~Clause} to this query. | ||
* | ||
* This should only be changed before adding documents to the index, changing the ref property | ||
* without resetting the index can lead to unexpected results. | ||
* Unless the clause contains the fields to be matched all fields will be matched. In addition | ||
* a default boost of 1 is applied to the clause. | ||
* | ||
* @refName The property to use to uniquely identify the | ||
* @param {lunr.Query~Clause} clause - The clause to add to this query. | ||
* @see lunr.Query~Clause | ||
* @returns {lunr.Query} | ||
*/ | ||
ref(refName:string):Index; | ||
clause(clause: Query.Clause): Query; | ||
/** | ||
* Add a document to the index. | ||
* Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause} | ||
* to the list of clauses that make up this query. | ||
* | ||
* This is the way new documents enter the index, this function will run the fields from the | ||
* document through the index's pipeline and then add it to the index, it will then show up | ||
* in search results. | ||
* | ||
* An 'add' event is emitted with the document that has been added and the index the document | ||
* has been added to. This event can be silenced by passing false as the second argument to add. | ||
* | ||
* @param doc The document to add to the index. | ||
* @param emitEvent Whether or not to emit events, default true. | ||
* @param {string} term - The term to add to the query. | ||
* @param {Object} [options] - Any additional properties to add to the query clause. | ||
* @returns {lunr.Query} | ||
* @see lunr.Query#clause | ||
* @see lunr.Query~Clause | ||
* @example <caption>adding a single term to a query</caption> | ||
* query.term("foo") | ||
* @example <caption>adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard</caption> | ||
* query.term("foo", { | ||
* fields: ["title"], | ||
* boost: 10, | ||
* wildcard: lunr.Query.wildcard.TRAILING | ||
* }) | ||
*/ | ||
add(doc:any, emitEvent?:boolean):void; | ||
term(term: string, options: object): Query; | ||
} | ||
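A sketch of the programmatic query path using the Query class above; the field names and terms are invented:

```typescript
import lunr = require("lunr");
declare const idx: lunr.Index; // built with a "title" field, as in the earlier sketches

const results = idx.query((q) => {
  // `options` is required by these typings, so pass at least an empty object
  q.term("hello", {});
  q.term("worl", {
    fields: ["title"],
    boost: 10,
    wildcard: lunr.Query.wildcard.TRAILING,
  });
});
```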
class QueryParseError extends Error { | ||
name: "QueryParseError"; | ||
message: string; | ||
start: number; | ||
end: number; | ||
constructor(message: string, start: number, end: number) | ||
} | ||
/** | ||
* lunr.stemmer is an english language stemmer, this is a JavaScript | ||
* implementation of the PorterStemmer taken from http://tartarus.org/~martin | ||
* | ||
* @static | ||
* @implements {lunr.PipelineFunction} | ||
* @param {lunr.Token} token - The string to stem | ||
* @returns {lunr.Token} | ||
* @see {@link lunr.Pipeline} | ||
*/ | ||
function stemmer(token: Token): Token; | ||
/** | ||
* lunr.stopWordFilter is an English language stop word list filter, any words | ||
* contained in the list will not be passed through the filter. | ||
* | ||
* This is intended to be used in the Pipeline. If the token does not pass the | ||
* filter then undefined will be returned. | ||
* | ||
* @implements {lunr.PipelineFunction} | ||
* @params {lunr.Token} token - A token to check for being a stop word. | ||
* @returns {lunr.Token} | ||
* @see {@link lunr.Pipeline} | ||
*/ | ||
function stopWordFilter(token: Token): Token; | ||
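Both functions above implement `lunr.PipelineFunction` and operate on `lunr.Token` instances. A small sketch; the expected outputs noted in the comments are assumptions based on how lunr's Porter stemmer and English stop word list normally behave:

```typescript
import lunr = require("lunr");

const stemmed = lunr.stemmer(new lunr.Token("fishing", {}));
stemmed.toString(); // expected to be "fish"

// stop words such as "the" do not pass the filter, so nothing useful comes back for them
lunr.stopWordFilter(new lunr.Token("the", {}));
```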
namespace Token { | ||
/** | ||
* Removes a document from the index. | ||
* A token update function is used when updating or optionally | ||
* when cloning a token. | ||
* | ||
* To make sure documents no longer show up in search results they can be removed from the | ||
* index using this method. | ||
* | ||
* The document passed only needs to have the same ref property value as the document that was | ||
* added to the index, they could be completely different objects. | ||
* | ||
* A 'remove' event is emitted with the document that has been removed and the index the | ||
* document has been removed from. This event can be silenced by passing false as the second | ||
* argument to remove. | ||
* | ||
* @param doc The document to remove from the index. | ||
* @param emitEvent Whether to emit remove events, defaults to true | ||
* @callback lunr.Token~updateFunction | ||
* @param {string} str - The string representation of the token. | ||
* @param {Object} metadata - All metadata associated with this token. | ||
*/ | ||
remove(doc:any, emitEvent?:boolean):void; | ||
type UpdateFunction = (str: string, metadata: object) => void; | ||
} | ||
/** | ||
* A token wraps a string representation of a token | ||
* as it is passed through the text processing pipeline. | ||
*/ | ||
class Token { | ||
/** | ||
* Updates a document in the index. | ||
* | ||
* When a document contained within the index gets updated, fields changed, added or removed, | ||
* to make sure it correctly matched against search queries, it should be updated in the index. | ||
* | ||
* This method is just a wrapper around [[remove]] and [[add]]. | ||
* | ||
* An 'update' event is emitted with the document that has been updated and the index. | ||
* This event can be silenced by passing false as the second argument to update. Only an | ||
* update event will be fired, the 'add' and 'remove' events of the underlying calls are | ||
* silenced. | ||
* | ||
* @param doc The document to update in the index. | ||
* @param emitEvent Whether to emit update events, defaults to true | ||
* @param {string} [str=''] - The string token being wrapped. | ||
* @param {object} [metadata={}] - Metadata associated with this token. | ||
*/ | ||
update(doc:any, emitEvent?:boolean):void; | ||
constructor(str: string, metadata: object) | ||
/** | ||
* Calculates the inverse document frequency for a token within the index. | ||
* Returns the token string that is being wrapped by this object. | ||
* | ||
* @param token The token to calculate the idf of. | ||
* @returns {string} | ||
*/ | ||
idf(token:string):string; | ||
toString(): string; | ||
/** | ||
* Searches the index using the passed query. | ||
* Applies the given function to the wrapped string token. | ||
* | ||
* Queries should be a string, multiple words are allowed and will lead to an AND based | ||
* query, e.g. idx.search('foo bar') will run a search for documents containing both | ||
* 'foo' and 'bar'. | ||
* @example | ||
* token.update(function (str, metadata) { | ||
* return str.toUpperCase() | ||
* }) | ||
* | ||
* All query tokens are passed through the same pipeline that document tokens are passed | ||
* through, so any language processing involved will be run on every query term. | ||
* | ||
* Each query term is expanded, so that the term 'he' might be expanded to 'hello' | ||
* and 'help' if those terms were already included in the index. | ||
* | ||
* Matching documents are returned as an array of objects, each object contains the | ||
* matching document ref, as set for this index, and the similarity score for this | ||
* document against the query. | ||
* | ||
* @param query The query to search the index with. | ||
* @param {lunr.Token~updateFunction} fn - A function to apply to the token string. | ||
* @returns {lunr.Token} | ||
*/ | ||
search(query:string):IIndexSearchResult[]; | ||
update(fn: Token.UpdateFunction): Token; | ||
/** | ||
* Generates a vector containing all the tokens in the document matching the | ||
* passed documentRef. | ||
* Creates a clone of this token. Optionally a function can be | ||
* applied to the cloned token. | ||
* | ||
* The vector contains the tf-idf score for each token contained in the document with | ||
* the passed documentRef. The vector will contain an element for every token in the | ||
* indexes corpus, if the document does not contain that token the element will be 0. | ||
* | ||
* @param documentRef The ref to find the document with. | ||
* @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token. | ||
* @returns {lunr.Token} | ||
*/ | ||
documentVector(documentRef:string):Vector; | ||
clone(fn: Token.UpdateFunction): Token; | ||
} | ||
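A brief sketch of the Token API above; per the documented example, the value returned from the update function becomes the new token string:

```typescript
import lunr = require("lunr");

const token = new lunr.Token("Hello", {});

token.update((str) => str.toLowerCase());
token.toString(); // "hello"

// clone copies the token, applying the given update function to the copy
const upper = token.clone((str) => str.toUpperCase());
upper.toString(); // "HELLO"
```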
/** | ||
* A token set is used to store the unique list of all tokens | ||
* within an index. Token sets are also used to represent an | ||
* incoming query to the index, this query token set and index | ||
* token set are then intersected to find which tokens to look | ||
* up in the inverted index. | ||
* | ||
* A token set can hold multiple tokens, as in the case of the | ||
* index token set, or it can hold a single token as in the | ||
* case of a simple query token set. | ||
* | ||
* Additionally token sets are used to perform wildcard matching. | ||
* Leading, contained and trailing wildcards are supported, and | ||
* from this edit distance matching can also be provided. | ||
* | ||
* Token sets are implemented as a minimal finite state automata, | ||
* where both common prefixes and suffixes are shared between tokens. | ||
* This helps to reduce the space used for storing the token set. | ||
*/ | ||
class TokenSet { | ||
constructor() | ||
/** | ||
* Returns a representation of the index ready for serialisation. | ||
* Creates a TokenSet instance from the given sorted array of words. | ||
* | ||
* @param {String[]} arr - A sorted array of strings to create the set from. | ||
* @returns {lunr.TokenSet} | ||
* @throws Will throw an error if the input array is not sorted. | ||
*/ | ||
toJSON():any; | ||
fromArray(arr: string[]): TokenSet; | ||
/** | ||
* Applies a plugin to the current index. | ||
* Creates a token set representing a single string with a specified | ||
* edit distance. | ||
* | ||
* A plugin is a function that is called with the index as its context. Plugins can be | ||
* used to customise or extend the behaviour of the index in some way. A plugin is just a | ||
* function that encapsulates the custom behaviour that should be applied to the index. | ||
* Insertions, deletions, substitutions and transpositions are each | ||
* treated as an edit distance of 1. | ||
* | ||
* The plugin function will be called with the index as its argument, additional arguments | ||
* can also be passed when calling use. The function will be called with the index as | ||
* its context. | ||
* Increasing the allowed edit distance will have a dramatic impact | ||
* on the performance of both creating and intersecting these TokenSets. | ||
* It is advised to keep the edit distance less than 3. | ||
* | ||
* Example: | ||
* | ||
* ```javascript | ||
* var myPlugin = function(idx, arg1, arg2) { | ||
* // `this` is the index to be extended | ||
* // apply any extensions etc here. | ||
* }; | ||
* | ||
* var idx = lunr(function() { | ||
* this.use(myPlugin, 'arg1', 'arg2'); | ||
* }); | ||
* ``` | ||
* | ||
* @param plugin The plugin to apply. | ||
* @param args | ||
* @param {string} str - The string to create the token set from. | ||
* @param {number} editDistance - The allowed edit distance to match. | ||
* @returns {lunr.Vector} | ||
*/ | ||
use(plugin:Function, ...args:any[]):void; | ||
fromFuzzyString(str: string, editDistance: number): Vector; | ||
/** | ||
* Loads a previously serialised index. | ||
* Creates a TokenSet from a string. | ||
* | ||
* Issues a warning if the index being imported was serialised by a different version | ||
* of lunr. | ||
* The string may contain one or more wildcard characters (*) | ||
* that will allow wildcard matching when intersecting with | ||
* another TokenSet. | ||
* | ||
* @param serialisedData The serialised set to load. | ||
* @param {string} str - The string to create a TokenSet from. | ||
* @returns {lunr.TokenSet} | ||
*/ | ||
static load(serialisedData:any):Index; | ||
} | ||
fromString(str: string): TokenSet; | ||
/** | ||
* lunr.Store is a simple key-value store used for storing sets of tokens for documents | ||
* stored in index. | ||
*/ | ||
class Store<T> | ||
{ | ||
store:{[id:string]:SortedSet<T>}; | ||
length:number; | ||
/** | ||
* Stores the given tokens in the store against the given id. | ||
* Converts this TokenSet into an array of strings | ||
* contained within the TokenSet. | ||
* | ||
* @param id The key used to store the tokens against. | ||
* @param tokens The tokens to store against the key. | ||
* @returns {string[]} | ||
*/ | ||
set(id:string, tokens:SortedSet<T>):void; | ||
toArray(): string[]; | ||
/** | ||
* Retrieves the tokens from the store for a given key. | ||
* Generates a string representation of a TokenSet. | ||
* | ||
* @param id The key to lookup and retrieve from the store. | ||
*/ | ||
get(id:string):SortedSet<T>; | ||
/** | ||
* Checks whether the store contains a key. | ||
* This is intended to allow TokenSets to be used as keys | ||
* in objects, largely to aid the construction and minimisation | ||
* of a TokenSet. As such it is not designed to be a human | ||
* friendly representation of the TokenSet. | ||
* | ||
* @param id The id to look up in the store. | ||
* @returns {string} | ||
*/ | ||
has(id:string):boolean; | ||
toString(): string; | ||
/** | ||
* Removes the value for a key in the store. | ||
* Returns a new TokenSet that is the intersection of | ||
* this TokenSet and the passed TokenSet. | ||
* | ||
* @param id The id to remove from the store. | ||
* This intersection will take into account any wildcards | ||
* contained within the TokenSet. | ||
* | ||
* @param {lunr.TokenSet} b - An other TokenSet to intersect with. | ||
* @returns {lunr.TokenSet} | ||
*/ | ||
remove(id:string):void; | ||
intersect(b: TokenSet): TokenSet; | ||
} | ||
namespace tokenizer { | ||
/** | ||
* Returns a representation of the store ready for serialisation. | ||
*/ | ||
toJSON():any; | ||
/** | ||
* Loads a previously serialised store. | ||
* The separator used to split a string into tokens. Override this property to change the behaviour of | ||
* `lunr.tokenizer` when tokenizing strings. By default this splits on whitespace and hyphens. | ||
* | ||
* @param serialisedData The serialised store to load. | ||
* @static | ||
* @see lunr.tokenizer | ||
*/ | ||
static load<T>(serialisedData:any):Store<T>; | ||
const separator: RegExp; | ||
} | ||
/** | ||
* A function for splitting a string into tokens ready to be inserted into | ||
* the search index. Uses `lunr.tokenizer.separator` to split strings, change | ||
* the value of this property to change how strings are split into tokens. | ||
* | ||
* This tokenizer will convert its parameter to a string by calling `toString` and | ||
* then will split this string on the character in `lunr.tokenizer.separator`. | ||
* Arrays will have their elements converted to strings and wrapped in a lunr.Token. | ||
* | ||
* @static | ||
* @param {?(string|object|object[])} obj - The object to convert into tokens | ||
* @returns {lunr.Token[]} | ||
*/ | ||
function tokenizer(obj?: null | string | object | object[]): Token[]; | ||
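A quick sketch of the tokenizer above; the default value shown for `separator` is an assumption based on the JSDoc, not something these typings pin down:

```typescript
import lunr = require("lunr");

const tokens = lunr.tokenizer("some sample-text");
tokens.map((t) => t.toString()); // ["some", "sample", "text"] - split on whitespace and hyphens

// the split behaviour is controlled by this regular expression
lunr.tokenizer.separator; // roughly /[\s\-]+/ by default (assumption)
```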
interface ITokenDocument | ||
{ | ||
ref:number; | ||
/** | ||
* lunr.trimmer is a pipeline function for trimming non word | ||
* characters from the beginning and end of tokens before they | ||
* enter the index. | ||
* | ||
* This implementation may not work correctly for non latin | ||
* characters and should either be removed or adapted for use | ||
* with languages with non-latin characters. | ||
* | ||
* @static | ||
* @implements {lunr.PipelineFunction} | ||
* @param {lunr.Token} token The token to pass through the filter | ||
* @returns {lunr.Token} | ||
* @see lunr.Pipeline | ||
*/ | ||
function trimmer(token: Token): Token; | ||
tf:number; | ||
} | ||
/** | ||
* lunr.TokenStore is used for efficient storing and lookup of the reverse index of token | ||
* to document ref. | ||
* A namespace containing utils for the rest of the lunr library | ||
*/ | ||
class TokenStore | ||
{ | ||
root:{[token:string]:TokenStore}; | ||
namespace utils { | ||
/** | ||
* Print a warning message to the console. | ||
* | ||
* @param {String} message The message to be printed. | ||
* @memberOf Utils | ||
*/ | ||
function warn(message: string): void; | ||
docs:{[ref:string]:ITokenDocument}; | ||
length:number; | ||
/** | ||
* Adds a new token doc pair to the store. | ||
* Convert an object to a string. | ||
* | ||
* By default this function starts at the root of the current store, however it can | ||
* start at any node of any token store if required. | ||
* In the case of `null` and `undefined` the function returns | ||
* the empty string, in all other cases the result of calling | ||
* `toString` on the passed object is returned. | ||
* | ||
* @param token The token to store the doc under | ||
* @param doc The doc to store against the token | ||
* @param root An optional node at which to start looking for the | ||
* @param {Any} obj The object to convert to a string. | ||
* @return {String} string representation of the passed object. | ||
* @memberOf Utils | ||
*/ | ||
add(token:string, doc:ITokenDocument, root?:TokenStore):void; | ||
function asString(obj: any): string; | ||
} | ||
/** | ||
* A vector is used to construct the vector space of documents and queries. These | ||
* vectors support operations to determine the similarity between two documents or | ||
* a document and a query. | ||
* | ||
* Normally no parameters are required for initializing a vector, but in the case of | ||
* loading a previously dumped vector the raw elements can be provided to the constructor. | ||
* | ||
* For performance reasons vectors are implemented with a flat array, where an elements | ||
* index is immediately followed by its value. E.g. [index, value, index, value]. This | ||
* allows the underlying array to be as sparse as possible and still offer decent | ||
* performance when being used for vector calculations. | ||
*/ | ||
class Vector { | ||
/** | ||
* Checks whether this key is contained within this lunr.TokenStore. | ||
* | ||
* @param token The token to check for | ||
* @param {Number[]} [elements] - The flat list of element index and element value pairs. | ||
*/ | ||
has(token:string):boolean; | ||
constructor(elements: number[]) | ||
/** | ||
* Retrieve a node from the token store for a given token. | ||
* Calculates the position within the vector to insert a given index. | ||
* | ||
* @param token The token to get the node for. | ||
* This is used internally by insert and upsert. If there are duplicate indexes then | ||
* the position is returned as if the value for that index were to be updated, but it | ||
* is the callers responsibility to check whether there is a duplicate at that index | ||
* | ||
* @param {Number} insertIdx - The index at which the element should be inserted. | ||
* @returns {Number} | ||
*/ | ||
getNode(token:string):TokenStore; | ||
positionForIndex(index: number): number; | ||
/** | ||
* Retrieve the documents for a node for the given token. | ||
* Inserts an element at an index within the vector. | ||
* | ||
* By default this function starts at the root of the current store, however it can | ||
* start at any node of any token store if required. | ||
* Does not allow duplicates, will throw an error if there is already an entry | ||
* for this index. | ||
* | ||
* @param token The token to get the documents for. | ||
* @param root An optional node at which to start. | ||
* @param {Number} insertIdx - The index at which the element should be inserted. | ||
* @param {Number} val - The value to be inserted into the vector. | ||
*/ | ||
get(token:string, root:TokenStore):{[ref:string]:ITokenDocument}; | ||
insert(insertIdx: number, val: number): void; | ||
count(token:string, root:TokenStore):number; | ||
/** | ||
* Remove the document identified by ref from the token in the store. | ||
* Inserts or updates an existing index within the vector. | ||
* | ||
* @param token The token to get the documents for. | ||
* @param ref The ref of the document to remove from this token. | ||
* @param {Number} insertIdx - The index at which the element should be inserted. | ||
* @param {Number} val - The value to be inserted into the vector. | ||
* @param {function} fn - A function that is called for updates, the existing value and the | ||
* requested value are passed as arguments | ||
*/ | ||
remove(token:string, ref:string):void; | ||
upsert( | ||
insertIdx: number, | ||
val: number, | ||
fn: (existingVal: number, val: number) => number | ||
): void; | ||
/** | ||
* Find all the possible suffixes of the passed token using tokens currently in | ||
* the store. | ||
* Calculates the magnitude of this vector. | ||
* | ||
* @param token The token to expand. | ||
* @param memo | ||
* @returns {Number} | ||
*/ | ||
expand(token:string, memo?:string[]):string[]; | ||
magnitude(): number; | ||
/** | ||
* Returns a representation of the token store ready for serialisation. | ||
* Calculates the dot product of this vector and another vector. | ||
* | ||
* @param {lunr.Vector} otherVector - The vector to compute the dot product with. | ||
* @returns {Number} | ||
*/ | ||
toJSON():any; | ||
dot(otherVector: Vector): number; | ||
/** | ||
* Loads a previously serialised token store. | ||
* Calculates the cosine similarity between this vector and another | ||
* vector. | ||
* | ||
* @param serialisedData The serialised token store to load. | ||
* @param {lunr.Vector} otherVector - The other vector to calculate the | ||
* similarity with. | ||
* @returns {Number} | ||
*/ | ||
static load(serialisedData:any):TokenStore; | ||
} | ||
similarity(otherVector: Vector): number; | ||
/** | ||
* A namespace containing utils for the rest of the lunr library | ||
*/ | ||
module utils { | ||
/** | ||
* Print a warning message to the console. | ||
* Converts the vector to an array of the elements within the vector. | ||
* | ||
* @param {String} message The message to be printed. | ||
* @memberOf Utils | ||
* @returns {Number[]} | ||
*/ | ||
function warn(message: any): void; | ||
toArray(): number[]; | ||
/** | ||
* Convert an object to a string. | ||
* A JSON serializable representation of the vector. | ||
* | ||
* In the case of `null` and `undefined` the function returns | ||
* the empty string, in all other cases the result of calling | ||
* `toString` on the passed object is returned. | ||
* | ||
* @param {Any} obj The object to convert to a string. | ||
* @return {String} string representation of the passed object. | ||
* @memberOf Utils | ||
* @returns {Number[]} | ||
*/ | ||
function asString(obj: any): string; | ||
toJSON(): number[]; | ||
} | ||
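The Vector class above uses the flat `[index, value, index, value, …]` layout described in its comment; a small hedged sketch of the declared operations:

```typescript
import lunr = require("lunr");

// element 0 has value 1, element 2 has value 3
const a = new lunr.Vector([0, 1, 2, 3]);

const b = new lunr.Vector([]);
b.insert(0, 2);
b.upsert(2, 3, (existing, requested) => existing + requested); // inserts, or combines on conflict

a.magnitude();
a.dot(b);
a.similarity(b);
b.toArray(); // just the element values
```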
const version: string; | ||
type ConfigFunction = (this: Builder, builder: Builder) => void; | ||
} | ||
/** | ||
@@ -906,3 +989,3 @@ * Convenience function for instantiating a new lunr index and configuring it with the default | ||
* ```javascript | ||
* var idx = lunr(function () { | ||
* var idx = lunr(function () { | ||
* this.field('title', 10); | ||
@@ -917,8 +1000,9 @@ * this.field('tags', 100); | ||
* }); | ||
* }); | ||
* }); | ||
* ``` | ||
*/ | ||
declare function lunr(config:Function):lunr.Index; | ||
declare function lunr(config: lunr.ConfigFunction): lunr.Index; | ||
export = lunr; | ||
export as namespace lunr; | ||
declare module "lunr" { | ||
export = lunr; | ||
} |
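Finally, the typical entry point with these typings: the `ConfigFunction` passed to `lunr()` runs with the Builder as `this`. A sketch with invented documents:

```typescript
import lunr = require("lunr");

const documents = [
  { id: "1", title: "hello world", body: "a bit like Solr, but much smaller" },
  { id: "2", title: "another document", body: "full text search in the browser" },
];

const idx = lunr(function () {
  this.ref("id");
  this.field("title");
  this.field("body");
  documents.forEach((doc) => this.add(doc));
});

idx.search("solr");
```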
{ | ||
"name": "@types/lunr", | ||
"version": "0.5.29", | ||
"version": "2.1.0", | ||
"description": "TypeScript definitions for lunr.js", | ||
@@ -8,4 +8,5 @@ "license": "MIT", | ||
{ | ||
"name": "Sebastian Lenz", | ||
"url": "https://github.com/sebastian-lenz" | ||
"name": "Sean Tan", | ||
"url": "https://github.com/seantanly", | ||
"githubUsername": "seantanly" | ||
} | ||
@@ -20,5 +21,4 @@ ], | ||
"dependencies": {}, | ||
"peerDependencies": {}, | ||
"typesPublisherContentHash": "f33a712b641de5195ba9b6406159890259a80de8f5c90455f8614c86f1120d12", | ||
"typeScriptVersion": "2.0" | ||
"typesPublisherContentHash": "e2d0f5761e27b20cad33e7a94ce3abcb6430e00e23fd6f790dfcff3f8b14425b", | ||
"typeScriptVersion": "2.3" | ||
} |
@@ -8,6 +8,6 @@ # Installation | ||
# Details | ||
Files were exported from https://www.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/lunr | ||
Files were exported from https://www.github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/lunr | ||
Additional Details | ||
* Last updated: Fri, 24 Mar 2017 16:00:10 GMT | ||
* Last updated: Thu, 14 Sep 2017 19:43:24 GMT | ||
* Dependencies: none | ||
@@ -17,2 +17,2 @@ * Global values: lunr | ||
# Credits | ||
These definitions were written by Sebastian Lenz <https://github.com/sebastian-lenz>. | ||
These definitions were written by Sean Tan <https://github.com/seantanly>. |
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
No v1
Quality: Package is not semver >=1. This means it is not stable and does not support ^ ranges.
Found 1 instance in 1 package