@hdelva/termennetwerk_client
Comparing version 3.1.1 to 3.2.0
lib/examples/StrictAutoComplete.js
@@ -33,9 +33,34 @@ "use strict";
 }
+// returns how many of the `expected` string's tokens are present in the `found` string
+// it allows 1 token to be incomplete if it is a prefix
 function strictResultPrefix(expected, found) {
-    // flip expected and found
-    // we want `expected` to be a prefix of `found`
-    return tokenwise_1.default(strictPrefix_1.default, found, expected);
+    const maxPrefixTolerance = 1; // how many prefix matches do we tolerate
+    let prefixTolerance = 0; // how many prefix matches do we have so far
+    // count how many times we expect to find each token
+    let expectedTokens = new Map();
+    for (const token of expected.trim().split(/\s/)) {
+        const count = expectedTokens.get(token) || 0;
+        expectedTokens.set(token, count + 1);
+    }
+    const foundTokens = found.trim().split(/\s/);
+    let score = 0;
+    for (const [token, expectedCount] of expectedTokens.entries()) {
+        let count = 0;
+        for (const foundToken of foundTokens) {
+            if (foundToken === token) {
+                count++;
+            }
+            else if (prefixTolerance < maxPrefixTolerance && foundToken.startsWith(token)) {
+                prefixTolerance++;
+                count++;
+            }
+        }
+        if (count >= expectedCount) {
+            score += expectedCount;
+        }
+    }
+    return score;
 }
 function prefixResultFilter(expected, found, similarity) {
-    return expected.replace(/\p{Z}/gu, "").length === similarity;
+    return expected.trim().split(/\s/).length === similarity;
 }
@@ -42,0 +67,0 @@ function diceResult(expected, found) {
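
The new scoring pass is easiest to follow on a concrete input. Below is a small standalone sketch of the same token-counting idea (scoreTokens and the sample strings are illustrative names, not part of the package's API), tracing how a partially typed query is scored:

// Illustrative re-implementation of the token scoring shown in the diff above.
function scoreTokens(expected: string, found: string): number {
    const maxPrefixTolerance = 1; // at most one query token may match as a prefix only
    let prefixTolerance = 0;
    // count how many times each token occurs in the query
    const expectedTokens = new Map<string, number>();
    for (const token of expected.trim().split(/\s/)) {
        expectedTokens.set(token, (expectedTokens.get(token) || 0) + 1);
    }
    const foundTokens = found.trim().split(/\s/);
    let score = 0;
    for (const [token, expectedCount] of expectedTokens.entries()) {
        let count = 0;
        for (const foundToken of foundTokens) {
            if (foundToken === token) {
                count++; // exact token match
            } else if (prefixTolerance < maxPrefixTolerance && foundToken.startsWith(token)) {
                prefixTolerance++; // spend the single allowed prefix match
                count++;
            }
        }
        // a token only contributes if it occurs at least as often as expected
        if (count >= expectedCount) {
            score += expectedCount;
        }
    }
    return score;
}

console.log(scoreTokens("anne fr", "anne frank")); // 2: "anne" exact, "fr" is a prefix of "frank"
console.log(scoreTokens("anne fr", "anne"));       // 1: "fr" matches nothing
console.log(scoreTokens("an fr", "anne frank"));   // 1: the one prefix allowance is spent on "an"

Compared to the removed tokenwiseCompare call, this makes the tolerance explicit: a query like "anne fr" still scores full marks while the user is mid-word, but only one incomplete token is forgiven per comparison.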
lib/index.js
@@ -7,6 +7,7 @@ "use strict";
 exports.tokenwiseCompare = exports.strictPrefixSimilarity = exports.fuzzyPrefixSimilarity = exports.fuzzyIndexSimilarity = exports.commonPrefixSimilarity = exports.asymmetricDiceCoefficient = exports.NFKD = exports.SimilarityConfiguration = exports.ResultUniqueFilter = exports.ResultStore = exports.ResultRanking = exports.ResultEmitter = exports.QueryAggregator = exports.QueryAgent = exports.StrictAutoComplete = exports.FuzzyAutoComplete = void 0;
+const StrictAutoComplete_1 = __importDefault(require("./examples/StrictAutoComplete"));
 var FuzzyAutoComplete_1 = require("./examples/FuzzyAutoComplete");
 Object.defineProperty(exports, "FuzzyAutoComplete", { enumerable: true, get: function () { return __importDefault(FuzzyAutoComplete_1).default; } });
-var StrictAutoComplete_1 = require("./examples/StrictAutoComplete");
-Object.defineProperty(exports, "StrictAutoComplete", { enumerable: true, get: function () { return __importDefault(StrictAutoComplete_1).default; } });
+var StrictAutoComplete_2 = require("./examples/StrictAutoComplete");
+Object.defineProperty(exports, "StrictAutoComplete", { enumerable: true, get: function () { return __importDefault(StrictAutoComplete_2).default; } });
 var QueryAgent_1 = require("./QueryAgent");
@@ -40,2 +41,15 @@ Object.defineProperty(exports, "QueryAgent", { enumerable: true, get: function () { return __importDefault(QueryAgent_1).default; } });
 Object.defineProperty(exports, "tokenwiseCompare", { enumerable: true, get: function () { return __importDefault(tokenwise_1).default; } });
+const client = new StrictAutoComplete_1.default([
+    "https://termen.opoi.org/nta",
+    "https://termen.opoi.org/vtmk",
+    "https://termen.opoi.org/cht",
+    "https://termen.opoi.org/rkdartists"
+], 10);
+client.on("data", (q) => {
+    console.log(q.object.value);
+});
+client.on("reset", () => {
+    console.clear();
+});
+client.query("anne fr");
 //# sourceMappingURL=index.js.map
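
The block appended to the compiled index.js instantiates a demo client at module load. Written the way a consumer of the package would call it, the same pattern looks like this (a minimal sketch; the import specifier assumes the package's entry point, the endpoint URLs are the ones from the diff, and the quad-shaped result is inferred from the q.object.value access above):

import { StrictAutoComplete } from "@hdelva/termennetwerk_client";

// query four terminology sources at once, keeping the 10 best results
const client = new StrictAutoComplete([
    "https://termen.opoi.org/nta",
    "https://termen.opoi.org/vtmk",
    "https://termen.opoi.org/cht",
    "https://termen.opoi.org/rkdartists",
], 10);

// results are emitted incrementally; the object term carries the matched label
client.on("data", (quad) => {
    console.log(quad.object.value);
});

// a new query invalidates everything emitted so far
client.on("reset", () => {
    console.clear();
});

client.query("anne fr");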
package.json
 {
   "name": "@hdelva/termennetwerk_client",
-  "version": "3.1.1",
+  "version": "3.2.0",
   "description": "Proof of concept of client-side autocompletion",
@@ -5,0 +5,0 @@ "main": "lib/index.js",
src/examples/StrictAutoComplete.ts
@@ -20,13 +20,43 @@ import IQueryEmitter from "../ResultEmitter";
 function strictRelationFilter(expected: string, found: string, similarity: number): boolean {
     // one of the expected tokens has been matched, so this relation can be relevant
     return similarity > 0;
 }
+// returns how many of the `expected` string's tokens are present in the `found` string
+// it allows 1 token to be incomplete if it is a prefix
 function strictResultPrefix(expected: string, found: string): number {
-    // flip expected and found
-    // we want `expected` to be a prefix of `found`
-    return tokenwiseCompare(strictPrefixSimilarity, found, expected);
+    const maxPrefixTolerance = 1; // how many prefix matches do we tolerate
+    let prefixTolerance = 0; // how many prefix matches do we have so far
+    // count how many times we expect to find each token
+    let expectedTokens: Map<string, number> = new Map();
+    for (const token of expected.trim().split(/\s/)) {
+        const count = expectedTokens.get(token) || 0;
+        expectedTokens.set(token, count + 1);
+    }
+    const foundTokens = found.trim().split(/\s/);
+    let score = 0;
+    for (const [token, expectedCount] of expectedTokens.entries()) {
+        let count = 0;
+        for (const foundToken of foundTokens) {
+            if (foundToken === token) {
+                count++;
+            } else if (prefixTolerance < maxPrefixTolerance && foundToken.startsWith(token)) {
+                prefixTolerance++;
+                count++;
+            }
+        }
+        if (count >= expectedCount) {
+            score += expectedCount;
+        }
+    }
+    return score;
 }
 function prefixResultFilter(expected: string, found: string, similarity: number): boolean {
-    return expected.replace(/\p{Z}/gu, "").length === similarity;
+    // each token must be accounted for
+    return expected.trim().split(/\s/).length === similarity;
 }
@@ -33,0 +63,0 @@
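
Together with the new return value of strictResultPrefix, the rewritten prefixResultFilter compares the score against the number of query tokens rather than the query's character count, so a result passes exactly when every token was accounted for. A quick check of that invariant, reusing the illustrative scoreTokens sketch from above (passesFilter is likewise a hypothetical name):

// a result passes when every token of the query contributed to the score
function passesFilter(expected: string, similarity: number): boolean {
    return expected.trim().split(/\s/).length === similarity;
}

const query = "anne fr";
console.log(passesFilter(query, scoreTokens(query, "anne frank"))); // true  (2 === 2)
console.log(passesFilter(query, scoreTokens(query, "anne")));       // false (2 !== 1)

Both versions encode the intent stated in the old comments, that the typed query should behave as a tokenwise prefix of the result; the new version additionally caps the number of incomplete tokens at one.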