binguru - npm Package Compare versions

Comparing version 1.0.0-alpha.10.0 to 1.0.0-alpha.11.0


package.json
{
"name": "binguru",
-"version": "1.0.0-alpha.10.0",
+"version": "1.0.0-alpha.11.0",
"description": "BinGuru is a Javascript package with an API to several established data binning / data classification methods, often used for visualizing data on choropleth maps. It also includes an implementation of a new, consensus binning method, 'Resiliency'.",

@@ -39,3 +39,7 @@ "main": "dist/index.mjs",

"typescript": "^5.1.5"
},
+"engines": {
+"npm": ">=7.24.0",
+"node": ">=16.10.0"
+}
}
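
For orientation before the source diff, a minimal usage sketch; the BinGuru constructor and its signature are assumptions based on the description above, while logarithmicInterval(), unique(), visualize(), and map() are confirmed by the hunks below.

// Hedged sketch; constructor signature assumed, not taken from the README.
import { BinGuru } from "binguru";

const rawData = [1, 5, 7, 10, 22, 24, 29, 45, 46, 88];
const bg = new BinGuru(rawData, 5); // data plus desired bin count (assumed)

const result = bg.logarithmicInterval();
console.log(result.binBreaks);          // bin boundaries
console.log(result.dataBinAssignments); // per-item bin IDs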


src/index.ts

@@ -69,4 +69,4 @@ /***********************************************************

this.data = this.rawData.filter(value => this.isValid(value)); // only work with non NaN, non null, non undefined, numeric data
-this.minSortedData = JSON.parse(JSON.stringify(this.data)).sort((n1:number, n2:number) => n1 - n2);
-this.maxSortedData = JSON.parse(JSON.stringify(this.data)).sort((n1:number, n2:number) => n2 - n1);
+this.minSortedData = JSON.parse(JSON.stringify(this.data)).sort((n1: number, n2: number) => n1 - n2);
+this.maxSortedData = JSON.parse(JSON.stringify(this.data)).sort((n1: number, n2: number) => n2 - n1);

@@ -105,5 +105,5 @@ // Compute Basic Stats

*/
-getMostFrequentElement(array:number[]) {
-const store:any = {};
-array.forEach((num:number) => store[num] ? store[num] += 1 : store[num] = 1);
+getMostFrequentElement(array: number[]) {
+const store: any = {};
+array.forEach((num: number) => store[num] ? store[num] += 1 : store[num] = 1);
return parseInt(Object.keys(store).sort((a, b) => store[b] - store[a])[0]);

@@ -115,3 +115,3 @@ }

*/
-getFrequencyOfMostFrequentElement(array:number[]) {
+getFrequencyOfMostFrequentElement(array: number[]) {
var mp = new Map();

@@ -133,7 +133,7 @@ var n = array.length;

});
-keys.sort((a:number, b:number) => a - b);
+keys.sort((a: number, b: number) => a - b);
// Traverse through map and print frequencies
let max = -Infinity;
-keys.forEach((key:string) => {
+keys.forEach((key: string) => {
let val = mp.get(key);

@@ -185,6 +185,8 @@ if (val > max) {

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": context.binCount,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -203,3 +205,3 @@ }

-function recursive(data:number[]) {
+function recursive(data: number[]) {
let data_mean: number = data.reduce(function (a, b) { return a + b }) / data.length;

@@ -223,6 +225,8 @@ let head = data.filter(function (d) { return d > data_mean });

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -244,3 +248,3 @@ }

let clusters = ss.ckmeans(context.data, context.binCount);
-binBreaks = clusters.map(function (cluster:number[]) {
+binBreaks = clusters.map(function (cluster: number[]) {
return cluster[cluster.length - 1]; // Last element of each cluster is the bin's upper limit;

@@ -258,6 +262,8 @@ });

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -288,6 +294,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": context.binCount,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -319,6 +327,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -350,6 +360,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -395,6 +407,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -423,6 +437,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -462,6 +478,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, 0),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -496,6 +514,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -527,6 +547,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -565,6 +587,8 @@ }

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -580,9 +604,9 @@ }

*/
-logarithmicInterval(logBase:number|string='auto') {
+logarithmicInterval(logBase: number | string = 'auto') {
let context = this;
let binBreaks: number[] = [];
-let binBreak:number = context.min;
+let binBreak: number = context.min;
// Calculate the logarithmic base
-if(logBase == "auto"){
+if (logBase == "auto") {

@@ -594,16 +618,16 @@ // Calculate the logarithmic base from the data extent and desired bin count

for (let i = 0; i < context.binCount; i++) {
-if(i != 0) binBreaks.push(binBreak);
+if (i != 0) binBreaks.push(binBreak);
binBreak *= logBase;
}
-}else{
+} else {
// Calculate the logarithmic interval size
-const logIntervalSize = (Math.log10(context.max) - Math.log10(context.min)) / context.binCount;
+const logIntervalSize = (Math.log10(context.max) - Math.log10(context.min)) / context.binCount;
for (let i = 0; i < context.binCount; i++) {
-if(i != 0) binBreaks.push(binBreak);
+if (i != 0) binBreaks.push(binBreak);
binBreak *= Math.pow(10, logIntervalSize);
}
}
// Compute Bin Sizes
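
A quick sanity check of the fixed-interval branch above, as a standalone sketch (not package code):

// With min = 1, max = 10000, binCount = 4, the log10 range 0..4 splits
// into intervals of size 1, so the breaks land on powers of ten.
const [min, max, binCount] = [1, 10000, 4];
const logIntervalSize = (Math.log10(max) - Math.log10(min)) / binCount; // 1
let binBreak = min;
const binBreaks: number[] = [];
for (let i = 0; i < binCount; i++) {
  if (i != 0) binBreaks.push(binBreak);
  binBreak *= Math.pow(10, logIntervalSize);
}
console.log(binBreaks); // [10, 100, 1000]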

@@ -617,6 +641,8 @@ let binSizes = context.computeBinSizes(binBreaks);

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -638,3 +664,3 @@ }

const equation = firstBinSize.toString() + ' * (1 - x^' + context.binCount.toString() + ') = ' + seriesSum.toString() + ' * (1 - x)';
-const solutions = nerdamer.solveEquations(equation, 'x').map((solution:any) => nerdamer(solution).evaluate().text());
+const solutions = nerdamer.solveEquations(equation, 'x').map((solution: any) => nerdamer(solution).evaluate().text());
let commonRatio = 1;
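
For context, the equation handed to nerdamer above is the finite geometric-series sum, cross-multiplied to avoid a division: the binCount bin sizes grow geometrically from firstBinSize by an unknown common ratio x and must add up to seriesSum. In LaTeX, with a = firstBinSize, n = binCount, S = seriesSum:

\[
  S = a + ax + ax^{2} + \dots + ax^{n-1} = a\,\frac{1 - x^{n}}{1 - x}
  \quad\Longrightarrow\quad
  a\,(1 - x^{n}) = S\,(1 - x)
\]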

@@ -687,6 +713,8 @@

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -708,3 +736,3 @@ }

// can be used for any binning of data with `bins <= binCount`
-function getMatrices(data:number[], binCount:number) {
+function getMatrices(data: number[], binCount: number) {

@@ -813,3 +841,3 @@ // in the original implementation, these matrices are referred to

// and derive an array of n breaks.
-function breaks(data:number[], lower_bin_limits:number[][], binCount:number) {
+function breaks(data: number[], lower_bin_limits: number[][], binCount: number) {

@@ -837,6 +865,8 @@ var k = data.length - 1,

if (context.binCount > context.data.length) return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": null,
"binBreaks": [],
"binSizes": { "valids": null, "invalids": null },
"dataRange": [context.min, context.max],
"dataBinAssignments": {}

@@ -867,6 +897,8 @@ };

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -883,24 +915,26 @@ }

*/
-unique() {
-let context = this;
-const binBreaks:number[] = Array.from(new Set(context.minSortedData));
-// Compute Bin Sizes
-const binSizes = context.computeBinSizes(binBreaks);
-// Compute Data-> Bin Assignments
-const dataBinAssignments = context.computeDataBinAssignments(binBreaks);
-// Return final Bin Object
-return {
-"binCount": binBreaks.length + 1,
-"binBreaks": context.roundToPrecision(binBreaks, context.precision),
-"binSizes": binSizes,
-"dataRange": [context.min, context.max],
-"dataBinAssignments": dataBinAssignments
-}
+unique() {
+let context = this;
+const binBreaks: number[] = Array.from(new Set(context.minSortedData));
+// Compute Bin Sizes
+const binSizes = context.computeBinSizes(binBreaks);
+// Compute Data-> Bin Assignments
+const dataBinAssignments = context.computeDataBinAssignments(binBreaks);
+// Return final Bin Object
+return {
+"rawData": context.rawData,
+"processedData": context.data,
+"dataRange": [context.min, context.max],
+"binCount": binBreaks.length + 1,
+"binBreaks": context.roundToPrecision(binBreaks, context.precision),
+"binSizes": binSizes,
+"dataBinAssignments": dataBinAssignments
+}
}
/**

@@ -922,6 +956,8 @@ * Unclassed

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": null,
"binBreaks": [],
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments

@@ -942,3 +978,3 @@ }

// Data structure to store the binObj corresponding to each binningMethod.
-let binObjs:any = {};
+let binObjs: any = {};

@@ -1012,3 +1048,3 @@ binningMethods.forEach(function (binningMethod) {

break;
case GEOMETRIC_INTERVAL:

@@ -1043,11 +1079,11 @@ binObj = context.geometricInterval();

if (context.isValid(val)) {
-let binAssignmentsForPrimaryKey = Array.from(Object.values(binObjs)).map((binObj:any) => binObj["dataBinAssignments"][primaryKey]);
+let binAssignmentsForPrimaryKey = Array.from(Object.values(binObjs)).map((binObj: any) => binObj["dataBinAssignments"][primaryKey]);
if (!(primaryKey in frequencyOfMostFrequentBins)) {
frequencyOfMostFrequentBins[primaryKey] = 0;
}
-frequencyOfMostFrequentBins[primaryKey] = context.getFrequencyOfMostFrequentElement(binAssignmentsForPrimaryKey);
+frequencyOfMostFrequentBins[primaryKey] = context.getFrequencyOfMostFrequentElement(binAssignmentsForPrimaryKey);
if (!(primaryKey in mostFrequentBins)) {
mostFrequentBins[primaryKey] = 0;
}
-mostFrequentBins[primaryKey] = context.getMostFrequentElement(binAssignmentsForPrimaryKey);
+mostFrequentBins[primaryKey] = context.getMostFrequentElement(binAssignmentsForPrimaryKey);
}

@@ -1057,5 +1093,5 @@ });

// Compute Data for Resiliency
-let resiliencyData:object[] = [];
+let resiliencyData: object[] = [];
Object.keys(frequencyOfMostFrequentBins).forEach(function (primaryKey, valindex) {
-let obj:any = {};
+let obj: any = {};
obj["primaryKey"] = primaryKey;

@@ -1074,3 +1110,3 @@ obj["value"] = context.rawData[valindex];

-resiliencyData.forEach(function (d:any) {
+resiliencyData.forEach(function (d: any) {
itemwiseBinPriorities[d["primaryKey"]] = [];

@@ -1102,3 +1138,3 @@ itemwiseBinPriorityWeights[d["primaryKey"]] = [];

let priorityBins: number[] = [];
-resiliencyData.forEach(function (d:any) {
+resiliencyData.forEach(function (d: any) {
let priorityBin = itemwiseBinPriorities[d["primaryKey"]][0]; // First element is highest priority.

@@ -1117,3 +1153,3 @@ if (!(priorityBin in binInfo)) {

priorityBins.forEach(function (priorityBin, valindex) {
-binInfo[priorityBin] = binInfo[priorityBin].sort((n1:number, n2:number) => n1 - n2);
+binInfo[priorityBin] = binInfo[priorityBin].sort((n1: number, n2: number) => n1 - n2);
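
Read together, the Resiliency hunks trace the consensus flow: each binning method's result is kept in binObjs; for every data item the bin assignments across all methods are collected (binAssignmentsForPrimaryKey), and getMostFrequentElement / getFrequencyOfMostFrequentElement yield the item's most frequent ("priority") bin and its vote count; items are then grouped by priority bin, each group is sorted ascending, and, per the comment in the next hunk, the first item from the 2nd bin onwards supplies the binBreaks.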

@@ -1139,6 +1175,8 @@ // The first item from the 2nd bin onwards would be the binBreaks.

return {
"rawData": context.rawData,
"processedData": context.data,
"dataRange": [context.min, context.max],
"binCount": binBreaks.length + 1,
"binBreaks": context.roundToPrecision(binBreaks, context.precision),
"binSizes": binSizes,
"dataRange": [context.min, context.max],
"dataBinAssignments": dataBinAssignments,

@@ -1194,3 +1232,3 @@ "binObjs": binObjs,

-let dataBinAssignments:any = {};
+let dataBinAssignments: any = {};

@@ -1214,3 +1252,3 @@ // Iterate through all values for the current feature/attribute.

dataBinAssignments[primaryKey] = binID;
-}else{
+} else {
// For invalid values, the binID will be null, by design choice.

@@ -1227,3 +1265,3 @@ dataBinAssignments[primaryKey] = null;

*/
-isValid(val:any) {
+isValid(val: any) {
return !Number.isNaN(Number(val)) && val != undefined && val != null && val != "";
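
One subtlety in this predicate: Number("") evaluates to 0, not NaN, so it is the explicit val != "" guard that rejects empty strings, and val != undefined already excludes null via loose equality. A standalone behavior check (sketch, not package code):

const isValid = (val: any) =>
  !Number.isNaN(Number(val)) && val != undefined && val != null && val != "";

console.log(isValid("3.5")); // true: numeric string
console.log(isValid(""));    // false: Number("") is 0, caught only by != ""
console.log(isValid(null));  // false
console.log(isValid("abc")); // false: Number("abc") is NaN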

@@ -1235,3 +1273,3 @@ }

*/
-roundToPrecision(array:number[], precision = 2) {
+roundToPrecision(array: number[], precision = 2) {
return array.map((item) => parseFloat(item.toFixed(precision)));

@@ -1242,5 +1280,5 @@ }

/*
-* Create a visualization showing the bin intervals, counts, sizes. Currently using Vega-Lite.
+* Create a legend-like visualization showing the bin intervals, counts, sizes. Currently using Vega-Lite.
*/
-visualize(binBreaks:number[], binningMethodName:string, colorSchemeCode = "viridis") {
+visualize(binguruRes: any, binningMethodName: string, colorSchemeCode = "viridis") {
let context = this;
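
Note the breaking signature change: visualize now takes the whole bin object rather than a bare binBreaks array (it reads binguruRes["binBreaks"] internally, as the next hunk shows). A hedged call-site sketch, reusing the bg and result names assumed earlier:

// Before (<= 1.0.0-alpha.10.0): bg.visualize(result.binBreaks, methodName);
// After (1.0.0-alpha.11.0); the method-name label here is an assumption.
const vlSpec = bg.visualize(result, "logarithmicInterval");
// vlSpec is a Vega-Lite spec; render it with, e.g., vega-embed.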

@@ -1253,2 +1291,3 @@ /**

let dataMax = context.max;
+let binBreaks = binguruRes["binBreaks"];
let [binMin, binMax] = [Infinity, -Infinity];

@@ -1278,3 +1317,3 @@ for (var i = 0; i < binBreaks.length; i++) {

for (var i = 0; i <= binBreaks.length; i++) {
-let obj:any = {};
+let obj: any = {};
let binID = (i + 1).toString();

@@ -1374,3 +1413,3 @@ if (i == 0) {

"scale": {
"domain": data.map((obj:any) => obj["binID"]), // Important, as otherwise the binIDs are sorted as 1,10,11,..., 2,3,4,5,...
"domain": data.map((obj: any) => obj["binID"]), // Important, as otherwise the binIDs are sorted as 1,10,11,..., 2,3,4,5,...
"scheme": colorSchemeCode,

@@ -1428,3 +1467,3 @@ "type": "threshold"

-if(binningMethodName == UNCLASSED){
+if (binningMethodName == UNCLASSED) {
delete vlSpec["layer"][0]["encoding"]["color"];

@@ -1438,13 +1477,13 @@ vlSpec["layer"][0]["mark"]["color"] = {

"stops": [
{"offset": 0, "color": "#440154"},
{"offset": 0.1, "color": "#48186a"},
{"offset": 0.2, "color": "#472d7b"},
{"offset": 0.3, "color": "#424086"},
{"offset": 0.4, "color": "#3b528b"},
{"offset": 0.5, "color": "#33638d"},
{"offset": 0.6, "color": "#2c728e"},
{"offset": 0.7, "color": "#26828e"},
{"offset": 0.8, "color": "#21918c"},
{"offset": 0.9, "color": "#21a784"},
{"offset": 1, "color": "#29b872"}
{ "offset": 0, "color": "#440154" },
{ "offset": 0.1, "color": "#48186a" },
{ "offset": 0.2, "color": "#472d7b" },
{ "offset": 0.3, "color": "#424086" },
{ "offset": 0.4, "color": "#3b528b" },
{ "offset": 0.5, "color": "#33638d" },
{ "offset": 0.6, "color": "#2c728e" },
{ "offset": 0.7, "color": "#26828e" },
{ "offset": 0.8, "color": "#21918c" },
{ "offset": 0.9, "color": "#21a784" },
{ "offset": 1, "color": "#29b872" }
]

@@ -1457,2 +1496,52 @@ }

+/*
+* Create a choropleth map, given the output object and
+*/
+map(binguruRes: any, inputData: number[], geoData: any[], inputDataFeature: string, geoDataFeature: string, geoDataLookup:string = "id", inputDataKey:string = "id", colorSchemeCode = "viridis") {
+let vlSpec = {
+"$schema": "https://vega.github.io/schema/vega-lite/v5.json",
+"width": 500,
+"height": 300,
+"data": {
+"values": geoData,
+"format": {
+"type": "topojson",
+"feature": geoDataFeature
+}
+},
+"transform": [{
+"lookup": geoDataLookup,
+"from": {
+"data": {
+"values": inputData
+},
+"key": inputDataKey,
+"fields": [inputDataFeature]
+}
+}],
+"projection": {
+"type": "albersUsa"
+},
+"mark": { type: "geoshape", tooltip: { "content": "data" }, "invalid": null },
+"encoding": {
+"color": {
+"field": inputDataFeature,
+"type": "quantitative",
+"condition": {
+"test": "!isValid(datum['" + inputDataFeature + "'])",
+"value": null
+},
+"scale": {
+domain: binguruRes["binBreaks"],
+type: "threshold",
+scheme: colorSchemeCode
+}
+}
+}
+}
+return vlSpec;
+}
}
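
A hedged usage sketch for the new map() method; the TopoJSON document and field names below are illustrative assumptions. (Note that inputData is annotated number[] although the Vega-Lite lookup joins records on inputDataKey, which suggests an array of objects.)

// Sketch only: usTopo, the field names, and the data are assumptions.
declare const bg: any;     // a BinGuru instance
declare const result: any; // bin object returned by a binning method
declare const usTopo: any; // a TopoJSON document, e.g. US counties

const inputData: any[] = [{ id: "01", rate: 4.2 }, { id: "02", rate: 6.9 }];
const vlMapSpec = bg.map(
  result,     // binBreaks feed the threshold color scale
  inputData,  // tabular values to join onto the geometry
  usTopo,     // geoData
  "rate",     // inputDataFeature: the field to color by
  "counties", // geoDataFeature: the TopoJSON object to unpack
  "id",       // geoDataLookup: lookup key on the geometry
  "id"        // inputDataKey: key on the tabular rows
);
// Like visualize(), map() returns a Vega-Lite spec for rendering.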

(Diffs of four additional files in this release are omitted by the viewer: two were too big to display and two are of unsupported file types.)
