ml-levenberg-marquardt - npm Package Compare versions

Comparing version 1.0.3 to 1.0.4

src/__tests__/curve.js


History.md

@@ -0,1 +1,11 @@
+## [1.0.4](https://github.com/mljs/levenberg-marquardt/compare/v1.0.3...v1.0.4) (2019-05-09)
+### Bug Fixes
+* pluralize min/maxValues parameters & update README ([#20](https://github.com/mljs/levenberg-marquardt/issues/20)) ([afbbd90](https://github.com/mljs/levenberg-marquardt/commit/afbbd90))
+* returns on NaN ([4d61879](https://github.com/mljs/levenberg-marquardt/commit/4d61879))
 <a name="1.0.3"></a>
@@ -2,0 +12,0 @@ ## [1.0.3](https://github.com/mljs/levenberg-marquardt/compare/v1.0.2...v1.0.3) (2018-02-07)
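In practical terms, the two 1.0.4 fixes mean the bound options are now spelled minValues/maxValues (plural, one entry per parameter) and the fit stops as soon as the computed error becomes NaN instead of running to the iteration cap. A minimal usage sketch, assuming the package's default export (levenbergMarquardt) via CommonJS and illustrative data and bounds that are not taken from this diff:

// Sketch only: data, bounds and initial values below are made up for illustration.
const levenbergMarquardt = require('ml-levenberg-marquardt');

// Parameterized model: returns t -> a * sin(b * t)
const sineModel = ([a, b]) => (t) => a * Math.sin(b * t);

const data = {
  x: [0, 1, 2, 3, 4],
  y: [0, 1.82, -1.51, -0.56, 1.98] // roughly 2 * sin(2 * t)
};

const result = levenbergMarquardt(data, sineModel, {
  damping: 1.5,
  initialValues: [3, 3],
  minValues: [0, 0],   // pluralized in 1.0.4: one lower bound per parameter
  maxValues: [10, 10]  // one upper bound per parameter
});
// If the error calculation ever yields NaN, 1.0.4 breaks out of the iteration loop early.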


lib/index.js

@@ -76,6 +76,6 @@ 'use strict';
   for (var point = 0; point < m; point++) {
-    ans[point] = data.y[point] - evaluatedData[point];
+    ans[point] = [data.y[point] - evaluatedData[point]];
   }
-  return new mlMatrix.Matrix([ans]);
+  return new mlMatrix.Matrix(ans);
 }
@@ -100,12 +100,8 @@
 ) {
-  var identity = mlMatrix.Matrix.eye(params.length).mul(
-    damping * gradientDifference * gradientDifference
-  );
+  var value = damping * gradientDifference * gradientDifference;
+  var identity = mlMatrix.Matrix.eye(params.length, params.length, value);
   var l = data.x.length;
-  var evaluatedData = new Array(l);
   const func = parameterizedFunction(params);
-  for (var i = 0; i < l; i++) {
-    evaluatedData[i] = func(data.x[i]);
-  }
+  var evaluatedData = data.x.map((e) => func(e));
   var gradientFunc = gradientFunction(
@@ -118,6 +114,7 @@ data,
   );
-  var matrixFunc = matrixFunction(data, evaluatedData).transposeView();
+  var matrixFunc = matrixFunction(data, evaluatedData);
   var inverseMatrix = mlMatrix.inverse(
-    identity.add(gradientFunc.mmul(gradientFunc.transposeView()))
+    identity.add(gradientFunc.mmul(gradientFunc.transpose()))
   );
   params = new mlMatrix.Matrix([params]);
@@ -129,3 +126,3 @@ params = params.sub(
     .mul(gradientDifference)
-    .transposeView()
+    .transpose()
   );
@@ -143,2 +140,4 @@
  * @param {number} [options.gradientDifference = 10e-2] - Adjustment for decrease the damping parameter
+ * @param {Array<number>} [options.minValues] - Minimum allowed values for parameters
+ * @param {Array<number>} [options.maxValues] - Maximum allowed values for parameters
  * @param {Array<number>} [options.initialValues] - Array of initial parameter values
@@ -159,2 +158,4 @@ * @param {number} [options.maxIterations = 100] - Maximum of allowed iterations
     errorTolerance = 10e-3,
+    minValues,
+    maxValues,
     initialValues
@@ -176,7 +177,4 @@ } = options;
     );
-  } else {
-    let dataLen = data.x.length;
-    if (dataLen !== data.y.length) {
-      throw new Error('The data parameter elements must have the same size');
-    }
+  } else if (data.x.length !== data.y.length) {
+    throw new Error('The data parameter elements must have the same size');
   }
@@ -186,3 +184,10 @@
     initialValues || new Array(parameterizedFunction.length).fill(1);
+  let parLen = parameters.length;
+  maxValues = maxValues || new Array(parLen).fill(Number.MAX_SAFE_INTEGER);
+  minValues = minValues || new Array(parLen).fill(Number.MIN_SAFE_INTEGER);
+  if (maxValues.length !== minValues.length) {
+    throw new Error('minValues and maxValues must be the same size');
+  }
   if (!Array.isArray(parameters)) {
@@ -208,3 +213,12 @@ throw new Error('initialValues must be an array');
     );
+    for (let k = 0; k < parLen; k++) {
+      parameters[k] = Math.min(
+        Math.max(minValues[k], parameters[k]),
+        maxValues[k]
+      );
+    }
     error = errorCalculation(data, parameters, parameterizedFunction);
+    if (isNaN(error)) break;
     converged = error <= errorTolerance;
@@ -211,0 +225,0 @@ }
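The loop added at the end of this diff clamps every parameter back into its [minValues[k], maxValues[k]] interval after each iteration. The same logic in isolation, with a hypothetical helper name used only for illustration:

// Hypothetical standalone helper mirroring the clamping step added above.
function clampParameters(parameters, minValues, maxValues) {
  return parameters.map((p, k) => Math.min(Math.max(minValues[k], p), maxValues[k]));
}

// Example: values below/above their bounds are pulled back to the boundary.
console.log(clampParameters([-5, 0.5, 99], [0, 0, 0], [10, 10, 10])); // [ 0, 0.5, 10 ]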

 {
   "name": "ml-levenberg-marquardt",
-  "version": "1.0.3",
+  "version": "1.0.4",
   "description": "Curve fitting method in javascript",
   "main": "./lib/index.js",
-  "files": ["runkit.js", "src", "lib"],
   "module": "./src/index.js",
+  "files": [
+    "runkit.js",
+    "src",
+    "lib"
+  ],
   "scripts": {
     "eslint": "eslint src",
     "eslint-fix": "npm run eslint -- --fix",
-    "prepublish": "rollup -c",
-    "test": "npm run test-simple && npm run eslint",
-    "test-travis": "jest --coverage && codecov",
-    "test-simple": "jest"
+    "prepare": "rollup -c",
+    "test": "npm run test-only && npm run eslint",
+    "test-coverage": "npm run test-only -- --coverage",
+    "test-only": "jest",
+    "test-travis": "npm run test-coverage && npm run eslint"
   },
@@ -19,3 +25,11 @@ "repository": {
   },
-  "keywords": ["machine", "learning", "data", "mining", "datamining"],
+  "keywords": [
+    "machine",
+    "learning",
+    "data",
+    "mining",
+    "datamining",
+    "levenberg",
+    "marquardt"
+  ],
   "author": "Miguel Asencio <maasencioh@gmail.com>",
@@ -29,16 +43,20 @@ "license": "MIT",
   "devDependencies": {
-    "babel-plugin-transform-es2015-modules-commonjs": "^6.26.0",
-    "codecov": "^2.3.1",
-    "eslint": "^4.17.0",
-    "eslint-config-cheminfo": "^1.15.1",
-    "eslint-plugin-import": "^2.8.0",
-    "eslint-plugin-jest": "^21.7.0",
-    "jest": "^22.1.4",
-    "jest-matcher-deep-close-to": "^1.1.0",
-    "mocha-better-spec-reporter": "^3.1.0",
-    "rollup": "^0.55.3"
+    "babel-plugin-transform-es2015-modules-commonjs": "^6.26.2",
+    "cz-conventional-changelog": "^2.1.0",
+    "eslint": "^5.16.0",
+    "eslint-config-cheminfo": "^1.20.1",
+    "eslint-plugin-import": "^2.17.1",
+    "eslint-plugin-jest": "^22.4.1",
+    "jest": "^24.7.1",
+    "jest-matcher-deep-close-to": "^1.3.0",
+    "rollup": "^1.10.0"
   },
   "dependencies": {
-    "ml-matrix": "^5.0.1"
+    "ml-matrix": "^5.3.0"
   },
+  "config": {
+    "commitizen": {
+      "path": "./node_modules/cz-conventional-changelog"
+    }
+  }
 }
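The manifest still exposes the CommonJS build through "main" and the untranspiled ES source through "module", and the new commitizen block only affects contributors who use commitizen for conventional-changelog commit prompts. A quick consumer-side sketch, assuming a standard npm install of the published package:

// After: npm install ml-levenberg-marquardt
// Node/CommonJS consumers resolve "main" and get the rollup build in lib/index.js.
const levenbergMarquardt = require('ml-levenberg-marquardt');
// Bundlers that honor the "module" field load the ES source in src/index.js instead.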

@@ -49,5 +49,15 @@ # levenberg-marquardt
+// Optionally, restrict parameters to minimum & maximum values
+let minValues = [
+  /* a_min, b_min, c_min, ... */
+];
+let maxValues = [
+  /* a_max, b_max, c_max, ... */
+];
 const options = {
   damping: 1.5,
   initialValues: initialValues,
+  minValues: minValues,
+  maxValues: maxValues,
   gradientDifference: 10e-2,
@@ -54,0 +64,0 @@ maxIterations: 100,
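One behaviour worth noting alongside this README change, coming from the validation added in the index.js diffs further down: when both bound arrays are supplied they must have the same length, otherwise the fit throws. A small sketch (the model and data here are illustrative, only the error text comes from the diff):

// Sketch: mismatched bound lengths are rejected.
const levenbergMarquardt = require('ml-levenberg-marquardt');

const sinFunction = ([a, b]) => (t) => a * Math.sin(b * t);
const data = { x: [0, 1, 2], y: [0, 1.82, -1.51] };

try {
  levenbergMarquardt(data, sinFunction, {
    damping: 1.5,
    minValues: [0, 0],
    maxValues: [10] // one entry short
  });
} catch (e) {
  console.log(e.message); // 'minValues and maxValues must be the same size'
}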

 import errorCalculation from '../errorCalculation';
-function sinFunction([a, b]) {
-  return (t) => a * Math.sin(b * t);
-}
+describe('parameterError', () => {
+  describe('Linear functions', () => {
+    const linearFunction = ([slope, intercept]) => ((x) => slope * x + intercept);
-describe('errorCalculation test', () => {
-  it('Simple case', () => {
-    const len = 20;
-    let data = {
-      x: new Array(len),
-      y: new Array(len)
+    const sampleParameters = [1, 1];
+    const n = 10;
+    const xs = new Array(n).fill(0).map((zero, i) => i);
+    const data = {
+      x: xs,
+      y: xs.map(linearFunction(sampleParameters))
     };
-    let sampleFunction = sinFunction([2, 2]);
-    for (let i = 0; i < len; i++) {
-      data.x[i] = i;
-      data.y[i] = sampleFunction(i);
-    }
-    expect(errorCalculation(data, [2, 2], sinFunction)).toBeCloseTo(0, 3);
-    expect(errorCalculation(data, [4, 4], sinFunction)).toBeCloseTo(48.7, 1);
+    it('parameterError should be zero for an exact fit', () => {
+      expect(errorCalculation(data, sampleParameters, linearFunction)).toBeCloseTo(0, 3);
+    });
+    it('parameterError should match the sum of absolute difference between the model and the data', () => {
+      const parameters = Array.from(sampleParameters);
+      // Offset line so that it's still parallel but differs by 1 at each point
+      // Then each point will result in a residual increase of 1
+      parameters[1] += 1;
+      expect(errorCalculation(data, parameters, linearFunction)).toBeCloseTo(n, 3);
+    });
   });
 });
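For reference, the quantity these tests pin down is the sum of absolute residuals between the model and the data, which is why shifting the intercept by 1 over n = 10 points yields an error of exactly 10. A standalone sketch of that measure (the function name is mine, not the package's):

// Hypothetical re-implementation of the error measure the test describes.
function sumOfAbsoluteResiduals(data, parameters, parameterizedFunction) {
  const func = parameterizedFunction(parameters);
  return data.x.reduce((sum, x, i) => sum + Math.abs(data.y[i] - func(x)), 0);
}

const linearFunction = ([slope, intercept]) => (x) => slope * x + intercept;
const xs = Array.from({ length: 10 }, (_, i) => i);
const sampleData = { x: xs, y: xs.map(linearFunction([1, 1])) };
console.log(sumOfAbsoluteResiduals(sampleData, [1, 2], linearFunction)); // 10: each point is off by 1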

@@ -11,2 +11,4 @@ import errorCalculation from './errorCalculation';
  * @param {number} [options.gradientDifference = 10e-2] - Adjustment for decrease the damping parameter
+ * @param {Array<number>} [options.minValues] - Minimum allowed values for parameters
+ * @param {Array<number>} [options.maxValues] - Maximum allowed values for parameters
  * @param {Array<number>} [options.initialValues] - Array of initial parameter values
@@ -27,2 +29,4 @@ * @param {number} [options.maxIterations = 100] - Maximum of allowed iterations
     errorTolerance = 10e-3,
+    minValues,
+    maxValues,
     initialValues
@@ -44,7 +48,4 @@ } = options;
     );
-  } else {
-    let dataLen = data.x.length;
-    if (dataLen !== data.y.length) {
-      throw new Error('The data parameter elements must have the same size');
-    }
+  } else if (data.x.length !== data.y.length) {
+    throw new Error('The data parameter elements must have the same size');
   }
@@ -54,3 +55,10 @@
     initialValues || new Array(parameterizedFunction.length).fill(1);
+  let parLen = parameters.length;
+  maxValues = maxValues || new Array(parLen).fill(Number.MAX_SAFE_INTEGER);
+  minValues = minValues || new Array(parLen).fill(Number.MIN_SAFE_INTEGER);
+  if (maxValues.length !== minValues.length) {
+    throw new Error('minValues and maxValues must be the same size');
+  }
   if (!Array.isArray(parameters)) {
@@ -76,3 +84,12 @@ throw new Error('initialValues must be an array');
     );
+    for (let k = 0; k < parLen; k++) {
+      parameters[k] = Math.min(
+        Math.max(minValues[k], parameters[k]),
+        maxValues[k]
+      );
+    }
     error = errorCalculation(data, parameters, parameterizedFunction);
+    if (isNaN(error)) break;
     converged = error <= errorTolerance;
@@ -79,0 +96,0 @@ }
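When the new options are omitted, the defaults added in this diff make the bounds effectively inactive: every parameter may range over the whole safe-integer interval. In isolation:

// Default bounds when minValues/maxValues are not passed (mirrors the diff above).
const parLen = 3; // illustrative parameter count
const defaultMaxValues = new Array(parLen).fill(Number.MAX_SAFE_INTEGER);
const defaultMinValues = new Array(parLen).fill(Number.MIN_SAFE_INTEGER);
console.log(defaultMinValues[0], defaultMaxValues[0]); // -9007199254740991 9007199254740991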

@@ -51,6 +51,6 @@ import { inverse, Matrix } from 'ml-matrix';
   for (var point = 0; point < m; point++) {
-    ans[point] = data.y[point] - evaluatedData[point];
+    ans[point] = [data.y[point] - evaluatedData[point]];
   }
-  return new Matrix([ans]);
+  return new Matrix(ans);
 }
@@ -75,12 +75,8 @@
 ) {
-  var identity = Matrix.eye(params.length).mul(
-    damping * gradientDifference * gradientDifference
-  );
+  var value = damping * gradientDifference * gradientDifference;
+  var identity = Matrix.eye(params.length, params.length, value);
   var l = data.x.length;
-  var evaluatedData = new Array(l);
   const func = parameterizedFunction(params);
-  for (var i = 0; i < l; i++) {
-    evaluatedData[i] = func(data.x[i]);
-  }
+  var evaluatedData = data.x.map((e) => func(e));
   var gradientFunc = gradientFunction(
@@ -93,6 +89,7 @@ data,
   );
-  var matrixFunc = matrixFunction(data, evaluatedData).transposeView();
+  var matrixFunc = matrixFunction(data, evaluatedData);
   var inverseMatrix = inverse(
-    identity.add(gradientFunc.mmul(gradientFunc.transposeView()))
+    identity.add(gradientFunc.mmul(gradientFunc.transpose()))
   );
   params = new Matrix([params]);
@@ -104,3 +101,3 @@ params = params.sub(
     .mul(gradientDifference)
-    .transposeView()
+    .transpose()
   );
@@ -107,0 +104,0 @@
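Two ml-matrix details drive this refactor: Matrix.eye(rows, columns, value) builds the scaled identity directly instead of scaling it afterwards, and transpose() materializes a real matrix where the removed transposeView() only returned a lazy view over the original. A small sketch against ml-matrix ^5.3.0 (the range pinned in package.json); the concrete numbers are illustrative:

// Sketch of the two ml-matrix calls involved in the refactor.
const { Matrix } = require('ml-matrix');

const damping = 1.5;
const gradientDifference = 10e-2;            // illustrative values
const value = damping * gradientDifference * gradientDifference;

const oldIdentity = Matrix.eye(3).mul(value); // 1.0.3 style: scale the identity element-wise
const newIdentity = Matrix.eye(3, 3, value);  // 1.0.4 style: put `value` on the diagonal directly
console.log(oldIdentity.to2DArray());         // same 3x3 scaled identity either way
console.log(newIdentity.to2DArray());

const g = new Matrix([[1, 2, 3]]);                // 1x3
console.log(g.mmul(g.transpose()).to2DArray());   // [[14]]: transpose() returns a plain Matrix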
