ml-levenberg-marquardt
Comparing version 2.1.1 to 3.0.0
@@ -0,1 +1,8 @@
+## [3.0.0](https://github.com/mljs/levenberg-marquardt/compare/v2.1.1...v3.0.0) (2020-12-03)
+### Features
+* gradientDifference can now be in array ([fd9e4cb](https://github.com/mljs/levenberg-marquardt/commit/fd9e4cbfbe24d8705277b91199f4e52e829b5053)), closes [#32](https://github.com/mljs/levenberg-marquardt/issues/32)
 ## [2.1.1](https://github.com/mljs/levenberg-marquardt/compare/v2.1.0...v2.1.1) (2020-03-16)
@@ -2,0 +9,0 @@
lib/index.js
 'use strict';
-function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }
-var isArray = _interopDefault(require('is-any-array'));
+var isArray = require('is-any-array');
 var mlMatrix = require('ml-matrix');
+function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
+var isArray__default = /*#__PURE__*/_interopDefaultLegacy(isArray);
+function checkOptions(data, parameterizedFunction, options) {
+  let {
+    minValues,
+    maxValues,
+    initialValues,
+    weights = 1,
+    damping = 1e-2,
+    dampingStepUp = 11,
+    dampingStepDown = 9,
+    maxIterations = 100,
+    errorTolerance = 1e-7,
+    centralDifference = false,
+    gradientDifference = 10e-2,
+    improvementThreshold = 1e-3,
+  } = options;
+  if (damping <= 0) {
+    throw new Error('The damping option must be a positive number');
+  } else if (!data.x || !data.y) {
+    throw new Error('The data parameter must have x and y elements');
+  } else if (
+    !isArray__default['default'](data.x) ||
+    data.x.length < 2 ||
+    !isArray__default['default'](data.y) ||
+    data.y.length < 2
+  ) {
+    throw new Error(
+      'The data parameter elements must be an array with more than 2 points',
+    );
+  } else if (data.x.length !== data.y.length) {
+    throw new Error('The data parameter elements must have the same size');
+  }
+  let parameters =
+    initialValues || new Array(parameterizedFunction.length).fill(1);
+  let nbPoints = data.y.length;
+  let parLen = parameters.length;
+  maxValues = maxValues || new Array(parLen).fill(Number.MAX_SAFE_INTEGER);
+  minValues = minValues || new Array(parLen).fill(Number.MIN_SAFE_INTEGER);
+  if (maxValues.length !== minValues.length) {
+    throw new Error('minValues and maxValues must be the same size');
+  }
+  if (!isArray__default['default'](parameters)) {
+    throw new Error('initialValues must be an array');
+  }
+  if (typeof gradientDifference === 'number') {
+    gradientDifference = new Array(parameters.length).fill(gradientDifference);
+  } else if (isArray__default['default'](gradientDifference)) {
+    if (gradientDifference.length !== parLen) {
+      gradientDifference = new Array(parLen).fill(gradientDifference[0]);
+    }
+  } else {
+    throw new Error(
+      'gradientDifference should be a number or array with length equal to the number of parameters',
+    );
+  }
+  let filler;
+  if (typeof weights === 'number') {
+    let value = 1 / weights ** 2;
+    filler = () => value;
+  } else if (isArray__default['default'](weights)) {
+    if (weights.length < data.x.length) {
+      let value = 1 / weights[0] ** 2;
+      filler = () => value;
+    } else {
+      filler = (i) => 1 / weights[i] ** 2;
+    }
+  } else {
+    throw new Error(
+      'weights should be a number or array with length equal to the number of data points',
+    );
+  }
+  let weightSquare = new Array(data.x.length);
+  for (let i = 0; i < nbPoints; i++) {
+    weightSquare[i] = filler(i);
+  }
+  return {
+    minValues,
+    maxValues,
+    parameters,
+    weightSquare,
+    damping,
+    dampingStepUp,
+    dampingStepDown,
+    maxIterations,
+    errorTolerance,
+    centralDifference,
+    gradientDifference,
+    improvementThreshold,
+  };
+}
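To make the weights handling above concrete, here is a standalone sketch of the same normalization (a hypothetical helper written for illustration, not a library export): a scalar weight is expanded to every point, and an array shorter than the data is rebuilt from its first value.

// Hypothetical helper mirroring the filler/weightSquare logic above.
function buildWeightSquare(weights, nbPoints) {
  if (typeof weights === 'number') {
    // One scalar weight w for all points: weightSquare[i] = 1 / w^2
    return new Array(nbPoints).fill(1 / weights ** 2);
  }
  // An array shorter than the data is reconstructed from its first value.
  const w =
    weights.length < nbPoints
      ? new Array(nbPoints).fill(weights[0])
      : weights;
  return w.slice(0, nbPoints).map((wi) => 1 / wi ** 2);
}

buildWeightSquare(2, 3); // [0.25, 0.25, 0.25]
buildWeightSquare([2, 1, 1], 3); // [0.25, 1, 1]
buildWeightSquare([2, 1], 3); // [0.25, 0.25, 0.25] (refilled from weights[0])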
 /**
  * Calculate current error
+ * the sum of the weighted squares of the errors (or weighted residuals) between the data.y
+ * and the curve-fit function.
  * @ignore
@@ -14,2 +116,3 @@ * @param {{x:Array<number>, y:Array<number>}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ]
  * @param {function} parameterizedFunction - Takes the parameters and returns a function with the independent variable as a parameter
+ * @param {Array} weightSquare - Square of weights
  * @return {number}
@@ -21,8 +124,8 @@ */
   parameterizedFunction,
+  weightSquare,
 ) {
   let error = 0;
   const func = parameterizedFunction(parameters);
   for (let i = 0; i < data.x.length; i++) {
-    error += Math.abs(data.y[i] - func(data.x[i]));
+    error += Math.pow(data.y[i] - func(data.x[i]), 2) / weightSquare[i];
   }
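In other words, with weightSquare[i] = 1 / w_i² as built by checkOptions above, the quantity minimized is the weighted sum of squared residuals (notation mine, not from the diff):

$$E(\mathbf{p}) = \sum_{i=1}^{m} \frac{\left(y_i - f(x_i;\mathbf{p})\right)^2}{\mathtt{weightSquare}_i} = \sum_{i=1}^{m} w_i^2 \left(y_i - f(x_i;\mathbf{p})\right)^2$$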
@@ -39,6 +142,8 @@
  * @param {Array<number>} params - Array of previous parameter values
- * @param {number} gradientDifference - Adjustment for decrease the damping parameter
+ * @param {number|array} gradientDifference - The step size to approximate the jacobian matrix
+ * @param {boolean} centralDifference - If true the jacobian matrix is approximated by central differences otherwise by forward differences
  * @param {function} paramFunction - Takes the parameters and returns a function with the independent variable as a parameter
  * @return {Matrix}
  */
 function gradientFunction(
@@ -50,19 +155,40 @@ data,
   paramFunction,
+  centralDifference,
 ) {
-  const n = params.length;
-  const m = data.x.length;
+  const nbParams = params.length;
+  const nbPoints = data.x.length;
+  let ans = mlMatrix.Matrix.zeros(nbParams, nbPoints);
-  let ans = new Array(n);
-  for (let param = 0; param < n; param++) {
-    ans[param] = new Array(m);
+  let rowIndex = 0;
+  for (let param = 0; param < nbParams; param++) {
+    if (gradientDifference[param] === 0) continue;
+    let delta = gradientDifference[param];
     let auxParams = params.slice();
-    auxParams[param] += gradientDifference;
+    auxParams[param] += delta;
     let funcParam = paramFunction(auxParams);
-    for (let point = 0; point < m; point++) {
-      ans[param][point] = evaluatedData[point] - funcParam(data.x[point]);
+    if (!centralDifference) {
+      for (let point = 0; point < nbPoints; point++) {
+        ans.set(
+          rowIndex,
+          point,
+          (evaluatedData[point] - funcParam(data.x[point])) / delta,
+        );
+      }
+    } else {
+      auxParams = params.slice();
+      auxParams[param] -= delta;
+      delta *= 2;
+      let funcParam2 = paramFunction(auxParams);
+      for (let point = 0; point < nbPoints; point++) {
+        ans.set(
+          rowIndex,
+          point,
+          (funcParam2(data.x[point]) - funcParam(data.x[point])) / delta,
+        );
+      }
+    }
+    rowIndex++;
   }
-  return new mlMatrix.Matrix(ans);
+  return ans;
 }
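For reference, the two finite-difference schemes implemented above are, for parameter p_j with step δ_j = gradientDifference[j] (notation mine):

$$\frac{\partial f}{\partial p_j} \approx \frac{f(x;\mathbf{p}+\delta_j e_j)-f(x;\mathbf{p})}{\delta_j}\ \text{(forward)},\qquad \frac{\partial f}{\partial p_j} \approx \frac{f(x;\mathbf{p}+\delta_j e_j)-f(x;\mathbf{p}-\delta_j e_j)}{2\delta_j}\ \text{(central)}$$

The code stores the negated quotients, e.g. (f(p) − f(p + δ_j e_j))/δ_j in the forward branch; the sign is compensated when the caller subtracts the resulting perturbations. Central differences cost one extra function evaluation per parameter in exchange for second-order accuracy.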
@@ -80,9 +206,8 @@
-  let ans = new Array(m);
+  let ans = new mlMatrix.Matrix(m, 1);
   for (let point = 0; point < m; point++) {
-    ans[point] = [data.y[point] - evaluatedData[point]];
+    ans.set(point, 0, data.y[point] - evaluatedData[point]);
   }
-  return new mlMatrix.Matrix(ans);
+  return ans;
 }
@@ -96,3 +221,4 @@
  * @param {number} damping - Levenberg-Marquardt parameter
- * @param {number} gradientDifference - Adjustment for decrease the damping parameter
+ * @param {number|array} gradientDifference - The step size to approximate the jacobian matrix
+ * @param {boolean} centralDifference - If true the jacobian matrix is approximated by central differences otherwise by forward differences
  * @param {function} parameterizedFunction - Takes the parameters and returns a function with the independent variable as a parameter
@@ -107,4 +233,6 @@ * @return {Array<number>}
   parameterizedFunction,
+  centralDifference,
+  weights,
 ) {
-  let value = damping * gradientDifference * gradientDifference;
+  let value = damping;
   let identity = mlMatrix.Matrix.eye(params.length, params.length, value);
@@ -125,18 +253,24 @@
     parameterizedFunction,
+    centralDifference,
   );
-  let matrixFunc = matrixFunction(data, evaluatedData);
+  let residualError = matrixFunction(data, evaluatedData);
   let inverseMatrix = mlMatrix.inverse(
-    identity.add(gradientFunc.mmul(gradientFunc.transpose())),
+    identity.add(
+      gradientFunc.mmul(
+        gradientFunc.transpose().scale('row', { scale: weights }),
+      ),
+    ),
   );
-  params = new mlMatrix.Matrix([params]);
-  params = params.sub(
-    inverseMatrix
-      .mmul(gradientFunc)
-      .mmul(matrixFunc)
-      .mul(gradientDifference)
-      .transpose(),
+  let jacobianWeigthResidualError = gradientFunc.mmul(
+    residualError.scale('row', { scale: weights }),
   );
-  return params.to1DArray();
+  let perturbations = inverseMatrix.mmul(jacobianWeigthResidualError);
+  return {
+    perturbations,
+    jacobianWeigthResidualError,
+  };
 }
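Assembled, the step above solves the weighted Levenberg-Marquardt normal equations. In my notation, with J̃ the matrix returned by gradientFunction, W = diag(weightSquare) and r = y − ŷ:

$$\boldsymbol{\delta} = \left(\lambda I + \tilde{J} W \tilde{J}^{\mathsf{T}}\right)^{-1} \tilde{J} W \mathbf{r}$$

The caller then applies p ← clamp(p − δ, minValues, maxValues). Up to the sign convention folded into J̃, this is the standard update δ = (JᵀWJ + λI)⁻¹ JᵀW(y − ŷ) described in the Gavin notes linked from the README.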
@@ -149,4 +283,10 @@
  * @param {object} [options] - Options object
- * @param {number} [options.damping] - Levenberg-Marquardt parameter
- * @param {number} [options.gradientDifference = 10e-2] - Adjustment for decrease the damping parameter
+ * @param {number|array} [options.weights = 1] - weighting vector, if the length does not match with the number of data points, the vector is reconstructed with first value.
+ * @param {number} [options.damping = 1e-2] - Levenberg-Marquardt parameter, small values of the damping parameter λ result in a Gauss-Newton update and large values of λ result in a gradient descent update
+ * @param {number} [options.dampingStepDown = 9] - factor to reduce the damping (Levenberg-Marquardt parameter) when there is not an improvement when updating parameters.
+ * @param {number} [options.dampingStepUp = 11] - factor to increase the damping (Levenberg-Marquardt parameter) when there is an improvement when updating parameters.
+ * @param {number} [options.improvementThreshold = 1e-3] - the threshold to define an improvement through an update of parameters
+ * @param {number|array} [options.gradientDifference = 10e-2] - The step size to approximate the jacobian matrix
+ * @param {boolean} [options.centralDifference = false] - If true the jacobian matrix is approximated by central differences otherwise by forward differences
  * @param {Array<number>} [options.minValues] - Minimum allowed values for parameters
@@ -165,49 +305,30 @@ * @param {Array<number>} [options.maxValues] - Maximum allowed values for parameters
   let {
-    maxIterations = 100,
-    gradientDifference = 10e-2,
-    damping = 0,
-    errorTolerance = 10e-3,
     minValues,
     maxValues,
-    initialValues,
-  } = options;
+    parameters,
+    weightSquare,
+    damping,
+    dampingStepUp,
+    dampingStepDown,
+    maxIterations,
+    errorTolerance,
+    centralDifference,
+    gradientDifference,
+    improvementThreshold,
+  } = checkOptions(data, parameterizedFunction, options);
-  if (damping <= 0) {
-    throw new Error('The damping option must be a positive number');
-  } else if (!data.x || !data.y) {
-    throw new Error('The data parameter must have x and y elements');
-  } else if (
-    !isArray(data.x) ||
-    data.x.length < 2 ||
-    !isArray(data.y) ||
-    data.y.length < 2
-  ) {
-    throw new Error(
-      'The data parameter elements must be an array with more than 2 points',
-    );
-  } else if (data.x.length !== data.y.length) {
-    throw new Error('The data parameter elements must have the same size');
-  }
+  let error = errorCalculation(
+    data,
+    parameters,
+    parameterizedFunction,
+    weightSquare,
+  );
-  let parameters =
-    initialValues || new Array(parameterizedFunction.length).fill(1);
-  let parLen = parameters.length;
-  maxValues = maxValues || new Array(parLen).fill(Number.MAX_SAFE_INTEGER);
-  minValues = minValues || new Array(parLen).fill(Number.MIN_SAFE_INTEGER);
+  let converged = error <= errorTolerance;
-  if (maxValues.length !== minValues.length) {
-    throw new Error('minValues and maxValues must be the same size');
-  }
+  let iteration = 0;
+  for (; iteration < maxIterations && !converged; iteration++) {
+    let previousError = error;
-  if (!isArray(parameters)) {
-    throw new Error('initialValues must be an array');
-  }
-  let error = errorCalculation(data, parameters, parameterizedFunction);
-  let converged = error <= errorTolerance;
-  let iteration;
-  for (iteration = 0; iteration < maxIterations && !converged; iteration++) {
-    parameters = step(
+    let { perturbations, jacobianWeigthResidualError } = step(
      data,
@@ -218,7 +339,9 @@ parameters,
      parameterizedFunction,
+     centralDifference,
+     weightSquare,
    );
-    for (let k = 0; k < parLen; k++) {
+    for (let k = 0; k < parameters.length; k++) {
      parameters[k] = Math.min(
-       Math.max(minValues[k], parameters[k]),
+       Math.max(minValues[k], parameters[k] - perturbations.get(k, 0)),
        maxValues[k],
@@ -228,4 +351,25 @@ );
-    error = errorCalculation(data, parameters, parameterizedFunction);
+    error = errorCalculation(
+      data,
+      parameters,
+      parameterizedFunction,
+      weightSquare,
+    );
+    if (isNaN(error)) break;
+    let improvementMetric =
+      (previousError - error) /
+      perturbations
+        .transpose()
+        .mmul(perturbations.mulS(damping).add(jacobianWeigthResidualError))
+        .get(0, 0);
+    if (improvementMetric > improvementThreshold) {
+      damping = Math.max(damping / dampingStepDown, 1e-7);
+    } else {
+      error = previousError;
+      damping = Math.min(damping * dampingStepUp, 1e7);
+    }
     converged = error <= errorTolerance;
@@ -232,0 +376,0 @@ }
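The acceptance test added inside the loop is the gain-ratio criterion from the same Gavin notes (notation mine): a step δ is kept when

$$\rho = \frac{E(\mathbf{p}_{\text{prev}}) - E(\mathbf{p})}{\boldsymbol{\delta}^{\mathsf{T}}\left(\lambda\,\boldsymbol{\delta} + \tilde{J} W \mathbf{r}\right)} > \mathtt{improvementThreshold}$$

in which case λ is divided by dampingStepDown (floored at 1e-7); otherwise λ is multiplied by dampingStepUp (capped at 1e7) and error is reset to its previous value. Note that only the error, not the parameter vector, is rolled back on a rejected step.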
package.json
 {
   "name": "ml-levenberg-marquardt",
-  "version": "2.1.1",
+  "version": "3.0.0",
   "description": "Curve fitting method in javascript",
@@ -12,2 +12,3 @@ "main": "./lib/index.js",
   "scripts": {
+    "testA": "jest -i src/__tests__/exceptions.test.js",
     "compile": "rollup -c",
@@ -41,18 +42,18 @@ "eslint": "eslint src",
   "devDependencies": {
-    "@babel/plugin-transform-modules-commonjs": "^7.8.3",
+    "@babel/plugin-transform-modules-commonjs": "^7.12.1",
     "benchmark": "^2.1.4",
-    "cz-conventional-changelog": "^3.1.0",
-    "eslint": "^6.8.0",
-    "eslint-config-cheminfo": "^2.0.4",
-    "eslint-plugin-import": "^2.20.1",
-    "eslint-plugin-jest": "^23.8.1",
-    "eslint-plugin-prettier": "^3.1.2",
-    "jest": "^25.1.0",
-    "jest-matcher-deep-close-to": "^1.3.0",
-    "prettier": "^1.19.1",
-    "rollup": "^1.32.0"
+    "cz-conventional-changelog": "^3.3.0",
+    "eslint": "^7.14.0",
+    "eslint-config-cheminfo": "^5.2.2",
+    "eslint-plugin-import": "^2.22.1",
+    "eslint-plugin-jest": "^24.1.3",
+    "eslint-plugin-prettier": "^3.2.0",
+    "jest": "^26.6.3",
+    "jest-matcher-deep-close-to": "^2.0.1",
+    "prettier": "^2.2.1",
+    "rollup": "^2.34.1"
   },
   "dependencies": {
     "is-any-array": "^0.1.0",
-    "ml-matrix": "^6.4.1"
+    "ml-matrix": "^6.5.3"
   },
@@ -59,0 +60,0 @@ "config": {
README.md
@@ -10,2 +10,8 @@ # levenberg-marquardt
+## [API Documentation](https://mljs.github.io/levenberg-marquardt/)
+This algorithm is based on the article [Brown, Kenneth M., and J. E. Dennis. "Derivative free analogues of the Levenberg-Marquardt and Gauss algorithms for nonlinear least squares approximation." Numerische Mathematik 18.4 (1971): 289-297.](https://doi.org/10.1007/BF01404679) and [http://people.duke.edu/~hpgavin/ce281/lm.pdf](http://people.duke.edu/~hpgavin/ce281/lm.pdf)
+In order to get a general idea of the problem you could also check the [Wikipedia article](https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm).
 ## Installation
@@ -15,8 +21,12 @@
-## [API Documentation](https://mljs.github.io/levenberg-marquardt/)
+## Options
-This algorithm is based on the article [Brown, Kenneth M., and J. E. Dennis. "Derivative free analogues of the Levenberg-Marquardt and Gauss algorithms for nonlinear least squares approximation." Numerische Mathematik 18.4 (1971): 289-297.](https://doi.org/10.1007/BF01404679)
+The following options can change the behavior of the code.
-In order to get a general idea of the problem you could also check the [Wikipedia article](https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm).
+### centralDifference
+The Jacobian matrix is approximated by finite differences: either forward differences or central differences (one additional function evaluation per parameter). The centralDifference option selects between them; by default the Jacobian is calculated with forward differences.
+### gradientDifference
+The Jacobian matrix is approximated as mentioned above; the gradientDifference option is the step size (dp) used to compute the difference between the function evaluated at the current parameters and at the perturbed parameters. It can be a number (the same step size for every parameter) or an array with a different value for each parameter. If an entry of gradientDifference is zero, the corresponding derivative is zero and that parameter is held fixed (see the sketch below).
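For instance, a minimal sketch of the two options above (the model, data, and step sizes here are invented for illustration; the API shape matches the tests in this diff):

import levenbergMarquardt from 'ml-levenberg-marquardt';

// f(t) = a * exp(-b * t): two parameters
function exponential([a, b]) {
  return (t) => a * Math.exp(-b * t);
}

const x = [0, 1, 2, 3, 4, 5];
const y = x.map((t) => 2 * Math.exp(-0.3 * t));

const options = {
  initialValues: [1, 1],
  centralDifference: true, // two-sided Jacobian approximation
  gradientDifference: [1e-2, 1e-3], // per-parameter step sizes; a 0 entry would freeze that parameter
};

const { parameterValues, parameterError, iterations } = levenbergMarquardt(
  { x, y },
  exponential,
  options,
);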
 ## Example
@@ -23,0 +33,0 @@
src/__tests__/curve.test.js
 /* eslint-disable jest/no-standalone-expect */
-import { toBeDeepCloseTo } from 'jest-matcher-deep-close-to';
+import { toBeDeepCloseTo, toMatchCloseTo } from 'jest-matcher-deep-close-to';
 import levenbergMarquardt from '..';
-expect.extend({ toBeDeepCloseTo });
+expect.extend({ toBeDeepCloseTo, toMatchCloseTo });
@@ -23,5 +23,5 @@ describe('curve', () => {
   maxIterations: 1000,
-  errorTolerance: 1e-3,
-  maxBound: [11, 11, 11],
-  minBound: [1, 1, 1],
+  errorTolerance: 1e-7,
+  maxValues: [11, 11, 11],
+  minValues: [1, 2.7, 1],
   initialValues: [3.5, 3.8, 4],
@@ -38,3 +38,7 @@ },
   options: {
+    maxIterations: 100,
+    gradientDifference: 10e-2,
     damping: 0.1,
+    dampingStepDown: 1,
+    dampingStepUp: 1,
     initialValues: [3, 3],
@@ -79,5 +83,34 @@ },
     damping: 0.01,
+    gradientDifference: [0.01, 0.0001, 0.0001, 0.01, 0.0001, 0],
     initialValues: [1.1, 0.15, 0.29, 4.05, 0.17, 0.3],
     maxIterations: 500,
   },
+  decimalsForParameterValues: 1,
 },
+{
+  name: 'Sum of lorentzians, central differences',
+  getFunctionFromParameters: function sumOfLorentzians(p) {
+    return (t) => {
+      let nL = p.length;
+      let factor, p2;
+      let result = 0;
+      for (let i = 0; i < nL; i += 3) {
+        p2 = Math.pow(p[i + 2] / 2, 2);
+        factor = p[i + 1] * p2;
+        result += factor / (Math.pow(t - p[i], 2) + p2);
+      }
+      return result;
+    };
+  },
+  n: 100,
+  xStart: 0,
+  xEnd: 99,
+  problemParameters: [1, 0.1, 0.3, 4, 0.15, 0.3],
+  options: {
+    damping: 0.01,
+    gradientDifference: [0.01, 0.0001, 0.0001, 0.01, 0.0001],
+    centralDifference: true,
+    initialValues: [1.1, 0.15, 0.29, 4.05, 0.17, 0.28],
+    maxIterations: 500,
-    errorTolerance: 10e-5,
+    errorTolerance: 10e-8,
   },
@@ -168,3 +201,3 @@ decimalsForParameterValues: 1,
   iterations: 200,
-  parameterError: 374.6448,
+  parameterError: 16398.0009709,
   parameterValues: [-16.7697, 43.4549, 1018.8938, -4.3514],
@@ -175,2 +208,3 @@ },
   maxIterations: 200,
+  weights: 1,
   initialValues: new Float64Array([0, 100, 1, 0.1]),
@@ -201,3 +235,4 @@ },
   );
-  expect(actual).toBeDeepCloseTo(expected, decimals);
+  actual.parameterValues = Array.from(actual.parameterValues);
+  expect(actual).toMatchCloseTo(expected, decimals);
 });
@@ -204,0 +239,0 @@ });
src/__tests__/errorCalculation.test.js
@@ -9,2 +9,3 @@ import errorCalculation from '../errorCalculation';
 const n = 10;
+const w = new Float64Array(n).fill(1);
 const xs = new Array(n).fill(0).map((zero, i) => i);
@@ -18,3 +19,3 @@ const data = {
 expect(
-  errorCalculation(data, sampleParameters, linearFunction),
+  errorCalculation(data, sampleParameters, linearFunction, w),
 ).toBeCloseTo(0, 3);
@@ -28,3 +29,3 @@ });
 parameters[1] += 1;
-expect(errorCalculation(data, parameters, linearFunction)).toBeCloseTo(
+expect(errorCalculation(data, parameters, linearFunction, w)).toBeCloseTo(
   n,
@@ -43,5 +44,7 @@ 3,
 const y = new Float64Array(n);
+const w = new Float64Array(n);
 const fct = linearFunction(sampleParameters);
 for (let i = 0; i < n; i++) {
   x[i] = i;
+  w[i] = 1;
   y[i] = fct(i);
@@ -52,3 +55,3 @@ }
 expect(
-  errorCalculation({ x, y }, sampleParameters, linearFunction),
+  errorCalculation({ x, y }, sampleParameters, linearFunction, w),
 ).toBeCloseTo(0, 3);
@@ -63,3 +66,3 @@ });
 expect(
-  errorCalculation({ x, y }, parameters, linearFunction),
+  errorCalculation({ x, y }, parameters, linearFunction, w),
 ).toBeCloseTo(n, 3);
@@ -66,0 +69,0 @@ });
src/__tests__/exceptions.test.js
@@ -13,4 +13,4 @@ import { toBeDeepCloseTo } from 'jest-matcher-deep-close-to';
 describe('options', () => {
-  it('Should throw an error when no options are provided (missing damping)', () => {
-    expect(() => levenbergMarquardt()).toThrow(
+  it('Should throw an error when bad options are provided (negative damping)', () => {
+    expect(() => levenbergMarquardt({}, () => 1, { damping: -1 })).toThrow(
       'The damping option must be a positive number',
@@ -17,0 +17,0 @@ );
src/errorCalculation.js
 /**
  * Calculate current error
+ * the sum of the weighted squares of the errors (or weighted residuals) between the data.y
+ * and the curve-fit function.
  * @ignore
@@ -7,2 +8,3 @@ * @param {{x:Array<number>, y:Array<number>}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ]
  * @param {function} parameterizedFunction - Takes the parameters and returns a function with the independent variable as a parameter
+ * @param {Array} weightSquare - Square of weights
  * @return {number}
@@ -14,8 +16,8 @@ */
   parameterizedFunction,
+  weightSquare,
 ) {
   let error = 0;
   const func = parameterizedFunction(parameters);
   for (let i = 0; i < data.x.length; i++) {
-    error += Math.abs(data.y[i] - func(data.x[i]));
+    error += Math.pow(data.y[i] - func(data.x[i]), 2) / weightSquare[i];
   }
@@ -22,0 +24,0 @@
src/index.js
@@ -1,3 +0,2 @@
-import isArray from 'is-any-array';
+import checkOptions from './checkOptions';
 import errorCalculation from './errorCalculation';
@@ -11,4 +10,10 @@ import step from './step';
  * @param {object} [options] - Options object
- * @param {number} [options.damping] - Levenberg-Marquardt parameter
- * @param {number} [options.gradientDifference = 10e-2] - Adjustment for decrease the damping parameter
+ * @param {number|array} [options.weights = 1] - weighting vector, if the length does not match with the number of data points, the vector is reconstructed with first value.
+ * @param {number} [options.damping = 1e-2] - Levenberg-Marquardt parameter, small values of the damping parameter λ result in a Gauss-Newton update and large values of λ result in a gradient descent update
+ * @param {number} [options.dampingStepDown = 9] - factor to reduce the damping (Levenberg-Marquardt parameter) when there is not an improvement when updating parameters.
+ * @param {number} [options.dampingStepUp = 11] - factor to increase the damping (Levenberg-Marquardt parameter) when there is an improvement when updating parameters.
+ * @param {number} [options.improvementThreshold = 1e-3] - the threshold to define an improvement through an update of parameters
+ * @param {number|array} [options.gradientDifference = 10e-2] - The step size to approximate the jacobian matrix
+ * @param {boolean} [options.centralDifference = false] - If true the jacobian matrix is approximated by central differences otherwise by forward differences
  * @param {Array<number>} [options.minValues] - Minimum allowed values for parameters
@@ -27,49 +32,30 @@ * @param {Array<number>} [options.maxValues] - Maximum allowed values for parameters
   let {
-    maxIterations = 100,
-    gradientDifference = 10e-2,
-    damping = 0,
-    errorTolerance = 10e-3,
     minValues,
     maxValues,
-    initialValues,
-  } = options;
+    parameters,
+    weightSquare,
+    damping,
+    dampingStepUp,
+    dampingStepDown,
+    maxIterations,
+    errorTolerance,
+    centralDifference,
+    gradientDifference,
+    improvementThreshold,
+  } = checkOptions(data, parameterizedFunction, options);
-  if (damping <= 0) {
-    throw new Error('The damping option must be a positive number');
-  } else if (!data.x || !data.y) {
-    throw new Error('The data parameter must have x and y elements');
-  } else if (
-    !isArray(data.x) ||
-    data.x.length < 2 ||
-    !isArray(data.y) ||
-    data.y.length < 2
-  ) {
-    throw new Error(
-      'The data parameter elements must be an array with more than 2 points',
-    );
-  } else if (data.x.length !== data.y.length) {
-    throw new Error('The data parameter elements must have the same size');
-  }
+  let error = errorCalculation(
+    data,
+    parameters,
+    parameterizedFunction,
+    weightSquare,
+  );
-  let parameters =
-    initialValues || new Array(parameterizedFunction.length).fill(1);
-  let parLen = parameters.length;
-  maxValues = maxValues || new Array(parLen).fill(Number.MAX_SAFE_INTEGER);
-  minValues = minValues || new Array(parLen).fill(Number.MIN_SAFE_INTEGER);
+  let converged = error <= errorTolerance;
-  if (maxValues.length !== minValues.length) {
-    throw new Error('minValues and maxValues must be the same size');
-  }
+  let iteration = 0;
+  for (; iteration < maxIterations && !converged; iteration++) {
+    let previousError = error;
-  if (!isArray(parameters)) {
-    throw new Error('initialValues must be an array');
-  }
-  let error = errorCalculation(data, parameters, parameterizedFunction);
-  let converged = error <= errorTolerance;
-  let iteration;
-  for (iteration = 0; iteration < maxIterations && !converged; iteration++) {
-    parameters = step(
+    let { perturbations, jacobianWeigthResidualError } = step(
      data,
@@ -80,7 +66,9 @@ parameters,
      parameterizedFunction,
+     centralDifference,
+     weightSquare,
    );
-    for (let k = 0; k < parLen; k++) {
+    for (let k = 0; k < parameters.length; k++) {
      parameters[k] = Math.min(
-       Math.max(minValues[k], parameters[k]),
+       Math.max(minValues[k], parameters[k] - perturbations.get(k, 0)),
        maxValues[k],
@@ -90,4 +78,25 @@ );
-    error = errorCalculation(data, parameters, parameterizedFunction);
+    error = errorCalculation(
+      data,
+      parameters,
+      parameterizedFunction,
+      weightSquare,
+    );
+    if (isNaN(error)) break;
+    let improvementMetric =
+      (previousError - error) /
+      perturbations
+        .transpose()
+        .mmul(perturbations.mulS(damping).add(jacobianWeigthResidualError))
+        .get(0, 0);
+    if (improvementMetric > improvementThreshold) {
+      damping = Math.max(damping / dampingStepDown, 1e-7);
+    } else {
+      error = previousError;
+      damping = Math.min(damping * dampingStepUp, 1e7);
+    }
     converged = error <= errorTolerance;
@@ -94,0 +103,0 @@ }
src/step.js
 import { inverse, Matrix } from 'ml-matrix';
+import gradientFunction from './gradientFunction';
-/**
- * Difference of the matrix function over the parameters
- * @ignore
- * @param {{x:Array<number>, y:Array<number>}} data - Array of points to fit in the format [x1, x2, ... ], [y1, y2, ... ]
- * @param {Array<number>} evaluatedData - Array of previous evaluated function values
- * @param {Array<number>} params - Array of previous parameter values
- * @param {number} gradientDifference - Adjustment for decrease the damping parameter
- * @param {function} paramFunction - The parameters and returns a function with the independent variable as a parameter
- * @return {Matrix}
- */
-function gradientFunction(
-  data,
-  evaluatedData,
-  params,
-  gradientDifference,
-  paramFunction,
-) {
-  const n = params.length;
-  const m = data.x.length;
-  let ans = new Array(n);
-  for (let param = 0; param < n; param++) {
-    ans[param] = new Array(m);
-    let auxParams = params.slice();
-    auxParams[param] += gradientDifference;
-    let funcParam = paramFunction(auxParams);
-    for (let point = 0; point < m; point++) {
-      ans[param][point] = evaluatedData[point] - funcParam(data.x[point]);
-    }
-  }
-  return new Matrix(ans);
-}
 /**
@@ -48,9 +15,8 @@ * Matrix function over the samples
-  let ans = new Array(m);
+  let ans = new Matrix(m, 1);
   for (let point = 0; point < m; point++) {
-    ans[point] = [data.y[point] - evaluatedData[point]];
+    ans.set(point, 0, data.y[point] - evaluatedData[point]);
   }
-  return new Matrix(ans);
+  return ans;
 }
@@ -64,3 +30,4 @@
  * @param {number} damping - Levenberg-Marquardt parameter
- * @param {number} gradientDifference - Adjustment for decrease the damping parameter
+ * @param {number|array} gradientDifference - The step size to approximate the jacobian matrix
+ * @param {boolean} centralDifference - If true the jacobian matrix is approximated by central differences otherwise by forward differences
  * @param {function} parameterizedFunction - Takes the parameters and returns a function with the independent variable as a parameter
@@ -75,4 +42,6 @@ * @return {Array<number>}
   parameterizedFunction,
+  centralDifference,
+  weights,
 ) {
-  let value = damping * gradientDifference * gradientDifference;
+  let value = damping;
   let identity = Matrix.eye(params.length, params.length, value);
@@ -93,18 +62,24 @@
     parameterizedFunction,
+    centralDifference,
   );
-  let matrixFunc = matrixFunction(data, evaluatedData);
+  let residualError = matrixFunction(data, evaluatedData);
   let inverseMatrix = inverse(
-    identity.add(gradientFunc.mmul(gradientFunc.transpose())),
+    identity.add(
+      gradientFunc.mmul(
+        gradientFunc.transpose().scale('row', { scale: weights }),
+      ),
+    ),
   );
-  params = new Matrix([params]);
-  params = params.sub(
-    inverseMatrix
-      .mmul(gradientFunc)
-      .mmul(matrixFunc)
-      .mul(gradientDifference)
-      .transpose(),
+  let jacobianWeigthResidualError = gradientFunc.mmul(
+    residualError.scale('row', { scale: weights }),
  );
-  return params.to1DArray();
+  let perturbations = inverseMatrix.mmul(jacobianWeigthResidualError);
+  return {
+    perturbations,
+    jacobianWeigthResidualError,
+  };
 }