ml-regression-power
Advanced tools
Comparing version 1.0.0 to 1.0.1
@@ -0,1 +1,10 @@ | ||
## [1.0.1](https://github.com/mljs/regression-power/compare/v1.0.0...v1.0.1) (2019-01-11) | ||
### Bug Fixes | ||
* latex formatting with big / small numbers ([8a6f9be](https://github.com/mljs/regression-power/commit/8a6f9be)) | ||
<a name="1.0.0"></a> | ||
@@ -2,0 +11,0 @@ # 1.0.0 (2017-04-28) |
@@ -10,59 +10,72 @@ 'use strict'; | ||
/**
 * Power regression of the form f(x) = A * x^B.
 *
 * Fitting linearizes the model: ln(y) = ln(A) + B * ln(x), which is
 * solved with a simple linear regression on (ln(x), ln(y)).
 */
class PowerRegression extends BaseRegression__default {
  /**
   * @param {Array<number>|boolean} x - x values, or `true` to reload a saved model.
   * @param {Array<number>|object} y - y values, or the JSON model when reloading.
   */
  constructor(x, y) {
    super();
    if (x === true) {
      // reloading model
      this.A = y.A;
      this.B = y.B;
    } else {
      BaseRegression.checkArrayLength(x, y);
      regress(this, x, y);
    }
  }

  _predict(newInputs) {
    return this.A * Math.pow(newInputs, this.B);
  }

  /**
   * @returns {object} serializable model: { name, A, B }.
   */
  toJSON() {
    return {
      name: 'powerRegression',
      A: this.A,
      B: this.B
    };
  }

  toString(precision) {
    return `f(x) = ${BaseRegression.maybeToPrecision(
      this.A,
      precision
    )} * x^${BaseRegression.maybeToPrecision(this.B, precision)}`;
  }

  toLaTeX(precision) {
    let latex = '';
    if (this.B >= 0) {
      latex = `f(x) = ${BaseRegression.maybeToPrecision(
        this.A,
        precision
      )}x^{${BaseRegression.maybeToPrecision(this.B, precision)}}`;
    } else {
      // Negative exponent: render as a fraction with a positive exponent.
      latex = `f(x) = \\frac{${BaseRegression.maybeToPrecision(
        this.A,
        precision
      )}}{x^{${BaseRegression.maybeToPrecision(-this.B, precision)}}}`;
    }
    // Wrap scientific-notation exponents (e.g. "e+9") in braces so big /
    // small numbers render correctly in LaTeX (fix from 1.0.1).
    latex = latex.replace(/e([+-]?[0-9]+)/g, 'e^{$1}');
    return latex;
  }

  /**
   * Reloads a model produced by toJSON().
   * @param {object} json
   * @returns {PowerRegression}
   * @throws {TypeError} when json is not a power regression model.
   */
  static load(json) {
    if (json.name !== 'powerRegression') {
      throw new TypeError('not a power regression model');
    }
    return new PowerRegression(true, json);
  }
}

/**
 * Fits A and B in y = A * x^B via linear regression in log-log space.
 * Mutates `pr`, setting pr.A and pr.B.
 */
function regress(pr, x, y) {
  const n = x.length;
  const xl = new Array(n);
  const yl = new Array(n);
  for (let i = 0; i < n; i++) {
    xl[i] = Math.log(x[i]);
    yl[i] = Math.log(y[i]);
  }
  const linear = new SimpleLinearRegression(xl, yl);
  // intercept = ln(A), slope = B
  pr.A = Math.exp(linear.intercept);
  pr.B = linear.slope;
}

module.exports = PowerRegression;
{ | ||
"name": "ml-regression-power", | ||
"version": "1.0.0", | ||
"version": "1.0.1", | ||
"description": "", | ||
@@ -33,10 +33,13 @@ "main": "lib/index.js", | ||
"devDependencies": { | ||
"babel-jest": "^19.0.0", | ||
"babel-jest": "^23.6.0", | ||
"babel-plugin-transform-es2015-modules-commonjs": "^6.24.1", | ||
"eslint": "^3.19.0", | ||
"eslint-config-cheminfo": "^1.7.0", | ||
"eslint": "^5.12.0", | ||
"eslint-config-cheminfo": "^1.20.0", | ||
"eslint-plugin-import": "^2.14.0", | ||
"eslint-plugin-jest": "^22.1.3", | ||
"eslint-plugin-no-only-tests": "^1.1.0", | ||
"jest": "^19.0.2", | ||
"jest": "^23.6.0", | ||
"npm-run-all": "^4.0.2", | ||
"rollup": "^0.41.6" | ||
"rollup": "^0.41.6", | ||
"undefined": "^0.1.0" | ||
}, | ||
@@ -43,0 +46,0 @@ "dependencies": { |
import PowerRegression from '..';

describe('Power regression', () => {
  it('basic test', () => {
    const x = [17.6, 26, 31.9, 38.9, 45.8, 51.2, 58.1, 64.7, 66.7, 80.8, 82.9];
    const y = [
      159.9,
      206.9,
      236.8,
      269.9,
      300.6,
      323.6,
      351.7,
      377.6,
      384.1,
      437.2,
      444.7
    ];
    const result = new PowerRegression(x, y);

    const expected = {
      A: 24.12989312,
      B: 0.65949782
    };
    // toBeCloseTo's second argument is a number of DIGITS, not a tolerance.
    // The previous `10e-4` (= 0.001 digits) effectively disabled the check;
    // 4 digits matches the intended ~1e-4 tolerance.
    expect(result.A).toBeCloseTo(expected.A, 4);
    expect(result.B).toBeCloseTo(expected.B, 4);

    const x2 = [20, 30];
    const y2 = result.predict(x2);
    expect(y2[0]).toBeCloseTo(expected.A * Math.pow(x2[0], expected.B), 4);
    expect(y2[1]).toBeCloseTo(expected.A * Math.pow(x2[1], expected.B), 4);

    const score = result.score(x, y);
    expect(score.r2).toBeCloseTo(0.999, 2);
    expect(score.chi2).toBeCloseTo(0.03, 1);
    expect(score.rmsd).toBeCloseTo(0.03, 1);

    expect(result.toString(4)).toStrictEqual('f(x) = 24.13 * x^0.6595');
    expect(result.toLaTeX(4)).toStrictEqual('f(x) = 24.13x^{0.6595}');
  });

  it('toJSON / load', () => {
    const regression = PowerRegression.load({
      name: 'powerRegression',
      A: 1,
      B: -1
    });
    // f(x) = 1 * x^-1, so f(4) = 0.25
    expect(regression.predict(4)).toStrictEqual(0.25);

    const model = regression.toJSON();
    expect(model).toStrictEqual({
      name: 'powerRegression',
      A: 1,
      B: -1
    });
  });

  it('test latex formatting of big / small numbers', () => {
    const regression = PowerRegression.load({
      name: 'powerRegression',
      A: 1000000000,
      B: -0.000000001
    });
    // Scientific-notation exponents must be wrapped in braces for LaTeX.
    expect(regression.toLaTeX(4)).toStrictEqual(
      'f(x) = \\frac{1.000e^{+9}}{x^{1.000e^{-9}}}'
    );
  });
});
104
src/index.js
@@ -1,60 +0,76 @@ | ||
import BaseRegression, {
  checkArrayLength,
  maybeToPrecision
} from 'ml-regression-base';
import SimpleLinearRegression from 'ml-regression-simple-linear';

/**
 * Power regression of the form f(x) = A * x^B.
 *
 * Fitting linearizes the model: ln(y) = ln(A) + B * ln(x), which is
 * solved with a simple linear regression on (ln(x), ln(y)).
 */
export default class PowerRegression extends BaseRegression {
  /**
   * @param {Array<number>|boolean} x - x values, or `true` to reload a saved model.
   * @param {Array<number>|object} y - y values, or the JSON model when reloading.
   */
  constructor(x, y) {
    super();
    if (x === true) {
      // reloading model
      this.A = y.A;
      this.B = y.B;
    } else {
      checkArrayLength(x, y);
      regress(this, x, y);
    }
  }

  _predict(newInputs) {
    return this.A * Math.pow(newInputs, this.B);
  }

  /**
   * @returns {object} serializable model: { name, A, B }.
   */
  toJSON() {
    return {
      name: 'powerRegression',
      A: this.A,
      B: this.B
    };
  }

  toString(precision) {
    return `f(x) = ${maybeToPrecision(
      this.A,
      precision
    )} * x^${maybeToPrecision(this.B, precision)}`;
  }

  toLaTeX(precision) {
    let latex = '';
    if (this.B >= 0) {
      latex = `f(x) = ${maybeToPrecision(
        this.A,
        precision
      )}x^{${maybeToPrecision(this.B, precision)}}`;
    } else {
      // Negative exponent: render as a fraction with a positive exponent.
      latex = `f(x) = \\frac{${maybeToPrecision(
        this.A,
        precision
      )}}{x^{${maybeToPrecision(-this.B, precision)}}}`;
    }
    // Wrap scientific-notation exponents (e.g. "e+9") in braces so big /
    // small numbers render correctly in LaTeX (fix from 1.0.1).
    latex = latex.replace(/e([+-]?[0-9]+)/g, 'e^{$1}');
    return latex;
  }

  /**
   * Reloads a model produced by toJSON().
   * @param {object} json
   * @returns {PowerRegression}
   * @throws {TypeError} when json is not a power regression model.
   */
  static load(json) {
    if (json.name !== 'powerRegression') {
      throw new TypeError('not a power regression model');
    }
    return new PowerRegression(true, json);
  }
}

/**
 * Fits A and B in y = A * x^B via linear regression in log-log space.
 * Mutates `pr`, setting pr.A and pr.B.
 */
function regress(pr, x, y) {
  const n = x.length;
  const xl = new Array(n);
  const yl = new Array(n);
  for (let i = 0; i < n; i++) {
    xl[i] = Math.log(x[i]);
    yl[i] = Math.log(y[i]);
  }
  const linear = new SimpleLinearRegression(xl, yl);
  // intercept = ln(A), slope = B
  pr.A = Math.exp(linear.intercept);
  pr.B = linear.slope;
}
New author
Supply chain risk: a new npm collaborator published a version of this package for the first time. New collaborators are usually benign additions to a project, but they do indicate a change to the security surface area of the package.
Found 1 instance in 1 package
9016
197
11
1