Socket
Socket
Sign in · Demo · Install

@tensorflow/tfjs-layers

Package Overview
Dependencies
Maintainers
11
Versions
157
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@tensorflow/tfjs-layers - npm Package Compare versions

Comparing version 0.6.7 to 0.7.0

dist/backend/state.d.ts

3

dist/activations.js

@@ -16,2 +16,3 @@ "use strict";

var K = require("./backend/tfjs_backend");
var state_1 = require("./backend/state");
var generic_utils_1 = require("./utils/generic_utils");

@@ -75,3 +76,3 @@ var Activation = (function (_super) {

Relu6.prototype.apply = function (x) {
return tfjs_core_1.tidy(function () { return tfc.minimum(K.getScalar(6.0), tfc.relu(x)); });
return tfjs_core_1.tidy(function () { return tfc.minimum(state_1.getScalar(6.0), tfc.relu(x)); });
};

@@ -78,0 +79,0 @@ Relu6.className = 'relu6';

@@ -1,16 +0,8 @@

import { DataType, Scalar, Tensor, Tensor1D, Tensor2D } from '@tensorflow/tfjs-core';
import { Scalar, Tensor, Tensor1D } from '@tensorflow/tfjs-core';
import { DataFormat } from '../common';
import { Shape, SymbolicTensor } from '../types';
import { LayerVariable } from '../variables';
import { epsilon as common_epsilon } from './common';
export declare function disposeScalarCache(): void;
import { HasShape, Shape } from '../types';
export declare function setBackend(requestedBackend: 'cpu' | 'webgl'): void;
export declare function getBackend(): 'cpu' | 'webgl';
export declare function getScalar(value: number, dtype?: DataType): Scalar;
export declare const epsilon: typeof common_epsilon;
export declare function isBackendSymbolic(): boolean;
export declare function shape(x: Tensor | SymbolicTensor): Shape;
export declare function intShape(x: Tensor | SymbolicTensor): number[];
export declare function dtype(x: Tensor | SymbolicTensor): DataType;
export declare function countParams(x: Tensor | SymbolicTensor): number;
export declare function countParams(x: HasShape): number;
export declare function cast(x: Tensor, dtype: 'float32' | 'int32' | 'bool'): Tensor;

@@ -27,10 +19,5 @@ export declare function expandDims(x: Tensor, axis?: number): Tensor;

export declare function tile(x: Tensor, n: number | number[]): Tensor;
export declare function identity(x: Tensor): Tensor;
export declare function eyeVariable(size: number, dtype?: DataType, name?: string): LayerVariable;
export declare function scalarTimesArray(c: Scalar, x: Tensor): Tensor;
export declare function scalarPlusArray(c: Scalar, x: Tensor): Tensor;
export declare function randomNormal(shape: Shape, mean?: number, stddev?: number, dtype?: 'float32' | 'int32', seed?: number): Tensor;
export declare function dot(x: Tensor, y: Tensor): Tensor;
export declare function sign(x: Tensor): Tensor;
export declare function qr(x: Tensor2D): [Tensor, Tensor];
export declare function oneHot(indices: Tensor, numClasses: number): Tensor;

@@ -44,7 +31,3 @@ export declare function gather(reference: Tensor, indices: number[] | Tensor1D, axis?: number): Tensor;

export declare function dropout(x: Tensor, level: Scalar, noiseShape?: number[], seed?: number): Tensor;
export declare function nameScope<T>(name: string, fn: () => T): T;
export declare function floatx(): DataType;
export declare function getUid(prefix?: string): string;
export declare function hardSigmoid(x: Tensor): Tensor;
export declare function inTrainPhase<T>(x: () => T, alt: () => T, training?: boolean): T;
export declare function gradients(lossFn: () => Scalar, variables: LayerVariable[]): Tensor[];

@@ -5,23 +5,12 @@ "use strict";

var tfjs_core_1 = require("@tensorflow/tfjs-core");
var state_1 = require("../backend/state");
var common_1 = require("../common");
var errors_1 = require("../errors");
var math_utils = require("../utils/math_utils");
var variables_1 = require("../variables");
var common_2 = require("./common");
var common_3 = require("./common");
var backend = 'webgl';
var DEFAULT_DTYPE = 'float32';
/**
 * Dispose every cached Scalar tensor and empty the cache.
 * Frees the backend memory held by the module-level `scalarCache`
 * (declared elsewhere in this file); safe to call repeatedly.
 */
function disposeScalarCache() {
  Object.keys(scalarCache).forEach(function (typeKey) {
    var perDtype = scalarCache[typeKey];
    Object.keys(perDtype).forEach(function (key) {
      perDtype[key].dispose();
      delete perDtype[key];
    });
  });
}
exports.disposeScalarCache = disposeScalarCache;
function setBackend(requestedBackend) {
tfc.setBackend(requestedBackend);
backend = requestedBackend;
disposeScalarCache();
state_1.disposeScalarCache();
}

@@ -33,18 +22,2 @@ exports.setBackend = setBackend;

exports.getBackend = getBackend;
var scalarCache = {
float32: {},
int32: {}
};
/**
 * Return a cached, keep()-ed Scalar tensor for `value`, creating it on
 * first use. `dtype` defaults to DEFAULT_DTYPE; cached tensors survive
 * tidy() scopes (via tfc.keep) until disposeScalarCache() runs.
 */
function getScalar(value, dtype) {
  var resolvedDtype = dtype === undefined ? DEFAULT_DTYPE : dtype;
  var perDtype = scalarCache[resolvedDtype];
  if (perDtype[value] == null) {
    var created = tfjs_core_1.scalar(value, resolvedDtype);
    tfc.keep(created);
    perDtype[value] = created;
  }
  return perDtype[value];
}
exports.getScalar = getScalar;
exports.epsilon = common_2.epsilon;
function isBackendSymbolic() {

@@ -54,14 +27,2 @@ return false;

exports.isBackendSymbolic = isBackendSymbolic;
/** Read the static shape of a Tensor or SymbolicTensor. */
function shape(x) {
  var staticShape = x.shape;
  return staticShape;
}
exports.shape = shape;
/** Shape of x as a plain number[] (same accessor as `shape`). */
function intShape(x) {
  var dims = x.shape;
  return dims;
}
exports.intShape = intShape;
/**
 * dtype of a Tensor or SymbolicTensor.
 * NOTE(review): concrete Tensors are reported as DEFAULT_DTYPE regardless
 * of their actual dtype — behavior preserved from the original; confirm
 * upstream before relying on it for non-float32 tensors.
 */
function dtype(x) {
  if (x instanceof tfjs_core_1.Tensor) {
    return DEFAULT_DTYPE;
  }
  return x.dtype;
}
exports.dtype = dtype;
function countParams(x) {

@@ -83,3 +44,3 @@ var shape = x.shape;

if (axis === void 0) { axis = -1; }
var outShape = shape(x).slice();
var outShape = x.shape.slice();
if (axis < 0) {

@@ -245,18 +206,2 @@ axis = outShape.length + axis + 1;

exports.tile = tile;
/** Return a copy of the tensor via its clone() method. */
function identity(x) {
  var copy = x.clone();
  return copy;
}
exports.identity = identity;
/** Create a size x size identity-matrix LayerVariable with optional dtype/name. */
function eyeVariable(size, dtype, name) {
  var eyeTensor = tfc.eye(size, size, null, dtype);
  return new variables_1.LayerVariable(eyeTensor, dtype, name);
}
exports.eyeVariable = eyeVariable;
/** Broadcast-multiply tensor x by scalar c (delegates to tfc.mul). */
function scalarTimesArray(c, x) {
  var product = tfc.mul(c, x);
  return product;
}
exports.scalarTimesArray = scalarTimesArray;
/** Broadcast-add scalar c to tensor x (delegates to tfc.add). */
function scalarPlusArray(c, x) {
  var sum = tfc.add(c, x);
  return sum;
}
exports.scalarPlusArray = scalarPlusArray;
function randomNormal(shape, mean, stddev, dtype, seed) {

@@ -271,3 +216,3 @@ if (mean === void 0) { mean = 0.0; }

throw new errors_1.NotImplementedError("dot support for y other than rank 2 is not yet implemented: " +
("y shape = " + shape));
("y shape = " + y.shape));
}

@@ -289,3 +234,3 @@ else {

throw new errors_1.NotImplementedError("dot support for x of rank " + x.rank + " is not yet implemented: " +
("x shape = " + shape));
("x shape = " + x.shape));
}

@@ -299,68 +244,6 @@ }

var onesLikeX = tfjs_core_1.onesLike(x);
return tfjs_core_1.where(tfc.equal(x, zerosLikeX), zerosLikeX, tfjs_core_1.where(tfc.greater(x, tfjs_core_1.zerosLike(x)), onesLikeX, scalarTimesArray(getScalar(-1), onesLikeX)));
return tfjs_core_1.where(tfc.equal(x, zerosLikeX), zerosLikeX, tfjs_core_1.where(tfc.greater(x, tfjs_core_1.zerosLike(x)), onesLikeX, tfc.mul(state_1.getScalar(-1), onesLikeX)));
});
}
exports.sign = sign;
// Householder QR decomposition of a 2D tensor x (requires m >= n).
// Returns [Q, R] with Q orthogonal ([m, m]) and R upper-triangular ([m, n]).
// Each column iteration rebuilds w/r/q inside an inner tidy() and manually
// disposes the previous iteration's tensors so intermediate memory stays
// bounded; the statement order here is load-bearing for tensor lifetimes.
function qr(x) {
var _a = tfjs_core_1.tidy(function () {
// Validate rank and shape: only tall-or-square 2D inputs are supported.
if (x.shape.length !== 2) {
throw new errors_1.ValueError("qr() requires a 2D Tensor, but got a " + x.shape.length + "D Tensor.");
}
if (x.shape[0] < x.shape[1]) {
throw new errors_1.ValueError("qr() requires x.shape[0] >= x.shape[1], but got shape: [" + x.shape + "]");
}
var m = x.shape[0];
var n = x.shape[1];
// Start from Q = I and R = x; refine with one Householder reflection
// per column.
var q = tfc.eye(m);
var r = x.clone();
var one2D = tfjs_core_1.tensor2d([[1]], [1, 1]);
var w = one2D.clone();
var _loop_1 = function (j) {
// Keep handles to the previous iteration's tensors so they can be
// disposed after the inner tidy() has produced their replacements.
var rTemp = r;
var wTemp = w;
var qTemp = q;
_a = tfjs_core_1.tidy(function () {
// Build the Householder vector for column j of the trailing block.
var rjEnd1 = r.slice([j, j], [m - j, 1]);
var normX = tfc.norm(rjEnd1);
var rjj = r.slice([j, j], [1, 1]);
// s = -sign(r[j, j]) picks the numerically stable reflection.
var s = tfc.neg(sign(rjj));
var u1 = rjj.sub(tfc.mul(s, normX));
var wPre = tfc.div(rjEnd1, u1);
if (wPre.shape[0] === 1) {
w = one2D.clone();
}
else {
w = one2D.concat(wPre.slice([1, 0], [wPre.shape[0] - 1, wPre.shape[1]]), 0);
}
var tau = tfc.neg(tfc.div(tfc.matMul(s, u1), normX));
// Apply the reflection to the trailing rows of R; rows 0..j-1 are
// already triangularized and are concatenated back unchanged.
var rjEndAll = r.slice([j, 0], [m - j, n]);
var tauTimesW = tau.mul(w);
if (j === 0) {
r = rjEndAll.sub(tauTimesW.matMul(w.transpose().matMul(rjEndAll)));
}
else {
r = r.slice([0, 0], [j, n])
.concat(rjEndAll.sub(tauTimesW.matMul(w.transpose().matMul(rjEndAll))), 0);
}
// Accumulate the same reflection into the trailing columns of Q.
var qAllJEnd = q.slice([0, j], [m, q.shape[1] - j]);
if (j === 0) {
q = qAllJEnd.sub(qAllJEnd.matMul(w).matMul(tauTimesW.transpose()));
}
else {
q = q.slice([0, 0], [m, j])
.concat(qAllJEnd.sub(qAllJEnd.matMul(w).matMul(tauTimesW.transpose())), 1);
}
return [w, r, q];
}), w = _a[0], r = _a[1], q = _a[2];
// Free the superseded tensors from the previous iteration.
tfjs_core_1.dispose([rTemp, wTemp, qTemp]);
var _a;
};
for (var j = 0; j < n; ++j) {
_loop_1(j);
}
return [q, r];
}), qOuter = _a[0], rOuter = _a[1];
return [qOuter, rOuter];
}
exports.qr = qr;
function oneHot(indices, numClasses) {

@@ -408,3 +291,3 @@ return tfjs_core_1.tidy(function () {

if (dataFormat == null) {
dataFormat = common_3.imageDataFormat();
dataFormat = common_2.imageDataFormat();
}

@@ -492,3 +375,3 @@ common_1.checkDataFormat(dataFormat);

function softsign(x) {
return tfjs_core_1.tidy(function () { return tfc.div(x, tfc.add(getScalar(1), tfc.abs(x))); });
return tfjs_core_1.tidy(function () { return tfc.div(x, tfc.add(state_1.getScalar(1), tfc.abs(x))); });
}

@@ -506,3 +389,3 @@ exports.softsign = softsign;

var multiplier = tfc.step(tfc.add(tfc.neg(level), tfc.randomUniform(x.shape, 0, 1, 'float32')));
multiplier = tfc.mul(tfc.div(getScalar(1), tfc.sub(getScalar(1), level)), multiplier);
multiplier = tfc.mul(tfc.div(state_1.getScalar(1), tfc.sub(state_1.getScalar(1), level)), multiplier);
return tfc.mul(x, multiplier);

@@ -512,23 +395,5 @@ });

exports.dropout = dropout;
/** Run fn inside a name scope; delegates to the shared common_1.nameScope. */
function nameScope(name, fn) {
  var result = common_1.nameScope(name, fn);
  return result;
}
exports.nameScope = nameScope;
/** Default floating-point dtype used by this backend. */
function floatx() {
  var defaultFloatDType = 'float32';
  return defaultFloatDType;
}
exports.floatx = floatx;
// Per-prefix counters backing getUid().
var _uidPrefixes = {};
/**
 * Produce a unique id string for `prefix`: "<prefix>1", "<prefix>2", ...
 * Counters are per-prefix and monotonically increasing for the lifetime
 * of the module; prefix defaults to ''.
 */
function getUid(prefix) {
  if (prefix === void 0) { prefix = ''; }
  if (!(prefix in _uidPrefixes)) {
    _uidPrefixes[prefix] = 0;
  }
  var next = _uidPrefixes[prefix] + 1;
  _uidPrefixes[prefix] = next;
  return prefix + next.toString();
}
exports.getUid = getUid;
function hardSigmoid(x) {
return tfjs_core_1.tidy(function () {
var y = scalarPlusArray(getScalar(0.5), scalarTimesArray(getScalar(0.2), x));
var y = tfc.add(state_1.getScalar(0.5), tfc.mul(state_1.getScalar(0.2), x));
return tfc.clipByValue(y, 0, 1);

@@ -543,8 +408,2 @@ });

exports.inTrainPhase = inTrainPhase;
/**
 * Gradients of lossFn with respect to each LayerVariable, returned in the
 * order the variables were given. Reads each variable's underlying
 * tf.Variable, then looks the gradient up by name in the variableGrads
 * result. (Also fixes the misspelled local `valudAndGrads`.)
 */
function gradients(lossFn, variables) {
  var backingVars = variables.map(function (v) { return v.read(); });
  var valueAndGrads = tfjs_core_1.variableGrads(lossFn, backingVars);
  return variables.map(function (v) { return valueAndGrads.grads[v.name]; });
}
exports.gradients = gradients;
//# sourceMappingURL=tfjs_backend.js.map

@@ -1,81 +0,7 @@

import { Scalar, Tensor } from '@tensorflow/tfjs-core';
import { BaseCallback } from './base_callbacks';
import { Container } from './engine/topology';
import { Model } from './engine/training';
export declare type UnresolvedLogs = {
[key: string]: number | Scalar;
};
export declare type Logs = {
[key: string]: number;
};
export declare type Params = {
[key: string]: number | string | boolean | number[] | string[] | boolean[];
};
export declare abstract class Callback {
validationData: Tensor | Tensor[];
export declare abstract class Callback extends BaseCallback {
model: Model;
params: Params;
setParams(params: Params): void;
setModel(model: Model): void;
onEpochBegin(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onEpochEnd(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchBegin(batch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchEnd(batch: number, logs?: UnresolvedLogs): Promise<void>;
onTrainBegin(logs?: UnresolvedLogs): Promise<void>;
onTrainEnd(logs?: UnresolvedLogs): Promise<void>;
setModel(model: Container): void;
}
export declare class CallbackList {
callbacks: Callback[];
queueLength: number;
constructor(callbacks?: Callback[], queueLength?: number);
append(callback: Callback): void;
setParams(params: Params): void;
setModel(model: Model): void;
onEpochBegin(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onEpochEnd(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchBegin(batch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchEnd(batch: number, logs?: UnresolvedLogs): Promise<void>;
onTrainBegin(logs?: UnresolvedLogs): Promise<void>;
onTrainEnd(logs?: UnresolvedLogs): Promise<void>;
}
export declare class BaseLogger extends Callback {
private seen;
private totals;
constructor();
onEpochBegin(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchEnd(batch: number, logs?: UnresolvedLogs): Promise<void>;
onEpochEnd(epoch: number, logs?: UnresolvedLogs): Promise<void>;
}
export declare function resolveScalarsInLogs(logs: UnresolvedLogs): Promise<void>;
export declare function disposeTensorsInLogs(logs: UnresolvedLogs): void;
export declare class History extends Callback {
epoch: number[];
history: {
[key: string]: Array<number | Tensor>;
};
onTrainBegin(logs?: UnresolvedLogs): Promise<void>;
onEpochEnd(epoch: number, logs?: UnresolvedLogs): Promise<void>;
syncData(): Promise<void>;
}
export interface CustomCallbackConfig {
onTrainBegin?: (logs?: Logs) => Promise<void>;
onTrainEnd?: (logs?: Logs) => Promise<void>;
onEpochBegin?: (epoch: number, logs?: Logs) => Promise<void>;
onEpochEnd?: (epoch: number, logs?: Logs) => Promise<void>;
onBatchBegin?: (batch: number, logs?: Logs) => Promise<void>;
onBatchEnd?: (batch: number, logs?: Logs) => Promise<void>;
}
export declare class CustomCallback extends Callback {
protected readonly trainBegin: (logs?: Logs) => Promise<void>;
protected readonly trainEnd: (logs?: Logs) => Promise<void>;
protected readonly epochBegin: (epoch: number, logs?: Logs) => Promise<void>;
protected readonly epochEnd: (epoch: number, logs?: Logs) => Promise<void>;
protected readonly batchBegin: (batch: number, logs?: Logs) => Promise<void>;
protected readonly batchEnd: (batch: number, logs?: Logs) => Promise<void>;
constructor(config: CustomCallbackConfig);
onEpochBegin(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onEpochEnd(epoch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchBegin(batch: number, logs?: UnresolvedLogs): Promise<void>;
onBatchEnd(batch: number, logs?: UnresolvedLogs): Promise<void>;
onTrainBegin(logs?: UnresolvedLogs): Promise<void>;
onTrainEnd(logs?: UnresolvedLogs): Promise<void>;
}
export declare function standardizeCallbacks(callbacks: Callback | Callback[] | CustomCallbackConfig | CustomCallbackConfig[]): Callback[];

@@ -12,604 +12,21 @@ "use strict";

})();
// TypeScript ES5 emit helper: wraps a generator-based function body in a
// Promise and drives it with fulfilled/rejected continuations, emulating
// async/await on runtimes without native support. Reuses an existing
// this.__awaiter when one is already defined.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
// TypeScript ES5 emit helper: a hand-rolled generator state machine.
// `_.label` is the resume point inside the compiled switch; opcodes in
// op[0] are: 0 next, 1 throw, 2 return, 3 break/goto, 4 yield, 5 yield*,
// 6 catch, 7 endfinally. Do not edit — regenerated by the compiler.
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var tfjs_core_1 = require("@tensorflow/tfjs-core");
var K = require("./backend/tfjs_backend");
var generic_utils = require("./utils/generic_utils");
var Callback = (function () {
var base_callbacks_1 = require("./base_callbacks");
var training_1 = require("./engine/training");
var Callback = (function (_super) {
__extends(Callback, _super);
function Callback() {
this.validationData = null;
this.model = null;
var _this = _super !== null && _super.apply(this, arguments) || this;
_this.model = null;
return _this;
}
Callback.prototype.setParams = function (params) {
this.params = params;
};
Callback.prototype.setModel = function (model) {
if (!(model instanceof training_1.Model)) {
throw new Error('model must be a Model, not some other Container');
}
this.model = model;
};
Callback.prototype.onEpochBegin = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2];
}); });
};
Callback.prototype.onEpochEnd = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2];
}); });
};
Callback.prototype.onBatchBegin = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2];
}); });
};
Callback.prototype.onBatchEnd = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2];
}); });
};
Callback.prototype.onTrainBegin = function (logs) {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2];
}); });
};
Callback.prototype.onTrainEnd = function (logs) {
return __awaiter(this, void 0, void 0, function () { return __generator(this, function (_a) {
return [2];
}); });
};
return Callback;
}());
}(base_callbacks_1.BaseCallback));
exports.Callback = Callback;
// Fans each lifecycle event out to an ordered list of callbacks, awaiting
// each one in turn. The on* bodies are ES5 async emit: state 0 initializes
// the loop, states 1-3 iterate/await, state 4 returns.
var CallbackList = (function () {
// queueLength defaults to 10; NOTE(review): it is stored but not used in
// this visible code — presumably consumed elsewhere; confirm before removal.
function CallbackList(callbacks, queueLength) {
if (queueLength === void 0) { queueLength = 10; }
if (callbacks == null) {
callbacks = [];
}
this.callbacks = callbacks;
this.queueLength = queueLength;
}
// Add one callback to the end of the list.
CallbackList.prototype.append = function (callback) {
this.callbacks.push(callback);
};
// Push the same params object into every callback.
CallbackList.prototype.setParams = function (params) {
for (var _i = 0, _a = this.callbacks; _i < _a.length; _i++) {
var callback = _a[_i];
callback.setParams(params);
}
};
// Push the same model into every callback.
CallbackList.prototype.setModel = function (model) {
for (var _i = 0, _a = this.callbacks; _i < _a.length; _i++) {
var callback = _a[_i];
callback.setModel(model);
}
};
// Await every callback's onEpochBegin, in list order.
CallbackList.prototype.onEpochBegin = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, callback;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (logs == null) {
logs = {};
}
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
callback = _a[_i];
return [4, callback.onEpochBegin(epoch, logs)];
case 2:
_b.sent();
_b.label = 3;
case 3:
_i++;
return [3, 1];
case 4: return [2];
}
});
});
};
// Await every callback's onEpochEnd, in list order.
CallbackList.prototype.onEpochEnd = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, callback;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (logs == null) {
logs = {};
}
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
callback = _a[_i];
return [4, callback.onEpochEnd(epoch, logs)];
case 2:
_b.sent();
_b.label = 3;
case 3:
_i++;
return [3, 1];
case 4: return [2];
}
});
});
};
// Await every callback's onBatchBegin, in list order.
CallbackList.prototype.onBatchBegin = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, callback;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (logs == null) {
logs = {};
}
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
callback = _a[_i];
return [4, callback.onBatchBegin(batch, logs)];
case 2:
_b.sent();
_b.label = 3;
case 3:
_i++;
return [3, 1];
case 4: return [2];
}
});
});
};
// Await every callback's onBatchEnd, in list order.
CallbackList.prototype.onBatchEnd = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, callback;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (logs == null) {
logs = {};
}
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
callback = _a[_i];
return [4, callback.onBatchEnd(batch, logs)];
case 2:
_b.sent();
_b.label = 3;
case 3:
_i++;
return [3, 1];
case 4: return [2];
}
});
});
};
// Await every callback's onTrainBegin, in list order.
CallbackList.prototype.onTrainBegin = function (logs) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, callback;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (logs == null) {
logs = {};
}
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
callback = _a[_i];
return [4, callback.onTrainBegin(logs)];
case 2:
_b.sent();
_b.label = 3;
case 3:
_i++;
return [3, 1];
case 4: return [2];
}
});
});
};
// Await every callback's onTrainEnd, in list order.
CallbackList.prototype.onTrainEnd = function (logs) {
return __awaiter(this, void 0, void 0, function () {
var _i, _a, callback;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
if (logs == null) {
logs = {};
}
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
callback = _a[_i];
return [4, callback.onTrainEnd(logs)];
case 2:
_b.sent();
_b.label = 3;
case 3:
_i++;
return [3, 1];
case 4: return [2];
}
});
});
};
return CallbackList;
}());
exports.CallbackList = CallbackList;
// Accumulates per-batch metric values into per-epoch averages. Numeric
// metrics are averaged as plain numbers; tensor (Scalar) metrics are
// accumulated as kept-alive tensors and disposed when superseded, so the
// statement order around tidy()/dispose() matters.
var BaseLogger = (function (_super) {
__extends(BaseLogger, _super);
function BaseLogger() {
return _super.call(this) || this;
}
// Reset the running totals at the start of every epoch.
BaseLogger.prototype.onEpochBegin = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
this.seen = 0;
this.totals = {};
return [2];
});
});
};
// Fold this batch's metrics into the totals, weighted by batch size
// (logs['size']); seen accumulates the total sample count.
BaseLogger.prototype.onBatchEnd = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () {
var _this = this;
var batchSize, _loop_1, this_1, key;
return __generator(this, function (_a) {
if (logs == null) {
logs = {};
}
batchSize = logs['size'] == null ? 0 : logs['size'];
this.seen += batchSize;
_loop_1 = function (key) {
var value = logs[key];
if (typeof value === 'number') {
if (!this_1.totals.hasOwnProperty(key)) {
this_1.totals[key] = 0;
}
this_1.totals[key] = this_1.totals[key] + value * batchSize;
}
else {
// Tensor path: remember the old total so it can be disposed only
// after the new total (old + value * batchSize) exists.
var oldTotalsToDispose = void 0;
if (key in this_1.totals) {
oldTotalsToDispose = this_1.totals[key];
}
else {
this_1.totals[key] = K.getScalar(0);
}
this_1.totals[key] = tfjs_core_1.tidy(function () { return K.scalarPlusArray(_this.totals[key], tfjs_core_1.mul(value, K.getScalar(batchSize))); });
if (oldTotalsToDispose != null) {
oldTotalsToDispose.dispose();
}
}
};
this_1 = this;
for (key in logs) {
_loop_1(key);
}
return [2];
});
});
};
// Turn totals into averages (total / seen) and write them into `logs`,
// for each metric named in params['metrics'].
BaseLogger.prototype.onEpochEnd = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
var _this = this;
var _loop_2, this_2, _i, _a, key;
return __generator(this, function (_b) {
if (logs != null) {
_loop_2 = function (key) {
if (this_2.totals[key] == null) {
return "continue";
}
if (typeof this_2.totals[key] === 'number') {
logs[key] = this_2.totals[key] / this_2.seen;
}
else {
// Tensor path: average = (1 / seen) * total; keep() the result
// so it survives the tidy(), and dispose the spent total.
tfjs_core_1.tidy(function () {
logs[key] =
K.scalarTimesArray(tfjs_core_1.div(K.getScalar(1), K.getScalar(_this.seen)), _this.totals[key]);
_this.totals[key].dispose();
tfjs_core_1.keep(logs[key]);
});
}
};
this_2 = this;
for (_i = 0, _a = this.params['metrics']; _i < _a.length; _i++) {
key = _a[_i];
_loop_2(key);
}
}
return [2];
});
});
};
return BaseLogger;
}(Callback));
exports.BaseLogger = BaseLogger;
// Await all tensor-valued entries in a logs object and replace them in
// place with their underlying numbers. ES5 async emit: state 0 collects
// the pending .data() promises; state 1 resumes after Promise.all and
// writes the results back. No-op when logs is null.
function resolveScalarsInLogs(logs) {
return __awaiter(this, void 0, void 0, function () {
var promises, keys, key, value, valueScalar, values, i;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (logs == null) {
return [2];
}
promises = [];
keys = [];
for (key in logs) {
value = logs[key];
// Non-number values are treated as tf Scalars exposing .data().
if (typeof value !== 'number') {
valueScalar = value;
promises.push(valueScalar.data());
keys.push(key);
}
}
return [4, Promise.all(promises)];
case 1:
values = _a.sent();
// Each resolved TypedArray holds one element; unwrap into the log.
for (i = 0; i < values.length; ++i) {
logs[keys[i]] = values[i][0];
}
return [2];
}
});
});
}
exports.resolveScalarsInLogs = resolveScalarsInLogs;
/**
 * Dispose every non-number (i.e. tensor) value in a logs object.
 * No-op when logs is null/undefined; numeric entries are left untouched.
 */
function disposeTensorsInLogs(logs) {
  if (logs == null) {
    return;
  }
  Object.keys(logs).forEach(function (key) {
    var entry = logs[key];
    if (typeof entry !== 'number') {
      entry.dispose();
    }
  });
}
exports.disposeTensorsInLogs = disposeTensorsInLogs;
// Records epoch numbers and per-epoch logs during training. Entries in
// `history[metric]` may be numbers or tensor Scalars until syncData()
// downloads and replaces the tensor values.
var History = (function (_super) {
__extends(History, _super);
function History() {
return _super !== null && _super.apply(this, arguments) || this;
}
// Start every training run with empty epoch/history records.
History.prototype.onTrainBegin = function (logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
this.epoch = [];
this.history = {};
return [2];
});
});
};
// Append this epoch's number and each logged metric value.
History.prototype.onEpochEnd = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
var key;
return __generator(this, function (_a) {
if (logs == null) {
logs = {};
}
this.epoch.push(epoch);
for (key in logs) {
if (this.history[key] == null) {
this.history[key] = [];
}
this.history[key].push(logs[key]);
}
return [2];
});
});
};
// Replace every tensor-valued history entry with its downloaded number,
// disposing the tensor. ES5 async emit: state 0 gathers .data() promises
// plus the (key, index) of each tensor; state 1 resumes after Promise.all.
History.prototype.syncData = function () {
return __awaiter(this, void 0, void 0, function () {
var promises, keys, indices, key, valueArray, i, valueScalar, values, n;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
promises = [];
keys = [];
indices = [];
for (key in this.history) {
valueArray = this.history[key];
for (i = 0; i < valueArray.length; ++i) {
if (typeof valueArray[i] !== 'number') {
valueScalar = valueArray[i];
promises.push(valueScalar.data());
keys.push(key);
indices.push(i);
}
}
}
return [4, Promise.all(promises)];
case 1:
values = _a.sent();
for (n = 0; n < values.length; ++n) {
this.history[keys[n]][indices[n]].dispose();
this.history[keys[n]][indices[n]] = values[n][0];
}
return [2];
}
});
});
};
return History;
}(Callback));
exports.History = History;
// Adapts a plain CustomCallbackConfig of optional async hook functions
// into a Callback subclass. Each on* method is a no-op unless the matching
// hook was supplied; Scalar log values are resolved to numbers before the
// hook runs. ES5 async emit per method: state 0 guards, state 1 awaits
// resolveScalarsInLogs, state 2 awaits the user hook.
var CustomCallback = (function (_super) {
__extends(CustomCallback, _super);
function CustomCallback(config) {
var _this = _super.call(this) || this;
_this.trainBegin = config.onTrainBegin;
_this.trainEnd = config.onTrainEnd;
_this.epochBegin = config.onEpochBegin;
_this.epochEnd = config.onEpochEnd;
_this.batchBegin = config.onBatchBegin;
_this.batchEnd = config.onBatchEnd;
return _this;
}
// Resolve scalars, then invoke the user's onEpochBegin hook (if any).
CustomCallback.prototype.onEpochBegin = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(this.epochBegin != null)) return [3, 3];
return [4, resolveScalarsInLogs(logs)];
case 1:
_a.sent();
return [4, this.epochBegin(epoch, logs)];
case 2:
_a.sent();
_a.label = 3;
case 3: return [2];
}
});
});
};
// Resolve scalars, then invoke the user's onEpochEnd hook (if any).
CustomCallback.prototype.onEpochEnd = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(this.epochEnd != null)) return [3, 3];
return [4, resolveScalarsInLogs(logs)];
case 1:
_a.sent();
return [4, this.epochEnd(epoch, logs)];
case 2:
_a.sent();
_a.label = 3;
case 3: return [2];
}
});
});
};
// Resolve scalars, then invoke the user's onBatchBegin hook (if any).
CustomCallback.prototype.onBatchBegin = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(this.batchBegin != null)) return [3, 3];
return [4, resolveScalarsInLogs(logs)];
case 1:
_a.sent();
return [4, this.batchBegin(batch, logs)];
case 2:
_a.sent();
_a.label = 3;
case 3: return [2];
}
});
});
};
// Resolve scalars, then invoke the user's onBatchEnd hook (if any).
CustomCallback.prototype.onBatchEnd = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(this.batchEnd != null)) return [3, 3];
return [4, resolveScalarsInLogs(logs)];
case 1:
_a.sent();
return [4, this.batchEnd(batch, logs)];
case 2:
_a.sent();
_a.label = 3;
case 3: return [2];
}
});
});
};
// Resolve scalars, then invoke the user's onTrainBegin hook (if any).
CustomCallback.prototype.onTrainBegin = function (logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(this.trainBegin != null)) return [3, 3];
return [4, resolveScalarsInLogs(logs)];
case 1:
_a.sent();
return [4, this.trainBegin(logs)];
case 2:
_a.sent();
_a.label = 3;
case 3: return [2];
}
});
});
};
// Resolve scalars, then invoke the user's onTrainEnd hook (if any).
CustomCallback.prototype.onTrainEnd = function (logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(this.trainEnd != null)) return [3, 3];
return [4, resolveScalarsInLogs(logs)];
case 1:
_a.sent();
return [4, this.trainEnd(logs)];
case 2:
_a.sent();
_a.label = 3;
case 3: return [2];
}
});
});
};
return CustomCallback;
}(Callback));
exports.CustomCallback = CustomCallback;
/**
 * Normalize the `callbacks` argument of Model.fit into a Callback[].
 * Accepts null (returned as null), a single Callback, an array whose first
 * element is a Callback (returned as-is), or one or more
 * CustomCallbackConfig objects (each wrapped in a CustomCallback).
 */
function standardizeCallbacks(callbacks) {
  if (callbacks == null) {
    return null;
  }
  if (callbacks instanceof Callback) {
    return [callbacks];
  }
  var isCallbackArray = Array.isArray(callbacks) && callbacks[0] instanceof Callback;
  if (isCallbackArray) {
    return callbacks;
  }
  // Anything else is one-or-more config objects; wrap each one.
  var configs = generic_utils.toList(callbacks);
  return configs.map(function (config) { return new CustomCallback(config); });
}
exports.standardizeCallbacks = standardizeCallbacks;
//# sourceMappingURL=callbacks.js.map

@@ -21,6 +21,7 @@ "use strict";

var tfjs_core_1 = require("@tensorflow/tfjs-core");
var K = require("./backend/tfjs_backend");
var common_1 = require("./backend/common");
var state_1 = require("./backend/state");
var generic_utils_1 = require("./utils/generic_utils");
function calcL2Norms(w, axis) {
return tfjs_core_1.tidy(function () { return tfc.sqrt(tfc.sum(K.square(w), axis, true)); });
return tfjs_core_1.tidy(function () { return tfc.sqrt(tfc.sum(tfc.mulStrict(w, w), axis, true)); });
}

@@ -57,3 +58,3 @@ var Constraint = (function (_super) {

var desired = tfc.clipByValue(norms, 0, _this.maxValue);
return tfc.mul(w, tfc.div(desired, K.scalarPlusArray(K.getScalar(K.epsilon()), norms)));
return tfc.mul(w, tfc.div(desired, tfc.add(state_1.getScalar(common_1.epsilon()), norms)));
});

@@ -79,3 +80,3 @@ };

var _this = this;
return tfjs_core_1.tidy(function () { return tfc.div(w, K.scalarPlusArray(K.getScalar(K.epsilon()), calcL2Norms(w, _this.axis))); });
return tfjs_core_1.tidy(function () { return tfc.div(w, tfc.add(state_1.getScalar(common_1.epsilon()), calcL2Norms(w, _this.axis))); });
};

@@ -123,4 +124,4 @@ UnitNorm.prototype.getConfig = function () {

var norms = calcL2Norms(w, _this.axis);
var desired = tfc.add(K.scalarTimesArray(K.getScalar(_this.rate), tfc.clipByValue(norms, _this.minValue, _this.maxValue)), K.scalarTimesArray(K.getScalar(1.0 - _this.rate), norms));
return tfc.mul(w, tfc.div(desired, K.scalarPlusArray(K.getScalar(K.epsilon()), norms)));
var desired = tfc.add(tfc.mul(state_1.getScalar(_this.rate), tfc.clipByValue(norms, _this.minValue, _this.maxValue)), tfc.mul(state_1.getScalar(1.0 - _this.rate), norms));
return tfc.mul(w, tfc.div(desired, tfc.add(state_1.getScalar(common_1.epsilon()), norms)));
});

@@ -127,0 +128,0 @@ };

import { Tensor } from '@tensorflow/tfjs-core';
import { Kwargs, SymbolicTensor } from '../types';
import { Kwargs } from '../types';
import { SymbolicTensor } from './topology';
export interface Feed {

@@ -4,0 +5,0 @@ key: SymbolicTensor;

@@ -5,3 +5,3 @@ import { DataType, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';

import { Regularizer } from '../regularizers';
import { JsonDict, Kwargs, NamedTensorMap, RegularizerFn, Shape, SymbolicTensor } from '../types';
import { JsonDict, Kwargs, NamedTensorMap, RegularizerFn, Shape } from '../types';
import { LayerVariable } from '../variables';

@@ -30,2 +30,17 @@ export declare type Op = (x: LayerVariable) => LayerVariable;

}
export declare class SymbolicTensor {
readonly dtype: DataType;
readonly shape: Shape;
sourceLayer: Layer;
readonly inputs: SymbolicTensor[];
readonly callArgs: Kwargs;
readonly outputTensorIndex: number;
readonly id: number;
readonly name: string;
readonly originalName?: string;
readonly rank: number;
nodeIndex: number;
tensorIndex: number;
constructor(dtype: DataType, shape: Shape, sourceLayer: Layer, inputs: SymbolicTensor[], callArgs: Kwargs, name?: string, outputTensorIndex?: number);
}
export interface NodeConfig {

@@ -32,0 +47,0 @@ outboundLayer: Layer;

import * as tfc from '@tensorflow/tfjs-core';
import { io, ModelPredictConfig, Optimizer, Scalar, Tensor, Tensor1D } from '@tensorflow/tfjs-core';
import { Callback, CustomCallbackConfig, History } from '../callbacks';
import { BaseCallback, CustomCallbackConfig, History } from '../base_callbacks';
import { LossOrMetricFn, NamedTensorMap, Shape } from '../types';

@@ -37,3 +37,3 @@ import { Container, ContainerConfig } from './topology';

verbose?: ModelLoggingVerbosity;
callbacks?: Callback[] | CustomCallbackConfig | CustomCallbackConfig[];
callbacks?: BaseCallback[] | CustomCallbackConfig | CustomCallbackConfig[];
validationSplit?: number;

@@ -40,0 +40,0 @@ validationData?: [Tensor | Tensor[], Tensor | Tensor[]] | [Tensor | Tensor[], Tensor | Tensor[], Tensor | Tensor[]];

@@ -56,5 +56,8 @@ "use strict";

var tfjs_core_1 = require("@tensorflow/tfjs-core");
var state_1 = require("../backend/state");
var K = require("../backend/tfjs_backend");
var callbacks_1 = require("../callbacks");
var base_callbacks_1 = require("../base_callbacks");
var common_1 = require("../common");
var errors_1 = require("../errors");
var logs_1 = require("../logs");
var losses = require("../losses");

@@ -428,3 +431,3 @@ var Metrics = require("../metrics");

this.metricsTensors = [];
K.nameScope('loss', function () {
common_1.nameScope('loss', function () {
for (var i = 0; i < _this.outputs.length; ++i) {

@@ -449,3 +452,3 @@ if (skipTargetIndices.indexOf(i) !== -1) {

};
K.nameScope('metric', function () {
common_1.nameScope('metric', function () {
var _loop_1 = function (i) {

@@ -507,3 +510,3 @@ if (skipTargetIndices.indexOf(i) !== -1) {

var metricResult;
K.nameScope(metricName, function () {
common_1.nameScope(metricName, function () {
metricResult = weightedMetricFn;

@@ -645,41 +648,43 @@ });

if (verbose === void 0) { verbose = false; }
var numSamples = this.checkNumSamples(ins);
if (verbose) {
throw new errors_1.NotImplementedError('Verbose predictLoop() is not implemented yet.');
}
var batches = makeBatches(numSamples, batchSize);
var outs = [];
var _loop_3 = function (batchIndex) {
var batchOuts = tfc.tidy(function () {
var batchStart = batches[batchIndex][0];
var batchEnd = batches[batchIndex][1];
var insBatch = sliceArrays(ins, batchStart, batchEnd);
var feeds = [];
if (Array.isArray(insBatch)) {
for (var i = 0; i < insBatch.length; ++i) {
feeds.push({ key: _this.inputs[i], value: insBatch[i] });
return tfc.tidy(function () {
var numSamples = _this.checkNumSamples(ins);
if (verbose) {
throw new errors_1.NotImplementedError('Verbose predictLoop() is not implemented yet.');
}
var batches = makeBatches(numSamples, batchSize);
var outs = [];
var _loop_3 = function (batchIndex) {
var batchOuts = tfc.tidy(function () {
var batchStart = batches[batchIndex][0];
var batchEnd = batches[batchIndex][1];
var insBatch = sliceArrays(ins, batchStart, batchEnd);
var feeds = [];
if (Array.isArray(insBatch)) {
for (var i = 0; i < insBatch.length; ++i) {
feeds.push({ key: _this.inputs[i], value: insBatch[i] });
}
}
else {
feeds.push({ key: _this.inputs[0], value: insBatch });
}
var feedDict = new executor_1.FeedDict(feeds);
return executor_1.execute(_this.outputs, feedDict);
});
if (batchIndex === 0) {
for (var _i = 0, batchOuts_1 = batchOuts; _i < batchOuts_1.length; _i++) {
var batchOut = batchOuts_1[_i];
outs.push(batchOut);
}
}
else {
feeds.push({ key: _this.inputs[0], value: insBatch });
for (var i = 0; i < batchOuts.length; ++i) {
outs[i] = K.concatAlongFirstAxis(outs[i], batchOuts[i]);
}
}
var feedDict = new executor_1.FeedDict(feeds);
return executor_1.execute(_this.outputs, feedDict);
});
if (batchIndex === 0) {
for (var _i = 0, batchOuts_1 = batchOuts; _i < batchOuts_1.length; _i++) {
var batchOut = batchOuts_1[_i];
outs.push(batchOut);
}
};
for (var batchIndex = 0; batchIndex < batches.length; ++batchIndex) {
_loop_3(batchIndex);
}
else {
for (var i = 0; i < batchOuts.length; ++i) {
outs[i] = K.concatAlongFirstAxis(outs[i], batchOuts[i]);
}
}
};
for (var batchIndex = 0; batchIndex < batches.length; ++batchIndex) {
_loop_3(batchIndex);
}
return generic_utils_1.singletonOrArray(outs);
return generic_utils_1.singletonOrArray(outs);
});
};

@@ -730,3 +735,3 @@ Model.prototype.predict = function (x, config) {

var _this = this;
var doValidation, numTrainSamples, indexArray, callbackList, _loop_4, this_1, epoch, state_1;
var doValidation, numTrainSamples, indexArray, callbackList, _loop_4, this_1, epoch, state_2;
return __generator(this, function (_a) {

@@ -762,8 +767,8 @@ switch (_a.label) {

}
this.history = new callbacks_1.History();
this.history = new base_callbacks_1.History();
if (callbacks == null) {
callbacks = [new callbacks_1.BaseLogger()];
callbacks = [new base_callbacks_1.BaseLogger()];
}
else {
callbacks = [new callbacks_1.BaseLogger()].concat(callbacks);
callbacks = [new base_callbacks_1.BaseLogger()].concat(callbacks);
}

@@ -774,3 +779,3 @@ callbacks = callbacks.concat([this.history]);

}
callbackList = new callbacks_1.CallbackList(callbacks);
callbackList = new base_callbacks_1.CallbackList(callbacks);
callbackList.setModel(this);

@@ -789,3 +794,3 @@ callbackList.setParams({

_loop_4 = function (epoch) {
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex, state_2;
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex, state_3;
return __generator(this, function (_a) {

@@ -846,3 +851,3 @@ switch (_a.label) {

_a.sent();
callbacks_1.disposeTensorsInLogs(batchLogs);
logs_1.disposeTensorsInLogs(batchLogs);
if (this_1.stopTraining) {

@@ -861,4 +866,4 @@ return [2, "break"];

case 4:
state_2 = _a.sent();
if (state_2 === "break")
state_3 = _a.sent();
if (state_3 === "break")
return [3, 6];

@@ -889,4 +894,4 @@ _a.label = 5;

case 3:
state_1 = _a.sent();
if (state_1 === "break")
state_2 = _a.sent();
if (state_2 === "break")
return [3, 5];

@@ -909,36 +914,39 @@ _a.label = 4;

Model.prototype.testLoop = function (f, ins, batchSize, verbose, steps) {
var _this = this;
if (verbose === void 0) { verbose = 0; }
var numSamples = this.checkNumSamples(ins, batchSize, steps, 'steps');
var outs = [];
if (verbose === 1) {
throw new errors_1.NotImplementedError('Verbose mode is not implemented yet.');
}
if (steps != null) {
throw new errors_1.NotImplementedError('steps mode in testLoop() is not implemented yet');
}
else {
var batches = makeBatches(numSamples, batchSize);
var indexArray = tfjs_core_1.tensor1d(math_utils_1.range(0, numSamples));
for (var batchIndex = 0; batchIndex < batches.length; ++batchIndex) {
var batchStart = batches[batchIndex][0];
var batchEnd = batches[batchIndex][1];
var batchIds = K.sliceAlongFirstAxis(indexArray, batchStart, batchEnd - batchStart);
var insBatch = sliceArraysByIndices(ins, batchIds);
var batchOuts = f(insBatch);
if (batchIndex === 0) {
return tfc.tidy(function () {
var numSamples = _this.checkNumSamples(ins, batchSize, steps, 'steps');
var outs = [];
if (verbose === 1) {
throw new errors_1.NotImplementedError('Verbose mode is not implemented yet.');
}
if (steps != null) {
throw new errors_1.NotImplementedError('steps mode in testLoop() is not implemented yet');
}
else {
var batches = makeBatches(numSamples, batchSize);
var indexArray = tfjs_core_1.tensor1d(math_utils_1.range(0, numSamples));
for (var batchIndex = 0; batchIndex < batches.length; ++batchIndex) {
var batchStart = batches[batchIndex][0];
var batchEnd = batches[batchIndex][1];
var batchIds = K.sliceAlongFirstAxis(indexArray, batchStart, batchEnd - batchStart);
var insBatch = sliceArraysByIndices(ins, batchIds);
var batchOuts = f(insBatch);
if (batchIndex === 0) {
for (var i = 0; i < batchOuts.length; ++i) {
outs.push(state_1.getScalar(0));
}
}
for (var i = 0; i < batchOuts.length; ++i) {
outs.push(K.getScalar(0));
var batchOut = batchOuts[i];
outs[i] =
tfc.add(outs[i], tfc.mul(state_1.getScalar(batchEnd - batchStart), batchOut));
}
}
for (var i = 0; i < batchOuts.length; ++i) {
var batchOut = batchOuts[i];
outs[i] =
tfc.add(outs[i], K.scalarTimesArray(K.getScalar(batchEnd - batchStart), batchOut));
for (var i = 0; i < outs.length; ++i) {
outs[i] = tfc.div(outs[i], state_1.getScalar(numSamples));
}
}
for (var i = 0; i < outs.length; ++i) {
outs[i] = tfc.div(outs[i], K.getScalar(numSamples));
}
}
return outs;
return outs;
});
};

@@ -1101,3 +1109,3 @@ Model.prototype.getDedupedMetricsNames = function () {

}
callbacks = callbacks_1.standardizeCallbacks(config.callbacks);
callbacks = base_callbacks_1.standardizeCallbacks(config.callbacks);
return [4, this.fitLoop(trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose, callbacks, valFunction, valIns, config.shuffle, callbackMetrics, null, null, null)];

@@ -1104,0 +1112,0 @@ case 1:

import { io, Tensor } from '@tensorflow/tfjs-core';
import { Constraint, MaxNormConfig, MinMaxNormConfig, UnitNormConfig } from './constraints';
import { ContainerConfig, InputConfig, InputLayerConfig, Layer, LayerConfig } from './engine/topology';
import { ContainerConfig, InputConfig, InputLayerConfig, Layer, LayerConfig, SymbolicTensor } from './engine/topology';
import { Model } from './engine/training';

@@ -19,3 +19,2 @@ import { ConstantConfig, IdentityConfig, Initializer, OrthogonalConfig, RandomNormalConfig, RandomUniformConfig, SeedOnlyInitializerConfig, TruncatedNormalConfig, VarianceScalingConfig, Zeros } from './initializers';

import { L1Config, L1L2Config, L2Config, Regularizer } from './regularizers';
import { SymbolicTensor } from './types';
export declare class ModelExports {

@@ -22,0 +21,0 @@ static model(config: ContainerConfig): Model;

import { ConstraintExports, InitializerExports, LayerExports, MetricExports, ModelExports, RegularizerExports } from './exports';
export { Callback, CallbackList, CustomCallback, CustomCallbackConfig, Logs } from './callbacks';
export { CallbackList, CustomCallback, CustomCallbackConfig } from './base_callbacks';
export { Callback } from './callbacks';
export { SymbolicTensor } from './engine/topology';
export { Model, ModelCompileConfig, ModelEvaluateConfig, ModelFitConfig } from './engine/training';
export { GRUCellLayerConfig, GRULayerConfig, LSTMCellLayerConfig, LSTMLayerConfig, RNN, RNNLayerConfig, SimpleRNNCellLayerConfig, SimpleRNNLayerConfig } from './layers/recurrent';
export { Logs } from './logs';
export { ModelAndWeightsConfig, Sequential, SequentialConfig } from './models';
export { Shape, SymbolicTensor } from './types';
export { Shape } from './types';
export { version as version_layers } from './version';

@@ -8,0 +11,0 @@ export declare const model: typeof ModelExports.model;

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var exports_1 = require("./exports");
var base_callbacks_1 = require("./base_callbacks");
exports.CallbackList = base_callbacks_1.CallbackList;
exports.CustomCallback = base_callbacks_1.CustomCallback;
var callbacks_1 = require("./callbacks");
exports.Callback = callbacks_1.Callback;
exports.CallbackList = callbacks_1.CallbackList;
exports.CustomCallback = callbacks_1.CustomCallback;
var topology_1 = require("./engine/topology");
exports.SymbolicTensor = topology_1.SymbolicTensor;
var training_1 = require("./engine/training");

@@ -14,4 +17,2 @@ exports.Model = training_1.Model;

exports.Sequential = models_1.Sequential;
var types_1 = require("./types");
exports.SymbolicTensor = types_1.SymbolicTensor;
var version_1 = require("./version");

@@ -18,0 +19,0 @@ exports.version_layers = version_1.version;

@@ -22,2 +22,3 @@ "use strict";

var common_1 = require("./common");
var state_1 = require("./backend/state");
var errors_1 = require("./errors");

@@ -88,3 +89,3 @@ var generic_utils_1 = require("./utils/generic_utils");

var _this = this;
return tfjs_core_1.tidy(function () { return K.scalarTimesArray(tfjs_core_1.scalar(_this.value), tfjs_core_1.ones(shape, dtype)); });
return tfjs_core_1.tidy(function () { return tfjs_core_1.mul(tfjs_core_1.scalar(_this.value), tfjs_core_1.ones(shape, dtype)); });
};

@@ -177,3 +178,3 @@ Constant.prototype.getConfig = function () {

var _this = _super.call(this) || this;
_this.gain = config.gain != null ? tfjs_core_1.scalar(config.gain) : K.getScalar(1.0);
_this.gain = config.gain != null ? tfjs_core_1.scalar(config.gain) : state_1.getScalar(1.0);
return _this;

@@ -189,3 +190,3 @@ }

else {
return K.scalarTimesArray(_this.gain, tfjs_core_1.eye(shape[0]));
return tfjs_core_1.mul(_this.gain, tfjs_core_1.eye(shape[0]));
}

@@ -377,3 +378,3 @@ });

}
return K.scalarTimesArray(K.getScalar(_this.gain), q);
return tfjs_core_1.mul(state_1.getScalar(_this.gain), q);
});

@@ -380,0 +381,0 @@ };

@@ -16,6 +16,6 @@ "use strict";

var tfjs_backend_1 = require("../backend/tfjs_backend");
var tfjs_backend_2 = require("../backend/tfjs_backend");
var topology_1 = require("../engine/topology");
var state_1 = require("../backend/state");
var errors_1 = require("../errors");
var generic_utils = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
var LeakyReLU = (function (_super) {

@@ -33,3 +33,3 @@ __extends(LeakyReLU, _super);

LeakyReLU.prototype.call = function (inputs, kwargs) {
var x = generic_utils.getExactlyOneTensor(inputs);
var x = types_utils_1.getExactlyOneTensor(inputs);
return tfjs_core_1.leakyRelu(x, this.alpha);

@@ -67,3 +67,3 @@ };

ELU.prototype.call = function (inputs, kwargs) {
var x = generic_utils.getExactlyOneTensor(inputs);
var x = types_utils_1.getExactlyOneTensor(inputs);
return tfjs_core_1.elu(x);

@@ -94,7 +94,7 @@ };

_this.theta = config.theta == null ? _this.DEFAULT_THETA : config.theta;
_this.thetaTensor = tfjs_backend_2.getScalar(_this.theta);
_this.thetaTensor = state_1.getScalar(_this.theta);
return _this;
}
ThresholdedReLU.prototype.call = function (inputs, kwargs) {
var x = generic_utils.getExactlyOneTensor(inputs);
var x = types_utils_1.getExactlyOneTensor(inputs);
return x.mul(tfjs_backend_1.cast(x.greater(this.thetaTensor), 'float32'));

@@ -129,3 +129,3 @@ };

Softmax.prototype.call = function (inputs, kwargs) {
var x = generic_utils.getExactlyOneTensor(inputs);
var x = types_utils_1.getExactlyOneTensor(inputs);
return this.softmax(x, this.axis);

@@ -132,0 +132,0 @@ };

@@ -23,3 +23,3 @@ "use strict";

var conv_utils_1 = require("../utils/conv_utils");
var generic_utils_1 = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
var convolutional_1 = require("./convolutional");

@@ -64,3 +64,3 @@ function depthwiseConv2d(x, depthwiseKernel, strides, padding, dataFormat, dilationRate) {

DepthwiseConv2D.prototype.build = function (inputShape) {
inputShape = generic_utils_1.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
if (inputShape.length < 4) {

@@ -91,3 +91,3 @@ throw new errors_1.ValueError("Inputs to DepthwiseConv2D should have rank 4. " +

return tfjs_core_1.tidy(function () {
inputs = generic_utils_1.getExactlyOneTensor(inputs);
inputs = types_utils_1.getExactlyOneTensor(inputs);
var outputs = depthwiseConv2d(inputs, _this.depthwiseKernel.read(), _this.strides, _this.padding, _this.dataFormat, null);

@@ -104,3 +104,3 @@ if (_this.useBias) {

DepthwiseConv2D.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils_1.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var rows = this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1];

@@ -107,0 +107,0 @@ var cols = this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2];

@@ -42,3 +42,3 @@ import { serialization, Tensor } from '@tensorflow/tfjs-core';

protected readonly useBias: boolean;
protected readonly dilationRate: number | [number] | [number, number];
protected readonly dilationRate: number[];
protected readonly biasInitializer?: Initializer;

@@ -45,0 +45,0 @@ protected readonly biasConstraint?: Constraint;

@@ -26,2 +26,3 @@ "use strict";

var generic_utils = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
function preprocessConv2DInput(x, dataFormat) {

@@ -153,3 +154,3 @@ return tfjs_core_1.tidy(function () {

_this.activityRegularizer = regularizers_1.getRegularizer(config.activityRegularizer);
_this.dilationRate = config.dilationRate == null ? 1 : config.dilationRate;
_this.dilationRate = conv_utils_1.normalizeArray(config.dilationRate == null ? 1 : config.dilationRate, rank, 'dilationRate');
if (_this.rank === 1 &&

@@ -196,3 +197,3 @@ (Array.isArray(_this.dilationRate) &&

Conv.prototype.build = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var channelAxis = this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1;

@@ -216,7 +217,7 @@ if (inputShape[channelAxis] == null) {

return tfjs_core_1.tidy(function () {
inputs = generic_utils.getExactlyOneTensor(inputs);
inputs = types_utils_1.getExactlyOneTensor(inputs);
var outputs;
var biasValue = _this.bias == null ? null : _this.bias.read();
if (_this.rank === 1) {
outputs = conv1dWithBias(inputs, _this.kernel.read(), biasValue, _this.strides[0], _this.padding, _this.dataFormat, _this.dilationRate);
outputs = conv1dWithBias(inputs, _this.kernel.read(), biasValue, _this.strides[0], _this.padding, _this.dataFormat, _this.dilationRate[0]);
}

@@ -236,3 +237,3 @@ else if (_this.rank === 2) {

Conv.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var newSpace = [];

@@ -326,3 +327,3 @@ var space = (this.dataFormat === 'channelsLast') ?

Conv2DTranspose.prototype.build = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
if (inputShape.length !== 4) {

@@ -351,3 +352,3 @@ throw new errors_1.ValueError('Input should have rank 4; Received input shape: ' +

return tfc.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
if (input.shape.length !== 4) {

@@ -396,3 +397,3 @@ throw new errors_1.ValueError("Conv2DTranspose.call() expects input tensor to be rank-4, but " +

Conv2DTranspose.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var outputShape = inputShape.slice();

@@ -468,3 +469,3 @@ var channelAxis;

SeparableConv.prototype.build = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
if (inputShape.length < this.rank + 2) {

@@ -504,3 +505,3 @@ throw new errors_1.ValueError("Inputs to SeparableConv" + this.rank + "D should have rank " +

return tfjs_core_1.tidy(function () {
inputs = generic_utils.getExactlyOneTensor(inputs);
inputs = types_utils_1.getExactlyOneTensor(inputs);
var output;

@@ -624,3 +625,3 @@ if (_this.rank === 1) {

return tfjs_core_1.tidy(function () {
inputs = generic_utils.getExactlyOneTensor(inputs);
inputs = types_utils_1.getExactlyOneTensor(inputs);
if (_this.dataFormat === 'channelsLast') {

@@ -673,3 +674,3 @@ var hSliced = K.sliceAlongAxis(inputs, _this.cropping[0][0], inputs.shape[1] - _this.cropping[0][0] - _this.cropping[0][1], 2);

return tfc.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
var inputShape = input.shape;

@@ -676,0 +677,0 @@ if (_this.dataFormat === 'channelsFirst') {

@@ -18,8 +18,8 @@ "use strict";

var topology_1 = require("../engine/topology");
var state_1 = require("../backend/state");
var errors_1 = require("../errors");
var initializers_1 = require("../initializers");
var regularizers_1 = require("../regularizers");
var generic_utils = require("../utils/generic_utils");
var generic_utils_1 = require("../utils/generic_utils");
var math_utils = require("../utils/math_utils");
var types_utils_1 = require("../utils/types_utils");
var Dropout = (function (_super) {

@@ -30,3 +30,3 @@ __extends(Dropout, _super);

_this.rate = Math.max(Math.min(config.rate, 1), 0);
_this.rateScalar = K.getScalar(_this.rate);
_this.rateScalar = state_1.getScalar(_this.rate);
_this.noiseShape = config.noiseShape;

@@ -56,3 +56,3 @@ _this.seed = config.seed;

_this.invokeCallHook(inputs, kwargs);
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
if (_this.noiseShape != null &&

@@ -122,3 +122,3 @@ !tfjs_core_1.util.arraysEqual(input.shape, _this.noiseShape)) {

Dense.prototype.build = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var inputLastDim = inputShape[inputShape.length - 1];

@@ -136,3 +136,3 @@ if (this.kernel == null) {

Dense.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var outputShape = inputShape.slice();

@@ -146,3 +146,3 @@ outputShape[outputShape.length - 1] = this.units;

_this.invokeCallHook(inputs, kwargs);
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
var output = K.dot(input, _this.kernel.read());

@@ -188,3 +188,3 @@ if (_this.bias != null) {

Flatten.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
for (var _i = 0, _a = inputShape.slice(1); _i < _a.length; _i++) {

@@ -205,3 +205,3 @@ var dim = _a[_i];

_this.invokeCallHook(inputs, kwargs);
return K.batchFlatten(generic_utils.getExactlyOneTensor(inputs));
return K.batchFlatten(types_utils_1.getExactlyOneTensor(inputs));
});

@@ -226,3 +226,3 @@ };

_this.invokeCallHook(inputs, kwargs);
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
return _this.activation.apply(input);

@@ -256,3 +256,3 @@ });

return tfjs_core_1.tidy(function () {
inputs = generic_utils_1.getExactlyOneTensor(inputs);
inputs = types_utils_1.getExactlyOneTensor(inputs);
return K.repeat(inputs, _this.n);

@@ -339,4 +339,4 @@ });

_this.invokeCallHook(inputs, kwargs);
var input = generic_utils.getExactlyOneTensor(inputs);
var inputShape = K.shape(input);
var input = types_utils_1.getExactlyOneTensor(inputs);
var inputShape = input.shape;
var outputShape = inputShape.slice(0, 1).concat(_this.fixUnknownDimension(inputShape.slice(1), _this.targetShape));

@@ -343,0 +343,0 @@ return input.reshape(outputShape);

@@ -21,3 +21,3 @@ "use strict";

var generic_utils = require("../utils/generic_utils");
var generic_utils_1 = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
var Embedding = (function (_super) {

@@ -60,3 +60,3 @@ __extends(Embedding, _super);

Embedding.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
if (this.inputLength == null) {

@@ -91,8 +91,8 @@ return inputShape.concat([this.outputDim]);

_this.invokeCallHook(inputs, kwargs);
var input = generic_utils.getExactlyOneTensor(inputs);
if (K.dtype(input) !== 'int32') {
var input = types_utils_1.getExactlyOneTensor(inputs);
if (input.dtype !== 'int32') {
input = K.cast(input, 'int32');
}
var output = K.gather(_this.embeddings.read(), input.as1D());
return output.reshape(generic_utils_1.getExactlyOneShape(_this.computeOutputShape(input.shape)));
return output.reshape(types_utils_1.getExactlyOneShape(_this.computeOutputShape(input.shape)));
});

@@ -99,0 +99,0 @@ };

import { serialization, Tensor } from '@tensorflow/tfjs-core';
import { Layer, LayerConfig } from '../engine/topology';
import { Kwargs, Shape, SymbolicTensor } from '../types';
import { Layer, LayerConfig, SymbolicTensor } from '../engine/topology';
import { Kwargs, Shape } from '../types';
export declare abstract class Merge extends Layer {

@@ -5,0 +5,0 @@ protected reshapeRequired: boolean;

@@ -17,5 +17,7 @@ "use strict";

var topology_1 = require("../engine/topology");
var state_1 = require("../backend/state");
var errors_1 = require("../errors");
var generic_utils = require("../utils/generic_utils");
var mathUtils = require("../utils/math_utils");
var types_utils_1 = require("../utils/types_utils");
var Merge = (function (_super) {

@@ -66,3 +68,3 @@ __extends(Merge, _super);

if (Array.isArray(inputShape) && !Array.isArray(inputShape[0])) {
inputShape = [generic_utils.getExactlyOneShape(inputShape)];
inputShape = [types_utils_1.getExactlyOneShape(inputShape)];
}

@@ -125,3 +127,3 @@ inputShape = inputShape;

if (xNDim == null) {
var xShape = K.shape(x);
var xShape = x.shape;
var batchSize = xShape[0];

@@ -148,3 +150,3 @@ var newShape = xShape.slice(1).concat([batchSize]);

if (yNDim == null) {
var yShape = K.shape(y);
var yShape = y.shape;
var yNDim_1 = yShape.length;

@@ -273,3 +275,3 @@ var batchSize = yShape[yNDim_1 - 1];

}
return K.scalarTimesArray(K.getScalar(1 / inputs.length), output);
return tfc.mul(state_1.getScalar(1 / inputs.length), output);
});

@@ -276,0 +278,0 @@ };

@@ -15,5 +15,5 @@ "use strict";

var tfjs_core_1 = require("@tensorflow/tfjs-core");
var K = require("../backend/tfjs_backend");
var constraints_1 = require("../constraints");
var topology_1 = require("../engine/topology");
var state_1 = require("../backend/state");
var errors_1 = require("../errors");

@@ -24,2 +24,3 @@ var initializers_1 = require("../initializers");

var math_utils = require("../utils/math_utils");
var types_utils_1 = require("../utils/types_utils");
function batchNormalization(x, mean, variance, beta, gamma, epsilon) {

@@ -112,3 +113,3 @@ if (epsilon === void 0) { epsilon = 1e-3; }

BatchNormalization.prototype.build = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var axis = this.axis >= 0 ? this.axis : (this.axis + inputShape.length);

@@ -139,4 +140,4 @@ var dim = inputShape[axis];

var training = kwargs['training'] == null ? false : kwargs['training'];
var input = generic_utils.getExactlyOneTensor(inputs);
var inputShape = K.shape(input);
var input = types_utils_1.getExactlyOneTensor(inputs);
var inputShape = input.shape;
var ndim = inputShape.length;

@@ -168,3 +169,3 @@ var reductionAxes = math_utils.range(0, ndim);

var sampleSize = math_utils.arrayProd(reductionAxes.map(function (axis) { return input.shape[axis]; }));
var varianceDebiased = variance.mul(K.getScalar(sampleSize / (sampleSize - (1 + _this.epsilon))));
var varianceDebiased = variance.mul(state_1.getScalar(sampleSize / (sampleSize - (1 + _this.epsilon))));
var updateMovingMeanAndVariance = function () {

@@ -171,0 +172,0 @@ _this.stepCount++;

@@ -18,3 +18,3 @@ "use strict";

var errors_1 = require("../errors");
var generic_utils_1 = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
function temporalPadding(x, padding) {

@@ -120,3 +120,3 @@ return tfjs_core_1.tidy(function () {

ZeroPadding2D.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils_1.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var rows;

@@ -157,3 +157,3 @@ var cols;

var _this = this;
return tfjs_core_1.tidy(function () { return spatial2dPadding(generic_utils_1.getExactlyOneTensor(inputs), _this.padding, _this.dataFormat); });
return tfjs_core_1.tidy(function () { return spatial2dPadding(types_utils_1.getExactlyOneTensor(inputs), _this.padding, _this.dataFormat); });
};

@@ -160,0 +160,0 @@ ZeroPadding2D.prototype.getConfig = function () {

@@ -22,3 +22,3 @@ "use strict";

var conv_utils_1 = require("../utils/conv_utils");
var generic_utils = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
var convolutional_1 = require("./convolutional");

@@ -103,3 +103,3 @@ function pool2d(x, poolSize, strides, padding, dataFormat, poolMode) {

Pooling1D.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var length = conv_utils_1.convOutputLength(inputShape[1], this.poolSize[0], this.padding, this.strides[0]);

@@ -112,4 +112,4 @@ return [inputShape[0], length, inputShape[2]];

_this.invokeCallHook(inputs, kwargs);
inputs = K.expandDims(generic_utils.getExactlyOneTensor(inputs), 2);
var output = _this.poolingFunction(generic_utils.getExactlyOneTensor(inputs), [_this.poolSize[0], 1], [_this.strides[0], 1], _this.padding, 'channelsLast');
inputs = K.expandDims(types_utils_1.getExactlyOneTensor(inputs), 2);
var output = _this.poolingFunction(types_utils_1.getExactlyOneTensor(inputs), [_this.poolSize[0], 1], [_this.strides[0], 1], _this.padding, 'channelsLast');
return tfc.squeeze(output, [2]);

@@ -195,3 +195,3 @@ });

Pooling2D.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var rows = this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1];

@@ -214,3 +214,3 @@ var cols = this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2];

_this.invokeCallHook(inputs, kwargs);
return _this.poolingFunction(generic_utils.getExactlyOneTensor(inputs), _this.poolSize, _this.strides, _this.padding, _this.dataFormat);
return _this.poolingFunction(types_utils_1.getExactlyOneTensor(inputs), _this.poolSize, _this.strides, _this.padding, _this.dataFormat);
});

@@ -285,3 +285,3 @@ };

return tfjs_core_1.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
return tfc.mean(input, 1);

@@ -302,3 +302,3 @@ });

return tfjs_core_1.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
return tfc.max(input, 1);

@@ -351,3 +351,3 @@ });

return tfjs_core_1.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
if (_this.dataFormat === 'channelsLast') {

@@ -374,3 +374,3 @@ return tfc.mean(input, [1, 2]);

return tfjs_core_1.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var input = types_utils_1.getExactlyOneTensor(inputs);
if (_this.dataFormat === 'channelsLast') {

@@ -377,0 +377,0 @@ return tfc.max(input, [1, 2]);

@@ -5,7 +5,7 @@ import * as tfc from '@tensorflow/tfjs-core';

import { Constraint, ConstraintIdentifier } from '../constraints';
import { InputSpec } from '../engine/topology';
import { InputSpec, SymbolicTensor } from '../engine/topology';
import { Layer, LayerConfig } from '../engine/topology';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Kwargs, RnnStepFunction, Shape, SymbolicTensor } from '../types';
import { Kwargs, RnnStepFunction, Shape } from '../types';
import { LayerVariable } from '../variables';

@@ -12,0 +12,0 @@ export declare function rnn(stepFunction: RnnStepFunction, inputs: Tensor, initialStates: Tensor[], goBackwards?: boolean, mask?: Tensor, constants?: Tensor[], unroll?: boolean, inputLength?: number): [Tensor, Tensor, Tensor[]];

import * as tfc from '@tensorflow/tfjs-core';
import { serialization, Tensor } from '@tensorflow/tfjs-core';
import { Layer, LayerConfig } from '../engine/topology';
import { Layer, LayerConfig, SymbolicTensor } from '../engine/topology';
import { Kwargs, Shape } from '../types';
import { RegularizerFn, SymbolicTensor } from '../types';
import { RegularizerFn } from '../types';
import { LayerVariable } from '../variables';

@@ -7,0 +7,0 @@ import { RNN } from './recurrent';

@@ -16,5 +16,8 @@ "use strict";

var K = require("../backend/tfjs_backend");
var common_1 = require("../common");
var topology_1 = require("../engine/topology");
var state_1 = require("../backend/state");
var errors_1 = require("../errors");
var generic_utils = require("../utils/generic_utils");
var types_utils_1 = require("../utils/types_utils");
var recurrent_1 = require("./recurrent");

@@ -114,3 +117,3 @@ var serialization_1 = require("./serialization");

TimeDistributed.prototype.build = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
if (inputShape.length < 3) {

@@ -129,3 +132,3 @@ throw new errors_1.ValueError("TimeDistributed layer expects an input shape >= 3D, but received " +

TimeDistributed.prototype.computeOutputShape = function (inputShape) {
inputShape = generic_utils.getExactlyOneShape(inputShape);
inputShape = types_utils_1.getExactlyOneShape(inputShape);
var childInputShape = [inputShape[0]].concat(inputShape.slice(2));

@@ -139,3 +142,3 @@ var childOutputShape = this.layer.computeOutputShape(childInputShape);

return tfjs_core_1.tidy(function () {
inputs = generic_utils.getExactlyOneTensor(inputs);
inputs = types_utils_1.getExactlyOneTensor(inputs);
var step = function (inputs, states) {

@@ -298,3 +301,3 @@ var output = _this.layer.call(inputs, kwargs);

else if (_this.mergeMode === 'ave') {
output = K.scalarTimesArray(K.getScalar(0.5), tfc.add(y, yRev));
output = tfc.mul(state_1.getScalar(0.5), tfc.add(y, yRev));
}

@@ -322,6 +325,6 @@ else if (_this.mergeMode === 'mul') {

var _this = this;
K.nameScope(this.forwardLayer.name, function () {
common_1.nameScope(this.forwardLayer.name, function () {
_this.forwardLayer.build(inputShape);
});
K.nameScope(this.backwardLayer.name, function () {
common_1.nameScope(this.backwardLayer.name, function () {
_this.backwardLayer.build(inputShape);

@@ -328,0 +331,0 @@ });

@@ -5,3 +5,5 @@ "use strict";

var tfjs_core_1 = require("@tensorflow/tfjs-core");
var common_1 = require("./backend/common");
var K = require("./backend/tfjs_backend");
var state_1 = require("./backend/state");
var errors_1 = require("./errors");

@@ -11,3 +13,3 @@ function l2Normalize(x, axis) {

var squareSum = tfc.sum(K.square(x), axis, true);
var epsilonTensor = K.scalarTimesArray(tfjs_core_1.scalar(K.epsilon()), tfc.onesLike(x));
var epsilonTensor = tfc.mul(tfjs_core_1.scalar(common_1.epsilon()), tfc.onesLike(x));
var norm = tfc.sqrt(tfc.maximum(squareSum, epsilonTensor));

@@ -29,5 +31,5 @@ return tfc.div(x, norm);

var diff = tfc.sub(yTrue, yPred);
var clippedTrue = tfc.clipByValue(tfc.abs(yTrue), K.epsilon(), Number.MAX_VALUE);
var clippedTrue = tfc.clipByValue(tfc.abs(yTrue), common_1.epsilon(), Number.MAX_VALUE);
var absResult = tfc.abs(tfc.div(diff, clippedTrue));
return K.scalarTimesArray(K.getScalar(100.0), tfc.mean(absResult, -1));
return tfc.mul(state_1.getScalar(100.0), tfc.mean(absResult, -1));
});

@@ -38,7 +40,7 @@ }

return tfjs_core_1.tidy(function () {
var one = K.getScalar(1.0);
var clippedPred = tfc.clipByValue(yPred, K.epsilon(), Number.MAX_VALUE);
var firstLog = tfc.log(K.scalarPlusArray(one, clippedPred));
var clippedTrue = tfc.clipByValue(yTrue, K.epsilon(), Number.MAX_VALUE);
var secondLog = tfc.log(K.scalarPlusArray(one, clippedTrue));
var one = state_1.getScalar(1.0);
var clippedPred = tfc.clipByValue(yPred, common_1.epsilon(), Number.MAX_VALUE);
var firstLog = tfc.log(tfc.add(one, clippedPred));
var clippedTrue = tfc.clipByValue(yTrue, common_1.epsilon(), Number.MAX_VALUE);
var secondLog = tfc.log(tfc.add(one, clippedTrue));
return tfc.mean(K.square(tfc.sub(firstLog, secondLog)), -1);

@@ -50,4 +52,4 @@ });

return tfjs_core_1.tidy(function () {
var zeroTensor = K.getScalar(0.0);
var one = K.getScalar(1.0);
var zeroTensor = state_1.getScalar(0.0);
var one = state_1.getScalar(1.0);
var maxResult = tfc.maximum(zeroTensor, tfc.sub(one, tfc.mul(yTrue, yPred)));

@@ -60,4 +62,4 @@ return tfc.mean(K.square(maxResult), -1);

return tfjs_core_1.tidy(function () {
var zeroTensor = K.getScalar(0.0);
var one = K.getScalar(1.0);
var zeroTensor = state_1.getScalar(0.0);
var one = state_1.getScalar(1.0);
var maxResult = tfc.maximum(zeroTensor, tfc.sub(one, tfc.mul(yTrue, yPred)));

@@ -70,7 +72,7 @@ return tfc.mean(maxResult, -1);

return tfjs_core_1.tidy(function () {
var zeroTensor = K.getScalar(0.0);
var one = K.getScalar(1.0);
var zeroTensor = state_1.getScalar(0.0);
var one = state_1.getScalar(1.0);
var pos = tfc.sum(tfc.mul(yTrue, yPred), -1);
var neg = tfc.max(tfc.mul(tfc.sub(one, yTrue), yPred), -1);
return tfc.maximum(zeroTensor, K.scalarPlusArray(one, tfc.sub(neg, pos)));
return tfc.maximum(zeroTensor, tfc.add(one, tfc.sub(neg, pos)));
});

@@ -81,5 +83,5 @@ }

return tfjs_core_1.tidy(function () {
var log2 = K.getScalar(Math.log(2.0));
var log2 = state_1.getScalar(Math.log(2.0));
var predictionDiff = tfc.sub(yPred, yTrue);
var logcoshResult = tfc.sub(tfc.add(predictionDiff, tfc.softplus(K.scalarTimesArray(K.getScalar(-2.0), predictionDiff))), log2);
var logcoshResult = tfc.sub(tfc.add(predictionDiff, tfc.softplus(tfc.mul(state_1.getScalar(-2.0), predictionDiff))), log2);
return tfc.mean(logcoshResult, -1);

@@ -96,7 +98,7 @@ });

else {
var outputSum = tfc.sum(output, K.shape(output).length - 1, true);
var outputSum = tfc.sum(output, output.shape.length - 1, true);
output = tfc.div(output, outputSum);
}
output = tfc.clipByValue(output, K.epsilon(), 1 - K.epsilon());
return tfc.neg(tfc.sum(tfc.mul(target.toFloat(), tfc.log(output)), K.shape(output).length - 1));
output = tfc.clipByValue(output, common_1.epsilon(), 1 - common_1.epsilon());
return tfc.neg(tfc.sum(tfc.mul(target.toFloat(), tfc.log(output)), output.shape.length - 1));
});

@@ -109,3 +111,3 @@ }

var flatTarget = tfc.floor(K.flatten(target)).toInt();
var outputShape = K.shape(output);
var outputShape = output.shape;
var oneHotTarget = tfc.oneHot(flatTarget, outputShape[outputShape.length - 1])

@@ -121,3 +123,3 @@ .reshape(outputShape);

var outputXTarget = tfc.mul(output, target);
var sigmoidOutput = tfc.log(tfc.add(K.getScalar(1), tfc.exp(tfc.neg(tfc.abs(output)))));
var sigmoidOutput = tfc.log(tfc.add(state_1.getScalar(1), tfc.exp(tfc.neg(tfc.abs(output)))));
var result = tfc.add(tfc.sub(maxOutput, outputXTarget), sigmoidOutput);

@@ -131,3 +133,3 @@ return result;

var y;
y = tfc.clipByValue(yPred, K.epsilon(), 1 - K.epsilon());
y = tfc.clipByValue(yPred, common_1.epsilon(), 1 - common_1.epsilon());
y = tfc.log(tfc.div(y, tfc.sub(tfc.onesLike(y), y)));

@@ -140,4 +142,4 @@ return tfc.mean(sigmoidCrossEntropyWithLogits(yTrue, y), -1);

return tfjs_core_1.tidy(function () {
var clippedTrue = tfc.clipByValue(yTrue, K.epsilon(), 1);
var clippedPred = tfc.clipByValue(yPred, K.epsilon(), 1);
var clippedTrue = tfc.clipByValue(yTrue, common_1.epsilon(), 1);
var clippedPred = tfc.clipByValue(yPred, common_1.epsilon(), 1);
return tfc.sum(tfc.mul(yTrue, tfc.log(tfc.div(clippedTrue, clippedPred))), -1);

@@ -149,3 +151,3 @@ });

return tfjs_core_1.tidy(function () {
var logPred = tfc.log(K.scalarPlusArray(K.getScalar(K.epsilon()), yPred));
var logPred = tfc.log(tfc.add(state_1.getScalar(common_1.epsilon()), yPred));
return tfc.mean(tfc.sub(yPred, tfc.mul(yTrue, logPred)), -1);

@@ -152,0 +154,0 @@ });

@@ -6,2 +6,3 @@ "use strict";

var K = require("./backend/tfjs_backend");
var state_1 = require("./backend/state");
var errors_1 = require("./errors");

@@ -12,3 +13,3 @@ var losses_1 = require("./losses");

return tfjs_core_1.tidy(function () {
var threshold = K.scalarTimesArray(K.getScalar(0.5), tfc.onesLike(yPred));
var threshold = tfc.mul(state_1.getScalar(0.5), tfc.onesLike(yPred));
var yPredThresholded = K.cast(tfc.greater(yPred, threshold), yTrue.dtype);

@@ -15,0 +16,0 @@ return tfc.mean(tfc.equal(yTrue, yPredThresholded), -1);

import { io, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';
import { History } from './callbacks';
import { History } from './base_callbacks';
import { Layer } from './engine/topology';

@@ -4,0 +4,0 @@ import { Model, ModelCompileConfig, ModelEvaluateConfig, ModelFitConfig } from './engine/training';

@@ -55,3 +55,3 @@ "use strict";

var tfjs_core_1 = require("@tensorflow/tfjs-core");
var K = require("./backend/tfjs_backend");
var state_1 = require("./backend/state");
var topology_1 = require("./engine/topology");

@@ -63,2 +63,3 @@ var training_1 = require("./engine/training");

var serialization_utils_1 = require("./utils/serialization_utils");
var types_utils_1 = require("./utils/types_utils");
function modelFromJSON(modelAndWeightsConfig, customObjects) {

@@ -157,3 +158,3 @@ return __awaiter(this, void 0, void 0, function () {

_this.built = false;
_this.name = (config.name != null) ? config.name : K.getUid('sequential_');
_this.name = (config.name != null) ? config.name : state_1.getUid('sequential_');
if (config.layers != null) {

@@ -273,3 +274,3 @@ for (var _i = 0, _a = config.layers; _i < _a.length; _i++) {

Sequential.prototype.build = function (inputShape) {
generic_utils.getExactlyOneShape(inputShape);
types_utils_1.getExactlyOneShape(inputShape);
if (this.inputs.length === 0 || this.outputs.length === 0) {

@@ -276,0 +277,0 @@ throw new TypeError('Sequential model cannot be built: model is empty.' +

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tfjs_core_1 = require("@tensorflow/tfjs-core");
var K = require("./backend/tfjs_backend");
var common_1 = require("./backend/common");
var errors_1 = require("./errors");
function getOptimizer(identifier) {
var optimizerMap = {
'Adagrad': function () { return tfjs_core_1.train.adagrad(.01); },
'Adadelta': function () { return tfjs_core_1.train.adadelta(1.0, 0.95, K.epsilon()); },
'Adam': function () { return tfjs_core_1.train.adam(.001, .9, .999, K.epsilon()); },
'Adamax': function () { return tfjs_core_1.train.adamax(0.002, .9, .999, K.epsilon(), 0.0); },
'RMSProp': function () { return tfjs_core_1.train.rmsprop(.001, .9, null, K.epsilon()); },
'SGD': function () { return tfjs_core_1.train.sgd(.01); }
'Adagrad': function () { return tfjs_core_1.train.adagrad(0.01); },
'Adadelta': function () { return tfjs_core_1.train.adadelta(1, 0.95, common_1.epsilon()); },
'Adam': function () { return tfjs_core_1.train.adam(0.001, 0.9, 0.999, common_1.epsilon()); },
'Adamax': function () { return tfjs_core_1.train.adamax(0.002, 0.9, 0.999, common_1.epsilon(), 0); },
'RMSProp': function () { return tfjs_core_1.train.rmsprop(0.001, 0.9, 0, common_1.epsilon()); },
'SGD': function () { return tfjs_core_1.train.sgd(0.01); }
};

@@ -15,0 +15,0 @@ optimizerMap['adagrad'] = optimizerMap['Adagrad'];

@@ -19,4 +19,6 @@ "use strict";

Object.defineProperty(exports, "__esModule", { value: true });
var tfc = require("@tensorflow/tfjs-core");
var tfjs_core_1 = require("@tensorflow/tfjs-core");
var K = require("./backend/tfjs_backend");
var state_1 = require("./backend/state");
var generic_utils_1 = require("./utils/generic_utils");

@@ -39,4 +41,4 @@ var Regularizer = (function (_super) {

_this.hasL2 = l2 !== 0;
_this.l1 = K.getScalar(l1);
_this.l2 = K.getScalar(l2);
_this.l1 = state_1.getScalar(l1);
_this.l2 = state_1.getScalar(l2);
return _this;

@@ -49,8 +51,7 @@ }

if (_this.hasL1) {
regularization =
tfjs_core_1.add(regularization, tfjs_core_1.sum(K.scalarTimesArray(_this.l1, tfjs_core_1.abs(x))));
regularization = tfjs_core_1.add(regularization, tfjs_core_1.sum(tfc.mul(_this.l1, tfjs_core_1.abs(x))));
}
if (_this.hasL2) {
regularization =
tfjs_core_1.add(regularization, tfjs_core_1.sum(K.scalarTimesArray(_this.l2, K.square(x))));
tfjs_core_1.add(regularization, tfjs_core_1.sum(tfc.mul(_this.l2, K.square(x))));
}

@@ -57,0 +58,0 @@ return regularization.asScalar();

@@ -1,20 +0,6 @@

import { DataType, Scalar, Tensor } from '@tensorflow/tfjs-core';
import { Layer } from './engine/topology';
import { Scalar, Tensor } from '@tensorflow/tfjs-core';
export declare type Shape = number[];
export declare function getNextUniqueTensorId(): number;
export declare class SymbolicTensor {
readonly dtype: DataType;
readonly shape: Shape;
sourceLayer: Layer;
readonly inputs: SymbolicTensor[];
readonly callArgs: Kwargs;
readonly outputTensorIndex: number;
readonly id: number;
readonly name: string;
readonly originalName?: string;
readonly rank: number;
nodeIndex: number;
tensorIndex: number;
constructor(dtype: DataType, shape: Shape, sourceLayer: Layer, inputs: SymbolicTensor[], callArgs: Kwargs, name?: string, outputTensorIndex?: number);
}
export declare type HasShape = {
shape: Shape;
};
export declare type LossOrMetricFn = (yTrue: Tensor, yPred: Tensor) => Tensor;

@@ -21,0 +7,0 @@ export declare type RegularizerFn = () => Scalar;

"use strict";
// Compiled (ES5/CommonJS) module for tfjs-layers' SymbolicTensor type.
// TypeScript-emitted helper: applies decorator functions to a class or member.
// Generated by tsc; do not edit by hand.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
var tfjs_core_1 = require("@tensorflow/tfjs-core");
var common_1 = require("./common");
// Module-level counter backing getNextUniqueTensorId(); monotonically
// increasing, never reset, so every SymbolicTensor/variable id is unique
// for the lifetime of the process.
var _nextUniqueTensorId = 0;
// Returns the next unique integer id (post-increment: first call yields 0).
function getNextUniqueTensorId() {
return _nextUniqueTensorId++;
}
exports.getNextUniqueTensorId = getNextUniqueTensorId;
// SymbolicTensor: a placeholder describing a tensor (dtype, shape, producing
// layer, inputs) without holding concrete values. Used to wire up model
// graphs before any data flows through them.
var SymbolicTensor = (function () {
// name is optional; when provided it is scoped and de-duplicated via the
// helpers in ./common, and the pre-scoping name is kept in originalName.
// outputTensorIndex identifies which output of sourceLayer this is.
function SymbolicTensor(dtype, shape, sourceLayer, inputs, callArgs, name, outputTensorIndex) {
this.dtype = dtype;
this.shape = shape;
this.sourceLayer = sourceLayer;
this.inputs = inputs;
this.callArgs = callArgs;
this.outputTensorIndex = outputTensorIndex;
this.id = getNextUniqueTensorId();
if (name != null) {
this.originalName = common_1.getScopedTensorName(name);
this.name = common_1.getUniqueTensorName(this.originalName);
}
// rank mirrors shape.length for convenience.
this.rank = shape.length;
}
// Attach documentation metadata for the tfjs API docs generator.
SymbolicTensor = __decorate([
tfjs_core_1.doc({ heading: 'Models', 'subheading': 'Classes' })
], SymbolicTensor);
return SymbolicTensor;
}());
exports.SymbolicTensor = SymbolicTensor;
//# sourceMappingURL=types.js.map

@@ -1,4 +0,2 @@

import { DataType, serialization, Tensor } from '@tensorflow/tfjs-core';
import { Shape } from '../types';
import { LayerVariable } from '../variables';
import { DataType, serialization } from '@tensorflow/tfjs-core';
export declare function pyListRepeat(value: any, numValues: number): any[];

@@ -10,4 +8,2 @@ export declare function assert(val: boolean, message?: string): void;

export declare function objectListUid(objs: any | any[]): string;
export declare function isArrayOfShapes(x: Shape | Shape[]): boolean;
export declare function normalizeShapeList(x: Shape | Shape[]): Shape[];
export declare function toSnakeCase(name: string): string;

@@ -21,4 +17,2 @@ export declare function toCamelCase(identifier: string): string;

}, printableModuleName?: string): any;
export declare function getExactlyOneTensor(xs: Tensor | Tensor[]): Tensor;
export declare function getExactlyOneShape(shapes: Shape | Shape[]): Shape;
export declare function numberCompare(a: number, b: number): 0 | 1 | -1;

@@ -32,2 +26,1 @@ export declare function reverseNumberCompare(a: number, b: number): number;

export declare function checkArrayTypeAndLength(x: any, expectedType: string, minLength?: number, maxLength?: number): boolean;
export declare function countParamsInWeights(weights: LayerVariable[]): number;

@@ -74,16 +74,2 @@ "use strict";

exports.objectListUid = objectListUid;
function isArrayOfShapes(x) {
return Array.isArray(x) && Array.isArray(x[0]);
}
exports.isArrayOfShapes = isArrayOfShapes;
function normalizeShapeList(x) {
if (x.length === 0) {
return [];
}
if (!Array.isArray(x[0])) {
return [x];
}
return x;
}
exports.normalizeShapeList = normalizeShapeList;
function toSnakeCase(name) {

@@ -193,31 +179,2 @@ var intermediate = name.replace(/(.)([A-Z][a-z0-9]+)/g, '$1_$2');

exports.deserializeKerasObject = deserializeKerasObject;
function getExactlyOneTensor(xs) {
var x;
if (Array.isArray(xs)) {
if (xs.length !== 1) {
throw new errors_1.ValueError("Expected Tensor length to be 1; got " + xs.length);
}
x = xs[0];
}
else {
x = xs;
}
return x;
}
exports.getExactlyOneTensor = getExactlyOneTensor;
function getExactlyOneShape(shapes) {
if (Array.isArray(shapes) && Array.isArray(shapes[0])) {
if (shapes.length === 1) {
shapes = shapes;
return shapes[0];
}
else {
throw new errors_1.ValueError("Expected exactly 1 Shape; got " + shapes.length);
}
}
else {
return shapes;
}
}
exports.getExactlyOneShape = getExactlyOneShape;
function numberCompare(a, b) {

@@ -299,16 +256,2 @@ return (a < b) ? -1 : ((a > b) ? 1 : 0);

exports.checkArrayTypeAndLength = checkArrayTypeAndLength;
function countParamsInWeights(weights) {
var count = 0;
for (var _i = 0, weights_1 = weights; _i < weights_1.length; _i++) {
var weight = weights_1[_i];
if (weight.shape.length === 0) {
count += 1;
}
else {
count += weight.shape.reduce(function (a, b) { return a * b; });
}
}
return count;
}
exports.countParamsInWeights = countParamsInWeights;
//# sourceMappingURL=generic_utils.js.map

@@ -1,2 +0,2 @@

import { Model } from '..';
export declare function printSummary(model: Model, lineLength?: number, positions?: number[], printFn?: (message?: any, ...optionalParams: any[]) => void): void;
import { Container } from '../engine/topology';
export declare function printSummary(model: Container, lineLength?: number, positions?: number[], printFn?: (message?: any, ...optionalParams: any[]) => void): void;
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var generic_utils_1 = require("./generic_utils");
var variable_utils_1 = require("./variable_utils");
function printSummary(model, lineLength, positions, printFn) {

@@ -44,8 +44,8 @@ if (printFn === void 0) { printFn = console.log; }

trainableCount =
generic_utils_1.countParamsInWeights(model.collectedTrainableWeights);
variable_utils_1.countParamsInWeights(model.collectedTrainableWeights);
}
else {
trainableCount = generic_utils_1.countParamsInWeights(model.trainableWeights);
trainableCount = variable_utils_1.countParamsInWeights(model.trainableWeights);
}
var nonTrainableCount = generic_utils_1.countParamsInWeights(model.nonTrainableWeights);
var nonTrainableCount = variable_utils_1.countParamsInWeights(model.nonTrainableWeights);
printFn("Total params: " + (trainableCount + nonTrainableCount));

@@ -52,0 +52,0 @@ printFn("Trainable params: " + trainableCount);

@@ -5,3 +5,3 @@ "use strict";

var jasmine_util_1 = require("@tensorflow/tfjs-core/dist/jasmine_util");
var tfjs_backend_1 = require("../backend/tfjs_backend");
var state_1 = require("../backend/state");
var errors_1 = require("../errors");

@@ -28,3 +28,3 @@ function expectTensorsClose(actual, expected, epsilon) {

beforeEach(function () {
tfjs_backend_1.disposeScalarCache();
state_1.disposeScalarCache();
});

@@ -38,3 +38,3 @@ tests();

beforeEach(function () {
tfjs_backend_1.disposeScalarCache();
state_1.disposeScalarCache();
});

@@ -48,3 +48,3 @@ tests();

beforeEach(function () {
tfjs_backend_1.disposeScalarCache();
state_1.disposeScalarCache();
});

@@ -51,0 +51,0 @@ tests();

@@ -32,1 +32,2 @@ import * as tfc from '@tensorflow/tfjs-core';

export declare function batchSetValue(variablesAndValues: Array<[LayerVariable, Tensor]>): void;
export declare function gradients(lossFn: () => tfc.Scalar, variables: LayerVariable[]): Tensor[];
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tfc = require("@tensorflow/tfjs-core");
var tfjs_backend_1 = require("./backend/tfjs_backend");
var tfjs_core_1 = require("@tensorflow/tfjs-core");
var state_1 = require("./backend/state");
var common_1 = require("./common");
var errors_1 = require("./errors");
var types_1 = require("./types");
var DEFAULT_VARIABLE_NAME_PREFIX = 'Variable';

@@ -17,3 +17,3 @@ var LayerVariable = (function () {

this.shape = val.shape;
this.id = types_1.getNextUniqueTensorId();
this.id = state_1.getNextUniqueTensorId();
name = name == null ? DEFAULT_VARIABLE_NAME_PREFIX : name;

@@ -94,3 +94,3 @@ this.originalName = common_1.getScopedTensorName(name);

}
return new LayerVariable(tfjs_backend_1.randomNormal(shape, mean, stddev, dtype, seed), dtype, name);
return new LayerVariable(tfc.randomNormal(shape, mean, stddev, dtype, seed), dtype, name);
}

@@ -121,2 +121,8 @@ exports.randomNormalVariable = randomNormalVariable;

exports.batchSetValue = batchSetValue;
/**
 * Computes the gradients of `lossFn` with respect to each LayerVariable.
 *
 * @param lossFn Zero-arg function producing a scalar loss tensor.
 * @param variables The LayerVariables to differentiate against.
 * @returns Gradient tensors, one per input variable, in the same order.
 */
function gradients(lossFn, variables) {
    // Unwrap each LayerVariable into the underlying core variable tensor.
    var rawVariables = variables.map(function (v) { return v.read(); });
    // tfjs-core returns gradients keyed by variable name.
    var valueAndGrads = tfjs_core_1.variableGrads(lossFn, rawVariables);
    // Re-associate by name to preserve the caller's ordering.
    return variables.map(function (v) { return valueAndGrads.grads[v.name]; });
}
exports.gradients = gradients;
//# sourceMappingURL=variables.js.map

@@ -1,2 +0,2 @@

declare const version = "0.6.7";
declare const version = "0.7.0";
export { version };
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var version = '0.6.7';
var version = '0.7.0';
exports.version = version;
//# sourceMappingURL=version.js.map
{
"name": "@tensorflow/tfjs-layers",
"version": "0.6.7",
"version": "0.7.0",
"description": "TensorFlow layers API in JavaScript",

@@ -13,3 +13,3 @@ "private": false,

"devDependencies": {
"@tensorflow/tfjs-core": "~0.11.9",
"@tensorflow/tfjs-core": "~0.12.0",
"@types/jasmine": "~2.5.53",

@@ -32,2 +32,3 @@ "clang-format": "~1.2.2",

"tslint": "~5.6.0",
"tslint-no-circular-imports": "^0.5.0",
"typescript": "2.8.3",

@@ -49,4 +50,4 @@ "yalc": "~1.0.0-pre.21"

"peerDependencies": {
"@tensorflow/tfjs-core": "~0.11.9"
"@tensorflow/tfjs-core": "~0.12.0"
}
}

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc