@tensorflow/tfjs-layers - npm Package Compare versions

Comparing version 0.6.2 to 0.6.3
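
Highlights of this release, per the diffs below: Model gains a stopTraining flag that callbacks can set to end fit() early; fit() now disposes validation-split slices and other intermediates so repeated calls no longer leak tensors (new "Model.fit: No memory leak" tests); a new UpSampling2D layer is exported as tfl.layers.upSampling2d; Bidirectional clones its wrapped layer instead of mutating the caller's instance; Sequential.build accepts Shape | Shape[]; and the @tensorflow/tfjs-core dev/peer dependency moves from 0.11.1 to 0.11.4. The changed-file list also includes an .rpt2_cache/ build-cache entry.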

.rpt2_cache/cec123aac81f55a1c996339ffecb87ce940f063b/code/cache/05df0ce2e667f6318910d96446e69defc3a30669

dist-es6/callbacks.js

@@ -343,2 +343,3 @@ var __extends = (this && this.__extends) || (function () {

K.scalarTimesArray(div(K.getScalar(1), K.getScalar(_this.seen)), _this.totals[key]);
_this.totals[key].dispose();
keep(logs[key]);

@@ -345,0 +346,0 @@ });
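
The added keep(logs[key]) call prevents the freshly averaged log scalar from being disposed when the enclosing tidy() scope ends, since the value is stashed in a map rather than returned. A minimal sketch of that pattern, with illustrative names:

import { Scalar, keep, scalar, tidy } from '@tensorflow/tfjs-core';

const logs: {[key: string]: Scalar} = {};
tidy(() => {
  const avg = scalar(0.25);  // stands in for the averaged metric value
  logs['loss'] = avg;        // stored in an outer map, not returned from tidy()
  keep(avg);                 // without keep(), tidy() would dispose avg on exit
});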

@@ -0,1 +1,11 @@

var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {

@@ -37,3 +47,3 @@ return new (P || (P = Promise))(function (resolve, reject) {

var _this = this;
import { abs, mean, ones, scalar, SGDOptimizer, tensor1d, tensor2d, tensor3d, test_util, zeros } from '@tensorflow/tfjs-core';
import { abs, mean, memory, ones, scalar, SGDOptimizer, tensor1d, tensor2d, tensor3d, test_util, zeros } from '@tensorflow/tfjs-core';
import * as K from '../backend/tfjs_backend';

@@ -845,2 +855,69 @@ import { CustomCallback } from '../callbacks';

}); });
var StopAfterNEpochs = (function (_super) {
__extends(StopAfterNEpochs, _super);
function StopAfterNEpochs(epochsToTrain) {
var _this = _super.call(this) || this;
_this.epochsToTrain = epochsToTrain;
return _this;
}
StopAfterNEpochs.prototype.onEpochEnd = function (epoch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (epoch === this.epochsToTrain - 1) {
this.model.stopTraining = true;
}
return [2];
});
});
};
return StopAfterNEpochs;
}(tfl.Callback));
it('Stop training at the end of an epoch: Functional model', function (done) {
createDenseModelAndData(true);
model.compile({ optimizer: 'SGD', loss: 'meanSquaredError' });
model
.fit(inputs, targets, {
batchSize: numSamples,
epochs: 10,
callbacks: [new StopAfterNEpochs(2)]
})
.then(function (history) {
expect(history.history.loss.length).toEqual(2);
done();
})
.catch(function (err) { return done.fail(err.stack); });
});
var StopAfterNBatches = (function (_super) {
__extends(StopAfterNBatches, _super);
function StopAfterNBatches(epochsToTrain) {
var _this = _super.call(this) || this;
_this.batchesToTrain = epochsToTrain;
return _this;
}
StopAfterNBatches.prototype.onBatchEnd = function (batch, logs) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (batch === this.batchesToTrain - 1) {
this.model.stopTraining = true;
}
return [2];
});
});
};
return StopAfterNBatches;
}(tfl.Callback));
it('Stop training at the end of a batch: Sequential model', function (done) {
var sequentialModel = tfl.sequential();
sequentialModel.add(tfl.layers.dense({ units: 1, kernelInitializer: 'ones', inputShape: [inputSize] }));
inputs = ones([numSamples, inputSize]);
targets = ones([numSamples, 1]);
sequentialModel.compile({ optimizer: 'SGD', loss: 'meanSquaredError' });
sequentialModel
.fit(inputs, targets, { batchSize: 1, epochs: 10, callbacks: [new StopAfterNBatches(2)] })
.then(function (history) {
expect(history.history.loss.length).toEqual(1);
done();
})
.catch(function (err) { return done.fail(err.stack); });
});
it('Invalid dict loss: nonexistent output name', function () {
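
The two tests above exercise the stopTraining flag newly declared on Model (see the training.d.ts hunk further down). A user-side sketch, with an illustrative class name:

import * as tfl from '@tensorflow/tfjs-layers';

class StopAfterTwoEpochs extends tfl.Callback {
  async onEpochEnd(epoch: number) {
    if (epoch === 1) {
      this.model.stopTraining = true;  // fit() resolves after this epoch
    }
  }
}

// usage: model.fit(xs, ys, {epochs: 10, callbacks: [new StopAfterTwoEpochs()]})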

@@ -912,2 +989,200 @@ createDenseModelAndData();

});
describeMathCPUAndGPU('Model.fit: No memory leak', function () {
var inputSize = 4;
var numSamples = 5;
var inputTensor = tfl.layers.input({ shape: [inputSize], name: 'inputLayer1', dtype: 'float32' });
var model;
var inputs;
var targets;
var valInputs;
var valTargets;
function createDenseModelAndData(useBias, kernelRegularizer, biasRegularizer) {
if (useBias === void 0) { useBias = false; }
var layer = tfl.layers.dense({ units: 1, useBias: useBias, kernelInitializer: 'ones', kernelRegularizer: kernelRegularizer });
var output = layer.apply(inputTensor);
model = new tfl.Model({ inputs: [inputTensor], outputs: [output] });
inputs = ones([numSamples, inputSize]);
targets = ones([numSamples, 1]);
valInputs = zeros([numSamples, inputSize]);
valTargets = zeros([numSamples, 1]);
}
it('Repeated fit calls leads to no memory leak: no validation or metrics', function (done) { return __awaiter(_this, void 0, void 0, function () {
var numTensors0, i, numTensorsNow;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
createDenseModelAndData();
model.compile({ optimizer: 'SGD', loss: 'meanSquaredError' });
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1 })];
case 1:
_a.sent();
numTensors0 = memory().numTensors;
i = 0;
_a.label = 2;
case 2:
if (!(i < 2)) return [3, 5];
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1 })];
case 3:
_a.sent();
numTensorsNow = memory().numTensors;
if (numTensorsNow > numTensors0) {
done.fail("Memory leak detected during fit(): Leaked " +
(numTensorsNow - numTensors0 + " tensor(s) after the ") +
(i + 1 + "-th fit() call."));
}
else {
done();
}
_a.label = 4;
case 4:
++i;
return [3, 2];
case 5: return [2];
}
});
}); });
it('Repeated fit calls leads to no memory leak: with metrics', function (done) { return __awaiter(_this, void 0, void 0, function () {
var numTensors0, i, numTensorsNow;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
createDenseModelAndData();
model.compile({ optimizer: 'SGD', loss: 'meanSquaredError', metrics: ['mse'] });
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1 })];
case 1:
_a.sent();
numTensors0 = memory().numTensors;
i = 0;
_a.label = 2;
case 2:
if (!(i < 2)) return [3, 5];
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1 })];
case 3:
_a.sent();
numTensorsNow = memory().numTensors;
if (numTensorsNow > numTensors0) {
done.fail("Memory leak detected during fit(): Leaked " +
(numTensorsNow - numTensors0 + " tensor(s) after the ") +
(i + 1 + "-th fit() call."));
}
else {
done();
}
_a.label = 4;
case 4:
++i;
return [3, 2];
case 5: return [2];
}
});
}); });
it('Repeated fit calls leads to no memory leak: validationSplit', function (done) { return __awaiter(_this, void 0, void 0, function () {
var validationSplit, numTensors0, i, numTensorsNow;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
createDenseModelAndData();
validationSplit = 0.4;
model.compile({ optimizer: 'SGD', loss: 'meanSquaredError' });
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1, validationSplit: validationSplit })];
case 1:
_a.sent();
numTensors0 = memory().numTensors;
i = 0;
_a.label = 2;
case 2:
if (!(i < 2)) return [3, 5];
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1, validationSplit: validationSplit })];
case 3:
_a.sent();
numTensorsNow = memory().numTensors;
if (numTensorsNow > numTensors0) {
done.fail("Memory leak detected during fit(): Leaked " +
(numTensorsNow - numTensors0 + " tensor(s) after the ") +
(i + 1 + "-th fit() call."));
}
else {
done();
}
_a.label = 4;
case 4:
++i;
return [3, 2];
case 5: return [2];
}
});
}); });
it('Repeated fit calls leads to no memory leak: validationData', function (done) { return __awaiter(_this, void 0, void 0, function () {
var validationData, numTensors0, i, numTensorsNow;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
createDenseModelAndData();
validationData = [valInputs, valTargets];
model.compile({ optimizer: 'SGD', loss: 'meanSquaredError' });
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1, validationData: validationData })];
case 1:
_a.sent();
numTensors0 = memory().numTensors;
i = 0;
_a.label = 2;
case 2:
if (!(i < 2)) return [3, 5];
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1, validationData: validationData })];
case 3:
_a.sent();
numTensorsNow = memory().numTensors;
if (numTensorsNow > numTensors0) {
done.fail("Memory leak detected during fit(): Leaked " +
(numTensorsNow - numTensors0 + " tensor(s) after the ") +
(i + 1 + "-th fit() call."));
}
else {
done();
}
_a.label = 4;
case 4:
++i;
return [3, 2];
case 5: return [2];
}
});
}); });
it('Repeated fit calls leads to no memory leak: metrics & validationSplit', function (done) { return __awaiter(_this, void 0, void 0, function () {
var validationSplit, numTensors0, i, numTensorsNow;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
createDenseModelAndData();
validationSplit = 0.4;
model.compile({ optimizer: 'SGD', loss: 'meanSquaredError', metrics: ['mse'] });
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1, validationSplit: validationSplit })];
case 1:
_a.sent();
numTensors0 = memory().numTensors;
i = 0;
_a.label = 2;
case 2:
if (!(i < 2)) return [3, 5];
return [4, model.fit(inputs, targets, { batchSize: numSamples, epochs: 1, validationSplit: validationSplit })];
case 3:
_a.sent();
numTensorsNow = memory().numTensors;
if (numTensorsNow > numTensors0) {
done.fail("Memory leak detected during fit(): Leaked " +
(numTensorsNow - numTensors0 + " tensor(s) after the ") +
(i + 1 + "-th fit() call."));
}
else {
done();
}
_a.label = 4;
case 4:
++i;
return [3, 2];
case 5: return [2];
}
});
}); });
});
describeMathCPUAndGPU('Model.evaluate', function () {

@@ -914,0 +1189,0 @@ var numExamples = 8;
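
Each of the leak tests above follows the same recipe: one warm-up fit() call (the first call allocates persistent state), a snapshot of memory().numTensors, then an assertion that further fit() calls leave the count unchanged. Condensed into a standalone sketch:

import { memory, ones } from '@tensorflow/tfjs-core';
import * as tfl from '@tensorflow/tfjs-layers';

async function assertFitDoesNotLeak() {
  const model = tfl.sequential();
  model.add(tfl.layers.dense({units: 1, inputShape: [4]}));
  model.compile({optimizer: 'SGD', loss: 'meanSquaredError'});
  const xs = ones([5, 4]);
  const ys = ones([5, 1]);
  await model.fit(xs, ys, {batchSize: 5, epochs: 1});  // warm-up
  const numTensors0 = memory().numTensors;
  await model.fit(xs, ys, {batchSize: 5, epochs: 1});
  if (memory().numTensors > numTensors0) {
    throw new Error('Memory leak detected during fit()');
  }
}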

@@ -77,2 +77,3 @@ import { io, Optimizer, Scalar, Tensor, Tensor1D } from '@tensorflow/tfjs-core';

history: History;
stopTraining: boolean;
metrics: string[] | {

@@ -79,0 +80,0 @@ [outputName: string]: string;

dist-es6/engine/training.js

@@ -634,3 +634,3 @@ var __extends = (this && this.__extends) || (function () {

var _this = this;
var doValidation, numTrainSamples, indexArray, callbackList, _loop_4, epoch;
var doValidation, numTrainSamples, indexArray, callbackList, _loop_4, this_1, epoch, state_1;
return __generator(this, function (_a) {

@@ -689,4 +689,5 @@ switch (_a.label) {

_a.sent();
this.stopTraining = false;
_loop_4 = function (epoch) {
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex;
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex, state_2;
return __generator(this, function (_a) {

@@ -748,2 +749,5 @@ switch (_a.label) {

disposeTensorsInLogs(batchLogs);
if (this_1.stopTraining) {
return [2, "break"];
}
return [2];

@@ -759,3 +763,5 @@ }

case 4:
_a.sent();
state_2 = _a.sent();
if (state_2 === "break")
return [3, 6];
_a.label = 5;

@@ -771,2 +777,5 @@ case 5:

_a.sent();
if (this_1.stopTraining) {
return [2, "break"];
}
return [2];

@@ -776,2 +785,3 @@ }

};
this_1 = this;
epoch = initialEpoch;

@@ -783,3 +793,5 @@ _a.label = 2;

case 3:
_a.sent();
state_1 = _a.sent();
if (state_1 === "break")
return [3, 5];
_a.label = 4;
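
The generator-state edits above (this_1, state_1, state_2, and the [2, "break"] returns) are the compiled form of early loop exits inside async code. A rough source-level reconstruction (an assumption, not the published TypeScript):

// inside Model.fitLoop (reconstruction):
this.stopTraining = false;  // reset at the start of each fit() call
for (let epoch = initialEpoch; epoch < epochs; ++epoch) {
  for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) {
    // ... train on one batch, run onBatchEnd callbacks ...
    if (this.stopTraining) break;  // emitted as: return [2, "break"]
  }
  // ... run onEpochEnd callbacks ...
  if (this.stopTraining) break;
}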

@@ -889,103 +901,116 @@ case 4:

var _this = this;
var batchSize, standardizedOuts, inputs, targets, doValidation, valX, valY, valIns, valStandardized, splitAt, originalBatchSize, ins, trainFunction, outLabels, valFunction, callbackMetrics, callbacks;
var batchSize, standardizedOuts, inputs, targets, doValidation, valX, valY, valIns, needValidationDisposal, valStandardized, splitAt, originalBatchSize, ins, trainFunction, outLabels, valFunction, callbackMetrics, callbacks, out;
return __generator(this, function (_a) {
batchSize = config.batchSize == null ? 32 : config.batchSize;
standardizedOuts = this.standardizeUserData(x, y, false, batchSize);
inputs = standardizedOuts[0];
targets = standardizedOuts[1];
doValidation = false;
if (config.validationData != null && config.validationData.length > 0) {
doValidation = true;
if (config.validationData.length === 2) {
valX = config.validationData[0];
valY = config.validationData[1];
}
else if (config.validationData.length === 3) {
throw new NotImplementedError('validationData including sample weights is not supported yet.');
}
else {
throw new ValueError("When passing validation data, it must contain 2 (valX, valY) " +
"or 3 (valX, valY, valSampleWeight) items; " +
(config.validationData + " is invalid."));
}
valStandardized = this.standardizeUserData(valX, valY, true, batchSize);
valX = valStandardized[0];
valY = valStandardized[1];
valIns = valX.concat(valY);
}
else if (config.validationSplit != null && config.validationSplit > 0 &&
config.validationSplit < 1) {
doValidation = true;
splitAt = Math.floor(inputs[0].shape[0] * (1 - config.validationSplit));
originalBatchSize = inputs[0].shape[0];
valX = sliceArrays(inputs, splitAt, originalBatchSize);
inputs = sliceArrays(inputs, 0, splitAt);
valY = sliceArrays(targets, splitAt, originalBatchSize);
targets = sliceArrays(targets, 0, splitAt);
valIns = valX.concat(valY);
}
else if (config.validationSteps != null) {
doValidation = true;
}
ins = inputs.concat(targets);
this.checkTrainableWeightsConsistency();
trainFunction = function (data) {
var losses = [];
var lossValues = [];
var inputs = data.slice(0, _this.inputs.length);
var targets = data.slice(_this.inputs.length, _this.inputs.length + _this.outputs.length);
var metricsValues = [];
var totalLossFunction = function () {
var feeds = [];
for (var i = 0; i < _this.inputs.length; ++i) {
feeds.push({ key: _this.inputs[i], value: inputs[i] });
}
var feedDict = new FeedDict(feeds);
var outputs = execute(_this.outputs, feedDict, { 'training': true });
var totalLoss;
for (var i = 0; i < _this.lossFunctions.length; ++i) {
var lossFunction = _this.lossFunctions[i];
var loss = lossFunction(targets[i], outputs[i]);
losses.push(loss);
var meanLoss = tfc.mean(loss);
lossValues.push(meanLoss);
if (i === 0) {
totalLoss = loss;
switch (_a.label) {
case 0:
batchSize = config.batchSize == null ? 32 : config.batchSize;
standardizedOuts = this.standardizeUserData(x, y, false, batchSize);
inputs = standardizedOuts[0];
targets = standardizedOuts[1];
doValidation = false;
needValidationDisposal = false;
if (config.validationData != null && config.validationData.length > 0) {
doValidation = true;
if (config.validationData.length === 2) {
valX = config.validationData[0];
valY = config.validationData[1];
}
else if (config.validationData.length === 3) {
throw new NotImplementedError('validationData including sample weights is not supported yet.');
}
else {
totalLoss = tfc.add(totalLoss, loss);
throw new ValueError("When passing validation data, it must contain 2 (valX, valY) " +
"or 3 (valX, valY, valSampleWeight) items; " +
(config.validationData + " is invalid."));
}
valStandardized = this.standardizeUserData(valX, valY, true, batchSize);
valX = valStandardized[0];
valY = valStandardized[1];
valIns = valX.concat(valY);
}
for (var i = 0; i < _this.metricsTensors.length; ++i) {
var metric = _this.metricsTensors[i][0];
var outputIndex = _this.metricsTensors[i][1];
var meanMetric = tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));
tfc.keep(meanMetric);
metricsValues.push(meanMetric);
else if (config.validationSplit != null && config.validationSplit > 0 &&
config.validationSplit < 1) {
doValidation = true;
splitAt = Math.floor(inputs[0].shape[0] * (1 - config.validationSplit));
originalBatchSize = inputs[0].shape[0];
valX = sliceArrays(inputs, splitAt, originalBatchSize);
inputs = sliceArrays(inputs, 0, splitAt);
valY = sliceArrays(targets, splitAt, originalBatchSize);
targets = sliceArrays(targets, 0, splitAt);
needValidationDisposal = true;
valIns = valX.concat(valY);
}
totalLoss = tfc.mean(totalLoss);
_this.calculateLosses().forEach(function (regularizerLoss) {
totalLoss = tfc.add(totalLoss, regularizerLoss);
});
return totalLoss;
};
var variables = _this.collectedTrainableWeights.map(function (param) { return param.read(); });
var returnCost = true;
var totalLossValue = _this.optimizer.minimize(totalLossFunction, returnCost, variables);
return [totalLossValue].concat(metricsValues);
};
outLabels = this.getDedupedMetricsNames();
if (doValidation) {
this.makeTestFunction();
valFunction = this.testFunction;
callbackMetrics =
outLabels.slice().concat(outLabels.map(function (n) { return 'val_' + n; }));
else if (config.validationSteps != null) {
doValidation = true;
}
ins = inputs.concat(targets);
this.checkTrainableWeightsConsistency();
trainFunction = function (data) {
var losses = [];
var lossValues = [];
var inputs = data.slice(0, _this.inputs.length);
var targets = data.slice(_this.inputs.length, _this.inputs.length + _this.outputs.length);
var metricsValues = [];
var totalLossFunction = function () {
var feeds = [];
for (var i = 0; i < _this.inputs.length; ++i) {
feeds.push({ key: _this.inputs[i], value: inputs[i] });
}
var feedDict = new FeedDict(feeds);
var outputs = execute(_this.outputs, feedDict, { 'training': true });
var totalLoss;
for (var i = 0; i < _this.lossFunctions.length; ++i) {
var lossFunction = _this.lossFunctions[i];
var loss = lossFunction(targets[i], outputs[i]);
losses.push(loss);
var meanLoss = tfc.mean(loss);
lossValues.push(meanLoss);
if (i === 0) {
totalLoss = loss;
}
else {
totalLoss = tfc.add(totalLoss, loss);
}
}
for (var i = 0; i < _this.metricsTensors.length; ++i) {
var metric = _this.metricsTensors[i][0];
var outputIndex = _this.metricsTensors[i][1];
var meanMetric = tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));
tfc.keep(meanMetric);
metricsValues.push(meanMetric);
}
totalLoss = tfc.mean(totalLoss);
_this.calculateLosses().forEach(function (regularizerLoss) {
totalLoss = tfc.add(totalLoss, regularizerLoss);
});
return totalLoss;
};
var variables = _this.collectedTrainableWeights.map(function (param) { return param.read(); });
var returnCost = true;
var totalLossValue = _this.optimizer.minimize(totalLossFunction, returnCost, variables);
return [totalLossValue].concat(metricsValues);
};
outLabels = this.getDedupedMetricsNames();
if (doValidation) {
this.makeTestFunction();
valFunction = this.testFunction;
callbackMetrics =
outLabels.slice().concat(outLabels.map(function (n) { return 'val_' + n; }));
}
else {
valFunction = null;
valIns = [];
callbackMetrics = outLabels.slice();
}
callbacks = standardizeCallbacks(config.callbacks);
return [4, this.fitLoop(trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose, callbacks, valFunction, valIns, config.shuffle, callbackMetrics, null, null, null)];
case 1:
out = _a.sent();
if (needValidationDisposal) {
valIns.forEach(function (tensor) { return tensor.dispose(); });
inputs.forEach(function (tensor) { return tensor.dispose(); });
targets.forEach(function (tensor) { return tensor.dispose(); });
}
return [2, out];
}
else {
valFunction = null;
valIns = [];
callbackMetrics = outLabels.slice();
}
callbacks = standardizeCallbacks(config.callbacks);
return [2, this.fitLoop(trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose, callbacks, valFunction, valIns, config.shuffle, callbackMetrics, null, null, null)];
});

@@ -992,0 +1017,0 @@ });
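
Behavioral summary of this hunk: fit() now awaits fitLoop (hence the new case 0 / case 1 generator structure and the out variable) and, when validationSplit carved validation slices out of the inputs, sets needValidationDisposal and disposes those slices afterwards. Reconstructed source for the new tail of fit() (an assumption, mirroring the compiled lines above):

const out = await this.fitLoop(
    trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose,
    callbacks, valFunction, valIns, config.shuffle, callbackMetrics,
    null, null, null);
if (needValidationDisposal) {
  valIns.forEach(tensor => tensor.dispose());
  inputs.forEach(tensor => tensor.dispose());
  targets.forEach(tensor => tensor.dispose());
}
return out;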

@@ -7,3 +7,3 @@ import { io, Tensor } from '@tensorflow/tfjs-core';

import { ELULayerConfig, LeakyReLULayerConfig, SoftmaxLayerConfig, ThresholdedReLULayerConfig } from './layers/advanced_activations';
import { ConvLayerConfig, Cropping2DLayerConfig, SeparableConvLayerConfig } from './layers/convolutional';
import { ConvLayerConfig, Cropping2DLayerConfig, SeparableConvLayerConfig, UpSampling2DLayerConfig } from './layers/convolutional';
import { DepthwiseConv2DLayerConfig } from './layers/convolutional_depthwise';

@@ -42,2 +42,3 @@ import { ActivationLayerConfig, DenseLayerConfig, DropoutLayerConfig, RepeatVectorLayerConfig, ReshapeLayerConfig } from './layers/core';

static cropping2D(config: Cropping2DLayerConfig): Layer;
static upSampling2d(config: UpSampling2DLayerConfig): Layer;
static depthwiseConv2d(config: DepthwiseConv2DLayerConfig): Layer;

@@ -44,0 +45,0 @@ static activation(config: ActivationLayerConfig): Layer;

@@ -13,3 +13,3 @@ var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {

import { ELU, LeakyReLU, Softmax, ThresholdedReLU } from './layers/advanced_activations';
import { Conv1D, Conv2D, Conv2DTranspose, Cropping2D, SeparableConv2D } from './layers/convolutional';
import { Conv1D, Conv2D, Conv2DTranspose, Cropping2D, SeparableConv2D, UpSampling2D } from './layers/convolutional';
import { DepthwiseConv2D } from './layers/convolutional_depthwise';

@@ -100,2 +100,5 @@ import { Activation, Dense, Dropout, Flatten, RepeatVector, Reshape } from './layers/core';

};
LayerExports.upSampling2d = function (config) {
return new UpSampling2D(config);
};
LayerExports.depthwiseConv2d = function (config) {

@@ -314,2 +317,11 @@ return new DepthwiseConv2D(config);

namespace: 'layers',
useDocsFrom: 'UpSampling2D',
configParamIndices: [0]
})
], LayerExports, "upSampling2d", null);
__decorate([
doc({
heading: 'Layers',
subheading: 'Convolutional',
namespace: 'layers',
useDocsFrom: 'DepthwiseConv2D',

@@ -316,0 +328,0 @@ configParamIndices: [0]

@@ -928,2 +928,88 @@ import * as tfc from '@tensorflow/tfjs-core';

});
describeMathCPU('UpSampling2D Layer: Symbolic', function () {
var dataFormats = ['channelsFirst', 'channelsLast'];
var sizes = [undefined, [2, 2]];
var _loop_41 = function (dataFormat) {
var _loop_42 = function (size) {
var testTitle = "size=" + size + ", " + dataFormat;
it(testTitle, function () {
var inputShape = dataFormat === 'channelsFirst' ? [2, 16, 11, 9] : [2, 11, 9, 16];
var symbolicInput = new tfl.SymbolicTensor('float32', inputShape, null, [], null);
var upSampling2dLayer = tfl.layers.upSampling2d({
size: size,
dataFormat: dataFormat,
});
var output = upSampling2dLayer.apply(symbolicInput);
var outputRows;
var outputCols;
if (size === undefined) {
outputRows = 2;
outputCols = 2;
}
else {
outputRows = size[0];
outputCols = size[1];
}
var expectedShape;
if (dataFormat === 'channelsFirst') {
outputRows *= inputShape[2];
outputCols *= inputShape[3];
expectedShape = [2, 16, outputRows, outputCols];
}
else {
outputRows *= inputShape[1];
outputCols *= inputShape[2];
expectedShape = [2, outputRows, outputCols, 16];
}
expect(output.shape).toEqual(expectedShape);
});
};
for (var _i = 0, sizes_1 = sizes; _i < sizes_1.length; _i++) {
var size = sizes_1[_i];
_loop_42(size);
}
};
for (var _i = 0, dataFormats_8 = dataFormats; _i < dataFormats_8.length; _i++) {
var dataFormat = dataFormats_8[_i];
_loop_41(dataFormat);
}
});
describe('UpSampling2D Layer', function () {
it('check with default values', function () {
var layer = tfl.layers.upSampling2d({});
var x = tensor4d([
[[[1], [2]], [[3], [4]]],
], [1, 2, 2, 1]);
var y = tensor4d([
[
[[1], [1], [2], [2]], [[1], [1], [2], [2]], [[3], [3], [4], [4]],
[[3], [3], [4], [4]]
],
], [1, 4, 4, 1]);
expectTensorsClose(layer.apply(x, null), y);
});
it('check with channels last', function () {
var layer = tfl.layers.upSampling2d({ size: [2, 2], dataFormat: 'channelsLast' });
var x = tensor4d([
[[[1], [2]], [[3], [4]]],
], [1, 2, 2, 1]);
var y = tensor4d([
[
[[1], [1], [2], [2]], [[1], [1], [2], [2]], [[3], [3], [4], [4]],
[[3], [3], [4], [4]]
],
], [1, 4, 4, 1]);
expectTensorsClose(layer.apply(x, null), y);
});
it('check with channels first', function () {
var layer = tfl.layers.upSampling2d({ size: [2, 2], dataFormat: 'channelsFirst' });
var x = tensor4d([
[[[1, 2], [3, 4]]],
], [1, 1, 2, 2]);
var y = tensor4d([
[[[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]],
], [1, 1, 4, 4]);
expectTensorsClose(layer.apply(x, null), y);
});
});
//# sourceMappingURL=convolutional_test.js.map

@@ -123,1 +123,15 @@ import { serialization, Tensor } from '@tensorflow/tfjs-core';

}
export interface UpSampling2DLayerConfig extends LayerConfig {
size?: number[];
dataFormat?: DataFormat;
}
export declare class UpSampling2D extends Layer {
static className: string;
protected readonly DEFAULT_SIZE: number[];
protected readonly size: number[];
protected readonly dataFormat: DataFormat;
constructor(config: UpSampling2DLayerConfig);
computeOutputShape(inputShape: Shape): Shape;
call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
getConfig(): serialization.ConfigDict;
}
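
UpSampling2DLayerConfig takes an optional size (default [2, 2]) and dataFormat (default 'channelsLast'). Usage sketch drawn from the tests in this diff:

import { Tensor, tensor4d } from '@tensorflow/tfjs-core';
import * as tfl from '@tensorflow/tfjs-layers';

const layer = tfl.layers.upSampling2d({size: [2, 2], dataFormat: 'channelsLast'});
const x = tensor4d([[[[1], [2]], [[3], [4]]]], [1, 2, 2, 1]);
const y = layer.apply(x) as Tensor;  // [1, 4, 4, 1]; nearest-neighbor repeats each pixel 2x2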

@@ -589,2 +589,55 @@ var __extends = (this && this.__extends) || (function () {

serialization.SerializationMap.register(Cropping2D);
var UpSampling2D = (function (_super) {
__extends(UpSampling2D, _super);
function UpSampling2D(config) {
var _this = _super.call(this, config) || this;
_this.DEFAULT_SIZE = [2, 2];
_this.inputSpec = [{ ndim: 4 }];
_this.size = config.size === undefined ? _this.DEFAULT_SIZE : config.size;
_this.dataFormat =
config.dataFormat === undefined ? 'channelsLast' : config.dataFormat;
return _this;
}
UpSampling2D.prototype.computeOutputShape = function (inputShape) {
if (this.dataFormat === 'channelsFirst') {
var height = this.size[0] * inputShape[2];
var width = this.size[1] * inputShape[3];
return [inputShape[0], inputShape[1], height, width];
}
else {
var height = this.size[0] * inputShape[1];
var width = this.size[1] * inputShape[2];
return [inputShape[0], height, width, inputShape[3]];
}
};
UpSampling2D.prototype.call = function (inputs, kwargs) {
var _this = this;
return tfc.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var inputShape = input.shape;
if (_this.dataFormat === 'channelsFirst') {
input = tfc.transpose(input, [0, 2, 3, 1]);
var height = _this.size[0] * inputShape[2];
var width = _this.size[1] * inputShape[3];
var resized = input.resizeNearestNeighbor([height, width]);
return tfc.transpose(resized, [0, 3, 1, 2]);
}
else {
var height = _this.size[0] * inputShape[1];
var width = _this.size[1] * inputShape[2];
return input.resizeNearestNeighbor([height, width]);
}
});
};
UpSampling2D.prototype.getConfig = function () {
var config = { size: this.size, dataFormat: this.dataFormat };
var baseConfig = _super.prototype.getConfig.call(this);
Object.assign(config, baseConfig);
return config;
};
UpSampling2D.className = 'UpSampling2D';
return UpSampling2D;
}(Layer));
export { UpSampling2D };
serialization.SerializationMap.register(UpSampling2D);
//# sourceMappingURL=convolutional.js.map
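
Note the channelsFirst path in call(): resizeNearestNeighbor operates on NHWC data, so the layer transposes to NHWC, resizes, and transposes back. The shape arithmetic, checked against computeOutputShape (illustrative values):

import * as tfl from '@tensorflow/tfjs-layers';

const up = tfl.layers.upSampling2d({size: [2, 2], dataFormat: 'channelsFirst'});
// [batch, channels, h, w] -> [batch, channels, size[0]*h, size[1]*w]
console.log(up.computeOutputShape([2, 16, 11, 9]));  // [2, 16, 22, 18]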

@@ -157,4 +157,5 @@ var __extends = (this && this.__extends) || (function () {

var _this = _super.call(this, config) || this;
_this.forwardLayer = config.layer;
var layerConfig = config.layer.getConfig();
_this.forwardLayer =
deserialize({ className: config.layer.getClassName(), config: layerConfig });
layerConfig['goBackwards'] =

@@ -161,0 +162,0 @@ layerConfig['goBackwards'] === true ? false : true;
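
The change above fixes layer sharing in Bidirectional: 0.6.2 stored the caller's layer object directly as forwardLayer and then flipped goBackwards on a config derived from it; 0.6.3 first deserializes a fresh copy from the layer's own config, so the instance passed in config.layer is no longer mutated or shared with the wrapper.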

@@ -275,3 +275,36 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {

});
it('Call predict() and fit() after load: Bidirectional LSTM', function (done) {
var model = tfl.sequential();
var lstmUnits = 3;
var sequenceLength = 4;
var inputDims = 5;
model.add(tfl.layers.bidirectional({
layer: tfl.layers.lstm({ units: lstmUnits }),
mergeMode: 'concat',
inputShape: [sequenceLength, inputDims]
}));
var x = randomNormal([2, 4, 5]);
var y = model.predict(x);
var path = "testModel" + new Date().getTime() + "_" + Math.random();
var url = "indexeddb://" + path;
model.save(url)
.then(function (saveResult) {
tfl.loadModel(url)
.then(function (modelPrime) {
var yPrime = modelPrime.predict(x);
expectTensorsClose(y, yPrime);
modelPrime.compile({ optimizer: 'sgd', loss: 'meanSquaredError' });
var trainExamples = 2;
modelPrime
.fit(randomNormal([trainExamples, sequenceLength, inputDims]), randomNormal([trainExamples, lstmUnits * 2]), { epochs: 2 })
.then(function (history) {
done();
})
.catch(function (err) { return done.fail(err.stack); });
})
.catch(function (err) { return done.fail(err.stack); });
})
.catch(function (err) { return done.fail(err.stack); });
});
});
//# sourceMappingURL=model_save_test.js.map
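
Condensed sketch of the round trip the test performs (the layer passed to bidirectional may need a cast to RNN under strict TypeScript; the IndexedDB URL is illustrative):

import { randomNormal } from '@tensorflow/tfjs-core';
import * as tfl from '@tensorflow/tfjs-layers';

async function saveLoadRoundTrip() {
  const model = tfl.sequential();
  model.add(tfl.layers.bidirectional({
    layer: tfl.layers.lstm({units: 3}),
    mergeMode: 'concat',
    inputShape: [4, 5],
  }));
  const url = 'indexeddb://testModel' + Date.now();
  await model.save(url);
  const modelPrime = await tfl.loadModel(url);
  modelPrime.predict(randomNormal([2, 4, 5]));  // matches model.predict on the same input
}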

@@ -28,3 +28,3 @@ import { io, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';

call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
build(inputShape?: Shape): void;
build(inputShape?: Shape | Shape[]): void;
setWeights(weights: Tensor[]): void;

@@ -31,0 +31,0 @@ updatable: boolean;

@@ -263,2 +263,3 @@ var __extends = (this && this.__extends) || (function () {

Sequential.prototype.build = function (inputShape) {
generic_utils.getExactlyOneShape(inputShape);
if (this.inputs.length === 0 || this.outputs.length === 0) {

@@ -265,0 +266,0 @@ throw new TypeError('Sequential model cannot be built: model is empty.' +
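
Sequential.build now accepts Shape | Shape[], matching Layer.build, and the added generic_utils.getExactlyOneShape call collapses the one-element array form. A sketch (illustrative shapes; both calls below should be equivalent under the new signature):

import * as tfl from '@tensorflow/tfjs-layers';

const model = tfl.sequential();
model.add(tfl.layers.dense({units: 1, inputShape: [4]}));
model.build([1, 4]);    // a single Shape
model.build([[1, 4]]);  // a one-element Shape[], collapsed by getExactlyOneShape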

@@ -1,2 +0,2 @@

declare const version = "0.6.2";
declare const version = "0.6.3";
export { version };

@@ -1,3 +0,3 @@

var version = '0.6.2';
var version = '0.6.3';
export { version };
//# sourceMappingURL=version.js.map

@@ -345,2 +345,3 @@ "use strict";

K.scalarTimesArray(tfjs_core_1.div(K.getScalar(1), K.getScalar(_this.seen)), _this.totals[key]);
_this.totals[key].dispose();
tfjs_core_1.keep(logs[key]);

@@ -347,0 +348,0 @@ });

@@ -77,2 +77,3 @@ import { io, Optimizer, Scalar, Tensor, Tensor1D } from '@tensorflow/tfjs-core';

history: History;
stopTraining: boolean;
metrics: string[] | {

@@ -79,0 +80,0 @@ [outputName: string]: string;

@@ -643,3 +643,3 @@ "use strict";

var _this = this;
var doValidation, numTrainSamples, indexArray, callbackList, _loop_4, epoch;
var doValidation, numTrainSamples, indexArray, callbackList, _loop_4, this_1, epoch, state_1;
return __generator(this, function (_a) {

@@ -698,4 +698,5 @@ switch (_a.label) {

_a.sent();
this.stopTraining = false;
_loop_4 = function (epoch) {
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex;
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex, state_2;
return __generator(this, function (_a) {

@@ -757,2 +758,5 @@ switch (_a.label) {

callbacks_1.disposeTensorsInLogs(batchLogs);
if (this_1.stopTraining) {
return [2, "break"];
}
return [2];

@@ -768,3 +772,5 @@ }

case 4:
_a.sent();
state_2 = _a.sent();
if (state_2 === "break")
return [3, 6];
_a.label = 5;

@@ -780,2 +786,5 @@ case 5:

_a.sent();
if (this_1.stopTraining) {
return [2, "break"];
}
return [2];

@@ -785,2 +794,3 @@ }

};
this_1 = this;
epoch = initialEpoch;

@@ -792,3 +802,5 @@ _a.label = 2;

case 3:
_a.sent();
state_1 = _a.sent();
if (state_1 === "break")
return [3, 5];
_a.label = 4;

@@ -898,103 +910,116 @@ case 4:

var _this = this;
var batchSize, standardizedOuts, inputs, targets, doValidation, valX, valY, valIns, valStandardized, splitAt, originalBatchSize, ins, trainFunction, outLabels, valFunction, callbackMetrics, callbacks;
var batchSize, standardizedOuts, inputs, targets, doValidation, valX, valY, valIns, needValidationDisposal, valStandardized, splitAt, originalBatchSize, ins, trainFunction, outLabels, valFunction, callbackMetrics, callbacks, out;
return __generator(this, function (_a) {
batchSize = config.batchSize == null ? 32 : config.batchSize;
standardizedOuts = this.standardizeUserData(x, y, false, batchSize);
inputs = standardizedOuts[0];
targets = standardizedOuts[1];
doValidation = false;
if (config.validationData != null && config.validationData.length > 0) {
doValidation = true;
if (config.validationData.length === 2) {
valX = config.validationData[0];
valY = config.validationData[1];
}
else if (config.validationData.length === 3) {
throw new errors_1.NotImplementedError('validationData including sample weights is not supported yet.');
}
else {
throw new errors_1.ValueError("When passing validation data, it must contain 2 (valX, valY) " +
"or 3 (valX, valY, valSampleWeight) items; " +
(config.validationData + " is invalid."));
}
valStandardized = this.standardizeUserData(valX, valY, true, batchSize);
valX = valStandardized[0];
valY = valStandardized[1];
valIns = valX.concat(valY);
}
else if (config.validationSplit != null && config.validationSplit > 0 &&
config.validationSplit < 1) {
doValidation = true;
splitAt = Math.floor(inputs[0].shape[0] * (1 - config.validationSplit));
originalBatchSize = inputs[0].shape[0];
valX = sliceArrays(inputs, splitAt, originalBatchSize);
inputs = sliceArrays(inputs, 0, splitAt);
valY = sliceArrays(targets, splitAt, originalBatchSize);
targets = sliceArrays(targets, 0, splitAt);
valIns = valX.concat(valY);
}
else if (config.validationSteps != null) {
doValidation = true;
}
ins = inputs.concat(targets);
this.checkTrainableWeightsConsistency();
trainFunction = function (data) {
var losses = [];
var lossValues = [];
var inputs = data.slice(0, _this.inputs.length);
var targets = data.slice(_this.inputs.length, _this.inputs.length + _this.outputs.length);
var metricsValues = [];
var totalLossFunction = function () {
var feeds = [];
for (var i = 0; i < _this.inputs.length; ++i) {
feeds.push({ key: _this.inputs[i], value: inputs[i] });
}
var feedDict = new executor_1.FeedDict(feeds);
var outputs = executor_1.execute(_this.outputs, feedDict, { 'training': true });
var totalLoss;
for (var i = 0; i < _this.lossFunctions.length; ++i) {
var lossFunction = _this.lossFunctions[i];
var loss = lossFunction(targets[i], outputs[i]);
losses.push(loss);
var meanLoss = tfc.mean(loss);
lossValues.push(meanLoss);
if (i === 0) {
totalLoss = loss;
switch (_a.label) {
case 0:
batchSize = config.batchSize == null ? 32 : config.batchSize;
standardizedOuts = this.standardizeUserData(x, y, false, batchSize);
inputs = standardizedOuts[0];
targets = standardizedOuts[1];
doValidation = false;
needValidationDisposal = false;
if (config.validationData != null && config.validationData.length > 0) {
doValidation = true;
if (config.validationData.length === 2) {
valX = config.validationData[0];
valY = config.validationData[1];
}
else if (config.validationData.length === 3) {
throw new errors_1.NotImplementedError('validationData including sample weights is not supported yet.');
}
else {
totalLoss = tfc.add(totalLoss, loss);
throw new errors_1.ValueError("When passing validation data, it must contain 2 (valX, valY) " +
"or 3 (valX, valY, valSampleWeight) items; " +
(config.validationData + " is invalid."));
}
valStandardized = this.standardizeUserData(valX, valY, true, batchSize);
valX = valStandardized[0];
valY = valStandardized[1];
valIns = valX.concat(valY);
}
for (var i = 0; i < _this.metricsTensors.length; ++i) {
var metric = _this.metricsTensors[i][0];
var outputIndex = _this.metricsTensors[i][1];
var meanMetric = tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));
tfc.keep(meanMetric);
metricsValues.push(meanMetric);
else if (config.validationSplit != null && config.validationSplit > 0 &&
config.validationSplit < 1) {
doValidation = true;
splitAt = Math.floor(inputs[0].shape[0] * (1 - config.validationSplit));
originalBatchSize = inputs[0].shape[0];
valX = sliceArrays(inputs, splitAt, originalBatchSize);
inputs = sliceArrays(inputs, 0, splitAt);
valY = sliceArrays(targets, splitAt, originalBatchSize);
targets = sliceArrays(targets, 0, splitAt);
needValidationDisposal = true;
valIns = valX.concat(valY);
}
totalLoss = tfc.mean(totalLoss);
_this.calculateLosses().forEach(function (regularizerLoss) {
totalLoss = tfc.add(totalLoss, regularizerLoss);
});
return totalLoss;
};
var variables = _this.collectedTrainableWeights.map(function (param) { return param.read(); });
var returnCost = true;
var totalLossValue = _this.optimizer.minimize(totalLossFunction, returnCost, variables);
return [totalLossValue].concat(metricsValues);
};
outLabels = this.getDedupedMetricsNames();
if (doValidation) {
this.makeTestFunction();
valFunction = this.testFunction;
callbackMetrics =
outLabels.slice().concat(outLabels.map(function (n) { return 'val_' + n; }));
else if (config.validationSteps != null) {
doValidation = true;
}
ins = inputs.concat(targets);
this.checkTrainableWeightsConsistency();
trainFunction = function (data) {
var losses = [];
var lossValues = [];
var inputs = data.slice(0, _this.inputs.length);
var targets = data.slice(_this.inputs.length, _this.inputs.length + _this.outputs.length);
var metricsValues = [];
var totalLossFunction = function () {
var feeds = [];
for (var i = 0; i < _this.inputs.length; ++i) {
feeds.push({ key: _this.inputs[i], value: inputs[i] });
}
var feedDict = new executor_1.FeedDict(feeds);
var outputs = executor_1.execute(_this.outputs, feedDict, { 'training': true });
var totalLoss;
for (var i = 0; i < _this.lossFunctions.length; ++i) {
var lossFunction = _this.lossFunctions[i];
var loss = lossFunction(targets[i], outputs[i]);
losses.push(loss);
var meanLoss = tfc.mean(loss);
lossValues.push(meanLoss);
if (i === 0) {
totalLoss = loss;
}
else {
totalLoss = tfc.add(totalLoss, loss);
}
}
for (var i = 0; i < _this.metricsTensors.length; ++i) {
var metric = _this.metricsTensors[i][0];
var outputIndex = _this.metricsTensors[i][1];
var meanMetric = tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));
tfc.keep(meanMetric);
metricsValues.push(meanMetric);
}
totalLoss = tfc.mean(totalLoss);
_this.calculateLosses().forEach(function (regularizerLoss) {
totalLoss = tfc.add(totalLoss, regularizerLoss);
});
return totalLoss;
};
var variables = _this.collectedTrainableWeights.map(function (param) { return param.read(); });
var returnCost = true;
var totalLossValue = _this.optimizer.minimize(totalLossFunction, returnCost, variables);
return [totalLossValue].concat(metricsValues);
};
outLabels = this.getDedupedMetricsNames();
if (doValidation) {
this.makeTestFunction();
valFunction = this.testFunction;
callbackMetrics =
outLabels.slice().concat(outLabels.map(function (n) { return 'val_' + n; }));
}
else {
valFunction = null;
valIns = [];
callbackMetrics = outLabels.slice();
}
callbacks = callbacks_1.standardizeCallbacks(config.callbacks);
return [4, this.fitLoop(trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose, callbacks, valFunction, valIns, config.shuffle, callbackMetrics, null, null, null)];
case 1:
out = _a.sent();
if (needValidationDisposal) {
valIns.forEach(function (tensor) { return tensor.dispose(); });
inputs.forEach(function (tensor) { return tensor.dispose(); });
targets.forEach(function (tensor) { return tensor.dispose(); });
}
return [2, out];
}
else {
valFunction = null;
valIns = [];
callbackMetrics = outLabels.slice();
}
callbacks = callbacks_1.standardizeCallbacks(config.callbacks);
return [2, this.fitLoop(trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose, callbacks, valFunction, valIns, config.shuffle, callbackMetrics, null, null, null)];
});

@@ -1001,0 +1026,0 @@ });

@@ -7,3 +7,3 @@ import { io, Tensor } from '@tensorflow/tfjs-core';

import { ELULayerConfig, LeakyReLULayerConfig, SoftmaxLayerConfig, ThresholdedReLULayerConfig } from './layers/advanced_activations';
import { ConvLayerConfig, Cropping2DLayerConfig, SeparableConvLayerConfig } from './layers/convolutional';
import { ConvLayerConfig, Cropping2DLayerConfig, SeparableConvLayerConfig, UpSampling2DLayerConfig } from './layers/convolutional';
import { DepthwiseConv2DLayerConfig } from './layers/convolutional_depthwise';

@@ -42,2 +42,3 @@ import { ActivationLayerConfig, DenseLayerConfig, DropoutLayerConfig, RepeatVectorLayerConfig, ReshapeLayerConfig } from './layers/core';

static cropping2D(config: Cropping2DLayerConfig): Layer;
static upSampling2d(config: UpSampling2DLayerConfig): Layer;
static depthwiseConv2d(config: DepthwiseConv2DLayerConfig): Layer;

@@ -44,0 +45,0 @@ static activation(config: ActivationLayerConfig): Layer;

@@ -101,2 +101,5 @@ "use strict";

};
LayerExports.upSampling2d = function (config) {
return new convolutional_1.UpSampling2D(config);
};
LayerExports.depthwiseConv2d = function (config) {

@@ -315,2 +318,11 @@ return new convolutional_depthwise_1.DepthwiseConv2D(config);

namespace: 'layers',
useDocsFrom: 'UpSampling2D',
configParamIndices: [0]
})
], LayerExports, "upSampling2d", null);
__decorate([
tfjs_core_1.doc({
heading: 'Layers',
subheading: 'Convolutional',
namespace: 'layers',
useDocsFrom: 'DepthwiseConv2D',

@@ -317,0 +329,0 @@ configParamIndices: [0]

@@ -123,1 +123,15 @@ import { serialization, Tensor } from '@tensorflow/tfjs-core';

}
export interface UpSampling2DLayerConfig extends LayerConfig {
size?: number[];
dataFormat?: DataFormat;
}
export declare class UpSampling2D extends Layer {
static className: string;
protected readonly DEFAULT_SIZE: number[];
protected readonly size: number[];
protected readonly dataFormat: DataFormat;
constructor(config: UpSampling2DLayerConfig);
computeOutputShape(inputShape: Shape): Shape;
call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
getConfig(): serialization.ConfigDict;
}

@@ -596,2 +596,55 @@ "use strict";

tfjs_core_1.serialization.SerializationMap.register(Cropping2D);
var UpSampling2D = (function (_super) {
__extends(UpSampling2D, _super);
function UpSampling2D(config) {
var _this = _super.call(this, config) || this;
_this.DEFAULT_SIZE = [2, 2];
_this.inputSpec = [{ ndim: 4 }];
_this.size = config.size === undefined ? _this.DEFAULT_SIZE : config.size;
_this.dataFormat =
config.dataFormat === undefined ? 'channelsLast' : config.dataFormat;
return _this;
}
UpSampling2D.prototype.computeOutputShape = function (inputShape) {
if (this.dataFormat === 'channelsFirst') {
var height = this.size[0] * inputShape[2];
var width = this.size[1] * inputShape[3];
return [inputShape[0], inputShape[1], height, width];
}
else {
var height = this.size[0] * inputShape[1];
var width = this.size[1] * inputShape[2];
return [inputShape[0], height, width, inputShape[3]];
}
};
UpSampling2D.prototype.call = function (inputs, kwargs) {
var _this = this;
return tfc.tidy(function () {
var input = generic_utils.getExactlyOneTensor(inputs);
var inputShape = input.shape;
if (_this.dataFormat === 'channelsFirst') {
input = tfc.transpose(input, [0, 2, 3, 1]);
var height = _this.size[0] * inputShape[2];
var width = _this.size[1] * inputShape[3];
var resized = input.resizeNearestNeighbor([height, width]);
return tfc.transpose(resized, [0, 3, 1, 2]);
}
else {
var height = _this.size[0] * inputShape[1];
var width = _this.size[1] * inputShape[2];
return input.resizeNearestNeighbor([height, width]);
}
});
};
UpSampling2D.prototype.getConfig = function () {
var config = { size: this.size, dataFormat: this.dataFormat };
var baseConfig = _super.prototype.getConfig.call(this);
Object.assign(config, baseConfig);
return config;
};
UpSampling2D.className = 'UpSampling2D';
return UpSampling2D;
}(topology_1.Layer));
exports.UpSampling2D = UpSampling2D;
tfjs_core_1.serialization.SerializationMap.register(UpSampling2D);
//# sourceMappingURL=convolutional.js.map

@@ -160,4 +160,5 @@ "use strict";

var _this = _super.call(this, config) || this;
_this.forwardLayer = config.layer;
var layerConfig = config.layer.getConfig();
_this.forwardLayer =
serialization_1.deserialize({ className: config.layer.getClassName(), config: layerConfig });
layerConfig['goBackwards'] =

@@ -164,0 +165,0 @@ layerConfig['goBackwards'] === true ? false : true;

@@ -28,3 +28,3 @@ import { io, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';

call(inputs: Tensor | Tensor[], kwargs: Kwargs): Tensor | Tensor[];
build(inputShape?: Shape): void;
build(inputShape?: Shape | Shape[]): void;
setWeights(weights: Tensor[]): void;

@@ -31,0 +31,0 @@ updatable: boolean;

@@ -269,2 +269,3 @@ "use strict";

Sequential.prototype.build = function (inputShape) {
generic_utils.getExactlyOneShape(inputShape);
if (this.inputs.length === 0 || this.outputs.length === 0) {

@@ -271,0 +272,0 @@ throw new TypeError('Sequential model cannot be built: model is empty.' +

@@ -1,2 +0,2 @@

declare const version = "0.6.2";
declare const version = "0.6.3";
export { version };

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var version = '0.6.2';
var version = '0.6.3';
exports.version = version;
//# sourceMappingURL=version.js.map

{
"name": "@tensorflow/tfjs-layers",
"version": "0.6.2",
"version": "0.6.3",
"description": "TensorFlow layers API in JavaScript",

@@ -11,3 +11,3 @@ "private": false,

"devDependencies": {
"@tensorflow/tfjs-core": "0.11.1",
"@tensorflow/tfjs-core": "0.11.4",
"@types/jasmine": "~2.5.53",

@@ -54,4 +54,4 @@ "browserify": "~16.1.0",

"peerDependencies": {
"@tensorflow/tfjs-core": "0.11.1"
"@tensorflow/tfjs-core": "0.11.4"
}
}
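
The devDependency and peerDependency on @tensorflow/tfjs-core both move from 0.11.1 to 0.11.4, so consumers of 0.6.3 should upgrade tfjs-core in lockstep.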

(Diffs for 10 further files are not supported yet; 2 more are too big to display.)
