@tensorflow/tfjs - npm Package Compare versions

Comparing version 0.13.2 to 0.13.3

.yalc/@tensorflow/tfjs-layers/dist/engine/dataset_fakes.d.ts

.yalc/@tensorflow/tfjs-layers/dist/base_callbacks.d.ts
import { Tensor } from '@tensorflow/tfjs-core';
import { Container } from './engine/container';
import { Logs, UnresolvedLogs } from './logs';
export declare enum ModelLoggingVerbosity {
SILENT = 0,
VERBOSE = 1,
}
export declare type Params = {

@@ -104,1 +108,5 @@ [key: string]: number | string | boolean | number[] | string[] | boolean[];

}
export declare function configureCallbacks(callbacks: BaseCallback[], yieldEvery: YieldEveryOptions, verbose: ModelLoggingVerbosity, epochs: number, initialEpoch: number, numTrainSamples: number, stepsPerEpoch: number, batchSize: number, doValidation: boolean, callbackMetrics: string[]): {
callbackList: CallbackList;
history: History;
};
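
For orientation, a minimal sketch of how this internal helper might be wired up. All values below are illustrative assumptions, not part of the diff; 'auto' is one assumed YieldEveryOptions value.

import {configureCallbacks, ModelLoggingVerbosity} from './base_callbacks';

// Hypothetical 10-epoch run over 320 samples with batch size 32.
const {callbackList, history} = configureCallbacks(
    [],                             // no user-supplied BaseCallbacks
    'auto',                         // yieldEvery (assumed value)
    ModelLoggingVerbosity.VERBOSE,  // verbose
    10,                             // epochs
    0,                              // initialEpoch
    320,                            // numTrainSamples
    null,                           // stepsPerEpoch (sample-based training)
    32,                             // batchSize
    false,                          // doValidation
    ['loss']);                      // callbackMetrics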

.yalc/@tensorflow/tfjs-layers/dist/base_callbacks.js

@@ -53,2 +53,7 @@ "use strict";

var generic_utils = require("./utils/generic_utils");
var ModelLoggingVerbosity;
(function (ModelLoggingVerbosity) {
ModelLoggingVerbosity[ModelLoggingVerbosity["SILENT"] = 0] = "SILENT";
ModelLoggingVerbosity[ModelLoggingVerbosity["VERBOSE"] = 1] = "VERBOSE";
})(ModelLoggingVerbosity = exports.ModelLoggingVerbosity || (exports.ModelLoggingVerbosity = {}));
var BaseCallback = (function () {

@@ -207,15 +212,18 @@ function BaseCallback() {

}
return [4, logs_1.resolveScalarsInLogs(logs)];
case 1:
_b.sent();
_i = 0, _a = this.callbacks;
_b.label = 1;
case 1:
if (!(_i < _a.length)) return [3, 4];
_b.label = 2;
case 2:
if (!(_i < _a.length)) return [3, 5];
callback = _a[_i];
return [4, callback.onBatchEnd(batch, logs)];
case 2:
case 3:
_b.sent();
_b.label = 3;
case 3:
_b.label = 4;
case 4:
_i++;
return [3, 1];
case 4: return [2];
return [3, 2];
case 5: return [2];
}

@@ -743,2 +751,25 @@ });

exports.CallbackConstructorRegistry = CallbackConstructorRegistry;
function configureCallbacks(callbacks, yieldEvery, verbose, epochs, initialEpoch, numTrainSamples, stepsPerEpoch, batchSize, doValidation, callbackMetrics) {
var history = new History();
var actualCallbacks = [
new BaseLogger(yieldEvery)
].concat(CallbackConstructorRegistry.createCallbacks(verbose));
if (callbacks != null) {
actualCallbacks.push.apply(actualCallbacks, callbacks);
}
actualCallbacks.push(history);
var callbackList = new CallbackList(actualCallbacks);
callbackList.setParams({
epochs: epochs,
initialEpoch: initialEpoch,
samples: numTrainSamples,
steps: stepsPerEpoch,
batchSize: batchSize,
verbose: verbose,
doValidation: doValidation,
metrics: callbackMetrics,
});
return { callbackList: callbackList, history: history };
}
exports.configureCallbacks = configureCallbacks;
//# sourceMappingURL=base_callbacks.js.map

.yalc/@tensorflow/tfjs-layers/dist/engine/dataset_stub.d.ts
import * as tfc from '@tensorflow/tfjs-core';
import { TensorContainer } from '@tensorflow/tfjs-core/dist/tensor_types';
import { Shape } from '../types';
export declare abstract class LazyIterator<T> {

@@ -10,12 +9,2 @@ abstract next(): Promise<IteratorResult<T>>;

}
export interface FakeDatasetConfig {
xShape: Shape | {
[name: string]: Shape;
};
yShape: Shape | {
[name: string]: Shape;
};
batchSize: number;
numBatches: number;
}
export declare type TensorMap = {

@@ -25,6 +14,1 @@ [name: string]: tfc.Tensor;

export declare type TensorOrTensorMap = tfc.Tensor | TensorMap;
export declare class FakeNumericDataset extends Dataset<[TensorOrTensorMap, TensorOrTensorMap]> {
readonly config: FakeDatasetConfig;
constructor(config: FakeDatasetConfig);
iterator(): Promise<LazyIterator<[TensorOrTensorMap, TensorOrTensorMap]>>;
}
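
As a usage sketch of this test-only helper (shapes and counts are illustrative assumptions; the import path mirrors the file above):

import {FakeNumericDataset} from './engine/dataset_stub';

async function demo() {
  // Yields 4 batches of random normals: xs of shape [8, 10], ys of [8, 1].
  const dataset = new FakeNumericDataset(
      {xShape: [10], yShape: [1], batchSize: 8, numBatches: 4});
  const iterator = await dataset.iterator();
  for (let r = await iterator.next(); !r.done; r = await iterator.next()) {
    const [xs, ys] = r.value;  // a [TensorOrTensorMap, TensorOrTensorMap] pair
  }
}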
"use strict";
var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = y[op[0] & 2 ? "return" : op[0] ? "throw" : "next"]) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [0, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
var tfc = require("@tensorflow/tfjs-core");
var LazyIterator = (function () {

@@ -61,74 +15,2 @@ function LazyIterator() {

exports.Dataset = Dataset;
function mergeBatchSizeAndShape(batchSize, shape) {
if (Array.isArray(shape)) {
return [batchSize].concat(shape);
}
else {
var output = {};
for (var name_1 in shape) {
output[name_1] = [batchSize].concat(shape[name_1]);
}
return output;
}
}
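// For example (per the two branches above): mergeBatchSizeAndShape(8, [10])
// returns [8, 10], while mergeBatchSizeAndShape(8, {a: [10], b: [5]})
// returns {a: [8, 10], b: [8, 5]}.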
function generateRandomTensorContainer(shape) {
var output;
if (Array.isArray(shape)) {
output = tfc.randomNormal(shape);
}
else {
output = {};
for (var name_2 in shape) {
output[name_2] = tfc.randomNormal(shape[name_2]);
}
}
return output;
}
var FakeNumericIterator = (function (_super) {
__extends(FakeNumericIterator, _super);
function FakeNumericIterator(config) {
var _this = _super.call(this) || this;
_this.xBatchShape = mergeBatchSizeAndShape(config.batchSize, config.xShape);
_this.yBatchShape = mergeBatchSizeAndShape(config.batchSize, config.yShape);
_this.numBatches = config.numBatches;
_this.batchCount = 0;
return _this;
}
FakeNumericIterator.prototype.next = function () {
return __awaiter(this, void 0, void 0, function () {
var done;
return __generator(this, function (_a) {
done = ++this.batchCount > this.numBatches;
return [2, {
done: done,
value: done ? null :
[
generateRandomTensorContainer(this.xBatchShape),
generateRandomTensorContainer(this.yBatchShape)
]
}];
});
});
};
return FakeNumericIterator;
}(LazyIterator));
var FakeNumericDataset = (function (_super) {
__extends(FakeNumericDataset, _super);
function FakeNumericDataset(config) {
var _this = _super.call(this) || this;
_this.config = config;
tfc.util.assert(config.batchSize > 0 && Number.isInteger(config.batchSize), "batchSize must be a positive integer, but got " + config.batchSize);
tfc.util.assert(config.numBatches > 0 && Number.isInteger(config.numBatches), "numBatches must be a positive integer, but got " + config.numBatches);
return _this;
}
FakeNumericDataset.prototype.iterator = function () {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2, new FakeNumericIterator(this.config)];
});
});
};
return FakeNumericDataset;
}(Dataset));
exports.FakeNumericDataset = FakeNumericDataset;
//# sourceMappingURL=dataset_stub.js.map
import { DataType, serialization, Tensor } from '@tensorflow/tfjs-core';
import { Kwargs, Shape } from '../types';
import { Layer, SymbolicTensor, DisposeResult } from './topology';
import { DisposeResult, Layer, SymbolicTensor } from './topology';
export interface InputLayerConfig {

@@ -5,0 +5,0 @@ inputShape?: Shape;

@@ -79,6 +79,3 @@ "use strict";

InputLayer.prototype.dispose = function () {
return {
refCountAfterDispose: this._refCount,
numDisposedVariables: 0
};
return { refCountAfterDispose: this._refCount, numDisposedVariables: 0 };
};

@@ -85,0 +82,0 @@ InputLayer.prototype.getConfig = function () {

@@ -688,6 +688,3 @@ "use strict";

}
return {
refCountAfterDispose: this._refCount,
numDisposedVariables: numDisposedVariables
};
return { refCountAfterDispose: this._refCount, numDisposedVariables: numDisposedVariables };
};

@@ -694,0 +691,0 @@ return Layer;

import * as tfc from '@tensorflow/tfjs-core';
import { io, ModelPredictConfig, Optimizer, Scalar, Tensor, Tensor1D } from '@tensorflow/tfjs-core';
import { BaseCallback, CustomCallbackConfig, History, YieldEveryOptions } from '../base_callbacks';
import { io, ModelPredictConfig, Optimizer, Scalar, Tensor } from '@tensorflow/tfjs-core';
import { TensorContainer } from '@tensorflow/tfjs-core/dist/tensor_types';
import { History, ModelLoggingVerbosity } from '../base_callbacks';
import { LossOrMetricFn, NamedTensorMap, Shape } from '../types';
import { Container, ContainerConfig } from './container';
import { Dataset } from './dataset_stub';
import { ModelEvaluateDatasetConfig, ModelFitDatasetConfig } from './training_dataset';
import { ModelFitConfig } from './training_tensors';
export declare function isDataTensor(x: Tensor | Tensor[] | {

@@ -21,8 +25,2 @@ [inputName: string]: Tensor;

export declare function checkArrayLengths(inputs: Tensor[], targets: Tensor[], weights?: Tensor[]): void;
export declare function makeBatches(size: number, batchSize: number): Array<[number, number]>;
export declare function sliceArraysByIndices(arrays: Tensor | Tensor[], indices: Tensor1D): Tensor | Tensor[];
export declare enum ModelLoggingVerbosity {
SILENT = 0,
VERBOSE = 1,
}
export interface ModelEvaluateConfig {

@@ -34,19 +32,2 @@ batchSize?: number;

}
export interface ModelFitConfig {
batchSize?: number;
epochs?: number;
verbose?: ModelLoggingVerbosity;
callbacks?: BaseCallback[] | CustomCallbackConfig | CustomCallbackConfig[];
validationSplit?: number;
validationData?: [Tensor | Tensor[], Tensor | Tensor[]] | [Tensor | Tensor[], Tensor | Tensor[], Tensor | Tensor[]];
shuffle?: boolean;
classWeight?: {
[classIndex: string]: number;
};
sampleWeight?: Tensor;
initialEpoch?: number;
stepsPerEpoch?: number;
validationSteps?: number;
yieldEvery?: YieldEveryOptions;
}
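// Usage sketch (illustrative; `model`, `xs`, and `ys` are assumed to exist):
// these fields map onto a `model.fit` call such as
//   await model.fit(xs, ys, {
//     batchSize: 32,
//     epochs: 5,
//     validationSplit: 0.2,
//     shuffle: true,
//     yieldEvery: 'auto',  // assumed YieldEveryOptions value
//   });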
export interface ModelCompileConfig {

@@ -77,4 +58,4 @@ optimizer: string | Optimizer;

history: History;
private stopTraining_;
private isTraining;
protected stopTraining_: boolean;
protected isTraining: boolean;
metrics: string[] | {

@@ -88,4 +69,5 @@ [outputName: string]: string;

compile(config: ModelCompileConfig): void;
private checkTrainableWeightsConsistency();
protected checkTrainableWeightsConsistency(): void;
evaluate(x: Tensor | Tensor[], y: Tensor | Tensor[], config?: ModelEvaluateConfig): Scalar | Scalar[];
evaluateDataset<T extends TensorContainer>(dataset: Dataset<T>, config: ModelEvaluateDatasetConfig): Promise<Scalar | Scalar[]>;
private checkNumSamples(ins, batchSize?, steps?, stepsName?);

@@ -102,5 +84,5 @@ execute(inputs: Tensor | Tensor[] | NamedTensorMap, outputs: string | string[]): Tensor | Tensor[];

}, checkBatchAxis?: boolean, batchSize?: number): [Tensor[], Tensor[], Tensor[]];
private fitLoop(f, ins, outLabels?, batchSize?, epochs?, verbose?, callbacks?, valF?, valIns?, shuffle?, callbackMetrics?, initialEpoch?, stepsPerEpoch?, validationSteps?, yieldEvery?);
private testLoop(f, ins, batchSize?, verbose?, steps?);
private getDedupedMetricsNames();
protected getDedupedMetricsNames(): string[];
protected makeTrainFunction(): (data: Tensor[]) => Scalar[];
private makeTestFunction();

@@ -112,2 +94,3 @@ fit(x: Tensor | Tensor[] | {

}, config?: ModelFitConfig): Promise<History>;
fitDataset<T extends TensorContainer>(dataset: Dataset<T>, config: ModelFitDatasetConfig<T>): Promise<History>;
protected getNamedWeights(config?: io.SaveConfig): NamedTensorMap;

@@ -114,0 +97,0 @@ stopTraining: boolean;

@@ -52,6 +52,4 @@ "use strict";

var K = require("../backend/tfjs_backend");
var base_callbacks_1 = require("../base_callbacks");
var common_1 = require("../common");
var errors_1 = require("../errors");
var logs_1 = require("../logs");
var losses = require("../losses");

@@ -65,2 +63,4 @@ var Metrics = require("../metrics");

var executor_1 = require("./executor");
var training_dataset_1 = require("./training_dataset");
var training_tensors_1 = require("./training_tensors");
function isDataTensor(x) {

@@ -230,42 +230,2 @@ return x instanceof tfjs_core_1.Tensor;

}
function makeBatches(size, batchSize) {
var output = [];
var batchStart = 0;
var batchEnd = null;
while (batchStart < size) {
batchEnd = batchStart + batchSize;
if (batchEnd >= size) {
batchEnd = size;
}
output.push([batchStart, batchEnd]);
batchStart = batchEnd;
}
return output;
}
exports.makeBatches = makeBatches;
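// Worked example, following directly from the loop above:
// makeBatches(5, 2) returns [[0, 2], [2, 4], [4, 5]]; the final batch is
// truncated so batchEnd never exceeds size.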
function sliceArrays(arrays, start, stop) {
if (arrays == null) {
return [null];
}
else if (Array.isArray(arrays)) {
return arrays.map(function (array) { return K.sliceAlongFirstAxis(array, start, stop - start); });
}
else {
return K.sliceAlongFirstAxis(arrays, start, stop - start);
}
}
function sliceArraysByIndices(arrays, indices) {
return tfc.tidy(function () {
if (arrays == null) {
return null;
}
else if (Array.isArray(arrays)) {
return arrays.map(function (array) { return sliceArraysByIndices(array, indices); });
}
else {
return K.gather(arrays, indices.dtype === 'int32' ? indices : indices.toInt());
}
});
}
exports.sliceArraysByIndices = sliceArraysByIndices;
function checkInputData(data, names, shapes, checkBatchAxis, exceptionPrefix) {

@@ -344,10 +304,2 @@ if (checkBatchAxis === void 0) { checkBatchAxis = true; }

}
var ModelLoggingVerbosity;
(function (ModelLoggingVerbosity) {
ModelLoggingVerbosity[ModelLoggingVerbosity["SILENT"] = 0] = "SILENT";
ModelLoggingVerbosity[ModelLoggingVerbosity["VERBOSE"] = 1] = "VERBOSE";
})(ModelLoggingVerbosity = exports.ModelLoggingVerbosity || (exports.ModelLoggingVerbosity = {}));
function checkBatchSize(batchSize) {
tfc.util.assert(batchSize > 0 && Number.isInteger(batchSize), "batchSize is required to be a positive integer, but got " + batchSize);
}
var Model = (function (_super) {

@@ -542,3 +494,3 @@ __extends(Model, _super);

var batchSize = config.batchSize == null ? 32 : config.batchSize;
checkBatchSize(batchSize);
training_tensors_1.checkBatchSize(batchSize);
var standardizedOuts = this.standardizeUserData(x, y, true, batchSize);

@@ -551,2 +503,10 @@ var ins = standardizedOuts[0].concat(standardizedOuts[1]);

};
Model.prototype.evaluateDataset = function (dataset, config) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
this.makeTestFunction();
return [2, training_dataset_1.evaluateDataset(this, dataset, config)];
});
});
};
Model.prototype.checkNumSamples = function (ins, batchSize, steps, stepsName) {

@@ -655,3 +615,3 @@ if (stepsName === void 0) { stepsName = 'steps'; }

}
var batches = makeBatches(numSamples, batchSize);
var batches = training_tensors_1.makeBatches(numSamples, batchSize);
var outs = [];

@@ -662,3 +622,3 @@ var _loop_3 = function (batchIndex) {

var batchEnd = batches[batchIndex][1];
var insBatch = sliceArrays(ins, batchStart, batchEnd);
var insBatch = training_tensors_1.sliceArrays(ins, batchStart, batchEnd);
var feeds = [];

@@ -698,3 +658,3 @@ if (Array.isArray(insBatch)) {

var batchSize = config.batchSize == null ? 32 : config.batchSize;
checkBatchSize(batchSize);
training_tensors_1.checkBatchSize(batchSize);
return this.predictLoop(x, batchSize);

@@ -736,179 +696,2 @@ };

};
Model.prototype.fitLoop = function (f, ins, outLabels, batchSize, epochs, verbose, callbacks, valF, valIns, shuffle, callbackMetrics, initialEpoch, stepsPerEpoch, validationSteps, yieldEvery) {
return __awaiter(this, void 0, void 0, function () {
var _this = this;
var doValidation, numTrainSamples, indexArray, actualCallbacks, callbackList, _loop_4, this_1, epoch, state_2;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (batchSize == null) {
batchSize = 32;
}
if (epochs == null) {
epochs = 1;
}
if (shuffle == null) {
shuffle = true;
}
if (initialEpoch == null) {
initialEpoch = 0;
}
doValidation = false;
if (valF != null && valIns != null) {
doValidation = true;
}
if (validationSteps != null) {
doValidation = true;
if (stepsPerEpoch == null) {
throw new errors_1.ValueError('Can only use `validationSteps` when doing step-wise training, ' +
'i.e., `stepsPerEpoch` must be set.');
}
}
numTrainSamples = this.checkNumSamples(ins, batchSize, stepsPerEpoch, 'steps_per_epoch');
if (numTrainSamples != null) {
indexArray = math_utils_1.range(0, numTrainSamples);
}
if (verbose == null) {
verbose = 1;
}
this.history = new base_callbacks_1.History();
actualCallbacks = [
new base_callbacks_1.BaseLogger(yieldEvery)
].concat(base_callbacks_1.CallbackConstructorRegistry.createCallbacks(verbose));
if (callbacks != null) {
actualCallbacks.push.apply(actualCallbacks, callbacks);
}
actualCallbacks.push(this.history);
callbackList = new base_callbacks_1.CallbackList(actualCallbacks);
callbackList.setModel(this);
callbackList.setParams({
epochs: epochs,
initialEpoch: initialEpoch,
samples: numTrainSamples,
steps: stepsPerEpoch,
batchSize: batchSize,
verbose: verbose,
doValidation: doValidation,
metrics: callbackMetrics,
});
return [4, callbackList.onTrainBegin()];
case 1:
_a.sent();
this.stopTraining_ = false;
_loop_4 = function (epoch) {
var epochLogs, epochIndexArray1D_1, batches_1, _loop_5, batchIndex, state_3;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4, callbackList.onEpochBegin(epoch)];
case 1:
_a.sent();
epochLogs = {};
if (!(stepsPerEpoch != null)) return [3, 2];
throw new errors_1.NotImplementedError('stepsPerEpoch mode is not implemented yet.');
case 2:
if (shuffle === 'batch') {
throw new errors_1.NotImplementedError('batch shuffling is not implemented yet');
}
else if (shuffle) {
tfjs_core_1.util.shuffle(indexArray);
}
epochIndexArray1D_1 = tfjs_core_1.tensor1d(indexArray);
batches_1 = makeBatches(numTrainSamples, batchSize);
_loop_5 = function (batchIndex) {
var batchLogs;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
batchLogs = {};
return [4, callbackList.onBatchBegin(batchIndex, batchLogs)];
case 1:
_a.sent();
tfc.tidy(function () {
var batchStart = batches_1[batchIndex][0];
var batchEnd = batches_1[batchIndex][1];
var batchIds = K.sliceAlongFirstAxis(epochIndexArray1D_1, batchStart, batchEnd - batchStart);
batchLogs['batch'] = batchIndex;
batchLogs['size'] = batchEnd - batchStart;
var insBatch = sliceArraysByIndices(ins, batchIds);
var outs = f(insBatch);
for (var i = 0; i < outLabels.length; ++i) {
var label = outLabels[i];
var out = outs[i];
batchLogs[label] = out;
tfc.keep(out);
}
if (batchIndex === batches_1.length - 1) {
if (doValidation) {
var valOuts = _this.testLoop(valF, valIns, batchSize);
for (var i = 0; i < outLabels.length; ++i) {
var label = outLabels[i];
var out = valOuts[i];
tfc.keep(out);
epochLogs['val_' + label] = out;
}
}
}
});
return [4, callbackList.onBatchEnd(batchIndex, batchLogs)];
case 2:
_a.sent();
logs_1.disposeTensorsInLogs(batchLogs);
if (this_1.stopTraining_) {
return [2, "break"];
}
return [2];
}
});
};
batchIndex = 0;
_a.label = 3;
case 3:
if (!(batchIndex < batches_1.length)) return [3, 6];
return [5, _loop_5(batchIndex)];
case 4:
state_3 = _a.sent();
if (state_3 === "break")
return [3, 6];
_a.label = 5;
case 5:
++batchIndex;
return [3, 3];
case 6:
epochIndexArray1D_1.dispose();
_a.label = 7;
case 7: return [4, callbackList.onEpochEnd(epoch, epochLogs)];
case 8:
_a.sent();
if (this_1.stopTraining_) {
return [2, "break"];
}
return [2];
}
});
};
this_1 = this;
epoch = initialEpoch;
_a.label = 2;
case 2:
if (!(epoch < epochs)) return [3, 5];
return [5, _loop_4(epoch)];
case 3:
state_2 = _a.sent();
if (state_2 === "break")
return [3, 5];
_a.label = 4;
case 4:
++epoch;
return [3, 2];
case 5: return [4, callbackList.onTrainEnd()];
case 6:
_a.sent();
return [4, this.history.syncData()];
case 7:
_a.sent();
return [2, this.history];
}
});
});
};
Model.prototype.testLoop = function (f, ins, batchSize, verbose, steps) {

@@ -920,3 +703,3 @@ var _this = this;

var outs = [];
if (verbose === 1) {
if (verbose > 0) {
throw new errors_1.NotImplementedError('Verbose mode is not implemented yet.');

@@ -928,3 +711,3 @@ }

else {
var batches = makeBatches(numSamples, batchSize);
var batches = training_tensors_1.makeBatches(numSamples, batchSize);
var indexArray = tfjs_core_1.tensor1d(math_utils_1.range(0, numSamples));

@@ -935,3 +718,3 @@ for (var batchIndex = 0; batchIndex < batches.length; ++batchIndex) {

var batchIds = K.sliceAlongFirstAxis(indexArray, batchStart, batchEnd - batchStart);
var insBatch = sliceArraysByIndices(ins, batchIds);
var insBatch = training_tensors_1.sliceArraysByIndices(ins, batchIds);
var batchOuts = f(insBatch);

@@ -970,2 +753,50 @@ if (batchIndex === 0) {

};
Model.prototype.makeTrainFunction = function () {
var _this = this;
return function (data) {
var losses = [];
var lossValues = [];
var inputs = data.slice(0, _this.inputs.length);
var targets = data.slice(_this.inputs.length, _this.inputs.length + _this.outputs.length);
var metricsValues = [];
var totalLossFunction = function () {
var feeds = [];
for (var i = 0; i < _this.inputs.length; ++i) {
feeds.push({ key: _this.inputs[i], value: inputs[i] });
}
var feedDict = new executor_1.FeedDict(feeds);
var outputs = executor_1.execute(_this.outputs, feedDict, { 'training': true });
var totalLoss;
for (var i = 0; i < _this.lossFunctions.length; ++i) {
var lossFunction = _this.lossFunctions[i];
var loss = lossFunction(targets[i], outputs[i]);
losses.push(loss);
var meanLoss = tfc.mean(loss);
lossValues.push(meanLoss);
if (i === 0) {
totalLoss = loss;
}
else {
totalLoss = tfc.add(totalLoss, loss);
}
}
for (var i = 0; i < _this.metricsTensors.length; ++i) {
var metric = _this.metricsTensors[i][0];
var outputIndex = _this.metricsTensors[i][1];
var meanMetric = tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));
tfc.keep(meanMetric);
metricsValues.push(meanMetric);
}
totalLoss = tfc.mean(totalLoss);
_this.calculateLosses().forEach(function (regularizerLoss) {
totalLoss = tfc.add(totalLoss, regularizerLoss);
});
return totalLoss;
};
var variables = _this.collectedTrainableWeights.map(function (param) { return param.read(); });
var returnCost = true;
var totalLossValue = _this.optimizer.minimize(totalLossFunction, returnCost, variables);
return [totalLossValue].concat(metricsValues);
};
};
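// Note: the function returned above runs one optimizer step per call and
// yields [totalLossValue, ...metricsValues]; each mean metric is retained
// via tfc.keep() so callers can read it outside the tidy scope.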
Model.prototype.makeTestFunction = function () {

@@ -1009,138 +840,14 @@ var _this = this;

return __awaiter(this, void 0, void 0, function () {
var _this = this;
var batchSize, standardizedOuts, inputs, targets, doValidation, valX, valY, valIns, needValidationDisposal, valStandardized, splitAt, originalBatchSize, ins, trainFunction, outLabels, valFunction, callbackMetrics, callbacks, out;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (this.isTraining) {
throw new Error('Cannot start training because another fit() call is ongoing.');
}
this.isTraining = true;
_a.label = 1;
case 1:
_a.trys.push([1, , 3, 4]);
batchSize = config.batchSize == null ? 32 : config.batchSize;
checkBatchSize(batchSize);
standardizedOuts = this.standardizeUserData(x, y, false, batchSize);
inputs = standardizedOuts[0];
targets = standardizedOuts[1];
doValidation = false;
valX = void 0;
valY = void 0;
valIns = void 0;
needValidationDisposal = false;
if (config.validationData != null && config.validationData.length > 0) {
doValidation = true;
if (config.validationData.length === 2) {
valX = config.validationData[0];
valY = config.validationData[1];
}
else if (config.validationData.length === 3) {
throw new errors_1.NotImplementedError('validationData including sample weights is not supported yet.');
}
else {
throw new errors_1.ValueError("When passing validation data, it must contain 2 (valX, valY) " +
"or 3 (valX, valY, valSampleWeight) items; " +
(config.validationData + " is invalid."));
}
valStandardized = this.standardizeUserData(valX, valY, true, batchSize);
valX = valStandardized[0];
valY = valStandardized[1];
valIns = valX.concat(valY);
}
else if (config.validationSplit != null && config.validationSplit > 0 &&
config.validationSplit < 1) {
doValidation = true;
splitAt = Math.floor(inputs[0].shape[0] * (1 - config.validationSplit));
originalBatchSize = inputs[0].shape[0];
valX = sliceArrays(inputs, splitAt, originalBatchSize);
inputs = sliceArrays(inputs, 0, splitAt);
valY = sliceArrays(targets, splitAt, originalBatchSize);
targets = sliceArrays(targets, 0, splitAt);
needValidationDisposal = true;
valIns = valX.concat(valY);
}
else if (config.validationSteps != null) {
doValidation = true;
}
ins = inputs.concat(targets);
this.checkTrainableWeightsConsistency();
trainFunction = function (data) {
var losses = [];
var lossValues = [];
var inputs = data.slice(0, _this.inputs.length);
var targets = data.slice(_this.inputs.length, _this.inputs.length + _this.outputs.length);
var metricsValues = [];
var totalLossFunction = function () {
var feeds = [];
for (var i = 0; i < _this.inputs.length; ++i) {
feeds.push({ key: _this.inputs[i], value: inputs[i] });
}
var feedDict = new executor_1.FeedDict(feeds);
var outputs = executor_1.execute(_this.outputs, feedDict, { 'training': true });
var totalLoss;
for (var i = 0; i < _this.lossFunctions.length; ++i) {
var lossFunction = _this.lossFunctions[i];
var loss = lossFunction(targets[i], outputs[i]);
losses.push(loss);
var meanLoss = tfc.mean(loss);
lossValues.push(meanLoss);
if (i === 0) {
totalLoss = loss;
}
else {
totalLoss = tfc.add(totalLoss, loss);
}
}
for (var i = 0; i < _this.metricsTensors.length; ++i) {
var metric = _this.metricsTensors[i][0];
var outputIndex = _this.metricsTensors[i][1];
var meanMetric = tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));
tfc.keep(meanMetric);
metricsValues.push(meanMetric);
}
totalLoss = tfc.mean(totalLoss);
_this.calculateLosses().forEach(function (regularizerLoss) {
totalLoss = tfc.add(totalLoss, regularizerLoss);
});
return totalLoss;
};
var variables = _this.collectedTrainableWeights.map(function (param) { return param.read(); });
var returnCost = true;
var totalLossValue = _this.optimizer.minimize(totalLossFunction, returnCost, variables);
return [totalLossValue].concat(metricsValues);
};
outLabels = this.getDedupedMetricsNames();
valFunction = void 0;
callbackMetrics = void 0;
if (doValidation) {
this.makeTestFunction();
valFunction = this.testFunction;
callbackMetrics =
outLabels.slice().concat(outLabels.map(function (n) { return 'val_' + n; }));
}
else {
valFunction = null;
valIns = [];
callbackMetrics = outLabels.slice();
}
callbacks = base_callbacks_1.standardizeCallbacks(config.callbacks);
return [4, this.fitLoop(trainFunction, ins, outLabels, batchSize, config.epochs, config.verbose, callbacks, valFunction, valIns, config.shuffle, callbackMetrics, config.initialEpoch, null, null, config.yieldEvery)];
case 2:
out = _a.sent();
if (needValidationDisposal) {
valIns.forEach(function (tensor) { return tensor.dispose(); });
inputs.forEach(function (tensor) { return tensor.dispose(); });
targets.forEach(function (tensor) { return tensor.dispose(); });
}
this.isTraining = false;
return [2, out];
case 3:
this.isTraining = false;
return [7];
case 4: return [2];
}
return [2, training_tensors_1.fitTensors(this, x, y, config)];
});
});
};
Model.prototype.fitDataset = function (dataset, config) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2, training_dataset_1.fitDataset(this, dataset, config)];
});
});
};
Model.prototype.getNamedWeights = function (config) {

@@ -1147,0 +854,0 @@ var namedWeights = {};

@@ -9,3 +9,3 @@ import { InputLayerConfig } from './engine/input_layer';

import { EmbeddingLayerConfig } from './layers/embeddings';
import { ConcatenateLayerConfig } from './layers/merge';
import { ConcatenateLayerConfig, DotLayerConfig } from './layers/merge';
import { BatchNormalizationLayerConfig } from './layers/normalization';

@@ -43,2 +43,3 @@ import { ZeroPadding2DLayerConfig } from './layers/padding';

export declare function multiply(config?: LayerConfig): Layer;
export declare function dot(config: DotLayerConfig): Layer;
export declare function batchNormalization(config?: BatchNormalizationLayerConfig): Layer;

@@ -45,0 +46,0 @@ export declare function zeroPadding2d(config?: ZeroPadding2DLayerConfig): Layer;

@@ -129,2 +129,6 @@ "use strict";

exports.multiply = multiply;
function dot(config) {
return new merge_1.Dot(config);
}
exports.dot = dot;
function batchNormalization(config) {

@@ -131,0 +135,0 @@ return new normalization_1.BatchNormalization(config);

@@ -8,4 +8,6 @@ import * as constraints from './exports_constraints';

export { Callback } from './callbacks';
export { SymbolicTensor } from './engine/topology';
export { Model, ModelCompileConfig, ModelEvaluateConfig, ModelFitConfig } from './engine/training';
export { InputSpec, SymbolicTensor } from './engine/topology';
export { Model, ModelCompileConfig, ModelEvaluateConfig } from './engine/training';
export { ModelFitDatasetConfig } from './engine/training_dataset';
export { ModelFitConfig } from './engine/training_tensors';
export { input, loadModel, model, registerCallbackConstructor, sequential } from './exports';

@@ -12,0 +14,0 @@ export { GRUCellLayerConfig, GRULayerConfig, LSTMCellLayerConfig, LSTMLayerConfig, RNN, RNNLayerConfig, SimpleRNNCellLayerConfig, SimpleRNNLayerConfig } from './layers/recurrent';

@@ -20,2 +20,3 @@ "use strict";

var topology_1 = require("./engine/topology");
exports.InputSpec = topology_1.InputSpec;
exports.SymbolicTensor = topology_1.SymbolicTensor;

@@ -22,0 +23,0 @@ var training_1 = require("./engine/training");

@@ -12,2 +12,3 @@ import { serialization, Tensor } from '@tensorflow/tfjs-core';

computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
computeMask(inputs: Tensor | Tensor[], mask?: Tensor | Tensor[]): Tensor;
}

@@ -55,4 +56,21 @@ export declare class Add extends Merge {

computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
computeMask(inputs: Tensor | Tensor[], mask?: Tensor | Tensor[]): Tensor;
getConfig(): serialization.ConfigDict;
}
export declare function concatenate(config?: SymbolicTensor[] | Tensor[] | ConcatenateLayerConfig): Layer | SymbolicTensor | Tensor;
export interface DotLayerConfig extends LayerConfig {
axes: number | [number, number];
normalize?: boolean;
}
export declare class Dot extends Merge {
static className: string;
private axes;
private normalize;
constructor(config: DotLayerConfig);
build(inputShape: Shape | Shape[]): void;
protected mergeFunction(inputs: Tensor[]): Tensor;
private interpretAxes(shape1, shape2);
computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[];
computeMask(inputs: Tensor | Tensor[], mask?: Tensor | Tensor[]): Tensor;
getConfig(): serialization.ConfigDict;
}
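
A brief usage sketch of the newly exported dot layer (tensor shapes here are illustrative assumptions):

import * as tf from '@tensorflow/tfjs';

// Dot product of two symbolic tensors along their last axes.
const a = tf.input({shape: [16]});
const b = tf.input({shape: [16]});
const dotted = tf.layers.dot({axes: -1}).apply([a, b]) as tf.SymbolicTensor;
const model = tf.model({inputs: [a, b], outputs: dotted});
const out = model.predict(
    [tf.randomNormal([4, 16]), tf.randomNormal([4, 16])]);  // shape [4, 1]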

@@ -19,2 +19,3 @@ "use strict";

var errors_1 = require("../errors");
var losses_1 = require("../losses");
var generic_utils = require("../utils/generic_utils");

@@ -197,2 +198,5 @@ var mathUtils = require("../utils/math_utils");

};
Merge.prototype.computeMask = function (inputs, mask) {
throw new errors_1.NotImplementedError('computeMask has not been implemented for Merge yet');
};
return Merge;

@@ -422,2 +426,5 @@ }(topology_1.Layer));

};
Concatenate.prototype.computeMask = function (inputs, mask) {
throw new errors_1.NotImplementedError('computeMask has not been implemented for Concatenate yet');
};
Concatenate.prototype.getConfig = function () {

@@ -446,2 +453,178 @@ var config = {

exports.concatenate = concatenate;
function interpretAxis(axis, dim) {
while (axis < 0) {
axis += dim;
}
return axis;
}
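// E.g. interpretAxis(-1, 3) === 2 and interpretAxis(1, 3) === 1: negative
// axes are wrapped by repeatedly adding the rank until non-negative.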
function batchDot(x, y, axes) {
if (x.shape.length > 3 || y.shape.length > 3) {
throw new errors_1.NotImplementedError('batchDot is not implemented for tensors of 4D or higher rank yet');
}
tfc.util.assert(x.shape.length >= 2, "batchDot requires the rank of x to be >= 2, " +
("but got " + x.shape.length));
tfc.util.assert(x.shape.length >= 2, "batchDot requires the rank of y to be >= 2, " +
("but got " + y.shape.length));
if (typeof axes === 'number') {
axes = [axes, axes];
}
if (x.dtype === 'complex64' || y.dtype === 'complex64') {
throw new errors_1.NotImplementedError('batchDot is not implemented for complex64-type Tensors yet.');
}
var xNDim = x.shape.length;
var yNDim = y.shape.length;
if (axes == null) {
axes = [xNDim - 1, yNDim - 2];
}
var axesArray = axes;
return tfc.tidy(function () {
var diff;
if (xNDim > yNDim) {
diff = xNDim - yNDim;
var diffShape = [];
for (var i = 0; i < diff; ++i) {
diffShape.push(1);
}
y = y.reshape(y.shape.concat(diffShape));
}
else if (yNDim > xNDim) {
diff = yNDim - xNDim;
var diffShape = [];
for (var i = 0; i < diff; ++i) {
diffShape.push(1);
}
x = x.reshape(x.shape.concat(diffShape));
}
else {
diff = 0;
}
var out;
if (x.shape.length === 2 && y.shape.length === 2) {
if (axesArray[0] === axesArray[1]) {
out = x.mulStrict(y).sum(axesArray[0]);
}
else {
out = x.transpose([1, 0]).mulStrict(y).sum(axesArray[1]);
}
}
else {
var adjX = axesArray[0] === x.shape.length - 1 ? null : true;
var adjY = axesArray[1] === y.shape.length - 1 ? true : null;
out = x.matMul(y, adjX, adjY);
}
if (diff > 0) {
var idx = void 0;
if (xNDim > yNDim) {
idx = xNDim + yNDim - 3;
}
else {
idx = xNDim - 1;
}
var squeezeAxes = [];
for (var i = idx; i < idx + diff; ++i) {
squeezeAxes.push(i);
}
out = out.squeeze(squeezeAxes);
}
if (out.shape.length === 1) {
out = out.expandDims(1);
}
return out;
});
}
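// Shape sketch (mirroring Keras-style batch_dot semantics): for x of shape
// [batch, n, m] and y of shape [batch, m, k] with axes [2, 1], matMul is
// taken per batch and the result has shape [batch, n, k]; rank-1 results
// are expanded to [batch, 1] by the expandDims above.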
var Dot = (function (_super) {
__extends(Dot, _super);
function Dot(config) {
var _this = _super.call(this, config) || this;
_this.axes = config.axes;
_this.normalize = config.normalize == null ? false : config.normalize;
_this.supportsMasking = true;
_this.reshapeRequired = false;
return _this;
}
Dot.prototype.build = function (inputShape) {
tfc.util.assert(Array.isArray(inputShape) && inputShape.length === 2 &&
Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]), 'A `Dot` layer should be called on a list of exactly 2 inputs.');
var shape1 = inputShape[0];
var shape2 = inputShape[1];
if (shape1.length > 3 || shape2.length > 3) {
throw new errors_1.NotImplementedError('Dot layer does not support tensors of 4D or higher rank yet.');
}
var axes = this.interpretAxes(shape1, shape2);
if (shape1[axes[0]] !== shape2[axes[1]]) {
throw new errors_1.ValueError("Dimension incompatibility: " +
(shape1[axes[0]] + " !== " + shape2[axes[1]]));
}
};
Dot.prototype.mergeFunction = function (inputs) {
if (inputs.length !== 2) {
throw new errors_1.ValueError('A `Dot` layer must be called on exactly 2 inputs, ' +
("but received " + inputs.length + " input(s)."));
}
var x1 = inputs[0];
var x2 = inputs[1];
var axes;
if (!Array.isArray(this.axes)) {
axes = [
interpretAxis(this.axes, x1.shape.length),
interpretAxis(this.axes, x2.shape.length)
];
}
else {
axes = this.axes.map(function (axis, i) { return interpretAxis(axis, inputs[i].shape.length); });
}
if (this.normalize) {
x1 = losses_1.l2Normalize(x1, axes[0]);
x2 = losses_1.l2Normalize(x2, axes[1]);
}
return batchDot(x1, x2, axes);
};
Dot.prototype.interpretAxes = function (shape1, shape2) {
var axes;
if (!Array.isArray(this.axes)) {
axes = [
interpretAxis(this.axes, shape1.length),
interpretAxis(this.axes, shape2.length)
];
}
else {
axes = this.axes;
}
return axes;
};
Dot.prototype.computeOutputShape = function (inputShape) {
tfc.util.assert(Array.isArray(inputShape) && inputShape.length === 2 &&
Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]), 'A `Dot` layer should be called on a list of exactly 2 inputs.');
var shape1 = inputShape[0];
var shape2 = inputShape[1];
if (shape1.length > 3 || shape2.length > 3) {
throw new errors_1.NotImplementedError('Dot layer does not support tensors of 4D or higher rank yet.');
}
var axes = this.interpretAxes(shape1, shape2);
shape1.splice(axes[0], 1);
shape2.splice(axes[1], 1);
shape2.splice(0, 1);
var outputShape = shape1.concat(shape2);
if (outputShape.length === 1) {
outputShape.push(1);
}
return outputShape;
};
Dot.prototype.computeMask = function (inputs, mask) {
throw new errors_1.NotImplementedError('computeMask has not been implemented for Dot yet');
};
Dot.prototype.getConfig = function () {
var config = {
'axes': this.axes,
'normalize': this.normalize
};
var baseConfig = _super.prototype.getConfig.call(this);
Object.assign(config, baseConfig);
return config;
};
Dot.className = 'Dot';
return Dot;
}(Merge));
exports.Dot = Dot;
tfjs_core_1.serialization.registerClass(Dot);
//# sourceMappingURL=merge.js.map

@@ -19,2 +19,3 @@ "use strict";

function meanSquaredError(yTrue, yPred) {
console.log('In meanSquredError');
return tfjs_core_1.tidy(function () { return tfc.mean(K.square(tfc.sub(yPred, yTrue)), -1); });

@@ -21,0 +22,0 @@ }

import { io, Scalar, serialization, Tensor } from '@tensorflow/tfjs-core';
import { TensorContainer } from '@tensorflow/tfjs-core/dist/tensor_types';
import { History } from './base_callbacks';
import { Dataset } from './engine/dataset_stub';
import { Layer } from './engine/topology';
import { Model, ModelCompileConfig, ModelEvaluateConfig, ModelFitConfig } from './engine/training';
import { Model, ModelCompileConfig, ModelEvaluateConfig } from './engine/training';
import { ModelFitDatasetConfig, ModelEvaluateDatasetConfig } from './engine/training_dataset';
import { ModelFitConfig } from './engine/training_tensors';
import { Kwargs, Shape } from './types';

@@ -38,2 +42,3 @@ import { JsonDict } from './types';

evaluate(x: Tensor | Tensor[], y: Tensor | Tensor[], config?: ModelEvaluateConfig): Scalar | Scalar[];
evaluateDataset<T extends TensorContainer>(dataset: Dataset<T>, config: ModelEvaluateDatasetConfig): Promise<Scalar | Scalar[]>;
predict(x: Tensor | Tensor[], config?: ModelPredictConfig): Tensor | Tensor[];

@@ -47,2 +52,3 @@ predictOnBatch(x: Tensor): Tensor | Tensor[];

}, config?: ModelFitConfig): Promise<History>;
fitDataset<T extends TensorContainer>(dataset: Dataset<T>, config: ModelFitDatasetConfig<T>): Promise<History>;
static fromConfig<T extends serialization.Serializable>(cls: serialization.SerializableConstructor<T>, config: serialization.ConfigDict): T;

@@ -49,0 +55,0 @@ stopTraining: boolean;

@@ -341,2 +341,12 @@ "use strict";

};
Sequential.prototype.evaluateDataset = function (dataset, config) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (!this.built) {
throw new errors_1.RuntimeError('The model needs to be compiled before being used.');
}
return [2, this.model.evaluateDataset(dataset, config)];
});
});
};
Sequential.prototype.predict = function (x, config) {

@@ -376,15 +386,36 @@ if (config === void 0) { config = {}; }

};
Sequential.prototype.fitDataset = function (dataset, config) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (!this.built) {
throw new errors_1.RuntimeError('The model needs to be compiled before ' +
'being used.');
}
return [2, this.model.fitDataset(dataset, config)];
});
});
};
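// Usage sketch (assumes a compiled Sequential `model` and a Dataset
// `dataset` as declared in dataset_stub; the config fields are assumed):
//   const history = await model.fitDataset(dataset, {epochs: 3});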
Sequential.fromConfig = function (cls, config) {
var model = new cls({});
if (!(model instanceof Sequential)) {
throw new errors_1.ValueError("Sequential.fromConfig called on non-Sequential input: " + model);
var configArray;
var extraModelConfig = {};
if (config instanceof Array) {
if (!(config[0].className != null) ||
config[0]['className'] === 'Merge') {
throw new errors_1.ValueError('Legacy serialization format not supported yet.');
}
configArray = config;
}
if (!(config instanceof Array)) {
throw new errors_1.ValueError("Sequential.fromConfig called without an array of configs");
else {
tfjs_core_1.util.assert(config['layers'] != null, "When the config data for a Sequential model is not an Array, " +
"it must be an Object that contains the 'layers' field.");
configArray = config['layers'];
delete config['layers'];
extraModelConfig = config;
}
if (!(config[0].className != null) || config[0]['className'] === 'Merge') {
throw new errors_1.ValueError('Legacy serialization format not supported yet.');
var model = new cls(extraModelConfig);
if (!(model instanceof Sequential)) {
throw new errors_1.NotImplementedError("Sequential.fromConfig called on non-Sequential input: " + model);
}
for (var _i = 0, _a = config; _i < _a.length; _i++) {
var conf = _a[_i];
for (var _i = 0, configArray_1 = configArray; _i < configArray_1.length; _i++) {
var conf = configArray_1[_i];
var layer = serialization_1.deserialize(conf);

@@ -391,0 +422,0 @@ model.add(layer);

@@ -1,2 +0,2 @@

declare const version = "0.8.0";
declare const version = "0.8.2";
export { version };
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var version = '0.8.0';
var version = '0.8.2';
exports.version = version;
//# sourceMappingURL=version.js.map
{
"name": "@tensorflow/tfjs-layers",
"version": "0.8.0-59d24a7e",
"version": "0.8.2-85ae7874",
"description": "TensorFlow layers API in JavaScript",

@@ -13,3 +13,3 @@ "private": false,

"devDependencies": {
"@tensorflow/tfjs-core": "0.13.0",
"@tensorflow/tfjs-core": "0.13.6",
"@types/jasmine": "~2.5.53",

@@ -49,4 +49,4 @@ "clang-format": "~1.2.2",

"peerDependencies": {
"@tensorflow/tfjs-core": "0.13.0"
"@tensorflow/tfjs-core": "0.13.6"
}
}
// @tensorflow/tfjs Copyright 2018 Google
import{version_core}from"@tensorflow/tfjs-core";export*from"@tensorflow/tfjs-core";import{version_layers}from"@tensorflow/tfjs-layers";export*from"@tensorflow/tfjs-layers";import{version_converter}from"@tensorflow/tfjs-converter";export*from"@tensorflow/tfjs-converter";var version="0.13.2",version$1={"tfjs-core":version_core,"tfjs-layers":version_layers,"tfjs-converter":version_converter,tfjs:version};export{version$1 as version};
import{version_core}from"@tensorflow/tfjs-core";export*from"@tensorflow/tfjs-core";import{version_layers}from"@tensorflow/tfjs-layers";export*from"@tensorflow/tfjs-layers";import{version_converter}from"@tensorflow/tfjs-converter";export*from"@tensorflow/tfjs-converter";var version="0.13.3",version$1={"tfjs-core":version_core,"tfjs-layers":version_layers,"tfjs-converter":version_converter,tfjs:version};export{version$1 as version};
//# sourceMappingURL=tf.esm.js.map

@@ -1,2 +0,2 @@

declare const version = "0.13.2";
declare const version = "0.13.3";
export { version };
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var version = '0.13.2';
var version = '0.13.3';
exports.version = version;
//# sourceMappingURL=version.js.map
{
"name": "@tensorflow/tfjs",
"version": "0.13.2",
"version": "0.13.3",
"description": "An open-source machine learning framework.",

@@ -42,2 +42,3 @@ "private": false,

"rollup-plugin-node-resolve": "3.3.0",
"rollup-plugin-sourcemaps": "^0.4.2",
"rollup-plugin-typescript2": "0.13.0",

@@ -62,6 +63,6 @@ "rollup-plugin-uglify": "~3.0.0",

"dependencies": {
"@tensorflow/tfjs-converter": "0.6.4",
"@tensorflow/tfjs-core": "0.13.6",
"@tensorflow/tfjs-layers": "0.8.2"
"@tensorflow/tfjs-converter": "0.6.5",
"@tensorflow/tfjs-core": "0.13.8",
"@tensorflow/tfjs-layers": "0.8.3"
}
}

@@ -65,3 +65,4 @@ [![Build Status](https://travis-ci.org/tensorflow/tfjs.svg?branch=master)](https://travis-ci.org/tensorflow/tfjs)

<!-- Load TensorFlow.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"> </script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js"> </script>

@@ -68,0 +69,0 @@ <!-- Place your code in the script tag below. You can also use an external .js file -->

(Diffs of the remaining changed files are too large or not supported for display.)
