@tensorflow/tfjs-layers - npm Package Compare versions

Comparing version 0.0.1 to 0.0.2

dist/version.d.ts

dist/activations.d.ts
import { Tensor } from 'deeplearn';
import { ConfigDictValue } from './types';
export declare type ActivationFn = (tensor: Tensor, axis?: number) => Tensor;
export declare function getActivation(initializerType: string): ActivationFn;
export declare type ActivationIdentifier = 'elu' | 'hardsigmoid' | 'linear' | 'relu' | 'relu6' | 'selu' | 'sigmoid' | 'softmax' | 'softplus' | 'softsign' | 'tanh' | string;
export declare function getActivation(activationType: ActivationIdentifier): ActivationFn;
export declare function elu(x: Tensor, alpha?: number): Tensor;

@@ -6,0 +7,0 @@ export declare function selu(x: Tensor): Tensor;
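For illustration, a minimal sketch of calling the retyped function (hypothetical usage; the names come from the declarations above):

import { ActivationIdentifier, getActivation } from './activations';

// 'relu6' is one of the literal options; the union still widens to plain
// string, so custom names also type-check and are resolved at runtime.
const id: ActivationIdentifier = 'relu6';
const relu6Fn = getActivation(id); // ActivationFn: (tensor: Tensor, axis?: number) => Tensor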

dist/activations.js

@@ -6,38 +6,41 @@ "use strict";

var errors_1 = require("./errors");
function getActivation(initializerType) {
if (initializerType == null || initializerType.toLowerCase() === 'linear') {
function getActivation(activationType) {
if (activationType == null) {
return linear;
}
else if (initializerType.toLowerCase() === 'elu') {
else if (activationType.toLowerCase() === 'elu') {
return elu;
}
else if (initializerType.toLowerCase() === 'relu') {
else if (activationType.toLowerCase() === 'hardsigmoid') {
return hardSigmoid;
}
else if (activationType.toLowerCase() === 'linear') {
return linear;
}
else if (activationType.toLowerCase() === 'relu') {
return relu;
}
else if (initializerType.toLowerCase() === 'relu6') {
else if (activationType.toLowerCase() === 'relu6') {
return relu6;
}
else if (initializerType.toLowerCase() === 'selu') {
else if (activationType.toLowerCase() === 'selu') {
return selu;
}
else if (initializerType.toLowerCase() === 'sigmoid') {
else if (activationType.toLowerCase() === 'sigmoid') {
return sigmoid;
}
else if (initializerType.toLowerCase() === 'hardsigmoid') {
return hardSigmoid;
}
else if (initializerType.toLowerCase() === 'softmax') {
else if (activationType.toLowerCase() === 'softmax') {
return softmax;
}
else if (initializerType.toLowerCase() === 'softplus') {
else if (activationType.toLowerCase() === 'softplus') {
return softplus;
}
else if (initializerType.toLowerCase() === 'softsign') {
else if (activationType.toLowerCase() === 'softsign') {
return softsign;
}
else if (initializerType.toLowerCase() === 'tanh') {
else if (activationType.toLowerCase() === 'tanh') {
return tanh;
}
else {
throw new errors_1.ValueError("Unsupported activation function " + initializerType);
throw new errors_1.ValueError("Unsupported activation function " + activationType);
}

@@ -44,0 +47,0 @@ }

dist/constraints.d.ts

@@ -52,4 +52,5 @@ import { Tensor } from 'deeplearn';

}
export declare type ConstraintIdentifier = 'MaxNorm' | 'MinMaxNorm' | 'NonNeg' | 'UnitNorm' | string;
export declare function serializeConstraint(constraint: Constraint): ConfigDictValue;
export declare function deserializeConstraint(config: ConfigDict, customObjects?: ConfigDict): Constraint;
export declare function getConstraint(identifier: string | ConfigDict | Constraint): Constraint;
export declare function getConstraint(identifier: ConstraintIdentifier | ConfigDict | Constraint): Constraint;
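The same pattern applies to constraints; a minimal sketch (hypothetical usage; names as declared above):

import { ConstraintIdentifier, getConstraint } from './constraints';

// One of the new literal options; a ConfigDict or a Constraint instance
// is also accepted per the overload above.
const id: ConstraintIdentifier = 'NonNeg';
const nonNeg = getConstraint(id);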

dist/constraints.js

@@ -12,3 +12,10 @@ "use strict";

})();
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
var deeplearn_1 = require("deeplearn");
var K = require("./backend/deeplearnjs_backend");

@@ -25,2 +32,5 @@ var generic_utils_1 = require("./utils/generic_utils");

};
Constraint = __decorate([
deeplearn_1.doc({ heading: 'Constraints', subheading: 'Classes', namespace: 'constraints' })
], Constraint);
return Constraint;

@@ -27,0 +37,0 @@ }());

dist/engine/training.d.ts

@@ -1,2 +0,2 @@

import { Scalar, Tensor, Tensor1D } from 'deeplearn';
import { Optimizer, Scalar, Tensor, Tensor1D } from 'deeplearn';
import { Callback, CustomCallbackConfig, History } from '../callbacks';

@@ -50,3 +50,3 @@ import * as optimizers from '../optimizers';

export interface ModelCompileConfig {
optimizer: string | optimizers.Optimizer;
optimizer: string | Optimizer;
loss: string | string[] | {

@@ -60,3 +60,3 @@ [outputName: string]: string;

export declare class Model extends Container {
optimizer: optimizers.Optimizer;
optimizer: optimizers.LayersOptimizer;
loss: string | string[] | {

@@ -63,0 +63,0 @@ [outputName: string]: string;

dist/engine/training.js

@@ -356,10 +356,10 @@ "use strict";

}
this.loss = config.loss;
var optimizerConstructor = optimizers.get(config.optimizer);
if (typeof config.optimizer === 'string') {
var optimizerConstructor = optimizers.get(config.optimizer);
this.optimizer = new optimizerConstructor({});
}
else {
this.optimizer = config.optimizer;
this.optimizer = new optimizerConstructor(config.optimizer);
}
this.loss = config.loss;
var lossFunctions = [];

@@ -366,0 +366,0 @@ if (!Array.isArray(config.loss) && typeof config.loss !== 'string') {
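A sketch of what the retyped ModelCompileConfig permits, assuming an existing Model instance named model, that compile() takes a ModelCompileConfig, and that 'meanSquaredError' and 'SGD' are accepted identifiers (none of which are spelled out in this diff):

import { train } from 'deeplearn';

// By name: the string is resolved through optimizers.get() and
// instantiated with an empty config, as in the branch above.
model.compile({optimizer: 'SGD', loss: 'meanSquaredError'});

// By core optimizer: the deeplearn Optimizer instance is passed through
// the LayersOptimizer constructor instead.
model.compile({optimizer: train.sgd(0.01), loss: 'meanSquaredError'});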

dist/exports.d.ts

@@ -1,54 +0,82 @@

import { ContainerConfig, InputConfig, InputLayer, InputLayerConfig, LayerConfig } from './engine/topology';
import { MaxNorm, MaxNormConfig, MinMaxNorm, MinMaxNormConfig, NonNeg, UnitNorm, UnitNormConfig } from './constraints';
import { ContainerConfig, InputConfig, InputLayerConfig, Layer, LayerConfig } from './engine/topology';
import { Model } from './engine/training';
import { Conv1D, Conv2D, ConvLayerConfig } from './layers/convolutional';
import { DepthwiseConv2D, DepthwiseConv2DLayerConfig } from './layers/convolutional_depthwise';
import { Activation, ActivationLayerConfig, Dense, DenseLayerConfig, Dropout, DropoutLayerConfig, Flatten, RepeatVector, RepeatVectorLayerConfig } from './layers/core';
import { Embedding, EmbeddingLayerConfig } from './layers/embeddings';
import { Add, Average, Concatenate, ConcatenateLayerConfig, Maximum, MergeLayerConfig, Minimum, Multiply } from './layers/merge';
import { BatchNormalization, BatchNormalizationLayerConfig } from './layers/normalization';
import { AvgPooling1D, AvgPooling2D, GlobalAveragePooling1D, GlobalAveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalPooling2DLayerConfig, MaxPooling1D, MaxPooling2D, Pooling1DLayerConfig, Pooling2DLayerConfig } from './layers/pooling';
import { GRU, GRUCell, GRUCellLayerConfig, GRULayerConfig, LSTM, LSTMCell, LSTMCellLayerConfig, LSTMLayerConfig, SimpleRNN, SimpleRNNCell, SimpleRNNCellLayerConfig, SimpleRNNLayerConfig } from './layers/recurrent';
import { Bidirectional, BidirectionalLayerConfig, TimeDistributed, WrapperLayerConfig } from './layers/wrappers';
import { Constant, ConstantConfig, GlorotNormal, GlorotUniform, HeNormal, Identity, IdentityConfig, LeCunNormal, Ones, RandomNormal, RandomNormalConfig, RandomUniform, RandomUniformConfig, SeedOnlyInitializerConfig, TruncatedNormal, TruncatedNormalConfig, VarianceScaling, VarianceScalingConfig, Zeros } from './initializers';
import { ConvLayerConfig } from './layers/convolutional';
import { DepthwiseConv2DLayerConfig } from './layers/convolutional_depthwise';
import { ActivationLayerConfig, DenseLayerConfig, DropoutLayerConfig, RepeatVectorLayerConfig } from './layers/core';
import { EmbeddingLayerConfig } from './layers/embeddings';
import { ConcatenateLayerConfig, MergeLayerConfig } from './layers/merge';
import { BatchNormalizationLayerConfig } from './layers/normalization';
import { GlobalPooling2DLayerConfig, Pooling1DLayerConfig, Pooling2DLayerConfig } from './layers/pooling';
import { GRUCellLayerConfig, GRULayerConfig, LSTMCellLayerConfig, LSTMLayerConfig, RNNCell, SimpleRNNCellLayerConfig, SimpleRNNLayerConfig } from './layers/recurrent';
import { BidirectionalLayerConfig, WrapperLayerConfig } from './layers/wrappers';
import { Sequential, SequentialConfig } from './models';
import { L1Config, L1L2, L1L2Config, L2Config } from './regularizers';
import { SymbolicTensor } from './types';
export declare class ModelExports {
static model(config: ContainerConfig): Model;
static sequential(config: SequentialConfig): Sequential;
static sequential(config?: SequentialConfig): Sequential;
static loadModel(modelConfigPath: string): Promise<Model>;
static input(config: InputConfig): SymbolicTensor;
static inputLayer(config: InputLayerConfig): InputLayer;
static inputLayer(config: InputLayerConfig): Layer;
}
export declare class LayerExports {
static conv1d(config: ConvLayerConfig): Conv1D;
static conv2d(config: ConvLayerConfig): Conv2D;
static depthwiseConv2d(config: DepthwiseConv2DLayerConfig): DepthwiseConv2D;
static activation(config: ActivationLayerConfig): Activation;
static dense(config: DenseLayerConfig): Dense;
static dropout(config: DropoutLayerConfig): Dropout;
static flatten(config?: LayerConfig): Flatten;
static repeatVector(config: RepeatVectorLayerConfig): RepeatVector;
static embedding(config: EmbeddingLayerConfig): Embedding;
static add(config: MergeLayerConfig): Add;
static average(config: MergeLayerConfig): Average;
static concatenate(config: ConcatenateLayerConfig): Concatenate;
static maximum(config: MergeLayerConfig): Maximum;
static minimum(config: MergeLayerConfig): Minimum;
static multiply(config: MergeLayerConfig): Multiply;
static batchNormalization(config: BatchNormalizationLayerConfig): BatchNormalization;
static avgPooling1d(config: Pooling1DLayerConfig): AvgPooling1D;
static avgPooling2d(config: Pooling2DLayerConfig): AvgPooling2D;
static globalAveragePooling1d(config: LayerConfig): GlobalAveragePooling1D;
static globalAveragePooling2d(config: GlobalPooling2DLayerConfig): GlobalAveragePooling2D;
static globalMaxPooling1d(config: LayerConfig): GlobalMaxPooling1D;
static globalMaxPooling2d(config: GlobalPooling2DLayerConfig): GlobalMaxPooling2D;
static maxPooling1d(config: Pooling1DLayerConfig): MaxPooling1D;
static maxPooling2d(config: Pooling2DLayerConfig): MaxPooling2D;
static gru(config: GRULayerConfig): GRU;
static gruCell(config: GRUCellLayerConfig): GRUCell;
static lstm(config: LSTMLayerConfig): LSTM;
static lstmCell(config: LSTMCellLayerConfig): LSTMCell;
static simpleRNN(config: SimpleRNNLayerConfig): SimpleRNN;
static simpleRNNCell(config: SimpleRNNCellLayerConfig): SimpleRNNCell;
static bidirectional(config: BidirectionalLayerConfig): Bidirectional;
static timeDistributed(config: WrapperLayerConfig): TimeDistributed;
static conv1d(config: ConvLayerConfig): Layer;
static conv2d(config: ConvLayerConfig): Layer;
static depthwiseConv2d(config: DepthwiseConv2DLayerConfig): Layer;
static activation(config: ActivationLayerConfig): Layer;
static dense(config: DenseLayerConfig): Layer;
static dropout(config: DropoutLayerConfig): Layer;
static flatten(config?: LayerConfig): Layer;
static repeatVector(config: RepeatVectorLayerConfig): Layer;
static embedding(config: EmbeddingLayerConfig): Layer;
static add(config: MergeLayerConfig): Layer;
static average(config: MergeLayerConfig): Layer;
static concatenate(config: ConcatenateLayerConfig): Layer;
static maximum(config: MergeLayerConfig): Layer;
static minimum(config: MergeLayerConfig): Layer;
static multiply(config: MergeLayerConfig): Layer;
static batchNormalization(config: BatchNormalizationLayerConfig): Layer;
static avgPooling1d(config: Pooling1DLayerConfig): Layer;
static avgPooling2d(config: Pooling2DLayerConfig): Layer;
static globalAveragePooling1d(config: LayerConfig): Layer;
static globalAveragePooling2d(config: GlobalPooling2DLayerConfig): Layer;
static globalMaxPooling1d(config: LayerConfig): Layer;
static globalMaxPooling2d(config: GlobalPooling2DLayerConfig): Layer;
static maxPooling1d(config: Pooling1DLayerConfig): Layer;
static maxPooling2d(config: Pooling2DLayerConfig): Layer;
static gru(config: GRULayerConfig): Layer;
static gruCell(config: GRUCellLayerConfig): RNNCell;
static lstm(config: LSTMLayerConfig): Layer;
static lstmCell(config: LSTMCellLayerConfig): RNNCell;
static simpleRNN(config: SimpleRNNLayerConfig): Layer;
static simpleRNNCell(config: SimpleRNNCellLayerConfig): RNNCell;
static bidirectional(config: BidirectionalLayerConfig): Layer;
static timeDistributed(config: WrapperLayerConfig): Layer;
}
export declare class ConstraintExports {
static maxNorm(config: MaxNormConfig): MaxNorm;
static unitNorm(config: UnitNormConfig): UnitNorm;
static nonNeg(): NonNeg;
static minMaxNorm(config: MinMaxNormConfig): MinMaxNorm;
}
export declare class InitializerExports {
static zeros(): Zeros;
static ones(): Ones;
static constant(config: ConstantConfig): Constant;
static randomUniform(config: RandomUniformConfig): RandomUniform;
static randomNormal(config: RandomNormalConfig): RandomNormal;
static truncatedNormal(config: TruncatedNormalConfig): TruncatedNormal;
static identity(config: IdentityConfig): Identity;
static varianceScaling(config: VarianceScalingConfig): VarianceScaling;
static glorotUniform(config: SeedOnlyInitializerConfig): GlorotUniform;
static glorotNormal(config: SeedOnlyInitializerConfig): GlorotNormal;
static heNormal(config: SeedOnlyInitializerConfig): HeNormal;
static leCunNormal(config: SeedOnlyInitializerConfig): LeCunNormal;
}
export declare class RegularizerExports {
static l1l2(config?: L1L2Config): L1L2;
static l1(config?: L1Config): L1L2;
static l2(config?: L2Config): L1L2;
}
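A sketch of the new factory exports; the config field names (maxValue, seed, l2) are assumptions in the Keras style, since the config interfaces are not spelled out in this diff:

import { ConstraintExports, InitializerExports, RegularizerExports } from './exports';

const maxNorm = ConstraintExports.maxNorm({maxValue: 2});    // MaxNormConfig field assumed
const glorot = InitializerExports.glorotUniform({seed: 42}); // SeedOnlyInitializerConfig field assumed
const l2 = RegularizerExports.l2({l2: 0.01});                // L2Config field assumed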

dist/exports.js

@@ -10,4 +10,6 @@ "use strict";

var deeplearn_1 = require("deeplearn");
var constraints_1 = require("./constraints");
var topology_1 = require("./engine/topology");
var training_1 = require("./engine/training");
var initializers_1 = require("./initializers");
var convolutional_1 = require("./layers/convolutional");

@@ -23,2 +25,3 @@ var convolutional_depthwise_1 = require("./layers/convolutional_depthwise");

var models_1 = require("./models");
var regularizers_1 = require("./regularizers");
var ModelExports = (function () {

@@ -474,1 +477,202 @@ function ModelExports() {

exports.LayerExports = LayerExports;
var ConstraintExports = (function () {
function ConstraintExports() {
}
ConstraintExports.maxNorm = function (config) {
return new constraints_1.MaxNorm(config);
};
ConstraintExports.unitNorm = function (config) {
return new constraints_1.UnitNorm(config);
};
ConstraintExports.nonNeg = function () {
return new constraints_1.NonNeg();
};
ConstraintExports.minMaxNorm = function (config) {
return new constraints_1.MinMaxNorm(config);
};
__decorate([
deeplearn_1.doc({
heading: 'Constraints',
namespace: 'constraints',
useDocsFrom: 'MaxNorm',
configParamIndices: [0]
})
], ConstraintExports, "maxNorm", null);
__decorate([
deeplearn_1.doc({
heading: 'Constraints',
namespace: 'constraints',
useDocsFrom: 'UnitNorm',
configParamIndices: [0]
})
], ConstraintExports, "unitNorm", null);
__decorate([
deeplearn_1.doc({ heading: 'Constraints', namespace: 'constraints', useDocsFrom: 'NonNeg' })
], ConstraintExports, "nonNeg", null);
__decorate([
deeplearn_1.doc({
heading: 'Constraints',
namespace: 'constraints',
useDocsFrom: 'MinMaxNormConfig',
configParamIndices: [0]
})
], ConstraintExports, "minMaxNorm", null);
return ConstraintExports;
}());
exports.ConstraintExports = ConstraintExports;
var InitializerExports = (function () {
function InitializerExports() {
}
InitializerExports.zeros = function () {
return new initializers_1.Zeros();
};
InitializerExports.ones = function () {
return new initializers_1.Ones();
};
InitializerExports.constant = function (config) {
return new initializers_1.Constant(config);
};
InitializerExports.randomUniform = function (config) {
return new initializers_1.RandomUniform(config);
};
InitializerExports.randomNormal = function (config) {
return new initializers_1.RandomNormal(config);
};
InitializerExports.truncatedNormal = function (config) {
return new initializers_1.TruncatedNormal(config);
};
InitializerExports.identity = function (config) {
return new initializers_1.Identity(config);
};
InitializerExports.varianceScaling = function (config) {
return new initializers_1.VarianceScaling(config);
};
InitializerExports.glorotUniform = function (config) {
return new initializers_1.GlorotUniform(config);
};
InitializerExports.glorotNormal = function (config) {
return new initializers_1.GlorotNormal(config);
};
InitializerExports.heNormal = function (config) {
return new initializers_1.HeNormal(config);
};
InitializerExports.leCunNormal = function (config) {
return new initializers_1.LeCunNormal(config);
};
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'Zeros'
})
], InitializerExports, "zeros", null);
__decorate([
deeplearn_1.doc({ heading: 'Initializers', namespace: 'initializers', useDocsFrom: 'Ones' })
], InitializerExports, "ones", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'Constant',
configParamIndices: [0]
})
], InitializerExports, "constant", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'RandomUniform',
configParamIndices: [0]
})
], InitializerExports, "randomUniform", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'RandomNormal',
configParamIndices: [0]
})
], InitializerExports, "randomNormal", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'TruncatedNormal',
configParamIndices: [0]
})
], InitializerExports, "truncatedNormal", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'Identity',
configParamIndices: [0]
})
], InitializerExports, "identity", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'VarianceScaling',
configParamIndices: [0]
})
], InitializerExports, "varianceScaling", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'GlorotUniform',
configParamIndices: [0]
})
], InitializerExports, "glorotUniform", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'GlorotNormal',
configParamIndices: [0]
})
], InitializerExports, "glorotNormal", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'HeNormal',
configParamIndices: [0]
})
], InitializerExports, "heNormal", null);
__decorate([
deeplearn_1.doc({
heading: 'Initializers',
namespace: 'initializers',
useDocsFrom: 'LeCunNormal',
configParamIndices: [0]
})
], InitializerExports, "leCunNormal", null);
return InitializerExports;
}());
exports.InitializerExports = InitializerExports;
var RegularizerExports = (function () {
function RegularizerExports() {
}
RegularizerExports.l1l2 = function (config) {
return new regularizers_1.L1L2(config);
};
RegularizerExports.l1 = function (config) {
return regularizers_1.l1(config);
};
RegularizerExports.l2 = function (config) {
return regularizers_1.l2(config);
};
__decorate([
deeplearn_1.doc({ heading: 'Regularizers', namespace: 'regularizers', useDocsFrom: 'L1L2' })
], RegularizerExports, "l1l2", null);
__decorate([
deeplearn_1.doc({ heading: 'Regularizers', namespace: 'regularizers', useDocsFrom: 'L1L2' })
], RegularizerExports, "l1", null);
__decorate([
deeplearn_1.doc({ heading: 'Regularizers', namespace: 'regularizers', useDocsFrom: 'L1L2' })
], RegularizerExports, "l2", null);
return RegularizerExports;
}());
exports.RegularizerExports = RegularizerExports;
dist/index.d.ts

import * as dl from 'deeplearn';
import * as backend from './backend/deeplearnjs_backend';
import { LayerExports, ModelExports } from './exports';
import * as optimizers from './optimizers';
import { ConstraintExports, InitializerExports, LayerExports, ModelExports, RegularizerExports } from './exports';
export { Callback, CallbackList, CustomCallback, CustomCallbackConfig, Logs } from './callbacks';

@@ -9,4 +8,5 @@ export { Model } from './engine/training';

export { SymbolicTensor } from './types';
export { version } from './version';
export { dl };
export { backend, optimizers };
export { backend };
export declare const model: typeof ModelExports.model;

@@ -18,1 +18,4 @@ export declare const sequential: typeof ModelExports.sequential;

export declare const layers: typeof LayerExports;
export declare const constraints: typeof ConstraintExports;
export declare const initializers: typeof InitializerExports;
export declare const regularizers: typeof RegularizerExports;

dist/index.js

@@ -8,4 +8,2 @@ "use strict";

var exports_1 = require("./exports");
var optimizers = require("./optimizers");
exports.optimizers = optimizers;
var callbacks_1 = require("./callbacks");

@@ -21,2 +19,4 @@ exports.Callback = callbacks_1.Callback;

exports.SymbolicTensor = types_1.SymbolicTensor;
var version_1 = require("./version");
exports.version = version_1.version;
exports.model = exports_1.ModelExports.model;

@@ -28,1 +28,4 @@ exports.sequential = exports_1.ModelExports.sequential;

exports.layers = exports_1.LayerExports;
exports.constraints = exports_1.ConstraintExports;
exports.initializers = exports_1.InitializerExports;
exports.regularizers = exports_1.RegularizerExports;

dist/initializers.d.ts

@@ -119,3 +119,4 @@ import { Tensor } from 'deeplearn';

}
export declare type InitializerIdentifier = 'Constant' | 'GlorotNormal' | 'GlorotUniform' | 'HeNormal' | 'Identity' | 'LeCunNormal' | 'Ones' | 'RandomNormal' | 'RandomUniform' | 'TruncatedNormal' | 'VarianceScaling' | 'Zeros' | string;
export declare function serializeInitializer(initializer: Initializer): ConfigDictValue;
export declare function getInitializer(identifier: string | Initializer | ConfigDict): Initializer;
export declare function getInitializer(identifier: InitializerIdentifier | Initializer | ConfigDict): Initializer;
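The matching sketch for initializers (hypothetical usage; names as declared above):

import { InitializerIdentifier, getInitializer } from './initializers';

// One of the new literal options; an Initializer instance or a ConfigDict
// is also accepted per the overload above.
const id: InitializerIdentifier = 'GlorotUniform';
const glorotUniform = getInitializer(id);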

dist/initializers.js

@@ -12,2 +12,8 @@ "use strict";

})();
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });

@@ -50,2 +56,5 @@ var deeplearn_1 = require("deeplearn");

};
Initializer = __decorate([
deeplearn_1.doc({ heading: 'Initializers', subheading: 'Classes', namespace: 'initializers' })
], Initializer);
return Initializer;

@@ -52,0 +61,0 @@ }());

dist/layers/convolutional_depthwise.d.ts

import { Tensor } from 'deeplearn';
import { Constraint } from '../constraints';
import { Initializer } from '../initializers';
import { Regularizer } from '../regularizers';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Shape } from '../types';

@@ -10,5 +10,5 @@ import { Conv2D, ConvLayerConfig } from './convolutional';

depthMultiplier?: number;
depthwiseInitializer?: string | Initializer;
depthwiseConstraint?: string | Constraint;
depthwiseRegularizer?: string | Regularizer;
depthwiseInitializer?: InitializerIdentifier | Initializer;
depthwiseConstraint?: ConstraintIdentifier | Constraint;
depthwiseRegularizer?: RegularizerIdentifier | Regularizer;
}

@@ -15,0 +15,0 @@ export declare class DepthwiseConv2D extends Conv2D {

dist/layers/convolutional.d.ts

import { Tensor } from 'deeplearn';
import { ActivationFn } from '../activations';
import { DataFormat, PaddingMode } from '../common';
import { Constraint } from '../constraints';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { Layer, LayerConfig } from '../engine/topology';
import { Initializer } from '../initializers';
import { Regularizer } from '../regularizers';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Shape } from '../types';

@@ -19,9 +19,9 @@ import { ConfigDict, LayerVariable } from '../types';

useBias?: boolean;
kernelInitializer?: string | Initializer;
biasInitializer?: string | Initializer;
kernelConstraint?: string | Constraint;
biasConstraint?: string | Constraint;
kernelRegularizer?: string | Regularizer;
biasRegularizer?: string | Regularizer;
activityRegularizer?: string | Regularizer;
kernelInitializer?: InitializerIdentifier | Initializer;
biasInitializer?: InitializerIdentifier | Initializer;
kernelConstraint?: ConstraintIdentifier | Constraint;
biasConstraint?: ConstraintIdentifier | Constraint;
kernelRegularizer?: RegularizerIdentifier | Regularizer;
biasRegularizer?: RegularizerIdentifier | Regularizer;
activityRegularizer?: RegularizerIdentifier | Regularizer;
}

@@ -28,0 +28,0 @@ export declare abstract class Conv extends Layer {

dist/layers/core.d.ts

import { Tensor } from 'deeplearn';
import { ActivationFn } from '../activations';
import { Constraint } from '../constraints';
import { ActivationFn, ActivationIdentifier } from '../activations';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { Layer, LayerConfig } from '../engine/topology';
import { Initializer } from '../initializers';
import { Regularizer } from '../regularizers';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Shape } from '../types';

@@ -26,12 +26,12 @@ import { ConfigDict } from '../types';

units: number;
activation?: string;
activation?: ActivationIdentifier;
useBias?: boolean;
kernelInitializer?: string | Initializer;
biasInitializer?: string | Initializer;
kernelInitializer?: InitializerIdentifier | Initializer;
biasInitializer?: InitializerIdentifier | Initializer;
inputDim?: number;
kernelConstraint?: string | Constraint;
biasConstraint?: string | Constraint;
kernelRegularizer?: string | Regularizer;
biasRegularizer?: string | Regularizer;
activityRegularizer?: string | Regularizer;
kernelConstraint?: ConstraintIdentifier | Constraint;
biasConstraint?: ConstraintIdentifier | Constraint;
kernelRegularizer?: RegularizerIdentifier | Regularizer;
biasRegularizer?: RegularizerIdentifier | Regularizer;
activityRegularizer?: RegularizerIdentifier | Regularizer;
}

@@ -38,0 +38,0 @@ export declare class Dense extends Layer {
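A sketch of a DenseLayerConfig written against the narrowed types (hypothetical values; every field name comes from the declaration above):

import { DenseLayerConfig } from './layers/core';

const config: DenseLayerConfig = {
  units: 32,
  activation: 'relu',                 // ActivationIdentifier instead of bare string
  kernelInitializer: 'GlorotUniform', // InitializerIdentifier | Initializer
  kernelConstraint: 'MaxNorm',        // ConstraintIdentifier | Constraint
};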

dist/layers/embeddings.d.ts

import { Tensor } from 'deeplearn';
import { Constraint } from '../constraints';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { Layer, LayerConfig } from '../engine/topology';
import { Initializer } from '../initializers';
import { Regularizer } from '../regularizers';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Shape } from '../types';

@@ -11,6 +11,6 @@ import { ConfigDict } from '../types';

outputDim: number;
embeddingsInitializer?: string | Initializer;
embeddingsRegularizer?: string | Regularizer;
activityRegularizer?: string | Regularizer;
embeddingsConstraint?: string | Constraint;
embeddingsInitializer?: InitializerIdentifier | Initializer;
embeddingsRegularizer?: RegularizerIdentifier | Regularizer;
activityRegularizer?: RegularizerIdentifier | Regularizer;
embeddingsConstraint?: ConstraintIdentifier | Constraint;
maskZero?: boolean;

@@ -17,0 +17,0 @@ inputLength?: number | number[];

dist/layers/normalization.d.ts

import { Tensor } from 'deeplearn';
import * as constraints from '../constraints';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { Layer, LayerConfig } from '../engine/topology';
import * as initializers from '../initializers';
import * as regularizers from '../regularizers';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Shape } from '../types';

@@ -14,10 +14,10 @@ import { ConfigDict } from '../types';

scale?: boolean;
betaInitializer?: string | initializers.Initializer;
gammaInitializer?: string | initializers.Initializer;
movingMeanInitializer?: string | initializers.Initializer;
movingVarianceInitializer?: string | initializers.Initializer;
betaConstraint?: string | constraints.Constraint;
gammaConstraint?: string | constraints.Constraint;
betaRegularizer?: string | regularizers.Regularizer;
gammaRegularizer?: string | regularizers.Regularizer;
betaInitializer?: InitializerIdentifier | Initializer;
gammaInitializer?: InitializerIdentifier | Initializer;
movingMeanInitializer?: InitializerIdentifier | Initializer;
movingVarianceInitializer?: InitializerIdentifier | Initializer;
betaConstraint?: ConstraintIdentifier | Constraint;
gammaConstraint?: ConstraintIdentifier | Constraint;
betaRegularizer?: RegularizerIdentifier | Regularizer;
gammaRegularizer?: RegularizerIdentifier | Regularizer;
}

@@ -24,0 +24,0 @@ export declare class BatchNormalization extends Layer {

dist/layers/normalization.js

@@ -15,7 +15,7 @@ "use strict";

var K = require("../backend/deeplearnjs_backend");
var constraints = require("../constraints");
var constraints_1 = require("../constraints");
var topology_1 = require("../engine/topology");
var errors_1 = require("../errors");
var initializers = require("../initializers");
var regularizers = require("../regularizers");
var initializers_1 = require("../initializers");
var regularizers_1 = require("../regularizers");
var generic_utils = require("../utils/generic_utils");

@@ -32,15 +32,12 @@ var BatchNormalization = (function (_super) {

_this.scale = config.scale == null ? true : config.scale;
_this.betaInitializer =
initializers.getInitializer(config.betaInitializer || 'Zeros');
_this.gammaInitializer =
initializers.getInitializer(config.gammaInitializer || 'Ones');
_this.betaInitializer = initializers_1.getInitializer(config.betaInitializer || 'Zeros');
_this.gammaInitializer = initializers_1.getInitializer(config.gammaInitializer || 'Ones');
_this.movingMeanInitializer =
initializers.getInitializer(config.movingMeanInitializer || 'Zeros');
initializers_1.getInitializer(config.movingMeanInitializer || 'Zeros');
_this.movingVarianceInitializer =
initializers.getInitializer(config.movingVarianceInitializer || 'Ones');
_this.betaConstraint = constraints.getConstraint(config.betaConstraint);
_this.gammaConstraint = constraints.getConstraint(config.gammaConstraint);
_this.betaRegularizer = regularizers.getRegularizer(config.betaRegularizer);
_this.gammaRegularizer =
regularizers.getRegularizer(config.gammaRegularizer);
initializers_1.getInitializer(config.movingVarianceInitializer || 'Ones');
_this.betaConstraint = constraints_1.getConstraint(config.betaConstraint);
_this.gammaConstraint = constraints_1.getConstraint(config.gammaConstraint);
_this.betaRegularizer = regularizers_1.getRegularizer(config.betaRegularizer);
_this.gammaRegularizer = regularizers_1.getRegularizer(config.gammaRegularizer);
return _this;

@@ -110,10 +107,10 @@ }

scale: this.scale,
betaInitializer: initializers.serializeInitializer(this.betaInitializer),
gammaInitializer: initializers.serializeInitializer(this.gammaInitializer),
movingMeanInitializer: initializers.serializeInitializer(this.movingMeanInitializer),
movingVarianceInitializer: initializers.serializeInitializer(this.movingVarianceInitializer),
betaRegularizer: regularizers.serializeRegularizer(this.betaRegularizer),
gammaRegularizer: regularizers.serializeRegularizer(this.gammaRegularizer),
betaConstraint: constraints.serializeConstraint(this.betaConstraint),
gammaConstraint: constraints.serializeConstraint(this.gammaConstraint)
betaInitializer: initializers_1.serializeInitializer(this.betaInitializer),
gammaInitializer: initializers_1.serializeInitializer(this.gammaInitializer),
movingMeanInitializer: initializers_1.serializeInitializer(this.movingMeanInitializer),
movingVarianceInitializer: initializers_1.serializeInitializer(this.movingVarianceInitializer),
betaRegularizer: regularizers_1.serializeRegularizer(this.betaRegularizer),
gammaRegularizer: regularizers_1.serializeRegularizer(this.gammaRegularizer),
betaConstraint: constraints_1.serializeConstraint(this.betaConstraint),
gammaConstraint: constraints_1.serializeConstraint(this.gammaConstraint)
};

@@ -120,0 +117,0 @@ var baseConfig = _super.prototype.getConfig.call(this);

dist/layers/pooling.d.ts

@@ -29,3 +29,3 @@ import { Tensor } from 'deeplearn';

export interface Pooling2DLayerConfig extends LayerConfig {
poolSize?: [number, number];
poolSize?: number | [number, number];
strides?: [number, number];

@@ -32,0 +32,0 @@ padding?: PaddingMode;

dist/layers/pooling.js

@@ -90,3 +90,5 @@ "use strict";

_this = _super.call(this, config) || this;
_this.poolSize = config.poolSize;
_this.poolSize = Array.isArray(config.poolSize) ?
config.poolSize :
[config.poolSize, config.poolSize];
_this.strides = config.strides == null ? _this.poolSize : config.strides;

@@ -93,0 +95,0 @@ _this.padding = config.padding == null ? common_1.PaddingMode.VALID : config.padding;
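With a scalar poolSize now normalized to a square window, these two configs are equivalent (sketch; layers is the LayerExports surface re-exported from the index, as shown earlier):

// Both produce a poolSize of [2, 2] internally.
layers.maxPooling2d({poolSize: 2});
layers.maxPooling2d({poolSize: [2, 2]});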

dist/layers/recurrent.d.ts

import { Tensor } from 'deeplearn';
import * as activations from '../activations';
import { Constraint } from '../constraints';
import { ActivationFn, ActivationIdentifier } from '../activations';
import { Constraint, ConstraintIdentifier } from '../constraints';
import { InputSpec } from '../engine/topology';
import { Layer, LayerConfig } from '../engine/topology';
import { Initializer } from '../initializers';
import { Regularizer } from '../regularizers';
import { Initializer, InitializerIdentifier } from '../initializers';
import { Regularizer, RegularizerIdentifier } from '../regularizers';
import { Shape, SymbolicTensor } from '../types';

@@ -57,13 +57,13 @@ import { ConfigDict, LayerVariable } from '../types';

units: number;
activation?: string;
activation?: ActivationIdentifier;
useBias?: boolean;
kernelInitializer?: string | Initializer;
recurrentInitializer?: string | Initializer;
biasInitializer?: string | Initializer;
kernelRegularizer?: string | Regularizer;
recurrentRegularizer?: string | Regularizer;
biasRegularizer?: string | Regularizer;
kernelConstraint?: string | Constraint;
recurrentConstraint?: string | Constraint;
biasConstraint?: string | Constraint;
kernelInitializer?: InitializerIdentifier | Initializer;
recurrentInitializer?: InitializerIdentifier | Initializer;
biasInitializer?: InitializerIdentifier | Initializer;
kernelRegularizer?: RegularizerIdentifier | Regularizer;
recurrentRegularizer?: RegularizerIdentifier | Regularizer;
biasRegularizer?: RegularizerIdentifier | Regularizer;
kernelConstraint?: ConstraintIdentifier | Constraint;
recurrentConstraint?: ConstraintIdentifier | Constraint;
biasConstraint?: ConstraintIdentifier | Constraint;
dropout?: number;

@@ -74,3 +74,3 @@ recurrentDropout?: number;

readonly units: number;
readonly activation: activations.ActivationFn;
readonly activation: ActivationFn;
readonly useBias: boolean;

@@ -105,13 +105,13 @@ readonly kernelInitializer: Initializer;

units: number;
activation?: string;
activation?: ActivationIdentifier;
useBias?: boolean;
kernelInitializer?: string | Initializer;
recurrentInitializer?: string | Initializer;
biasInitializer?: string | Initializer;
kernelRegularizer?: string | Regularizer;
recurrentRegularizer?: string | Regularizer;
biasRegularizer?: string | Regularizer;
kernelConstraint?: string | Constraint;
recurrentConstraint?: string | Constraint;
biasConstraint?: string | Constraint;
kernelInitializer?: InitializerIdentifier | Initializer;
recurrentInitializer?: InitializerIdentifier | Initializer;
biasInitializer?: InitializerIdentifier | Initializer;
kernelRegularizer?: RegularizerIdentifier | Regularizer;
recurrentRegularizer?: RegularizerIdentifier | Regularizer;
biasRegularizer?: RegularizerIdentifier | Regularizer;
kernelConstraint?: ConstraintIdentifier | Constraint;
recurrentConstraint?: ConstraintIdentifier | Constraint;
biasConstraint?: ConstraintIdentifier | Constraint;
dropout?: number;

@@ -124,3 +124,3 @@ recurrentDropout?: number;

readonly units: number;
readonly activation: activations.ActivationFn;
readonly activation: ActivationFn;
readonly useBias: boolean;

@@ -146,4 +146,4 @@ readonly kernelInitializer: Initializer;

readonly units: number;
readonly activation: activations.ActivationFn;
readonly recurrentActivation: activations.ActivationFn;
readonly activation: ActivationFn;
readonly recurrentActivation: ActivationFn;
readonly useBias: boolean;

@@ -185,3 +185,3 @@ readonly kernelInitializer: Initializer;

readonly units: number;
readonly activation: activations.ActivationFn;
readonly activation: ActivationFn;
readonly useBias: boolean;

@@ -204,3 +204,3 @@ readonly kernelInitializer: Initializer;

export interface LSTMCellLayerConfig extends SimpleRNNCellLayerConfig {
recurrentActivation?: string;
recurrentActivation?: ActivationIdentifier;
unitForgetBias?: boolean;

@@ -211,4 +211,4 @@ implementation?: number;

readonly units: number;
readonly activation: activations.ActivationFn;
readonly recurrentActivation: activations.ActivationFn;
readonly activation: ActivationFn;
readonly recurrentActivation: ActivationFn;
readonly useBias: boolean;

@@ -252,3 +252,3 @@ readonly kernelInitializer: Initializer;

readonly units: number;
readonly activation: activations.ActivationFn;
readonly activation: ActivationFn;
readonly useBias: boolean;

@@ -255,0 +255,0 @@ readonly kernelInitializer: Initializer;
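A sketch of an LSTM cell config under the new typings (hypothetical values; 'hardsigmoid' is one of the ActivationIdentifier literals):

import { LSTMCellLayerConfig } from './layers/recurrent';

const cellConfig: LSTMCellLayerConfig = {
  units: 8,
  activation: 'tanh',
  recurrentActivation: 'hardsigmoid', // now ActivationIdentifier instead of string
};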

dist/layers/recurrent.js

@@ -12,5 +12,12 @@ "use strict";

})();
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
var deeplearn_1 = require("deeplearn");
var _ = require("underscore");
var activations = require("../activations");
var activations_1 = require("../activations");
var K = require("../backend/deeplearnjs_backend");

@@ -369,2 +376,5 @@ var constraints_1 = require("../constraints");

}
RNNCell = __decorate([
deeplearn_1.doc({ heading: 'Layers', subheading: 'Classes' })
], RNNCell);
return RNNCell;

@@ -382,3 +392,3 @@ }(topology_2.Layer));

_this.units = config.units;
_this.activation = activations.getActivation(config.activation === undefined ? _this.DEFAULT_ACTIVATION :
_this.activation = activations_1.getActivation(config.activation === undefined ? _this.DEFAULT_ACTIVATION :
config.activation);

@@ -441,3 +451,3 @@ _this.useBias = config.useBias === undefined ? true : config.useBias;

units: this.units,
activation: activations.serializeActivation(this.activation),
activation: activations_1.serializeActivation(this.activation),
useBias: this.useBias,

@@ -580,3 +590,3 @@ kernelInitializer: initializers_1.serializeInitializer(this.kernelInitializer),

units: this.units,
activation: activations.serializeActivation(this.activation),
activation: activations_1.serializeActivation(this.activation),
useBias: this.useBias,

@@ -614,5 +624,5 @@ kernelInitializer: initializers_1.serializeInitializer(this.kernelInitializer),

_this.units = config.units;
_this.activation = activations.getActivation(config.activation === undefined ? _this.DEFAULT_ACTIVATION :
_this.activation = activations_1.getActivation(config.activation === undefined ? _this.DEFAULT_ACTIVATION :
config.activation);
_this.recurrentActivation = activations.getActivation(config.activation === undefined ? _this.DEFAULT_RECURRENT_ACTIVATION :
_this.recurrentActivation = activations_1.getActivation(config.activation === undefined ? _this.DEFAULT_RECURRENT_ACTIVATION :
config.recurrentActivation);

@@ -718,3 +728,3 @@ _this.useBias = config.useBias == null ? true : config.useBias;

units: this.units,
activation: activations.serializeActivation(this.activation),
activation: activations_1.serializeActivation(this.activation),
useBias: this.useBias,

@@ -869,3 +879,3 @@ kernelInitializer: initializers_1.serializeInitializer(this.kernelInitializer),

units: this.units,
activation: activations.serializeActivation(this.activation),
activation: activations_1.serializeActivation(this.activation),
useBias: this.useBias,

@@ -910,5 +920,5 @@ kernelInitializer: initializers_1.serializeInitializer(this.kernelInitializer),

_this.units = config.units;
_this.activation = activations.getActivation(config.activation === undefined ? _this.DEFAULT_ACTIVATION :
_this.activation = activations_1.getActivation(config.activation === undefined ? _this.DEFAULT_ACTIVATION :
config.activation);
_this.recurrentActivation = activations.getActivation(config.activation === undefined ? _this.DEFAULT_RECURRENT_ACTIVATION :
_this.recurrentActivation = activations_1.getActivation(config.activation === undefined ? _this.DEFAULT_RECURRENT_ACTIVATION :
config.recurrentActivation);

@@ -1045,3 +1055,3 @@ _this.useBias = config.useBias == null ? true : config.useBias;

units: this.units,
activation: activations.serializeActivation(this.activation),
activation: activations_1.serializeActivation(this.activation),
useBias: this.useBias,

@@ -1204,3 +1214,3 @@ kernelInitializer: initializers_1.serializeInitializer(this.kernelInitializer),

units: this.units,
activation: activations.serializeActivation(this.activation),
activation: activations_1.serializeActivation(this.activation),
useBias: this.useBias,

@@ -1207,0 +1217,0 @@ kernelInitializer: initializers_1.serializeInitializer(this.kernelInitializer),

dist/models.d.ts

@@ -22,3 +22,3 @@ import { Scalar, Tensor, WeightsManifestConfig } from 'deeplearn';

private _updatable;
constructor(config: SequentialConfig);
constructor(config?: SequentialConfig);
add(layer: Layer): void;

@@ -25,0 +25,0 @@ pop(): void;

dist/models.js

@@ -119,2 +119,3 @@ "use strict";

var _this = _super.call(this, { inputs: [], outputs: [] }) || this;
config = config || {};
_this.trainable = true;

@@ -121,0 +122,0 @@ _this._updatable = true;
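With the config argument optional (and defaulted above), a Sequential model can now be created bare and grown with add(); a sketch with hypothetical layer values:

import { LayerExports, ModelExports } from './exports';

const model = ModelExports.sequential(); // no SequentialConfig required
model.add(LayerExports.dense({units: 1}));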

dist/optimizers.d.ts

@@ -1,4 +0,4 @@

import { Scalar, Tensor } from 'deeplearn';
import { AdagradOptimizer, AdamOptimizer, Optimizer as CoreOptimizer, RMSPropOptimizer, Scalar, SGDOptimizer } from 'deeplearn';
import { ConfigDict, LayerVariable } from './types';
import * as generic_utils from './utils/generic_utils';
import { Constructor } from './utils/generic_utils';
export interface OptimizerConfig {

@@ -8,11 +8,13 @@ clipnorm?: number;

}
export declare abstract class Optimizer {
export declare abstract class LayersOptimizer {
clipnorm: number;
clipvalue: number;
weights: LayerVariable[];
constructor(config: OptimizerConfig);
abstract updateVariables(lossFn: () => Scalar, params: LayerVariable[]): void;
getGradients(lossFn: () => Scalar, params: LayerVariable[]): Tensor[];
protected optimizer: CoreOptimizer;
private readonly createdFromCoreOptimizer;
constructor(config: OptimizerConfig | CoreOptimizer);
protected abstract constructFromCoreOptimizer(optimizer: CoreOptimizer): void;
protected abstract constructFromConfig(config: OptimizerConfig): void;
getConfig(): ConfigDict;
static fromConfig<T>(cls: generic_utils.Constructor<T>, config: ConfigDict): T;
updateVariables(lossFn: () => Scalar, params: LayerVariable[]): void;
static fromConfig<T>(cls: Constructor<T>, config: ConfigDict): T;
}

@@ -25,12 +27,11 @@ export interface SGDConfig extends OptimizerConfig {

}
export declare class SGD extends Optimizer {
iterations: number;
export declare class SGD extends LayersOptimizer {
lr: number;
momentum: number;
momentumScalar: Scalar;
decay: number;
nesterov: boolean;
private momentsMap;
constructor(config: SGDConfig);
updateVariables(lossFn: () => Scalar, params: LayerVariable[]): void;
constructor(config: SGDConfig | SGDOptimizer);
constructFromConfig(config: SGDConfig): void;
protected constructFromCoreOptimizer(optimizer: CoreOptimizer): void;
getConfig(): ConfigDict;
}

@@ -45,17 +46,12 @@ export interface AdamConfig extends OptimizerConfig {

}
export declare class Adam extends Optimizer {
iterations: LayerVariable;
lr: LayerVariable;
beta1: LayerVariable;
beta2: LayerVariable;
decay: LayerVariable;
epsilon: Scalar;
initialDecay: number;
export declare class Adam extends LayersOptimizer {
lr: number;
beta1: number;
beta2: number;
decay: number;
epsilon: number;
amsgrad: boolean;
private oneFloat;
private oneInt;
private ms;
private vs;
constructor(config: AdamConfig);
updateVariables(lossFn: () => Scalar, params: LayerVariable[]): void;
constructor(config: AdamConfig | AdamOptimizer);
constructFromConfig(config: AdamConfig): void;
protected constructFromCoreOptimizer(optimizer: CoreOptimizer): void;
getConfig(): ConfigDict;

@@ -69,18 +65,31 @@ }

}
export declare class RMSProp extends Optimizer {
lr: LayerVariable;
rho: LayerVariable;
decay: LayerVariable;
iterations: LayerVariable;
epsilon: Scalar;
initialDecay: number;
weights: LayerVariable[];
updates: Tensor[];
constructor(config: RMSPropConfig);
updateVariables(lossFn: () => Scalar, params: LayerVariable[]): void;
export declare class RMSProp extends LayersOptimizer {
lr: number;
rho: number;
decay: number;
iterations: number;
epsilon: number;
constructor(config: RMSPropConfig | RMSPropOptimizer);
constructFromConfig(config: RMSPropConfig): void;
protected constructFromCoreOptimizer(optimizer: CoreOptimizer): void;
getConfig(): ConfigDict;
}
export declare const sgd: typeof SGD;
export interface AdagradConfig extends OptimizerConfig {
lr?: number;
epsilon?: number;
decay?: number;
}
export declare class Adagrad extends LayersOptimizer {
private lr;
private epsilon;
private decay;
constructor(config: AdagradConfig | AdagradOptimizer);
constructFromConfig(config: AdagradConfig): void;
constructFromCoreOptimizer(optimizer: CoreOptimizer): void;
getConfig(): ConfigDict;
}
export declare const adagrad: typeof Adagrad;
export declare const adam: typeof Adam;
export declare const rmsprop: typeof RMSProp;
export declare function get(identifier: string): generic_utils.Constructor<Optimizer>;
export declare const sgd: typeof SGD;
export declare function get(identifier: string | CoreOptimizer): Constructor<LayersOptimizer>;
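A sketch of the two construction paths the new LayersOptimizer hierarchy supports (hypothetical usage; train.sgd is the core factory the implementation below also uses):

import { train } from 'deeplearn';
import { SGD } from './optimizers';

// From a config dict, as before:
const fromConfig = new SGD({lr: 0.01});

// From a core deeplearn optimizer, newly supported:
const fromCore = new SGD(train.sgd(0.01));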

dist/optimizers.js

@@ -16,33 +16,21 @@ "use strict";

var errors_1 = require("./errors");
var types_1 = require("./types");
var types_2 = require("./types");
function clipNorm(gradient, clipnorm, norm) {
if (clipnorm <= 0) {
return gradient;
var generic_utils_1 = require("./utils/generic_utils");
var LayersOptimizer = (function () {
function LayersOptimizer(config) {
if (config instanceof deeplearn_1.Optimizer) {
this.createdFromCoreOptimizer = true;
this.constructFromCoreOptimizer(config);
}
else {
this.createdFromCoreOptimizer = false;
this.clipnorm = config.clipnorm;
this.clipvalue = config.clipvalue;
this.constructFromConfig(config);
}
}
if (norm >= clipnorm) {
return K.scalarTimesArray(K.getScalar(clipnorm / norm), gradient);
}
return gradient;
}
var Optimizer = (function () {
function Optimizer(config) {
this.clipnorm = config.clipnorm;
this.clipvalue = config.clipvalue;
}
Optimizer.prototype.getGradients = function (lossFn, params) {
var _this = this;
var grads = K.gradients(lossFn, params);
if (this.clipnorm != null && this.clipnorm > 0) {
var sumOfSquaredGrads = grads.map(function (g) { return K.sum(K.square(g)); });
var sumAcrossAllGrads = sumOfSquaredGrads.reduce(function (prevValue, curValue) { return prevValue + curValue.dataSync()[0]; }, 0);
var norm_1 = Math.sqrt(sumAcrossAllGrads);
grads = grads.map(function (g) { return clipNorm(g, _this.clipnorm, norm_1); });
LayersOptimizer.prototype.getConfig = function () {
if (this.createdFromCoreOptimizer) {
throw new errors_1.NotImplementedError('getConfig() for a LayersOptimizer constructed from a core ' +
'Optimizer is not supported yet.');
}
if (this.clipvalue != null && this.clipvalue > 0) {
grads = grads.map(function (g) { return K.clip(g, -_this.clipvalue, _this.clipvalue); });
}
return grads;
};
Optimizer.prototype.getConfig = function () {
var config = {};

@@ -57,154 +45,96 @@ if (this.clipnorm != null) {

};
Optimizer.fromConfig = function (cls, config) {
LayersOptimizer.prototype.updateVariables = function (lossFn, params) {
var variables = params.map(function (param) { return param.read(); });
this.optimizer.minimize(lossFn, false, variables);
};
LayersOptimizer.fromConfig = function (cls, config) {
return new cls(config);
};
return Optimizer;
return LayersOptimizer;
}());
exports.Optimizer = Optimizer;
exports.LayersOptimizer = LayersOptimizer;
var SGD = (function (_super) {
__extends(SGD, _super);
function SGD(config) {
var _this = _super.call(this, config) || this;
_this.momentsMap = {};
_this.iterations = 0;
_this.lr = (config.lr == null) ? 0.01 : config.lr;
_this.momentum = (config.momentum == null) ? 0.0 : config.momentum;
_this.momentumScalar = K.getScalar(_this.momentum);
_this.decay = (config.decay == null) ? 0.0 : config.decay;
_this.nesterov = (config.nesterov == null) ? false : config.nesterov;
if (_this.lr < 0) {
throw new errors_1.ValueError("Invalid lr (" + _this.lr + "). Must be >= 0 or undefined.");
return _super.call(this, config) || this;
}
SGD.prototype.constructFromConfig = function (config) {
this.lr = (config.lr == null) ? 0.01 : config.lr;
if (this.lr < 0) {
throw new errors_1.ValueError("Invalid lr (" + this.lr + "). Must be >= 0 or undefined.");
}
if (_this.momentum < 0) {
throw new errors_1.ValueError("Invalid momentum (" + _this.momentum + "). Must be >= 0 or undefined.");
this.momentum = (config.momentum == null) ? 0.0 : config.momentum;
if (this.momentum < 0) {
throw new errors_1.ValueError("Invalid momentum (" + this.momentum + "). Must be >= 0 or undefined.");
}
if (_this.decay < 0) {
throw new errors_1.ValueError("Invalid decay (" + _this.decay + "). Must be >= 0 or undefined.");
if (this.momentum !== 0) {
throw new errors_1.NotImplementedError('SGD momentum is not implemented yet.');
}
return _this;
}
SGD.prototype.updateVariables = function (lossFn, params) {
var variablesKey = params.map(function (x) { return x.id; }).join(':');
if (!(variablesKey in this.momentsMap)) {
var shapes = params.map(function (p) { return K.intShape(p); });
this.momentsMap[variablesKey] =
shapes.map(function (shape) { return K.zerosVariable(shape); });
this.decay = (config.decay == null) ? 0.0 : config.decay;
if (this.decay < 0) {
throw new errors_1.ValueError("Invalid decay (" + this.decay + "). Must be >= 0 or undefined.");
}
var moments = this.momentsMap[variablesKey];
var grads = this.getGradients(lossFn, params);
this.iterations++;
var lr = this.lr;
if (this.decay > 0) {
lr *= 1 / (1 + this.decay * this.iterations);
if (this.decay !== 0) {
throw new errors_1.NotImplementedError('SGD decay is not implemented yet');
}
var lrScalar = K.getScalar(lr);
for (var i = 0; i < params.length; i++) {
var param = params[i];
var gradient = grads[i];
var moment = moments[i];
var negLRXgradient = K.neg(K.scalarTimesArray(lrScalar, gradient));
var velocity = K.add(K.scalarTimesArray(this.momentumScalar, moment.read()), negLRXgradient);
K.update(moment, velocity);
var newParamValues = void 0;
if (this.nesterov) {
newParamValues = K.add(param.read(), K.add(K.scalarTimesArray(this.momentumScalar, velocity), negLRXgradient));
}
else {
newParamValues = K.add(param.read(), velocity);
}
K.update(param, newParamValues);
this.nesterov = (config.nesterov == null) ? false : config.nesterov;
if (this.nesterov !== false) {
throw new errors_1.NotImplementedError('SGD nesterov is not implemented yet');
}
this.optimizer = deeplearn_1.train.sgd(this.lr);
};
SGD.prototype.constructFromCoreOptimizer = function (optimizer) {
if (!(optimizer instanceof deeplearn_1.SGDOptimizer)) {
throw new errors_1.ValueError('Cannot construct SGD from a non-SGD core optimizer');
}
this.optimizer = optimizer;
};
SGD.prototype.getConfig = function () {
var config = {
lr: this.lr,
momentum: this.momentum,
decay: this.decay,
nesterov: this.nesterov,
};
var baseConfig = _super.prototype.getConfig.call(this);
Object.assign(config, baseConfig);
return config;
};
return SGD;
}(Optimizer));
}(LayersOptimizer));
exports.SGD = SGD;
generic_utils_1.ClassNameMap.register('SGD', SGD);
var Adam = (function (_super) {
__extends(Adam, _super);
function Adam(config) {
var _this = _super.call(this, config) || this;
K.nameScope(_this.constructor.name, function () {
_this.iterations =
new types_2.LayerVariable(K.getScalar(0), types_1.DType.int32, null, false);
_this.lr = new types_2.LayerVariable(K.getScalar(config.lr == null ? 0.001 : config.lr), null, 'lr', false);
_this.beta1 = new types_2.LayerVariable(K.getScalar(config.beta_1 == null ? 0.9 : config.beta_1), null, 'beta_1', false);
_this.beta2 = new types_2.LayerVariable(K.getScalar(config.beta_2 == null ? 0.999 : config.beta_2), null, 'beta_2', false);
_this.decay = new types_2.LayerVariable(K.getScalar(config.decay == null ? 0 : config.decay), null, 'decay', false);
});
_this.epsilon =
deeplearn_1.scalar(config.epsilon == null ? K.epsilon() : config.epsilon);
_this.initialDecay = config.decay == null ? 0 : config.decay;
_this.amsgrad = config.amsgrad;
_this.oneFloat = K.getScalar(1);
_this.oneInt = deeplearn_1.scalar(1, types_1.DType.int32);
return _this;
return _super.call(this, config) || this;
}
Adam.prototype.updateVariables = function (lossFn, params) {
var _this = this;
var grads = this.getGradients(lossFn, params);
var updates = [];
updates.push(function () {
K.update(_this.iterations, K.add(_this.iterations.read(), _this.oneInt));
});
var lr = this.lr;
var iterationsFloat = K.cast(this.iterations.read(), types_1.DType.float32);
if (this.initialDecay > 0) {
var lrMultiplier = K.divide(this.oneFloat, K.add(this.oneFloat, K.multiply(this.decay.read(), iterationsFloat)));
K.update(lr, K.multiply(lr.read(), lrMultiplier));
Adam.prototype.constructFromConfig = function (config) {
this.lr = config.lr == null ? 0.001 : config.lr;
this.beta1 = config.beta_1 == null ? 0.9 : config.beta_1;
this.beta2 = config.beta_2 == null ? 0.999 : config.beta_2;
this.epsilon = config.epsilon == null ? K.epsilon() : config.epsilon;
this.decay = config.decay == null ? 0 : config.decay;
if (this.decay !== 0.0) {
throw new errors_1.NotImplementedError('Adam decay is not implemented yet');
}
var t = K.add(this.iterations.read(), this.oneInt);
var oneMinusBeta2Pow = K.subtract(this.oneFloat, K.pow(this.beta2.read(), t));
var oneMinusBeta1Pow = K.subtract(this.oneFloat, K.pow(this.beta1.read(), t));
var lrT = K.multiply(this.lr.read(), K.divide(K.sqrt(oneMinusBeta2Pow), oneMinusBeta1Pow));
if (this.ms == null) {
this.ms = params.map(function (p) { return K.zerosVariable(p.shape, p.dtype); });
this.amsgrad = config.amsgrad == null ? false : config.amsgrad;
if (this.amsgrad !== false) {
throw new errors_1.NotImplementedError('Adam amsgrad is not implemented yet');
}
if (this.vs == null) {
this.vs = params.map(function (p) { return K.zerosVariable(p.shape, p.dtype); });
this.optimizer = deeplearn_1.train.adam(this.lr, this.beta1, this.beta2, this.epsilon);
};
Adam.prototype.constructFromCoreOptimizer = function (optimizer) {
if (!(optimizer instanceof deeplearn_1.AdamOptimizer)) {
throw new errors_1.ValueError('Cannot construct Adam from a non-Adam core optimizer');
}
if (this.amsgrad) {
throw new errors_1.NotImplementedError('The support for amsgrad in Adam optimizer is not implemented yet');
}
this.optimizer = optimizer;
};
Adam.prototype.getConfig = function () {
var config = {
lr: this.lr,
beta1: this.beta1,
beta2: this.beta2,
decay: this.decay,
epsilon: this.epsilon,
amsgrad: this.amsgrad

@@ -217,75 +147,65 @@ };

return Adam;
}(LayersOptimizer));
exports.Adam = Adam;
generic_utils_1.ClassNameMap.register('Adam', Adam);
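A minimal usage sketch (not part of the diff) of the refactored Adam wrapper, assuming the LayersOptimizer base class routes a plain config object to constructFromConfig and a core deeplearn optimizer instance to constructFromCoreOptimizer; the import paths are also assumptions:

import {train} from 'deeplearn';
import {Adam} from './optimizers';

// Config path: unsupported options are rejected up front.
const adam = new Adam({lr: 0.001, beta_1: 0.9, beta_2: 0.999});
// new Adam({lr: 0.001, decay: 0.01});  // throws NotImplementedError: decay not implemented

// Core-optimizer path: wraps an existing deeplearn optimizer after an instanceof check.
const wrapped = new Adam(train.adam(0.001, 0.9, 0.999));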
var RMSProp = (function (_super) {
__extends(RMSProp, _super);
function RMSProp(config) {
return _super.call(this, config) || this;
}
RMSProp.prototype.constructFromConfig = function (config) {
this.lr = config.lr == null ? 0.001 : config.lr;
this.rho = config.rho == null ? 0.9 : config.rho;
this.epsilon = config.epsilon == null ? K.epsilon() : config.epsilon;
if (config.decay != null) {
throw new errors_1.NotImplementedError('RMSProp decay is not implemented yet');
}
this.optimizer = deeplearn_1.train.rmsprop(this.lr, this.rho, null, this.epsilon);
};
RMSProp.prototype.constructFromCoreOptimizer = function (optimizer) {
if (!(optimizer instanceof deeplearn_1.RMSPropOptimizer)) {
throw new errors_1.ValueError('Cannot construct RMSProp from a non-RMSProp core optimizer');
}
this.optimizer = optimizer;
};
RMSProp.prototype.getConfig = function () {
var config = {
lr: this.lr,
rho: this.rho,
decay: this.decay,
epsilon: this.epsilon,
};
var baseConfig = _super.prototype.getConfig.call(this);
Object.assign(config, baseConfig);
return config;
};
return RMSProp;
}(LayersOptimizer));
exports.RMSProp = RMSProp;
generic_utils_1.ClassNameMap.register('RMSProp', RMSProp);
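getConfig now reports plain numbers instead of reading scalar tensors, and merges in the base class's config, so the result is directly serializable. A sketch; note that constructFromConfig above never assigns this.decay, so that field comes back undefined:

const rmsprop = new RMSProp({lr: 0.01, rho: 0.9});
const config = rmsprop.getConfig();
// e.g. {lr: 0.01, rho: 0.9, epsilon: K.epsilon(), decay: undefined, ...base config}
console.log(JSON.stringify(config));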
var Adagrad = (function (_super) {
__extends(Adagrad, _super);
function Adagrad(config) {
return _super.call(this, config) || this;
}
Adagrad.prototype.constructFromConfig = function (config) {
this.lr = config.lr == null ? 0.01 : config.lr;
this.epsilon = config.epsilon == null ? K.epsilon() : config.epsilon;
this.decay = config.decay == null ? 0 : config.decay;
if (this.decay !== 0) {
throw new errors_1.NotImplementedError('Adagrad decay is not implemented yet');
}
this.optimizer = deeplearn_1.train.adagrad(this.lr);
};
Adagrad.prototype.constructFromCoreOptimizer = function (optimizer) {
if (!(optimizer instanceof deeplearn_1.AdagradOptimizer)) {
throw new errors_1.ValueError('Cannot construct Adagrad from a non-Adagrad core optimizer');
}
this.optimizer = optimizer;
};
Adagrad.prototype.getConfig = function () {
var config = {
lr: this.lr,
decay: this.decay,
epsilon: this.epsilon,
};

@@ -296,15 +216,32 @@ var baseConfig = _super.prototype.getConfig.call(this);

};
return Adagrad;
}(LayersOptimizer));
exports.Adagrad = Adagrad;
generic_utils_1.ClassNameMap.register('Adagrad', Adagrad);
exports.adagrad = Adagrad;
exports.adam = Adam;
exports.rmsprop = RMSProp;
exports.sgd = SGD;
function get(identifier) {
var coreOptimizerToConstructorMap = {
'AdagradOptimizer': Adagrad,
'AdamOptimizer': Adam,
'RMSPropOptimizer': RMSProp,
'SGDOptimizer': SGD
};
var optimizerMap = { Adagrad: Adagrad, Adam: Adam, RMSProp: RMSProp, SGD: SGD, adagrad: exports.adagrad, adam: exports.adam, rmsprop: exports.rmsprop, sgd: exports.sgd };
if (typeof identifier === 'string') {
if (identifier in optimizerMap) {
return optimizerMap[identifier];
}
throw new errors_1.ValueError("Unknown Optimizer " + identifier);
}
throw new errors_1.ValueError("Unknown Optimizer " + identifier);
else {
var coreOptimizerTypeName = identifier.constructor.name;
if (coreOptimizerTypeName in coreOptimizerToConstructorMap) {
return coreOptimizerToConstructorMap[coreOptimizerTypeName];
}
throw new errors_1.ValueError("Unsupported core optimizer type: " + coreOptimizerTypeName);
}
}
exports.get = get;
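A sketch of get()'s two dispatch paths: strings (class names or the lowercase aliases) go through optimizerMap, while core optimizer instances are matched by constructor name; get returns the constructor, not an instance. Import paths are assumptions:

import {train} from 'deeplearn';
import {get} from './optimizers';

const ByName = get('adam');                             // optimizerMap path -> Adam
const ByInstance = get(train.adam(0.001, 0.9, 0.999));  // 'AdamOptimizer' -> Adam
// get('nadam');  // throws ValueError: Unknown Optimizer nadam

One design caveat: matching on identifier.constructor.name is fragile under minification, which can rename the core optimizer classes.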

@@ -8,2 +8,12 @@ import { Tensor } from 'deeplearn';

}
export interface L1L2Config {
l1?: number;
l2?: number;
}
export interface L1Config {
l1: number;
}
export interface L2Config {
l2: number;
}
export declare class L1L2 extends Regularizer {

@@ -14,3 +24,3 @@ private readonly l1;

private readonly hasL2;
constructor(config?: L1L2Config);
apply(x: LayerVariable): Tensor;

@@ -20,7 +30,7 @@ getConfig(): ConfigDict;

}
export declare function l1(config?: L1Config): L1L2;
export declare function l2(config: L2Config): L1L2;
export declare type RegularizerIdentifier = 'L1L2' | string;
export declare function serializeRegularizer(constraint: Regularizer): ConfigDictValue;
export declare function deserializeRegularizer(config: ConfigDict, customObjects?: ConfigDict): Regularizer;
export declare function getRegularizer(identifier: RegularizerIdentifier | ConfigDict | Regularizer): Regularizer;
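A sketch of the config-object regularizer API declared above (import path assumed); getRegularizer accepts a known identifier string, a config dict, or, presumably passed through unchanged, an existing Regularizer:

import {getRegularizer, L1L2} from './regularizers';

const byId = getRegularizer('L1L2');                         // RegularizerIdentifier
const byInstance = getRegularizer(new L1L2({l1: 0.01, l2: 0}));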

@@ -12,2 +12,8 @@ "use strict";

})();
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });

@@ -25,6 +31,6 @@ var deeplearn_1 = require("deeplearn");

__extends(L1L2, _super);
function L1L2(config) {
var _this = _super.call(this) || this;
var l1 = config == null || config.l1 == null ? 0.01 : config.l1;
var l2 = config == null || config.l2 == null ? 0.01 : config.l2;
_this.hasL1 = l1 !== 0;

@@ -36,2 +42,3 @@ _this.hasL2 = l2 !== 0;

}
L1L2_1 = L1L2;
L1L2.prototype.apply = function (x) {

@@ -51,24 +58,20 @@ var regularization = deeplearn_1.zeros([1]);

L1L2.fromConfig = function (cls, config) {
return new L1L2_1({ l1: config.l1, l2: config.l2 });
};
L1L2 = L1L2_1 = __decorate([
deeplearn_1.doc({ heading: 'Regularizers', namespace: 'regularizers' })
], L1L2);
return L1L2;
var L1L2_1;
}(Regularizer));
exports.L1L2 = L1L2;
generic_utils_1.ClassNameMap.register('L1L2', L1L2);
function l1(config) {
return new L1L2({ l1: config != null ? config.l1 : null, l2: 0 });
}
exports.l1 = l1;
function l2(config) {
return new L1L2({ l2: config != null ? config.l2 : null, l1: 0 });
}
exports.l2 = l2;
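A sketch of the new factory signatures: each forces the other penalty to zero, and omitting the config falls through to L1L2's 0.01 default. The positional l1_l2 helper is gone in this version, so combined penalties use L1L2 directly:

const l1Only = l1({l1: 0.05});                // l2 forced to 0
const l1Default = l1();                       // falls back to L1L2's 0.01 default
const l2Only = l2({l2: 0.01});                // l1 forced to 0
const both = new L1L2({l1: 0.01, l2: 0.01});  // replaces the removed l1_l2 helper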
function serializeRegularizer(constraint) {

@@ -75,0 +78,0 @@ return generic_utils_1.serializeKerasObject(constraint);

@@ -43,3 +43,3 @@ import * as dl from 'deeplearn';

readonly trainable: boolean;
protected readonly val: dl.Variable;
readonly constraint: Constraint;

@@ -46,0 +46,0 @@ constructor(val: Tensor | ConcreteTensor, dtype?: DType, name?: string, trainable?: boolean, constraint?: Constraint);
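With val now protected readonly, a subclass can still read the wrapped dl.Variable but can no longer rebind it. A hypothetical subclass for illustration (the class name and method are not from the source):

class ShapeLoggingVariable extends LayerVariable {
  logShape(): void {
    console.log(this.val.shape);   // reading the protected member is allowed
    // this.val = otherVariable;   // compile error: 'val' is read-only
  }
}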

{
"name": "@tensorflow/tfjs-layers",
"version": "0.0.1",
"version": "0.0.2",
"description": "TensorFlow layers API in JavaScript",

@@ -38,2 +38,3 @@ "private": false,

"build-npm": "./scripts/build-npm.sh",
"publish-npm": "./scripts/publish-npm.sh",
"test": "karma start",

@@ -40,0 +41,0 @@ "lint": "tslint -p . --type-check -t verbose"

