@tensorflow/tfjs-node npm package: version comparison

Comparing version 1.0.0-alpha2 to 1.0.1

binding.gyp

dist/callbacks_test.js

@@ -237,3 +237,3 @@ "use strict";

yDataset = tf.data.array([[1], [2], [3], [4]]).map(function (y) { return tf.tensor2d(y, [1, 1]); });
dataset = tf.data.zip([xDataset, yDataset]).repeat(epochs);
dataset = tf.data.zip({ xs: xDataset, ys: yDataset }).repeat(epochs);
model = tf.sequential();

@@ -274,12 +274,12 @@ model.add(tf.layers.dense({ units: 1, inputShape: [2] }));

yDataset = tf.data.array([[1], [2], [3], [4]]).map(function (y) { return tf.tensor2d(y, [1, 1]); });
dataset = tf.data.zip([xDataset, yDataset]).repeat(epochs);
dataset = tf.data.zip({ xs: xDataset, ys: yDataset }).repeat(epochs);
model = tf.sequential();
model.add(tf.layers.dense({ units: 1, inputShape: [2] }));
model.compile({ loss: 'meanSquaredError', optimizer: 'sgd' });
// `batchesPerEpoch` is not specified. Instead, `fitDataset()` relies on the
// `done` field being `true` to terminate the epoch(s).
// `batchesPerEpoch` is not specified. Instead, `fitDataset()` relies on
// the `done` field being `true` to terminate the epoch(s).
return [4 /*yield*/, model.fitDataset(dataset, { epochs: epochs, verbose: 1 })];
case 1:
// `batchesPerEpoch` is not specified. Instead, `fitDataset()` relies on the
// `done` field being `true` to terminate the epoch(s).
// `batchesPerEpoch` is not specified. Instead, `fitDataset()` relies on
// the `done` field being `true` to terminate the epoch(s).
_a.sent();
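
The two hunks above reflect a 1.x API change: `tf.data.zip()` now takes a named dictionary (`{xs: ..., ys: ...}`) rather than a positional array, and `fitDataset()` can end each epoch when the dataset reports `done` instead of requiring `batchesPerEpoch`. A minimal sketch of the updated call pattern (the x values are illustrative; the excerpt only shows the y dataset):

```js
const tf = require('@tensorflow/tfjs-node');

async function fitWithZippedDataset() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 1, inputShape: [2]}));
  model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});

  const xDataset = tf.data.array([[1, 2], [3, 4], [5, 6], [7, 8]])
      .map(x => tf.tensor2d(x, [1, 2]));
  const yDataset = tf.data.array([[1], [2], [3], [4]])
      .map(y => tf.tensor2d(y, [1, 1]));

  // New form: zip with a named dictionary matching the {xs, ys} elements
  // that fitDataset() expects.
  const dataset = tf.data.zip({xs: xDataset, ys: yDataset}).repeat(3);

  // `batchesPerEpoch` is omitted; each epoch ends when the dataset is `done`.
  await model.fitDataset(dataset, {epochs: 3, verbose: 1});
}
```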

@@ -314,3 +314,3 @@ expect(consoleMessages.length).toEqual(4);

yDataset = tf.data.array([[1], [2], [3], [4]]).map(function (y) { return tf.tensor2d(y, [1, 1]); });
dataset = tf.data.zip([xDataset, yDataset]);
dataset = tf.data.zip({ xs: xDataset, ys: yDataset });
model = tf.sequential();

@@ -317,0 +317,0 @@ model.add(tf.layers.dense({ units: 1, inputShape: [2] }));

@@ -46,1 +46,90 @@ /**

export declare function getDisplayDecimalPlaces(x: number): number;
export interface TensorBoardCallbackArgs {
/**
* The frequency at which loss and metric values are written to logs.
*
* Currently supported options are:
*
* - 'batch': Write logs at the end of every batch of training, in addition
* to the end of every epoch of training.
* - 'epoch': Write logs at the end of every epoch of training.
*
* Note that writing logs too often slows down the training.
*
* Default: 'epoch'.
*/
updateFreq?: 'batch' | 'epoch';
}
/**
* Callback for logging to TensorBoard during training.
*
* Users are expected to access this class through the `tf.node.tensorBoard()`
* factory method instead.
*/
export declare class TensorBoardCallback extends CustomCallback {
readonly logdir: string;
private trainWriter;
private valWriter;
private batchesSeen;
private epochsSeen;
private readonly args;
constructor(logdir?: string, args?: TensorBoardCallbackArgs);
private logMetrics;
private ensureTrainWriterCreated;
private ensureValWriterCreated;
}
/**
* Callback for logging to TensorBoard during training.
*
* Writes the loss and metric values (if any) to the specified log directory
* (`logdir`) which can be ingested and visualized by TensorBoard.
* This callback is usually passed as a callback to `tf.Model.fit()` or
* `tf.Model.fitDataset()` calls during model training. The frequency at which
* the values are logged can be controlled with the `updateFreq` field of the
* configuration object (2nd argument).
*
* Usage example:
* ```js
* // Construct a toy multilayer-perceptron regressor for demo purposes.
* const model = tf.sequential();
* model.add(
* tf.layers.dense({units: 100, activation: 'relu', inputShape: [200]}));
* model.add(tf.layers.dense({units: 1}));
* model.compile({
* loss: 'meanSquaredError',
* optimizer: 'sgd',
* metrics: ['MAE']
* });
*
* // Generate some random fake data for demo purposes.
* const xs = tf.randomUniform([10000, 200]);
* const ys = tf.randomUniform([10000, 1]);
* const valXs = tf.randomUniform([1000, 200]);
* const valYs = tf.randomUniform([1000, 1]);
*
* // Start model training process.
* await model.fit(xs, ys, {
* epochs: 100,
* validationData: [valXs, valYs],
* // Add the tensorBoard callback here.
* callbacks: tf.node.tensorBoard('/tmp/fit_logs_1')
* });
* ```
*
* Then you can use the following commands to point tensorboard
* to the logdir:
*
* ```sh
* pip install tensorboard # Unless you've already installed it.
* tensorboard --logdir /tmp/fit_logs_1
* ```
*
* @param logdir Directory to which the logs will be written.
* @param args Optional configuration arguments.
* @returns An instance of `TensorBoardCallback`, which is a subclass of
* `tf.CustomCallback`.
*/
/**
* @doc {heading: 'TensorBoard', namespace: 'node'}
*/
export declare function tensorBoard(logdir?: string, args?: TensorBoardCallbackArgs): TensorBoardCallback;
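
A small sketch of the `updateFreq` option documented above, switching from the default `'epoch'` to `'batch'` (the log directory and training data are placeholders):

```js
const tf = require('@tensorflow/tfjs-node');

async function trainWithBatchLogging() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 1, inputShape: [10]}));
  model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});

  const xs = tf.randomUniform([100, 10]);
  const ys = tf.randomUniform([100, 1]);

  await model.fit(xs, ys, {
    epochs: 2,
    // Write logs at the end of every batch as well as every epoch.
    // As noted above, logging this often can slow training down.
    callbacks: tf.node.tensorBoard('/tmp/batch_logs', {updateFreq: 'batch'})
  });
}
```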

@@ -19,5 +19,8 @@ "use strict";

var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {

@@ -66,3 +69,5 @@ extendStatics(d, b);

var tfjs_1 = require("@tensorflow/tfjs");
var path = require("path");
var ProgressBar = require("progress");
var tensorboard_1 = require("./tensorboard");
// A helper class created for testing with the jasmine `spyOn` method, which

@@ -208,1 +213,136 @@ // operates only on member methods of objects.

exports.getDisplayDecimalPlaces = getDisplayDecimalPlaces;
/**
* Callback for logging to TensorBoard during training.
*
* Users are expected to access this class through the `tf.node.tensorBoard()`
* factory method instead.
*/
var TensorBoardCallback = /** @class */ (function (_super) {
__extends(TensorBoardCallback, _super);
function TensorBoardCallback(logdir, args) {
if (logdir === void 0) { logdir = './logs'; }
var _this = _super.call(this, {
onBatchEnd: function (batch, logs) { return __awaiter(_this, void 0, void 0, function () {
return __generator(this, function (_a) {
this.batchesSeen++;
if (this.args.updateFreq !== 'epoch') {
this.logMetrics(logs, 'batch_', this.batchesSeen);
}
return [2 /*return*/];
});
}); },
onEpochEnd: function (epoch, logs) { return __awaiter(_this, void 0, void 0, function () {
return __generator(this, function (_a) {
this.epochsSeen++;
this.logMetrics(logs, 'epoch_', this.epochsSeen);
return [2 /*return*/];
});
}); },
onTrainEnd: function (logs) { return __awaiter(_this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (this.trainWriter != null) {
this.trainWriter.flush();
}
if (this.valWriter != null) {
this.valWriter.flush();
}
return [2 /*return*/];
});
}); }
}) || this;
_this.logdir = logdir;
_this.args = args == null ? {} : args;
if (_this.args.updateFreq == null) {
_this.args.updateFreq = 'epoch';
}
tfjs_1.util.assert(['batch', 'epoch'].indexOf(_this.args.updateFreq) !== -1, function () { return "Expected updateFreq to be 'batch' or 'epoch', but got " +
("" + _this.args.updateFreq); });
_this.batchesSeen = 0;
_this.epochsSeen = 0;
return _this;
}
TensorBoardCallback.prototype.logMetrics = function (logs, prefix, step) {
for (var key in logs) {
if (key === 'batch' || key === 'size' || key === 'num_steps') {
continue;
}
var VAL_PREFIX = 'val_';
if (key.startsWith(VAL_PREFIX)) {
this.ensureValWriterCreated();
var scalarName = prefix + key.slice(VAL_PREFIX.length);
this.valWriter.scalar(scalarName, logs[key], step);
}
else {
this.ensureTrainWriterCreated();
this.trainWriter.scalar("" + prefix + key, logs[key], step);
}
}
};
TensorBoardCallback.prototype.ensureTrainWriterCreated = function () {
this.trainWriter = tensorboard_1.summaryFileWriter(path.join(this.logdir, 'train'));
};
TensorBoardCallback.prototype.ensureValWriterCreated = function () {
this.valWriter = tensorboard_1.summaryFileWriter(path.join(this.logdir, 'val'));
};
return TensorBoardCallback;
}(tfjs_1.CustomCallback));
exports.TensorBoardCallback = TensorBoardCallback;
/**
* Callback for logging to TensorBoard during training.
*
* Writes the loss and metric values (if any) to the specified log directory
* (`logdir`) which can be ingested and visualized by TensorBoard.
* This callback is usually passed as a callback to `tf.Model.fit()` or
* `tf.Model.fitDataset()` calls during model training. The frequency at which
* the values are logged can be controlled with the `updateFreq` field of the
* configuration object (2nd argument).
*
* Usage example:
* ```js
* // Construct a toy multilayer-perceptron regressor for demo purposes.
* const model = tf.sequential();
* model.add(
* tf.layers.dense({units: 100, activation: 'relu', inputShape: [200]}));
* model.add(tf.layers.dense({units: 1}));
* model.compile({
* loss: 'meanSquaredError',
* optimizer: 'sgd',
* metrics: ['MAE']
* });
*
* // Generate some random fake data for demo purposes.
* const xs = tf.randomUniform([10000, 200]);
* const ys = tf.randomUniform([10000, 1]);
* const valXs = tf.randomUniform([1000, 200]);
* const valYs = tf.randomUniform([1000, 1]);
*
* // Start model training process.
* await model.fit(xs, ys, {
* epochs: 100,
* validationData: [valXs, valYs],
* // Add the tensorBoard callback here.
* callbacks: tf.node.tensorBoard('/tmp/fit_logs_1')
* });
* ```
*
* Then you can use the following commands to point tensorboard
* to the logdir:
*
* ```sh
* pip install tensorboard # Unless you've already installed it.
* tensorboard --logdir /tmp/fit_logs_1
* ```
*
* @param logdir Directory to which the logs will be written.
* @param args Optional configuration arguments.
* @returns An instance of `TensorBoardCallback`, which is a subclass of
* `tf.CustomCallback`.
*/
/**
* @doc {heading: 'TensorBoard', namespace: 'node'}
*/
function tensorBoard(logdir, args) {
if (logdir === void 0) { logdir = './logs'; }
return new TensorBoardCallback(logdir, args);
}
exports.tensorBoard = tensorBoard;

@@ -42,7 +42,7 @@ "use strict";

}());
describe('tf.fromPixels', function () {
describe('tf.browser.fromPixels with polyfills', function () {
it('accepts a canvas-like element', function () {
var c = new MockCanvas(2, 2);
// tslint:disable-next-line:no-any
var t = tf.fromPixels(c);
var t = tf.browser.fromPixels(c);
expect(t.dtype).toBe('int32');

@@ -55,3 +55,3 @@ expect(t.shape).toEqual([2, 2, 3]);

// tslint:disable-next-line:no-any
var t = tf.fromPixels(c, 4);
var t = tf.browser.fromPixels(c, 4);
expect(t.dtype).toBe('int32');

@@ -64,5 +64,5 @@ expect(t.shape).toEqual([2, 2, 4]);

// tslint:disable-next-line:no-any
expect(function () { return tf.fromPixels(c); })
expect(function () { return tf.browser.fromPixels(c); })
.toThrowError(/When running in node, pixels must be an HTMLCanvasElement/);
});
});
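
The test rename above tracks the 1.0 namespace move from `tf.fromPixels` to `tf.browser.fromPixels`. A minimal sketch of the new call site in browser code (assumes tfjs is loaded on the page and the canvas element exists; as the test asserts, running in Node requires an `HTMLCanvasElement`-like input):

```js
// Browser context.
const canvas = document.getElementById('input-canvas');
const rgb = tf.browser.fromPixels(canvas);      // default 3 channels, int32 dtype
const rgba = tf.browser.fromPixels(canvas, 4);  // request 4 channels
console.log(rgb.shape, rgb.dtype, rgba.shape);
```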

@@ -18,9 +18,12 @@ "use strict";

*/
var __assign = (this && this.__assign) || Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};

@@ -27,0 +30,0 @@ function __export(m) {

@@ -54,5 +54,7 @@ "use strict";

}
tfjs_1.util.assert(value > -INT32_MAX && value < INT32_MAX - 1, "Got a value outside of the bound of values supported for int64 " +
("dtype ([-" + INT32_MAX + ", " + (INT32_MAX - 1) + "]): " + value));
tfjs_1.util.assert(Number.isInteger(value), "Expected value to be an integer, but got " + value);
tfjs_1.util.assert(value > -INT32_MAX && value < INT32_MAX - 1, function () {
return "Got a value outside of the bound of values supported for int64 " +
("dtype ([-" + INT32_MAX + ", " + (INT32_MAX - 1) + "]): " + value);
});
tfjs_1.util.assert(Number.isInteger(value), function () { return "Expected value to be an integer, but got " + value; });
// We use two int32 elements to represent a int64 value. This assumes

@@ -59,0 +61,0 @@ // little endian, which is checked above.
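
The change above converts `util.assert` messages from eagerly built strings into closures, so the message string is only constructed when the assertion actually fails. A small sketch of the new signature, mirroring the int64-bounds check shown (the `INT32_MAX` value is an assumption; the constant's definition is not part of this excerpt):

```js
const tf = require('@tensorflow/tfjs-node');

const INT32_MAX = 2147483648;  // assumed 2**31, matching the bounds in the hunk

function checkInt64Value(value) {
  tf.util.assert(
      value > -INT32_MAX && value < INT32_MAX - 1,
      // The message is now a function, evaluated lazily only on failure.
      () => `Got a value outside of the bound of values supported for int64 ` +
          `dtype ([-${INT32_MAX}, ${INT32_MAX - 1}]): ${value}`);
  tf.util.assert(
      Number.isInteger(value),
      () => `Expected value to be an integer, but got ${value}`);
}
```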

@@ -19,7 +19,7 @@ /**

export declare class NodeFileSystem implements tfc.io.IOHandler {
static readonly URL_SCHEME: string;
static readonly URL_SCHEME = "file://";
protected readonly path: string | string[];
readonly MODEL_JSON_FILENAME: string;
readonly WEIGHTS_BINARY_FILENAME: string;
readonly MODEL_BINARY_FILENAME: string;
readonly MODEL_JSON_FILENAME = "model.json";
readonly WEIGHTS_BINARY_FILENAME = "weights.bin";
readonly MODEL_BINARY_FILENAME = "tensorflowjs.pb";
/**

@@ -26,0 +26,0 @@ * Constructor of the NodeFileSystem IOHandler.
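
The declaration changes above inline the `NodeFileSystem` constants (the `file://` URL scheme and the `model.json` / `weights.bin` / `tensorflowjs.pb` filenames) into the type definitions. A hedged sketch of how this handler is normally reached through the `file://` scheme (the directory path is a placeholder):

```js
const tf = require('@tensorflow/tfjs-node');

async function saveAndReload(model) {
  // The file:// scheme routes to NodeFileSystem; the target directory ends up
  // containing model.json and weights.bin.
  await model.save('file:///tmp/my-model');
  return tf.loadLayersModel('file:///tmp/my-model/model.json');
}
```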

@@ -98,4 +98,4 @@ "use strict";

if (Array.isArray(path)) {
tfc.util.assert(path.length === 2, 'file paths must have a length of 2, ' +
("(actual length is " + path.length + ")."));
tfc.util.assert(path.length === 2, function () { return 'file paths must have a length of 2, ' +
("(actual length is " + path.length + ")."); });
this.path = path.map(function (p) { return path_1.resolve(p); });

@@ -102,0 +102,0 @@ }

@@ -192,3 +192,3 @@ "use strict";

});
return [4 /*yield*/, tfl.loadModel('https://localhost/model.json')];
return [4 /*yield*/, tfl.loadLayersModel('https://localhost/model.json')];
case 1:

@@ -195,0 +195,0 @@ model = _a.sent();
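
The test above also tracks the 1.0 rename of `tf.loadModel()` to `tf.loadLayersModel()`. A one-line sketch of the rename at a call site (the URL is a placeholder):

```js
const tf = require('@tensorflow/tfjs-node');

async function loadByNewName(url) {
  // tf.loadModel(url) was removed in 1.0; the layers-model loader is now:
  return tf.loadLayersModel(url);
}
```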

@@ -29,3 +29,3 @@ /**

*/
export declare function nodeHTTPRequest(path: string | string[], requestInit?: RequestInit, weightPathPrefix?: string): io.IOHandler;
export declare const nodeHTTPRequestRouter: (url: string | string[]) => io.IOHandler;
export declare function nodeHTTPRequest(path: string, requestInit?: RequestInit, weightPathPrefix?: string): io.IOHandler;
export declare const nodeHTTPRequestRouter: (url: string) => io.IOHandler;

@@ -33,3 +33,3 @@ "use strict";

function nodeHTTPRequest(path, requestInit, weightPathPrefix) {
return tfjs_core_1.io.browserHTTPRequest(path, requestInit, weightPathPrefix, exports.fetchWrapper.fetch);
return tfjs_core_1.io.browserHTTPRequest(path, { requestInit: requestInit, weightPathPrefix: weightPathPrefix, fetchFunc: exports.fetchWrapper.fetch });
}

@@ -36,0 +36,0 @@ exports.nodeHTTPRequest = nodeHTTPRequest;
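
The hunk above switches the internal `io.browserHTTPRequest` call from positional arguments to a single options object. A hedged sketch of the corresponding public call pattern (the URL, prefix, and header values are placeholders):

```js
const tf = require('@tensorflow/tfjs-node');

async function loadOverHttp() {
  // Request options are now bundled into one object instead of being passed
  // positionally.
  const handler = tf.io.browserHTTPRequest('https://example.com/model.json', {
    requestInit: {headers: {Authorization: 'Bearer <token>'}},
    weightPathPrefix: 'https://example.com/weights/'
  });
  return tf.loadLayersModel(handler);
}
```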

@@ -20,5 +20,7 @@ /**

*/
import { tensorBoard } from './callbacks';
import { summaryFileWriter } from './tensorboard';
export declare const node: {
summaryFileWriter: typeof summaryFileWriter;
tensorBoard: typeof tensorBoard;
};

@@ -22,3 +22,7 @@ "use strict";

*/
var callbacks_1 = require("./callbacks");
var tensorboard_1 = require("./tensorboard");
exports.node = { summaryFileWriter: tensorboard_1.summaryFileWriter };
exports.node = {
summaryFileWriter: tensorboard_1.summaryFileWriter,
tensorBoard: callbacks_1.tensorBoard
};

@@ -27,2 +27,3 @@ /**

setDataMover(dataMover: DataMover): void;
private getDTypeInteger;
private typeAttributeFromTensor;

@@ -57,2 +58,5 @@ private createOutputTensor;

register(dataId: object, shape: number[], dtype: DataType): void;
fill<R extends Rank>(shape: ShapeMap[R], value: number | string, dtype?: DataType): Tensor<R>;
onesLike<R extends Rank>(x: Tensor<R>): Tensor<R>;
zerosLike<R extends Rank>(x: Tensor<R>): Tensor<R>;
stridedSlice<T extends Tensor>(x: T, begin: number[], end: number[], strides: number[], beginMask: number, endMask: number, ellipsisMask: number, newAxisMask: number, shrinkAxisMask: number): T;

@@ -59,0 +63,0 @@ unstack(x: Tensor<Rank>, axis: number): Tensor[];
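
The backend declarations added above correspond to ops implemented in this version of the Node backend. A short sketch exercising a few of them through the public API:

```js
const tf = require('@tensorflow/tfjs-node');

const a = tf.fill([2, 3], 7);                               // 2x3 tensor of 7s
const b = tf.onesLike(a);                                   // same shape, all ones
const c = tf.stridedSlice(tf.range(0, 10), [2], [8], [2]);  // [2, 4, 6]
a.print();
b.print();
c.print();
```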

@@ -52,3 +52,5 @@ "use strict";

'maxPool test-tensorflow {} x=[2,2,3] f=[1,1] s=2 p=1 dimRoundingMode=floor',
'avgPool test-tensorflow {} x=[2,2,3] f=[1,1] s=2 p=1 dimRoundingMode=floor'
'avgPool test-tensorflow {} x=[2,2,3] f=[1,1] s=2 p=1 dimRoundingMode=floor',
// libtensorflow doesn't support 6D ArgMax yet.
'Reduction: argmax test-tensorflow {} 6D, axis=0'
];

@@ -55,0 +57,0 @@ // Windows has two failing tests:

@@ -18,2 +18,38 @@ "use strict";

*/
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (_) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var _this = this;
Object.defineProperty(exports, "__esModule", { value: true });

@@ -126,1 +162,149 @@ var tfjs_1 = require("@tensorflow/tfjs");

});
describe('tensorBoard callback', function () {
var tmpLogDir;
beforeEach(function () {
tmpLogDir = tmp.dirSync().name;
});
afterEach(function () {
if (tmpLogDir != null) {
shelljs.rm('-rf', tmpLogDir);
}
});
function createModelForTest() {
var model = tfn.sequential();
model.add(tfn.layers.dense({ units: 5, activation: 'relu', inputShape: [10] }));
model.add(tfn.layers.dense({ units: 1 }));
model.compile({ loss: 'meanSquaredError', optimizer: 'sgd', metrics: ['MAE'] });
return model;
}
it('fit(): default epoch updateFreq, with validation', function () { return __awaiter(_this, void 0, void 0, function () {
var model, xs, ys, valXs, valYs, subDirs, trainLogDir, trainFiles, trainFileSize0, valLogDir, valFiles, valFileSize0, history, trainFileSize1, valFileSize1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
model = createModelForTest();
xs = tfn.randomUniform([100, 10]);
ys = tfn.randomUniform([100, 1]);
valXs = tfn.randomUniform([10, 10]);
valYs = tfn.randomUniform([10, 1]);
// Warm-up training.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 1,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir)
})];
case 1:
// Warm-up training.
_a.sent();
subDirs = fs.readdirSync(tmpLogDir);
expect(subDirs).toContain('train');
expect(subDirs).toContain('val');
trainLogDir = path.join(tmpLogDir, 'train');
trainFiles = fs.readdirSync(trainLogDir);
trainFileSize0 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
expect(trainFileSize0).toBeGreaterThan(0);
valLogDir = path.join(tmpLogDir, 'val');
valFiles = fs.readdirSync(valLogDir);
valFileSize0 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
expect(valFileSize0).toBeGreaterThan(0);
// With updateFreq === 'epoch', the train and val subsets should have generated
// the same amount of logs.
expect(valFileSize0).toEqual(trainFileSize0);
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 3,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir)
})];
case 2:
history = _a.sent();
expect(history.history.loss.length).toEqual(3);
expect(history.history.val_loss.length).toEqual(3);
expect(history.history.MAE.length).toEqual(3);
expect(history.history.val_MAE.length).toEqual(3);
trainFileSize1 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
valFileSize1 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
// We currently only assert that new content has been written to the log
// file.
expect(trainFileSize1).toBeGreaterThan(trainFileSize0);
expect(valFileSize1).toBeGreaterThan(valFileSize0);
// With updateFreq === 'epoch', the train and val subsets should have generated
// the same amount of logs.
expect(valFileSize1).toEqual(trainFileSize1);
return [2 /*return*/];
}
});
}); });
it('fit(): batch updateFreq, with validation', function () { return __awaiter(_this, void 0, void 0, function () {
var model, xs, ys, valXs, valYs, subDirs, trainLogDir, trainFiles, trainFileSize0, valLogDir, valFiles, valFileSize0, history, trainFileSize1, valFileSize1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
model = createModelForTest();
xs = tfn.randomUniform([100, 10]);
ys = tfn.randomUniform([100, 1]);
valXs = tfn.randomUniform([10, 10]);
valYs = tfn.randomUniform([10, 1]);
// Warm-up training.
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 1,
verbose: 0,
validationData: [valXs, valYs],
// Use batch updateFreq here.
callbacks: tfn.node.tensorBoard(tmpLogDir, { updateFreq: 'batch' })
})];
case 1:
// Warm-up training.
_a.sent();
subDirs = fs.readdirSync(tmpLogDir);
expect(subDirs).toContain('train');
expect(subDirs).toContain('val');
trainLogDir = path.join(tmpLogDir, 'train');
trainFiles = fs.readdirSync(trainLogDir);
trainFileSize0 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
expect(trainFileSize0).toBeGreaterThan(0);
valLogDir = path.join(tmpLogDir, 'val');
valFiles = fs.readdirSync(valLogDir);
valFileSize0 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
expect(valFileSize0).toBeGreaterThan(0);
// The train subset should have generated more logs than the val subset,
// because the train subset gets logged every batch, while the val subset
// gets logged every epoch.
expect(trainFileSize0).toBeGreaterThan(valFileSize0);
return [4 /*yield*/, model.fit(xs, ys, {
epochs: 3,
verbose: 0,
validationData: [valXs, valYs],
callbacks: tfn.node.tensorBoard(tmpLogDir)
})];
case 2:
history = _a.sent();
expect(history.history.loss.length).toEqual(3);
expect(history.history.val_loss.length).toEqual(3);
expect(history.history.MAE.length).toEqual(3);
expect(history.history.val_MAE.length).toEqual(3);
trainFileSize1 = fs.statSync(path.join(trainLogDir, trainFiles[0])).size;
valFileSize1 = fs.statSync(path.join(valLogDir, valFiles[0])).size;
// We currently only assert that new content has been written to the log
// file.
expect(trainFileSize1).toBeGreaterThan(trainFileSize0);
expect(valFileSize1).toBeGreaterThan(valFileSize0);
// The train subset should have generated more logs than the val subset,
// because the train subset gets logged every batch, while the val subset
// gets logged every epoch.
expect(trainFileSize1).toBeGreaterThan(valFileSize1);
return [2 /*return*/];
}
});
}); });
it('Invalid updateFreq value causes error', function () { return __awaiter(_this, void 0, void 0, function () {
return __generator(this, function (_a) {
expect(function () { return tfn.node.tensorBoard(tmpLogDir, {
// tslint:disable-next-line:no-any
updateFreq: 'foo'
}); }).toThrowError(/Expected updateFreq/);
return [2 /*return*/];
});
}); });
});

@@ -62,2 +62,5 @@ /**

*/
/**
* @doc {heading: 'TensorBoard', namespace: 'node'}
*/
export declare function summaryFileWriter(logdir: string, maxQueue?: number, flushMillis?: number, filenameSuffix?: string): SummaryFileWriter;
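
`summaryFileWriter()` (exposed as `tf.node.summaryFileWriter`) can also be used directly, outside of `fit()`, to log arbitrary scalars; the optional `maxQueue`, `flushMillis`, and `filenameSuffix` arguments keep their defaults here. A small sketch with placeholder values:

```js
const tf = require('@tensorflow/tfjs-node');

// Writes TensorBoard event files under the given log directory.
const writer = tf.node.summaryFileWriter('/tmp/custom_logs');
for (let step = 1; step <= 10; ++step) {
  // Any number you compute can be logged; this decaying value is made up.
  writer.scalar('my_metric', Math.exp(-step / 10), step);
}
writer.flush();
```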

@@ -83,2 +83,5 @@ "use strict";

*/
/**
* @doc {heading: 'TensorBoard', namespace: 'node'}
*/
function summaryFileWriter(logdir, maxQueue, flushMillis, filenameSuffix) {

@@ -88,3 +91,5 @@ if (maxQueue === void 0) { maxQueue = 10; }

if (filenameSuffix === void 0) { filenameSuffix = '.v2'; }
tfjs_1.util.assert(logdir != null && typeof logdir === 'string' && logdir.length > 0, "Invalid logdir: " + logdir + ". Expected a non-empty string for logdir.");
tfjs_1.util.assert(logdir != null && typeof logdir === 'string' && logdir.length > 0, function () {
return "Invalid logdir: " + logdir + ". Expected a non-empty string for logdir.";
});
if (!(logdir in summaryFileWriterCache)) {

@@ -91,0 +96,0 @@ var backend = op_utils_1.nodeBackend();

/** @license See the LICENSE file. */
declare const version = "1.0.0-alpha2";
declare const version = "1.0.1";
export { version };

@@ -5,3 +5,3 @@ "use strict";

// This code is auto-generated, do not modify this file!
var version = '1.0.0-alpha2';
var version = '1.0.1';
exports.version = version;
{
"name": "@tensorflow/tfjs-node",
"version": "1.0.0-alpha2",
"version": "1.0.1",
"main": "dist/index.js",

@@ -36,3 +36,3 @@ "types": "dist/index.d.ts",

"jasmine": "~3.1.0",
"nyc": "^12.0.2",
"nyc": "^13.3.0",
"shelljs": "^0.8.3",

@@ -42,7 +42,7 @@ "tmp": "^0.0.33",

"tslint": "~5.9.1",
"typescript": "~2.9.2",
"typescript": "3.3.3333",
"yalc": "~1.0.0-pre.21"
},
"dependencies": {
"@tensorflow/tfjs": "~1.0.0-alpha2",
"@tensorflow/tfjs": "~1.0.1",
"adm-zip": "^0.4.11",

@@ -49,0 +49,0 @@ "bindings": "~1.3.0",

@@ -7,5 +7,2 @@ <a id="travis-badge" href="https://travis-ci.org/tensorflow/tfjs-node" alt="Build Status">

**This repo is under active development and is not production-ready. We are
actively developing as an open source project.**
## Installing

@@ -12,0 +9,0 @@

@@ -23,3 +23,3 @@ /**

const frameworkLibName =
os.platform() === 'linux' ? 'libtensorflow_framework.so' : '';
os.platform() !== 'win32' ? 'libtensorflow_framework.so' : '';

@@ -32,2 +32,3 @@ const depsPath = path.join(__dirname, '..', 'deps');

module.exports = {
depsLibPath,
depsLibTensorFlowFrameworkPath,

@@ -34,0 +35,0 @@ depsLibTensorFlowPath,

@@ -53,6 +53,4 @@ /**

await symlink(depsLibTensorFlowPath, destLibPath);
if (os.platform() === 'linux') {
await symlink(
depsLibTensorFlowFrameworkPath,
destFrameworkLibPath);
if (os.platform() !== 'win32') {
await symlink(depsLibTensorFlowFrameworkPath, destFrameworkLibPath);
}

@@ -63,7 +61,4 @@ } catch (e) {

await copy(depsLibTensorFlowPath, destLibPath);
// Linux will require this library as well:
if (os.platform() === 'linux') {
await copy(
depsLibTensorFlowFrameworkPath,
destFrameworkLibPath);
if (os.platform() !== 'win32') {
await copy(depsLibTensorFlowFrameworkPath, destFrameworkLibPath);
}

@@ -78,6 +73,4 @@ }

await rename(depsLibTensorFlowPath, destLibPath);
if (os.platform() === 'linux') {
await rename(
depsLibTensorFlowFrameworkPath,
destFrameworkLibPath);
if (os.platform() !== 'win32') {
await rename(depsLibTensorFlowFrameworkPath, destFrameworkLibPath);
}
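
The three hunks above collapse the Linux-only branches into a single check: the separate `libtensorflow_framework` library exists on every platform except Windows, so the symlink, copy, and rename fallbacks now share the same guard. A hedged sketch of the shared pattern (the helper name is hypothetical):

```js
const os = require('os');

// Place the main library, then the framework library on non-Windows platforms,
// using whichever strategy (symlink, copy, or rename) is passed in.
async function placeLibs(place, libPath, destLibPath, frameworkPath, destFrameworkPath) {
  await place(libPath, destLibPath);
  if (os.platform() !== 'win32') {
    await place(frameworkPath, destFrameworkPath);
  }
}
```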

@@ -84,0 +77,0 @@ }

/**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Copyright 2019 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");

@@ -17,30 +17,31 @@ * you may not use this file except in compliance with the License.

*/
const https = require('https');
const HttpsProxyAgent = require('https-proxy-agent');
const url = require('url');
const fs = require('fs');
let path = require('path');
const rimraf = require('rimraf');
const tar = require('tar');
const util = require('util');
const zip = require('adm-zip');
const cp = require('child_process');
const os = require('os');
const ProgressBar = require('progress');
const {depsPath, depsLibTensorFlowPath} = require('./deps-constants.js');
const {depsPath, depsLibPath, depsLibTensorFlowPath} =
require('./deps-constants.js');
const resources = require('./resources');
const exists = util.promisify(fs.exists);
const mkdir = util.promisify(fs.mkdir);
const rename = util.promisify(fs.rename);
const rimrafPromise = util.promisify(rimraf);
const unlink = util.promisify(fs.unlink);
const exec = util.promisify(cp.exec);
const BASE_URI =
'https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-';
const CPU_DARWIN = 'cpu-darwin-x86_64-1.12.0.tar.gz';
const CPU_LINUX = 'cpu-linux-x86_64-1.12.0.tar.gz';
const GPU_LINUX = 'gpu-linux-x86_64-1.12.0.tar.gz';
const CPU_WINDOWS = 'cpu-windows-x86_64-1.12.0.zip';
const GPU_WINDOWS = 'gpu-windows-x86_64-1.12.0.zip';
const CPU_DARWIN = 'cpu-darwin-x86_64-1.13.1.tar.gz';
const CPU_LINUX = 'cpu-linux-x86_64-1.13.1.tar.gz';
const GPU_LINUX = 'gpu-linux-x86_64-1.13.1.tar.gz';
const CPU_WINDOWS = 'cpu-windows-x86_64-1.13.1.zip';
const GPU_WINDOWS = 'gpu-windows-x86_64-1.13.1.zip';
// TODO(kreeger): Update to TensorFlow 1.13:
// https://github.com/tensorflow/tfjs/issues/1369
const TF_WIN_HEADERS_URI =
'https://storage.googleapis.com/tf-builds/tensorflow-headers-1.12.zip';
const platform = os.platform();

@@ -50,7 +51,11 @@ let libType = process.argv[2] === undefined ? 'cpu' : process.argv[2];

let targetUri = BASE_URI;
async function getTargetUri() {
/**
* Returns the libtensorflow hosted path of the current platform.
*/
function getPlatformLibtensorflowUri() {
let targetUri = BASE_URI;
if (platform === 'linux') {
if (os.arch() === 'arm') {
// TODO(kreeger): Update to TensorFlow 1.13:
// https://github.com/tensorflow/tfjs/issues/1370
targetUri =

@@ -78,2 +83,3 @@ 'https://storage.googleapis.com/tf-builds/libtensorflow_r1_12_linux_arm.tar.gz';

}
return targetUri;
}

@@ -104,68 +110,51 @@

async function downloadLibtensorflow(callback) {
await getTargetUri();
// The deps folder and resources do not exist, download and callback as
// needed:
console.error('* Downloading libtensorflow');
// Ensure dependencies staged directory is available:
await ensureDir(depsPath);
// If HTTPS_PROXY, https_proxy, HTTP_PROXY, or http_proxy is set
const proxy = process.env['HTTPS_PROXY'] || process.env['https_proxy'] ||
process.env['HTTP_PROXY'] || process.env['http_proxy'] || '';
console.warn('* Downloading libtensorflow');
resources.downloadAndUnpackResource(
getPlatformLibtensorflowUri(), depsPath, async () => {
if (platform === 'win32') {
// Some windows libtensorflow zip files are missing structure and the
// eager headers. Check, restructure, and download resources as
// needed.
const depsIncludePath = path.join(depsPath, 'include');
if (!await exists(depsLibTensorFlowPath)) {
// Verify that tensorflow.dll exists
const libtensorflowDll = path.join(depsPath, 'tensorflow.dll');
if (!await exists(libtensorflowDll)) {
throw new Error('Could not find libtensorflow.dll');
}
// Using object destructuring to construct the options object for the
// http request. the '...url.parse(targetUri)' part fills in the host,
// path, protocol, etc from the targetUri and then we set the agent to the
// default agent which is overridden a few lines down if there is a proxy
const options = {...url.parse(targetUri), agent: https.globalAgent};
await ensureDir(depsLibPath);
await rename(libtensorflowDll, depsLibTensorFlowPath);
}
if (proxy !== '') {
options.agent = new HttpsProxyAgent(proxy);
}
// Next check the structure for the C-library headers. If they don't
// exist, download and unzip them.
if (!await exists(depsIncludePath)) {
// Remove duplicated assets from the original libtensorflow package.
// They will be replaced by the download below:
await unlink(path.join(depsPath, 'c_api.h'));
await unlink(path.join(depsPath, 'LICENSE'));
const request = https.get(options, response => {
const bar = new ProgressBar('[:bar] :rate/bps :percent :etas', {
complete: '=',
incomplete: ' ',
width: 30,
total: parseInt(response.headers['content-length'], 10)
});
if (platform === 'win32') {
// Windows stores builds in a zip file. Save to disk, extract, and delete
// the downloaded archive.
const tempFileName = path.join(__dirname, '_libtensorflow.zip');
const outputFile = fs.createWriteStream(tempFileName);
response
.on('data',
(chunk) => {
bar.tick(chunk.length);
})
.pipe(outputFile)
.on('close', async () => {
const zipFile = new zip(tempFileName);
zipFile.extractAllTo(depsPath, true /* overwrite */);
await unlink(tempFileName);
// Download the C headers only and unpack:
resources.downloadAndUnpackResource(
TF_WIN_HEADERS_URI, depsPath, () => {
if (callback !== undefined) {
callback();
}
});
} else {
if (callback !== undefined) {
callback();
}
});
} else {
// All other platforms use a tarball:
response
.on('data',
(chunk) => {
bar.tick(chunk.length);
})
.pipe(tar.x({C: depsPath, strict: true}))
.on('close', () => {
if (callback !== undefined) {
callback();
}
});
}
request.end();
});
}
} else {
// No other work is required on other platforms.
if (callback !== undefined) {
callback();
}
}
});
}

@@ -172,0 +161,0 @@

@@ -9,2 +9,33 @@ # TensorFlow.js Node.js bindings Windows troubleshooting.

## 'The system cannot find the path specified' Exceptions
This can happen for a variety of reasons. First, to inspect what is missing, either `cd` into `node_modules/@tensorflow/tfjs-node` or clone the [tensorflow/tfjs-node repo](https://github.com/tensorflow/tfjs-node).
After `cd`'ing or cloning, run the following command (you might need node-gyp installed globally: `npm install -g node-gyp`):
```sh
node-gyp configure --verbose
```
### Missing `python2`
From the verbose output, if you see something like:
```sh
gyp verb check python checking for Python executable "python2" in the PATH
gyp verb `which` failed Error: not found: python2
```
This means that node-gyp expects a 'python2' executable somewhere in `%PATH%`. Try running the following command from an Administrative (elevated) prompt:
```sh
$ npm --add-python-to-path='true' --debug install --global windows-build-tools
```
### Something else?
If another missing component shows up - please file an issue on [tensorflow/tfjs](https://github.com/tensorflow/tfjs/issues/new) with the output from the `node-gyp configure --verbose` command.
## msbuild.exe Exceptions

@@ -11,0 +42,0 @@
