cycle-posenet-driver

cycle-posenet-driver - npm Package Compare versions

Comparing version 0.0.1 to 0.0.2


lib/cjs/pose_detection.d.ts

@@ -5,5 +5,5 @@ export declare function makePoseDetectionDriver({videoWidth, videoHeight, flipHorizontal}?: {

 flipHorizontal?: boolean;
-}): (sink$: any) => {
+}): (sink$: any) => Promise<{
 DOM: any;
 poses: any;
-};
+}>;
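
The declaration change above means the driver no longer returns its sources synchronously: in 0.0.2 the { DOM, poses } object arrives wrapped in a Promise. Below is a minimal consumer sketch of that new shape, assuming the package is imported by its published name; the attach helper and the sample dimensions are illustrative and not part of the package's documented API.

// Hedged sketch: unwrapping the Promise-wrapped sources introduced in 0.0.2.
import xs, { Stream } from 'xstream';
import { makePoseDetectionDriver } from 'cycle-posenet-driver';

const poseDriver = makePoseDetectionDriver({ videoWidth: 640, videoHeight: 480 });

async function attach(sink$: Stream<any>) {
  // 0.0.1 returned { DOM, poses } directly; 0.0.2 resolves to the same shape.
  const sources = await poseDriver(sink$);
  sources.poses.addListener({
    next: (poses: any) => console.log('detected poses:', poses),
  });
}

attach(xs.empty());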

@@ -138,103 +138,102 @@ "use strict";

return function poseDetectionDriver(sink$) {
var _this = this;
var params = null;
var video = null;
var context = null;
var posesListener = null;
var poses$ = xstream_1.default.create({
start: function (listener) {
posesListener = function (result) {
listener.next(result);
};
},
stop: function () {
posesListener = null;
},
});
function poseDetectionFrame() {
return __awaiter(this, void 0, void 0, function () {
var _a, imageScaleFactor, outputStride, poses, minPoseConfidence, minPartConfidence, _b, pose, outPoses;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
if (!params.changeToArchitecture) return [3 /*break*/, 2];
// Important to purge variables and free up GPU memory
params.net.dispose();
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a = params;
return [4 /*yield*/, posenet.load(+params.changeToArchitecture)];
case 1:
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a.net = _c.sent();
params.changeToArchitecture = null;
_c.label = 2;
case 2:
// Begin monitoring code for frames per second
stats.begin();
imageScaleFactor = params.input.imageScaleFactor;
outputStride = +params.input.outputStride;
poses = [];
_b = params.algorithm;
switch (_b) {
case 'single-pose': return [3 /*break*/, 3];
case 'multi-pose': return [3 /*break*/, 5];
}
return [3 /*break*/, 7];
case 3: return [4 /*yield*/, params.net.estimateSinglePose(video, imageScaleFactor, flipHorizontal, outputStride)];
case 4:
pose = _c.sent();
poses.push(pose);
minPoseConfidence = +params.singlePoseDetection.minPoseConfidence;
minPartConfidence = +params.singlePoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 5: return [4 /*yield*/, params.net.estimateMultiplePoses(video, imageScaleFactor, flipHorizontal, outputStride, params.multiPoseDetection.maxPoseDetections, params.multiPoseDetection.minPartConfidence, params.multiPoseDetection.nmsRadius)];
case 6:
poses = _c.sent();
minPoseConfidence = +params.multiPoseDetection.minPoseConfidence;
minPartConfidence = +params.multiPoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 7:
context.clearRect(0, 0, videoWidth, videoHeight);
if (params.output.showVideo) {
context.save();
context.scale(-1, 1);
context.translate(-videoWidth, 0);
context.drawImage(video, 0, 0, videoWidth, videoHeight);
context.restore();
}
// For each pose (i.e. person) detected in an image, loop through the
// poses and draw the resulting skeleton and keypoints if over certain
// confidence scores
poses.forEach(function (_a) {
var score = _a.score, keypoints = _a.keypoints;
if (score >= minPoseConfidence) {
if (params.output.showPoints) {
utils_1.drawKeypoints(keypoints, minPartConfidence, context);
return __awaiter(this, void 0, void 0, function () {
function poseDetectionFrame() {
return __awaiter(this, void 0, void 0, function () {
var _a, imageScaleFactor, outputStride, poses, minPoseConfidence, minPartConfidence, _b, pose, outPoses;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
if (!params.changeToArchitecture) return [3 /*break*/, 2];
// Important to purge variables and free up GPU memory
params.net.dispose();
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a = params;
return [4 /*yield*/, posenet.load(+params.changeToArchitecture)];
case 1:
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a.net = _c.sent();
params.changeToArchitecture = null;
_c.label = 2;
case 2:
// Begin monitoring code for frames per second
stats.begin();
imageScaleFactor = params.input.imageScaleFactor;
outputStride = +params.input.outputStride;
poses = [];
_b = params.algorithm;
switch (_b) {
case 'single-pose': return [3 /*break*/, 3];
case 'multi-pose': return [3 /*break*/, 5];
}
return [3 /*break*/, 7];
case 3: return [4 /*yield*/, params.net.estimateSinglePose(video, imageScaleFactor, flipHorizontal, outputStride)];
case 4:
pose = _c.sent();
poses.push(pose);
minPoseConfidence = +params.singlePoseDetection.minPoseConfidence;
minPartConfidence = +params.singlePoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 5: return [4 /*yield*/, params.net.estimateMultiplePoses(video, imageScaleFactor, flipHorizontal, outputStride, params.multiPoseDetection.maxPoseDetections, params.multiPoseDetection.minPartConfidence, params.multiPoseDetection.nmsRadius)];
case 6:
poses = _c.sent();
minPoseConfidence = +params.multiPoseDetection.minPoseConfidence;
minPartConfidence = +params.multiPoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 7:
context.clearRect(0, 0, videoWidth, videoHeight);
if (params.output.showVideo) {
context.save();
context.scale(-1, 1);
context.translate(-videoWidth, 0);
context.drawImage(video, 0, 0, videoWidth, videoHeight);
context.restore();
}
// For each pose (i.e. person) detected in an image, loop through the
// poses and draw the resulting skeleton and keypoints if over certain
// confidence scores
poses.forEach(function (_a) {
var score = _a.score, keypoints = _a.keypoints;
if (score >= minPoseConfidence) {
if (params.output.showPoints) {
utils_1.drawKeypoints(keypoints, minPartConfidence, context);
}
if (params.output.showSkeleton) {
utils_1.drawSkeleton(keypoints, minPartConfidence, context);
}
}
if (params.output.showSkeleton) {
utils_1.drawSkeleton(keypoints, minPartConfidence, context);
}
});
outPoses = poses
.filter(function (pose) { return pose.score >= minPoseConfidence; })
.map(function (pose) { return (__assign({}, pose, { keypoints: pose.keypoints.filter(function (keypoint) { return keypoint.score >= minPartConfidence; }) })); });
if (posesListener) {
posesListener(outPoses);
// End monitoring code for frames per second
stats.end();
requestAnimationFrame(poseDetectionFrame);
}
});
outPoses = poses
.filter(function (pose) { return pose.score >= minPoseConfidence; })
.map(function (pose) { return (__assign({}, pose, { keypoints: pose.keypoints.filter(function (keypoint) { return keypoint.score >= minPartConfidence; }) })); });
if (posesListener) {
posesListener(outPoses);
// End monitoring code for frames per second
stats.end();
requestAnimationFrame(poseDetectionFrame);
}
return [2 /*return*/];
}
return [2 /*return*/];
}
});
});
});
}
window.addEventListener('load', function () { return __awaiter(_this, void 0, void 0, function () {
var canvas, _a, gui;
}
var params, video, context, posesListener, poses$, canvas, _a, gui, initialParams, vdom$;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
params = null;
video = null;
context = null;
posesListener = null;
poses$ = xstream_1.default.create({
start: function (listener) {
posesListener = function (result) {
listener.next(result);
};
},
stop: function () {
posesListener = null;
},
});
if (!!video) return [3 /*break*/, 3];

@@ -264,54 +263,54 @@ return [4 /*yield*/, utils_1.setupCamera(document.querySelector('#pose_detection_video'), videoWidth, videoHeight)];

_b.label = 3;
case 3: return [2 /*return*/];
case 3:
initialParams = {
algorithm: 'multi-pose',
input: {
mobileNetArchitecture: utils_1.isMobile() ? '0.50' : '0.75',
outputStride: 16,
imageScaleFactor: 0.5,
},
singlePoseDetection: {
minPoseConfidence: 0.1,
minPartConfidence: 0.5,
},
multiPoseDetection: {
maxPoseDetections: 5,
minPoseConfidence: 0.15,
minPartConfidence: 0.1,
nmsRadius: 30.0,
},
output: {
showVideo: true,
showSkeleton: true,
showPoints: true,
},
net: null,
changeToArchitecture: null,
};
sink$.fold(function (curParams, newParams) {
Object.keys(newParams).map(function (key) {
if (typeof newParams[key] === 'object') {
Object.assign(curParams[key], newParams[key]);
}
else {
curParams[key] = newParams[key];
}
return curParams;
});
return curParams;
}, initialParams).addListener({
next: function (newParams) {
params = newParams;
}
});
vdom$ = xstream_1.default.of((snabbdom_pragma_1.default.createElement("div", { id: "pose_detection", style: { position: "relative" } },
snabbdom_pragma_1.default.createElement("video", { id: "pose_detection_video", style: { display: 'none' }, autoPlay: true }),
snabbdom_pragma_1.default.createElement("canvas", { id: "pose_detection_canvas" }))));
return [2 /*return*/, {
DOM: adapt_1.adapt(vdom$),
poses: adapt_1.adapt(poses$),
}];
}
});
}); });
var initialParams = {
algorithm: 'multi-pose',
input: {
mobileNetArchitecture: utils_1.isMobile() ? '0.50' : '0.75',
outputStride: 16,
imageScaleFactor: 0.5,
},
singlePoseDetection: {
minPoseConfidence: 0.1,
minPartConfidence: 0.5,
},
multiPoseDetection: {
maxPoseDetections: 5,
minPoseConfidence: 0.15,
minPartConfidence: 0.1,
nmsRadius: 30.0,
},
output: {
showVideo: true,
showSkeleton: true,
showPoints: true,
},
net: null,
changeToArchitecture: null,
};
sink$.fold(function (curParams, newParams) {
Object.keys(newParams).map(function (key) {
if (typeof newParams[key] === 'object') {
Object.assign(curParams[key], newParams[key]);
}
else {
curParams[key] = newParams[key];
}
return curParams;
});
return curParams;
}, initialParams).addListener({
next: function (newParams) {
params = newParams;
}
});
var vdom$ = xstream_1.default.of((snabbdom_pragma_1.default.createElement("div", { id: "pose_detection", style: { position: "relative" } },
snabbdom_pragma_1.default.createElement("video", { id: "pose_detection_video", style: { display: 'none' }, autoPlay: true }),
snabbdom_pragma_1.default.createElement("canvas", { id: "pose_detection_canvas" }))));
return {
DOM: adapt_1.adapt(vdom$),
poses: adapt_1.adapt(poses$),
};
};

@@ -318,0 +317,0 @@ }
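
Both builds accumulate driver parameters with the same sink$.fold pattern visible in this hunk: each object emitted on the sink is merged into the current parameters one top-level key at a time, with nested objects shallow-merged via Object.assign. The following standalone sketch illustrates that merge behaviour; the example emissions are made up.

import xs from 'xstream';

const initialParams = {
  algorithm: 'multi-pose',
  output: { showVideo: true, showSkeleton: true, showPoints: true },
};

// Example parameter updates a caller might push into the driver's sink.
const sink$ = xs.of(
  { algorithm: 'single-pose' },          // scalar keys are replaced outright
  { output: { showSkeleton: false } },   // nested objects are shallow-merged
);

const params$ = sink$.fold((curParams: any, newParams: any) => {
  Object.keys(newParams).forEach((key) => {
    if (typeof newParams[key] === 'object') {
      Object.assign(curParams[key], newParams[key]);
    } else {
      curParams[key] = newParams[key];
    }
  });
  return curParams;
}, initialParams);

params$.addListener({
  next: (p: any) => console.log(p.algorithm, p.output.showSkeleton),
});
// logs: 'multi-pose' true, then 'single-pose' true, then 'single-pose' false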

@@ -5,5 +5,5 @@ export declare function makePoseDetectionDriver({videoWidth, videoHeight, flipHorizontal}?: {

 flipHorizontal?: boolean;
-}): (sink$: any) => {
+}): (sink$: any) => Promise<{
 DOM: any;
 poses: any;
-};
+}>;

@@ -126,103 +126,102 @@ var __assign = (this && this.__assign) || Object.assign || function(t) {

return function poseDetectionDriver(sink$) {
var _this = this;
var params = null;
var video = null;
var context = null;
var posesListener = null;
var poses$ = xs.create({
start: function (listener) {
posesListener = function (result) {
listener.next(result);
};
},
stop: function () {
posesListener = null;
},
});
function poseDetectionFrame() {
return __awaiter(this, void 0, void 0, function () {
var _a, imageScaleFactor, outputStride, poses, minPoseConfidence, minPartConfidence, _b, pose, outPoses;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
if (!params.changeToArchitecture) return [3 /*break*/, 2];
// Important to purge variables and free up GPU memory
params.net.dispose();
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a = params;
return [4 /*yield*/, posenet.load(+params.changeToArchitecture)];
case 1:
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a.net = _c.sent();
params.changeToArchitecture = null;
_c.label = 2;
case 2:
// Begin monitoring code for frames per second
stats.begin();
imageScaleFactor = params.input.imageScaleFactor;
outputStride = +params.input.outputStride;
poses = [];
_b = params.algorithm;
switch (_b) {
case 'single-pose': return [3 /*break*/, 3];
case 'multi-pose': return [3 /*break*/, 5];
}
return [3 /*break*/, 7];
case 3: return [4 /*yield*/, params.net.estimateSinglePose(video, imageScaleFactor, flipHorizontal, outputStride)];
case 4:
pose = _c.sent();
poses.push(pose);
minPoseConfidence = +params.singlePoseDetection.minPoseConfidence;
minPartConfidence = +params.singlePoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 5: return [4 /*yield*/, params.net.estimateMultiplePoses(video, imageScaleFactor, flipHorizontal, outputStride, params.multiPoseDetection.maxPoseDetections, params.multiPoseDetection.minPartConfidence, params.multiPoseDetection.nmsRadius)];
case 6:
poses = _c.sent();
minPoseConfidence = +params.multiPoseDetection.minPoseConfidence;
minPartConfidence = +params.multiPoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 7:
context.clearRect(0, 0, videoWidth, videoHeight);
if (params.output.showVideo) {
context.save();
context.scale(-1, 1);
context.translate(-videoWidth, 0);
context.drawImage(video, 0, 0, videoWidth, videoHeight);
context.restore();
}
// For each pose (i.e. person) detected in an image, loop through the
// poses and draw the resulting skeleton and keypoints if over certain
// confidence scores
poses.forEach(function (_a) {
var score = _a.score, keypoints = _a.keypoints;
if (score >= minPoseConfidence) {
if (params.output.showPoints) {
drawKeypoints(keypoints, minPartConfidence, context);
return __awaiter(this, void 0, void 0, function () {
function poseDetectionFrame() {
return __awaiter(this, void 0, void 0, function () {
var _a, imageScaleFactor, outputStride, poses, minPoseConfidence, minPartConfidence, _b, pose, outPoses;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
if (!params.changeToArchitecture) return [3 /*break*/, 2];
// Important to purge variables and free up GPU memory
params.net.dispose();
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a = params;
return [4 /*yield*/, posenet.load(+params.changeToArchitecture)];
case 1:
// Load the PoseNet model weights for either the 0.50, 0.75, 1.00, or
// 1.01 version
_a.net = _c.sent();
params.changeToArchitecture = null;
_c.label = 2;
case 2:
// Begin monitoring code for frames per second
stats.begin();
imageScaleFactor = params.input.imageScaleFactor;
outputStride = +params.input.outputStride;
poses = [];
_b = params.algorithm;
switch (_b) {
case 'single-pose': return [3 /*break*/, 3];
case 'multi-pose': return [3 /*break*/, 5];
}
return [3 /*break*/, 7];
case 3: return [4 /*yield*/, params.net.estimateSinglePose(video, imageScaleFactor, flipHorizontal, outputStride)];
case 4:
pose = _c.sent();
poses.push(pose);
minPoseConfidence = +params.singlePoseDetection.minPoseConfidence;
minPartConfidence = +params.singlePoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 5: return [4 /*yield*/, params.net.estimateMultiplePoses(video, imageScaleFactor, flipHorizontal, outputStride, params.multiPoseDetection.maxPoseDetections, params.multiPoseDetection.minPartConfidence, params.multiPoseDetection.nmsRadius)];
case 6:
poses = _c.sent();
minPoseConfidence = +params.multiPoseDetection.minPoseConfidence;
minPartConfidence = +params.multiPoseDetection.minPartConfidence;
return [3 /*break*/, 7];
case 7:
context.clearRect(0, 0, videoWidth, videoHeight);
if (params.output.showVideo) {
context.save();
context.scale(-1, 1);
context.translate(-videoWidth, 0);
context.drawImage(video, 0, 0, videoWidth, videoHeight);
context.restore();
}
// For each pose (i.e. person) detected in an image, loop through the
// poses and draw the resulting skeleton and keypoints if over certain
// confidence scores
poses.forEach(function (_a) {
var score = _a.score, keypoints = _a.keypoints;
if (score >= minPoseConfidence) {
if (params.output.showPoints) {
drawKeypoints(keypoints, minPartConfidence, context);
}
if (params.output.showSkeleton) {
drawSkeleton(keypoints, minPartConfidence, context);
}
}
if (params.output.showSkeleton) {
drawSkeleton(keypoints, minPartConfidence, context);
}
});
outPoses = poses
.filter(function (pose) { return pose.score >= minPoseConfidence; })
.map(function (pose) { return (__assign({}, pose, { keypoints: pose.keypoints.filter(function (keypoint) { return keypoint.score >= minPartConfidence; }) })); });
if (posesListener) {
posesListener(outPoses);
// End monitoring code for frames per second
stats.end();
requestAnimationFrame(poseDetectionFrame);
}
});
outPoses = poses
.filter(function (pose) { return pose.score >= minPoseConfidence; })
.map(function (pose) { return (__assign({}, pose, { keypoints: pose.keypoints.filter(function (keypoint) { return keypoint.score >= minPartConfidence; }) })); });
if (posesListener) {
posesListener(outPoses);
// End monitoring code for frames per second
stats.end();
requestAnimationFrame(poseDetectionFrame);
}
return [2 /*return*/];
}
return [2 /*return*/];
}
});
});
});
}
window.addEventListener('load', function () { return __awaiter(_this, void 0, void 0, function () {
var canvas, _a, gui;
}
var params, video, context, posesListener, poses$, canvas, _a, gui, initialParams, vdom$;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
params = null;
video = null;
context = null;
posesListener = null;
poses$ = xs.create({
start: function (listener) {
posesListener = function (result) {
listener.next(result);
};
},
stop: function () {
posesListener = null;
},
});
if (!!video) return [3 /*break*/, 3];

@@ -252,56 +251,56 @@ return [4 /*yield*/, setupCamera(document.querySelector('#pose_detection_video'), videoWidth, videoHeight)];

_b.label = 3;
case 3: return [2 /*return*/];
case 3:
initialParams = {
algorithm: 'multi-pose',
input: {
mobileNetArchitecture: isMobile() ? '0.50' : '0.75',
outputStride: 16,
imageScaleFactor: 0.5,
},
singlePoseDetection: {
minPoseConfidence: 0.1,
minPartConfidence: 0.5,
},
multiPoseDetection: {
maxPoseDetections: 5,
minPoseConfidence: 0.15,
minPartConfidence: 0.1,
nmsRadius: 30.0,
},
output: {
showVideo: true,
showSkeleton: true,
showPoints: true,
},
net: null,
changeToArchitecture: null,
};
sink$.fold(function (curParams, newParams) {
Object.keys(newParams).map(function (key) {
if (typeof newParams[key] === 'object') {
Object.assign(curParams[key], newParams[key]);
}
else {
curParams[key] = newParams[key];
}
return curParams;
});
return curParams;
}, initialParams).addListener({
next: function (newParams) {
params = newParams;
}
});
vdom$ = xs.of((Snabbdom.createElement("div", { id: "pose_detection", style: { position: "relative" } },
Snabbdom.createElement("video", { id: "pose_detection_video", style: { display: 'none' }, autoPlay: true }),
Snabbdom.createElement("canvas", { id: "pose_detection_canvas" }))));
return [2 /*return*/, {
DOM: adapt(vdom$),
poses: adapt(poses$),
}];
}
});
}); });
var initialParams = {
algorithm: 'multi-pose',
input: {
mobileNetArchitecture: isMobile() ? '0.50' : '0.75',
outputStride: 16,
imageScaleFactor: 0.5,
},
singlePoseDetection: {
minPoseConfidence: 0.1,
minPartConfidence: 0.5,
},
multiPoseDetection: {
maxPoseDetections: 5,
minPoseConfidence: 0.15,
minPartConfidence: 0.1,
nmsRadius: 30.0,
},
output: {
showVideo: true,
showSkeleton: true,
showPoints: true,
},
net: null,
changeToArchitecture: null,
};
sink$.fold(function (curParams, newParams) {
Object.keys(newParams).map(function (key) {
if (typeof newParams[key] === 'object') {
Object.assign(curParams[key], newParams[key]);
}
else {
curParams[key] = newParams[key];
}
return curParams;
});
return curParams;
}, initialParams).addListener({
next: function (newParams) {
params = newParams;
}
});
var vdom$ = xs.of((Snabbdom.createElement("div", { id: "pose_detection", style: { position: "relative" } },
Snabbdom.createElement("video", { id: "pose_detection_video", style: { display: 'none' }, autoPlay: true }),
Snabbdom.createElement("canvas", { id: "pose_detection_canvas" }))));
return {
DOM: adapt(vdom$),
poses: adapt(poses$),
};
};
}
//# sourceMappingURL=pose_detection.js.map
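
Before emitting poses to listeners, both builds filter the PoseNet results by the configured confidence thresholds, as the outPoses expression in the hunks above shows. The self-contained illustration below uses made-up sample data; the threshold values match the multi-pose defaults from the diff.

interface Keypoint { part: string; score: number; position: { x: number; y: number }; }
interface Pose { score: number; keypoints: Keypoint[]; }

const minPoseConfidence = 0.15;  // multi-pose default from the diff
const minPartConfidence = 0.1;

const poses: Pose[] = [
  {
    score: 0.6,
    keypoints: [
      { part: 'nose', score: 0.9, position: { x: 10, y: 20 } },
      { part: 'leftEar', score: 0.05, position: { x: 5, y: 18 } },
    ],
  },
  { score: 0.05, keypoints: [] },  // below minPoseConfidence, dropped entirely
];

const outPoses = poses
  .filter((pose) => pose.score >= minPoseConfidence)
  .map((pose) => ({
    ...pose,
    keypoints: pose.keypoints.filter((kp) => kp.score >= minPartConfidence),
  }));

console.log(outPoses);  // one pose remains, without the low-confidence leftEar keypoint
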
 {
 "name": "cycle-posenet-driver",
-"version": "0.0.1",
+"version": "0.0.2",
 "description": "",

@@ -5,0 +5,0 @@ "author": "Michael Jae-Yoon Chung",

