node-red-contrib-face-recognition

Comparing version 1.3.3 to 2.0.1


face-api.js

@@ -1,934 +0,71 @@

module.exports = function (RED) {
// Import the required modules
let canvas = require('canvas');
let faceapi = require('face-api.js');
let fs = require("fs");
let formidable = require('formidable');
// Try load in Tfjs-node if it is installed
try {
require('@tensorflow/tfjs-node');
}
catch (e) {
if (e instanceof Error && e.code === "MODULE_NOT_FOUND")
RED.log.info("[Face-api.js] - TensorFlow.js for Node.js was not found, running without it");
else
throw e;
}
// Monkey patch nodejs to faceapi with canvas
const { Canvas, Image, ImageData } = canvas
faceapi.env.monkeyPatch({ Canvas, Image, ImageData })
let faceApiModelsLoaded = false
// Load the models in at startup
async function loadModels() {
RED.log.info("[Face-api.js] - Loading Models", "info")
try {
const modelPath = `${__dirname}/weights`;
const ssdMobilenetv1Method = faceapi.nets.ssdMobilenetv1.loadFromDisk(modelPath)
const tinyFaceDetectorMethod = faceapi.nets.tinyFaceDetector.loadFromDisk(modelPath)
const faceLandmark68NetMethod = faceapi.nets.faceLandmark68Net.loadFromDisk(modelPath)
const faceLandmark68TinyNetMethod = faceapi.nets.faceLandmark68TinyNet.loadFromDisk(modelPath)
const faceExpressionNetMethod = faceapi.nets.faceExpressionNet.loadFromDisk(modelPath)
const ageGenderNetMethod = faceapi.nets.ageGenderNet.loadFromDisk(modelPath)
const faceRecognitionNetMethod = faceapi.nets.faceRecognitionNet.loadFromDisk(modelPath)
await ssdMobilenetv1Method
await tinyFaceDetectorMethod
await faceLandmark68NetMethod
await faceLandmark68TinyNetMethod
await faceExpressionNetMethod
await ageGenderNetMethod
await faceRecognitionNetMethod
faceApiModelsLoaded = true
RED.log.info("[Face-api.js] - Models Loaded")
}
catch (error) {
RED.log.warn("[Face-api.js] - Models failed to load: \n" + error)
}
}
loadModels()
// Create a descriptor file path if it does not exist
async function checkDescriptorDir() {
const saveDir = `${__dirname}/descriptors`;
if (!fs.existsSync(saveDir)){
fs.mkdirSync(saveDir);
RED.log.info("[Face-a-pi.js] - Created descriptors directory at " + saveDir)
}
}
checkDescriptorDir()
// Input Node constructor
function faceApiInputNode(config) {
// Register node with node red
RED.nodes.createNode(this, config);
var node = this;
node.busy = false;
// Register all compute nodes into an array
let computeNodes = [];
let computeNodesOutput = [];
if (RED.nodes.getNode(config.computeNode1)) computeNodes.push(RED.nodes.getNode(config.computeNode1));
if (RED.nodes.getNode(config.computeNode2)) computeNodes.push(RED.nodes.getNode(config.computeNode2));
if (RED.nodes.getNode(config.computeNode3)) computeNodes.push(RED.nodes.getNode(config.computeNode3));
if (RED.nodes.getNode(config.computeNode4)) computeNodes.push(RED.nodes.getNode(config.computeNode4));
if (RED.nodes.getNode(config.computeNode5)) computeNodes.push(RED.nodes.getNode(config.computeNode5));
if (RED.nodes.getNode(config.computeNode6)) computeNodes.push(RED.nodes.getNode(config.computeNode6));
if (RED.nodes.getNode(config.computeNode7)) computeNodes.push(RED.nodes.getNode(config.computeNode7));
if (RED.nodes.getNode(config.computeNode8)) computeNodes.push(RED.nodes.getNode(config.computeNode8));
if (RED.nodes.getNode(config.computeNode9)) computeNodes.push(RED.nodes.getNode(config.computeNode9));
if (RED.nodes.getNode(config.computeNode10)) computeNodes.push(RED.nodes.getNode(config.computeNode10));
// Get the number of active nodes
let numActiveNodes = 0;
for (let i = 0; i < computeNodes.length; i++) {
if(computeNodes[i] !== null) numActiveNodes = numActiveNodes + 1
}
// Set the initial status of the node
this.status({fill:"green",shape:"dot",text:"ready"});
// Get the face API config node
this.computeNode = RED.nodes.getNode(config.computeNode);
// message input handle
node.on('input', async function (msg, send, done) {
if (!node.busy) {
// Set the status to computing
node.busy = true;
this.status({fill:"blue",shape:"dot",text:"computing"});
// Check if at least one compute node is selected
if (computeNodes.every(element => element === null)) {
this.status({fill:"red",shape:"dot",text:"No compute nodes selected"});
RED.log.warn("[Face-api.js] - No compute nodes selected for " + this.name)
node.busy = false;
return
}
// Check Payload Exists and is a Buffer
if (!("payload" in msg)) {
this.status({fill:"red",shape:"dot",text:"No msg.payload found"});
RED.log.warn("[Face-api.js] - No msg.payload found")
node.busy = false;
return
}
else if (!Buffer.isBuffer(msg.payload)) {
this.status({fill:"red",shape:"dot",text:"msg.payload was not a buffer"});
RED.log.warn("[Face-api.js] - msg.payload was not a buffer, ignoring")
node.busy = false;
return
}
// Pass the image to each compute node
computeNodes.forEach((computeNode) => {
// Check if compute node is not null
if (computeNode !== null) {
// Send the message to the node
computeNode.compute(msg.payload, (output) => {
// Check if error
if ("error" in output || "warn" in output) {
// Set the status to error
node.status({fill:"red",shape:"dot",text:"Error in compute node"});
}
else if ("info" in output) {
// ignore message with info
}
else {
// Set Status back to ready
node.status({fill:"green",shape:"dot",text:"ready"});
// Add it to the compute nodes output array
computeNodesOutput.push(output)
if (numActiveNodes === computeNodesOutput.length) {
// Create the message
let msg = {}
msg["Matched Faces"] = []
computeNodesOutput.forEach((output) => {
msg[output.name] = output
output.faces.forEach(face => {
if (face.matchedLabel !== "unknown") {
msg["Matched Faces"].push({
"name" : face.matchedLabel,
"confidence" : face.matchedDistance
})
// console.log(face)
}
})
})
// Send the message
send = send || function() { node.send.apply(node,arguments); };
send({"payload": msg});
// Set the busy boolean
node.busy = false;
// Reset the array
computeNodesOutput = []
}
}
});
}
})
}
});
}
RED.nodes.registerType("face-api-input", faceApiInputNode);
// Compute Node Constructor
function faceApiComputeNode(config) {
// Register node with node red
RED.nodes.createNode(this, config);
let node = this;
// Node variables
node.name = config.name || "face-api-compute";
node.childHost = config.childHost || false;
node.labelName = (config.name === "face-api-compute") ? "known" : config.name;
node.recognitionType = config.recognitionType || "SSD";
node.multipleFaces = config.multipleFaces || "Multiple Faces";
node.confidence = config.confidence/100 || 0.5;
node.inputSize = parseInt(config.inputSize) || 416;
node.landmarks = config.landmarks || false;
node.expressions = config.expressions || false;
node.ageGender = config.ageGender || false;
node.recognition = config.recognition || false;
node.recognitionMetric = config.recognitionMetric || "Mean Squared Error";
node.recognitionConfidence = config.recognitionConfidence || 0;
node.descriptors = null;
node.modelsLoaded = false;
node.labelledDescriptors = null;
node.msgCallback = null;
node.isComputing = false;
// Start the child node if required or kill it
if (node.childHost) {
// Set up args and options for the child
const args = [];
const options = {stdio : "pipe"};
RED.nodes.registerType("face-api-input", input_node_creator)
// Start the forked child process
node.childProcess = require('child_process').fork(`${__dirname}/face-api-cmd.js`, args, options)
// Create the callback to handle a message event for info and warn messages
node.childProcess.on('message', (msg) => {
// Route the message appropriately
if ("info" in msg) {
RED.log.info("[Face-api.js : " + node.id + " : Child Node] - " + msg.info)
}
else if ("warn" in msg) {
RED.log.warn("[Face-api.js : " + node.id + " : Child Node] - " + msg.warn)
}
else if ("error" in msg) {
RED.log.error("[Face-api.js : " + node.id + " : Child Node] - " + msg.error)
}
else {
msg.image = Buffer.from(msg.image)
}
// Send messages to the callback if it has been set
if (node.msgCallback) {
node.msgCallback(msg);
// if (!("info" in msg)) node.msgCallback = null
}
// Set the computing boolean to false
node.isComputing = false;
});
// Create a callback to handle a error events
node.childProcess.on('error', (err, signal) => {
RED.log.error("[Face-api.js : " + node.id + " : Child Node]:\n" + err);
node.isComputing = false
});
// Create callback to handle a exit events
node.childProcess.on('exit', (code, signal) => {
const exitString = "[Face-api.js : " + node.id + " : Child Node] - child_process exited with " + `code ${code} and signal ${signal}`
if (signal == "SIGINT") RED.log.info(exitString)
else RED.log.error(exitString);
node.isComputing = false
});
// Create the stderr callback for errors that occur in the child node
node.childProcess.stderr.on('data', (data) => {
// Convert buffer to string
try {
const errMsg = JSON.parse(data)
RED.log.error("[Face-api.js : " + node.id + " : Child Node]:\n" + errMsg);
if (node.msgCallback) node.msgCallback({ "error" : errMsg });
}
catch (err) {
// cast the Error to a string
const errString = data.toString()
// Create a list of known errors
let ignoredErrors = [
"Hi there",
"cpu backend was already",
"Platform node has already",
"I tensorfl",
"Your CPU supports instructions"
]
// Search the incoming error string for known errors
for (let i = 0; i < ignoredErrors.length; i++) {
if (errString.indexOf(ignoredErrors[i]) !== -1) {
return;
}
}
// If the error is not known print it out
RED.log.error(errString)
}
});
}
else if (!node.childHost && node.childProcess) {
if (node.childProcess) {
node.childProcess.kill('SIGINT');
}
}
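// loadDescriptor() restores any saved descriptors for this node from disk.
// It handles both formats: an array of raw image buffers (saved before the
// node was deployed) and a serialized LabeledFaceDescriptors object.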
node.loadDescriptor = async function () {
try {
// Check if the dir and file exist
const fileName = `${__dirname}/descriptors/` + node.id + ".json"
if (fs.existsSync(fileName)) {
// Get the contents
const fileContents = JSON.parse(fs.readFileSync(fileName, "UTF8"))
if (fileContents && typeof fileContents === 'object' && fileContents.constructor === Array) {
RED.log.info("[Face-api.js : " + node.id + "] - Creating descriptor from file for \"" + node.name + "\"")
let imageBuffers = []
fileContents.forEach((image) => {
imageBuffers.push(Buffer.from(image.data))
})
await node.createDescriptor(imageBuffers)
}
else if (fileContents && typeof fileContents === 'object' && fileContents.constructor === Object && "label" in fileContents) {
let nameDescriptor = node.labelName || fileContents.label || "known"
let floatDescriptor = []
// Add each descriptor to an array to add to constructor
fileContents.descriptors.forEach(function (array) {
floatDescriptor.push(new Float32Array(array))
})
// Create a new descriptor for the node
node.descriptors = new faceapi.LabeledFaceDescriptors(nameDescriptor, floatDescriptor)
// Debug
RED.log.info("[Face-api.js : " + node.id + "] - Loaded descriptors for \"" + node.name + "\"")
}
}
else {
const errorMsg = "[Face-api.js : " + node.id + "] - Descriptor file for \"" + node.name + "\" does not exist"
RED.log.info(errorMsg)
}
}
catch (error) {
// Log error
const errorMsg = "[Face-api.js : " + node.id + "] - Could not load descriptors for \"" + node.name + "\" : \n" + error
RED.log.warn(errorMsg)
}
}
node.createDescriptor = async function (inputBuffers) {
if (faceApiModelsLoaded) {
// Get a descriptor for each input
new Promise((resolve, reject) => {
var results = []
inputBuffers.forEach(async function (inputBuffer, index, array) {
try {
// Turn the image into a Canvas
const img = new Image
img.src = inputBuffer
// Make a forward pass of each network for the detections
const detections = await faceapi.detectSingleFace(img)
.withFaceLandmarks()
.withFaceDescriptor()
if (detections) results.push(detections.descriptor)
else {
// Log error
const errorMsg = "[Face-api.js : " + node.id + "] - No faces detected in given descriptor image for \"" + node.name + "\""
RED.log.warn(errorMsg)
}
if (index === array.length -1) resolve(results);
}
catch (error) {
// Log error
const errorMsg = "[Face-api.js : " + node.id + "] - Could not create a descriptor for \"" + node.name + ": \n" + error
RED.log.warn(errorMsg)
reject(errorMsg)
}
})
}).then((descriptors) => {
if (Array.isArray(descriptors) && descriptors.length) {
// Get a descriptor for each face
node.descriptors = new faceapi.LabeledFaceDescriptors(
node.labelName,
descriptors
)
// Write the face descriptor for the specific node to disk
const saveDir = `${__dirname}/descriptors`;
const fileName = saveDir + "/" + node.id + ".json"
if (!fs.existsSync(saveDir)) fs.mkdirSync(saveDir);
fs.writeFileSync(fileName, JSON.stringify(node.descriptors))
// Debug
RED.log.info("[Face-api.js : " + node.id + "] - Saved descriptor for \"" + node.name + "\"")
}
else {
// Log error
const errorMsg = "[Face-api.js : " + node.id + "] - No faces detected in uploaded images for \"" + node.name + "\""
RED.log.warn(errorMsg)
}
})
}
}
node.deleteDescriptor = async function () {
try {
// Delete the descriptor file
const fileName = `${__dirname}/descriptors/` + node.id + ".json"
if (fs.existsSync(fileName)) {
fs.unlinkSync(fileName)
return true
}
else {
return false
}
}
catch (error) {
// Log error
const errorMsg = "[Face-api.js : " + node.id + "] - Could not clean up node with name \"" + node.name + ": \n" + error
RED.log.warn(errorMsg)
return false
}
}
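// compute() runs the configured detection pipeline over a single image buffer
// and returns the result through the supplied callback, either in this
// process or via the forked child depending on node.childHost.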
node.compute = async function(inputBuffer, callback) {
const computeDebug = function (type, msg, externalCallback) {
const outputMsg = "[Face-api.js : " + node.id + "] - " + msg
if (type === "info") {
RED.log.info(outputMsg)
if (externalCallback) externalCallback( { "info" : msg} )
}
else if (type === "warn") {
RED.log.warn(outputMsg)
if (externalCallback) externalCallback( { "warn" : msg} )
node.isComputing = false;
}
else if (type === "error") {
RED.log.error(outputMsg)
if (externalCallback) externalCallback( { "error" : msg} )
node.isComputing = false;
}
}
// Debug
// computeDebug("info", "Computing input on node \"" + node.name + "\"", callback)
// Check if the inputBuffer is a Buffer
if (!Buffer.isBuffer(inputBuffer)){
const errorMsg = "Input was not a Buffer"
computeDebug("warn", errorMsg, callback)
return;
}
// Set the compute node boolean to true
node.isComputing = true;
// Pass to the child process if it exists
if (node.childHost && node.childProcess) {
// Pass to the child process for the node
node.childProcess.send({"node" : node, "image": inputBuffer });
node.msgCallback = callback;
}
else {
if (faceApiModelsLoaded) {
try {
// Capture time for inference debug
const startTime = Date.now()
// Turn the image into a Canvas
const img = new Image;
img.onload = async function () {
// Set up the network options
let options
if (node.recognitionType === "SSD") options = new faceapi.SsdMobilenetv1Options({ minConfidence: node.confidence })
else if (node.recognitionType === "Yolo") options = new faceapi.TinyFaceDetectorOptions({ scoreThreshold: node.confidence, inputSize: node.inputSize })
// Make a forward pass of each network for the detections
let detections = null
if (node.multipleFaces === "Multiple Faces") {
// Just Face detection
if (!node.landmarks && !node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options)
}
// Face detection with either landmarks, expressions, AAG, or descriptors
else if (node.landmarks && !node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks()
}
else if (!node.landmarks && node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceExpressions()
}
else if (!node.landmarks && !node.expressions && node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withAgeAndGender()
}
else if (!node.landmarks && !node.expressions && !node.ageGender && node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withFaceDescriptors()
}
// Face detection with landmarks and either expressions, AAG, or descriptors
else if (node.landmarks && node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withFaceExpressions()
}
else if (node.landmarks && !node.expressions && node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withAgeAndGender()
}
else if (node.landmarks && !node.expressions && !node.ageGender && node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withFaceDescriptors()
}
// Face detection with landmarks and expressions with either AAG, or descriptors
else if (node.landmarks && node.expressions && node.ageGender && !node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
}
else if (node.landmarks && node.expressions && !node.ageGender && node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withFaceExpressions().withFaceDescriptors()
}
// Face detection with landmarks, AAG, and descriptors, but not expressions
else if (node.landmarks && !node.expressions && node.ageGender && node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withAgeAndGender().withFaceDescriptors()
}
// All possible options
else if (node.landmarks && node.expressions && node.ageGender && node.recognition) {
detections = await faceapi.detectAllFaces(img, options).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()
}
// Else not supported
else {
// Log error
const errorMsg = "Selected configuration of options for compute node \"" + node.name + "\" not supported"
computeDebug("warn", errorMsg, callback)
}
}
else {
// Just Face detection
if (!node.landmarks && !node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options)
}
// Face detection with either landmarks, expressions, AAG, or descriptors
else if (node.landmarks && !node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks()
}
else if (!node.landmarks && node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceExpressions()
}
else if (!node.landmarks && !node.expressions && node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withAgeAndGender()
}
else if (!node.landmarks && !node.expressions && !node.ageGender && node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceDescriptor()
}
// Face detection with landmarks and either expressions, AAG, or descriptors
else if (node.landmarks && node.expressions && !node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceExpressions()
}
else if (node.landmarks && !node.expressions && node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withAgeAndGender()
}
else if (node.landmarks && !node.expressions && !node.ageGender && node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceDescriptor()
}
// Face detection with landmarks and expressions with either AAG, or descriptors
else if (node.landmarks && node.expressions && node.ageGender && !node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceExpressions().withAgeAndGender()
}
else if (node.landmarks && node.expressions && !node.ageGender && node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceExpressions().withFaceDescriptor()
}
// Face detection with landmarks, AAG, and descriptors, but not expressions
else if (node.landmarks && !node.expressions && node.ageGender && node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withAgeAndGender().withFaceDescriptor()
}
// All possible options
else if (node.landmarks && node.expressions && node.ageGender && node.recognition) {
detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()
}
// Else not supported
else {
// Log error
const errorMsg = "Selected configuration of options for compute node \"" + node.name + "\" not supported"
computeDebug("warn", errorMsg, callback)
}
if (detections === undefined) detections = []
else detections = [detections]
}
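// By here detections is an array of zero or more results, or null if the
// selected combination of options was not supported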
// Check if there are any detections
if (detections && typeof detections === 'object' && detections.constructor === Array) {
// If recognition is required, check against comparator
if (node.recognition && node.descriptors) {
let nameDescriptor = node.labelName || node.descriptors.label || "known"
let floatDescriptor = []
// Add each descriptor to an array to add to constructor
node.descriptors.descriptors.forEach(function (array) {
floatDescriptor.push(new Float32Array(array))
})
// Create a new descriptor for the node
const descriptor = new faceapi.LabeledFaceDescriptors(nameDescriptor, floatDescriptor)
const faceMatcher = new faceapi.FaceMatcher(descriptor)
// Check if one or multiple faces require matching
detections.forEach(face => {
let bestDistance = null;
const inputDescriptor = Array.prototype.slice.call(face.descriptor)
// Loop the provided descriptors to compare against the input descriptor
node.descriptors.descriptors.forEach((baseDescriptor) => {
// Create currentDistance to hold value for this iteration
let currentDistance = null;
// Find best match for the chosen distance metric
if (node.recognitionMetric === "Euclidean") { // Smaller is better
const euclideanDistance = require('euclidean')
currentDistance = Math.round(euclideanDistance(baseDescriptor, inputDescriptor)*10000)
}
else if (node.recognitionMetric === "Manhattan") { // Smaller is better
const manhattanDistance = require('manhattan')
currentDistance = Math.round(manhattanDistance(baseDescriptor, inputDescriptor)*1000)
}
else if (node.recognitionMetric === "Chebyshev") { // Smaller is better
const chebyshevDistance = require('chebyshev')
currentDistance = Math.round(chebyshevDistance(baseDescriptor, inputDescriptor)*100000)
}
else if (node.recognitionMetric === "Mean Squared Error") { // Smaller is better
let sum = 0;
for (let i = 0; i < inputDescriptor.length; i += 1) {
const error = inputDescriptor[i] - baseDescriptor[i];
sum += error * error;
}
currentDistance = Math.round(sum / inputDescriptor.length * 1000000)
}
// Compare to the best distance found
if (bestDistance == null) bestDistance = currentDistance
else if (bestDistance > currentDistance) bestDistance = currentDistance
})
// Check if the best distance found is below the threshold
face.bestMatch = {
_distance : bestDistance,
_metric : node.recognitionMetric,
_label : (node.recognitionConfidence > bestDistance) ? node.descriptors.label : "unknown"
}
})
}
else if (node.recognition && !node.descriptors) {
// Log error
const errorMsg = "Recognition is selected but there was no descriptor to compare against, please select an image to create a descriptor."
computeDebug("warn", errorMsg, callback)
}
// Draw the information on the image
const drawImage = async function (img, detections) {
// Draw the detection rectangle
const outImg = faceapi.createCanvasFromMedia(img)
// Draw the main box
faceapi.draw.drawDetections(outImg, detections)
// Draw the landmarks if required
if (node.landmarks) faceapi.draw.drawFaceLandmarks(outImg, detections)
// Draw the other optional data
detections.forEach(result => {
// Make label for expression
const { expressions } = result
let expressionMaxKey = (node.expressions && expressions) ? Object.keys(expressions).reduce(function(a, b){
return expressions[a] > expressions[b] ? a : b
}) : null
const expressionsLabel = (node.expressions) ? [
`${ expressionMaxKey } : ${ faceapi.round(expressions[expressionMaxKey]*100, 0) }%`
] : []
// console.log(expressionsLabel)
// Make label for age and gender
const { age, gender, genderProbability } = result
const ageGenderLabel = (node.ageGender && age && gender && genderProbability) ? [
`${ gender } : ${ faceapi.round(genderProbability*100) }%`,
`${ faceapi.round(age, 0) } years`
] : []
// console.log(ageGenderLabel)
// Add the face recognition confidence
const { bestMatch } = result
const recognitionLabel = (node.recognition && bestMatch) ? [
`${ bestMatch["_label"] } (${ faceapi.round(bestMatch["_distance"], 2) })`,
] : []
// console.log(recognitionLabel)
// Draw the optional Labels for the current face
if (expressionsLabel.length || ageGenderLabel.length || recognitionLabel.length) {
new faceapi.draw.DrawTextField(
[
...expressionsLabel,
...ageGenderLabel,
...recognitionLabel
],
result.detection.box.bottomLeft
).draw(outImg)
}
})
return outImg.toBuffer('image/jpeg')
}
const newImg = await drawImage(img, detections)
// Create msg.payload from the detections object
let msg = {}
msg["faces"] = []
msg["name"] = node.name
msg["image"] = newImg
msg["inferenceTime"] = Date.now() - startTime
detections.forEach(result => {
// Get the info of the base detection
const { detection } = result
const FaceDetection = (detection) ? {
"imageDims" : detection._imageDims,
"score" : detection._score,
"classScore" : detection._classScore,
"className" : detection._className
} : {
"imageDims" : result._imageDims,
"score" : result._score,
"classScore" : result._classScore,
"className" : result._className
}
// Get the landmarks
const { landmarks, unshiftedLandmarks, alignedRect } = result
const FacialLandmarks = (node.landmarks && landmarks && unshiftedLandmarks) ? {
"landmarks" : {
"_imageDims" : landmarks._imageDims,
"_shift" : landmarks._shift,
"_positions" : landmarks._positions
},
"unshiftedLandmarks" : {
"_imageDims" : unshiftedLandmarks._imageDims,
"_shift" : unshiftedLandmarks._shift,
"_positions" : unshiftedLandmarks._positions
},
"alignedRect" : {
"_imageDims" : alignedRect._imageDims,
"_score" : alignedRect._score,
"_classScore" : alignedRect._classScore,
"_className" : alignedRect._className,
"_box" : alignedRect._box,
}
} : null
// Get the expressions and calculate the max score
const { expressions } = result
let expressionMaxKey = (node.expressions && expressions) ? Object.keys(expressions).reduce(function(a, b){
return expressions[a] > expressions[b] ? a : b
}) : null
const FacialExpressions = (expressions) ? {
"expressionLabel" : expressionMaxKey,
"expressionScore" : expressions[expressionMaxKey],
"expressions" : {
"neutral": expressions.neutral,
"happy": expressions.happy,
"sad": expressions.sad,
"angry": expressions.angry,
"fearful": expressions.fearful,
"disgusted": expressions.disgusted,
"surprised": expressions.surprised
}
} : null
// Get the age and gender results
const { age, gender, genderProbability } = result
const AgeAndGender = (node.ageGender && age && gender && genderProbability) ? {
"gender" : gender,
"age" : age,
"genderProbability" : genderProbability
} : null
// Get the Face recognition scores
const { bestMatch, descriptor } = result
const BestMatch = (node.recognition && bestMatch && descriptor) ? {
"matchedLabel" : bestMatch._label,
"matchedDistance" : bestMatch._distance,
"matchedMetric" : bestMatch._metric,
"descriptor" : descriptor
} : null
// Concat the objects to create output message
msg.faces.push({
...FaceDetection,
...FacialLandmarks,
...FacialExpressions,
...AgeAndGender,
...BestMatch
})
})
// Callback with the new message
callback( msg )
node.isComputing = false;
}
else if (detections && typeof detections === 'object' && detections.constructor === Array && detections.length == 0) {
let msg = {}
msg["faces"] = []
msg["name"] = node.name
msg["image"] = inputBuffer
msg["inferenceTime"] = Date.now() - startTime
callback( msg )
node.isComputing = false;
}
else {
// Log error
const errorMsg = "No detections found for input"
computeDebug("warn", errorMsg, callback)
}
};
img.onerror = err => {
const errorMsg = "Failed to load input image into Canvas";
computeDebug("error", errorMsg, callback)
};
img.src = inputBuffer;
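// Assigning src starts decoding the image, which fires onload (or onerror) above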
}
catch (error) {
// Log error
const errorMsg = "Error computing detections: " + error
computeDebug("error", errorMsg, callback)
}
}
else {
// Log error
const errorMsg = "Models not loaded"
computeDebug("warn", errorMsg, callback)
}
}
}
node.clean = async function() {
// Debug
RED.log.info("[Face-api.js : " + node.id + "] - Clenaing up node \"" + node.name + "\"")
// Delete the save file
node.deleteDescriptor()
}
node.on('close', async function(removed, done) {
if (removed) {
// Clean up models
node.clean()
}
// kill the child process
if (node.childProcess) {
node.childProcess.kill('SIGINT');
}
// Callback to end function
done()
})
// Start the node by loading the descriptor
node.loadDescriptor();
}
RED.nodes.registerType("face-api-compute", faceApiComputeNode);
// HTTP Endpoints for use with the front end
RED.httpAdmin.post('/faceapi/:id', RED.auth.needsPermission('face-api-compute.upload'), function(req,res) {
// Get the important stuff
var node = RED.nodes.getNode(req.params.id);
var form = new formidable.IncomingForm();
// Not ideal; we shouldn't have to write to disk, but this only happens occasionally
form.parse(req, function (err, fields, files) {
if (form.openedFiles.length > 0) {
// Get each of the files data and put into an array
var filesArray = []
Object.keys(files).forEach((number) => {
const fileContents = fs.readFileSync(files[number].path)
filesArray.push(fileContents)
});
// Save the files or create a descriptor
if (node) {
// If the node exists then create a descriptor right away
node.createDescriptor(filesArray)
res.status(201).send('OK').end();
}
else {
// If the node has not been deployed, save the image and load it when deployed
const saveDir = `${__dirname}/descriptors`;
const fileName = saveDir + "/" + req.params.id + ".json"
fs.writeFileSync(fileName, JSON.stringify(filesArray))
res.status(202).send('OK').end();
}
}
else {
res.status(400).send("No files sent with request").end();
}
});
});
RED.httpAdmin.get('/faceapi/:id/check', RED.auth.needsPermission('face-api-compute.upload'), async function(req,res) {
// Get the important stuff
var node = RED.nodes.getNode(req.params.id);
// Respond to the front end
if (node) {
const fileName = `${__dirname}/descriptors/` + node.id + ".json"
if (fs.existsSync(fileName)) {
const fileData = JSON.parse(fs.readFileSync(fileName))
res.status(200).send(fileData.descriptors.length.toString()).end();
}
else {
res.status(200).send("0").end();
}
}
else {
res.send("No node found matching " + req.params.id).status(400).end();
}
});
RED.httpAdmin.get('/faceapi/:id/delete', RED.auth.needsPermission('face-api-compute.upload'), async function(req,res) {
// Get the important stuff
var node = RED.nodes.getNode(req.params.id);
// Delete the descriptors and respond to the front end
if (node) {
if (await node.deleteDescriptor()) {
res.status(201).send('OK').end();
}
else {
res.status(404).send('OK').end();
}
}
else {
res.send("No node found matching " + req.params.id).status(400).end();
}
});
}
{
"name": "node-red-contrib-face-recognition",
"version": "1.3.3",
"version": "2.0.1",
"description": "A wrapper node for the epic face-api.js library",
"main": "index.js",
"author": "thebigpotatoe",
"license": "MIT",
"keywords": [
"node-red",
"face-api.js"
"face-api.js",
"facial recognition",
"easy",
"fast",
"multiple faces",
"recognition",
"detection"
],
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {

@@ -17,4 +21,2 @@ "type": "git",

},
"author": "thebigpotatoe",
"license": "MIT",
"bugs": {

@@ -25,12 +27,25 @@ "url": "https://github.com/thebigpotatoe/node-red-contrib-face-recognition/issues"

"dependencies": {
"canvas": "^2.6.0",
"canvas": "^2.7.0",
"chebyshev": "0.2.1",
"euclidean": "0.0.0",
"face-api.js": "^0.21.0",
"formidable": "^1.2.1",
"manhattan": "1.0.0",
"euclidean": "0.0.0",
"chebyshev": "0.2.1"
"manhattan": "1.0.0"
},
"directories": {
"weights": "weights"
"devDependencies": {
"nodemon": "^2.0.7",
"node-red": "^1.3.3"
},
"peerDependencies": {
"@tensorflow/tfjs-node": "1.2.11"
},
"files": [
"app/**",
"face-api.html",
"face-api.js"
],
"main": "app/face-api.js",
"scripts": {
"test": "node tests/test.js"
},
"node-red": {

@@ -37,0 +52,0 @@ "nodes": {

# node-red-contrib-face-recognition
## Version 2 Out Now!
Version 2.0.0 is now officially released, bringing performance increases, better usability, and more support across systems. Testing is still ongoing, so if you come across any bugs please open an issue or a discussion here.
## Overview
This node aims to wrap the epic [Face-API.js library](https://github.com/justadudewhohacks/face-api.js) from [justadudewhohacks](https://github.com/justadudewhohacks) into a simple-to-import, easy-to-use node for Node-Red. If you like anything in this repo, be sure to also check out the original.

@@ -15,28 +21,55 @@

This module also utilizes the `child_process` module of Node.js to offload the complex calculations to a separate thread, so the offloaded task does not block the main event loop and Node-Red can continue with other tasks. Each input node spawns a new fork, which consumes memory, so this may need to be limited in resource-restricted environments.
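For a sense of the mechanism, below is a minimal sketch of that fork pattern in plain Node.js; the `worker.js` file name and the message shape are illustrative assumptions, not this module's exact API;

``` javascript
// parent.js - offload work to a forked child so the event loop stays free.
// 'worker.js' and the message shape are illustrative assumptions only.
const { fork } = require('child_process');

const child = fork(`${__dirname}/worker.js`);

// Results arrive asynchronously; the main event loop is never blocked
child.on('message', (result) => {
    console.log('inference finished:', result);
});

// Buffers are JSON-encoded over IPC and must be rebuilt on the other side
child.send({ image: Buffer.from([0xff, 0xd8, 0xff]) });

// worker.js - rebuild the buffer and reply to the parent when done:
// process.on('message', (msg) => {
//     const image = Buffer.from(msg.image.data);
//     process.send({ bytes: image.length });
// });
```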
## Installation
From your `.node-red` directory, you can run;
``` bash
npm install node-red-contrib-face-recognition
```
or you can go to the palette manager in Node-Red and find `node-red-contrib-face-recognition` in the install tab.
> Linux users (including Raspberry Pi) should read [this issue](https://github.com/thebigpotatoe/node-red-contrib-face-recognition/issues/4#issuecomment-579821200) if they have issues installing canvas.
### Canvas
Canvas will be installed correctly with either installation method, however the required system packages need to be installed first. These are as follows for several common OSs;
#### Windows
No requirements
#### Mac
`¯\_(ツ)_/¯`
#### Linux (Debian Based)
``` bash
apt-get install -y python \
g++ \
build-essential \
libcairo2-dev \
libjpeg-dev
```
[{"id":"5c9785ae.c524dc","type":"inject","z":"5a397940.a17aa8","name":"Input","topic":"","payload":"","payloadType":"date","repeat":"","crontab":"","once":false,"onceDelay":"","x":170,"y":80,"wires":[["bb3493cb.b73e2"]]},{"id":"2bdf00a.e8771","type":"image","z":"5a397940.a17aa8","name":"Labeled Image","width":"640","x":860,"y":180,"wires":[]},{"id":"16eacc92.6a4173","type":"image","z":"5a397940.a17aa8","name":"Input Image","width":"640","x":190,"y":180,"wires":[]},{"id":"beebc96a.133208","type":"change","z":"5a397940.a17aa8","name":"Set Payload to Image","rules":[{"t":"set","p":"payload","pt":"msg","to":"payload[\"TBB Faces\"].image","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":880,"y":140,"wires":[["2bdf00a.e8771"]]},{"id":"e0664c84.eb4f5","type":"debug","z":"5a397940.a17aa8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":830,"y":80,"wires":[]},{"id":"bb3493cb.b73e2","type":"http request","z":"5a397940.a17aa8","name":"Get Image","method":"GET","ret":"bin","paytoqs":false,"url":"https://thumbor.forbes.com/thumbor/960x0/https%3A%2F%2Fblogs-images.forbes.com%2Fmaddieberg%2Ffiles%2F2017%2F09%2Fbigbangtheorytv_s05e01_05-_h_2017.jpg","tls":"","proxy":"","authType":"basic","x":330,"y":80,"wires":[["c70b7138.ebfc7","75806a6d.7a55b4"]]},{"id":"c70b7138.ebfc7","type":"base64","z":"5a397940.a17aa8","name":"","action":"","property":"payload","x":180,"y":140,"wires":[["16eacc92.6a4173"]]},{"id":"75806a6d.7a55b4","type":"face-api-input","z":"5a397940.a17aa8","name":"Find Faces","numNodes":1,"computeNode1":"5adfec11.8f4ef4","computeNode2":"","computeNode3":"","computeNode4":"","computeNode5":"","computeNode6":"","computeNode7":"","computeNode8":"","computeNode9":"","computeNode10":"","x":510,"y":80,"wires":[["e0664c84.eb4f5","beebc96a.133208"]]},{"id":"5adfec11.8f4ef4","type":"face-api-compute","z":"","name":"TBB Faces","childHost":true,"recognitionType":"SSD","multipleFaces":"Multiple Faces","confidence":"50","inputSize":"416","landmarks":true,"expressions":true,"ageGender":true,"recognition":false,"labelName":"known","file":""}]
#### Official Docker Image
``` bash
apk add python \
g++ \
build-base \
cairo-dev \
jpeg-dev \
pango-dev \
musl-dev \
giflib-dev \
pixman-dev \
pangomm-dev \
libjpeg-turbo-dev \
freetype-dev
```
### TensorFlow for Node.js (Optional)
You can also optionally install TensorFlow for Node.js to make this package run faster. If you do not, the node will still run, albeit much slower. To install TensorFlow, navigate to your `.node-red` folder and run the following command. This will install TensorFlow in your Node-Red directory for use by the node.
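Based on the version pinned in this package's `peerDependencies`, that command is presumably;

``` bash
npm install @tensorflow/tfjs-node@1.2.11
```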

@@ -46,42 +79,56 @@

> There are known issues with which combinations of Node.js, @tensorflow/tfjs-node, and face-api.js versions work together. At the time of writing, these were found to be;
> - Node.js: 10.16.3
> - @tensorflow/tfjs-node: 1.2.11
> - face-api.js: 0.21.0
> Please install these to gain the speed of the TensorFlow C++ backend, and keep up to date with the face-api.js GitHub page for any errors relating to this.
tfjs-node is unfortunately not supported on every OS and architecture. Below is a table of where it is supported;
|OS | x86 | armv7 | arm64v8 |
|------------------------|-----|-------|---------|
|windows | yes | n/a | n/a |
|mac | ? | n/a | n/a |
|linux | yes | no | no |
|official docker image | no | no | no |
|unofficial docker image | yes | no | no |
## Example Flow
As an example of how to use the node, below is a flow that grabs an image from the internet and runs inference over it. Copy and paste it into Node-Red to use, but make sure to first install the following node from the palette manager;
- node-red-contrib-image-output
> Note: In order to recognise faces you will need to add the recognise config nodes yourself, as these cannot be exported across instances.
![Example](Images/Example%20Flow.PNG)
``` JSON
[{"id":"c08c9d7b.c2377","type":"image","z":"4eb4b426.c9cfcc","name":"","width":"320","data":"payload","dataType":"msg","thumbnail":false,"pass":false,"outputs":0,"x":120,"y":100,"wires":[]},{"id":"461f82e0.80fc8c","type":"image","z":"4eb4b426.c9cfcc","name":"","width":"640","data":"payload.labelled_img","dataType":"msg","thumbnail":false,"pass":false,"outputs":0,"x":440,"y":100,"wires":[]},{"id":"453418e3.520f28","type":"face-api-input","z":"4eb4b426.c9cfcc","name":"TBBT Recognition","model":"SSD","confidence":50,"input_size":"128","landmarks":false,"expressions":true,"age_gender":true,"descriptors":true,"match_metric":"Mean Squared Error","match_confidence":"2500","recognise_nodes":["a88d60e.9ca13a","5d6c06f7.11d2c8","71bfb897.3b8ef8","e09c0d5.ca5acf","dc3c3afc.04e708","eb7ecb3c.a1cbb8","b4b62a6d.fc5c18"],"recognise_node_editor":"b4b62a6d.fc5c18","x":450,"y":40,"wires":[["461f82e0.80fc8c","4d4a98a1.c04008"]]},{"id":"d7b82011.6cfd1","type":"http request","z":"4eb4b426.c9cfcc","name":"TBBT","method":"GET","ret":"bin","paytoqs":"ignore","url":"https://www.etonline.com/sites/default/files/images/2019-05/bigbangtheory.jpg","tls":"","persist":false,"proxy":"","authType":"","x":230,"y":40,"wires":[["453418e3.520f28","c08c9d7b.c2377"]]},{"id":"492a9534.4d694c","type":"inject","z":"4eb4b426.c9cfcc","name":"","props":[{"p":"payload","v":"","vt":"str"},{"p":"topic","v":"","vt":"string"}],"repeat":"","crontab":"","once":false,"onceDelay":0.1,"topic":"","payload":"","payloadType":"str","x":90,"y":40,"wires":[["d7b82011.6cfd1"]]},{"id":"4d4a98a1.c04008","type":"debug","z":"4eb4b426.c9cfcc","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"false","statusVal":"","statusType":"auto","x":650,"y":40,"wires":[]},{"id":"b4b62a6d.fc5c18","type":"face-api-recognise","name":"Leonard"}]
```
## Included Nodes
This module comes with two nodes; the `face-api-input` node and the `face-api-recognise` node.
### Input node
![Input Node](Images/face-api-input-node-menu.JPG)
#### Description
The `face-api-input` node is the main node that runs inference over an input image and optionally utilises recognise nodes to recognise faces. Each input node spawns a `fork`, a separate Node.js instance that runs the recognition without blocking the main Node-Red event loop. This takes resources, so be conservative in resource-constrained environments.
#### General Settings
- __Name__: The name of this specific node. Useful to change when you have multiple config nodes and need to know the difference between them. (Defaults to "face-api-input")
#### Detection Settings
- __Detection Type__: The detection type to use. This is either `SSD` or `Yolo`, as `MTCNN` is not currently supported. (Defaults to SSD)
- __Input Size__: The input size of the Yolo algorithm. This option is only available when using Yolo as the type. (Defaults to 416)
- __Detection Confidence__: The minimum confidence score that each detected face must be above to be counted as a face. This option is available for both SSD and Yolo. (Defaults to 50%, limited between 0 and 100)

@@ -94,30 +141,32 @@ - __Facial Landmarks__: Select this if you would like to add facial landmarks of each face to the output. (Defaults to false)

- __Descriptors__: Select this if you would like to output the computed descriptors for each found face. (Defaults to false)
#### Recognition Settings
- __Recognition Metric__: Select the type of recognition metric to use when comparing faces with the recognition option. This option is only shown when the recognise option is selected. (Defaults to Mean Squared Error)
- __Matched Confidence__: The minimum cutoff value for recognition for each of the metrics. Keep in mind that the different metrics will produce different ranges of values for recognition. This option is only shown when the recognise option is selected. (Typical cutoffs are around 2000)
- __Recognise Nodes List__: A list of recognise nodes to run recognition against for each face found in an input image. Simply add and remove as many as required; there are no limits.
- __Add/Edit Recognise Nodes__: Use this dropdown menu to add and edit new recognise nodes, which you can then add to the input node list. Keep in mind you will have to deploy a new recognise node before it appears in the list.
### Recognise node
![Recognise Node](Images/face-api-recognise-node-menu.JPG)
- __Name__: The name of this specific face to recognise. (Defaults to "Unknown")
- __Add Images__: Use this button to add images to create descriptors from. These descriptors will then be used in the input node to predict against an image. When adding images, the descriptors will take a while to compute.
- __Remove Descriptors__: Use this button to remove the currently stored descriptors. This is irreversible.
## Adding a face descriptor
In order to use facial recognition, facial descriptors must first be calculated to allow a comparison between them and any input image. To do this, create a recognise node through the input node menu, then click `Add Images`. Once selected, all images will be computed immediately in the background.
These descriptors are then saved to disk, allowing them to survive restarts of Node-Red. The saved file will then be loaded on startup of Node-Red. Saving the descriptor is also safer than saving an image if your Node-Red instance is online, as no data about the original image is stored.
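For reference, the saved file is the JSON form of the labelled descriptor set, shaped roughly as below; the label here comes from the example flow above, and each inner array actually holds 128 floats, one array per uploaded image;

``` JSON
{
    "label": "Leonard",
    "descriptors": [
        [-0.0412, 0.1033, 0.0521],
        [-0.0398, 0.0987, 0.0544]
    ]
}
```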
## The Recognition Metric
The original Face-api.js library only supports the Euclidean distance method of comparison between descriptors when matching faces. To extend this, this node also supports three more metrics; these are __Manhattan__, __Chebyshev__, and __Mean Squared Error__.
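As a rough sketch of how the four metrics compare two descriptors (smaller is better for all of them, and the scale factors mirror those used in this node's source);

``` javascript
// A minimal sketch of the four distance metrics over two 128-float descriptors
function distances(a, b) {
    let squared = 0, manhattan = 0, chebyshev = 0;
    for (let i = 0; i < a.length; i++) {
        const d = Math.abs(a[i] - b[i]);
        squared += d * d;
        manhattan += d;
        chebyshev = Math.max(chebyshev, d);
    }
    // Scale factors mirror this node's compute function
    return {
        "Euclidean": Math.round(Math.sqrt(squared) * 10000),
        "Manhattan": Math.round(manhattan * 1000),
        "Chebyshev": Math.round(chebyshev * 100000),
        "Mean Squared Error": Math.round((squared / a.length) * 1000000)
    };
}

// Two nearly identical descriptors score low on every metric
const base = new Float32Array(128).fill(0.5);
const probe = new Float32Array(128).fill(0.51);
console.log(distances(base, probe));
```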

@@ -124,0 +173,0 @@

