Huge News!Announcing our $40M Series B led by Abstract Ventures.Learn More
Socket
Sign inDemoInstall
Socket

@yoonit/nativescript-camera

Package Overview
Dependencies
Maintainers
10
Versions
26
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@yoonit/nativescript-camera - npm Package Compare versions

Comparing version 1.8.0 to 2.0.0

helpers/Validator.js

3

package.json
{
"name": "@yoonit/nativescript-camera",
"version": "1.8.0",
"version": "2.0.0",
"description": "Yoonit Camera have a custom view that shows a preview layer of the front/back camera and detects human faces in it and read qr code.",

@@ -55,4 +55,5 @@ "main": "Yoonit.Camera",

"nativescript-permissions": "^1.3.11",
"reflect-metadata": "^0.1.13",
"ts-node": "^9.0.0"
}
}

@@ -49,5 +49,10 @@ [<img src="https://raw.githubusercontent.com/Yoonit-Labs/nativescript-yoonit-camera/development/logo_cyberlabs.png" width="300">](https://cyberlabs.ai/)

ref="yooCamera"
initialLens="front"
captureType="face"
numberOfImages=10
timeBetweenImages=500
saveImageCaptured=true
faceDetectionBox=true
@faceDetected="doFaceDetected"
@faceImage="doImageCreated"
@frameImage="doImageCreated"
@imageCaptured="doImageCaptured"
@endCapture="doEndCapture"

@@ -66,3 +71,4 @@ @qrCodeContent="doQRCodeContent"

methods: {
async onLoaded(args) {
async onLoaded() {
console.log('[YooCamera] Getting Camera view')

@@ -73,4 +79,4 @@ this.$yoo.camera.registerElement(this.$refs.yooCamera)

if (await this.$yoo.camera.requestPermission()) {
console.log('[YooCamera] Permission granted, start preview')
this.$yoo.camera.preview()

@@ -80,7 +86,11 @@ }

doFaceDetected({ faceDetected }) {
console.log('[YooCamera] faceDetected', faceDetected)
doFaceDetected({ x, y, width, height }) {
console.log('[YooCamera] doFaceDetected', `{x: ${x}, y: ${y}, width: ${width}, height: ${height}}`)
if (!x || !y || !width || !height) {
this.imagePath = null
}
},
doImageCreated({
doImageCaptured({
type,
count,

@@ -94,8 +104,10 @@ total,

if (total === 0) {
console.log('[YooCamera] doFaceImage', `[${count}] ${path}`)
console.log('[YooCamera] doImageCreated', `${type}: [${count}] ${path}`)
this.imageCreated = `${count}`
} else {
console.log('[YooCamera] doFaceImage', `[${count}] of [${total}] - ${path}`)
console.log('[YooCamera] doImageCreated', `${type}: [${count}] of [${total}] - ${path}`)
this.imageCreated = `${count} de ${total}`
}
console.log('[YooCamera] doFaceImage', path, source)
this.imagePath = source
},

@@ -112,25 +124,5 @@

doStatus({ status }) {
console.log('[YooCamera] doStatus', JSON.parse(status))
console.log('[YooCamera] doStatus', status)
},
doToggleLens() {
const currentCameraLens = this.$yoo.camera.getLens()
console.log('[YooCamera] doToggleLens', currentCameraLens)
this.$yoo.camera.toggleLens()
},
doStartCapture(captureType) {
console.log('[YooCamera] doStartCapture', captureType)
this.$yoo.camera.startCapture(captureType)
},
doFaceDetectionBox(status) {
console.log('[YooCamera] doFaceDetectionBox', status)
this.$yoo.camera.setFaceDetectionBox(status)
},
doPermissionDenied() {

@@ -146,68 +138,72 @@ console.log('[YooCamera] doPermissionDenied')

### Props
| Props | Input/Format | Default value | Description |
| - | - | - | - |
| initialLens | `"front"` or `"back"` | `"front"` | The camera lens when component initiated. |
| captureType | `"none"`, `"front"`, `"frame"` or `"qrcode"` | `"none"` | The capture type of the camera. |
| numberOfImages | number | `0` | The number of images to be captured. |
| timeBetweenImages | number | `1000` | The time interval in milliseconds to capture between images. |
| outputImageWidth | `"NNpx"` | `"200px"` | The output image width in pixels to be captured. |
| outputImageHeight | `"NNpx"` | `"200px"` | The output image height in pixels to be captured. |
| faceMinSize | `"NN%"` | `"0%"` | The face minimum size percentage to be captured. |
| faceMaxSize | `"NN%"` | `"100%"` | The face maximum size percentage to be captured. |
| faceDetectionBox | boolean | `false` | The indicator to show/hide the face detection box. |
| saveImageCaptured | boolean | `false` | The indicator to enable/disable saving the image when captured. |
| faceROI | boolean | `false` | The indicator to enable/disable the region of interest. |
| faceROITopOffset | `"NN%"` | `"0%"` | The "top" offset percentage region of interest. |
| faceROIRightOffset | `"NN%"` | `"0%"` | The "right" offset percentage region of interest. |
| faceROIBottomOffset | `"NN%"` | `"0%"` | The "bottom" offset percentage region of interest. |
| faceROILeftOffset | `"NN%"` | `"0%"` | The "left" offset percentage region of interest. |
| faceROIMinSize | `"NN%"` | `"0%"` | The minimum face related with the ROI. |
#### Methods
| Function | Parameters | Valid values | Return Type | Description
| - | - | - | - | -
| **`requestPermission`** | - | - | promise | Ask to user to give the permission to access camera.
| **`hasPermission`** | - | - | boolean | Return if application has camera permission.
| **`preview`** | - | - | void | Start camera preview if has permission.
| **`startCapture`** | `captureType: string` | <ul><li>`"none"`</li><li>`"face"`</li><li>`"barcode"`</li><li>`"frame"`</li></ul> | void | Set capture type none, face, barcode or frame.
| **`stopCapture`** | - | - | void | Stop any type of capture.
| **`toggleLens`** | - | - | void | Set camera lens facing front or back.
| **`getLens`** | - | - | number | Return `number` that represents lens face state: 0 for front 1 for back camera.
| **`setFaceNumberOfImages`** | `faceNumberOfImages: number` | Any positive `number` value | void | Default value is 0. For value 0 is saved infinity images. When saved images reached the "face number of images", the `onEndCapture` is triggered.
| **`setFaceDetectionBox`** | `faceDetectionBox: boolean` | `true` or `false` | void | Set to show face detection box when face detected.
| **`setFaceTimeBetweenImages`** | `faceTimeBetweenImages: number` | Any positive `number` that represent time in milli seconds | void | Set saving face images time interval in milli seconds.
| **`setFacePaddingPercent`** | `facePaddingPercent: number` | Any positive `number` value | void | Set face image and bounding box padding in percent.
| **`setFaceImageSize`** | `faceImageSize: number` | Any positive `number` value | void | Set face image size to be saved.
| **`setFaceCaptureMinSize`** | `faceCaptureMinSize: number` | Value between `0` and `1`. Represents the percentage. | void | Set the minimum face capture related by percentage with the screen width.
| **`setFaceCaptureMaxSize`** | `faceCaptureMaxSize: number` | Value between `0` and `1`. Represents the percentage. | void | Set the maximum face capture related by percentage with the screen width.
| **`setFrameNumberOfImages`** | `frameNumberOfImages: number` | Any positive `number` value | void | Default value is 0. For value 0 is saved infinity images. When saved images reached the "frame number of images", the `onEndCapture` is triggered.
| **`setFrameTimeBetweenImages`** | `frameTimeBetweenImages: number` | Any positive `number` that represent time in milli seconds | void | Set saving frame images time interval in milli seconds.
| **`setFaceSaveImages`** | `faceSaveImages: boolean` | `true` or `false` | void | Set to enable/disable face save images when capturing faces.
| **`setFaceROIEnable`** | `faceROIEnable: boolean` | `true` or `false` | void | Enable/disable face region of interest capture.
| **`setFaceROIOffset`** | `topOffset: number, rightOffset: number,bottomOffset: number, leftOffset: number` | Values between `0` and `1`. Represents the percentage. | void | <ul><li>topOffset: "Above" the face detected.</li><li>rightOffset: "Right" of the face detected.</li><li>bottomOffset: "Bottom" of the face detected.</li><li>leftOffset: "Left" of the face detected.</li></ul>
| **`setFaceROIMinSize`** | `minimumSize: number` | Values between `0` and `1`. Represents the percentage. | void | Set the minimum face size related with the region of interest.
| Function | Parameters | Valid values | Return Type | Description
| - | - | - | - | -
| requestPermission | - | - | promise | Ask the user to give the permission to access camera.
| hasPermission | - | - | boolean | Return if application has camera permission.
| preview | - | - | void | Start camera preview if has permission.
| startCapture | `type: string` | <ul><li>`"none"`</li><li>`"face"`</li><li>`"qrcode"`</li><li>`"frame"`</li></ul> | void | Set capture type none, face, qrcode or frame.
| stopCapture | - | - | void | Stop any type of capture.
| setLens | `lens: string` | `"front"` or `"back"` | void | Set the camera lens facing: front or back.
| toggleLens | - | - | void | Toggle camera lens facing front or back.
| getLens | - | - | number | Return `number` that represents lens face state: 0 for front 1 for back camera.
| setNumberOfImages | `numberOfImages: Int` | Any positive `Int` value | void | Default value is 0. For value 0 is saved infinity images. When saved images reached the "number of images", the `onEndCapture` is triggered.
| setTimeBetweenImages | `milliseconds: number` | Any positive number that represent time in milli seconds | void | Set saving face/frame images time interval in milli seconds.
| setOutputImageWidth | `width: string` | Value format must be in `NNpx` | void | Set face image width to be created in pixels.
| setOutputImageHeight | `height: string` | Value format must be in `NNpx` | void | Set face image height to be created in pixels.
| setSaveImageCaptured | `enable: boolean` | `true` or `false` | void | Set to enable/disable save image when capturing face and frame.
| setFaceDetectionBox | `enable: boolean` | `true` or `false` | void | Set to show a detection box when face detected.
| setFacePaddingPercent | `percentage: string` | Value format must be in `NN%` | void | Set face image and bounding box padding in percent.
| setFaceCaptureMinSize | `percentage: string` | Value format must be in `NN%` | void | Set the minimum face capture based on the screen.
| setFaceCaptureMaxSize | `percentage: string` | Value format must be in `NN%` | void | Set the maximum face capture based on the screen.
| setFaceROIEnable | `enable: boolean` | `true` or `false` | void | Enable/disable face region of interest capture.
| setFaceROITopOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the top face bounding box with the top of the camera preview.
| setFaceROIRightOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the right face bounding box with the right of the camera preview.
| setFaceROIBottomOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the bottom face bounding box with the bottom of the camera preview.
| setFaceROILeftOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the left face bounding box with the left of the camera preview.
| setFaceROIMinSize | `percentage: string` | Value format must be in `NN%` | void | Set the minimum face size related with the region of interest.
#### Events
| Event | Parameters | Description
| - | - | -
| faceImage | `{ count: number, total: number, image: object = { path: string, source: blob } }` | Must have started capture type of face. Emitted when the face image file is created: <ul><li>count: current index</li><li>total: total to create</li><li>image.path: the face image path</li><li>image.source: the blob file</li><ul>
| frameImage | `{ count: number, total: number, image: object = { path: string, source: blob } }` | Must have started capture type of frame. Emitted when the frame image file is created: <ul><li>count: current index</li><li>total: total to create</li><li>image.path: the frame image path</li><li>image.source: the blob file</li><ul>
| faceDetected | `{ x: number, y: number, width: number, height: number }` | Must have started capture type of face. Emit the detected face bounding box. Emit all parameters null if no more face detecting.
| endCapture | - | Must have started capture type of face or frame. Emitted when the number of face or frame image files created is equal of the number of images set (see the method `setFaceNumberOfImages` for face and `setFrameNumberOfImages` for frame).
| qrCodeContent | `{ content: string }` | Must have started capture type of barcode (see `startCapture`). Emitted when the camera scan a QR Code.
| status | `{ type: 'error'/'message', status: string }` | Emit message error from native. Used more often for debug purpose.
| permissionDenied | - | Emit when try to `preview` but there is not camera permission.
| Event | Parameters | Description
| - | - | -
| imageCaptured | `{ type: string, count: number, total: number, image: object = { path: string, source: blob } }` | Must have started capture type of face/frame. Emitted when the face image file saved: <ul><li>type: "face" or "frame"</li><li>count: current index</li><li>total: total to create</li><li>image.path: the face/frame image path</li><li>image.source: the blob file</li></ul>
| faceDetected | `{ x: number, y: number, width: number, height: number }` | Must have started capture type of face. Emit the detected face bounding box. Emit all parameters null if no more face detecting.
| endCapture | - | Must have started capture type of face/frame. Emitted when the number of image files created is equal of the number of images set (see the method `setNumberOfImages`).
| qrCodeContent | `{ content: string }` | Must have started capture type of qrcode (see `startCapture`). Emitted when the camera read a QR Code.
| status | `{ type: 'error'/'message', status: string }` | Emit message error from native. Used more often for debug purpose.
| permissionDenied | - | Emit when try to `preview` but there is not camera permission.
### KeyError
Pre-define key error constants used by the `onError` event.
| KeyError | Description
| - | -
| NOT_STARTED_PREVIEW | Tried to start a process that depends on to start the camera preview.
| INVALID_CAPTURE_TYPE | Tried to start a non-existent capture type.
| INVALID_FACE_NUMBER_OF_IMAGES | Tried to input invalid face number of images to capture.
| INVALID_FACE_TIME_BETWEEN_IMAGES | Tried to input invalid face time interval to capture face.
| INVALID_FACE_PADDING_PERCENT | Tried to input invalid face padding percent.
| INVALID_FACE_IMAGE_SIZE | Tried to input invalid image width or height.
| INVALID_FACE_CAPTURE_MIN_SIZE | Tried to input invalid face capture minimum size.
| INVALID_FACE_CAPTURE_MAX_SIZE | Tried to input invalid face capture maximum size.
| INVALID_FRAME_NUMBER_OF_IMAGES | Tried to input invalid frame number of images to capture.
| INVALID_FRAME_TIME_BETWEEN_IMAGES | Tried to input invalid frame time interval to capture frame.
| INVALID_FACE_ROI_OFFSET | Tried to input invalid face region of interest offset.
| INVALID_FACE_ROI_MIN_SIZE | Tried to input invalid face region of interest minimum size.
### Message
Pre-define message constants used by the `onMessage` event.
Pre-define message constants used by the `status` event.
| Message | Description
| - | -
| INVALID_CAPTURE_FACE_MIN_SIZE | Face width percentage in relation of the screen width is less than the setted (`setFaceCaptureMinSize`).
| INVALID_CAPTURE_FACE_MAX_SIZE | Face width percentage in relation of the screen width is more than the setted (`setFaceCaptureMaxSize`).
| INVALID_CAPTURE_FACE_OUT_OF_ROI | Face bounding box is out of the setted region of interest (`setFaceROIOffset`).
| INVALID_CAPTURE_FACE_ROI_MIN_SIZE | Face width percentage in relation of the screen width is less than the setted (`setFaceROIMinSize`).
| INVALID_CAPTURE_FACE_MIN_SIZE | Face width percentage in relation of the screen width is less than the set (`setFaceCaptureMinSize`).
| INVALID_CAPTURE_FACE_MAX_SIZE | Face width percentage in relation of the screen width is more than the set (`setFaceCaptureMaxSize`).
| INVALID_CAPTURE_FACE_OUT_OF_ROI | Face bounding box is out of the set region of interest (`setFaceROIOffset`).
| INVALID_CAPTURE_FACE_ROI_MIN_SIZE | Face width percentage in relation of the screen width is less than the set (`setFaceROIMinSize`).

@@ -214,0 +210,0 @@ ## To contribute and make it better

@@ -57,15 +57,17 @@ // +-+-+-+-+-+-+

startCapture,
setFaceNumberOfImages,
setNumberOfImages,
setTimeBetweenImages,
setOutputImageWidth,
setOutputImageHeight,
setFaceDetectionBox,
setFaceSaveImages,
setFaceTimeBetweenImages,
setSaveImageCaptured,
setFacePaddingPercent,
setFaceImageSize,
setFaceCaptureMinSize,
setFaceCaptureMaxSize,
setFrameNumberOfImages,
setFrameTimeBetweenImages,
setFaceROIEnable,
setFaceROIOffset,
setFaceROIMinSize,
setFaceROITopOffset,
setFaceROIRightOffset,
setFaceROIBottomOffset,
setFaceROILeftOffset,
} = element.nativeView

@@ -80,3 +82,2 @@

preview,
startCapture,
stopCapture,

@@ -86,15 +87,18 @@ setLens,

getLens,
setFaceNumberOfImages,
startCapture,
setNumberOfImages,
setTimeBetweenImages,
setOutputImageWidth,
setOutputImageHeight,
setFaceDetectionBox,
setFaceSaveImages,
setFaceTimeBetweenImages,
setSaveImageCaptured,
setFacePaddingPercent,
setFaceImageSize,
setFaceCaptureMinSize,
setFaceCaptureMaxSize,
setFrameNumberOfImages,
setFrameTimeBetweenImages,
setFaceROIEnable,
setFaceROIOffset,
setFaceROIMinSize,
setFaceROITopOffset,
setFaceROIRightOffset,
setFaceROIBottomOffset,
setFaceROILeftOffset,
}

@@ -101,0 +105,0 @@

@@ -29,44 +29,2 @@ import { CameraBase } from './Yoonit.Camera.common';

}
startCapture(captureType) {
this.nativeView.startCaptureType(captureType);
}
setFaceNumberOfImages(faceNumberOfImages) {
this.nativeView.setFaceNumberOfImages(faceNumberOfImages);
}
setFaceDetectionBox(faceDetectionBox) {
this.nativeView.setFaceDetectionBox(faceDetectionBox);
}
setFaceSaveImages(faceSaveImages) {
this.nativeView.setFaceSaveImages(faceSaveImages);
}
setFaceTimeBetweenImages(faceTimeBetweenImages) {
this.nativeView.setFaceTimeBetweenImages(faceTimeBetweenImages);
}
setFacePaddingPercent(facePaddingPercent) {
this.nativeView.setFacePaddingPercent(facePaddingPercent);
}
setFaceImageSize(width, height) {
this.nativeView.setFaceImageSize(width, height);
}
setFaceCaptureMinSize(faceCaptureMinSize) {
this.nativeView.setFaceCaptureMinSize(faceCaptureMinSize);
}
setFaceCaptureMaxSize(faceCaptureMaxSize) {
this.nativeView.setFaceCaptureMaxSize(faceCaptureMaxSize);
}
setFrameNumberOfImages(frameNumberOfImages) {
this.nativeView.setFrameNumberOfImages(frameNumberOfImages);
}
setFrameTimeBetweenImages(frameTimeBetweenImages) {
this.nativeView.setFrameTimeBetweenImages(frameTimeBetweenImages);
}
setFaceROIEnable(faceROIEnable) {
this.nativeView.setFaceROIEnable(faceROIEnable);
}
setFaceROIOffset(topOffset, rightOffset, bottomOffset, leftOffset) {
this.nativeView.setFaceROIOffset(topOffset, rightOffset, bottomOffset, leftOffset);
}
setFaceROIMinSize(minimumSize) {
this.nativeView.setFaceROIMinSize(minimumSize);
}
}

@@ -95,3 +53,3 @@ var CameraEventListener = /** @class */ (function (_super) {

};
CameraEventListener.prototype.onFaceImageCreated = function (count, total, imagePath) {
CameraEventListener.prototype.onImageCaptured = function (type, count, total, imagePath) {
var owner = this.owner.get();

@@ -101,4 +59,5 @@ var image = this.imageProcessing(imagePath);

owner.notify({
eventName: 'faceImage',
eventName: 'imageCaptured',
object: owner,
type: type,
count: count,

@@ -110,15 +69,2 @@ total: total,

};
CameraEventListener.prototype.onFrameImageCreated = function (count, total, imagePath) {
var owner = this.owner.get();
var image = this.imageProcessing(imagePath);
if (owner) {
owner.notify({
eventName: 'frameImage',
object: owner,
count: count,
total: total,
image: image
});
}
};
CameraEventListener.prototype.onFaceDetected = function (x, y, width, height) {

@@ -159,3 +105,3 @@ var owner = this.owner.get();

};
CameraEventListener.prototype.onBarcodeScanned = function (content) {
CameraEventListener.prototype.onQRCodeScanned = function (content) {
var owner = this.owner.get();

@@ -162,0 +108,0 @@ if (owner) {

@@ -1,3 +0,21 @@

import { ContentView } from '@nativescript/core';
import { ContentView, } from '@nativescript/core';
import Validator from "./helpers/Validator";
const { ValidateProps, Required, RegexNumber, RegexPX, PercentageToNumber, RegexPercentage, NumberToPixel, } = Validator;
export class CameraBase extends ContentView {
set initialLens(value) { this.setLens(value); }
set captureType(value) { this.startCapture(value); }
set numberOfImages(value) { this.setNumberOfImages(value); }
set timeBetweenImages(value) { this.setTimeBetweenImages(value); }
set outputImageWidth(value) { this.setOutputImageWidth(value); }
set outputImageHeight(value) { this.setOutputImageHeight(value); }
set faceMinSize(value) { this.setFaceCaptureMinSize(value); }
set faceMaxSize(value) { this.setFaceCaptureMaxSize(value); }
set faceDetectionBox(value) { this.setFaceDetectionBox(value); }
set saveImageCaptured(value) { this.setSaveImageCaptured(value); }
set faceROI(value) { this.setFaceROIEnable(value); }
set faceROITopOffset(value) { this.setFaceROITopOffset(value); }
set faceROIRightOffset(value) { this.setFaceROIRightOffset(value); }
set faceROIBottomOffset(value) { this.setFaceROIBottomOffset(value); }
set faceROILeftOffset(value) { this.setFaceROILeftOffset(value); }
set faceROIMinSize(value) { this.setFaceROIMinSize(value); }
requestPermission(explanationText) {

@@ -7,2 +25,5 @@ return new Promise((resolve, reject) => resolve());

hasPermission() { return false; }
setLens(lens) {
this.getLens() !== lens && this.toggleLens();
}
preview() {

@@ -14,5 +35,2 @@ this.nativeView.startPreview();

}
setLens(lens) {
this.getLens() !== lens && this.toggleLens();
}
toggleLens() {

@@ -22,19 +40,181 @@ this.nativeView.toggleCameraLens();

getLens() {
return this.nativeView.getCameraLens();
return this.nativeView.getCameraLens() === 0 ? 'front' : 'back';
}
startCapture(captureType) { }
setFaceNumberOfImages(faceNumberOfImages) { }
setFaceDetectionBox(faceDetectionBox) { }
setFaceSaveImages(faceSaveImages) { }
setFaceTimeBetweenImages(faceTimeBetweenImages) { }
setFacePaddingPercent(facePaddingPercent) { }
setFaceImageSize(width, height) { }
setFaceCaptureMinSize(faceCaptureMinSize) { }
setFaceCaptureMaxSize(faceCaptureMaxSize) { }
setFrameNumberOfImages(frameNumberOfImages) { }
setFrameTimeBetweenImages(frameTimeBetweenImages) { }
setFaceROIEnable(faceROIEnable) { }
setFaceROIOffset(topOffset, rightOffset, bottomOffset, leftOffset) { }
setFaceROIMinSize(minimumSize) { }
startCapture(type) {
this.nativeView.startCaptureType(type);
}
setNumberOfImages(numberOfImages) {
this.nativeView.setNumberOfImages(numberOfImages);
}
setTimeBetweenImages(milliseconds) {
this.nativeView.setTimeBetweenImages(milliseconds);
}
setOutputImageWidth(width) {
this.nativeView.setOutputImageWidth(width);
}
setOutputImageHeight(height) {
this.nativeView.setOutputImageHeight(height);
}
setFaceDetectionBox(enable) {
this.nativeView.setFaceDetectionBox(enable);
}
setFacePaddingPercent(percentage) {
this.nativeView.setFacePaddingPercent(percentage);
}
setFaceCaptureMinSize(percentage) {
this.nativeView.setFaceCaptureMinSize(percentage);
}
setFaceCaptureMaxSize(percentage) {
this.nativeView.setFaceCaptureMaxSize(percentage);
}
setSaveImageCaptured(enable) {
this.nativeView.setSaveImageCaptured(enable);
}
setFaceROIEnable(enable) {
this.nativeView.setFaceROIEnable(enable);
}
setFaceROITopOffset(percentage) {
this.nativeView.setFaceROITopOffset(percentage);
}
setFaceROIRightOffset(percentage) {
this.nativeView.setFaceROIRightOffset(percentage);
}
setFaceROIBottomOffset(percentage) {
this.nativeView.setFaceROIBottomOffset(percentage);
}
setFaceROILeftOffset(percentage) {
this.nativeView.setFaceROILeftOffset(percentage);
}
setFaceROIMinSize(percentage) {
this.nativeView.setFaceROIMinSize(percentage);
}
}
__decorate([
ValidateProps('lens', ['front', 'back']),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [String]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setLens", null);
__decorate([
ValidateProps('captureType', ['face', 'qrcode', 'frame', 'none']),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [String]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "startCapture", null);
__decorate([
ValidateProps('numberOfImages', RegexNumber),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Number]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setNumberOfImages", null);
__decorate([
ValidateProps('timeBetweenImages', RegexNumber),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Number]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setTimeBetweenImages", null);
__decorate([
ValidateProps('outputImageWidth', RegexPX),
NumberToPixel,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setOutputImageWidth", null);
__decorate([
ValidateProps('outputImageHeight', RegexPX),
NumberToPixel,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setOutputImageHeight", null);
__decorate([
ValidateProps('faceDetectionBox', [false, true]),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Boolean]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceDetectionBox", null);
__decorate([
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFacePaddingPercent", null);
__decorate([
ValidateProps('faceMinSize', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceCaptureMinSize", null);
__decorate([
ValidateProps('faceMaxSize', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceCaptureMaxSize", null);
__decorate([
ValidateProps('saveImageCaptured', [false, true]),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Boolean]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setSaveImageCaptured", null);
__decorate([
ValidateProps('faceROI', [false, true]),
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Boolean]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceROIEnable", null);
__decorate([
ValidateProps('faceROITopOffset', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceROITopOffset", null);
__decorate([
ValidateProps('faceROIRightOffset', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceROIRightOffset", null);
__decorate([
ValidateProps('faceROIBottomOffset', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceROIBottomOffset", null);
__decorate([
ValidateProps('faceROILeftOffset', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceROILeftOffset", null);
__decorate([
ValidateProps('faceROIMinSize', RegexPercentage),
PercentageToNumber,
__param(0, Required),
__metadata("design:type", Function),
__metadata("design:paramtypes", [Object]),
__metadata("design:returntype", void 0)
], CameraBase.prototype, "setFaceROIMinSize", null);
//# sourceMappingURL=Yoonit.Camera.common.js.map

@@ -57,44 +57,2 @@ import { CameraBase } from './Yoonit.Camera.common';

}
startCapture(captureType) {
this.nativeView.startCaptureTypeWithCaptureType(captureType);
}
setFaceNumberOfImages(faceNumberOfImages) {
this.nativeView.setFaceNumberOfImagesWithFaceNumberOfImages(faceNumberOfImages);
}
setFaceDetectionBox(faceDetectionBox) {
this.nativeView.setFaceDetectionBoxWithFaceDetectionBox(faceDetectionBox);
}
setFaceSaveImages(faceSaveImages) {
this.nativeView.setFaceSaveImagesWithFaceSaveImages(faceSaveImages);
}
setFaceTimeBetweenImages(faceTimeBetweenImages) {
this.nativeView.setFaceTimeBetweenImagesWithFaceTimeBetweenImages(faceTimeBetweenImages);
}
setFacePaddingPercent(facePaddingPercent) {
this.nativeView.setFacePaddingPercentWithFacePaddingPercent(facePaddingPercent);
}
setFaceImageSize(width, height) {
this.nativeView.setFaceImageSizeWithWidthHeight(width, height);
}
setFaceCaptureMinSize(faceCaptureMinSize) {
this.nativeView.setFaceCaptureMinSizeWithFaceCaptureMinSize(faceCaptureMinSize);
}
setFaceCaptureMaxSize(faceCaptureMaxSize) {
this.nativeView.setFaceCaptureMaxSizeWithFaceCaptureMaxSize(faceCaptureMaxSize);
}
setFrameNumberOfImages(frameNumberOfImages) {
this.nativeView.setFrameNumberOfImagesWithFrameNumberOfImages(frameNumberOfImages);
}
setFrameTimeBetweenImages(frameTimeBetweenImages) {
this.nativeView.setFrameTimeBetweenImagesWithFrameTimeBetweenImages(frameTimeBetweenImages);
}
setFaceROIEnable(faceROIEnable) {
this.nativeView.setFaceROIEnableWithFaceROIEnable(faceROIEnable);
}
setFaceROIOffset(topOffset, rightOffset, bottomOffset, leftOffset) {
this.nativeView.setFaceROIOffsetWithTopOffsetRightOffsetBottomOffsetLeftOffset(topOffset, rightOffset, bottomOffset, leftOffset);
}
setFaceROIMinSize(minimumSize) {
this.nativeView.setFaceROIMinSizeWithMinimumSize(minimumSize);
}
}

@@ -127,3 +85,3 @@ var CameraEventListener = /** @class */ (function (_super) {

};
CameraEventListener.prototype.onFaceImageCreatedWithCountTotalImagePath = function (count, total, imagePath) {
CameraEventListener.prototype.onImageCaptured = function (type, count, total, imagePath) {
var owner = this.owner.get();

@@ -133,4 +91,5 @@ var image = this.imageProcessing(imagePath);

owner.notify({
eventName: 'faceImage',
eventName: 'imageCaptured',
object: owner,
type: type,
count: count,

@@ -142,19 +101,6 @@ total: total,

};
CameraEventListener.prototype.onFrameImageCreatedWithCountTotalImagePath = function (count, total, imagePath) {
CameraEventListener.prototype.onFaceDetected = function (x, y, width, height) {
var owner = this.owner.get();
var image = this.imageProcessing(imagePath);
if (owner) {
owner.notify({
eventName: 'frameImage',
object: owner,
count: count,
total: total,
image: image
});
}
};
CameraEventListener.prototype.onFaceDetectedWithXYWidthHeight = function (x, y, width, height) {
var owner = this.owner.get();
if (owner) {
owner.notify({
eventName: 'faceDetected',

@@ -191,6 +137,16 @@ object: owner,

};
CameraEventListener.prototype.onErrorWithError = function (error) {
CameraEventListener.prototype.onQRCodeScanned = function (content) {
var owner = this.owner.get();
if (owner) {
owner.notify({
eventName: 'qrCodeContent',
object: owner,
content: content
});
}
};
CameraEventListener.prototype.onError = function (error) {
var owner = this.owner.get();
if (owner) {
owner.notify({
eventName: 'status',

@@ -205,3 +161,3 @@ object: owner,

};
CameraEventListener.prototype.onMessageWithMessage = function (message) {
CameraEventListener.prototype.onMessage = function (message) {
var owner = this.owner.get();

@@ -228,12 +184,2 @@ if (owner) {

};
CameraEventListener.prototype.onBarcodeScannedWithContent = function (content) {
var owner = this.owner.get();
if (owner) {
owner.notify({
eventName: 'qrCodeContent',
object: owner,
content: content
});
}
};
var CameraEventListener_1;

@@ -240,0 +186,0 @@ CameraEventListener = CameraEventListener_1 = __decorate([

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc