@yoonit/nativescript-camera
Advanced tools
Comparing version 2.3.0 to 3.0.0
import "reflect-metadata"; | ||
import { Color, isAndroid } from '@nativescript/core'; | ||
const MetadataKey = Symbol("required"); | ||
@@ -62,3 +63,3 @@ class Validator { | ||
} | ||
static NativeMethod(nativeMethodName) { | ||
static NativeMethod({ name, length }) { | ||
return function (target, propertyName, descriptor) { | ||
@@ -76,4 +77,5 @@ let method = descriptor.value; | ||
Validator.PropMap.push({ | ||
name: nativeMethodName, | ||
value: arguments[parameterIndex] | ||
name, | ||
value: arguments[parameterIndex], | ||
length | ||
}); | ||
@@ -124,2 +126,36 @@ } | ||
} | ||
static ParseToNsColor(target, propertyName, descriptor) { | ||
let method = descriptor.value; | ||
descriptor.value = function () { | ||
let validateParameters = Reflect.getOwnMetadata(MetadataKey, target, propertyName); | ||
if (validateParameters) { | ||
for (let parameterIndex of validateParameters) { | ||
const invalid = parameterIndex >= arguments.length || | ||
arguments[parameterIndex] === undefined; | ||
if (invalid) { | ||
throw new Error("Missing argument."); | ||
} | ||
let rawColor = arguments[parameterIndex]; | ||
const nsColor = new Color(rawColor); | ||
if (isAndroid) { | ||
arguments[parameterIndex] = [ | ||
nsColor.a, | ||
nsColor.r, | ||
nsColor.g, | ||
nsColor.b | ||
]; | ||
} | ||
else { | ||
arguments[parameterIndex] = [ | ||
nsColor.a / 255, | ||
nsColor.r / 255, | ||
nsColor.g / 255, | ||
nsColor.b / 255 | ||
]; | ||
} | ||
} | ||
} | ||
return method.apply(this, arguments); | ||
}; | ||
} | ||
} | ||
@@ -126,0 +162,0 @@ Validator.RegexColor = /(#([\da-f]{3}){1,2}|(rgb|hsl)a\((\d{1,3}%?,\s?){3}(1|0?\.\d+)\)|(rgb|hsl)\(\d{1,3}%?(,\s?\d{1,3}%?){2}\))/ig; |
{ | ||
"name": "@yoonit/nativescript-camera", | ||
"version": "2.3.0", | ||
"version": "3.0.0", | ||
"description": "Yoonit Camera have a custom view that shows a preview layer of the front/back camera and detects human faces in it and read qr code.", | ||
@@ -38,3 +38,3 @@ "main": "Yoonit.Camera", | ||
"author": { | ||
"name": "Luigui Delyer, Haroldo Teruya, Victor Goulart & Márcio Bruffato @ Cyberlabs AI - 2020", | ||
"name": "Luigui Delyer, Haroldo Teruya, Victor Goulart, Gabriel Rizzo & Márcio Bruffato @ Cyberlabs AI - 2020-2021", | ||
"email": "contato@cyberlabs.ai" | ||
@@ -44,2 +44,3 @@ }, | ||
"devDependencies": { | ||
"@nativescript/android": "7.0.1", | ||
"@nativescript/core": "~7.0.0", | ||
@@ -46,0 +47,0 @@ "@nativescript/types": "~7.0.0", |
262
README.md
@@ -0,1 +1,2 @@ | ||
[<img src="https://raw.githubusercontent.com/Yoonit-Labs/nativescript-yoonit-camera/development/logo_cyberlabs.png" width="300">](https://cyberlabs.ai/) | ||
@@ -10,18 +11,30 @@ | ||
A NativeScript plugin to provide: | ||
- Modern Android Camera API (Camera X) | ||
- MLKit integration | ||
- Modern Android Camera API [Camera X](https://developer.android.com/training/camerax) | ||
- Camera preview (Front & Back) | ||
- Face detection (With Min & Max size) | ||
- Landmark detection (Soon) | ||
- Face crop | ||
- Face capture | ||
- [PyTorch](https://pytorch.org/mobile/home/) integration (Android) | ||
- Computer vision pipeline | ||
- Face detection, capture and image crop | ||
- Understanding of the human face | ||
- Frame capture | ||
- Face ROI | ||
- Capture timed images | ||
- QR Code scanning | ||
## Table Of Contents | ||
* [Installation](#installation) | ||
* [Usage](#usage) | ||
* [VueJS Plugin](#vuejs-plugin) | ||
* [Vue Component](#vue-component) | ||
* [API](#api) | ||
* [Props](#props) | ||
* [Methods](#methods) | ||
* [Events](#events) | ||
* [Messages](#messages) | ||
* [Contribute](#contribute-and-make-it-better) | ||
## Installation | ||
```javascript | ||
npm i -s @yoonit/nativescript-camera | ||
``` | ||
```javascript | ||
npm i -s @yoonit/nativescript-camera | ||
``` | ||
@@ -34,13 +47,15 @@ ## Usage | ||
`main.js` | ||
```javascript | ||
import Vue from 'nativescript-vue' | ||
import YoonitCamera from '@yoonit/nativescript-camera/vue' | ||
```javascript | ||
import Vue from 'nativescript-vue' | ||
import YoonitCamera from '@yoonit/nativescript-camera/vue' | ||
Vue.use(YoonitCamera) | ||
``` | ||
Vue.use(YoonitCamera) | ||
``` | ||
After that, you can access the camera object in your entire project using `this.$yoo.camera` | ||
#### Vue Component | ||
`App.vue` | ||
```vue | ||
@@ -56,3 +71,3 @@ <template> | ||
imageCaptureInterval=500 | ||
faceDetectionBox=true | ||
detectionBox=true | ||
@faceDetected="doFaceDetected" | ||
@@ -86,4 +101,29 @@ @imageCaptured="doImageCaptured" | ||
doFaceDetected({ x, y, width, height }) { | ||
console.log('[YooCamera] doFaceDetected', `{x: ${x}, y: ${y}, width: ${width}, height: ${height}}`) | ||
doFaceDetected({ | ||
x, | ||
y, | ||
width, | ||
height, | ||
leftEyeOpenProbability, | ||
rightEyeOpenProbability, | ||
smilingProbability, | ||
headEulerAngleX, | ||
headEulerAngleY, | ||
headEulerAngleZ | ||
}) { | ||
console.log( | ||
'[YooCamera] doFaceDetected', | ||
` | ||
x: ${x} | ||
y: ${y} | ||
width: ${width} | ||
height: ${height} | ||
leftEyeOpenProbability: ${leftEyeOpenProbability} | ||
rightEyeOpenProbability: ${rightEyeOpenProbability} | ||
smilingProbability: ${smilingProbability} | ||
headEulerAngleX: ${headEulerAngleX} | ||
headEulerAngleY: ${headEulerAngleY} | ||
headEulerAngleZ: ${headEulerAngleZ} | ||
` | ||
) | ||
if (!x || !y || !width || !height) { | ||
@@ -101,3 +141,4 @@ this.imagePath = null | ||
source | ||
} | ||
}, | ||
inferences | ||
}) { | ||
@@ -111,3 +152,3 @@ if (total === 0) { | ||
} | ||
console.log('[YooCamera] Mask Pytorch', inferences) | ||
this.imagePath = source | ||
@@ -134,88 +175,133 @@ }, | ||
</script> | ||
``` | ||
``` | ||
## API | ||
### Props | ||
#### Props | ||
| Props | Input/Format | Default value | Description | | ||
| - | - | - | - | | ||
| lens | `"front"` or `"back"` | `"front"` | The camera lens to use "front" or "back". | | ||
| captureType | `"none"`, `"front"`, `"frame"` or `"qrcode"` | `"none"` | The capture type of the camera. | | ||
| imageCapture | boolean | `false` | Enable/disabled save image capture. | | ||
| imageCaptureAmount | number | `0` | The image capture amount goal. | | ||
| imageCaptureInterval | number | `1000` | The image capture time interval in milliseconds. | | ||
| imageCaptureWidth | `"NNpx"` | `"200px"` | The image capture width in pixels. | | ||
| imageCaptureHeight | `"NNpx"` | `"200px"` | The image capture height in pixels. | | ||
| colorEncoding | `"RGB"` or `"YUV"` | `"RGB"` | Only for android. The image capture color encoding type: `"RGB"` or `"YUV"`. | | ||
| faceDetectionBox | boolean | `false` | Show/hide the face detection box. | | ||
| faceMinSize | `"NN%"` | `"0%"` | The face minimum size percentage to capture. | | ||
| faceMaxSize | `"NN%"` | `"100%"` | The face maximum size percentage to capture. | | ||
| faceROI | boolean | `false` | Enable/disable the region of interest capture. | | ||
| faceROITopOffset | `"NN%"` | `"0%"` | Distance in percentage of the top face bounding box with the top of the camera preview. | | ||
| faceROIRightOffset | `"NN%"` | `"0%"` | Distance in percentage of the right face bounding box with the right of the camera preview. | ||
| faceROIBottomOffset | `"NN%"` | `"0%"` | Distance in percentage of the bottom face bounding box with the bottom of the camera preview. | ||
| faceROILeftOffset | `"NN%"` | `"0%"` | Distance in percentage of the left face bounding box with the left of the camera preview. | ||
| faceROIMinSize | `"NN%"` | `"0%"` | The minimum face size related within the ROI. | | ||
| Props | Input/Format | Default value | Description | | ||
| - | - | - | - | | ||
| lens | `"front"` or `"back"` | `"front"` | The camera lens to use "front" or "back". | | ||
| captureType | `"none"`, `"front"`, `"frame"` or `"qrcode"` | `"none"` | The capture type of the camera. | | ||
| imageCapture | `boolean` | `false` | Enable/disable save image capture. | | ||
| imageCaptureAmount | `number` | `0` | The image capture amount goal. | | ||
| imageCaptureInterval | `number` | `1000` | The image capture time interval in milliseconds. | | ||
| imageCaptureWidth | `"NNpx"` | `"200px"` | The image capture width in pixels. | | ||
| imageCaptureHeight | `"NNpx"` | `"200px"` | The image capture height in pixels. | | ||
| colorEncoding | `"RGB"` or `"YUV"` | `"RGB"` | Only for android. The image capture color encoding type: `"RGB"` or `"YUV"`. | | ||
| detectionBox | `boolean` | `false` | Show/hide the face detection box. | ||
| detectionBoxColor | `string` | `#ffffff` | Set detection box color. | | ||
| detectionMinSize | `"NN%"` | `"0%"` | The face minimum size percentage to capture. | | ||
| detectionMaxSize | `"NN%"` | `"100%"` | The face maximum size percentage to capture. | | ||
| roi | `boolean` | `false` | Enable/disable the region of interest capture. | | ||
| roiTopOffset | `"NN%"` | `"0%"` | Distance in percentage of the top face bounding box with the top of the camera preview. | | ||
| roiRightOffset | `"NN%"` | `"0%"` | Distance in percentage of the right face bounding box with the right of the camera preview. | | ||
| roiBottomOffset | `"NN%"` | `"0%"` | Distance in percentage of the bottom face bounding box with the bottom of the camera preview. | | ||
| roiLeftOffset | `"NN%"` | `"0%"` | Distance in percentage of the left face bounding box with the left of the camera preview. | | ||
| roiAreaOffset | `boolean` | `false` | Enable/disable display of the region of interest area offset. | | ||
| roiAreaOffsetColor | `string` | `'#ffffff73'` | Set display of the region of interest area offset color. | | ||
| faceContours (`Android Only`) | `boolean` | `false` | Enable/disable display list of points on a detected face. | | ||
| faceContoursColor (`Android Only`) | `string` | `'#FFFFFF'` | Set face contours color. | | ||
| computerVision (`Android Only`) | `boolean` | `false` | Enable/disable computer vision model. | | ||
| torch | `boolean` | `false` | Enable/disable device torch. Available only to camera lens `"back"`. | | ||
#### Methods | ||
| Function | Parameters | Valid values | Return Type | Description | ||
| - | - | - | - | - | ||
| requestPermission | - | - | promise | Ask the user to give the permission to access camera. | | ||
| hasPermission | - | - | boolean | Return if application has camera permission. | | ||
| preview | - | - | void | Start camera preview if has permission. | | ||
| startCapture | `type: string` | <ul><li>`"none"`</li><li>`"face"`</li><li>`"qrcode"`</li><li>`"frame"`</li></ul> | void | Set capture type "none", "face", "qrcode" or "frame". Default value is `"none"`. | | ||
| stopCapture | - | - | void | Stop any type of capture. | | ||
| destroy | - | - | void | Destroy preview. | | ||
| toggleLens | - | - | void | Toggle camera lens facing "front"/"back". | | ||
| setCameraLens | `lens: string` | `"front"` or `"back"` | void | Set camera to use "front" or "back" lens. Default value is `"front"`. | | ||
| getLens | - | - | string | Return "front" or "back". | | ||
| setImageCapture | `enable: boolean` | `true` or `false` | void | Enable/disabled save image capture. Default value is `false` | | ||
| setImageCaptureAmount | `amount: Int` | Any positive `Int` value | void | For value `0`, save infinity images. When the capture image amount is reached, the event `onEndCapture` is triggered. Default value is `0`. | | ||
| setImageCaptureInterval | `interval: number` | Any positive number that represent time in milliseconds | void | Set the image capture time interval in milliseconds. | | ||
| setImageCaptureWidth | `width: string` | Value format must be in `NNpx` | void | Set the image capture width in pixels. | | ||
| setImageCaptureHeight | `height: string` | Value format must be in `NNpx` | void | Set the image capture height in pixels. | | ||
| setImageCaptureColorEncoding | `colorEncoding: string` | `"YUV"` or `"RGB"` | void | Only for android. Set the image capture color encoding type: `"RGB"` or `"YUV"`. | | ||
| setFaceDetectionBox | `enable: boolean` | `true` or `false` | void | Set to show/hide the face detection box. | | ||
| setFacePaddingPercent | `percentage: string` | Value format must be in `NN%` | void | Set face image capture and detection box padding in percentage. | | ||
| setFaceCaptureMinSize | `percentage: string` | Value format must be in `NN%` | void | Set the face minimum size percentage to capture. | | ||
| setFaceCaptureMaxSize | `percentage: string` | Value format must be in `NN%` | void | Set the face maximum size percentage to capture. | | ||
| setFaceROIEnable | `enable: boolean` | `true` or `false` | void | Enable/disable face region of interest capture. | | ||
| setFaceROITopOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the top face bounding box with the top of the camera preview. | | ||
| setFaceROIRightOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the right face bounding box with the right of the camera preview. | | ||
| setFaceROIBottomOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the bottom face bounding box with the bottom of the camera preview. | | ||
| setFaceROILeftOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the left face bounding box with the left of the camera preview. | | ||
| setFaceROIMinSize | `percentage: string` | Value format must be in `NN%` | void | Set the minimum face size related within the ROI. | | ||
| Function | Parameters | Valid values | Return Type | Description | | ||
| - | - | - | - | - | | ||
| requestPermission | - | - | promise | Ask the user to give the permission to access camera. | | ||
| hasPermission | - | - | boolean | Return if application has camera permission. | | ||
| preview | - | - | void | Start camera preview if has permission. | | ||
| startCapture | `type: string` | <ul><li>`"none"`</li><li>`"face"`</li><li>`"qrcode"`</li><li>`"frame"`</li></ul> | void | Set capture type "none", "face", "qrcode" or "frame". Default value is `"none"`. | | ||
| stopCapture | - | - | void | Stop any type of capture. | | ||
| destroy | - | - | void | Destroy preview. | | ||
| toggleLens | - | - | void | Toggle camera lens facing "front"/"back". | | ||
| setCameraLens | `lens: string` | `"front"` or `"back"` | void | Set camera to use "front" or "back" lens. Default value is `"front"`. | | ||
| getLens | - | - | string | Return "front" or "back". | | ||
| setImageCapture | `enable: boolean` | `true` or `false` | void | Enable/disable save image capture. Default value is `false`. | | ||
| setImageCaptureAmount | `amount: Int` | Any positive `Int` value | void | For value `0`, save infinity images. When the capture image amount is reached, the event `onEndCapture` is triggered. Default value is `0`. | | ||
| setImageCaptureInterval | `interval: number` | Any positive number that represents time in milliseconds | void | Set the image capture time interval in milliseconds. | | ||
| setImageCaptureWidth | `width: string` | Value format must be in `NNpx` | void | Set the image capture width in pixels. | | ||
| setImageCaptureHeight | `height: string` | Value format must be in `NNpx` | void | Set the image capture height in pixels. | | ||
| setImageCaptureColorEncoding | `colorEncoding: string` | `"YUV"` or `"RGB"` | void | Only for android. Set the image capture color encoding type: `"RGB"` or `"YUV"`. | | ||
| setDetectionBox | `enable: boolean` | `true` or `false` | void | Set to show/hide the face detection box. | | ||
| setDetectionBoxColor | `color: string` | hexadecimal | void | Set detection box color. | | ||
| setFacePaddingPercent | `percentage: string` | Value format must be in `NN%` | void | Set face image capture and detection box padding in percentage. | | ||
| setDetectionMinSize | `percentage: string` | Value format must be in `NN%` | void | Set the face minimum size percentage to capture. | | ||
| setDetectionMaxSize | `percentage: string` | Value format must be in `NN%` | void | Set the face maximum size percentage to capture. | | ||
| setROI | `enable: boolean` | `true` or `false` | void | Enable/disable face region of interest capture. | | ||
| setROITopOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the top face bounding box with the top of the camera preview. | | ||
| setROIRightOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the right face bounding box with the right of the camera preview. | | ||
| setROIBottomOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the bottom face bounding box with the bottom of the camera preview. | | ||
| setROILeftOffset | `percentage: string` | Value format must be in `NN%` | void | Distance in percentage of the left face bounding box with the left of the camera preview. | | ||
| setROIMinSize | `percentage: string` | Value format must be in `NN%` | void | Set the minimum face size related within the ROI. | | ||
| setROIAreaOffset | `enable: boolean` | `true` or `false` | void | Enable/disable display of the region of interest area offset. | | ||
| setROIAreaOffsetColor | `color: string` | Hexadecimal color | void | Set display of the region of interest area offset color. | | ||
| setFaceContours (`Android Only`) | `enable: boolean` | `true` or `false` | void | Enable/disable display list of points on a detected face. | | ||
| setFaceContoursColor (`Android Only`) | `color: string` | Hexadecimal color | void | Set face contours color. | | ||
| setComputerVision (`Android Only`) | `enable: boolean` | `true` or `false` | void | Enable/disable computer vision model. | | ||
| setComputerVisionLoadModels (`Android Only`) | `modelPaths: Array<string>` | Valid system path file to a PyTorch computer vision model | void | Set model to be used when image is captured. To see more about it, <a href="https://github.com/Yoonit-Labs/nativescript-yoonit-camera/wiki">Click Here</a>. | | ||
| computerVisionClearModels (`Android Only`) | - | - | void | Clear models that was previous added using `setComputerVisionLoadModels`. | | ||
| setTorch | `enable: boolean` | `true` or `false` | void | Enable/disable device torch. Available only to camera lens `"back"`. | | ||
#### Events | ||
#### Events | ||
| Event | Parameters | Description | ||
| - | - | - | ||
| imageCaptured | `{ type: string, count: number, total: number, image: object = { path: string, source: blob, bynary: blob } }` | Must have started capture type of face/frame. Emitted when the face image file saved: <ul><li>type: "face" or "frame"</li>count: current index</li><li>total: total to create</li><li>image.path: the face/frame image path</li><li>image.source: the blob file</li><li>image.binary: the blob file</li><ul> | ||
| faceDetected | `{ x: number, y: number, width: number, height: number }` | Must have started capture type of face. Emit the detected face bounding box. Emit all parameters null if no more face detecting. | ||
| endCapture | - | Must have started capture type of face/frame. Emitted when the number of image files created is equal of the number of images set (see the method `setImageCaptureAmount`). | ||
| qrCodeContent | `{ content: string }` | Must have started capture type of qrcode (see `startCapture`). Emitted when the camera read a QR Code. | ||
| status | `{ type: 'error'/'message', status: string }` | Emit message error from native. Used more often for debug purpose. | ||
| permissionDenied | - | Emit when try to `preview` but there is not camera permission. | ||
| Event | Parameters | Description | ||
| - | - | - | ||
| imageCaptured | `{ type: string, count: number, total: number, image: object = { path: string, source: any, binary: any }, inferences: [{ ['model name']: model output }] }` | Must have started capture type of face/frame. Emitted when the face image file is saved: <ul><li>type: "face" or "frame"</li><li>count: current index</li><li>total: total to create</li><li>image.path: the face/frame image path</li><li>image.source: the blob file</li><li>image.binary: the blob file</li><li>inferences: An Array with models output</li></ul> | ||
| faceDetected | `{ x: number, y: number, width: number, height: number, leftEyeOpenProbability: number, rightEyeOpenProbability: number, smilingProbability: number, headEulerAngleX: number, headEulerAngleY: number, headEulerAngleZ: number }` | Must have started capture type of face. Emit the [face analysis](#face-analysis), all parameters null if no more face detecting. | ||
| endCapture | - | Must have started capture type of face/frame. Emitted when the number of image files created is equal of the number of images set (see the method `setImageCaptureAmount`). | ||
| qrCodeContent | `{ content: string }` | Must have started capture type of qrcode (see `startCapture`). Emitted when the camera read a QR Code. | ||
| status | `{ type: 'error'/'message', status: string }` | Emit message error from native. Used more often for debug purpose. | ||
| permissionDenied | - | Emit when try to `preview` but there is not camera permission. | ||
### Message | ||
#### Face Analysis | ||
The face analysis is the response sent by the `onFaceDetected` event. Here we specify all the parameters. | ||
| Attribute | Type | Description | | ||
| - | - | - | | ||
| x | `number` | The `x` position of the face in the screen. | | ||
| y | `number` | The `y` position of the face in the screen. | | ||
| width | `number` | The `width` position of the face in the screen. | | ||
| height | `number` | The `height` position of the face in the screen. | | ||
| leftEyeOpenProbability | `number` | The left eye open probability. | | ||
| rightEyeOpenProbability | `number` | The right eye open probability. | | ||
| smilingProbability | `number` | The smiling probability. | | ||
| headEulerAngleX | `number` | The angle in degrees that indicate the vertical head direction. See [Head Movements](#head-movements) | | ||
| headEulerAngleY | `number` | The angle in degrees that indicate the horizontal head direction. See [Head Movements](#head-movements) | | ||
| headEulerAngleZ | `number` | The angle in degrees that indicate the tilt head direction. See [Head Movements](#head-movements) | | ||
#### Head Movements | ||
Here we're explaining the above gif and how we reached the "results". Each "movement" (vertical, horizontal and tilt) is a state, based on the angle in degrees that indicates head direction: | ||
| Head Direction | Attribute | _v_ < -36° | -36° < _v_ < -12° | -12° < _v_ < 12° | 12° < _v_ < 36° | 36° < _v_ | | ||
| - | - | - | - | - | - | - | | ||
| Vertical | `headEulerAngleX` | Super Down | Down | Frontal | Up | Super Up | | ||
| Horizontal | `headEulerAngleY` | Super Left | Left | Frontal | Right | Super Right | | ||
| Tilt | `headEulerAngleZ` | Super Right | Right | Frontal | Left | Super Left | | ||
#### Messages | ||
Pre-define message constants used by the `status` event. | ||
| Message | Description | ||
| - | - | ||
| INVALID_CAPTURE_FACE_MIN_SIZE | Face width percentage in relation of the screen width is less than the set (`setFaceCaptureMinSize`). | ||
| INVALID_CAPTURE_FACE_MAX_SIZE | Face width percentage in relation of the screen width is more than the set (`setFaceCaptureMaxSize`). | ||
| INVALID_CAPTURE_FACE_OUT_OF_ROI | Face bounding box is out of the set region of interest (`setFaceROIOffset`). | ||
| INVALID_CAPTURE_FACE_ROI_MIN_SIZE | Face width percentage in relation of the screen width is less than the set (`setFaceROIMinSize`). | ||
| Message | Description | ||
| - | - | ||
| INVALID_MINIMUM_SIZE | Face/QRCode width percentage in relation of the screen width is less than the set. | ||
| INVALID_MAXIMUM_SIZE | Face/QRCode width percentage in relation of the screen width is more than the set. | ||
| INVALID_OUT_OF_ROI | Face bounding box is out of the set region of interest. | ||
## To contribute and make it better | ||
## Contribute and make it better | ||
Clone the repo, change what you want and send PR. | ||
Contributions are always welcome! | ||
For commit messages we use <a href="https://www.conventionalcommits.org/">Conventional Commits</a>. | ||
--- | ||
Contributions are always welcome — here are some people who have already contributed! | ||
[<img src="https://contrib.rocks/image?repo=Yoonit-Labs/nativescript-yoonit-camera" />](https://github.com/Yoonit-Labs/nativescript-yoonit-camera/graphs/contributors) | ||
--- | ||
Code with ❤ by the [**Cyberlabs AI**](https://cyberlabs.ai/) Front-End Team |
56
vue.js
@@ -7,4 +7,4 @@ // +-+-+-+-+-+-+ | ||
// | Yoonit Camera Plugin for NativeScript applications | | ||
// | Luigui Delyer, Haroldo Teruya, | | ||
// | Victor Goulart & Márcio Bruffato @ Cyberlabs AI 2020 | | ||
// | Luigui Delyer, Haroldo Teruya, Victor Goulart | | ||
// | Gabriel Rizzo & Márcio Bruffato @ Cyberlabs AI 2020-2021 | | ||
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
@@ -64,12 +64,20 @@ | ||
setImageCaptureColorEncoding, | ||
setFaceDetectionBox, | ||
setFacePaddingPercent, | ||
setFaceCaptureMinSize, | ||
setFaceCaptureMaxSize, | ||
setFaceROIEnable, | ||
setFaceROITopOffset, | ||
setFaceROIRightOffset, | ||
setFaceROIBottomOffset, | ||
setFaceROILeftOffset, | ||
setFaceROIMinSize, | ||
setDetectionBox, | ||
setDetectionBoxColor, | ||
setDetectionMinSize, | ||
setDetectionMaxSize, | ||
setROI, | ||
setROITopOffset, | ||
setROIRightOffset, | ||
setROIBottomOffset, | ||
setROILeftOffset, | ||
setROIAreaOffset, | ||
setROIAreaOffsetColor, | ||
setFaceContours, | ||
setFaceContoursColor, | ||
setComputerVision, | ||
setComputerVisionLoadModels, | ||
computerVisionClearModels, | ||
setTorch, | ||
} = element.nativeView | ||
@@ -95,12 +103,20 @@ | ||
setImageCaptureColorEncoding, | ||
setFaceDetectionBox, | ||
setFacePaddingPercent, | ||
setFaceCaptureMinSize, | ||
setFaceCaptureMaxSize, | ||
setFaceROIEnable, | ||
setFaceROITopOffset, | ||
setFaceROIRightOffset, | ||
setFaceROIBottomOffset, | ||
setFaceROILeftOffset, | ||
setFaceROIMinSize, | ||
setDetectionBox, | ||
setDetectionBoxColor, | ||
setDetectionMinSize, | ||
setDetectionMaxSize, | ||
setROI, | ||
setROITopOffset, | ||
setROIRightOffset, | ||
setROIBottomOffset, | ||
setROILeftOffset, | ||
setROIAreaOffset, | ||
setROIAreaOffsetColor, | ||
setFaceContours, | ||
setFaceContoursColor, | ||
setComputerVision, | ||
setComputerVisionLoadModels, | ||
computerVisionClearModels, | ||
setTorch, | ||
} | ||
@@ -107,0 +123,0 @@ |
@@ -5,3 +5,3 @@ import { CameraBase } from './Yoonit.Camera.common'; | ||
import Validator from "./helpers/Validator"; | ||
const { ValidateProps, Required, NativeMethod, } = Validator; | ||
const { ValidateProps, Required, NativeMethod, RegexColor, ParseToNsColor, } = Validator; | ||
const CAMERA = () => android.Manifest.permission.CAMERA; | ||
@@ -19,2 +19,5 @@ export class YoonitCamera extends CameraBase { | ||
if (this.nativeView[prop.name]) { | ||
if (prop.length > 1) { | ||
return this.nativeView[prop.name](...prop.value); | ||
} | ||
this.nativeView[prop.name](prop.value); | ||
@@ -43,6 +46,25 @@ } | ||
} | ||
setFaceContours(enable) { | ||
this.nativeView.setFaceContours(enable); | ||
} | ||
setFaceContoursColor(color) { | ||
this.nativeView.setFaceContoursColor(...color); | ||
} | ||
setComputerVision(enable) { | ||
this.nativeView.setComputerVision(enable); | ||
} | ||
setComputerVisionLoadModels(modelPaths) { | ||
const nativeArrayList = new java.util.ArrayList(); | ||
modelPaths.forEach((path) => { | ||
nativeArrayList.add(path); | ||
}); | ||
this.nativeView.setComputerVisionLoadModels(nativeArrayList); | ||
} | ||
computerVisionClearModels() { | ||
this.nativeView.computerVisionClearModels(); | ||
} | ||
} | ||
__decorate([ | ||
ValidateProps('colorEncoding', ['RGB', 'YUV']), | ||
NativeMethod('setColorEncodingCapture'), | ||
NativeMethod({ name: 'setColorEncodingCapture', length: 1 }), | ||
__param(0, Required), | ||
@@ -53,2 +75,27 @@ __metadata("design:type", Function), | ||
], YoonitCamera.prototype, "setImageCaptureColorEncoding", null); | ||
__decorate([ | ||
ValidateProps('faceContours', [false, true]), | ||
NativeMethod({ name: 'setFaceContours', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Boolean]), | ||
__metadata("design:returntype", void 0) | ||
], YoonitCamera.prototype, "setFaceContours", null); | ||
__decorate([ | ||
ValidateProps('faceContoursColor', RegexColor), | ||
ParseToNsColor, | ||
NativeMethod({ name: 'setFaceContoursColor', length: 4 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Object]), | ||
__metadata("design:returntype", void 0) | ||
], YoonitCamera.prototype, "setFaceContoursColor", null); | ||
__decorate([ | ||
ValidateProps('computerVision', [false, true]), | ||
NativeMethod({ name: 'setComputerVision', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Boolean]), | ||
__metadata("design:returntype", void 0) | ||
], YoonitCamera.prototype, "setComputerVision", null); | ||
var CameraEventListener = /** @class */ (function (_super) { | ||
@@ -76,3 +123,17 @@ __extends(CameraEventListener, _super); | ||
}; | ||
CameraEventListener.prototype.onImageCaptured = function (type, count, total, imagePath, inferences) { | ||
CameraEventListener.prototype.onImageCaptured = function (type, count, total, imagePath, inferences // output generated by computer vision model | ||
) { | ||
var _a; | ||
var inferencesJsArray = new Array(); | ||
for (var i = 0; i < inferences.size(); i++) { | ||
var modelKey = inferences.get(i).first; | ||
var javaArray = inferences.get(i).second; | ||
var modelOutput = []; | ||
for (var k = 0; k < javaArray.length; k++) { | ||
modelOutput.push(javaArray[k]); | ||
} | ||
inferencesJsArray.push((_a = {}, | ||
_a[modelKey] = modelOutput, | ||
_a)); | ||
} | ||
var owner = this.owner.get(); | ||
@@ -87,7 +148,8 @@ if (owner && !!imagePath) { | ||
total: total, | ||
image: image | ||
image: image, | ||
inferences: inferencesJsArray | ||
}); | ||
} | ||
}; | ||
CameraEventListener.prototype.onFaceDetected = function (x, y, width, height) { | ||
CameraEventListener.prototype.onFaceDetected = function (x, y, width, height, leftEyeOpenProbability, rightEyeOpenProbability, smilingProbability, headEulerAngleX, headEulerAngleY, headEulerAngleZ) { | ||
var owner = this.owner.get(); | ||
@@ -101,3 +163,9 @@ if (owner) { | ||
width: width, | ||
height: height | ||
height: height, | ||
leftEyeOpenProbability: leftEyeOpenProbability, | ||
rightEyeOpenProbability: rightEyeOpenProbability, | ||
smilingProbability: smilingProbability, | ||
headEulerAngleX: headEulerAngleX, | ||
headEulerAngleY: headEulerAngleY, | ||
headEulerAngleZ: headEulerAngleZ | ||
}); | ||
@@ -115,3 +183,9 @@ } | ||
width: null, | ||
height: null | ||
height: null, | ||
leftEyeOpenProbability: null, | ||
rightEyeOpenProbability: null, | ||
smilingProbability: null, | ||
headEulerAngleX: null, | ||
headEulerAngleY: null, | ||
headEulerAngleZ: null | ||
}); | ||
@@ -118,0 +192,0 @@ } |
@@ -1,4 +0,4 @@ | ||
import { ContentView, } from '@nativescript/core'; | ||
import { ContentView } from '@nativescript/core'; | ||
import Validator from "./helpers/Validator"; | ||
const { ValidateProps, Required, NativeMethod, RegexNumber, RegexPX, PercentageToNumber, RegexPercentage, NumberToPixel, } = Validator; | ||
const { ValidateProps, Required, NativeMethod, RegexNumber, RegexPX, PercentageToNumber, RegexPercentage, NumberToPixel, ParseToNsColor, RegexColor } = Validator; | ||
export class CameraBase extends ContentView { | ||
@@ -29,31 +29,49 @@ set lens(value) { | ||
} | ||
set faceMinSize(value) { | ||
this.setFaceCaptureMinSize(value); | ||
set detectionBox(value) { | ||
this.setDetectionBox(value); | ||
} | ||
set faceMaxSize(value) { | ||
this.setFaceCaptureMaxSize(value); | ||
set detectionBoxColor(value) { | ||
this.setDetectionBoxColor(value); | ||
} | ||
set faceDetectionBox(value) { | ||
this.setFaceDetectionBox(value); | ||
set detectionMinSize(value) { | ||
this.setDetectionMinSize(value); | ||
} | ||
set faceROI(value) { | ||
this.setFaceROIEnable(value); | ||
set detectionMaxSize(value) { | ||
this.setDetectionMaxSize(value); | ||
} | ||
set faceROITopOffset(value) { | ||
this.setFaceROITopOffset(value); | ||
set roi(value) { | ||
this.setROI(value); | ||
} | ||
set faceROIRightOffset(value) { | ||
this.setFaceROIRightOffset(value); | ||
set roiTopOffset(value) { | ||
this.setROITopOffset(value); | ||
} | ||
set faceROIBottomOffset(value) { | ||
this.setFaceROIBottomOffset(value); | ||
set roiRightOffset(value) { | ||
this.setROIRightOffset(value); | ||
} | ||
set faceROILeftOffset(value) { | ||
this.setFaceROILeftOffset(value); | ||
set roiBottomOffset(value) { | ||
this.setROIBottomOffset(value); | ||
} | ||
set faceROIMinSize(value) { | ||
this.setFaceROIMinSize(value); | ||
set roiLeftOffset(value) { | ||
this.setROILeftOffset(value); | ||
} | ||
set roiAreaOffsetColor(value) { | ||
this.setROIAreaOffsetColor(value); | ||
} | ||
set roiAreaOffset(value) { | ||
this.setROIAreaOffset(value); | ||
} | ||
set faceContours(value) { | ||
this.setFaceContours(value); | ||
} | ||
set faceContoursColor(value) { | ||
this.setFaceContoursColor(value); | ||
} | ||
set computerVision(value) { | ||
this.setComputerVision(value); | ||
} | ||
set torch(value) { | ||
this.setTorch(value); | ||
} | ||
requestPermission(explanationText) { | ||
return new Promise((resolve, reject) => resolve()); | ||
return new Promise((resolve, reject) => resolve(true)); | ||
} | ||
@@ -100,36 +118,50 @@ hasPermission() { | ||
setImageCaptureColorEncoding(colorEncoding) { } | ||
setFaceDetectionBox(enable) { | ||
this.nativeView.setFaceDetectionBox(enable); | ||
} | ||
setFacePaddingPercent(percentage) { | ||
this.nativeView.setFacePaddingPercent(percentage); | ||
} | ||
setFaceCaptureMinSize(percentage) { | ||
this.nativeView.setFaceCaptureMinSize(percentage); | ||
setDetectionBox(enable) { | ||
this.nativeView.setDetectionBox(enable); | ||
} | ||
setFaceCaptureMaxSize(percentage) { | ||
this.nativeView.setFaceCaptureMaxSize(percentage); | ||
setDetectionBoxColor(color) { | ||
this.nativeView.setDetectionBoxColor(...color); | ||
} | ||
setFaceROIEnable(enable) { | ||
this.nativeView.setFaceROIEnable(enable); | ||
setDetectionMinSize(percentage) { | ||
this.nativeView.setDetectionMinSize(percentage); | ||
} | ||
setFaceROITopOffset(percentage) { | ||
this.nativeView.setFaceROITopOffset(percentage); | ||
setDetectionMaxSize(percentage) { | ||
this.nativeView.setDetectionMaxSize(percentage); | ||
} | ||
setFaceROIRightOffset(percentage) { | ||
this.nativeView.setFaceROIRightOffset(percentage); | ||
setROI(enable) { | ||
this.nativeView.setROI(enable); | ||
} | ||
setFaceROIBottomOffset(percentage) { | ||
this.nativeView.setFaceROIBottomOffset(percentage); | ||
setROITopOffset(percentage) { | ||
this.nativeView.setROITopOffset(percentage); | ||
} | ||
setFaceROILeftOffset(percentage) { | ||
this.nativeView.setFaceROILeftOffset(percentage); | ||
setROIRightOffset(percentage) { | ||
this.nativeView.setROIRightOffset(percentage); | ||
} | ||
setFaceROIMinSize(percentage) { | ||
this.nativeView.setFaceROIMinSize(percentage); | ||
setROIBottomOffset(percentage) { | ||
this.nativeView.setROIBottomOffset(percentage); | ||
} | ||
setROILeftOffset(percentage) { | ||
this.nativeView.setROILeftOffset(percentage); | ||
} | ||
setROIAreaOffset(enable) { | ||
this.nativeView.setROIAreaOffset(enable); | ||
} | ||
setROIAreaOffsetColor(color) { | ||
this.nativeView.setROIAreaOffsetColor(...color); | ||
} | ||
setFaceContours(enable) { } | ||
setFaceContoursColor(color) { } | ||
setComputerVision(enable) { } | ||
setComputerVisionLoadModels(modelPaths) { } | ||
computerVisionClearModels() { } | ||
setTorch(enable) { | ||
this.nativeView.setTorch(enable); | ||
} | ||
} | ||
__decorate([ | ||
ValidateProps('lens', ['front', 'back']), | ||
NativeMethod('setCameraLens'), | ||
NativeMethod({ name: 'setCameraLens', length: 1 }), | ||
__param(0, Required), | ||
@@ -142,3 +174,3 @@ __metadata("design:type", Function), | ||
ValidateProps('captureType', ['face', 'qrcode', 'frame', 'none']), | ||
NativeMethod('startCaptureType'), | ||
NativeMethod({ name: 'startCaptureType', length: 1 }), | ||
__param(0, Required), | ||
@@ -151,3 +183,3 @@ __metadata("design:type", Function), | ||
ValidateProps('imageCaptureAmount', RegexNumber), | ||
NativeMethod('setNumberOfImages'), | ||
NativeMethod({ name: 'setNumberOfImages', length: 1 }), | ||
__param(0, Required), | ||
@@ -160,3 +192,3 @@ __metadata("design:type", Function), | ||
ValidateProps('imageCaptureInterval', RegexNumber), | ||
NativeMethod('setTimeBetweenImages'), | ||
NativeMethod({ name: 'setTimeBetweenImages', length: 1 }), | ||
__param(0, Required), | ||
@@ -170,3 +202,3 @@ __metadata("design:type", Function), | ||
NumberToPixel, | ||
NativeMethod('setOutputImageWidth'), | ||
NativeMethod({ name: 'setOutputImageWidth', length: 1 }), | ||
__param(0, Required), | ||
@@ -180,3 +212,3 @@ __metadata("design:type", Function), | ||
NumberToPixel, | ||
NativeMethod('setOutputImageHeight'), | ||
NativeMethod({ name: 'setOutputImageHeight', length: 1 }), | ||
__param(0, Required), | ||
@@ -189,3 +221,3 @@ __metadata("design:type", Function), | ||
ValidateProps('imageCapture', [false, true]), | ||
NativeMethod('setSaveImageCaptured'), | ||
NativeMethod({ name: 'setSaveImageCaptured', length: 1 }), | ||
__param(0, Required), | ||
@@ -197,12 +229,21 @@ __metadata("design:type", Function), | ||
__decorate([ | ||
ValidateProps('faceDetectionBox', [false, true]), | ||
NativeMethod('setFaceDetectionBox'), | ||
PercentageToNumber, | ||
NativeMethod({ name: 'setFacePaddingPercent', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Object]), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFacePaddingPercent", null); | ||
__decorate([ | ||
ValidateProps('detectionBox', [false, true]), | ||
NativeMethod({ name: 'setDetectionBox', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Boolean]), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceDetectionBox", null); | ||
], CameraBase.prototype, "setDetectionBox", null); | ||
__decorate([ | ||
PercentageToNumber, | ||
NativeMethod('setFacePaddingPercent'), | ||
ValidateProps('detectionBoxColor', RegexColor), | ||
ParseToNsColor, | ||
NativeMethod({ name: 'setDetectionBoxColor', length: 4 }), | ||
__param(0, Required), | ||
@@ -212,7 +253,7 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFacePaddingPercent", null); | ||
], CameraBase.prototype, "setDetectionBoxColor", null); | ||
__decorate([ | ||
ValidateProps('faceMinSize', RegexPercentage), | ||
ValidateProps('detectionMinSize', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceCaptureMinSize'), | ||
NativeMethod({ name: 'setDetectionMinSize', length: 1 }), | ||
__param(0, Required), | ||
@@ -222,7 +263,7 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceCaptureMinSize", null); | ||
], CameraBase.prototype, "setDetectionMinSize", null); | ||
__decorate([ | ||
ValidateProps('faceMaxSize', RegexPercentage), | ||
ValidateProps('detectionMaxSize', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceCaptureMaxSize'), | ||
NativeMethod({ name: 'setDetectionMaxSize', length: 1 }), | ||
__param(0, Required), | ||
@@ -232,6 +273,6 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceCaptureMaxSize", null); | ||
], CameraBase.prototype, "setDetectionMaxSize", null); | ||
__decorate([ | ||
ValidateProps('faceROI', [false, true]), | ||
NativeMethod('setFaceROIEnable'), | ||
ValidateProps('roi', [false, true]), | ||
NativeMethod({ name: 'setROI', length: 1 }), | ||
__param(0, Required), | ||
@@ -241,7 +282,7 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceROIEnable", null); | ||
], CameraBase.prototype, "setROI", null); | ||
__decorate([ | ||
ValidateProps('faceROITopOffset', RegexPercentage), | ||
ValidateProps('roiTopOffset', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceROITopOffset'), | ||
NativeMethod({ name: 'setROITopOffset', length: 1 }), | ||
__param(0, Required), | ||
@@ -251,7 +292,7 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceROITopOffset", null); | ||
], CameraBase.prototype, "setROITopOffset", null); | ||
__decorate([ | ||
ValidateProps('faceROIRightOffset', RegexPercentage), | ||
ValidateProps('roiRightOffset', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceROIRightOffset'), | ||
NativeMethod({ name: 'setROIRightOffset', length: 1 }), | ||
__param(0, Required), | ||
@@ -261,7 +302,7 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceROIRightOffset", null); | ||
], CameraBase.prototype, "setROIRightOffset", null); | ||
__decorate([ | ||
ValidateProps('faceROIBottomOffset', RegexPercentage), | ||
ValidateProps('roiBottomOffset', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceROIBottomOffset'), | ||
NativeMethod({ name: 'setROIBottomOffset', length: 1 }), | ||
__param(0, Required), | ||
@@ -271,7 +312,7 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceROIBottomOffset", null); | ||
], CameraBase.prototype, "setROIBottomOffset", null); | ||
__decorate([ | ||
ValidateProps('faceROILeftOffset', RegexPercentage), | ||
ValidateProps('roiLeftOffset', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceROILeftOffset'), | ||
NativeMethod({ name: 'setROILeftOffset', length: 1 }), | ||
__param(0, Required), | ||
@@ -281,12 +322,28 @@ __metadata("design:type", Function), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceROILeftOffset", null); | ||
], CameraBase.prototype, "setROILeftOffset", null); | ||
__decorate([ | ||
ValidateProps('faceROIMinSize', RegexPercentage), | ||
PercentageToNumber, | ||
NativeMethod('setFaceROIMinSize'), | ||
ValidateProps('roiAreaOffset', [false, true]), | ||
NativeMethod({ name: 'setROIAreaOffset', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Boolean]), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setROIAreaOffset", null); | ||
__decorate([ | ||
ValidateProps('roiAreaOffsetColor', RegexColor), | ||
ParseToNsColor, | ||
NativeMethod({ name: 'setROIAreaOffsetColor', length: 4 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Object]), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setFaceROIMinSize", null); | ||
], CameraBase.prototype, "setROIAreaOffsetColor", null); | ||
__decorate([ | ||
ValidateProps('torch', [false, true]), | ||
NativeMethod({ name: 'setTorch', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Boolean]), | ||
__metadata("design:returntype", void 0) | ||
], CameraBase.prototype, "setTorch", null); | ||
//# sourceMappingURL=Yoonit.Camera.common.js.map |
import { CameraBase } from './Yoonit.Camera.common'; | ||
import { ImageSource, knownFolders, path, File } from '@nativescript/core'; | ||
import Validator from "./helpers/Validator"; | ||
const { ValidateProps, Required, NativeMethod, RegexColor, ParseToNsColor, } = Validator; | ||
export class YoonitCamera extends CameraBase { | ||
@@ -19,2 +20,5 @@ constructor() { | ||
if (this.nativeView[prop.name]) { | ||
if (prop.length > 1) { | ||
return this.nativeView[prop.name](...prop.value); | ||
} | ||
this.nativeView[prop.name](prop.value); | ||
@@ -33,5 +37,12 @@ } | ||
return new Promise((resolve, reject) => { | ||
let PermissionStatus; | ||
(function (PermissionStatus) { | ||
PermissionStatus[PermissionStatus["NO_EXPLICIT_PERMISSION"] = 0] = "NO_EXPLICIT_PERMISSION"; | ||
PermissionStatus[PermissionStatus["NOT_ALLOWED"] = 1] = "NOT_ALLOWED"; | ||
PermissionStatus[PermissionStatus["EXPLICIT_DENIED"] = 2] = "EXPLICIT_DENIED"; | ||
PermissionStatus[PermissionStatus["EXPLICIT_ALLOWED"] = 3] = "EXPLICIT_ALLOWED"; | ||
})(PermissionStatus || (PermissionStatus = {})); | ||
const cameraStatus = AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo); | ||
switch (cameraStatus) { | ||
case 0: { | ||
case PermissionStatus.NO_EXPLICIT_PERMISSION: { | ||
AVCaptureDevice.requestAccessForMediaTypeCompletionHandler(AVMediaTypeVideo, (granted) => { | ||
@@ -49,4 +60,4 @@ if (granted) { | ||
} | ||
case 1: | ||
case 2: { | ||
case PermissionStatus.NOT_ALLOWED: | ||
case PermissionStatus.EXPLICIT_DENIED: { | ||
this.permission = false; | ||
@@ -56,3 +67,3 @@ reject(false); | ||
} | ||
case 3: { | ||
case PermissionStatus.EXPLICIT_ALLOWED: { | ||
this.permission = true; | ||
@@ -68,3 +79,26 @@ resolve(true); | ||
} | ||
setFaceContours(enable) { | ||
this.nativeView.setFaceContours(enable); | ||
} | ||
setFaceContoursColor(color) { | ||
this.nativeView.setFaceContoursColor(...color); | ||
} | ||
} | ||
__decorate([ | ||
ValidateProps('faceContours', [false, true]), | ||
NativeMethod({ name: 'setFaceContours', length: 1 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Boolean]), | ||
__metadata("design:returntype", void 0) | ||
], YoonitCamera.prototype, "setFaceContours", null); | ||
__decorate([ | ||
ValidateProps('faceContoursColor', RegexColor), | ||
ParseToNsColor, | ||
NativeMethod({ name: 'setFaceContoursColor', length: 4 }), | ||
__param(0, Required), | ||
__metadata("design:type", Function), | ||
__metadata("design:paramtypes", [Object]), | ||
__metadata("design:returntype", void 0) | ||
], YoonitCamera.prototype, "setFaceContoursColor", null); | ||
var CameraEventListener = /** @class */ (function (_super) { | ||
@@ -106,7 +140,8 @@ __extends(CameraEventListener, _super); | ||
total: total, | ||
image: image | ||
image: image, | ||
inferences: [] | ||
}); | ||
} | ||
}; | ||
CameraEventListener.prototype.onFaceDetected = function (x, y, width, height) { | ||
CameraEventListener.prototype.onFaceDetected = function (x, y, width, height, leftEyeOpenProbability, rightEyeOpenProbability, smilingProbability, headEulerAngleX, headEulerAngleY, headEulerAngleZ) { | ||
var owner = this.owner.get(); | ||
@@ -120,3 +155,9 @@ if (owner) { | ||
width: width, | ||
height: height | ||
height: height, | ||
leftEyeOpenProbability: leftEyeOpenProbability, | ||
rightEyeOpenProbability: rightEyeOpenProbability, | ||
smilingProbability: smilingProbability, | ||
headEulerAngleX: headEulerAngleX, | ||
headEulerAngleY: headEulerAngleY, | ||
headEulerAngleZ: headEulerAngleZ | ||
}); | ||
@@ -134,3 +175,9 @@ } | ||
width: null, | ||
height: null | ||
height: null, | ||
leftEyeOpenProbability: null, | ||
rightEyeOpenProbability: null, | ||
smilingProbability: null, | ||
headEulerAngleX: null, | ||
headEulerAngleY: null, | ||
headEulerAngleZ: null | ||
}); | ||
@@ -137,0 +184,0 @@ } |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
66543
12
1092
301
10