fix(face-landmarks): work only when one face is detected (#11661)
* fix(face-landmarks): work only when one face is detected
* fix: remove redundant check for detection
* fix(face-landmarks): re-center and stop when more faces detected
* fix: remove faceCount checking when sending message from worker
* fix: add again the faceCount
* fix: add comment
* code review
Parent: 624f88e069
Commit: 7dd85bb6ad
```diff
@@ -795,10 +795,7 @@ var config = {
 //     faceCenteringThreshold: 10,
 //
 //     // Milliseconds for processing a new image capture in order to detect face coordinates if they exist.
-//     captureInterval: 1000,
-//
-//     // Maximum number of faces that can be detected from a video track.
-//     maxFacesDetected: 4
+//     captureInterval: 1000
 //     },

 // Controls the percentage of automatic feedback shown to participants when callstats is enabled.
```
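For orientation, a minimal sketch of what an enabled faceLandmarks block could look like after this change. The option names come from the commented defaults above; `enableFaceCentering` is an assumption about the rest of the block, which this hunk does not show:

```typescript
// Hypothetical config.js excerpt with the remaining knobs enabled.
// maxFacesDetected no longer exists after this commit.
const config = {
    faceLandmarks: {
        enableFaceCentering: true, // assumed companion flag, not shown in this hunk

        // Movement threshold (in percent) before a new face box is sent.
        faceCenteringThreshold: 10,

        // Milliseconds between image captures used for face detection.
        captureInterval: 1000
    }
};
```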
```diff
@@ -3,11 +3,6 @@ import { Human, Config, FaceResult } from '@vladmandic/human';
 
 import { DETECTION_TYPES, FACE_DETECTION_SCORE_THRESHOLD, FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';
 
-type Detection = {
-    detections: Array<FaceResult>,
-    threshold?: number
-};
-
 type DetectInput = {
     image: ImageBitmap | ImageData,
     threshold: number
```
```diff
@@ -21,20 +16,22 @@ type FaceBox = {
 
 type InitInput = {
     baseUrl: string,
-    detectionTypes: string[],
-    maxFacesDetected?: number
+    detectionTypes: string[]
 }
 
 type DetectOutput = {
     faceExpression?: string,
-    faceBox?: FaceBox
+    faceBox?: FaceBox,
+    faceCount: number
 };
 
 export interface FaceLandmarksHelper {
-    getFaceBox({ detections, threshold }: Detection): FaceBox | undefined;
-    getFaceExpression({ detections }: Detection): string | undefined;
+    getFaceBox(detections: Array<FaceResult>, threshold: number): FaceBox | undefined;
+    getFaceExpression(detections: Array<FaceResult>): string | undefined;
+    getFaceCount(detections : Array<FaceResult>): number;
+    getDetections(image: ImageBitmap | ImageData): Promise<Array<FaceResult>>;
     init(): Promise<void>;
-    detect({ image, threshold } : DetectInput): Promise<DetectOutput | undefined>;
+    detect({ image, threshold } : DetectInput): Promise<DetectOutput>;
     getDetectionInProgress(): boolean;
 }
 
```
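A minimal sketch of consuming the new `DetectOutput` shape; the `helper` instance, the image source, and the threshold value are assumptions for illustration (with `FaceLandmarksHelper` assumed imported from this file):

```typescript
// Hypothetical caller of the updated detect() API.
async function handleFrame(helper: FaceLandmarksHelper, image: ImageData) {
    const { faceBox, faceExpression, faceCount } = await helper.detect({
        image,
        threshold: 10 // arbitrary percentage threshold for this sketch
    });

    if (faceCount !== 1) {
        // Zero or multiple faces: neither centering nor expressions are usable.
        return;
    }

    faceBox && console.log('face box (percent of frame):', faceBox);
    faceExpression && console.log('expression:', faceExpression);
}
```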
```diff
@@ -45,7 +42,6 @@ export class HumanHelper implements FaceLandmarksHelper {
     protected human: Human | undefined;
     protected faceDetectionTypes: string[];
     protected baseUrl: string;
-    protected maxFacesDetected?: number;
     private detectionInProgress = false;
     private lastValidFaceBox: FaceBox | undefined;
 
     /**
```
```diff
@@ -66,7 +62,7 @@ export class HumanHelper implements FaceLandmarksHelper {
             enabled: false,
             rotation: false,
             modelPath: 'blazeface-front.json',
-            maxDetected: 4
+            maxDetected: 20
         },
         mesh: { enabled: false },
         iris: { enabled: false },
```
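Presumably the detector cap is raised from 4 to 20 so that `faceCount` reflects everyone in frame rather than saturating at the old maximum, which the single-face checks below rely on.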
```diff
@@ -82,10 +78,9 @@ export class HumanHelper implements FaceLandmarksHelper {
         segmentation: { enabled: false }
     };
 
-    constructor({ baseUrl, detectionTypes, maxFacesDetected }: InitInput) {
+    constructor({ baseUrl, detectionTypes }: InitInput) {
         this.faceDetectionTypes = detectionTypes;
         this.baseUrl = baseUrl;
-        this.maxFacesDetected = maxFacesDetected;
         this.init();
     }
 
```
```diff
@@ -102,10 +97,6 @@ export class HumanHelper implements FaceLandmarksHelper {
         if (this.faceDetectionTypes.length > 0 && this.config.face) {
             this.config.face.enabled = true
         }
 
-        if (this.maxFacesDetected && this.config.face?.detector) {
-            this.config.face.detector.maxDetected = this.maxFacesDetected;
-        }
-
         if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX) && this.config.face?.detector) {
             this.config.face.detector.enabled = true;
```
```diff
@@ -126,15 +117,15 @@ export class HumanHelper implements FaceLandmarksHelper {
         }
     }
 
-    getFaceBox({ detections, threshold }: Detection): FaceBox | undefined {
-        if (!detections.length) {
+    getFaceBox(detections: Array<FaceResult>, threshold: number): FaceBox | undefined {
+        if (this.getFaceCount(detections) !== 1) {
             return;
         }
 
         const faceBox: FaceBox = {
             // normalize to percentage based
-            left: Math.round(Math.min(...detections.map(d => d.boxRaw[0])) * 100),
-            right: Math.round(Math.max(...detections.map(d => d.boxRaw[0] + d.boxRaw[2])) * 100)
+            left: Math.round(detections[0].boxRaw[0] * 100),
+            right: Math.round((detections[0].boxRaw[0] + detections[0].boxRaw[2]) * 100)
         };
 
         faceBox.width = Math.round(faceBox.right - faceBox.left);
```
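human's `boxRaw` is `[x, y, width, height]` with each value relative to the frame (0 to 1), so multiplying by 100 yields percentages of the frame width; a worked sketch with invented values:

```typescript
// Worked example of the normalization above; boxRaw values are made up.
const boxRaw = [ 0.25, 0.10, 0.50, 0.80 ]; // [x, y, width, height], relative

const left = Math.round(boxRaw[0] * 100);                // 25 (% from left edge)
const right = Math.round((boxRaw[0] + boxRaw[2]) * 100); // 75 (% where the face ends)
const width = Math.round(right - left);                  // 50 (% of frame width)
```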
```diff
@@ -148,15 +139,27 @@ export class HumanHelper implements FaceLandmarksHelper {
         return faceBox;
     }
 
-    getFaceExpression({ detections }: Detection): string | undefined {
-        if (detections[0]?.emotion) {
-            return FACE_EXPRESSIONS_NAMING_MAPPING[detections[0]?.emotion[0].emotion];
+    getFaceExpression(detections: Array<FaceResult>): string | undefined {
+        if (this.getFaceCount(detections) !== 1) {
+            return;
+        }
+
+        if (detections[0].emotion) {
+            return FACE_EXPRESSIONS_NAMING_MAPPING[detections[0].emotion[0].emotion];
         }
     }
 
-    async getDetections(image: ImageBitmap | ImageData) {
-        if (!this.human) {
-            return;
+    getFaceCount(detections: Array<FaceResult> | undefined): number {
+        if (detections) {
+            return detections.length;
+        }
+
+        return 0;
+    }
+
+    async getDetections(image: ImageBitmap | ImageData): Promise<Array<FaceResult>> {
+        if (!this.human || !this.faceDetectionTypes.length) {
+            return [];
         }
 
         this.human.tf.engine().startScope();
```
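The effect of the guard is that both getters bail out unless exactly one face is present. A behavior sketch, assuming a constructed `HumanHelper` named `helper` and faked `FaceResult` data:

```typescript
// Behavior sketch of the single-face guard (FaceResult objects are faked).
declare const helper: FaceLandmarksHelper;

const one: any = [ { emotion: [ { emotion: 'happy', score: 0.9 } ] } ];
const two: any = [ {}, {} ];

helper.getFaceExpression([]);  // undefined: faceCount is 0
helper.getFaceExpression(one); // 'happy', mapped through FACE_EXPRESSIONS_NAMING_MAPPING
helper.getFaceExpression(two); // undefined: two faces are ambiguous
```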
```diff
@@ -169,39 +172,42 @@ export class HumanHelper implements FaceLandmarksHelper {
         return detections.filter(detection => detection.score > FACE_DETECTION_SCORE_THRESHOLD);
     }
 
-    public async detect({ image, threshold } : DetectInput): Promise<DetectOutput | undefined> {
+    public async detect({ image, threshold } : DetectInput): Promise<DetectOutput> {
         let detections;
         let faceExpression;
         let faceBox;
 
         this.detectionInProgress = true;
 
-        if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
-            detections = await this.getDetections(image);
+        detections = await this.getDetections(image);
 
-            if (detections) {
-                faceExpression = this.getFaceExpression({ detections });
-            }
+        if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
+            faceExpression = this.getFaceExpression(detections);
         }
 
         if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX)) {
-            if (!detections) {
-                detections = await this.getDetections(image);
+            // if more than one face is detected the face centering will be disabled.
+            if (this.getFaceCount(detections) > 1) {
+                this.faceDetectionTypes.splice(this.faceDetectionTypes.indexOf(DETECTION_TYPES.FACE_BOX), 1);
+
+                // face-box for re-centering
+                faceBox = {
+                    left: 0,
+                    right: 100,
+                    width: 100
+                };
+            } else {
+                faceBox = this.getFaceBox(detections, threshold);
             }
-
-            if(detections) {
-                faceBox = this.getFaceBox({
-                    detections,
-                    threshold
-                });
-            }
         }
 
         this.detectionInProgress = false;
 
         return {
             faceExpression,
-            faceBox
+            faceBox,
+            faceCount: this.getFaceCount(detections)
         }
     }
```
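Once more than one face shows up, the FACE_BOX detection type is removed so centering stops for subsequent frames, and one final full-width box (`left: 0, right: 100, width: 100`) is emitted to undo any crop. A sketch of how a receiver might interpret that box; both helpers here are hypothetical, not part of this commit:

```typescript
// Hypothetical receiver-side handling of the full-width "reset" box.
declare function resetVideoCrop(): void;            // assumed helper
declare function panVideoTo(center: number): void;  // assumed helper

function applyFaceBox(faceBox: { left: number; right: number; width: number }) {
    if (faceBox.width === 100) {
        // Full-width box: undo any previous centering crop.
        resetVideoCrop();
        return;
    }

    // Otherwise pan so the box's horizontal center sits mid-frame.
    panVideoTo(faceBox.left + (faceBox.width / 2));
}
```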
```diff
@@ -140,8 +140,7 @@ export function loadWorker() {
         worker.postMessage({
             type: INIT_WORKER,
             baseUrl,
-            detectionTypes,
-            maxFacesDetected: faceLandmarks?.maxFacesDetected
+            detectionTypes
         });
 
         dispatch(startFaceLandmarksDetection());
```
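On the worker side, the INIT_WORKER payload now carries only `baseUrl` and `detectionTypes`. A minimal sketch of a matching handler, with the message-type string and wiring assumed rather than taken from this diff:

```typescript
// Hypothetical worker-side INIT_WORKER handling after this change.
import { FaceLandmarksHelper, HumanHelper } from './FaceLandmarksHelper';

let helper: FaceLandmarksHelper;

onmessage = (message: MessageEvent<any>) => {
    if (message.data.type === 'INIT_WORKER') {
        const { baseUrl, detectionTypes } = message.data;

        // No maxFacesDetected any more; the detector cap is fixed at 20.
        helper = new HumanHelper({ baseUrl, detectionTypes });
    }
};
```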
```diff
@@ -13,7 +13,7 @@ onmessage = async function(message: MessageEvent<any>) {
 
     const detections = await helper.detect(message.data);
 
-    if (detections && (detections.faceBox || detections.faceExpression)) {
+    if (detections && (detections.faceBox || detections.faceExpression || detections.faceCount)) {
         self.postMessage(detections);
     }
 
```
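With `faceCount` added to the guard, the worker now also posts results whose only signal is how many faces are visible. A sketch of main-thread handling, where the handler names are assumptions:

```typescript
// Hypothetical main-thread listener for worker results.
declare const worker: Worker;
declare function onFaceBox(box: unknown): void;        // assumed handler
declare function onExpression(name: string): void;     // assumed handler

worker.onmessage = (event: MessageEvent<any>) => {
    const { faceBox, faceExpression, faceCount } = event.data;

    if (faceCount > 1) {
        // Multiple faces: the worker already emitted a full-width reset box
        // and stopped further centering.
        return;
    }

    faceBox && onFaceBox(faceBox);
    faceExpression && onExpression(faceExpression);
};
```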