ref(face-landmarks): convert to typescript and add detector class (#12144)
* fix(face-landmarks): stop recognition when imageCapture errors
* ref(face-landmarks): convert files to TypeScript
* fix: lint issues
* code review
* ref(face-landmarks): move detection part to a class
* ref(face-landmarks): make FaceLandmarksDetector a singleton
* fix typo and ts-ignore problematic types
* fix linting issues
Parent: b83c55e9c4
Commit: d6f3c2a0f4
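The heart of the change: the detection lifecycle moves out of Redux thunks into a single `FaceLandmarksDetector` class exposed as a singleton. A minimal sketch of that pattern, reduced from the full diff below (the real class also owns the worker, the `ImageCapture` instance, and the timers):

```typescript
// Condensed illustration of the singleton shape used by the new detector.
class FaceLandmarksDetector {
    private static instance: FaceLandmarksDetector;
    private initialized = false;

    // A private constructor prevents `new FaceLandmarksDetector()` outside
    // the class, so the exported instance is the only one that can exist.
    private constructor() {}

    // Lazily creates and returns the single shared instance.
    public static getInstance(): FaceLandmarksDetector {
        if (!FaceLandmarksDetector.instance) {
            FaceLandmarksDetector.instance = new FaceLandmarksDetector();
        }

        return FaceLandmarksDetector.instance;
    }

    isInitialized(): boolean {
        return this.initialized;
    }
}

// Consumers import the instance, not the class.
export default FaceLandmarksDetector.getInstance();
```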
@@ -52,6 +52,7 @@
     "@tensorflow/tfjs-backend-wasm": "3.13.0",
     "@tensorflow/tfjs-core": "3.13.0",
     "@types/audioworklet": "0.0.29",
+    "@types/w3c-image-capture": "1.0.6",
     "@vladmandic/human": "2.6.5",
     "@vladmandic/human-models": "2.5.9",
     "@xmldom/xmldom": "0.7.5",
@@ -6169,11 +6170,24 @@
       "integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==",
       "dev": true
     },
+    "node_modules/@types/w3c-image-capture": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/@types/w3c-image-capture/-/w3c-image-capture-1.0.6.tgz",
+      "integrity": "sha512-YjU0tMPgi7exsy7qU+oh6CtNiUKhWFrcYRj6ogsFYlyjl7aLREgpkyS6ROA6D/2qF5ImmLivj01zocDR+aIlVQ==",
+      "dependencies": {
+        "@types/webrtc": "*"
+      }
+    },
     "node_modules/@types/webgl-ext": {
       "version": "0.0.30",
       "resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
       "integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
     },
+    "node_modules/@types/webrtc": {
+      "version": "0.0.32",
+      "resolved": "https://registry.npmjs.org/@types/webrtc/-/webrtc-0.0.32.tgz",
+      "integrity": "sha512-+F0Ozq+ksnKtjcMHujSgb4A1Vjt0b4wvvxP3/pTnXKsIrLo34EgHh2z3qJq3ntX4dbK6ytxpY1rzO/4Z8rVrHg=="
+    },
     "node_modules/@types/ws": {
       "version": "8.5.3",
       "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz",
@@ -25058,11 +25072,24 @@
       "integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==",
       "dev": true
     },
+    "@types/w3c-image-capture": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/@types/w3c-image-capture/-/w3c-image-capture-1.0.6.tgz",
+      "integrity": "sha512-YjU0tMPgi7exsy7qU+oh6CtNiUKhWFrcYRj6ogsFYlyjl7aLREgpkyS6ROA6D/2qF5ImmLivj01zocDR+aIlVQ==",
+      "requires": {
+        "@types/webrtc": "*"
+      }
+    },
     "@types/webgl-ext": {
       "version": "0.0.30",
       "resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
       "integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
     },
+    "@types/webrtc": {
+      "version": "0.0.32",
+      "resolved": "https://registry.npmjs.org/@types/webrtc/-/webrtc-0.0.32.tgz",
+      "integrity": "sha512-+F0Ozq+ksnKtjcMHujSgb4A1Vjt0b4wvvxP3/pTnXKsIrLo34EgHh2z3qJq3ntX4dbK6ytxpY1rzO/4Z8rVrHg=="
+    },
     "@types/ws": {
       "version": "8.5.3",
       "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz",
@@ -57,6 +57,7 @@
     "@tensorflow/tfjs-backend-wasm": "3.13.0",
     "@tensorflow/tfjs-core": "3.13.0",
     "@types/audioworklet": "0.0.29",
+    "@types/w3c-image-capture": "1.0.6",
    "@vladmandic/human": "2.6.5",
     "@vladmandic/human-models": "2.5.9",
     "@xmldom/xmldom": "0.7.5",
@@ -10,6 +10,7 @@ export interface Participant {
     features?: {
         'screen-sharing'?: boolean;
     };
+    getId?: Function;
     id: string;
     isFakeParticipant?: boolean;
     isJigasi?: boolean;
@@ -0,0 +1,297 @@
+/* eslint-disable lines-around-comment */
+import 'image-capture';
+import './createImageBitmap';
+
+import { IStore } from '../app/types';
+// @ts-ignore
+import { getLocalVideoTrack } from '../base/tracks/functions';
+import { getBaseUrl } from '../base/util/helpers';
+
+import { NEW_FACE_COORDINATES } from './actionTypes';
+import { addFaceExpression, clearFaceExpressionBuffer } from './actions';
+import {
+    DETECTION_TYPES,
+    INIT_WORKER,
+    DETECT_FACE,
+    WEBHOOK_SEND_TIME_INTERVAL
+} from './constants';
+import {
+    getDetectionInterval,
+    getFaceExpressionDuration,
+    sendFaceExpressionsWebhook
+} from './functions';
+import logger from './logger';
+
+declare const APP: any;
+
+/**
+ * Class for face landmarks detection.
+ */
+class FaceLandmarksDetector {
+    private static instance: FaceLandmarksDetector;
+    private initialized = false;
+    private imageCapture: ImageCapture | null = null;
+    private worker: Worker | null = null;
+    private lastFaceExpression: string | null = null;
+    private lastFaceExpressionTimestamp: number | null = null;
+    private duplicateConsecutiveExpressions = 0;
+    private webhookSendInterval: number | null = null;
+    private detectionInterval: number | null = null;
+    private recognitionActive = false;
+    private canvas?: HTMLCanvasElement;
+    private context?: CanvasRenderingContext2D | null;
+
+    /**
+     * Constructor for class, checks if the environment supports OffscreenCanvas.
+     */
+    private constructor() {
+        if (typeof OffscreenCanvas === 'undefined') {
+            this.canvas = document.createElement('canvas');
+            this.context = this.canvas.getContext('2d');
+        }
+    }
+
+    /**
+     * Function for retrieving the FaceLandmarksDetector instance.
+     *
+     * @returns {FaceLandmarksDetector} - FaceLandmarksDetector instance.
+     */
+    public static getInstance(): FaceLandmarksDetector {
+        if (!FaceLandmarksDetector.instance) {
+            FaceLandmarksDetector.instance = new FaceLandmarksDetector();
+        }
+
+        return FaceLandmarksDetector.instance;
+    }
+
+    /**
+     * Returns whether the detector is initialized.
+     *
+     * @returns {boolean}
+     */
+    isInitialized(): boolean {
+        return this.initialized;
+    }
+
+    /**
+     * Initialization function: the worker is loaded and initialized, and then, if possible, detection starts.
+     *
+     * @param {IStore} store - Redux store with dispatch and getState methods.
+     * @returns {void}
+     */
+    init({ dispatch, getState }: IStore) {
+        if (this.isInitialized()) {
+            logger.info('Worker has already been initialized');
+
+            return;
+        }
+
+        if (navigator.product === 'ReactNative') {
+            logger.warn('Unsupported environment for face detection');
+
+            return;
+        }
+
+        const baseUrl = `${getBaseUrl()}libs/`;
+        let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;
+
+        // @ts-ignore
+        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
+
+        // @ts-ignore
+        workerUrl = window.URL.createObjectURL(workerBlob);
+        this.worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
+        this.worker.onmessage = ({ data }: MessageEvent<any>) => {
+            const { faceExpression, faceBox } = data;
+
+            if (faceExpression) {
+                if (faceExpression === this.lastFaceExpression) {
+                    this.duplicateConsecutiveExpressions++;
+                } else {
+                    if (this.lastFaceExpression && this.lastFaceExpressionTimestamp) {
+                        dispatch(addFaceExpression(
+                            this.lastFaceExpression,
+                            getFaceExpressionDuration(getState(), this.duplicateConsecutiveExpressions + 1),
+                            this.lastFaceExpressionTimestamp
+                        ));
+                    }
+                    this.lastFaceExpression = faceExpression;
+                    this.lastFaceExpressionTimestamp = Date.now();
+                    this.duplicateConsecutiveExpressions = 0;
+                }
+            }
+
+            if (faceBox) {
+                dispatch({
+                    type: NEW_FACE_COORDINATES,
+                    faceBox
+                });
+            }
+
+            APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
+        };
+
+        const { faceLandmarks } = getState()['features/base/config'];
+        const detectionTypes = [
+            faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
+            faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
+        ].filter(Boolean);
+
+        this.worker.postMessage({
+            type: INIT_WORKER,
+            baseUrl,
+            detectionTypes
+        });
+        this.initialized = true;
+
+        this.startDetection({
+            dispatch,
+            getState
+        });
+    }
+
+    /**
+     * The function which starts the detection process.
+     *
+     * @param {IStore} store - Redux store with dispatch and getState methods.
+     * @param {any} track - Track from middleware; can be undefined.
+     * @returns {void}
+     */
+    startDetection({ dispatch, getState }: IStore, track?: any) {
+        if (!this.isInitialized()) {
+            logger.info('Worker has not been initialized');
+
+            return;
+        }
+
+        if (this.recognitionActive) {
+            logger.log('Face detection already active.');
+
+            return;
+        }
+
+        const state = getState();
+        const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
+
+        if (localVideoTrack === undefined) {
+            logger.warn('Face landmarks detection is disabled due to missing local track.');
+
+            return;
+        }
+
+        const stream = localVideoTrack.jitsiTrack.getOriginalStream();
+        const firstVideoTrack = stream.getVideoTracks()[0];
+
+        this.imageCapture = new ImageCapture(firstVideoTrack);
+        this.recognitionActive = true;
+        logger.log('Start face detection');
+
+        const { faceLandmarks } = state['features/base/config'];
+
+        this.detectionInterval = window.setInterval(() => {
+            if (this.worker && this.imageCapture) {
+                this.sendDataToWorker(
+                    this.imageCapture,
+                    faceLandmarks?.faceCenteringThreshold
+                ).then(status => {
+                    if (!status) {
+                        this.stopDetection({
+                            dispatch,
+                            getState
+                        });
+                    }
+                });
+            }
+        }, getDetectionInterval(state));
+
+        const { webhookProxyUrl } = state['features/base/config'];
+
+        if (faceLandmarks?.enableFaceExpressionsDetection && webhookProxyUrl) {
+            this.webhookSendInterval = window.setInterval(async () => {
+                const result = await sendFaceExpressionsWebhook(getState());
+
+                if (result) {
+                    dispatch(clearFaceExpressionBuffer());
+                }
+            }, WEBHOOK_SEND_TIME_INTERVAL);
+        }
+    }
+
+    /**
+     * The function which stops the detection process.
+     *
+     * @param {IStore} store - Redux store with dispatch and getState methods.
+     * @returns {void}
+     */
+    stopDetection({ dispatch, getState }: IStore) {
+        if (!this.recognitionActive || !this.isInitialized()) {
+            return;
+        }
+
+        if (this.lastFaceExpression && this.lastFaceExpressionTimestamp) {
+            dispatch(
+                addFaceExpression(
+                    this.lastFaceExpression,
+                    getFaceExpressionDuration(getState(), this.duplicateConsecutiveExpressions + 1),
+                    this.lastFaceExpressionTimestamp
+                )
+            );
+            this.duplicateConsecutiveExpressions = 0;
+            this.lastFaceExpression = null;
+            this.lastFaceExpressionTimestamp = null;
+        }
+
+        this.webhookSendInterval && window.clearInterval(this.webhookSendInterval);
+        this.detectionInterval && window.clearInterval(this.detectionInterval);
+        this.webhookSendInterval = null;
+        this.detectionInterval = null;
+        this.imageCapture = null;
+        this.recognitionActive = false;
+        logger.log('Stop face detection');
+    }
+
+    /**
+     * Sends the image data from the track in the image capture to the face detection worker.
+     *
+     * @param {Object} imageCapture - Image capture that contains the current track.
+     * @param {number} faceCenteringThreshold - Movement threshold as percentage for sharing face coordinates.
+     * @returns {Promise<boolean>} - True if sent, false otherwise.
+     */
+    private async sendDataToWorker(imageCapture: ImageCapture, faceCenteringThreshold = 10): Promise<boolean> {
+        if (!imageCapture || !this.worker) {
+            logger.log('Could not send data to worker');
+
+            return false;
+        }
+
+        let imageBitmap;
+        let image;
+
+        try {
+            imageBitmap = await imageCapture.grabFrame();
+        } catch (err) {
+            logger.log('Could not send data to worker');
+            logger.warn(err);
+
+            return false;
+        }
+
+        if (typeof OffscreenCanvas === 'undefined' && this.canvas && this.context) {
+            this.canvas.width = imageBitmap.width;
+            this.canvas.height = imageBitmap.height;
+            this.context.drawImage(imageBitmap, 0, 0);
+            image = this.context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
+        } else {
+            image = imageBitmap;
+        }
+
+        this.worker.postMessage({
+            type: DETECT_FACE,
+            image,
+            threshold: faceCenteringThreshold
+        });
+
+        imageBitmap.close();
+
+        return true;
+    }
+}
+
+export default FaceLandmarksDetector.getInstance();
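For context before the remaining diffs: the middleware (further down) now drives this singleton directly instead of dispatching thunks. A condensed sketch of the call sites, with illustrative handler names (only the three `FaceLandmarksDetector` calls are taken from the middleware diff):

```typescript
import { IStore } from '../app/types';

import FaceLandmarksDetector from './FaceLandmarksDetector';

// CONFERENCE_JOINED: load and initialize the worker; detection starts
// right away when a local track is available.
function onConferenceJoined(store: IStore) {
    FaceLandmarksDetector.init(store);
}

// TRACK_ADDED for the local camera: the track is passed explicitly
// because it is not yet in the Redux store at this point.
function onLocalCameraTrackAdded(store: IStore, track: any) {
    FaceLandmarksDetector.startDetection(store, track);
}

// Video mute, TRACK_REMOVED or CONFERENCE_WILL_LEAVE: tear down the
// timers and the ImageCapture instance.
function onDetectionShouldStop(store: IStore) {
    FaceLandmarksDetector.stopDetection(store);
}
```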
@@ -2,28 +2,7 @@ import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
 import { Human, Config, FaceResult } from '@vladmandic/human';

 import { DETECTION_TYPES, FACE_DETECTION_SCORE_THRESHOLD, FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';
+import { DetectInput, DetectOutput, FaceBox, InitInput } from './types';

-type DetectInput = {
-    image: ImageBitmap | ImageData;
-    threshold: number;
-};
-
-type FaceBox = {
-    left: number;
-    right: number;
-    width?: number;
-};
-
-type InitInput = {
-    baseUrl: string;
-    detectionTypes: string[];
-};
-
-type DetectOutput = {
-    faceBox?: FaceBox;
-    faceCount: number;
-    faceExpression?: string;
-};
-
 export interface FaceLandmarksHelper {
     detect: ({ image, threshold }: DetectInput) => Promise<DetectOutput>;
@@ -28,24 +28,6 @@ export const ADD_TO_FACE_EXPRESSIONS_BUFFER = 'ADD_TO_FACE_EXPRESSIONS_BUFFER';
  */
 export const CLEAR_FACE_EXPRESSIONS_BUFFER = 'CLEAR_FACE_EXPRESSIONS_BUFFER';

-/**
- * Redux action type dispatched in order to set recognition active in the state.
- *
- * {
- *     type: START_FACE_LANDMARKS_DETECTION
- * }
- */
-export const START_FACE_LANDMARKS_DETECTION = 'START_FACE_LANDMARKS_DETECTION';
-
-/**
- * Redux action type dispatched in order to set recognition inactive in the state.
- *
- * {
- *     type: STOP_FACE_LANDMARKS_DETECTION
- * }
- */
-export const STOP_FACE_LANDMARKS_DETECTION = 'STOP_FACE_LANDMARKS_DETECTION';
-
 /**
  * Redux action type dispatched in order to update coordinates of a detected face.
  *
@@ -56,3 +38,14 @@ export const STOP_FACE_LANDMARKS_DETECTION = 'STOP_FACE_LANDMARKS_DETECTION';
  * }
  */
 export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';
+
+/**
+ * Redux action type dispatched in order to signal new face coordinates were obtained for the local participant.
+ *
+ * {
+ *     type: NEW_FACE_COORDINATES,
+ *     faceBox: Object({ left, bottom, right, top }),
+ *     participantId: string
+ * }
+ */
+export const NEW_FACE_COORDINATES = 'NEW_FACE_COORDINATES';
@@ -1,286 +0,0 @@
-// @flow
-import 'image-capture';
-import './createImageBitmap';
-
-import { getCurrentConference } from '../base/conference';
-import { getLocalParticipant, getParticipantCount } from '../base/participants';
-import { getLocalVideoTrack } from '../base/tracks';
-import { getBaseUrl } from '../base/util';
-
-import {
-    ADD_FACE_EXPRESSION,
-    ADD_TO_FACE_EXPRESSIONS_BUFFER,
-    CLEAR_FACE_EXPRESSIONS_BUFFER,
-    START_FACE_LANDMARKS_DETECTION,
-    STOP_FACE_LANDMARKS_DETECTION,
-    UPDATE_FACE_COORDINATES
-} from './actionTypes';
-import {
-    DETECTION_TYPES,
-    INIT_WORKER,
-    WEBHOOK_SEND_TIME_INTERVAL
-} from './constants';
-import {
-    getDetectionInterval,
-    sendDataToWorker,
-    sendFaceBoxToParticipants,
-    sendFaceExpressionsWebhook
-} from './functions';
-import logger from './logger';
-
-declare var APP: Object;
-
-/**
- * Object containing a image capture of the local track.
- */
-let imageCapture;
-
-/**
- * Object where the face landmarks worker is stored.
- */
-let worker;
-
-/**
- * The last face expression received from the worker.
- */
-let lastFaceExpression;
-
-/**
- * The last face expression timestamp.
- */
-let lastFaceExpressionTimestamp;
-
-/**
- * How many duplicate consecutive expression occurred.
- * If a expression that is not the same as the last one it is reset to 0.
- */
-let duplicateConsecutiveExpressions = 0;
-
-/**
- * Variable that keeps the interval for sending expressions to webhook.
- */
-let webhookSendInterval;
-
-/**
- * Variable that keeps the interval for detecting faces in a frame.
- */
-let detectionInterval;
-
-/**
- * Loads the worker that detects the face landmarks.
- *
- * @returns {void}
- */
-export function loadWorker() {
-    return function(dispatch: Function, getState: Function) {
-        if (worker) {
-            logger.info('Worker has already been initialized');
-
-            return;
-        }
-
-        if (navigator.product === 'ReactNative') {
-            logger.warn('Unsupported environment for face recognition');
-
-            return;
-        }
-
-        const baseUrl = `${getBaseUrl()}libs/`;
-        let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;
-
-        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
-
-        workerUrl = window.URL.createObjectURL(workerBlob);
-        worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
-        worker.onmessage = function(e: Object) {
-            const { faceExpression, faceBox } = e.data;
-
-            if (faceExpression) {
-                if (faceExpression === lastFaceExpression) {
-                    duplicateConsecutiveExpressions++;
-                } else {
-                    if (lastFaceExpression && lastFaceExpressionTimestamp) {
-                        dispatch(addFaceExpression(
-                            lastFaceExpression,
-                            duplicateConsecutiveExpressions + 1,
-                            lastFaceExpressionTimestamp
-                        ));
-                    }
-                    lastFaceExpression = faceExpression;
-                    lastFaceExpressionTimestamp = Date.now();
-                    duplicateConsecutiveExpressions = 0;
-                }
-            }
-
-            if (faceBox) {
-                const state = getState();
-                const conference = getCurrentConference(state);
-                const localParticipant = getLocalParticipant(state);
-
-                if (getParticipantCount(state) > 1) {
-                    sendFaceBoxToParticipants(conference, faceBox);
-                }
-
-                dispatch({
-                    type: UPDATE_FACE_COORDINATES,
-                    faceBox,
-                    id: localParticipant.id
-                });
-            }
-
-            APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
-        };
-
-        const { faceLandmarks } = getState()['features/base/config'];
-        const detectionTypes = [
-            faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
-            faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
-        ].filter(Boolean);
-
-        worker.postMessage({
-            type: INIT_WORKER,
-            baseUrl,
-            detectionTypes
-        });
-
-        dispatch(startFaceLandmarksDetection());
-    };
-}
-
-/**
- * Starts the recognition and detection of face expressions.
- *
- * @param {Track | undefined} track - Track for which to start detecting faces.
- * @returns {Function}
- */
-export function startFaceLandmarksDetection(track) {
-    return async function(dispatch: Function, getState: Function) {
-        if (!worker) {
-            return;
-        }
-
-        const state = getState();
-        const { recognitionActive } = state['features/face-landmarks'];
-
-        if (recognitionActive) {
-            logger.log('Face recognition already active.');
-
-            return;
-        }
-
-        const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
-
-        if (localVideoTrack === undefined) {
-            logger.warn('Face landmarks detection is disabled due to missing local track.');
-
-            return;
-        }
-
-        const stream = localVideoTrack.jitsiTrack.getOriginalStream();
-
-        dispatch({ type: START_FACE_LANDMARKS_DETECTION });
-        logger.log('Start face recognition');
-
-        const firstVideoTrack = stream.getVideoTracks()[0];
-        const { faceLandmarks } = state['features/base/config'];
-
-        imageCapture = new ImageCapture(firstVideoTrack);
-
-        detectionInterval = setInterval(() => {
-            sendDataToWorker(
-                worker,
-                imageCapture,
-                faceLandmarks?.faceCenteringThreshold
-            );
-        }, getDetectionInterval(state));
-
-        if (faceLandmarks?.enableFaceExpressionsDetection) {
-            webhookSendInterval = setInterval(async () => {
-                const result = await sendFaceExpressionsWebhook(getState());
-
-                if (result) {
-                    dispatch(clearFaceExpressionBuffer());
-                }
-            }, WEBHOOK_SEND_TIME_INTERVAL);
-        }
-    };
-}
-
-/**
- * Stops the recognition and detection of face expressions.
- *
- * @returns {void}
- */
-export function stopFaceLandmarksDetection() {
-    return function(dispatch: Function, getState: Function) {
-        const { recognitionActive } = getState()['features/face-landmarks'];
-
-        if (lastFaceExpression && lastFaceExpressionTimestamp && recognitionActive) {
-            dispatch(
-                addFaceExpression(
-                    lastFaceExpression,
-                    duplicateConsecutiveExpressions + 1,
-                    lastFaceExpressionTimestamp
-                )
-            );
-        }
-
-        clearInterval(webhookSendInterval);
-        clearInterval(detectionInterval);
-
-        duplicateConsecutiveExpressions = 0;
-        lastFaceExpression = null;
-        lastFaceExpressionTimestamp = null;
-        webhookSendInterval = null;
-        detectionInterval = null;
-        imageCapture = null;
-
-        dispatch({ type: STOP_FACE_LANDMARKS_DETECTION });
-        logger.log('Stop face recognition');
-    };
-}
-
-/**
- * Adds a new face expression and its duration.
- *
- * @param {string} faceExpression - Face expression to be added.
- * @param {number} duration - Duration in seconds of the face expression.
- * @param {number} timestamp - Duration in seconds of the face expression.
- * @returns {Object}
- */
-function addFaceExpression(faceExpression: string, duration: number, timestamp: number) {
-    return function(dispatch: Function, getState: Function) {
-        const finalDuration = duration * getDetectionInterval(getState()) / 1000;
-
-        dispatch({
-            type: ADD_FACE_EXPRESSION,
-            faceExpression,
-            duration: finalDuration,
-            timestamp
-        });
-    };
-}
-
-/**
- * Adds a face expression with its timestamp to the face expression buffer.
- *
- * @param {Object} faceExpression - Object containing face expression string and its timestamp.
- * @returns {Object}
- */
-export function addToFaceExpressionsBuffer(faceExpression: Object) {
-    return {
-        type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
-        faceExpression
-    };
-}
-
-/**
- * Clears the face expressions array in the state.
- *
- * @returns {Object}
- */
-function clearFaceExpressionBuffer() {
-    return {
-        type: CLEAR_FACE_EXPRESSIONS_BUFFER
-    };
-}
@@ -0,0 +1,70 @@
+import 'image-capture';
+import './createImageBitmap';
+import { AnyAction } from 'redux';
+
+import {
+    ADD_FACE_EXPRESSION,
+    ADD_TO_FACE_EXPRESSIONS_BUFFER,
+    CLEAR_FACE_EXPRESSIONS_BUFFER,
+    NEW_FACE_COORDINATES
+} from './actionTypes';
+import { FaceBox } from './types';
+
+/**
+ * Adds a new face expression and its duration.
+ *
+ * @param {string} faceExpression - Face expression to be added.
+ * @param {number} duration - Duration in seconds of the face expression.
+ * @param {number} timestamp - Timestamp of the face expression.
+ * @returns {AnyAction}
+ */
+export function addFaceExpression(faceExpression: string, duration: number, timestamp: number): AnyAction {
+    return {
+        type: ADD_FACE_EXPRESSION,
+        faceExpression,
+        duration,
+        timestamp
+    };
+}
+
+/**
+ * Adds a face expression with its timestamp to the face expression buffer.
+ *
+ * @param {Object} faceExpression - Object containing face expression string and its timestamp.
+ * @returns {AnyAction}
+ */
+export function addToFaceExpressionsBuffer(
+    faceExpression: {
+        emotion: string;
+        timestamp: number;
+    }
+): AnyAction {
+    return {
+        type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
+        faceExpression
+    };
+}
+
+/**
+ * Clears the face expressions array in the state.
+ *
+ * @returns {Object}
+ */
+export function clearFaceExpressionBuffer() {
+    return {
+        type: CLEAR_FACE_EXPRESSIONS_BUFFER
+    };
+}
+
+/**
+ * Signals that a new face box was obtained for the local participant.
+ *
+ * @param {FaceBox} faceBox - The face box of the local participant.
+ * @returns {AnyAction}
+ */
+export function newFaceBox(faceBox: FaceBox): AnyAction {
+    return {
+        type: NEW_FACE_COORDINATES,
+        faceBox
+    };
+}
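Note the behavioral refactor here: `addFaceExpression` is now a plain action creator, so the seconds conversion the old thunk did inline moves to `getFaceExpressionDuration` (see the functions diff further down). A hedged sketch of the equivalence, using an illustrative interval value rather than the real state lookup:

```typescript
// Assumed detection interval of 1000 ms for illustration; the real value
// comes from getDetectionInterval(state).
const detectionIntervalMs = 1000;

// New helper: each consecutive detection of the same expression counts
// for one detection interval, expressed in seconds.
function getFaceExpressionDuration(faceExpressionCount: number): number {
    return faceExpressionCount * (detectionIntervalMs / 1000);
}

// Old thunk, equivalently: finalDuration = duration * interval / 1000,
// where `duration` held the consecutive-detection count.
console.log(getFaceExpressionDuration(3)); // 3 seconds at a 1 s interval
```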
@@ -59,4 +59,4 @@ export const DETECTION_TYPES = {
 /**
  * Threshold for detection score of face.
  */
-export const FACE_DETECTION_SCORE_THRESHOLD = 0.6;
+export const FACE_DETECTION_SCORE_THRESHOLD = 0.75;
@@ -4,6 +4,7 @@
  *
  * Support source image types: Canvas.
  */
+// @ts-nocheck
 if (!('createImageBitmap' in window)) {
     window.createImageBitmap = async function(data) {
         return new Promise((resolve, reject) => {
@@ -16,12 +17,14 @@ if (!('createImageBitmap' in window)) {
             }
             const img = document.createElement('img');

-            // eslint-disable-next-line no-empty-function
-            img.close = () => {};
+            img.close = () => {
+                // empty
+            };

             img.addEventListener('load', () => {
                 resolve(img);
             });

             img.src = dataURL;
         });
     };
@@ -1,17 +1,16 @@
 import { FaceLandmarksHelper, HumanHelper } from './FaceLandmarksHelper';
 import { DETECT_FACE, INIT_WORKER } from './constants';


 let helper: FaceLandmarksHelper;

-onmessage = async function(message: MessageEvent<any>) {
-    switch (message.data.type) {
+onmessage = async function({ data }: MessageEvent<any>) {
+    switch (data.type) {
     case DETECT_FACE: {
         if (!helper || helper.getDetectionInProgress()) {
             return;
         }

-        const detections = await helper.detect(message.data);
+        const detections = await helper.detect(data);

         if (detections && (detections.faceBox || detections.faceExpression || detections.faceCount)) {
             self.postMessage(detections);
@@ -21,7 +20,7 @@ onmessage = async function({ data }: MessageEvent<any>) {
     }

     case INIT_WORKER: {
-        helper = new HumanHelper(message.data);
+        helper = new HumanHelper(data);
         break;
     }
     }
@@ -1,12 +1,15 @@
-// @flow
-import { getLocalParticipant } from '../base/participants';
+/* eslint-disable lines-around-comment */
+import { IState } from '../app/types';
+import { getLocalParticipant } from '../base/participants/functions';
+// @ts-ignore
 import { extractFqnFromPath } from '../dynamic-branding/functions.any';

 import { DETECT_FACE, FACE_BOX_EVENT_TYPE, SEND_IMAGE_INTERVAL_MS } from './constants';
 import logger from './logger';
+import { FaceBox } from './types';

-let canvas;
-let context;
+let canvas: HTMLCanvasElement;
+let context: CanvasRenderingContext2D | null;

 if (typeof OffscreenCanvas === 'undefined') {
     canvas = document.createElement('canvas');
@@ -16,13 +19,13 @@ if (typeof OffscreenCanvas === 'undefined') {
 /**
  * Sends the face expression with its duration to all the other participants.
  *
- * @param {Object} conference - The current conference.
+ * @param {any} conference - The current conference.
  * @param {string} faceExpression - Face expression to be sent.
  * @param {number} duration - The duration of the face expression in seconds.
  * @returns {void}
  */
 export function sendFaceExpressionToParticipants(
-    conference: Object,
+    conference: any,
     faceExpression: string,
     duration: number
 ): void {
@@ -41,13 +44,13 @@ export function sendFaceExpressionToParticipants(
 /**
  * Sends the face box to all the other participants.
  *
- * @param {Object} conference - The current conference.
- * @param {Object} faceBox - Face box to be sent.
+ * @param {any} conference - The current conference.
+ * @param {FaceBox} faceBox - Face box to be sent.
  * @returns {void}
  */
 export function sendFaceBoxToParticipants(
-    conference: Object,
-    faceBox: Object
+    conference: any,
+    faceBox: FaceBox
 ): void {
     try {
         conference.sendEndpointMessage('', {
@@ -62,13 +65,13 @@ export function sendFaceBoxToParticipants(
 /**
  * Sends the face expression with its duration to xmpp server.
  *
- * @param {Object} conference - The current conference.
+ * @param {any} conference - The current conference.
  * @param {string} faceExpression - Face expression to be sent.
  * @param {number} duration - The duration of the face expression in seconds.
  * @returns {void}
  */
 export function sendFaceExpressionToServer(
-    conference: Object,
+    conference: any,
     faceExpression: string,
     duration: number
 ): void {
@@ -88,12 +91,12 @@ export function sendFaceExpressionToServer(
  * @param {Object} state - Redux state.
  * @returns {boolean} - True if sent, false otherwise.
  */
-export async function sendFaceExpressionsWebhook(state: Object) {
+export async function sendFaceExpressionsWebhook(state: IState) {
     const { webhookProxyUrl: url } = state['features/base/config'];
     const { conference } = state['features/base/conference'];
     const { jwt } = state['features/base/jwt'];
     const { connection } = state['features/base/connection'];
-    const jid = connection.getJid();
+    const jid = connection?.getJid();
     const localParticipant = getLocalParticipant(state);
     const { faceExpressionsBuffer } = state['features/face-landmarks'];

@@ -111,8 +114,8 @@ export async function sendFaceExpressionsWebhook(state: IState) {
         sessionId: conference.sessionId,
         submitted: Date.now(),
         emotions: faceExpressionsBuffer,
-        participantId: localParticipant.jwtId,
-        participantName: localParticipant.name,
+        participantId: localParticipant?.jwtId,
+        participantName: localParticipant?.name,
         participantJid: jid
     };

@@ -143,15 +146,15 @@ export async function sendFaceExpressionsWebhook(state: IState) {
  * @param {Worker} worker - Face recognition worker.
  * @param {Object} imageCapture - Image capture that contains the current track.
  * @param {number} threshold - Movement threshold as percentage for sharing face coordinates.
- * @returns {Promise<void>}
+ * @returns {Promise<boolean>} - True if sent, false otherwise.
  */
 export async function sendDataToWorker(
     worker: Worker,
-    imageCapture: Object,
-    threshold: number = 10
-): Promise<void> {
+    imageCapture: ImageCapture,
+    threshold = 10
+): Promise<boolean> {
     if (imageCapture === null || imageCapture === undefined) {
-        return;
+        return false;
     }

     let imageBitmap;
@@ -162,15 +165,15 @@ export async function sendDataToWorker(
     } catch (err) {
         logger.warn(err);

-        return;
+        return false;
     }

     if (typeof OffscreenCanvas === 'undefined') {
         canvas.width = imageBitmap.width;
         canvas.height = imageBitmap.height;
-        context.drawImage(imageBitmap, 0, 0);
+        context?.drawImage(imageBitmap, 0, 0);

-        image = context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
+        image = context?.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
     } else {
         image = imageBitmap;
     }
@@ -182,33 +185,37 @@ export async function sendDataToWorker(
     });

     imageBitmap.close();
+
+    return true;
 }

 /**
  * Gets face box for a participant id.
  *
  * @param {string} id - The participant id.
- * @param {Object} state - The redux state.
+ * @param {IState} state - The redux state.
  * @returns {Object}
  */
-function getFaceBoxForId(id: string, state: Object) {
+function getFaceBoxForId(id: string, state: IState) {
     return state['features/face-landmarks'].faceBoxes[id];
 }

 /**
  * Gets the video object position for a participant id.
  *
- * @param {Object} state - The redux state.
+ * @param {IState} state - The redux state.
  * @param {string} id - The participant id.
  * @returns {string} - CSS object-position in the shape of '{horizontalPercentage}% {verticalPercentage}%'.
  */
-export function getVideoObjectPosition(state: Object, id: string) {
-    const faceBox = getFaceBoxForId(id, state);
+export function getVideoObjectPosition(state: IState, id?: string) {
+    const faceBox = id && getFaceBoxForId(id, state);

     if (faceBox) {
         const { right, width } = faceBox;

-        return `${right - (width / 2)}% 50%`;
+        if (right && width) {
+            return `${right - (width / 2)}% 50%`;
+        }
     }

     return '50% 50%';
@@ -217,11 +224,22 @@
 /**
  * Gets the video object position for a participant id.
  *
- * @param {Object} state - The redux state.
+ * @param {IState} state - The redux state.
  * @returns {number} - Number of milliseconds for doing face detection.
  */
-export function getDetectionInterval(state: Object) {
+export function getDetectionInterval(state: IState) {
     const { faceLandmarks } = state['features/base/config'];

     return Math.max(faceLandmarks?.captureInterval || SEND_IMAGE_INTERVAL_MS);
 }
+
+/**
+ * Returns the duration in seconds of a face expression.
+ *
+ * @param {IState} state - The redux state.
+ * @param {number} faceExpressionCount - The number of consecutive face expressions.
+ * @returns {number} - Duration of face expression in seconds.
+ */
+export function getFaceExpressionDuration(state: IState, faceExpressionCount: number) {
+    return faceExpressionCount * (getDetectionInterval(state) / 1000);
+}
@@ -1 +0,0 @@
-export * from './actions';
@@ -1,38 +1,41 @@
+/* eslint-disable lines-around-comment */
+import { IStore } from '../app/types';
 import {
     CONFERENCE_JOINED,
-    CONFERENCE_WILL_LEAVE,
-    getCurrentConference
-} from '../base/conference';
+    CONFERENCE_WILL_LEAVE
+} from '../base/conference/actionTypes';
+// @ts-ignore
+import { getCurrentConference } from '../base/conference/functions';
 import { JitsiConferenceEvents } from '../base/lib-jitsi-meet';
-import { getParticipantCount } from '../base/participants';
-import { MiddlewareRegistry } from '../base/redux';
-import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks';
+import { getLocalParticipant, getParticipantCount } from '../base/participants/functions';
+import { Participant } from '../base/participants/types';
+import MiddlewareRegistry from '../base/redux/MiddlewareRegistry';
+import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks/actionTypes';

-import { ADD_FACE_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes';
+import FaceLandmarksDetector from './FaceLandmarksDetector';
+import { ADD_FACE_EXPRESSION, NEW_FACE_COORDINATES, UPDATE_FACE_COORDINATES } from './actionTypes';
 import {
-    addToFaceExpressionsBuffer,
-    loadWorker,
-    stopFaceLandmarksDetection,
-    startFaceLandmarksDetection
+    addToFaceExpressionsBuffer
 } from './actions';
 import { FACE_BOX_EVENT_TYPE } from './constants';
-import { sendFaceExpressionToParticipants, sendFaceExpressionToServer } from './functions';
+import { sendFaceBoxToParticipants, sendFaceExpressionToParticipants, sendFaceExpressionToServer } from './functions';


-MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
+MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: any) => {
+    const { dispatch, getState } = store;
     const { faceLandmarks } = getState()['features/base/config'];
     const isEnabled = faceLandmarks?.enableFaceCentering || faceLandmarks?.enableFaceExpressionsDetection;

     if (action.type === CONFERENCE_JOINED) {
         if (isEnabled) {
-            dispatch(loadWorker());
+            FaceLandmarksDetector.init(store);
         }

         // allow using remote face centering data when local face centering is not enabled
         action.conference.on(
             JitsiConferenceEvents.ENDPOINT_MESSAGE_RECEIVED,
-            (participant, eventData) => {
-                if (!participant || !eventData) {
+            (participant: Participant | undefined, eventData: any) => {
+                if (!participant || !eventData || !participant.getId) {
                     return;
                 }
@@ -55,7 +58,7 @@ MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: any) => {

     switch (action.type) {
     case CONFERENCE_WILL_LEAVE : {
-        dispatch(stopFaceLandmarksDetection());
+        FaceLandmarksDetector.stopDetection(store);

         return next(action);
     }
@@ -64,7 +67,7 @@ MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: any) => {

         if (videoType === 'camera' && isLocal()) {
             // need to pass this since the track is not yet added in the store
-            dispatch(startFaceLandmarksDetection(action.track));
+            FaceLandmarksDetector.startDetection(store, action.track);
         }

         return next(action);
@@ -81,9 +84,9 @@ MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: any) => {
         if (muted !== undefined) {
             // addresses video mute state changes
             if (muted) {
-                dispatch(stopFaceLandmarksDetection());
+                FaceLandmarksDetector.stopDetection(store);
             } else {
-                dispatch(startFaceLandmarksDetection());
+                FaceLandmarksDetector.startDetection(store);
             }
         }
@@ -93,26 +96,43 @@ MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: any) => {
         const { jitsiTrack: { isLocal, videoType } } = action.track;

         if (videoType === 'camera' && isLocal()) {
-            dispatch(stopFaceLandmarksDetection());
+            FaceLandmarksDetector.stopDetection(store);
         }

         return next(action);
     }
     case ADD_FACE_EXPRESSION: {
         const state = getState();
+        const { faceExpression, duration, timestamp } = action;
         const conference = getCurrentConference(state);

         if (getParticipantCount(state) > 1) {
-            sendFaceExpressionToParticipants(conference, action.faceExpression, action.duration);
+            sendFaceExpressionToParticipants(conference, faceExpression, duration);
         }
-        sendFaceExpressionToServer(conference, action.faceExpression, action.duration);
+        sendFaceExpressionToServer(conference, faceExpression, duration);
         dispatch(addToFaceExpressionsBuffer({
-            emotion: action.faceExpression,
-            timestamp: action.timestamp
+            emotion: faceExpression,
+            timestamp
         }));

         return next(action);
     }
+    case NEW_FACE_COORDINATES: {
+        const state = getState();
+        const { faceBox } = action;
+        const conference = getCurrentConference(state);
+        const localParticipant = getLocalParticipant(state);
+
+        if (getParticipantCount(state) > 1) {
+            sendFaceBoxToParticipants(conference, faceBox);
+        }
+
+        dispatch({
+            type: UPDATE_FACE_COORDINATES,
+            faceBox,
+            id: localParticipant?.id
+        });
+    }
     }

     return next(action);
@@ -4,10 +4,9 @@ import {
     ADD_FACE_EXPRESSION,
     ADD_TO_FACE_EXPRESSIONS_BUFFER,
     CLEAR_FACE_EXPRESSIONS_BUFFER,
-    START_FACE_LANDMARKS_DETECTION,
-    STOP_FACE_LANDMARKS_DETECTION,
     UPDATE_FACE_COORDINATES
 } from './actionTypes';
+import { FaceBox } from './types';

 const defaultState = {
     faceBoxes: {},
@@ -25,11 +24,7 @@
 };

 export interface IFaceLandmarksState {
-    faceBoxes: {
-        left?: number;
-        right?: number;
-        width?: number;
-    };
+    faceBoxes: { [key: string]: FaceBox; };
     faceExpressions: {
         angry: number;
         disgusted: number;
@@ -71,18 +66,6 @@ ReducerRegistry.register<IFaceLandmarksState>('features/face-landmarks',
             faceExpressionsBuffer: []
         };
     }
-    case START_FACE_LANDMARKS_DETECTION: {
-        return {
-            ...state,
-            recognitionActive: true
-        };
-    }
-    case STOP_FACE_LANDMARKS_DETECTION: {
-        return {
-            ...state,
-            recognitionActive: false
-        };
-    }
     case UPDATE_FACE_COORDINATES: {
         return {
             ...state,
@@ -0,0 +1,21 @@
+export type DetectInput = {
+    image: ImageBitmap | ImageData;
+    threshold: number;
+};
+
+export type FaceBox = {
+    left: number;
+    right: number;
+    width?: number;
+};
+
+export type InitInput = {
+    baseUrl: string;
+    detectionTypes: string[];
+};
+
+export type DetectOutput = {
+    faceBox?: FaceBox;
+    faceCount: number;
+    faceExpression?: string;
+};
@@ -37,7 +37,6 @@ import {
     trackStreamingStatusChanged
     // @ts-ignore
 } from '../../../base/tracks';
-// @ts-ignore
 import { getVideoObjectPosition } from '../../../face-landmarks/functions';
 // @ts-ignore
 import { hideGif, showGif } from '../../../gifs/actions';