ref(face-landmarks): convert to typescript and add detector class (#12144)
* fix(face-landmarks): stop recognition on imageCapture error
* ref(face-landmarks): convert files to TypeScript; fix lint issues
* code review
* ref(face-landmarks): move the detection part to a class
* ref(face-landmarks): make FaceLandmarksDetector a singleton
* fix typo and ts-ignore problematic types
* fix linting issues
This commit is contained in:
parent b83c55e9c4
commit d6f3c2a0f4
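At a glance, the new singleton replaces the old loadWorker / startFaceLandmarksDetection / stopFaceLandmarksDetection thunks. As a minimal sketch (not part of the commit) of how the refactored middleware drives it — the handler names below are illustrative, while the import paths and method names match the diff:

```typescript
// Illustrative only: the middleware in this diff calls the singleton like this.
import FaceLandmarksDetector from './FaceLandmarksDetector';
import { IStore } from '../app/types';

function onConferenceJoined(store: IStore) {
    // Loads and initializes the worker once; repeated calls are no-ops.
    FaceLandmarksDetector.init(store);
}

function onLocalCameraMuteChanged(store: IStore, muted: boolean) {
    // Detection follows the local camera track's mute state.
    if (muted) {
        FaceLandmarksDetector.stopDetection(store);
    } else {
        FaceLandmarksDetector.startDetection(store);
    }
}
```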
|
@ -52,6 +52,7 @@
|
|||
"@tensorflow/tfjs-backend-wasm": "3.13.0",
|
||||
"@tensorflow/tfjs-core": "3.13.0",
|
||||
"@types/audioworklet": "0.0.29",
|
||||
"@types/w3c-image-capture": "1.0.6",
|
||||
"@vladmandic/human": "2.6.5",
|
||||
"@vladmandic/human-models": "2.5.9",
|
||||
"@xmldom/xmldom": "0.7.5",
|
||||
|
@ -6169,11 +6170,24 @@
|
|||
"integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@types/w3c-image-capture": {
|
||||
"version": "1.0.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/w3c-image-capture/-/w3c-image-capture-1.0.6.tgz",
|
||||
"integrity": "sha512-YjU0tMPgi7exsy7qU+oh6CtNiUKhWFrcYRj6ogsFYlyjl7aLREgpkyS6ROA6D/2qF5ImmLivj01zocDR+aIlVQ==",
|
||||
"dependencies": {
|
||||
"@types/webrtc": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/webgl-ext": {
|
||||
"version": "0.0.30",
|
||||
"resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
|
||||
"integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
|
||||
},
|
||||
"node_modules/@types/webrtc": {
|
||||
"version": "0.0.32",
|
||||
"resolved": "https://registry.npmjs.org/@types/webrtc/-/webrtc-0.0.32.tgz",
|
||||
"integrity": "sha512-+F0Ozq+ksnKtjcMHujSgb4A1Vjt0b4wvvxP3/pTnXKsIrLo34EgHh2z3qJq3ntX4dbK6ytxpY1rzO/4Z8rVrHg=="
|
||||
},
|
||||
"node_modules/@types/ws": {
|
||||
"version": "8.5.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz",
|
||||
|
@ -25058,11 +25072,24 @@
|
|||
"integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==",
|
||||
"dev": true
|
||||
},
|
||||
"@types/w3c-image-capture": {
|
||||
"version": "1.0.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/w3c-image-capture/-/w3c-image-capture-1.0.6.tgz",
|
||||
"integrity": "sha512-YjU0tMPgi7exsy7qU+oh6CtNiUKhWFrcYRj6ogsFYlyjl7aLREgpkyS6ROA6D/2qF5ImmLivj01zocDR+aIlVQ==",
|
||||
"requires": {
|
||||
"@types/webrtc": "*"
|
||||
}
|
||||
},
|
||||
"@types/webgl-ext": {
|
||||
"version": "0.0.30",
|
||||
"resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
|
||||
"integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
|
||||
},
|
||||
"@types/webrtc": {
|
||||
"version": "0.0.32",
|
||||
"resolved": "https://registry.npmjs.org/@types/webrtc/-/webrtc-0.0.32.tgz",
|
||||
"integrity": "sha512-+F0Ozq+ksnKtjcMHujSgb4A1Vjt0b4wvvxP3/pTnXKsIrLo34EgHh2z3qJq3ntX4dbK6ytxpY1rzO/4Z8rVrHg=="
|
||||
},
|
||||
"@types/ws": {
|
||||
"version": "8.5.3",
|
||||
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.3.tgz",
|
||||
|
@ -57,6 +57,7 @@
    "@tensorflow/tfjs-backend-wasm": "3.13.0",
    "@tensorflow/tfjs-core": "3.13.0",
    "@types/audioworklet": "0.0.29",
    "@types/w3c-image-capture": "1.0.6",
    "@vladmandic/human": "2.6.5",
    "@vladmandic/human-models": "2.5.9",
    "@xmldom/xmldom": "0.7.5",
@ -10,6 +10,7 @@ export interface Participant {
    features?: {
        'screen-sharing'?: boolean;
    };
    getId?: Function;
    id: string;
    isFakeParticipant?: boolean;
    isJigasi?: boolean;
@ -0,0 +1,297 @@
/* eslint-disable lines-around-comment */
import 'image-capture';
import './createImageBitmap';
import { IStore } from '../app/types';
// @ts-ignore
import { getLocalVideoTrack } from '../base/tracks/functions';
import { getBaseUrl } from '../base/util/helpers';

import { NEW_FACE_COORDINATES } from './actionTypes';
import { addFaceExpression, clearFaceExpressionBuffer } from './actions';
import {
    DETECTION_TYPES,
    INIT_WORKER,
    DETECT_FACE,
    WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import {
    getDetectionInterval,
    getFaceExpressionDuration,
    sendFaceExpressionsWebhook
} from './functions';
import logger from './logger';
declare const APP: any;

/**
 * Class for face landmarks detection.
 */
class FaceLandmarksDetector {
    private static instance: FaceLandmarksDetector;
    private initialized = false;
    private imageCapture: ImageCapture | null = null;
    private worker: Worker | null = null;
    private lastFaceExpression: string | null = null;
    private lastFaceExpressionTimestamp: number | null = null;
    private duplicateConsecutiveExpressions = 0;
    private webhookSendInterval: number | null = null;
    private detectionInterval: number | null = null;
    private recognitionActive = false;
    private canvas?: HTMLCanvasElement;
    private context?: CanvasRenderingContext2D | null;

    /**
     * Constructor for the class; checks if the environment supports OffscreenCanvas.
     */
    private constructor() {
        if (typeof OffscreenCanvas === 'undefined') {
            this.canvas = document.createElement('canvas');
            this.context = this.canvas.getContext('2d');
        }
    }

    /**
     * Function for retrieving the FaceLandmarksDetector instance.
     *
     * @returns {FaceLandmarksDetector} - FaceLandmarksDetector instance.
     */
    public static getInstance(): FaceLandmarksDetector {
        if (!FaceLandmarksDetector.instance) {
            FaceLandmarksDetector.instance = new FaceLandmarksDetector();
        }

        return FaceLandmarksDetector.instance;
    }

    /**
     * Returns whether the detector has been initialized.
     *
     * @returns {boolean}
     */
    isInitialized(): boolean {
        return this.initialized;
    }

    /**
     * Initialization function: the worker is loaded and initialized, and then, if possible, the detection starts.
     *
     * @param {IStore} store - Redux store with dispatch and getState methods.
     * @returns {void}
     */
    init({ dispatch, getState }: IStore) {
        if (this.isInitialized()) {
            logger.info('Worker has already been initialized');

            return;
        }

        if (navigator.product === 'ReactNative') {
            logger.warn('Unsupported environment for face detection');

            return;
        }

        const baseUrl = `${getBaseUrl()}libs/`;
        let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;
        // @ts-ignore
        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });

        // @ts-ignore
        workerUrl = window.URL.createObjectURL(workerBlob);
        this.worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
        this.worker.onmessage = ({ data }: MessageEvent<any>) => {
            const { faceExpression, faceBox } = data;

            if (faceExpression) {
                if (faceExpression === this.lastFaceExpression) {
                    this.duplicateConsecutiveExpressions++;
                } else {
                    if (this.lastFaceExpression && this.lastFaceExpressionTimestamp) {
                        dispatch(addFaceExpression(
                            this.lastFaceExpression,
                            getFaceExpressionDuration(getState(), this.duplicateConsecutiveExpressions + 1),
                            this.lastFaceExpressionTimestamp
                        ));
                    }
                    this.lastFaceExpression = faceExpression;
                    this.lastFaceExpressionTimestamp = Date.now();
                    this.duplicateConsecutiveExpressions = 0;
                }
            }

            if (faceBox) {
                dispatch({
                    type: NEW_FACE_COORDINATES,
                    faceBox
                });
            }

            APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
        };

        const { faceLandmarks } = getState()['features/base/config'];
        const detectionTypes = [
            faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
            faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
        ].filter(Boolean);

        this.worker.postMessage({
            type: INIT_WORKER,
            baseUrl,
            detectionTypes
        });
        this.initialized = true;

        this.startDetection({
            dispatch,
            getState
        });
    }

    /**
     * The function which starts the detection process.
     *
     * @param {IStore} store - Redux store with dispatch and getState methods.
     * @param {any} track - Track from middleware; can be undefined.
     * @returns {void}
     */
    startDetection({ dispatch, getState }: IStore, track?: any) {
        if (!this.isInitialized()) {
            logger.info('Worker has not been initialized');

            return;
        }

        if (this.recognitionActive) {
            logger.log('Face detection already active.');

            return;
        }
        const state = getState();
        const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);

        if (localVideoTrack === undefined) {
            logger.warn('Face landmarks detection is disabled due to missing local track.');

            return;
        }
        const stream = localVideoTrack.jitsiTrack.getOriginalStream();
        const firstVideoTrack = stream.getVideoTracks()[0];

        this.imageCapture = new ImageCapture(firstVideoTrack);
        this.recognitionActive = true;
        logger.log('Start face detection');

        const { faceLandmarks } = state['features/base/config'];

        this.detectionInterval = window.setInterval(() => {

            if (this.worker && this.imageCapture) {
                this.sendDataToWorker(
                    this.imageCapture,
                    faceLandmarks?.faceCenteringThreshold
                ).then(status => {
                    if (!status) {
                        this.stopDetection({
                            dispatch,
                            getState
                        });
                    }
                });
            }
        }, getDetectionInterval(state));

        const { webhookProxyUrl } = state['features/base/config'];

        if (faceLandmarks?.enableFaceExpressionsDetection && webhookProxyUrl) {
            this.webhookSendInterval = window.setInterval(async () => {
                const result = await sendFaceExpressionsWebhook(getState());

                if (result) {
                    dispatch(clearFaceExpressionBuffer());
                }
            }, WEBHOOK_SEND_TIME_INTERVAL);
        }
    }

    /**
     * The function which stops the detection process.
     *
     * @param {IStore} store - Redux store with dispatch and getState methods.
     * @returns {void}
     */
    stopDetection({ dispatch, getState }: IStore) {
        if (!this.recognitionActive || !this.isInitialized()) {
            return;
        }

        if (this.lastFaceExpression && this.lastFaceExpressionTimestamp) {
            dispatch(
                addFaceExpression(
                    this.lastFaceExpression,
                    getFaceExpressionDuration(getState(), this.duplicateConsecutiveExpressions + 1),
                    this.lastFaceExpressionTimestamp
                )
            );
            this.duplicateConsecutiveExpressions = 0;
            this.lastFaceExpression = null;
            this.lastFaceExpressionTimestamp = null;
        }

        this.webhookSendInterval && window.clearInterval(this.webhookSendInterval);
        this.detectionInterval && window.clearInterval(this.detectionInterval);
        this.webhookSendInterval = null;
        this.detectionInterval = null;
        this.imageCapture = null;
        this.recognitionActive = false;
        logger.log('Stop face detection');
    }

    /**
     * Sends the image data from the track in the image capture to the face detection worker.
     *
     * @param {Object} imageCapture - Image capture that contains the current track.
     * @param {number} faceCenteringThreshold - Movement threshold as percentage for sharing face coordinates.
     * @returns {Promise<boolean>} - True if sent, false otherwise.
     */
    private async sendDataToWorker(imageCapture: ImageCapture, faceCenteringThreshold = 10): Promise<boolean> {
        if (!imageCapture || !this.worker) {
            logger.log('Could not send data to worker');

            return false;
        }

        let imageBitmap;
        let image;

        try {
            imageBitmap = await imageCapture.grabFrame();
        } catch (err) {
            logger.log('Could not send data to worker');
            logger.warn(err);

            return false;
        }

        if (typeof OffscreenCanvas === 'undefined' && this.canvas && this.context) {
            this.canvas.width = imageBitmap.width;
            this.canvas.height = imageBitmap.height;
            this.context.drawImage(imageBitmap, 0, 0);
            image = this.context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
        } else {
            image = imageBitmap;
        }

        this.worker.postMessage({
            type: DETECT_FACE,
            image,
            threshold: faceCenteringThreshold
        });

        imageBitmap.close();

        return true;
    }
}

export default FaceLandmarksDetector.getInstance();
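One detail worth calling out from init() above: the worker is not created from the bundle URL directly, but from a small Blob that importScripts the bundle. A standalone sketch of that pattern (the function name and URL parameter are illustrative):

```typescript
// Standalone sketch of the Blob + importScripts worker-loading pattern used in init().
// The real code points scriptUrl at `${getBaseUrl()}libs/face-landmarks-worker.min.js`.
function createWorkerFromScript(scriptUrl: string, name: string): Worker {
    const blob = new Blob([ `importScripts("${scriptUrl}");` ], { type: 'application/javascript' });
    const blobUrl = window.URL.createObjectURL(blob);

    return new Worker(blobUrl, { name });
}
```

This keeps the Worker constructor pointed at a same-origin blob: URL, while importScripts fetches the actual bundle, which may live elsewhere.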
@ -2,28 +2,7 @@ import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import { Human, Config, FaceResult } from '@vladmandic/human';

import { DETECTION_TYPES, FACE_DETECTION_SCORE_THRESHOLD, FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';

type DetectInput = {
    image: ImageBitmap | ImageData;
    threshold: number;
};

type FaceBox = {
    left: number;
    right: number;
    width?: number;
};

type InitInput = {
    baseUrl: string;
    detectionTypes: string[];
};

type DetectOutput = {
    faceBox?: FaceBox;
    faceCount: number;
    faceExpression?: string;
};
import { DetectInput, DetectOutput, FaceBox, InitInput } from './types';

export interface FaceLandmarksHelper {
    detect: ({ image, threshold }: DetectInput) => Promise<DetectOutput>;
@ -28,24 +28,6 @@ export const ADD_TO_FACE_EXPRESSIONS_BUFFER = 'ADD_TO_FACE_EXPRESSIONS_BUFFER';
 */
export const CLEAR_FACE_EXPRESSIONS_BUFFER = 'CLEAR_FACE_EXPRESSIONS_BUFFER';

/**
 * Redux action type dispatched in order to set recognition active in the state.
 *
 * {
 *     type: START_FACE_LANDMARKS_DETECTION
 * }
 */
export const START_FACE_LANDMARKS_DETECTION = 'START_FACE_LANDMARKS_DETECTION';

/**
 * Redux action type dispatched in order to set recognition inactive in the state.
 *
 * {
 *     type: STOP_FACE_LANDMARKS_DETECTION
 * }
 */
export const STOP_FACE_LANDMARKS_DETECTION = 'STOP_FACE_LANDMARKS_DETECTION';

/**
 * Redux action type dispatched in order to update coordinates of a detected face.
 *
@ -56,3 +38,14 @@ export const STOP_FACE_LANDMARKS_DETECTION = 'STOP_FACE_LANDMARKS_DETECTION';
 * }
 */
export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';

/**
 * Redux action type dispatched in order to signal new face coordinates were obtained for the local participant.
 *
 * {
 *     type: NEW_FACE_COORDINATES,
 *     faceBox: Object({ left, bottom, right, top }),
 *     participantId: string
 * }
 */
export const NEW_FACE_COORDINATES = 'NEW_FACE_COORDINATES';
|
@ -1,286 +0,0 @@
|
|||
// @flow
|
||||
import 'image-capture';
|
||||
import './createImageBitmap';
|
||||
|
||||
import { getCurrentConference } from '../base/conference';
|
||||
import { getLocalParticipant, getParticipantCount } from '../base/participants';
|
||||
import { getLocalVideoTrack } from '../base/tracks';
|
||||
import { getBaseUrl } from '../base/util';
|
||||
|
||||
import {
|
||||
ADD_FACE_EXPRESSION,
|
||||
ADD_TO_FACE_EXPRESSIONS_BUFFER,
|
||||
CLEAR_FACE_EXPRESSIONS_BUFFER,
|
||||
START_FACE_LANDMARKS_DETECTION,
|
||||
STOP_FACE_LANDMARKS_DETECTION,
|
||||
UPDATE_FACE_COORDINATES
|
||||
} from './actionTypes';
|
||||
import {
|
||||
DETECTION_TYPES,
|
||||
INIT_WORKER,
|
||||
WEBHOOK_SEND_TIME_INTERVAL
|
||||
} from './constants';
|
||||
import {
|
||||
getDetectionInterval,
|
||||
sendDataToWorker,
|
||||
sendFaceBoxToParticipants,
|
||||
sendFaceExpressionsWebhook
|
||||
} from './functions';
|
||||
import logger from './logger';
|
||||
|
||||
declare var APP: Object;
|
||||
|
||||
/**
|
||||
* Object containing a image capture of the local track.
|
||||
*/
|
||||
let imageCapture;
|
||||
|
||||
/**
|
||||
* Object where the face landmarks worker is stored.
|
||||
*/
|
||||
let worker;
|
||||
|
||||
/**
|
||||
* The last face expression received from the worker.
|
||||
*/
|
||||
let lastFaceExpression;
|
||||
|
||||
/**
|
||||
* The last face expression timestamp.
|
||||
*/
|
||||
let lastFaceExpressionTimestamp;
|
||||
|
||||
/**
|
||||
* How many duplicate consecutive expression occurred.
|
||||
* If a expression that is not the same as the last one it is reset to 0.
|
||||
*/
|
||||
let duplicateConsecutiveExpressions = 0;
|
||||
|
||||
/**
|
||||
* Variable that keeps the interval for sending expressions to webhook.
|
||||
*/
|
||||
let webhookSendInterval;
|
||||
|
||||
/**
|
||||
* Variable that keeps the interval for detecting faces in a frame.
|
||||
*/
|
||||
let detectionInterval;
|
||||
|
||||
/**
|
||||
* Loads the worker that detects the face landmarks.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
export function loadWorker() {
|
||||
return function(dispatch: Function, getState: Function) {
|
||||
if (worker) {
|
||||
logger.info('Worker has already been initialized');
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (navigator.product === 'ReactNative') {
|
||||
logger.warn('Unsupported environment for face recognition');
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
const baseUrl = `${getBaseUrl()}libs/`;
|
||||
let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;
|
||||
|
||||
const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
|
||||
|
||||
workerUrl = window.URL.createObjectURL(workerBlob);
|
||||
worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
|
||||
worker.onmessage = function(e: Object) {
|
||||
const { faceExpression, faceBox } = e.data;
|
||||
|
||||
if (faceExpression) {
|
||||
if (faceExpression === lastFaceExpression) {
|
||||
duplicateConsecutiveExpressions++;
|
||||
} else {
|
||||
if (lastFaceExpression && lastFaceExpressionTimestamp) {
|
||||
dispatch(addFaceExpression(
|
||||
lastFaceExpression,
|
||||
duplicateConsecutiveExpressions + 1,
|
||||
lastFaceExpressionTimestamp
|
||||
));
|
||||
}
|
||||
lastFaceExpression = faceExpression;
|
||||
lastFaceExpressionTimestamp = Date.now();
|
||||
duplicateConsecutiveExpressions = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (faceBox) {
|
||||
const state = getState();
|
||||
const conference = getCurrentConference(state);
|
||||
const localParticipant = getLocalParticipant(state);
|
||||
|
||||
if (getParticipantCount(state) > 1) {
|
||||
sendFaceBoxToParticipants(conference, faceBox);
|
||||
}
|
||||
|
||||
dispatch({
|
||||
type: UPDATE_FACE_COORDINATES,
|
||||
faceBox,
|
||||
id: localParticipant.id
|
||||
});
|
||||
}
|
||||
|
||||
APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
|
||||
};
|
||||
|
||||
const { faceLandmarks } = getState()['features/base/config'];
|
||||
const detectionTypes = [
|
||||
faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
|
||||
faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
|
||||
].filter(Boolean);
|
||||
|
||||
worker.postMessage({
|
||||
type: INIT_WORKER,
|
||||
baseUrl,
|
||||
detectionTypes
|
||||
});
|
||||
|
||||
dispatch(startFaceLandmarksDetection());
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Starts the recognition and detection of face expressions.
|
||||
*
|
||||
* @param {Track | undefined} track - Track for which to start detecting faces.
|
||||
* @returns {Function}
|
||||
*/
|
||||
export function startFaceLandmarksDetection(track) {
|
||||
return async function(dispatch: Function, getState: Function) {
|
||||
if (!worker) {
|
||||
return;
|
||||
}
|
||||
|
||||
const state = getState();
|
||||
const { recognitionActive } = state['features/face-landmarks'];
|
||||
|
||||
if (recognitionActive) {
|
||||
logger.log('Face recognition already active.');
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
|
||||
|
||||
if (localVideoTrack === undefined) {
|
||||
logger.warn('Face landmarks detection is disabled due to missing local track.');
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
const stream = localVideoTrack.jitsiTrack.getOriginalStream();
|
||||
|
||||
dispatch({ type: START_FACE_LANDMARKS_DETECTION });
|
||||
logger.log('Start face recognition');
|
||||
|
||||
const firstVideoTrack = stream.getVideoTracks()[0];
|
||||
const { faceLandmarks } = state['features/base/config'];
|
||||
|
||||
imageCapture = new ImageCapture(firstVideoTrack);
|
||||
|
||||
detectionInterval = setInterval(() => {
|
||||
sendDataToWorker(
|
||||
worker,
|
||||
imageCapture,
|
||||
faceLandmarks?.faceCenteringThreshold
|
||||
);
|
||||
}, getDetectionInterval(state));
|
||||
|
||||
if (faceLandmarks?.enableFaceExpressionsDetection) {
|
||||
webhookSendInterval = setInterval(async () => {
|
||||
const result = await sendFaceExpressionsWebhook(getState());
|
||||
|
||||
if (result) {
|
||||
dispatch(clearFaceExpressionBuffer());
|
||||
}
|
||||
}, WEBHOOK_SEND_TIME_INTERVAL);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Stops the recognition and detection of face expressions.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
export function stopFaceLandmarksDetection() {
|
||||
return function(dispatch: Function, getState: Function) {
|
||||
const { recognitionActive } = getState()['features/face-landmarks'];
|
||||
|
||||
if (lastFaceExpression && lastFaceExpressionTimestamp && recognitionActive) {
|
||||
dispatch(
|
||||
addFaceExpression(
|
||||
lastFaceExpression,
|
||||
duplicateConsecutiveExpressions + 1,
|
||||
lastFaceExpressionTimestamp
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
clearInterval(webhookSendInterval);
|
||||
clearInterval(detectionInterval);
|
||||
|
||||
duplicateConsecutiveExpressions = 0;
|
||||
lastFaceExpression = null;
|
||||
lastFaceExpressionTimestamp = null;
|
||||
webhookSendInterval = null;
|
||||
detectionInterval = null;
|
||||
imageCapture = null;
|
||||
|
||||
dispatch({ type: STOP_FACE_LANDMARKS_DETECTION });
|
||||
logger.log('Stop face recognition');
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a new face expression and its duration.
|
||||
*
|
||||
* @param {string} faceExpression - Face expression to be added.
|
||||
* @param {number} duration - Duration in seconds of the face expression.
|
||||
* @param {number} timestamp - Duration in seconds of the face expression.
|
||||
* @returns {Object}
|
||||
*/
|
||||
function addFaceExpression(faceExpression: string, duration: number, timestamp: number) {
|
||||
return function(dispatch: Function, getState: Function) {
|
||||
const finalDuration = duration * getDetectionInterval(getState()) / 1000;
|
||||
|
||||
dispatch({
|
||||
type: ADD_FACE_EXPRESSION,
|
||||
faceExpression,
|
||||
duration: finalDuration,
|
||||
timestamp
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a face expression with its timestamp to the face expression buffer.
|
||||
*
|
||||
* @param {Object} faceExpression - Object containing face expression string and its timestamp.
|
||||
* @returns {Object}
|
||||
*/
|
||||
export function addToFaceExpressionsBuffer(faceExpression: Object) {
|
||||
return {
|
||||
type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
|
||||
faceExpression
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears the face expressions array in the state.
|
||||
*
|
||||
* @returns {Object}
|
||||
*/
|
||||
function clearFaceExpressionBuffer() {
|
||||
return {
|
||||
type: CLEAR_FACE_EXPRESSIONS_BUFFER
|
||||
};
|
||||
}
|
|
@ -0,0 +1,70 @@
import 'image-capture';
import './createImageBitmap';
import { AnyAction } from 'redux';

import {
    ADD_FACE_EXPRESSION,
    ADD_TO_FACE_EXPRESSIONS_BUFFER,
    CLEAR_FACE_EXPRESSIONS_BUFFER,
    NEW_FACE_COORDINATES
} from './actionTypes';
import { FaceBox } from './types';

/**
 * Adds a new face expression and its duration.
 *
 * @param {string} faceExpression - Face expression to be added.
 * @param {number} duration - Duration in seconds of the face expression.
 * @param {number} timestamp - Timestamp of when the face expression occurred.
 * @returns {AnyAction}
 */
export function addFaceExpression(faceExpression: string, duration: number, timestamp: number): AnyAction {
    return {
        type: ADD_FACE_EXPRESSION,
        faceExpression,
        duration,
        timestamp
    };
}

/**
 * Adds a face expression with its timestamp to the face expression buffer.
 *
 * @param {Object} faceExpression - Object containing face expression string and its timestamp.
 * @returns {AnyAction}
 */
export function addToFaceExpressionsBuffer(
    faceExpression: {
        emotion: string;
        timestamp: number;
    }
): AnyAction {
    return {
        type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
        faceExpression
    };
}

/**
 * Clears the face expressions array in the state.
 *
 * @returns {Object}
 */
export function clearFaceExpressionBuffer() {
    return {
        type: CLEAR_FACE_EXPRESSIONS_BUFFER
    };
}

/**
 * Signals that a new face box was obtained for the local participant.
 *
 * @param {FaceBox} faceBox - The face box of the local participant.
 * @returns {AnyAction}
 */
export function newFaceBox(faceBox: FaceBox): AnyAction {
    return {
        type: NEW_FACE_COORDINATES,
        faceBox
    };
}
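A behavioural note on addFaceExpression: the old thunk passed a raw count and multiplied it by the detection interval inside the action creator, while the new plain action already receives a duration in seconds, which the detector computes via getFaceExpressionDuration. A small worked sketch, assuming a 1000 ms detection interval (illustrative value):

```typescript
// Sketch: how the detector derives the duration it dispatches, assuming a
// detection interval of 1000 ms and four consecutive duplicate expressions.
const detectionIntervalMs = 1000;            // illustrative value
const duplicateConsecutiveExpressions = 4;

// Mirrors getFaceExpressionDuration(state, duplicateConsecutiveExpressions + 1).
const durationSeconds = (duplicateConsecutiveExpressions + 1) * (detectionIntervalMs / 1000);

// durationSeconds === 5, dispatched as:
// dispatch(addFaceExpression('happy', durationSeconds, Date.now()));
```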
@ -59,4 +59,4 @@ export const DETECTION_TYPES = {
/**
 * Threshold for detection score of face.
 */
export const FACE_DETECTION_SCORE_THRESHOLD = 0.6;
export const FACE_DETECTION_SCORE_THRESHOLD = 0.75;
@ -4,6 +4,7 @@
 *
 * Support source image types: Canvas.
 */
// @ts-nocheck
if (!('createImageBitmap' in window)) {
    window.createImageBitmap = async function(data) {
        return new Promise((resolve, reject) => {
@ -16,12 +17,14 @@ if (!('createImageBitmap' in window)) {
            }
            const img = document.createElement('img');

            // eslint-disable-next-line no-empty-function
            img.close = () => {};
            img.close = () => {
                // empty
            };

            img.addEventListener('load', () => {
                resolve(img);
            });

            img.src = dataURL;
        });
    };
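Related to this polyfill, sendDataToWorker() in the detector falls back to a plain 2D canvas when OffscreenCanvas is unavailable, so the worker receives ImageData instead of an ImageBitmap. A condensed sketch of that branch (the function name is illustrative):

```typescript
// Condensed sketch of the fallback branch in FaceLandmarksDetector.sendDataToWorker().
function toWorkerImage(
        bitmap: ImageBitmap,
        canvas: HTMLCanvasElement,
        context: CanvasRenderingContext2D): ImageBitmap | ImageData {
    if (typeof OffscreenCanvas === 'undefined') {
        canvas.width = bitmap.width;
        canvas.height = bitmap.height;
        context.drawImage(bitmap, 0, 0);

        return context.getImageData(0, 0, bitmap.width, bitmap.height);
    }

    return bitmap;
}
```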
@ -1,17 +1,16 @@
import { FaceLandmarksHelper, HumanHelper } from './FaceLandmarksHelper';
import { DETECT_FACE, INIT_WORKER } from './constants';


let helper: FaceLandmarksHelper;

onmessage = async function(message: MessageEvent<any>) {
    switch (message.data.type) {
onmessage = async function({ data }: MessageEvent<any>) {
    switch (data.type) {
    case DETECT_FACE: {
        if (!helper || helper.getDetectionInProgress()) {
            return;
        }

        const detections = await helper.detect(message.data);
        const detections = await helper.detect(data);

        if (detections && (detections.faceBox || detections.faceExpression || detections.faceCount)) {
            self.postMessage(detections);
@ -21,7 +20,7 @@ onmessage = async function(message: MessageEvent<any>) {
    }

    case INIT_WORKER: {
        helper = new HumanHelper(message.data);
        helper = new HumanHelper(data);
        break;
    }
    }
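Taken together with the detector, the protocol stays small: two message types in, one reply shape out. A condensed sketch of the main-thread side (the string literals stand in for the INIT_WORKER / DETECT_FACE constants from ./constants, and the reply shape mirrors DetectOutput):

```typescript
// Condensed sketch of the protocol between FaceLandmarksDetector and the worker.
type DetectionReply = {
    faceBox?: { left: number; right: number; width?: number };
    faceCount: number;
    faceExpression?: string;
};

function wireWorker(worker: Worker, baseUrl: string, detectionTypes: string[]) {
    // Sent once from init(); the worker builds a HumanHelper from it.
    worker.postMessage({ type: 'INIT_WORKER', baseUrl, detectionTypes });

    worker.onmessage = ({ data }: MessageEvent<DetectionReply>) => {
        // faceExpression feeds addFaceExpression(), faceBox feeds NEW_FACE_COORDINATES.
        console.log(data.faceExpression, data.faceBox);
    };
}

function requestDetection(worker: Worker, image: ImageBitmap | ImageData, threshold: number) {
    // Sent on every detection-interval tick from sendDataToWorker().
    worker.postMessage({ type: 'DETECT_FACE', image, threshold });
}
```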
@ -1,12 +1,15 @@
|
|||
// @flow
|
||||
import { getLocalParticipant } from '../base/participants';
|
||||
/* eslint-disable lines-around-comment */
|
||||
import { IState } from '../app/types';
|
||||
import { getLocalParticipant } from '../base/participants/functions';
|
||||
// @ts-ignore
|
||||
import { extractFqnFromPath } from '../dynamic-branding/functions.any';
|
||||
|
||||
import { DETECT_FACE, FACE_BOX_EVENT_TYPE, SEND_IMAGE_INTERVAL_MS } from './constants';
|
||||
import logger from './logger';
|
||||
import { FaceBox } from './types';
|
||||
|
||||
let canvas;
|
||||
let context;
|
||||
let canvas: HTMLCanvasElement;
|
||||
let context: CanvasRenderingContext2D | null;
|
||||
|
||||
if (typeof OffscreenCanvas === 'undefined') {
|
||||
canvas = document.createElement('canvas');
|
||||
|
@ -16,13 +19,13 @@ if (typeof OffscreenCanvas === 'undefined') {
|
|||
/**
|
||||
* Sends the face expression with its duration to all the other participants.
|
||||
*
|
||||
* @param {Object} conference - The current conference.
|
||||
* @param {any} conference - The current conference.
|
||||
* @param {string} faceExpression - Face expression to be sent.
|
||||
* @param {number} duration - The duration of the face expression in seconds.
|
||||
* @returns {void}
|
||||
*/
|
||||
export function sendFaceExpressionToParticipants(
|
||||
conference: Object,
|
||||
conference: any,
|
||||
faceExpression: string,
|
||||
duration: number
|
||||
): void {
|
||||
|
@ -41,13 +44,13 @@ export function sendFaceExpressionToParticipants(
|
|||
/**
|
||||
* Sends the face box to all the other participants.
|
||||
*
|
||||
* @param {Object} conference - The current conference.
|
||||
* @param {Object} faceBox - Face box to be sent.
|
||||
* @param {any} conference - The current conference.
|
||||
* @param {FaceBox} faceBox - Face box to be sent.
|
||||
* @returns {void}
|
||||
*/
|
||||
export function sendFaceBoxToParticipants(
|
||||
conference: Object,
|
||||
faceBox: Object
|
||||
conference: any,
|
||||
faceBox: FaceBox
|
||||
): void {
|
||||
try {
|
||||
conference.sendEndpointMessage('', {
|
||||
|
@ -62,13 +65,13 @@ export function sendFaceBoxToParticipants(
|
|||
/**
|
||||
* Sends the face expression with its duration to xmpp server.
|
||||
*
|
||||
* @param {Object} conference - The current conference.
|
||||
* @param {any} conference - The current conference.
|
||||
* @param {string} faceExpression - Face expression to be sent.
|
||||
* @param {number} duration - The duration of the face expression in seconds.
|
||||
* @returns {void}
|
||||
*/
|
||||
export function sendFaceExpressionToServer(
|
||||
conference: Object,
|
||||
conference: any,
|
||||
faceExpression: string,
|
||||
duration: number
|
||||
): void {
|
||||
|
@ -88,12 +91,12 @@ export function sendFaceExpressionToServer(
|
|||
* @param {Object} state - Redux state.
|
||||
* @returns {boolean} - True if sent, false otherwise.
|
||||
*/
|
||||
export async function sendFaceExpressionsWebhook(state: Object) {
|
||||
export async function sendFaceExpressionsWebhook(state: IState) {
|
||||
const { webhookProxyUrl: url } = state['features/base/config'];
|
||||
const { conference } = state['features/base/conference'];
|
||||
const { jwt } = state['features/base/jwt'];
|
||||
const { connection } = state['features/base/connection'];
|
||||
const jid = connection.getJid();
|
||||
const jid = connection?.getJid();
|
||||
const localParticipant = getLocalParticipant(state);
|
||||
const { faceExpressionsBuffer } = state['features/face-landmarks'];
|
||||
|
||||
|
@ -111,8 +114,8 @@ export async function sendFaceExpressionsWebhook(state: Object) {
|
|||
sessionId: conference.sessionId,
|
||||
submitted: Date.now(),
|
||||
emotions: faceExpressionsBuffer,
|
||||
participantId: localParticipant.jwtId,
|
||||
participantName: localParticipant.name,
|
||||
participantId: localParticipant?.jwtId,
|
||||
participantName: localParticipant?.name,
|
||||
participantJid: jid
|
||||
};
|
||||
|
||||
|
@ -143,15 +146,15 @@ export async function sendFaceExpressionsWebhook(state: Object) {
|
|||
* @param {Worker} worker - Face recognition worker.
|
||||
* @param {Object} imageCapture - Image capture that contains the current track.
|
||||
* @param {number} threshold - Movement threshold as percentage for sharing face coordinates.
|
||||
* @returns {Promise<void>}
|
||||
* @returns {Promise<boolean>} - True if sent, false otherwise.
|
||||
*/
|
||||
export async function sendDataToWorker(
|
||||
worker: Worker,
|
||||
imageCapture: Object,
|
||||
threshold: number = 10
|
||||
): Promise<void> {
|
||||
imageCapture: ImageCapture,
|
||||
threshold = 10
|
||||
): Promise<boolean> {
|
||||
if (imageCapture === null || imageCapture === undefined) {
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
let imageBitmap;
|
||||
|
@ -162,15 +165,15 @@ export async function sendDataToWorker(
|
|||
} catch (err) {
|
||||
logger.warn(err);
|
||||
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (typeof OffscreenCanvas === 'undefined') {
|
||||
canvas.width = imageBitmap.width;
|
||||
canvas.height = imageBitmap.height;
|
||||
context.drawImage(imageBitmap, 0, 0);
|
||||
context?.drawImage(imageBitmap, 0, 0);
|
||||
|
||||
image = context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
|
||||
image = context?.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
|
||||
} else {
|
||||
image = imageBitmap;
|
||||
}
|
||||
|
@ -182,33 +185,37 @@ export async function sendDataToWorker(
|
|||
});
|
||||
|
||||
imageBitmap.close();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets face box for a participant id.
|
||||
*
|
||||
* @param {string} id - The participant id.
|
||||
* @param {Object} state - The redux state.
|
||||
* @param {IState} state - The redux state.
|
||||
* @returns {Object}
|
||||
*/
|
||||
function getFaceBoxForId(id: string, state: Object) {
|
||||
function getFaceBoxForId(id: string, state: IState) {
|
||||
return state['features/face-landmarks'].faceBoxes[id];
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the video object position for a participant id.
|
||||
*
|
||||
* @param {Object} state - The redux state.
|
||||
* @param {IState} state - The redux state.
|
||||
* @param {string} id - The participant id.
|
||||
* @returns {string} - CSS object-position in the shape of '{horizontalPercentage}% {verticalPercentage}%'.
|
||||
*/
|
||||
export function getVideoObjectPosition(state: Object, id: string) {
|
||||
const faceBox = getFaceBoxForId(id, state);
|
||||
export function getVideoObjectPosition(state: IState, id?: string) {
|
||||
const faceBox = id && getFaceBoxForId(id, state);
|
||||
|
||||
if (faceBox) {
|
||||
const { right, width } = faceBox;
|
||||
|
||||
return `${right - (width / 2)}% 50%`;
|
||||
if (right && width) {
|
||||
return `${right - (width / 2)}% 50%`;
|
||||
}
|
||||
}
|
||||
|
||||
return '50% 50%';
|
||||
|
@ -217,11 +224,22 @@ export function getVideoObjectPosition(state: Object, id: string) {
|
|||
/**
|
||||
* Gets the video object position for a participant id.
|
||||
*
|
||||
* @param {Object} state - The redux state.
|
||||
* @param {IState} state - The redux state.
|
||||
* @returns {number} - Number of milliseconds for doing face detection.
|
||||
*/
|
||||
export function getDetectionInterval(state: Object) {
|
||||
export function getDetectionInterval(state: IState) {
|
||||
const { faceLandmarks } = state['features/base/config'];
|
||||
|
||||
return Math.max(faceLandmarks?.captureInterval || SEND_IMAGE_INTERVAL_MS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the duration in seconds of a face expression.
|
||||
*
|
||||
* @param {IState} state - The redux state.
|
||||
* @param {number} faceExpressionCount - The number of consecutive face expressions.
|
||||
* @returns {number} - Duration of face expression in seconds.
|
||||
*/
|
||||
export function getFaceExpressionDuration(state: IState, faceExpressionCount: number) {
|
||||
return faceExpressionCount * (getDetectionInterval(state) / 1000);
|
||||
}
|
|
@ -1 +0,0 @@
export * from './actions';
@ -1,38 +1,41 @@
|
|||
/* eslint-disable lines-around-comment */
|
||||
import { IStore } from '../app/types';
|
||||
import {
|
||||
CONFERENCE_JOINED,
|
||||
CONFERENCE_WILL_LEAVE,
|
||||
getCurrentConference
|
||||
} from '../base/conference';
|
||||
CONFERENCE_WILL_LEAVE
|
||||
} from '../base/conference/actionTypes';
|
||||
// @ts-ignore
|
||||
import { getCurrentConference } from '../base/conference/functions';
|
||||
import { JitsiConferenceEvents } from '../base/lib-jitsi-meet';
|
||||
import { getParticipantCount } from '../base/participants';
|
||||
import { MiddlewareRegistry } from '../base/redux';
|
||||
import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks';
|
||||
import { getLocalParticipant, getParticipantCount } from '../base/participants/functions';
|
||||
import { Participant } from '../base/participants/types';
|
||||
import MiddlewareRegistry from '../base/redux/MiddlewareRegistry';
|
||||
import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks/actionTypes';
|
||||
|
||||
import { ADD_FACE_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes';
|
||||
import FaceLandmarksDetector from './FaceLandmarksDetector';
|
||||
import { ADD_FACE_EXPRESSION, NEW_FACE_COORDINATES, UPDATE_FACE_COORDINATES } from './actionTypes';
|
||||
import {
|
||||
addToFaceExpressionsBuffer,
|
||||
loadWorker,
|
||||
stopFaceLandmarksDetection,
|
||||
startFaceLandmarksDetection
|
||||
addToFaceExpressionsBuffer
|
||||
} from './actions';
|
||||
import { FACE_BOX_EVENT_TYPE } from './constants';
|
||||
import { sendFaceExpressionToParticipants, sendFaceExpressionToServer } from './functions';
|
||||
import { sendFaceBoxToParticipants, sendFaceExpressionToParticipants, sendFaceExpressionToServer } from './functions';
|
||||
|
||||
|
||||
MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
|
||||
MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: any) => {
|
||||
const { dispatch, getState } = store;
|
||||
const { faceLandmarks } = getState()['features/base/config'];
|
||||
const isEnabled = faceLandmarks?.enableFaceCentering || faceLandmarks?.enableFaceExpressionsDetection;
|
||||
|
||||
if (action.type === CONFERENCE_JOINED) {
|
||||
if (isEnabled) {
|
||||
dispatch(loadWorker());
|
||||
FaceLandmarksDetector.init(store);
|
||||
}
|
||||
|
||||
// allow using remote face centering data when local face centering is not enabled
|
||||
action.conference.on(
|
||||
JitsiConferenceEvents.ENDPOINT_MESSAGE_RECEIVED,
|
||||
(participant, eventData) => {
|
||||
if (!participant || !eventData) {
|
||||
(participant: Participant | undefined, eventData: any) => {
|
||||
if (!participant || !eventData || !participant.getId) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -55,7 +58,7 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
|
|||
|
||||
switch (action.type) {
|
||||
case CONFERENCE_WILL_LEAVE : {
|
||||
dispatch(stopFaceLandmarksDetection());
|
||||
FaceLandmarksDetector.stopDetection(store);
|
||||
|
||||
return next(action);
|
||||
}
|
||||
|
@ -64,7 +67,7 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
|
|||
|
||||
if (videoType === 'camera' && isLocal()) {
|
||||
// need to pass this since the track is not yet added in the store
|
||||
dispatch(startFaceLandmarksDetection(action.track));
|
||||
FaceLandmarksDetector.startDetection(store, action.track);
|
||||
}
|
||||
|
||||
return next(action);
|
||||
|
@ -81,9 +84,9 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
|
|||
if (muted !== undefined) {
|
||||
// addresses video mute state changes
|
||||
if (muted) {
|
||||
dispatch(stopFaceLandmarksDetection());
|
||||
FaceLandmarksDetector.stopDetection(store);
|
||||
} else {
|
||||
dispatch(startFaceLandmarksDetection());
|
||||
FaceLandmarksDetector.startDetection(store);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -93,26 +96,43 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
|
|||
const { jitsiTrack: { isLocal, videoType } } = action.track;
|
||||
|
||||
if (videoType === 'camera' && isLocal()) {
|
||||
dispatch(stopFaceLandmarksDetection());
|
||||
FaceLandmarksDetector.stopDetection(store);
|
||||
}
|
||||
|
||||
return next(action);
|
||||
}
|
||||
case ADD_FACE_EXPRESSION: {
|
||||
const state = getState();
|
||||
const { faceExpression, duration, timestamp } = action;
|
||||
const conference = getCurrentConference(state);
|
||||
|
||||
if (getParticipantCount(state) > 1) {
|
||||
sendFaceExpressionToParticipants(conference, action.faceExpression, action.duration);
|
||||
sendFaceExpressionToParticipants(conference, faceExpression, duration);
|
||||
}
|
||||
sendFaceExpressionToServer(conference, action.faceExpression, action.duration);
|
||||
sendFaceExpressionToServer(conference, faceExpression, duration);
|
||||
dispatch(addToFaceExpressionsBuffer({
|
||||
emotion: action.faceExpression,
|
||||
timestamp: action.timestamp
|
||||
emotion: faceExpression,
|
||||
timestamp
|
||||
}));
|
||||
|
||||
return next(action);
|
||||
}
|
||||
case NEW_FACE_COORDINATES: {
|
||||
const state = getState();
|
||||
const { faceBox } = action;
|
||||
const conference = getCurrentConference(state);
|
||||
const localParticipant = getLocalParticipant(state);
|
||||
|
||||
if (getParticipantCount(state) > 1) {
|
||||
sendFaceBoxToParticipants(conference, faceBox);
|
||||
}
|
||||
|
||||
dispatch({
|
||||
type: UPDATE_FACE_COORDINATES,
|
||||
faceBox,
|
||||
id: localParticipant?.id
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return next(action);
|
|
@ -4,10 +4,9 @@ import {
|
|||
ADD_FACE_EXPRESSION,
|
||||
ADD_TO_FACE_EXPRESSIONS_BUFFER,
|
||||
CLEAR_FACE_EXPRESSIONS_BUFFER,
|
||||
START_FACE_LANDMARKS_DETECTION,
|
||||
STOP_FACE_LANDMARKS_DETECTION,
|
||||
UPDATE_FACE_COORDINATES
|
||||
} from './actionTypes';
|
||||
import { FaceBox } from './types';
|
||||
|
||||
const defaultState = {
|
||||
faceBoxes: {},
|
||||
|
@ -25,11 +24,7 @@ const defaultState = {
|
|||
};
|
||||
|
||||
export interface IFaceLandmarksState {
|
||||
faceBoxes: {
|
||||
left?: number;
|
||||
right?: number;
|
||||
width?: number;
|
||||
};
|
||||
faceBoxes: { [key: string]: FaceBox; };
|
||||
faceExpressions: {
|
||||
angry: number;
|
||||
disgusted: number;
|
||||
|
@ -71,18 +66,6 @@ ReducerRegistry.register<IFaceLandmarksState>('features/face-landmarks',
|
|||
faceExpressionsBuffer: []
|
||||
};
|
||||
}
|
||||
case START_FACE_LANDMARKS_DETECTION: {
|
||||
return {
|
||||
...state,
|
||||
recognitionActive: true
|
||||
};
|
||||
}
|
||||
case STOP_FACE_LANDMARKS_DETECTION: {
|
||||
return {
|
||||
...state,
|
||||
recognitionActive: false
|
||||
};
|
||||
}
|
||||
case UPDATE_FACE_COORDINATES: {
|
||||
return {
|
||||
...state,
|
||||
|
|
|
@ -0,0 +1,21 @@
export type DetectInput = {
    image: ImageBitmap | ImageData;
    threshold: number;
};

export type FaceBox = {
    left: number;
    right: number;
    width?: number;
};

export type InitInput = {
    baseUrl: string;
    detectionTypes: string[];
};

export type DetectOutput = {
    faceBox?: FaceBox;
    faceCount: number;
    faceExpression?: string;
};
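The FaceBox fields appear to be expressed as percentages of the frame, which is what lets getVideoObjectPosition() (in functions.ts earlier in this diff) map a box straight to a CSS object-position. A small worked sketch:

```typescript
import { FaceBox } from './types';

// Sketch of the object-position math used by getVideoObjectPosition().
function toObjectPosition(faceBox?: FaceBox): string {
    if (faceBox) {
        const { right, width } = faceBox;

        if (right && width) {
            return `${right - (width / 2)}% 50%`;
        }
    }

    return '50% 50%';
}

// Example: toObjectPosition({ left: 40, right: 70, width: 30 }) === '55% 50%'
```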
@ -37,7 +37,6 @@ import {
    trackStreamingStatusChanged
    // @ts-ignore
} from '../../../base/tracks';
// @ts-ignore
import { getVideoObjectPosition } from '../../../face-landmarks/functions';
// @ts-ignore
import { hideGif, showGif } from '../../../gifs/actions';