ref(face-landmarks) refactor namings (#11307)

* ref(face-landmarks) refactor namings
* code review

parent a99532b0d8
commit 42703fed47
@@ -8,7 +8,7 @@ libs/*
 resources/*
 react/features/stream-effects/virtual-background/vendor/*
 load-test/*
-react/features/facial-recognition/resources/*
+react/features/face-landmarks/resources/*
 
 # ESLint will by default ignore its own configuration file. However, there does
 # not seem to be a reason why we will want to risk being inconsistent with our
Makefile (14 changed lines)
@@ -8,7 +8,7 @@ TF_WASM_DIR = node_modules/@tensorflow/tfjs-backend-wasm/dist/
 RNNOISE_WASM_DIR = node_modules/rnnoise-wasm/dist
 TFLITE_WASM = react/features/stream-effects/virtual-background/vendor/tflite
 MEET_MODELS_DIR = react/features/stream-effects/virtual-background/vendor/models
-FACIAL_MODELS_DIR = react/features/facial-recognition/resources
+FACE_MODELS_DIR = react/features/face-landmarks/resources
 NODE_SASS = ./node_modules/.bin/sass
 NPM = npm
 OUTPUT_DIR = .
@@ -30,7 +30,7 @@ clean:
     rm -fr $(BUILD_DIR)
 
 .NOTPARALLEL:
-deploy: deploy-init deploy-appbundle deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-css deploy-local deploy-facial-expressions
+deploy: deploy-init deploy-appbundle deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-css deploy-local deploy-face-landmarks
 
 deploy-init:
     rm -fr $(DEPLOY_DIR)
@@ -53,8 +53,8 @@ deploy-appbundle:
         $(OUTPUT_DIR)/analytics-ga.js \
         $(BUILD_DIR)/analytics-ga.min.js \
         $(BUILD_DIR)/analytics-ga.min.js.map \
-        $(BUILD_DIR)/facial-expressions-worker.min.js \
-        $(BUILD_DIR)/facial-expressions-worker.min.js.map \
+        $(BUILD_DIR)/face-landmarks-worker.min.js \
+        $(BUILD_DIR)/face-landmarks-worker.min.js.map \
         $(DEPLOY_DIR)
     cp \
         $(BUILD_DIR)/close3.min.js \
@@ -101,9 +101,9 @@ deploy-meet-models:
         $(MEET_MODELS_DIR)/*.tflite \
         $(DEPLOY_DIR)
 
-deploy-facial-expressions:
+deploy-face-landmarks:
     cp \
-        $(FACIAL_MODELS_DIR)/* \
+        $(FACE_MODELS_DIR)/* \
         $(DEPLOY_DIR)
 
 deploy-css:
@@ -115,7 +115,7 @@ deploy-local:
     ([ ! -x deploy-local.sh ] || ./deploy-local.sh)
 
 .NOTPARALLEL:
-dev: deploy-init deploy-css deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-facial-expressions
+dev: deploy-init deploy-css deploy-rnnoise-binary deploy-tflite deploy-meet-models deploy-lib-jitsi-meet deploy-libflac deploy-olm deploy-tf-wasm deploy-face-landmarks
     $(WEBPACK_DEV_SERVER)
 
 source-package:
@@ -3116,15 +3116,6 @@ export default {
         room.sendEndpointMessage(to, payload);
     },
 
-    /**
-     * Sends a facial expression as a string and its duration as a number
-     * @param {object} payload - Object containing the {string} facialExpression
-     * and {number} duration
-     */
-    sendFacialExpression(payload) {
-        room.sendFacialExpression(payload);
-    },
-
     /**
      * Adds new listener.
      * @param {String} eventName the name of the event
config.js (20 changed lines)
@@ -752,18 +752,18 @@ var config = {
     // Enables sending participants' emails (if available) to callstats and other analytics
     // enableEmailInStats: false,
 
-    // Enables detecting faces of participants and get their expression and send it to other participants
-    // enableFacialRecognition: true,
-
-    // Enables displaying facial expressions in speaker stats
-    // enableDisplayFacialExpressions: true,
-
-    // faceCoordinatesSharing: {
+    // faceLandmarks: {
     //     // Enables sharing your face coordinates. Used for centering faces within a video.
-    //     enabled: false,
+    //     enableFaceCentering: false,
+
+    //     // Enables detecting face expressions and sharing data with other participants
+    //     enableFaceExpressionsDetection: false,
+
+    //     // Enables displaying face expressions in speaker stats
+    //     enableDisplayFaceExpressions: false,
 
-    //     // Minimum required face movement percentage threshold for sending new face coordinates data.
-    //     threshold: 10,
+    //     // Minimum required face movement percentage threshold for sending new face centering coordinates data.
+    //     faceCenteringThreshold: 10,
 
     //     // Milliseconds for processing a new image capture in order to detect face coordinates if they exist.
     //     captureInterval: 100
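For orientation, a minimal sketch of how the renamed block would look once uncommented in config.js. The values mirror the commented defaults above; the "was:" comments map the old option names to the new ones as implied by the rest of this commit and are only an editorial aid, not part of the file.

var config = {
    // ...other options...

    faceLandmarks: {
        // was: faceCoordinatesSharing.enabled
        enableFaceCentering: false,

        // was: enableFacialRecognition
        enableFaceExpressionsDetection: false,

        // was: enableDisplayFacialExpressions
        enableDisplayFaceExpressions: false,

        // was: faceCoordinatesSharing.threshold
        faceCenteringThreshold: 10,

        // was: faceCoordinatesSharing.captureInterval
        captureInterval: 100
    }
};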
@@ -20,7 +20,7 @@ import '../shared-video/middleware';
 import '../settings/middleware';
 import '../talk-while-muted/middleware';
 import '../virtual-background/middleware';
-import '../facial-recognition/middleware';
+import '../face-landmarks/middleware';
 import '../gifs/middleware';
 
 import './middlewares.any';
@@ -2,7 +2,7 @@
 
 import '../base/devices/reducer';
 import '../e2ee/reducer';
-import '../facial-recognition/reducer';
+import '../face-landmarks/reducer';
 import '../feedback/reducer';
 import '../local-recording/reducer';
 import '../no-audio-signal/reducer';
@@ -136,7 +136,6 @@ export default [
     'enableDisplayNameInStats',
     'enableEmailInStats',
     'enableEncodedTransformSupport',
-    'enableFacialRecognition',
     'enableIceRestart',
     'enableInsecureRoomNameWarning',
     'enableLayerSuspension',
@@ -152,7 +151,7 @@ export default [
     'enableTcc',
     'enableAutomaticUrlCopy',
     'etherpad_base',
-    'faceCoordinatesSharing',
+    'faceLandmarks',
     'failICE',
     'feedbackPercentage',
     'fileRecordingsEnabled',
@@ -0,0 +1,60 @@
+// @flow
+
+/**
+ * Redux action type dispatched in order to add a face expression.
+ *
+ * {
+ *     type: ADD_FACE_EXPRESSION,
+ *     faceExpression: string,
+ *     duration: number
+ * }
+ */
+export const ADD_FACE_EXPRESSION = 'ADD_FACE_EXPRESSION';
+
+/**
+ * Redux action type dispatched in order to add an expression to the face expressions buffer.
+ *
+ * {
+ *     type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
+ *     faceExpression: string
+ * }
+ */
+export const ADD_TO_FACE_EXPRESSIONS_BUFFER = 'ADD_TO_FACE_EXPRESSIONS_BUFFER';
+
+/**
+ * Redux action type dispatched in order to clear the face expressions buffer in the state.
+ *
+ * {
+ *     type: CLEAR_FACE_EXPRESSIONS_BUFFER
+ * }
+ */
+export const CLEAR_FACE_EXPRESSIONS_BUFFER = 'CLEAR_FACE_EXPRESSIONS_BUFFER';
+
+/**
+ * Redux action type dispatched in order to set recognition active in the state.
+ *
+ * {
+ *     type: START_FACE_LANDMARKS_DETECTION
+ * }
+ */
+export const START_FACE_LANDMARKS_DETECTION = 'START_FACE_LANDMARKS_DETECTION';
+
+/**
+ * Redux action type dispatched in order to set recognition inactive in the state.
+ *
+ * {
+ *     type: STOP_FACE_LANDMARKS_DETECTION
+ * }
+ */
+export const STOP_FACE_LANDMARKS_DETECTION = 'STOP_FACE_LANDMARKS_DETECTION';
+
+/**
+ * Redux action type dispatched in order to update coordinates of a detected face.
+ *
+ * {
+ *     type: UPDATE_FACE_COORDINATES,
+ *     faceBox: Object({ left, bottom, right, top }),
+ *     participantId: string
+ * }
+ */
+export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';
@@ -8,11 +8,11 @@ import { getLocalVideoTrack } from '../base/tracks';
 import { getBaseUrl } from '../base/util';
 
 import {
-    ADD_FACIAL_EXPRESSION,
-    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
-    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
-    START_FACIAL_RECOGNITION,
-    STOP_FACIAL_RECOGNITION,
+    ADD_FACE_EXPRESSION,
+    ADD_TO_FACE_EXPRESSIONS_BUFFER,
+    CLEAR_FACE_EXPRESSIONS_BUFFER,
+    START_FACE_LANDMARKS_DETECTION,
+    STOP_FACE_LANDMARKS_DETECTION,
     UPDATE_FACE_COORDINATES
 } from './actionTypes';
 import {
@@ -24,7 +24,7 @@ import {
     getDetectionInterval,
     sendDataToWorker,
     sendFaceBoxToParticipants,
-    sendFacialExpressionsWebhook
+    sendFaceExpressionsWebhook
 } from './functions';
 import logger from './logger';
 
@@ -34,19 +34,19 @@ import logger from './logger';
 let imageCapture;
 
 /**
- * Object where the facial expression worker is stored.
+ * Object where the face landmarks worker is stored.
  */
 let worker;
 
 /**
- * The last facial expression received from the worker.
+ * The last face expression received from the worker.
  */
-let lastFacialExpression;
+let lastFaceExpression;
 
 /**
- * The last facial expression timestamp.
+ * The last face expression timestamp.
 */
-let lastFacialExpressionTimestamp;
+let lastFaceExpressionTimestamp;
 
 /**
  * How many duplicate consecutive expression occurred.
@@ -65,7 +65,7 @@ let webhookSendInterval;
 let detectionInterval;
 
 /**
- * Loads the worker that predicts the facial expression.
+ * Loads the worker that detects the face landmarks.
  *
  * @returns {void}
 */
@@ -84,7 +84,7 @@ export function loadWorker() {
        }
 
        const baseUrl = `${getBaseUrl()}libs/`;
-        let workerUrl = `${baseUrl}facial-expressions-worker.min.js`;
+        let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;
 
        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
 
@@ -94,18 +94,18 @@ export function loadWorker() {
            const { faceExpression, faceBox } = e.data;
 
            if (faceExpression) {
-                if (faceExpression === lastFacialExpression) {
+                if (faceExpression === lastFaceExpression) {
                    duplicateConsecutiveExpressions++;
                } else {
-                    if (lastFacialExpression && lastFacialExpressionTimestamp) {
-                        dispatch(addFacialExpression(
-                            lastFacialExpression,
+                    if (lastFaceExpression && lastFaceExpressionTimestamp) {
+                        dispatch(addFaceExpression(
+                            lastFaceExpression,
                            duplicateConsecutiveExpressions + 1,
-                            lastFacialExpressionTimestamp
+                            lastFaceExpressionTimestamp
                        ));
                    }
-                    lastFacialExpression = faceExpression;
-                    lastFacialExpressionTimestamp = Date.now();
+                    lastFaceExpression = faceExpression;
+                    lastFaceExpressionTimestamp = Date.now();
                    duplicateConsecutiveExpressions = 0;
                }
            }
@@ -127,10 +127,10 @@ export function loadWorker() {
            }
        };
 
-        const { enableFacialRecognition, faceCoordinatesSharing } = getState()['features/base/config'];
+        const { faceLandmarks } = getState()['features/base/config'];
        const detectionTypes = [
-            faceCoordinatesSharing?.enabled && DETECTION_TYPES.FACE_BOX,
-            enableFacialRecognition && DETECTION_TYPES.FACE_EXPRESSIONS
+            faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
+            faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
        ].filter(Boolean);
 
        worker.postMessage({
@@ -139,7 +139,7 @@ export function loadWorker() {
            detectionTypes
        });
 
-        dispatch(startFacialRecognition());
+        dispatch(startFaceLandmarksDetection());
    };
 }
 
@@ -149,14 +149,14 @@ export function loadWorker() {
  * @param {Track | undefined} track - Track for which to start detecting faces.
  * @returns {Function}
 */
-export function startFacialRecognition(track) {
+export function startFaceLandmarksDetection(track) {
    return async function(dispatch: Function, getState: Function) {
        if (!worker) {
            return;
        }
 
        const state = getState();
-        const { recognitionActive } = state['features/facial-recognition'];
+        const { recognitionActive } = state['features/face-landmarks'];
 
        if (recognitionActive) {
            logger.log('Face recognition already active.');
@@ -167,18 +167,18 @@ export function startFacialRecognition(track) {
        const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
 
        if (localVideoTrack === undefined) {
-            logger.warn('Facial recognition is disabled due to missing local track.');
+            logger.warn('Face landmarks detection is disabled due to missing local track.');
 
            return;
        }
 
        const stream = localVideoTrack.jitsiTrack.getOriginalStream();
 
-        dispatch({ type: START_FACIAL_RECOGNITION });
+        dispatch({ type: START_FACE_LANDMARKS_DETECTION });
        logger.log('Start face recognition');
 
        const firstVideoTrack = stream.getVideoTracks()[0];
-        const { enableFacialRecognition, faceCoordinatesSharing } = state['features/base/config'];
+        const { faceLandmarks } = state['features/base/config'];
 
        imageCapture = new ImageCapture(firstVideoTrack);
 
@@ -186,16 +186,16 @@ export function startFacialRecognition(track) {
            sendDataToWorker(
                worker,
                imageCapture,
-                faceCoordinatesSharing?.threshold
+                faceLandmarks?.faceCenteringThreshold
            );
        }, getDetectionInterval(state));
 
-        if (enableFacialRecognition) {
+        if (faceLandmarks?.enableFaceExpressionsDetection) {
            webhookSendInterval = setInterval(async () => {
-                const result = await sendFacialExpressionsWebhook(getState());
+                const result = await sendFaceExpressionsWebhook(getState());
 
                if (result) {
-                    dispatch(clearFacialExpressionBuffer());
+                    dispatch(clearFaceExpressionBuffer());
                }
            }, WEBHOOK_SEND_TIME_INTERVAL);
        }
@@ -207,14 +207,14 @@ export function startFacialRecognition(track) {
  *
  * @returns {void}
 */
-export function stopFacialRecognition() {
+export function stopFaceLandmarksDetection() {
    return function(dispatch: Function) {
-        if (lastFacialExpression && lastFacialExpressionTimestamp) {
+        if (lastFaceExpression && lastFaceExpressionTimestamp) {
            dispatch(
-                addFacialExpression(
-                    lastFacialExpression,
+                addFaceExpression(
+                    lastFaceExpression,
                    duplicateConsecutiveExpressions + 1,
-                    lastFacialExpressionTimestamp
+                    lastFaceExpressionTimestamp
                )
            );
        }
@@ -227,26 +227,26 @@ export function stopFacialRecognition() {
        detectionInterval = null;
        imageCapture = null;
 
-        dispatch({ type: STOP_FACIAL_RECOGNITION });
+        dispatch({ type: STOP_FACE_LANDMARKS_DETECTION });
        logger.log('Stop face recognition');
    };
 }
 
 /**
- * Adds a new facial expression and its duration.
+ * Adds a new face expression and its duration.
  *
- * @param {string} facialExpression - Facial expression to be added.
- * @param {number} duration - Duration in seconds of the facial expression.
- * @param {number} timestamp - Duration in seconds of the facial expression.
+ * @param {string} faceExpression - Face expression to be added.
+ * @param {number} duration - Duration in seconds of the face expression.
+ * @param {number} timestamp - Timestamp of when the face expression was first detected.
  * @returns {Object}
 */
-function addFacialExpression(facialExpression: string, duration: number, timestamp: number) {
+function addFaceExpression(faceExpression: string, duration: number, timestamp: number) {
    return function(dispatch: Function, getState: Function) {
        const finalDuration = duration * getDetectionInterval(getState()) / 1000;
 
        dispatch({
-            type: ADD_FACIAL_EXPRESSION,
-            facialExpression,
+            type: ADD_FACE_EXPRESSION,
+            faceExpression,
            duration: finalDuration,
            timestamp
        });
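As a worked example of the duration bookkeeping in addFaceExpression above (a sketch with illustrative numbers, assuming faceLandmarks.captureInterval is 100 ms): the worker reports one expression per detection tick, consecutive duplicates are only counted, and the collapsed run is flushed as a single action.

// Sketch only: how finalDuration is derived for a run of identical detections.
const captureInterval = 100;                // ms, what getDetectionInterval(state) returns here
const duplicateConsecutiveExpressions = 14; // 14 repeats observed after the first 'happy'

const duration = duplicateConsecutiveExpressions + 1;    // 15 ticks of 'happy'
const finalDuration = duration * captureInterval / 1000; // 1.5 seconds

// The dispatched action then looks roughly like:
// { type: ADD_FACE_EXPRESSION, faceExpression: 'happy', duration: 1.5, timestamp }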
@@ -254,25 +254,25 @@ function addFacialExpression(facialExpression: string, duration: number, timesta
 }
 
 /**
- * Adds a facial expression with its timestamp to the facial expression buffer.
+ * Adds a face expression with its timestamp to the face expression buffer.
  *
- * @param {Object} facialExpression - Object containing facial expression string and its timestamp.
+ * @param {Object} faceExpression - Object containing face expression string and its timestamp.
  * @returns {Object}
 */
-export function addToFacialExpressionsBuffer(facialExpression: Object) {
+export function addToFaceExpressionsBuffer(faceExpression: Object) {
    return {
-        type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
-        facialExpression
+        type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
+        faceExpression
    };
 }
 
 /**
- * Clears the facial expressions array in the state.
+ * Clears the face expressions array in the state.
  *
  * @returns {Object}
 */
-function clearFacialExpressionBuffer() {
+function clearFaceExpressionBuffer() {
    return {
-        type: CLEAR_FACIAL_EXPRESSIONS_BUFFER
+        type: CLEAR_FACE_EXPRESSIONS_BUFFER
    };
 }
@@ -1,6 +1,6 @@
 // @flow
 
-export const FACIAL_EXPRESSION_EMOJIS = {
+export const FACE_EXPRESSIONS_EMOJIS = {
     happy: '😊',
     neutral: '😐',
     sad: '🙁',
@@ -11,7 +11,7 @@ export const FACIAL_EXPRESSION_EMOJIS = {
     // disgusted: '🤢'
 };
 
-export const FACIAL_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ];
+export const FACE_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ];
 
 /**
  * Time in ms used for sending expression.
@@ -14,26 +14,26 @@ if (typeof OffscreenCanvas === 'undefined') {
 }
 
 /**
- * Sends the facial expression with its duration to all the other participants.
+ * Sends the face expression with its duration to all the other participants.
  *
  * @param {Object} conference - The current conference.
- * @param {string} facialExpression - Facial expression to be sent.
- * @param {number} duration - The duration of the facial expression in seconds.
+ * @param {string} faceExpression - Face expression to be sent.
+ * @param {number} duration - The duration of the face expression in seconds.
  * @returns {void}
 */
-export function sendFacialExpressionToParticipants(
+export function sendFaceExpressionToParticipants(
    conference: Object,
-    facialExpression: string,
+    faceExpression: string,
    duration: number
 ): void {
    try {
        conference.sendEndpointMessage('', {
-            type: 'facial_expression',
-            facialExpression,
+            type: 'face_landmark',
+            faceExpression,
            duration
        });
    } catch (err) {
-        logger.warn('Could not broadcast the facial expression to the other participants', err);
+        logger.warn('Could not broadcast the face expression to the other participants', err);
    }
 
 }
@@ -60,44 +60,44 @@ export function sendFaceBoxToParticipants(
 }
 
 /**
- * Sends the facial expression with its duration to xmpp server.
+ * Sends the face expression with its duration to xmpp server.
  *
  * @param {Object} conference - The current conference.
- * @param {string} facialExpression - Facial expression to be sent.
- * @param {number} duration - The duration of the facial expression in seconds.
+ * @param {string} faceExpression - Face expression to be sent.
+ * @param {number} duration - The duration of the face expression in seconds.
  * @returns {void}
 */
-export function sendFacialExpressionToServer(
+export function sendFaceExpressionToServer(
    conference: Object,
-    facialExpression: string,
+    faceExpression: string,
    duration: number
 ): void {
    try {
-        conference.sendFacialExpression({
-            facialExpression,
+        conference.sendFaceLandmarks({
+            faceExpression,
            duration
        });
    } catch (err) {
-        logger.warn('Could not send the facial expression to xmpp server', err);
+        logger.warn('Could not send the face expression to xmpp server', err);
    }
 }
 
 /**
- * Sends facial expression to backend.
+ * Sends face expression to backend.
  *
  * @param {Object} state - Redux state.
  * @returns {boolean} - True if sent, false otherwise.
 */
-export async function sendFacialExpressionsWebhook(state: Object) {
+export async function sendFaceExpressionsWebhook(state: Object) {
    const { webhookProxyUrl: url } = state['features/base/config'];
    const { conference } = state['features/base/conference'];
    const { jwt } = state['features/base/jwt'];
    const { connection } = state['features/base/connection'];
    const jid = connection.getJid();
    const localParticipant = getLocalParticipant(state);
-    const { facialExpressionsBuffer } = state['features/facial-recognition'];
+    const { faceExpressionsBuffer } = state['features/face-landmarks'];
 
-    if (facialExpressionsBuffer.length === 0) {
+    if (faceExpressionsBuffer.length === 0) {
        return false;
    }
 
@@ -110,7 +110,7 @@ export async function sendFacialExpressionsWebhook(state: Object) {
        meetingFqn: extractFqnFromPath(),
        sessionId: conference.sessionId,
        submitted: Date.now(),
-        emotions: facialExpressionsBuffer,
+        emotions: faceExpressionsBuffer,
        participantId: localParticipant.jwtId,
        participantName: localParticipant.name,
        participantJid: jid
@@ -192,7 +192,7 @@ export async function sendDataToWorker(
  * @returns {Object}
 */
 function getFaceBoxForId(id: string, state: Object) {
-    return state['features/facial-recognition'].faceBoxes[id];
+    return state['features/face-landmarks'].faceBoxes[id];
 }
 
 /**
@@ -221,7 +221,7 @@ export function getVideoObjectPosition(state: Object, id: string) {
  * @returns {number} - Number of milliseconds for doing face detection.
 */
 export function getDetectionInterval(state: Object) {
-    const { faceCoordinatesSharing } = state['features/base/config'];
+    const { faceLandmarks } = state['features/base/config'];
 
-    return Math.min(faceCoordinatesSharing?.captureInterval || SEND_IMAGE_INTERVAL_MS);
+    return Math.max(faceLandmarks?.captureInterval || SEND_IMAGE_INTERVAL_MS);
 }
@@ -2,4 +2,4 @@
 
 import { getLogger } from '../base/logging/functions';
 
-export default getLogger('features/facial-recognition');
+export default getLogger('features/face-landmarks');
@@ -10,20 +10,20 @@ import { getParticipantCount } from '../base/participants';
 import { MiddlewareRegistry } from '../base/redux';
 import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks';
 
-import { ADD_FACIAL_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes';
+import { ADD_FACE_EXPRESSION, UPDATE_FACE_COORDINATES } from './actionTypes';
 import {
-    addToFacialExpressionsBuffer,
+    addToFaceExpressionsBuffer,
    loadWorker,
-    stopFacialRecognition,
-    startFacialRecognition
+    stopFaceLandmarksDetection,
+    startFaceLandmarksDetection
 } from './actions';
 import { FACE_BOX_EVENT_TYPE } from './constants';
-import { sendFacialExpressionToParticipants, sendFacialExpressionToServer } from './functions';
+import { sendFaceExpressionToParticipants, sendFaceExpressionToServer } from './functions';
 
 
 MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
-    const { enableFacialRecognition, faceCoordinatesSharing } = getState()['features/base/config'];
-    const isEnabled = enableFacialRecognition || faceCoordinatesSharing?.enabled;
+    const { faceLandmarks } = getState()['features/base/config'];
+    const isEnabled = faceLandmarks?.enableFaceCentering || faceLandmarks?.enableFaceExpressionsDetection;
 
    if (action.type === CONFERENCE_JOINED) {
        if (isEnabled) {
@@ -57,7 +57,7 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
 
    switch (action.type) {
    case CONFERENCE_WILL_LEAVE : {
-        dispatch(stopFacialRecognition());
+        dispatch(stopFaceLandmarksDetection());
 
        return next(action);
    }
@@ -66,7 +66,7 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
 
        if (videoType === 'camera' && isLocal()) {
            // need to pass this since the track is not yet added in the store
-            dispatch(startFacialRecognition(action.track));
+            dispatch(startFaceLandmarksDetection(action.track));
        }
 
        return next(action);
@@ -83,9 +83,9 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
        if (muted !== undefined) {
            // addresses video mute state changes
            if (muted) {
-                dispatch(stopFacialRecognition());
+                dispatch(stopFaceLandmarksDetection());
            } else {
-                dispatch(startFacialRecognition());
+                dispatch(startFaceLandmarksDetection());
            }
        }
 
@@ -95,21 +95,21 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
        const { jitsiTrack: { isLocal, videoType } } = action.track;
 
        if (videoType === 'camera' && isLocal()) {
-            dispatch(stopFacialRecognition());
+            dispatch(stopFaceLandmarksDetection());
        }
 
        return next(action);
    }
-    case ADD_FACIAL_EXPRESSION: {
+    case ADD_FACE_EXPRESSION: {
        const state = getState();
        const conference = getCurrentConference(state);
 
        if (getParticipantCount(state) > 1) {
-            sendFacialExpressionToParticipants(conference, action.facialExpression, action.duration);
+            sendFaceExpressionToParticipants(conference, action.faceExpression, action.duration);
        }
-        sendFacialExpressionToServer(conference, action.facialExpression, action.duration);
-        dispatch(addToFacialExpressionsBuffer({
-            emotion: action.facialExpression,
+        sendFaceExpressionToServer(conference, action.faceExpression, action.duration);
+        dispatch(addToFaceExpressionsBuffer({
+            emotion: action.faceExpression,
            timestamp: action.timestamp
        }));
@@ -3,17 +3,17 @@
 import { ReducerRegistry } from '../base/redux';
 
 import {
-    ADD_FACIAL_EXPRESSION,
-    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
-    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
-    START_FACIAL_RECOGNITION,
-    STOP_FACIAL_RECOGNITION,
+    ADD_FACE_EXPRESSION,
+    ADD_TO_FACE_EXPRESSIONS_BUFFER,
+    CLEAR_FACE_EXPRESSIONS_BUFFER,
+    START_FACE_LANDMARKS_DETECTION,
+    STOP_FACE_LANDMARKS_DETECTION,
    UPDATE_FACE_COORDINATES
 } from './actionTypes';
 
 const defaultState = {
    faceBoxes: {},
-    facialExpressions: {
+    faceExpressions: {
        happy: 0,
        neutral: 0,
        surprised: 0,
@@ -22,36 +22,40 @@ const defaultState = {
        disgusted: 0,
        sad: 0
    },
-    facialExpressionsBuffer: [],
+    faceExpressionsBuffer: [],
    recognitionActive: false
 };
 
-ReducerRegistry.register('features/facial-recognition', (state = defaultState, action) => {
+ReducerRegistry.register('features/face-landmarks', (state = defaultState, action) => {
    switch (action.type) {
-    case ADD_FACIAL_EXPRESSION: {
-        state.facialExpressions[action.facialExpression] += action.duration;
-
-        return state;
-    }
-    case ADD_TO_FACIAL_EXPRESSIONS_BUFFER: {
+    case ADD_FACE_EXPRESSION: {
        return {
            ...state,
-            facialExpressionsBuffer: [ ...state.facialExpressionsBuffer, action.facialExpression ]
+            faceExpressions: {
+                ...state.faceExpressions,
+                [action.faceExpression]: state.faceExpressions[action.faceExpression] + action.duration
+            }
        };
    }
-    case CLEAR_FACIAL_EXPRESSIONS_BUFFER: {
+    case ADD_TO_FACE_EXPRESSIONS_BUFFER: {
        return {
            ...state,
-            facialExpressionsBuffer: []
+            faceExpressionsBuffer: [ ...state.faceExpressionsBuffer, action.faceExpression ]
        };
    }
-    case START_FACIAL_RECOGNITION: {
+    case CLEAR_FACE_EXPRESSIONS_BUFFER: {
+        return {
+            ...state,
+            faceExpressionsBuffer: []
+        };
+    }
+    case START_FACE_LANDMARKS_DETECTION: {
        return {
            ...state,
            recognitionActive: true
        };
    }
-    case STOP_FACIAL_RECOGNITION: {
+    case STOP_FACE_LANDMARKS_DETECTION: {
        return {
            ...state,
            recognitionActive: false
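One behavioral nuance in the ADD_FACE_EXPRESSION case above: the old reducer mutated state.facialExpressions in place and returned the same object, whereas the renamed reducer returns a fresh state object, so subscribers relying on reference equality can observe the update. A small illustrative transition (values invented for the example):

// state.faceExpressions before: { happy: 2, neutral: 0, ... }
// action:                       { type: ADD_FACE_EXPRESSION, faceExpression: 'happy', duration: 1.5 }
// state.faceExpressions after:  { happy: 3.5, neutral: 0, ... }  // new object, previous state untouched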
@@ -1,4 +1,4 @@
-# Facial Recognition and Facial Expression Models and Weights
+# Face Landmarks Detection and Face Expression Models and Weights
 
 ### Tiny Face Detector
 
@@ -6,7 +6,7 @@ It is a realtime face detector.
 
 ### Face Expression Recognition Model
 
-It is a models that recognizes facial expressions.
+It is a model that recognizes face expressions.
 
 ### Usage
 
@@ -1,59 +0,0 @@
-// @flow
-
-/**
- * Redux action type dispatched in order to add a facial expression.
- *
- * {
- *     type: ADD_FACIAL_EXPRESSION,
- *     facialExpression: string,
- *     duration: number
- * }
- */
-export const ADD_FACIAL_EXPRESSION = 'ADD_FACIAL_EXPRESSION';
-
-/**
- * Redux action type dispatched in order to set recognition active in the state.
- *
- * {
- *     type: START_FACIAL_RECOGNITION
- * }
- */
-export const START_FACIAL_RECOGNITION = 'START_FACIAL_RECOGNITION';
-
-/**
- * Redux action type dispatched in order to set recognition inactive in the state.
- *
- * {
- *     type: STOP_FACIAL_RECOGNITION
- * }
- */
-export const STOP_FACIAL_RECOGNITION = 'STOP_FACIAL_RECOGNITION';
-
-/**
- * Redux action type dispatched in order to clear the facial expressions buffer in the state.
- *
- * {
- *     type: CLEAR_FACIAL_EXPRESSIONS_BUFFER
- * }
- */
-export const CLEAR_FACIAL_EXPRESSIONS_BUFFER = 'CLEAR_FACIAL_EXPRESSIONS_BUFFER';
-
-/**
- * Redux action type dispatched in order to add a expression to the facial expressions buffer.
- *
- * {
- *     type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER
- * }
- */
-export const ADD_TO_FACIAL_EXPRESSIONS_BUFFER = 'ADD_TO_FACIAL_EXPRESSIONS_BUFFER ';
-
-/**
- * Redux action type dispatched in order to update coordinates of a detected face.
- *
- * {
- *     type: UPDATE_FACE_COORDINATES,
- *     faceBox: Object({ left, bottom, right, top }),
- *     participantId: string
- * }
- */
-export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';
@@ -25,7 +25,7 @@ import {
    getFakeScreenshareParticipantTrack,
    updateLastTrackVideoMediaEvent
 } from '../../../base/tracks';
-import { getVideoObjectPosition } from '../../../facial-recognition/functions';
+import { getVideoObjectPosition } from '../../../face-landmarks/functions';
 import { hideGif, showGif } from '../../../gifs/actions';
 import { getGifDisplayMode, getGifForParticipant } from '../../../gifs/functions';
 import { PresenceLabel } from '../../../presence-status';
@@ -106,13 +106,13 @@ class RTCStats {
    }
 
    /**
-     * Send facial expression data, the data will be processed by rtcstats-server and saved in the dump file.
+     * Send face expression data, the data will be processed by rtcstats-server and saved in the dump file.
     *
-     * @param {Object} facialExpressionData - Facial expression data to be saved in the rtcstats dump.
+     * @param {Object} faceExpressionData - Face expression data to be saved in the rtcstats dump.
     * @returns {void}
     */
-    sendFacialExpressionData(facialExpressionData) {
-        this.trace && this.trace.statsEntry('facialExpression', null, facialExpressionData);
+    sendFaceExpressionData(faceExpressionData) {
+        this.trace && this.trace.statsEntry('faceExpression', null, faceExpressionData);
    }
 
    /**
@@ -7,7 +7,7 @@ import { CONFERENCE_UNIQUE_ID_SET, E2E_RTT_CHANGED, getConferenceOptions, getRoo
 import { LIB_WILL_INIT } from '../base/lib-jitsi-meet';
 import { DOMINANT_SPEAKER_CHANGED, getLocalParticipant } from '../base/participants';
 import { MiddlewareRegistry } from '../base/redux';
-import { ADD_FACIAL_EXPRESSION } from '../facial-recognition/actionTypes';
+import { ADD_FACE_EXPRESSION } from '../face-landmarks/actionTypes';
 
 import RTCStats from './RTCStats';
 import { canSendRtcstatsData, isRtcstatsEnabled } from './functions';
@@ -117,13 +117,13 @@ MiddlewareRegistry.register(store => next => action => {
        }
        break;
    }
-    case ADD_FACIAL_EXPRESSION: {
+    case ADD_FACE_EXPRESSION: {
        if (canSendRtcstatsData(state)) {
-            const { duration, facialExpression } = action;
+            const { duration, faceExpression } = action;
 
-            RTCStats.sendFacialExpressionData({
+            RTCStats.sendFaceExpressionData({
                duration,
-                facialExpression
+                faceExpression
            });
        }
        break;
@@ -48,10 +48,10 @@ export const INIT_REORDER_STATS = 'INIT_REORDER_STATS';
 export const RESET_SEARCH_CRITERIA = 'RESET_SEARCH_CRITERIA'
 
 /**
- * Action type to toggle the facial expressions grid.
+ * Action type to toggle the face expressions grid.
  * {
- *     type: TOGGLE_FACIAL_EXPRESSIONS
+ *     type: TOGGLE_FACE_EXPRESSIONS
  * }
 */
-export const TOGGLE_FACIAL_EXPRESSIONS = 'SHOW_FACIAL_EXPRESSIONS';
+export const TOGGLE_FACE_EXPRESSIONS = 'SHOW_FACE_EXPRESSIONS';
 
@@ -6,7 +6,7 @@ import {
    UPDATE_STATS,
    INIT_REORDER_STATS,
    RESET_SEARCH_CRITERIA,
-    TOGGLE_FACIAL_EXPRESSIONS
+    TOGGLE_FACE_EXPRESSIONS
 } from './actionTypes';
 
 /**
@@ -71,12 +71,12 @@ export function resetSearchCriteria() {
 }
 
 /**
- * Toggles the facial expressions grid.
+ * Toggles the face expressions grid.
  *
  * @returns {Object}
 */
-export function toggleFacialExpressions() {
+export function toggleFaceExpressions() {
    return {
-        type: TOGGLE_FACIAL_EXPRESSIONS
+        type: TOGGLE_FACE_EXPRESSIONS
    };
 }
@@ -21,13 +21,13 @@ const abstractSpeakerStatsList = (speakerStatsItem: Function, itemStyles?: Objec
    const dispatch = useDispatch();
    const { t } = useTranslation();
    const conference = useSelector(state => state['features/base/conference'].conference);
-    const { stats: speakerStats, showFacialExpressions } = useSelector(state => state['features/speaker-stats']);
+    const { stats: speakerStats, showFaceExpressions } = useSelector(state => state['features/speaker-stats']);
    const localParticipant = useSelector(getLocalParticipant);
    const { defaultRemoteDisplayName } = useSelector(
        state => state['features/base/config']) || {};
-    const { enableDisplayFacialExpressions } = useSelector(state => state['features/base/config']) || {};
-    const { facialExpressions: localFacialExpressions } = useSelector(
-        state => state['features/facial-recognition']) || {};
+    const { faceLandmarks } = useSelector(state => state['features/base/config']) || {};
+    const { faceExpressions: localFaceExpressions } = useSelector(
+        state => state['features/face-landmarks']) || {};
 
    /**
     * Update the internal state with the latest speaker stats.
@@ -48,8 +48,8 @@ const abstractSpeakerStatsList = (speakerStatsItem: Function, itemStyles?: Objec
            ? `${localParticipant.name} (${meString})`
            : meString
        );
-        if (enableDisplayFacialExpressions) {
-            stats[userId].setFacialExpressions(localFacialExpressions);
+        if (faceLandmarks?.enableDisplayFaceExpressions) {
+            stats[userId].setFaceExpressions(localFaceExpressions);
        }
    }
 
@@ -87,11 +87,11 @@ const abstractSpeakerStatsList = (speakerStatsItem: Function, itemStyles?: Objec
        props.dominantSpeakerTime = statsModel.getTotalDominantSpeakerTime();
        props.participantId = userId;
        props.hasLeft = statsModel.hasLeft();
-        if (showFacialExpressions) {
-            props.facialExpressions = statsModel.getFacialExpressions();
+        if (showFaceExpressions) {
+            props.faceExpressions = statsModel.getFaceExpressions();
        }
        props.hidden = statsModel.hidden;
-        props.showFacialExpressions = showFacialExpressions;
+        props.showFaceExpressions = showFaceExpressions;
        props.displayName = statsModel.getDisplayName() || defaultRemoteDisplayName;
        if (itemStyles) {
            props.styles = itemStyles;
@@ -55,7 +55,7 @@ const useStyles = makeStyles(theme => {
 });
 
 /**
- * The type of the React {@code Component} props of {@link ToggleFacialExpressionsButton}.
+ * The type of the React {@code Component} props of {@link ToggleFaceExpressionsButton}.
 */
 type Props = {
 
@@ -67,16 +67,16 @@ type Props = {
    /**
     * The state of the button.
     */
-    showFacialExpressions: boolean,
+    showFaceExpressions: boolean,
 
 };
 
 /**
- * React component for toggling facial expressions grid.
+ * React component for toggling face expressions grid.
  *
  * @returns {React$Element<any>}
 */
-export default function FacialExpressionsSwitch({ onChange, showFacialExpressions }: Props) {
+export default function FaceExpressionsSwitch({ onChange, showFaceExpressions }: Props) {
    const classes = useStyles();
    const { t } = useTranslation();
 
@@ -84,14 +84,14 @@ export default function FacialExpressionsSwitch({ onChange, showFacialExpression
        <div className = { classes.switchContainer } >
            <label
                className = { classes.switchLabel }
-                htmlFor = 'facial-expressions-switch'>
+                htmlFor = 'face-expressions-switch'>
                { t('speakerStats.displayEmotions')}
            </label>
            <Switch
-                id = 'facial-expressions-switch'
+                id = 'face-expressions-switch'
                onValueChange = { onChange }
                trackColor = {{ false: 'blue' }}
-                value = { showFacialExpressions } />
+                value = { showFaceExpressions } />
        </div>
    );
 }
@@ -6,14 +6,14 @@ import { useSelector, useDispatch } from 'react-redux';
 
 import { Dialog } from '../../../base/dialog';
 import { escapeRegexp } from '../../../base/util';
-import { resetSearchCriteria, toggleFacialExpressions, initSearch } from '../../actions';
+import { resetSearchCriteria, toggleFaceExpressions, initSearch } from '../../actions';
 import {
    DISPLAY_SWITCH_BREAKPOINT,
    MOBILE_BREAKPOINT,
    RESIZE_SEARCH_SWITCH_CONTAINER_BREAKPOINT
 } from '../../constants';
 
-import FacialExpressionsSwitch from './FacialExpressionsSwitch';
+import FaceExpressionsSwitch from './FaceExpressionsSwitch';
 import SpeakerStatsLabels from './SpeakerStatsLabels';
 import SpeakerStatsList from './SpeakerStatsList';
 import SpeakerStatsSearch from './SpeakerStatsSearch';
@@ -88,16 +88,16 @@ const useStyles = makeStyles(theme => {
 });
 
 const SpeakerStats = () => {
-    const { enableDisplayFacialExpressions } = useSelector(state => state['features/base/config']);
-    const { showFacialExpressions } = useSelector(state => state['features/speaker-stats']);
+    const { faceLandmarks } = useSelector(state => state['features/base/config']);
+    const { showFaceExpressions } = useSelector(state => state['features/speaker-stats']);
    const { clientWidth } = useSelector(state => state['features/base/responsive-ui']);
-    const displaySwitch = enableDisplayFacialExpressions && clientWidth > DISPLAY_SWITCH_BREAKPOINT;
+    const displaySwitch = faceLandmarks?.enableDisplayFaceExpressions && clientWidth > DISPLAY_SWITCH_BREAKPOINT;
    const displayLabels = clientWidth > MOBILE_BREAKPOINT;
    const dispatch = useDispatch();
    const classes = useStyles();
 
-    const onToggleFacialExpressions = useCallback(() =>
-        dispatch(toggleFacialExpressions())
+    const onToggleFaceExpressions = useCallback(() =>
+        dispatch(toggleFaceExpressions())
    , [ dispatch ]);
 
    const onSearch = useCallback((criteria = '') => {
@@ -106,7 +106,7 @@ const SpeakerStats = () => {
    , [ dispatch ]);
 
    useEffect(() => {
-        showFacialExpressions && !displaySwitch && dispatch(toggleFacialExpressions());
+        showFaceExpressions && !displaySwitch && dispatch(toggleFaceExpressions());
    }, [ clientWidth ]);
    useEffect(() => () => dispatch(resetSearchCriteria()), []);
 
@@ -117,12 +117,12 @@ const SpeakerStats = () => {
            hideCancelButton = { true }
            submitDisabled = { true }
            titleKey = 'speakerStats.speakerStats'
-            width = { showFacialExpressions ? '664px' : 'small' }>
+            width = { showFaceExpressions ? '664px' : 'small' }>
            <div className = { classes.speakerStats }>
                <div
                    className = {
                        `${classes.searchSwitchContainer}
-                        ${showFacialExpressions ? classes.searchSwitchContainerExpressionsOn : ''}`
+                        ${showFaceExpressions ? classes.searchSwitchContainerExpressionsOn : ''}`
                    }>
                    <div
                        className = {
@@ -134,15 +134,15 @@ const SpeakerStats = () => {
                    </div>
 
                    { displaySwitch
-                        && <FacialExpressionsSwitch
-                            onChange = { onToggleFacialExpressions }
-                            showFacialExpressions = { showFacialExpressions } />
+                        && <FaceExpressionsSwitch
+                            onChange = { onToggleFaceExpressions }
+                            showFaceExpressions = { showFaceExpressions } />
                    }
                </div>
                { displayLabels && (
                    <div className = { classes.labelsContainer }>
                        <SpeakerStatsLabels
-                            showFacialExpressions = { showFacialExpressions ?? false } />
+                            showFaceExpressions = { showFaceExpressions ?? false } />
                        <div className = { classes.separator } />
                    </div>
                )}
@@ -5,7 +5,7 @@ import React from 'react';
 import { Avatar, StatelessAvatar } from '../../../base/avatar';
 import { getInitials } from '../../../base/avatar/functions';
 import BaseTheme from '../../../base/ui/components/BaseTheme';
-import { FACIAL_EXPRESSIONS } from '../../../facial-recognition/constants.js';
+import { FACE_EXPRESSIONS } from '../../../face-landmarks/constants.js';
 
 import TimeElapsed from './TimeElapsed';
 
@@ -20,15 +20,15 @@ type Props = {
    displayName: string,
 
    /**
-     * The object that has as keys the facial expressions of the
+     * The object that has as keys the face expressions of the
     * participant and as values a number that represents the count.
     */
-    facialExpressions: Object,
+    faceExpressions: Object,
 
    /**
-     * True if the facial recognition is not disabled.
+     * True if the face expressions detection is not disabled.
     */
-    showFacialExpressions: boolean,
+    showFaceExpressions: boolean,
 
    /**
     * The total milliseconds the participant has been dominant speaker.
@@ -71,22 +71,22 @@ const SpeakerStatsItem = (props: Props) => {
    const rowDisplayClass = `row ${hasLeftClass} ${props.styles.item}`;
    const expressionClass = 'expression';
    const nameTimeClass = `name-time${
-        props.showFacialExpressions ? ' name-time_expressions-on' : ''
+        props.showFaceExpressions ? ' name-time_expressions-on' : ''
    }`;
    const timeClass = `${props.styles.time} ${props.isDominantSpeaker ? props.styles.dominant : ''}`;
 
 
-    const FacialExpressions = () => FACIAL_EXPRESSIONS.map(
+    const FaceExpressions = () => FACE_EXPRESSIONS.map(
        expression => (
            <div
                aria-label = { props.t(`speakerStats.${expression}`) }
                className = {
                    `${expressionClass} ${
-                        props.facialExpressions[expression] === 0 ? props.styles.hasLeft : ''
+                        props.faceExpressions[expression] === 0 ? props.styles.hasLeft : ''
                    }`
                }
                key = { expression }>
-                { props.facialExpressions[expression] }
+                { props.faceExpressions[expression] }
            </div>
        )
    );
@@ -123,10 +123,10 @@ const SpeakerStatsItem = (props: Props) => {
                    time = { props.dominantSpeakerTime } />
            </div>
        </div>
-        { props.showFacialExpressions
+        { props.showFaceExpressions
            && (
                <div className = { `expressions ${props.styles.expressions}` }>
-                    <FacialExpressions />
+                    <FaceExpressions />
                </div>
            )}
    </div>
@@ -4,7 +4,7 @@ import React from 'react';
 import { useTranslation } from 'react-i18next';
 
 import { Tooltip } from '../../../base/tooltip';
-import { FACIAL_EXPRESSION_EMOJIS } from '../../../facial-recognition/constants.js';
+import { FACE_EXPRESSIONS_EMOJIS } from '../../../face-landmarks/constants.js';
 
 const useStyles = makeStyles(theme => {
    return {
@@ -26,15 +26,15 @@ const useStyles = makeStyles(theme => {
 type Props = {
 
    /**
-     * True if the facial recognition is not disabled.
+     * True if the face expressions detection is not disabled.
     */
-    showFacialExpressions: boolean,
+    showFaceExpressions: boolean,
 };
 
 const SpeakerStatsLabels = (props: Props) => {
    const { t } = useTranslation();
    const classes = useStyles();
-    const FacialExpressionsLabels = () => Object.keys(FACIAL_EXPRESSION_EMOJIS).map(
+    const FaceExpressionsLabels = () => Object.keys(FACE_EXPRESSIONS_EMOJIS).map(
        expression => (
            <div
                className = 'expression'
@@ -43,7 +43,7 @@ const SpeakerStatsLabels = (props: Props) => {
                content = { t(`speakerStats.${expression}`) }
                position = { 'top' } >
                <div>
-                    { FACIAL_EXPRESSION_EMOJIS[expression] }
+                    { FACE_EXPRESSIONS_EMOJIS[expression] }
                </div>
 
            </Tooltip>
@@ -51,7 +51,7 @@ const SpeakerStatsLabels = (props: Props) => {
        )
    );
    const nameTimeClass = `name-time${
-        props.showFacialExpressions ? ' name-time_expressions-on' : ''
+        props.showFaceExpressions ? ' name-time_expressions-on' : ''
    }`;
 
    return (
@@ -67,9 +67,9 @@ const SpeakerStatsLabels = (props: Props) => {
            </div>
        </div>
        {
-            props.showFacialExpressions
+            props.showFaceExpressions
                && <div className = { `expressions ${classes.emojis}` }>
-                    <FacialExpressionsLabels />
+                    <FaceExpressionsLabels />
                </div>
        }
    </div>
@@ -9,7 +9,7 @@ import {
    UPDATE_STATS,
    INIT_REORDER_STATS,
    RESET_SEARCH_CRITERIA,
-    TOGGLE_FACIAL_EXPRESSIONS
+    TOGGLE_FACE_EXPRESSIONS
 } from './actionTypes';
 
 /**
@@ -22,7 +22,7 @@ const INITIAL_STATE = {
    isOpen: false,
    pendingReorder: true,
    criteria: null,
-    showFacialExpressions: false
+    showFaceExpressions: false
 };
 
 ReducerRegistry.register('features/speaker-stats', (state = _getInitialState(), action) => {
@@ -35,10 +35,10 @@ ReducerRegistry.register('features/speaker-stats', (state = _getInitialState(),
        return _initReorderStats(state);
    case RESET_SEARCH_CRITERIA:
        return _updateCriteria(state, { criteria: null });
-    case TOGGLE_FACIAL_EXPRESSIONS: {
+    case TOGGLE_FACE_EXPRESSIONS: {
        return {
            ...state,
-            showFacialExpressions: !state.showFacialExpressions
+            showFaceExpressions: !state.showFaceExpressions
        };
    }
    }
@@ -77,10 +77,10 @@ function on_message(event)
        room.speakerStats['dominantSpeakerId'] = occupant.jid;
    end
 
-    local facialExpression = event.stanza:get_child('facialExpression', 'http://jitsi.org/jitmeet');
+    local faceExpression = event.stanza:get_child('faceExpression', 'http://jitsi.org/jitmeet');
 
-    if facialExpression then
-        local roomAddress = facialExpression.attr.room;
+    if faceExpression then
+        local roomAddress = faceExpression.attr.room;
        local room = get_room_from_jid(room_jid_match_rewrite(roomAddress));
 
        if not room then
@@ -98,9 +98,9 @@ function on_message(event)
            log("warn", "No occupant %s found for %s", from, roomAddress);
            return false;
        end
-        local facialExpressions = room.speakerStats[occupant.jid].facialExpressions;
-        facialExpressions[facialExpression.attr.expression] =
-            facialExpressions[facialExpression.attr.expression] + tonumber(facialExpression.attr.duration);
+        local faceExpressions = room.speakerStats[occupant.jid].faceExpressions;
+        faceExpressions[faceExpression.attr.expression] =
+            faceExpressions[faceExpression.attr.expression] + tonumber(faceExpression.attr.duration);
    end
 
    return true
@@ -117,7 +117,7 @@ function new_SpeakerStats(nick, context_user)
        nick = nick;
        context_user = context_user;
        displayName = nil;
-        facialExpressions = {
+        faceExpressions = {
            happy = 0,
            neutral = 0,
            surprised = 0,
@@ -186,9 +186,9 @@ function occupant_joined(event)
        -- and skip focus if sneaked into the table
        if values.nick ~= nil and values.nick ~= 'focus' then
            local totalDominantSpeakerTime = values.totalDominantSpeakerTime;
-            local facialExpressions = values.facialExpressions;
+            local faceExpressions = values.faceExpressions;
            if totalDominantSpeakerTime > 0 or room:get_occupant_jid(jid) == nil or values:isDominantSpeaker()
-                or get_participant_expressions_count(facialExpressions) > 0 then
+                or get_participant_expressions_count(faceExpressions) > 0 then
                -- before sending we need to calculate current dominant speaker state
                if values:isDominantSpeaker() then
                    local timeElapsed = math.floor(socket.gettime()*1000 - values._dominantSpeakerStart);
@@ -198,7 +198,7 @@ function occupant_joined(event)
                users_json[values.nick] = {
                    displayName = values.displayName,
                    totalDominantSpeakerTime = totalDominantSpeakerTime,
-                    facialExpressions = facialExpressions
+                    faceExpressions = faceExpressions
                };
            end
        end
@@ -285,9 +285,9 @@ else
    process_host(muc_component_host);
 end
 
-function get_participant_expressions_count(facialExpressions)
+function get_participant_expressions_count(faceExpressions)
    local count = 0;
-    for expression, value in pairs(facialExpressions) do
+    for expression, value in pairs(faceExpressions) do
        count = count + value;
    end
 
@@ -386,11 +386,11 @@ module.exports = (_env, argv) => {
        }),
        Object.assign({}, config, {
            entry: {
-                'facial-expressions-worker': './react/features/facial-recognition/facialExpressionsWorker.js'
+                'face-landmarks-worker': './react/features/face-landmarks/faceLandmarksWorker.js'
            },
            plugins: [
                ...config.plugins,
-                ...getBundleAnalyzerPlugin(analyzeBundle, 'facial-expressions-worker')
+                ...getBundleAnalyzerPlugin(analyzeBundle, 'face-landmarks-worker')
            ],
            performance: getPerformanceHints(perfHintOptions, 1024 * 1024 * 1.5)
        })
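A cross-file detail worth keeping in mind when reading this commit: the webpack entry name above has to stay in sync with the worker URL built in react/features/face-landmarks/actions.js and with the bundle copied by the Makefile's deploy-appbundle target; all three were renamed together here. A condensed sketch of the pairing (the .min.js suffix follows the existing bundle naming):

// webpack.config.js (this commit)
entry: { 'face-landmarks-worker': './react/features/face-landmarks/faceLandmarksWorker.js' }

// react/features/face-landmarks/actions.js (this commit)
const baseUrl = `${getBaseUrl()}libs/`;
let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;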