feat(facial-expressions): send facial expressions to webhook endpoint (#10585)
This commit is contained in: parent da2b920dbe, commit b9e182b7cc
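In short: every expression the detection worker reports is appended, together with a timestamp, to a new facialExpressionsBuffer in Redux, and a recurring timer flushes that buffer to the webhook proxy, clearing it only after a successful send. A minimal sketch of the loop this commit wires into startFacialRecognition (an illustrative paraphrase of the diff below, not the exact code):

    sendInterval = setInterval(async () => {
        // POST the buffered { emotion, timestamp } entries to `${webhookProxyUrl}/emotions`.
        const sent = await sendFacialExpressionsWebhook(getState());

        if (sent) {
            // Drop the buffer only once the webhook confirms receipt,
            // so unsent entries are retried on the next tick.
            dispatch(clearFacialExpressionBuffer());
        }
    }, WEBHOOK_SEND_TIME_INTERVAL);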
@@ -39,3 +39,21 @@ export const START_FACIAL_RECOGNITION = 'START_FACIAL_RECOGNITION';
  * }
  */
 export const STOP_FACIAL_RECOGNITION = 'STOP_FACIAL_RECOGNITION';
+
+/**
+ * Redux action type dispatched in order to clear the facial expressions buffer in the state.
+ *
+ * {
+ *     type: CLEAR_FACIAL_EXPRESSIONS_BUFFER
+ * }
+ */
+export const CLEAR_FACIAL_EXPRESSIONS_BUFFER = 'CLEAR_FACIAL_EXPRESSIONS_BUFFER';
+
+/**
+ * Redux action type dispatched in order to add an expression to the facial expressions buffer.
+ *
+ * {
+ *     type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER
+ * }
+ */
+export const ADD_TO_FACIAL_EXPRESSIONS_BUFFER = 'ADD_TO_FACIAL_EXPRESSIONS_BUFFER';
@@ -6,23 +6,20 @@ import './createImageBitmap';
 
 import {
     ADD_FACIAL_EXPRESSION,
+    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
+    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
     SET_DETECTION_TIME_INTERVAL,
     START_FACIAL_RECOGNITION,
     STOP_FACIAL_RECOGNITION
 } from './actionTypes';
-import { sendDataToWorker } from './functions';
+import {
+    CPU_TIME_INTERVAL,
+    WEBGL_TIME_INTERVAL,
+    WEBHOOK_SEND_TIME_INTERVAL
+} from './constants';
+import { sendDataToWorker, sendFacialExpressionsWebhook } from './functions';
 import logger from './logger';
 
-/**
- * Time used for detection interval when facial expressions worker uses webgl backend.
- */
-const WEBGL_TIME_INTERVAL = 1000;
-
-/**
- * Time used for detection interval when facial expressions worker uses cpu backend.
- */
-const CPU_TIME_INTERVAL = 6000;
-
 /**
  * Object containing an image capture of the local track.
  */
@@ -38,12 +35,22 @@ let worker;
  */
 let lastFacialExpression;
 
+/**
+ * The last facial expression timestamp.
+ */
+let lastFacialExpressionTimestamp;
+
 /**
  * How many duplicate consecutive expressions occurred.
  * If an expression different from the last one is detected, the count is reset to 0.
  */
 let duplicateConsecutiveExpressions = 0;
 
+/**
+ * Variable that keeps the interval for sending expressions to the webhook.
+ */
+let sendInterval;
+
 /**
  * Loads the worker that predicts the facial expression.
  *
@@ -95,9 +102,17 @@ export function loadWorker() {
             if (value === lastFacialExpression) {
                 duplicateConsecutiveExpressions++;
             } else {
-                lastFacialExpression
-                    && dispatch(addFacialExpression(lastFacialExpression, duplicateConsecutiveExpressions + 1));
+                if (lastFacialExpression && lastFacialExpressionTimestamp) {
+                    dispatch(
+                        addFacialExpression(
+                            lastFacialExpression,
+                            duplicateConsecutiveExpressions + 1,
+                            lastFacialExpressionTimestamp
+                        )
+                    );
+                }
                 lastFacialExpression = value;
+                lastFacialExpressionTimestamp = Date.now();
                 duplicateConsecutiveExpressions = 0;
             }
         }
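To make the counting concrete, a hypothetical trace of the branch above, assuming the worker reports 'happy' on three consecutive detection ticks and then 'sad':

    // tick 1: value = 'happy' -> lastFacialExpression = 'happy',
    //         lastFacialExpressionTimestamp = Date.now(), counter stays 0
    // tick 2: value = 'happy' -> duplicateConsecutiveExpressions = 1
    // tick 3: value = 'happy' -> duplicateConsecutiveExpressions = 2
    // tick 4: value = 'sad'   -> dispatch(addFacialExpression('happy', 3, <tick-1 timestamp>)),
    //         then 'sad' replaces lastFacialExpression and the counter resets to 0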
@@ -143,9 +158,15 @@ export function startFacialRecognition() {
 
         // $FlowFixMe
         imageCapture = new ImageCapture(firstVideoTrack);
 
         sendDataToWorker(worker, imageCapture);
+        sendInterval = setInterval(async () => {
+            const result = await sendFacialExpressionsWebhook(getState());
+
+            if (result) {
+                dispatch(clearFacialExpressionBuffer());
+            }
+        }, WEBHOOK_SEND_TIME_INTERVAL);
     };
 }
@@ -169,9 +190,21 @@ export function stopFacialRecognition() {
             id: 'CLEAR_TIMEOUT'
         });
 
-        lastFacialExpression
-            && dispatch(addFacialExpression(lastFacialExpression, duplicateConsecutiveExpressions + 1));
+        if (lastFacialExpression && lastFacialExpressionTimestamp) {
+            dispatch(
+                addFacialExpression(
+                    lastFacialExpression,
+                    duplicateConsecutiveExpressions + 1,
+                    lastFacialExpressionTimestamp
+                )
+            );
+        }
         duplicateConsecutiveExpressions = 0;
+
+        if (sendInterval) {
+            clearInterval(sendInterval);
+            sendInterval = null;
+        }
         dispatch({ type: STOP_FACIAL_RECOGNITION });
         logger.log('Stop face recognition');
     };
@@ -215,9 +248,10 @@ export function changeTrack(track: Object) {
  *
  * @param {string} facialExpression - Facial expression to be added.
  * @param {number} duration - Duration in seconds of the facial expression.
+ * @param {number} timestamp - Timestamp at which the facial expression was first detected.
  * @returns {Object}
  */
-function addFacialExpression(facialExpression: string, duration: number) {
+function addFacialExpression(facialExpression: string, duration: number, timestamp: number) {
     return function(dispatch: Function, getState: Function) {
         const { detectionTimeInterval } = getState()['features/facial-recognition'];
         let finalDuration = duration;
@@ -228,7 +262,8 @@ function addFacialExpression(facialExpression: string, duration: number) {
         dispatch({
             type: ADD_FACIAL_EXPRESSION,
             facialExpression,
-            duration: finalDuration
+            duration: finalDuration,
+            timestamp
         });
     };
 }
@@ -245,3 +280,27 @@ function setDetectionTimeInterval(time: number) {
         time
     };
 }
+
+/**
+ * Adds a facial expression with its timestamp to the facial expressions buffer.
+ *
+ * @param {Object} facialExpression - Object containing the facial expression string and its timestamp.
+ * @returns {Object}
+ */
+export function addToFacialExpressionsBuffer(facialExpression: Object) {
+    return {
+        type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
+        facialExpression
+    };
+}
+
+/**
+ * Clears the facial expressions array in the state.
+ *
+ * @returns {Object}
+ */
+function clearFacialExpressionBuffer() {
+    return {
+        type: CLEAR_FACIAL_EXPRESSIONS_BUFFER
+    };
+}
@@ -9,3 +9,18 @@ export const FACIAL_EXPRESSION_EMOJIS = {
     fearful: '😨',
     disgusted: '🤢'
 };
+
+/**
+ * Time used for detection interval when facial expressions worker uses webgl backend.
+ */
+export const WEBGL_TIME_INTERVAL = 1000;
+
+/**
+ * Time used for detection interval when facial expressions worker uses cpu backend.
+ */
+export const CPU_TIME_INTERVAL = 6000;
+
+/**
+ * Time in ms used for sending expressions to the webhook.
+ */
+export const WEBHOOK_SEND_TIME_INTERVAL = 15000;
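Taken together, these constants give a rough upper bound on how many entries accumulate per webhook batch (an entry is only buffered when the detected expression changes, so real batches are usually smaller):

    // WebGL backend: WEBHOOK_SEND_TIME_INTERVAL / WEBGL_TIME_INTERVAL = 15000 / 1000 -> up to ~15 entries per send
    // CPU backend:   WEBHOOK_SEND_TIME_INTERVAL / CPU_TIME_INTERVAL   = 15000 / 6000 -> up to ~2-3 entries per send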
@@ -1,4 +1,7 @@
 // @flow
+import { getLocalParticipant } from '../base/participants';
+import { extractFqnFromPath } from '../dynamic-branding';
+
 import logger from './logger';
 
 /**
@@ -49,6 +52,60 @@ export function sendFacialExpressionToServer(
     }
 }
 
+/**
+ * Sends facial expressions to the backend.
+ *
+ * @param {Object} state - Redux state.
+ * @returns {boolean} - True if sent, false otherwise.
+ */
+export async function sendFacialExpressionsWebhook(state: Object) {
+    const { webhookProxyUrl: url } = state['features/base/config'];
+    const { conference } = state['features/base/conference'];
+    const { jwt } = state['features/base/jwt'];
+    const { connection } = state['features/base/connection'];
+    const jid = connection.getJid();
+    const localParticipant = getLocalParticipant(state);
+    const { facialExpressionsBuffer } = state['features/facial-recognition'];
+
+    if (facialExpressionsBuffer.length === 0) {
+        return false;
+    }
+
+    const headers = {
+        ...jwt ? { 'Authorization': `Bearer ${jwt}` } : {},
+        'Content-Type': 'application/json'
+    };
+
+    const reqBody = {
+        meetingFqn: extractFqnFromPath(),
+        sessionId: conference.sessionId,
+        submitted: Date.now(),
+        emotions: facialExpressionsBuffer,
+        participantId: localParticipant.jwtId,
+        participantName: localParticipant.name,
+        participantJid: jid
+    };
+
+    if (url) {
+        try {
+            const res = await fetch(`${url}/emotions`, {
+                method: 'POST',
+                headers,
+                body: JSON.stringify(reqBody)
+            });
+
+            if (res.ok) {
+                return true;
+            }
+            logger.error('Status error:', res.status);
+        } catch (err) {
+            logger.error('Could not send request', err);
+        }
+    }
+
+    return false;
+}
+
 /**
  * Sends the image data of a canvas, created from the track in the image capture, to the facial expression worker.
  *
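For reference, a request produced by sendFacialExpressionsWebhook would look roughly like this (annotated sketch, all values hypothetical; the field names are the ones assembled in reqBody above):

    POST ${webhookProxyUrl}/emotions
    Authorization: Bearer <jwt>              // only when a JWT is present
    Content-Type: application/json

    {
        "meetingFqn": "mytenant/myroom",     // extractFqnFromPath()
        "sessionId": "f1c72cd0-...",         // conference.sessionId
        "submitted": 1638360000000,          // Date.now() at send time
        "emotions": [
            { "emotion": "happy", "timestamp": 1638359985000 },
            { "emotion": "neutral", "timestamp": 1638359991000 }
        ],
        "participantId": "abcd1234",         // localParticipant.jwtId
        "participantName": "Jane Doe",
        "participantJid": "jane@example.com/abcd"
    }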
@@ -12,6 +12,7 @@ import { VIRTUAL_BACKGROUND_TRACK_CHANGED } from '../virtual-background/actionTy
 
 import { ADD_FACIAL_EXPRESSION } from './actionTypes';
 import {
+    addToFacialExpressionsBuffer,
     changeTrack,
     loadWorker,
     resetTrack,
@@ -96,6 +97,10 @@ MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
             sendFacialExpressionToParticipants(conference, action.facialExpression, action.duration);
         }
         sendFacialExpressionToServer(conference, action.facialExpression, action.duration);
+        dispatch(addToFacialExpressionsBuffer({
+            emotion: action.facialExpression,
+            timestamp: action.timestamp
+        }));
 
         return next(action);
     }
@@ -4,6 +4,8 @@ import { ReducerRegistry } from '../base/redux';
 
 import {
     ADD_FACIAL_EXPRESSION,
+    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
+    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
     SET_DETECTION_TIME_INTERVAL,
     START_FACIAL_RECOGNITION,
     STOP_FACIAL_RECOGNITION
@@ -19,6 +21,7 @@ const defaultState = {
         disgusted: 0,
         sad: 0
     },
+    facialExpressionsBuffer: [],
     detectionTimeInterval: -1,
     recognitionActive: false
 };
@@ -30,6 +33,18 @@ ReducerRegistry.register('features/facial-recognition', (state = defaultState, a
 
         return state;
     }
+    case ADD_TO_FACIAL_EXPRESSIONS_BUFFER: {
+        return {
+            ...state,
+            facialExpressionsBuffer: [ ...state.facialExpressionsBuffer, action.facialExpression ]
+        };
+    }
+    case CLEAR_FACIAL_EXPRESSIONS_BUFFER: {
+        return {
+            ...state,
+            facialExpressionsBuffer: []
+        };
+    }
     case SET_DETECTION_TIME_INTERVAL: {
         return {
             ...state,
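The two new reducer cases give the buffer simple append/reset semantics; a hypothetical round trip:

    // state.facialExpressionsBuffer === [ { emotion: 'happy', timestamp: 1000 } ]
    // ADD_TO_FACIAL_EXPRESSIONS_BUFFER with facialExpression = { emotion: 'sad', timestamp: 2000 }
    //   -> [ { emotion: 'happy', timestamp: 1000 }, { emotion: 'sad', timestamp: 2000 } ]
    // CLEAR_FACIAL_EXPRESSIONS_BUFFER (dispatched after a successful webhook send)
    //   -> []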