2021-11-17 14:33:03 +00:00
|
|
|
// @flow
|
2021-12-21 11:46:54 +00:00
|
|
|
import { getLocalParticipant } from '../base/participants';
|
2022-05-23 15:02:14 +00:00
|
|
|
import { extractFqnFromPath } from '../dynamic-branding/functions.any';
|
2021-12-21 11:46:54 +00:00
|
|
|
|
2022-04-04 13:09:14 +00:00
|
|
|
import { DETECT_FACE, FACE_BOX_EVENT_TYPE, SEND_IMAGE_INTERVAL_MS } from './constants';
|
2021-11-17 14:33:03 +00:00
|
|
|
import logger from './logger';
|
|
|
|
|
2022-04-04 13:09:14 +00:00
|
|
|
// Fallback 2D canvas used to extract pixel data from grabbed video frames
// when OffscreenCanvas is not supported (consumed by sendDataToWorker below).
let canvas;

// 2D rendering context of the fallback canvas above.
let context;

// Only create the DOM canvas on environments without OffscreenCanvas; when
// OffscreenCanvas exists the bitmap is posted to the worker directly and
// these module-level fallbacks stay undefined.
if (typeof OffscreenCanvas === 'undefined') {
    canvas = document.createElement('canvas');
    context = canvas.getContext('2d');
}
|
|
|
|
|
2021-11-17 14:33:03 +00:00
|
|
|
/**
|
2022-04-06 09:10:31 +00:00
|
|
|
* Sends the face expression with its duration to all the other participants.
|
2021-11-17 14:33:03 +00:00
|
|
|
*
|
|
|
|
* @param {Object} conference - The current conference.
|
2022-04-06 09:10:31 +00:00
|
|
|
* @param {string} faceExpression - Face expression to be sent.
|
|
|
|
* @param {number} duration - The duration of the face expression in seconds.
|
2021-11-17 14:33:03 +00:00
|
|
|
* @returns {void}
|
|
|
|
*/
|
2022-04-06 09:10:31 +00:00
|
|
|
export function sendFaceExpressionToParticipants(
|
2021-11-17 14:33:03 +00:00
|
|
|
conference: Object,
|
2022-04-06 09:10:31 +00:00
|
|
|
faceExpression: string,
|
2021-11-17 14:33:03 +00:00
|
|
|
duration: number
|
|
|
|
): void {
|
|
|
|
try {
|
|
|
|
conference.sendEndpointMessage('', {
|
2022-04-06 09:10:31 +00:00
|
|
|
type: 'face_landmark',
|
|
|
|
faceExpression,
|
2021-11-17 14:33:03 +00:00
|
|
|
duration
|
|
|
|
});
|
|
|
|
} catch (err) {
|
2022-04-06 09:10:31 +00:00
|
|
|
logger.warn('Could not broadcast the face expression to the other participants', err);
|
2021-11-17 14:33:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2022-04-04 13:09:14 +00:00
|
|
|
/**
|
|
|
|
* Sends the face box to all the other participants.
|
|
|
|
*
|
|
|
|
* @param {Object} conference - The current conference.
|
|
|
|
* @param {Object} faceBox - Face box to be sent.
|
|
|
|
* @returns {void}
|
|
|
|
*/
|
|
|
|
export function sendFaceBoxToParticipants(
|
|
|
|
conference: Object,
|
|
|
|
faceBox: Object
|
|
|
|
): void {
|
|
|
|
try {
|
|
|
|
conference.sendEndpointMessage('', {
|
|
|
|
type: FACE_BOX_EVENT_TYPE,
|
|
|
|
faceBox
|
|
|
|
});
|
|
|
|
} catch (err) {
|
|
|
|
logger.warn('Could not broadcast the face box to the other participants', err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-17 14:33:03 +00:00
|
|
|
/**
|
2022-04-06 09:10:31 +00:00
|
|
|
* Sends the face expression with its duration to xmpp server.
|
2021-11-17 14:33:03 +00:00
|
|
|
*
|
|
|
|
* @param {Object} conference - The current conference.
|
2022-04-06 09:10:31 +00:00
|
|
|
* @param {string} faceExpression - Face expression to be sent.
|
|
|
|
* @param {number} duration - The duration of the face expression in seconds.
|
2021-11-17 14:33:03 +00:00
|
|
|
* @returns {void}
|
|
|
|
*/
|
2022-04-06 09:10:31 +00:00
|
|
|
export function sendFaceExpressionToServer(
|
2021-11-17 14:33:03 +00:00
|
|
|
conference: Object,
|
2022-04-06 09:10:31 +00:00
|
|
|
faceExpression: string,
|
2021-11-17 14:33:03 +00:00
|
|
|
duration: number
|
|
|
|
): void {
|
|
|
|
try {
|
2022-04-06 09:10:31 +00:00
|
|
|
conference.sendFaceLandmarks({
|
|
|
|
faceExpression,
|
2021-11-17 14:33:03 +00:00
|
|
|
duration
|
|
|
|
});
|
|
|
|
} catch (err) {
|
2022-04-06 09:10:31 +00:00
|
|
|
logger.warn('Could not send the face expression to xmpp server', err);
|
2021-11-17 14:33:03 +00:00
|
|
|
}
|
2021-12-21 11:46:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2022-04-06 09:10:31 +00:00
|
|
|
* Sends face expression to backend.
|
2021-12-21 11:46:54 +00:00
|
|
|
*
|
|
|
|
* @param {Object} state - Redux state.
|
|
|
|
* @returns {boolean} - True if sent, false otherwise.
|
|
|
|
*/
|
2022-04-06 09:10:31 +00:00
|
|
|
export async function sendFaceExpressionsWebhook(state: Object) {
|
2021-12-21 11:46:54 +00:00
|
|
|
const { webhookProxyUrl: url } = state['features/base/config'];
|
|
|
|
const { conference } = state['features/base/conference'];
|
|
|
|
const { jwt } = state['features/base/jwt'];
|
|
|
|
const { connection } = state['features/base/connection'];
|
|
|
|
const jid = connection.getJid();
|
|
|
|
const localParticipant = getLocalParticipant(state);
|
2022-04-06 09:10:31 +00:00
|
|
|
const { faceExpressionsBuffer } = state['features/face-landmarks'];
|
2021-12-21 11:46:54 +00:00
|
|
|
|
2022-04-06 09:10:31 +00:00
|
|
|
if (faceExpressionsBuffer.length === 0) {
|
2021-12-21 11:46:54 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
const headers = {
|
|
|
|
...jwt ? { 'Authorization': `Bearer ${jwt}` } : {},
|
|
|
|
'Content-Type': 'application/json'
|
|
|
|
};
|
|
|
|
|
|
|
|
const reqBody = {
|
|
|
|
meetingFqn: extractFqnFromPath(),
|
|
|
|
sessionId: conference.sessionId,
|
|
|
|
submitted: Date.now(),
|
2022-04-06 09:10:31 +00:00
|
|
|
emotions: faceExpressionsBuffer,
|
2021-12-21 11:46:54 +00:00
|
|
|
participantId: localParticipant.jwtId,
|
|
|
|
participantName: localParticipant.name,
|
|
|
|
participantJid: jid
|
|
|
|
};
|
|
|
|
|
|
|
|
if (url) {
|
|
|
|
try {
|
|
|
|
const res = await fetch(`${url}/emotions`, {
|
|
|
|
method: 'POST',
|
|
|
|
headers,
|
|
|
|
body: JSON.stringify(reqBody)
|
|
|
|
});
|
|
|
|
|
|
|
|
if (res.ok) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
logger.error('Status error:', res.status);
|
|
|
|
} catch (err) {
|
|
|
|
logger.error('Could not send request', err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2021-11-17 14:33:03 +00:00
|
|
|
}
|
|
|
|
|
2022-04-04 13:09:14 +00:00
|
|
|
|
2021-11-17 14:33:03 +00:00
|
|
|
/**
|
2022-04-04 13:09:14 +00:00
|
|
|
* Sends the image data a canvas from the track in the image capture to the face recognition worker.
|
2021-11-17 14:33:03 +00:00
|
|
|
*
|
2022-04-04 13:09:14 +00:00
|
|
|
* @param {Worker} worker - Face recognition worker.
|
2021-11-17 14:33:03 +00:00
|
|
|
* @param {Object} imageCapture - Image capture that contains the current track.
|
2022-04-04 13:09:14 +00:00
|
|
|
* @param {number} threshold - Movement threshold as percentage for sharing face coordinates.
|
2021-11-17 14:33:03 +00:00
|
|
|
* @returns {Promise<void>}
|
|
|
|
*/
|
|
|
|
export async function sendDataToWorker(
|
|
|
|
worker: Worker,
|
2022-04-04 13:09:14 +00:00
|
|
|
imageCapture: Object,
|
|
|
|
threshold: number = 10
|
2021-11-17 14:33:03 +00:00
|
|
|
): Promise<void> {
|
|
|
|
if (imageCapture === null || imageCapture === undefined) {
|
|
|
|
return;
|
|
|
|
}
|
2022-04-04 13:09:14 +00:00
|
|
|
|
2021-11-17 14:33:03 +00:00
|
|
|
let imageBitmap;
|
2022-04-04 13:09:14 +00:00
|
|
|
let image;
|
2021-11-17 14:33:03 +00:00
|
|
|
|
|
|
|
try {
|
|
|
|
imageBitmap = await imageCapture.grabFrame();
|
|
|
|
} catch (err) {
|
|
|
|
logger.warn(err);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-04-04 13:09:14 +00:00
|
|
|
if (typeof OffscreenCanvas === 'undefined') {
|
|
|
|
canvas.width = imageBitmap.width;
|
|
|
|
canvas.height = imageBitmap.height;
|
|
|
|
context.drawImage(imageBitmap, 0, 0);
|
|
|
|
|
|
|
|
image = context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
|
|
|
|
} else {
|
|
|
|
image = imageBitmap;
|
|
|
|
}
|
|
|
|
|
2021-11-17 14:33:03 +00:00
|
|
|
worker.postMessage({
|
2022-04-04 13:09:14 +00:00
|
|
|
type: DETECT_FACE,
|
|
|
|
image,
|
|
|
|
threshold
|
2021-11-17 14:33:03 +00:00
|
|
|
});
|
2022-04-04 13:09:14 +00:00
|
|
|
|
|
|
|
imageBitmap.close();
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Gets face box for a participant id.
|
|
|
|
*
|
|
|
|
* @param {string} id - The participant id.
|
|
|
|
* @param {Object} state - The redux state.
|
|
|
|
* @returns {Object}
|
|
|
|
*/
|
|
|
|
function getFaceBoxForId(id: string, state: Object) {
|
2022-04-06 09:10:31 +00:00
|
|
|
return state['features/face-landmarks'].faceBoxes[id];
|
2022-04-04 13:09:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Gets the video object position for a participant id.
|
|
|
|
*
|
|
|
|
* @param {Object} state - The redux state.
|
|
|
|
* @param {string} id - The participant id.
|
|
|
|
* @returns {string} - CSS object-position in the shape of '{horizontalPercentage}% {verticalPercentage}%'.
|
|
|
|
*/
|
|
|
|
export function getVideoObjectPosition(state: Object, id: string) {
|
|
|
|
const faceBox = getFaceBoxForId(id, state);
|
|
|
|
|
|
|
|
if (faceBox) {
|
|
|
|
const { right, width } = faceBox;
|
|
|
|
|
|
|
|
return `${right - (width / 2)}% 50%`;
|
|
|
|
}
|
|
|
|
|
|
|
|
return '50% 50%';
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Gets the video object position for a participant id.
|
|
|
|
*
|
|
|
|
* @param {Object} state - The redux state.
|
|
|
|
* @returns {number} - Number of miliseconds for doing face detection.
|
|
|
|
*/
|
|
|
|
export function getDetectionInterval(state: Object) {
|
2022-04-06 09:10:31 +00:00
|
|
|
const { faceLandmarks } = state['features/base/config'];
|
2022-04-04 13:09:14 +00:00
|
|
|
|
2022-04-06 09:10:31 +00:00
|
|
|
return Math.max(faceLandmarks?.captureInterval || SEND_IMAGE_INTERVAL_MS);
|
2021-11-17 14:33:03 +00:00
|
|
|
}
|